hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace7a67ccf534fdfcf03eaeddc3913c2165314f8 | 148 | py | Python | erichek/__init__.py | Kristinita/SashaPyromaDebugging | f2d48b8a84ef8d8a2fb616f6db06776bb66cdc37 | [
"MIT"
] | null | null | null | erichek/__init__.py | Kristinita/SashaPyromaDebugging | f2d48b8a84ef8d8a2fb616f6db06776bb66cdc37 | [
"MIT"
] | null | null | null | erichek/__init__.py | Kristinita/SashaPyromaDebugging | f2d48b8a84ef8d8a2fb616f6db06776bb66cdc37 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author: SashaChernykh
# @Date: 2018-01-20 16:08:25
# @Last Modified time: 2018-01-26 17:35:07
"""Initialization file."""
| 24.666667 | 42 | 0.641892 |
ace7a7b154323fd961bb961b825000f69c6772a1 | 352 | py | Python | flow018.py | Mandar-Gajbhiye15/competitive-programming-questions | a9a36d5bf09985e84ea9e52154a73d30665261f1 | [
"MIT"
] | null | null | null | flow018.py | Mandar-Gajbhiye15/competitive-programming-questions | a9a36d5bf09985e84ea9e52154a73d30665261f1 | [
"MIT"
] | null | null | null | flow018.py | Mandar-Gajbhiye15/competitive-programming-questions | a9a36d5bf09985e84ea9e52154a73d30665261f1 | [
"MIT"
] | null | null | null | # t = int(input())
# fact = 1
# i = 1
# x = 0
# list1 = []
# for _ in range(t):
# n = int(input())
# if n>0:
# fact = fact * n
# n-+1
# list1.append(fact)
# elif n == 0:
# list1.append(fact)
# else :
# list1.append(fact=0)
# for x in range(len(list1)):
# print(list1[x])
| 19.555556 | 31 | 0.411932 |
ace7a7d7a3b73ed21691c08dcef08fd167a31431 | 202 | py | Python | bonfo/__init__.py | destos/bonfo | ac6052769f873c743ca14cffbc6d348b321495b7 | [
"MIT"
] | null | null | null | bonfo/__init__.py | destos/bonfo | ac6052769f873c743ca14cffbc6d348b321495b7 | [
"MIT"
] | 1 | 2022-03-31T01:54:53.000Z | 2022-03-31T01:54:53.000Z | bonfo/__init__.py | destos/bonfo | ac6052769f873c743ca14cffbc6d348b321495b7 | [
"MIT"
] | null | null | null | """Top-level package for Bonfo."""
__author__ = """Patrick Forringer"""
__email__ = "patrick@forringer.com"
__version__ = "0.1.0"
from .board import Board # noqa
from .profile import Profile # noqa
| 22.444444 | 36 | 0.707921 |
ace7a8cc0c301038598b914d636c31f55646c107 | 3,717 | py | Python | drf_actions/migrations/0001_initial.py | speechki-book/drf-actions | 7effcf8df2c47ce6a1028ad86f252b3c6378c371 | [
"MIT"
] | null | null | null | drf_actions/migrations/0001_initial.py | speechki-book/drf-actions | 7effcf8df2c47ce6a1028ad86f252b3c6378c371 | [
"MIT"
] | null | null | null | drf_actions/migrations/0001_initial.py | speechki-book/drf-actions | 7effcf8df2c47ce6a1028ad86f252b3c6378c371 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.13 on 2020-06-18 10:39
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="ActionContentType",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="created",
),
),
(
"modified",
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="modified",
),
),
(
"content_type",
models.CharField(
choices=[("book", "book"), ("user", "user")],
db_index=True,
max_length=50,
),
),
("table", models.CharField(max_length=100)),
],
options={"abstract": False,},
),
migrations.CreateModel(
name="EventJournal",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="created",
),
),
(
"modified",
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="modified",
),
),
(
"reason",
models.CharField(
choices=[
("create", "create"),
("update", "update"),
("delete", "delete"),
],
db_index=True,
max_length=30,
),
),
("object_id", models.CharField(db_index=True, max_length=100)),
(
"content_type",
models.CharField(
choices=[("book", "book"), ("user", "user")],
db_index=True,
max_length=50,
),
),
(
"data",
django.contrib.postgres.fields.jsonb.JSONField(
blank=True, null=True
),
),
],
options={"abstract": False,},
),
]
| 32.321739 | 79 | 0.350282 |
ace7aa10f22027ce97f0b2190d9ae0e91eceb141 | 3,183 | py | Python | statusbar.py | tushar176/Notepad- | ba6b82276a784868f1feaebd13ed63ebfaf1a4d0 | [
"MIT"
] | 1 | 2020-06-23T17:13:42.000Z | 2020-06-23T17:13:42.000Z | statusbar.py | tushar176/Notepad-plus | ba6b82276a784868f1feaebd13ed63ebfaf1a4d0 | [
"MIT"
] | null | null | null | statusbar.py | tushar176/Notepad-plus | ba6b82276a784868f1feaebd13ed63ebfaf1a4d0 | [
"MIT"
] | null | null | null | from tkinter import BOTTOM
from tkinter import ttk
'''Here statusbar and functionality of linebar is defined '''
class Statusbar:
def __init__(self, windows, text_write,linebar):
self.root = windows
self.text_write = text_write
self.linebar=linebar
self.status_bar = ttk.Label(self.root, text='Status Bar')
self.status_bar.pack(side=BOTTOM)
# text_changed -> To work with exit() in menu.py
self.text_changed = False
# '<<Modified>>' -> any change(i,.e; text in text editor)
self.text_write.text_editor.bind('<<Modified>>', self.changed)
#-----------------------------------------------Functions----------------------------------------------------------
#to get the Number of Lines
def get_line_numbers(self):
output = ''
row, col = self.text_write.text_editor.index("end").split('.') #row give the no of row in text
#print(int(row)-1)
for i in range(1, int(row)):
output += str(i) + '\n' #making a string with row no. with \n(next line) [for displaying purpose]
#print(output)
return output
#to dispaly the Number of line at the line Bar text field
def update_line_numbers(self):
line_numbers = self.get_line_numbers()
self.linebar.line_number_bar.config(state='normal')
#changing the font size anf family according to the text editor, if not changed then allignment get disturbed
self.linebar.line_number_bar.config(font=(self.text_write.current_font_family,self.text_write.current_font_size,'normal'))
self.linebar.line_number_bar.delete('1.0', 'end')
self.linebar.line_number_bar.insert('1.0', line_numbers)
self.linebar.line_number_bar.config(state='disabled')
#check whether is there and changes in the text field,if any thwn change the values of statusbar & Linebar
def changed(self, event=None):
# Change in text editor(i.e; empty to increase in characters)
if self.text_write.text_editor.edit_modified():
self.text_changed = True # text editor has some text
self.text_write.text_editor.get(1.0, 'end-1c').split() #(start - end-1char) > This delete last char
# because of it count when line changes
# count word through split() -> for each word
words = len(self.text_write.text_editor.get(1.0, 'end-1c').split())
#count Characters
characters = len(self.text_write.text_editor.get(1.0, 'end-1c'))
#count Lines
row, col = self.text_write.text_editor.index("end").split('.')
# Shows count of chars & words & Number of Lines
self.status_bar.config(text=f'Lines: {int(row)-1} Characters: {characters} Words: {words}')
self.status_bar.config(anchor='e')
self.update_line_numbers()
# to make code again wheneven there is change in text field
self.text_write.text_editor.edit_modified(False)
| 47.507463 | 131 | 0.597235 |
ace7aa3492e6477b87a8ff050110c5c9c9aab60b | 164 | py | Python | metroclima/__main__.py | jonathadv/metroclima-cli | 1511c3495e4f7c2808eabf6f969d0a067002cc02 | [
"MIT"
] | null | null | null | metroclima/__main__.py | jonathadv/metroclima-cli | 1511c3495e4f7c2808eabf6f969d0a067002cc02 | [
"MIT"
] | null | null | null | metroclima/__main__.py | jonathadv/metroclima-cli | 1511c3495e4f7c2808eabf6f969d0a067002cc02 | [
"MIT"
] | null | null | null | """
Metroclima database CLI tool
CLI caller
"""
from .cli import run
if __name__ == '__main__':
try:
run()
except KeyboardInterrupt:
pass
| 12.615385 | 29 | 0.615854 |
ace7ab1c03480ac4b4f41e3fb954c1d488666de5 | 247 | py | Python | trustpayments/models/failure_category.py | TrustPayments/python-sdk | 6fde6eb8cfce270c3612a2903a845c13018c3bb9 | [
"Apache-2.0"
] | 2 | 2020-01-16T13:24:06.000Z | 2020-11-21T17:40:17.000Z | postfinancecheckout/models/failure_category.py | pfpayments/python-sdk | b8ef159ea3c843a8d0361d1e0b122a9958adbcb4 | [
"Apache-2.0"
] | 4 | 2019-10-14T17:33:23.000Z | 2021-10-01T14:49:11.000Z | postfinancecheckout/models/failure_category.py | pfpayments/python-sdk | b8ef159ea3c843a8d0361d1e0b122a9958adbcb4 | [
"Apache-2.0"
] | 2 | 2019-10-15T14:17:10.000Z | 2021-09-17T13:07:09.000Z | # coding: utf-8
from enum import Enum, unique
@unique
class FailureCategory(Enum):
TEMPORARY_ISSUE = "TEMPORARY_ISSUE"
INTERNAL = "INTERNAL"
END_USER = "END_USER"
CONFIGURATION = "CONFIGURATION"
DEVELOPER = "DEVELOPER"
| 17.642857 | 39 | 0.688259 |
ace7ab20469ce3c9c186863d25163e21e63a8f7d | 763 | py | Python | src/958.py | zhaoyi3264/leetcode-solutions | 1a3a2d441cdd07a17e80b0ea43b7b266844f530c | [
"MIT"
] | null | null | null | src/958.py | zhaoyi3264/leetcode-solutions | 1a3a2d441cdd07a17e80b0ea43b7b266844f530c | [
"MIT"
] | null | null | null | src/958.py | zhaoyi3264/leetcode-solutions | 1a3a2d441cdd07a17e80b0ea43b7b266844f530c | [
"MIT"
] | null | null | null | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def isCompleteTree(self, root: TreeNode) -> bool:
q = deque([root])
none = False
while q:
node = q.popleft()
if node.left:
if none:
return False
else:
q.append(node.left)
else:
none = True
if node.right:
if none:
return False
else:
q.append(node.right)
else:
none = True
return True
| 27.25 | 55 | 0.429882 |
ace7abc203fc5753eeb1943e1f4bd42cd358bf64 | 1,324 | py | Python | aes_cipher/file_writer.py | ebellocchia/aes_cipher | aac378f4073b85b9439bf5db080cfe070e9b393e | [
"MIT"
] | 1 | 2021-09-15T02:45:45.000Z | 2021-09-15T02:45:45.000Z | aes_cipher/file_writer.py | ebellocchia/aes_cipher | aac378f4073b85b9439bf5db080cfe070e9b393e | [
"MIT"
] | null | null | null | aes_cipher/file_writer.py | ebellocchia/aes_cipher | aac378f4073b85b9439bf5db080cfe070e9b393e | [
"MIT"
] | null | null | null | # Copyright (c) 2021 Emanuele Bellocchia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Classes
#
# File writer class
class FileWriter:
# Write file
@staticmethod
def Write(file_out: str,
data: bytes) -> None:
with open(file_out, "wb") as fout:
fout.write(data)
| 40.121212 | 79 | 0.743958 |
ace7abef2d454fbed4313f6c5f5a343005f7ca41 | 603 | py | Python | constructer/major-tsv/code.py | aslupin/KUnit-1 | 8274e0535f63a21b3d3d6174658fd4174103d714 | [
"MIT"
] | 1 | 2021-08-16T19:59:10.000Z | 2021-08-16T19:59:10.000Z | constructer/major-tsv/code.py | aslupin/KUnit-1 | 8274e0535f63a21b3d3d6174658fd4174103d714 | [
"MIT"
] | 7 | 2020-01-07T05:52:18.000Z | 2022-02-26T12:36:41.000Z | constructer/major-tsv/code.py | aslupin/KUnit-1 | 8274e0535f63a21b3d3d6174658fd4174103d714 | [
"MIT"
] | 3 | 2019-09-09T17:19:57.000Z | 2021-08-23T16:53:09.000Z | import json
x=open("upm.tsv").read()
y=open("stdList.tsv").read()
alx=x.split()
aly=y.split()
ansx=[]
for i in range (len(alx)):
if(len(alx[i]))<=2:
alx[i]=int(alx[i])
i=12
while i<len(alx):
lis=[]
if type(alx[i+1])!=type(alx[i]):
for j in range (6):
lis.append(alx[i+j])
ansx.append(lis)
i+=6
else:
ansx.append([alx[i]])
i+=1
dicx={}
for i in range (len(ansx)):
if len(ansx[i])==1:
dicx[ansx[i][0]]=[-1,-1,-1,-1,-1]
else:
pp=[]
for j in range (0,(len(ansx[i])-1)):
pp.append(ansx[i][j+1])
dicx[ansx[i][0]]=pp
print(json.dumps(dicx,indent=4, separators=(',', ': ')))
| 18.272727 | 56 | 0.562189 |
ace7abfd01da84cb5d85d5bb51538d256dd38990 | 500 | py | Python | froide/celery.py | ashmpace/question-mtl | 5ce1289cd6db0e629aa138d2dee235d9a4c4546b | [
"MIT"
] | null | null | null | froide/celery.py | ashmpace/question-mtl | 5ce1289cd6db0e629aa138d2dee235d9a4c4546b | [
"MIT"
] | 3 | 2020-02-11T21:43:56.000Z | 2021-11-15T17:47:06.000Z | froide/celery.py | ashmpace/question-mtl | 5ce1289cd6db0e629aa138d2dee235d9a4c4546b | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'froide.settings')
os.environ.setdefault("DJANGO_CONFIGURATION", "Dev")
from configurations import importer # noqa
importer.install(check_options=True)
from celery import Celery # noqa
from django.conf import settings # noqa
app = Celery('froide')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS, related_name='tasks')
| 26.315789 | 77 | 0.802 |
ace7ac9918254f645a5ab408f2b19c822bc86c5c | 1,271 | py | Python | setup.py | rafa-rod/pyettj | 79de1c4692480131fcc00e74fe18b7f9ab64a43e | [
"MIT"
] | 3 | 2021-05-21T23:39:30.000Z | 2021-06-25T03:17:48.000Z | setup.py | rafa-rod/pyettj | 79de1c4692480131fcc00e74fe18b7f9ab64a43e | [
"MIT"
] | 1 | 2021-06-18T22:08:48.000Z | 2021-06-27T06:52:37.000Z | setup.py | rafa-rod/pyettj | 79de1c4692480131fcc00e74fe18b7f9ab64a43e | [
"MIT"
] | 2 | 2021-05-27T22:49:44.000Z | 2021-06-18T16:27:43.000Z | #!/usr/bin/env python3
# _*_ coding:utf-8 _*_
from setuptools import setup
import os
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README_pypi.md'), encoding="utf-8") as f:
long_description = f.read()
with open(os.path.join(this_directory,'version.py'), encoding="utf-8") as f:
version = f.read()
version = version.split("=")[-1].split("print")[0].replace('"','').strip()
packages = ['pyettj']
package_data = \
{'': ['*'],
'pyettj': ['exemplo/*', 'media/*']}
install_requires = \
['beautifulsoup4>=4.9.3,<5.0.0',
'lxml>=4.6.3,<5.0.0',
'matplotlib>=3.4.2,<4.0.0',
'pandas>=1.2.4,<2.0.0',
'requests>=2.25.1,<3.0.0']
setup_kwargs = {
'name': 'pyettj',
'version': version,
'description': '"Capturar dados das estruturas a termo de algumas taxas de juros (ettj) brasileiras."',
'long_description':long_description,
'long_description_content_type':'text/markdown',
'author': 'Rafael Rodrigues',
'author_email': 'rafael.rafarod@gmail.com',
'maintainer': None,
'maintainer_email': None,
'url': None,
'packages': packages,
'package_data': package_data,
'install_requires': install_requires,
'python_requires': '>=3.8,<4.0',
}
setup(**setup_kwargs) | 27.042553 | 107 | 0.650669 |
ace7ae082bef79c3779460cd104506d0fc5ed547 | 3,841 | py | Python | eval/pascal.py | florisdesmedt/EfficientDet | a840ca1be55ad84f9aa2517114e467a574c6fea9 | [
"Apache-2.0"
] | null | null | null | eval/pascal.py | florisdesmedt/EfficientDet | a840ca1be55ad84f9aa2517114e467a574c6fea9 | [
"Apache-2.0"
] | null | null | null | eval/pascal.py | florisdesmedt/EfficientDet | a840ca1be55ad84f9aa2517114e467a574c6fea9 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tensorflow import keras
from eval.common import evaluate
class Evaluate(keras.callbacks.Callback):
"""
Evaluation callback for arbitrary datasets.
"""
def __init__(
self,
generator,
model,
iou_threshold=0.5,
score_threshold=0.01,
max_detections=100,
save_path=None,
tensorboard=None,
weighted_average=False,
verbose=1
):
"""
Evaluate a given dataset using a given model at the end of every epoch during training.
Args:
generator: The generator that represents the dataset to evaluate.
iou_threshold: The threshold used to consider when a detection is positive or negative.
score_threshold: The score confidence threshold to use for detections.
max_detections: The maximum number of detections to use per image.
save_path: The path to save images with visualized detections to.
tensorboard: Instance of keras.callbacks.TensorBoard used to log the mAP value.
weighted_average: Compute the mAP using the weighted average of precisions among classes.
verbose: Set the verbosity level, by default this is set to 1.
"""
self.generator = generator
self.iou_threshold = iou_threshold
self.score_threshold = score_threshold
self.max_detections = max_detections
self.save_path = save_path
self.tensorboard = tensorboard
self.weighted_average = weighted_average
self.verbose = verbose
self.active_model = model
super(Evaluate, self).__init__()
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
# run evaluation
average_precisions = evaluate(
self.generator,
self.active_model,
iou_threshold=self.iou_threshold,
score_threshold=self.score_threshold,
max_detections=self.max_detections,
visualize=False
)
# compute per class average precision
total_instances = []
precisions = []
for label, (average_precision, num_annotations) in average_precisions.items():
if self.verbose == 1:
print('{:.0f} instances of class'.format(num_annotations),
self.generator.label_to_name(label), 'with average precision: {:.4f}'.format(average_precision))
total_instances.append(num_annotations)
precisions.append(average_precision)
if self.weighted_average:
self.mean_ap = sum([a * b for a, b in zip(total_instances, precisions)]) / sum(total_instances)
else:
self.mean_ap = sum(precisions) / sum(x > 0 for x in total_instances)
if self.tensorboard is not None and self.tensorboard.writer is not None:
import tensorflow as tf
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = self.mean_ap
summary_value.tag = "mAP"
self.tensorboard.writer.add_summary(summary, epoch)
logs['mAP'] = self.mean_ap
if self.verbose == 1:
print('mAP: {:.4f}'.format(self.mean_ap))
| 37.656863 | 118 | 0.653736 |
ace7ae38948319b48501cd05404d574e4dc8702a | 5,170 | py | Python | patterns/builder.py | raldenprog/python_tricks | b30be8672d1a47ed5bfddf2deccd9b28d68e445d | [
"MIT"
] | 1 | 2021-09-08T08:13:42.000Z | 2021-09-08T08:13:42.000Z | patterns/builder.py | raldenprog/python_tricks | b30be8672d1a47ed5bfddf2deccd9b28d68e445d | [
"MIT"
] | null | null | null | patterns/builder.py | raldenprog/python_tricks | b30be8672d1a47ed5bfddf2deccd9b28d68e445d | [
"MIT"
] | null | null | null | """
https://refactoring.guru/ru/design-patterns/builder
Строитель — это порождающий паттерн проектирования, который позволяет создавать сложные объекты пошагово.
Строитель даёт возможность использовать один и тот же код строительства для получения разных представлений объектов.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any
class Builder(ABC):
"""
Интерфейс Строителя объявляет создающие методы для различных частей объектов
Продуктов.
"""
@property
@abstractmethod
def product(self) -> None:
pass
@abstractmethod
def produce_part_a(self) -> None:
pass
@abstractmethod
def produce_part_b(self) -> None:
pass
@abstractmethod
def produce_part_c(self) -> None:
pass
class ConcreteBuilder1(Builder):
"""
Классы Конкретного Строителя следуют интерфейсу Строителя и предоставляют
конкретные реализации шагов построения. Ваша программа может иметь несколько
вариантов Строителей, реализованных по-разному.
"""
def __init__(self) -> None:
"""
Новый экземпляр строителя должен содержать пустой объект продукта,
который используется в дальнейшей сборке.
"""
self._product = None
self.reset()
def reset(self) -> None:
self._product = Product1()
@property
def product(self) -> Product1:
"""
Конкретные Строители должны предоставить свои собственные методы
получения результатов. Это связано с тем, что различные типы строителей
могут создавать совершенно разные продукты с разными интерфейсами.
Поэтому такие методы не могут быть объявлены в базовом интерфейсе
Строителя (по крайней мере, в статически типизированном языке
программирования).
Как правило, после возвращения конечного результата клиенту, экземпляр
строителя должен быть готов к началу производства следующего продукта.
Поэтому обычной практикой является вызов метода сброса в конце тела
метода getProduct. Однако такое поведение не является обязательным, вы
можете заставить своих строителей ждать явного запроса на сброс из кода
клиента, прежде чем избавиться от предыдущего результата.
"""
product = self._product
self.reset()
return product
def produce_part_a(self) -> None:
self._product.add("PartA1")
def produce_part_b(self) -> None:
self._product.add("PartB1")
def produce_part_c(self) -> None:
self._product.add("PartC1")
class Product1:
"""
Имеет смысл использовать паттерн Строитель только тогда, когда ваши продукты
достаточно сложны и требуют обширной конфигурации.
В отличие от других порождающих паттернов, различные конкретные строители
могут производить несвязанные продукты. Другими словами, результаты
различных строителей могут не всегда следовать одному и тому же интерфейсу.
"""
def __init__(self) -> None:
self.parts = []
def add(self, part: Any) -> None:
self.parts.append(part)
def list_parts(self) -> None:
print(f"Product parts: {', '.join(self.parts)}", end="")
class Director:
"""
Директор отвечает только за выполнение шагов построения в определённой
последовательности. Это полезно при производстве продуктов в определённом
порядке или особой конфигурации. Строго говоря, класс Директор необязателен,
так как клиент может напрямую управлять строителями.
"""
def __init__(self) -> None:
self._builder = None
@property
def builder(self) -> Builder:
return self._builder
@builder.setter
def builder(self, builder: Builder) -> None:
"""
Директор работает с любым экземпляром строителя, который передаётся ему
клиентским кодом. Таким образом, клиентский код может изменить конечный
тип вновь собираемого продукта.
"""
self._builder = builder
"""
Директор может строить несколько вариаций продукта, используя одинаковые
шаги построения.
"""
def build_minimal_viable_product(self) -> None:
self.builder.produce_part_a()
def build_full_featured_product(self) -> None:
self.builder.produce_part_a()
self.builder.produce_part_b()
self.builder.produce_part_c()
if __name__ == "__main__":
"""
Клиентский код создаёт объект-строитель, передаёт его директору, а затем
инициирует процесс построения. Конечный результат извлекается из объекта-
строителя.
"""
director = Director()
builder = ConcreteBuilder1()
director.builder = builder
print("Standard basic product: ")
director.build_minimal_viable_product()
builder.product.list_parts()
print("\n")
print("Standard full featured product: ")
director.build_full_featured_product()
builder.product.list_parts()
print("\n")
# Помните, что паттерн Строитель можно использовать без класса Директор.
print("Custom product: ")
builder.produce_part_a()
builder.produce_part_b()
builder.product.list_parts() | 30.233918 | 116 | 0.695164 |
ace7aee6014d1153a18504d99bb8e0c7eb4cc378 | 18,336 | py | Python | pybind/nos/v6_0_2c/interface_vlan/interface/vlan/ip/igmp/static_group/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/nos/v6_0_2c/interface_vlan/interface/vlan/ip/igmp/static_group/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/nos/v6_0_2c/interface_vlan/interface/vlan/ip/igmp/static_group/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class static_group(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface-vlan/interface/vlan/ip/igmp/static-group. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__mcast_address','__interface','__if_type','__value',)
_yang_name = 'static-group'
_rest_name = 'static-group'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__interface = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'interface': {}},), is_leaf=True, yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-incomplete-no': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='enumeration', is_config=True)
self.__mcast_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="mcast-address", rest_name="mcast-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Multicast Address to be Joined in the format A.B.C.D', u'cli-incomplete-no': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='inet:ipv4-address', is_config=True)
self.__value = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..10']}), is_leaf=True, yang_name="value", rest_name="value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-completion-actionpoint': u'IgmpsShowActionPoint'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='string-type', is_config=True)
self.__if_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'FortyGigabitEthernet': {}, u'Port-channel': {}, u'GigabitEthernet': {}, u'TenGigabitEthernet': {}, u'HundredGigabitEthernet': {}},), is_leaf=True, yang_name="if-type", rest_name="if-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-incomplete-no': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='enumeration', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface-vlan', u'interface', u'vlan', u'ip', u'igmp', u'static-group']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'Vlan', u'ip', u'igmp', u'static-group']
def _get_mcast_address(self):
"""
Getter method for mcast_address, mapped from YANG variable /interface_vlan/interface/vlan/ip/igmp/static_group/mcast_address (inet:ipv4-address)
"""
return self.__mcast_address
def _set_mcast_address(self, v, load=False):
"""
Setter method for mcast_address, mapped from YANG variable /interface_vlan/interface/vlan/ip/igmp/static_group/mcast_address (inet:ipv4-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_mcast_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mcast_address() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="mcast-address", rest_name="mcast-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Multicast Address to be Joined in the format A.B.C.D', u'cli-incomplete-no': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='inet:ipv4-address', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mcast_address must be of a type compatible with inet:ipv4-address""",
'defined-type': "inet:ipv4-address",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="mcast-address", rest_name="mcast-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Multicast Address to be Joined in the format A.B.C.D', u'cli-incomplete-no': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='inet:ipv4-address', is_config=True)""",
})
self.__mcast_address = t
if hasattr(self, '_set'):
self._set()
def _unset_mcast_address(self):
self.__mcast_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="mcast-address", rest_name="mcast-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Multicast Address to be Joined in the format A.B.C.D', u'cli-incomplete-no': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='inet:ipv4-address', is_config=True)
def _get_interface(self):
"""
Getter method for interface, mapped from YANG variable /interface_vlan/interface/vlan/ip/igmp/static_group/interface (enumeration)
"""
return self.__interface
def _set_interface(self, v, load=False):
"""
Setter method for interface, mapped from YANG variable /interface_vlan/interface/vlan/ip/igmp/static_group/interface (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'interface': {}},), is_leaf=True, yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-incomplete-no': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface must be of a type compatible with enumeration""",
'defined-type': "brocade-igmp-snooping:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'interface': {}},), is_leaf=True, yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-incomplete-no': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='enumeration', is_config=True)""",
})
self.__interface = t
if hasattr(self, '_set'):
self._set()
def _unset_interface(self):
self.__interface = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'interface': {}},), is_leaf=True, yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-incomplete-no': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='enumeration', is_config=True)
def _get_if_type(self):
"""
Getter method for if_type, mapped from YANG variable /interface_vlan/interface/vlan/ip/igmp/static_group/if_type (enumeration)
"""
return self.__if_type
def _set_if_type(self, v, load=False):
"""
Setter method for if_type, mapped from YANG variable /interface_vlan/interface/vlan/ip/igmp/static_group/if_type (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_if_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_if_type() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'FortyGigabitEthernet': {}, u'Port-channel': {}, u'GigabitEthernet': {}, u'TenGigabitEthernet': {}, u'HundredGigabitEthernet': {}},), is_leaf=True, yang_name="if-type", rest_name="if-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-incomplete-no': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """if_type must be of a type compatible with enumeration""",
'defined-type': "brocade-igmp-snooping:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'FortyGigabitEthernet': {}, u'Port-channel': {}, u'GigabitEthernet': {}, u'TenGigabitEthernet': {}, u'HundredGigabitEthernet': {}},), is_leaf=True, yang_name="if-type", rest_name="if-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-incomplete-no': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='enumeration', is_config=True)""",
})
self.__if_type = t
if hasattr(self, '_set'):
self._set()
def _unset_if_type(self):
self.__if_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'FortyGigabitEthernet': {}, u'Port-channel': {}, u'GigabitEthernet': {}, u'TenGigabitEthernet': {}, u'HundredGigabitEthernet': {}},), is_leaf=True, yang_name="if-type", rest_name="if-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-incomplete-no': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='enumeration', is_config=True)
def _get_value(self):
"""
Getter method for value, mapped from YANG variable /interface_vlan/interface/vlan/ip/igmp/static_group/value (string-type)
"""
return self.__value
def _set_value(self, v, load=False):
"""
Setter method for value, mapped from YANG variable /interface_vlan/interface/vlan/ip/igmp/static_group/value (string-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_value is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_value() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..10']}), is_leaf=True, yang_name="value", rest_name="value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-completion-actionpoint': u'IgmpsShowActionPoint'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='string-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """value must be of a type compatible with string-type""",
'defined-type': "brocade-igmp-snooping:string-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..10']}), is_leaf=True, yang_name="value", rest_name="value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-completion-actionpoint': u'IgmpsShowActionPoint'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='string-type', is_config=True)""",
})
self.__value = t
if hasattr(self, '_set'):
self._set()
def _unset_value(self):
self.__value = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..10']}), is_leaf=True, yang_name="value", rest_name="value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-completion-actionpoint': u'IgmpsShowActionPoint'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='string-type', is_config=True)
mcast_address = __builtin__.property(_get_mcast_address, _set_mcast_address)
interface = __builtin__.property(_get_interface, _set_interface)
if_type = __builtin__.property(_get_if_type, _set_if_type)
value = __builtin__.property(_get_value, _set_value)
_pyangbind_elements = {'mcast_address': mcast_address, 'interface': interface, 'if_type': if_type, 'value': value, }
| 73.935484 | 708 | 0.696226 |
ace7af67d8cd235ab34a713da514972fe4e63c0b | 4,304 | py | Python | test/authentication/test_get_token.py | rubelw/auth0_client | 51e68239babcf7c40e40491d1aaa3f8547a67f63 | [
"MIT"
] | 2 | 2020-10-08T21:42:56.000Z | 2021-03-21T08:17:52.000Z | test/authentication/test_get_token.py | rubelw/auth0_client | 51e68239babcf7c40e40491d1aaa3f8547a67f63 | [
"MIT"
] | null | null | null | test/authentication/test_get_token.py | rubelw/auth0_client | 51e68239babcf7c40e40491d1aaa3f8547a67f63 | [
"MIT"
] | null | null | null | import unittest
import mock
from auth0_client.v3.authentication.get_token import GetToken
class TestGetToken(unittest.TestCase):
@mock.patch('auth0_client.v3.authentication.get_token.GetToken.post')
def test_authorization_code(self, mock_post):
g = GetToken('my.domain.com')
g.authorization_code(client_id='cid',
client_secret='clsec',
code='cd',
grant_type='gt',
redirect_uri='idt')
args, kwargs = mock_post.call_args
self.assertEqual(args[0], 'https://my.domain.com/oauth/token')
self.assertEqual(kwargs['data'], {
'client_id': 'cid',
'client_secret': 'clsec',
'code': 'cd',
'grant_type': 'gt',
'redirect_uri': 'idt'
})
self.assertEqual(kwargs['headers'], {
'Content-Type': 'application/json'
})
@mock.patch('auth0_client.v3.authentication.get_token.GetToken.post')
def test_authorization_code_pkce(self, mock_post):
g = GetToken('my.domain.com')
g.authorization_code_pkce(client_id='cid',
code_verifier='cdver',
code='cd',
grant_type='gt',
redirect_uri='idt')
args, kwargs = mock_post.call_args
self.assertEqual(args[0], 'https://my.domain.com/oauth/token')
self.assertEqual(kwargs['data'], {
'client_id': 'cid',
'code_verifier': 'cdver',
'code': 'cd',
'grant_type': 'gt',
'redirect_uri': 'idt'
})
self.assertEqual(kwargs['headers'], {
'Content-Type': 'application/json'
})
@mock.patch('auth0_client.v3.authentication.get_token.GetToken.post')
def test_client_credentials(self, mock_post):
g = GetToken('my.domain.com')
g.client_credentials(client_id='cid',
client_secret='clsec',
audience='aud',
grant_type='gt')
args, kwargs = mock_post.call_args
self.assertEqual(args[0], 'https://my.domain.com/oauth/token')
self.assertEqual(kwargs['data'], {
'client_id': 'cid',
'client_secret': 'clsec',
'audience': 'aud',
'grant_type': 'gt'
})
self.assertEqual(kwargs['headers'], {
'Content-Type': 'application/json'
})
@mock.patch('auth0_client.v3.authentication.get_token.GetToken.post')
def test_login(self, mock_post):
g = GetToken('my.domain.com')
g.login(client_id='cid',
client_secret='clsec',
username='usrnm',
password='pswd',
scope='http://test.com/api',
realm='rlm',
audience='aud',
grant_type='gt')
args, kwargs = mock_post.call_args
self.assertEqual(args[0], 'https://my.domain.com/oauth/token')
self.assertEqual(kwargs['data'], {
'client_id': 'cid',
'client_secret': 'clsec',
'username': 'usrnm',
'password': 'pswd',
'scope': 'http://test.com/api',
'realm': 'rlm',
'audience': 'aud',
'grant_type': 'gt'
})
self.assertEqual(kwargs['headers'], {
'Content-Type': 'application/json'
})
@mock.patch('auth0_client.v3.authentication.get_token.GetToken.post')
def test_refresh_token(self, mock_post):
g = GetToken('my.domain.com')
g.refresh_token(client_id='cid',
client_secret='clsec',
refresh_token='rt',
grant_type='gt')
args, kwargs = mock_post.call_args
self.assertEqual(args[0], 'https://my.domain.com/oauth/token')
self.assertEqual(kwargs['data'], {
'client_id': 'cid',
'client_secret': 'clsec',
'refresh_token': 'rt',
'grant_type': 'gt'
})
self.assertEqual(kwargs['headers'], {
'Content-Type': 'application/json'
})
| 32.360902 | 73 | 0.514173 |
ace7af9e80e2de5dbe9f953a6b0ff9099ea91b18 | 1,191 | py | Python | pyside2_style_test/__main__.py | M3TIOR/pyside2-style-test | 1000a00c44de90b1d9c2f2ae121ab127d1499ecd | [
"MIT"
] | 1 | 2020-08-14T16:21:22.000Z | 2020-08-14T16:21:22.000Z | pyside2_style_test/__main__.py | M3TIOR/pyside2-style-test | 1000a00c44de90b1d9c2f2ae121ab127d1499ecd | [
"MIT"
] | 1 | 2020-03-27T00:29:04.000Z | 2020-03-27T05:59:18.000Z | pyside2_style_test/__main__.py | M3TIOR/pyside2-style-test | 1000a00c44de90b1d9c2f2ae121ab127d1499ecd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 Ruby Allison Rose (aka: M3TIOR)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE
from .cli import main
main()
| 41.068966 | 80 | 0.764903 |
ace7affcd0023298299302408c9702124d5dcdd4 | 11,377 | py | Python | prism/_gui/widgets/preferences/tests/test_kwargs_dicts.py | vishalbelsare/PRISM | 9565508df8f7f7bba4119f4f523fc4635455a4db | [
"BSD-3-Clause"
] | 43 | 2019-01-28T05:40:43.000Z | 2022-03-29T20:46:57.000Z | prism/_gui/widgets/preferences/tests/test_kwargs_dicts.py | vishalbelsare/PRISM | 9565508df8f7f7bba4119f4f523fc4635455a4db | [
"BSD-3-Clause"
] | 30 | 2019-01-28T10:55:27.000Z | 2021-02-01T04:06:55.000Z | prism/_gui/widgets/preferences/tests/test_kwargs_dicts.py | vishalbelsare/PRISM | 9565508df8f7f7bba4119f4f523fc4635455a4db | [
"BSD-3-Clause"
] | 7 | 2020-01-10T15:54:55.000Z | 2021-02-01T04:46:17.000Z | # -*- coding: utf-8 -*-
# %% IMPORTS
# Built-in imports
from os import path
# Package imports
from matplotlib import cm
import pytest
from qtpy import QtCore as QC, QtWidgets as QW
# PRISM imports
from prism._gui.widgets.core import get_box_value, set_box_value
from prism._gui.widgets.preferences.custom_boxes import (
ColorBox, ColorMapBox, DefaultBox)
from prism._gui.widgets.preferences.kwargs_dicts import (
KwargsDictBoxLayout, KwargsDictDialog, KwargsDictDialogPage)
# %% GLOBALS
DIR_PATH = path.abspath(path.dirname(__file__)) # Path to tests directory
# %% HELPER FUNCTIONS
# Make abbreviation for kwargs_dict dialog
@pytest.fixture(scope='module')
def kwargs_dicts(options):
return(options.dict_dialog)
# %% PYTEST CLASSES AND FUNCTIONS
# Pytest for the kwargs dict box
@pytest.mark.incremental
class TestKwargsDictBoxLayout(object):
# Test if it is bound to options
def test_bound_options(self, options):
assert hasattr(options, 'kwargs_dict_box')
assert isinstance(options.kwargs_dict_box, KwargsDictBoxLayout)
obj = options.kwargs_dict_box
while obj is not options and obj is not None:
obj = obj.parentWidget()
assert obj is not None
# Pytest for the kwargs dict dialog
@pytest.mark.incremental
class TestKwargsDictDialog_Main(object):
# Test if it is bound to options
def test_bound_options(self, options, kwargs_dicts):
assert any([isinstance(child, KwargsDictDialog)
for child in options.children()])
assert isinstance(kwargs_dicts, KwargsDictDialog)
# Test if the dialog contains the correct number of pages
def test_n_pages(self, option_entries, kwargs_dicts):
# Obtain the number of pages there should be
n_pages = len([name for name in option_entries if 'kwargs' in name])
# Check that there are this many pages
assert kwargs_dicts.pages.count() == n_pages
assert kwargs_dicts.contents.count() == kwargs_dicts.pages.count()
# Test if the dialog contains the proper pages
def test_pages(self, kwargs_dicts):
# Check that all pages are instances of KwargsDictDialogPage
n_pages = kwargs_dicts.pages.count()
for page in map(kwargs_dicts.pages.widget, range(n_pages)):
assert isinstance(page.widget(), KwargsDictDialogPage)
# Check that a page has the dialog (somewhere) as its parent
obj = page.widget()
while obj is not kwargs_dicts and obj is not None:
obj = obj.parentWidget()
assert obj is not None
# Test if all pages have the proper layout
def test_page_layouts(self, kwargs_dicts):
# Check that all pages have a grid layout with n_count divisible by 3
n_pages = kwargs_dicts.pages.count()
for page in map(kwargs_dicts.pages.widget, range(n_pages)):
page = page.widget()
assert hasattr(page, 'kwargs_grid')
assert isinstance(page.kwargs_grid, QW.QGridLayout)
assert not page.kwargs_grid.count() % 3
# Test if the kwargs_dict dialog can be opened
def test_open(self, qtbot, options, kwargs_dicts):
# Try to open the kwargs_dicts window
qtbot.mouseClick(options.kwargs_dict_box.view_but, QC.Qt.LeftButton)
# Check that currently the kwargs_dict dialog is open
assert kwargs_dicts.isVisible()
# Test if a new dict entry can be added
def test_add_editable_entry(self, qtbot, option_entries, kwargs_dicts):
# Obtain the fig_kwargs page
page = option_entries['fig_kwargs'].box
# Check that this page currently contains at least 1 row
n_rows = page.kwargs_grid.count()//3
assert n_rows
# Click on the 'add' button of this page
qtbot.mouseClick(page.add_but, QC.Qt.LeftButton)
# Check that an extra row has been added
assert (page.kwargs_grid.count()//3 == n_rows+1)
n_rows += 1
# Obtain the kwargs box at the last row and validate it
kwargs_box = page.kwargs_grid.itemAtPosition(n_rows-1, 1).widget()
assert isinstance(kwargs_box, QW.QComboBox)
# Check that this kwargs_box currently has nothing selected
assert (get_box_value(kwargs_box) == '')
# Add three more entries
page.add_but.click()
page.add_but.click()
page.add_but.click()
# Remove the second-last entry again
row = page.kwargs_grid.count()//3-2
del_but = page.kwargs_grid.itemAtPosition(row, 0).widget()
del_but.click()
# Test if this new entry can have its field set
def test_set_entry_default(self, qtbot, option_entries, kwargs_dicts):
# Obtain the fig_kwargs page and the index of the second-last row
page = option_entries['fig_kwargs'].box
row = page.kwargs_grid.count()//3-3
# Obtain the kwargs_box
kwargs_box = page.kwargs_grid.itemAtPosition(row, 1).widget()
# Make sure that 'a_test' is not a default entry type
assert 'a_test' not in page.std_entries
# Set the value of this kwargs_box
qtbot.keyClicks(kwargs_box, 'a_test')
# Check that the current value of the kwargs_box is 'a_test'
assert (get_box_value(kwargs_box) == 'a_test')
# Check that the field_box is an instance of the DefaultBox
field_box = page.kwargs_grid.itemAtPosition(row, 2).widget()
assert isinstance(field_box, DefaultBox)
# Set the field_box to bool and False
set_box_value(field_box.type_box, 'bool')
set_box_value(field_box.value_box, False)
# Test if another new entry can have its field set the same way
def test_set_entry_duplicate(self, qtbot, option_entries, kwargs_dicts):
# Obtain the fig_kwargs page and the index of the last row
page = option_entries['fig_kwargs'].box
row = page.kwargs_grid.count()//3-2
# Obtain the kwargs_box
kwargs_box = page.kwargs_grid.itemAtPosition(row, 1).widget()
# Set the value of this kwargs_box
qtbot.keyClicks(kwargs_box, 'a_test')
# Set the field box to a float of 1.5
field_box = page.kwargs_grid.itemAtPosition(row, 2).widget()
set_box_value(field_box.type_box, 'float')
set_box_value(field_box.value_box, 1.5)
# Test if saving the kwargs_dicts works
def test_save_kwargs_dicts(self, options, option_entries):
# Try to save the options
options.save_but.click()
# Check that the associated fig_kwargs entry has been updated
entry = option_entries['fig_kwargs']
fig_kwargs = get_box_value(entry.box)
assert (fig_kwargs['a_test'] == 1.5)
assert (fig_kwargs != entry.default)
assert (entry.value == fig_kwargs)
# Test if the kwargs_dict dialog closes when the options menu closes
def test_close_preferences_window(self, options, kwargs_dicts):
# Check that the window is currently open
assert kwargs_dicts.isVisible()
# Close the options menu
options.close_but.click()
# Check that both windows are now closed
assert not options.isVisible()
assert not kwargs_dicts.isVisible()
# Open the kwargs_dicts window again
options.kwargs_dict_box.view_but.click()
# Test what happens if an empty kwargs_box is used
def test_set_entry_empty(self, option_entries, kwargs_dicts):
# Obtain the fig_kwargs page
page = option_entries['fig_kwargs'].box
# Check which row should have 'a_test'
row = option_entries['fig_kwargs'].value.keys().index('a_test')+1
# Check that this row indeed has 'a_test'
kwargs_box = page.kwargs_grid.itemAtPosition(row, 1).widget()
assert (get_box_value(kwargs_box) == 'a_test')
# Set the kwargs_box to empty
set_box_value(kwargs_box, '')
# Check that the associated field_box is now an empty label
field_box = page.kwargs_grid.itemAtPosition(row, 2).widget()
assert isinstance(field_box, QW.QLabel)
assert (field_box.text() == '')
# Test what happens if a banned entry_type is used
def test_set_entry_banned(self, option_entries, kwargs_dicts):
# Obtain the impl_kwargs_2D page
page = option_entries['impl_kwargs_2D'].box
# Add a new entry to this page
page.add_but.click()
# Obtain a banned entry
assert len(page.banned_entries)
entry_type = page.banned_entries[0]
# Set the corresponding kwargs_box to this entry_type
row = page.kwargs_grid.count()//3-1
kwargs_box = page.kwargs_grid.itemAtPosition(row, 1).widget()
set_box_value(kwargs_box, entry_type)
# Check that the field_box is now a non-empty label
field_box = page.kwargs_grid.itemAtPosition(row, 2).widget()
assert isinstance(field_box, QW.QLabel)
assert (field_box.text() != '')
# Test if saving the kwargs_dicts still works
def test_save_kwargs_dicts2(self, options, option_entries):
# Try to save the options
options.save_but.click()
# Check that the impl_kwargs_2D entry has not changed
entry = option_entries['impl_kwargs_2D']
impl_kwargs_2D = get_box_value(entry.box)
assert (impl_kwargs_2D == entry.default)
assert (entry.value == impl_kwargs_2D)
# Pytest for the kwargs dict dialog entry types
class TestKwargsDictDialog_EntryTypes(object):
# Test if all standard entry types can be properly used
@pytest.mark.parametrize(
"page_name, entry_type, field_type, field_value",
[('impl_kwargs_2D', 'alpha', QW.QDoubleSpinBox, 0.5),
('impl_kwargs_3D', 'cmap', ColorMapBox, cm.get_cmap('cmr.freeze')),
('los_kwargs_2D', 'color', ColorBox, 'cyan'),
('fig_kwargs', 'dpi', QW.QSpinBox, 175),
('line_kwargs_est', 'linestyle', QW.QComboBox, '--'),
('line_kwargs_cut', 'linewidth', QW.QDoubleSpinBox, 6.9),
('impl_kwargs_2D', 'marker', QW.QComboBox, '*'),
('los_kwargs_2D', 'markersize', QW.QDoubleSpinBox, 42),
('los_kwargs_3D', 'xscale', QW.QComboBox, 'linear'),
('impl_kwargs_3D', 'yscale', QW.QComboBox, 'log'),
('arrow_kwargs_est', 'fh_arrowlength', QW.QDoubleSpinBox, 1),
('arrow_kwargs_est', 'ft_arrowlength', QW.QDoubleSpinBox, 0.1),
('arrow_kwargs_est', 'fh_arrowwidth', QW.QDoubleSpinBox, 17),
('arrow_kwargs_est', 'ft_arrowwidth', QW.QDoubleSpinBox, 13.13)])
def test_set_standard_entry(self, page_name, entry_type, field_type,
field_value, option_entries, kwargs_dicts):
# Obtain the proper page
page = option_entries[page_name].box
# Add a new entry to this page
page.add_but.click()
# Set the kwargs_box to entry_type
row = page.kwargs_grid.count()//3-1
kwargs_box = page.kwargs_grid.itemAtPosition(row, 1).widget()
set_box_value(kwargs_box, entry_type)
# Check that the field box is an instance of given field_type
field_box = page.kwargs_grid.itemAtPosition(row, 2).widget()
assert isinstance(field_box, field_type)
# Set the value of this box
set_box_value(field_box, field_value)
| 39.366782 | 77 | 0.673288 |
ace7b01bb0b0f2983375ec6360810469844c03b9 | 36,651 | py | Python | crabageprediction/venv/Lib/site-packages/sklearn/base.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | 5 | 2022-01-05T00:41:46.000Z | 2022-03-21T07:22:58.000Z | crabageprediction/venv/Lib/site-packages/sklearn/base.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | 3 | 2022-03-18T06:16:45.000Z | 2022-03-23T14:26:58.000Z | crabageprediction/venv/Lib/site-packages/sklearn/base.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | 2 | 2022-03-20T17:35:44.000Z | 2022-03-21T18:30:31.000Z | """Base classes for all estimators."""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import copy
import warnings
from collections import defaultdict
import platform
import inspect
import re
import numpy as np
from . import __version__
from ._config import get_config
from .utils import _IS_32BIT
from .utils._tags import (
_DEFAULT_TAGS,
_safe_tags,
)
from .utils.validation import check_X_y
from .utils.validation import check_array
from .utils.validation import _check_y
from .utils.validation import _num_features
from .utils.validation import _check_feature_names_in
from .utils._estimator_html_repr import estimator_html_repr
from .utils.validation import _get_feature_names
def clone(estimator, *, safe=True):
"""Construct a new unfitted estimator with the same parameters.
Clone does a deep copy of the model in an estimator
without actually copying attached data. It returns a new estimator
with the same parameters that has not been fitted on any data.
Parameters
----------
estimator : {list, tuple, set} of estimator instance or a single \
estimator instance
The estimator or group of estimators to be cloned.
safe : bool, default=True
If safe is False, clone will fall back to a deep copy on objects
that are not estimators.
Returns
-------
estimator : object
The deep copy of the input, an estimator if input is an estimator.
Notes
-----
If the estimator's `random_state` parameter is an integer (or if the
estimator doesn't have a `random_state` parameter), an *exact clone* is
returned: the clone and the original estimator will give the exact same
results. Otherwise, *statistical clone* is returned: the clone might
return different results from the original estimator. More details can be
found in :ref:`randomness`.
"""
estimator_type = type(estimator)
# XXX: not handling dictionaries
if estimator_type in (list, tuple, set, frozenset):
return estimator_type([clone(e, safe=safe) for e in estimator])
elif not hasattr(estimator, "get_params") or isinstance(estimator, type):
if not safe:
return copy.deepcopy(estimator)
else:
if isinstance(estimator, type):
raise TypeError(
"Cannot clone object. "
+ "You should provide an instance of "
+ "scikit-learn estimator instead of a class."
)
else:
raise TypeError(
"Cannot clone object '%s' (type %s): "
"it does not seem to be a scikit-learn "
"estimator as it does not implement a "
"'get_params' method." % (repr(estimator), type(estimator))
)
klass = estimator.__class__
new_object_params = estimator.get_params(deep=False)
for name, param in new_object_params.items():
new_object_params[name] = clone(param, safe=False)
new_object = klass(**new_object_params)
params_set = new_object.get_params(deep=False)
# quick sanity check of the parameters of the clone
for name in new_object_params:
param1 = new_object_params[name]
param2 = params_set[name]
if param1 is not param2:
raise RuntimeError(
"Cannot clone object %s, as the constructor "
"either does not set or modifies parameter %s" % (estimator, name)
)
return new_object
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params : dict
The dictionary to pretty print
offset : int, default=0
The offset in characters to add at the begin of each line.
printer : callable, default=repr
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ",\n" + (1 + offset // 2) * " "
for i, (k, v) in enumerate(sorted(params.items())):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = "%s=%s" % (k, str(v))
else:
# use repr of the rest
this_repr = "%s=%s" % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + "..." + this_repr[-100:]
if i > 0:
if this_line_length + len(this_repr) >= 75 or "\n" in this_repr:
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(", ")
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = "".join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = "\n".join(l.rstrip(" ") for l in lines.split("\n"))
return lines
class BaseEstimator:
"""Base class for all estimators in scikit-learn.
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, "deprecated_original", cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = inspect.signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [
p
for p in init_signature.parameters.values()
if p.name != "self" and p.kind != p.VAR_KEYWORD
]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError(
"scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention." % (cls, init_signature)
)
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""
Get parameters for this estimator.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
value = getattr(self, key)
if deep and hasattr(value, "get_params"):
deep_items = value.get_params().items()
out.update((key + "__" + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""
Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as :class:`~sklearn.pipeline.Pipeline`). The latter have
parameters of the form ``<component>__<parameter>`` so that it's
possible to update each component of a nested object.
Parameters
----------
**params : dict
Estimator parameters.
Returns
-------
self : estimator instance
Estimator instance.
"""
if not params:
# Simple optimization to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
nested_params = defaultdict(dict) # grouped by prefix
for key, value in params.items():
key, delim, sub_key = key.partition("__")
if key not in valid_params:
raise ValueError(
"Invalid parameter %s for estimator %s. "
"Check the list of available parameters "
"with `estimator.get_params().keys()`." % (key, self)
)
if delim:
nested_params[key][sub_key] = value
else:
setattr(self, key, value)
valid_params[key] = value
for key, sub_params in nested_params.items():
valid_params[key].set_params(**sub_params)
return self
def __repr__(self, N_CHAR_MAX=700):
# N_CHAR_MAX is the (approximate) maximum number of non-blank
# characters to render. We pass it as an optional parameter to ease
# the tests.
from .utils._pprint import _EstimatorPrettyPrinter
N_MAX_ELEMENTS_TO_SHOW = 30 # number of elements to show in sequences
# use ellipsis for sequences with a lot of elements
pp = _EstimatorPrettyPrinter(
compact=True,
indent=1,
indent_at_name=True,
n_max_elements_to_show=N_MAX_ELEMENTS_TO_SHOW,
)
repr_ = pp.pformat(self)
# Use bruteforce ellipsis when there are a lot of non-blank characters
n_nonblank = len("".join(repr_.split()))
if n_nonblank > N_CHAR_MAX:
lim = N_CHAR_MAX // 2 # apprx number of chars to keep on both ends
regex = r"^(\s*\S){%d}" % lim
# The regex '^(\s*\S){%d}' % n
# matches from the start of the string until the nth non-blank
# character:
# - ^ matches the start of string
# - (pattern){n} matches n repetitions of pattern
# - \s*\S matches a non-blank char following zero or more blanks
left_lim = re.match(regex, repr_).end()
right_lim = re.match(regex, repr_[::-1]).end()
if "\n" in repr_[left_lim:-right_lim]:
# The left side and right side aren't on the same line.
# To avoid weird cuts, e.g.:
# categoric...ore',
# we need to start the right side with an appropriate newline
# character so that it renders properly as:
# categoric...
# handle_unknown='ignore',
# so we add [^\n]*\n which matches until the next \n
regex += r"[^\n]*\n"
right_lim = re.match(regex, repr_[::-1]).end()
ellipsis = "..."
if left_lim + len(ellipsis) < len(repr_) - right_lim:
# Only add ellipsis if it results in a shorter repr
repr_ = repr_[:left_lim] + "..." + repr_[-right_lim:]
return repr_
def __getstate__(self):
try:
state = super().__getstate__()
except AttributeError:
state = self.__dict__.copy()
if type(self).__module__.startswith("sklearn."):
return dict(state.items(), _sklearn_version=__version__)
else:
return state
def __setstate__(self, state):
if type(self).__module__.startswith("sklearn."):
pickle_version = state.pop("_sklearn_version", "pre-0.18")
if pickle_version != __version__:
warnings.warn(
"Trying to unpickle estimator {0} from version {1} when "
"using version {2}. This might lead to breaking code or "
"invalid results. Use at your own risk. "
"For more info please refer to:\n"
"https://scikit-learn.org/stable/modules/model_persistence"
".html#security-maintainability-limitations".format(
self.__class__.__name__, pickle_version, __version__
),
UserWarning,
)
try:
super().__setstate__(state)
except AttributeError:
self.__dict__.update(state)
def _more_tags(self):
return _DEFAULT_TAGS
def _get_tags(self):
collected_tags = {}
for base_class in reversed(inspect.getmro(self.__class__)):
if hasattr(base_class, "_more_tags"):
# need the if because mixins might not have _more_tags
# but might do redundant work in estimators
# (i.e. calling more tags on BaseEstimator multiple times)
more_tags = base_class._more_tags(self)
collected_tags.update(more_tags)
return collected_tags
def _check_n_features(self, X, reset):
"""Set the `n_features_in_` attribute, or check against it.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input samples.
reset : bool
If True, the `n_features_in_` attribute is set to `X.shape[1]`.
If False and the attribute exists, then check that it is equal to
`X.shape[1]`. If False and the attribute does *not* exist, then
the check is skipped.
.. note::
It is recommended to call reset=True in `fit` and in the first
call to `partial_fit`. All other methods that validate `X`
should set `reset=False`.
"""
try:
n_features = _num_features(X)
except TypeError as e:
if not reset and hasattr(self, "n_features_in_"):
raise ValueError(
"X does not contain any features, but "
f"{self.__class__.__name__} is expecting "
f"{self.n_features_in_} features"
) from e
# If the number of features is not defined and reset=True,
# then we skip this check
return
if reset:
self.n_features_in_ = n_features
return
if not hasattr(self, "n_features_in_"):
# Skip this check if the expected number of expected input features
# was not recorded by calling fit first. This is typically the case
# for stateless transformers.
return
if n_features != self.n_features_in_:
raise ValueError(
f"X has {n_features} features, but {self.__class__.__name__} "
f"is expecting {self.n_features_in_} features as input."
)
def _check_feature_names(self, X, *, reset):
"""Set or check the `feature_names_in_` attribute.
.. versionadded:: 1.0
Parameters
----------
X : {ndarray, dataframe} of shape (n_samples, n_features)
The input samples.
reset : bool
Whether to reset the `feature_names_in_` attribute.
If False, the input will be checked for consistency with
feature names of data provided when reset was last True.
.. note::
It is recommended to call `reset=True` in `fit` and in the first
call to `partial_fit`. All other methods that validate `X`
should set `reset=False`.
"""
if reset:
feature_names_in = _get_feature_names(X)
if feature_names_in is not None:
self.feature_names_in_ = feature_names_in
elif hasattr(self, "feature_names_in_"):
# Delete the attribute when the estimator is fitted on a new dataset
# that has no feature names.
delattr(self, "feature_names_in_")
return
fitted_feature_names = getattr(self, "feature_names_in_", None)
X_feature_names = _get_feature_names(X)
if fitted_feature_names is None and X_feature_names is None:
# no feature names seen in fit and in X
return
if X_feature_names is not None and fitted_feature_names is None:
warnings.warn(
f"X has feature names, but {self.__class__.__name__} was fitted without"
" feature names"
)
return
if X_feature_names is None and fitted_feature_names is not None:
warnings.warn(
"X does not have valid feature names, but"
f" {self.__class__.__name__} was fitted with feature names"
)
return
# validate the feature names against the `feature_names_in_` attribute
if len(fitted_feature_names) != len(X_feature_names) or np.any(
fitted_feature_names != X_feature_names
):
message = (
"The feature names should match those that were "
"passed during fit. Starting version 1.2, an error will be raised.\n"
)
fitted_feature_names_set = set(fitted_feature_names)
X_feature_names_set = set(X_feature_names)
unexpected_names = sorted(X_feature_names_set - fitted_feature_names_set)
missing_names = sorted(fitted_feature_names_set - X_feature_names_set)
def add_names(names):
output = ""
max_n_names = 5
for i, name in enumerate(names):
if i >= max_n_names:
output += "- ...\n"
break
output += f"- {name}\n"
return output
if unexpected_names:
message += "Feature names unseen at fit time:\n"
message += add_names(unexpected_names)
if missing_names:
message += "Feature names seen at fit time, yet now missing:\n"
message += add_names(missing_names)
if not missing_names and not missing_names:
message += (
"Feature names must be in the same order as they were in fit.\n"
)
warnings.warn(message, FutureWarning)
def _validate_data(
self,
X="no_validation",
y="no_validation",
reset=True,
validate_separately=False,
**check_params,
):
"""Validate input data and set or check the `n_features_in_` attribute.
Parameters
----------
X : {array-like, sparse matrix, dataframe} of shape \
(n_samples, n_features), default='no validation'
The input samples.
If `'no_validation'`, no validation is performed on `X`. This is
useful for meta-estimator which can delegate input validation to
their underlying estimator(s). In that case `y` must be passed and
the only accepted `check_params` are `multi_output` and
`y_numeric`.
y : array-like of shape (n_samples,), default='no_validation'
The targets.
- If `None`, `check_array` is called on `X`. If the estimator's
requires_y tag is True, then an error will be raised.
- If `'no_validation'`, `check_array` is called on `X` and the
estimator's requires_y tag is ignored. This is a default
placeholder and is never meant to be explicitly set. In that case
`X` must be passed.
- Otherwise, only `y` with `_check_y` or both `X` and `y` are
checked with either `check_array` or `check_X_y` depending on
`validate_separately`.
reset : bool, default=True
Whether to reset the `n_features_in_` attribute.
If False, the input will be checked for consistency with data
provided when reset was last True.
.. note::
It is recommended to call reset=True in `fit` and in the first
call to `partial_fit`. All other methods that validate `X`
should set `reset=False`.
validate_separately : False or tuple of dicts, default=False
Only used if y is not None.
If False, call validate_X_y(). Else, it must be a tuple of kwargs
to be used for calling check_array() on X and y respectively.
**check_params : kwargs
Parameters passed to :func:`sklearn.utils.check_array` or
:func:`sklearn.utils.check_X_y`. Ignored if validate_separately
is not False.
Returns
-------
out : {ndarray, sparse matrix} or tuple of these
The validated input. A tuple is returned if both `X` and `y` are
validated.
"""
self._check_feature_names(X, reset=reset)
if y is None and self._get_tags()["requires_y"]:
raise ValueError(
f"This {self.__class__.__name__} estimator "
"requires y to be passed, but the target y is None."
)
no_val_X = isinstance(X, str) and X == "no_validation"
no_val_y = y is None or isinstance(y, str) and y == "no_validation"
if no_val_X and no_val_y:
raise ValueError("Validation should be done on X, y or both.")
elif not no_val_X and no_val_y:
X = check_array(X, **check_params)
out = X
elif no_val_X and not no_val_y:
y = _check_y(y, **check_params)
out = y
else:
if validate_separately:
# We need this because some estimators validate X and y
# separately, and in general, separately calling check_array()
# on X and y isn't equivalent to just calling check_X_y()
# :(
check_X_params, check_y_params = validate_separately
X = check_array(X, **check_X_params)
y = check_array(y, **check_y_params)
else:
X, y = check_X_y(X, y, **check_params)
out = X, y
if not no_val_X and check_params.get("ensure_2d", True):
self._check_n_features(X, reset=reset)
return out
@property
def _repr_html_(self):
"""HTML representation of estimator.
This is redundant with the logic of `_repr_mimebundle_`. The latter
should be favorted in the long term, `_repr_html_` is only
implemented for consumers who do not interpret `_repr_mimbundle_`.
"""
if get_config()["display"] != "diagram":
raise AttributeError(
"_repr_html_ is only defined when the "
"'display' configuration option is set to "
"'diagram'"
)
return self._repr_html_inner
def _repr_html_inner(self):
"""This function is returned by the @property `_repr_html_` to make
`hasattr(estimator, "_repr_html_") return `True` or `False` depending
on `get_config()["display"]`.
"""
return estimator_html_repr(self)
def _repr_mimebundle_(self, **kwargs):
"""Mime bundle used by jupyter kernels to display estimator"""
output = {"text/plain": repr(self)}
if get_config()["display"] == "diagram":
output["text/html"] = estimator_html_repr(self)
return output
class ClassifierMixin:
"""Mixin class for all classifiers in scikit-learn."""
_estimator_type = "classifier"
def score(self, X, y, sample_weight=None):
"""
Return the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True labels for `X`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
Mean accuracy of ``self.predict(X)`` wrt. `y`.
"""
from .metrics import accuracy_score
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
def _more_tags(self):
return {"requires_y": True}
class RegressorMixin:
"""Mixin class for all regression estimators in scikit-learn."""
_estimator_type = "regressor"
def score(self, X, y, sample_weight=None):
"""Return the coefficient of determination of the prediction.
The coefficient of determination :math:`R^2` is defined as
:math:`(1 - \\frac{u}{v})`, where :math:`u` is the residual
sum of squares ``((y_true - y_pred)** 2).sum()`` and :math:`v`
is the total sum of squares ``((y_true - y_true.mean()) ** 2).sum()``.
The best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always predicts
the expected value of `y`, disregarding the input features, would get
a :math:`R^2` score of 0.0.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples. For some estimators this may be a precomputed
kernel matrix or a list of generic objects instead with shape
``(n_samples, n_samples_fitted)``, where ``n_samples_fitted``
is the number of samples used in the fitting for the estimator.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True values for `X`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
:math:`R^2` of ``self.predict(X)`` wrt. `y`.
Notes
-----
The :math:`R^2` score used when calling ``score`` on a regressor uses
``multioutput='uniform_average'`` from version 0.23 to keep consistent
with default value of :func:`~sklearn.metrics.r2_score`.
This influences the ``score`` method of all the multioutput
regressors (except for
:class:`~sklearn.multioutput.MultiOutputRegressor`).
"""
from .metrics import r2_score
y_pred = self.predict(X)
return r2_score(y, y_pred, sample_weight=sample_weight)
def _more_tags(self):
return {"requires_y": True}
class ClusterMixin:
"""Mixin class for all cluster estimators in scikit-learn."""
_estimator_type = "clusterer"
def fit_predict(self, X, y=None):
"""
Perform clustering on `X` and returns cluster labels.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
labels : ndarray of shape (n_samples,), dtype=np.int64
Cluster labels.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
self.fit(X)
return self.labels_
def _more_tags(self):
return {"preserves_dtype": []}
class BiclusterMixin:
"""Mixin class for all bicluster estimators in scikit-learn."""
@property
def biclusters_(self):
"""Convenient way to get row and column indicators together.
Returns the ``rows_`` and ``columns_`` members.
"""
return self.rows_, self.columns_
def get_indices(self, i):
"""Row and column indices of the `i`'th bicluster.
Only works if ``rows_`` and ``columns_`` attributes exist.
Parameters
----------
i : int
The index of the cluster.
Returns
-------
row_ind : ndarray, dtype=np.intp
Indices of rows in the dataset that belong to the bicluster.
col_ind : ndarray, dtype=np.intp
Indices of columns in the dataset that belong to the bicluster.
"""
rows = self.rows_[i]
columns = self.columns_[i]
return np.nonzero(rows)[0], np.nonzero(columns)[0]
def get_shape(self, i):
"""Shape of the `i`'th bicluster.
Parameters
----------
i : int
The index of the cluster.
Returns
-------
n_rows : int
Number of rows in the bicluster.
n_cols : int
Number of columns in the bicluster.
"""
indices = self.get_indices(i)
return tuple(len(i) for i in indices)
def get_submatrix(self, i, data):
"""Return the submatrix corresponding to bicluster `i`.
Parameters
----------
i : int
The index of the cluster.
data : array-like of shape (n_samples, n_features)
The data.
Returns
-------
submatrix : ndarray of shape (n_rows, n_cols)
The submatrix corresponding to bicluster `i`.
Notes
-----
Works with sparse matrices. Only works if ``rows_`` and
``columns_`` attributes exist.
"""
from .utils.validation import check_array
data = check_array(data, accept_sparse="csr")
row_ind, col_ind = self.get_indices(i)
return data[row_ind[:, np.newaxis], col_ind]
class TransformerMixin:
"""Mixin class for all transformers in scikit-learn."""
def fit_transform(self, X, y=None, **fit_params):
"""
Fit to data, then transform it.
Fits transformer to `X` and `y` with optional parameters `fit_params`
and returns a transformed version of `X`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
default=None
Target values (None for unsupervised transformations).
**fit_params : dict
Additional fit parameters.
Returns
-------
X_new : ndarray array of shape (n_samples, n_features_new)
Transformed array.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
class _OneToOneFeatureMixin:
"""Provides `get_feature_names_out` for simple transformers.
Assumes there's a 1-to-1 correspondence between input features
and output features.
"""
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then names are generated: `[x0, x1, ..., x(n_features_in_)]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Same as input features.
"""
return _check_feature_names_in(self, input_features)
class DensityMixin:
"""Mixin class for all density estimators in scikit-learn."""
_estimator_type = "DensityEstimator"
def score(self, X, y=None):
"""Return the score of the model on the data `X`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
score : float
"""
pass
class OutlierMixin:
"""Mixin class for all outlier detection estimators in scikit-learn."""
_estimator_type = "outlier_detector"
def fit_predict(self, X, y=None):
"""Perform fit on X and returns labels for X.
Returns -1 for outliers and 1 for inliers.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
y : ndarray of shape (n_samples,)
1 for inliers, -1 for outliers.
"""
# override for transductive outlier detectors like LocalOulierFactor
return self.fit(X).predict(X)
class MetaEstimatorMixin:
_required_parameters = ["estimator"]
"""Mixin class for all meta estimators in scikit-learn."""
class MultiOutputMixin:
"""Mixin to mark estimators that support multioutput."""
def _more_tags(self):
return {"multioutput": True}
class _UnstableArchMixin:
"""Mark estimators that are non-determinstic on 32bit or PowerPC"""
def _more_tags(self):
return {
"non_deterministic": (
_IS_32BIT or platform.machine().startswith(("ppc", "powerpc"))
)
}
def is_classifier(estimator):
"""Return True if the given estimator is (probably) a classifier.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a classifier and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Return True if the given estimator is (probably) a regressor.
Parameters
----------
estimator : estimator instance
Estimator object to test.
Returns
-------
out : bool
True if estimator is a regressor and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "regressor"
def is_outlier_detector(estimator):
"""Return True if the given estimator is (probably) an outlier detector.
Parameters
----------
estimator : estimator instance
Estimator object to test.
Returns
-------
out : bool
True if estimator is an outlier detector and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "outlier_detector"
def _is_pairwise(estimator):
"""Returns True if estimator is pairwise.
- If the `_pairwise` attribute and the tag are present and consistent,
then use the value and not issue a warning.
- If the `_pairwise` attribute and the tag are present and not
consistent, use the `_pairwise` value and issue a deprecation
warning.
- If only the `_pairwise` attribute is present and it is not False,
issue a deprecation warning and use the `_pairwise` value.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if the estimator is pairwise and False otherwise.
"""
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
has_pairwise_attribute = hasattr(estimator, "_pairwise")
pairwise_attribute = getattr(estimator, "_pairwise", False)
pairwise_tag = _safe_tags(estimator, key="pairwise")
if has_pairwise_attribute:
if pairwise_attribute != pairwise_tag:
warnings.warn(
"_pairwise was deprecated in 0.24 and will be removed in 1.1 "
"(renaming of 0.26). Set the estimator tags of your estimator "
"instead",
FutureWarning,
)
return pairwise_attribute
# use pairwise tag when the attribute is not present
return pairwise_tag
| 35.005731 | 88 | 0.589124 |
ace7b138aed1b5e1f08d8abd65c950755a5c2cff | 1,119 | py | Python | setup.py | biochimia/sumsy | c5d13aa29c0e4196cf2b865e4c46268eea74b649 | [
"Apache-2.0"
] | null | null | null | setup.py | biochimia/sumsy | c5d13aa29c0e4196cf2b865e4c46268eea74b649 | [
"Apache-2.0"
] | null | null | null | setup.py | biochimia/sumsy | c5d13aa29c0e4196cf2b865e4c46268eea74b649 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#-*- coding: utf-8 -*-
from setuptools import find_packages, setup
with open('requirements.txt') as requirements_file:
requirements = [ requirement.partition(' ')[0] for requirement in requirements_file ]
setup(
name='sumsy',
description='A forklift who greatly resembles another forklift',
version='0.5.0',
author='João Abecasis',
author_email='joao@abecasis.name',
url='https://github.com/biochimia/sumsy',
packages=find_packages(),
install_requires=requirements,
entry_points={
'console_scripts': [
'sumsy = sumsy.cli:main',
],
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Testing',
'Topic :: System :: Software Distribution',
'Topic :: Utilities',
],
)
| 30.243243 | 89 | 0.620197 |
ace7b169ba4ae200b5a2d5b3f3bf870bd7cc5d84 | 36,423 | py | Python | 2017/adventofcode.py | wmdrthr/advent-of-code | 32f455117abacb41061429824fb4e2471bfe4cb4 | [
"MIT"
] | null | null | null | 2017/adventofcode.py | wmdrthr/advent-of-code | 32f455117abacb41061429824fb4e2471bfe4cb4 | [
"MIT"
] | null | null | null | 2017/adventofcode.py | wmdrthr/advent-of-code | 32f455117abacb41061429824fb4e2471bfe4cb4 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# encoding: utf-8
import os, sys, re
import json
import errno
import atexit
import string
import time
from pprint import pprint
from datetime import datetime
import itertools
import functools
import operator
import collections
import array
import queue
import networkx
import pytz
import requests
SESSIONID_FILE = '~/.config/adventofcode/session'
SESSIONID = None
CACHE_FILE = '~/.config/adventofcode/cache'
USER_AGENT = 'aocd.py/v0.3.6'
URI = 'http://adventofcode.com/{year}/day/{day}/input'
def get_session_id():
global SESSIONID
try:
sessionid = open(os.path.expanduser(SESSIONID_FILE)).read()
SESSIONID = sessionid.strip()
except (OSError, IOError) as err:
print('Could not load session-id - ', str(err))
print("""Puzzle inputs differ by user. Log in to the Advent of Code site,
then check your cookies to get the value of session-id. Save this
value in {}""".format(os.path.expanduser(SESSIONID_FILE)))
sys.exit(3)
def guess_day():
"""
Most recent day, if it's during the Advent of Code. Happy Holidays!
Raises exception otherwise.
"""
aoc_now = datetime.now(tz=pytz.timezone('Asia/Kolkata'))
if aoc_now.month != 12:
raise Exception('guess_day is only available in December')
day = min(aoc_now.day, 25)
return day
CACHE = {}
try:
with open(os.path.expanduser(CACHE_FILE), 'r') as f:
CACHE = json.load(f)
except (OSError, IOError) as err:
# if cache file no tfound, do nothing; for all other errors, raise
# an exception
if err.errno != errno.ENOENT:
raise Exception('Problem loading cache', err)
def save_cache():
with open(os.path.expanduser(CACHE_FILE), 'w') as f:
json.dump(CACHE, f, sort_keys=True, indent=2)
def get_data(day):
"""
Get data for day (1-25)
"""
year = 2017
uri = URI.format(year=year, day=day)
key = '{}?session={}'.format(uri, SESSIONID)
if key not in CACHE:
response = requests.get(uri,
cookies={'session': SESSIONID},
headers={'User-Agent': USER_AGENT})
if response.status_code != 200:
raise Exception('Unexpected response: [{}] {}'.format(response.status_code, response.content))
CACHE[key] = response.text
if not getattr(save_cache, 'registered', False):
atexit.register(save_cache)
save_cache.registered = True
return CACHE[key]
def with_solutions(*expected):
def wrapper(f):
error_msg = 'Incorrect solution for Part {}: Expected "{}", Actual "{}"'
def wrapped_method(*args, **kwargs):
fgen = f(*args)
for index in range(2):
actual = next(fgen)
if expected[index] is not None and not kwargs['skip_verification']:
if actual != expected[index]:
print(error_msg.format(index + 1, expected[index], actual))
sys.exit(23)
print(actual)
return wrapped_method
return wrapper
################################################################################
# Solvers
@with_solutions(1136, 1092)
def solve1(data):
# Inverse Captcha
n = len(data)
yield sum(int(a) for a, b in zip(data, data[1:] + data[0]) if a==b)
n = n // 2
yield sum(int(a) for a, b in zip(data, data[n:] + data[:n]) if a==b)
@with_solutions(41919, 303)
def solve2(data):
# Corruption Checksum
rows = data.split('\n')
rows = [row.split('\t') for row in rows]
rows = [sorted(map(int, row), reverse=True) for row in rows]
yield sum([(row[0] - row[-1]) for row in rows])
res = 0
for row in rows:
for a, b in itertools.combinations(row, 2):
if a % b == 0:
res += a // b
yield res
@with_solutions(480, 349975)
def solve3(data):
# Spiral Memory
input = int(data)
ctr = 3
while True:
square = ctr * ctr
if square >= input:
break
ctr += 2
lesser = abs(input - ((ctr - 1) * (ctr - 1)))
greater = abs(input - square)
if lesser < greater:
yield abs(ctr - 1 - lesser) - 1
else:
yield abs(ctr - greater) - 1
# Part 2 using https://oeis.org/A141481
oeis_uri = 'https://oeis.org/A141481/b141481.txt'
response = requests.get(oeis_uri).content.decode('ascii')
lines = response.splitlines()[2:]
for line in lines:
a, b = line.split()
b = int(b)
if b > input:
yield b
break
@with_solutions(451, 223)
def solve4(data):
# High-Entropy Passphrases
passphrases = data.split('\n')
count = 0
for phrase in passphrases:
words = phrase.split()
wordset = set(words)
if len(words) == len(wordset):
count += 1
yield count
count = 0
for phrase in passphrases:
words = {}
flag = True
for word in phrase.split():
sword = ''.join(sorted(word))
if sword in words:
flag = False
break
else:
words[sword] = 1
if flag:
count += 1
yield count
@with_solutions(360603, 25347697)
def solve5(data):
# A Maze of Twisty Trampolines, All Alike
offsets = [int(s.strip()) for s in data.split()]
steps = index = 0
while True:
newindex = index + offsets[index]
offsets[index] += 1
steps += 1
if newindex < 0 or newindex >= len(offsets):
break
index = newindex
yield steps
offsets = [int(s.strip()) for s in data.split()]
steps = index = 0
while True:
newindex = index + offsets[index]
if offsets[index] >= 3:
offsets[index] -= 1
else:
offsets[index] += 1
steps += 1
if newindex < 0 or newindex >= len(offsets):
break
index = newindex
yield steps
if len(offsets) < 10:
pprint(offsets)
@with_solutions(14029, 2765)
def solve6(data):
# Memory Reallocation
banks = [int(x) for x in data.split()]
configurations = {}
history = []
count = 0
while True:
# add current configuration
config = ','.join([str(x) for x in banks])
configurations[config] = True
history.append(config)
# find largest memory bank
index, maxv = 0, banks[0]
for idx, val in enumerate(banks):
if val > maxv:
maxv = val
index = idx
# empty that bank
banks[index] = 0
# distribute the blocks
while maxv > 0:
index = (index + 1) % len(banks)
banks[index] += 1
maxv -= 1
count += 1
# check configuration
config = ','.join([str(x) for x in banks])
if config in configurations:
history.append(config)
break
yield count
index = len(history) - 1
while index > 0:
index -= 1
if history[index] == history[-1]:
break
yield len(history) - index - 1
@with_solutions('vgzejbd', 1226)
def solve7(data):
# Recursive Circus
disc_regex = re.compile('^(?P<name>[a-z]+)\s\((?P<weight>[\d]+)\)')
child_regex = re.compile('([a-z]+)')
class Disc(object):
discs = {}
def __init__(self, line):
match = disc_regex.search(line)
children = child_regex.findall(line[match.end():])
self.name = match['name']
self.weight = int(match['weight'])
self.children = children
self._weight = None
self.parent = None
self.discs[self.name] = self
def update(self):
self.children = [self.discs[c] for c in self.children]
for child in self.children:
child.parent = self
def cweight(self):
if self._weight is not None:
return self._weight
self._weight = self.weight
for child in self.children:
self._weight += child.cweight()
return self._weight
def balanced(self):
weights = [child.cweight() for child in self.children]
return len(weights) == 0 or weights.count(weights[0]) == len(weights)
def __repr__(self):
return self.name
# create the Disc objects
for line in data.splitlines():
disc = Disc(line)
# set up the parent-child hierarchy
for disc in Disc.discs.values():
disc.update()
# find the root
while disc.parent is not None:
disc = disc.parent
yield disc.name
# we start from the root, which is obviously unbalanced, and
# traverse the tree visiting each unbalanced child until we reach
# a disc with no unbalanced children
while True:
unbalanced = [child for child in disc.children if not child.balanced()]
if len(unbalanced) == 0:
break
disc = unbalanced[0]
# at this point, the current disc is unbalanced, but each of its
# children is balanced; therefore, one of the children has the
# wrong weight.
weights = [child.cweight() for child in disc.children]
unbalanced, balanced = None, None
for child in disc.children:
if balanced is None and weights.count(child.cweight()) > 0:
# found one of the balanced nodes (any will do)
balanced = child
continue
if weights.count(child.cweight()) == 1:
# found the child with the wrong weight
unbalanced = child
continue
weight_difference = unbalanced.cweight() - balanced.cweight()
yield unbalanced.weight - weight_difference
@with_solutions(7787, 8997)
def solve8(data):
# I Heard You Like Registers
registers = collections.defaultdict(int)
def evaluate(condition):
register, operator, value = condition[3:].split()
if operator == '>':
return registers[register] > int(value)
elif operator == '<':
return registers[register] < int(value)
elif operator == '>=':
return registers[register] >= int(value)
elif operator == '<=':
return registers[register] <= int(value)
elif operator == '==':
return registers[register] == int(value)
elif operator == '!=':
return registers[register] != int(value)
else:
raise Exception('Unknown operator %s' % operator)
maximum = 0
for line in data.splitlines():
register, operation, value, condition = line.split(maxsplit=3)
if evaluate(condition):
if operation == 'inc':
registers[register] += int(value)
maximum = max(maximum, registers[register])
elif operation == 'dec':
registers[register] -= int(value)
maximum = max(maximum, registers[register])
yield max(registers.values())
yield maximum
@with_solutions(10050, 4482)
def solve9(data):
# Stream Processing
sum = 0
for line in data.splitlines():
index = 0
count = total = score = 0
garbage = 0
while index < len(line):
char = line[index]
if char == '!':
index += 2
continue
elif char == '{':
if garbage != 1:
score += 1
else:
count += 1
elif char == '}':
if garbage != 1:
total += score
score -= 1
else:
count += 1
elif char == '<':
if garbage:
count += 1
garbage = 1
elif char == '>':
garbage = 0
elif garbage:
count += 1
index += 1
sum += total
if len(line) < 25:
print('%3d\t%3d\t%s' % (total, count, line))
yield sum
yield count
HEX = ['00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '0a', '0b', '0c', '0d', '0e', '0f',
'10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '1a', '1b', '1c', '1d', '1e', '1f',
'20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '2a', '2b', '2c', '2d', '2e', '2f',
'30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '3a', '3b', '3c', '3d', '3e', '3f',
'40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '4a', '4b', '4c', '4d', '4e', '4f',
'50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '5a', '5b', '5c', '5d', '5e', '5f',
'60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '6a', '6b', '6c', '6d', '6e', '6f',
'70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '7a', '7b', '7c', '7d', '7e', '7f',
'80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '8a', '8b', '8c', '8d', '8e', '8f',
'90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '9a', '9b', '9c', '9d', '9e', '9f',
'a0', 'a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'aa', 'ab', 'ac', 'ad', 'ae', 'af',
'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8', 'b9', 'ba', 'bb', 'bc', 'bd', 'be', 'bf',
'c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9', 'ca', 'cb', 'cc', 'cd', 'ce', 'cf',
'd0', 'd1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8', 'd9', 'da', 'db', 'dc', 'dd', 'de', 'df',
'e0', 'e1', 'e2', 'e3', 'e4', 'e5', 'e6', 'e7', 'e8', 'e9', 'ea', 'eb', 'ec', 'ed', 'ee', 'ef',
'f0', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'fa', 'fb', 'fc', 'fd', 'fe', 'ff']
def knothash_reverse(string, start, length):
"""
reverse([0, 1, 2, 3, 4], 0, 3) = [2, 1, 0, 3, 4]
reverse([2, 1, 0, 3, 4], 3, 4) = [4, 3, 0, 1, 2]
reverse([4, 3, 0, 1, 2], 3, 1) = [4, 3, 0, 1, 2]
reverse([4, 3, 0, 1, 2], 1, 5) = [3, 4, 2, 1, 0]
"""
end = (start + length - 1) % len(string)
length = length // 2
while length > 0:
try:
string[start], string[end] = string[end], string[start]
start = (start + 1) % len(string)
end -= 1
if end < 0:
end = len(string) - 1
length -= 1
except IndexError:
print(start, end, length)
raise
return string
def knothash_round(string, lengths, current = 0, skip = 0):
for length in lengths:
string = knothash_reverse(string, current, length)
current += (length + skip)
current %= 256
skip += 1
return (string, current, skip)
def knothash(input_data):
lengths = [ord(x) for x in input_data] + [17, 31, 73, 47, 23]
current = skip = 0
string = [x for x in range(0, 256)]
for r in range(64):
string, current, skip = knothash_round(string, lengths, current, skip)
blocks = [iter(string)] * 16
dense_hash = []
for block in itertools.zip_longest(*blocks):
dense_hash.append(functools.reduce(operator.xor, block))
return ''.join([HEX[x] for x in dense_hash])
@with_solutions(23874, 'e1a65bfb5a5ce396025fab5528c25a87')
def solve10(data):
# Knot Hash
# Part 1
string = [x for x in range(0, 256)]
lengths = [int(x) for x in data.split(',')]
current = skip = 0
string, _, _ = knothash_round(string, lengths)
yield string[0] * string[1]
# Part 2
yield knothash(data)
@with_solutions(650, 1465)
def solve11(data):
# Hex Ed
cube_directions = {
'n' : (0, 1, -1),
'ne' : (1, 0, -1),
'se' : (1, -1, 0),
's' : (0, -1, 1),
'sw' : (-1, 0, 1),
'nw' : (-1, 1, 0)
}
def cube_move(cube, direction):
i, j, k = cube_directions[direction]
a, b, c = cube
return (a + i, b + j, c + k)
def cube_distance(a, b):
return max(abs(a[0] - b[0]),
abs(a[1] - b[1]),
abs(a[2] - b[2]))
def _solve(data):
origin = (0, 0, 0)
current = origin
distances = []
for step in data.split(','):
current = cube_move(current, step)
distances.append(cube_distance(current, origin))
return (cube_distance(current, origin), max(distances))
a, b = _solve(data)
yield a
yield b
@with_solutions(141, 171)
def solve12(data):
# Digital Plumber
G = networkx.Graph()
for line in data.splitlines():
head, nodes = line.split('<->')
head = int(head)
nodes = [int(n) for n in nodes.split(',')]
for node in nodes:
G.add_edge(head, node)
G.add_edge(node, head)
connected = list(networkx.connected_components(G))
yield len(connected[0])
yield len(connected)
@with_solutions(632, 3849742)
def solve13(data):
# Packet Scanners
lines = [line.split(':') for line in data.splitlines()]
firewall = {int(pos): int(height) for pos, height in lines}
def scanner(layer, time):
offset = time % ((layer - 1) * 2)
return 2 * (layer - 1) - offset if offset > layer - 1 else offset
severity = 0
for position in firewall:
if scanner(firewall[position], position) == 0:
severity += position * firewall[position]
yield severity
for delay in itertools.count():
result = (scanner(firewall[pos], delay + pos) == 0 for pos in firewall)
if not any(result):
break
yield delay
@with_solutions(8316, 1074)
def solve14(data):
# Disk Defragmentation
BITS = { '0': (0, [False, False, False, False]),
'1': (1, [False, False, False, True]),
'2': (1, [False, False, True, False]),
'3': (2, [False, False, True, True]),
'4': (1, [False, True, False, False]),
'5': (2, [False, True, False, True]),
'6': (2, [False, True, True, False]),
'7': (3, [False, True, True, True]),
'8': (1, [True, False, False, False]),
'9': (2, [ True, False, False, True]),
'a': (2, [ True, False, True, False]),
'b': (3, [ True, False, True, True]),
'c': (2, [ True, True, False, False]),
'd': (3, [ True, True, False, True]),
'e': (3, [ True, True, True, False]),
'f': (4, [ True, True, True, True])}
bitmap = []
total = 0
for n in range(128):
input_data = '%s-%d' % (data, n)
kh = knothash(input_data)
bitmap.append([])
for digit in kh:
total += BITS[digit][0]
for bit in BITS[digit][1]:
bitmap[-1].append(bit)
yield total
seen = set()
n = 0
def dfs(i, j):
if ((i, j)) in seen:
return
if not bitmap[i][j]:
return
seen.add((i, j))
if i > 0:
dfs(i-1, j)
if j > 0:
dfs(i, j-1)
if i < 127:
dfs(i+1, j)
if j < 127:
dfs(i, j+1)
for x in range(128):
for y in range(128):
if (x, y) in seen:
continue
if not bitmap[x][y]:
continue
n += 1
dfs(x, y)
yield n
@with_solutions(609, 253)
def solve15(data):
# Dueling Generators
def generator(factor, start, divisor = None):
current = start
while True:
current *= factor
current = current % 2147483647
if divisor is None or current % divisor == 0:
yield current & 0xffff
A = generator(16807, 883)
B = generator(48271, 879)
yield sum(next(A) == next(B) for _ in range(40000000))
A = generator(16807, 883, 4)
B = generator(48271, 879, 8)
yield sum(next(A) == next(B) for _ in range(5000000))
@with_solutions('gkmndaholjbfcepi', 'abihnfkojcmegldp')
def solve16(data):
# Permutation Promenade
history = {}
c = 0
def oneround(programs):
for move in data.split(','):
if move[0] == 's':
n = int(move[1:])
programs = programs[-n:] + programs[:-n]
elif move[0] == 'x':
a, b = [int(n) for n in move[1:].split('/')]
programs[a], programs[b] = programs[b], programs[a]
elif move[0] == 'p':
a, b = move[1:].split('/')
i, j = programs.index(a), programs.index(b)
programs[i], programs[j] = programs[j], programs[i]
return programs
programs = oneround(list('abcdefghijklmnop'))
yield ''.join(programs)
programs = list('abcdefghijklmnop')
history = []
for x in range(1000000000):
sp = ''.join(programs)
if sp in history:
yield ''.join(history[1000000000 % len(history)])
break
history.append(sp)
programs = oneround(programs)
@with_solutions(1971, 17202899)
def solve17(data):
# Spinlock
step = int(data)
buffer = array.array('H', [0])
position = 0
for current in range(1, 2018):
position = ((position + step) % len(buffer)) + 1
buffer.insert(position, current)
yield buffer[position + 1]
position = val = 0
bufferlen = 1
for current in range(1, 50000001):
position = ((position + step) % bufferlen) + 1
if position == 1:
val = current
bufferlen += 1
yield val
@with_solutions(3188, 7112)
def solve18(data):
# Duet
program = [line.split() for line in data.splitlines()]
# single instruction execution
def execute(registers, pid, ip, queues):
try:
instr, reg, val = program[ip]
except ValueError:
instr, reg = program[ip]
val = 0
try:
val = int(val)
except ValueError:
val = registers[val]
if instr == 'snd':
if queues is None:
registers['lastsnd'] = registers[reg]
else:
if pid == 1:
registers['count'] += 1
queues[pid].put(registers[reg])
elif instr == 'set':
registers[reg] = val
elif instr == 'add':
registers[reg] += val
elif instr == 'mul':
registers[reg] *= val
elif instr == 'mod':
registers[reg] %= val
elif instr == 'rcv':
if queues is None:
if registers[reg] != 0:
return (ip, 'done')
else:
try:
val = queues[1-pid].get(False)
registers[reg] = val
except queue.Empty:
return (ip, 'recv')
elif instr == 'jgz':
if reg in string.ascii_lowercase:
if registers[reg] > 0:
ip += val
return (ip, 'ok')
else:
if int(reg) > 0:
ip += val
return (ip, 'ok')
ip += 1
if 0 <= ip < len(program):
return (ip, 'ok')
else:
return (ip, 'done')
# Part 1
registers = collections.defaultdict(int)
ip = 0
while True:
ip, state = execute(registers, 0, ip, None)
if state == 'done':
break
yield registers['lastsnd']
# Part 2
registers = [collections.defaultdict(int) for x in range(2)]
registers[1]['p'] = 1
ip = [0, 0]
queues = [queue.Queue() for x in range(2)]
states = ['ok', 'ok']
pr = 0
while True:
_ip, _state = execute(registers[pr], pr, ip[pr], queues)
# this program is waiting for a value, but the other program
# has finished; this program will never receive a value so halt
if _state == 'recv' and states[1 - pr] == 'done':
break
# we didn't write anything, but the other program is waiting
# for a value; deadlock!
if _state == 'recv' and queues[pr].empty() and states[1 - pr] == 'recv':
break
# both programs are done
if _state == 'done' and states[1 - pr] == 'done':
break
# save state for the current program
ip[pr] = _ip
states[pr] = _state
# if the current program is done, or waiting, switch to the other one
if _state in ('done', 'recv'):
pr = 1 - pr
yield registers[1]['count']
@with_solutions('SXPZDFJNRL', 18126)
def solve19(data):
# A Series of Tubes
DIRECTIONS = { '↑' : [(-1, 0), '↓'],
'↓' : [( 1, 0), '↑'],
'←' : [( 0, -1), '→'],
'→' : [( 0, 1), '←']}
def move(x, y, direction):
dx, dy = DIRECTIONS[direction][0]
return (x + dx, y + dy)
def opposite(direction):
return DIRECTIONS[direction][1]
diagram = data.splitlines()
# entry point is always at the top
x, y = (0, diagram[0].find('|'))
direction = '↓'
path = []
count = 0
while True:
x, y = move(x, y, direction)
count += 1
if diagram[x][y] == '+':
for dir in ('←', '→', '↑', '↓'):
if dir == opposite(direction):
continue
nx, ny = move(x, y, dir)
if diagram[nx][ny] != ' ':
direction = dir
break
elif diagram[x][y] in string.ascii_uppercase:
path.append(diagram[x][y])
elif diagram[x][y] == ' ':
break
yield ''.join(path)
yield count
@with_solutions('161', 438)
def solve20(data):
# Particle Swarm
def load(input_data):
particles = []
VREG = re.compile('^p=\<(?P<position>[^\>]+)\>, v=\<(?P<velocity>[^\>]+)\>, a=\<(?P<acceleration>[^\>]+)\>')
for index, line in enumerate(input_data.splitlines()):
match = VREG.match(line)
position = [int(p) for p in match.group('position').split(',')]
velocity = [int(v) for v in match.group('velocity').split(',')]
acceleration = [int(a) for a in match.group('acceleration').split(',')]
particles.append({'p': position,
'v':velocity,
'a':acceleration,
'n':'%03d' % index})
return particles
def add(veca, vecb):
ax, ay, az = veca
bx, by, bz = vecb
return (ax + bx, ay + by, az + bz)
def distance(particle):
pos = particle['p']
return abs(pos[0]) + abs(pos[1]) + abs(pos[2])
# Part 1
particles = load(data)
for n in range(1000): # 1000 ticks should be enough
for particle in particles:
particle['v'] = add(particle['v'], particle['a'])
particle['p'] = add(particle['p'], particle['v'])
_particles = sorted(particles, key = distance)
yield _particles[0]['n']
# Part 2
def collisions(_particles):
positionmap = collections.defaultdict(list)
for particle in _particles:
positionmap[particle['p']].append(particle)
_particles = []
for key in positionmap.keys():
if len(positionmap[key]) == 1:
_particles.append(positionmap[key][0])
return _particles
particles = load(data)
for n in range(1000):
for particle in particles:
particle['v'] = add(particle['v'], particle['a'])
particle['p'] = add(particle['p'], particle['v'])
particles = collisions(particles)
yield len(particles)
@with_solutions(205, 3389823)
def solve21(data):
# Fractal Art
def matrix(line):
return tuple([tuple(r) for r in line.split('/')])
def to_str(m, prefix='', sep='/'):
return '%s: [%s]' % (prefix, sep.join([''.join(r) for r in m]))
def show(m, sep='/'):
print(to_str(m, sep))
if sep == '\n':
print()
def load(data):
patterns = {}
for line in data.splitlines():
input, output = [matrix(p) for p in line.split(' => ')]
patterns[input] = output
patterns[flip(input)] = output
for r in range(3):
input = rotate(input)
patterns[input] = output
patterns[flip(input)] = output
return patterns
def flip(m):
return tuple([tuple(reversed(r)) for r in m])
def rotate(m):
n = len(m)
o = []
for r in range(n):
t = []
for c in range(n-1, -1, -1):
t.append(m[c][r])
o.append(tuple(t))
return tuple(o)
def iteration(m):
n = len(m)
if n % 2 == 0:
size = n // 2 * 3
div, newdiv = 2, 3
else:
size = n//3 * 4
div, newdiv = 3, 4
out = [[None]*size for _ in range(size)]
for i in range(0, n // div):
for j in range(0, n // div):
si, sj = i * div, j * div
g = tuple([row[sj:sj+div] for row in m[si:si+div]])
transf = patterns[g]
ni, nj = i * newdiv, j * newdiv
for a in range(newdiv):
for b in range(newdiv):
out[ni+a][nj+b] = transf[a][b]
return tuple([tuple(r) for r in out])
patterns = load(data)
m = matrix('.#./..#/###')
for n in range(18):
m = iteration(m)
if n == 4 or n == 17:
yield to_str(m).count('#')
@with_solutions(5240, 2512144)
def solve22(data):
# Sporifica Virus
CLEAN, INFECTED, WEAKENED, FLAGGED = 0, 1, 2, 3
def load(data):
data = data.splitlines()
grid = collections.defaultdict(int)
for row, line in enumerate(data):
for col, point in enumerate(line):
if point == '#':
grid[(row, col)] = INFECTED
startx, starty = len(data) // 2, len(line) // 2
return grid, (startx, starty)
CARDINALS = ['←', '↑', '→', '↓']
DIRECTIONS = { '↑' : (-1, 0),
'↓' : ( 1, 0),
'←' : ( 0, -1),
'→' : ( 0, 1),}
def move(position, direction):
x, y = position
direction = CARDINALS[direction]
dx, dy = DIRECTIONS[direction]
return ((x + dx, y + dy))
def step(grid, position, direction):
flag = False
if grid[position] == INFECTED:
direction = (direction + 1) % 4
grid[position] = CLEAN
else:
flag = True
direction = (direction - 1) % 4
grid[position] = INFECTED
position = move(position, direction)
return position, direction, flag
def evolved_step(grid, position, direction):
flag = False
if grid[position] == CLEAN:
grid[position] = WEAKENED
direction = (direction - 1) % 4
elif grid[position] == WEAKENED:
grid[position] = INFECTED
flag = True
elif grid[position] == INFECTED:
grid[position] = FLAGGED
direction = (direction + 1) % 4
elif grid[position] == FLAGGED:
grid[position] = CLEAN
direction = (direction + 2) % 4
position = move(position, direction)
return position, direction, flag
# Part 1
grid, position = load(data)
direction = 1
count = 0
for n in range(10000):
position, direction, infected = step(grid, position, direction)
if infected: count += 1
yield count
# Part 2
grid, position = load(data)
direction = 1
count = 0
for n in range(10000000):
position, direction, infected = evolved_step(grid, position, direction)
if infected: count += 1
yield count
@with_solutions(6724, 903)
def solve23(data):
# Coprocessor Conflagaration
program = [line.split() for line in data.splitlines()]
def execute(registers, ip):
instr, reg, val = program[ip]
try:
val = int(val)
except ValueError:
val = registers[val]
if instr == 'set':
registers[reg] = val
elif instr == 'sub':
registers[reg] -= val
elif instr == 'mul':
registers[reg] *= val
registers['count'] += 1
elif instr == 'jnz':
if reg in 'abcdefgh':
if registers[reg] != 0:
ip += val
return ip
else:
if int(reg) != 0:
ip += val
return ip
ip += 1
return ip
# Part 1
registers = collections.defaultdict(int)
ip = 0
while True:
ip = execute(registers, ip)
if ip < 0 or ip >= len(program):
break
yield registers['count']
# Part 2
h = 0
for b in range(108400, 125400 + 1, 17):
for x in range(2, b):
if b % x == 0:
h += 1
break
yield h
@with_solutions(1906, 1824)
def solve24(data):
# Electromagnetic Moat
def build_bridges(components, bridge=None):
if not bridge:
bridge = [(0,0)]
next = bridge[-1][1]
for y in components[next]:
if (next, y) not in bridge and (y, next) not in bridge:
new = bridge + [(next, y)]
yield new
yield from build_bridges(components, new)
components = collections.defaultdict(set)
for line in data.splitlines():
x, y = [int(p) for p in line.split('/')]
components[x].add(y)
components[y].add(x)
# Part 1
bridges = []
maxstrength = maxlength = 0
for bridge in build_bridges(components):
bridges.append(bridge)
maxstrength = max(maxstrength, sum(x+y for x,y in bridge))
maxlength = max(maxlength, len(bridge))
yield maxstrength
# Part 2
maxstrength = 0
for bridge in filter(lambda b : len(b) == maxlength, bridges):
maxstrength = max(maxstrength, sum(x+y for x,y in bridge))
yield maxstrength
@with_solutions(3578, None)
def solve25(data):
# The Halting Problem
lines = data.splitlines()
state = lines[0][-2]
trigger = int(lines[1].split()[-2])
states = {}
for i in range(3, len(lines), 10):
state_name = lines[i][-2]
states[state_name] = [ (int(lines[i+2][-2]), lines[i+3][-6:-1].strip(), lines[i+4][-2]),
(int(lines[i+6][-2]), lines[i+7][-6:-1].strip(), lines[i+8][-2]) ]
cursor = 100
tape = collections.defaultdict(int)
for n in range(trigger):
current = tape[cursor]
val, direction, nextstate = states[state][current]
tape[cursor] = val
if direction == 'left':
cursor -= 1
else:
cursor += 1
state = nextstate
yield list(tape.values()).count(1)
yield None
################################################################################
if __name__ == '__main__':
if len(sys.argv) > 1:
day = int(sys.argv[1])
else:
day = guess_day()
custom_data = False
get_session_id()
if len(sys.argv) > 2:
if sys.argv[2] == '-':
data = sys.stdin.read()
else:
data = sys.argv[2]
custom_data = True
else:
data = get_data(day)
if day not in (19,):
data = data.strip()
solvers = {}
solvers = dict([(fn, f) for fn, f in globals().items()
if callable(f) and fn.startswith('solve')])
solver = solvers.get('solve{}'.format(day), None)
if solver is not None:
start = time.time()
solver(data, skip_verification=custom_data)
end = time.time()
elapsed = (end - start)
if elapsed > 0.001:
print('Elapsed: %4.3f s' % elapsed)
else:
elapsed *= 1000.0
print('Elapsed: %4.3f us' % elapsed)
else:
print('No solver for day {}'.format(day))
| 28.567059 | 116 | 0.503775 |
ace7b17bb0cde7358847798620ebbeebe3911fc6 | 6,479 | py | Python | testbed_backend/agentworker/worker.py | Ncu-software-research-center/IIOT-testbed | b4c8f91d1fd1e596d2262152ce99afeb22976f7a | [
"Apache-2.0"
] | 1 | 2021-05-25T09:33:28.000Z | 2021-05-25T09:33:28.000Z | testbed_backend/agentworker/worker.py | Ncu-software-research-center/IIOT-testbed | b4c8f91d1fd1e596d2262152ce99afeb22976f7a | [
"Apache-2.0"
] | 6 | 2020-06-06T01:59:09.000Z | 2021-06-10T20:17:56.000Z | testbed_backend/agentworker/worker.py | Ncu-software-research-center/IIOT-testbed | b4c8f91d1fd1e596d2262152ce99afeb22976f7a | [
"Apache-2.0"
] | null | null | null | from glob import glob
import json
import os
import signal
import socket
import subprocess
import threading
import time
import warnings
import redis
from agentworker import (
EmulationStatus,
WorkerStatus
)
from agentworker.config import Config
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
def get_ip_address():
get_s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
get_s.connect(('8.8.8.8', 0))
ip_table = 'ip:{}'.format(get_s.getsockname()[0])
get_s.close()
return ip_table
def init_table(r, ip):
"""
Initialize redis table by given ip address.
Args:
ip: The id address of current machine.
Return:
True, if the initialization is successful, otherwise False.
"""
try:
r.set("stop_agentworker", "")
r.hset(ip, "device_name", "")
r.hset(ip, "device_settings", "")
r.hset(ip, "worker_status", WorkerStatus.WAIT)
except Exception as e:
print("init_table error", e)
return False
def heartbeat(r, ip):
"""
Check if redis is alive per second.
"""
try:
while not bool(r.get("stop_agentworker")):
# Get nanoseconds
now = "{:.20f}".format(time.time())
r.hset(ip, "time", "{}".format(now))
time.sleep(1)
except Exception as e:
print("heartbeat error:", (e))
raise Exception("heartbeat error:", (e))
def remove_old_log():
"""
Remove all log which generated by previous emulation.
"""
try:
DELETED_LOG_PATH = "{}/*.json".format(Config.REPORT_PATH)
for log in glob(DELETED_LOG_PATH):
if os.path.exists(log):
os.remove(log)
except Exception as e:
print("remove old log error", e)
def generate_dds_descriptive_file(r, ip):
"""
Args:
r: Redis instance.
ip: Specific ip.
"""
try:
device_name = r.hget(ip, "device_name")
device_settings = json.loads(r.hget(ip, "device_settings"))
emulation_time = int(r.get("emulation_time"))
dds_file = {
"name": device_name,
"experiment_time": emulation_time,
"measure_count": 100,
"domain": device_settings["domain"],
}
dds_file_path = os.path.join(
CURRENT_DIR, Config.DATASETTING_PATH, "dds_descriptive_file.json")
with open(dds_file_path, "w") as outputfile:
json.dump(dds_file, outputfile)
except Exception as e:
print("generate_dds_descriptive_file error: ", e)
def remove_emulation_signal():
if os.path.isfile(Config.DDS_READY_PATH):
os.remove(Config.DDS_READY_PATH)
if os.path.isfile(Config.DDS_START_PATH):
os.remove(Config.DDS_START_PATH)
def dds_is_ready():
return os.path.isfile(Config.DDS_READY_PATH)
def set_dds_start():
with open(Config.DDS_START_PATH, "w") as f:
f.write("start\n")
def execute_dds(r, ip):
"""
Args:
r: Redis instance.
ip: Specific ip.
"""
cmd = Config.DDS_PATH
result = True
is_start = False
remove_emulation_signal()
print("Wait for releasing resource")
time.sleep(15)
try:
sys_env = os.environ.copy()
proc = subprocess.Popen(cmd, shell=False, env=sys_env, preexec_fn=os.setsid)
print("Run emulation.")
while proc.poll() is None:
print("execute subprocess")
if dds_is_ready():
r.hset(ip, "worker_status", WorkerStatus.READY)
os.remove(Config.DDS_READY_PATH)
emulation_status = r.get("emulation_status")
if emulation_status == EmulationStatus.START and not is_start:
print("DDS start.")
set_dds_start()
is_start = True
elif emulation_status == EmulationStatus.ABORT:
print("Abort emulation.")
r.hset(ip, "worker_status", WorkerStatus.WAIT)
proc.terminate()
proc.wait()
result = False
break
time.sleep(1)
return result
except Exception as e:
print("Execute dds error:", e)
def load_json(path):
with open(path, 'r') as inputfile:
data = json.load(inputfile)
return data
def save_report(r, ip):
try:
report = {}
for filename in os.listdir(Config.REPORT_PATH):
if 'json' in filename:
path = os.path.join(Config.REPORT_PATH, filename)
data = load_json(path)
r.hset(ip, 'device_report', json.dumps(data))
except Exception as e:
print("Trainsmit result to frontend error: ", e)
def wait_emulation_end(r):
"""
r: Redis instance.
"""
try:
while True:
result = r.get('emulation_status')
if (result == EmulationStatus.END or
result == EmulationStatus.EXCEPTION or
result == EmulationStatus.ABORT):
print("emulation end.")
break
else:
time.sleep(1)
except Exception as e:
print('wait emulation end error: ', e)
def main():
print("Agent worker start")
r = redis.StrictRedis(host=Config.SERVER_IP, port=Config.REDIS_PORT, password=Config.REDIS_PASSWORD,
encoding="utf-8",decode_responses=True)
try:
ip = get_ip_address()
init_table(r, ip)
thread_heartbeat = threading.Thread(target=heartbeat, name="heartbeat", args=(r, ip,))
thread_heartbeat.daemon = True
thread_heartbeat.start()
while not bool(r.get("stop_agentworker")):
if r.hget(ip, "worker_status") == WorkerStatus.WAIT:
print("wait")
elif r.hget(ip, "worker_status") == WorkerStatus.PREPARE:
remove_old_log()
generate_dds_descriptive_file(r, ip)
print("Generating dds descriptive file is finish.")
if execute_dds(r, ip) is True:
print("transmit result")
save_report(r, ip)
r.hset(ip, "worker_status", WorkerStatus.DONE)
wait_emulation_end(r)
time.sleep(1)
except Exception as e:
r.hset(ip, "worker_status", WorkerStatus.EXCEPTION)
print("error", e)
if __name__ == "__main__":
main()
| 27.806867 | 105 | 0.580645 |
ace7b1c91c8a423a5259c98ca6a7658e05ae74f7 | 1,728 | py | Python | examples/faceboxes/faceboxes_test.py | Amanda-Barbara/nvcaffe | 5155a708b235a818ce300aa3f9fc235ece9a35fb | [
"BSD-2-Clause"
] | 758 | 2015-03-08T20:54:38.000Z | 2022-01-11T03:14:51.000Z | examples/faceboxes/faceboxes_test.py | Matsuko9/caffe | 17e347e42e664b87d80f63bfbbb89bec5e559242 | [
"BSD-2-Clause"
] | 493 | 2015-04-28T00:08:53.000Z | 2021-08-04T07:26:54.000Z | examples/faceboxes/faceboxes_test.py | Matsuko9/caffe | 17e347e42e664b87d80f63bfbbb89bec5e559242 | [
"BSD-2-Clause"
] | 389 | 2015-03-05T12:11:44.000Z | 2022-03-13T21:49:42.000Z | import numpy as np
import sys, os
import cv2
sys.path.insert(0, '../../python')
import caffe
import time
net_file = 'SSD.prototxt'
caffe_model = 'SSD.caffemodel'
test_dir = "images"
if not os.path.exists(caffe_model):
print("SSD.caffemodel does not exist, see https://github.com/sfzhang15/SFD")
exit()
caffe.set_mode_gpu()
net = caffe.Net(net_file, caffe_model, caffe.TEST)
CLASSES = ('background',
'face')
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2, 0, 1))
transformer.set_mean('data', np.array([104, 117, 123])) # mean pixel
def postprocess(img, out):
h = img.shape[0]
w = img.shape[1]
box = out['detection_out'][0, 0, :, 3:7] * np.array([w, h, w, h])
cls = out['detection_out'][0, 0, :, 1]
conf = out['detection_out'][0, 0, :, 2]
return (box.astype(np.int32), conf, cls)
def detect(imgfile):
frame = cv2.imread(imgfile)
transformed_image = transformer.preprocess('data', frame)
net.blobs['data'].data[...] = transformed_image
time_start = time.time()
out = net.forward()
time_end = time.time()
print (time_end - time_start),
print ("s")
box, conf, cls = postprocess(frame, out)
for i in range(len(box)):
p1 = (box[i][0], box[i][1])
p2 = (box[i][2], box[i][3])
cv2.rectangle(frame, p1, p2, (0, 255, 0))
p3 = (max(p1[0], 15), max(p1[1], 15))
title = "%s:%.2f" % (CLASSES[int(cls[i])], conf[i])
cv2.putText(frame, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
cv2.imshow("SSD, %d boxes" % len(box), frame)
cv2.waitKey()
# if cv2.waitKey(100) & 0xFF == ord('q'):
# break
detect("pepper.jpg")
| 27.870968 | 80 | 0.603009 |
ace7b222583f1c0f13263e4090e7909f967f5fd0 | 9,422 | py | Python | melodic/lib/python2.7/dist-packages/mavros_msgs/msg/_HilSensor.py | Dieptranivsr/Ros_Diep | d790e75e6f5da916701b11a2fdf3e03b6a47086b | [
"MIT"
] | 10 | 2021-03-15T03:58:06.000Z | 2021-12-30T15:33:38.000Z | devel/.private/mavros_msgs/lib/python2.7/dist-packages/mavros_msgs/msg/_HilSensor.py | arijitnoobstar/UAVProjectileCatcher | 3c1bed80df167192cb4b971b58c891187628142e | [
"Apache-2.0"
] | 1 | 2021-09-09T15:29:31.000Z | 2021-09-09T15:29:31.000Z | melodic/lib/python2.7/dist-packages/mavros_msgs/msg/_HilSensor.py | Dieptranivsr/Ros_Diep | d790e75e6f5da916701b11a2fdf3e03b6a47086b | [
"MIT"
] | 4 | 2021-03-06T09:35:58.000Z | 2021-05-24T14:34:11.000Z | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from mavros_msgs/HilSensor.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import std_msgs.msg
class HilSensor(genpy.Message):
_md5sum = "2a892891e5c40d6dd1066bf1f394b5dc"
_type = "mavros_msgs/HilSensor"
_has_header = True # flag to mark the presence of a Header object
_full_text = """# HilSensor.msg
#
# ROS representation of MAVLink HIL_SENSOR
# See mavlink message documentation here:
# https://mavlink.io/en/messages/common.html#HIL_SENSOR
std_msgs/Header header
geometry_msgs/Vector3 acc
geometry_msgs/Vector3 gyro
geometry_msgs/Vector3 mag
float32 abs_pressure
float32 diff_pressure
float32 pressure_alt
float32 temperature
uint32 fields_updated
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: geometry_msgs/Vector3
# This represents a vector in free space.
# It is only meant to represent a direction. Therefore, it does not
# make sense to apply a translation to it (e.g., when applying a
# generic rigid transformation to a Vector3, tf2 will only apply the
# rotation). If you want your data to be translatable too, use the
# geometry_msgs/Point message instead.
float64 x
float64 y
float64 z"""
__slots__ = ['header','acc','gyro','mag','abs_pressure','diff_pressure','pressure_alt','temperature','fields_updated']
_slot_types = ['std_msgs/Header','geometry_msgs/Vector3','geometry_msgs/Vector3','geometry_msgs/Vector3','float32','float32','float32','float32','uint32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,acc,gyro,mag,abs_pressure,diff_pressure,pressure_alt,temperature,fields_updated
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(HilSensor, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.acc is None:
self.acc = geometry_msgs.msg.Vector3()
if self.gyro is None:
self.gyro = geometry_msgs.msg.Vector3()
if self.mag is None:
self.mag = geometry_msgs.msg.Vector3()
if self.abs_pressure is None:
self.abs_pressure = 0.
if self.diff_pressure is None:
self.diff_pressure = 0.
if self.pressure_alt is None:
self.pressure_alt = 0.
if self.temperature is None:
self.temperature = 0.
if self.fields_updated is None:
self.fields_updated = 0
else:
self.header = std_msgs.msg.Header()
self.acc = geometry_msgs.msg.Vector3()
self.gyro = geometry_msgs.msg.Vector3()
self.mag = geometry_msgs.msg.Vector3()
self.abs_pressure = 0.
self.diff_pressure = 0.
self.pressure_alt = 0.
self.temperature = 0.
self.fields_updated = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_9d4fI().pack(_x.acc.x, _x.acc.y, _x.acc.z, _x.gyro.x, _x.gyro.y, _x.gyro.z, _x.mag.x, _x.mag.y, _x.mag.z, _x.abs_pressure, _x.diff_pressure, _x.pressure_alt, _x.temperature, _x.fields_updated))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.acc is None:
self.acc = geometry_msgs.msg.Vector3()
if self.gyro is None:
self.gyro = geometry_msgs.msg.Vector3()
if self.mag is None:
self.mag = geometry_msgs.msg.Vector3()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 92
(_x.acc.x, _x.acc.y, _x.acc.z, _x.gyro.x, _x.gyro.y, _x.gyro.z, _x.mag.x, _x.mag.y, _x.mag.z, _x.abs_pressure, _x.diff_pressure, _x.pressure_alt, _x.temperature, _x.fields_updated,) = _get_struct_9d4fI().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_9d4fI().pack(_x.acc.x, _x.acc.y, _x.acc.z, _x.gyro.x, _x.gyro.y, _x.gyro.z, _x.mag.x, _x.mag.y, _x.mag.z, _x.abs_pressure, _x.diff_pressure, _x.pressure_alt, _x.temperature, _x.fields_updated))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.acc is None:
self.acc = geometry_msgs.msg.Vector3()
if self.gyro is None:
self.gyro = geometry_msgs.msg.Vector3()
if self.mag is None:
self.mag = geometry_msgs.msg.Vector3()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 92
(_x.acc.x, _x.acc.y, _x.acc.z, _x.gyro.x, _x.gyro.y, _x.gyro.z, _x.mag.x, _x.mag.y, _x.mag.z, _x.abs_pressure, _x.diff_pressure, _x.pressure_alt, _x.temperature, _x.fields_updated,) = _get_struct_9d4fI().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_9d4fI = None
def _get_struct_9d4fI():
global _struct_9d4fI
if _struct_9d4fI is None:
_struct_9d4fI = struct.Struct("<9d4fI")
return _struct_9d4fI
| 38.145749 | 232 | 0.661112 |
ace7b3455d54cc8a4b2201a733983421b2edd554 | 1,674 | py | Python | bitmovin_api_sdk/analytics/impressions/errors/errors_api.py | bitmovin/bitmovin-api-sdk-python | 5a85147669c84b8ca411cf2d4dbdddc92d85bbe7 | [
"MIT"
] | 11 | 2019-07-03T10:41:16.000Z | 2022-02-25T21:48:06.000Z | bitmovin_api_sdk/analytics/impressions/errors/errors_api.py | bitmovin/bitmovin-api-sdk-python | 5a85147669c84b8ca411cf2d4dbdddc92d85bbe7 | [
"MIT"
] | 8 | 2019-11-23T00:01:25.000Z | 2021-04-29T12:30:31.000Z | bitmovin_api_sdk/analytics/impressions/errors/errors_api.py | bitmovin/bitmovin-api-sdk-python | 5a85147669c84b8ca411cf2d4dbdddc92d85bbe7 | [
"MIT"
] | 13 | 2020-01-02T14:58:18.000Z | 2022-03-26T12:10:30.000Z | # coding: utf-8
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.analytics_error_details_response import AnalyticsErrorDetailsResponse
from bitmovin_api_sdk.models.analytics_license_key import AnalyticsLicenseKey
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
class ErrorsApi(BaseApi):
@poscheck_except(2)
def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
# type: (str, str, str, BitmovinApiLoggerBase) -> None
super(ErrorsApi, self).__init__(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
def create(self, impression_id, analytics_license_key, **kwargs):
# type: (string_types, AnalyticsLicenseKey, dict) -> AnalyticsErrorDetailsResponse
"""Impression Error Details
:param impression_id: Impression id
:type impression_id: string_types, required
:param analytics_license_key: Analytics license
:type analytics_license_key: AnalyticsLicenseKey, required
:return: List of error details for impression
:rtype: AnalyticsErrorDetailsResponse
"""
return self.api_client.post(
'/analytics/impressions/{impression_id}/errors',
analytics_license_key,
path_params={'impression_id': impression_id},
type=AnalyticsErrorDetailsResponse,
**kwargs
)
| 38.045455 | 98 | 0.725209 |
ace7b3a26292171a0b3e59bbc525d2695d2942b8 | 228 | py | Python | hitchike/settings/database.py | tgy/hitchike | b73c8714f584eeeb432c7c8df706dd8b944d632c | [
"MIT"
] | 4 | 2016-02-17T21:30:23.000Z | 2016-02-20T11:10:05.000Z | hitchike/settings/database.py | tgy/hitchike | b73c8714f584eeeb432c7c8df706dd8b944d632c | [
"MIT"
] | 10 | 2020-09-14T07:56:53.000Z | 2020-09-14T07:56:55.000Z | hitchike/settings/database.py | tgy/hitchike | b73c8714f584eeeb432c7c8df706dd8b944d632c | [
"MIT"
] | null | null | null | import os
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
| 19 | 63 | 0.618421 |
ace7b471d27aada98e14fdfa08fa5cafff18521d | 5,184 | py | Python | src/jk_sysinfo/get_lsblk.py | jkpubsrc/python-module-jk-sysinfo | 583c9e5d10f64a722ffa794d081aaf94354ba4fb | [
"Apache-1.1"
] | null | null | null | src/jk_sysinfo/get_lsblk.py | jkpubsrc/python-module-jk-sysinfo | 583c9e5d10f64a722ffa794d081aaf94354ba4fb | [
"Apache-1.1"
] | null | null | null | src/jk_sysinfo/get_lsblk.py | jkpubsrc/python-module-jk-sysinfo | 583c9e5d10f64a722ffa794d081aaf94354ba4fb | [
"Apache-1.1"
] | null | null | null |
import copy
import json
from jk_cachefunccalls import cacheCalls
from .parsing_utils import *
from .invoke_utils import run
_parserColonKVP = ParseAtFirstDelimiter(delimiter="=", valueCanBeWrappedInDoubleQuotes=True)
#
# Returns:
#
# {
# "devtree": [
# {
# "children": [
# {
# "dev": "/dev/sda1",
# "fstype": "ext4",
# "label": null,
# "mountpoint": "/",
# "name": "sda1",
# "uuid": "94933739-97e5-47e8-b7e8-ab8b7ed3f2a7"
# }
# ],
# "dev": "/dev/sda",
# "fstype": null,
# "label": null,
# "mountpoint": null,
# "name": "sda",
# "uuid": null
# },
# {
# "dev": "/dev/sr0",
# "fstype": "iso9660",
# "label": "YYYYYY",
# "mountpoint": "/media/xxxxxxxx/YYYYYY",
# "name": "sr0",
# "uuid": "2001-10-05-02-59-13-00"
# }
# ],
# "mountpoints": {
# "/": {
# "dev": "/dev/sda1",
# "fstype": "ext4",
# "label": null,
# "mountpoint": "/",
# "name": "sda1",
# "uuid": "94933739-97e5-47e8-b7e8-ab8b7ed3f2a7"
# },
# "/media/xxxxxxxx/YYYYYY": {
# "dev": "/dev/sr0",
# "fstype": "iso9660",
# "label": "YYYYYY",
# "mountPoint": "/media/xxxxxxxx/YYYYYY",
# "name": "sr0",
# "uuid": "2001-10-05-02-59-13-00"
# }
# }
# }
#
def parse_lsblk(stdout:str, stderr:str, exitcode:int) -> dict:
"""
{
"blockdevices": [
{"name": "sda", "fstype": null, "label": null, "uuid": null, "mountpoint": null,
"children": [
{"name": "sda1", "fstype": "ext4", "label": null, "uuid": "94933739-97e5-47e8-b7e8-ab8b7ed3f2a7", "mountpoint": "/"}
]
},
{"name": "sr0", "fstype": "iso9660", "label": "YYYYYY", "uuid": "2001-10-05-02-59-13-00", "mountpoint": "/media/xxxxxxxx/YYYYYY"}
]
}
"""
if exitcode != 0:
raise Exception()
ret = json.loads(stdout.strip())["blockdevices"]
mountPointMap = {}
for jBlockDevice in ret:
__parse_lsblk_postproces_dev(jBlockDevice, mountPointMap)
return {
"deviceTree": ret,
"mountPoints": mountPointMap,
}
#
def __parse_lsblk_postproces_dev(j, mountPointMap):
j["dev"] = j["name"]
if "children" in j:
for j2 in j["children"]:
__parse_lsblk_postproces_dev(j2, mountPointMap)
if j["vendor"]:
j["vendor"] = j["vendor"].strip()
if j["mountpoint"]:
j2 = copy.deepcopy(j)
if "children" in j2:
del j2["children"]
mountPointMap[j2["mountpoint"]] = j2
#
class _LsBlkDevTreeFilter(object):
def __init__(self, **filterElements) -> None:
if not filterElements:
raise Exception("No filter elements specified!")
self.__jFilter = {}
for fe_key, fe_valueOrValues in filterElements.items():
assert isinstance(fe_key, str)
if isinstance(fe_valueOrValues, (int,str,bool)):
pass
elif isinstance(fe_valueOrValues, (list, tuple)):
for v in fe_valueOrValues:
if not isinstance(v, (int,str,bool)):
raise Exception("Filter " + fe_key + " has a value of invalid type!")
else:
raise Exception("Filter " + fe_key + " has a value of invalid type!")
self.__jFilter[fe_key] = fe_valueOrValues
#
def checkAccept(self, jData:dict) -> bool:
for filterKey, filterValueOrValues in self.__jFilter.items():
jDataValue = jData.get(filterKey, None)
if jDataValue is None:
# required key-value-pair does not exist or value is (null)
return False
if isinstance(filterValueOrValues, (tuple,list)):
if jDataValue not in filterValueOrValues:
# value is not in list of allowed values
return False
else:
if jDataValue != filterValueOrValues:
# value is not an allowed value
return False
return True
#
#
def filter_lsblk_devtree(jsonRawData:dict, **filterElements) -> list:
if ("deviceTree" not in jsonRawData) or ("mountPoints" not in jsonRawData):
raise Exception("Specified data is no JSON raw data!")
j = jsonRawData["deviceTree"]
assert isinstance(j, list)
filter = _LsBlkDevTreeFilter(**filterElements)
ret = []
for jitem in j:
if filter.checkAccept(jitem):
ret.append(jitem)
return ret
#
#
# Returns:
#
# {
# "devtree": [
# {
# "children": [
# {
# "dev": "/dev/sda1",
# "fstype": "ext4",
# "label": null,
# "mountpoint": "/",
# "name": "sda1",
# "uuid": "94933739-97e5-47e8-b7e8-ab8b7ed3f2a7"
# }
# ],
# "dev": "/dev/sda",
# "fstype": null,
# "label": null,
# "mountpoint": null,
# "name": "sda",
# "uuid": null
# },
# {
# "dev": "/dev/sr0",
# "fstype": "iso9660",
# "label": "YYYYYY",
# "mountpoint": "/media/xxxxxxxx/YYYYYY",
# "name": "sr0",
# "uuid": "2001-10-05-02-59-13-00"
# }
# ],
# "mountpoints": {
# "/": {
# "dev": "/dev/sda1",
# "fstype": "ext4",
# "label": null,
# "mountpoint": "/",
# "name": "sda1",
# "uuid": "94933739-97e5-47e8-b7e8-ab8b7ed3f2a7"
# },
# "/media/xxxxxxxx/YYYYYY": {
# "dev": "/dev/sr0",
# "fstype": "iso9660",
# "label": "YYYYYY",
# "mountPoint": "/media/xxxxxxxx/YYYYYY",
# "name": "sr0",
# "uuid": "2001-10-05-02-59-13-00"
# }
# }
# }
#
@cacheCalls(seconds=3, dependArgs=[0])
def get_lsblk(c = None) -> dict:
stdout, stderr, exitcode = run(c, "/bin/lsblk -bJpO")
return parse_lsblk(stdout, stderr, exitcode)
#
| 21.510373 | 132 | 0.590085 |
ace7b49d974cf8c2c24985ca4f365cce609ce525 | 29,438 | py | Python | tensorflow/python/training/input.py | mheilman/tensorflow | 9215bc32f615af47a32958ef5c9142504632e91e | [
"Apache-2.0"
] | null | null | null | tensorflow/python/training/input.py | mheilman/tensorflow | 9215bc32f615af47a32958ef5c9142504632e91e | [
"Apache-2.0"
] | null | null | null | tensorflow/python/training/input.py | mheilman/tensorflow | 9215bc32f615af47a32958ef5c9142504632e91e | [
"Apache-2.0"
] | 1 | 2018-09-13T15:49:14.000Z | 2018-09-13T15:49:14.000Z | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input pipeline.
Please see the [reading data how-to](../../how_tos/reading_data/index.md)
for context.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import queue_runner
def match_filenames_once(pattern, name=None):
"""Save the list of files matching pattern, so it is only computed once.
Args:
pattern: A file pattern (glob).
name: A name for the operations (optional).
Returns:
A variable that is initialized to the list of files matching pattern.
"""
with ops.op_scope([pattern], name, "matching_filenames") as name:
return variables.Variable(io_ops.matching_files(pattern), trainable=False,
name=name, validate_shape=False)
def limit_epochs(tensor, num_epochs=None, name=None):
"""Returns tensor `num_epochs` times and then raises an `OutOfRange` error.
Args:
tensor: Any `Tensor`.
num_epochs: A positive integer (optional). If specified, limits the number
of steps the output tensor may be evaluated.
name: A name for the operations (optional).
Returns:
tensor or `OutOfRange`.
Raises:
ValueError: if `num_epochs` is invalid.
"""
if num_epochs is None:
return tensor
if num_epochs <= 0:
raise ValueError("num_epochs must be > 0 not %d." % num_epochs)
with ops.op_scope([tensor], name, "limit_epochs") as name:
zero64 = constant_op.constant(0, dtype=dtypes.int64)
epochs = variables.Variable(zero64, name="epochs", trainable=False)
counter = epochs.count_up_to(num_epochs)
with ops.control_dependencies([counter]):
return array_ops.identity(tensor, name=name)
def _input_producer(input_tensor, dtype, num_epochs, shuffle, seed, capacity,
shared_name, name, summary_name):
if shuffle:
input_tensor = random_ops.random_shuffle(input_tensor, seed=seed)
input_tensor = limit_epochs(input_tensor, num_epochs)
q = data_flow_ops.FIFOQueue(capacity=capacity, dtypes=[dtype], shapes=[[]],
shared_name=shared_name, name=name)
enq = q.enqueue_many([input_tensor])
queue_runner.add_queue_runner(queue_runner.QueueRunner(q, [enq]))
logging_ops.scalar_summary("queue/%s/%s" % (q.name, summary_name),
math_ops.cast(q.size(), dtypes.float32) *
(1. / capacity))
return q
def string_input_producer(string_tensor, num_epochs=None, shuffle=True,
seed=None, capacity=32, shared_name=None, name=None):
"""Output strings (e.g. filenames) to a queue for an input pipeline.
Args:
string_tensor: A 1-D string tensor with the strings to produce.
num_epochs: An integer (optional). If specified, `string_input_producer`
produces each string from `string_tensor` `num_epochs` times before
generating an OutOfRange error. If not specified, `string_input_producer`
can cycle through the strings in `string_tensor` an unlimited number of
times.
shuffle: Boolean. If true, the strings are randomly shuffled within each
epoch.
seed: An integer (optional). Seed used if shuffle == True.
capacity: An integer. Sets the queue capacity.
shared_name: (optional). If set, this queue will be shared under the given
name across multiple sessions.
name: A name for the operations (optional).
Returns:
A queue with the output strings. A `QueueRunner` for the Queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
Raises:
ValueError: If the string_tensor is a null Python list. At runtime,
will fail with an assertion if string_tensor becomes a null tensor.
"""
not_null_err = "string_input_producer requires a non-null input tensor"
if not isinstance(string_tensor, ops.Tensor) and not string_tensor:
raise ValueError(not_null_err)
with ops.op_scope([string_tensor], name, "input_producer") as name:
string_tensor = ops.convert_to_tensor(string_tensor, dtype=dtypes.string)
with ops.control_dependencies([
logging_ops.Assert(math_ops.greater(array_ops.size(string_tensor), 0),
[not_null_err])]):
string_tensor = array_ops.identity(string_tensor)
return _input_producer(
input_tensor=string_tensor,
dtype=dtypes.string,
num_epochs=num_epochs,
shuffle=shuffle,
seed=seed,
capacity=capacity,
shared_name=shared_name,
name=name,
summary_name="fraction_of_%d_full" % capacity)
def range_input_producer(limit, num_epochs=None, shuffle=True, seed=None,
capacity=32, shared_name=None, name=None):
"""Produces the integers from 0 to limit-1 in a queue.
Args:
limit: An int32 scalar tensor.
num_epochs: An integer (optional). If specified, `range_input_producer`
produces each integer `num_epochs` times before generating an
OutOfRange error. If not specified, `range_input_producer` can cycle
through the integers an unlimited number of times.
shuffle: Boolean. If true, the integers are randomly shuffled within each
epoch.
seed: An integer (optional). Seed used if shuffle == True.
capacity: An integer. Sets the queue capacity.
shared_name: (optional). If set, this queue will be shared under the given
name across multiple sessions.
name: A name for the operations (optional).
Returns:
A Queue with the output integers. A `QueueRunner` for the Queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
"""
with ops.op_scope([limit], name, "input_producer") as name:
range_tensor = math_ops.range(limit)
return _input_producer(
range_tensor, dtypes.int32, num_epochs, shuffle, seed, capacity,
shared_name, name, "fraction_of_%d_full" % capacity)
def slice_input_producer(tensor_list, num_epochs=None, shuffle=True, seed=None,
capacity=32, shared_name=None, name=None):
"""Produces a slice of each `Tensor` in `tensor_list`.
Implemented using a Queue -- a `QueueRunner` for the Queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
Args:
tensor_list: A list of `Tensor` objects. Every `Tensor` in
`tensor_list` must have the same size in the first dimension.
num_epochs: An integer (optional). If specified, `slice_input_producer`
produces each slice `num_epochs` times before generating
an `OutOfRange` error. If not specified, `slice_input_producer` can cycle
through the slices an unlimited number of times.
shuffle: Boolean. If true, the integers are randomly shuffled within each
epoch.
seed: An integer (optional). Seed used if shuffle == True.
capacity: An integer. Sets the queue capacity.
shared_name: (optional). If set, this queue will be shared under the given
name across multiple sessions.
name: A name for the operations (optional).
Returns:
A list of tensors, one for each element of `tensor_list`. If the tensor
in `tensor_list` has shape `[N, a, b, .., z]`, then the corresponding output
tensor will have shape `[a, b, ..., z]`.
Raises:
ValueError: if `slice_input_producer` produces nothing from `tensor_list`.
"""
with ops.op_scope(tensor_list, name, "input_producer"):
tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
if not tensor_list:
raise ValueError(
"Expected at least one tensor in slice_input_producer().")
range_size = array_ops.shape(tensor_list[0])[0]
# TODO(josh11b): Add an assertion that the first dimension of
# everything in TensorList matches. Maybe just check the inferred shapes?
queue = range_input_producer(range_size, num_epochs=num_epochs,
shuffle=shuffle, seed=seed, capacity=capacity,
shared_name=shared_name)
index = queue.dequeue()
output = [array_ops.gather(t, index) for t in tensor_list]
return output
# Helpers for the batching functions ------------------------------------------
def _flatten(tensor_list_list):
return [tensor for tensor_list in tensor_list_list for tensor in tensor_list]
def _serialize_sparse_tensors(tensor_list, enqueue_many):
"""Serialize SparseTensors for feeding into batch, etc."""
is_sparse_list = [isinstance(t, ops.SparseTensor) for t in tensor_list]
sparse_dtypes_list = [
t.dtype if isinstance(t, ops.SparseTensor) else None
for t in tensor_list]
def _maybe_serialize(t, is_sparse):
if not is_sparse:
return t
return (sparse_ops.serialize_many_sparse(t) if enqueue_many
else sparse_ops.serialize_sparse(t))
serialized_list = [
_maybe_serialize(t, is_sparse)
for (t, is_sparse) in zip(tensor_list, is_sparse_list)]
return serialized_list, is_sparse_list, sparse_dtypes_list
def _serialize_sparse_tensors_join(tensor_list_list, enqueue_many):
"""Serialize SparseTensors for feeding into batch_join, etc."""
(s0, is_sparse_list, sparse_dtypes_list) = _serialize_sparse_tensors(
tensor_list_list[0], enqueue_many)
serialized_list_list = [s0]
for tensor_list in tensor_list_list[1:]:
(s, is_sparse_candidate, sparse_dtypes_candidate) = (
_serialize_sparse_tensors(tensor_list, enqueue_many))
if is_sparse_candidate != is_sparse_list:
raise ValueError("Inconsistent SparseTensors list: %s vs. %s"
% (tensor_list_list[0], tensor_list))
if sparse_dtypes_candidate != sparse_dtypes_list:
raise ValueError("Inconsistent SparseTensor dtypes in list: %s vs. %s"
% (tensor_list_list[0], tensor_list))
serialized_list_list.append(s)
return (serialized_list_list, is_sparse_list, sparse_dtypes_list)
def _deserialize_sparse_tensors(serialized_list, is_sparse_list, sparse_dtypes):
"""Deserialize SparseTensors after dequeue in batch, batch_join, etc."""
received_sequence = isinstance(serialized_list, collections.Sequence)
if not received_sequence:
serialized_list = (serialized_list,)
tensors = [sparse_ops.deserialize_many_sparse(s, sparse_dtype) if is_sparse
else s
for (s, is_sparse, sparse_dtype)
in zip(serialized_list, is_sparse_list, sparse_dtypes)]
return tensors if received_sequence else tensors[0]
def _validate(tensor_list):
tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
if not tensor_list:
raise ValueError("Expected at least one tensor in batch().")
return tensor_list
def _validate_join(tensor_list_list):
tensor_list_list = [ops.convert_n_to_tensor_or_indexed_slices(tl)
for tl in tensor_list_list]
if not tensor_list_list:
raise ValueError("Expected at least one input in batch_join().")
return tensor_list_list
def _dtypes(tensor_list_list):
all_types = [[t.dtype for t in tl] for tl in tensor_list_list]
types = all_types[0]
for other_types in all_types[1:]:
if other_types != types:
raise TypeError("Expected types to be consistent: %s vs. %s." %
(", ".join(x.name for x in types),
", ".join(x.name for x in other_types)))
return types
def _merge_shapes(shape_list, enqueue_many):
shape_list = [tensor_shape.as_shape(s) for s in shape_list]
if enqueue_many:
# We want the shapes without the leading batch dimension.
shape_list = [s.with_rank_at_least(1)[1:] for s in shape_list]
merged_shape = shape_list[0]
for s in shape_list[1:]:
merged_shape.merge_with(s)
return merged_shape.as_list()
def _shapes(tensor_list_list, shapes, enqueue_many):
if shapes is None:
l = len(tensor_list_list[0])
shapes = [_merge_shapes(
[tl[i].get_shape().as_list() for tl in tensor_list_list], enqueue_many)
for i in xrange(l)]
return shapes
def _enqueue_join(queue, tensor_list_list, enqueue_many):
if enqueue_many:
enqueue_ops = [queue.enqueue_many(tl) for tl in tensor_list_list]
else:
enqueue_ops = [queue.enqueue(tl) for tl in tensor_list_list]
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))
def _enqueue(queue, tensor_list, threads, enqueue_many):
if enqueue_many:
enqueue_ops = [queue.enqueue_many(tensor_list)] * threads
else:
enqueue_ops = [queue.enqueue(tensor_list)] * threads
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))
# Batching functions ----------------------------------------------------------
def batch(tensor_list, batch_size, num_threads=1, capacity=32,
enqueue_many=False, shapes=None, shared_name=None, name=None):
"""Creates batches of tensors in `tensor_list`.
This function is implemented using a queue. A `QueueRunner` for the
queue is added to the current `Graph`'s `QUEUE_RUNNER` collection.
If `enqueue_many` is `False`, `tensor_list` is assumed to represent a
single example. An input tensor with shape `[x, y, z]` will be output
as a tensor with shape `[batch_size, x, y, z]`.
If `enqueue_many` is `True`, `tensor_list` is assumed to represent a
batch of examples, where the first dimension is indexed by example,
and all members of `tensor_list` should have the same size in the
first dimension. If an input tensor has shape `[*, x, y, z]`, the
output will have shape `[batch_size, x, y, z]`. The `capacity` argument
controls the how long the prefetching is allowed to grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
this exception, however, if this operation is used in your main thread
you are responsible for catching this yourself.
*N.B.:* You must ensure that either (i) the `shapes` argument is
passed, or (ii) all of the tensors in `tensor_list` must have
fully-defined shapes. `ValueError` will be raised if neither of
these conditions holds.
Args:
tensor_list: The list of tensors to enqueue.
batch_size: The new batch size pulled from the queue.
num_threads: The number of threads enqueuing `tensor_list`.
capacity: An integer. The maximum number of elements in the queue.
enqueue_many: Whether each tensor in `tensor_list` is a single example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list`.
shared_name: (optional). If set, this queue will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A list of tensors with the same number and types as `tensor_list`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensor_list`.
"""
with ops.op_scope(tensor_list, name, "batch") as name:
tensor_list = _validate(tensor_list)
tensor_list, is_sparse, sparse_dtypes = _serialize_sparse_tensors(
tensor_list, enqueue_many)
types = _dtypes([tensor_list])
shapes = _shapes([tensor_list], shapes, enqueue_many)
# TODO(josh11b,mrry): Switch to BatchQueue once it is written.
queue = data_flow_ops.FIFOQueue(
capacity=capacity, dtypes=types, shapes=shapes, shared_name=shared_name)
_enqueue(queue, tensor_list, num_threads, enqueue_many)
logging_ops.scalar_summary(
"queue/%s/fraction_of_%d_full" % (queue.name, capacity),
math_ops.cast(queue.size(), dtypes.float32) * (1. / capacity))
dequeued = queue.dequeue_many(batch_size, name=name)
dequeued = _deserialize_sparse_tensors(dequeued, is_sparse, sparse_dtypes)
return dequeued
# TODO(josh11b): Add a thread_multiplier or num_threads (that has to be
# a multiple of len(tensor_list_list)?) parameter, to address the use
# case where you want more parallelism than you can support different
# readers (either because you don't have that many files or can't
# read that many files in parallel due to the number of seeks required).
# Once this is done, batch() can be written as a call to batch_join().
def batch_join(tensor_list_list, batch_size, capacity=32, enqueue_many=False,
shapes=None, shared_name=None, name=None):
"""Runs a list of tensors to fill a queue to create batches of examples.
Enqueues a different list of tensors in different threads.
Implemented using a queue -- a `QueueRunner` for the queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
`len(tensor_list_list)` threads will be started,
with thread `i` enqueuing the tensors from
`tensor_list_list[i]`. `tensor_list_list[i1][j]` must match
`tensor_list_list[i2][j]` in type and shape, except in the first
dimension if `enqueue_many` is true.
If `enqueue_many` is `False`, each `tensor_list_list[i]` is assumed
to represent a single example. An input tensor `x` will be output as a
tensor with shape `[batch_size] + x.shape`.
If `enqueue_many` is `True`, `tensor_list_list[i]` is assumed to
represent a batch of examples, where the first dimension is indexed
by example, and all members of `tensor_list_list[i]` should have the
same size in the first dimension. The slices of any input tensor
`x` are treated as examples, and the output tensors will have shape
`[batch_size] + x.shape[1:]`.
The `capacity` argument controls the how long the prefetching is allowed to
grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
this exception, however, if this operation is used in your main thread
you are responsible for catching this yourself.
*N.B.:* You must ensure that either (i) the `shapes` argument is
passed, or (ii) all of the tensors in `tensor_list_list` must have
fully-defined shapes. `ValueError` will be raised if neither of
these conditions holds.
Args:
tensor_list_list: A list of tuples of tensors to enqueue.
batch_size: An integer. The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
enqueue_many: Whether each tensor in `tensor_list_list` is a single
example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list_list[i]`.
shared_name: (Optional) If set, this queue will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A list of tensors with the same number and types as
`tensor_list_list[i]`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensor_list_list`.
"""
with ops.op_scope(_flatten(tensor_list_list), name, "batch_join") as name:
tensor_list_list = _validate_join(tensor_list_list)
tensor_list_list, is_sparse, sparse_dtypes = (
_serialize_sparse_tensors_join(tensor_list_list, enqueue_many))
types = _dtypes(tensor_list_list)
shapes = _shapes(tensor_list_list, shapes, enqueue_many)
# TODO(josh11b,mrry): Switch to BatchQueue once it is written.
queue = data_flow_ops.FIFOQueue(
capacity=capacity, dtypes=types, shapes=shapes, shared_name=shared_name)
_enqueue_join(queue, tensor_list_list, enqueue_many)
logging_ops.scalar_summary(
"queue/%s/fraction_of_%d_full" % (queue.name, capacity),
math_ops.cast(queue.size(), dtypes.float32) * (1. / capacity))
dequeued = queue.dequeue_many(batch_size, name=name)
dequeued = _deserialize_sparse_tensors(dequeued, is_sparse, sparse_dtypes)
return dequeued
def shuffle_batch(tensor_list, batch_size, capacity, min_after_dequeue,
num_threads=1, seed=None, enqueue_many=False, shapes=None,
shared_name=None, name=None):
"""Creates batches by randomly shuffling tensors.
This function adds the following to the current `Graph`:
* A shuffling queue into which tensors from `tensor_list` are enqueued.
* A `dequeue_many` operation to create batches from the queue.
* A `QueueRunner` to `QUEUE_RUNNER` collection, to enqueue the tensors
from `tensor_list`.
If `enqueue_many` is `False`, `tensor_list` is assumed to represent a
single example. An input tensor with shape `[x, y, z]` will be output
as a tensor with shape `[batch_size, x, y, z]`.
If `enqueue_many` is `True`, `tensor_list` is assumed to represent a
batch of examples, where the first dimension is indexed by example,
and all members of `tensor_list` should have the same size in the
first dimension. If an input tensor has shape `[*, x, y, z]`, the
output will have shape `[batch_size, x, y, z]`.
The `capacity` argument controls the how long the prefetching is allowed to
grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
this exception, however, if this operation is used in your main thread
you are responsible for catching this yourself.
For example:
```python
# Creates batches of 32 images and 32 labels.
image_batch, label_batch = tf.train.shuffle_batch(
[single_image, single_label],
batch_size=32,
num_threads=4,
capacity=50000,
min_after_dequeue=10000)
```
*N.B.:* You must ensure that either (i) the `shapes` argument is
passed, or (ii) all of the tensors in `tensor_list` must have
fully-defined shapes. `ValueError` will be raised if neither of
these conditions holds.
Args:
tensor_list: The list of tensors to enqueue.
batch_size: The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
min_after_dequeue: Minimum number elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
num_threads: The number of threads enqueuing `tensor_list`.
seed: Seed for the random shuffling within the queue.
enqueue_many: Whether each tensor in `tensor_list` is a single example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list`.
shared_name: (Optional) If set, this queue will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A list of tensors with the same number and types as `tensor_list`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensor_list`.
"""
with ops.op_scope(tensor_list, name, "shuffle_batch") as name:
tensor_list = _validate(tensor_list)
tensor_list, is_sparse, sparse_dtypes = _serialize_sparse_tensors(
tensor_list, enqueue_many)
types = _dtypes([tensor_list])
shapes = _shapes([tensor_list], shapes, enqueue_many)
queue = data_flow_ops.RandomShuffleQueue(
capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
dtypes=types, shapes=shapes, shared_name=shared_name)
_enqueue(queue, tensor_list, num_threads, enqueue_many)
full = (math_ops.cast(math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) *
(1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = (
"queue/%sfraction_over_%d_of_%d_full" %
(name, min_after_dequeue, capacity - min_after_dequeue))
logging_ops.scalar_summary(summary_name, full)
dequeued = queue.dequeue_many(batch_size, name=name)
dequeued = _deserialize_sparse_tensors(dequeued, is_sparse, sparse_dtypes)
return dequeued
def shuffle_batch_join(tensor_list_list, batch_size, capacity,
min_after_dequeue, seed=None, enqueue_many=False,
shapes=None, shared_name=None, name=None):
"""Create batches by randomly shuffling tensors.
This version enqueues a different list of tensors in different threads.
It adds the following to the current `Graph`:
* A shuffling queue into which tensors from `tensor_list_list` are enqueued.
* A `dequeue_many` operation to create batches from the queue.
* A `QueueRunner` to `QUEUE_RUNNER` collection, to enqueue the tensors
from `tensor_list_list`.
`len(tensor_list_list)` threads will be started, with thread `i` enqueuing
the tensors from `tensor_list_list[i]`. `tensor_list_list[i1][j]` must match
`tensor_list_list[i2][j]` in type and shape, except in the first dimension if
`enqueue_many` is true.
If `enqueue_many` is `False`, each `tensor_list_list[i]` is assumed
to represent a single example. An input tensor with shape `[x, y,
z]` will be output as a tensor with shape `[batch_size, x, y, z]`.
If `enqueue_many` is `True`, `tensor_list_list[i]` is assumed to
represent a batch of examples, where the first dimension is indexed
by example, and all members of `tensor_list_list[i]` should have the
same size in the first dimension. If an input tensor has shape `[*, x,
y, z]`, the output will have shape `[batch_size, x, y, z]`.
The `capacity` argument controls the how long the prefetching is allowed to
grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
this exception, however, if this operation is used in your main thread
you are responsible for catching this yourself.
Args:
tensor_list_list: A list of tuples of tensors to enqueue.
batch_size: An integer. The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
min_after_dequeue: Minimum number elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
seed: Seed for the random shuffling within the queue.
enqueue_many: Whether each tensor in `tensor_list_list` is a single
example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list_list[i]`.
shared_name: (optional). If set, this queue will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A list of tensors with the same number and types as `tensor_list_list[i]`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensor_list_list`.
"""
with ops.op_scope(
_flatten(tensor_list_list), name, "shuffle_batch_join") as name:
tensor_list_list = _validate_join(tensor_list_list)
tensor_list_list, is_sparse, sparse_dtypes = (
_serialize_sparse_tensors_join(tensor_list_list, enqueue_many))
types = _dtypes(tensor_list_list)
shapes = _shapes(tensor_list_list, shapes, enqueue_many)
queue = data_flow_ops.RandomShuffleQueue(
capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
dtypes=types, shapes=shapes, shared_name=shared_name)
_enqueue_join(queue, tensor_list_list, enqueue_many)
full = (math_ops.cast(math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) *
(1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = (
"queue/%sfraction_over_%d_of_%d_full" %
(name, min_after_dequeue, capacity - min_after_dequeue))
logging_ops.scalar_summary(summary_name, full)
dequeued = queue.dequeue_many(batch_size, name=name)
dequeued = _deserialize_sparse_tensors(dequeued, is_sparse, sparse_dtypes)
return dequeued
| 43.547337 | 80 | 0.719444 |
ace7b5926a305c9d94a6a5a7a32ad1fe56913d63 | 5,901 | py | Python | zipline/research/data.py | liudengfeng/zipline | 01fdd51d83efeb3453e92b7d02c255a06eba49ac | [
"Apache-2.0"
] | 6 | 2017-12-11T06:12:00.000Z | 2019-05-23T17:39:10.000Z | zipline/research/data.py | liudengfeng/zipline | 01fdd51d83efeb3453e92b7d02c255a06eba49ac | [
"Apache-2.0"
] | null | null | null | zipline/research/data.py | liudengfeng/zipline | 01fdd51d83efeb3453e92b7d02c255a06eba49ac | [
"Apache-2.0"
] | 1 | 2018-01-26T14:19:38.000Z | 2018-01-26T14:19:38.000Z | import pandas as pd
import os
from zipline.utils.paths import zipline_path
from zipline.pipeline.fundamentals.localdata_wy import get_cn_industry, get_sw_industry, get_zjh_industry
from zipline.pipeline.fundamentals.constants import CN_TO_SECTOR, SECTOR_NAMES
from .core import symbol
from zipline.assets.assets import SymbolNotFound
import random
from cnswd.mongodb import get_db
from trading_calendars import get_calendar
import re
CODE_PAT = re.compile(r"\d{6}")
MATCH_ONLY_A = {
'$match': {
'$expr': {
'$in': [
{
'$substrBytes': [
'$股票代码', 0, 1
]
}, [
'0', '3', '6'
]
]
}
}
}
NUM_MAPS = {
1: '一',
2: '二',
3: '三',
4: '四'
}
def random_sample_codes(n, only_A=True):
"""随机选择N个股票代码"""
db = get_db('cninfo')
coll = db['基本资料']
calendar = get_calendar('XSHG')
last_session = calendar.actual_last_session
projection = {'_id': 0, '股票代码': 1}
pipeline = [
{'$match': {'上市日期': {'$lte': last_session}}},
{'$project': projection},
]
if only_A:
pipeline.insert(0, MATCH_ONLY_A)
cursor = coll.aggregate(pipeline)
codes = [d['股票代码'] for d in list(cursor)]
return random.sample(codes, n)
def get_ff_factors(n):
"""读取3因子或5因子数据"""
assert n in (3, 5), "仅支持3因子或5因子"
file_name = f"ff{n}"
root_dir = zipline_path(['factors'])
result_path_ = os.path.join(root_dir, f'{file_name}.pkl')
return pd.read_pickle(result_path_)
def get_sector_mappings(to_symbol=True):
"""部门映射"""
df = get_cn_industry(True)
codes = df['sid'].map(lambda x: str(x).zfill(6)).values
if to_symbol:
keys = []
for code in codes:
try:
keys.append(symbol(code))
except SymbolNotFound:
pass
else:
keys = codes
names = df['国证一级行业编码'].map(
CN_TO_SECTOR, na_action='ignore').map(SECTOR_NAMES).values
return {
c: v for c, v in zip(keys, names)
}
def get_cn_industry_maps(level=1, to_symbol=True):
"""国证行业分级映射
Args:
level (int, optional): 行业层级[1,2,3,4]. Defaults to 1.
to_symbol (bool, optional): 是否转换为Equity. Defaults to True.
Returns:
dict: key:股票代码或Equity, value:行业分类名称
"""
assert level in (1, 2, 3, 4)
df = get_cn_industry(True)
codes = df['sid'].map(lambda x: str(x).zfill(6)).values
if to_symbol:
keys = []
for code in codes:
try:
keys.append(symbol(code))
except SymbolNotFound:
pass
else:
keys = codes
col = f"国证{NUM_MAPS[level]}级行业"
names = df[col].values
return {
c: v for c, v in zip(keys, names)
}
def get_sw_industry_maps(level=1, to_symbol=True):
"""申万行业分级映射
Args:
level (int, optional): 行业层级[1,2,3]. Defaults to 1.
to_symbol (bool, optional): 是否转换为Equity. Defaults to True.
Returns:
dict: key:股票代码或Equity, value:行业分类名称
"""
assert level in (1, 2, 3)
df = get_sw_industry(True)
codes = df['sid'].map(lambda x: str(x).zfill(6)).values
if to_symbol:
keys = []
for code in codes:
try:
keys.append(symbol(code))
except SymbolNotFound:
pass
else:
keys = codes
col = f"申万{NUM_MAPS[level]}级行业"
names = df[col].values
return {
c: v for c, v in zip(keys, names)
}
def get_zjh_industry_maps(level=1, to_symbol=True):
"""证监会行业分级映射
Args:
level (int, optional): 行业层级[1,2]. Defaults to 1.
to_symbol (bool, optional): 是否转换为Equity. Defaults to True.
Returns:
dict: key:股票代码或Equity, value:行业分类名称
"""
assert level in (1, 2)
df = get_zjh_industry(True)
codes = df['sid'].map(lambda x: str(x).zfill(6)).values
if to_symbol:
keys = []
for code in codes:
try:
keys.append(symbol(code))
except SymbolNotFound:
pass
else:
keys = codes
col = f"证监会{NUM_MAPS[level]}级行业"
names = df[col].values
return {
c: v for c, v in zip(keys, names)
}
def _get_concept_maps(collection, latest):
projection = {
'_id': 0,
'概念名称': 1,
'股票列表': 1,
}
pipeline = [
{
'$project': projection
},
]
if collection.name == '同花顺概念' and latest:
end = pd.Timestamp.today().floor('D')
start = end - pd.Timedelta('730 days')
pipeline.insert(
0, {'$match': {'日期': {'$gte': start, '$lte': end}}}
)
cursor = collection.aggregate(pipeline)
return {record['概念名称']: [c for c in record['股票列表'] if CODE_PAT.match(c)] for record in cursor}
def get_concept_maps(by='all', to_symbol=True, latest=False):
"""概念对应股票列表
Args:
by (str, optional): 分类单位. Defaults to 'all'.
all 代表合并
to_symbol (bool, optional): 转换为Equity. Defaults to True.
latest (bool, optional): 限于最近2年. Defaults to False.
Returns:
dict: 以概念名称为键,股票列表为值
"""
assert by in ('ths', 'tct', 'all')
db = get_db()
if by == 'ths':
collection = db['同花顺概念']
return _get_concept_maps(collection, latest)
elif by == 'tct':
collection = db['腾讯概念']
return _get_concept_maps(collection, latest)
else:
ths = _get_concept_maps(db['同花顺概念'])
tct = _get_concept_maps(db['腾讯概念'])
keys = set(list(ths.keys()) + list(tct.keys()))
res = {}
for key in keys:
p1 = ths.get(key, [])
p2 = tct.get(key, [])
v = set(p1 + p2)
if to_symbol:
v = [symbol(s) for s in v]
res[key] = v
return res
| 26.110619 | 105 | 0.548382 |
ace7b7542202cd0fbd2c394e37a3a33e558c50cd | 11,625 | py | Python | plugins/ida_binsync/ida_binsync/compat.py | GeistInDerSH/binsync | 91c7f0b603429fd9bd0d3dd5f2846508094114ea | [
"MIT"
] | null | null | null | plugins/ida_binsync/ida_binsync/compat.py | GeistInDerSH/binsync | 91c7f0b603429fd9bd0d3dd5f2846508094114ea | [
"MIT"
] | null | null | null | plugins/ida_binsync/ida_binsync/compat.py | GeistInDerSH/binsync | 91c7f0b603429fd9bd0d3dd5f2846508094114ea | [
"MIT"
] | null | null | null | # ----------------------------------------------------------------------------
# This file is more of a library for making compatibility calls to IDA for
# things such as getting decompiled function names, start addresses, and
# asking for write permission to ida. This will mostly be called in the
# controller.
#
# Note that anything that requires write permission to IDA will need to pass
# through this program if it is not running in the main thread.
#
# ----------------------------------------------------------------------------
import functools
import threading
import typing
import idc
import idaapi
import ida_kernwin
import ida_hexrays
import ida_funcs
import ida_bytes
import ida_struct
import ida_idaapi
import ida_typeinf
from binsync.data import Struct
from .controller import BinsyncController
#
# Helper classes for wrapping data
#
class IDAStackVar:
def __init__(self, func_addr, offset, name, type_str, size):
self.func_addr = func_addr
self.offset = offset
self.name = name
self.type_str = type_str
self.size = size
#
# Wrappers for IDA Main thread r/w operations
#
# a special note about these functions:
# Any operation that needs to do some type of write to the ida db (idb), needs to be in the main thread due to
# some ida constraints. Sometimes reads also need to be in the main thread. To make things efficient, most heavy
# things are done in the controller and just setters and getters are done here.
def is_mainthread():
"""
Return a bool that indicates if this is the main application thread.
"""
return isinstance(threading.current_thread(), threading._MainThread)
def execute_sync(func, sync_type):
"""
Synchronize with the disassembler for safe database access.
Modified from https://github.com/vrtadmin/FIRST-plugin-ida
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
output = [None]
#
# this inline function definition is technically what will execute
# in the context of the main thread. we use this thunk to capture
# any output the function may want to return to the user.
#
def thunk():
output[0] = func(*args, **kwargs)
return 1
if is_mainthread():
thunk()
else:
idaapi.execute_sync(thunk, sync_type)
# return the output of the synchronized execution
return output[0]
return wrapper
def execute_read(func):
return execute_sync(func, idaapi.MFF_READ)
def execute_write(func):
return execute_sync(func, idaapi.MFF_WRITE)
def execute_ui(func):
return execute_sync(func, idaapi.MFF_FAST)
#
# Data Type Converters
#
@execute_read
def convert_type_str_to_ida_type(type_str) -> typing.Optional['ida_typeinf']:
ida_type_str = type_str + ";"
tif = ida_typeinf.tinfo_t()
valid_parse = ida_typeinf.parse_decl(tif, None, ida_type_str, 1)
return tif if valid_parse is not None else None
@execute_read
def ida_to_angr_stack_offset(func_addr, angr_stack_offset):
frame = idaapi.get_frame(func_addr)
frame_size = idc.get_struc_size(frame)
last_member_size = idaapi.get_member_size(frame.get_member(frame.memqty - 1))
ida_stack_offset = angr_stack_offset - frame_size + last_member_size
return ida_stack_offset
def convert_member_flag(size):
if size == 1:
return 0x400
elif size == 2:
return 0x10000400
elif size == 4:
return 0x20000400
elif size == 8:
return 0x30000400
#
# IDA Function r/w
#
@execute_read
def ida_func_addr(addr):
ida_func = ida_funcs.get_func(addr)
if ida_func is None:
return None
func_addr = ida_func.start_ea
return func_addr
@execute_read
def get_func_name(ea):
return idc.get_func_name(ea)
@execute_write
def set_ida_func_name(func_addr, new_name):
idaapi.set_name(func_addr, new_name, idaapi.SN_FORCE)
ida_kernwin.request_refresh(ida_kernwin.IWID_DISASMS)
ida_kernwin.request_refresh(ida_kernwin.IWID_STRUCTS)
ida_kernwin.request_refresh(ida_kernwin.IWID_STKVIEW)
#
# IDA Comment r/w
#
@execute_write
def set_ida_comment(addr, cmt, decompiled=False):
func = ida_funcs.get_func(addr)
rpt = 1
# function comment
if addr == func.start_ea:
idc.set_func_cmt(addr, cmt, rpt)
return True
# a comment in decompilation
elif decompiled:
cfunc = idaapi.decompile(addr)
eamap = cfunc.get_eamap()
decomp_obj_addr = eamap[addr][0].ea
tl = idaapi.treeloc_t()
# try to set a comment using the cfunc obj and normal address
for a in [addr, decomp_obj_addr]:
tl.ea = a
for itp in range(idaapi.ITP_SEMI, idaapi.ITP_COLON):
tl.itp = itp
cfunc.set_user_cmt(tl, cmt)
cfunc.save_user_cmts()
cfunc.refresh_func_ctext()
# attempt to set until it does not fail (orphan itself)
if not cfunc.has_orphan_cmts():
cfunc.save_user_cmts()
return True
cfunc.del_orphan_cmts()
return False
# a comment in disassembly
else:
ida_bytes.set_cmt(addr, cmt, rpt)
return True
@execute_write
def set_decomp_comments(func_addr, cmt_dict: typing.Dict[int, str]):
for addr in cmt_dict:
ida_cmts = ida_hexrays.user_cmts_new()
comment = cmt_dict[addr]
tl = ida_hexrays.treeloc_t()
tl.ea = addr
# XXX: need a real value here at some point
tl.itp = 90
ida_cmts.insert(tl, ida_hexrays.citem_cmt_t(comment))
ida_hexrays.save_user_cmts(func_addr, ida_cmts)
#
# IDA Stack Var r/w
#
@execute_read
def get_func_stack_var_info(func_addr) -> typing.Dict[int, IDAStackVar]:
try:
decompilation = ida_hexrays.decompile(func_addr)
except ida_hexrays.DecompilationFailure:
print("[BinSync]: Decompiling too many functions too fast! Slow down and try that operation again.")
return {}
stack_var_info = {}
for var in decompilation.lvars:
if not var.is_stk_var():
continue
size = var.width
name = var.name
offset = var.location.stkoff() - decompilation.get_stkoff_delta()
type_str = str(var.type())
stack_var_info[offset] = IDAStackVar(func_addr, offset, name, type_str, size)
return stack_var_info
@execute_write
def set_stack_vars_types(var_type_dict, code_view, controller: "BinsyncController") -> bool:
"""
Sets the type of a stack variable, which should be a local variable.
Take special note of the types of first two parameters used here:
var_type_dict is a dictionary of the offsets and the new proposed type info for each offset.
This typeinfo should be gotten either by manully making a new typeinfo object or using the
parse_decl function. code_view is a instance of vdui_t, which should be gotten through
open_pseudocode() from ida_hexrays.
This function also is special since it needs to iterate all of the stack variables an unknown amount
of times until a fixed point of variables types not changing is met.
@param var_type_dict: Dict[stack_offset, ida_typeinf_t]
@param code_view: A pointer to a vdui_t screen
@param controller: The BinSync controller to do operations on
@return:
"""
all_success = True
fixed_point = False
while not fixed_point:
fixed_point = True
for lvar in code_view.cfunc.lvars:
cur_off = lvar.location.stkoff() - code_view.cfunc.get_stkoff_delta()
if lvar.is_stk_var() and cur_off in var_type_dict:
if str(lvar.type()) != str(var_type_dict[cur_off]):
controller.inc_api_count()
all_success &= code_view.set_lvar_type(lvar, var_type_dict.pop(cur_off))
fixed_point = False
# make sure to break, in case the size of lvars array has now changed
break
return all_success
@execute_read
def ida_get_frame(func_addr):
return idaapi.get_frame(func_addr)
#
# IDA Struct r/w
#
@execute_write
def set_struct_member_name(ida_struct, frame, offset, name):
ida_struct.set_member_name(frame, offset, name)
@execute_write
def set_ida_struct(struct: Struct, controller) -> bool:
# first, delete any struct by the same name if it exists
sid = ida_struct.get_struc_id(struct.name)
if sid != 0xffffffffffffffff:
sptr = ida_struct.get_struc(sid)
controller.inc_api_count()
ida_struct.del_struc(sptr)
# now make a struct header
controller.inc_api_count()
ida_struct.add_struc(ida_idaapi.BADADDR, struct.name, False)
sid = ida_struct.get_struc_id(struct.name)
sptr = ida_struct.get_struc(sid)
# expand the struct to the desired size
# XXX: do not increment API here, why? Not sure, but you cant do it here.
ida_struct.expand_struc(sptr, 0, struct.size)
# add every member of the struct
for member in struct.struct_members:
# convert to ida's flag system
mflag = convert_member_flag(member.size)
# create the new member
controller.inc_api_count()
ida_struct.add_struc_member(
sptr,
member.member_name,
member.offset,
mflag,
None,
member.size,
)
@execute_write
def set_ida_struct_member_types(struct: Struct, controller) -> bool:
# find the specific struct
sid = ida_struct.get_struc_id(struct.name)
sptr = ida_struct.get_struc(sid)
all_typed_success = True
for idx, member in enumerate(struct.struct_members):
# set the new member type if it has one
if member.type == "":
continue
# assure its convertible
tif = convert_type_str_to_ida_type(member.type)
if tif is None:
all_typed_success = False
continue
# set the type
mptr = sptr.get_member(idx)
controller.inc_api_count()
was_set = ida_struct.set_member_tinfo(
sptr,
mptr,
0,
tif,
mptr.flag
)
all_typed_success &= True if was_set == 1 else False
return all_typed_success
#
# IDA GUI r/w
#
@execute_ui
def refresh_pseudocode_view(ea):
"""Refreshes the pseudocode view in IDA."""
names = ["Pseudocode-%c" % chr(ord("A") + i) for i in range(5)]
for name in names:
widget = ida_kernwin.find_widget(name)
if widget:
vu = ida_hexrays.get_widget_vdui(widget)
# Check if the address is in the same function
func_ea = vu.cfunc.entry_ea
func = ida_funcs.get_func(func_ea)
if ida_funcs.func_contains(func, ea):
vu.refresh_view(True)
class IDAViewCTX:
@execute_ui
def __init__(self, func_addr):
self.view = ida_hexrays.open_pseudocode(func_addr, 0)
def __enter__(self):
return self.view
@execute_ui
def __exit__(self, exc_type, exc_val, exc_tb):
self.close_pseudocode_view(self.view)
@execute_ui
def close_pseudocode_view(self, ida_vdui_t):
widget = ida_vdui_t.toplevel
idaapi.close_pseudocode(widget)
def get_screen_ea():
return idc.get_screen_ea()
def get_function_cursor_at():
curr_addr = get_screen_ea()
if curr_addr is None:
return None
return ida_func_addr(curr_addr)
| 28.216019 | 112 | 0.65772 |
ace7b7a7b7964c9a1bde62c257254ecd694a5df4 | 140,407 | py | Python | evennia/commands/default/building.py | DarkSir23/evennia | 8f3ed8c9d57edf72619ccea1540e08ba305883dc | [
"BSD-3-Clause"
] | null | null | null | evennia/commands/default/building.py | DarkSir23/evennia | 8f3ed8c9d57edf72619ccea1540e08ba305883dc | [
"BSD-3-Clause"
] | null | null | null | evennia/commands/default/building.py | DarkSir23/evennia | 8f3ed8c9d57edf72619ccea1540e08ba305883dc | [
"BSD-3-Clause"
] | null | null | null | """
Building and world design commands
"""
import re
from django.conf import settings
from django.db.models import Q, Min, Max
from evennia.objects.models import ObjectDB
from evennia.locks.lockhandler import LockException
from evennia.commands.cmdhandler import get_and_merge_cmdsets
from evennia.utils import create, utils, search, logger
from evennia.utils.utils import (
inherits_from,
class_from_module,
get_all_typeclasses,
variable_from_module,
dbref,
interactive,
list_to_string,
)
from evennia.utils.eveditor import EvEditor
from evennia.utils.evmore import EvMore
from evennia.prototypes import spawner, prototypes as protlib, menus as olc_menus
from evennia.utils.ansi import raw
COMMAND_DEFAULT_CLASS = class_from_module(settings.COMMAND_DEFAULT_CLASS)
# limit symbol import for API
__all__ = (
"ObjManipCommand",
"CmdSetObjAlias",
"CmdCopy",
"CmdCpAttr",
"CmdMvAttr",
"CmdCreate",
"CmdDesc",
"CmdDestroy",
"CmdDig",
"CmdTunnel",
"CmdLink",
"CmdUnLink",
"CmdSetHome",
"CmdListCmdSets",
"CmdName",
"CmdOpen",
"CmdSetAttribute",
"CmdTypeclass",
"CmdWipe",
"CmdLock",
"CmdExamine",
"CmdFind",
"CmdTeleport",
"CmdScript",
"CmdTag",
"CmdSpawn",
)
# used by set
from ast import literal_eval as _LITERAL_EVAL
LIST_APPEND_CHAR = "+"
# used by find
CHAR_TYPECLASS = settings.BASE_CHARACTER_TYPECLASS
ROOM_TYPECLASS = settings.BASE_ROOM_TYPECLASS
EXIT_TYPECLASS = settings.BASE_EXIT_TYPECLASS
_DEFAULT_WIDTH = settings.CLIENT_DEFAULT_WIDTH
_PROTOTYPE_PARENTS = None
class ObjManipCommand(COMMAND_DEFAULT_CLASS):
"""
This is a parent class for some of the defining objmanip commands
since they tend to have some more variables to define new objects.
Each object definition can have several components. First is
always a name, followed by an optional alias list and finally an
some optional data, such as a typeclass or a location. A comma ','
separates different objects. Like this:
name1;alias;alias;alias:option, name2;alias;alias ...
Spaces between all components are stripped.
A second situation is attribute manipulation. Such commands
are simpler and offer combinations
objname/attr/attr/attr, objname/attr, ...
"""
# OBS - this is just a parent - it's not intended to actually be
# included in a commandset on its own!
def parse(self):
"""
We need to expand the default parsing to get all
the cases, see the module doc.
"""
# get all the normal parsing done (switches etc)
super().parse()
obj_defs = ([], []) # stores left- and right-hand side of '='
obj_attrs = ([], []) # "
for iside, arglist in enumerate((self.lhslist, self.rhslist)):
# lhslist/rhslist is already split by ',' at this point
for objdef in arglist:
aliases, option, attrs = [], None, []
if ":" in objdef:
objdef, option = [part.strip() for part in objdef.rsplit(":", 1)]
if ";" in objdef:
objdef, aliases = [part.strip() for part in objdef.split(";", 1)]
aliases = [alias.strip() for alias in aliases.split(";") if alias.strip()]
if "/" in objdef:
objdef, attrs = [part.strip() for part in objdef.split("/", 1)]
attrs = [part.strip().lower() for part in attrs.split("/") if part.strip()]
# store data
obj_defs[iside].append({"name": objdef, "option": option, "aliases": aliases})
obj_attrs[iside].append({"name": objdef, "attrs": attrs})
# store for future access
self.lhs_objs = obj_defs[0]
self.rhs_objs = obj_defs[1]
self.lhs_objattr = obj_attrs[0]
self.rhs_objattr = obj_attrs[1]
class CmdSetObjAlias(COMMAND_DEFAULT_CLASS):
"""
adding permanent aliases for object
Usage:
alias <obj> [= [alias[,alias,alias,...]]]
alias <obj> =
alias/category <obj> = [alias[,alias,...]:<category>
Switches:
category - requires ending input with :category, to store the
given aliases with the given category.
Assigns aliases to an object so it can be referenced by more
than one name. Assign empty to remove all aliases from object. If
assigning a category, all aliases given will be using this category.
Observe that this is not the same thing as personal aliases
created with the 'nick' command! Aliases set with alias are
changing the object in question, making those aliases usable
by everyone.
"""
key = "alias"
aliases = "setobjalias"
switch_options = ("category",)
locks = "cmd:perm(setobjalias) or perm(Builder)"
help_category = "Building"
def func(self):
"""Set the aliases."""
caller = self.caller
if not self.lhs:
string = "Usage: alias <obj> [= [alias[,alias ...]]]"
self.caller.msg(string)
return
objname = self.lhs
# Find the object to receive aliases
obj = caller.search(objname)
if not obj:
return
if self.rhs is None:
# no =, so we just list aliases on object.
aliases = obj.aliases.all(return_key_and_category=True)
if aliases:
caller.msg(
"Aliases for %s: %s"
% (
obj.get_display_name(caller),
", ".join(
"'%s'%s"
% (alias, "" if category is None else "[category:'%s']" % category)
for (alias, category) in aliases
),
)
)
else:
caller.msg("No aliases exist for '%s'." % obj.get_display_name(caller))
return
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You don't have permission to do that.")
return
if not self.rhs:
# we have given an empty =, so delete aliases
old_aliases = obj.aliases.all()
if old_aliases:
caller.msg(
"Cleared aliases from %s: %s"
% (obj.get_display_name(caller), ", ".join(old_aliases))
)
obj.aliases.clear()
else:
caller.msg("No aliases to clear.")
return
category = None
if "category" in self.switches:
if ":" in self.rhs:
rhs, category = self.rhs.rsplit(":", 1)
category = category.strip()
else:
caller.msg(
"If specifying the /category switch, the category must be given "
"as :category at the end."
)
else:
rhs = self.rhs
# merge the old and new aliases (if any)
old_aliases = obj.aliases.get(category=category, return_list=True)
new_aliases = [alias.strip().lower() for alias in rhs.split(",") if alias.strip()]
# make the aliases only appear once
old_aliases.extend(new_aliases)
aliases = list(set(old_aliases))
# save back to object.
obj.aliases.add(aliases, category=category)
# we need to trigger this here, since this will force
# (default) Exits to rebuild their Exit commands with the new
# aliases
obj.at_cmdset_get(force_init=True)
# report all aliases on the object
caller.msg(
"Alias(es) for '%s' set to '%s'%s."
% (
obj.get_display_name(caller),
str(obj.aliases),
" (category: '%s')" % category if category else "",
)
)
class CmdCopy(ObjManipCommand):
"""
copy an object and its properties
Usage:
copy <original obj> [= <new_name>][;alias;alias..]
[:<new_location>] [,<new_name2> ...]
Create one or more copies of an object. If you don't supply any targets,
one exact copy of the original object will be created with the name *_copy.
"""
key = "copy"
locks = "cmd:perm(copy) or perm(Builder)"
help_category = "Building"
def func(self):
"""Uses ObjManipCommand.parse()"""
caller = self.caller
args = self.args
if not args:
caller.msg(
"Usage: copy <obj> [=<new_name>[;alias;alias..]]"
"[:<new_location>] [, <new_name2>...]"
)
return
if not self.rhs:
# this has no target =, so an identical new object is created.
from_obj_name = self.args
from_obj = caller.search(from_obj_name)
if not from_obj:
return
to_obj_name = "%s_copy" % from_obj_name
to_obj_aliases = ["%s_copy" % alias for alias in from_obj.aliases.all()]
copiedobj = ObjectDB.objects.copy_object(
from_obj, new_key=to_obj_name, new_aliases=to_obj_aliases
)
if copiedobj:
string = "Identical copy of %s, named '%s' was created." % (
from_obj_name,
to_obj_name,
)
else:
string = "There was an error copying %s."
else:
# we have specified =. This might mean many object targets
from_obj_name = self.lhs_objs[0]["name"]
from_obj = caller.search(from_obj_name)
if not from_obj:
return
for objdef in self.rhs_objs:
# loop through all possible copy-to targets
to_obj_name = objdef["name"]
to_obj_aliases = objdef["aliases"]
to_obj_location = objdef["option"]
if to_obj_location:
to_obj_location = caller.search(to_obj_location, global_search=True)
if not to_obj_location:
return
copiedobj = ObjectDB.objects.copy_object(
from_obj,
new_key=to_obj_name,
new_location=to_obj_location,
new_aliases=to_obj_aliases,
)
if copiedobj:
string = "Copied %s to '%s' (aliases: %s)." % (
from_obj_name,
to_obj_name,
to_obj_aliases,
)
else:
string = "There was an error copying %s to '%s'." % (from_obj_name, to_obj_name)
# we are done, echo to user
caller.msg(string)
class CmdCpAttr(ObjManipCommand):
"""
copy attributes between objects
Usage:
cpattr[/switch] <obj>/<attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
cpattr[/switch] <obj>/<attr> = <obj1> [,<obj2>,<obj3>,...]
cpattr[/switch] <attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
cpattr[/switch] <attr> = <obj1>[,<obj2>,<obj3>,...]
Switches:
move - delete the attribute from the source object after copying.
Example:
cpattr coolness = Anna/chillout, Anna/nicety, Tom/nicety
->
copies the coolness attribute (defined on yourself), to attributes
on Anna and Tom.
Copy the attribute one object to one or more attributes on another object.
If you don't supply a source object, yourself is used.
"""
key = "cpattr"
switch_options = ("move",)
locks = "cmd:perm(cpattr) or perm(Builder)"
help_category = "Building"
def check_from_attr(self, obj, attr, clear=False):
"""
Hook for overriding on subclassed commands. Checks to make sure a
caller can copy the attr from the object in question. If not, return a
false value and the command will abort. An error message should be
provided by this function.
If clear is True, user is attempting to move the attribute.
"""
return True
def check_to_attr(self, obj, attr):
"""
Hook for overriding on subclassed commands. Checks to make sure a
caller can write to the specified attribute on the specified object.
If not, return a false value and the attribute will be skipped. An
error message should be provided by this function.
"""
return True
def check_has_attr(self, obj, attr):
"""
Hook for overriding on subclassed commands. Do any preprocessing
required and verify an object has an attribute.
"""
if not obj.attributes.has(attr):
self.caller.msg("%s doesn't have an attribute %s." % (obj.name, attr))
return False
return True
def get_attr(self, obj, attr):
"""
Hook for overriding on subclassed commands. Do any preprocessing
required and get the attribute from the object.
"""
return obj.attributes.get(attr)
def func(self):
"""
Do the copying.
"""
caller = self.caller
if not self.rhs:
string = """Usage:
cpattr[/switch] <obj>/<attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
cpattr[/switch] <obj>/<attr> = <obj1> [,<obj2>,<obj3>,...]
cpattr[/switch] <attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
cpattr[/switch] <attr> = <obj1>[,<obj2>,<obj3>,...]"""
caller.msg(string)
return
lhs_objattr = self.lhs_objattr
to_objs = self.rhs_objattr
from_obj_name = lhs_objattr[0]["name"]
from_obj_attrs = lhs_objattr[0]["attrs"]
if not from_obj_attrs:
# this means the from_obj_name is actually an attribute
# name on self.
from_obj_attrs = [from_obj_name]
from_obj = self.caller
else:
from_obj = caller.search(from_obj_name)
if not from_obj or not to_objs:
caller.msg("You have to supply both source object and target(s).")
return
# copy to all to_obj:ects
if "move" in self.switches:
clear = True
else:
clear = False
if not self.check_from_attr(from_obj, from_obj_attrs[0], clear=clear):
return
for attr in from_obj_attrs:
if not self.check_has_attr(from_obj, attr):
return
if (len(from_obj_attrs) != len(set(from_obj_attrs))) and clear:
self.caller.msg("|RCannot have duplicate source names when moving!")
return
result = []
for to_obj in to_objs:
to_obj_name = to_obj["name"]
to_obj_attrs = to_obj["attrs"]
to_obj = caller.search(to_obj_name)
if not to_obj:
result.append("\nCould not find object '%s'" % to_obj_name)
continue
for inum, from_attr in enumerate(from_obj_attrs):
try:
to_attr = to_obj_attrs[inum]
except IndexError:
# if there are too few attributes given
# on the to_obj, we copy the original name instead.
to_attr = from_attr
if not self.check_to_attr(to_obj, to_attr):
continue
value = self.get_attr(from_obj, from_attr)
to_obj.attributes.add(to_attr, value)
if clear and not (from_obj == to_obj and from_attr == to_attr):
from_obj.attributes.remove(from_attr)
result.append(
"\nMoved %s.%s -> %s.%s. (value: %s)"
% (from_obj.name, from_attr, to_obj_name, to_attr, repr(value))
)
else:
result.append(
"\nCopied %s.%s -> %s.%s. (value: %s)"
% (from_obj.name, from_attr, to_obj_name, to_attr, repr(value))
)
caller.msg("".join(result))
class CmdMvAttr(ObjManipCommand):
"""
move attributes between objects
Usage:
mvattr[/switch] <obj>/<attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
mvattr[/switch] <obj>/<attr> = <obj1> [,<obj2>,<obj3>,...]
mvattr[/switch] <attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
mvattr[/switch] <attr> = <obj1>[,<obj2>,<obj3>,...]
Switches:
copy - Don't delete the original after moving.
Move an attribute from one object to one or more attributes on another
object. If you don't supply a source object, yourself is used.
"""
key = "mvattr"
switch_options = ("copy",)
locks = "cmd:perm(mvattr) or perm(Builder)"
help_category = "Building"
def func(self):
"""
Do the moving
"""
if not self.rhs:
string = """Usage:
mvattr[/switch] <obj>/<attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
mvattr[/switch] <obj>/<attr> = <obj1> [,<obj2>,<obj3>,...]
mvattr[/switch] <attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
mvattr[/switch] <attr> = <obj1>[,<obj2>,<obj3>,...]"""
self.caller.msg(string)
return
# simply use cpattr for all the functionality
if "copy" in self.switches:
self.execute_cmd("cpattr %s" % self.args)
else:
self.execute_cmd("cpattr/move %s" % self.args)
class CmdCreate(ObjManipCommand):
"""
create new objects
Usage:
create[/drop] <objname>[;alias;alias...][:typeclass], <objname>...
switch:
drop - automatically drop the new object into your current
location (this is not echoed). This also sets the new
object's home to the current location rather than to you.
Creates one or more new objects. If typeclass is given, the object
is created as a child of this typeclass. The typeclass script is
assumed to be located under types/ and any further
directory structure is given in Python notation. So if you have a
correct typeclass 'RedButton' defined in
types/examples/red_button.py, you could create a new
object of this type like this:
create/drop button;red : examples.red_button.RedButton
"""
key = "create"
switch_options = ("drop",)
locks = "cmd:perm(create) or perm(Builder)"
help_category = "Building"
# lockstring of newly created objects, for easy overloading.
# Will be formatted with the {id} of the creating object.
new_obj_lockstring = "control:id({id}) or perm(Admin);delete:id({id}) or perm(Admin)"
def func(self):
"""
Creates the object.
"""
caller = self.caller
if not self.args:
string = "Usage: create[/drop] <newname>[;alias;alias...] [:typeclass.path]"
caller.msg(string)
return
# create the objects
for objdef in self.lhs_objs:
string = ""
name = objdef["name"]
aliases = objdef["aliases"]
typeclass = objdef["option"]
# create object (if not a valid typeclass, the default
# object typeclass will automatically be used)
lockstring = self.new_obj_lockstring.format(id=caller.id)
obj = create.create_object(
typeclass,
name,
caller,
home=caller,
aliases=aliases,
locks=lockstring,
report_to=caller,
)
if not obj:
continue
if aliases:
string = "You create a new %s: %s (aliases: %s)."
string = string % (obj.typename, obj.name, ", ".join(aliases))
else:
string = "You create a new %s: %s."
string = string % (obj.typename, obj.name)
# set a default desc
if not obj.db.desc:
obj.db.desc = "You see nothing special."
if "drop" in self.switches:
if caller.location:
obj.home = caller.location
obj.move_to(caller.location, quiet=True)
if string:
caller.msg(string)
def _desc_load(caller):
return caller.db.evmenu_target.db.desc or ""
def _desc_save(caller, buf):
"""
Save line buffer to the desc prop. This should
return True if successful and also report its status to the user.
"""
caller.db.evmenu_target.db.desc = buf
caller.msg("Saved.")
return True
def _desc_quit(caller):
caller.attributes.remove("evmenu_target")
caller.msg("Exited editor.")
class CmdDesc(COMMAND_DEFAULT_CLASS):
"""
describe an object or the current room.
Usage:
desc [<obj> =] <description>
Switches:
edit - Open up a line editor for more advanced editing.
Sets the "desc" attribute on an object. If an object is not given,
describe the current room.
"""
key = "desc"
aliases = "describe"
switch_options = ("edit",)
locks = "cmd:perm(desc) or perm(Builder)"
help_category = "Building"
def edit_handler(self):
if self.rhs:
self.msg("|rYou may specify a value, or use the edit switch, " "but not both.|n")
return
if self.args:
obj = self.caller.search(self.args)
else:
obj = self.caller.location or self.msg("|rYou can't describe oblivion.|n")
if not obj:
return
if not (obj.access(self.caller, "control") or obj.access(self.caller, "edit")):
self.caller.msg("You don't have permission to edit the description of %s." % obj.key)
self.caller.db.evmenu_target = obj
# launch the editor
EvEditor(
self.caller,
loadfunc=_desc_load,
savefunc=_desc_save,
quitfunc=_desc_quit,
key="desc",
persistent=True,
)
return
def func(self):
"""Define command"""
caller = self.caller
if not self.args and "edit" not in self.switches:
caller.msg("Usage: desc [<obj> =] <description>")
return
if "edit" in self.switches:
self.edit_handler()
return
if "=" in self.args:
# We have an =
obj = caller.search(self.lhs)
if not obj:
return
desc = self.rhs or ""
else:
obj = caller.location or self.msg("|rYou can't describe oblivion.|n")
if not obj:
return
desc = self.args
if obj.access(self.caller, "control") or obj.access(self.caller, "edit"):
obj.db.desc = desc
caller.msg("The description was set on %s." % obj.get_display_name(caller))
else:
caller.msg("You don't have permission to edit the description of %s." % obj.key)
class CmdDestroy(COMMAND_DEFAULT_CLASS):
"""
permanently delete objects
Usage:
destroy[/switches] [obj, obj2, obj3, [dbref-dbref], ...]
Switches:
override - The destroy command will usually avoid accidentally
destroying account objects. This switch overrides this safety.
force - destroy without confirmation.
Examples:
destroy house, roof, door, 44-78
destroy 5-10, flower, 45
destroy/force north
Destroys one or many objects. If dbrefs are used, a range to delete can be
given, e.g. 4-10. Also the end points will be deleted. This command
displays a confirmation before destroying, to make sure of your choice.
You can specify the /force switch to bypass this confirmation.
"""
key = "destroy"
aliases = ["delete", "del"]
switch_options = ("override", "force")
locks = "cmd:perm(destroy) or perm(Builder)"
help_category = "Building"
confirm = True # set to False to always bypass confirmation
default_confirm = "yes" # what to assume if just pressing enter (yes/no)
def func(self):
"""Implements the command."""
caller = self.caller
delete = True
if not self.args or not self.lhslist:
caller.msg("Usage: destroy[/switches] [obj, obj2, obj3, [dbref-dbref],...]")
delete = False
def delobj(obj):
# helper function for deleting a single object
string = ""
if not obj.pk:
string = "\nObject %s was already deleted." % obj.db_key
else:
objname = obj.name
if not (obj.access(caller, "control") or obj.access(caller, "delete")):
return "\nYou don't have permission to delete %s." % objname
if obj.account and "override" not in self.switches:
return (
"\nObject %s is controlled by an active account. Use /override to delete anyway."
% objname
)
if obj.dbid == int(settings.DEFAULT_HOME.lstrip("#")):
return (
"\nYou are trying to delete |c%s|n, which is set as DEFAULT_HOME. "
"Re-point settings.DEFAULT_HOME to another "
"object before continuing." % objname
)
had_exits = hasattr(obj, "exits") and obj.exits
had_objs = hasattr(obj, "contents") and any(
obj
for obj in obj.contents
if not (hasattr(obj, "exits") and obj not in obj.exits)
)
# do the deletion
okay = obj.delete()
if not okay:
string += (
"\nERROR: %s not deleted, probably because delete() returned False."
% objname
)
else:
string += "\n%s was destroyed." % objname
if had_exits:
string += " Exits to and from %s were destroyed as well." % objname
if had_objs:
string += " Objects inside %s were moved to their homes." % objname
return string
objs = []
for objname in self.lhslist:
if not delete:
continue
if "-" in objname:
# might be a range of dbrefs
dmin, dmax = [utils.dbref(part, reqhash=False) for part in objname.split("-", 1)]
if dmin and dmax:
for dbref in range(int(dmin), int(dmax + 1)):
obj = caller.search("#" + str(dbref))
if obj:
objs.append(obj)
continue
else:
obj = caller.search(objname)
else:
obj = caller.search(objname)
if obj is None:
self.caller.msg(
" (Objects to destroy must either be local or specified with a unique #dbref.)"
)
elif obj not in objs:
objs.append(obj)
if objs and ("force" not in self.switches and type(self).confirm):
confirm = "Are you sure you want to destroy "
if len(objs) == 1:
confirm += objs[0].get_display_name(caller)
elif len(objs) < 5:
confirm += ", ".join([obj.get_display_name(caller) for obj in objs])
else:
confirm += ", ".join(["#{}".format(obj.id) for obj in objs])
confirm += " [yes]/no?" if self.default_confirm == "yes" else " yes/[no]"
answer = ""
answer = yield (confirm)
answer = self.default_confirm if answer == "" else answer
if answer and answer not in ("yes", "y", "no", "n"):
caller.msg(
"Canceled: Either accept the default by pressing return or specify yes/no."
)
delete = False
elif answer.strip().lower() in ("n", "no"):
caller.msg("Canceled: No object was destroyed.")
delete = False
if delete:
results = []
for obj in objs:
results.append(delobj(obj))
if results:
caller.msg("".join(results).strip())
class CmdDig(ObjManipCommand):
"""
build new rooms and connect them to the current location
Usage:
dig[/switches] <roomname>[;alias;alias...][:typeclass]
[= <exit_to_there>[;alias][:typeclass]]
[, <exit_to_here>[;alias][:typeclass]]
Switches:
tel or teleport - move yourself to the new room
Examples:
dig kitchen = north;n, south;s
dig house:myrooms.MyHouseTypeclass
dig sheer cliff;cliff;sheer = climb up, climb down
This command is a convenient way to build rooms quickly; it creates the
new room and you can optionally set up exits back and forth between your
current room and the new one. You can add as many aliases as you
like to the name of the room and the exits in question; an example
would be 'north;no;n'.
"""
key = "dig"
switch_options = ("teleport",)
locks = "cmd:perm(dig) or perm(Builder)"
help_category = "Building"
# lockstring of newly created rooms, for easy overloading.
# Will be formatted with the {id} of the creating object.
new_room_lockstring = (
"control:id({id}) or perm(Admin); "
"delete:id({id}) or perm(Admin); "
"edit:id({id}) or perm(Admin)"
)
def func(self):
"""Do the digging. Inherits variables from ObjManipCommand.parse()"""
caller = self.caller
if not self.lhs:
string = "Usage: dig[/teleport] <roomname>[;alias;alias...]" "[:parent] [= <exit_there>"
string += "[;alias;alias..][:parent]] "
string += "[, <exit_back_here>[;alias;alias..][:parent]]"
caller.msg(string)
return
room = self.lhs_objs[0]
if not room["name"]:
caller.msg("You must supply a new room name.")
return
location = caller.location
# Create the new room
typeclass = room["option"]
if not typeclass:
typeclass = settings.BASE_ROOM_TYPECLASS
# create room
new_room = create.create_object(
typeclass, room["name"], aliases=room["aliases"], report_to=caller
)
lockstring = self.new_room_lockstring.format(id=caller.id)
new_room.locks.add(lockstring)
alias_string = ""
if new_room.aliases.all():
alias_string = " (%s)" % ", ".join(new_room.aliases.all())
room_string = "Created room %s(%s)%s of type %s." % (
new_room,
new_room.dbref,
alias_string,
typeclass,
)
# create exit to room
exit_to_string = ""
exit_back_string = ""
if self.rhs_objs:
to_exit = self.rhs_objs[0]
if not to_exit["name"]:
exit_to_string = "\nNo exit created to new room."
elif not location:
exit_to_string = "\nYou cannot create an exit from a None-location."
else:
# Build the exit to the new room from the current one
typeclass = to_exit["option"]
if not typeclass:
typeclass = settings.BASE_EXIT_TYPECLASS
new_to_exit = create.create_object(
typeclass,
to_exit["name"],
location,
aliases=to_exit["aliases"],
locks=lockstring,
destination=new_room,
report_to=caller,
)
alias_string = ""
if new_to_exit.aliases.all():
alias_string = " (%s)" % ", ".join(new_to_exit.aliases.all())
exit_to_string = "\nCreated Exit from %s to %s: %s(%s)%s."
exit_to_string = exit_to_string % (
location.name,
new_room.name,
new_to_exit,
new_to_exit.dbref,
alias_string,
)
# Create exit back from new room
if len(self.rhs_objs) > 1:
# Building the exit back to the current room
back_exit = self.rhs_objs[1]
if not back_exit["name"]:
exit_back_string = "\nNo back exit created."
elif not location:
exit_back_string = "\nYou cannot create an exit back to a None-location."
else:
typeclass = back_exit["option"]
if not typeclass:
typeclass = settings.BASE_EXIT_TYPECLASS
new_back_exit = create.create_object(
typeclass,
back_exit["name"],
new_room,
aliases=back_exit["aliases"],
locks=lockstring,
destination=location,
report_to=caller,
)
alias_string = ""
if new_back_exit.aliases.all():
alias_string = " (%s)" % ", ".join(new_back_exit.aliases.all())
exit_back_string = "\nCreated Exit back from %s to %s: %s(%s)%s."
exit_back_string = exit_back_string % (
new_room.name,
location.name,
new_back_exit,
new_back_exit.dbref,
alias_string,
)
caller.msg("%s%s%s" % (room_string, exit_to_string, exit_back_string))
if new_room and "teleport" in self.switches:
caller.move_to(new_room)
class CmdTunnel(COMMAND_DEFAULT_CLASS):
"""
create new rooms in cardinal directions only
Usage:
tunnel[/switch] <direction>[:typeclass] [= <roomname>[;alias;alias;...][:typeclass]]
Switches:
oneway - do not create an exit back to the current location
tel - teleport to the newly created room
Example:
tunnel n
tunnel n = house;mike's place;green building
This is a simple way to build using pre-defined directions:
|wn,ne,e,se,s,sw,w,nw|n (north, northeast etc)
|wu,d|n (up and down)
|wi,o|n (in and out)
The full names (north, in, southwest, etc) will always be put as
main name for the exit, using the abbreviation as an alias (so an
exit will always be able to be used with both "north" as well as
"n" for example). Opposite directions will automatically be
created back from the new room unless the /oneway switch is given.
For more flexibility and power in creating rooms, use dig.
"""
key = "tunnel"
aliases = ["tun"]
switch_options = ("oneway", "tel")
locks = "cmd: perm(tunnel) or perm(Builder)"
help_category = "Building"
# store the direction, full name and its opposite
directions = {
"n": ("north", "s"),
"ne": ("northeast", "sw"),
"e": ("east", "w"),
"se": ("southeast", "nw"),
"s": ("south", "n"),
"sw": ("southwest", "ne"),
"w": ("west", "e"),
"nw": ("northwest", "se"),
"u": ("up", "d"),
"d": ("down", "u"),
"i": ("in", "o"),
"o": ("out", "i"),
}
def func(self):
"""Implements the tunnel command"""
if not self.args or not self.lhs:
string = (
"Usage: tunnel[/switch] <direction>[:typeclass] [= <roomname>"
"[;alias;alias;...][:typeclass]]"
)
self.caller.msg(string)
return
# If we get a typeclass, we need to get just the exitname
exitshort = self.lhs.split(":")[0]
if exitshort not in self.directions:
string = "tunnel can only understand the following directions: %s." % ",".join(
sorted(self.directions.keys())
)
string += "\n(use dig for more freedom)"
self.caller.msg(string)
return
# retrieve all input and parse it
exitname, backshort = self.directions[exitshort]
backname = self.directions[backshort][0]
# if we recieved a typeclass for the exit, add it to the alias(short name)
if ":" in self.lhs:
# limit to only the first : character
exit_typeclass = ":" + self.lhs.split(":", 1)[-1]
# exitshort and backshort are the last part of the exit strings,
# so we add our typeclass argument after
exitshort += exit_typeclass
backshort += exit_typeclass
roomname = "Some place"
if self.rhs:
roomname = self.rhs # this may include aliases; that's fine.
telswitch = ""
if "tel" in self.switches:
telswitch = "/teleport"
backstring = ""
if "oneway" not in self.switches:
backstring = ", %s;%s" % (backname, backshort)
# build the string we will use to call dig
digstring = "dig%s %s = %s;%s%s" % (telswitch, roomname, exitname, exitshort, backstring)
self.execute_cmd(digstring)
class CmdLink(COMMAND_DEFAULT_CLASS):
"""
link existing rooms together with exits
Usage:
link[/switches] <object> = <target>
link[/switches] <object> =
link[/switches] <object>
Switch:
twoway - connect two exits. For this to work, BOTH <object>
and <target> must be exit objects.
If <object> is an exit, set its destination to <target>. Two-way operation
instead sets the destination to the *locations* of the respective given
arguments.
The second form (a lone =) sets the destination to None (same as
the unlink command) and the third form (without =) just shows the
currently set destination.
"""
key = "link"
locks = "cmd:perm(link) or perm(Builder)"
help_category = "Building"
def func(self):
"""Perform the link"""
caller = self.caller
if not self.args:
caller.msg("Usage: link[/twoway] <object> = <target>")
return
object_name = self.lhs
# try to search locally first
results = caller.search(object_name, quiet=True)
if len(results) > 1: # local results was a multimatch. Inform them to be more specific
_AT_SEARCH_RESULT = variable_from_module(*settings.SEARCH_AT_RESULT.rsplit(".", 1))
return _AT_SEARCH_RESULT(results, caller, query=object_name)
elif len(results) == 1: # A unique local match
obj = results[0]
else: # No matches. Search globally
obj = caller.search(object_name, global_search=True)
if not obj:
return
if self.rhs:
# this means a target name was given
target = caller.search(self.rhs, global_search=True)
if not target:
return
if target == obj:
self.caller.msg("Cannot link an object to itself.")
return
string = ""
note = "Note: %s(%s) did not have a destination set before. Make sure you linked the right thing."
if not obj.destination:
string = note % (obj.name, obj.dbref)
if "twoway" in self.switches:
if not (target.location and obj.location):
string = "To create a two-way link, %s and %s must both have a location" % (
obj,
target,
)
string += " (i.e. they cannot be rooms, but should be exits)."
self.caller.msg(string)
return
if not target.destination:
string += note % (target.name, target.dbref)
obj.destination = target.location
target.destination = obj.location
string += "\nLink created %s (in %s) <-> %s (in %s) (two-way)." % (
obj.name,
obj.location,
target.name,
target.location,
)
else:
obj.destination = target
string += "\nLink created %s -> %s (one way)." % (obj.name, target)
elif self.rhs is None:
# this means that no = was given (otherwise rhs
# would have been an empty string). So we inspect
# the home/destination on object
dest = obj.destination
if dest:
string = "%s is an exit to %s." % (obj.name, dest.name)
else:
string = "%s is not an exit. Its home location is %s." % (obj.name, obj.home)
else:
# We gave the command link 'obj = ' which means we want to
# clear destination.
if obj.destination:
obj.destination = None
string = "Former exit %s no longer links anywhere." % obj.name
else:
string = "%s had no destination to unlink." % obj.name
# give feedback
caller.msg(string.strip())
class CmdUnLink(CmdLink):
"""
remove exit-connections between rooms
Usage:
unlink <Object>
Unlinks an object, for example an exit, disconnecting
it from whatever it was connected to.
"""
# this is just a child of CmdLink
key = "unlink"
locks = "cmd:perm(unlink) or perm(Builder)"
help_key = "Building"
def func(self):
"""
All we need to do here is to set the right command
and call func in CmdLink
"""
caller = self.caller
if not self.args:
caller.msg("Usage: unlink <object>")
return
# This mimics 'link <obj> = ' which is the same as unlink
self.rhs = ""
# call the link functionality
super().func()
class CmdSetHome(CmdLink):
"""
set an object's home location
Usage:
sethome <obj> [= <home_location>]
sethom <obj>
The "home" location is a "safety" location for objects; they
will be moved there if their current location ceases to exist. All
objects should always have a home location for this reason.
It is also a convenient target of the "home" command.
If no location is given, just view the object's home location.
"""
key = "sethome"
locks = "cmd:perm(sethome) or perm(Builder)"
help_category = "Building"
def func(self):
"""implement the command"""
if not self.args:
string = "Usage: sethome <obj> [= <home_location>]"
self.caller.msg(string)
return
obj = self.caller.search(self.lhs, global_search=True)
if not obj:
return
if not self.rhs:
# just view
home = obj.home
if not home:
string = "This object has no home location set!"
else:
string = "%s's current home is %s(%s)." % (obj, home, home.dbref)
else:
# set a home location
new_home = self.caller.search(self.rhs, global_search=True)
if not new_home:
return
old_home = obj.home
obj.home = new_home
if old_home:
string = "Home location of %s was changed from %s(%s) to %s(%s)." % (
obj,
old_home,
old_home.dbref,
new_home,
new_home.dbref,
)
else:
string = "Home location of %s was set to %s(%s)." % (obj, new_home, new_home.dbref)
self.caller.msg(string)
class CmdListCmdSets(COMMAND_DEFAULT_CLASS):
"""
list command sets defined on an object
Usage:
cmdsets <obj>
This displays all cmdsets assigned
to a user. Defaults to yourself.
"""
key = "cmdsets"
aliases = "listcmsets"
locks = "cmd:perm(listcmdsets) or perm(Builder)"
help_category = "Building"
def func(self):
"""list the cmdsets"""
caller = self.caller
if self.arglist:
obj = caller.search(self.arglist[0])
if not obj:
return
else:
obj = caller
string = "%s" % obj.cmdset
caller.msg(string)
class CmdName(ObjManipCommand):
"""
change the name and/or aliases of an object
Usage:
name <obj> = <newname>;alias1;alias2
Rename an object to something new. Use *obj to
rename an account.
"""
key = "name"
aliases = ["rename"]
locks = "cmd:perm(rename) or perm(Builder)"
help_category = "Building"
def func(self):
"""change the name"""
caller = self.caller
if not self.args:
caller.msg("Usage: name <obj> = <newname>[;alias;alias;...]")
return
obj = None
if self.lhs_objs:
objname = self.lhs_objs[0]["name"]
if objname.startswith("*"):
# account mode
obj = caller.account.search(objname.lstrip("*"))
if obj:
if self.rhs_objs[0]["aliases"]:
caller.msg("Accounts can't have aliases.")
return
newname = self.rhs
if not newname:
caller.msg("No name defined!")
return
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You don't have right to edit this account %s." % obj)
return
obj.username = newname
obj.save()
caller.msg("Account's name changed to '%s'." % newname)
return
# object search, also with *
obj = caller.search(objname)
if not obj:
return
if self.rhs_objs:
newname = self.rhs_objs[0]["name"]
aliases = self.rhs_objs[0]["aliases"]
else:
newname = self.rhs
aliases = None
if not newname and not aliases:
caller.msg("No names or aliases defined!")
return
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You don't have the right to edit %s." % obj)
return
# change the name and set aliases:
if newname:
obj.name = newname
astring = ""
if aliases:
[obj.aliases.add(alias) for alias in aliases]
astring = " (%s)" % (", ".join(aliases))
# fix for exits - we need their exit-command to change name too
if obj.destination:
obj.flush_from_cache(force=True)
caller.msg("Object's name changed to '%s'%s." % (newname, astring))
class CmdOpen(ObjManipCommand):
"""
open a new exit from the current room
Usage:
open <new exit>[;alias;alias..][:typeclass] [,<return exit>[;alias;..][:typeclass]]] = <destination>
Handles the creation of exits. If a destination is given, the exit
will point there. The <return exit> argument sets up an exit at the
destination leading back to the current room. Destination name
can be given both as a #dbref and a name, if that name is globally
unique.
"""
key = "open"
locks = "cmd:perm(open) or perm(Builder)"
help_category = "Building"
# a custom member method to chug out exits and do checks
def create_exit(self, exit_name, location, destination, exit_aliases=None, typeclass=None):
"""
Helper function to avoid code duplication.
At this point we know destination is a valid location
"""
caller = self.caller
string = ""
# check if this exit object already exists at the location.
# we need to ignore errors (so no automatic feedback)since we
# have to know the result of the search to decide what to do.
exit_obj = caller.search(exit_name, location=location, quiet=True, exact=True)
if len(exit_obj) > 1:
# give error message and return
caller.search(exit_name, location=location, exact=True)
return None
if exit_obj:
exit_obj = exit_obj[0]
if not exit_obj.destination:
# we are trying to link a non-exit
string = "'%s' already exists and is not an exit!\nIf you want to convert it "
string += (
"to an exit, you must assign an object to the 'destination' property first."
)
caller.msg(string % exit_name)
return None
# we are re-linking an old exit.
old_destination = exit_obj.destination
if old_destination:
string = "Exit %s already exists." % exit_name
if old_destination.id != destination.id:
# reroute the old exit.
exit_obj.destination = destination
if exit_aliases:
[exit_obj.aliases.add(alias) for alias in exit_aliases]
string += " Rerouted its old destination '%s' to '%s' and changed aliases." % (
old_destination.name,
destination.name,
)
else:
string += " It already points to the correct place."
else:
# exit does not exist before. Create a new one.
if not typeclass:
typeclass = settings.BASE_EXIT_TYPECLASS
exit_obj = create.create_object(
typeclass, key=exit_name, location=location, aliases=exit_aliases, report_to=caller
)
if exit_obj:
# storing a destination is what makes it an exit!
exit_obj.destination = destination
string = (
""
if not exit_aliases
else " (aliases: %s)" % (", ".join([str(e) for e in exit_aliases]))
)
string = "Created new Exit '%s' from %s to %s%s." % (
exit_name,
location.name,
destination.name,
string,
)
else:
string = "Error: Exit '%s' not created." % exit_name
# emit results
caller.msg(string)
return exit_obj
def func(self):
"""
This is where the processing starts.
Uses the ObjManipCommand.parser() for pre-processing
as well as the self.create_exit() method.
"""
caller = self.caller
if not self.args or not self.rhs:
string = "Usage: open <new exit>[;alias...][:typeclass][,<return exit>[;alias..][:typeclass]]] "
string += "= <destination>"
caller.msg(string)
return
# We must have a location to open an exit
location = caller.location
if not location:
caller.msg("You cannot create an exit from a None-location.")
return
# obtain needed info from cmdline
exit_name = self.lhs_objs[0]["name"]
exit_aliases = self.lhs_objs[0]["aliases"]
exit_typeclass = self.lhs_objs[0]["option"]
dest_name = self.rhs
# first, check so the destination exists.
destination = caller.search(dest_name, global_search=True)
if not destination:
return
# Create exit
ok = self.create_exit(exit_name, location, destination, exit_aliases, exit_typeclass)
if not ok:
# an error; the exit was not created, so we quit.
return
# Create back exit, if any
if len(self.lhs_objs) > 1:
back_exit_name = self.lhs_objs[1]["name"]
back_exit_aliases = self.lhs_objs[1]["aliases"]
back_exit_typeclass = self.lhs_objs[1]["option"]
self.create_exit(
back_exit_name, destination, location, back_exit_aliases, back_exit_typeclass
)
def _convert_from_string(cmd, strobj):
"""
Converts a single object in *string form* to its equivalent python
type.
Python earlier than 2.6:
Handles floats, ints, and limited nested lists and dicts
(can't handle lists in a dict, for example, this is mainly due to
the complexity of parsing this rather than any technical difficulty -
if there is a need for set-ing such complex structures on the
command line we might consider adding it).
Python 2.6 and later:
Supports all Python structures through literal_eval as long as they
are valid Python syntax. If they are not (such as [test, test2], ie
without the quotes around the strings), the entire structure will
be converted to a string and a warning will be given.
We need to convert like this since all data being sent over the
telnet connection by the Account is text - but we will want to
store it as the "real" python type so we can do convenient
comparisons later (e.g. obj.db.value = 2, if value is stored as a
string this will always fail).
"""
# Use literal_eval to parse python structure exactly.
try:
return _LITERAL_EVAL(strobj)
except (SyntaxError, ValueError):
# treat as string
strobj = utils.to_str(strobj)
string = (
'|RNote: name "|r%s|R" was converted to a string. '
"Make sure this is acceptable." % strobj
)
cmd.caller.msg(string)
return strobj
except Exception as err:
string = "|RUnknown error in evaluating Attribute: {}".format(err)
return string
class CmdSetAttribute(ObjManipCommand):
"""
set attribute on an object or account
Usage:
set <obj>/<attr> = <value>
set <obj>/<attr> =
set <obj>/<attr>
set *<account>/<attr> = <value>
Switch:
edit: Open the line editor (string values only)
script: If we're trying to set an attribute on a script
channel: If we're trying to set an attribute on a channel
account: If we're trying to set an attribute on an account
room: Setting an attribute on a room (global search)
exit: Setting an attribute on an exit (global search)
char: Setting an attribute on a character (global search)
character: Alias for char, as above.
Sets attributes on objects. The second example form above clears a
previously set attribute while the third form inspects the current value of
the attribute (if any). The last one (with the star) is a shortcut for
operating on a player Account rather than an Object.
The most common data to save with this command are strings and
numbers. You can however also set Python primitives such as lists,
dictionaries and tuples on objects (this might be important for
the functionality of certain custom objects). This is indicated
by you starting your value with one of |c'|n, |c"|n, |c(|n, |c[|n
or |c{ |n.
Once you have stored a Python primitive as noted above, you can include
|c[<key>]|n in <attr> to reference nested values in e.g. a list or dict.
Remember that if you use Python primitives like this, you must
write proper Python syntax too - notably you must include quotes
around your strings or you will get an error.
"""
key = "set"
locks = "cmd:perm(set) or perm(Builder)"
help_category = "Building"
nested_re = re.compile(r"\[.*?\]")
not_found = object()
def check_obj(self, obj):
"""
This may be overridden by subclasses in case restrictions need to be
placed on whether certain objects can have attributes set by certain
accounts.
This function is expected to display its own error message.
Returning False will abort the command.
"""
return True
def check_attr(self, obj, attr_name):
"""
This may be overridden by subclasses in case restrictions need to be
placed on what attributes can be set by who beyond the normal lock.
This functions is expected to display its own error message. It is
run once for every attribute that is checked, blocking only those
attributes which are not permitted and letting the others through.
"""
return attr_name
def split_nested_attr(self, attr):
"""
Yields tuples of (possible attr name, nested keys on that attr).
For performance, this is biased to the deepest match, but allows compatability
with older attrs that might have been named with `[]`'s.
> list(split_nested_attr("nested['asdf'][0]"))
[
('nested', ['asdf', 0]),
("nested['asdf']", [0]),
("nested['asdf'][0]", []),
]
"""
quotes = "\"'"
def clean_key(val):
val = val.strip("[]")
if val[0] in quotes:
return val.strip(quotes)
if val[0] == LIST_APPEND_CHAR:
# List insert/append syntax
return val
try:
return int(val)
except ValueError:
return val
parts = self.nested_re.findall(attr)
base_attr = ""
if parts:
base_attr = attr[: attr.find(parts[0])]
for index, part in enumerate(parts):
yield (base_attr, [clean_key(p) for p in parts[index:]])
base_attr += part
yield (attr, [])
def do_nested_lookup(self, value, *keys):
result = value
for key in keys:
try:
result = result.__getitem__(key)
except (IndexError, KeyError, TypeError):
return self.not_found
return result
def view_attr(self, obj, attr):
"""
Look up the value of an attribute and return a string displaying it.
"""
nested = False
for key, nested_keys in self.split_nested_attr(attr):
nested = True
if obj.attributes.has(key):
val = obj.attributes.get(key)
val = self.do_nested_lookup(val, *nested_keys)
if val is not self.not_found:
return "\nAttribute %s/%s = %s" % (obj.name, attr, val)
error = "\n%s has no attribute '%s'." % (obj.name, attr)
if nested:
error += " (Nested lookups attempted)"
return error
def rm_attr(self, obj, attr):
"""
Remove an attribute from the object, or a nested data structure, and report back.
"""
nested = False
for key, nested_keys in self.split_nested_attr(attr):
nested = True
if obj.attributes.has(key):
if nested_keys:
del_key = nested_keys[-1]
val = obj.attributes.get(key)
deep = self.do_nested_lookup(val, *nested_keys[:-1])
if deep is not self.not_found:
try:
del deep[del_key]
except (IndexError, KeyError, TypeError):
continue
return "\nDeleted attribute '%s' (= nested) from %s." % (attr, obj.name)
else:
exists = obj.attributes.has(key)
obj.attributes.remove(attr)
return "\nDeleted attribute '%s' (= %s) from %s." % (attr, exists, obj.name)
error = "\n%s has no attribute '%s'." % (obj.name, attr)
if nested:
error += " (Nested lookups attempted)"
return error
def set_attr(self, obj, attr, value):
done = False
for key, nested_keys in self.split_nested_attr(attr):
if obj.attributes.has(key) and nested_keys:
acc_key = nested_keys[-1]
lookup_value = obj.attributes.get(key)
deep = self.do_nested_lookup(lookup_value, *nested_keys[:-1])
if deep is not self.not_found:
# To support appending and inserting to lists
# a key that starts with LIST_APPEND_CHAR will insert a new item at that
# location, and move the other elements down.
# Using LIST_APPEND_CHAR alone will append to the list
if isinstance(acc_key, str) and acc_key[0] == LIST_APPEND_CHAR:
try:
if len(acc_key) > 1:
where = int(acc_key[1:])
deep.insert(where, value)
else:
deep.append(value)
except (ValueError, AttributeError):
pass
else:
value = lookup_value
attr = key
done = True
break
# List magic failed, just use like a key/index
try:
deep[acc_key] = value
except TypeError as err:
# Tuples can't be modified
return "\n%s - %s" % (err, deep)
value = lookup_value
attr = key
done = True
break
verb = "Modified" if obj.attributes.has(attr) else "Created"
try:
if not done:
obj.attributes.add(attr, value)
return "\n%s attribute %s/%s = %s" % (verb, obj.name, attr, repr(value))
except SyntaxError:
# this means literal_eval tried to parse a faulty string
return (
"\n|RCritical Python syntax error in your value. Only "
"primitive Python structures are allowed.\nYou also "
"need to use correct Python syntax. Remember especially "
"to put quotes around all strings inside lists and "
"dicts.|n"
)
def edit_handler(self, obj, attr):
"""Activate the line editor"""
def load(caller):
"""Called for the editor to load the buffer"""
old_value = obj.attributes.get(attr)
if old_value is not None and not isinstance(old_value, str):
typ = type(old_value).__name__
self.caller.msg(
"|RWARNING! Saving this buffer will overwrite the "
"current attribute (of type %s) with a string!|n" % typ
)
return str(old_value)
return old_value
def save(caller, buf):
"""Called when editor saves its buffer."""
obj.attributes.add(attr, buf)
caller.msg("Saved Attribute %s." % attr)
# start the editor
EvEditor(self.caller, load, save, key="%s/%s" % (obj, attr))
def search_for_obj(self, objname):
"""
Searches for an object matching objname. The object may be of different typeclasses.
Args:
objname: Name of the object we're looking for
Returns:
A typeclassed object, or None if nothing is found.
"""
from evennia.utils.utils import variable_from_module
_AT_SEARCH_RESULT = variable_from_module(*settings.SEARCH_AT_RESULT.rsplit(".", 1))
caller = self.caller
if objname.startswith("*") or "account" in self.switches:
found_obj = caller.search_account(objname.lstrip("*"))
elif "script" in self.switches:
found_obj = _AT_SEARCH_RESULT(search.search_script(objname), caller)
elif "channel" in self.switches:
found_obj = _AT_SEARCH_RESULT(search.search_channel(objname), caller)
else:
global_search = True
if "char" in self.switches or "character" in self.switches:
typeclass = settings.BASE_CHARACTER_TYPECLASS
elif "room" in self.switches:
typeclass = settings.BASE_ROOM_TYPECLASS
elif "exit" in self.switches:
typeclass = settings.BASE_EXIT_TYPECLASS
else:
global_search = False
typeclass = None
found_obj = caller.search(objname, global_search=global_search, typeclass=typeclass)
return found_obj
def func(self):
"""Implement the set attribute - a limited form of py."""
caller = self.caller
if not self.args:
caller.msg("Usage: set obj/attr = value. Use empty value to clear.")
return
# get values prepared by the parser
value = self.rhs
objname = self.lhs_objattr[0]["name"]
attrs = self.lhs_objattr[0]["attrs"]
obj = self.search_for_obj(objname)
if not obj:
return
if not self.check_obj(obj):
return
result = []
if "edit" in self.switches:
# edit in the line editor
if not (obj.access(self.caller, "control") or obj.access(self.caller, "edit")):
caller.msg("You don't have permission to edit %s." % obj.key)
return
if len(attrs) > 1:
caller.msg("The Line editor can only be applied " "to one attribute at a time.")
return
self.edit_handler(obj, attrs[0])
return
if not value:
if self.rhs is None:
# no = means we inspect the attribute(s)
if not attrs:
attrs = [attr.key for attr in obj.attributes.all()]
for attr in attrs:
if not self.check_attr(obj, attr):
continue
result.append(self.view_attr(obj, attr))
# we view it without parsing markup.
self.caller.msg("".join(result).strip(), options={"raw": True})
return
else:
# deleting the attribute(s)
if not (obj.access(self.caller, "control") or obj.access(self.caller, "edit")):
caller.msg("You don't have permission to edit %s." % obj.key)
return
for attr in attrs:
if not self.check_attr(obj, attr):
continue
result.append(self.rm_attr(obj, attr))
else:
# setting attribute(s). Make sure to convert to real Python type before saving.
if not (obj.access(self.caller, "control") or obj.access(self.caller, "edit")):
caller.msg("You don't have permission to edit %s." % obj.key)
return
for attr in attrs:
if not self.check_attr(obj, attr):
continue
value = _convert_from_string(self, value)
result.append(self.set_attr(obj, attr, value))
# send feedback
caller.msg("".join(result).strip("\n"))
class CmdTypeclass(COMMAND_DEFAULT_CLASS):
"""
set or change an object's typeclass
Usage:
typeclass[/switch] <object> [= typeclass.path]
typeclass/prototype <object> = prototype_key
typeclass/list/show [typeclass.path]
swap - this is a shorthand for using /force/reset flags.
update - this is a shorthand for using the /force/reload flag.
Switch:
show, examine - display the current typeclass of object (default) or, if
given a typeclass path, show the docstring of that typeclass.
update - *only* re-run at_object_creation on this object
meaning locks or other properties set later may remain.
reset - clean out *all* the attributes and properties on the
object - basically making this a new clean object.
force - change to the typeclass also if the object
already has a typeclass of the same name.
list - show available typeclasses. Only typeclasses in modules actually
imported or used from somewhere in the code will show up here
(those typeclasses are still available if you know the path)
prototype - clean and overwrite the object with the specified
prototype key - effectively making a whole new object.
Example:
type button = examples.red_button.RedButton
type/prototype button=a red button
If the typeclass_path is not given, the current object's typeclass is
assumed.
View or set an object's typeclass. If setting, the creation hooks of the
new typeclass will be run on the object. If you have clashing properties on
the old class, use /reset. By default you are protected from changing to a
typeclass of the same name as the one you already have - use /force to
override this protection.
The given typeclass must be identified by its location using python
dot-notation pointing to the correct module and class. If no typeclass is
given (or a wrong typeclass is given). Errors in the path or new typeclass
will lead to the old typeclass being kept. The location of the typeclass
module is searched from the default typeclass directory, as defined in the
server settings.
"""
key = "typeclass"
aliases = ["type", "parent", "swap", "update"]
switch_options = ("show", "examine", "update", "reset", "force", "list", "prototype")
locks = "cmd:perm(typeclass) or perm(Builder)"
help_category = "Building"
def func(self):
"""Implements command"""
caller = self.caller
if "list" in self.switches:
tclasses = get_all_typeclasses()
contribs = [key for key in sorted(tclasses) if key.startswith("evennia.contrib")] or [
"<None loaded>"
]
core = [
key for key in sorted(tclasses) if key.startswith("evennia") and key not in contribs
] or ["<None loaded>"]
game = [key for key in sorted(tclasses) if not key.startswith("evennia")] or [
"<None loaded>"
]
string = (
"|wCore typeclasses|n\n"
" {core}\n"
"|wLoaded Contrib typeclasses|n\n"
" {contrib}\n"
"|wGame-dir typeclasses|n\n"
" {game}"
).format(
core="\n ".join(core), contrib="\n ".join(contribs), game="\n ".join(game)
)
EvMore(caller, string, exit_on_lastpage=True)
return
if not self.args:
caller.msg("Usage: %s <object> [= typeclass]" % self.cmdstring)
return
if "show" in self.switches or "examine" in self.switches:
oquery = self.lhs
obj = caller.search(oquery, quiet=True)
if not obj:
# no object found to examine, see if it's a typeclass-path instead
tclasses = get_all_typeclasses()
matches = [
(key, tclass) for key, tclass in tclasses.items() if key.endswith(oquery)
]
nmatches = len(matches)
if nmatches > 1:
caller.msg(
"Multiple typeclasses found matching {}:\n {}".format(
oquery, "\n ".join(tup[0] for tup in matches)
)
)
elif not matches:
caller.msg("No object or typeclass path found to match '{}'".format(oquery))
else:
# one match found
caller.msg(
"Docstring for typeclass '{}':\n{}".format(oquery, matches[0][1].__doc__)
)
else:
# do the search again to get the error handling in case of multi-match
obj = caller.search(oquery)
if not obj:
return
caller.msg(
"{}'s current typeclass is '{}.{}'".format(
obj.name, obj.__class__.__module__, obj.__class__.__name__
)
)
return
# get object to swap on
obj = caller.search(self.lhs)
if not obj:
return
if not hasattr(obj, "__dbclass__"):
string = "%s is not a typed object." % obj.name
caller.msg(string)
return
new_typeclass = self.rhs or obj.path
prototype = None
if "prototype" in self.switches:
key = self.rhs
prototype = protlib.search_prototype(key=key)
if len(prototype) > 1:
caller.msg(
"More than one match for {}:\n{}".format(
key, "\n".join(proto.get("prototype_key", "") for proto in prototype)
)
)
return
elif prototype:
# one match
prototype = prototype[0]
else:
# no match
caller.msg("No prototype '{}' was found.".format(key))
return
new_typeclass = prototype["typeclass"]
self.switches.append("force")
if "show" in self.switches or "examine" in self.switches:
string = "%s's current typeclass is %s." % (obj.name, obj.__class__)
caller.msg(string)
return
if self.cmdstring == "swap":
self.switches.append("force")
self.switches.append("reset")
elif self.cmdstring == "update":
self.switches.append("force")
self.switches.append("update")
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You are not allowed to do that.")
return
if not hasattr(obj, "swap_typeclass"):
caller.msg("This object cannot have a type at all!")
return
is_same = obj.is_typeclass(new_typeclass, exact=True)
if is_same and "force" not in self.switches:
string = "%s already has the typeclass '%s'. Use /force to override." % (
obj.name,
new_typeclass,
)
else:
update = "update" in self.switches
reset = "reset" in self.switches
hooks = "at_object_creation" if update else "all"
old_typeclass_path = obj.typeclass_path
# special prompt for the user in cases where we want
# to confirm changes.
if "prototype" in self.switches:
diff, _ = spawner.prototype_diff_from_object(prototype, obj)
txt = spawner.format_diff(diff)
prompt = (
"Applying prototype '%s' over '%s' will cause the follow changes:\n%s\n"
% (prototype["key"], obj.name, txt)
)
if not reset:
prompt += "\n|yWARNING:|n Use the /reset switch to apply the prototype over a blank state."
prompt += "\nAre you sure you want to apply these changes [yes]/no?"
answer = yield (prompt)
if answer and answer in ("no", "n"):
caller.msg("Canceled: No changes were applied.")
return
# we let this raise exception if needed
obj.swap_typeclass(
new_typeclass, clean_attributes=reset, clean_cmdsets=reset, run_start_hooks=hooks
)
if "prototype" in self.switches:
modified = spawner.batch_update_objects_with_prototype(prototype, objects=[obj])
prototype_success = modified > 0
if not prototype_success:
caller.msg("Prototype %s failed to apply." % prototype["key"])
if is_same:
string = "%s updated its existing typeclass (%s).\n" % (obj.name, obj.path)
else:
string = "%s changed typeclass from %s to %s.\n" % (
obj.name,
old_typeclass_path,
obj.typeclass_path,
)
if update:
string += "Only the at_object_creation hook was run (update mode)."
else:
string += "All object creation hooks were run."
if reset:
string += " All old attributes where deleted before the swap."
else:
string += " Attributes set before swap were not removed."
if "prototype" in self.switches and prototype_success:
string += (
" Prototype '%s' was successfully applied over the object type."
% prototype["key"]
)
caller.msg(string)
class CmdWipe(ObjManipCommand):
"""
clear all attributes from an object
Usage:
wipe <object>[/<attr>[/<attr>...]]
Example:
wipe box
wipe box/colour
Wipes all of an object's attributes, or optionally only those
matching the given attribute-wildcard search string.
"""
key = "wipe"
locks = "cmd:perm(wipe) or perm(Builder)"
help_category = "Building"
def func(self):
"""
inp is the dict produced in ObjManipCommand.parse()
"""
caller = self.caller
if not self.args:
caller.msg("Usage: wipe <object>[/<attr>/<attr>...]")
return
# get the attributes set by our custom parser
objname = self.lhs_objattr[0]["name"]
attrs = self.lhs_objattr[0]["attrs"]
obj = caller.search(objname)
if not obj:
return
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You are not allowed to do that.")
return
if not attrs:
# wipe everything
obj.attributes.clear()
string = "Wiped all attributes on %s." % obj.name
else:
for attrname in attrs:
obj.attributes.remove(attrname)
string = "Wiped attributes %s on %s."
string = string % (",".join(attrs), obj.name)
caller.msg(string)
class CmdLock(ObjManipCommand):
"""
assign a lock definition to an object
Usage:
lock <object or *account>[ = <lockstring>]
or
lock[/switch] <object or *account>/<access_type>
Switch:
del - delete given access type
view - view lock associated with given access type (default)
If no lockstring is given, shows all locks on
object.
Lockstring is of the form
access_type:[NOT] func1(args)[ AND|OR][ NOT] func2(args) ...]
Where func1, func2 ... valid lockfuncs with or without arguments.
Separator expressions need not be capitalized.
For example:
'get: id(25) or perm(Admin)'
The 'get' lock access_type is checked e.g. by the 'get' command.
An object locked with this example lock will only be possible to pick up
by Admins or by an object with id=25.
You can add several access_types after one another by separating
them by ';', i.e:
'get:id(25); delete:perm(Builder)'
"""
key = "lock"
aliases = ["locks"]
locks = "cmd: perm(locks) or perm(Builder)"
help_category = "Building"
def func(self):
"""Sets up the command"""
caller = self.caller
if not self.args:
string = (
"Usage: lock <object>[ = <lockstring>] or lock[/switch] " "<object>/<access_type>"
)
caller.msg(string)
return
if "/" in self.lhs:
# call of the form lock obj/access_type
objname, access_type = [p.strip() for p in self.lhs.split("/", 1)]
obj = None
if objname.startswith("*"):
obj = caller.search_account(objname.lstrip("*"))
if not obj:
obj = caller.search(objname)
if not obj:
return
has_control_access = obj.access(caller, "control")
if access_type == "control" and not has_control_access:
# only allow to change 'control' access if you have 'control' access already
caller.msg("You need 'control' access to change this type of lock.")
return
if not (has_control_access or obj.access(caller, "edit")):
caller.msg("You are not allowed to do that.")
return
lockdef = obj.locks.get(access_type)
if lockdef:
if "del" in self.switches:
obj.locks.delete(access_type)
string = "deleted lock %s" % lockdef
else:
string = lockdef
else:
string = "%s has no lock of access type '%s'." % (obj, access_type)
caller.msg(string)
return
if self.rhs:
# we have a = separator, so we are assigning a new lock
if self.switches:
swi = ", ".join(self.switches)
caller.msg(
"Switch(es) |w%s|n can not be used with a "
"lock assignment. Use e.g. "
"|wlock/del objname/locktype|n instead." % swi
)
return
objname, lockdef = self.lhs, self.rhs
obj = None
if objname.startswith("*"):
obj = caller.search_account(objname.lstrip("*"))
if not obj:
obj = caller.search(objname)
if not obj:
return
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You are not allowed to do that.")
return
ok = False
lockdef = re.sub(r"\'|\"", "", lockdef)
try:
ok = obj.locks.add(lockdef)
except LockException as e:
caller.msg(str(e))
if "cmd" in lockdef.lower() and inherits_from(
obj, "evennia.objects.objects.DefaultExit"
):
# special fix to update Exits since "cmd"-type locks won't
# update on them unless their cmdsets are rebuilt.
obj.at_init()
if ok:
caller.msg("Added lock '%s' to %s." % (lockdef, obj))
return
# if we get here, we are just viewing all locks on obj
obj = None
if self.lhs.startswith("*"):
obj = caller.search_account(self.lhs.lstrip("*"))
if not obj:
obj = caller.search(self.lhs)
if not obj:
return
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You are not allowed to do that.")
return
caller.msg("\n".join(obj.locks.all()))
class CmdExamine(ObjManipCommand):
"""
get detailed information about an object
Usage:
examine [<object>[/attrname]]
examine [*<account>[/attrname]]
Switch:
account - examine an Account (same as adding *)
object - examine an Object (useful when OOC)
The examine command shows detailed game info about an
object and optionally a specific attribute on it.
If object is not specified, the current location is examined.
Append a * before the search string to examine an account.
"""
key = "examine"
aliases = ["ex", "exam"]
locks = "cmd:perm(examine) or perm(Builder)"
help_category = "Building"
arg_regex = r"(/\w+?(\s|$))|\s|$"
account_mode = False
def list_attribute(self, crop, attr, category, value):
"""
Formats a single attribute line.
"""
if crop:
if not isinstance(value, str):
value = utils.to_str(value)
value = utils.crop(value)
if category:
string = "\n %s[%s] = %s" % (attr, category, value)
else:
string = "\n %s = %s" % (attr, value)
string = raw(string)
return string
def format_attributes(self, obj, attrname=None, crop=True):
"""
Helper function that returns info about attributes and/or
non-persistent data stored on object
"""
if attrname:
db_attr = [(attrname, obj.attributes.get(attrname), None)]
try:
ndb_attr = [(attrname, object.__getattribute__(obj.ndb, attrname))]
except Exception:
ndb_attr = None
else:
db_attr = [(attr.key, attr.value, attr.category) for attr in obj.db_attributes.all()]
try:
ndb_attr = obj.nattributes.all(return_tuples=True)
except Exception:
ndb_attr = None
string = ""
if db_attr and db_attr[0]:
string += "\n|wPersistent attributes|n:"
for attr, value, category in db_attr:
string += self.list_attribute(crop, attr, category, value)
if ndb_attr and ndb_attr[0]:
string += "\n|wNon-Persistent attributes|n:"
for attr, value in ndb_attr:
string += self.list_attribute(crop, attr, None, value)
return string
def format_output(self, obj, avail_cmdset):
"""
Helper function that creates a nice report about an object.
returns a string.
"""
string = "\n|wName/key|n: |c%s|n (%s)" % (obj.name, obj.dbref)
if hasattr(obj, "aliases") and obj.aliases.all():
string += "\n|wAliases|n: %s" % (", ".join(utils.make_iter(str(obj.aliases))))
if hasattr(obj, "sessions") and obj.sessions.all():
string += "\n|wSession id(s)|n: %s" % (
", ".join("#%i" % sess.sessid for sess in obj.sessions.all())
)
if hasattr(obj, "email") and obj.email:
string += "\n|wEmail|n: |c%s|n" % obj.email
if hasattr(obj, "has_account") and obj.has_account:
string += "\n|wAccount|n: |c%s|n" % obj.account.name
perms = obj.account.permissions.all()
if obj.account.is_superuser:
perms = ["<Superuser>"]
elif not perms:
perms = ["<None>"]
string += "\n|wAccount Perms|n: %s" % (", ".join(perms))
if obj.account.attributes.has("_quell"):
string += " |r(quelled)|n"
string += "\n|wTypeclass|n: %s (%s)" % (obj.typename, obj.typeclass_path)
if hasattr(obj, "location"):
string += "\n|wLocation|n: %s" % obj.location
if obj.location:
string += " (#%s)" % obj.location.id
if hasattr(obj, "home"):
string += "\n|wHome|n: %s" % obj.home
if obj.home:
string += " (#%s)" % obj.home.id
if hasattr(obj, "destination") and obj.destination:
string += "\n|wDestination|n: %s" % obj.destination
if obj.destination:
string += " (#%s)" % obj.destination.id
perms = obj.permissions.all()
if perms:
perms_string = ", ".join(perms)
else:
perms_string = "<None>"
if obj.is_superuser:
perms_string += " [Superuser]"
string += "\n|wPermissions|n: %s" % perms_string
locks = str(obj.locks)
if locks:
locks_string = utils.fill("; ".join([lock for lock in locks.split(";")]), indent=6)
else:
locks_string = " Default"
string += "\n|wLocks|n:%s" % locks_string
if not (len(obj.cmdset.all()) == 1 and obj.cmdset.current.key == "_EMPTY_CMDSET"):
# all() returns a 'stack', so make a copy to sort.
stored_cmdsets = sorted(obj.cmdset.all(), key=lambda x: x.priority, reverse=True)
string += "\n|wStored Cmdset(s)|n:\n %s" % (
"\n ".join(
"%s [%s] (%s, prio %s)"
% (cmdset.path, cmdset.key, cmdset.mergetype, cmdset.priority)
for cmdset in stored_cmdsets
if cmdset.key != "_EMPTY_CMDSET"
)
)
# this gets all components of the currently merged set
all_cmdsets = [(cmdset.key, cmdset) for cmdset in avail_cmdset.merged_from]
# we always at least try to add account- and session sets since these are ignored
# if we merge on the object level.
if hasattr(obj, "account") and obj.account:
all_cmdsets.extend([(cmdset.key, cmdset) for cmdset in obj.account.cmdset.all()])
if obj.sessions.count():
# if there are more sessions than one on objects it's because of multisession mode 3.
# we only show the first session's cmdset here (it is -in principle- possible that
# different sessions have different cmdsets but for admins who want such madness
# it is better that they overload with their own CmdExamine to handle it).
all_cmdsets.extend(
[
(cmdset.key, cmdset)
for cmdset in obj.account.sessions.all()[0].cmdset.all()
]
)
else:
try:
# we have to protect this since many objects don't have sessions.
all_cmdsets.extend(
[
(cmdset.key, cmdset)
for cmdset in obj.get_session(obj.sessions.get()).cmdset.all()
]
)
except (TypeError, AttributeError):
# an error means we are merging an object without a session
pass
all_cmdsets = [cmdset for cmdset in dict(all_cmdsets).values()]
all_cmdsets.sort(key=lambda x: x.priority, reverse=True)
string += "\n|wMerged Cmdset(s)|n:\n %s" % (
"\n ".join(
"%s [%s] (%s, prio %s)"
% (cmdset.path, cmdset.key, cmdset.mergetype, cmdset.priority)
for cmdset in all_cmdsets
)
)
# list the commands available to this object
avail_cmdset = sorted([cmd.key for cmd in avail_cmdset if cmd.access(obj, "cmd")])
cmdsetstr = utils.fill(", ".join(avail_cmdset), indent=2)
string += "\n|wCommands available to %s (result of Merged CmdSets)|n:\n %s" % (
obj.key,
cmdsetstr,
)
if hasattr(obj, "scripts") and hasattr(obj.scripts, "all") and obj.scripts.all():
string += "\n|wScripts|n:\n %s" % obj.scripts
# add the attributes
string += self.format_attributes(obj)
# display Tags
tags_string = utils.fill(
", ".join(
"%s[%s]" % (tag, category)
for tag, category in obj.tags.all(return_key_and_category=True)
),
indent=5,
)
if tags_string:
string += "\n|wTags[category]|n: %s" % tags_string.strip()
# add the contents
exits = []
pobjs = []
things = []
if hasattr(obj, "contents"):
for content in obj.contents:
if content.destination:
exits.append(content)
elif content.account:
pobjs.append(content)
else:
things.append(content)
if exits:
string += "\n|wExits|n: %s" % ", ".join(
["%s(%s)" % (exit.name, exit.dbref) for exit in exits]
)
if pobjs:
string += "\n|wCharacters|n: %s" % ", ".join(
["|c%s|n(%s)" % (pobj.name, pobj.dbref) for pobj in pobjs]
)
if things:
string += "\n|wContents|n: %s" % ", ".join(
[
"%s(%s)" % (cont.name, cont.dbref)
for cont in obj.contents
if cont not in exits and cont not in pobjs
]
)
separator = "-" * _DEFAULT_WIDTH
# output info
return "%s\n%s\n%s" % (separator, string.strip(), separator)
def func(self):
"""Process command"""
caller = self.caller
def get_cmdset_callback(cmdset):
"""
We make use of the cmdhandeler.get_and_merge_cmdsets below. This
is an asynchronous function, returning a Twisted deferred.
So in order to properly use this we need use this callback;
it is called with the result of get_and_merge_cmdsets, whenever
that function finishes. Taking the resulting cmdset, we continue
to format and output the result.
"""
string = self.format_output(obj, cmdset)
self.msg(string.strip())
if not self.args:
# If no arguments are provided, examine the invoker's location.
if hasattr(caller, "location"):
obj = caller.location
if not obj.access(caller, "examine"):
# If we don't have special info access, just look at the object instead.
self.msg(caller.at_look(obj))
return
obj_session = obj.sessions.get()[0] if obj.sessions.count() else None
# using callback for printing result whenever function returns.
get_and_merge_cmdsets(
obj, obj_session, self.account, obj, "object", self.raw_string
).addCallback(get_cmdset_callback)
else:
self.msg("You need to supply a target to examine.")
return
# we have given a specific target object
for objdef in self.lhs_objattr:
obj = None
obj_name = objdef["name"]
obj_attrs = objdef["attrs"]
self.account_mode = (
utils.inherits_from(caller, "evennia.accounts.accounts.DefaultAccount")
or "account" in self.switches
or obj_name.startswith("*")
)
if self.account_mode:
try:
obj = caller.search_account(obj_name.lstrip("*"))
except AttributeError:
# this means we are calling examine from an account object
obj = caller.search(
obj_name.lstrip("*"), search_object="object" in self.switches
)
else:
obj = caller.search(obj_name)
if not obj:
continue
if not obj.access(caller, "examine"):
# If we don't have special info access, just look
# at the object instead.
self.msg(caller.at_look(obj))
continue
if obj_attrs:
for attrname in obj_attrs:
# we are only interested in specific attributes
caller.msg(self.format_attributes(obj, attrname, crop=False))
else:
session = None
if obj.sessions.count():
mergemode = "session"
session = obj.sessions.get()[0]
elif self.account_mode:
mergemode = "account"
else:
mergemode = "object"
account = None
objct = None
if self.account_mode:
account = obj
else:
account = obj.account
objct = obj
# using callback to print results whenever function returns.
get_and_merge_cmdsets(
obj, session, account, objct, mergemode, self.raw_string
).addCallback(get_cmdset_callback)
class CmdFind(COMMAND_DEFAULT_CLASS):
"""
search the database for objects
Usage:
find[/switches] <name or dbref or *account> [= dbrefmin[-dbrefmax]]
locate - this is a shorthand for using the /loc switch.
Switches:
room - only look for rooms (location=None)
exit - only look for exits (destination!=None)
char - only look for characters (BASE_CHARACTER_TYPECLASS)
exact - only exact matches are returned.
loc - display object location if exists and match has one result
startswith - search for names starting with the string, rather than containing
Searches the database for an object of a particular name or exact #dbref.
Use *accountname to search for an account. The switches allows for
limiting object matches to certain game entities. Dbrefmin and dbrefmax
limits matches to within the given dbrefs range, or above/below if only
one is given.
"""
key = "find"
aliases = "search, locate"
switch_options = ("room", "exit", "char", "exact", "loc", "startswith")
locks = "cmd:perm(find) or perm(Builder)"
help_category = "Building"
def func(self):
"""Search functionality"""
caller = self.caller
switches = self.switches
if not self.args or (not self.lhs and not self.rhs):
caller.msg("Usage: find <string> [= low [-high]]")
return
if "locate" in self.cmdstring: # Use option /loc as a default for locate command alias
switches.append("loc")
searchstring = self.lhs
try:
# Try grabbing the actual min/max id values by database aggregation
qs = ObjectDB.objects.values("id").aggregate(low=Min("id"), high=Max("id"))
low, high = sorted(qs.values())
if not (low and high):
raise ValueError(
f"{self.__class__.__name__}: Min and max ID not returned by aggregation; falling back to queryset slicing."
)
except Exception as e:
logger.log_trace(e)
# If that doesn't work for some reason (empty DB?), guess the lower
# bound and do a less-efficient query to find the upper.
low, high = 1, ObjectDB.objects.all().order_by("-id").first().id
if self.rhs:
try:
# Check that rhs is either a valid dbref or dbref range
bounds = tuple(
sorted(dbref(x, False) for x in re.split("[-\s]+", self.rhs.strip()))
)
# dbref() will return either a valid int or None
assert bounds
# None should not exist in the bounds list
assert None not in bounds
low = bounds[0]
if len(bounds) > 1:
high = bounds[-1]
except AssertionError:
caller.msg("Invalid dbref range provided (not a number).")
return
except IndexError as e:
logger.log_err(
f"{self.__class__.__name__}: Error parsing upper and lower bounds of query."
)
logger.log_trace(e)
low = min(low, high)
high = max(low, high)
is_dbref = utils.dbref(searchstring)
is_account = searchstring.startswith("*")
restrictions = ""
if self.switches:
restrictions = ", %s" % (", ".join(self.switches))
if is_dbref or is_account:
if is_dbref:
# a dbref search
result = caller.search(searchstring, global_search=True, quiet=True)
string = "|wExact dbref match|n(#%i-#%i%s):" % (low, high, restrictions)
else:
# an account search
searchstring = searchstring.lstrip("*")
result = caller.search_account(searchstring, quiet=True)
string = "|wMatch|n(#%i-#%i%s):" % (low, high, restrictions)
if "room" in switches:
result = result if inherits_from(result, ROOM_TYPECLASS) else None
if "exit" in switches:
result = result if inherits_from(result, EXIT_TYPECLASS) else None
if "char" in switches:
result = result if inherits_from(result, CHAR_TYPECLASS) else None
if not result:
string += "\n |RNo match found.|n"
elif not low <= int(result[0].id) <= high:
string += "\n |RNo match found for '%s' in #dbref interval.|n" % searchstring
else:
result = result[0]
string += "\n|g %s - %s|n" % (result.get_display_name(caller), result.path)
if "loc" in self.switches and not is_account and result.location:
string += " (|wlocation|n: |g{}|n)".format(
result.location.get_display_name(caller)
)
else:
# Not an account/dbref search but a wider search; build a queryset.
# Searches for key and aliases
if "exact" in switches:
keyquery = Q(db_key__iexact=searchstring, id__gte=low, id__lte=high)
aliasquery = Q(
db_tags__db_key__iexact=searchstring,
db_tags__db_tagtype__iexact="alias",
id__gte=low,
id__lte=high,
)
elif "startswith" in switches:
keyquery = Q(db_key__istartswith=searchstring, id__gte=low, id__lte=high)
aliasquery = Q(
db_tags__db_key__istartswith=searchstring,
db_tags__db_tagtype__iexact="alias",
id__gte=low,
id__lte=high,
)
else:
keyquery = Q(db_key__icontains=searchstring, id__gte=low, id__lte=high)
aliasquery = Q(
db_tags__db_key__icontains=searchstring,
db_tags__db_tagtype__iexact="alias",
id__gte=low,
id__lte=high,
)
# Keep the initial queryset handy for later reuse
result_qs = ObjectDB.objects.filter(keyquery | aliasquery).distinct()
nresults = result_qs.count()
# Use iterator to minimize memory ballooning on large result sets
results = result_qs.iterator()
# Check and see if type filtering was requested; skip it if not
if any(x in switches for x in ("room", "exit", "char")):
obj_ids = set()
for obj in results:
if (
("room" in switches and inherits_from(obj, ROOM_TYPECLASS))
or ("exit" in switches and inherits_from(obj, EXIT_TYPECLASS))
or ("char" in switches and inherits_from(obj, CHAR_TYPECLASS))
):
obj_ids.add(obj.id)
# Filter previous queryset instead of requesting another
filtered_qs = result_qs.filter(id__in=obj_ids).distinct()
nresults = filtered_qs.count()
# Use iterator again to minimize memory ballooning
results = filtered_qs.iterator()
# still results after type filtering?
if nresults:
if nresults > 1:
header = f"{nresults} Matches"
else:
header = "One Match"
string = f"|w{header}|n(#{low}-#{high}{restrictions}):"
res = None
for res in results:
string += f"\n |g{res.get_display_name(caller)} - {res.path}|n"
if (
"loc" in self.switches
and nresults == 1
and res
and getattr(res, "location", None)
):
string += f" (|wlocation|n: |g{res.location.get_display_name(caller)}|n)"
else:
string = f"|wNo Matches|n(#{low}-#{high}{restrictions}):"
string += f"\n |RNo matches found for '{searchstring}'|n"
# send result
caller.msg(string.strip())
class CmdTeleport(COMMAND_DEFAULT_CLASS):
"""
teleport object to another location
Usage:
tel/switch [<object> to||=] <target location>
Examples:
tel Limbo
tel/quiet box = Limbo
tel/tonone box
Switches:
quiet - don't echo leave/arrive messages to the source/target
locations for the move.
intoexit - if target is an exit, teleport INTO
the exit object instead of to its destination
tonone - if set, teleport the object to a None-location. If this
switch is set, <target location> is ignored.
Note that the only way to retrieve
an object from a None location is by direct #dbref
reference. A puppeted object cannot be moved to None.
loc - teleport object to the target's location instead of its contents
Teleports an object somewhere. If no object is given, you yourself are
teleported to the target location.
"""
key = "tel"
aliases = "teleport"
switch_options = ("quiet", "intoexit", "tonone", "loc")
rhs_split = ("=", " to ") # Prefer = delimiter, but allow " to " usage.
locks = "cmd:perm(teleport) or perm(Builder)"
help_category = "Building"
def func(self):
"""Performs the teleport"""
caller = self.caller
args = self.args
lhs, rhs = self.lhs, self.rhs
switches = self.switches
# setting switches
tel_quietly = "quiet" in switches
to_none = "tonone" in switches
to_loc = "loc" in switches
if to_none:
# teleporting to None
if not args:
obj_to_teleport = caller
else:
obj_to_teleport = caller.search(lhs, global_search=True)
if not obj_to_teleport:
caller.msg("Did not find object to teleport.")
return
if obj_to_teleport.has_account:
caller.msg(
"Cannot teleport a puppeted object "
"(%s, puppeted by %s) to a None-location."
% (obj_to_teleport.key, obj_to_teleport.account)
)
return
caller.msg("Teleported %s -> None-location." % obj_to_teleport)
if obj_to_teleport.location and not tel_quietly:
obj_to_teleport.location.msg_contents(
"%s teleported %s into nothingness." % (caller, obj_to_teleport), exclude=caller
)
obj_to_teleport.location = None
return
# not teleporting to None location
if not args and not to_none:
caller.msg("Usage: teleport[/switches] [<obj> =] <target_loc>||home")
return
if rhs:
obj_to_teleport = caller.search(lhs, global_search=True)
destination = caller.search(rhs, global_search=True)
else:
obj_to_teleport = caller
destination = caller.search(lhs, global_search=True)
if not obj_to_teleport:
caller.msg("Did not find object to teleport.")
return
if not destination:
caller.msg("Destination not found.")
return
if to_loc:
destination = destination.location
if not destination:
caller.msg("Destination has no location.")
return
if obj_to_teleport == destination:
caller.msg("You can't teleport an object inside of itself!")
return
if obj_to_teleport == destination.location:
caller.msg("You can't teleport an object inside something it holds!")
return
if obj_to_teleport.location and obj_to_teleport.location == destination:
caller.msg("%s is already at %s." % (obj_to_teleport, destination))
return
use_destination = True
if "intoexit" in self.switches:
use_destination = False
# try the teleport
if obj_to_teleport.move_to(
destination, quiet=tel_quietly, emit_to_obj=caller, use_destination=use_destination
):
if obj_to_teleport == caller:
caller.msg("Teleported to %s." % destination)
else:
caller.msg("Teleported %s -> %s." % (obj_to_teleport, destination))
class CmdScript(COMMAND_DEFAULT_CLASS):
"""
attach a script to an object
Usage:
script[/switch] <obj> [= script_path or <scriptkey>]
Switches:
start - start all non-running scripts on object, or a given script only
stop - stop all scripts on objects, or a given script only
If no script path/key is given, lists all scripts active on the given
object.
Script path can be given from the base location for scripts as given in
settings. If adding a new script, it will be started automatically
(no /start switch is needed). Using the /start or /stop switches on an
object without specifying a script key/path will start/stop ALL scripts on
the object.
"""
key = "script"
aliases = "addscript"
switch_options = ("start", "stop")
locks = "cmd:perm(script) or perm(Builder)"
help_category = "Building"
def func(self):
"""Do stuff"""
caller = self.caller
if not self.args:
string = "Usage: script[/switch] <obj> [= script_path or <script key>]"
caller.msg(string)
return
if not self.lhs:
caller.msg("To create a global script you need |wscripts/add <typeclass>|n.")
return
obj = caller.search(self.lhs)
if not obj:
return
result = []
if not self.rhs:
# no rhs means we want to operate on all scripts
scripts = obj.scripts.all()
if not scripts:
result.append("No scripts defined on %s." % obj.get_display_name(caller))
elif not self.switches:
# view all scripts
from evennia.commands.default.system import format_script_list
result.append(format_script_list(scripts))
elif "start" in self.switches:
num = sum([obj.scripts.start(script.key) for script in scripts])
result.append("%s scripts started on %s." % (num, obj.get_display_name(caller)))
elif "stop" in self.switches:
for script in scripts:
result.append(
"Stopping script %s on %s."
% (script.get_display_name(caller), obj.get_display_name(caller))
)
script.stop()
obj.scripts.validate()
else: # rhs exists
if not self.switches:
# adding a new script, and starting it
ok = obj.scripts.add(self.rhs, autostart=True)
if not ok:
result.append(
"\nScript %s could not be added and/or started on %s "
"(or it started and immediately shut down)."
% (self.rhs, obj.get_display_name(caller))
)
else:
result.append(
"Script |w%s|n successfully added and started on %s."
% (self.rhs, obj.get_display_name(caller))
)
else:
paths = [self.rhs] + [
"%s.%s" % (prefix, self.rhs) for prefix in settings.TYPECLASS_PATHS
]
if "stop" in self.switches:
# we are stopping an already existing script
for path in paths:
ok = obj.scripts.stop(path)
if not ok:
result.append("\nScript %s could not be stopped. Does it exist?" % path)
else:
result = ["Script stopped and removed from object."]
break
if "start" in self.switches:
# we are starting an already existing script
for path in paths:
ok = obj.scripts.start(path)
if not ok:
result.append("\nScript %s could not be (re)started." % path)
else:
result = ["Script started successfully."]
break
EvMore(caller, "".join(result).strip())
class CmdTag(COMMAND_DEFAULT_CLASS):
"""
handles the tags of an object
Usage:
tag[/del] <obj> [= <tag>[:<category>]]
tag/search <tag>[:<category]
Switches:
search - return all objects with a given Tag
del - remove the given tag. If no tag is specified,
clear all tags on object.
Manipulates and lists tags on objects. Tags allow for quick
grouping of and searching for objects. If only <obj> is given,
list all tags on the object. If /search is used, list objects
with the given tag.
The category can be used for grouping tags themselves, but it
should be used with restrain - tags on their own are usually
enough to for most grouping schemes.
"""
key = "tag"
aliases = ["tags"]
options = ("search", "del")
locks = "cmd:perm(tag) or perm(Builder)"
help_category = "Building"
arg_regex = r"(/\w+?(\s|$))|\s|$"
def func(self):
"""Implement the tag functionality"""
if not self.args:
self.caller.msg("Usage: tag[/switches] <obj> [= <tag>[:<category>]]")
return
if "search" in self.switches:
# search by tag
tag = self.args
category = None
if ":" in tag:
tag, category = [part.strip() for part in tag.split(":", 1)]
objs = search.search_tag(tag, category=category)
nobjs = len(objs)
if nobjs > 0:
catstr = (
" (category: '|w%s|n')" % category
if category
else ("" if nobjs == 1 else " (may have different tag categories)")
)
matchstr = ", ".join(o.get_display_name(self.caller) for o in objs)
string = "Found |w%i|n object%s with tag '|w%s|n'%s:\n %s" % (
nobjs,
"s" if nobjs > 1 else "",
tag,
catstr,
matchstr,
)
else:
string = "No objects found with tag '%s%s'." % (
tag,
" (category: %s)" % category if category else "",
)
self.caller.msg(string)
return
if "del" in self.switches:
# remove one or all tags
obj = self.caller.search(self.lhs, global_search=True)
if not obj:
return
if self.rhs:
# remove individual tag
tag = self.rhs
category = None
if ":" in tag:
tag, category = [part.strip() for part in tag.split(":", 1)]
if obj.tags.get(tag, category=category):
obj.tags.remove(tag, category=category)
string = "Removed tag '%s'%s from %s." % (
tag,
" (category: %s)" % category if category else "",
obj,
)
else:
string = "No tag '%s'%s to delete on %s." % (
tag,
" (category: %s)" % category if category else "",
obj,
)
else:
# no tag specified, clear all tags
old_tags = [
"%s%s" % (tag, " (category: %s)" % category if category else "")
for tag, category in obj.tags.all(return_key_and_category=True)
]
if old_tags:
obj.tags.clear()
string = "Cleared all tags from %s: %s" % (obj, ", ".join(sorted(old_tags)))
else:
string = "No Tags to clear on %s." % obj
self.caller.msg(string)
return
# no search/deletion
if self.rhs:
# = is found; command args are of the form obj = tag
obj = self.caller.search(self.lhs, global_search=True)
if not obj:
return
tag = self.rhs
category = None
if ":" in tag:
tag, category = [part.strip() for part in tag.split(":", 1)]
# create the tag
obj.tags.add(tag, category=category)
string = "Added tag '%s'%s to %s." % (
tag,
" (category: %s)" % category if category else "",
obj,
)
self.caller.msg(string)
else:
# no = found - list tags on object
obj = self.caller.search(self.args, global_search=True)
if not obj:
return
tagtuples = obj.tags.all(return_key_and_category=True)
ntags = len(tagtuples)
tags = [tup[0] for tup in tagtuples]
categories = [" (category: %s)" % tup[1] if tup[1] else "" for tup in tagtuples]
if ntags:
string = "Tag%s on %s: %s" % (
"s" if ntags > 1 else "",
obj,
", ".join(sorted("'%s'%s" % (tags[i], categories[i]) for i in range(ntags))),
)
else:
string = "No tags attached to %s." % obj
self.caller.msg(string)
# helper functions for spawn
class CmdSpawn(COMMAND_DEFAULT_CLASS):
"""
spawn objects from prototype
Usage:
spawn[/noloc] <prototype_key>
spawn[/noloc] <prototype_dict>
spawn/search [prototype_keykey][;tag[,tag]]
spawn/list [tag, tag, ...]
spawn/show [<prototype_key>]
spawn/update <prototype_key>
spawn/save <prototype_dict>
spawn/edit [<prototype_key>]
olc - equivalent to spawn/edit
Switches:
noloc - allow location to be None if not specified explicitly. Otherwise,
location will default to caller's current location.
search - search prototype by name or tags.
list - list available prototypes, optionally limit by tags.
show, examine - inspect prototype by key. If not given, acts like list.
raw - show the raw dict of the prototype as a one-line string for manual editing.
save - save a prototype to the database. It will be listable by /list.
delete - remove a prototype from database, if allowed to.
update - find existing objects with the same prototype_key and update
them with latest version of given prototype. If given with /save,
will auto-update all objects with the old version of the prototype
without asking first.
edit, menu, olc - create/manipulate prototype in a menu interface.
Example:
spawn GOBLIN
spawn {"key":"goblin", "typeclass":"monster.Monster", "location":"#2"}
spawn/save {"key": "grunt", prototype: "goblin"};;mobs;edit:all()
\f
Dictionary keys:
|wprototype_parent |n - name of parent prototype to use. Required if typeclass is
not set. Can be a path or a list for multiple inheritance (inherits
left to right). If set one of the parents must have a typeclass.
|wtypeclass |n - string. Required if prototype_parent is not set.
|wkey |n - string, the main object identifier
|wlocation |n - this should be a valid object or #dbref
|whome |n - valid object or #dbref
|wdestination|n - only valid for exits (object or dbref)
|wpermissions|n - string or list of permission strings
|wlocks |n - a lock-string
|waliases |n - string or list of strings.
|wndb_|n<name> - value of a nattribute (ndb_ is stripped)
|wprototype_key|n - name of this prototype. Unique. Used to store/retrieve from db
and update existing prototyped objects if desired.
|wprototype_desc|n - desc of this prototype. Used in listings
|wprototype_locks|n - locks of this prototype. Limits who may use prototype
|wprototype_tags|n - tags of this prototype. Used to find prototype
any other keywords are interpreted as Attributes and their values.
The available prototypes are defined globally in modules set in
settings.PROTOTYPE_MODULES. If spawn is used without arguments it
displays a list of available prototypes.
"""
key = "spawn"
aliases = ["olc"]
switch_options = (
"noloc",
"search",
"list",
"show",
"raw",
"examine",
"save",
"delete",
"menu",
"olc",
"update",
"edit",
)
locks = "cmd:perm(spawn) or perm(Builder)"
help_category = "Building"
def _search_prototype(self, prototype_key, quiet=False):
"""
Search for prototype and handle no/multi-match and access.
Returns a single found prototype or None - in the
case, the caller has already been informed of the
search error we need not do any further action.
"""
prototypes = protlib.search_prototype(prototype_key)
nprots = len(prototypes)
# handle the search result
err = None
if not prototypes:
err = f"No prototype named '{prototype_key}' was found."
elif nprots > 1:
err = "Found {} prototypes matching '{}':\n {}".format(
nprots,
prototype_key,
", ".join(proto.get("prototype_key", "") for proto in prototypes),
)
else:
# we have a single prototype, check access
prototype = prototypes[0]
if not self.caller.locks.check_lockstring(
self.caller, prototype.get("prototype_locks", ""), access_type="spawn", default=True
):
err = "You don't have access to use this prototype."
if err:
# return None on any error
if not quiet:
self.caller.msg(err)
return
return prototype
def _parse_prototype(self, inp, expect=dict):
"""
Parse a prototype dict or key from the input and convert it safely
into a dict if appropriate.
Args:
inp (str): The input from user.
expect (type, optional):
Returns:
prototype (dict, str or None): The parsed prototype. If None, the error
was already reported.
"""
eval_err = None
try:
prototype = _LITERAL_EVAL(inp)
except (SyntaxError, ValueError) as err:
# treat as string
eval_err = err
prototype = utils.to_str(inp)
finally:
# it's possible that the input was a prototype-key, in which case
# it's okay for the LITERAL_EVAL to fail. Only if the result does not
# match the expected type do we have a problem.
if not isinstance(prototype, expect):
if eval_err:
string = (
f"{inp}\n{eval_err}\n|RCritical Python syntax error in argument. Only primitive "
"Python structures are allowed. \nMake sure to use correct "
"Python syntax. Remember especially to put quotes around all "
"strings inside lists and dicts.|n For more advanced uses, embed "
"inlinefuncs in the strings."
)
else:
string = "Expected {}, got {}.".format(expect, type(prototype))
self.caller.msg(string)
return
if expect == dict:
# an actual prototype. We need to make sure it's safe,
# so don't allow exec.
# TODO: Exec support is deprecated. Remove completely for 1.0.
if "exec" in prototype and not self.caller.check_permstring("Developer"):
self.caller.msg(
"Spawn aborted: You are not allowed to " "use the 'exec' prototype key."
)
return
try:
# we homogenize the protoype first, to be more lenient with free-form
protlib.validate_prototype(protlib.homogenize_prototype(prototype))
except RuntimeError as err:
self.caller.msg(str(err))
return
return prototype
def _get_prototype_detail(self, query=None, prototypes=None):
"""
Display the detailed specs of one or more prototypes.
Args:
query (str, optional): If this is given and `prototypes` is not, search for
the prototype(s) by this query. This may be a partial query which
may lead to multiple matches, all being displayed.
prototypes (list, optional): If given, ignore `query` and only show these
prototype-details.
Returns:
display (str, None): A formatted string of one or more prototype details.
If None, the caller was already informed of the error.
"""
if not prototypes:
# we need to query. Note that if query is None, all prototypes will
# be returned.
prototypes = protlib.search_prototype(key=query)
if prototypes:
return "\n".join(protlib.prototype_to_str(prot) for prot in prototypes)
elif query:
self.caller.msg(f"No prototype named '{query}' was found.")
else:
self.caller.msg(f"No prototypes found.")
def _list_prototypes(self, key=None, tags=None):
"""Display prototypes as a list, optionally limited by key/tags. """
table = protlib.list_prototypes(self.caller, key=key, tags=tags)
if not table:
return True
EvMore(
self.caller, str(table), exit_on_lastpage=True, justify_kwargs=False,
)
@interactive
def _update_existing_objects(self, caller, prototype_key, quiet=False):
"""
Update existing objects (if any) with this prototype-key to the latest
prototype version.
Args:
caller (Object): This is necessary for @interactive to work.
prototype_key (str): The prototype to update.
quiet (bool, optional): If set, don't report to user if no
old objects were found to update.
Returns:
n_updated (int): Number of updated objects.
"""
prototype = self._search_prototype(prototype_key)
if not prototype:
return
existing_objects = protlib.search_objects_with_prototype(prototype_key)
if not existing_objects:
if not quiet:
caller.msg("No existing objects found with an older version of this prototype.")
return
if existing_objects:
n_existing = len(existing_objects)
slow = " (note that this may be slow)" if n_existing > 10 else ""
string = (
f"There are {n_existing} existing object(s) with an older version "
f"of prototype '{prototype_key}'. Should it be re-applied to them{slow}? [Y]/N"
)
answer = yield (string)
if answer.lower() in ["n", "no"]:
caller.msg(
"|rNo update was done of existing objects. "
"Use spawn/update <key> to apply later as needed.|n"
)
return
try:
n_updated = spawner.batch_update_objects_with_prototype(
prototype, objects=existing_objects
)
except Exception:
logger.log_trace()
caller.msg(f"{n_updated} objects were updated.")
return
def _parse_key_desc_tags(self, argstring, desc=True):
"""
Parse ;-separated input list.
"""
key, desc, tags = "", "", []
if ";" in argstring:
parts = [part.strip().lower() for part in argstring.split(";")]
if len(parts) > 1 and desc:
key = parts[0]
desc = parts[1]
tags = parts[2:]
else:
key = parts[0]
tags = parts[1:]
else:
key = argstring.strip().lower()
return key, desc, tags
def func(self):
"""Implements the spawner"""
caller = self.caller
noloc = "noloc" in self.switches
# run the menu/olc
if (
self.cmdstring == "olc"
or "menu" in self.switches
or "olc" in self.switches
or "edit" in self.switches
):
# OLC menu mode
prototype = None
if self.lhs:
prototype_key = self.lhs
prototype = self._search_prototype(prototype_key)
if not prototype:
return
olc_menus.start_olc(caller, session=self.session, prototype=prototype)
return
if "search" in self.switches:
# query for a key match. The arg is a search query or nothing.
if not self.args:
# an empty search returns the full list
self._list_prototypes()
return
# search for key;tag combinations
key, _, tags = self._parse_key_desc_tags(self.args, desc=False)
self._list_prototypes(key, tags)
return
if "raw" in self.switches:
# query for key match and return the prototype as a safe one-liner string.
if not self.args:
caller.msg("You need to specify a prototype-key to get the raw data for.")
prototype = self._search_prototype(self.args)
if not prototype:
return
caller.msg(str(prototype))
return
if "show" in self.switches or "examine" in self.switches:
# show a specific prot detail. The argument is a search query or empty.
if not self.args:
# we don't show the list of all details, that's too spammy.
caller.msg("You need to specify a prototype-key to show.")
return
detail_string = self._get_prototype_detail(self.args)
if not detail_string:
return
caller.msg(detail_string)
return
if "list" in self.switches:
# for list, all optional arguments are tags.
tags = self.lhslist
err = self._list_prototypes(tags=tags)
if err:
caller.msg(
"No prototypes found with prototype-tag(s): {}".format(
list_to_string(tags, "or")
)
)
return
if "save" in self.switches:
# store a prototype to the database store
if not self.args:
caller.msg(
"Usage: spawn/save [<key>[;desc[;tag,tag[,...][;lockstring]]]] = <prototype_dict>"
)
return
if self.rhs:
# input on the form key = prototype
prototype_key, prototype_desc, prototype_tags = self._parse_key_desc_tags(self.lhs)
prototype_key = None if not prototype_key else prototype_key
prototype_desc = None if not prototype_desc else prototype_desc
prototype_tags = None if not prototype_tags else prototype_tags
prototype_input = self.rhs.strip()
else:
prototype_key = prototype_desc = None
prototype_tags = None
prototype_input = self.lhs.strip()
# handle parsing
prototype = self._parse_prototype(prototype_input)
if not prototype:
return
prot_prototype_key = prototype.get("prototype_key")
if not (prototype_key or prot_prototype_key):
caller.msg(
"A prototype_key must be given, either as `prototype_key = <prototype>` "
"or as a key 'prototype_key' inside the prototype structure."
)
return
if prototype_key is None:
prototype_key = prot_prototype_key
if prot_prototype_key != prototype_key:
caller.msg("(Replacing `prototype_key` in prototype with given key.)")
prototype["prototype_key"] = prototype_key
if prototype_desc is not None and prot_prototype_key != prototype_desc:
caller.msg("(Replacing `prototype_desc` in prototype with given desc.)")
prototype["prototype_desc"] = prototype_desc
if prototype_tags is not None and prototype.get("prototype_tags") != prototype_tags:
caller.msg("(Replacing `prototype_tags` in prototype with given tag(s))")
prototype["prototype_tags"] = prototype_tags
string = ""
# check for existing prototype (exact match)
old_prototype = self._search_prototype(prototype_key, quiet=True)
diff = spawner.prototype_diff(old_prototype, prototype, homogenize=True)
diffstr = spawner.format_diff(diff)
new_prototype_detail = self._get_prototype_detail(prototypes=[prototype])
if old_prototype:
if not diffstr:
string = f"|yAlready existing Prototype:|n\n{new_prototype_detail}\n"
question = (
"\nThere seems to be no changes. Do you still want to (re)save? [Y]/N"
)
else:
string = (
f'|yExisting prototype "{prototype_key}" found. Change:|n\n{diffstr}\n'
f"|yNew changed prototype:|n\n{new_prototype_detail}"
)
question = (
"\n|yDo you want to apply the change to the existing prototype?|n [Y]/N"
)
else:
string = f"|yCreating new prototype:|n\n{new_prototype_detail}"
question = "\nDo you want to continue saving? [Y]/N"
answer = yield (string + question)
if answer.lower() in ["n", "no"]:
caller.msg("|rSave cancelled.|n")
return
# all seems ok. Try to save.
try:
prot = protlib.save_prototype(prototype)
if not prot:
caller.msg("|rError saving:|R {}.|n".format(prototype_key))
return
except protlib.PermissionError as err:
caller.msg("|rError saving:|R {}|n".format(err))
return
caller.msg("|gSaved prototype:|n {}".format(prototype_key))
# check if we want to update existing objects
self._update_existing_objects(self.caller, prototype_key, quiet=True)
return
if not self.args:
# all switches beyond this point gets a common non-arg return
ncount = len(protlib.search_prototype())
caller.msg(
"Usage: spawn <prototype-key> or {{key: value, ...}}"
f"\n ({ncount} existing prototypes. Use /list to inspect)"
)
return
if "delete" in self.switches:
# remove db-based prototype
prototype_detail = self._get_prototype_detail(self.args)
if not prototype_detail:
return
string = f"|rDeleting prototype:|n\n{prototype_detail}"
question = "\nDo you want to continue deleting? [Y]/N"
answer = yield (string + question)
if answer.lower() in ["n", "no"]:
caller.msg("|rDeletion cancelled.|n")
return
try:
success = protlib.delete_prototype(self.args)
except protlib.PermissionError as err:
retmsg = f"|rError deleting:|R {err}|n"
else:
retmsg = (
"Deletion successful"
if success
else "Deletion failed (does the prototype exist?)"
)
caller.msg(retmsg)
return
if "update" in self.switches:
# update existing prototypes
prototype_key = self.args.strip().lower()
self._update_existing_objects(self.caller, prototype_key)
return
# If we get to this point, we use not switches but are trying a
# direct creation of an object from a given prototype or -key
prototype = self._parse_prototype(
self.args, expect=dict if self.args.strip().startswith("{") else str
)
if not prototype:
# this will only let through dicts or strings
return
key = "<unnamed>"
if isinstance(prototype, str):
# A prototype key we are looking to apply
prototype_key = prototype
prototype = self._search_prototype(prototype_key)
if not prototype:
return
# proceed to spawning
try:
for obj in spawner.spawn(prototype):
self.caller.msg("Spawned %s." % obj.get_display_name(self.caller))
if not prototype.get("location") and not noloc:
# we don't hardcode the location in the prototype (unless the user
# did so manually) - that would lead to it having to be 'removed' every
# time we try to update objects with this prototype in the future.
obj.location = caller.location
except RuntimeError as err:
caller.msg(err)
| 37.521913 | 127 | 0.539069 |
ace7b7f81d97af5b4b37d4200363daae267c2c48 | 269 | py | Python | topsides/pidh.py | EasternEdgeRobotics/2019 | 19f833262e7bdd026fffc0ac894327369d6bb66f | [
"MIT"
] | 1 | 2020-07-24T20:40:49.000Z | 2020-07-24T20:40:49.000Z | topsides/pidh.py | EasternEdgeRobotics/2019 | 19f833262e7bdd026fffc0ac894327369d6bb66f | [
"MIT"
] | 9 | 2019-08-23T01:39:53.000Z | 2019-08-23T01:42:48.000Z | topsides/pidh.py | EasternEdgeRobotics/2019 | 19f833262e7bdd026fffc0ac894327369d6bb66f | [
"MIT"
] | null | null | null |
class pid:
def __init__(self):
self.kP = 0.0
self.kI = 0.0
self.kD = 0.0
self.target = 0.0
self.error = 0.0
self.integral = 0.0
self.derivative = 0.0
self.last_error = 0.0
self.intError = 0.0
| 20.692308 | 29 | 0.483271 |
ace7b925010e866ff6a7c0f0cb314ba20d484166 | 471 | py | Python | chatbot_env/Lib/site-packages/sklearn/cluster/birch.py | rakmakan/Chatbot | d04bc1526b56961a16c25148d9ef18c4f157e9c4 | [
"MIT"
] | 6,989 | 2017-07-18T06:23:18.000Z | 2022-03-31T15:58:36.000Z | chatbot_env/Lib/site-packages/sklearn/cluster/birch.py | rakmakan/Chatbot | d04bc1526b56961a16c25148d9ef18c4f157e9c4 | [
"MIT"
] | 1,978 | 2017-07-18T09:17:58.000Z | 2022-03-31T14:28:43.000Z | site-packages/sklearn/cluster/birch.py | Wristlebane/Pyto | 901ac307b68486d8289105c159ca702318bea5b0 | [
"MIT"
] | 1,228 | 2017-07-18T09:03:13.000Z | 2022-03-29T05:57:40.000Z |
# THIS FILE WAS AUTOMATICALLY GENERATED BY deprecated_modules.py
import sys
from . import _birch
from ..externals._pep562 import Pep562
from ..utils.deprecation import _raise_dep_warning_if_not_pytest
deprecated_path = 'sklearn.cluster.birch'
correct_import_path = 'sklearn.cluster'
_raise_dep_warning_if_not_pytest(deprecated_path, correct_import_path)
def __getattr__(name):
return getattr(_birch, name)
if not sys.version_info >= (3, 7):
Pep562(__name__)
| 26.166667 | 70 | 0.806794 |
ace7ba18755f0d8fe2314ed068a6eeef2a40ea89 | 1,946 | py | Python | 2021/4/solution.py | iangregson/advent-of-code | e2a2dde30dcaed027a5ba78f9270f8a1976577f1 | [
"MIT"
] | null | null | null | 2021/4/solution.py | iangregson/advent-of-code | e2a2dde30dcaed027a5ba78f9270f8a1976577f1 | [
"MIT"
] | null | null | null | 2021/4/solution.py | iangregson/advent-of-code | e2a2dde30dcaed027a5ba78f9270f8a1976577f1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
file = open(dir_path + "/input.txt", "r")
lines = [l.strip() for l in file.readlines()]
# lines = [
# '7,4,9,5,11,17,23,2,0,14,21,24,10,16,13,6,15,25,12,22,18,20,8,19,3,26,1',
# '',
# '22 13 17 11 0',
# ' 8 2 23 4 24',
# '21 9 14 16 7',
# ' 6 10 3 18 5',
# ' 1 12 20 15 19',
# '',
# ' 3 15 0 2 22',
# ' 9 18 13 17 5',
# '19 8 7 25 23',
# '20 11 10 24 4',
# '14 21 16 12 6',
# '',
# '14 21 17 24 4',
# '10 16 15 9 19',
# '18 8 23 26 20',
# '22 11 13 6 5',
# ' 2 0 12 3 7',
# ]
numbers = [int(n) for n in lines[0].split(',')]
numbers_pt2 = [int(n) for n in lines[0].split(',')]
lines = lines[2:]
boards = []
while len(lines) != 0:
board = [[int(n.strip()) for n in row.split()] for row in lines[0:5]]
boards.append(board)
lines = lines[6:]
def check_winner(draw, rows):
cols = list(zip(*rows))
winner = False
# by row
for i in range(5):
winner = (len(set(draw).intersection(set(rows[i]))) == 5) or (len(set(draw).intersection(set(cols[i]))) == 5)
if winner == True:
break
return winner
draw = []
winner = None
while len(numbers) != 0:
draw.append(numbers.pop(0))
for board in boards:
if check_winner(draw, board) == True:
winner = board
if winner != None:
break
# print(winner)
sum_winner = sum([sum([n for n in row if n not in draw]) for row in winner])
print(sum_winner * draw.pop())
numbers = numbers_pt2
draw = []
draws = []
winners = []
while len(numbers) != 0:
draw.append(numbers.pop(0))
for idx, board in enumerate(boards):
if check_winner(draw, board) == True:
winners.append(boards.pop(idx))
draws.append(draw.copy())
last_winner = winners.pop()
draw_winner = draws.pop()
sum_winner = sum([sum([n for n in row if n not in draw_winner]) for row in last_winner])
print(sum_winner * draw_winner.pop()) | 22.894118 | 113 | 0.580678 |
ace7ba82a6dafefbd8755c7a2298b9805dc0c650 | 973 | py | Python | moonfire_tokenomics/data/ren.py | moonfire-ventures/tokenomics | 4152bfd0e831262139ce50c6baae2748f19af850 | [
"MIT"
] | 2 | 2022-03-30T17:12:03.000Z | 2022-03-30T17:14:20.000Z | moonfire_tokenomics/data/ren.py | moonfire-ventures/tokenomics | 4152bfd0e831262139ce50c6baae2748f19af850 | [
"MIT"
] | null | null | null | moonfire_tokenomics/data/ren.py | moonfire-ventures/tokenomics | 4152bfd0e831262139ce50c6baae2748f19af850 | [
"MIT"
] | null | null | null | from moonfire_tokenomics.data_types import Allocation, AllocationRecord, Blockchain, Category, CommonType, Sector, Token
ren = Token(
name="REN",
project="Ren",
sector=Sector.DEFI,
blockchain=[Blockchain.ETH, Blockchain.ONE, Blockchain.SORA],
category=[Category.OTHER],
capped=True,
allocations=[
Allocation(
month=24,
records=[
AllocationRecord(type="Main Sale", common_type=CommonType.PUBLIC_SALE, share=0.082),
AllocationRecord(type="Presale", common_type=CommonType.INVESTORS, share=0.52),
AllocationRecord(type="Team, Founders and Advisors", common_type=CommonType.TEAM, share=0.298),
AllocationRecord(type="Community Partners", common_type=CommonType.ECOSYSTEM, share=0.1),
],
),
],
sources=[
"https://medium.com/@JamesTodaro/republic-protocol-analysis-and-valuation-7ebef4b3c4f9",
],
year=2017,
)
| 37.423077 | 120 | 0.651593 |
ace7bb33ccd3622c50b5daedd9bae52e2c057ac8 | 18,708 | py | Python | src/sage/interfaces/scilab.py | sensen1/sage | d6c5cd9be78cc448ee4c54bac93385b1244a234c | [
"BSL-1.0"
] | 1 | 2021-03-15T21:45:56.000Z | 2021-03-15T21:45:56.000Z | src/sage/interfaces/scilab.py | sensen1/sage | d6c5cd9be78cc448ee4c54bac93385b1244a234c | [
"BSL-1.0"
] | null | null | null | src/sage/interfaces/scilab.py | sensen1/sage | d6c5cd9be78cc448ee4c54bac93385b1244a234c | [
"BSL-1.0"
] | null | null | null | r"""
Interface to Scilab
Scilab is a scientific software package for numerical computations
providing a powerful open computing environment for engineering and
scientific applications. Scilab includes hundreds of mathematical
functions with the possibility to add interactively programs from
various languages (C, C++, Fortran...). It has sophisticated data
structures (including lists, polynomials, rational functions, linear
systems...), an interpreter and a high level programming language.
The commands in this section only work if you have the "scilab"
interpreter installed and available in your PATH. It's not necessary
to install any special Sage packages.
EXAMPLES::
sage: scilab.eval('2+2') # optional - scilab
'ans =\n \n 4.'
sage: scilab('2+2') # optional - scilab
4.
sage: a = scilab(10) # optional - scilab
sage: a**10 # optional - scilab
1.000D+10
Tutorial based the MATLAB interface tutorial:
EXAMPLES::
sage: scilab('4+10') # optional - scilab
14.
sage: scilab('date') # optional - scilab; random output
15-Feb-2010
sage: scilab('5*10 + 6') # optional - scilab
56.
sage: scilab('(6+6)/3') # optional - scilab
4.
sage: scilab('9')^2 # optional - scilab
81.
sage: a = scilab(10); b = scilab(20); c = scilab(30) # optional - scilab
sage: avg = (a+b+c)/3 # optional - scilab
sage: avg # optional - scilab
20.
sage: parent(avg) # optional - scilab
Scilab
sage: my_scalar = scilab('3.1415') # optional - scilab
sage: my_scalar # optional - scilab
3.1415
sage: my_vector1 = scilab('[1,5,7]') # optional - scilab
sage: my_vector1 # optional - scilab
1. 5. 7.
sage: my_vector2 = scilab('[1;5;7]') # optional - scilab
sage: my_vector2 # optional - scilab
1.
5.
7.
sage: my_vector1 * my_vector2 # optional - scilab
75.
sage: row_vector1 = scilab('[1 2 3]') # optional - scilab
sage: row_vector2 = scilab('[3 2 1]') # optional - scilab
sage: matrix_from_row_vec = scilab('[%s; %s]'%(row_vector1.name(), row_vector2.name())) # optional - scilab
sage: matrix_from_row_vec # optional - scilab
1. 2. 3.
3. 2. 1.
sage: column_vector1 = scilab('[1;3]') # optional - scilab
sage: column_vector2 = scilab('[2;8]') # optional - scilab
sage: matrix_from_col_vec = scilab('[%s %s]'%(column_vector1.name(), column_vector2.name())) # optional - scilab
sage: matrix_from_col_vec # optional - scilab
1. 2.
3. 8.
sage: my_matrix = scilab('[8, 12, 19; 7, 3, 2; 12, 4, 23; 8, 1, 1]') # optional - scilab
sage: my_matrix # optional - scilab
8. 12. 19.
7. 3. 2.
12. 4. 23.
8. 1. 1.
sage: combined_matrix = scilab('[%s, %s]'%(my_matrix.name(), my_matrix.name())) # optional - scilab
sage: combined_matrix # optional - scilab
8. 12. 19. 8. 12. 19.
7. 3. 2. 7. 3. 2.
12. 4. 23. 12. 4. 23.
8. 1. 1. 8. 1. 1.
sage: tm = scilab('0.5:2:10') # optional - scilab
sage: tm # optional - scilab
0.5 2.5 4.5 6.5 8.5
sage: my_vector1 = scilab('[1,5,7]') # optional - scilab
sage: my_vector1(1) # optional - scilab
1.
sage: my_vector1(2) # optional - scilab
5.
sage: my_vector1(3) # optional - scilab
7.
Matrix indexing works as follows::
sage: my_matrix = scilab('[8, 12, 19; 7, 3, 2; 12, 4, 23; 8, 1, 1]') # optional - scilab
sage: my_matrix(3,2) # optional - scilab
4.
One can also use square brackets::
sage: my_matrix[3,2] # optional - scilab
4.
Setting using parenthesis cannot work (because of how the Python
language works). Use square brackets or the set function::
sage: my_matrix = scilab('[8, 12, 19; 7, 3, 2; 12, 4, 23; 8, 1, 1]') # optional - scilab
sage: my_matrix.set(2,3, 1999) # optional - scilab
sage: my_matrix # optional - scilab
8. 12. 19.
7. 3. 1999.
12. 4. 23.
8. 1. 1.
sage: my_matrix[2,3] = -126 # optional - scilab
sage: my_matrix # optional - scilab
8. 12. 19.
7. 3. - 126.
12. 4. 23.
8. 1. 1.
TESTS::
sage: M = scilab(x) # optional - scilab
Traceback (most recent call last):
...
TypeError: _interface_init_() takes exactly one argument (0 given)
sage: M = scilab(matrix(3,range(9))); M # optional - scilab
0. 1. 2.
3. 4. 5.
6. 7. 8.
sage: M(10) # optional - scilab
Traceback (most recent call last):
...
TypeError: Error executing code in Scilab
...
Invalid index.
sage: M[10] # optional - scilab
Traceback (most recent call last):
...
TypeError: Error executing code in Scilab
...
Invalid index.
sage: M(4,2) # optional - scilab
Traceback (most recent call last):
...
TypeError: Error executing code in Scilab
...
Invalid index.
sage: M[2,4] # optional - scilab
Traceback (most recent call last):
...
TypeError: Error executing code in Scilab
...
Invalid index.
sage: M(9) = x # optional - scilab
Traceback (most recent call last):
...
SyntaxError: can...t assign to function call (..., line 1)
AUTHORS:
-- Ronan Paixao (2008-11-26), based on the MATLAB tutorial by
William Stein (2006-10-11)
"""
##############################################################################
# Copyright (C) 2006 William Stein <wstein@gmail.com>
# Copyright (C) 2008 Ronan Paixao <ronanpaixao@yahoo.com.br>
#
# Distributed under the terms of the GNU General Public License (GPL).
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
##############################################################################
import os
from .expect import Expect, ExpectElement
from sage.docs.instancedoc import instancedoc
class Scilab(Expect):
"""
Interface to the Scilab interpreter.
EXAMPLES::
sage: a = scilab('[ 1, 1, 2; 3, 5, 8; 13, 21, 33 ]') # optional - scilab
sage: b = scilab('[ 1; 3; 13]') # optional - scilab
sage: c = a * b # optional - scilab
sage: print(c) # optional - scilab
30.
122.
505.
"""
def __init__(self, maxread=None, script_subdirectory=None,
logfile=None, server=None,server_tmpdir=None,
seed=None):
"""
Initializes the Scilab class.
EXAMPLES::
sage: from sage.interfaces.scilab import Scilab
sage: sci_obj = Scilab()
sage: del sci_obj
"""
Expect.__init__(self,
name = 'scilab',
prompt = '-->',
command = "scilab -nw",
server = server,
server_tmpdir = server_tmpdir,
script_subdirectory = script_subdirectory,
restart_on_ctrlc = False,
verbose_start = False,
logfile = logfile,
eval_using_file_cutoff=100)
self._seed = seed
def set_seed(self, seed=None):
"""
Set the seed for gp interpreter.
The seed should be an integer.
EXAMPLES::
sage: from sage.interfaces.scilab import Scilab # optional - scilab
sage: s = Scilab() # optional - scilab
sage: s.set_seed(1) # optional - scilab
1
sage: [s.rand() for i in range(5)] # optional - scilab
[
<BLANKLINE>
0.6040239,
<BLANKLINE>
0.0079647,
<BLANKLINE>
0.6643966,
<BLANKLINE>
0.9832111,
<BLANKLINE>
0.5321420]
"""
if seed is None:
seed = self.rand_seed()
self.eval("rand('seed',%d)" % seed)
self._seed = seed
return seed
def _quit_string(self):
"""
Returns the string used to quit the pexpect interface.
EXAMPLES::
sage: scilab._quit_string() # optional - scilab
'quit;'
"""
return 'quit;'
def _install_hints(self):
"""
Hints for installing Scilab.
EXAMPLES::
sage: print(scilab._install_hints()) # optional - scilab
You must ...
"""
return """
You must obtain the Scilab program in order to use Scilab
from Sage. You can read all about Scilab at
http://www.scilab.org/
The executable must be accessible system-wide.
"""
def _start(self):
"""
Starts Scilab and sets some options.
EXAMPLES::
sage: scilab._start() # optional - scilab
"""
Expect._start(self)
self.eval("mode(0)")
# set random seed
self.set_seed(self._seed)
def eval(self, command, *args, **kwds):
"""
Evaluates commands.
EXAMPLES::
sage: scilab.eval("5") # optional - scilab
'ans =\n \n 5.'
sage: scilab.eval("d=44") # optional - scilab
'd =\n \n 44.'
"""
s = Expect.eval(self, command, **kwds).replace("\x1b[?1l\x1b>","").strip()
return s
def whos(self, name=None, typ=None):
"""
Returns information about current objects.
Arguments:
nam: first characters of selected names
typ: name of selected Scilab variable type
EXAMPLES::
sage: scilab.whos("core") # optional - scilab
'Name Type Size Bytes...'
sage: scilab.whos(typ='function') # optional - scilab
'Name Type Size Bytes...'
"""
parameters = ""
if name:
parameters += " -name %s" % (str(name))
if typ:
parameters += " -type %s" % (str(typ))
return self.eval('whos' + parameters)
def set(self, var, value):
"""
Set the variable var to the given value.
EXAMPLES::
sage: scilab.set('a', 123) # optional - scilab
sage: scilab.get('a') # optional - scilab
'\n \n 123.'
"""
cmd = '%s=%s;'%(var,value)
out = self.eval(cmd)
if out.find("error") != -1:
raise TypeError("Error executing code in Scilab\nCODE:\n\t%s\nScilab ERROR:\n\t%s"%(cmd, out))
def get(self, var):
"""
Get the value of the variable var.
EXAMPLES::
sage: scilab.eval('b=124;') # optional - scilab
''
sage: scilab.get('b') # optional - scilab
'\n \n 124.'
"""
s = self.eval('%s'%var)
i = s.find('=')
return s[i+1:]
def console(self):
"""
Starts Scilab console.
EXAMPLES::
sage: scilab.console() # optional - scilab; not tested
"""
scilab_console()
def version(self):
"""
Returns the version of the Scilab software used.
EXAMPLES::
sage: scilab.version() # optional - scilab
'scilab-...'
"""
return scilab_version()
def sage2scilab_matrix_string(self, A):
"""
Return a Scilab matrix from a Sage matrix.
INPUT:
A Sage matrix with entries in the rationals or reals.
OUTPUT:
A string that evaluates to an Scilab matrix.
EXAMPLES::
sage: M33 = MatrixSpace(QQ,3,3) # optional - scilab
sage: A = M33([1,2,3,4,5,6,7,8,0]) # optional - scilab
sage: scilab.sage2scilab_matrix_string(A) # optional - scilab
'[1, 2, 3; 4, 5, 6; 7, 8, 0]'
"""
return str(A.rows()).replace('), (', '; ').replace('(', '').replace(')','')
def _object_class(self):
"""
Returns the class of the object.
EXAMPLES::
sage: scilab._object_class() # optional - scilab
<class 'sage.interfaces.scilab.ScilabElement'>
sage: type(scilab(2)) # optional - scilab
<class 'sage.interfaces.scilab.ScilabElement'>
"""
return ScilabElement
@instancedoc
class ScilabElement(ExpectElement):
def __getitem__(self, n):
"""
Use parenthesis for Scilab matrices instead.
EXAMPLES::
sage: M = scilab('[1,2,3;4,5,6;7,8,9]') # optional - scilab
sage: M[1] # optional - scilab
1.
sage: M[7] # optional - scilab
3.
sage: M[3,2] # optional - scilab
8.
"""
if isinstance(n, tuple):
index = str(n)[1:-1]
else:
index = str(n)
return self.parent()('%s(%s)' % (self._name, index))
def __setitem__(self, n, value):
"""
Sets an element of a matrix.
EXAMPLES::
sage: M = scilab('[1,2,3;4,5,6;7,8,9]') # optional - scilab
sage: M[6] = 0 # optional - scilab
sage: M # optional - scilab
1. 2. 3.
4. 5. 6.
7. 0. 9.
sage: M[3,2] = 10 # optional - scilab
sage: M # optional - scilab
1. 2. 3.
4. 5. 6.
7. 10. 9.
"""
if isinstance(n, tuple):
index = str(n)[1:-1]
else:
index = str(n)
self.parent().eval('%s(%s) = %s' % (self._name, index, value))
def _matrix_(self, R):
r"""
Return \sage matrix from this scilab element.
EXAMPLES::
sage: A = scilab('[1,2;3,4]') # optional - scilab
sage: matrix(ZZ, A) # optional - scilab
[1 2]
[3 4]
sage: A = scilab('[1,2;3,4.5]') # optional - scilab
sage: matrix(RR, A) # optional - scilab
[1.00000000000000 2.00000000000000]
[3.00000000000000 4.50000000000000]
"""
from sage.matrix.all import MatrixSpace
s = str(self).strip()
v = s.split('\n ')
nrows = len(v)
if nrows == 0:
return MatrixSpace(R, 0, 0)(0)
ncols = len(v[0].split())
M = MatrixSpace(R, nrows, ncols)
v = sum([[x.rstrip('.') for x in w.split()] for w in v], [])
return M(v)
def set(self, i, j, x):
"""
Set the variable var to the given value.
EXAMPLES::
sage: scilab.set('c', 125) # optional - scilab
sage: scilab.get('c') # optional - scilab
'\n \n 125.'
"""
P = self._check_valid()
z = P(x)
P.eval('%s(%s,%s) = %s'%(self.name(), i, j, z.name()))
# An instance
scilab = Scilab()
def scilab_console():
"""
This requires that the optional Scilab program be installed and in
your PATH, but no optional Sage packages need to be installed.
EXAMPLES::
sage: from sage.interfaces.scilab import scilab_console # optional - scilab
sage: scilab_console() # optional - scilab; not tested
___________________________________________
scilab-5.0.3
Consortium Scilab (DIGITEO)
Copyright (c) 1989-2008 (INRIA)
Copyright (c) 1989-2007 (ENPC)
___________________________________________
Startup execution:
loading initial environment
-->2+3
ans =
5.
-->quit
Typing quit exits the Scilab console and returns you to Sage.
Scilab, like Sage, remembers its history from one session to
another.
"""
os.system('scilab -nw')
def scilab_version():
"""
Return the version of Scilab installed.
EXAMPLES::
sage: from sage.interfaces.scilab import scilab_version # optional - scilab
sage: scilab_version() # optional - scilab
'scilab-...'
"""
return str(scilab('getversion()')).strip()
| 33.111504 | 151 | 0.466431 |
ace7bbdcb86f24e70a7b5fec8af8e2948279724e | 169 | py | Python | script/noaxis.py | TomohikoK/Sheaves-and-Homological-Algebra | ef8edebc0a5dde09305dd876d5df6ecace3e58a1 | [
"Apache-2.0"
] | null | null | null | script/noaxis.py | TomohikoK/Sheaves-and-Homological-Algebra | ef8edebc0a5dde09305dd876d5df6ecace3e58a1 | [
"Apache-2.0"
] | null | null | null | script/noaxis.py | TomohikoK/Sheaves-and-Homological-Algebra | ef8edebc0a5dde09305dd876d5df6ecace3e58a1 | [
"Apache-2.0"
] | null | null | null | import matplotlib.pyplot as plt
plt.axis('off')
ax = plt.axes()
ax.plot([1, 2, 3])
# ax.get_xaxis().set_visible(False)
# ax.get_yaxis().set_visible(False)
plt.show()
| 15.363636 | 35 | 0.686391 |
ace7bc3b707ebabd9fd04b2a3a46c7b60638666a | 1,313 | py | Python | pclib/fl/ListBytesProxy_test.py | belang/pymtl | 4a96738724b007cbd684753aed0ac3de5b5dbebb | [
"BSD-3-Clause"
] | 206 | 2015-01-05T21:53:56.000Z | 2022-03-14T08:04:49.000Z | pclib/fl/ListBytesProxy_test.py | belang/pymtl | 4a96738724b007cbd684753aed0ac3de5b5dbebb | [
"BSD-3-Clause"
] | 84 | 2015-01-25T19:57:33.000Z | 2021-05-11T15:46:56.000Z | pclib/fl/ListBytesProxy_test.py | belang/pymtl | 4a96738724b007cbd684753aed0ac3de5b5dbebb | [
"BSD-3-Clause"
] | 99 | 2015-02-17T17:43:44.000Z | 2022-02-14T17:58:18.000Z | #=========================================================================
# ListBytesProxy_test
#=========================================================================
from pymtl import Bits
from Bytes import Bytes
from ListBytesProxy import ListBytesProxy
#-------------------------------------------------------------------------
# vvadd
#-------------------------------------------------------------------------
def vvadd( dest, src0, src1 ):
for i, _ in enumerate(dest):
dest[i] = src0[i] + src1[i]
#-------------------------------------------------------------------------
# test_basic
#-------------------------------------------------------------------------
def test_basic():
data = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17 ]
mem = Bytes(18*4)
for i in range(18):
mem[i*4:i*4+1] = Bits( 32, data[i] )
src0 = ListBytesProxy( mem, 0*4, 4 )
src1 = ListBytesProxy( mem, 4*4, 4 )
dest = ListBytesProxy( mem, 8*4, 4 )
vvadd( dest, src0, src1 )
data_ref = [ 0, 1, 2, 3,
4, 5, 6, 7,
4, 6, 8, 10,
12, 13, 14, 15,
16, 17 ]
data_ref_bytes = Bytes(18*4)
for i in range(18):
data_ref_bytes[i*4:i*4+1] = Bits( 32, data_ref[i] )
assert mem == data_ref_bytes
| 29.177778 | 74 | 0.354912 |
ace7bcbe9c3c56291f5a1802a24f27594c6161c4 | 3,394 | py | Python | ckan/tests/lib/test_uploader.py | robin-NEC/ckan | 71a82c4b0bb499fd3a6d1ccfd038b2231f50f92a | [
"BSD-3-Clause"
] | 1 | 2021-10-01T12:47:19.000Z | 2021-10-01T12:47:19.000Z | ckan/tests/lib/test_uploader.py | robin-NEC/ckan | 71a82c4b0bb499fd3a6d1ccfd038b2231f50f92a | [
"BSD-3-Clause"
] | 1 | 2022-03-12T01:09:04.000Z | 2022-03-12T01:09:04.000Z | ckan/tests/lib/test_uploader.py | robin-NEC/ckan | 71a82c4b0bb499fd3a6d1ccfd038b2231f50f92a | [
"BSD-3-Clause"
] | 2 | 2018-01-21T17:03:08.000Z | 2019-07-23T08:49:52.000Z | # encoding: utf-8
import six
from io import BytesIO
from werkzeug.datastructures import FileStorage
from ckan.lib.uploader import ResourceUpload, Upload
class TestInitResourceUpload(object):
def test_resource_without_upload_with_old_werkzeug(
self, ckan_config, monkeypatch, tmpdir):
monkeypatch.setitem(ckan_config, u'ckan.storage_path', str(tmpdir))
# this test data is based on real observation using a browser
# and werkzeug 0.14.1
res = {u'clear_upload': u'true',
u'format': u'CSV',
u'url': u'https://example.com/data.csv',
u'description': u'',
u'upload': u'',
u'package_id': u'dataset1',
u'id': u'8a3a874e-5ee1-4e43-bdaf-e2569cf72344',
u'name': u'data.csv'}
res_upload = ResourceUpload(res)
assert res_upload.filename is None
def test_resource_without_upload(
self, ckan_config, monkeypatch, tmpdir):
monkeypatch.setitem(ckan_config, u'ckan.storage_path', str(tmpdir))
# this test data is based on real observation using a browser
res = {u'clear_upload': u'true',
u'format': u'PNG',
u'url': u'https://example.com/data.csv',
u'description': u'',
u'upload': FileStorage(filename=u''),
u'package_id': u'dataset1',
u'id': u'8a3a874e-5ee1-4e43-bdaf-e2569cf72344',
u'name': u'data.csv'}
res_upload = ResourceUpload(res)
assert res_upload.filename is None
def test_resource_with_upload(
self, ckan_config, monkeypatch, tmpdir):
monkeypatch.setitem(ckan_config, u'ckan.storage_path', str(tmpdir))
# this test data is based on real observation using a browser
res = {u'clear_upload': u'',
u'format': u'PNG',
u'url': u'https://example.com/data.csv',
u'description': u'',
u'upload': FileStorage(filename=u'data.csv', content_type=u'CSV'),
u'package_id': u'dataset1',
u'id': u'8a3a874e-5ee1-4e43-bdaf-e2569cf72344',
u'name': u'data.csv'}
res_upload = ResourceUpload(res)
assert res_upload.filesize == 0
assert res_upload.filename == u'data.csv'
class TestUpload(object):
def test_group_upload(self, monkeypatch, tmpdir, make_app, ckan_config):
"""Reproduce group's logo upload and check that file available through
public url.
"""
monkeypatch.setitem(ckan_config, u'ckan.storage_path', str(tmpdir))
group = {u'clear_upload': u'',
u'upload': FileStorage(
BytesIO(six.ensure_binary(u'hello')),
filename=u'logo.png',
content_type=u'PNG'
),
u'name': u'test-group-upload'}
group_upload = Upload(u'group')
group_upload.update_data_dict(group, u'url', u'upload', u'clear_upload')
group_upload.upload()
uploads_dir = tmpdir / u'storage' / u'uploads' / u'group'
logo = uploads_dir.listdir()[0]
assert logo.basename == group[u'url']
app = make_app()
resp = app.get(u'/uploads/group/' + group[u'url'])
assert resp.status_code == 200
assert resp.body == u'hello'
| 39.465116 | 81 | 0.586329 |
ace7bd5416bd901f487267b8bf85d1b0e0ad1f2f | 494 | py | Python | bash/grading_scripts/is_file_line_greater_than_80_chars.py | cosmicexplorer/snippets | 6374a8925156bbd8805c5497506e57e4433f8675 | [
"MIT"
] | null | null | null | bash/grading_scripts/is_file_line_greater_than_80_chars.py | cosmicexplorer/snippets | 6374a8925156bbd8805c5497506e57e4433f8675 | [
"MIT"
] | null | null | null | bash/grading_scripts/is_file_line_greater_than_80_chars.py | cosmicexplorer/snippets | 6374a8925156bbd8805c5497506e57e4433f8675 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# authored by Danny McClanahan
# <daniel.d.mcclanahan@vanderbilt.edu>
import sys
import os
infile_path = os.path.expanduser(sys.argv[1])
infile = open(infile_path, "r")
line_index = 0
for line in infile:
line_index = line_index + 1
# the + 1 and - 1 are because i think it counts the newline as a character
if (len(line) > 80 + 1):
print(infile_path + ":" + str(line_index) + " ("
+ str(len(line) - 1) + " chars):" + line)
break
| 24.7 | 78 | 0.625506 |
ace7be0e0aea59eb5e6a50d0e6c7bece711b6c17 | 67,988 | py | Python | tests/test_closing.py | Chirimen-Jako/lightning | 899f5dee53203fb0fe49a0fcfc19c77454226261 | [
"MIT"
] | null | null | null | tests/test_closing.py | Chirimen-Jako/lightning | 899f5dee53203fb0fe49a0fcfc19c77454226261 | [
"MIT"
] | null | null | null | tests/test_closing.py | Chirimen-Jako/lightning | 899f5dee53203fb0fe49a0fcfc19c77454226261 | [
"MIT"
] | null | null | null | from fixtures import * # noqa: F401,F403
from flaky import flaky
from lightning import RpcError
from utils import only_one, sync_blockheight, wait_for, DEVELOPER, TIMEOUT, VALGRIND, SLOW_MACHINE, COMPAT
import os
import queue
import pytest
import re
import threading
import unittest
@unittest.skipIf(not DEVELOPER, "Too slow without --dev-bitcoind-poll")
def test_closing(node_factory, bitcoind, chainparams):
l1, l2 = node_factory.line_graph(2)
chan = l1.get_channel_scid(l2)
fee = 5430 if not chainparams['elements'] else 8955
l1.pay(l2, 200000000)
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
bitcoind.generate_block(5)
# Only wait for the channels to activate with DEVELOPER=1,
# otherwise it's going to take too long because of the missing
# --dev-fast-gossip
if DEVELOPER:
wait_for(lambda: len(l1.getactivechannels()) == 2)
wait_for(lambda: len(l2.getactivechannels()) == 2)
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
# This may either be from a local_update or an announce, so just
# check for the substring
assert 'CHANNELD_NORMAL:Funding transaction locked.' in billboard[0]
l1.rpc.close(chan)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
# And should put closing into mempool.
l1.daemon.wait_for_log('sendrawtx exit 0')
l2.daemon.wait_for_log('sendrawtx exit 0')
# Both nodes should have disabled the channel in their view
wait_for(lambda: len(l1.getactivechannels()) == 0)
wait_for(lambda: len(l2.getactivechannels()) == 0)
assert bitcoind.rpc.getmempoolinfo()['size'] == 1
# Now grab the close transaction
closetxid = only_one(bitcoind.rpc.getrawmempool(False))
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
assert billboard == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
]
bitcoind.generate_block(1)
l1.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid %s.* CONFIRMED' % closetxid)
l2.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid %s.* CONFIRMED' % closetxid)
# Make sure both nodes have grabbed their close tx funds
assert closetxid in set([o['txid'] for o in l1.rpc.listfunds()['outputs']])
assert closetxid in set([o['txid'] for o in l2.rpc.listfunds()['outputs']])
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
'ONCHAIN:Tracking mutual close transaction',
'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'
])
bitcoind.generate_block(9)
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
'ONCHAIN:Tracking mutual close transaction',
'ONCHAIN:All outputs resolved: waiting 90 more blocks before forgetting channel'
])
# Make sure both have forgotten about it
bitcoind.generate_block(90)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
# The entry in the channels table should still be there
assert l1.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1
assert l2.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1
def test_closing_while_disconnected(node_factory, bitcoind, executor):
l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True})
chan = l1.get_channel_scid(l2)
l1.pay(l2, 200000000)
l2.stop()
# The close should still be triggered afterwards.
fut = executor.submit(l1.rpc.close, chan, 0)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.start()
fut.result(TIMEOUT)
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
# And should put closing into mempool.
l1.daemon.wait_for_log('sendrawtx exit 0')
l2.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(101)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
def test_closing_id(node_factory):
"""Test closing using peer ID and full channel ID
"""
l1, l2 = node_factory.get_nodes(2)
# Close by full channel ID.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
cid = l2.rpc.listpeers()['peers'][0]['channels'][0]['channel_id']
l2.rpc.close(cid)
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
# Close by peer ID.
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l1.daemon.wait_for_log("Handed peer, entering loop")
l2.fund_channel(l1, 10**6)
pid = l1.info['id']
l2.rpc.close(pid)
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
@unittest.skipIf(VALGRIND, "Flaky under valgrind")
def test_closing_torture(node_factory, executor, bitcoind):
# We set up a fully-connected mesh of N nodes, then try
# closing them all at once.
amount = 10**6
num_nodes = 10 # => 45 channels (36 seconds on my laptop)
if VALGRIND:
num_nodes -= 4 # => 15 (135 seconds)
if SLOW_MACHINE:
num_nodes -= 1 # => 36/10 (37/95 seconds)
nodes = node_factory.get_nodes(num_nodes)
# Make sure bitcoind has plenty of utxos
bitcoind.generate_block(num_nodes)
# Give them all plenty of UTXOs, make sure they see them
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
addr = nodes[i].rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
txs = []
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
nodes[i].rpc.connect(nodes[j].info['id'], 'localhost', nodes[j].port)
txs.append(nodes[i].rpc.fundchannel(nodes[j].info['id'], amount)['txid'])
# Make sure they're all in, then lock them in.
bitcoind.generate_block(1, wait_for_mempool=txs)
# Wait for them all to be CHANNELD_NORMAL
for n in nodes:
wait_for(lambda: all(p['channels'][0]['state'] == 'CHANNELD_NORMAL' for p in n.rpc.listpeers()['peers']))
# Start closers: can take a long time under valgrind!
futures = []
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
futures.append(executor.submit(nodes[i].rpc.close, nodes[j].info['id']))
futures.append(executor.submit(nodes[j].rpc.close, nodes[i].info['id']))
# Wait for close to finish
close_txs = set()
for f in futures:
# If one side completes closing, we'll get an error here 'Peer has no active channel'
try:
close_txs.add(f.result(TIMEOUT)['txid'])
except RpcError as err:
assert err.error['message'] == 'Peer has no active channel'
# Should have one close for each open.
assert len(close_txs) == len(txs)
# Get closes confirmed
bitcoind.generate_block(100, wait_for_mempool=list(close_txs))
# And make sure they hangup.
for n in nodes:
wait_for(lambda: n.rpc.listpeers()['peers'] == [])
@unittest.skipIf(SLOW_MACHINE and VALGRIND, "slow test")
def test_closing_different_fees(node_factory, bitcoind, executor):
l1 = node_factory.get_node()
# Default feerate = 15000/7500/1000
# It will start at the second number, accepting anything above the first.
feerates = [[20000, 15000, 7400], [8000, 1001, 100]]
amounts = [0, 545999, 546000]
num_peers = len(feerates) * len(amounts)
addr = l1.rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, 1)
numfunds = len(l1.rpc.listfunds()['outputs'])
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > numfunds)
# Create them in a batch, for speed!
peers = []
for feerate in feerates:
for amount in amounts:
p = node_factory.get_node(feerates=feerate)
p.feerate = feerate
p.amount = amount
l1.rpc.connect(p.info['id'], 'localhost', p.port)
peers.append(p)
for p in peers:
p.channel = l1.rpc.fundchannel(p.info['id'], 10**6, minconf=0)['channel_id']
# Technically, this is async to fundchannel returning.
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(6)
# Now wait for them all to hit normal state, do payments
l1.daemon.wait_for_logs(['update for channel .* now ACTIVE'] * num_peers
+ ['to CHANNELD_NORMAL'] * num_peers)
for p in peers:
if p.amount != 0:
l1.pay(p, 100000000)
# Now close all channels (not unilaterally!)
closes = [executor.submit(l1.rpc.close, p.channel, 0) for p in peers]
for c in closes:
c.result(90)
# close does *not* wait for the sendrawtransaction, so do that!
# Note that since they disagree on the ideal fee, they may conflict
# (first one in will win), so we cannot look at logs, we need to
# wait for mempool.
wait_for(lambda: bitcoind.rpc.getmempoolinfo()['size'] == num_peers)
bitcoind.generate_block(1)
for p in peers:
p.daemon.wait_for_log(' to ONCHAIN')
wait_for(lambda: 'ONCHAIN:Tracking mutual close transaction' in only_one(p.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'])
l1.daemon.wait_for_logs([' to ONCHAIN'] * num_peers)
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_closing_negotiation_reconnect(node_factory, bitcoind):
disconnects = ['-WIRE_CLOSING_SIGNED',
'@WIRE_CLOSING_SIGNED',
'+WIRE_CLOSING_SIGNED']
l1 = node_factory.get_node(disconnect=disconnects, may_reconnect=True)
l2 = node_factory.get_node(may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
chan = l1.fund_channel(l2, 10**6)
l1.pay(l2, 200000000)
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
l1.rpc.close(chan)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
# And should put closing into mempool (happens async, so
# CLOSINGD_COMPLETE may come first).
l1.daemon.wait_for_logs(['sendrawtx exit 0', ' to CLOSINGD_COMPLETE'])
l2.daemon.wait_for_logs(['sendrawtx exit 0', ' to CLOSINGD_COMPLETE'])
assert bitcoind.rpc.getmempoolinfo()['size'] == 1
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_closing_specified_destination(node_factory, bitcoind, chainparams):
l1, l2, l3, l4 = node_factory.get_nodes(4)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
l1.rpc.connect(l4.info['id'], 'localhost', l4.port)
chan12 = l1.fund_channel(l2, 10**6)
chan13 = l1.fund_channel(l3, 10**6)
chan14 = l1.fund_channel(l4, 10**6)
l1.pay(l2, 100000000)
l1.pay(l3, 100000000)
l1.pay(l4, 100000000)
bitcoind.generate_block(5)
addr = chainparams['example_addr']
l1.rpc.close(chan12, None, addr)
l1.rpc.call('close', {'id': chan13, 'destination': addr})
l1.rpc.call('close', [chan14, None, addr])
l1.daemon.wait_for_logs([' to CLOSINGD_SIGEXCHANGE'] * 3)
# Both nodes should have disabled the channel in their view
wait_for(lambda: len(l1.getactivechannels()) == 0)
assert bitcoind.rpc.getmempoolinfo()['size'] == 3
# Now grab the close transaction
closetxs = {}
for i, n in enumerate([l2, l3, l4]):
billboard = only_one(l1.rpc.listpeers(n.info['id'])['peers'][0]['channels'])['status'][0]
m = re.search(r'CLOSINGD_SIGEXCHANGE.* tx:([a-f0-9]{64})', billboard)
closetxs[n] = m.group(1)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1, l2, l3, l4])
# l1 can't spent the output to addr.
for txid in closetxs.values():
assert not l1.daemon.is_in_log(r'Owning output.* \(SEGWIT\).* txid {}.* CONFIRMED'.format(txid))
# Check the txid has at least 1 confirmation
for n, txid in closetxs.items():
n.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid {}.* CONFIRMED'.format(txid))
for n in [l2, l3, l4]:
# Make sure both nodes have grabbed their close tx funds
closetx = closetxs[n]
outputs = n.rpc.listfunds()['outputs']
assert closetx in set([o['txid'] for o in outputs])
output_num2 = [o for o in outputs if o['txid'] == closetx][0]['output']
output_num1 = 0 if output_num2 == 1 else 1
# Check the another address is addr
assert addr == bitcoind.rpc.gettxout(closetx, output_num1)['scriptPubKey']['addresses'][0]
assert 1 == bitcoind.rpc.gettxout(closetx, output_num1)['confirmations']
@unittest.skipIf(not COMPAT, "needs COMPAT=1")
def test_deprecated_closing_compat(node_factory, bitcoind, chainparams):
""" The old-style close command is:
close {id} {force} {timeout}
"""
l1, l2 = node_factory.get_nodes(2, opts=[{'allow-deprecated-apis': True}, {}])
addr = chainparams['example_addr']
nodeid = l2.info['id']
l1.rpc.check(command_to_check='close', id=nodeid)
# New-style
l1.rpc.check(command_to_check='close', id=nodeid, unilateraltimeout=10, destination=addr)
l1.rpc.check(command_to_check='close', id=nodeid, unilateraltimeout=0)
l1.rpc.check(command_to_check='close', id=nodeid, destination=addr)
# Old-style
l1.rpc.check(command_to_check='close', id=nodeid, force=False)
l1.rpc.check(command_to_check='close', id=nodeid, force=False, timeout=10)
l1.rpc.check(command_to_check='close', id=nodeid, timeout=10)
l1.rpc.call('check', ['close', nodeid])
# Array(new-style)
l1.rpc.call('check', ['close', nodeid, 10])
l1.rpc.call('check', ['close', nodeid, 0, addr])
l1.rpc.call('check', ['close', nodeid, None, addr])
# Array(old-style)
l1.rpc.call('check', ['close', nodeid, True, 10])
l1.rpc.call('check', ['close', nodeid, False])
l1.rpc.call('check', ['close', nodeid, None, 10])
# Not new-style nor old-style
with pytest.raises(RpcError, match=r'Expected unilerataltimeout to be a number'):
l1.rpc.call('check', ['close', nodeid, "Given enough eyeballs, all bugs are shallow."])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_penalty_inhtlc(node_factory, bitcoind, executor, chainparams):
"""Test penalty transaction with an incoming HTLC"""
# We suppress each one after first commit; HTLC gets added not fulfilled.
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'], may_fail=True, feerates=(7500, 7500, 7500), allow_broken_log=True)
l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'])
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
# Now, this will get stuck due to l1 commit being disabled..
t = executor.submit(l1.pay, l2, 100000000)
assert len(l1.getactivechannels()) == 2
assert len(l2.getactivechannels()) == 2
# They should both have commitments blocked now.
l1.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
l2.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
# Make sure l1 got l2's commitment to the HTLC, and sent to master.
l1.daemon.wait_for_log('got commitsig')
# Take our snapshot.
tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Let them continue
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Should fulfill.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Payment should now complete.
t.result(timeout=10)
# Now we really mess things up!
bitcoind.rpc.sendrawtransaction(tx)
bitcoind.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
# FIXME: l1 should try to stumble along!
wait_for(lambda: len(l2.getactivechannels()) == 0)
# l2 should spend all of the outputs (except to-us).
# Could happen in any order, depending on commitment tx.
needle = l2.daemon.logsearch_start
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_OUTPUT_TO_THEM')
l2.daemon.logsearch_start = needle
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/THEIR_HTLC')
# FIXME: test HTLC tx race!
# 100 blocks later, all resolved.
bitcoind.generate_block(100)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
outputs = l2.rpc.listfunds()['outputs']
assert [o['status'] for o in outputs] == ['confirmed'] * 2
# Allow some lossage for fees.
slack = 27000 if chainparams['elements'] else 15000
assert sum(o['value'] for o in outputs) < 10**6
assert sum(o['value'] for o in outputs) > 10**6 - slack
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_penalty_outhtlc(node_factory, bitcoind, executor, chainparams):
"""Test penalty transaction with an outgoing HTLC"""
# First we need to get funds to l2, so suppress after second.
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED*3-nocommit'], may_fail=True, feerates=(7500, 7500, 7500), allow_broken_log=True)
l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED*3-nocommit'])
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
# Move some across to l2.
l1.pay(l2, 200000000)
assert not l1.daemon.is_in_log('=WIRE_COMMITMENT_SIGNED')
assert not l2.daemon.is_in_log('=WIRE_COMMITMENT_SIGNED')
# Now, this will get stuck due to l1 commit being disabled..
t = executor.submit(l2.pay, l1, 100000000)
# Make sure we get signature from them.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_ADD_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_COMMITMENT_SIGNED')
# They should both have commitments blocked now.
l1.daemon.wait_for_log('dev_disconnect: =WIRE_COMMITMENT_SIGNED')
l2.daemon.wait_for_log('dev_disconnect: =WIRE_COMMITMENT_SIGNED')
# Make sure both sides got revoke_and_ack for that commitment.
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Take our snapshot.
tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Let them continue
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Thread should complete.
t.result(timeout=10)
# Make sure both sides got revoke_and_ack for final.
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Now we really mess things up!
bitcoind.rpc.sendrawtransaction(tx)
bitcoind.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
# FIXME: l1 should try to stumble along!
# l2 should spend all of the outputs (except to-us).
# Could happen in any order, depending on commitment tx.
needle = l2.daemon.logsearch_start
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_OUTPUT_TO_THEM')
l2.daemon.logsearch_start = needle
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/OUR_HTLC')
l2.daemon.logsearch_start = needle
l2.daemon.wait_for_log('Ignoring output.*: THEIR_REVOKED_UNILATERAL/OUTPUT_TO_US')
# FIXME: test HTLC tx race!
# 100 blocks later, all resolved.
bitcoind.generate_block(100)
wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)
outputs = l2.rpc.listfunds()['outputs']
assert [o['status'] for o in outputs] == ['confirmed'] * 3
# Allow some lossage for fees.
slack = 27000 if chainparams['elements'] else 15000
assert sum(o['value'] for o in outputs) < 10**6
assert sum(o['value'] for o in outputs) > 10**6 - slack
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_first_commit(node_factory, bitcoind):
"""Onchain handling where funder immediately drops to chain"""
# HTLC 1->2, 1 fails just after funding.
disconnects = ['+WIRE_FUNDING_LOCKED', 'permfail']
l1 = node_factory.get_node(disconnect=disconnects)
# Make locktime different, as we once had them reversed!
l2 = node_factory.get_node(options={'watchtime-blocks': 10})
l1.fundwallet(10**7)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 10**6)
l1.daemon.wait_for_log('sendrawtx exit 0')
l1.bitcoin.generate_block(1)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.daemon.wait_for_log('sendrawtx exit 0')
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# 10 later, l1 should collect its to-self payment.
bitcoind.generate_block(10)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 94 later, l2 is done.
bitcoind.generate_block(94)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(6)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_unwatch(node_factory, bitcoind):
"""Onchaind should not watch random spends"""
l1, l2 = node_factory.line_graph(2)
l1.pay(l2, 200000000)
l1.rpc.dev_fail(l2.info['id'])
l1.daemon.wait_for_log('Failing due to dev-fail command')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# 10 later, l1 should collect its to-self payment.
bitcoind.generate_block(10)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# First time it sees it, onchaind cares.
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Resolved OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by our proposal '
'OUR_DELAYED_RETURN_TO_WALLET')
# Now test unrelated onchain churn.
# Daemon gets told about wallet; says it doesn't care.
l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], 'all')
bitcoind.generate_block(1)
l1.daemon.wait_for_log("but we don't care")
# And lightningd should respect that!
assert not l1.daemon.is_in_log("Can't unwatch txid")
# So these should not generate further messages
for i in range(5):
l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], 'all')
bitcoind.generate_block(1)
# Make sure it digests the block
sync_blockheight(bitcoind, [l1])
# We won't see this again.
assert not l1.daemon.is_in_log("but we don't care",
start=l1.daemon.logsearch_start)
# Note: for this test we leave onchaind running, so we can detect
# any leaks!
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchaind_replay(node_factory, bitcoind):
disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
options = {'watchtime-blocks': 201, 'cltv-delta': 101}
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options=options, disconnect=disconnects, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node(options=options)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
rhash = l2.rpc.invoice(10**8, 'onchaind_replay', 'desc')['payment_hash']
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 101,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash)
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(1)
# Wait for nodes to notice the failure, this seach needle is after the
# DB commit so we're sure the tx entries in onchaindtxs have been added
l1.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")
l2.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")
# We should at least have the init tx now
assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0
assert len(l2.db_query("SELECT * FROM channeltxs;")) > 0
# Generate some blocks so we restart the onchaind from DB (we rescan
# last_height - 100)
bitcoind.generate_block(100)
sync_blockheight(bitcoind, [l1, l2])
# l1 should still have a running onchaind
assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0
l2.rpc.stop()
l1.restart()
# Can't wait for it, it's after the "Server started" wait in restart()
assert l1.daemon.is_in_log(r'Restarting onchaind for channel')
# l1 should still notice that the funding was spent and that we should react to it
l1.daemon.wait_for_log("Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET")
sync_blockheight(bitcoind, [l1])
bitcoind.generate_block(10)
sync_blockheight(bitcoind, [l1])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_dust_out(node_factory, bitcoind, executor):
"""Onchain handling of outgoing dust htlcs (they should fail)"""
# HTLC 1->2, 1 fails after it's irrevocably committed
disconnects = ['@WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(disconnect=disconnects, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
# Must be dust!
rhash = l2.rpc.invoice(1, 'onchain_dust_out', 'desc')['payment_hash']
routestep = {
'msatoshi': 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash)
payfuture = executor.submit(l1.rpc.waitsendpay, rhash)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
# It should fail.
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: missing in commitment tx'):
payfuture.result(5)
# Retry payment, this should fail (and, as a side-effect, tickle a
# bug).
with pytest.raises(RpcError, match=r'WIRE_UNKNOWN_NEXT_PEER'):
l1.rpc.sendpay([routestep], rhash)
# 6 later, l1 should collect its to-self payment.
bitcoind.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 94 later, l2 is done.
bitcoind.generate_block(94)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Restart l1, it should not crash!
l1.restart()
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(6)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_dust_out')['invoices'])['status'] == 'unpaid'
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_timeout(node_factory, bitcoind, executor):
"""Onchain handling of outgoing failed htlcs"""
# HTLC 1->2, 1 fails just after it's irrevocably committed
disconnects = ['+WIRE_REVOKE_AND_ACK*3', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(disconnect=disconnects, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash)
with pytest.raises(RpcError):
l1.rpc.waitsendpay(rhash)
# Make sure CLTVs are different, in case it confuses onchaind.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1])
# Second one will cause drop to chain.
l1.rpc.sendpay([routestep], rhash)
payfuture = executor.submit(l1.rpc.waitsendpay, rhash)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 6 blocks'])
bitcoind.generate_block(4)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
bitcoind.generate_block(1)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
# It should fail.
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: timed out'):
payfuture.result(5)
# 2 later, l1 spends HTLC (5 blocks total).
bitcoind.generate_block(2)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
# 89 later, l2 is done.
bitcoind.generate_block(89)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(10)
sync_blockheight(bitcoind, [l1])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_middleman(node_factory, bitcoind):
# HTLC 1->2->3, 1->2 goes down after 2 gets preimage from 3.
disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
l1 = node_factory.get_node()
l2 = node_factory.get_node(disconnect=disconnects)
l3 = node_factory.get_node()
# l2 connects to both, so l1 can't reconnect and thus l2 drops to chain
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
l2.fund_channel(l1, 10**6)
c23 = l2.fund_channel(l3, 10**6)
# Make sure routes finalized.
bitcoind.generate_block(5)
l1.wait_channel_active(c23)
# Give l1 some money to play with.
l2.pay(l1, 2 * 10**8)
# Must be bigger than dust!
rhash = l3.rpc.invoice(10**8, 'middleman', 'desc')['payment_hash']
route = l1.rpc.getroute(l3.info['id'], 10**8, 1)["route"]
assert len(route) == 2
q = queue.Queue()
def try_pay():
try:
l1.rpc.sendpay(route, rhash)
l1.rpc.waitsendpay(rhash)
q.put(None)
except Exception as err:
q.put(err)
t = threading.Thread(target=try_pay)
t.daemon = True
t.start()
# l2 will drop to chain.
l2.daemon.wait_for_log('sendrawtx exit 0')
l1.bitcoin.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('OUR_UNILATERAL/THEIR_HTLC')
# l2 should fulfill HTLC onchain, and spend to-us (any order)
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
# Payment should succeed.
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
err = q.get(timeout=10)
if err:
print("Got err from sendpay thread")
raise err
t.join(timeout=1)
assert not t.is_alive()
# Three more, l2 can spend to-us.
bitcoind.generate_block(3)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# One more block, HTLC tx is now spendable.
l1.bitcoin.generate_block(1)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
# 100 blocks after last spend, l2 should be done.
l1.bitcoin.generate_block(100)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_feechange(node_factory, bitcoind, executor):
"""Onchain handling when we restart with different fees"""
# HTLC 1->2, 2 fails just after they're both irrevocably committed
# We need 2 to drop to chain, because then 1's HTLC timeout tx
# is generated on-the-fly, and is thus feerate sensitive.
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
l1 = node_factory.get_node(may_reconnect=True)
l2 = node_factory.get_node(disconnect=disconnects,
may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
executor.submit(l1.rpc.sendpay, [routestep], rhash)
# l2 will drop to chain.
l2.daemon.wait_for_log('permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US .* after 6 blocks')
bitcoind.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# Make sure that gets included.
bitcoind.generate_block(1)
# Now we restart with different feerates.
l1.stop()
l1.daemon.cmd_line.append('--override-fee-rates=20000/9000/2000')
l1.start()
# We recognize different proposal as ours.
l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
# We use 3 blocks for "reasonable depth", so add two more
bitcoind.generate_block(2)
# Note that the very similar test_onchain_timeout looks for a
# different string: that's because it sees the JSONRPC response,
# and due to the l1 restart, there is none here.
l1.daemon.wait_for_log('WIRE_PERMANENT_CHANNEL_FAILURE')
# 90 later, l2 is done
bitcoind.generate_block(89)
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 7 blocks and l1 should be done.
bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l1])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev-set-fees")
def test_onchain_all_dust(node_factory, bitcoind, executor):
"""Onchain handling when we reduce output to all dust"""
# HTLC 1->2, 2 fails just after they're both irrevocably committed
# We need 2 to drop to chain, because then 1's HTLC timeout tx
# is generated on-the-fly, and is thus feerate sensitive.
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**7 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
executor.submit(l1.rpc.sendpay, [routestep], rhash)
# l2 will drop to chain.
l2.daemon.wait_for_log('permfail')
l2.wait_for_channel_onchain(l1.info['id'])
# Make l1's fees really high (and wait for it to exceed 50000)
l1.set_feerates((100000, 100000, 100000))
l1.daemon.wait_for_log('Feerate estimate for normal set to [56789][0-9]{4}')
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by IGNORING_TINY_PAYMENT .* after 6 blocks')
bitcoind.generate_block(5)
l1.wait_for_onchaind_broadcast('IGNORING_TINY_PAYMENT',
'THEIR_UNILATERAL/OUR_HTLC')
l1.daemon.wait_for_log('Ignoring output 0 of .*: THEIR_UNILATERAL/OUR_HTLC')
# 100 deep and l2 forgets.
bitcoind.generate_block(93)
sync_blockheight(bitcoind, [l1, l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# l1 does not wait for ignored payment.
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_fail")
def test_onchain_different_fees(node_factory, bitcoind, executor):
"""Onchain handling when we've had a range of fees"""
l1, l2 = node_factory.line_graph(2, fundchannel=True, fundamount=10**7,
opts={'may_reconnect': True})
l2.rpc.dev_ignore_htlcs(id=l1.info['id'], ignore=True)
p1 = executor.submit(l1.pay, l2, 1000000000)
l1.daemon.wait_for_log('htlc 0: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')
l1.set_feerates((16000, 7500, 3750))
p2 = executor.submit(l1.pay, l2, 900000000)
l1.daemon.wait_for_log('htlc 1: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')
# Restart with different feerate for second HTLC.
l1.set_feerates((5000, 5000, 3750))
l1.restart()
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')
p3 = executor.submit(l1.pay, l2, 800000000)
l1.daemon.wait_for_log('htlc 2: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')
# Drop to chain
l1.rpc.dev_fail(l2.info['id'])
l1.wait_for_channel_onchain(l2.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Both sides should have correct feerate
assert l1.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
'min_possible_feerate': 5000,
'max_possible_feerate': 16000
}]
assert l2.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
'min_possible_feerate': 5000,
'max_possible_feerate': 16000
}]
bitcoind.generate_block(5)
# Three HTLCs, and one for the to-us output.
l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 4)
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
with pytest.raises(Exception):
p1.result(10)
with pytest.raises(Exception):
p2.result(10)
with pytest.raises(Exception):
p3.result(10)
# Two more for HTLC timeout tx to be spent.
bitcoind.generate_block(2)
l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 3)
# Now, 100 blocks it should be done.
bitcoind.generate_block(100)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_new_commit(node_factory, bitcoind, executor):
# Test case where we have two possible commits: it will use new one.
disconnects = ['-WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
# This will fail at l2's end.
t = executor.submit(l1.pay, l2, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, new commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')
# OK, time out HTLC.
bitcoind.generate_block(5)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
l2.daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
t.cancel()
# Now, 100 blocks it should be done.
bitcoind.generate_block(100)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
def setup_multihtlc_test(node_factory, bitcoind):
# l1 -> l2 -> l3 -> l4 -> l5 -> l6 -> l7
# l1 and l7 ignore and HTLCs they're sent.
# For each direction, we create these HTLCs with same payment_hash:
# 1 failed (CLTV1)
# 1 failed (CLTV2)
# 2 live (CLTV2)
# 1 live (CLTV3)
nodes = node_factory.line_graph(7, wait_for_announce=True,
opts={'dev-no-reconnect': None,
'may_reconnect': True})
# Balance by pushing half the funds.
b11 = nodes[-1].rpc.invoice(10**9 // 2, '1', 'balancer')['bolt11']
nodes[0].rpc.pay(b11)
nodes[0].rpc.dev_ignore_htlcs(id=nodes[1].info['id'], ignore=True)
nodes[-1].rpc.dev_ignore_htlcs(id=nodes[-2].info['id'], ignore=True)
preimage = "0" * 64
h = nodes[0].rpc.invoice(msatoshi=10**8, label='x', description='desc',
preimage=preimage)['payment_hash']
nodes[-1].rpc.invoice(msatoshi=10**8, label='x', description='desc',
preimage=preimage)['payment_hash']
# First, the failed attempts (paying wrong node). CLTV1
r = nodes[0].rpc.getroute(nodes[-2].info['id'], 10**8, 1)["route"]
nodes[0].rpc.sendpay(r, h)
with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
nodes[0].rpc.waitsendpay(h)
r = nodes[-1].rpc.getroute(nodes[1].info['id'], 10**8, 1)["route"]
nodes[-1].rpc.sendpay(r, h)
with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
nodes[-1].rpc.waitsendpay(h)
# Now increment CLTV -> CLTV2
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
# Now, the live attempts with CLTV2 (blackholed by end nodes)
r = nodes[0].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[0].rpc.sendpay(r, h)
r = nodes[-1].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-1].rpc.sendpay(r, h)
# We send second HTLC from different node, since they refuse to send
# multiple with same hash.
r = nodes[1].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[1].rpc.sendpay(r, h)
r = nodes[-2].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-2].rpc.sendpay(r, h)
# Now increment CLTV -> CLTV3.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
r = nodes[2].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[2].rpc.sendpay(r, h)
r = nodes[-3].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-3].rpc.sendpay(r, h)
# Make sure HTLCs have reached the end.
nodes[0].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)
nodes[-1].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)
return h, nodes
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_ignore_htlcs")
@unittest.skipIf(SLOW_MACHINE and VALGRIND, "slow test")
def test_onchain_multihtlc_our_unilateral(node_factory, bitcoind):
"""Node pushes a channel onchain with multiple HTLCs with same payment_hash """
h, nodes = setup_multihtlc_test(node_factory, bitcoind)
mid = len(nodes) // 2
for i in range(len(nodes) - 1):
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
# Now midnode goes onchain with n+1 channel.
nodes[mid].rpc.dev_fail(nodes[mid + 1].info['id'])
nodes[mid].wait_for_channel_onchain(nodes[mid + 1].info['id'])
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log(' to ONCHAIN')
nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')
# Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
# In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
# TODO Remove our reliance on HTLCs failing on startup and the need for
# this plugin
nodes[0].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[-1].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[0].restart()
nodes[-1].restart()
# We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)
# Wait for HTLCs to stabilize.
nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
# After at depth 5, midnode will spend its own to-self output.
bitcoind.generate_block(4)
nodes[mid].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# The three outgoing HTLCs time out at 21, 21 and 22 blocks.
bitcoind.generate_block(16)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# And three more for us to consider them all settled.
bitcoind.generate_block(3)
# Now, those nodes should have correctly failed the HTLCs
for n in nodes[:mid - 1]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# Other timeouts are 27,27,28 blocks.
bitcoind.generate_block(2)
nodes[mid].daemon.wait_for_logs(['Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC'] * 2)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# Depth 3 to consider it settled.
bitcoind.generate_block(3)
for n in nodes[mid + 1:]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# At depth 100 it's all done (we didn't bother waiting for mid+1's
# spends, so that might still be going)
bitcoind.generate_block(97)
nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
# No other channels should have failed.
for i in range(len(nodes) - 1):
if i != mid:
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_ignore_htlcs")
@unittest.skipIf(SLOW_MACHINE and VALGRIND, "slow test")
def test_onchain_multihtlc_their_unilateral(node_factory, bitcoind):
"""Node pushes a channel onchain with multiple HTLCs with same payment_hash """
h, nodes = setup_multihtlc_test(node_factory, bitcoind)
mid = len(nodes) // 2
for i in range(len(nodes) - 1):
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
# Now midnode+1 goes onchain with midnode channel.
nodes[mid + 1].rpc.dev_fail(nodes[mid].info['id'])
nodes[mid + 1].wait_for_channel_onchain(nodes[mid].info['id'])
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log(' to ONCHAIN')
nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')
# Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
# In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
# TODO Remove our reliance on HTLCs failing on startup and the need for
# this plugin
nodes[0].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[-1].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[0].restart()
nodes[-1].restart()
# We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)
# Wait for HTLCs to stabilize.
nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
# At depth 5, midnode+1 will spend its own to-self output.
bitcoind.generate_block(4)
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET')
# The three outgoing HTLCs time out at depth 21, 21 and 22 blocks.
bitcoind.generate_block(16)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# At depth 3 we consider them all settled.
bitcoind.generate_block(3)
# Now, those nodes should have correctly failed the HTLCs
for n in nodes[:mid - 1]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# Other timeouts are at depths 27,27,28 blocks.
bitcoind.generate_block(2)
nodes[mid].daemon.wait_for_logs(['Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC'] * 2)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log('Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC')
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# At depth 3 we consider them all settled.
bitcoind.generate_block(3)
for n in nodes[mid + 1:]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# At depth 5, mid+1 can spend HTLC_TIMEOUT_TX output.
bitcoind.generate_block(1)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
bitcoind.generate_block(1)
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
# At depth 100 they're all done.
bitcoind.generate_block(100)
nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
nodes[mid + 1].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
# No other channels should have failed.
for i in range(len(nodes) - 1):
if i != mid:
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_htlc_in(node_factory, bitcoind, executor):
# Test case where we fail with unsettled incoming HTLC.
disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
# This will fail at l2's end.
t = executor.submit(l1.pay, l2, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')
# l2 then gets preimage, uses it instead of ignoring
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
bitcoind.generate_block(1)
# OK, l1 sees l2 fulfill htlc.
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
bitcoind.generate_block(5)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
t.cancel()
# Now, 100 blocks it should be done.
bitcoind.generate_block(95)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(5)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_htlc_out(node_factory, bitcoind, executor):
# Test case where we fail with unsettled outgoing HTLC.
disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
l1 = node_factory.get_node(options={'dev-no-reconnect': None})
# Feerates identical so we don't get gratuitous commit to update them
l2 = node_factory.get_node(disconnect=disconnects, feerates=(7500, 7500, 7500))
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l2.daemon.wait_for_log('openingd-chan#1: Handed peer, entering loop'.format(l1.info['id']))
l2.fund_channel(l1, 10**6)
# This will fail at l2's end.
t = executor.submit(l2.pay, l1, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_logs([
'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX \\(.*\\) after 6 blocks',
'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks'
])
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
# l1 then gets preimage, uses it instead of ignoring
l1.wait_for_onchaind_broadcast('THEIR_HTLC_FULFILL_TO_US',
'THEIR_UNILATERAL/THEIR_HTLC')
# l2 sees l1 fulfill tx.
bitcoind.generate_block(1)
l2.daemon.wait_for_log('OUR_UNILATERAL/OUR_HTLC gave us preimage')
t.cancel()
# l2 can send OUR_DELAYED_RETURN_TO_WALLET after 3 more blocks.
bitcoind.generate_block(3)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# Now, 100 blocks they should be done.
bitcoind.generate_block(95)
sync_blockheight(bitcoind, [l1, l2])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(3)
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail(node_factory, bitcoind):
l1, l2 = node_factory.line_graph(2)
# The funding change should be confirmed and our only output
assert [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed']
l1.pay(l2, 200000000)
# Make sure l2 has received sig with 0 htlcs!
l2.daemon.wait_for_log('Received commit_sig with 1 htlc sigs')
l2.daemon.wait_for_log('Received commit_sig with 0 htlc sigs')
# Make sure l1 has final revocation.
l1.daemon.wait_for_log('Sending commit_sig with 1 htlc sigs')
l1.daemon.wait_for_log('Sending commit_sig with 0 htlc sigs')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# We fail l2, so l1 will reconnect to it.
l2.rpc.dev_fail(l1.info['id'])
l2.daemon.wait_for_log('Failing due to dev-fail command')
l2.wait_for_channel_onchain(l1.info['id'])
assert l1.bitcoin.rpc.getmempoolinfo()['size'] == 1
# Now grab the close transaction
closetxid = only_one(l1.bitcoin.rpc.getrawmempool(False))
# l2 will send out tx (l1 considers it a transient error)
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET (.*) after 5 blocks')
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
== ['ONCHAIN:Tracking their unilateral close',
'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'])
def check_billboard():
billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
return (
len(billboard) == 2
and billboard[0] == 'ONCHAIN:Tracking our own unilateral close'
and re.fullmatch(r'ONCHAIN:.* outputs unresolved: in 4 blocks will spend DELAYED_OUTPUT_TO_US \(.*:0\) using OUR_DELAYED_RETURN_TO_WALLET', billboard[1])
)
wait_for(check_billboard)
# Now, mine 4 blocks so it sends out the spending tx.
bitcoind.generate_block(4)
# onchaind notes to-local payment immediately.
assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])
# Restart, should still be confirmed (fails: unwinding blocks erases
# the confirmation, and we don't re-make it).
l1.restart()
wait_for(lambda: (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']]))
# It should send the to-wallet tx.
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 100 after l1 sees tx, it should be done.
bitcoind.generate_block(95)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'] == [
'ONCHAIN:Tracking our own unilateral close',
'ONCHAIN:All outputs resolved: waiting 5 more blocks before forgetting channel'
])
# Now, 100 blocks l2 should be done.
bitcoind.generate_block(5)
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
# Only l1 has a direct output since all of l2's outputs are respent (it
# failed). Also the output should now be listed as confirmed since we
# generated some more blocks.
assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])
# Check that the all the addresses match what we generated ourselves:
for o in l1.rpc.listfunds()['outputs']:
txout = bitcoind.rpc.gettxout(o['txid'], o['output'])
addr = txout['scriptPubKey']['addresses'][0]
assert(addr == o['address'])
addr = l1.bitcoin.getnewaddress()
l1.rpc.withdraw(addr, "all")
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_shutdown(node_factory):
# Fail, in that it will exit before cleanup.
l1 = node_factory.get_node(may_fail=True)
if not VALGRIND:
leaks = l1.rpc.dev_memleak()['leaks']
if len(leaks):
raise Exception("Node {} has memory leaks: {}"
.format(l1.daemon.lightning_dir, leaks))
l1.rpc.stop()
@flaky
@unittest.skipIf(not DEVELOPER, "needs to set upfront_shutdown_script")
def test_option_upfront_shutdown_script(node_factory, bitcoind, executor):
l1 = node_factory.get_node(start=False)
# Insist on upfront script we're not going to match.
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = "76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac"
l1.start()
l2 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 1000000, False)
# This will block, as l12 will send an error but l2 will retry.
fut = executor.submit(l1.rpc.close, l2.info['id'])
# l2 will close unilaterally when it dislikes shutdown script.
l1.daemon.wait_for_log(r'scriptpubkey .* is not as agreed upfront \(76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac\)')
# Clear channel.
wait_for(lambda: len(bitcoind.rpc.getrawmempool()) != 0)
bitcoind.generate_block(1)
fut.result(TIMEOUT)
wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN'])
wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN'])
# Works when l2 closes channel, too.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 1000000, False)
l2.rpc.close(l1.info['id'])
# l2 will close unilaterally when it dislikes shutdown script.
l1.daemon.wait_for_log(r'scriptpubkey .* is not as agreed upfront \(76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac\)')
# Clear channel.
wait_for(lambda: len(bitcoind.rpc.getrawmempool()) != 0)
bitcoind.generate_block(1)
wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN', 'ONCHAIN'])
wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN', 'ONCHAIN'])
# Figure out what address it will try to use.
keyidx = int(l1.db_query("SELECT intval FROM vars WHERE name='bip32_max_index';")[0]['intval'])
# Expect 1 for change address, 1 for the channel final address,
# which are discarded as the 'scratch' tx that the fundchannel
# plugin makes, plus 1 for the funding address of the actual
# funding tx.
addr = l1.rpc.call('dev-listaddrs', [keyidx + 3])['addresses'][-1]
# Now, if we specify upfront and it's OK, all good.
l1.stop()
# We need to prepend the segwit version (0) and push opcode (14).
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = '0014' + addr['bech32_redeemscript']
l1.start()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 1000000)
l1.rpc.close(l2.info['id'])
wait_for(lambda: sorted([c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']]) == ['CLOSINGD_COMPLETE', 'ONCHAIN', 'ONCHAIN'])
| 40.73577 | 165 | 0.677796 |
ace7be41ebd3353ee52a6948c1ad240096707ebe | 813 | py | Python | solar_index/__init__.py | jklenzing/solar_index | 1706ce1541c9e2ae59f1b5d61bbdd9540b8a1461 | [
"BSD-3-Clause"
] | 1 | 2018-02-08T19:05:37.000Z | 2018-02-08T19:05:37.000Z | solar_index/__init__.py | jklenzing/solar_index | 1706ce1541c9e2ae59f1b5d61bbdd9540b8a1461 | [
"BSD-3-Clause"
] | 15 | 2018-02-13T18:28:42.000Z | 2021-05-12T16:58:47.000Z | solar_index/__init__.py | jklenzing/solar_index | 1706ce1541c9e2ae59f1b5d61bbdd9540b8a1461 | [
"BSD-3-Clause"
] | 2 | 2018-02-13T18:10:44.000Z | 2020-10-20T06:35:30.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2017, JK & AGB
# Full license can be found in License.md
# -----------------------------------------------------------------------------
"""
solar_index
-----------
Solar index information, currently focused on TIMED/SEE EUV spectra
Classes
---------------------------------------------------------------------------
SolarIndex Solar Index data
"""
from os import path
__version__ = str('0.2-alpha')
_ROOT = path.abspath(path.dirname(__file__))
_data_dir = path.join(_ROOT, "data")
try:
from solar_index import (spectral_data, omni_data, utils)
from solar_index.spectral_data import EUVspectra
from solar_index.omni_data import OMNIvals
except ImportError as err:
raise ImportError('problem importing solar_index: ' + str(err))
| 29.035714 | 79 | 0.586716 |
ace7bf07521697066258e7657c106aa172735209 | 1,558 | py | Python | examples/AdCampaignPostAdsetUpdateCpa.py | MyrikLD/facebook-python-business-sdk | a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814 | [
"CNRI-Python"
] | 576 | 2018-05-01T19:09:32.000Z | 2022-03-31T11:45:11.000Z | examples/AdCampaignPostAdsetUpdateCpa.py | MyrikLD/facebook-python-business-sdk | a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814 | [
"CNRI-Python"
] | 217 | 2018-05-03T07:31:59.000Z | 2022-03-29T14:19:52.000Z | examples/AdCampaignPostAdsetUpdateCpa.py | MyrikLD/facebook-python-business-sdk | a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814 | [
"CNRI-Python"
] | 323 | 2018-05-01T20:32:26.000Z | 2022-03-29T07:05:12.000Z | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.adset import AdSet
from facebook_business.api import FacebookAdsApi
access_token = '<ACCESS_TOKEN>'
app_secret = '<APP_SECRET>'
app_id = '<APP_ID>'
id = '<AD_SET_ID>'
FacebookAdsApi.init(access_token=access_token)
fields = [
]
params = {
'billing_event': 'IMPRESSIONS',
'optimization_goal': 'LINK_CLICKS',
'bid_amount': '200',
'targeting': {'geo_locations':{'countries':['US']},'facebook_positions':['feed']},
}
print AdSet(id).update(
fields=fields,
params=params,
) | 38 | 84 | 0.7638 |
ace7bf0f90e82c52d8335165a91236ac973b9e75 | 803 | py | Python | core/manage.py | Epimetrics-Inc/project_deathstar | 4499348fe06b824813c0108913f5566e7de096c8 | [
"MIT"
] | null | null | null | core/manage.py | Epimetrics-Inc/project_deathstar | 4499348fe06b824813c0108913f5566e7de096c8 | [
"MIT"
] | null | null | null | core/manage.py | Epimetrics-Inc/project_deathstar | 4499348fe06b824813c0108913f5566e7de096c8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import os
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "core.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 33.458333 | 77 | 0.6401 |
ace7bf310f6c7574ea5db7a73a83cad595a31608 | 16,775 | py | Python | session8/complete/network.py | zhiweih/pb-exercises | c5e64075c47503a40063aa836c06a452af14246d | [
"BSD-2-Clause"
] | 153 | 2017-09-27T01:10:19.000Z | 2022-03-17T12:13:59.000Z | session8/complete/network.py | zhiweih/pb-exercises | c5e64075c47503a40063aa836c06a452af14246d | [
"BSD-2-Clause"
] | 3 | 2018-11-10T20:04:13.000Z | 2022-02-15T23:12:53.000Z | session8/complete/network.py | zhiweih/pb-exercises | c5e64075c47503a40063aa836c06a452af14246d | [
"BSD-2-Clause"
] | 85 | 2017-10-09T16:18:00.000Z | 2022-02-09T14:21:08.000Z | import socket
import time
from io import BytesIO
from random import randint
from unittest import TestCase
from block import Block
from helper import (
hash256,
decode_base58,
encode_varint,
int_to_little_endian,
little_endian_to_int,
read_varint,
)
from tx import Tx
TX_DATA_TYPE = 1
BLOCK_DATA_TYPE = 2
FILTERED_BLOCK_DATA_TYPE = 3
COMPACT_BLOCK_DATA_TYPE = 4
NETWORK_MAGIC = b'\xf9\xbe\xb4\xd9'
TESTNET_NETWORK_MAGIC = b'\x0b\x11\x09\x07'
class NetworkEnvelope:
def __init__(self, command, payload, testnet=False):
self.command = command
self.payload = payload
if testnet:
self.magic = TESTNET_NETWORK_MAGIC
else:
self.magic = NETWORK_MAGIC
def __repr__(self):
return '{self.command.decode("ascii"")}: {self.payload.hex()}'
@classmethod
def parse(cls, s, testnet=False):
'''Takes a stream and creates a NetworkEnvelope'''
# check the network magic
magic = s.read(4)
if magic == b'':
raise RuntimeError('Connection reset!')
if testnet:
expected_magic = TESTNET_NETWORK_MAGIC
else:
expected_magic = NETWORK_MAGIC
if magic != expected_magic:
raise RuntimeError(f'magic is not right {magic.hx()} vs {expected_magic.hex()}')
# command 12 bytes, strip the trailing 0's using .strip(b'\x00')
command = s.read(12).strip(b'\x00')
# payload length 4 bytes, little endian
payload_length = little_endian_to_int(s.read(4))
# checksum 4 bytes, first four of hash256 of payload
checksum = s.read(4)
# payload is of length payload_length
payload = s.read(payload_length)
# verify checksum
calculated_checksum = hash256(payload)[:4]
if calculated_checksum != checksum:
raise RuntimeError('checksum does not match')
return cls(command, payload, testnet=testnet)
def serialize(self):
'''Returns the byte serialization of the entire network message'''
# add the network magic using self.magic
result = self.magic
# command 12 bytes, fill leftover with b'\x00' * (12 - len(self.command))
result += self.command + b'\x00' * (12 - len(self.command))
# payload length 4 bytes, little endian
result += int_to_little_endian(len(self.payload), 4)
# checksum 4 bytes, first four of hash256 of payload
result += hash256(self.payload)[:4]
# payload
result += self.payload
return result
def stream(self):
'''Returns a stream for parsing the payload'''
return BytesIO(self.payload)
class NetworkEnvelopeTest(TestCase):
def test_parse(self):
msg = bytes.fromhex('f9beb4d976657261636b000000000000000000005df6e0e2')
stream = BytesIO(msg)
envelope = NetworkEnvelope.parse(stream)
self.assertEqual(envelope.command, b'verack')
self.assertEqual(envelope.payload, b'')
msg = bytes.fromhex('f9beb4d976657273696f6e0000000000650000005f1a69d2721101000100000000000000bc8f5e5400000000010000000000000000000000000000000000ffffc61b6409208d010000000000000000000000000000000000ffffcb0071c0208d128035cbc97953f80f2f5361746f7368693a302e392e332fcf05050001')
stream = BytesIO(msg)
envelope = NetworkEnvelope.parse(stream)
self.assertEqual(envelope.command, b'version')
self.assertEqual(envelope.payload, msg[24:])
def test_serialize(self):
msg = bytes.fromhex('f9beb4d976657261636b000000000000000000005df6e0e2')
stream = BytesIO(msg)
envelope = NetworkEnvelope.parse(stream)
self.assertEqual(envelope.serialize(), msg)
msg = bytes.fromhex('f9beb4d976657273696f6e0000000000650000005f1a69d2721101000100000000000000bc8f5e5400000000010000000000000000000000000000000000ffffc61b6409208d010000000000000000000000000000000000ffffcb0071c0208d128035cbc97953f80f2f5361746f7368693a302e392e332fcf05050001')
stream = BytesIO(msg)
envelope = NetworkEnvelope.parse(stream)
self.assertEqual(envelope.serialize(), msg)
class VersionMessage:
command = b'version'
define_network = False
def __init__(self, version=70015, services=0, timestamp=None,
receiver_services=0,
receiver_ip=b'\x00\x00\x00\x00', receiver_port=8333,
sender_services=0,
sender_ip=b'\x00\x00\x00\x00', sender_port=8333,
nonce=None, user_agent=b'/programmingblockchain:0.1/',
latest_block=0, relay=True):
self.version = version
self.services = services
if timestamp is None:
self.timestamp = int(time.time())
else:
self.timestamp = timestamp
self.receiver_services = receiver_services
self.receiver_ip = receiver_ip
self.receiver_port = receiver_port
self.sender_services = sender_services
self.sender_ip = sender_ip
self.sender_port = sender_port
if nonce is None:
self.nonce = int_to_little_endian(randint(0, 2**64), 8)
else:
self.nonce = nonce
self.user_agent = user_agent
self.latest_block = latest_block
self.relay = relay
def serialize(self):
'''Serialize this message to send over the network'''
# version is 4 bytes little endian
result = int_to_little_endian(self.version, 4)
# services is 8 bytes little endian
result += int_to_little_endian(self.services, 8)
# timestamp is 8 bytes little endian
result += int_to_little_endian(self.timestamp, 8)
# receiver services is 8 bytes little endian
result += int_to_little_endian(self.receiver_services, 8)
# IPV4 is 10 00 bytes and 2 ff bytes then receiver ip
result += b'\x00' * 10 + b'\xff\xff' + self.receiver_ip
# receiver port is 2 bytes, little endian
result += int_to_little_endian(self.receiver_port, 2)
# sender services is 8 bytes little endian
result += int_to_little_endian(self.sender_services, 8)
# IPV4 is 10 00 bytes and 2 ff bytes then sender ip
result += b'\x00' * 10 + b'\xff\xff' + self.sender_ip
# sender port is 2 bytes, little endian
result += int_to_little_endian(self.sender_port, 2)
# nonce
result += self.nonce
# useragent is a variable string, so varint first
result += encode_varint(len(self.user_agent))
result += self.user_agent
# latest block is 4 bytes little endian
result += int_to_little_endian(self.latest_block, 4)
# relay is 00 if false, 01 if true
if self.relay:
result += b'\x01'
else:
result += b'\x00'
return result
class VersionMessageTest(TestCase):
def test_serialize(self):
v = VersionMessage(timestamp=0, nonce=b'\x00' * 8)
self.assertEqual(v.serialize().hex(), '7f11010000000000000000000000000000000000000000000000000000000000000000000000ffff000000008d20000000000000000000000000000000000000ffff000000008d2000000000000000001b2f70726f6772616d6d696e67626c6f636b636861696e3a302e312f0000000001')
class VerAckMessage:
command = b'verack'
define_network = False
def __init__(self):
pass
@classmethod
def parse(cls, s):
return cls()
def serialize(self):
return b''
class PingMessage:
command = b'ping'
define_network = False
def __init__(self, nonce):
self.nonce = nonce
@classmethod
def parse(cls, s):
nonce = s.read(8)
return cls(nonce)
def serialize(self):
return self.nonce
class PongMessage:
command = b'pong'
define_network = False
def __init__(self, nonce):
self.nonce = nonce
def parse(cls, s):
nonce = s.read(8)
return cls(nonce)
def serialize(self):
return self.nonce
class GetHeadersMessage:
command = b'getheaders'
define_network = False
def __init__(self, version=70015, num_hashes=1, start_block=None, end_block=None):
self.version = version
self.num_hashes = num_hashes
if start_block is None:
raise RuntimeError('a start block is required')
self.start_block = start_block
if end_block is None:
self.end_block = b'\x00' * 32
else:
self.end_block = end_block
def serialize(self):
'''Serialize this message to send over the network'''
# protocol version is 4 bytes little-endian
result = int_to_little_endian(self.version, 4)
# number of hashes is a varint
result += encode_varint(self.num_hashes)
# start block is in little-endian
result += self.start_block[::-1]
# end block is also in little-endian
result += self.end_block[::-1]
return result
class GetHeadersMessageTest(TestCase):
def test_serialize(self):
block_hex = '0000000000000000001237f46acddf58578a37e213d2a6edc4884a2fcad05ba3'
gh = GetHeadersMessage(start_block=bytes.fromhex(block_hex))
self.assertEqual(gh.serialize().hex(), '7f11010001a35bd0ca2f4a88c4eda6d213e2378a5758dfcd6af437120000000000000000000000000000000000000000000000000000000000000000000000000000000000')
class HeadersMessage:
command = b'headers'
define_network = False
def __init__(self, headers):
self.headers = headers
def __iter__(self):
for header in self.headers:
yield header
@classmethod
def parse(cls, s):
# number of headers is in a varint
num_headers = read_varint(s)
# initialize the headers array
headers = []
# loop through number of headers times
for _ in range(num_headers):
# parse a header using Block.parse(s)
header = Block.parse(s)
# add the header to the headers array
headers.append(header)
# check that the length of the tx_hashes to be 0 or raise a RuntimeError
if len(header.tx_hashes) != 0:
raise RuntimeError('number of txs not 0')
# return a class instance
return cls(headers)
def is_valid(self):
'''Return whether the headers satisfy proof-of-work and are sequential and have the correct bits'''
last_block = None
for h in self.headers:
if not h.check_pow():
return False
if last_block and h.prev_block != last_block:
return False
last_block = h.hash()
return True
class HeadersMessageTest(TestCase):
def test_parse(self):
hex_msg = '0200000020df3b053dc46f162a9b00c7f0d5124e2676d47bbe7c5d0793a500000000000000ef445fef2ed495c275892206ca533e7411907971013ab83e3b47bd0d692d14d4dc7c835b67d8001ac157e670000000002030eb2540c41025690160a1014c577061596e32e426b712c7ca00000000000000768b89f07044e6130ead292a3f51951adbd2202df447d98789339937fd006bd44880835b67d8001ade09204600'
stream = BytesIO(bytes.fromhex(hex_msg))
headers = HeadersMessage.parse(stream)
self.assertEqual(len(headers.headers), 2)
for b in headers.headers:
self.assertEqual(b.__class__, Block)
class GetDataMessage:
command = b'getdata'
define_network = False
def __init__(self):
self.data = []
def add_data(self, data_type, identifier):
self.data.append((data_type, identifier))
def serialize(self):
# start with the number of items as a varint
result = encode_varint(len(self.data))
for data_type, identifier in self.data:
# data type is 4 bytes little endian
result += int_to_little_endian(data_type, 4)
# identifier needs to be in little endian
result += identifier[::-1]
return result
class GetDataMessageTest(TestCase):
def test_serialize(self):
hex_msg = '020300000030eb2540c41025690160a1014c577061596e32e426b712c7ca00000000000000030000001049847939585b0652fba793661c361223446b6fc41089b8be00000000000000'
get_data = GetDataMessage()
block1 = bytes.fromhex('00000000000000cac712b726e4326e596170574c01a16001692510c44025eb30')
get_data.add_data(FILTERED_BLOCK_DATA_TYPE, block1)
block2 = bytes.fromhex('00000000000000beb88910c46f6b442312361c6693a7fb52065b583979844910')
get_data.add_data(FILTERED_BLOCK_DATA_TYPE, block2)
self.assertEqual(get_data.serialize().hex(), hex_msg)
class GenericMessage:
define_network = False
def __init__(self, command, payload):
self.command = command
self.payload = payload
def serialize(self):
return self.payload
class SimpleNode:
def __init__(self, host, port=None, testnet=False, logging=False):
if port is None:
if testnet:
port = 18333
else:
port = 8333
self.testnet = testnet
self.logging = logging
# connect to socket
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((host, port))
# create a stream that we can use with the rest of the library
self.stream = self.socket.makefile('rb', None)
def handshake(self):
'''Do a handshake with the other node. Handshake is sending a version message and getting a verack back.'''
# create a version message
version = VersionMessage()
# send the command
self.send(version)
# wait for a verack message
self.wait_for(VerAckMessage)
def send(self, message):
'''Send a message to the connected node'''
# create a network envelope
envelope = NetworkEnvelope(
message.command, message.serialize(), testnet=self.testnet)
if self.logging:
print(f'sending: {envelope}')
# send the serialized envelope over the socket using sendall
self.socket.sendall(envelope.serialize())
def read(self):
'''Read a message from the socket'''
envelope = NetworkEnvelope.parse(self.stream, testnet=self.testnet)
if self.logging:
print(f'receiving: {envelope}')
return envelope
def wait_for(self, *message_classes):
'''Wait for one of the messages in the list'''
# initialize the command we have, which should be None
command = None
command_to_class = {m.command: m for m in message_classes}
# loop until the command is in the commands we want
while command not in command_to_class.keys():
# get the next network message
envelope = self.read()
# set the command to be evaluated
command = envelope.command
# we know how to respond to version and ping, handle that here
if command == VersionMessage.command:
# send verack
self.send(VerAckMessage())
elif command == PingMessage.command:
# send pong
self.send(PongMessage(envelope.payload))
# return the envelope parsed as a member of the right message class
cls = command_to_class[command]
if cls.define_network:
return cls.parse(envelope.stream(), testnet=self.testnet)
else:
return cls.parse(envelope.stream())
def is_tx_accepted(self, tx_obj):
'''Returns whether a transaction has been accepted on the network'''
# create a GetDataMessage
get_data = GetDataMessage()
# ask for the tx
get_data.add_data(TX_DATA_TYPE, tx_obj.hash())
# send the GetDataMessage
self.send(get_data)
# now wait for a response
got_tx = self.wait_for(Tx)
if got_tx.id() == tx_obj.id():
return True
def get_block(self, block_hash):
# create a GetDataMessage
get_data = GetDataMessage()
# add the block hash to the getdata message
get_data.add_data(BLOCK_DATA_TYPE, block_hash)
# send the message
self.send(get_data)
# wait for the Block message and send it back
return self.wait_for(Block)
class SimpleNodeTest(TestCase):
def test_handshake(self):
node = SimpleNode('seed.tbtc.petertodd.org', testnet=True)
node.handshake()
def test_get_block(self):
node = SimpleNode('seed.tbtc.petertodd.org', testnet=True)
node.handshake()
want = '00000000b4a283fd078500ef347c1646985261f925a4d4b67c143cc1ba2a3b57'
b = node.get_block(bytes.fromhex(want))
self.assertEqual(b.hash().hex(), want)
| 35.997854 | 346 | 0.657466 |
ace7bf6c86b1babef7250c326d7533e914c4eab0 | 2,029 | py | Python | tedx2/app.py | chudichudichudi/neuro-tedx-2 | 4ee8c4fc3e311a169d597deed9e3ddefff13709f | [
"BSD-3-Clause"
] | null | null | null | tedx2/app.py | chudichudichudi/neuro-tedx-2 | 4ee8c4fc3e311a169d597deed9e3ddefff13709f | [
"BSD-3-Clause"
] | null | null | null | tedx2/app.py | chudichudichudi/neuro-tedx-2 | 4ee8c4fc3e311a169d597deed9e3ddefff13709f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
'''The app module, containing the app factory function.'''
from flask import Flask, render_template
from tedx2.settings import ProdConfig
from tedx2.assets import assets
from tedx2.extensions import (
bcrypt,
cache,
db,
login_manager,
migrate,
debug_toolbar,
apimanager,
admin,
)
from tedx2 import public, user, experimentos, gameconfig
from flask.ext.admin.contrib.sqla import ModelView
def create_app(config_object=ProdConfig):
'''An application factory, as explained here:
http://flask.pocoo.org/docs/patterns/appfactories/
:param config_object: The configuration object to use.
'''
app = Flask(__name__)
app.config.from_object(config_object)
register_extensions(app)
register_blueprints(app)
register_errorhandlers(app)
register_api_endpoints()
register_admin_views()
return app
def register_extensions(app):
assets.init_app(app)
bcrypt.init_app(app)
cache.init_app(app)
db.init_app(app)
login_manager.init_app(app)
debug_toolbar.init_app(app)
migrate.init_app(app, db)
admin.init_app(app)
apimanager.init_app(app, flask_sqlalchemy_db=db)
return None
def register_blueprints(app):
app.register_blueprint(public.views.blueprint)
app.register_blueprint(user.views.blueprint)
app.register_blueprint(experimentos.views.blueprint)
return None
def register_errorhandlers(app):
def render_error(error):
# If a HTTPException, pull the `code` attribute; default to 500
error_code = getattr(error, 'code', 500)
return render_template("{0}.html".format(error_code)), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None
def register_api_endpoints():
apimanager.create_api(gameconfig.models.GameConfig,
methods=['GET'], primary_key='name')
def register_admin_views():
admin.add_view(ModelView(gameconfig.models.GameConfig, db.session))
| 26.350649 | 73 | 0.716609 |
ace7bfe6e8e50705dafac87f9840bf6b2f9e6d6d | 4,991 | py | Python | deepclustering/arch/classification/IIC/net5g_multi_head.py | jizongFox/deep-clustering-toolbox | 0721cbbb278af027409ed4c115ccc743b6daed1b | [
"MIT"
] | 34 | 2019-08-05T03:48:36.000Z | 2022-03-29T03:04:51.000Z | deepclustering/arch/classification/IIC/net5g_multi_head.py | jizongFox/deep-clustering-toolbox | 0721cbbb278af027409ed4c115ccc743b6daed1b | [
"MIT"
] | 10 | 2019-05-03T21:02:50.000Z | 2021-12-23T08:01:30.000Z | deepclustering/arch/classification/IIC/net5g_multi_head.py | ETS-Research-Repositories/deep-clustering-toolbox | 0721cbbb278af027409ed4c115ccc743b6daed1b | [
"MIT"
] | 5 | 2019-09-29T07:56:03.000Z | 2021-04-22T12:08:50.000Z | import warnings
from typing import List
import torch
import torch.nn as nn
from .net5g import ClusterNet5gTrunk
from .residual import BasicBlock, ResNet
from deepclustering.decorator.decorator import export
# resnet34 and full channels
__all__ = ["ClusterNet5gMultiHead", "ClusterNet5gMultiHead_Param"]
class ClusterNet5gMultiHeadHead(nn.Module):
def __init__(
self,
output_k: int,
num_sub_heads: int,
semisup: bool = False,
batchnorm_track: bool = True,
):
super(ClusterNet5gMultiHeadHead, self).__init__()
self.batchnorm_track = batchnorm_track
self.semisup = semisup
if not self.semisup:
""" Here the difference between semisup and not are the Softmax layer."""
self.num_sub_heads = num_sub_heads
self.heads = nn.ModuleList(
[
nn.Sequential(
nn.Linear(512 * BasicBlock.expansion, output_k),
nn.Softmax(dim=1),
)
for _ in range(self.num_sub_heads)
]
)
else:
self.head = nn.Linear(512 * BasicBlock.expansion, output_k)
def forward(self, x: torch.Tensor, kmeans_use_features: bool = False):
if not self.semisup:
results = []
for i in range(self.num_sub_heads):
if kmeans_use_features:
results.append(x) # duplicates
else:
results.append(self.heads[i](x))
return results
else:
return self.head(x)
@export
class ClusterNet5gMultiHead(ResNet):
"""
based on resnet with two heads (multiple subheads). One head is for overclustering and
other is for normal clustering
"""
num_name_mapping = {1: "A", 2: "B", 3: "C", 4: "D", 5: "E", 6: "F", 7: "G"}
name_num_mapping = {v: k for k, v in num_name_mapping.items()}
def __init__(
self,
num_channel: int = 3,
output_k_list: List[int] = [70, 10],
semisup: bool = False,
num_sub_heads: int = 5,
batchnorm_track: bool = True,
verbose=False,
):
r"""
:param input_size: image size of the raw image, only support 96, 64, 32
:param num_channel: image channel
:param output_k_list: list of clustering nums for clusterings. last one should be the ground truth class_num
:param semisup: return semi supervised feature
:param num_sub_heads: sub-head number to form an ensemble-like prediction for each head
:param batchnorm_track: whether to track the batchnorm states
"""
super(ClusterNet5gMultiHead, self).__init__()
if isinstance(output_k_list, int):
output_k_list = [output_k_list]
assert isinstance(
output_k_list, (list, tuple)
), f"output_k_list should be a list or tuple, given {output_k_list}."
self.output_k_list: List[int] = output_k_list
self.batchnorm_track = batchnorm_track
# resnet structure
self.trunk = ClusterNet5gTrunk(
num_channel=num_channel, batchnorm_track=self.batchnorm_track
)
for head_i, cluster_num in enumerate(self.output_k_list):
setattr(
self,
f"head_{self.num_name_mapping[head_i + 1]}",
ClusterNet5gMultiHeadHead(
output_k=cluster_num,
num_sub_heads=num_sub_heads,
semisup=semisup,
batchnorm_track=self.batchnorm_track,
),
)
self.verbose = verbose
if self.verbose:
print("semisup: %s" % semisup)
self._initialize_weights()
def forward(
self,
x,
head=None,
kmeans_use_features=False,
trunk_features=False,
penultimate_features=False,
):
if head is None:
warnings.warn(
"head is None, using the last head: head_%s."
% self.num_name_mapping[len(self.output_k_list)]
)
head = self.num_name_mapping[len(self.output_k_list)]
assert isinstance(head, str) and head in list(self.name_num_mapping.keys()), (
f"head given {head} should be "
f"within {', '.join(list(self.name_num_mapping.keys())[:len(self.output_k_list)])}."
)
# default is "B" for use by eval IIC
# training script switches between A and B
x = self.trunk(x, penultimate_features=penultimate_features)
if trunk_features: # for semisup
return x
else:
x = getattr(self, f"head_{head}")(
x, kmeans_use_features=kmeans_use_features
)
return x
ClusterNet5gMultiHead_Param = {
"num_channel": 3,
"output_k_list": [150, 70, 10],
"num_sub_heads": 5,
"semisup": False,
}
| 33.722973 | 116 | 0.583851 |
ace7c19fb298ffa69b9c4134723811a95bb962c8 | 1,980 | py | Python | examples/reinforcement_learning/vin/loaddata.py | FrostByte266/neupy | 4b7127e5e4178b0cce023ba36542f5ad3f1d798c | [
"MIT"
] | 801 | 2015-09-23T09:24:47.000Z | 2022-03-29T19:19:03.000Z | examples/reinforcement_learning/vin/loaddata.py | FrostByte266/neupy | 4b7127e5e4178b0cce023ba36542f5ad3f1d798c | [
"MIT"
] | 277 | 2015-09-22T19:48:50.000Z | 2022-03-11T23:25:32.000Z | examples/reinforcement_learning/vin/loaddata.py | FrostByte266/neupy | 4b7127e5e4178b0cce023ba36542f5ad3f1d798c | [
"MIT"
] | 194 | 2015-09-23T15:03:57.000Z | 2022-03-31T13:54:46.000Z | import pickle
import argparse
import scipy.io
import numpy as np
from sklearn.utils import shuffle
from neupy.utils import asfloat
from settings import environments
def save_data(data, filepath):
with open(filepath, 'wb') as f:
# Use protocol 2, for python 2 and 3 compatibility
return pickle.dump(data, f, protocol=2)
def load_data(filepath):
with open(filepath, 'rb') as f:
return pickle.load(f)
parser = argparse.ArgumentParser()
parser.add_argument('--seed', '-s', type=int, default=0)
parser.add_argument('--imsize', '-i', choices=[8, 16, 28],
type=int, required=True)
if __name__ == '__main__':
args = parser.parse_args()
env = environments[args.imsize]
np.random.seed(args.seed)
matlab_data = scipy.io.loadmat(env['mat_file'])
image_data = matlab_data["batch_im_data"]
image_data = (image_data - 1) / 255. # obstacles = 1, free zone = 0
value_data = matlab_data["batch_value_data"]
s1_data = matlab_data["state_x_data"].astype('int8')
s2_data = matlab_data["state_y_data"].astype('int8')
y_data = matlab_data["batch_label_data"].astype('int8')
image_data = asfloat(image_data.reshape(-1, 1, *env['image_size']))
value_data = asfloat(value_data.reshape(-1, 1, *env['image_size']))
x_data = np.append(image_data, value_data, axis=1)
x_data = np.transpose(x_data, (0, 2, 3, 1))
n_samples = x_data.shape[0]
training_samples = int(6 / 7.0 * n_samples)
x_train, x_test = np.split(x_data, [training_samples])
s1_train, s1_test = np.split(s1_data, [training_samples])
s2_train, s2_test = np.split(s2_data, [training_samples])
y_train, y_test = np.split(y_data, [training_samples])
x_train, s1_train, s2_train, y_train = shuffle(
x_train, s1_train, s2_train, y_train)
save_data((x_train, s1_train, s2_train, y_train), env['train_data_file'])
save_data((x_test, s1_test, s2_test, y_test), env['test_data_file'])
| 31.935484 | 77 | 0.684848 |
ace7c213ddc6e0026381bd96bba9e028e59827ee | 2,768 | py | Python | src/mem/ruby/system/RubySystem.py | multifacet/ASAP | 68cb32c43e3ebad2a5dfb947ce98442375b235c7 | [
"BSD-3-Clause"
] | 2 | 2021-11-10T08:03:19.000Z | 2021-12-15T04:09:03.000Z | src/mem/ruby/system/RubySystem.py | multifacet/ASAP | 68cb32c43e3ebad2a5dfb947ce98442375b235c7 | [
"BSD-3-Clause"
] | null | null | null | src/mem/ruby/system/RubySystem.py | multifacet/ASAP | 68cb32c43e3ebad2a5dfb947ce98442375b235c7 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.params import *
from m5.objects.ClockedObject import ClockedObject
from m5.objects.SimpleMemory import *
class RubySystem(ClockedObject):
type = 'RubySystem'
cxx_header = "mem/ruby/system/RubySystem.hh"
randomization = Param.Bool(False,
"insert random delays on message enqueue times (if True, all message \
buffers are enforced to have randomization; otherwise, a message \
buffer set its own flag to enable/disable randomization)");
block_size_bytes = Param.UInt32(64,
"default cache block size; must be a power of two");
memory_size_bits = Param.UInt32(64,
"number of bits that a memory address requires");
# phys_mem = VectorParam.SimpleMemory(NULL, "")
phys_mem = Param.SimpleMemory(NULL, "")
access_backing_store = Param.Bool(False, "Use phys_mem as the functional \
store and only use ruby for timing.")
check_backing_store = Param.Bool(False, "Check ruby data against \
functional store on reads.")
# Profiler related configuration variables
hot_lines = Param.Bool(False, "")
all_instructions = Param.Bool(False, "")
num_of_sequencers = Param.Int("")
number_of_virtual_networks = Param.Unsigned("")
| 48.561404 | 78 | 0.755058 |
ace7c2f354fffa58989764913eaa9c17713a50fd | 905 | py | Python | transformers/timeseries/general_time_series_transformer.py | james94/driverlessai-recipes | 87c35460db59ffda8dc18ad82cb3a9b8291410e4 | [
"Apache-2.0"
] | null | null | null | transformers/timeseries/general_time_series_transformer.py | james94/driverlessai-recipes | 87c35460db59ffda8dc18ad82cb3a9b8291410e4 | [
"Apache-2.0"
] | null | null | null | transformers/timeseries/general_time_series_transformer.py | james94/driverlessai-recipes | 87c35460db59ffda8dc18ad82cb3a9b8291410e4 | [
"Apache-2.0"
] | null | null | null | """Demonstrates the API for custom time-series transformers."""
from h2oaicore.transformer_utils import CustomTimeSeriesTransformer
import datatable as dt
import numpy as np
class GeneralTimeSeriesTransformer(CustomTimeSeriesTransformer):
_causal_recipe_allowed = False # need self.encoder only available in lag time series recipe mode
def fit_transform(self, X: dt.Frame, y: np.array = None):
# FIXME - use the following attributes
# self.encoder
# self.tgc
# self.pred_gap
# self.pred_periods
# self.lag_sizes
# self.lag_feature
# self.target
# self.tsp
# self.time_column
# self.datetime_formats
self.encoder.fit(X[:, self.time_column].to_pandas())
return self.transform(X)
def transform(self, X: dt.Frame):
return self.encoder.transform(X[:, self.time_column].to_pandas())
| 34.807692 | 101 | 0.680663 |
ace7c2fbd20d529eef669323f0c1ec223acfe9a2 | 6,765 | py | Python | dl_and_create.py | ZubairLK/ka-static | dbb56cb67ae49e12cd96be17f40f6328dfc4c2d0 | [
"MIT"
] | 1 | 2018-11-21T06:44:52.000Z | 2018-11-21T06:44:52.000Z | dl_and_create.py | ZubairLK/ka-static | dbb56cb67ae49e12cd96be17f40f6328dfc4c2d0 | [
"MIT"
] | null | null | null | dl_and_create.py | ZubairLK/ka-static | dbb56cb67ae49e12cd96be17f40f6328dfc4c2d0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This script basically downloads topictree from khan academy
# Then parses it and creates a folder Khan Academy with files.
# Kept simple on purpose to just see how parsing the topic tree json works in python
import json
import pprint
import inspect
import os
import re
import urllib
global_index = 0
global_level = 0
#http://stackoverflow.com/questions/3663450/python-remove-substring-only-at-the-end-of-string
def rchop(thestring, ending):
if thestring.endswith(ending):
return thestring[:-len(ending)]
return thestring
#somestring = rchop(somestring, ' rec')
def make_video_file(data, dirname, title):
global global_index
# for key in data:
# print key
video_title = re.sub('[^A-Za-z0-9]+', '_', data["translated_title"])
# for key in data["download_urls"]:
# print key
if "mp4-low" in data["download_urls"]:
download_url = data["download_urls"]["mp4-low"]
elif "mp4" in data["download_urls"]:
download_url = data["download_urls"]["mp4"]
else:
# Looks like there is a download_urls json that doesn't have a video.
# print "No mp4 or mp4 for some videos"
# print dirname
# print global_index
# print video_title
# for key in data["download_urls"]:
# print key
return
if title == "New_and_noteworthy":
directory = dirname
else:
directory = rchop(dirname , title)
# directory = dirname + title
# directory = dirname
# print directory
# directory = re.sub('[^A-Za-z0-9]+', '_', directory)
# print dirname
# print video_title
# print title
# print global_index
# print video_index
if data['translated_title'] is not None:
full_title = data['translated_title'].encode('utf-8')
else:
# Use file name
full_title = video_title
if data['translated_description'] is not None:
full_description = data['translated_description'].encode('utf-8')
else:
full_description = "No description"
id = []
if data['id'] is not None:
id = data['id']
directory = directory + "/" + title
if not os.path.exists(directory):
os.makedirs(directory)
fp = open(directory + "/" + str(global_index) + "_" + video_title + ".html", "wb")
fp_head = open("head.html", "rb")
fp_tail = open("tail.html", "rb")
fp.write(fp_head.read())
fp.write("<p>")
fp.write("Video Title : ")
fp.write(full_title + "\n")
fp.write("</p>")
fp.write("<p>")
fp.write("Video Description : ")
fp.write(full_description + "\n")
fp.write("</p>")
# fp.write("<p>")
# fp.write("Download URL : ")
# fp.write(download_url + "\n")
# fp.write("</p>")
fp.write("<video width=\"80%\" controls>")
fp.write("<source src=" + download_url + " type=\"video/mp4\">")
fp.write("Your browser does not support HTML5 video. Direct link " + download_url)
fp.write("</video>")
fp.write("<p>")
fp.write("Video ID : ")
fp.write(id + "\n")
fp.write("</p>")
fp.write(fp_tail.read())
fp.close()
def list_dict_keys(data, level, dirname, title):
# print type(data)
global global_index
base = (" " * level)
if type(data) is dict:
for key in data:
# print base + key
# if key == 'title':
if key == 'translated_title':
title = re.sub('[^A-Za-z0-9]+', '_', data[key])
dirname = dirname + "/" + title
# print base + dirname
# print base + data[key]
if key == 'relative_url':
continue
print base + data[key]
if key == 'translated_youtube_id':
continue
print base + data[key]
if key == 'download_urls':
# Now that we have recursively reached a node with a video
# Create a folder and file.
global_index = global_index + 1
make_video_file(data, dirname, title)
# list_dict_keys(data[key], level, dirname, title)
# if key == 'mp4':
# print base + data[key]
if key == 'children':
# Write some metadata in folder before proceeding to child node.
if not os.path.exists(dirname):
os.makedirs(dirname)
fp = open(dirname + "/metadata", "wb")
fp.write(data['description'].encode('utf-8'))
fp.close
# Enter child node.
list_members(data[key], level + 1, dirname, title)
def list_members(data, level, dirname, title):
for index, item in enumerate(data):
if type(data[index]) is dict:
list_dict_keys(data[index], level, dirname, title)
def create_index_html(mydirname, dirnames):
# print mydirname
# print dirnames
global global_level
global_level = global_level + 1
# print global_level
# print mydirname
# print dirnames
if mydirname.endswith("fonts") is True:
return
if mydirname.endswith("css") is True:
return
if mydirname.endswith("js") is True:
return
fp = open(mydirname + "/index.html", "wb")
fp_head = open("head.html", "rb")
fp_tail = open("tail.html", "rb")
fp.write(fp_head.read())
if dirnames:
# print "here"
for index, item in enumerate(dirnames):
if dirnames[index] == "fonts":
continue
if dirnames[index] == "css":
continue
if dirnames[index] == "js":
continue
# print mydirname
# print " " + dirnames[index]
fp.write("<div class=\"col-sm-4\">")
fp.write("<p>")
fp.write("<a href=" + dirnames[index] + "/index.html" + ">" + dirnames[index] + "</a>")
fp.write("</p>")
if dirnames[index] == "New_and_noteworthy":
fp_meta = open(mydirname + "/" + "/metadata", "r")
else:
fp_meta = open(mydirname + "/" + dirnames[index] + "/metadata", "r")
# Writing description
fp.write("<p>")
fp.write(fp_meta.read())
fp_meta.close()
fp.write("</p>")
fp.write("</div>")
else:
# print "here2"
# print dirnames
# print os.listdir( mydirname )
files = [f for f in os.listdir(mydirname) if os.path.isfile(os.path.join(mydirname, f))]
# print files
if mydirname.endswith("New_and_noteworthy"):
mydirname = rchop(mydirname, "New_and_noteworthy")
fp_meta = open(mydirname + "/" + "metadata", "r")
fp.write("<p>")
fp.write(fp_meta.read())
fp_meta.close()
fp.write("</p>")
for f in files:
if f == "index.html":
continue
if f == "metadata":
continue
# print f
fp.write("<p>")
fp.write("<a href=" + f + ">" + f + "</a>")
fp.write("\n")
fp.write("</p>")
# do something
# files = filter(os.path.isfile, os.listdir( mydirname ) )
fp.write(fp_tail.read())
def create_index(mydirname):
mydirname = mydirname + "/Khan_Academy"
for dirname, dirnames, filenames in os.walk(mydirname):
create_index_html(dirname, dirnames)
# Main
if not os.path.exists('topictree'):
print "Downloading Topictree JSON. Roughly 60+MB. This can take a while"
topictree = urllib.urlretrieve("http://www.khanacademy.org/api/v1/topictree", "topictree")
print "Downloaded Topic Tree. Now parsing"
with open('topictree') as data_file:
data = json.load(data_file)
# print json.dumps(data)
list_dict_keys(data,0, ".", "title")
create_index(".")
quit()
| 25.242537 | 93 | 0.656467 |
ace7c31452989663d37f810f1c912e2a3c3198fd | 934 | py | Python | 2.1/21.py | SuperFlanker2014/PADS_Spring_2019 | 73a805d1da3b71a3fc27f2515a57907f16417c14 | [
"MIT"
] | null | null | null | 2.1/21.py | SuperFlanker2014/PADS_Spring_2019 | 73a805d1da3b71a3fc27f2515a57907f16417c14 | [
"MIT"
] | null | null | null | 2.1/21.py | SuperFlanker2014/PADS_Spring_2019 | 73a805d1da3b71a3fc27f2515a57907f16417c14 | [
"MIT"
] | null | null | null | def merge(a, b):
i, j = 0, 0
n, m = len(a), len(b)
c = []
while i < n or j < m:
if j == m or (i < n and a[i] <= b[j]):
c.append(a[i])
i = i + 1
else:
c.append(b[j])
j = j + 1
return c
def merge_sort(a, ind, logger):
n = len(a)
if n == 1:
return a
l = a[0:n//2]
r = a[n//2:n]
l = merge_sort(l,ind, logger)
r = merge_sort(r,ind+n//2, logger)
m = merge(l, r)
logger(ind, ind + n - 1, m[0], m[-1])
return m
with open('input.txt','r') as INPUT:
lines = INPUT.readlines()
n = int(lines[0])
original_list = [int(i) for i in lines[1].split(' ')]
logger = lambda i1,i2,i3,i4: OUTPUT.write(f'{i1+1} {i2+1} {i3} {i4}\n')
with open('output.txt', 'w') as OUTPUT:
A = merge_sort(original_list, 0, logger)
OUTPUT.write(' '.join([str(i) for i in A])) | 27.470588 | 76 | 0.455032 |
ace7c346440abe2b6abdca326f9d2469a4ad8d9a | 5,254 | py | Python | Choose Your Own Colors/contracting_square_1.py | ShineTop/Unicorn-HAT | 9ff1388ee627a8e81f361929e9e9b708db4e2832 | [
"MIT"
] | null | null | null | Choose Your Own Colors/contracting_square_1.py | ShineTop/Unicorn-HAT | 9ff1388ee627a8e81f361929e9e9b708db4e2832 | [
"MIT"
] | null | null | null | Choose Your Own Colors/contracting_square_1.py | ShineTop/Unicorn-HAT | 9ff1388ee627a8e81f361929e9e9b708db4e2832 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
"""
Contracting Square 1 - Choose Your Own Color
Creates a square that decreases in size. It does this with each of the
colors.
....................
Author: Paul Ryan
This program was written on a Raspberry Pi using the Geany IDE.
"""
########################################################################
# Import modules #
########################################################################
from time import sleep
import unicornhat
from bfp_unicornhat import print_header
from bfp_unicornhat import stop
########################################################################
# Import Variables #
########################################################################
from bfp_unicornhat import C1
from bfp_unicornhat import C2
from bfp_unicornhat import C3
from bfp_unicornhat import C4
from bfp_unicornhat import C5
from bfp_unicornhat import C6
from bfp_unicornhat import C7
from bfp_unicornhat import C8
########################################################################
# Functions #
########################################################################
def contracting_square_1():
"""
Creates a square that decreases in size. It does this with each of
the colors.
"""
sleep_speed = 0.1
colors = [C1, C2, C3, C4, C5, C6, C7, C8]
for color in colors:
for _ in range(5):
unicornhat.set_pixel(0, 0, color)
unicornhat.set_pixel(1, 0, color)
unicornhat.set_pixel(2, 0, color)
unicornhat.set_pixel(3, 0, color)
unicornhat.set_pixel(4, 0, color)
unicornhat.set_pixel(5, 0, color)
unicornhat.set_pixel(6, 0, color)
unicornhat.set_pixel(7, 0, color)
unicornhat.set_pixel(0, 1, color)
unicornhat.set_pixel(0, 2, color)
unicornhat.set_pixel(0, 3, color)
unicornhat.set_pixel(0, 4, color)
unicornhat.set_pixel(0, 5, color)
unicornhat.set_pixel(0, 6, color)
unicornhat.set_pixel(0, 7, color)
unicornhat.set_pixel(1, 7, color)
unicornhat.set_pixel(2, 7, color)
unicornhat.set_pixel(3, 7, color)
unicornhat.set_pixel(4, 7, color)
unicornhat.set_pixel(5, 7, color)
unicornhat.set_pixel(6, 7, color)
unicornhat.set_pixel(7, 7, color)
unicornhat.set_pixel(7, 1, color)
unicornhat.set_pixel(7, 2, color)
unicornhat.set_pixel(7, 3, color)
unicornhat.set_pixel(7, 4, color)
unicornhat.set_pixel(7, 5, color)
unicornhat.set_pixel(7, 6, color)
unicornhat.show()
sleep(sleep_speed)
unicornhat.clear()
unicornhat.set_pixel(1, 1, color)
unicornhat.set_pixel(2, 1, color)
unicornhat.set_pixel(3, 1, color)
unicornhat.set_pixel(4, 1, color)
unicornhat.set_pixel(5, 1, color)
unicornhat.set_pixel(6, 1, color)
unicornhat.set_pixel(1, 2, color)
unicornhat.set_pixel(1, 3, color)
unicornhat.set_pixel(1, 4, color)
unicornhat.set_pixel(1, 5, color)
unicornhat.set_pixel(1, 6, color)
unicornhat.set_pixel(2, 6, color)
unicornhat.set_pixel(3, 6, color)
unicornhat.set_pixel(4, 6, color)
unicornhat.set_pixel(5, 6, color)
unicornhat.set_pixel(6, 6, color)
unicornhat.set_pixel(6, 2, color)
unicornhat.set_pixel(6, 3, color)
unicornhat.set_pixel(6, 4, color)
unicornhat.set_pixel(6, 5, color)
unicornhat.show()
sleep(sleep_speed)
unicornhat.clear()
unicornhat.set_pixel(2, 2, color)
unicornhat.set_pixel(3, 2, color)
unicornhat.set_pixel(4, 2, color)
unicornhat.set_pixel(5, 2, color)
unicornhat.set_pixel(2, 3, color)
unicornhat.set_pixel(2, 4, color)
unicornhat.set_pixel(2, 5, color)
unicornhat.set_pixel(3, 5, color)
unicornhat.set_pixel(4, 5, color)
unicornhat.set_pixel(5, 5, color)
unicornhat.set_pixel(5, 3, color)
unicornhat.set_pixel(5, 4, color)
unicornhat.set_pixel(5, 5, color)
unicornhat.show()
sleep(sleep_speed)
unicornhat.clear()
unicornhat.set_pixel(3, 3, color)
unicornhat.set_pixel(3, 4, color)
unicornhat.set_pixel(4, 3, color)
unicornhat.set_pixel(4, 4, color)
unicornhat.show()
sleep(sleep_speed)
unicornhat.clear()
if __name__ == '__main__':
try:
# STEP01: Print header
print_header()
# STEP02: Print instructions in white text
print("\033[1;37;40mPress Ctrl-C to stop the program.")
# STEP03:
contracting_square_1()
# STEP04: Exit the program.
stop()
except KeyboardInterrupt:
stop()
| 32.8375 | 72 | 0.521317 |
ace7c43da93143c7c357910130c61823f6576267 | 11,958 | py | Python | core/storage/topic/gae_models.py | agstaples/oppia | 2cced6aadf935ea5dd6bf6779b909807e7d14d9a | [
"Apache-2.0"
] | null | null | null | core/storage/topic/gae_models.py | agstaples/oppia | 2cced6aadf935ea5dd6bf6779b909807e7d14d9a | [
"Apache-2.0"
] | null | null | null | core/storage/topic/gae_models.py | agstaples/oppia | 2cced6aadf935ea5dd6bf6779b909807e7d14d9a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for topics and related constructs."""
from constants import constants
from core.platform import models
from google.appengine.ext import ndb
(base_models, user_models,) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.user])
class TopicSnapshotMetadataModel(base_models.BaseSnapshotMetadataModel):
"""Storage model for the metadata for a topic snapshot."""
pass
class TopicSnapshotContentModel(base_models.BaseSnapshotContentModel):
"""Storage model for the content of a topic snapshot."""
pass
class TopicModel(base_models.VersionedModel):
"""Model for storing Topics.
This class should only be imported by the topic services file
and the topic model test file.
"""
SNAPSHOT_METADATA_CLASS = TopicSnapshotMetadataModel
SNAPSHOT_CONTENT_CLASS = TopicSnapshotContentModel
ALLOW_REVERT = False
# The name of the topic.
name = ndb.StringProperty(required=True, indexed=True)
# The description of the topic.
description = ndb.TextProperty(indexed=False)
# This consists of the list of canonical story ids that are part of
# this topic.
canonical_story_ids = ndb.StringProperty(repeated=True, indexed=True)
# This consists of the list of additional (non-canonical) story ids that
# are part of this topic.
additional_story_ids = ndb.StringProperty(repeated=True, indexed=True)
# This consists of the list of uncategorized skill ids that are not part of
# any subtopic.
uncategorized_skill_ids = ndb.StringProperty(repeated=True, indexed=True)
# The list of subtopics that are part of the topic.
subtopics = ndb.JsonProperty(repeated=True, indexed=False)
# The schema version of the subtopic dict.
subtopic_schema_version = ndb.IntegerProperty(required=True, indexed=True)
# The id for the next subtopic.
next_subtopic_id = ndb.IntegerProperty(required=True)
# The ISO 639-1 code for the language this topic is written in.
language_code = ndb.StringProperty(required=True, indexed=True)
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
"""Record the event to the commit log after the model commit.
Note that this extends the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, which should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
"""
super(TopicModel, self)._trusted_commit(
committer_id, commit_type, commit_message, commit_cmds)
committer_user_settings_model = (
user_models.UserSettingsModel.get_by_id(committer_id))
committer_username = (
committer_user_settings_model.username
if committer_user_settings_model else '')
topic_commit_log_entry = TopicCommitLogEntryModel.create(
self.id, self.version, committer_id, committer_username,
commit_type, commit_message, commit_cmds,
constants.ACTIVITY_STATUS_PUBLIC, False
)
topic_commit_log_entry.topic_id = self.id
topic_commit_log_entry.put()
@classmethod
def get_by_name(cls, topic_name):
"""Gets TopicModel by topic_name. Returns None if the topic with
name topic_name doesn't exist.
Args:
topic_name: str. The name of the topic.
Returns:
TopicModel|None. The topic model of the topic or None if not
found.
"""
return TopicModel.query().filter(cls.name == topic_name).get()
class TopicCommitLogEntryModel(base_models.BaseCommitLogEntryModel):
"""Log of commits to topics.
A new instance of this model is created and saved every time a commit to
TopicModel occurs.
The id for this model is of the form
'topic-{{TOPIC_ID}}-{{TOPIC_VERSION}}'.
"""
# The id of the topic being edited.
topic_id = ndb.StringProperty(indexed=True, required=True)
@classmethod
def _get_instance_id(cls, topic_id, version):
"""This function returns the generated id for the get_commit function
in the parent class.
Args:
topic_id: str. The id of the topic being edited.
version: int. The version number of the topic after the commit.
Returns:
str. The commit id with the topic id and version number.
"""
return 'topic-%s-%s' % (topic_id, version)
class TopicSummaryModel(base_models.BaseModel):
"""Summary model for an Oppia Topic.
This should be used whenever the content blob of the topic is not
needed (e.g. search results, etc).
A TopicSummaryModel instance stores the following information:
id, description, language_code, last_updated, created_on, version.
The key of each instance is the topic id.
"""
# The name of the topic.
name = ndb.StringProperty(required=True, indexed=True)
# The ISO 639-1 code for the language this topic is written in.
language_code = ndb.StringProperty(required=True, indexed=True)
# Time when the topic model was last updated (not to be
# confused with last_updated, which is the time when the
# topic *summary* model was last updated).
topic_model_last_updated = ndb.DateTimeProperty(required=True, indexed=True)
# Time when the topic model was created (not to be confused
# with created_on, which is the time when the topic *summary*
# model was created).
topic_model_created_on = ndb.DateTimeProperty(required=True, indexed=True)
# The number of canonical stories that are part of this topic.
canonical_story_count = ndb.IntegerProperty(required=True, indexed=True)
# The number of additional stories that are part of this topic.
additional_story_count = ndb.IntegerProperty(required=True, indexed=True)
# The total number of skills in the topic (including those that are
# uncategorized).
total_skill_count = ndb.IntegerProperty(required=True, indexed=True)
# The number of skills that are not part of any subtopic.
uncategorized_skill_count = ndb.IntegerProperty(required=True, indexed=True)
# The number of subtopics of the topic.
subtopic_count = ndb.IntegerProperty(required=True, indexed=True)
version = ndb.IntegerProperty(required=True)
class SubtopicPageSnapshotMetadataModel(base_models.BaseSnapshotMetadataModel):
"""Storage model for the metadata for a subtopic page snapshot."""
pass
class SubtopicPageSnapshotContentModel(base_models.BaseSnapshotContentModel):
"""Storage model for the content of a subtopic page snapshot."""
pass
class SubtopicPageModel(base_models.VersionedModel):
"""Model for storing Subtopic pages.
This stores the HTML data for a subtopic page.
"""
SNAPSHOT_METADATA_CLASS = SubtopicPageSnapshotMetadataModel
SNAPSHOT_CONTENT_CLASS = SubtopicPageSnapshotContentModel
ALLOW_REVERT = False
# The topic id that this subtopic is a part of.
topic_id = ndb.StringProperty(required=True, indexed=True)
# The html data of the subtopic.
html_data = ndb.TextProperty(required=True)
# The ISO 639-1 code for the language this subtopic page is written in.
language_code = ndb.StringProperty(required=True, indexed=True)
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
"""Record the event to the commit log after the model commit.
Note that this extends the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, which should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
"""
super(SubtopicPageModel, self)._trusted_commit(
committer_id, commit_type, commit_message, commit_cmds)
committer_user_settings_model = (
user_models.UserSettingsModel.get_by_id(committer_id))
committer_username = (
committer_user_settings_model.username
if committer_user_settings_model else '')
subtopic_page_commit_log_entry = SubtopicPageCommitLogEntryModel.create(
self.id, self.version, committer_id, committer_username,
commit_type, commit_message, commit_cmds,
constants.ACTIVITY_STATUS_PUBLIC, False
)
subtopic_page_commit_log_entry.subtopic_page_id = self.id
subtopic_page_commit_log_entry.put()
class SubtopicPageCommitLogEntryModel(base_models.BaseCommitLogEntryModel):
"""Log of commits to subtopic pages.
A new instance of this model is created and saved every time a commit to
SubtopicPageModel occurs.
The id for this model is of the form
'subtopicpage-{{SUBTOPIC_PAGE_ID}}-{{SUBTOPIC_PAGE_VERSION}}'.
"""
# The id of the subtopic page being edited.
subtopic_page_id = ndb.StringProperty(indexed=True, required=True)
@classmethod
def _get_instance_id(cls, subtopic_page_id, version):
"""This function returns the generated id for the get_commit function
in the parent class.
Args:
subtopic_page_id: str. The id of the subtopic page being edited.
version: int. The version number of the subtopic page after the
commit.
Returns:
str. The commit id with the subtopic page id and version number.
"""
return 'subtopicpage-%s-%s' % (subtopic_page_id, version)
class TopicRightsSnapshotMetadataModel(base_models.BaseSnapshotMetadataModel):
"""Storage model for the metadata for a topic rights snapshot."""
pass
class TopicRightsSnapshotContentModel(base_models.BaseSnapshotContentModel):
"""Storage model for the content of a topic rights snapshot."""
pass
class TopicRightsModel(base_models.VersionedModel):
"""Storage model for rights related to a topic.
The id of each instance is the id of the corresponding topic.
"""
SNAPSHOT_METADATA_CLASS = TopicRightsSnapshotMetadataModel
SNAPSHOT_CONTENT_CLASS = TopicRightsSnapshotContentModel
ALLOW_REVERT = False
# The user_ids of the managers of this topic.
manager_ids = ndb.StringProperty(indexed=True, repeated=True)
# Whether this topic is published.
topic_is_published = ndb.BooleanProperty(
indexed=True, required=True, default=False)
| 39.727575 | 80 | 0.707978 |
ace7c4455a90f4b220792757ded0efe70c875d8f | 5,181 | py | Python | homeassistant/components/device_tracker/upc_connect.py | adolfoeliazat/voidhomecontrol | 6d733253811c553912e46e24debec818b28b0688 | [
"Apache-2.0"
] | 1 | 2021-08-06T09:54:39.000Z | 2021-08-06T09:54:39.000Z | homeassistant/components/device_tracker/upc_connect.py | adolfoeliazat/voidhomecontrol | 6d733253811c553912e46e24debec818b28b0688 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/device_tracker/upc_connect.py | adolfoeliazat/voidhomecontrol | 6d733253811c553912e46e24debec818b28b0688 | [
"Apache-2.0"
] | 1 | 2020-08-26T20:54:14.000Z | 2020-08-26T20:54:14.000Z | """
Support for UPC ConnectBox router.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.upc_connect/
"""
import asyncio
import logging
import xml.etree.ElementTree as ET
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
import homeassistant.helpers.config_validation as cv
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import CONF_HOST, CONF_PASSWORD
from homeassistant.helpers.aiohttp_client import async_get_clientsession
_LOGGER = logging.getLogger(__name__)
DEFAULT_IP = '192.168.0.1'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_IP): cv.string,
})
CMD_LOGIN = 15
CMD_LOGOUT = 16
CMD_DEVICES = 123
@asyncio.coroutine
def async_get_scanner(hass, config):
"""Return the UPC device scanner."""
scanner = UPCDeviceScanner(hass, config[DOMAIN])
success_init = yield from scanner.async_login()
return scanner if success_init else None
class UPCDeviceScanner(DeviceScanner):
"""This class queries a router running UPC ConnectBox firmware."""
def __init__(self, hass, config):
"""Initialize the scanner."""
self.hass = hass
self.host = config[CONF_HOST]
self.password = config[CONF_PASSWORD]
self.data = {}
self.token = None
self.headers = {
'X-Requested-With': 'XMLHttpRequest',
'Referer': "http://{}/index.html".format(self.host),
'User-Agent': ("Mozilla/5.0 (Windows NT 10.0; WOW64) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/47.0.2526.106 Safari/537.36")
}
self.websession = async_get_clientsession(hass)
@asyncio.coroutine
def async_logout(event):
"""Logout from upc connect box."""
yield from self._async_ws_function(CMD_LOGOUT)
self.token = None
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, async_logout)
@asyncio.coroutine
def async_scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
if self.token is None:
reconnect = yield from self.async_login()
if not reconnect:
_LOGGER.error("Not connected to %s", self.host)
return []
raw = yield from self._async_ws_function(CMD_DEVICES)
try:
xml_root = ET.fromstring(raw)
return [mac.text for mac in xml_root.iter('MACAddr')]
except (ET.ParseError, TypeError):
_LOGGER.warning("Can't read device from %s", self.host)
self.token = None
return []
@asyncio.coroutine
def async_get_device_name(self, device):
"""Ge the firmware doesn't save the name of the wireless device."""
return None
@asyncio.coroutine
def async_login(self):
"""Login into firmware and get first token."""
try:
# get first token
with async_timeout.timeout(10, loop=self.hass.loop):
response = yield from self.websession.get(
"http://{}/common_page/login.html".format(self.host)
)
yield from response.text()
self.token = response.cookies['sessionToken'].value
# login
data = yield from self._async_ws_function(CMD_LOGIN, {
'Username': 'NULL',
'Password': self.password,
})
# Successful?
return data is not None
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Can not load login page from %s", self.host)
return False
@asyncio.coroutine
def _async_ws_function(self, function, additional_form=None):
"""Execute a command on UPC firmware webservice."""
form_data = {
'token': self.token,
'fun': function
}
if additional_form:
form_data.update(additional_form)
redirects = function != CMD_DEVICES
try:
with async_timeout.timeout(10, loop=self.hass.loop):
response = yield from self.websession.post(
"http://{}/xml/getter.xml".format(self.host),
data=form_data,
headers=self.headers,
allow_redirects=redirects
)
# error?
if response.status != 200:
_LOGGER.warning("Receive http code %d", response.status)
self.token = None
return
# load data, store token for next request
self.token = response.cookies['sessionToken'].value
return (yield from response.text())
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Error on %s", function)
self.token = None
| 31.981481 | 76 | 0.607991 |
ace7c44d39c8b8a77909fbd062e3b198f17b64de | 7,851 | py | Python | simscale_sdk/models/force_load_bc.py | SimScaleGmbH/simscale-python-sdk | 34c881ca0be87e2b0bb315a9fee1d73f0da61e78 | [
"MIT"
] | 8 | 2021-01-22T13:41:03.000Z | 2022-01-03T09:00:10.000Z | simscale_sdk/models/force_load_bc.py | eljoelopez/simscale-python-sdk | 189f1337b2ab40feed123111ddead0cdecf83c93 | [
"MIT"
] | null | null | null | simscale_sdk/models/force_load_bc.py | eljoelopez/simscale-python-sdk | 189f1337b2ab40feed123111ddead0cdecf83c93 | [
"MIT"
] | 3 | 2021-03-18T15:52:52.000Z | 2022-01-03T08:59:30.000Z | # coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class ForceLoadBC(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'name': 'str',
'force': 'DimensionalVectorFunctionForce',
'scaling': 'DimensionalFunctionDimensionless',
'phase_angle': 'DimensionalAngle',
'topological_reference': 'TopologicalReference'
}
attribute_map = {
'type': 'type',
'name': 'name',
'force': 'force',
'scaling': 'scaling',
'phase_angle': 'phaseAngle',
'topological_reference': 'topologicalReference'
}
def __init__(self, type='FORCE_LOAD', name=None, force=None, scaling=None, phase_angle=None, topological_reference=None, local_vars_configuration=None): # noqa: E501
"""ForceLoadBC - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._type = None
self._name = None
self._force = None
self._scaling = None
self._phase_angle = None
self._topological_reference = None
self.discriminator = None
self.type = type
if name is not None:
self.name = name
if force is not None:
self.force = force
if scaling is not None:
self.scaling = scaling
if phase_angle is not None:
self.phase_angle = phase_angle
if topological_reference is not None:
self.topological_reference = topological_reference
@property
def type(self):
"""Gets the type of this ForceLoadBC. # noqa: E501
<p>This is a <b>force</b> boundary condition representing a <b>distributed force on the selection</b>. The total force is defined in the global coordinate system and each element of the assignment is loaded with a surface traction depending on the area of the element.<br /><a href= https://www.simscale.com/docs/simulation-setup/boundary-conditions/force/' target='_blank'>Learn more</a>.</p> Schema name: ForceLoadBC # noqa: E501
:return: The type of this ForceLoadBC. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ForceLoadBC.
<p>This is a <b>force</b> boundary condition representing a <b>distributed force on the selection</b>. The total force is defined in the global coordinate system and each element of the assignment is loaded with a surface traction depending on the area of the element.<br /><a href= https://www.simscale.com/docs/simulation-setup/boundary-conditions/force/' target='_blank'>Learn more</a>.</p> Schema name: ForceLoadBC # noqa: E501
:param type: The type of this ForceLoadBC. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def name(self):
"""Gets the name of this ForceLoadBC. # noqa: E501
:return: The name of this ForceLoadBC. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ForceLoadBC.
:param name: The name of this ForceLoadBC. # noqa: E501
:type: str
"""
self._name = name
@property
def force(self):
"""Gets the force of this ForceLoadBC. # noqa: E501
:return: The force of this ForceLoadBC. # noqa: E501
:rtype: DimensionalVectorFunctionForce
"""
return self._force
@force.setter
def force(self, force):
"""Sets the force of this ForceLoadBC.
:param force: The force of this ForceLoadBC. # noqa: E501
:type: DimensionalVectorFunctionForce
"""
self._force = force
@property
def scaling(self):
"""Gets the scaling of this ForceLoadBC. # noqa: E501
:return: The scaling of this ForceLoadBC. # noqa: E501
:rtype: DimensionalFunctionDimensionless
"""
return self._scaling
@scaling.setter
def scaling(self, scaling):
"""Sets the scaling of this ForceLoadBC.
:param scaling: The scaling of this ForceLoadBC. # noqa: E501
:type: DimensionalFunctionDimensionless
"""
self._scaling = scaling
@property
def phase_angle(self):
"""Gets the phase_angle of this ForceLoadBC. # noqa: E501
:return: The phase_angle of this ForceLoadBC. # noqa: E501
:rtype: DimensionalAngle
"""
return self._phase_angle
@phase_angle.setter
def phase_angle(self, phase_angle):
"""Sets the phase_angle of this ForceLoadBC.
:param phase_angle: The phase_angle of this ForceLoadBC. # noqa: E501
:type: DimensionalAngle
"""
self._phase_angle = phase_angle
@property
def topological_reference(self):
"""Gets the topological_reference of this ForceLoadBC. # noqa: E501
:return: The topological_reference of this ForceLoadBC. # noqa: E501
:rtype: TopologicalReference
"""
return self._topological_reference
@topological_reference.setter
def topological_reference(self, topological_reference):
"""Sets the topological_reference of this ForceLoadBC.
:param topological_reference: The topological_reference of this ForceLoadBC. # noqa: E501
:type: TopologicalReference
"""
self._topological_reference = topological_reference
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ForceLoadBC):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ForceLoadBC):
return True
return self.to_dict() != other.to_dict()
| 31.154762 | 441 | 0.612279 |
ace7c486faf79bd15c48ba178e7a5463ac8f3a70 | 22,612 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_06_01/operations/_inbound_nat_rules_operations.py | praveenkuttappan/azure-sdk-for-python | 4b79413667b7539750a6c7dde15737013a3d4bd5 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_06_01/operations/_inbound_nat_rules_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_06_01/operations/_inbound_nat_rules_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class InboundNatRulesOperations(object):
"""InboundNatRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.InboundNatRuleListResult"]
"""Gets all the inbound nat rules in a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either InboundNatRuleListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_06_01.models.InboundNatRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('InboundNatRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
inbound_nat_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
inbound_nat_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
inbound_nat_rule_name=inbound_nat_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
inbound_nat_rule_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.InboundNatRule"
"""Gets the specified load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: InboundNatRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_06_01.models.InboundNatRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
inbound_nat_rule_name, # type: str
inbound_nat_rule_parameters, # type: "_models.InboundNatRule"
**kwargs # type: Any
):
# type: (...) -> "_models.InboundNatRule"
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(inbound_nat_rule_parameters, 'InboundNatRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
inbound_nat_rule_name, # type: str
inbound_nat_rule_parameters, # type: "_models.InboundNatRule"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.InboundNatRule"]
"""Creates or updates a load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:param inbound_nat_rule_parameters: Parameters supplied to the create or update inbound nat
rule operation.
:type inbound_nat_rule_parameters: ~azure.mgmt.network.v2020_06_01.models.InboundNatRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either InboundNatRule or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_06_01.models.InboundNatRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
inbound_nat_rule_name=inbound_nat_rule_name,
inbound_nat_rule_parameters=inbound_nat_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
| 50.58613 | 232 | 0.670308 |
ace7c57297cb918811e6432ace44094fea2c20de | 1,138 | py | Python | create_nodes.py | revathivijay/VJTI-Navigation | 29d25c6ffa69937b73b75a064017799d6d583459 | [
"MIT"
] | null | null | null | create_nodes.py | revathivijay/VJTI-Navigation | 29d25c6ffa69937b73b75a064017799d6d583459 | [
"MIT"
] | null | null | null | create_nodes.py | revathivijay/VJTI-Navigation | 29d25c6ffa69937b73b75a064017799d6d583459 | [
"MIT"
] | 2 | 2020-09-09T11:20:08.000Z | 2022-02-24T21:20:22.000Z | import pandas as pd
import numpy as np
import json
import csv
def create_nodes(input_file, output_file):
"""
Function to create Nodes of the graph
Input: input_file : ".csv" format file
columns in the file:
1. Node name (Will be blank for minor nodes)
2. x_pos (x coordinate in grid)
3. y_pos (y coordinate in grid)
4. Type (Major/Minor) (not actually reqd)
5. Floor (0,1,2,3)
6. Building (If any building associated)
7. Map number (To map coordinate)
Returns json file of the format : {Node# : {Node details}}
"""
## for temporary storage of csv data
data = {}
## read csv data into dictionary
with open(input_file, 'r') as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
id = row['Node number']
data[id] = row
## dump dictionary data to json file
with open(output_file, 'w') as json_file:
json_file.write(json.dumps(data, indent=4))
## driver code
if __name__=='__main__':
create_nodes('colleg-loc-new.csv', 'nodes.json')
| 31.611111 | 62 | 0.609842 |
ace7c80c13430982714e988eba3752a5c4983a97 | 2,754 | py | Python | noggin/controller/authentication.py | mscherer/noggin | 0e3be29de02a1ba7aaf247493c5adf7d08e5f64b | [
"MIT"
] | null | null | null | noggin/controller/authentication.py | mscherer/noggin | 0e3be29de02a1ba7aaf247493c5adf7d08e5f64b | [
"MIT"
] | null | null | null | noggin/controller/authentication.py | mscherer/noggin | 0e3be29de02a1ba7aaf247493c5adf7d08e5f64b | [
"MIT"
] | null | null | null | import python_freeipa
from flask import flash, redirect, render_template, session, url_for
from flask_babel import _
from noggin import app
from noggin.form.sync_token import SyncTokenForm
from noggin.security.ipa import maybe_ipa_login, untouched_ipa_client
from noggin.utility.forms import FormError, handle_form_errors
def handle_login_form(form):
username = form.username.data.lower()
password = form.password.data
try:
# This call will set the cookie itself, we don't have to.
ipa = maybe_ipa_login(app, session, username, password)
except python_freeipa.exceptions.PasswordExpired:
flash(_('Password expired. Please reset it.'), 'danger')
return redirect(url_for('password_reset', username=username))
except python_freeipa.exceptions.Unauthorized as e:
raise FormError("non_field_errors", e.message)
except python_freeipa.exceptions.FreeIPAError as e:
# If we made it here, we hit something weird not caught above. We didn't
# bomb out, but we don't have IPA creds, either.
app.logger.error(
f'An unhandled error {e.__class__.__name__} happened while logging in user '
f'{username}: {e.message}'
)
raise FormError("non_field_errors", _('Could not log in to the IPA server.'))
if not ipa:
app.logger.error(
f'An unhandled situation happened while logging in user {username}: '
f'could not connect to the IPA server'
)
raise FormError("non_field_errors", _('Could not log in to the IPA server.'))
flash(_('Welcome, %(username)s!', username=username), 'success')
return redirect(url_for('user', username=username))
@app.route('/otp/sync/', methods=['GET', 'POST'])
def otp_sync():
form = SyncTokenForm()
if form.validate_on_submit():
with handle_form_errors(form):
try:
ipa = untouched_ipa_client(app)
ipa.otptoken_sync(
user=form.username.data,
password=form.password.data,
first_code=form.first_code.data,
second_code=form.second_code.data,
token=form.token.data,
)
flash(_('Token successfully synchronized'), category='success')
return redirect(url_for('root'))
except python_freeipa.exceptions.BadRequest as e:
app.logger.error(
f'An error {e.__class__.__name__} happened while syncing a token for user '
f'{form.username}: {e}'
)
raise FormError("non_field_errors", e.message)
return render_template('sync-token.html', sync_form=form)
| 39.913043 | 95 | 0.638707 |
ace7c8810024c1511cc0974f59c29f6ef7f43400 | 465 | py | Python | seahub/onlyoffice/settings.py | gzy403999903/seahub | 992e5852579a6d9e0cfdaf18c77ce0191cb64449 | [
"Apache-2.0"
] | null | null | null | seahub/onlyoffice/settings.py | gzy403999903/seahub | 992e5852579a6d9e0cfdaf18c77ce0191cb64449 | [
"Apache-2.0"
] | 6 | 2019-12-13T09:55:45.000Z | 2022-03-11T23:47:29.000Z | seahub/onlyoffice/settings.py | gzy403999903/seahub | 992e5852579a6d9e0cfdaf18c77ce0191cb64449 | [
"Apache-2.0"
] | 1 | 2019-05-16T06:58:16.000Z | 2019-05-16T06:58:16.000Z | # Copyright (c) 2012-2016 Seafile Ltd.
from django.conf import settings
ENABLE_ONLYOFFICE = getattr(settings, 'ENABLE_ONLYOFFICE', False)
ONLYOFFICE_APIJS_URL = getattr(settings, 'ONLYOFFICE_APIJS_URL', '')
ONLYOFFICE_FILE_EXTENSION = getattr(settings, 'ONLYOFFICE_FILE_EXTENSION', ())
ONLYOFFICE_EDIT_FILE_EXTENSION = getattr(settings, 'ONLYOFFICE_EDIT_FILE_EXTENSION', ())
VERIFY_ONLYOFFICE_CERTIFICATE = getattr(settings, 'VERIFY_ONLYOFFICE_CERTIFICATE', True)
| 51.666667 | 88 | 0.821505 |
ace7c90eeb9385b776a3c3c01aa7adeaafc85090 | 1,776 | py | Python | dbinit/rcv.py | zsffq999/helloworld | aba6b4bcbae496766bf371681fe126d11051d40b | [
"Apache-2.0"
] | null | null | null | dbinit/rcv.py | zsffq999/helloworld | aba6b4bcbae496766bf371681fe126d11051d40b | [
"Apache-2.0"
] | null | null | null | dbinit/rcv.py | zsffq999/helloworld | aba6b4bcbae496766bf371681fe126d11051d40b | [
"Apache-2.0"
] | null | null | null | import os
import sys
from dbinit.interests import *
def rcv_interests(price, rights, ration, ration_price, interests):
res = (price - interests + ration*ration_price) / (1 + rights + ration)
return res
def rcv_interests_abv(stk_info, interests_info):
for k, v in sorted(interests_info.items()):
idx = np.searchsorted(stk_info['datetime'], datetime(k/10000, k/100%100, k%100))
rights, ration, ration_price, interests = v
stk_info['before'][:idx] = rcv_interests(stk_info['before'][:idx], rights, ration, ration_price, interests)
stk_info['open'][:idx] = rcv_interests(stk_info['open'][:idx], rights, ration, ration_price, interests)
stk_info['high'][:idx] = rcv_interests(stk_info['high'][:idx], rights, ration, ration_price, interests)
stk_info['low'][:idx] = rcv_interests(stk_info['low'][:idx], rights, ration, ration_price, interests)
stk_info['close'][:idx] = rcv_interests(stk_info['close'][:idx], rights, ration, ration_price, interests)
#save 2 float numbers
for idx in ['before', 'open', 'high', 'low', 'close']:
stk_info[idx] = np.round(stk_info[idx]*100)/100.0
return stk_info
def rcv_batch(src, dst, interests_info, filter):
listdir = os.listdir(src)
listdir = sorted(listdir)
for stk in listdir:
data = np.load(os.path.join(src, stk))
stk = stk[:-4]
if filter(stk):
print('executing', stk, '...')
np.save(os.path.join(dst, stk), rcv_interests_abv(data, interests_info[stk] if stk in interests_info else {}))
if __name__ == "__main__":
series = sys.argv[1]
#load interests information
interests_info = interestsinfo('data/wsSHSZ_SPLITs_' + series)
#execute
rcv_batch('data/processing_day', 'data/rehv_day', interests_info, filter=lambda x:x[-1] == '1')
#rec_batch('processing_min', 'rehv_min', interests_info, 1)
| 39.466667 | 113 | 0.713401 |
ace7c92e914a4e6f1791d33007e3114390a3c59b | 577 | py | Python | setup.py | railnova/python-netlist | 6fbaa55aa0fc724f67c9358c0bfdceca78c30735 | [
"MIT"
] | null | null | null | setup.py | railnova/python-netlist | 6fbaa55aa0fc724f67c9358c0bfdceca78c30735 | [
"MIT"
] | 1 | 2020-11-25T22:21:33.000Z | 2020-11-26T15:03:42.000Z | setup.py | railnova/python-netlist | 6fbaa55aa0fc724f67c9358c0bfdceca78c30735 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Setup file for pyton_netlist.
Use setup.cfg to configure your project.
This file was generated with PyScaffold 3.2.3.
PyScaffold helps you to put up the scaffold of your new Python project.
Learn more under: https://pyscaffold.org/
"""
import sys
from pkg_resources import VersionConflict, require
from setuptools import setup
try:
require('setuptools>=38.3')
except VersionConflict:
print("Error: version of setuptools is too old (<38.3)!")
sys.exit(1)
if __name__ == "__main__":
setup(use_pyscaffold=True)
| 24.041667 | 75 | 0.705373 |
ace7c98b30b2b2b7792a6f7e30f7f328c97039fb | 498 | py | Python | box_rules/box_user_permission_updates.py | designing-penguin/panther-analysis | 26034cea4504f43227f8d3789225f6ca7b35dfe0 | [
"Apache-2.0"
] | null | null | null | box_rules/box_user_permission_updates.py | designing-penguin/panther-analysis | 26034cea4504f43227f8d3789225f6ca7b35dfe0 | [
"Apache-2.0"
] | null | null | null | box_rules/box_user_permission_updates.py | designing-penguin/panther-analysis | 26034cea4504f43227f8d3789225f6ca7b35dfe0 | [
"Apache-2.0"
] | null | null | null | def rule(event):
return (event.get('event_type') == 'CHANGE_FOLDER_PERMISSION' or
event.get('event_type') == 'ITEM_SHARED_CREATE' or
event.get('event_type') == 'ITEM_SHARED' or
event.get('event_type') == 'SHARE')
def title(event):
message = ('User [{}] exceeded threshold for number ' +
'of permission changes in the configured time frame.')
return message.format(
event.get('created_by', {}).get('login', '<UNKNOWN_USER>'))
| 38.307692 | 69 | 0.610442 |
ace7c9af9eb249c27faf798e56fca31751c8a6ad | 1,030 | py | Python | lrp_toolbox/training_test.py | KushDen/deepimportance_code_release | 5d16f1f95568dc402be6dfed4ad993ec0dbaa356 | [
"MIT"
] | 18 | 2020-07-11T01:58:02.000Z | 2021-09-17T07:08:34.000Z | lrp_toolbox/training_test.py | KushDen/deepimportance_code_release | 5d16f1f95568dc402be6dfed4ad993ec0dbaa356 | [
"MIT"
] | 13 | 2021-01-13T14:41:26.000Z | 2021-12-29T02:15:10.000Z | lrp_toolbox/training_test.py | KushDen/deepimportance_code_release | 5d16f1f95568dc402be6dfed4ad993ec0dbaa356 | [
"MIT"
] | 8 | 2020-02-19T21:30:30.000Z | 2022-03-11T01:34:33.000Z | '''
@author: Sebastian Lapuschkin
@maintainer: Sebastian Lapuschkin
@contact: sebastian.lapuschkin@hhi.fraunhofer.de, wojciech.samek@hhi.fraunhofer.de
@date: 30.09.2015
@version: 1.0
@copyright: Copyright (c) 2015-2017, Sebastian Lapuschkin, Alexander Binder, Gregoire Montavon, Klaus-Robert Mueller, Wojciech Samek
@license : BSD-2-Clause
'''
import modules
import model_io
import numpy as np ; na = np.newaxis
D,N = 2,200000
#this is the XOR problem.
X = np.random.rand(N,D) #we want [NxD] data
X = (X > 0.5)*1.0
Y = X[:,0] == X[:,1]
Y = (np.vstack((Y, np.invert(Y)))*1.0).T # and [NxC] labels
X += np.random.randn(N,D)*0.1 # add some noise to the data.
#build a network
nn = modules.Sequential([modules.Linear(2,3), modules.Tanh(),modules.Linear(3,15), modules.Tanh(), modules.Linear(15,15), modules.Tanh(), modules.Linear(15,3), modules.Tanh() ,modules.Linear(3,2), modules.SoftMax()])
#train the network.
nn.train(X,Y,Xval=X,Yval=Y, batchsize = 5)
#save the network
model_io.write(nn, '../xor_net_small_1000.txt')
| 28.611111 | 216 | 0.703883 |
ace7cabf4cbeec6b99544617e3026385577bde08 | 26,765 | py | Python | cctbx/regression/tst_geometry_restraints_2.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | null | null | null | cctbx/regression/tst_geometry_restraints_2.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | null | null | null | cctbx/regression/tst_geometry_restraints_2.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | null | null | null | from __future__ import absolute_import, division, print_function
from cctbx import geometry_restraints
from cctbx.geometry_restraints.distance_least_squares \
import distance_and_repulsion_least_squares
import cctbx.geometry_restraints.manager
from cctbx import crystal
from cctbx.array_family import flex
from six.moves import cStringIO as StringIO
import libtbx.utils
from libtbx.test_utils import approx_equal, show_diff
import libtbx.load_env
import sys, os
from mmtbx.monomer_library import pdb_interpretation
from six.moves import range
# ============================================================
# Edit notes: show_interactions function is obsolete and removed from GRM
# 5/22/2015
# ============================================================
def exercise_with_zeolite(verbose):
if (not libtbx.env.has_module("iotbx")):
print("Skipping exercise_with_zeolite(): iotbx not available")
return
from iotbx.kriber import strudat
atlas_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/misc/strudat_zeolite_atlas",
test=os.path.isfile)
if (atlas_file is None):
print("Skipping exercise_with_zeolite(): input file not available")
return
strudat_contents = strudat.read_all_entries(open(atlas_file))
strudat_entry = strudat_contents.get("YUG")
si_structure = strudat_entry.as_xray_structure()
if (verbose):
out = sys.stdout
else:
out = StringIO()
drls = distance_and_repulsion_least_squares(
si_structure=si_structure,
distance_cutoff=3.5,
nonbonded_repulsion_function_type="prolsq",
n_macro_cycles=2,
out=out)
#
nbp = drls.geometry_restraints_manager.pair_proxies().nonbonded_proxies
assert nbp.n_total() > 50
# expected is 60, but the exact number depends on the minimizer
#
site_labels = drls.minimized_structure.scatterers().extract_labels()
sites_cart = drls.start_structure.sites_cart()
pair_proxies = drls.geometry_restraints_manager.pair_proxies()
out = StringIO()
pair_proxies.bond_proxies.show_sorted(
by_value="residual",
sites_cart=sites_cart,
site_labels=site_labels,
f=out)
if (verbose):
sys.stdout.write(out.getvalue())
assert len(out.getvalue().splitlines()) == 48*4+2
assert out.getvalue().splitlines()[-1].find("remaining") < 0
out = StringIO()
pair_proxies.bond_proxies.show_sorted(
by_value="residual",
sites_cart=sites_cart,
site_labels=site_labels,
f=out,
prefix="0^",
max_items=28)
if (verbose):
sys.stdout.write(out.getvalue())
assert not show_diff(out.getvalue().replace("e-00", "e-0"), """\
0^Bond restraints: 48
0^Sorted by residual:
0^bond O3
0^ O4
0^ ideal model delta sigma weight residual
0^ 2.629 2.120 0.509 1.56e+00 4.10e-01 1.06e-01
...
0^bond SI1
0^ SI1
0^ ideal model delta sigma weight residual sym.op.
0^ 3.071 3.216 -0.145 2.08e+00 2.31e-01 4.83e-03 -x+1/2,-y+1/2,-z+1
0^... (remaining 20 not shown)
""",
selections=[range(6), range(-5,0)])
out = StringIO()
pair_proxies.bond_proxies.show_sorted(
by_value="delta",
sites_cart=sites_cart,
site_labels=site_labels,
f=out,
prefix="0^",
max_items=28)
if (verbose):
sys.stdout.write(out.getvalue())
assert not show_diff(out.getvalue().replace("e-00", "e-0"), """\
0^Bond restraints: 48
0^Sorted by delta:
0^bond O3
0^ O4
0^ ideal model delta sigma weight residual
0^ 2.629 2.120 0.509 1.56e+00 4.10e-01 1.06e-01
...
0^... (remaining 20 not shown)
""",
selections=[range(6), [-1]])
site_labels_long = ["abc"+label+"def" for label in site_labels]
out = StringIO()
pair_proxies.bond_proxies.show_sorted(
by_value="residual",
sites_cart=sites_cart,
site_labels=site_labels_long,
f=out,
prefix="^0",
max_items=28)
if (verbose):
sys.stdout.write(out.getvalue())
assert not show_diff(out.getvalue().replace("e-00", "e-0"), """\
^0Bond restraints: 48
^0Sorted by residual:
^0bond abcO3def
^0 abcO4def
^0 ideal model delta sigma weight residual
^0 2.629 2.120 0.509 1.56e+00 4.10e-01 1.06e-01
...
^0bond abcSI1def
^0 abcSI1def
^0 ideal model delta sigma weight residual sym.op.
^0 3.071 3.216 -0.145 2.08e+00 2.31e-01 4.83e-03 -x+1/2,-y+1/2,-z+1
^0... (remaining 20 not shown)
""",
selections=[range(6), range(-5,0)])
out = StringIO()
pair_proxies.bond_proxies.show_sorted(
by_value="residual",
sites_cart=sites_cart,
f=out,
prefix=".=",
max_items=28)
if (verbose):
sys.stdout.write(out.getvalue())
assert not show_diff(out.getvalue().replace("e-00", "e-0"), """\
.=Bond restraints: 48
.=Sorted by residual:
.=bond 4
.= 5
.= ideal model delta sigma weight residual
.= 2.629 2.120 0.509 1.56e+00 4.10e-01 1.06e-01
...
.=bond 0
.= 0
.= ideal model delta sigma weight residual sym.op.
.= 3.071 3.216 -0.145 2.08e+00 2.31e-01 4.83e-03 -x+1/2,-y+1/2,-z+1
.=... (remaining 20 not shown)
""",
selections=[range(6), range(-5,0)])
out = StringIO()
pair_proxies.bond_proxies.show_sorted(
by_value="residual",
sites_cart=sites_cart,
f=out,
prefix="-+",
max_items=1)
if (verbose):
sys.stdout.write(out.getvalue())
assert not show_diff(out.getvalue().replace("e-00", "e-0"), """\
-+Bond restraints: 48
-+Sorted by residual:
-+bond 4
-+ 5
-+ ideal model delta sigma weight residual
-+ 2.629 2.120 0.509 1.56e+00 4.10e-01 1.06e-01
-+... (remaining 47 not shown)
""")
out = StringIO()
pair_proxies.bond_proxies.show_sorted(
by_value="residual",
sites_cart=sites_cart,
f=out,
prefix="=+",
max_items=0)
if (verbose):
sys.stdout.write(out.getvalue())
assert not show_diff(out.getvalue(), """\
=+Bond restraints: 48
=+Sorted by residual:
=+... (remaining 48 not shown)
""")
#
sites_cart = si_structure.sites_cart()
site_labels = [sc.label for sc in si_structure.scatterers()]
asu_mappings = si_structure.asu_mappings(buffer_thickness=3.5)
for min_cubicle_edge in [0, 5]:
pair_generator = crystal.neighbors_fast_pair_generator(
asu_mappings=asu_mappings,
distance_cutoff=asu_mappings.buffer_thickness(),
minimal=False,
min_cubicle_edge=min_cubicle_edge)
sorted_asu_proxies = geometry_restraints.nonbonded_sorted_asu_proxies(
asu_mappings=asu_mappings)
while (not pair_generator.at_end()):
p = geometry_restraints.nonbonded_asu_proxy(
pair=next(pair_generator),
vdw_distance=3)
sorted_asu_proxies.process(p)
out = StringIO()
sorted_asu_proxies.show_sorted(
by_value="delta",
sites_cart=sites_cart,
site_labels=site_labels,
f=out,
prefix="d%")
if (verbose):
sys.stdout.write(out.getvalue())
assert not show_diff(out.getvalue(), """\
d%Nonbonded interactions: 7
d%Sorted by model distance:
...
d%nonbonded SI2
d% SI2
d% model vdw sym.op.
d% 3.092 3.000 -x+1,y,-z
...
d%nonbonded SI1
d% SI1
d% model vdw sym.op.
d% 3.216 3.000 -x+1/2,-y+1/2,-z+1
""",
selections=[range(2), range(10,14), range(26,30)])
out = StringIO()
sorted_asu_proxies.show_sorted(
by_value="delta",
sites_cart=sites_cart,
f=out,
prefix="*j",
max_items=5)
if (verbose):
sys.stdout.write(out.getvalue())
assert not show_diff(out.getvalue(), """\
*jNonbonded interactions: 7
*jSorted by model distance:
...
*jnonbonded 0
*j 1
*j model vdw
*j 3.107 3.000
*jnonbonded 0
*j 0
*j model vdw sym.op.
*j 3.130 3.000 -x+1,y,-z+1
*j... (remaining 2 not shown)
""",
selections=[range(2), range(-9,0)])
out = StringIO()
sorted_asu_proxies.show_sorted(
by_value="delta",
sites_cart=sites_cart,
f=out,
prefix="@r",
max_items=0)
if (verbose):
sys.stdout.write(out.getvalue())
assert not show_diff(out.getvalue(), """\
@rNonbonded interactions: 7
""")
enk_pdb = """\
CRYST1 10.851 13.095 21.192 90.00 90.00 90.00 P 21 21 21
ATOM 1 CA TYR A 1 8.787 2.175 5.487 1.00 0.91 C
ATOM 2 CB TYR A 1 8.968 2.012 6.998 1.00 1.05 C
ATOM 3 CG TYR A 1 9.527 0.669 7.410 1.00 1.04 C
ATOM 4 CD2 TYR A 1 8.768 -0.222 8.157 1.00 1.37 C
ATOM 5 CE2 TYR A 1 9.275 -1.449 8.537 1.00 1.50 C
ATOM 6 CZ TYR A 1 10.559 -1.798 8.175 1.00 1.15 C
ATOM 7 CE1 TYR A 1 11.334 -0.929 7.437 1.00 1.37 C
ATOM 8 CD1 TYR A 1 10.818 0.296 7.062 1.00 1.34 C
ATOM 9 C TYR A 1 7.767 1.172 4.959 1.00 0.85 C
ATOM 10 OH TYR A 1 11.069 -3.019 8.553 1.00 1.40 O
ATOM 11 O TYR A 1 6.576 1.472 4.870 1.00 1.01 O
ATOM 12 N TYR A 1 8.388 3.541 5.166 1.00 1.11 N
ATOM 13 CA GLY A 2 7.387 -1.050 4.044 1.00 1.14 C
ATOM 14 C GLY A 2 6.345 -1.569 5.016 1.00 0.97 C
ATOM 15 O GLY A 2 5.234 -1.918 4.619 1.00 1.15 O
ATOM 16 N GLY A 2 8.241 -0.020 4.608 1.00 0.93 N
ATOM 17 CA GLY A 3 5.804 -2.100 7.324 1.00 1.36 C
ATOM 18 C GLY A 3 4.651 -1.149 7.578 1.00 1.01 C
ATOM 19 O GLY A 3 3.598 -1.553 8.071 1.00 1.38 O
ATOM 20 N GLY A 3 6.706 -1.622 6.294 1.00 1.11 N
ATOM 21 CA PHE A 4 3.819 1.134 7.419 1.00 0.89 C
ATOM 22 CB PHE A 4 4.397 2.380 8.094 1.00 1.13 C
ATOM 23 CG PHE A 4 4.930 2.130 9.475 1.00 1.00 C
ATOM 24 CD1 PHE A 4 6.267 1.825 9.673 1.00 1.51 C
ATOM 25 CE1 PHE A 4 6.760 1.595 10.943 1.00 1.80 C
ATOM 26 CZ PHE A 4 5.916 1.667 12.033 1.00 1.59 C
ATOM 27 CE2 PHE A 4 4.582 1.970 11.850 1.00 1.49 C
ATOM 28 CD2 PHE A 4 4.095 2.199 10.577 1.00 1.24 C
ATOM 29 C PHE A 4 3.185 1.509 6.084 1.00 0.94 C
ATOM 30 N PHE A 4 4.852 0.121 7.242 1.00 0.88 N
ATOM 31 O PHE A 4 2.361 2.421 6.010 1.00 1.47 O
ATOM 32 CA LEU A 5 3.055 1.059 3.693 1.00 0.87 C
ATOM 33 CB LEU A 5 3.965 0.435 2.634 1.00 1.13 C
ATOM 34 CG LEU A 5 3.531 0.603 1.177 1.00 1.16 C
ATOM 35 CD1 LEU A 5 3.411 2.076 0.818 1.00 1.88 C
ATOM 36 CD2 LEU A 5 4.502 -0.103 0.245 1.00 1.67 C
ATOM 37 C LEU A 5 1.634 0.527 3.541 1.00 0.87 C
ATOM 38 N LEU A 5 3.576 0.800 5.030 1.00 0.92 N
ATOM 39 OXT LEU A 5 1.246 -0.440 4.196 1.00 1.23 O
ATOM 41 O HOH B 1 14.655 -4.248 8.995 1.00 1.51 O
ATOM 81 H1 HOH B 1 14.954 -3.400 8.664 1.00 1.13 H
ATOM 82 H2 HOH B 1 13.712 -4.246 8.805 1.00 0.76 H
ATOM 42 O HOH B 2 12.055 -3.540 8.243 1.00 1.81 O
ATOM 83 H1 HOH B 2 11.456 -4.167 7.841 1.00 0.96 H
ATOM 84 H2 HOH B 2 11.476 -3.010 8.803 1.00 1.17 H
ATOM 43 O HOH B 3 9.622 -2.103 5.551 1.00 2.24 O
ATOM 85 H1 HOH B 3 10.394 -1.650 5.193 1.00 1.02 H
ATOM 86 H2 HOH B 3 9.588 -2.843 4.937 1.00 1.29 H
END
"""
def exercise_with_pdb(verbose):
if (not libtbx.env.has_module(name="mmtbx")):
print("Skipping exercise_with_pdb():", \
"mmtbx.monomer_library.pdb_interpretation not available")
return
if (libtbx.env.find_in_repositories(relative_path="chem_data") is None):
print("Skipping exercise_with_pdb(): chem_data directory not available")
return
if (verbose):
out = sys.stdout
else:
out = StringIO()
open("tmp_cctbx_geometry_restraints.pdb", "w").write(enk_pdb)
pdb_interpretation_params = pdb_interpretation.master_params.extract()
pdb_interpretation_params.sort_atoms=False
processed_pdb_file = pdb_interpretation.run(
args=["tmp_cctbx_geometry_restraints.pdb"],
strict_conflict_handling=False,
params=pdb_interpretation_params,
log=out,)
geo = processed_pdb_file.geometry_restraints_manager()
site_labels = processed_pdb_file.xray_structure().scatterers() \
.extract_labels()
#
assert approx_equal(flex.min(geo.nonbonded_model_distances()), 0.4777342)
#
geo._sites_cart_used_for_pair_proxies = None
#
sel0 = geo.simple_edge_list()
assert len(sel0) == 46
assert sel0[:4] == [(0, 1), (0, 8), (0, 11), (1, 2)]
assert sel0[-4:] == [(42, 43), (42, 44), (45, 46), (45, 47)]
geo.bond_params_table[13][14].slack = 0.1
geo.bond_params_table[28][30].slack = 0.3
sel = geo.simple_edge_list()
assert sorted(set(sel0) - set(sel)) == [(13, 14), (28, 30)]
sel = geo.simple_edge_list(omit_slack_greater_than=0.2)
assert sorted(set(sel0) - set(sel)) == [(28, 30)]
#
d = geo.discard_symmetry(new_unit_cell=(10,10,10,90,90,90))
assert d.site_symmetry_table.special_position_indices().size()==0
#
clusters = geo.rigid_clusters_due_to_dihedrals_and_planes(
constrain_dihedrals_with_sigma_less_than=10)
assert sorted([tuple(sorted(c)) for c in clusters]) == [
(0, 8, 10, 15), (0, 8, 12, 15), (1, 2, 3, 4, 5, 6, 7, 9),
(5, 6, 7, 9), (12, 13, 14, 19), (12, 13, 16, 19), (16, 17, 18, 29),
(16, 17, 20, 29), (20, 28, 30, 37), (20, 28, 31, 37),
(21, 22, 23, 24, 25, 26, 27)]
def exercise_non_crystallographic_conserving_bonds_and_angles():
sites_cart, geo = geometry_restraints.manager \
.construct_non_crystallographic_conserving_bonds_and_angles(
sites_cart=flex.vec3_double([
(10.949, 12.815, 15.189),
(10.405, 13.954, 15.917),
(10.779, 15.262, 15.227),
( 9.916, 16.090, 14.936)]),
edge_list_bonds=[(0, 1), (1, 2), (2, 3)],
edge_list_angles=[(0, 2), (1, 3)])
assert approx_equal(sites_cart, [
(6.033, 5.000, 5.253),
(5.489, 6.139, 5.981),
(5.863, 7.447, 5.291),
(5.000, 8.275, 5.000)])
assert approx_equal(geo.energies_sites(sites_cart=sites_cart).target, 0)
sites_cart_noise = flex.vec3_double([ # Just to make all residuals unique,
(6.043, 5.030, 5.233), # so that the sorted bond list below
(5.469, 6.119, 5.941), # has the same order on all platforms.
(5.893, 7.487, 5.281),
(5.040, 8.225, 5.020)])
sio = StringIO()
geo.show_sorted(sites_cart=sites_cart_noise, f=sio)
expected_first_part = """\
Bond restraints: 5
Sorted by residual:
bond 2
3
ideal model delta sigma weight residual
1.231 1.158 0.073 1.00e-01 1.00e+02 5.35e-01
bond 1
2
ideal model delta sigma weight residual
1.525 1.577 -0.052 1.00e-01 1.00e+02 2.66e-01
bond 1
3
ideal model delta sigma weight residual
2.401 2.338 0.063 1.41e-01 5.00e+01 1.96e-01
bond 0
1
ideal model delta sigma weight residual
1.457 1.420 0.037 1.00e-01 1.00e+02 1.37e-01
bond 0
2
ideal model delta sigma weight residual
2.453 2.462 -0.009 1.41e-01 5.00e+01 3.92e-03
"""
assert not show_diff(sio.getvalue(), expected_first_part + """\
Nonbonded interactions: 0
""")
#
sites_cart, geo = geometry_restraints.manager \
.construct_non_crystallographic_conserving_bonds_and_angles(
sites_cart=flex.vec3_double([
(10.949, 12.815, 15.189),
(10.405, 13.954, 15.917),
(10.779, 15.262, 15.227),
( 9.916, 16.090, 14.936),
(10.749, 12.615, 15.389)]),
edge_list_bonds=[(0, 1), (1, 2), (2, 3)],
edge_list_angles=[(0, 2), (1, 3)])
sites_cart_noise.append(sites_cart[-1])
sio = StringIO()
geo.show_sorted(sites_cart=sites_cart_noise, f=sio)
assert not show_diff(sio.getvalue(), expected_first_part + """\
Nonbonded interactions: 2
Sorted by model distance:
nonbonded 0
4
model vdw
0.306 1.200
nonbonded 1
4
model vdw
1.274 1.200
""")
def exercise_na_restraints_output_to_geo(verbose=False):
for dependency in ("chem_data", "ksdssp"):
if not libtbx.env.has_module(dependency):
print("Skipping exercise_na_restraints_output_to_geo(): %s not available" %(
dependency))
return
pdb_str_1dpl_cutted="""\
CRYST1 24.627 42.717 46.906 90.00 90.00 90.00 P 21 21 21 8
ATOM 184 P DG A 9 9.587 13.026 19.037 1.00 6.28 P
ATOM 185 OP1 DG A 9 9.944 14.347 19.602 1.00 8.07 O
ATOM 186 OP2 DG A 9 10.654 12.085 18.639 1.00 8.27 O
ATOM 187 O5' DG A 9 8.717 12.191 20.048 1.00 5.88 O
ATOM 188 C5' DG A 9 7.723 12.833 20.854 1.00 5.45 C
ATOM 189 C4' DG A 9 7.145 11.818 21.807 1.00 5.40 C
ATOM 190 O4' DG A 9 6.435 10.777 21.087 1.00 5.77 O
ATOM 191 C3' DG A 9 8.142 11.036 22.648 1.00 5.10 C
ATOM 192 O3' DG A 9 8.612 11.838 23.723 1.00 5.90 O
ATOM 193 C2' DG A 9 7.300 9.857 23.068 1.00 5.97 C
ATOM 194 C1' DG A 9 6.619 9.536 21.805 1.00 5.97 C
ATOM 195 N9 DG A 9 7.390 8.643 20.931 1.00 5.97 N
ATOM 196 C8 DG A 9 8.074 8.881 19.775 1.00 6.62 C
ATOM 197 N7 DG A 9 8.647 7.820 19.249 1.00 6.57 N
ATOM 198 C5 DG A 9 8.308 6.806 20.141 1.00 6.22 C
ATOM 199 C6 DG A 9 8.620 5.431 20.136 1.00 6.03 C
ATOM 200 O6 DG A 9 9.297 4.803 19.296 1.00 7.21 O
ATOM 201 N1 DG A 9 8.101 4.773 21.247 1.00 6.10 N
ATOM 202 C2 DG A 9 7.365 5.351 22.260 1.00 6.24 C
ATOM 203 N2 DG A 9 6.948 4.569 23.241 1.00 7.88 N
ATOM 204 N3 DG A 9 7.051 6.652 22.257 1.00 6.53 N
ATOM 205 C4 DG A 9 7.539 7.295 21.184 1.00 5.69 C
ATOM 206 P DC A 10 10.081 11.538 24.300 1.00 5.91 P
ATOM 207 OP1 DC A 10 10.273 12.645 25.291 1.00 7.27 O
ATOM 208 OP2 DC A 10 11.063 11.363 23.228 1.00 6.84 O
ATOM 209 O5' DC A 10 9.953 10.128 25.026 1.00 5.75 O
ATOM 210 C5' DC A 10 9.077 9.959 26.149 1.00 5.87 C
ATOM 211 C4' DC A 10 9.188 8.549 26.672 1.00 5.56 C
ATOM 212 O4' DC A 10 8.708 7.612 25.667 1.00 5.70 O
ATOM 213 C3' DC A 10 10.580 8.059 27.007 1.00 5.27 C
ATOM 214 O3' DC A 10 11.010 8.447 28.315 1.00 5.83 O
ATOM 215 C2' DC A 10 10.422 6.549 26.893 1.00 5.34 C
ATOM 216 C1' DC A 10 9.436 6.405 25.754 1.00 5.23 C
ATOM 217 N1 DC A 10 10.113 6.168 24.448 1.00 5.30 N
ATOM 218 C2 DC A 10 10.514 4.871 24.152 1.00 5.28 C
ATOM 219 O2 DC A 10 10.283 3.972 25.000 1.00 5.75 O
ATOM 220 N3 DC A 10 11.131 4.627 22.965 1.00 5.65 N
ATOM 221 C4 DC A 10 11.395 5.628 22.138 1.00 5.80 C
ATOM 222 N4 DC A 10 12.034 5.327 21.005 1.00 6.75 N
ATOM 223 C5 DC A 10 11.029 6.970 22.449 1.00 5.99 C
ATOM 224 C6 DC A 10 10.394 7.203 23.612 1.00 5.56 C
ATOM 226 O5' DG B 11 12.424 -4.393 18.427 1.00 22.70 O
ATOM 227 C5' DG B 11 12.380 -5.516 19.282 1.00 14.75 C
ATOM 228 C4' DG B 11 11.969 -5.112 20.676 1.00 10.42 C
ATOM 229 O4' DG B 11 12.972 -4.192 21.210 1.00 10.51 O
ATOM 230 C3' DG B 11 10.649 -4.394 20.782 1.00 8.57 C
ATOM 231 O3' DG B 11 9.618 -5.363 20.846 1.00 8.69 O
ATOM 232 C2' DG B 11 10.822 -3.597 22.051 1.00 8.63 C
ATOM 233 C1' DG B 11 12.236 -3.233 21.980 1.00 9.81 C
ATOM 234 N9 DG B 11 12.509 -1.902 21.305 1.00 8.66 N
ATOM 235 C8 DG B 11 13.175 -1.667 20.135 1.00 9.57 C
ATOM 236 N7 DG B 11 13.255 -0.407 19.824 1.00 9.04 N
ATOM 237 C5 DG B 11 12.613 0.235 20.869 1.00 7.63 C
ATOM 238 C6 DG B 11 12.388 1.612 21.119 1.00 7.05 C
ATOM 239 O6 DG B 11 12.723 2.590 20.419 1.00 7.81 O
ATOM 240 N1 DG B 11 11.715 1.819 22.317 1.00 6.27 N
ATOM 241 C2 DG B 11 11.264 0.828 23.159 1.00 6.05 C
ATOM 242 N2 DG B 11 10.611 1.219 24.248 1.00 5.85 N
ATOM 243 N3 DG B 11 11.483 -0.457 22.942 1.00 6.55 N
ATOM 244 C4 DG B 11 12.150 -0.687 21.797 1.00 6.84 C
ATOM 245 P DC B 12 8.134 -5.009 20.350 1.00 8.13 P
ATOM 246 OP1 DC B 12 7.367 -6.252 20.459 1.00 10.02 O
ATOM 247 OP2 DC B 12 8.172 -4.307 19.052 1.00 9.79 O
ATOM 248 O5' DC B 12 7.564 -3.912 21.389 1.00 8.18 O
ATOM 249 C5' DC B 12 7.275 -4.296 22.719 1.00 8.00 C
ATOM 250 C4' DC B 12 6.856 -3.057 23.487 1.00 8.01 C
ATOM 251 O4' DC B 12 8.006 -2.146 23.615 1.00 7.35 O
ATOM 252 C3' DC B 12 5.763 -2.208 22.890 1.00 7.04 C
ATOM 253 O3' DC B 12 4.456 -2.800 23.100 1.00 9.82 O
ATOM 254 C2' DC B 12 6.019 -0.916 23.630 1.00 6.50 C
ATOM 255 C1' DC B 12 7.467 -0.808 23.608 1.00 7.35 C
ATOM 256 N1 DC B 12 8.040 -0.143 22.396 1.00 6.64 N
ATOM 257 C2 DC B 12 8.017 1.257 22.382 1.00 5.68 C
ATOM 258 O2 DC B 12 7.524 1.832 23.357 1.00 6.32 O
ATOM 259 N3 DC B 12 8.543 1.930 21.312 1.00 6.18 N
ATOM 260 C4 DC B 12 9.009 1.236 20.266 1.00 6.48 C
ATOM 261 N4 DC B 12 9.518 1.926 19.243 1.00 7.43 N
ATOM 262 C5 DC B 12 9.012 -0.198 20.248 1.00 6.83 C
ATOM 263 C6 DC B 12 8.502 -0.825 21.311 1.00 6.80 C
"""
identical_portions = [
"""\
Histogram of bond lengths:
1.23 - 1.31: 5
1.31 - 1.39: 25
1.39 - 1.46: 27
1.46 - 1.54: 25
1.54 - 1.61: 5
Bond restraints: 87""",
'''\
Histogram of bond angle deviations from ideal:
99.49 - 105.87: 23
105.87 - 112.26: 36
112.26 - 118.65: 28
118.65 - 125.04: 30
125.04 - 131.42: 13
Bond angle restraints: 130''',
]
open("tst_cctbx_geometry_restraints_2_na.pdb", "w").write(pdb_str_1dpl_cutted)
out1 = StringIO()
out2 = StringIO()
from mmtbx.monomer_library.server import MonomerLibraryServerError
try:
processed_pdb_file = pdb_interpretation.run(
args=["tst_cctbx_geometry_restraints_2_na.pdb"],
strict_conflict_handling=False,
log=out1)
except MonomerLibraryServerError:
print("Skipping exercise_na_restraints_output_to_geo(): Encountered MonomerLibraryServerError.\n")
print("Is the CCP4 monomer library installed and made available through environment variables MMTBX_CCP4_MONOMER_LIB or CLIBD_MON?")
return
geo1 = processed_pdb_file.geometry_restraints_manager()
hbp = geo1.get_n_hbond_proxies()
from mmtbx import monomer_library
params = monomer_library.pdb_interpretation.master_params.extract()
params.secondary_structure.enabled=True
processed_pdb_file = pdb_interpretation.run(
args=["tst_cctbx_geometry_restraints_2_na.pdb"],
params=params,
strict_conflict_handling=False,
log=out2)
geo2 = processed_pdb_file.geometry_restraints_manager()
hbp = geo2.get_n_hbond_proxies()
v_out1 = out1.getvalue()
v_out2 = out2.getvalue()
assert v_out2.find("""\
Restraints generated for nucleic acids:
6 hydrogen bonds
12 hydrogen bond angles
0 basepair planarities
2 basepair parallelities
2 stacking parallelities""") > 0
for v in [v_out1, v_out2]:
for portion in identical_portions:
if not v.find(portion) > 0:
print("This portion was not found:\n%s\n=====End of portion." % portion)
assert 0, "the portion above does not match expected portion."
# check .geo output
geo_identical_portions = ["Bond restraints: 87",
"Bond angle restraints: 130", "Dihedral angle restraints: 33",
"Chirality restraints: 15",
"Planarity restraints: 4"]
ss_geo_portions = ["Bond-like restraints: 6",
'Secondary Structure restraints around h-bond angle restraints: 12',
"Parallelity restraints: 4",
"Nonbonded interactions: 504"]
non_ss_geo_portions = [
#"Bond-like restraints: 0",
#'Secondary Structure restraints around h-bond angle restraints: 0',
"Parallelity restraints: 0",
"Nonbonded interactions: 526"]
acp = processed_pdb_file.all_chain_proxies
sites_cart = acp.sites_cart_exact()
site_labels = [atom.id_str() for atom in acp.pdb_atoms]
geo_out1 = StringIO()
geo_out2 = StringIO()
geo1.show_sorted(sites_cart=sites_cart, site_labels=site_labels, f=geo_out1)
geo2.show_sorted(sites_cart=sites_cart, site_labels=site_labels, f=geo_out2)
v_geo_out_noss = geo_out1.getvalue()
v_geo_out_ss = geo_out2.getvalue()
for portion in geo_identical_portions+ss_geo_portions:
assert v_geo_out_ss.find(portion) >= 0
for portion in geo_identical_portions+non_ss_geo_portions:
assert v_geo_out_noss.find(portion) >= 0
def exercise_all(args):
verbose = "--verbose" in args
exercise_with_zeolite(verbose=verbose)
exercise_with_pdb(verbose=verbose)
exercise_non_crystallographic_conserving_bonds_and_angles()
exercise_na_restraints_output_to_geo(verbose=verbose)
print(libtbx.utils.format_cpu_times())
if (__name__ == "__main__"):
exercise_all(sys.argv[1:])
| 42.149606 | 136 | 0.579002 |
ace7cc128e46f98e47c7a3f13587e4a9dd6d785e | 252 | py | Python | setup.py | cgosmeyer/record | 90f514b719bb1d23da21bd20d076271192245c85 | [
"MIT"
] | null | null | null | setup.py | cgosmeyer/record | 90f514b719bb1d23da21bd20d076271192245c85 | [
"MIT"
] | null | null | null | setup.py | cgosmeyer/record | 90f514b719bb1d23da21bd20d076271192245c85 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from setuptools import find_packages
from setuptools import setup
setup(name = 'record_imports',
author = 'C.M. Gosmeyer',
url = 'https://github.com/cgosmeyer/record_imports',
packages = find_packages(),
) | 25.2 | 58 | 0.686508 |
ace7cc605bc27038c4df90a39fa14e66c9d37a34 | 833 | py | Python | fairseq/models/nat/__init__.py | DanielLiuYZ/DSLP | bea7ea0deba183e78d11edc0d11accca44b45120 | [
"MIT"
] | 2 | 2021-12-12T05:43:02.000Z | 2021-12-12T05:43:16.000Z | fairseq/models/nat/__init__.py | DanielLiuYZ/DSLP | bea7ea0deba183e78d11edc0d11accca44b45120 | [
"MIT"
] | null | null | null | fairseq/models/nat/__init__.py | DanielLiuYZ/DSLP | bea7ea0deba183e78d11edc0d11accca44b45120 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
from .fairseq_nat_model import *
from .nat import *
from .nat_crf_transformer import *
from .iterative_nonautoregressive_transformer import *
from .cmlm_transformer import *
from .levenshtein_transformer import *
from .insertion_transformer import *
from .nat_glat import *
from .nat_sd import *
from .nat_ctc_sd import *
from .nat_ctc_s import *
from .nat_ctc_d import *
from .nat_sd_shared import * # Note: redundant remove in the future.
from .nat_s import *
from .nat_d import *
from .nat_ctc import *
from .cmlm_sd import *
from .nat_cf import *
from .nat_sd_ss import *
from .nat_ctc_sd_ss import *
from .cmlm_sd_ss import *
| 28.724138 | 69 | 0.771909 |
ace7ccc3c72c219d2e54a61862e024c2056b2389 | 4,831 | py | Python | unet_model/unet_pointnet.py | lvxingvir/template | 089f5817e031a7c2b2d82e239158a6a5488b3b26 | [
"MIT"
] | null | null | null | unet_model/unet_pointnet.py | lvxingvir/template | 089f5817e031a7c2b2d82e239158a6a5488b3b26 | [
"MIT"
] | null | null | null | unet_model/unet_pointnet.py | lvxingvir/template | 089f5817e031a7c2b2d82e239158a6a5488b3b26 | [
"MIT"
] | null | null | null | __copyright__ = \
"""
Copyright ©right © (c) 2019 The Board of Trustees of Purdue University and the Purdue Research Foundation.
All rights reserved.
This software is covered by US patents and copyright.
This source code is to be used for academic research purposes only, and no commercial use is allowed.
For any questions, please contact Edward J. Delp (ace@ecn.purdue.edu) at Purdue University.
Last Modified: 03/03/2019
"""
__license__ = "CC BY-NC-SA 4.0"
__authors__ = "Javier Ribera, David Guera, Yuhao Chen, Edward J. Delp"
__version__ = "1.5.1"
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from .unet_parts import *
class UNet(nn.Module):
def __init__(self, n_channels, n_classes,
height, width,
known_n_points=None,
device=torch.device('cuda')):
super(UNet, self).__init__()
self.device = device
# With this network depth, there is a minimum image size
if height < 256 or width < 256:
raise ValueError('Minimum input image size is 256x256, got {}x{}'.\
format(height, width))
self.inc = inconv(n_channels, 64)
self.down1 = down(64, 128)
self.down2 = down(128, 256)
self.down3 = down(256, 512)
self.down4 = down(512, 512)
self.down5 = down(512, 512)
self.down6 = down(512, 512)
self.down7 = down(512, 512)
self.down8 = down(512, 512, normaliz=False)
self.up1 = up(1024, 512)
self.up2 = up(1024, 512)
self.up3 = up(1024, 512)
self.up4 = up(1024, 512)
self.up5 = up(1024, 256)
self.up6 = up(512, 128)
self.up7 = up(256, 64)
self.up8 = up(128, 64, activ=False)
self.outc = outconv(64, n_classes)
self.out_nonlin = nn.Sigmoid() # to solve the predict map wired problem @ 20190924 Xing
self.known_n_points = known_n_points
if known_n_points is None:
self.branch_1 = nn.Sequential(nn.Linear(512, 64),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5))
self.branch_2 = nn.Sequential(nn.Linear(256*256, 64),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5))
self.regressor = nn.Sequential(nn.Linear(64 + 64, 1),
nn.ReLU())
# This layer is not connected anywhere
# It is only here for backward compatibility
self.lin = nn.Linear(1, 1, bias=False)
def forward(self, x):
batch_size = x.shape[0]
print(batch_size)
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x6 = self.down5(x5)
x7 = self.down6(x6)
x8 = self.down7(x7)
x9 = self.down8(x8)
x = self.up1(x9, x8)
x = self.up2(x, x7)
x = self.up3(x, x6)
x = self.up4(x, x5)
x = self.up5(x, x4)
x = self.up6(x, x3)
x = self.up7(x, x2)
x = self.up8(x, x1)
x= self.outc(x)
x = self.out_nonlin(x) # do not use the sigmoid @20190924 by Xing
# Reshape Bx1xHxW -> BxHxW
# because probability map is real-valued by definition
x = x.squeeze(1)
# x_map = x_map.squeeze(1)
# print(x_map,x)
if self.known_n_points is None:
x9_flat = x9.view(batch_size, -1)
x_flat = x.view(batch_size, -1)
print(x9_flat.shape)
x10_flat = self.branch_1(x9_flat)
x_flat = self.branch_2(x_flat)
regression_features = torch.cat((x_flat, x10_flat), dim=1)
regression = self.regressor(regression_features)
return x, regression
else:
n_pts = torch.tensor([self.known_n_points]*batch_size,
dtype=torch.get_default_dtype())
n_pts = n_pts.to(self.device)
return x, n_pts
# summ = torch.sum(x)
# count = self.lin(summ)
# count = torch.abs(count)
# if self.known_n_points is not None:
# count = Variable(torch.cuda.FloatTensor([self.known_n_points]))
# return x, count
"""
Copyright ©right © (c) 2019 The Board of Trustees of Purdue University and the Purdue Research Foundation.
All rights reserved.
This software is covered by US patents and copyright.
This source code is to be used for academic research purposes only, and no commercial use is allowed.
For any questions, please contact Edward J. Delp (ace@ecn.purdue.edu) at Purdue University.
Last Modified: 03/03/2019
"""
| 33.317241 | 110 | 0.572759 |
ace7cda3913ce50943958c71e88f1879323408bf | 17,291 | py | Python | sdk/python/pulumi_gcp/healthcare/dataset_iam_policy.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | [
"ECL-2.0",
"Apache-2.0"
] | 121 | 2018-06-18T19:16:42.000Z | 2022-03-31T06:06:48.000Z | sdk/python/pulumi_gcp/healthcare/dataset_iam_policy.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | [
"ECL-2.0",
"Apache-2.0"
] | 492 | 2018-06-22T19:41:03.000Z | 2022-03-31T15:33:53.000Z | sdk/python/pulumi_gcp/healthcare/dataset_iam_policy.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | [
"ECL-2.0",
"Apache-2.0"
] | 43 | 2018-06-19T01:43:13.000Z | 2022-03-23T22:43:37.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['DatasetIamPolicyArgs', 'DatasetIamPolicy']
@pulumi.input_type
class DatasetIamPolicyArgs:
def __init__(__self__, *,
dataset_id: pulumi.Input[str],
policy_data: pulumi.Input[str]):
"""
The set of arguments for constructing a DatasetIamPolicy resource.
:param pulumi.Input[str] dataset_id: The dataset ID, in the form
`{project_id}/{location_name}/{dataset_name}` or
`{location_name}/{dataset_name}`. In the second form, the provider's
project setting will be used as a fallback.
:param pulumi.Input[str] policy_data: The policy data generated by
a `organizations.get_iam_policy` data source.
"""
pulumi.set(__self__, "dataset_id", dataset_id)
pulumi.set(__self__, "policy_data", policy_data)
@property
@pulumi.getter(name="datasetId")
def dataset_id(self) -> pulumi.Input[str]:
"""
The dataset ID, in the form
`{project_id}/{location_name}/{dataset_name}` or
`{location_name}/{dataset_name}`. In the second form, the provider's
project setting will be used as a fallback.
"""
return pulumi.get(self, "dataset_id")
@dataset_id.setter
def dataset_id(self, value: pulumi.Input[str]):
pulumi.set(self, "dataset_id", value)
@property
@pulumi.getter(name="policyData")
def policy_data(self) -> pulumi.Input[str]:
"""
The policy data generated by
a `organizations.get_iam_policy` data source.
"""
return pulumi.get(self, "policy_data")
@policy_data.setter
def policy_data(self, value: pulumi.Input[str]):
pulumi.set(self, "policy_data", value)
@pulumi.input_type
class _DatasetIamPolicyState:
def __init__(__self__, *,
dataset_id: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
policy_data: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering DatasetIamPolicy resources.
:param pulumi.Input[str] dataset_id: The dataset ID, in the form
`{project_id}/{location_name}/{dataset_name}` or
`{location_name}/{dataset_name}`. In the second form, the provider's
project setting will be used as a fallback.
:param pulumi.Input[str] etag: (Computed) The etag of the dataset's IAM policy.
:param pulumi.Input[str] policy_data: The policy data generated by
a `organizations.get_iam_policy` data source.
"""
if dataset_id is not None:
pulumi.set(__self__, "dataset_id", dataset_id)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if policy_data is not None:
pulumi.set(__self__, "policy_data", policy_data)
@property
@pulumi.getter(name="datasetId")
def dataset_id(self) -> Optional[pulumi.Input[str]]:
"""
The dataset ID, in the form
`{project_id}/{location_name}/{dataset_name}` or
`{location_name}/{dataset_name}`. In the second form, the provider's
project setting will be used as a fallback.
"""
return pulumi.get(self, "dataset_id")
@dataset_id.setter
def dataset_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dataset_id", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
(Computed) The etag of the dataset's IAM policy.
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter(name="policyData")
def policy_data(self) -> Optional[pulumi.Input[str]]:
"""
The policy data generated by
a `organizations.get_iam_policy` data source.
"""
return pulumi.get(self, "policy_data")
@policy_data.setter
def policy_data(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "policy_data", value)
class DatasetIamPolicy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
dataset_id: Optional[pulumi.Input[str]] = None,
policy_data: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Three different resources help you manage your IAM policy for Healthcare dataset. Each of these resources serves a different use case:
* `healthcare.DatasetIamPolicy`: Authoritative. Sets the IAM policy for the dataset and replaces any existing policy already attached.
* `healthcare.DatasetIamBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the dataset are preserved.
* `healthcare.DatasetIamMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the dataset are preserved.
> **Note:** `healthcare.DatasetIamPolicy` **cannot** be used in conjunction with `healthcare.DatasetIamBinding` and `healthcare.DatasetIamMember` or they will fight over what your policy should be.
> **Note:** `healthcare.DatasetIamBinding` resources **can be** used in conjunction with `healthcare.DatasetIamMember` resources **only if** they do not grant privilege to the same role.
## google\_healthcare\_dataset\_iam\_policy
```python
import pulumi
import pulumi_gcp as gcp
admin = gcp.organizations.get_iam_policy(bindings=[gcp.organizations.GetIAMPolicyBindingArgs(
role="roles/editor",
members=["user:jane@example.com"],
)])
dataset = gcp.healthcare.DatasetIamPolicy("dataset",
dataset_id="your-dataset-id",
policy_data=admin.policy_data)
```
## google\_healthcare\_dataset\_iam\_binding
```python
import pulumi
import pulumi_gcp as gcp
dataset = gcp.healthcare.DatasetIamBinding("dataset",
dataset_id="your-dataset-id",
members=["user:jane@example.com"],
role="roles/editor")
```
## google\_healthcare\_dataset\_iam\_member
```python
import pulumi
import pulumi_gcp as gcp
dataset = gcp.healthcare.DatasetIamMember("dataset",
dataset_id="your-dataset-id",
member="user:jane@example.com",
role="roles/editor")
```
## Import
IAM member imports use space-delimited identifiers; the resource in question, the role, and the account.
This member resource can be imported using the `dataset_id`, role, and account e.g.
```sh
$ pulumi import gcp:healthcare/datasetIamPolicy:DatasetIamPolicy dataset_iam "your-project-id/location-name/dataset-name roles/viewer user:foo@example.com"
```
IAM binding imports use space-delimited identifiers; the resource in question and the role.
This binding resource can be imported using the `dataset_id` and role, e.g.
```sh
$ pulumi import gcp:healthcare/datasetIamPolicy:DatasetIamPolicy dataset_iam "your-project-id/location-name/dataset-name roles/viewer"
```
IAM policy imports use the identifier of the resource in question.
This policy resource can be imported using the `dataset_id`, role, and account e.g.
```sh
$ pulumi import gcp:healthcare/datasetIamPolicy:DatasetIamPolicy dataset_iam your-project-id/location-name/dataset-name
```
-> **Custom Roles**If you're importing a IAM resource with a custom role, make sure to use the
full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] dataset_id: The dataset ID, in the form
`{project_id}/{location_name}/{dataset_name}` or
`{location_name}/{dataset_name}`. In the second form, the provider's
project setting will be used as a fallback.
:param pulumi.Input[str] policy_data: The policy data generated by
a `organizations.get_iam_policy` data source.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DatasetIamPolicyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Three different resources help you manage your IAM policy for Healthcare dataset. Each of these resources serves a different use case:
* `healthcare.DatasetIamPolicy`: Authoritative. Sets the IAM policy for the dataset and replaces any existing policy already attached.
* `healthcare.DatasetIamBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the dataset are preserved.
* `healthcare.DatasetIamMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the dataset are preserved.
> **Note:** `healthcare.DatasetIamPolicy` **cannot** be used in conjunction with `healthcare.DatasetIamBinding` and `healthcare.DatasetIamMember` or they will fight over what your policy should be.
> **Note:** `healthcare.DatasetIamBinding` resources **can be** used in conjunction with `healthcare.DatasetIamMember` resources **only if** they do not grant privilege to the same role.
## google\_healthcare\_dataset\_iam\_policy
```python
import pulumi
import pulumi_gcp as gcp
admin = gcp.organizations.get_iam_policy(bindings=[gcp.organizations.GetIAMPolicyBindingArgs(
role="roles/editor",
members=["user:jane@example.com"],
)])
dataset = gcp.healthcare.DatasetIamPolicy("dataset",
dataset_id="your-dataset-id",
policy_data=admin.policy_data)
```
## google\_healthcare\_dataset\_iam\_binding
```python
import pulumi
import pulumi_gcp as gcp
dataset = gcp.healthcare.DatasetIamBinding("dataset",
dataset_id="your-dataset-id",
members=["user:jane@example.com"],
role="roles/editor")
```
## google\_healthcare\_dataset\_iam\_member
```python
import pulumi
import pulumi_gcp as gcp
dataset = gcp.healthcare.DatasetIamMember("dataset",
dataset_id="your-dataset-id",
member="user:jane@example.com",
role="roles/editor")
```
## Import
IAM member imports use space-delimited identifiers; the resource in question, the role, and the account.
This member resource can be imported using the `dataset_id`, role, and account e.g.
```sh
$ pulumi import gcp:healthcare/datasetIamPolicy:DatasetIamPolicy dataset_iam "your-project-id/location-name/dataset-name roles/viewer user:foo@example.com"
```
IAM binding imports use space-delimited identifiers; the resource in question and the role.
This binding resource can be imported using the `dataset_id` and role, e.g.
```sh
$ pulumi import gcp:healthcare/datasetIamPolicy:DatasetIamPolicy dataset_iam "your-project-id/location-name/dataset-name roles/viewer"
```
IAM policy imports use the identifier of the resource in question.
This policy resource can be imported using the `dataset_id`, role, and account e.g.
```sh
$ pulumi import gcp:healthcare/datasetIamPolicy:DatasetIamPolicy dataset_iam your-project-id/location-name/dataset-name
```
-> **Custom Roles**If you're importing a IAM resource with a custom role, make sure to use the
full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.
:param str resource_name: The name of the resource.
:param DatasetIamPolicyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DatasetIamPolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
dataset_id: Optional[pulumi.Input[str]] = None,
policy_data: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DatasetIamPolicyArgs.__new__(DatasetIamPolicyArgs)
if dataset_id is None and not opts.urn:
raise TypeError("Missing required property 'dataset_id'")
__props__.__dict__["dataset_id"] = dataset_id
if policy_data is None and not opts.urn:
raise TypeError("Missing required property 'policy_data'")
__props__.__dict__["policy_data"] = policy_data
__props__.__dict__["etag"] = None
super(DatasetIamPolicy, __self__).__init__(
'gcp:healthcare/datasetIamPolicy:DatasetIamPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
dataset_id: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
policy_data: Optional[pulumi.Input[str]] = None) -> 'DatasetIamPolicy':
"""
Get an existing DatasetIamPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] dataset_id: The dataset ID, in the form
`{project_id}/{location_name}/{dataset_name}` or
`{location_name}/{dataset_name}`. In the second form, the provider's
project setting will be used as a fallback.
:param pulumi.Input[str] etag: (Computed) The etag of the dataset's IAM policy.
:param pulumi.Input[str] policy_data: The policy data generated by
a `organizations.get_iam_policy` data source.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _DatasetIamPolicyState.__new__(_DatasetIamPolicyState)
__props__.__dict__["dataset_id"] = dataset_id
__props__.__dict__["etag"] = etag
__props__.__dict__["policy_data"] = policy_data
return DatasetIamPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="datasetId")
def dataset_id(self) -> pulumi.Output[str]:
"""
The dataset ID, in the form
`{project_id}/{location_name}/{dataset_name}` or
`{location_name}/{dataset_name}`. In the second form, the provider's
project setting will be used as a fallback.
"""
return pulumi.get(self, "dataset_id")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
(Computed) The etag of the dataset's IAM policy.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="policyData")
def policy_data(self) -> pulumi.Output[str]:
"""
The policy data generated by
a `organizations.get_iam_policy` data source.
"""
return pulumi.get(self, "policy_data")
| 42.484029 | 205 | 0.64895 |
ace7cdf21e9e2280db0db7a3f7939ee90f63b05b | 162 | py | Python | output/models/ms_data/complex_type/ct_g010_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/ms_data/complex_type/ct_g010_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/ms_data/complex_type/ct_g010_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.ms_data.complex_type.ct_g010_xsd.ct_g010 import (
FooType,
MyType,
Root,
)
__all__ = [
"FooType",
"MyType",
"Root",
]
| 13.5 | 68 | 0.617284 |
ace7d05dfcd3254f94d7ce4d44cabe9158ffa5ec | 340 | py | Python | app/schemas.py | smartgic/shortgic | cb779762843e4a713cfdedbff34b89d59b09c83f | [
"MIT"
] | 3 | 2021-03-29T21:11:45.000Z | 2021-04-19T15:44:41.000Z | app/schemas.py | smartgic/shortgic | cb779762843e4a713cfdedbff34b89d59b09c83f | [
"MIT"
] | null | null | null | app/schemas.py | smartgic/shortgic | cb779762843e4a713cfdedbff34b89d59b09c83f | [
"MIT"
] | null | null | null | """FastAPI Pydantic schema
"""
from typing import Optional, Any, Dict
from pydantic import BaseModel, HttpUrl, typing
class Link(BaseModel):
"""Link structure validation
"""
target: HttpUrl
extras: Optional[typing.Dict[str, Any]] = {}
# Required because used with SQLAlchemy
class Config:
orm_mode = True
| 21.25 | 48 | 0.682353 |
ace7d0ec7c8feafd716dba90851ff5cdeb149b76 | 231 | py | Python | Core/Block_0/R0000_Factory.py | BernardoB95/Extrator_SPEDFiscal | 10b4697833c561d24654251da5f22d044f03fc16 | [
"MIT"
] | 1 | 2021-04-25T13:53:20.000Z | 2021-04-25T13:53:20.000Z | Core/Block_0/R0000_Factory.py | BernardoB95/Extrator_SPEDFiscal | 10b4697833c561d24654251da5f22d044f03fc16 | [
"MIT"
] | null | null | null | Core/Block_0/R0000_Factory.py | BernardoB95/Extrator_SPEDFiscal | 10b4697833c561d24654251da5f22d044f03fc16 | [
"MIT"
] | null | null | null | from Core.IFactory import IFactory
from Regs.Block_0 import R0000
class R0000Factory(IFactory):
def create_block_object(self, line):
self.r0000 = _r0000 = R0000()
_r0000.reg_list = line
return _r0000
| 21 | 40 | 0.69697 |
ace7d134afdb16fbc83531387a6bc8b584825805 | 6,599 | py | Python | scanners/footprints.py | pretech86/phonia | 521e3fd110f60a220e5d47c46ed84facd394ddd3 | [
"MIT"
] | 1 | 2020-08-30T21:13:53.000Z | 2020-08-30T21:13:53.000Z | scanners/footprints.py | pretech86/phonia | 521e3fd110f60a220e5d47c46ed84facd394ddd3 | [
"MIT"
] | null | null | null | scanners/footprints.py | pretech86/phonia | 521e3fd110f60a220e5d47c46ed84facd394ddd3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from urllib.parse import urlencode
from lib.output import *
from lib.format import *
from lib.request import send
from lib.googlesearch import search
numberObj = {}
number = ''
localNumber = ''
internationalNumber = ''
numberCountryCode = ''
customFormatting = ''
def osintIndividualScan():
global numberObj
global number
global internationalNumber
global numberCountryCode
global customFormatting
info('---- Phone books footprints ----')
if numberCountryCode == '+1':
info("Generating URL on True People... ")
plus('https://www.truepeoplesearch.com/results?phoneno={}'.format(
internationalNumber.replace(' ', '')))
dorks = json.load(open('osint/individuals.json'))
for dork in dorks:
if dork['dialCode'] is None or dork['dialCode'] == numberCountryCode:
if customFormatting:
dorkRequest = replaceVariables(
dork['request'], numberObj) + ' OR "{}"'.format(customFormatting)
else:
dorkRequest = replaceVariables(dork['request'], numberObj)
test("Searching for footprints on {}...".format(dork['site']))
for result in search(dorkRequest, stop=dork['stop']):
plus("URL: " + result)
else:
return -1
def osintReputationScan():
global numberObj
global number
global internationalNumber
global customFormatting
info('---- Reputation footprints ----')
dorks = json.load(open('osint/reputation.json'))
for dork in dorks:
if customFormatting:
dorkRequest = replaceVariables(
dork['request'], numberObj) + ' OR "{}"'.format(customFormatting)
else:
dorkRequest = replaceVariables(dork['request'], numberObj)
test("Searching for {}...".format(dork['title']))
for result in search(dorkRequest, stop=dork['stop']):
plus("URL: " + result)
def osintSocialMediaScan():
global numberObj
global number
global internationalNumber
global customFormatting
info('---- Social media footprints ----')
dorks = json.load(open('osint/social_medias.json'))
for dork in dorks:
if customFormatting:
dorkRequest = replaceVariables(
dork['request'], numberObj) + ' OR "{}"'.format(customFormatting)
else:
dorkRequest = replaceVariables(dork['request'], numberObj)
test("Searching for footprints on {}...".format(dork['site']))
for result in search(dorkRequest, stop=dork['stop']):
plus("URL: " + result)
def osintDisposableNumScan():
global numberObj
global number
info('---- Temporary number providers footprints ----')
try:
test("Searching for phone number on tempophone.com...")
response = send(
"GET", "https://tempophone.com/api/v1/phones")
data = json.loads(response.content.decode('utf-8'))
for voip_number in data['objects']:
if voip_number['phone'] == formatNumber(number):
plus("Found a temporary number provider: tempophone.com")
askForExit()
except Exception as e:
error("Unable to reach tempophone.com API.")
dorks = json.load(open('osint/disposable_num_providers.json'))
for dork in dorks:
dorkRequest = replaceVariables(dork['request'], numberObj)
test("Searching for footprints on {}...".format(dork['site']))
for result in search(dorkRequest, stop=dork['stop']):
plus("Result found: {}".format(dork['site']))
plus("URL: " + result)
askForExit()
def osintScan(numberObject, rerun=False):
if not args.scanner == 'footprints' and not args.scanner == 'all':
return -1
global numberObj
global number
global localNumber
global internationalNumber
global numberCountryCode
global customFormatting
numberObj = numberObject
number = numberObj['default']
localNumber = numberObj['local']
internationalNumber = numberObj['international']
numberCountryCode = numberObj['countryCode']
test('Running OSINT footprint reconnaissance...')
if not rerun:
# Whitepages
test("Generating scan URL on 411.com...")
plus("Scan URL: https://www.411.com/phone/{}".format(
internationalNumber.replace('+', '').replace(' ', '-')))
askingCustomPayload = ask(
'Would you like to use an additional format for this number? (y/N) ')
if rerun or askingCustomPayload == 'y' or askingCustomPayload == 'yes':
info('We recommand: {} or {}'.format(internationalNumber,
internationalNumber.replace(numberCountryCode + ' ', '')))
customFormatting = ask('Custom format: ')
info('---- Web pages footprints ----')
test("Searching for footprints on web pages... (limit=10)")
if customFormatting:
req = '{} OR "{}" OR "{}" OR "{}"'.format(
number, number, internationalNumber, customFormatting)
else:
req = '{} OR "{}" OR "{}"'.format(
number, number, internationalNumber)
for result in search(req, stop=10):
plus("Result found: " + result)
# Documents
info("Searching for documents... (limit=10)")
if customFormatting:
req = '(ext:doc OR ext:docx OR ext:odt OR ext:pdf OR ext:rtf OR ext:sxw OR ext:psw OR ext:ppt OR ext:pptx OR ext:pps OR ext:csv OR ext:txt OR ext:xls) AND ("{}")'.format(
customFormatting)
else:
req = '(ext:doc OR ext:docx OR ext:odt OR ext:pdf OR ext:rtf OR ext:sxw OR ext:psw OR ext:ppt OR ext:pptx OR ext:pps OR ext:csv OR ext:txt OR ext:xls) AND ("{}" OR "{}")'.format(
internationalNumber, localNumber)
for result in search(req, stop=10):
plus("Result found: " + result)
osintReputationScan()
test("Generating URL on scamcallfighters.com...")
plus('http://www.scamcallfighters.com/search-phone-{}.html'.format(number))
tmpNumAsk = ask(
"Would you like to search for temporary number providers footprints? (Y/n) ")
if tmpNumAsk.lower() != 'n' and tmpNumAsk.lower() != 'no':
osintDisposableNumScan()
osintSocialMediaScan()
osintIndividualScan()
retry_input = ask(
"Would you like to rerun OSINT scan? (e.g to use a different format) (y/N) ")
if retry_input.lower() == 'y' or retry_input.lower() == 'yes':
osintScan(numberObj, True)
else:
return -1
| 32.348039 | 186 | 0.616457 |
ace7d2494058bb5f084ac0cf41ad40a13c8e0bce | 4,273 | py | Python | google/ads/google_ads/v4/proto/enums/change_status_operation_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | 1 | 2021-04-09T04:28:47.000Z | 2021-04-09T04:28:47.000Z | google/ads/google_ads/v4/proto/enums/change_status_operation_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v4/proto/enums/change_status_operation_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v4/proto/enums/change_status_operation.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v4/proto/enums/change_status_operation.proto',
package='google.ads.googleads.v4.enums',
syntax='proto3',
serialized_options=_b('\n!com.google.ads.googleads.v4.enumsB\032ChangeStatusOperationProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v4/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V4.Enums\312\002\035Google\\Ads\\GoogleAds\\V4\\Enums\352\002!Google::Ads::GoogleAds::V4::Enums'),
serialized_pb=_b('\nAgoogle/ads/googleads_v4/proto/enums/change_status_operation.proto\x12\x1dgoogle.ads.googleads.v4.enums\x1a\x1cgoogle/api/annotations.proto\"w\n\x19\x43hangeStatusOperationEnum\"Z\n\x15\x43hangeStatusOperation\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\t\n\x05\x41\x44\x44\x45\x44\x10\x02\x12\x0b\n\x07\x43HANGED\x10\x03\x12\x0b\n\x07REMOVED\x10\x04\x42\xef\x01\n!com.google.ads.googleads.v4.enumsB\x1a\x43hangeStatusOperationProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v4/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V4.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V4\\Enums\xea\x02!Google::Ads::GoogleAds::V4::Enumsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_CHANGESTATUSOPERATIONENUM_CHANGESTATUSOPERATION = _descriptor.EnumDescriptor(
name='ChangeStatusOperation',
full_name='google.ads.googleads.v4.enums.ChangeStatusOperationEnum.ChangeStatusOperation',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ADDED', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CHANGED', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REMOVED', index=4, number=4,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=159,
serialized_end=249,
)
_sym_db.RegisterEnumDescriptor(_CHANGESTATUSOPERATIONENUM_CHANGESTATUSOPERATION)
_CHANGESTATUSOPERATIONENUM = _descriptor.Descriptor(
name='ChangeStatusOperationEnum',
full_name='google.ads.googleads.v4.enums.ChangeStatusOperationEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_CHANGESTATUSOPERATIONENUM_CHANGESTATUSOPERATION,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=130,
serialized_end=249,
)
_CHANGESTATUSOPERATIONENUM_CHANGESTATUSOPERATION.containing_type = _CHANGESTATUSOPERATIONENUM
DESCRIPTOR.message_types_by_name['ChangeStatusOperationEnum'] = _CHANGESTATUSOPERATIONENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ChangeStatusOperationEnum = _reflection.GeneratedProtocolMessageType('ChangeStatusOperationEnum', (_message.Message,), dict(
DESCRIPTOR = _CHANGESTATUSOPERATIONENUM,
__module__ = 'google.ads.googleads_v4.proto.enums.change_status_operation_pb2'
,
__doc__ = """Container for enum describing operations for the ChangeStatus resource.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v4.enums.ChangeStatusOperationEnum)
))
_sym_db.RegisterMessage(ChangeStatusOperationEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 40.311321 | 705 | 0.789375 |
ace7d2d118b64adcc0fcb473873750fb5a64b588 | 2,028 | py | Python | dns_sprockets_lib/validators/tests/harness.py | roeckelein/sprocket | 8e7f9acf4d330d7b1005ba7a2ae8b644571f11fb | [
"Apache-2.0"
] | 7 | 2015-09-11T04:08:12.000Z | 2021-01-04T21:47:30.000Z | dns_sprockets_lib/validators/tests/harness.py | roeckelein/sprocket | 8e7f9acf4d330d7b1005ba7a2ae8b644571f11fb | [
"Apache-2.0"
] | null | null | null | dns_sprockets_lib/validators/tests/harness.py | roeckelein/sprocket | 8e7f9acf4d330d7b1005ba7a2ae8b644571f11fb | [
"Apache-2.0"
] | null | null | null | '''
ValidatorTestHarness - Test harness for validators.
---------------------------------------------------
.. Copyright (c) 2015 Neustar, Inc. All rights reserved.
.. See COPYRIGHT.txt for full notice. See LICENSE.txt for terms and conditions.
'''
import dns_sprockets_lib.utils as utils
import dns_sprockets_lib.loaders as loaders
import dns_sprockets_lib.validators as validators
from dns_sprockets_lib.dns_sprockets_impl import DNSSprocketsImpl
def test_validator(test_name, zone_name, file_name,
extra_defines=None, dnssec_type='detect'):
'''
Tests a validator using SprocketImpl instance.
:param str test_name: The name of the test to run.
:param str zone_name: The name of the zone to load.
:param str file_name: The name of the file to load (w.r.t. dns_sprockets_lib/tests/data)
:param list extra_defines: List of extra defines, in command-line format.
:param str dnssec_type: Set to force dnssec_type.
:return: A 2-tuple (test_cnt, error_cnt).
'''
class TestArgs(object):
# pylint: disable=too-few-public-methods
'''
Simulated command-line arguments.
'''
zone = zone_name
loader = 'file'
source = 'dns_sprockets_lib/tests/data/%s' % (file_name)
include_tests = [test_name]
exclude_tests = []
force_dnssec_type = dnssec_type
errors_only = False
defines = extra_defines
verbose = False
avail_loaders = utils.public_modules_in_package(loaders, ['tests'])
avail_tests = utils.public_modules_in_package(validators, ['tests'])
test_args_inst = TestArgs()
if test_args_inst.defines is None:
test_args_inst.defines = []
for sec_param in test_args_inst.defines:
(p_name, p_val) = sec_param.split('=')[:2]
setattr(test_args_inst, p_name, p_val)
sprocket = DNSSprocketsImpl(avail_loaders, avail_tests, test_args_inst)
(_, test_cnt, err_cnt) = sprocket.run()
return (test_cnt, err_cnt)
# end of file
| 34.372881 | 92 | 0.675542 |
ace7d3534f78e664d9421121b27f0c27fb70d294 | 4,345 | py | Python | contrib/seeds/generate-seeds.py | Moabbit/bitcoin | 7c32b414b6325743c99fed1208bc53ab0fa1996f | [
"MIT"
] | 3 | 2020-12-11T08:00:30.000Z | 2021-02-03T04:09:02.000Z | contrib/seeds/generate-seeds.py | ksystemsio/bitcoin | a589f536b5e15daf3ac6ffcc137a146514c81967 | [
"MIT"
] | 1 | 2021-02-15T23:45:53.000Z | 2021-02-15T23:45:53.000Z | contrib/seeds/generate-seeds.py | ksystemsio/bitcoin | a589f536b5e15daf3ac6ffcc137a146514c81967 | [
"MIT"
] | 1 | 2018-07-30T17:20:18.000Z | 2018-07-30T17:20:18.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys
import os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % vchAddr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
sys.exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside an IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 8333)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 18333)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| 31.035714 | 99 | 0.582739 |
ace7d3d25308c12f3f96213695ca6fa1f25ff9fd | 899 | py | Python | supriya/exceptions.py | josiah-wolf-oberholtzer/supriya | b147fb06532f2c7fb0d13c88a4d551e35f796d92 | [
"MIT"
] | 191 | 2015-11-13T02:28:42.000Z | 2022-03-29T10:26:44.000Z | supriya/exceptions.py | josiah-wolf-oberholtzer/supriya | b147fb06532f2c7fb0d13c88a4d551e35f796d92 | [
"MIT"
] | 130 | 2016-01-04T16:59:02.000Z | 2022-02-26T15:37:20.000Z | supriya/exceptions.py | josiah-wolf-oberholtzer/supriya | b147fb06532f2c7fb0d13c88a4d551e35f796d92 | [
"MIT"
] | 22 | 2016-05-04T10:32:16.000Z | 2022-02-26T19:22:45.000Z | """
Exceptions.
"""
class AlreadyAllocated(Exception):
pass
class NotAllocated(Exception):
pass
class BufferAlreadyAllocated(AlreadyAllocated):
pass
class BufferNotAllocated(NotAllocated):
pass
class BusAlreadyAllocated(AlreadyAllocated):
pass
class BusNotAllocated(NotAllocated):
pass
class IncompatibleRate(Exception):
pass
class NodeAlreadyAllocated(AlreadyAllocated):
pass
class NodeNotAllocated(NotAllocated):
pass
class NonrealtimeOutputMissing(Exception):
pass
class NonrealtimeRenderError(Exception):
pass
class RequestTimeout(Exception):
pass
class ServerCannotBoot(Exception):
pass
class ServerOnline(Exception):
pass
class ServerOffline(Exception):
pass
class OwnedServerShutdown(Exception):
pass
class UnownedServerShutdown(Exception):
pass
class TooManyClients(Exception):
pass
| 11.828947 | 47 | 0.74861 |
ace7d435021eeae44cfab863449dd24c5692f9bf | 10,743 | py | Python | Code/linkedlist.py | KhallilB/CS1.3 | bf938d86f8308f3d1e1cdc82b9f7e1250c3c0cfa | [
"MIT"
] | null | null | null | Code/linkedlist.py | KhallilB/CS1.3 | bf938d86f8308f3d1e1cdc82b9f7e1250c3c0cfa | [
"MIT"
] | 5 | 2019-06-14T04:30:00.000Z | 2019-07-06T19:12:52.000Z | Code/linkedlist.py | KhallilB/CS1.3 | bf938d86f8308f3d1e1cdc82b9f7e1250c3c0cfa | [
"MIT"
] | null | null | null | #!python
class Node(object):
def __init__(self, data):
"""Initialize this node with the given data."""
self.data = data
self.next = None
def __repr__(self):
"""Return a string representation of this node."""
return 'Node({!r})'.format(self.data)
class LinkedList(object):
def __init__(self, iterable=None):
"""Initialize this linked list and append the given items, if any."""
self.head = None # First node
self.tail = None # Last node
self.size = 0 # Number of nodes
# Append the given items
if iterable is not None:
for item in iterable:
self.append(item)
def __str__(self):
"""Return a formatted string representation of this linked list."""
items = ['({!r})'.format(item) for item in self.items()]
return '[{}]'.format(' -> '.join(items))
def __repr__(self):
"""Return a string representation of this linked list."""
return 'LinkedList({!r})'.format(self.items())
def items(self):
"""Return a list of all items in this linked list.
Best and worst case running time: Theta(n) for n items in the list
because we always need to loop through all n nodes."""
# Create an empty list of results
result = [] # Constant time to create a new list
# Start at the head node
node = self.head # Constant time to assign a variable reference
# Loop until the node is None, which is one node too far past the tail
while node is not None: # Always n iterations because no early exit
# Append this node's data to the results list
result.append(node.data) # Constant time to append to a list
# Skip to the next node
node = node.next # Constant time to reassign a variable
# Now result contains the data from all nodes
return result # Constant time to return a list
def is_empty(self):
"""Return True if this linked list is empty, or False."""
return self.head is None
def length(self):
"""Return the length of this linked list by traversing its nodes.
O(1): Always because we're only returning an variable."""
# Node counter initialized to zero
return self.size
def get_at_index(self, index):
"""Return the item at the given index in this linked list, or
raise ValueError if the given index is out of range of the list size.
Best case running time: O(1): The item is the first item in the list.
Worst case running time: O(n): The item is near the end of the list or
not in the list at all."""
# Check if the given index is out of range and if so raise an error
if not (0 <= index < self.size):
raise ValueError('List index out of range: {}'.format(index))
# TODO: Find the node at the given index and return its data
curr = self.head
count = 0
while (curr):
if count == index:
return curr.data
count += 1
curr = curr.next
def insert_at_index(self, index, item):
"""Insert the given item at the given index in this linked list, or
raise ValueError if the given index is out of range of the list size.
Best case running time: O(1): The insertion is at the beginning or end of
the list.
Worst case running time: O(n): You have to loop through the entire list to find
the index to insert at"""
# Check if the given index is out of range and if so raise an error
if not (0 <= index <= self.size): # Error if index out of range
raise ValueError('List index out of range: {}'.format(index))
elif index == 0:
return self.prepend(item)
elif index == self.size:
return self.append(item)
else:
current_node = self.head
for _ in range(index - 1):
current_node = current_node.next
chosen_node = Node(item)
current_node.next = chosen_node.next
self.size += 1
def append(self, item):
"""Insert the given item at the tail of this linked list.
Best and worst case running time: O(1): Reassigning head"""
# Create a new node to hold the given item
new_node = Node(item)
# Check if this linked list is empty
if self.is_empty():
# Assign head to new node
self.head = new_node
else:
# Otherwise insert new node after tail
self.tail.next = new_node
# Update tail to new node regardless
self.tail = new_node
self.size += 1
def prepend(self, item):
"""Insert the given item at the head of this linked list.
Best and worst case running time: O(1): Reassigning tail """
# Create a new node to hold the given item
new_node = Node(item)
# Check if this linked list is empty
if self.is_empty():
# Assign tail to new node
self.tail = new_node
else:
# Otherwise insert new node before head
new_node.next = self.head
# Update head to new node regardless
self.head = new_node
self.size += 1
def find(self, quality):
"""Return an item from this linked list satisfying the given quality.
Best case running time: Omega(1) if item is near the head of the list.
Worst case running time: O(n) if item is near the tail of the list or
not present and we need to loop through all n nodes in the list."""
# Start at the head node
node = self.head # Constant time to assign a variable reference
# Loop until the node is None, which is one node too far past the tail
while node is not None: # Up to n iterations if we don't exit early
# Check if this node's data satisfies the given quality function
if quality(node.data): # Constant time to call quality function
# We found data satisfying the quality function, so exit early
return node.data # Constant time to return data
# Skip to the next node
node = node.next # Constant time to reassign a variable
# We never found data satisfying quality, but have to return something
return None # Constant time to return None
def replace(self, old_item, new_item):
"""Replace the given old_item in this linked list with given new_item
using the same node, or raise ValueError if old_item is not found.
Best case running time: O(1): If the node being replaced is the first or last
node in the list.
Worst case running time: O(n): You have to loop through the entire list to find
the item you want to replace."""
# TODO: Find the node containing the given old_item and replace its
# data with new_item, without creating a new node object
current_node = self.head
if self.head.data == old_item:
self.head.data = new_item
return
elif self.tail.data == old_item:
self.tail.data = new_item
return
for _ in range(self.size):
if current_node.data == old_item:
current_node.data = new_item
return
current_node = current_node.next
raise ValueError('Value not found in linked list: {}'.format(old_item))
def delete(self, item):
"""Delete the given item from this linked list, or raise ValueError.
Best case running time: O(1): If the node being deleted is the head or tail.
Worst case running time: O(n): If the node is not the head or tail node. You
would have to loop through the entire list."""
# Start at the head node
node = self.head
# Keep track of the node before the one containing the given item
previous = None
# Create a flag to track if we have found the given item
found = False
# Loop until we have found the given item or the node is None
while not found and node is not None:
# Check if the node's data matches the given item
if node.data == item:
# We found data matching the given item, so update found flag
found = True
else:
# Skip to the next node
previous = node
node = node.next
# Check if we found the given item or we never did and reached the tail
if found:
self.size -= 1
# Check if we found a node in the middle of this linked list
if node is not self.head and node is not self.tail:
# Update the previous node to skip around the found node
previous.next = node.next
# Unlink the found node from its next node
node.next = None
# Check if we found a node at the head
if node is self.head:
# Update head to the next node
self.head = node.next
# Unlink the found node from the next node
node.next = None
# Check if we found a node at the tail
if node is self.tail:
# Check if there is a node before the found node
if previous is not None:
# Unlink the previous node from the found node
previous.next = None
# Update tail to the previous node regardless
self.tail = previous
else:
# Otherwise raise an error to tell the user that delete has failed
raise ValueError('Item not found: {}'.format(item))
def test_linked_list():
ll = LinkedList()
print(ll)
print('Appending items:')
ll.append('A')
print(ll)
ll.append('B')
print(ll)
ll.append('C')
print(ll)
print('head: {}'.format(ll.head))
print('tail: {}'.format(ll.tail))
print('size: {}'.format(ll.size))
print('length: {}'.format(ll.length()))
print('Getting items by index:')
for index in range(ll.size):
item = ll.get_at_index(index)
print('get_at_index({}): {!r}'.format(index, item))
print('Deleting items:')
ll.delete('B')
print(ll)
ll.delete('C')
print(ll)
ll.delete('A')
print(ll)
print('head: {}'.format(ll.head))
print('tail: {}'.format(ll.tail))
print('size: {}'.format(ll.size))
print('length: {}'.format(ll.length()))
if __name__ == '__main__':
test_linked_list()
| 40.693182 | 87 | 0.593596 |
ace7d65870dd061111844f2fbded419e86cbad00 | 1,824 | py | Python | scripts/cutesv_inv_filter.py | Triciasnow/HiFi-sv-snakemake | 44efc47c98a66b2e0fe4cfa1863113d4586cb5d8 | [
"Apache-2.0"
] | 1 | 2021-11-15T13:10:09.000Z | 2021-11-15T13:10:09.000Z | scripts/cutesv_inv_filter.py | Triciasnow/HiFi-sv-snakemake | 44efc47c98a66b2e0fe4cfa1863113d4586cb5d8 | [
"Apache-2.0"
] | null | null | null | scripts/cutesv_inv_filter.py | Triciasnow/HiFi-sv-snakemake | 44efc47c98a66b2e0fe4cfa1863113d4586cb5d8 | [
"Apache-2.0"
] | null | null | null | import sys
import getopt
import re
def main(argv):
inname=''
outname=''
try:
opts,args=getopt.getopt(argv,"hi:o:",["infile=","outfile=",])
except getopt.GetoptError:
print('cutesv_inv_fliter.py -i <inputfile> -o <outputfile>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('cutesv_inv_fliter.py -i <inputfile> -o <outputfile>')
sys.exit()
elif opt in ("-i", "--infile"):
inname = arg
elif opt in ("-o", "--outfile"):
outname = arg
return inname,outname
if __name__ == "__main__":
inputname,outputname = main(sys.argv[1:])
inputfile = open(inputname)
outputfile = open(outputname,'w')
while 1:
lines = inputfile.readlines(10000)
if not lines:
break
for line1 in lines:
line1 = line1.rstrip()
cut1 = line1.strip().split('\t')
if re.search('^#',line1):
outputfile.write(str(line1))
outputfile.write('\n')
else:
cut2 = cut1[7]
INFO = cut2.strip().split(';')
sv_type = INFO[1]
length = INFO[2].split('=')
len = length[1]
filter = INFO[0]
cut3 = cut1[9]
FORMAT = cut3.strip().split(':')
GT = FORMAT[0]
if GT =='0/1' or GT == '1/1':
if cut1[6] == 'PASS':
if filter == 'PRECISE':
if sv_type == 'SVTYPE=INV':
if abs(int(len)) >= 120 and abs(int(len)) < 30000:
outputfile.write(str(line1))
outputfile.write('\n')
outputfile.close()
| 32 | 82 | 0.452303 |
ace7d7dc6a5e5498a5d9d3a42863a92547a9e359 | 5,543 | py | Python | backend/src/baserow/contrib/database/ws/views/signals.py | ericderace/baserow | 7b35e81f75166d914d07ef4ad0c30c625b6bb396 | [
"MIT"
] | 1 | 2021-04-13T16:27:58.000Z | 2021-04-13T16:27:58.000Z | backend/src/baserow/contrib/database/ws/views/signals.py | jacklicn/baserow | 978d9462ededbaa96674a6653028ba19876ea273 | [
"MIT"
] | 6 | 2021-04-08T22:03:06.000Z | 2022-01-13T03:38:17.000Z | backend/src/baserow/contrib/database/ws/views/signals.py | jacklicn/baserow | 978d9462ededbaa96674a6653028ba19876ea273 | [
"MIT"
] | null | null | null | from django.dispatch import receiver
from django.db import transaction
from baserow.ws.registries import page_registry
from baserow.contrib.database.views import signals as view_signals
from baserow.contrib.database.views.registries import view_type_registry
from baserow.contrib.database.api.views.serializers import (
ViewSerializer, ViewFilterSerializer, ViewSortSerializer
)
from baserow.contrib.database.api.views.grid.serializers import GridViewSerializer
@receiver(view_signals.view_created)
def view_created(sender, view, user, **kwargs):
table_page_type = page_registry.get('table')
transaction.on_commit(lambda: table_page_type.broadcast(
{
'type': 'view_created',
'view': view_type_registry.get_serializer(
view,
ViewSerializer,
filters=True,
sortings=True
).data
},
getattr(user, 'web_socket_id', None),
table_id=view.table_id
))
@receiver(view_signals.view_updated)
def view_updated(sender, view, user, **kwargs):
table_page_type = page_registry.get('table')
transaction.on_commit(lambda: table_page_type.broadcast(
{
'type': 'view_updated',
'view_id': view.id,
'view': view_type_registry.get_serializer(
view,
ViewSerializer,
# We do not want to broad cast the filters and sortings every time
# the view changes. There are separate views and handlers for them
# each will broad cast their own message.
filters=False,
sortings=False
).data
},
getattr(user, 'web_socket_id', None),
table_id=view.table_id
))
@receiver(view_signals.view_deleted)
def view_deleted(sender, view_id, view, user, **kwargs):
table_page_type = page_registry.get('table')
transaction.on_commit(lambda: table_page_type.broadcast(
{
'type': 'view_deleted',
'table_id': view.table_id,
'view_id': view_id
},
getattr(user, 'web_socket_id', None),
table_id=view.table_id
))
@receiver(view_signals.view_filter_created)
def view_filter_created(sender, view_filter, user, **kwargs):
table_page_type = page_registry.get('table')
transaction.on_commit(lambda: table_page_type.broadcast(
{
'type': 'view_filter_created',
'view_filter': ViewFilterSerializer(view_filter).data
},
getattr(user, 'web_socket_id', None),
table_id=view_filter.view.table_id
))
@receiver(view_signals.view_filter_updated)
def view_filter_updated(sender, view_filter, user, **kwargs):
table_page_type = page_registry.get('table')
transaction.on_commit(lambda: table_page_type.broadcast(
{
'type': 'view_filter_updated',
'view_filter_id': view_filter.id,
'view_filter': ViewFilterSerializer(view_filter).data
},
getattr(user, 'web_socket_id', None),
table_id=view_filter.view.table_id
))
@receiver(view_signals.view_filter_deleted)
def view_filter_deleted(sender, view_filter_id, view_filter, user, **kwargs):
table_page_type = page_registry.get('table')
transaction.on_commit(lambda: table_page_type.broadcast(
{
'type': 'view_filter_deleted',
'view_id': view_filter.view_id,
'view_filter_id': view_filter_id
},
getattr(user, 'web_socket_id', None),
table_id=view_filter.view.table_id
))
@receiver(view_signals.view_sort_created)
def view_sort_created(sender, view_sort, user, **kwargs):
table_page_type = page_registry.get('table')
transaction.on_commit(lambda: table_page_type.broadcast(
{
'type': 'view_sort_created',
'view_sort': ViewSortSerializer(view_sort).data
},
getattr(user, 'web_socket_id', None),
table_id=view_sort.view.table_id
))
@receiver(view_signals.view_sort_updated)
def view_sort_updated(sender, view_sort, user, **kwargs):
table_page_type = page_registry.get('table')
transaction.on_commit(lambda: table_page_type.broadcast(
{
'type': 'view_sort_updated',
'view_sort_id': view_sort.id,
'view_sort': ViewSortSerializer(view_sort).data
},
getattr(user, 'web_socket_id', None),
table_id=view_sort.view.table_id
))
@receiver(view_signals.view_sort_deleted)
def view_sort_deleted(sender, view_sort_id, view_sort, user, **kwargs):
table_page_type = page_registry.get('table')
transaction.on_commit(lambda: table_page_type.broadcast(
{
'type': 'view_sort_deleted',
'view_id': view_sort.view_id,
'view_sort_id': view_sort_id
},
getattr(user, 'web_socket_id', None),
table_id=view_sort.view.table_id
))
@receiver(view_signals.grid_view_field_options_updated)
def grid_view_field_options_updated(sender, grid_view, user, **kwargs):
table_page_type = page_registry.get('table')
transaction.on_commit(lambda: table_page_type.broadcast(
{
'type': 'grid_view_field_options_updated',
'grid_view_id': grid_view.id,
'grid_view_field_options': GridViewSerializer(
grid_view
).data['field_options']
},
getattr(user, 'web_socket_id', None),
table_id=grid_view.table_id
))
| 33.79878 | 82 | 0.655962 |
ace7d8ceb432f828fcfa2feac70d4be77b95a6bc | 3,582 | py | Python | tools/pytest/testing/test_argcomplete.py | servo-wpt-sync/web-platform-tests | 56e2df852354bc2b89e6d17a9dbafd280d24203c | [
"BSD-3-Clause"
] | 4 | 2020-09-09T15:28:01.000Z | 2021-12-01T00:59:56.000Z | tools/pytest/testing/test_argcomplete.py | 063095/web-platform-tests | 255d54144a82ce76d8e50a4aa8de284151119f8b | [
"BSD-3-Clause"
] | 1 | 2021-03-31T20:23:55.000Z | 2021-03-31T20:23:55.000Z | tools/pytest/testing/test_argcomplete.py | 063095/web-platform-tests | 255d54144a82ce76d8e50a4aa8de284151119f8b | [
"BSD-3-Clause"
] | 1 | 2020-03-31T17:20:54.000Z | 2020-03-31T17:20:54.000Z | from __future__ import with_statement
import py, pytest
# test for _argcomplete but not specific for any application
def equal_with_bash(prefix, ffc, fc, out=None):
res = ffc(prefix)
res_bash = set(fc(prefix))
retval = set(res) == res_bash
if out:
out.write('equal_with_bash %s %s\n' % (retval, res))
if not retval:
out.write(' python - bash: %s\n' % (set(res) - res_bash))
out.write(' bash - python: %s\n' % (res_bash - set(res)))
return retval
# copied from argcomplete.completers as import from there
# also pulls in argcomplete.__init__ which opens filedescriptor 9
# this gives an IOError at the end of testrun
def _wrapcall(*args, **kargs):
try:
if py.std.sys.version_info > (2,7):
return py.std.subprocess.check_output(*args,**kargs).decode().splitlines()
if 'stdout' in kargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = py.std.subprocess.Popen(
stdout=py.std.subprocess.PIPE, *args, **kargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kargs.get("args")
if cmd is None:
cmd = args[0]
raise py.std.subprocess.CalledProcessError(retcode, cmd)
return output.decode().splitlines()
except py.std.subprocess.CalledProcessError:
return []
class FilesCompleter(object):
'File completer class, optionally takes a list of allowed extensions'
def __init__(self,allowednames=(),directories=True):
# Fix if someone passes in a string instead of a list
if type(allowednames) is str:
allowednames = [allowednames]
self.allowednames = [x.lstrip('*').lstrip('.') for x in allowednames]
self.directories = directories
def __call__(self, prefix, **kwargs):
completion = []
if self.allowednames:
if self.directories:
files = _wrapcall(['bash','-c',
"compgen -A directory -- '{p}'".format(p=prefix)])
completion += [ f + '/' for f in files]
for x in self.allowednames:
completion += _wrapcall(['bash', '-c',
"compgen -A file -X '!*.{0}' -- '{p}'".format(x,p=prefix)])
else:
completion += _wrapcall(['bash', '-c',
"compgen -A file -- '{p}'".format(p=prefix)])
anticomp = _wrapcall(['bash', '-c',
"compgen -A directory -- '{p}'".format(p=prefix)])
completion = list( set(completion) - set(anticomp))
if self.directories:
completion += [f + '/' for f in anticomp]
return completion
class TestArgComplete:
@pytest.mark.skipif("sys.platform in ('win32', 'darwin')")
def test_compare_with_compgen(self):
from _pytest._argcomplete import FastFilesCompleter
ffc = FastFilesCompleter()
fc = FilesCompleter()
for x in '/ /d /data qqq'.split():
assert equal_with_bash(x, ffc, fc, out=py.std.sys.stdout)
@pytest.mark.skipif("sys.platform in ('win32', 'darwin')")
def test_remove_dir_prefix(self):
"""this is not compatible with compgen but it is with bash itself:
ls /usr/<TAB>
"""
from _pytest._argcomplete import FastFilesCompleter
ffc = FastFilesCompleter()
fc = FilesCompleter()
for x in '/usr/'.split():
assert not equal_with_bash(x, ffc, fc, out=py.std.sys.stdout)
| 39.362637 | 86 | 0.594082 |
ace7d8fe028b8441708c89687b65df3fbe19c597 | 23,271 | py | Python | FIN10K-experiments-source-code/freeze/bert-bilstm-hist-3.py | Namir0806/FETILDA | d4a3e720dccef3ba0221e6d59214e54a11c6fc5b | [
"MIT"
] | null | null | null | FIN10K-experiments-source-code/freeze/bert-bilstm-hist-3.py | Namir0806/FETILDA | d4a3e720dccef3ba0221e6d59214e54a11c6fc5b | [
"MIT"
] | null | null | null | FIN10K-experiments-source-code/freeze/bert-bilstm-hist-3.py | Namir0806/FETILDA | d4a3e720dccef3ba0221e6d59214e54a11c6fc5b | [
"MIT"
] | null | null | null | from scipy import stats
from sklearn.svm import SVR
from sklearn.linear_model import LinearRegression
import os
import random
import sys
import csv
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import transformers
from transformers import AutoConfig, AutoModel, AutoTokenizer
from transformers import AdamW
from torch.cuda.amp import autocast
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import time
import tensorflow as tf
from sklearn.svm import SVR
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import LinearRegression
start = time.time()
torch.cuda.empty_cache()
seed_val = 42
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
tf.random.set_seed(seed_val)
class BERT_Arch(nn.Module):
def __init__(self, bert):
super(BERT_Arch, self).__init__()
self.bert = bert
# dropout layer
self.dropout = nn.Dropout(0.1)
# relu activation function
self.relu = nn.ReLU()
self.leakyrelu = nn.LeakyReLU()
self.elu = nn.ELU()
self.tanh = nn.Tanh()
self.zeros=0
self.totals=0
# dense layer 1
self.fc1 = nn.Linear(768,600)
# dense layer 2 (Output layer)
self.fc2 = nn.Linear(601,1)
self.fc3 = nn.Linear(1,1)
#LSTM
self.hidden_dim = 768 #try 4096 or even more
self.emb_dim = 768
self.encoder = nn.LSTM(self.emb_dim, self.hidden_dim, num_layers=1, bidirectional=True, dropout=0.1)
#Define Attention Network
def attnetwork(self, encoder_out, final_hidden):
hidden = final_hidden.squeeze(0)
attn_weights = torch.bmm(encoder_out, hidden.unsqueeze(2)).squeeze(2)
soft_attn_weights = F.softmax(attn_weights, 1)
new_hidden = torch.bmm(encoder_out.transpose(1,2), soft_attn_weights.unsqueeze(2)).squeeze(2)
return new_hidden, soft_attn_weights
#define the forward pass
def forward(self, sent_id, mask, hist):
cls_vec = []
chunk_max_weights = []
for i in range(len(sent_id)):
if i < 40:
#print("chunk i: ", i)
ip_id = torch.tensor(sent_id[i]).unsqueeze(0).to(device)
attn_mask = torch.tensor(mask[i]).unsqueeze(0).to(device)
#pass the inputs to the model
model_outputs = self.bert(input_ids=ip_id, attention_mask=attn_mask)
cls_hs=model_outputs[1]
atten=model_outputs[2]
cls_vec.append(cls_hs)
del ip_id
del attn_mask
'''
col_sum = np.sort(atten[0][0][11].sum(0)[1:-1].detach().cpu().numpy())
col_sum = col_sum[::-1]
max_col_sum = max(col_sum)
top_word_mean = col_sum[:5].mean()
chunk_max_weights.append(top_word_mean)
'''
#cls_vec_ = torch.mean(torch.stack(cls_vec, dim=0), dim=0)
cls_vec = torch.stack(cls_vec, dim=0)
cls_vec = cls_vec.to(torch.float32) #LSTM
#print("cls_vec shape: ", cls_vec.shape, type(cls_vec), cls_vec.dtype)
'''
x = self.fc1(cls_vec_)
x = self.relu(x)
x = self.dropout(x)
chunk_weights = (torch.tensor(chunk_max_weights)).unsqueeze(0)
chunk_weights = chunk_weights.cuda()
prod1 = torch.bmm(cls_vec.transpose(1,2), chunk_weights.transpose(0,1).unsqueeze(1))
prod1 = prod1.transpose(1,2)
prod1 = prod1.to(torch.float32)
'''
emb_input = cls_vec
inputx = self.dropout(emb_input)
output, (hn, cn) = self.encoder(inputx) #emb_input)
fbout = output[:, :, :self.hidden_dim]+ output[:, :, self.hidden_dim:] #sum bidir outputs F+B
fbout = fbout.permute(1,0,2)
fbhn = (hn[-2,:,:]+hn[-1,:,:]).unsqueeze(0)
attn_out, attn_weights = self.attnetwork(fbout, fbhn)
'''
chunk_weights = (torch.tensor(chunk_max_weights)).unsqueeze(0)
chunk_weights = chunk_weights.cuda()
prod1 = torch.bmm(cls_vec.transpose(1,2), chunk_weights.transpose(0,1).unsqueeze(1))
'''
prod = torch.bmm(cls_vec.transpose(1,2), attn_weights.transpose(0,1).unsqueeze(1))
prod_sum = torch.mean(prod, 0).transpose(0,1)
x = prod_sum #attn_out
x = self.fc1(x)
x =self.leakyrelu(x)
x = self.dropout(x)
hist = hist.unsqueeze(0)
hist = self.fc3(hist)
x = torch.cat((x, hist.unsqueeze(0)), dim=1)
#x = self.dropout(x)
# output layer
y = self.fc2(x)
# if we go from 601 to another hidden layer
# we can apply some non-linearity there: can improve performance
# we can add one extra hidden layer
# if allow hidden layer, we specify the dimentionality
# if no hidden layer, we do as is
y = self.leakyrelu(y)
# we are not doing anything to the output
# can do activation on that
return x, y
# function to train the model
def train(epoch):
memory_file = open('memory_bert_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(epoch)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm_hist.txt', 'a+')
model.train()
total_loss, total_accuracy = 0, 0
# empty list to save model predictions
total_preds = []
total_hist = []
xs = []
# iterate over list of documents
for i in range(len(train_seq)):
memory_file.write("doc num: "+str(i)+" before train: "+str(int(torch.cuda.memory_allocated()/1024/1024))+' mem alloced\n')
memory_file.write("doc num: "+str(i)+" before train: "+str(int(torch.cuda.memory_reserved()/1024/1024))+' mem reserved\n')
sent_id = train_seq[i]
mask = train_mask[i]
hist = train_hist[i]
labels = train_y[i].unsqueeze(0).unsqueeze(0)
# clear previously calculated gradients
model.zero_grad()
memory_file.write("doc num: "+str(i)+" len(sent_id): "+str(len(sent_id))+" \n")
with autocast():
# get model predictions for the current batch
x, preds = model(sent_id, mask, hist)
# compute the loss between actual and predicted values
#loss = huber_loss(preds, labels)
loss = mse_loss(preds, labels)
# model predictions are stored on GPU. So, push it to CPU
preds = preds.detach().cpu().numpy()
x = x.detach().cpu().numpy().ravel()
# add on to the total loss
total_loss = total_loss + loss.item()
xs.append(x)
# backward pass to calculate the gradients
loss.backward()
# clip the the gradients to 1.0. It helps in preventing the exploding gradient problem
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
# update parameters
optimizer.step()
# append the model predictions
total_preds.append(preds)
loss.detach().cpu()
memory_file.write("doc num: "+str(i)+" after train: "+str(int(torch.cuda.memory_allocated()/1024/1024))+' mem alloced\n')
memory_file.write("doc num: "+str(i)+" after train: "+str(int(torch.cuda.memory_reserved()/1024/1024))+' mem reserved\n')
memory_file.flush()
# compute the training loss of the epoch
avg_loss = total_loss / len(train_seq)
xs = np.array(xs)
# predictions are in the form of (no. of batches, size of batch, no. of classes).
# reshape the predictions in form of (number of samples, no. of classes)
total_preds = np.concatenate(total_preds, axis=0)
#total_hist = np.concatenate(total_hist, axis=0)
memory_file.close()
#returns the loss and predictions
return avg_loss, total_preds , xs
# function for evaluating the model
def evaluate():
print("\nEvaluating...")
# deactivate dropout layers
model.eval()
total_loss, total_accuracy = 0.0, 0.0
# empty list to save the model predictions
total_preds = []
total_xs = []
# iterate over list of documents
for i in range(len(valid_seq)):
sent_id = valid_seq[i]
mask = valid_mask[i]
hist = valid_hist[i]
labels = valid_y[i].unsqueeze(0).unsqueeze(0)
# deactivate autograd
with torch.no_grad():
with autocast():
# model predictions
x, preds = model(sent_id, mask, hist)
# compute the validation loss between actual and predicted values
loss = mse_loss(preds,labels)
total_loss = total_loss + loss.item()
preds = preds.detach().cpu().numpy()
total_preds.append(preds)
x = x.detach().cpu().numpy().ravel()
total_xs.append(x)
loss.detach().cpu()
# compute the validation loss of the epoch
avg_loss = total_loss / len(valid_seq)
total_xs = np.array(total_xs)
# reshape the predictions in form of (number of samples, no. of classes)
total_preds = np.concatenate(total_preds, axis=0)
return avg_loss, total_preds, total_xs
def test():
# empty list to save the model predictions
total_xs = []
total_preds=[]
for i in range(len(test_seq)):
sent_id = test_seq[i]
mask = test_mask[i]
hist = test_hist[i]
#labels = test_y[i].unsqueeze(0).unsqueeze(0)
with torch.no_grad():
with autocast():
x, preds = model(sent_id, mask, hist)
preds = preds.detach().cpu().numpy()
total_preds.append(preds)
x = x.detach().cpu().numpy().ravel()
total_xs.append(x)
# reshape the predictions in form of (number of samples, no. of classes)
total_xs = np.array(total_xs)
total_preds = np.concatenate(total_preds, axis=0)
return total_xs, total_preds
def train_x():
# empty list to save the model predictions
total_xs = []
total_preds=[]
for i in range(len(train_seq)):
sent_id = train_seq[i]
mask = train_mask[i]
hist = train_hist[i]
#labels = test_y[i].unsqueeze(0).unsqueeze(0)
with torch.no_grad():
with autocast():
x, preds = model(sent_id, mask, hist)
preds = preds.detach().cpu().numpy()
total_preds.append(preds)
x = x.detach().cpu().numpy().ravel()
total_xs.append(x)
# reshape the predictions in form of (number of samples, no. of classes)
total_xs = np.array(total_xs)
total_preds = np.concatenate(total_preds, axis=0)
return total_xs, total_preds
# specify GPU
device = torch.device("cuda")
max_length = int(sys.argv[1]) #append two [CLS] and [SEP] tokens to make 512
sec = sys.argv[2]
bv = sys.argv[3]
train_fname = "train-results.csv"
#end_year = int(sys.argv[1])
#train_years_list = list(range(end_year-5, end_year))
#print("train_years: ", train_years_list)
df_train = pd.read_csv(train_fname)
#df = df[:10]
train_text, valid_text, train_hist, valid_hist, train_labels, valid_labels = train_test_split(df_train['mda'],
df_train['prev_'+bv],
df_train[bv],
shuffle=False,
train_size=0.8)
'''
val_text, test_text, val_hist, test_hist, val_labels, test_labels = train_test_split(temp_text,
temp_hist,
temp_labels,
shuffle=False,
test_size=0.2)
val_text = val_text.astype(str)
'''
train_text = train_text.astype(str)
valid_text = valid_text.astype(str)
'''
df_train = pd.DataFrame()
df_test = pd.DataFrame()
for y in train_years_list:
df_train = pd.concat([df_train, pd.read_csv(str(y) + "_tok.csv")])
'''
#bert_path = "/gpfs/u/home/HPDM/HPDMrawt/scratch/npl_env/sdm21-exps/long_document_fin/"
bert_path = "/gpfs/u/home/DLTM/DLTMboxi/scratch/env/bert-base-uncased/"
config = AutoConfig.from_pretrained(bert_path, output_attentions=True)
# import BERT-base pretrained model
bert = AutoModel.from_pretrained(bert_path, config=config) #longformer-base-4096/')
# Load the BERT tokenizer
tokenizer = AutoTokenizer.from_pretrained(bert_path) #longformer-base-4096/')
#TRAIN
# tokenize and encode sequences in the training set
tokens_train = tokenizer.batch_encode_plus(
train_text.tolist(),
add_special_tokens=False
)
#Extract input ids
train_seq_ = tokens_train['input_ids']
#Split each document into 510 tokens
train_seq = [[train_seq_[j][i:i + max_length] for i in range(0, len(train_seq_[j]), max_length)] for j in range(len(train_seq_))]
#print(train_seq[0][0])
#Add [CLS], [SEP] and [PAD] tokens
train_seq = [[[tokenizer.cls_token_id] + train_seq[j][i] + [tokenizer.sep_token_id] if len(train_seq[j][i]) == max_length else [tokenizer.cls_token_id] + train_seq[j][i] +[tokenizer.sep_token_id] + [tokenizer.pad_token_id] * (max_length-len(train_seq[j][i])) for i in range(len(train_seq[j]))] for j in range(len(train_seq))]
#print(train_seq[0][0])
#df_train_seq=pd.DataFrame()
#df_train_seq["train_seq"]=train_seq
#df_train_seq.to_csv(sec+ "-train_seq.csv")
#Extract attention masks
train_mask_ = tokens_train['attention_mask']
#Split each document into 510 tokens
train_mask = [[train_mask_[j][i:i + max_length] for i in range(0, len(train_mask_[j]), max_length)] for j in range(len(train_mask_))]
#Add [1] for attention and [0] for [PAD]
train_mask = [[[1] + train_mask[j][i] + [1] if len(train_mask[j][i]) == max_length else [1]+train_mask[j][i]+[1] + [0] * (max_length-len(train_mask[j][i])) for i in range(len(train_mask[j]))] for j in range(len(train_mask))]
#VALID
# tokenize and encode sequences in the training set
tokens_valid = tokenizer.batch_encode_plus(
valid_text.tolist(),
add_special_tokens=False
)
#Extract input ids
valid_seq_ = tokens_valid['input_ids']
#Split each document into 510 tokens
valid_seq = [[valid_seq_[j][i:i + max_length] for i in range(0, len(valid_seq_[j]), max_length)] for j in range(len(valid_seq_))]
#print(valid_seq[0][0])
#Add [CLS], [SEP] and [PAD] tokens
valid_seq = [[[tokenizer.cls_token_id] + valid_seq[j][i] + [tokenizer.sep_token_id] if len(valid_seq[j][i]) == max_length else [tokenizer.cls_token_id] + valid_seq[j][i] +[tokenizer.sep_token_id] + [tokenizer.pad_token_id] * (max_length-len(valid_seq[j][i])) for i in range(len(valid_seq[j]))] for j in range(len(valid_seq))]
#print(valid_seq[0][0])
#df_valid_seq=pd.DataFrame()
#df_valid_seq["valid_seq"]=valid_seq
#df_valid_seq.to_csv(sec+ "-valid_seq.csv")
#Extract attention masks
valid_mask_ = tokens_valid['attention_mask']
#Split each document into 510 tokens
valid_mask = [[valid_mask_[j][i:i + max_length] for i in range(0, len(valid_mask_[j]), max_length)] for j in range(len(valid_mask_))]
#Add [1] for attention and [0] for [PAD]
valid_mask = [[[1] + valid_mask[j][i] + [1] if len(valid_mask[j][i]) == max_length else [1]+valid_mask[j][i]+[1] + [0] * (max_length-len(valid_mask[j][i])) for i in range(len(valid_mask[j]))] for j in range(len(valid_mask))]
train_hist = torch.tensor(train_hist.tolist()).to(device)
train_y = torch.tensor(train_labels.tolist()).to(device)
valid_hist = torch.tensor(valid_hist.tolist()).to(device)
valid_y = torch.tensor(valid_labels.tolist()).to(device)
#val_hist = torch.tensor(val_hist.tolist()).to(device)
#val_y = torch.tensor(val_labels.tolist()).to(device)
# freeze all the parameters
for name, param in bert.named_parameters():
if "encoder.layer.11" in name or "pooler" in name:
param.requires_grad = False #True
# pass the pre-trained BERT to our define architecture
model = BERT_Arch(bert)
# push the model to GPU
model = model.to(device)
# define the loss function
mse_loss = nn.MSELoss()
huber_loss = nn.L1Loss()
# number of training epochs
total_epochs = int(sys.argv[4])
start_epoch = int(sys.argv[5])
end_epoch = int(sys.argv[6])
epochs = end_epoch - start_epoch + 1
#plus = int(sys.argv[5])
# different learning rates
learning_rate = float(sys.argv[7])
#load weights of best model
path = 'saved_weights_bert_'+str(max_length)+'_taiwan_'+bv+'_ep'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm_hist.pt'
model.load_state_dict(torch.load(path))
# set initial loss to previous best
with open('best_valid_loss_bert_'+str(max_length)+'_taiwan_'+bv+'_ep'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm_hist.txt') as f:
lines = f.readlines()
best_valid_loss = float(lines[0])
best_epoch = int(lines[1])
# empty lists to store training and validation loss of each epoch
train_losses=[]
valid_losses=[]
#for each epoch
for epoch in range(epochs):
#print('\n Epoch {:} / {:}'.format(epoch + 1, epochs))
torch.cuda.empty_cache()
# define the optimizer
optimizer = AdamW(model.parameters(),
lr = learning_rate, eps = 1e-8) # learning rate
#train model
train_loss, _ , xs_final= train(start_epoch+epoch)
#evaluate model
valid_loss, _ , _ = evaluate()
#save the best model
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
best_epoch = start_epoch + epoch
#print(f'\nTraining Loss: {train_loss:.3f}')
#xs_train = xs_final
model_to_save = model.module if hasattr(model, 'module') else model
torch.save(model_to_save.state_dict(), 'saved_weights_bert_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm_hist.pt')
#torch.save(model_to_save.state_dict(), 'saved_weights_bert_'+str(max_length)+'_'+sec+'_'+bv+'_epoch'+str(start_epoch+epoch)+'_of_'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm_hist.pt')
# append training and validation loss
train_losses.append(train_loss)
valid_losses.append(valid_loss)
print(f'\nTraining Loss: {train_loss:.10f}')
print(f'Validation Loss: {valid_loss:.10f}')
# pass the pre-trained BERT to our define architecture
model = BERT_Arch(bert)
# push the model to GPU
model = model.to(device)
#load weights of best model
path = 'saved_weights_bert_'+str(max_length)+'_taiwan_'+bv+'_ep'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm_hist.pt'
model.load_state_dict(torch.load(path))
xs_train , _ = train_x()
years = ["2001","2002","2003","2004","2005","2006"]
for year in years:
sec = year
test_fname = sec+"-result.csv"
#TEST
df_test = pd.read_csv(test_fname)
test_text = df_test['mda']
test_hist = df_test['prev_'+bv]
test_labels = df_test[bv]
test_text = test_text.astype(str)
# tokenize and encode sequences in the test set
tokens_test = tokenizer.batch_encode_plus(
test_text.tolist(),
add_special_tokens=False
)
#Extract input ids
test_seq_ = tokens_test['input_ids']
#Split each document into 510 tokens
test_seq = [[test_seq_[j][i:i + max_length] for i in range(0, len(test_seq_[j]), max_length)] for j in range(len(test_seq_))]
#Add [CLS], [SEP] and [PAD] tokens
test_seq = [[[tokenizer.cls_token_id] + test_seq[j][i] + [tokenizer.sep_token_id] if len(test_seq[j][i]) == max_length else [tokenizer.cls_token_id]+test_seq[j][i] + [tokenizer.sep_token_id]+ [tokenizer.pad_token_id] * (max_length-len(test_seq[j][i])) for i in range(len(test_seq[j]))] for j in range(len(test_seq))]
#Extract attention masks
test_mask_ = tokens_test['attention_mask']
#Split each document into 510 tokens
test_mask = [[test_mask_[j][i:i + max_length] for i in range(0, len(test_mask_[j]), max_length)] for j in range(len(test_mask_))]
#Add [1] for attention and [0] for [PAD]
test_mask = [[[1] + test_mask[j][i] + [1] if len(test_mask[j][i]) == max_length else [1]+test_mask[j][i]+[1] + [0] * (max_length-len(test_mask[j][i])) for i in range(len(test_mask[j]))] for j in range(len(test_mask))]
test_hist = torch.tensor(test_hist.tolist()).to(device)
test_y = torch.tensor(test_labels.tolist()).to(device)
# get predictions for test data
valid_mses = []
test_mses = []
methods = ["bare", "svr", "kr", "lr"]
_ , preds, xs_valid = evaluate()
preds = np.asarray(preds)
valid_y_numpy = valid_y.cpu().data.numpy()
valid_mse = mean_squared_error(valid_y_numpy, preds)
valid_mses.append(valid_mse)
xs_test, preds = test()
preds = np.asarray(preds)
test_y = test_y.cpu().data.numpy()
test_mse = mean_squared_error(test_y, preds)
test_mses.append(test_mse)
print("bert bare mse: "+str(test_mses[0])+'---bare---'+str(valid_mses[0]))
lr = LinearRegression()
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
svr = SVR(kernel='rbf', C=0.1, epsilon=0.0001) #linear')
models_list = [svr, kr, lr]
for m in models_list:
m.fit(xs_train, train_labels.to_numpy())
preds = m.predict(xs_valid)
valid_mse = mean_squared_error(valid_labels.to_numpy(), preds)
valid_mses.append(valid_mse)
preds = m.predict(xs_test)
test_mse = mean_squared_error(test_labels.to_numpy(), preds)
test_mses.append(test_mse)
print(m, test_mse,'---',valid_mse)
mse = str(test_mses[valid_mses.index(min(valid_mses))])+"---"+methods[valid_mses.index(min(valid_mses))]+"---"+str(min(valid_mses))
spearmanr = (stats.spearmanr(preds, test_y))[0]
kendallr = (stats.kendalltau(preds, test_y))[0]
print("bert mse: ", mse)
mse_file = open('mse_bert_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm_hist.txt', "w")
mse_file.write(mse + "\n")
mse_file.write(str(test_mses[0])+'---bare---'+str(valid_mses[0])+"\n")
mse_file.write(str(best_valid_loss)+" after epoch: "+str(best_epoch)+"\n")
mse_file.write(str(spearmanr) + "\n")
mse_file.write(str(kendallr) + "\n")
#mse_file.close()
'''
test_error = pd.DataFrame()
test_error['cik_year'] = test_cik.tolist()
test_error['test_y'] = test_y.tolist()
test_error['preds'] = [p[0] for p in preds.tolist()]
test_error['error'] = test_error['test_y'] - test_error['preds']
test_error.to_csv('error_bert_'+str(max_length)+'_'+sec+'_'+bv+'_mean_hist.csv', index=False)
'''
#Linear Baseline
lr = LinearRegression().fit(train_hist.cpu().data.numpy().reshape(-1, 1),
train_y.cpu().data.numpy().reshape(-1, 1))
preds = lr.predict(test_hist.cpu().data.numpy().reshape(-1, 1))
lr_mse = mean_squared_error(test_y.reshape(-1, 1), preds)
print("LR mse", lr_mse)
mse_file.write("Linear mse: " + str(lr_mse))
mse_file.close()
print("Total execution time: ", time.time() - start)
| 32.501397 | 325 | 0.644665 |
ace7d92e1d81afbbc261aa93046d2582daebc407 | 1,676 | py | Python | config/wsgi_demo.py | adandan01/contmon | 95cb6ff432ce5e02cdaa823430626a8abb03d01e | [
"BSD-3-Clause"
] | null | null | null | config/wsgi_demo.py | adandan01/contmon | 95cb6ff432ce5e02cdaa823430626a8abb03d01e | [
"BSD-3-Clause"
] | null | null | null | config/wsgi_demo.py | adandan01/contmon | 95cb6ff432ce5e02cdaa823430626a8abb03d01e | [
"BSD-3-Clause"
] | null | null | null | """
WSGI config for contmon project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
#os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.demo"
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Use Whitenoise to serve static files
# See: https://whitenoise.readthedocs.org/
from whitenoise.django import DjangoWhiteNoise
application = DjangoWhiteNoise(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 40.878049 | 79 | 0.803699 |
ace7da887bbbc8d6d2abcf24b37582011e799f04 | 1,006 | py | Python | 0019_RemoveNthNodeFromEndOfList/python/test_solution.py | jeffvswanson/LeetCode | 6bc7d6cad3c2b1bd6ccb2616ec081fb5eb51ccc8 | [
"MIT"
] | null | null | null | 0019_RemoveNthNodeFromEndOfList/python/test_solution.py | jeffvswanson/LeetCode | 6bc7d6cad3c2b1bd6ccb2616ec081fb5eb51ccc8 | [
"MIT"
] | null | null | null | 0019_RemoveNthNodeFromEndOfList/python/test_solution.py | jeffvswanson/LeetCode | 6bc7d6cad3c2b1bd6ccb2616ec081fb5eb51ccc8 | [
"MIT"
] | null | null | null | import pytest
import solution
def create_linked_list(raw_list) -> solution.ListNode:
for i, val in enumerate(raw_list):
if i == 0:
node = solution.ListNode(val=val)
head = node
if i + 1 < len(raw_list):
node.next = solution.ListNode(val=raw_list[i+1])
node = node.next
return head
@pytest.mark.parametrize(
"raw_list,n,expected",
[
([1, 2, 3, 4, 5], 2, [1, 2, 3, 5]),
([1], 1, None),
([1, 2], 1, [1]),
]
)
def test_initial_pass(raw_list, n, expected):
head = create_linked_list(raw_list)
got = solution.initial_pass(head, n)
if expected:
expected = create_linked_list(expected)
if got and expected:
while got.val and expected.val:
assert got.val == expected.val
got = got.next
expected = expected.next
if got is None:
assert expected is None
break
else:
assert got is None
| 25.15 | 60 | 0.55169 |
ace7daba223ffab38576e864de3a12afb6b33923 | 38,037 | py | Python | plugins/item_tasks/plugin_tests/tasks_test.py | manthey/girder | 1ae1068b02b3dc775df957f3a3c79a6aa9798043 | [
"Apache-2.0"
] | null | null | null | plugins/item_tasks/plugin_tests/tasks_test.py | manthey/girder | 1ae1068b02b3dc775df957f3a3c79a6aa9798043 | [
"Apache-2.0"
] | null | null | null | plugins/item_tasks/plugin_tests/tasks_test.py | manthey/girder | 1ae1068b02b3dc775df957f3a3c79a6aa9798043 | [
"Apache-2.0"
] | null | null | null | import json
import mock
import os
import time
from girder_worker.app import app
from girder_worker_utils import types
from girder_worker_utils.decorators import argument
from girder.constants import AccessType
from girder.models.file import File
from girder.models.folder import Folder
from girder.models.item import Item
from girder.models.token import Token
from girder.models.user import User
from tests import base
from girder_jobs.constants import JobStatus
from girder_jobs.models.job import Job
from girder_item_tasks.constants import ACCESS_FLAG_EXECUTE_TASK
def setUpModule():
base.enabledPlugins.append('item_tasks')
base.startServer()
def tearDownModule():
base.stopServer()
class TasksTest(base.TestCase):
def setUp(self):
base.TestCase.setUp(self)
self.admin = User().createUser(
login='admin', firstName='admin', lastName='admin', email='a@a.com', password='123456')
self.user = User().createUser(
login='user1', firstName='user', lastName='1', email='u@u.com', password='123456')
folders = Folder().childFolders(self.admin, parentType='user', user=self.admin)
self.privateFolder, self.publicFolder = list(folders)
# show full diff when objects don't match
self.maxDiff = None
def testAddItemTasksToFolderFromJson(self):
"""
Test adding item tasks to a folder from a JSON spec.
"""
# Create a new folder that will contain the tasks
folder = Folder().createFolder(
name='placeholder', creator=self.admin, parent=self.admin, parentType='user')
# Create task to introspect container
with mock.patch('girder_jobs.models.job.Job.scheduleJob') as scheduleMock:
resp = self.request(
'/folder/%s/item_task_json_description' % folder['_id'], method='POST', params={
'image': 'johndoe/foo:v5'
}, user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(resp.json['_modelType'], 'job')
self.assertEqual(len(scheduleMock.mock_calls), 1)
job = scheduleMock.mock_calls[0][1][0]
self.assertEqual(job['handler'], 'worker_handler')
self.assertEqual(job['itemTaskId'], folder['_id'])
self.assertEqual(job['kwargs']['outputs']['_stdout']['method'], 'POST')
self.assertTrue(job['kwargs']['outputs']['_stdout']['url'].endswith(
'folder/%s/item_task_json_specs' % folder['_id']))
params = job['kwargs']['outputs']['_stdout']['params']
self.assertEqual(params['image'], 'johndoe/foo:v5')
self.assertEqual(params['pullImage'], True)
token = job['kwargs']['outputs']['_stdout']['headers']['Girder-Token']
# Task should not be registered until we get the callback
resp = self.request('/item_task', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(resp.json, [])
# Simulate callback from introspection job
with open(os.path.join(os.path.dirname(__file__), 'specs.json')) as f:
specs = f.read()
parsedSpecs = json.loads(specs)
resp = self.request(
'/folder/%s/item_task_json_specs' % folder['_id'], method='POST', params={
'image': 'johndoe/foo:v5',
'pullImage': False
}, token=token, body=specs, type='application/json')
self.assertStatusOk(resp)
items = list(Folder().childItems(folder, user=self.admin))
self.assertEqual(len(items), 2)
# Image name and item task flag should be stored in the item metadata
for itemIndex, item in enumerate(items):
item = Item().load(item['_id'], force=True)
self.assertEqual(item['name'], 'johndoe/foo:v5 %s' % (str(itemIndex)))
self.assertEqual(item['description'], parsedSpecs[itemIndex]['description'])
self.assertTrue(item['meta']['isItemTask'])
parsedSpecs[itemIndex]['pull_image'] = False
parsedSpecs[itemIndex]['docker_image'] = 'johndoe/foo:v5'
self.assertEqual(item['meta']['itemTaskSpec'], parsedSpecs[itemIndex])
self.assertEqual(item['meta']['itemTaskName'], '')
# We should only be able to see tasks we have read access on
resp = self.request('/item_task')
self.assertStatusOk(resp)
self.assertEqual(resp.json, [])
resp = self.request('/item_task', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 2)
# Test adding single task spec
folder2 = Folder().createFolder(
name='placeholder2', creator=self.admin, parent=self.admin, parentType='user')
with open(os.path.join(os.path.dirname(__file__), 'spec.json')) as f:
spec = f.read()
parsedSpec = json.loads(spec)
token = Token().createToken(
user=self.admin, scope='item_task.set_task_spec.%s' % folder2['_id'])
resp = self.request(
'/folder/%s/item_task_json_specs' % folder2['_id'], method='POST', params={
'image': 'johndoe/foo:v5',
'pullImage': False
},
token=token, body=spec, type='application/json')
self.assertStatusOk(resp)
items = list(Folder().childItems(folder2, user=self.admin))
self.assertEqual(len(items), 1)
# Check that the single item has the correct metadata
item = Item().load(items[0]['_id'], force=True)
self.assertEqual(item['name'], 'johndoe/foo:v5')
self.assertEqual(item['description'], parsedSpec['description'])
self.assertTrue(item['meta']['isItemTask'])
parsedSpec['pull_image'] = False
parsedSpec['docker_image'] = 'johndoe/foo:v5'
self.assertEqual(item['meta']['itemTaskSpec'], parsedSpec)
self.assertEqual(item['meta']['itemTaskName'], '')
def testItemTaskGetEndpointMinMax(self):
"""
Test adding item tasks to a folder from a JSON spec.
"""
with open(os.path.join(os.path.dirname(__file__), 'namedSpecs.json')) as f:
specs = f.read()
def createTask(itemName, taskName, specs=specs):
# Create a new item that will become a task
item = Item().createItem(name=itemName, creator=self.admin, folder=self.privateFolder)
# Create task to introspect container
with mock.patch('girder_jobs.models.job.Job.scheduleJob') as scheduleMock:
resp = self.request(
'/item/%s/item_task_json_description' % item['_id'], method='POST', params={
'image': 'johndoe/foo:v5',
'taskName': taskName
}, user=self.admin)
self.assertStatusOk(resp)
job = scheduleMock.mock_calls[0][1][0]
token = job['kwargs']['outputs']['_stdout']['headers']['Girder-Token']
# Simulate callback with a valid task name
resp = self.request(
'/item/%s/item_task_json_specs' % (item['_id']), method='PUT', params={
'image': 'johndoe/foo:v5',
'taskName': taskName,
'setName': True,
'setDescription': True,
'pullImage': False
},
token=token, body=specs, type='application/json')
self.assertStatusOk(resp)
# Test GET endpoint
def testMinMax(expected, min=None, max=None):
params = {}
if min is not None:
params['minFileInputs'] = min
if max is not None:
params['maxFileInputs'] = max
resp = self.request(
'/item_task', params=params,
user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), expected)
createTask('item1', 'Task 1')
createTask('item2', '1 File Task')
createTask('item3', '2 File Task')
createTask('item4', '3 File Task')
testMinMax(1, min=1, max=1)
testMinMax(4, max=3)
testMinMax(3, min=0, max=2)
testMinMax(2, min=2, max=3)
testMinMax(4)
testMinMax(1, min=3)
testMinMax(0, min=8)
testMinMax(1, min=0, max=0)
def testConfigureItemTaskFromJson(self):
"""
Test configuring an item with a task from a JSON spec, then reconfiguring the
item with a different task from the JSON spec.
"""
# Create a new item that will become a task
item = Item().createItem(name='placeholder', creator=self.admin, folder=self.privateFolder)
# Create job to introspect container
with mock.patch('girder_jobs.models.job.Job.scheduleJob') as scheduleMock:
resp = self.request(
'/item/%s/item_task_json_description' % item['_id'], method='POST', params={
'image': 'johndoe/foo:v5',
'taskName': 'Task 2'
}, user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(resp.json['_modelType'], 'job')
self.assertEqual(len(scheduleMock.mock_calls), 1)
job = scheduleMock.mock_calls[0][1][0]
self.assertEqual(job['handler'], 'worker_handler')
self.assertEqual(job['itemTaskId'], item['_id'])
self.assertEqual(job['kwargs']['outputs']['_stdout']['method'], 'PUT')
self.assertTrue(job['kwargs']['outputs']['_stdout']['url'].endswith(
'item/%s/item_task_json_specs' % item['_id']))
params = job['kwargs']['outputs']['_stdout']['params']
self.assertEqual(params['image'], 'johndoe/foo:v5')
self.assertEqual(params['taskName'], 'Task 2')
self.assertEqual(params['setName'], True)
self.assertEqual(params['setDescription'], True)
self.assertEqual(params['pullImage'], True)
token = job['kwargs']['outputs']['_stdout']['headers']['Girder-Token']
# Task should not be registered until we get the callback
resp = self.request('/item_task', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(resp.json, [])
# Simulate callback from introspection job
with open(os.path.join(os.path.dirname(__file__), 'namedSpecs.json')) as f:
specs = f.read()
# Simulate callback with an invalid task name
resp = self.request(
'/item/%s/item_task_json_specs' % (item['_id']), method='PUT', params={
'image': 'johndoe/foo:v5',
'taskName': 'Invalid task'
},
token=token, body=specs, type='application/json')
self.assertStatus(resp, 400)
# Simulate callback with a valid task name
resp = self.request(
'/item/%s/item_task_json_specs' % (item['_id']), method='PUT', params={
'image': 'johndoe/foo:v5',
'taskName': 'Task 2',
'setName': True,
'setDescription': True,
'pullImage': False
},
token=token, body=specs, type='application/json')
self.assertStatusOk(resp)
# We should only be able to see tasks we have read access on
resp = self.request('/item_task')
self.assertStatusOk(resp)
self.assertEqual(resp.json, [])
resp = self.request('/item_task', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
self.assertEqual(resp.json[0]['_id'], str(item['_id']))
# Check that the item has the correct metadata
item = Item().load(item['_id'], force=True)
self.assertEqual(item['name'], 'Task 2')
self.assertEqual(item['description'], 'Task 2 description')
self.assertTrue(item['meta']['isItemTask'])
self.assertEqual(item['meta']['itemTaskName'], 'Task 2')
self.assertEqual(item['meta']['itemTaskSpec']['name'], 'Task 2')
self.assertEqual(item['meta']['itemTaskSpec']['description'], 'Task 2 description')
self.assertEqual(item['meta']['itemTaskSpec']['mode'], 'docker')
self.assertEqual(item['meta']['itemTaskSpec']['inputs'], [])
self.assertEqual(item['meta']['itemTaskSpec']['outputs'], [])
# Create job to introspect container
with mock.patch('girder_jobs.models.job.Job.scheduleJob') as scheduleMock:
resp = self.request(
'/item/%s/item_task_json_description' % item['_id'], method='POST', params={
'image': 'johndoe/foo:v5',
'taskName': 'Task 1'
}, user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(resp.json['_modelType'], 'job')
self.assertEqual(len(scheduleMock.mock_calls), 1)
job = scheduleMock.mock_calls[0][1][0]
self.assertEqual(job['handler'], 'worker_handler')
self.assertEqual(job['itemTaskId'], item['_id'])
self.assertEqual(job['kwargs']['outputs']['_stdout']['method'], 'PUT')
self.assertTrue(job['kwargs']['outputs']['_stdout']['url'].endswith(
'item/%s/item_task_json_specs' % item['_id']))
params = job['kwargs']['outputs']['_stdout']['params']
self.assertEqual(params['image'], 'johndoe/foo:v5')
self.assertEqual(params['taskName'], 'Task 1')
self.assertEqual(params['setName'], True)
self.assertEqual(params['setDescription'], True)
self.assertEqual(params['pullImage'], True)
token = job['kwargs']['outputs']['_stdout']['headers']['Girder-Token']
# Simulate callback from introspection job
resp = self.request(
'/item/%s/item_task_json_specs' % item['_id'], method='PUT', params={
'image': 'johndoe/foo:v5',
'taskName': 'Task 1',
'setName': True,
'setDescription': True,
'pullImage': False
},
token=token, body=specs, type='application/json')
self.assertStatusOk(resp)
resp = self.request('/item_task', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
self.assertEqual(resp.json[0]['_id'], str(item['_id']))
# Check that the item has the correct metadata
item = Item().load(item['_id'], force=True)
self.assertEqual(item['name'], 'Task 1')
self.assertEqual(item['description'], 'Task 1 description')
self.assertTrue(item['meta']['isItemTask'])
self.assertEqual(item['meta']['itemTaskName'], 'Task 1')
self.assertEqual(item['meta']['itemTaskSpec']['name'], 'Task 1')
self.assertEqual(item['meta']['itemTaskSpec']['description'], 'Task 1 description')
self.assertEqual(item['meta']['itemTaskSpec']['mode'], 'docker')
self.assertEqual(item['meta']['itemTaskSpec']['inputs'], [{
'id': 'dummy_input',
'name': 'Dummy input',
'description': 'Dummy input flag',
'type': 'boolean',
'default': {'data': True}
}])
self.assertEqual(item['meta']['itemTaskSpec']['outputs'], [])
def testAddItemTasksToFolderFromSlicerCli(self):
"""
Test adding item tasks to a folder from Slicer CLI XML.
"""
# Create a new folder that will contain the tasks
folder = Folder().createFolder(
name='placeholder', creator=self.admin, parent=self.admin, parentType='user')
# Create task to introspect container
with mock.patch('girder_jobs.models.job.Job.scheduleJob') as scheduleMock:
resp = self.request(
'/folder/%s/item_task_slicer_cli_description' % folder['_id'], method='POST',
params={
'image': 'johndoe/foo:v5',
'args': json.dumps(['--foo', 'bar'])
}, user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(resp.json['_modelType'], 'job')
self.assertEqual(len(scheduleMock.mock_calls), 1)
job = scheduleMock.mock_calls[0][1][0]
self.assertEqual(job['handler'], 'worker_handler')
self.assertEqual(job['itemTaskId'], folder['_id'])
self.assertEqual(job['kwargs']['outputs']['_stdout']['method'], 'POST')
self.assertTrue(job['kwargs']['outputs']['_stdout']['url'].endswith(
'folder/%s/item_task_slicer_cli_xml' % folder['_id']))
params = job['kwargs']['outputs']['_stdout']['params']
self.assertEqual(params['image'], 'johndoe/foo:v5')
self.assertEqual(params['args'], '["--foo", "bar"]')
self.assertEqual(params['pullImage'], True)
token = job['kwargs']['outputs']['_stdout']['headers']['Girder-Token']
# Task should not be registered until we get the callback
resp = self.request('/item_task', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(resp.json, [])
# Simulate callback from introspection job
with open(os.path.join(os.path.dirname(__file__), 'slicer_cli.xml')) as f:
xml = f.read()
resp = self.request(
'/folder/%s/item_task_slicer_cli_xml' % folder['_id'], method='POST', params={
'image': 'johndoe/foo:v5',
'args': json.dumps(['--foo', 'bar']),
'pullImage': False
},
token=token, body=xml, type='application/xml')
self.assertStatusOk(resp)
# We should only be able to see tasks we have read access on
resp = self.request('/item_task')
self.assertStatusOk(resp)
self.assertEqual(resp.json, [])
resp = self.request('/item_task', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
items = list(Folder().childItems(folder, user=self.admin))
self.assertEqual(len(items), 1)
item = items[0]
# Image name and item task flag should be stored in the item metadata
self.assertEqual(item['name'], 'PET phantom detector CLI')
self.assertEqual(
item['description'],
u'**Description**: Detects positions of PET/CT pocket phantoms in PET image.\n\n'
u'**Author(s)**: Girder Developers\n\n**Version**: 1.0\n\n'
u'**License**: Apache 2.0\n\n**Acknowledgements**: *none*\n\n'
u'*This description was auto-generated from the Slicer CLI XML specification.*'
)
self.assertTrue(item['meta']['isItemTask'])
self.assertHasKeys(item['meta']['itemTaskSpec'],
('mode', 'docker_image', 'container_args', 'inputs', 'outputs'))
self.assertEqual(item['meta']['itemTaskSpec']['mode'], 'docker')
self.assertEqual(item['meta']['itemTaskSpec']['docker_image'], 'johndoe/foo:v5')
self.assertEqual(item['meta']['itemTaskSlicerCliArgs'], ['--foo', 'bar'])
def testConfigureItemTaskFromSlicerCli(self):
# Create a new item that will become a task
item = Item().createItem(name='placeholder', creator=self.admin, folder=self.privateFolder)
# Create task to introspect container
with mock.patch('girder_jobs.models.job.Job.scheduleJob') as scheduleMock:
resp = self.request(
'/item/%s/item_task_slicer_cli_description' % item['_id'], method='POST', params={
'image': 'johndoe/foo:v5',
'args': json.dumps(['--foo', 'bar'])
}, user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(resp.json['_modelType'], 'job')
self.assertEqual(len(scheduleMock.mock_calls), 1)
job = scheduleMock.mock_calls[0][1][0]
self.assertEqual(job['handler'], 'worker_handler')
self.assertEqual(job['itemTaskId'], item['_id'])
self.assertEqual(job['kwargs']['outputs']['_stdout']['method'], 'PUT')
self.assertTrue(job['kwargs']['outputs']['_stdout']['url'].endswith(
'item/%s/item_task_slicer_cli_xml' % item['_id']))
token = job['kwargs']['outputs']['_stdout']['headers']['Girder-Token']
# Task should not be registered until we get the callback
resp = self.request('/item_task', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(resp.json, [])
# Image and args should be stored in the item metadata
item = Item().load(item['_id'], force=True)
self.assertEqual(item['meta']['itemTaskSpec']['docker_image'], 'johndoe/foo:v5')
self.assertEqual(item['meta']['itemTaskSlicerCliArgs'], ['--foo', 'bar'])
# Simulate callback from introspection job
with open(os.path.join(os.path.dirname(__file__), 'slicer_cli.xml')) as f:
xml = f.read()
resp = self.request(
'/item/%s/item_task_slicer_cli_xml' % item['_id'], method='PUT', params={
'setName': True,
'setDescription': True
}, token=token, body=xml, type='application/xml')
self.assertStatusOk(resp)
# We should only be able to see tasks we have read access on
resp = self.request('/item_task')
self.assertStatusOk(resp)
self.assertEqual(resp.json, [])
resp = self.request('/item_task', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
self.assertEqual(resp.json[0]['_id'], str(item['_id']))
item = Item().load(item['_id'], force=True)
self.assertEqual(item['name'], 'PET phantom detector CLI')
self.assertEqual(
item['description'],
u'**Description**: Detects positions of PET/CT pocket phantoms in PET image.\n\n'
u'**Author(s)**: Girder Developers\n\n**Version**: 1.0\n\n'
u'**License**: Apache 2.0\n\n**Acknowledgements**: *none*\n\n'
u'*This description was auto-generated from the Slicer CLI XML specification.*'
)
self.assertTrue(item['meta']['isItemTask'])
self.assertEqual(item['meta']['itemTaskSpec'], {
'mode': 'docker',
'docker_image': 'johndoe/foo:v5',
'container_args': [
'--foo', 'bar', '--InputImage=$input{--InputImage}',
'--MaximumLineStraightnessDeviation=$input{--MaximumLineStraightnessDeviation}',
'--MaximumRadius=$input{--MaximumRadius}',
'--MaximumSphereDistance=$input{--MaximumSphereDistance}',
'--MinimumRadius=$input{--MinimumRadius}',
'--MinimumSphereActivity=$input{--MinimumSphereActivity}',
'--MinimumSphereDistance=$input{--MinimumSphereDistance}',
'--SpheresPerPhantom=$input{--SpheresPerPhantom}', '$flag{--StrictSorting}',
'--DetectedPoints=$output{--DetectedPoints}'
],
'inputs': [{
'description': 'Input image to be analysed.',
'format': 'image',
'name': 'InputImage', 'type': 'image', 'id': '--InputImage',
'target': 'filepath'
}, {
'description': 'Used for eliminating detections which are not in a straight line. '
'Unit: multiples of geometric average of voxel spacing',
'format': 'number',
'default': {'data': 1.0},
'type': 'number',
'id': '--MaximumLineStraightnessDeviation',
'name': 'MaximumLineStraightnessDeviation'
}, {
'description': 'Used for eliminating too big blobs. Unit: millimeter [mm]',
'format': 'number', 'default': {'data': 20.0},
'type': 'number',
'id': '--MaximumRadius',
'name': 'MaximumRadius'
}, {
'description': 'Signifies maximum distance between adjacent sphere centers [mm]. '
'Used to separate phantoms from tumors.',
'format': 'number', 'default': {'data': 40.0},
'type': 'number',
'id': '--MaximumSphereDistance',
'name': 'MaximumSphereDistance'
}, {
'description': 'Used for eliminating too small blobs. Unit: millimeter [mm]',
'format': 'number',
'default': {'data': 3.0},
'type': 'number',
'id': '--MinimumRadius',
'name': 'MinimumRadius'
}, {
'description': 'Used for thresholding in blob detection. '
'Unit: becquerels per milliliter [Bq/ml]',
'format': 'number', 'default': {'data': 5000.0},
'type': 'number',
'id': '--MinimumSphereActivity',
'name': 'MinimumSphereActivity'
}, {
'description': 'Signifies minimum distance between adjacent sphere centers [mm]. '
'Used to separate phantoms from tumors.',
'format': 'number',
'default': {'data': 30.0},
'type': 'number',
'id': '--MinimumSphereDistance',
'name': 'MinimumSphereDistance'
}, {
'description': 'What kind of phantom are we working with here?',
'format': 'number-enumeration',
'default': {'data': 3},
'type': 'number-enumeration',
'id': '--SpheresPerPhantom',
'name': 'SpheresPerPhantom',
'values': [2, 3]
}, {
'description': 'Controls whether spheres within a phantom must have descending '
'activities. If OFF, they can have approximately same activities '
'(within 15%).',
'format': 'boolean',
'default': {'data': False},
'type': 'boolean',
'id': '--StrictSorting',
'name': 'StrictSorting'
}],
'outputs': [{
'description': 'Fiducial points, one for each detected sphere. '
'Will be multiple of 3.',
'format': 'new-file',
'name': 'DetectedPoints',
'type': 'new-file',
'id': '--DetectedPoints',
'target': 'filepath'
}]
})
# Shouldn't be able to run the task if we don't have execute permission flag
Folder().setUserAccess(
self.privateFolder, user=self.user, level=AccessType.READ, save=True)
resp = self.request(
'/item_task/%s/execution' % item['_id'], method='POST', user=self.user)
self.assertStatus(resp, 403)
# Grant the user permission, and run the task
Folder().setUserAccess(
self.privateFolder, user=self.user, level=AccessType.WRITE,
flags=ACCESS_FLAG_EXECUTE_TASK, currentUser=self.admin, save=True)
inputs = {
'--InputImage': {
'mode': 'girder',
'resource_type': 'item',
'id': str(item['_id'])
},
'--MaximumLineStraightnessDeviation': {
'mode': 'inline',
'data': 1
},
'--MaximumRadius': {
'mode': 'inline',
'data': 20
},
'--MaximumSphereDistance': {
'mode': 'inline',
'data': 40
},
'--MinimumRadius': {
'mode': 'inline',
'data': 3
},
'--MinimumSphereActivity': {
'mode': 'inline',
'data': 5000
},
'--MinimumSphereDistance': {
'mode': 'inline',
'data': 30
},
'--SpheresPerPhantom': {
'mode': 'inline',
'data': 3},
'--StrictSorting': {
'mode': 'inline',
'data': False
}
}
outputs = {
'--DetectedPoints': {
'mode': 'girder',
'parent_id': str(self.privateFolder['_id']),
'parent_type': 'folder',
'name': 'test.txt'
}
}
# Ensure task was scheduled
with mock.patch('girder_jobs.models.job.Job.scheduleJob') as scheduleMock:
resp = self.request(
'/item_task/%s/execution' % item['_id'], method='POST', user=self.user, params={
'inputs': json.dumps(inputs),
'outputs': json.dumps(outputs)
})
self.assertEqual(len(scheduleMock.mock_calls), 1)
self.assertStatusOk(resp)
job = resp.json
self.assertEqual(job['_modelType'], 'job')
self.assertNotIn('kwargs', job) # ordinary user can't see kwargs
jobModel = Job()
job = jobModel.load(job['_id'], force=True)
output = job['kwargs']['outputs']['--DetectedPoints']
# Simulate output from the worker
contents = b'Hello world'
resp = self.request(
path='/file', method='POST', token=output['token'], params={
'parentType': output['parent_type'],
'parentId': output['parent_id'],
'name': output['name'],
'size': len(contents),
'mimeType': 'text/plain',
'reference': output['reference']
})
self.assertStatusOk(resp)
uploadId = resp.json['_id']
fields = [('offset', 0), ('uploadId', uploadId)]
files = [('chunk', output['name'], contents)]
resp = self.multipartRequest(
path='/file/chunk', fields=fields, files=files, token=output['token'])
self.assertStatusOk(resp)
file = resp.json
self.assertEqual(file['_modelType'], 'file')
self.assertEqual(file['size'], 11)
self.assertEqual(file['mimeType'], 'text/plain')
file = File().load(file['_id'], force=True)
# Make sure temp token is removed once we change job status to final state
job = jobModel.load(job['_id'], force=True)
self.assertIn('itemTaskTempToken', job)
# Transition through states to SUCCESS
job = jobModel.updateJob(job, status=JobStatus.QUEUED)
job = jobModel.updateJob(job, status=JobStatus.RUNNING)
job = jobModel.updateJob(job, status=JobStatus.SUCCESS)
self.assertNotIn('itemTaskTempToken', job)
self.assertIn('itemTaskBindings', job)
# Wait for async data.process event to bind output provenance
start = time.time()
while time.time() - start < 15:
job = jobModel.load(job['_id'], force=True)
if 'itemId' in job['itemTaskBindings']['outputs']['--DetectedPoints']:
break
else:
time.sleep(0.2)
else:
raise Exception('Output binding did not occur in time')
self.assertEqual(
job['itemTaskBindings']['outputs']['--DetectedPoints']['itemId'], file['itemId'])
def testListExtensions(self):
with mock.patch('girder_item_tasks.celery_tasks.get_extensions',
return_value=['c', 'f', 'a', 'b']):
resp = self.request('/item_task/extensions', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(resp.json, ['a', 'b', 'c', 'f'])
resp = self.request('/item_task/extensions', user=self.user)
self.assertStatus(resp, 403)
def createCeleryItemTask(self, user, params=None):
spec = {
'name': 'test function',
'description': 'test description',
'mode': 'girder_worker',
'inputs': [{
'id': 'n',
'name': 'n',
'type': 'integer'
}]
}
params = params or {}
params['taskName'] = 'task'
item = self.model('item').createItem(
name='temp', creator=self.admin, folder=self.privateFolder)
app.tasks['task'] = lambda n: None
with mock.patch('girder_item_tasks.celery_tasks.decorators.describe_function',
return_value=spec):
resp = self.request(
'/item/%s/item_task_celery' % item['_id'],
method='POST',
params=params,
user=user
)
return resp
def testConfigureCeleryTaskItemDefaults(self):
resp = self.createCeleryItemTask(self.admin)
self.assertStatusOk(resp)
self.assertEqual(resp.json['name'], 'test function')
self.assertEqual(resp.json['description'], 'test description')
meta = resp.json['meta']
self.assertEqual(meta['itemTaskImport'], 'task')
def testConfigureCeleryTaskPermissions(self):
resp = self.createCeleryItemTask(self.user)
self.assertStatus(resp, 403)
def testConfigureCeleryTaskItemNoRename(self):
resp = self.createCeleryItemTask(self.admin, {'setName': False})
self.assertStatusOk(resp)
self.assertEqual(resp.json['name'], 'temp')
self.assertEqual(resp.json['description'], 'test description')
def testConfigureCeleryTaskItemNoRedescribe(self):
resp = self.createCeleryItemTask(self.admin, {'setDescription': False})
self.assertStatusOk(resp)
self.assertEqual(resp.json['name'], 'test function')
self.assertEqual(resp.json['description'], '')
def testConfigureCeleryTaskUnknownTask(self):
item = self.model('item').createItem(
name='temp', creator=self.admin, folder=self.privateFolder)
resp = self.request(
'/item/%s/item_task_celery' % item['_id'],
method='POST',
params={'taskName': 'not a valid task'},
user=self.admin
)
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Unknown task "not a valid task"')
def testConfigureCeleryTaskInvalidTask(self):
item = self.model('item').createItem(
name='temp', creator=self.admin, folder=self.privateFolder)
app.tasks['task'] = lambda n: None
resp = self.request(
'/item/%s/item_task_celery' % item['_id'],
method='POST',
params={'taskName': 'task'},
user=self.admin
)
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Could not get a task description')
def testConfigureCeleryTaskFolder(self):
folder = self.model('folder').createFolder(
self.privateFolder, name='tasks', creator=self.admin)
spec = {
'name': 'test function',
'description': 'test description',
'mode': 'girder_worker',
'inputs': [{
'id': 'n',
'name': 'n',
'type': 'integer'
}]
}
app.tasks['task'] = lambda n: None
with mock.patch(
'girder_item_tasks.celery_tasks.decorators.describe_function',
return_value=spec):
with mock.patch(
'girder_item_tasks.celery_tasks.get_extension_tasks',
return_value={'task': app.tasks['task']}):
resp = self.request(
'/folder/%s/item_task_celery' % folder['_id'],
method='POST',
params={'extension': 'ext'},
user=self.admin
)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
task = resp.json[0]
self.assertEqual(task['name'], 'test function')
meta = task['meta']
self.assertEqual(meta['itemTaskImport'], 'task')
resp = self.request(
'/folder/%s/item_task_celery' % folder['_id'],
method='POST',
params={'extension': 'ext'},
user=self.user
)
self.assertStatus(resp, 403)
def testConfigureCeleryTaskUnknownExtension(self):
folder = self.model('folder').createFolder(
self.privateFolder, name='tasks', creator=self.admin)
resp = self.request(
'/folder/%s/item_task_celery' % folder['_id'],
method='POST',
params={'extension': 'not a valid extension'},
user=self.admin
)
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Unknown girder_worker extension')
def testExecuteCeleryTask(self):
item = self.createCeleryItemTask(self.admin).json
@app.task
@argument('n', types.Number)
def echo_number(n):
return n
app.tasks['task'] = echo_number
inputs = json.dumps({'n': {'mode': 'inline', 'data': 10.0}})
return_val = mock.Mock()
return_val.job = {}
with mock.patch.object(echo_number, 'apply_async', return_value=return_val) as apply_async:
resp = self.request(
'/item_task/%s/execution' % item['_id'],
method='POST',
params={'inputs': inputs},
user=self.admin
)
self.assertStatusOk(resp)
apply_async.assert_called_with(
args=[],
kwargs={'n': 10.0},
girder_job_title='task'
)
| 43.07701 | 99 | 0.556879 |
ace7db67232051b14921d42a7b2a58b5b56ce26d | 2,418 | py | Python | src/byro/common/migrations/0004_auto_20180111_1807.py | uescher/byro | e43d646dc8e833591c82b2ea1711c70b9ce7e0b2 | [
"Apache-2.0"
] | null | null | null | src/byro/common/migrations/0004_auto_20180111_1807.py | uescher/byro | e43d646dc8e833591c82b2ea1711c70b9ce7e0b2 | [
"Apache-2.0"
] | null | null | null | src/byro/common/migrations/0004_auto_20180111_1807.py | uescher/byro | e43d646dc8e833591c82b2ea1711c70b9ce7e0b2 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-01-11 18:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('common', '0003_auto_20171013_1436'),
]
operations = [
migrations.AlterField(
model_name='configuration',
name='language',
field=models.CharField(blank=True, choices=[('af', 'Afrikaans'), ('ar', 'Arabic'), ('ast', 'Asturian'), ('az', 'Azerbaijani'), ('bg', 'Bulgarian'), ('be', 'Belarusian'), ('bn', 'Bengali'), ('br', 'Breton'), ('bs', 'Bosnian'), ('ca', 'Catalan'), ('cs', 'Czech'), ('cy', 'Welsh'), ('da', 'Danish'), ('de', 'German'), ('dsb', 'Lower Sorbian'), ('el', 'Greek'), ('en', 'English'), ('en-au', 'Australian English'), ('en-gb', 'British English'), ('eo', 'Esperanto'), ('es', 'Spanish'), ('es-ar', 'Argentinian Spanish'), ('es-co', 'Colombian Spanish'), ('es-mx', 'Mexican Spanish'), ('es-ni', 'Nicaraguan Spanish'), ('es-ve', 'Venezuelan Spanish'), ('et', 'Estonian'), ('eu', 'Basque'), ('fa', 'Persian'), ('fi', 'Finnish'), ('fr', 'French'), ('fy', 'Frisian'), ('ga', 'Irish'), ('gd', 'Scottish Gaelic'), ('gl', 'Galician'), ('he', 'Hebrew'), ('hi', 'Hindi'), ('hr', 'Croatian'), ('hsb', 'Upper Sorbian'), ('hu', 'Hungarian'), ('ia', 'Interlingua'), ('id', 'Indonesian'), ('io', 'Ido'), ('is', 'Icelandic'), ('it', 'Italian'), ('ja', 'Japanese'), ('ka', 'Georgian'), ('kab', 'Kabyle'), ('kk', 'Kazakh'), ('km', 'Khmer'), ('kn', 'Kannada'), ('ko', 'Korean'), ('lb', 'Luxembourgish'), ('lt', 'Lithuanian'), ('lv', 'Latvian'), ('mk', 'Macedonian'), ('ml', 'Malayalam'), ('mn', 'Mongolian'), ('mr', 'Marathi'), ('my', 'Burmese'), ('nb', 'Norwegian Bokmål'), ('ne', 'Nepali'), ('nl', 'Dutch'), ('nn', 'Norwegian Nynorsk'), ('os', 'Ossetic'), ('pa', 'Punjabi'), ('pl', 'Polish'), ('pt', 'Portuguese'), ('pt-br', 'Brazilian Portuguese'), ('ro', 'Romanian'), ('ru', 'Russian'), ('sk', 'Slovak'), ('sl', 'Slovenian'), ('sq', 'Albanian'), ('sr', 'Serbian'), ('sr-latn', 'Serbian Latin'), ('sv', 'Swedish'), ('sw', 'Swahili'), ('ta', 'Tamil'), ('te', 'Telugu'), ('th', 'Thai'), ('tr', 'Turkish'), ('tt', 'Tatar'), ('udm', 'Udmurt'), ('uk', 'Ukrainian'), ('ur', 'Urdu'), ('vi', 'Vietnamese'), ('zh-hans', 'Simplified Chinese'), ('zh-hant', 'Traditional Chinese')], max_length=5, null=True, verbose_name='language'),
),
]
| 115.142857 | 2,009 | 0.543424 |
ace7dc3f7dd694a738ccd13ce4c930ab65bf7f59 | 4,257 | py | Python | sdk/automation/azure-mgmt-automation/azure/mgmt/automation/models/software_update_configuration.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/automation/azure-mgmt-automation/azure/mgmt/automation/models/software_update_configuration.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/automation/azure-mgmt-automation/azure/mgmt/automation/models/software_update_configuration.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SoftwareUpdateConfiguration(Model):
"""Software update configuration properties.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar name: Resource name.
:vartype name: str
:ivar id: Resource Id.
:vartype id: str
:ivar type: Resource type
:vartype type: str
:param update_configuration: Required. update specific properties for the
Software update configuration
:type update_configuration:
~azure.mgmt.automation.models.UpdateConfiguration
:param schedule_info: Required. Schedule information for the Software
update configuration
:type schedule_info: ~azure.mgmt.automation.models.ScheduleProperties
:ivar provisioning_state: Provisioning state for the software update
configuration, which only appears in the response.
:vartype provisioning_state: str
:param error: Details of provisioning error
:type error: ~azure.mgmt.automation.models.ErrorResponse
:ivar creation_time: Creation time of the resource, which only appears in
the response.
:vartype creation_time: datetime
:ivar created_by: CreatedBy property, which only appears in the response.
:vartype created_by: str
:ivar last_modified_time: Last time resource was modified, which only
appears in the response.
:vartype last_modified_time: datetime
:ivar last_modified_by: LastModifiedBy property, which only appears in the
response.
:vartype last_modified_by: str
:param tasks: Tasks information for the Software update configuration.
:type tasks:
~azure.mgmt.automation.models.SoftwareUpdateConfigurationTasks
"""
_validation = {
'name': {'readonly': True},
'id': {'readonly': True},
'type': {'readonly': True},
'update_configuration': {'required': True},
'schedule_info': {'required': True},
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
'created_by': {'readonly': True},
'last_modified_time': {'readonly': True},
'last_modified_by': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'update_configuration': {'key': 'properties.updateConfiguration', 'type': 'UpdateConfiguration'},
'schedule_info': {'key': 'properties.scheduleInfo', 'type': 'ScheduleProperties'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'error': {'key': 'properties.error', 'type': 'ErrorResponse'},
'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'},
'created_by': {'key': 'properties.createdBy', 'type': 'str'},
'last_modified_time': {'key': 'properties.lastModifiedTime', 'type': 'iso-8601'},
'last_modified_by': {'key': 'properties.lastModifiedBy', 'type': 'str'},
'tasks': {'key': 'properties.tasks', 'type': 'SoftwareUpdateConfigurationTasks'},
}
def __init__(self, **kwargs):
super(SoftwareUpdateConfiguration, self).__init__(**kwargs)
self.name = None
self.id = None
self.type = None
self.update_configuration = kwargs.get('update_configuration', None)
self.schedule_info = kwargs.get('schedule_info', None)
self.provisioning_state = None
self.error = kwargs.get('error', None)
self.creation_time = None
self.created_by = None
self.last_modified_time = None
self.last_modified_by = None
self.tasks = kwargs.get('tasks', None)
| 43 | 105 | 0.650458 |
ace7dc41d23b2b92882c3db5afb954e43f74688d | 8,153 | py | Python | contrib/devtools/update-translations.py | konez2k/bitgreen | 081f2fe17a75436c9f2d5b4cc2ca848cadf06f92 | [
"MIT"
] | null | null | null | contrib/devtools/update-translations.py | konez2k/bitgreen | 081f2fe17a75436c9f2d5b4cc2ca848cadf06f92 | [
"MIT"
] | null | null | null | contrib/devtools/update-translations.py | konez2k/bitgreen | 081f2fe17a75436c9f2d5b4cc2ca848cadf06f92 | [
"MIT"
] | 1 | 2018-09-11T22:02:55.000Z | 2018-09-11T22:02:55.000Z | #!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'bitgreen_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
except:
print('Failed to get specifier')
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# If both numeric format specifiers and "others" are used, assume we're dealing
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
| 38.63981 | 124 | 0.634 |
ace7dd7dde0f69a5e06447c7c97702f6650deefa | 169 | py | Python | utils/rules/base.py | 18645956947/TripleIE | 326e0844ed2cd167a084658bd89703ed94a6e484 | [
"MIT"
] | null | null | null | utils/rules/base.py | 18645956947/TripleIE | 326e0844ed2cd167a084658bd89703ed94a6e484 | [
"MIT"
] | 1 | 2019-04-02T06:51:07.000Z | 2019-04-02T11:14:38.000Z | utils/rules/base.py | 18645956947/TripleIE | 326e0844ed2cd167a084658bd89703ed94a6e484 | [
"MIT"
] | 1 | 2019-04-02T02:11:08.000Z | 2019-04-02T02:11:08.000Z | import abc
class Base():
def __init__(self, sentence):
self.sentence = sentence
# 获取规则
@abc.abstractmethod
def get_result(self):
pass
| 14.083333 | 33 | 0.609467 |
ace7de16d4d81da19b4e682ce4735555881f6670 | 4,570 | py | Python | flickipedia/web/rest.py | rfaulkner/Flickipedia | 1b53f30be4027901748a09c411d568c7148f4e4b | [
"BSD-2-Clause"
] | 1 | 2016-03-11T09:40:19.000Z | 2016-03-11T09:40:19.000Z | flickipedia/web/rest.py | rfaulkner/Flickipedia | 1b53f30be4027901748a09c411d568c7148f4e4b | [
"BSD-2-Clause"
] | 1 | 2015-02-27T02:23:19.000Z | 2015-02-27T02:23:19.000Z | flickipedia/web/rest.py | rfaulkner/Flickipedia | 1b53f30be4027901748a09c411d568c7148f4e4b | [
"BSD-2-Clause"
] | null | null | null | """
Defines restful interface to backend
"""
from flickipedia.mysqlio import DataIOMySQL
from flickipedia.config import schema
from flickipedia.config import log
from flickipedia.model.likes import LikeModel
from flickipedia.model.exclude import ExcludeModel
def api_insert_article(wiki_page_id, article_name):
"""
Adds an article
"""
raise NotImplementedError()
def api_insert_photo(flickr_id, article_id):
"""
Adds a photo
"""
raise NotImplementedError()
def api_set_like(uid, pid, aid):
"""
Toggles the like-glyph value for the given triplet
:param uid: Flickipedia user id
:param pid: Flickipedia photo id
:param aid: Flickipedia article id
:return: True on success, False otherwise
"""
# TODO - USE MODELS
io = DataIOMySQL()
io.connect()
result = api_get_like(uid, pid, aid)
# toggle and set new value (delete row if it doesn't exist)
if result: # io.update false
try:
io.delete(result)
except Exception as e:
log.error(' "%s"' % e.message)
return False
else: # io.update true
try:
io.insert('Like', user_id=uid, photo_id=pid, article_id=aid)
except Exception as e:
log.error(' "%s"' % e.message)
return False
# Clean up connections
io.sess.close()
io.engine.dispose()
return True
def api_get_like(uid, pid, aid):
"""
Determines the like-glyph value for the given triplet
:param uid: Flickipedia user id
:param pid: Flickipedia photo id
:param aid: Flickipedia article id
:return: 'Like' row if exists, None otherwise
"""
# TODO - USE MODELS
io = DataIOMySQL()
io.connect()
schema_obj = getattr(schema, 'Likes')
# Query to extract
res = io.session.query(schema_obj, schema_obj.is_set).filter(
schema_obj.photo_id == pid,
schema_obj.article_id == aid,
schema_obj.user_id == uid
).limit(1).all()
# Clean up connections
io.sess.close()
io.engine.dispose()
if len(res) == 0:
log.error('REST \'api_get_glyph\': Couldn\'t find ('
'user="%s", photo_id=%s, article_id=%s)' % (
uid, pid, aid))
return None
else:
return res[0]
def api_method_endorse_event(article_id, user_id, photo_id):
"""model logic for photo endorse
:param article_id: article local id
:param user_id: user id
:param photo_id: photo local id
"""
with LikeModel() as lm:
like = lm.get_like(user_id, article_id, photo_id)
if like:
lm.delete_like(like)
else:
lm.insert_like(user_id, article_id, photo_id)
def api_method_endorse_fetch(article_id, user_id, photo_id):
"""model logic for photo endorse fetch
:param article_id: article local id
:param user_id: user id
:param photo_id: photo local id
"""
with LikeModel() as lm:
like = lm.get_like(user_id, article_id, photo_id)
res = 1 if like else 0
return res
def api_method_exclude_event(article_id, user_id, photo_id):
"""model logic for photo exclude
:param article_id: article local id
:param user_id: user id
:param photo_id: photo local id
"""
with ExcludeModel() as em:
exclude = em.get_exclude(user_id, article_id, photo_id)
if exclude:
em.delete_exclude(exclude)
else:
em.insert_exclude(user_id, article_id, photo_id)
def api_method_exclude_fetch(article_id, user_id, photo_id):
"""model logic for photo exclude fetch
:param article_id: article local id
:param user_id: user id
:param photo_id: photo local id
"""
with ExcludeModel() as em:
exclude = em.get_exclude(user_id, article_id, photo_id)
res = 1 if exclude else 0
return res
def api_method_endorse_count(article_id, photo_id):
"""model logic for producing photo endorse count
:param article_id: article local id
:param photo_id: photo local id
"""
with LikeModel() as lm:
return lm.get_likes_article_photo(article_id, photo_id, count=True)
def api_method_exclude_count(article_id, photo_id):
"""model logic for producing photo exclude count
:param article_id: article local id
:param photo_id: photo local id
"""
with ExcludeModel() as em:
return em.get_excludes_article_photo(article_id, photo_id, count=True)
| 25.674157 | 78 | 0.636105 |
ace7de82b0a4f4682f1284b7d8020791c2fbc7ab | 12,793 | py | Python | open_seq2seq/models/speech2text.py | arnav1993k/OpenSeq2Seq | 1fd09333fdc787f13ed062258efffc59f74c2b2a | [
"Apache-2.0"
] | null | null | null | open_seq2seq/models/speech2text.py | arnav1993k/OpenSeq2Seq | 1fd09333fdc787f13ed062258efffc59f74c2b2a | [
"Apache-2.0"
] | null | null | null | open_seq2seq/models/speech2text.py | arnav1993k/OpenSeq2Seq | 1fd09333fdc787f13ed062258efffc59f74c2b2a | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import numpy as np
import pandas as pd
import tensorflow as tf
from six.moves import range
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from io import BytesIO
from open_seq2seq.utils.utils import deco_print
from .encoder_decoder import EncoderDecoderModel
import pickle
def sparse_tensor_to_chars(tensor, idx2char):
text = [''] * tensor.dense_shape[0]
for idx_tuple, value in zip(tensor.indices, tensor.values):
text[idx_tuple[0]] += idx2char[value]
return text
def sparse_tensor_to_chars_bpe(tensor):
idx = [[] for _ in range(tensor.dense_shape[0])]
for idx_tuple, value in zip(tensor.indices, tensor.values):
idx[idx_tuple[0]].append(int(value))
return idx
def dense_tensor_to_chars(tensor, idx2char, startindex, endindex):
batch_size = len(tensor)
text = [''] * batch_size
for batch_num in range(batch_size):
'''text[batch_num] = "".join([idx2char[idx] for idx in tensor[batch_num]
if idx not in [startindex, endindex]])'''
text[batch_num] = ""
for idx in tensor[batch_num]:
if idx == endindex:
break
text[batch_num] += idx2char[idx]
return text
def levenshtein(a, b):
"""Calculates the Levenshtein distance between a and b.
The code was copied from: http://hetland.org/coding/python/levenshtein.py
"""
n, m = len(a), len(b)
if n > m:
# Make sure n <= m, to use O(min(n,m)) space
a, b = b, a
n, m = m, n
current = list(range(n + 1))
for i in range(1, m + 1):
previous, current = current, [i] + [0] * n
for j in range(1, n + 1):
add, delete = previous[j] + 1, current[j - 1] + 1
change = previous[j - 1]
if a[j - 1] != b[i - 1]:
change = change + 1
current[j] = min(add, delete, change)
return current[n]
def plot_attention(alignments, pred_text, encoder_len, training_step):
alignments = alignments[:len(pred_text), :encoder_len]
fig = plt.figure(figsize=(15, 10))
ax = fig.add_subplot(1, 1, 1)
img = ax.imshow(alignments, interpolation='nearest', cmap='Blues')
ax.grid()
#fig.savefig('/home/rgadde/Desktop/OpenSeq2Seq/plots/file{}.png'.format(training_step), dpi=300)
sbuffer = BytesIO()
fig.savefig(sbuffer, dpi=300)
summary = tf.Summary.Image(
encoded_image_string=sbuffer.getvalue(),
height=int(fig.get_figheight() * 2),
width=int(fig.get_figwidth() * 2)
)
summary = tf.Summary.Value(
tag="attention_summary_step_{}".format(int(training_step / 2200)), image=summary)
plt.close(fig)
return summary
class Speech2Text(EncoderDecoderModel):
def _create_decoder(self):
data_layer = self.get_data_layer()
self.params['decoder_params']['tgt_vocab_size'] = (
data_layer.params['tgt_vocab_size']
)
self.dump_outputs = self.params['decoder_params'].get('infer_logits_to_pickle', False)
self.is_bpe = data_layer.params.get('bpe', False)
self.tensor_to_chars = sparse_tensor_to_chars
self.tensor_to_char_params = {}
self.autoregressive = data_layer.params.get('autoregressive', False)
if self.autoregressive:
self.params['decoder_params']['GO_SYMBOL'] = data_layer.start_index
self.params['decoder_params']['END_SYMBOL'] = data_layer.end_index
self.tensor_to_chars = dense_tensor_to_chars
self.tensor_to_char_params['startindex'] = data_layer.start_index
self.tensor_to_char_params['endindex'] = data_layer.end_index
return super(Speech2Text, self)._create_decoder()
def _create_loss(self):
if self.get_data_layer().params.get('autoregressive', False):
self.params['loss_params'][
'batch_size'] = self.params['batch_size_per_gpu']
self.params['loss_params']['tgt_vocab_size'] = (
self.get_data_layer().params['tgt_vocab_size']
)
return super(Speech2Text, self)._create_loss()
def _build_forward_pass_graph(self, input_tensors, gpu_id=0):
"""TensorFlow graph for speech2text model is created here.
This function connects encoder, decoder and loss together. As an input for
encoder it will specify source tensors (as returned from
the data layer). As an input for decoder it will specify target tensors
as well as all output returned from encoder. For loss it
will also specify target tensors and all output returned from
decoder. Note that loss will only be built for mode == "train" or "eval".
Args:
input_tensors (dict): ``input_tensors`` dictionary that has to contain
``source_tensors`` key with the list of all source tensors, and
``target_tensors`` with the list of all target tensors. Note that
``target_tensors`` only need to be provided if mode is
"train" or "eval".
gpu_id (int, optional): id of the GPU where the current copy of the model
is constructed. For Horovod this is always zero.
Returns:
tuple: tuple containing loss tensor as returned from
``loss.compute_loss()`` and list of outputs tensors, which is taken from
``decoder.decode()['outputs']``. When ``mode == 'infer'``, loss will
be None.
"""
if not isinstance(input_tensors, dict) or \
'source_tensors' not in input_tensors:
raise ValueError('Input tensors should be a dict containing '
'"source_tensors" key')
if not isinstance(input_tensors['source_tensors'], list):
raise ValueError('source_tensors should be a list')
source_tensors = input_tensors['source_tensors']
if self.mode == "train" or self.mode == "eval":
if 'target_tensors' not in input_tensors:
raise ValueError('Input tensors should contain "target_tensors" key'
'when mode != "infer"')
if not isinstance(input_tensors['target_tensors'], list):
raise ValueError('target_tensors should be a list')
target_tensors = input_tensors['target_tensors']
with tf.variable_scope("ForwardPass"):
encoder_input = {"source_tensors": source_tensors}
encoder_output = self.encoder.encode(input_dict=encoder_input)
decoder_input = {"encoder_output": encoder_output}
if self.mode == "train" or self.mode == "eval":
decoder_input['target_tensors'] = target_tensors
decoder_output = self.decoder.decode(input_dict=decoder_input)
model_outputs = decoder_output.get("outputs", None)
if self.mode == "train" or self.mode == "eval":
with tf.variable_scope("Loss"):
loss_input_dict = {
"decoder_output": decoder_output,
"target_tensors": target_tensors,
}
loss = self.loss_computator.compute_loss(loss_input_dict)
else:
deco_print("Inference Mode. Loss part of graph isn't built.")
loss = None
if self.dump_outputs:
model_logits = decoder_output.get("logits", None)
return loss, [model_logits]
return loss, model_outputs
def maybe_print_logs(self, input_values, output_values, training_step):
y, len_y = input_values['target_tensors']
decoded_sequence = output_values
y_one_sample = y[0]
len_y_one_sample = len_y[0]
decoded_sequence_one_batch = decoded_sequence[0]
if self.is_bpe:
dec_list = sparse_tensor_to_chars_bpe(decoded_sequence_one_batch)[0]
true_text = self.get_data_layer().sp.DecodeIds(y_one_sample[:len_y_one_sample].tolist())
pred_text = self.get_data_layer().sp.DecodeIds(dec_list)
else:
# we also clip the sample by the correct length
true_text = "".join(map(
self.get_data_layer().params['idx2char'].get,
y_one_sample[:len_y_one_sample],
))
pred_text = "".join(self.tensor_to_chars(
decoded_sequence_one_batch,
self.get_data_layer().params['idx2char'],
**self.tensor_to_char_params
)[0])
sample_wer = levenshtein(true_text.split(), pred_text.split()) / \
len(true_text.split())
self.autoregressive = self.get_data_layer().params.get('autoregressive', False)
self.plot_attention = False # (output_values[1] != None).all()
if self.plot_attention:
attention_summary = plot_attention(
output_values[1][0], pred_text, output_values[2][0], training_step)
deco_print("Sample WER: {:.4f}".format(sample_wer), offset=4)
deco_print("Sample target: " + true_text, offset=4)
deco_print("Sample prediction: " + pred_text, offset=4)
if self.plot_attention:
return {
'Sample WER': sample_wer,
'Attention Summary': attention_summary,
}
else:
return {
'Sample WER': sample_wer,
}
def finalize_evaluation(self, results_per_batch, training_step=None):
total_word_lev = 0.0
total_word_count = 0.0
for word_lev, word_count in results_per_batch:
total_word_lev += word_lev
total_word_count += word_count
total_wer = 1.0 * total_word_lev / total_word_count
deco_print("Validation WER: {:.4f}".format(total_wer), offset=4)
return {
"Eval WER": total_wer,
}
def evaluate(self, input_values, output_values):
total_word_lev = 0.0
total_word_count = 0.0
decoded_sequence = output_values[0]
if self.is_bpe:
decoded_texts = sparse_tensor_to_chars_bpe(decoded_sequence)
else:
decoded_texts = self.tensor_to_chars(
decoded_sequence,
self.get_data_layer().params['idx2char'],
**self.tensor_to_char_params
)
batch_size = input_values['source_tensors'][0].shape[0]
for sample_id in range(batch_size):
# y is the third returned input value, thus input_values[2]
# len_y is the fourth returned input value
y = input_values['target_tensors'][0][sample_id]
len_y = input_values['target_tensors'][1][sample_id]
if self.is_bpe:
true_text = self.get_data_layer().sp.DecodeIds(y[:len_y].tolist())
pred_text = self.get_data_layer().sp.DecodeIds(decoded_texts[sample_id])
else:
true_text = "".join(map(self.get_data_layer().params['idx2char'].get,
y[:len_y]))
pred_text = "".join(decoded_texts[sample_id])
if self.get_data_layer().params.get('autoregressive', False):
true_text = true_text[:-4]
# print('TRUE_TEXT: "{}"'.format(true_text))
# print('PRED_TEXT: "{}"'.format(pred_text))
total_word_lev += levenshtein(true_text.split(), pred_text.split())
total_word_count += len(true_text.split())
return total_word_lev, total_word_count
def infer(self, input_values, output_values):
preds = []
decoded_sequence = output_values[0]
if self.dump_outputs:
# decoded_sequence has 'time_major' shape: [T, B, C]
for i in range(decoded_sequence.shape[0]):
preds.append(decoded_sequence[i, :, :].squeeze())
else:
decoded_texts = self.tensor_to_chars(
decoded_sequence,
self.get_data_layer().params['idx2char'],
**self.tensor_to_char_params
)
for decoded_text in decoded_texts:
preds.append("".join(decoded_text))
return preds, input_values['source_ids']
def finalize_inference(self, results_per_batch, output_file):
preds = []
ids = []
for result, idx in results_per_batch:
preds.extend(result)
ids.extend(idx)
preds = np.array(preds)
ids = np.hstack(ids)
# restoring the correct order
preds = preds[np.argsort(ids)]
if self.dump_outputs:
dump_out = {}
dump_results = {}
files = self.get_data_layer().all_files
for i,f in enumerate(files):
dump_results[f]=preds[i]
dump_out["results"] = dump_results
dump_out["stride"] = self.get_data_layer().params["window_stride"]
convs = self.encoder.params["convnet_layers"]
scale = 1
for c in convs:
scale*=c["stride"][0]
dump_out["scale"] = scale
dump_out["dictionary"] = self.get_data_layer().params['idx2char']
with open(output_file, 'wb') as f:
pickle.dump(dump_out, f, protocol=pickle.HIGHEST_PROTOCOL)
else:
pd.DataFrame(
{
'wav_filename': self.get_data_layer().all_files,
'predicted_transcript': preds,
},
columns=['wav_filename', 'predicted_transcript'],
).to_csv(output_file, index=False)
def _get_num_objects_per_step(self, worker_id=0):
"""Returns number of audio frames in current batch."""
data_layer = self.get_data_layer(worker_id)
num_frames = tf.reduce_sum(data_layer.input_tensors['source_tensors'][1])
return num_frames
| 35.734637 | 98 | 0.668647 |
ace7df9afc2a1f5e471d9f2878f7879ed704af58 | 8,367 | py | Python | packages/dila2sql/dila2sql/utils.py | SocialGouv/dila2sql | c9fa54bdb9483139218fab643fb5ab50ad9b114a | [
"CC0-1.0"
] | 4 | 2019-04-16T08:57:17.000Z | 2021-03-22T14:42:41.000Z | packages/dila2sql/dila2sql/utils.py | SocialGouv/dila2sql | c9fa54bdb9483139218fab643fb5ab50ad9b114a | [
"CC0-1.0"
] | 16 | 2019-03-27T08:49:38.000Z | 2020-08-11T06:34:44.000Z | packages/dila2sql/dila2sql/utils.py | SocialGouv/dila2sql | c9fa54bdb9483139218fab643fb5ab50ad9b114a | [
"CC0-1.0"
] | 2 | 2019-08-08T13:25:03.000Z | 2020-08-16T18:12:20.000Z | import os
import os.path
import re
import sre_parse
import traceback
import collections
from itertools import islice
from unicodedata import combining, decomposition, normalize
from peewee import SqliteDatabase, PostgresqlDatabase, ProgrammingError, IntegrityError
from playhouse.db_url import connect
from datetime import date
from .models import db_proxy, DBMeta
if not hasattr(re, 'Match'):
# For Python 3.6
re.Match = type(re.compile('').match(''))
ROOT = os.path.dirname(__file__) + '/'
def connect_db(db_url, create_schema=True, update_schema=True):
db = connect(db_url)
db.interpolation_char = '?' if isinstance(db, SqliteDatabase) else "%s"
if create_schema:
if not db.table_exists("db_meta"):
with open(ROOT + 'sql/schema.sql', 'r') as f:
if isinstance(db, PostgresqlDatabase):
db.cursor().execute(f.read())
elif isinstance(db, SqliteDatabase):
db.cursor().executescript(f.read())
if update_schema:
r = run_migrations(db)
if r == '!RECREATE!':
return connect_db(db_url, create_schema=True)
return db
def run_migrations(db):
db_proxy.initialize(db)
db_meta = DBMeta.select().where(DBMeta.key == 'schema_version').first()
v = int(db_meta.value) if db_meta else 0
if v == 0:
DBMeta.insert(key='schema_version', value=v).execute()
migrations = open(ROOT + 'sql/migrations.sql').read().split('\n\n-- migration #')
n = 0
for m in migrations[1:]:
n, sql = m.split('\n', 1)
n = int(n)
if v >= n:
continue
sql = sql.strip()
if sql == '!RECREATE!':
print('Recreating DB from scratch (migration #%s)...' % n)
db.close()
# TODO
# os.rename(db.address, db.address + '.back')
return sql
print('Running DB migration #%s...' % n)
try:
db.cursor().executescript(sql)
except (IntegrityError, ProgrammingError):
traceback.print_exc()
r = input('Have you already run this migration? (y/N) ')
if r.lower() != 'y':
raise SystemExit(1)
DBMeta.update(value=n).where(key='schema_version').execute()
return n - v
def group_by_2(iterable):
iterable = iterable.__iter__()
next = iterable.__next__
while True:
try:
a = next()
except StopIteration:
return
try:
b = next()
except StopIteration:
raise ValueError("iterable returned an odd number of items")
yield (a, b)
def add_accentless_fallbacks(pattern):
r"""Modifies a regexp pattern to also match accentless text.
>>> add_accentless_fallbacks(r'Arrêté')
'Arr[êe]t[ée]'
>>> add_accentless_fallbacks(r'foo|bar')
'foo|bar'
>>> add_accentless_fallbacks(r'm[êè]me')
'm[êèe]me'
>>> add_accentless_fallbacks(r'm[êèe]me')
'm[êèe]me'
>>> add_accentless_fallbacks(r'\[Décret')
'\\[D[ée]cret'
>>> add_accentless_fallbacks(r'\[(?P<blé>Décret[ée])?(?(blé) à | a )(?P=blé)')
'\\[(?P<blé>D[ée]cret[ée])?(?(blé) [àa] | a )(?P=blé)'
>>> add_accentless_fallbacks(r'(?# commenté )')
'(?# commenté )'
>>> add_accentless_fallbacks(r'[\]é]')
'[\\]ée]'
"""
def remove_accent(c):
return chr(int(decomposition(c).split(' ', 1)[0], 16))
r = []
source = sre_parse.Tokenizer(pattern)
sourceget = source.get
while True:
this = source.next
if this is None:
break # end of pattern
sourceget()
if this[0] == '\\':
r.append(this)
elif this == '[':
elements = []
accented = set()
while True:
this = sourceget()
if this in (None, ']'):
break
elements.append(this)
if this[0] == '\\':
continue
if decomposition(this):
accented.add(this)
if accented:
elements_set = set(elements)
for c in sorted(accented):
accentless = remove_accent(c)
if accentless not in elements_set:
elements.append(accentless)
elements_set.add(accentless)
r.append('[')
r.extend(elements)
if this:
r.append(']')
elif this == '(' and source.match('?'):
this = sourceget()
if this is None:
this = ''
elif this == 'P':
if source.next == '<':
# named group
this += source.getuntil('>') + '>'
elif source.next == '=':
# named backreference
this += source.getuntil(')') + ')'
elif this == '#':
# comment
this += source.getuntil(')') + ')'
elif this == '(':
# conditional backreference group
this += source.getuntil(')') + ')'
r.append('(?' + this)
else:
if decomposition(this):
this = '[%s%s]' % (this, remove_accent(this))
r.append(this)
return ''.join(r)
nonalphanum_re = re.compile(r'[^a-z0-9]')
def strip_accents(s):
return ''.join(c for c in normalize('NFKD', s) if not combining(c))
strip_down = lambda s: strip_accents(s).lower()
filter_nonalnum = lambda s: nonalphanum_re.sub('', strip_down(s))
def strip_prefix(s, prefix):
i = len(prefix)
if s[:i] == prefix:
return s[i:]
return s
def id_to_path(i):
return '/'.join((i[0:4], i[4:8], i[8:10], i[10:12], i[12:14], i[14:16], i[16:18], i))
def reconstruct_path(dossier, cid, sous_dossier, id):
x = 'en' if dossier.endswith('_en_vigueur') else 'non'
prefix = 'legi/global/code_et_TNC_%s_vigueur' % x
if id[4:8] != 'TEXT':
id = id_to_path(id)
return '/'.join((prefix, dossier, id_to_path(cid), sous_dossier, id+'.xml'))
def mimic_case(old_word, new_word):
"""
>>> print(mimic_case('EtAt', 'état'))
ÉtAt
"""
if len(old_word) != len(new_word):
raise ValueError("lengths don't match")
return ''.join([
new_word[i].upper() if old_word[i].isupper() else new_word[i].lower()
for i in range(len(old_word))
])
ascii_spaces_re = re.compile(r'(?: {2}| *[\t\n\r\f\v])[ \t\n\r\f\v]*')
nonword_re = re.compile(r'\W', re.U)
spaces_re = re.compile(r'\s+', re.U)
word_re = re.compile(r'\w{2,}', re.U)
def upper_words_percentage(s):
words = word_re.findall(s)
if not words:
return 0
return len([w for w in words if w.isupper()]) / len(words)
def partition(l, predicate):
a, b = [], []
for e in l:
if predicate(e):
a.append(e)
else:
b.append(e)
return a, b
def show_match(m, n=30, wrapper='%s{%s}%s'):
if type(m) is re.Match:
m_string = m.string
m_start, m_end = m.span()
else:
m_string, (m_start, m_end) = m
before = max(m_string.rfind(' ', 0, m_start - n), 0) if m_start > n else 0
before = ('[…]' if before > 0 else '') + m_string[before:m_start]
after = m_string.find(' ', m_end + n)
after = m_string[m_end:] if after == -1 else m_string[m_end:after+1] + '[…]'
return wrapper % (before, m_string[m_start:m_end], after)
def json_serializer(obj):
if isinstance(obj, date):
return str(obj)
raise TypeError("Type %s not serializable" % type(obj))
def consume(iterator, n):
# from https://stackoverflow.com/a/16800855
"Advance the iterator n-steps ahead. If n is none, consume entirely."
# Use functions that consume iterators at C speed.
if n is None:
# feed the entire iterator into a zero-length deque
collections.deque(iterator, maxlen=0)
else:
# advance to the empty slice starting at position n
next(islice(iterator, n, n), None)
def progressbar(iterable, **kwargs):
try:
from tqdm import tqdm
return tqdm(iterable, **kwargs)
except ImportError:
print('[warning] tqdm is not installed, the progress bar is disabled')
return iterable
| 29.989247 | 89 | 0.551691 |
ace7e1059b0553591118d0d496507fe8b9f8e6f6 | 7,827 | py | Python | flopy/modflow/mfmlt.py | codacy-badger/flopy | de874b02661f59ef4e99f18272883a13a4d55f16 | [
"CC0-1.0",
"BSD-3-Clause"
] | 1 | 2022-03-30T14:48:22.000Z | 2022-03-30T14:48:22.000Z | flopy/modflow/mfmlt.py | codacy-badger/flopy | de874b02661f59ef4e99f18272883a13a4d55f16 | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | flopy/modflow/mfmlt.py | codacy-badger/flopy | de874b02661f59ef4e99f18272883a13a4d55f16 | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | """
mfmlt module. Contains the ModflowMlt class. Note that the user can access
the ModflowMlt class as `flopy.modflow.ModflowMlt`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow-nwt/MODFLOW-NWT-Guide/mult.htm>`_.
"""
import collections
import sys
import numpy as np
from ..pakbase import Package
from ..utils import Util2d
class ModflowMlt(Package):
"""
MODFLOW Mult Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
mult_dict : dict
Dictionary with mult data for the model. mult_dict is typically
instantiated using load method.
extension : string
Filename extension (default is 'drn')
unitnumber : int
File unit number (default is 21).
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Parameters are supported in Flopy only when reading in existing models.
Parameter values are converted to native values in Flopy and the
connection to "parameters" is thus nonexistent.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> mltdict = flopy.modflow.ModflowZon(m, mult_dict=mult_dict)
"""
def __init__(self, model, mult_dict=None,
extension='mlt', unitnumber=None, filenames=None):
"""
Package constructor.
"""
# set default unit number of one is not specified
if unitnumber is None:
unitnumber = ModflowMlt.defaultunit()
# set filenames
if filenames is None:
filenames = [None]
elif isinstance(filenames, str):
filenames = [filenames]
# Fill namefile items
name = [ModflowMlt.ftype()]
units = [unitnumber]
extra = ['']
# set package name
fname = [filenames[0]]
# Call ancestor's init to set self.parent, extension, name and unit number
Package.__init__(self, model, extension=extension, name=name,
unit_number=units, extra=extra, filenames=fname)
self.heading = '# {} package for '.format(self.name[0]) + \
' {}, '.format(model.version_types[model.version]) + \
'generated by Flopy.'
self.url = 'mult.htm'
self.nml = 0
if mult_dict is not None:
self.nml = len(mult_dict)
self.mult_dict = mult_dict
# print mult_dict
self.parent.add_package(self)
def write_file(self):
"""
Write the package file.
Returns
-------
None
Notes
-----
Not implemented because parameters are only supported on load
"""
pass
@staticmethod
def load(f, model, nrow=None, ncol=None, ext_unit_dict=None):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
nrow : int
number of rows. If not specified it will be retrieved from
the model object. (default is None).
ncol : int
number of columns. If not specified it will be retrieved from
the model object. (default is None).
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
Returns
-------
zone : ModflowMult dict
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> mlt = flopy.modflow.ModflowMlt.load('test.mlt', m)
"""
if model.verbose:
sys.stdout.write('loading mult package file...\n')
openfile = not hasattr(f, 'read')
if openfile:
filename = f
f = open(filename, 'r')
# dataset 0 -- header
while True:
line = f.readline()
if line[0] != '#':
break
# dataset 1
t = line.strip().split()
nml = int(t[0])
# get nlay,nrow,ncol if not passed
if nrow is None and ncol is None:
nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
# read zone data
mult_dict = collections.OrderedDict()
for n in range(nml):
line = f.readline()
t = line.strip().split()
if len(t[0]) > 10:
mltnam = t[0][0:10].lower()
else:
mltnam = t[0].lower()
if model.verbose:
sys.stdout.write(
' reading data for "{:<10s}" mult\n'.format(mltnam))
readArray = True
kwrd = None
if len(t) > 1:
if 'function' in t[1].lower() or 'expression' in t[1].lower():
readArray = False
kwrd = t[1].lower()
# load data
if readArray:
t = Util2d.load(f, model, (nrow, ncol), np.float32, mltnam,
ext_unit_dict)
# add unit number to list of external files in
# ext_unit_dict to remove.
if t.locat is not None:
model.add_pop_key_list(t.locat)
else:
line = f.readline()
t = [kwrd, line]
t = ModflowMlt.mult_function(mult_dict, line)
mult_dict[mltnam] = t
if openfile:
f.close()
# set package unit number
unitnumber = None
filenames = [None]
if ext_unit_dict is not None:
unitnumber, filenames[0] = \
model.get_ext_dict_attr(ext_unit_dict,
filetype=ModflowMlt.ftype())
# create mlt dictionary
mlt = ModflowMlt(model, mult_dict=mult_dict, unitnumber=unitnumber,
filenames=filenames)
return mlt
@staticmethod
def mult_function(mult_dict, line):
"""
Construct a multiplier for the 'FUNCTION' option
"""
t = line.strip().split()
basename = t.pop(0).lower()[0:10]
multarray = mult_dict[basename]
try:
multarray = multarray.array.copy()
except:
multarray = multarray.copy()
# Construct the multiplier array
while True:
if len(t) < 2:
break
op = t.pop(0)
multname = t.pop(0)[0:10]
try:
atemp = mult_dict[multname.lower()].array
except:
atemp = mult_dict[multname.lower()]
if op == '+':
multarray = multarray + atemp
elif op == '*':
multarray = multarray * atemp
elif op == '-':
multarray = multarray - atemp
elif op == '/':
multarray = multarray / atemp
elif op == '^':
multarray = multarray ** atemp
else:
s = 'Invalid MULT operation {}'.format(op)
raise Exception(s)
return multarray
@staticmethod
def ftype():
return 'MULT'
@staticmethod
def defaultunit():
return 1002
| 29.096654 | 82 | 0.527916 |
ace7e279d9ffd035c925a6b689552854b2abac75 | 4,275 | py | Python | tests/test_normal_form.py | lan496/hsnf | 627aed7699ff223edf739709eb4eae5f4ad255d2 | [
"MIT"
] | 5 | 2019-07-15T08:25:07.000Z | 2022-03-22T09:58:42.000Z | tests/test_normal_form.py | lan496/hsnf | 627aed7699ff223edf739709eb4eae5f4ad255d2 | [
"MIT"
] | 2 | 2019-02-12T23:49:49.000Z | 2019-02-13T03:19:47.000Z | tests/test_normal_form.py | lan496/hsnf | 627aed7699ff223edf739709eb4eae5f4ad255d2 | [
"MIT"
] | 1 | 2021-11-18T19:55:22.000Z | 2021-11-18T19:55:22.000Z | # Copyright (c) 2019 Kohei Shinohara
# Distributed under the terms of the MIT License.
import unittest
import numpy as np
from hsnf import (
smith_normal_form,
row_style_hermite_normal_form,
column_style_hermite_normal_form,
)
class TestNormalForm(unittest.TestCase):
def setUp(self):
self.random_state = 0
def test_snf(self):
np.random.seed(self.random_state)
# test for square and non-square matrices
list_size = [
(1000, 3, 7),
(1000, 11, 5),
(1000, 13, 13)
]
for size in list_size:
X = np.random.randint(-1, 1, size=size)
for i in range(size[0]):
D, L, R = smith_normal_form(X[i])
self.verify_snf(X[i], D, L, R)
def test_hnf(self):
np.random.seed(self.random_state)
# test for square and non-square matrices
list_size = [
(1000, 3, 7),
(1000, 11, 5),
(1000, 13, 13)
]
for size in list_size:
X = np.random.randint(-1, 1, size=size)
for i in range(size[0]):
H, L = row_style_hermite_normal_form(X[i])
self.verify_row_style_hnf(X[i], H, L)
H, R = column_style_hermite_normal_form(X[i])
self.verify_column_style_hnf(X[i], H, R)
@unittest.skip
def test_hnf_uniqueness(self):
A1 = np.array([
[3, 3, 1, 4],
[0, 1, 0, 0],
[0, 0, 19, 16],
[0, 0, 0, 3]
])
H1_row_exp = np.array([
[3, 0, 1, 1],
[0, 1, 0, 0],
[0, 0, 19, 1],
[0, 0, 0, 3]
])
H1_row_act, _ = row_style_hermite_normal_form(A1)
self.assertTrue(np.allclose(H1_row_act, H1_row_exp))
A2 = np.array([
[0, 0, 5, 0, 1, 4],
[0, 0, 0, -1, -4, 99],
[0, 0, 0, 20, 19, 16],
[0, 0, 0, 0, 2, 1],
[0, 0, 0, 0, 0, 3],
[0, 0, 0, 0, 0, 0]
])
H2_row_exp = np.array([
[0, 0, 5, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]
])
H2_row_act, _ = row_style_hermite_normal_form(A2)
print(H2_row_act)
self.assertTrue(np.allclose(H2_row_act, H2_row_exp))
A3 = np.array([
[2, 3, 6, 2],
[5, 6, 1, 6],
[8, 3, 1, 1]
])
H3_row_exp = np.array([
[1, 0, 50, -11],
[0, 3, 28, -2],
[0, 0, 61, -13]
])
H3_row_act, _ = row_style_hermite_normal_form(A3)
self.assertTrue(np.allclose(H3_row_act, H3_row_exp))
def verify_snf(self, M, D, L, R):
D_re = np.dot(L, np.dot(M, R))
self.assertTrue(np.array_equal(D_re, D))
D_diag = np.diagonal(D)
rank = np.count_nonzero(D_diag)
self.assertEqual(np.count_nonzero(D) - rank, 0)
for i in range(rank - 1):
self.assertTrue(D_diag[i + 1] % D_diag[i] == 0)
self.is_unimodular(L)
self.is_unimodular(R)
def verify_row_style_hnf(self, M, H, L):
H_re = np.dot(L, M)
self.assertTrue(np.array_equal(H_re, H))
self.assertTrue(np.allclose(H, np.triu(H)))
for s in range(min(H.shape)):
self.assertTrue(H[s, s] >= 0)
if (s + 1 < H.shape[0]) and (H[s, s] > 0):
self.assertTrue(np.max(H[(s + 1):, s]) < H[s, s])
self.is_unimodular(L)
def verify_column_style_hnf(self, M, H, R):
H_re = np.dot(M, R)
self.assertTrue(np.array_equal(H_re, H))
self.assertTrue(np.allclose(H, np.tril(H)))
for s in range(min(H.shape)):
self.assertTrue(H[s, s] >= 0)
if (s > 0) and (H[s, s] > 0):
self.assertTrue(np.max(H[s, :s]) < H[s, s])
self.is_unimodular(R)
def is_unimodular(self, A):
self.assertAlmostEqual(np.abs(np.linalg.det(A)), 1)
A_inv = np.around(np.linalg.inv(A))
self.assertTrue(np.allclose(np.eye(A.shape[0]), np.dot(A, A_inv)))
if __name__ == '__main__':
unittest.main()
| 28.311258 | 74 | 0.486316 |
ace7e5cba52001480cd2d6463275bde1ccb15906 | 11,934 | py | Python | smac/tae/base.py | TheVinhLuong102/AutoML-SMAC3 | d4cb7ed76e0fbdd9edf6ab5360ff75de67ac2195 | [
"BSD-3-Clause"
] | 711 | 2016-08-22T14:23:29.000Z | 2022-03-29T21:56:12.000Z | smac/tae/base.py | TheVinhLuong102/AutoML-SMAC3 | d4cb7ed76e0fbdd9edf6ab5360ff75de67ac2195 | [
"BSD-3-Clause"
] | 770 | 2016-08-17T14:39:07.000Z | 2022-03-31T11:35:58.000Z | smac/tae/base.py | TheVinhLuong102/AutoML-SMAC3 | d4cb7ed76e0fbdd9edf6ab5360ff75de67ac2195 | [
"BSD-3-Clause"
] | 210 | 2016-08-20T15:14:29.000Z | 2022-03-30T08:04:34.000Z | from abc import ABC, abstractmethod
import math
import time
import traceback
import typing
import numpy as np
from smac.configspace import Configuration
from smac.utils.constants import MAXINT
from smac.utils.logging import PickableLoggerAdapter
from smac.runhistory.runhistory import RunInfo, RunValue
from smac.stats.stats import Stats
from smac.tae import StatusType
__copyright__ = "Copyright 2021, AutoML.org Freiburg-Hannover"
__license__ = "3-clause BSD"
class BaseRunner(ABC):
"""Interface class to handle the execution of SMAC' configurations.
This interface defines how to interact with the SMBO loop.
The complexity of running a configuration as well as handling the
results is abstracted to the SMBO via a BaseRunner.
From SMBO perspective, launching a configuration follows a
submit/collect scheme as follows:
1. A run is launched via submit_run()
1. Submit_run internally calls run_wrapper(), a method that
contains common processing functions among different runners,
for example, handling capping and stats checking.
2. A class that implements BaseRunner defines run() which is
really the algorithm to translate a RunInfo to a RunValue, i.e.
a configuration to an actual result.
2. A completed run is collected via get_finished_runs(), which returns
any finished runs, if any.
3. This interface also offers the method wait() as a mechanism to make
sure we have enough data in the next iteration to make a decision. For
example, the intensifier might not be able to select the next challenger
until more results are available.
"""
def __init__(
self,
ta: typing.Union[typing.List[str], typing.Callable],
stats: Stats,
run_obj: str = "runtime",
par_factor: int = 1,
cost_for_crash: float = float(MAXINT),
abort_on_first_run_crash: bool = True,
):
"""
Attributes
----------
results
ta
stats
run_obj
par_factor
cost_for_crash
abort_first_run_crash
Parameters
----------
ta : typing.Union[typing.List[str], typing.Callable]
target algorithm
stats: Stats
stats object to collect statistics about runtime/additional info
run_obj: str
run objective of SMAC
par_factor: int
penalization factor
cost_for_crash : float
cost that is used in case of crashed runs (including runs
that returned NaN or inf)
abort_on_first_run_crash: bool
if true and first run crashes, raise FirstRunCrashedException
"""
# The results is a FIFO structure, implemented via a list
# (because the Queue lock is not pickable). Finished runs are
# put in this list and collected via process_finished_runs
self.results = [] # type: typing.List[typing.Tuple[RunInfo, RunValue]]
# Below state the support for a Runner algorithm that
# implements a ta
self.ta = ta
self.stats = stats
self.run_obj = run_obj
self.par_factor = par_factor
self.cost_for_crash = cost_for_crash
self.abort_on_first_run_crash = abort_on_first_run_crash
self.logger = PickableLoggerAdapter(
self.__module__ + '.' + self.__class__.__name__)
self._supports_memory_limit = False
super().__init__()
@abstractmethod
def submit_run(self, run_info: RunInfo) -> None:
"""This function submits a configuration
embedded in a RunInfo object, and uses one of the workers
to produce a result (such result will eventually be available
on the self.results FIFO).
This interface method will be called by SMBO, with the expectation
that a function will be executed by a worker.
What will be executed is dictated by run_info, and "how" will it be
executed is decided via the child class that implements a run() method.
Because config submission can be a serial/parallel endeavor,
it is expected to be implemented by a child class.
Parameters
----------
run_info: RunInfo
An object containing the configuration and the necessary data to run it
"""
pass
@abstractmethod
def run(
self, config: Configuration,
instance: str,
cutoff: typing.Optional[float] = None,
seed: int = 12345,
budget: typing.Optional[float] = None,
instance_specific: str = "0",
) -> typing.Tuple[StatusType, float, float, typing.Dict]:
"""Runs target algorithm <self.ta> with configuration <config> on
instance <instance> with instance specifics <specifics> for at most
<cutoff> seconds and random seed <seed>
This method exemplifies how to defined the run() method
Parameters
----------
config : Configuration
dictionary param -> value
instance : string
problem instance
cutoff : float, optional
Wallclock time limit of the target algorithm. If no value is
provided no limit will be enforced.
seed : int
random seed
budget : float, optional
A positive, real-valued number representing an arbitrary limit to the target
algorithm. Handled by the target algorithm internally
instance_specific: str
instance specific information (e.g., domain file or solution)
Returns
-------
status: enum of StatusType (int)
{SUCCESS, TIMEOUT, CRASHED, ABORT}
cost: float
cost/regret/quality (float) (None, if not returned by TA)
runtime: float
runtime (None if not returned by TA)
additional_info: dict
all further additional run information
"""
pass
def run_wrapper(
self,
run_info: RunInfo,
) -> typing.Tuple[RunInfo, RunValue]:
"""Wrapper around run() to exec and check the execution of a given config file
This function encapsulates common handling/processing, so that run() implementation
is simplified.
Parameters
----------
run_info : RunInfo
Object that contains enough information to execute a configuration run in
isolation.
Returns
-------
RunInfo:
an object containing the configuration launched
RunValue:
Contains information about the status/performance of config
"""
start = time.time()
if run_info.cutoff is None and self.run_obj == "runtime":
if self.logger:
self.logger.critical(
"For scenarios optimizing running time "
"(run objective), a cutoff time is required, "
"but not given to this call."
)
raise ValueError(
"For scenarios optimizing running time "
"(run objective), a cutoff time is required, "
"but not given to this call."
)
cutoff = None
if run_info.cutoff is not None:
cutoff = int(math.ceil(run_info.cutoff))
try:
status, cost, runtime, additional_info = self.run(
config=run_info.config,
instance=run_info.instance,
cutoff=cutoff,
seed=run_info.seed,
budget=run_info.budget,
instance_specific=run_info.instance_specific
)
except Exception as e:
status = StatusType.CRASHED
cost = self.cost_for_crash
runtime = time.time() - start
# Add context information to the error message
exception_traceback = traceback.format_exc()
error_message = repr(e)
additional_info = {
'traceback': exception_traceback,
'error': error_message
}
end = time.time()
if run_info.budget == 0 and status == StatusType.DONOTADVANCE:
raise ValueError(
"Cannot handle DONOTADVANCE state when using intensify or SH/HB on "
"instances."
)
# Catch NaN or inf.
if (
self.run_obj == 'runtime' and not np.isfinite(runtime)
or self.run_obj == 'quality' and not np.isfinite(cost)
):
if self.logger:
self.logger.warning("Target Algorithm returned NaN or inf as {}. "
"Algorithm run is treated as CRASHED, cost "
"is set to {} for quality scenarios. "
"(Change value through \"cost_for_crash\""
"-option.)".format(
self.run_obj,
self.cost_for_crash)
)
status = StatusType.CRASHED
if self.run_obj == "runtime":
# The following line pleases mypy - we already check for cutoff not being none above,
# prior to calling run. However, mypy assumes that the data type of cutoff
# is still Optional[int]
assert cutoff is not None
if runtime > self.par_factor * cutoff:
self.logger.warning(
"Returned running time is larger "
"than {0} times the passed cutoff time. "
"Clamping to {0} x cutoff.".format(self.par_factor))
runtime = cutoff * self.par_factor
status = StatusType.TIMEOUT
if status == StatusType.SUCCESS:
cost = runtime
else:
cost = cutoff * self.par_factor
if status == StatusType.TIMEOUT and run_info.capped:
status = StatusType.CAPPED
else:
if status == StatusType.CRASHED:
cost = self.cost_for_crash
return run_info, RunValue(
status=status,
cost=cost,
time=runtime,
additional_info=additional_info,
starttime=start,
endtime=end
)
@abstractmethod
def get_finished_runs(self) -> typing.List[typing.Tuple[RunInfo, RunValue]]:
"""This method returns any finished configuration, and returns a list with
the results of exercising the configurations. This class keeps populating results
to self.results until a call to get_finished runs is done. In this case, the
self.results list is emptied and all RunValues produced by running run() are
returned.
Returns
-------
List[RunInfo, RunValue]: A list of pais RunInfo/RunValues
a submitted configuration
"""
raise NotImplementedError()
@abstractmethod
def wait(self) -> None:
"""SMBO/intensifier might need to wait for runs to finish before making a decision.
This method waits until 1 run completes
"""
pass
@abstractmethod
def pending_runs(self) -> bool:
"""
Whether or not there are configs still running. Generally if the runner is serial,
launching a run instantly returns it's result. On parallel runners, there might
be pending configurations to complete.
"""
pass
@abstractmethod
def num_workers(self) -> int:
"""
Return the active number of workers that will execute tae runs.
"""
pass
| 36.273556 | 97 | 0.590079 |
ace7e5d3fae16088b576f1fd933d9f67d0341181 | 2,969 | py | Python | app.py | wassan128/kinkyo575 | a047ab6493fb8b7d9bb234c855ee307a13e5c91f | [
"MIT"
] | 3 | 2018-07-24T07:30:16.000Z | 2020-06-20T04:18:07.000Z | app.py | wassan128/kinkyo575 | a047ab6493fb8b7d9bb234c855ee307a13e5c91f | [
"MIT"
] | null | null | null | app.py | wassan128/kinkyo575 | a047ab6493fb8b7d9bb234c855ee307a13e5c91f | [
"MIT"
] | null | null | null | from flask import abort, Flask, flash, jsonify, redirect, request, render_template, session
import os
import tweepy
from twitter_handle import Tweets
from generate import *
CONSUMER_KEY = os.environ["CONSUMER_KEY"]
CONSUMER_SECRET = os.environ["CONSUMER_SECRET"]
#HOST = "http://localhost:5000"
HOST = "https://kinkyo575.herokuapp.com"
URL_CALLBACK = "{}/callback".format(HOST)
app = Flask(__name__)
app.secret_key = os.environ["SECRET_KEY"]
def is_login():
if session.get("is_login"):
api = get_interface()
name = False
if api:
name = api.me().name
return name
else:
return False
def get_interface():
access_token = session.get("access_token")
access_token_secret = session.get("access_token_secret")
api = None
if access_token and access_token_secret:
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
return api
def get_senryu():
senryu = None
api = get_interface()
if api:
twit = Tweets(api)
data = twit.get()
senryu = generate_575(data)
return senryu
@app.route("/")
def index():
islogin = is_login()
return render_template("index.html", user=islogin)
@app.route("/auth")
def auth():
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET, URL_CALLBACK)
redirect_url = auth.get_authorization_url()
session["request_token"] = auth.request_token
return redirect(redirect_url)
@app.route("/callback")
def callback():
session["verifier"] = request.args.get("oauth_verifier")
try:
token = session.pop("request_token", None)
verifier = session.pop("verifier", None)
if token is not None and verifier is not None:
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET, URL_CALLBACK)
auth.request_token = token
auth.get_access_token(verifier)
session["access_token"] = auth.access_token
session["access_token_secret"] = auth.access_token_secret
session["is_login"] = True
return redirect("/")
else:
return redirect("/auth")
except:
abort(401)
@app.route("/senryu")
def senryu():
senryu = get_senryu()
return jsonify(senryu)
@app.route("/post", methods=["POST"])
def post():
text = request.data.decode("utf-8")
api = get_interface()
if api:
twit = Tweets(api)
res = twit.post("{}\n#近況圧縮575 {}".format(text.replace("\n", " ").replace(" ", ""), HOST))
return jsonify({"res": res})
@app.route("/logout")
def logout():
session.clear()
return redirect("/")
@app.errorhandler(401)
def error_401(error):
return "Error - 401"
@app.errorhandler(404)
def error_404(error):
return "Error - 404"
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
app.run(port=port)
| 25.817391 | 97 | 0.645672 |
ace7e85ddf6c4bf5174552665dee1521b5c40d3a | 10,690 | py | Python | tensorflow/python/ops/linalg/linear_operator_composition.py | EricLi404/tensorflow | 23759800d89f7b5362c338d9a3fd72a6810c3e22 | [
"Apache-2.0"
] | 74 | 2020-07-06T17:11:39.000Z | 2022-01-28T06:31:28.000Z | tensorflow/python/ops/linalg/linear_operator_composition.py | sseung0703/tensorflow | be084bd7a4dd241eb781fc704f57bcacc5c9b6dd | [
"Apache-2.0"
] | 88 | 2020-11-24T08:18:10.000Z | 2022-03-25T20:28:30.000Z | tensorflow/python/ops/linalg/linear_operator_composition.py | sseung0703/tensorflow | be084bd7a4dd241eb781fc704f57bcacc5c9b6dd | [
"Apache-2.0"
] | 12 | 2020-07-08T07:27:17.000Z | 2021-12-27T08:54:27.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Composes one or more `LinearOperators`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorComposition"]
@tf_export("linalg.LinearOperatorComposition")
class LinearOperatorComposition(linear_operator.LinearOperator):
"""Composes one or more `LinearOperators`.
This operator composes one or more linear operators `[op1,...,opJ]`,
building a new `LinearOperator` with action defined by:
```
op_composed(x) := op1(op2(...(opJ(x)...))
```
If `opj` acts like [batch] matrix `Aj`, then `op_composed` acts like the
[batch] matrix formed with the multiplication `A1 A2...AJ`.
If `opj` has shape `batch_shape_j + [M_j, N_j]`, then we must have
`N_j = M_{j+1}`, in which case the composed operator has shape equal to
`broadcast_batch_shape + [M_1, N_J]`, where `broadcast_batch_shape` is the
mutual broadcast of `batch_shape_j`, `j = 1,...,J`, assuming the intermediate
batch shapes broadcast. Even if the composed shape is well defined, the
composed operator's methods may fail due to lack of broadcasting ability in
the defining operators' methods.
```python
# Create a 2 x 2 linear operator composed of two 2 x 2 operators.
operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
operator_2 = LinearOperatorFullMatrix([[1., 0.], [0., 1.]])
operator = LinearOperatorComposition([operator_1, operator_2])
operator.to_dense()
==> [[1., 2.]
[3., 4.]]
operator.shape
==> [2, 2]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> Shape [2, 4] Tensor
# Create a [2, 3] batch of 4 x 5 linear operators.
matrix_45 = tf.random.normal(shape=[2, 3, 4, 5])
operator_45 = LinearOperatorFullMatrix(matrix)
# Create a [2, 3] batch of 5 x 6 linear operators.
matrix_56 = tf.random.normal(shape=[2, 3, 5, 6])
operator_56 = LinearOperatorFullMatrix(matrix_56)
# Compose to create a [2, 3] batch of 4 x 6 operators.
operator_46 = LinearOperatorComposition([operator_45, operator_56])
# Create a shape [2, 3, 6, 2] vector.
x = tf.random.normal(shape=[2, 3, 6, 2])
operator.matmul(x)
==> Shape [2, 3, 4, 2] Tensor
```
#### Performance
The performance of `LinearOperatorComposition` on any operation is equal to
the sum of the individual operators' operations.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
operators,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name=None):
r"""Initialize a `LinearOperatorComposition`.
`LinearOperatorComposition` is initialized with a list of operators
`[op_1,...,op_J]`. For the `matmul` method to be well defined, the
composition `op_i.matmul(op_{i+1}(x))` must be defined. Other methods have
similar constraints.
Args:
operators: Iterable of `LinearOperator` objects, each with
the same `dtype` and composable shape.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`. Default is the individual
operators names joined with `_o_`.
Raises:
TypeError: If all operators do not have the same `dtype`.
ValueError: If `operators` is empty.
"""
parameters = dict(
operators=operators,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
# Validate operators.
check_ops.assert_proper_iterable(operators)
operators = list(operators)
if not operators:
raise ValueError(
"Expected a non-empty list of operators. Found: %s" % operators)
self._operators = operators
# Validate dtype.
dtype = operators[0].dtype
for operator in operators:
if operator.dtype != dtype:
name_type = (str((o.name, o.dtype)) for o in operators)
raise TypeError(
"Expected all operators to have the same dtype. Found %s"
% " ".join(name_type))
# Auto-set and check hints.
if all(operator.is_non_singular for operator in operators):
if is_non_singular is False:
raise ValueError(
"The composition of non-singular operators is always non-singular.")
is_non_singular = True
# Initialization.
graph_parents = []
for operator in operators:
graph_parents.extend(operator.graph_parents)
if name is None:
name = "_o_".join(operator.name for operator in operators)
with ops.name_scope(name, values=graph_parents):
super(LinearOperatorComposition, self).__init__(
dtype=dtype,
graph_parents=None,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
parameters=parameters,
name=name)
# TODO(b/143910018) Remove graph_parents in V3.
self._set_graph_parents(graph_parents)
@property
def operators(self):
return self._operators
def _shape(self):
# Get final matrix shape.
domain_dimension = self.operators[0].domain_dimension
for operator in self.operators[1:]:
domain_dimension.assert_is_compatible_with(operator.range_dimension)
domain_dimension = operator.domain_dimension
matrix_shape = tensor_shape.TensorShape(
[self.operators[0].range_dimension,
self.operators[-1].domain_dimension])
# Get broadcast batch shape.
# broadcast_shape checks for compatibility.
batch_shape = self.operators[0].batch_shape
for operator in self.operators[1:]:
batch_shape = common_shapes.broadcast_shape(
batch_shape, operator.batch_shape)
return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
# Avoid messy broadcasting if possible.
if self.shape.is_fully_defined():
return ops.convert_to_tensor(
self.shape.as_list(), dtype=dtypes.int32, name="shape")
# Don't check the matrix dimensions. That would add unnecessary Asserts to
# the graph. Things will fail at runtime naturally if shapes are
# incompatible.
matrix_shape = array_ops.stack([
self.operators[0].range_dimension_tensor(),
self.operators[-1].domain_dimension_tensor()
])
# Dummy Tensor of zeros. Will never be materialized.
zeros = array_ops.zeros(shape=self.operators[0].batch_shape_tensor())
for operator in self.operators[1:]:
zeros += array_ops.zeros(shape=operator.batch_shape_tensor())
batch_shape = array_ops.shape(zeros)
return array_ops.concat((batch_shape, matrix_shape), 0)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
# If self.operators = [A, B], and not adjoint, then
# matmul_order_list = [B, A].
# As a result, we return A.matmul(B.matmul(x))
if adjoint:
matmul_order_list = self.operators
else:
matmul_order_list = list(reversed(self.operators))
result = matmul_order_list[0].matmul(
x, adjoint=adjoint, adjoint_arg=adjoint_arg)
for operator in matmul_order_list[1:]:
result = operator.matmul(result, adjoint=adjoint)
return result
def _determinant(self):
result = self.operators[0].determinant()
for operator in self.operators[1:]:
result *= operator.determinant()
return result
def _log_abs_determinant(self):
result = self.operators[0].log_abs_determinant()
for operator in self.operators[1:]:
result += operator.log_abs_determinant()
return result
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
# TODO(langmore) Implement solve using solve_ls if some intermediate
# operator maps to a high dimensional space.
# In that case, an exact solve may still be possible.
# If self.operators = [A, B], and not adjoint, then
# solve_order_list = [A, B].
# As a result, we return B.solve(A.solve(x))
if adjoint:
solve_order_list = list(reversed(self.operators))
else:
solve_order_list = self.operators
solution = solve_order_list[0].solve(
rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
for operator in solve_order_list[1:]:
solution = operator.solve(solution, adjoint=adjoint)
return solution
| 36.989619 | 99 | 0.694949 |
ace7e8b7fea8ddfffc1178c7ebf126d23583b02e | 3,121 | py | Python | common/src/stack/command/stack/argument_processors/scope.py | sammeidinger/stack | a8085dce179dbe903f65f136f4b63bcc076cc057 | [
"BSD-3-Clause"
] | 123 | 2015-05-12T23:36:45.000Z | 2017-07-05T23:26:57.000Z | common/src/stack/command/stack/argument_processors/scope.py | sammeidinger/stack | a8085dce179dbe903f65f136f4b63bcc076cc057 | [
"BSD-3-Clause"
] | 177 | 2015-06-05T19:17:47.000Z | 2017-07-07T17:57:24.000Z | common/src/stack/command/stack/argument_processors/scope.py | sammeidinger/stack | a8085dce179dbe903f65f136f4b63bcc076cc057 | [
"BSD-3-Clause"
] | 32 | 2015-06-07T02:25:03.000Z | 2017-06-23T07:35:35.000Z | # @copyright@
# Copyright (c) 2006 - 2019 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
#
# @rocks@
# Copyright (c) 2000 - 2010 The Regents of the University of California
# All rights reserved. Rocks(r) v5.4 www.rocksclusters.org
# https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt
# @rocks@
from collections import namedtuple
from stack.exception import CommandError, ArgRequired, ParamError
from stack.util import flatten
# these need to be non-relative imports to prevent circular importing errors
from stack.argument_processors.appliance import ApplianceArgProcessor
from stack.argument_processors.environment import EnvironmentArgProcessor
from stack.argument_processors.host import HostArgProcessor
from stack.argument_processors.os import OSArgProcessor
class ScopeArgProcessor(
ApplianceArgProcessor,
OSArgProcessor,
EnvironmentArgProcessor,
HostArgProcessor
):
def getScopeMappings(self, args=None, scope=None):
# We will return a list of these
ScopeMapping = namedtuple(
'ScopeMapping',
['scope', 'appliance_id', 'os_id', 'environment_id', 'node_id']
)
scope_mappings = []
# Validate the different scopes and get the keys to the targets
if scope == 'global':
# Global scope has no friends
if args:
raise CommandError(
cmd = self,
msg = "Arguments are not allowed at the global scope.",
)
scope_mappings.append(
ScopeMapping(scope, None, None, None, None)
)
elif scope == 'appliance':
# Piggy-back to resolve the appliance names
names = self.getApplianceNames(args)
# Now we have to convert the names to the primary keys
for appliance_id in flatten(self.db.select(
'id from appliances where name in %s', (names,)
)):
scope_mappings.append(
ScopeMapping(scope, appliance_id, None, None, None)
)
elif scope == 'os':
# Piggy-back to resolve the os names
names = self.getOSNames(args)
# Now we have to convert the names to the primary keys
for os_id in flatten(self.db.select(
'id from oses where name in %s', (names,)
)):
scope_mappings.append(
ScopeMapping(scope, None, os_id, None, None)
)
elif scope == 'environment':
# Piggy-back to resolve the environment names
names = self.getEnvironmentNames(args)
if names:
# Now we have to convert the names to the primary keys
for environment_id in flatten(self.db.select(
'id from environments where name in %s', (names,)
)):
scope_mappings.append(
ScopeMapping(scope, None, None, environment_id, None)
)
elif scope == 'host':
# Piggy-back to resolve the host names
names = self.getHostnames(args)
if not names:
raise ArgRequired(self, 'host')
# Now we have to convert the names to the primary keys
for node_id in flatten(self.db.select(
'id from nodes where name in %s', (names,)
)):
scope_mappings.append(
ScopeMapping(scope, None, None, None, node_id)
)
else:
raise ParamError(self, 'scope', 'is not valid')
return scope_mappings
| 29.168224 | 76 | 0.713233 |
ace7e9a62c8283bdd6c34a3e5710fe27079ede7d | 5,284 | py | Python | ch04/web/on_time_flask_template.py | jar0man/Agile_Data_Code_2 | 30ae666f2fb322f8c099fa045f2c18cc1dba6e6c | [
"MIT"
] | null | null | null | ch04/web/on_time_flask_template.py | jar0man/Agile_Data_Code_2 | 30ae666f2fb322f8c099fa045f2c18cc1dba6e6c | [
"MIT"
] | null | null | null | ch04/web/on_time_flask_template.py | jar0man/Agile_Data_Code_2 | 30ae666f2fb322f8c099fa045f2c18cc1dba6e6c | [
"MIT"
] | null | null | null | import sys, os, re
import time
from flask import Flask, render_template, request
from pymongo import MongoClient
from bson import json_util
import config
import json
from elasticsearch import Elasticsearch
#elastic = Elasticsearch(config.ELASTIC_URL)
elastic = Elasticsearch()
# Process Elasticsearch hits and return flights records
def process_search(results):
records = []
total = 0
if results['hits'] and results['hits']['hits']:
total = results['hits']['total']['value']
hits = results['hits']['hits']
for hit in hits:
record = hit['_source']
records.append(record)
print('Total:'+ str(total) )
return records, total
# Calculate offsets for fetching lists of flights from MongoDB
def get_navigation_offsets(offset1, offset2, increment):
offsets = {}
offsets['Previous'] = {'top_offset': max(offset2 - increment, 0),
'bottom_offset': max(offset1 - increment, 0)} # Don't go < 0
offsets['Next'] = {'top_offset': offset2 + increment, 'bottom_offset':
offset1 + increment}
return offsets
# Strip the existing start and end parameters from the query string
def strip_place(url):
try:
p = re.match('(.+)&start=.+&end=.+', url).group(1)
except AttributeError as e:
return url
return p
# Set up Flask and Mongo
app = Flask(__name__)
client = MongoClient()
# Controller: Fetch a flight and display it
@app.route("/on_time_performance")
def on_time_performance():
carrier = request.args.get('Carrier')
flight_date = request.args.get('FlightDate')
flight_num = request.args.get('FlightNum')
flight = client.agile_data_science.on_time_performance.find_one({
'Carrier': carrier,
'FlightDate': flight_date,
'FlightNum': int(flight_num)
})
return render_template('flight.html', flight=flight, carrier=carrier,flight_date=flight_date, flight_num=flight_num)
# Controller: Fetch all flights between cities on a given day and display them
@app.route("/flights/<origin>/<dest>/<flight_date>")
def list_flights(origin, dest, flight_date):
start = request.args.get('start') or 0
start = int(start)
end = request.args.get('end') or config.RECORDS_PER_PAGE
end = int(end)
width = end - start
nav_offsets = get_navigation_offsets(start, end, config.RECORDS_PER_PAGE)
flights = client.agile_data_science.on_time_performance.find(
{
'Origin': origin,
'Dest': dest,
'FlightDate': flight_date
},
sort = [
('DepTime', 1),
('ArrTime', 1)
]
)
flight_count = flights.count()
flights = flights.skip(start).limit(width)
return render_template(
'flights.html',
flights=flights,
flight_date=flight_date,
flight_count=flight_count,
nav_path=request.path,
nav_offsets=nav_offsets
)
@app.route("/flights/search")
@app.route("/flights/search/")
def search_flights():
# Search parameters
carrier = request.args.get('Carrier')
flight_date = request.args.get('FlightDate')
origin = request.args.get('Origin')
dest = request.args.get('Dest')
tail_number = request.args.get('TailNum')
flight_number = request.args.get('FlightNum')
# Pagination parameters
start = request.args.get('start') or 0
start = int(start)
end = request.args.get('end') or config.RECORDS_PER_PAGE
end = int(end)
print(request.args)
# Navigation path and offset setup
nav_path = strip_place(request.url)
nav_offsets = get_navigation_offsets(start, end, config.RECORDS_PER_PAGE)
# Build the base of our Elasticsearch query
query = {
'query': {
'bool': {
'must': []}
},
# 'sort': [
# {'FlightDate': {'order': 'asc', 'ignore_unmapped' : True} },
# {'DepTime': {'order': 'asc', 'ignore_unmapped' : True} },
# {'Carrier': {'order': 'asc', 'ignore_unmapped' : True} },
# {'FlightNum': {'order': 'asc', 'ignore_unmapped' : True} },
# '_score'
# ],
'from': start,
'size': config.RECORDS_PER_PAGE
}
# Add any search parameters present
if carrier:
query['query']['bool']['must'].append({'match': {'Carrier': carrier}})
if flight_date:
query['query']['bool']['must'].append({'match': {'FlightDate': flight_date}})
if origin:
query['query']['bool']['must'].append({'match': {'Origin': origin}})
if dest:
query['query']['bool']['must'].append({'match': {'Dest': dest}})
if tail_number:
query['query']['bool']['must'].append({'match': {'TailNum': tail_number}})
if flight_number:
query['query']['bool']['must'].append({'match': {'FlightNum': int(flight_number)}})
# Query Elasticsearch, process to get records and count
print("QUERY")
print(carrier, flight_date, origin, dest, tail_number, flight_number)
print(json.dumps(query))
results = elastic.search(index='agile_data_science',body=query)
flights, flight_count = process_search(results)
# Persist search parameters in the form template
return render_template(
'search.html',
flights=flights,
flight_date=flight_date,
flight_count=flight_count,
nav_path=nav_path,
nav_offsets=nav_offsets,
carrier=carrier,
origin=origin,
dest=dest,
tail_number=tail_number,
flight_number=flight_number
,query=request.url
)
if __name__ == "__main__":
app.run(debug=True)
| 29.355556 | 118 | 0.674678 |
ace7ecdead83f17a7e350849fe941f84dafb2201 | 26,144 | py | Python | mitmproxy/proxy/protocol/http2.py | davidpshaw/mitmproxy | cfcf3ccb1356792a54ef068dc258ae2c379c4c95 | [
"MIT"
] | null | null | null | mitmproxy/proxy/protocol/http2.py | davidpshaw/mitmproxy | cfcf3ccb1356792a54ef068dc258ae2c379c4c95 | [
"MIT"
] | null | null | null | mitmproxy/proxy/protocol/http2.py | davidpshaw/mitmproxy | cfcf3ccb1356792a54ef068dc258ae2c379c4c95 | [
"MIT"
] | null | null | null | import threading
import time
import functools
from typing import Dict, Callable, Any, List # noqa
import h2.exceptions
from h2 import connection
from h2 import events
import queue
from mitmproxy import connections # noqa
from mitmproxy import exceptions
from mitmproxy import http
from mitmproxy.proxy.protocol import base
from mitmproxy.proxy.protocol import http as httpbase
import mitmproxy.net.http
from mitmproxy.net import tcp
from mitmproxy.types import basethread
from mitmproxy.net.http import http2, headers
class SafeH2Connection(connection.H2Connection):
def __init__(self, conn, *args, **kwargs):
super().__init__(*args, **kwargs)
self.conn = conn
self.lock = threading.RLock()
def safe_acknowledge_received_data(self, acknowledged_size: int, stream_id: int):
if acknowledged_size == 0:
return
with self.lock:
self.acknowledge_received_data(acknowledged_size, stream_id)
self.conn.send(self.data_to_send())
def safe_reset_stream(self, stream_id: int, error_code: int):
with self.lock:
try:
self.reset_stream(stream_id, error_code)
except h2.exceptions.StreamClosedError: # pragma: no cover
# stream is already closed - good
pass
self.conn.send(self.data_to_send())
def safe_update_settings(self, new_settings: Dict[int, Any]):
with self.lock:
self.update_settings(new_settings)
self.conn.send(self.data_to_send())
def safe_send_headers(self, raise_zombie: Callable, stream_id: int, headers: headers.Headers, **kwargs):
with self.lock:
raise_zombie()
self.send_headers(stream_id, headers.fields, **kwargs)
self.conn.send(self.data_to_send())
def safe_send_body(self, raise_zombie: Callable, stream_id: int, chunks: List[bytes]):
for chunk in chunks:
position = 0
while position < len(chunk):
self.lock.acquire()
raise_zombie(self.lock.release)
max_outbound_frame_size = self.max_outbound_frame_size
frame_chunk = chunk[position:position + max_outbound_frame_size]
if self.local_flow_control_window(stream_id) < len(frame_chunk): # pragma: no cover
self.lock.release()
time.sleep(0.1)
continue
self.send_data(stream_id, frame_chunk)
try:
self.conn.send(self.data_to_send())
except Exception as e: # pragma: no cover
raise e
finally:
self.lock.release()
position += max_outbound_frame_size
with self.lock:
raise_zombie()
self.end_stream(stream_id)
self.conn.send(self.data_to_send())
class Http2Layer(base.Layer):
if False:
# mypy type hints
client_conn = None # type: connections.ClientConnection
def __init__(self, ctx, mode: str) -> None:
super().__init__(ctx)
self.mode = mode
self.streams = dict() # type: Dict[int, Http2SingleStreamLayer]
self.server_to_client_stream_ids = dict([(0, 0)]) # type: Dict[int, int]
self.connections = {} # type: Dict[object, SafeH2Connection]
config = h2.config.H2Configuration(
client_side=False,
header_encoding=False,
validate_outbound_headers=False,
validate_inbound_headers=False)
self.connections[self.client_conn] = SafeH2Connection(self.client_conn, config=config)
def _initiate_server_conn(self):
if self.server_conn.connected():
config = h2.config.H2Configuration(
client_side=True,
header_encoding=False,
validate_outbound_headers=False,
validate_inbound_headers=False)
self.connections[self.server_conn] = SafeH2Connection(self.server_conn, config=config)
self.connections[self.server_conn].initiate_connection()
self.server_conn.send(self.connections[self.server_conn].data_to_send())
def _complete_handshake(self):
preamble = self.client_conn.rfile.read(24)
self.connections[self.client_conn].initiate_connection()
self.connections[self.client_conn].receive_data(preamble)
self.client_conn.send(self.connections[self.client_conn].data_to_send())
def next_layer(self): # pragma: no cover
# WebSocket over HTTP/2?
# CONNECT for proxying?
raise NotImplementedError()
def _handle_event(self, event, source_conn, other_conn, is_server):
self.log(
"HTTP2 Event from {}".format("server" if is_server else "client"),
"debug",
[repr(event)]
)
eid = None
if hasattr(event, 'stream_id'):
if is_server and event.stream_id % 2 == 1:
eid = self.server_to_client_stream_ids[event.stream_id]
else:
eid = event.stream_id
if isinstance(event, events.RequestReceived):
return self._handle_request_received(eid, event)
elif isinstance(event, events.ResponseReceived):
return self._handle_response_received(eid, event)
elif isinstance(event, events.DataReceived):
return self._handle_data_received(eid, event, source_conn)
elif isinstance(event, events.StreamEnded):
return self._handle_stream_ended(eid)
elif isinstance(event, events.StreamReset):
return self._handle_stream_reset(eid, event, is_server, other_conn)
elif isinstance(event, events.RemoteSettingsChanged):
return self._handle_remote_settings_changed(event, other_conn)
elif isinstance(event, events.ConnectionTerminated):
return self._handle_connection_terminated(event, is_server)
elif isinstance(event, events.PushedStreamReceived):
return self._handle_pushed_stream_received(event)
elif isinstance(event, events.PriorityUpdated):
return self._handle_priority_updated(eid, event)
elif isinstance(event, events.TrailersReceived):
raise NotImplementedError('TrailersReceived not implemented')
# fail-safe for unhandled events
return True
def _handle_request_received(self, eid, event):
headers = mitmproxy.net.http.Headers([[k, v] for k, v in event.headers])
self.streams[eid] = Http2SingleStreamLayer(self, self.connections[self.client_conn], eid, headers)
self.streams[eid].timestamp_start = time.time()
self.streams[eid].no_body = (event.stream_ended is not None)
if event.priority_updated is not None:
self.streams[eid].priority_exclusive = event.priority_updated.exclusive
self.streams[eid].priority_depends_on = event.priority_updated.depends_on
self.streams[eid].priority_weight = event.priority_updated.weight
self.streams[eid].handled_priority_event = event.priority_updated
self.streams[eid].start()
self.streams[eid].request_arrived.set()
return True
def _handle_response_received(self, eid, event):
headers = mitmproxy.net.http.Headers([[k, v] for k, v in event.headers])
self.streams[eid].queued_data_length = 0
self.streams[eid].timestamp_start = time.time()
self.streams[eid].response_headers = headers
self.streams[eid].response_arrived.set()
return True
def _handle_data_received(self, eid, event, source_conn):
bsl = self.config.options._processed.get("body_size_limit")
if bsl and self.streams[eid].queued_data_length > bsl:
self.streams[eid].kill()
self.connections[source_conn].safe_reset_stream(
event.stream_id,
h2.errors.ErrorCodes.REFUSED_STREAM
)
self.log("HTTP body too large. Limit is {}.".format(bsl), "info")
else:
self.streams[eid].data_queue.put(event.data)
self.streams[eid].queued_data_length += len(event.data)
self.connections[source_conn].safe_acknowledge_received_data(
event.flow_controlled_length,
event.stream_id
)
return True
def _handle_stream_ended(self, eid):
self.streams[eid].timestamp_end = time.time()
self.streams[eid].data_finished.set()
return True
def _handle_stream_reset(self, eid, event, is_server, other_conn):
if eid in self.streams:
self.streams[eid].kill()
if event.error_code == h2.errors.ErrorCodes.CANCEL:
if is_server:
other_stream_id = self.streams[eid].client_stream_id
else:
other_stream_id = self.streams[eid].server_stream_id
if other_stream_id is not None:
self.connections[other_conn].safe_reset_stream(other_stream_id, event.error_code)
return True
def _handle_remote_settings_changed(self, event, other_conn):
new_settings = dict([(key, cs.new_value) for (key, cs) in event.changed_settings.items()])
self.connections[other_conn].safe_update_settings(new_settings)
return True
def _handle_connection_terminated(self, event, is_server):
self.log("HTTP/2 connection terminated by {}: error code: {}, last stream id: {}, additional data: {}".format(
"server" if is_server else "client",
event.error_code,
event.last_stream_id,
event.additional_data), "info")
if event.error_code != h2.errors.ErrorCodes.NO_ERROR:
# Something terrible has happened - kill everything!
self.connections[self.client_conn].close_connection(
error_code=event.error_code,
last_stream_id=event.last_stream_id,
additional_data=event.additional_data
)
self.client_conn.send(self.connections[self.client_conn].data_to_send())
self._kill_all_streams()
else:
"""
Do not immediately terminate the other connection.
Some streams might be still sending data to the client.
"""
return False
def _handle_pushed_stream_received(self, event):
# pushed stream ids should be unique and not dependent on race conditions
# only the parent stream id must be looked up first
parent_eid = self.server_to_client_stream_ids[event.parent_stream_id]
with self.connections[self.client_conn].lock:
self.connections[self.client_conn].push_stream(parent_eid, event.pushed_stream_id, event.headers)
self.client_conn.send(self.connections[self.client_conn].data_to_send())
headers = mitmproxy.net.http.Headers([[k, v] for k, v in event.headers])
layer = Http2SingleStreamLayer(self, self.connections[self.client_conn], event.pushed_stream_id, headers)
self.streams[event.pushed_stream_id] = layer
self.streams[event.pushed_stream_id].timestamp_start = time.time()
self.streams[event.pushed_stream_id].pushed = True
self.streams[event.pushed_stream_id].parent_stream_id = parent_eid
self.streams[event.pushed_stream_id].timestamp_end = time.time()
self.streams[event.pushed_stream_id].request_arrived.set()
self.streams[event.pushed_stream_id].request_data_finished.set()
self.streams[event.pushed_stream_id].start()
return True
def _handle_priority_updated(self, eid, event):
if not self.config.options.http2_priority:
self.log("HTTP/2 PRIORITY frame surpressed. Use --http2-priority to enable forwarding.", "debug")
return True
if eid in self.streams and self.streams[eid].handled_priority_event is event:
# this event was already handled during stream creation
# HeadersFrame + Priority information as RequestReceived
return True
with self.connections[self.server_conn].lock:
mapped_stream_id = event.stream_id
if mapped_stream_id in self.streams and self.streams[mapped_stream_id].server_stream_id:
# if the stream is already up and running and was sent to the server,
# use the mapped server stream id to update priority information
mapped_stream_id = self.streams[mapped_stream_id].server_stream_id
if eid in self.streams:
self.streams[eid].priority_exclusive = event.exclusive
self.streams[eid].priority_depends_on = event.depends_on
self.streams[eid].priority_weight = event.weight
self.connections[self.server_conn].prioritize(
mapped_stream_id,
weight=event.weight,
depends_on=self._map_depends_on_stream_id(mapped_stream_id, event.depends_on),
exclusive=event.exclusive
)
self.server_conn.send(self.connections[self.server_conn].data_to_send())
return True
def _map_depends_on_stream_id(self, stream_id, depends_on):
mapped_depends_on = depends_on
if mapped_depends_on in self.streams and self.streams[mapped_depends_on].server_stream_id:
# if the depends-on-stream is already up and running and was sent to the server
# use the mapped server stream id to update priority information
mapped_depends_on = self.streams[mapped_depends_on].server_stream_id
if stream_id == mapped_depends_on:
# looks like one of the streams wasn't opened yet
# prevent self-dependent streams which result in ProtocolError
mapped_depends_on += 2
return mapped_depends_on
def _cleanup_streams(self):
death_time = time.time() - 10
zombie_streams = [(stream_id, stream) for stream_id, stream in list(self.streams.items()) if stream.zombie]
outdated_streams = [stream_id for stream_id, stream in zombie_streams if stream.zombie <= death_time]
for stream_id in outdated_streams: # pragma: no cover
self.streams.pop(stream_id, None)
def _kill_all_streams(self):
for stream in self.streams.values():
stream.kill()
def __call__(self):
self._initiate_server_conn()
self._complete_handshake()
conns = [c.connection for c in self.connections.keys()]
try:
while True:
r = tcp.ssl_read_select(conns, 0.1)
for conn in r:
source_conn = self.client_conn if conn == self.client_conn.connection else self.server_conn
other_conn = self.server_conn if conn == self.client_conn.connection else self.client_conn
is_server = (source_conn == self.server_conn)
with self.connections[source_conn].lock:
try:
raw_frame = b''.join(http2.read_raw_frame(source_conn.rfile))
except:
# read frame failed: connection closed
self._kill_all_streams()
return
if self.connections[source_conn].state_machine.state == h2.connection.ConnectionState.CLOSED:
self.log("HTTP/2 connection entered closed state already", "debug")
return
incoming_events = self.connections[source_conn].receive_data(raw_frame)
source_conn.send(self.connections[source_conn].data_to_send())
for event in incoming_events:
if not self._handle_event(event, source_conn, other_conn, is_server):
# connection terminated: GoAway
self._kill_all_streams()
return
self._cleanup_streams()
except Exception as e: # pragma: no cover
self.log(repr(e), "info")
self._kill_all_streams()
def detect_zombie_stream(func): # pragma: no cover
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
self.raise_zombie()
result = func(self, *args, **kwargs)
self.raise_zombie()
return result
return wrapper
class Http2SingleStreamLayer(httpbase._HttpTransmissionLayer, basethread.BaseThread):
def __init__(self, ctx, h2_connection, stream_id: int, request_headers: mitmproxy.net.http.Headers) -> None:
super().__init__(
ctx, name="Http2SingleStreamLayer-{}".format(stream_id)
)
self.h2_connection = h2_connection
self.zombie = None # type: float
self.client_stream_id = stream_id # type: int
self.server_stream_id = None # type: int
self.request_headers = request_headers
self.response_headers = None # type: mitmproxy.net.http.Headers
self.pushed = False
self.timestamp_start = None # type: float
self.timestamp_end = None # type: float
self.request_arrived = threading.Event()
self.request_data_queue = queue.Queue() # type: queue.Queue[bytes]
self.request_queued_data_length = 0
self.request_data_finished = threading.Event()
self.response_arrived = threading.Event()
self.response_data_queue = queue.Queue() # type: queue.Queue[bytes]
self.response_queued_data_length = 0
self.response_data_finished = threading.Event()
self.no_body = False
self.priority_exclusive = None # type: bool
self.priority_depends_on = None # type: int
self.priority_weight = None # type: int
self.handled_priority_event = None # type: Any
def kill(self):
if not self.zombie:
self.zombie = time.time()
self.request_data_finished.set()
self.request_arrived.set()
self.response_arrived.set()
self.response_data_finished.set()
def connect(self): # pragma: no cover
raise exceptions.Http2ProtocolException("HTTP2 layer should already have a connection.")
def disconnect(self): # pragma: no cover
raise exceptions.Http2ProtocolException("Cannot dis- or reconnect in HTTP2 connections.")
def set_server(self, address): # pragma: no cover
raise exceptions.SetServerNotAllowedException(repr(address))
def check_close_connection(self, flow):
# This layer only handles a single stream.
# RFC 7540 8.1: An HTTP request/response exchange fully consumes a single stream.
return True
@property
def data_queue(self):
if self.response_arrived.is_set():
return self.response_data_queue
else:
return self.request_data_queue
@property
def queued_data_length(self):
if self.response_arrived.is_set():
return self.response_queued_data_length
else:
return self.request_queued_data_length
@queued_data_length.setter
def queued_data_length(self, v):
self.request_queued_data_length = v
@property
def data_finished(self):
if self.response_arrived.is_set():
return self.response_data_finished
else:
return self.request_data_finished
def raise_zombie(self, pre_command=None): # pragma: no cover
connection_closed = self.h2_connection.state_machine.state == h2.connection.ConnectionState.CLOSED
if self.zombie is not None or connection_closed:
if pre_command is not None:
pre_command()
raise exceptions.Http2ZombieException("Connection already dead")
@detect_zombie_stream
def read_request_headers(self, flow):
self.request_arrived.wait()
self.raise_zombie()
if self.pushed:
flow.metadata['h2-pushed-stream'] = True
first_line_format, method, scheme, host, port, path = http2.parse_headers(self.request_headers)
return http.HTTPRequest(
first_line_format,
method,
scheme,
host,
port,
path,
b"HTTP/2.0",
self.request_headers,
None,
timestamp_start=self.timestamp_start,
timestamp_end=self.timestamp_end,
)
@detect_zombie_stream
def read_request_body(self, request):
self.request_data_finished.wait()
data = []
while self.request_data_queue.qsize() > 0:
data.append(self.request_data_queue.get())
return data
@detect_zombie_stream
def send_request(self, message):
if self.pushed:
# nothing to do here
return
while True:
self.raise_zombie()
self.connections[self.server_conn].lock.acquire()
max_streams = self.connections[self.server_conn].remote_settings.max_concurrent_streams
if self.connections[self.server_conn].open_outbound_streams + 1 >= max_streams:
# wait until we get a free slot for a new outgoing stream
self.connections[self.server_conn].lock.release()
time.sleep(0.1)
continue
# keep the lock
break
# We must not assign a stream id if we are already a zombie.
self.raise_zombie()
self.server_stream_id = self.connections[self.server_conn].get_next_available_stream_id()
self.server_to_client_stream_ids[self.server_stream_id] = self.client_stream_id
headers = message.headers.copy()
headers.insert(0, ":path", message.path)
headers.insert(0, ":method", message.method)
headers.insert(0, ":scheme", message.scheme)
priority_exclusive = None
priority_depends_on = None
priority_weight = None
if self.handled_priority_event:
# only send priority information if they actually came with the original HeadersFrame
# and not if they got updated before/after with a PriorityFrame
if not self.config.options.http2_priority:
self.log("HTTP/2 PRIORITY information in HEADERS frame surpressed. Use --http2-priority to enable forwarding.", "debug")
else:
priority_exclusive = self.priority_exclusive
priority_depends_on = self._map_depends_on_stream_id(self.server_stream_id, self.priority_depends_on)
priority_weight = self.priority_weight
try:
self.connections[self.server_conn].safe_send_headers(
self.raise_zombie,
self.server_stream_id,
headers,
end_stream=self.no_body,
priority_exclusive=priority_exclusive,
priority_depends_on=priority_depends_on,
priority_weight=priority_weight,
)
except Exception as e: # pragma: no cover
raise e
finally:
self.raise_zombie()
self.connections[self.server_conn].lock.release()
if not self.no_body:
self.connections[self.server_conn].safe_send_body(
self.raise_zombie,
self.server_stream_id,
[message.content]
)
@detect_zombie_stream
def read_response_headers(self):
self.response_arrived.wait()
self.raise_zombie()
status_code = int(self.response_headers.get(':status', 502))
headers = self.response_headers.copy()
headers.pop(":status", None)
return http.HTTPResponse(
http_version=b"HTTP/2.0",
status_code=status_code,
reason=b'',
headers=headers,
content=None,
timestamp_start=self.timestamp_start,
timestamp_end=self.timestamp_end,
)
@detect_zombie_stream
def read_response_body(self, request, response):
while True:
try:
yield self.response_data_queue.get(timeout=0.1)
except queue.Empty: # pragma: no cover
pass
if self.response_data_finished.is_set():
self.raise_zombie()
while self.response_data_queue.qsize() > 0:
yield self.response_data_queue.get()
break
self.raise_zombie()
@detect_zombie_stream
def send_response_headers(self, response):
headers = response.headers.copy()
headers.insert(0, ":status", str(response.status_code))
with self.connections[self.client_conn].lock:
self.connections[self.client_conn].safe_send_headers(
self.raise_zombie,
self.client_stream_id,
headers
)
@detect_zombie_stream
def send_response_body(self, _response, chunks):
self.connections[self.client_conn].safe_send_body(
self.raise_zombie,
self.client_stream_id,
chunks
)
def __call__(self): # pragma: no cover
raise EnvironmentError('Http2SingleStreamLayer must be run as thread')
def run(self):
layer = httpbase.HttpLayer(self, self.mode)
try:
layer()
except exceptions.Http2ZombieException as e: # pragma: no cover
pass
except exceptions.ProtocolException as e: # pragma: no cover
self.log(repr(e), "info")
except exceptions.SetServerNotAllowedException as e: # pragma: no cover
self.log("Changing the Host server for HTTP/2 connections not allowed: {}".format(e), "info")
except exceptions.Kill: # pragma: no cover
self.log("Connection killed", "info")
self.kill()
| 41.236593 | 136 | 0.634907 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.