| hexsha (string, len 40) | size (int64, 4..1.02M) | ext (string, 8 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 4..209) | max_stars_repo_name (string, len 5..121) | max_stars_repo_head_hexsha (string, len 40) | max_stars_repo_licenses (list, len 1..10) | max_stars_count (int64, 1..191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 4..209) | max_issues_repo_name (string, len 5..121) | max_issues_repo_head_hexsha (string, len 40) | max_issues_repo_licenses (list, len 1..10) | max_issues_count (int64, 1..67k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 4..209) | max_forks_repo_name (string, len 5..121) | max_forks_repo_head_hexsha (string, len 40) | max_forks_repo_licenses (list, len 1..10) | max_forks_count (int64, 1..105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 4..1.02M) | avg_line_length (float64, 1.07..66.1k) | max_line_length (int64, 4..266k) | alphanum_fraction (float64, 0.01..1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| d7eebd4ac60a91719b65aee6f78c39cf889d2c89 | 13,564 | py | Python | mail/mail.py | YangWanjun/ebusiness | 03d92908b4db1a305c8cb99fc27700fd4dc972bd | ["Apache-2.0"] | null | null | null | mail/mail.py | YangWanjun/ebusiness | 03d92908b4db1a305c8cb99fc27700fd4dc972bd | ["Apache-2.0"] | 3 | 2020-02-11T22:59:47.000Z | 2021-03-19T22:03:11.000Z | mail/mail.py | YangWanjun/ebusiness | 03d92908b4db1a305c8cb99fc27700fd4dc972bd | ["Apache-2.0"] | null | null | null |
import os
import mimetypes
import zipfile
import traceback
import subprocess
import io
import datetime
import shutil
import sys
from email import encoders
from email.header import Header
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.mail import EmailMultiAlternatives, get_connection, SafeMIMEText
from django.core.mail.message import MIMEBase
from django.core.validators import validate_email
from django.db import connection
from django.utils.encoding import smart_str
from .models import EMailLogEntry
from master.models import Attachment
from utils import constants, common
from utils.errors import CustomException
logger = common.get_system_logger()
class Mail(object):
def __init__(self, sender=None, recipient_list=None, cc_list=None, bcc_list=None, attachment_list=None,
is_encrypt=False, mail_title=None, mail_body=None, pass_title=None, pass_body=None, **kwargs):
self.sender = sender
self.recipient_list = Mail.str_to_list(recipient_list)
self.cc_list = Mail.str_to_list(cc_list)
self.bcc_list = Mail.str_to_list(bcc_list)
self.attachment_list = attachment_list if attachment_list else []
self.is_encrypt = is_encrypt
self.mail_title = mail_title
self.mail_body = mail_body
self.pass_title = pass_title
self.password = None
self.pass_body = pass_body
self.temp_files = []
def check_recipient(self):
if not self.recipient_list:
raise CustomException("宛先はありません。")
return self.check_email_address(self.recipient_list)
def check_cc_list(self):
return self.check_email_address(self.cc_list)
def check_bcc_list(self):
return self.check_email_address(self.bcc_list)
def check_attachment(self):
if self.attachment_list:
qs = Attachment.objects.filter(is_deleted=False, uuid__in=self.attachment_list)
self.attachment_list = [AttachmentFile(path=item.path.path, filename=item.name) for item in qs]
for attachment in self.attachment_list:
if not attachment.is_valid():
raise CustomException("ファイル「%s」が見つかりません。" % attachment)
def check_mail_title(self):
if not self.mail_title:
raise CustomException("メールの題名を設定してください。")
@classmethod
def get_mail_connection(cls):
try:
with connection.cursor() as cursor:
cursor.execute(
"select value from mst_config where name = %s "
" union all "
"select value from mst_config where name = %s "
" union all "
"select value from mst_config where name = %s "
" union all "
"select value from mst_config where name = %s ",
[constants.CONFIG_EMAIL_SMTP_HOST, constants.CONFIG_EMAIL_SMTP_PORT,
constants.CONFIG_EMAIL_ADDRESS, constants.CONFIG_EMAIL_PASSWORD]
)
host, port, username, password = cursor.fetchall()
backend = get_connection()
backend.host = str(host[0])
backend.port = int(port[0])
backend.username = str(username[0])
backend.password = str(password[0])
return backend
except Exception as ex:
logger.error(str(ex))
logger.error(traceback.format_exc())
raise CustomException(str(ex))
@classmethod
def str_to_list(cls, s):
if isinstance(s, str):
return [i.strip() for i in s.split(',') if i]
else:
return s
@classmethod
def check_email_address(cls, mail_address):
if not mail_address:
return False
if isinstance(mail_address, str):
mail_list = [mail_address]
elif isinstance(mail_address, (tuple, list)):
mail_list = mail_address
else:
raise CustomException('有効なメールアドレスを入れてください。')
for email in mail_list:
try:
validate_email(email)
except ValidationError:
raise CustomException('有効なメールアドレスを入れてください。')
return True
def zip_attachments(self):
if self.attachment_list:
if sys.platform in ("linux", "linux2"):
# Get a temporary working folder under the temp directory
temp_path = os.path.join(common.get_temp_path(), datetime.datetime.now().strftime('%Y%m%d%H%M%S%f'))
if not os.path.exists(temp_path):
os.mkdir(temp_path)
self.temp_files.append(temp_path)
temp_zip = os.path.join(common.get_temp_path(), datetime.datetime.now().strftime('%Y%m%d%H%M%S%f.zip'))
self.temp_files.append(temp_zip)
file_list = []
for attachment_file in self.attachment_list:
new_path = os.path.join(temp_path, attachment_file.filename)
file_list.append(new_path)
self.temp_files.append(new_path)
if attachment_file.is_bytes():
# Write the binary content to a temporary file
with open(new_path, 'wb') as f:
f.write(attachment_file.content)
else:
shutil.copy(attachment_file.path, new_path)
password = self.generate_password()
# Convert all file names under the temp folder from UTF-8 to Shift-JIS
subprocess.call(["convmv", "-r", "-f", "utf8", '-t', 'sjis', '--notest', temp_path.rstrip('/') + '/'])
# Compress the temporary folder
command = "zip --password {0} -j {1} {2}/*".format(password, temp_zip, temp_path.rstrip('/'))
print(command)
subprocess.call(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
bytes_data = open(temp_zip, 'rb', ).read()
return bytes_data
else:
buff = io.BytesIO()
in_memory_zip = zipfile.ZipFile(buff, mode='w')
for attachment_file in self.attachment_list:
if attachment_file.is_bytes():
in_memory_zip.writestr(attachment_file.filename, attachment_file.content)
else:
in_memory_zip.write(attachment_file.path, attachment_file.filename)
in_memory_zip.close()
return buff.getvalue()
else:
return None
# def escape(self, name):
# """Shift_JISのダメ文字対策
#
# 2バイト目に「5C」のコードが使われている文字は、次のようなものがあります。
# ―ソЫⅨ噂浬欺圭構蚕十申曾箪貼能表暴予禄兔喀媾彌拿杤歃濬畚秉綵臀藹觸軆鐔饅鷭偆砡
#
# :param name:
# :return:
# """
# chars = "ソЫⅨ噂浬欺圭構蚕十申曾箪貼能表暴予禄兔喀媾彌拿杤歃濬畚秉綵臀藹觸軆鐔饅鷭偆砡"
# s = name
# for c in chars:
# if c in s:
# s = s.replace(c, "_")
# return s
def generate_password(self, length=8):
self.password = common.generate_password(length)
return self.password
def send_email(self, user=None):
try:
self.check_recipient()
self.check_cc_list()
self.check_bcc_list()
self.check_attachment()
self.check_mail_title()
mail_connection = self.get_mail_connection()
if not self.sender:
self.sender = mail_connection.username
email = EmailMultiAlternativesWithEncoding(
subject=self.mail_title,
body=self.mail_body,
from_email=self.sender,
to=self.recipient_list,
cc=self.cc_list,
bcc=self.bcc_list,
connection=mail_connection
)
# email.attach_alternative(self.mail_body, constants.MIME_TYPE_HTML)
if self.is_encrypt is False:
for attachment in [item for item in self.attachment_list]:
if attachment.is_bytes():
email.attach(attachment.filename, attachment.content, constants.MIME_TYPE_ZIP)
else:
email.attach_file(attachment.path, constants.MIME_TYPE_STREAM)
else:
attachments = self.zip_attachments()
if attachments:
email.attach('%s.zip' % self.mail_title, attachments, constants.MIME_TYPE_ZIP)
email.send()
# Send the password
self.send_password(mail_connection, user=user)
log_format = "題名: %s; TO: %s; CC: %s; 送信完了。"
logger.info(log_format % (
self.mail_title,
','.join(self.recipient_list) if self.recipient_list else '',
','.join(self.cc_list) if self.cc_list else ''
))
if user:
# Record the send log
if self.attachment_list:
attachment_name = ",".join([item.filename for item in self.attachment_list])
else:
attachment_name = None
EMailLogEntry.objects.create(
user=user,
sender=self.sender,
recipient=",".join(self.recipient_list),
cc=",".join(self.cc_list) if self.cc_list else None,
bcc=",".join(self.bcc_list) if self.bcc_list else None,
title=self.mail_title,
body=self.mail_body,
attachment=attachment_name,
)
except subprocess.CalledProcessError as e:
logger.error(e.output)
logger.error(traceback.format_exc())
raise CustomException(str(e.output))
except Exception as ex:
logger.error(ex)
logger.error(traceback.format_exc())
raise CustomException(str(ex))
finally:
# Delete temporary files
for path in self.temp_files:
if os.path.exists(path):
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
def send_password(self, conn, user=None):
if self.attachment_list and self.is_encrypt and self.password:
subject = self.pass_title or self.mail_title
try:
body = self.pass_body.format(password=self.password)
except Exception as ex:
logger.error(ex)
body = "PW: %s" % self.password
email = EmailMultiAlternativesWithEncoding(
subject=subject,
body=body,
from_email=self.sender,
to=self.recipient_list,
cc=self.cc_list,
connection=conn
)
# email.attach_alternative(body, constants.MIME_TYPE_HTML)
email.send()
logger.info("%sのパスワードは送信しました。" % self.mail_title)
if user:
# Record the password send log
EMailLogEntry.objects.create(
user=user,
sender=self.sender,
recipient=",".join(self.recipient_list),
cc=",".join(self.cc_list) if self.cc_list else None,
bcc=",".join(self.bcc_list) if self.bcc_list else None,
title=subject,
body=body,
attachment=None,
)
class AttachmentFile:
def __init__(self, path=None, content=None, filename=None):
self.path = path
self.content = content
if path and not filename:
self.filename = os.path.basename(path)
else:
self.filename = filename
def is_valid(self):
"""有効なファイルであるかどうか
:return:
"""
if self.path:
return os.path.exists(self.path)
elif self.content and isinstance(self.content, bytes):
return True
else:
return False
def is_bytes(self):
if self.content and isinstance(self.content, bytes):
return True
else:
return False
class EmailMultiAlternativesWithEncoding(EmailMultiAlternatives):
def _create_attachment(self, filename, content, mimetype=None):
"""
Converts the filename, content, mimetype triple into a MIME attachment
object. Use self.encoding when handling text attachments.
"""
if mimetype is None:
mimetype, _ = mimetypes.guess_type(filename)
# if mimetype is None:
# mimetype = constants.MIME_TYPE_EXCEL
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
encoding = self.encoding or settings.DEFAULT_CHARSET
attachment = SafeMIMEText(smart_str(content, settings.DEFAULT_CHARSET), subtype, encoding)
else:
# Encode non-text attachments with base64.
attachment = MIMEBase(basetype, subtype)
attachment.set_payload(content)
encoders.encode_base64(attachment)
if filename:
try:
filename = Header(filename, 'utf-8').encode()
except Exception as ex:
logger.error(ex)
logger.error(traceback.format_exc())
attachment.add_header('Content-Disposition', 'attachment', filename=filename)
return attachment
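# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# A minimal, hedged example of how the Mail class above might be driven. It
# assumes Django is configured, that mst_config holds the SMTP settings read by
# Mail.get_mail_connection(), and that the address and attachment UUID below are
# placeholders. With is_encrypt=True the attachments are zipped with a generated
# password and the password is mailed separately by send_password().
def _mail_usage_example():
    mail = Mail(
        recipient_list='someone@example.com',  # placeholder address
        attachment_list=['00000000-0000-0000-0000-000000000000'],  # placeholder Attachment UUID
        is_encrypt=True,
        mail_title='Monthly report',
        mail_body='Please find the report attached.',
        pass_title='Password notice',
        pass_body='The password is {password}.',  # formatted by send_password()
    )
    mail.send_email(user=None)  # pass a user to also record an EMailLogEntry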
| 38.86533 | 119 | 0.570112 |
| b24de997bf78cf0737eec0c9ad078f45075a6060 | 6,802 | py | Python | AppServer/google/appengine/ext/endpoints/message_parser.py | loftwah/appscale | 586fc1347ebc743d7a632de698f4dbfb09ae38d6 | ["Apache-2.0"] | 790 | 2015-01-03T02:13:39.000Z | 2020-05-10T19:53:57.000Z | AppServer/google/appengine/ext/endpoints/message_parser.py | loftwah/appscale | 586fc1347ebc743d7a632de698f4dbfb09ae38d6 | ["Apache-2.0"] | 1,361 | 2015-01-08T23:09:40.000Z | 2020-04-14T00:03:04.000Z | AppServer/google/appengine/ext/endpoints/message_parser.py | loftwah/appscale | 586fc1347ebc743d7a632de698f4dbfb09ae38d6 | ["Apache-2.0"] | 155 | 2015-01-08T22:59:31.000Z | 2020-04-08T08:01:53.000Z |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Describe ProtoRPC Messages in JSON Schema.
Add protorpc.message subclasses to MessageTypeToJsonSchema and get a JSON
Schema description of all the messages.
"""
import re
import google
from protorpc import message_types
from protorpc import messages
__all__ = ['MessageTypeToJsonSchema']
class MessageTypeToJsonSchema(object):
"""Describe ProtoRPC messages in JSON Schema.
Add protorpc.message subclasses to MessageTypeToJsonSchema and get a JSON
Schema description of all the messages. MessageTypeToJsonSchema handles
all the types of fields that can appear in a message.
"""
__FIELD_TO_SCHEMA_TYPE_MAP = {
messages.IntegerField: {messages.Variant.INT32: ('integer', 'int32'),
messages.Variant.INT64: ('string', 'int64'),
messages.Variant.UINT32: ('integer', 'uint32'),
messages.Variant.UINT64: ('string', 'uint64'),
messages.Variant.SINT32: ('integer', 'int32'),
messages.Variant.SINT64: ('string', 'int64'),
None: ('integer', 'int32')},
messages.FloatField: {messages.Variant.FLOAT: ('number', 'float'),
messages.Variant.DOUBLE: ('number', 'double'),
None: ('number', 'float')},
messages.BooleanField: ('boolean', None),
messages.BytesField: ('string', 'byte'),
message_types.DateTimeField: ('string', 'date-time'),
messages.StringField: ('string', None),
messages.MessageField: ('object', None),
messages.EnumField: ('string', None),
}
__DEFAULT_SCHEMA_TYPE = ('string', None)
def __init__(self):
self.__schemas = {}
self.__normalized_names = {}
def add_message(self, message_type):
"""Add a new message.
Args:
message_type: protorpc.message.Message class to be parsed.
Returns:
string, The JSON Schema id.
Raises:
KeyError if the Schema id for this message_type would collide with the
Schema id of a different message_type that was already added.
"""
name = self.__normalized_name(message_type)
if name not in self.__schemas:
schema = self.__message_to_schema(message_type)
self.__schemas[name] = schema
return name
def ref_for_message_type(self, message_type):
"""Returns the JSON Schema id for the given message.
Args:
message_type: protorpc.message.Message class to be parsed.
Returns:
string, The JSON Schema id.
Raises:
KeyError: if the message hasn't been parsed via add_message().
"""
name = self.__normalized_name(message_type)
if name not in self.__schemas:
raise KeyError('Message has not been parsed: %s', name)
return name
def schemas(self):
"""Returns the JSON Schema of all the messages.
Returns:
object: JSON Schema description of all messages.
"""
return self.__schemas.copy()
def __normalized_name(self, message_type):
"""Normalized schema name.
Generate a normalized schema name, taking the class name and stripping out
everything but alphanumerics, and camel casing the remaining words.
A normalized schema name is a name that matches [a-zA-Z][a-zA-Z0-9]*
Args:
message_type: protorpc.message.Message class being parsed.
Returns:
A string, the normalized schema name.
Raises:
KeyError if a collision is found between normalized names.
"""
name = message_type.definition_name()
split_name = re.split(r'[^0-9a-zA-Z]', name)
normalized = ''.join(
part[0].upper() + part[1:] for part in split_name if part)
previous = self.__normalized_names.get(normalized)
if previous:
if previous != name:
raise KeyError('Both %s and %s normalize to the same schema name: %s' %
(name, previous, normalized))
else:
self.__normalized_names[normalized] = name
return normalized
def __message_to_schema(self, message_type):
"""Parse a single message into JSON Schema.
Will recursively descend the message structure
and also parse other messages references via MessageFields.
Args:
message_type: protorpc.messages.Message class to parse.
Returns:
An object representation of the schema.
"""
name = self.__normalized_name(message_type)
schema = {
'id': name,
'type': 'object',
}
if message_type.__doc__:
schema['description'] = message_type.__doc__
properties = {}
for field in message_type.all_fields():
descriptor = {}
type_info = {}
if type(field) == messages.MessageField:
field_type = field.type().__class__
type_info['$ref'] = self.add_message(field_type)
if field_type.__doc__:
descriptor['description'] = field_type.__doc__
else:
schema_type = self.__FIELD_TO_SCHEMA_TYPE_MAP.get(
type(field), self.__DEFAULT_SCHEMA_TYPE)
if isinstance(schema_type, dict):
variant_map = schema_type
variant = getattr(field, 'variant', None)
if variant in variant_map:
schema_type = variant_map[variant]
else:
schema_type = variant_map[None]
type_info['type'] = schema_type[0]
if schema_type[1]:
type_info['format'] = schema_type[1]
if type(field) == messages.EnumField:
sorted_enums = sorted([enum_info for enum_info in field.type],
key=lambda enum_info: enum_info.number)
type_info['enum'] = [enum_info.name for enum_info in sorted_enums]
if field.required:
descriptor['required'] = True
if field.default:
if type(field) == messages.EnumField:
descriptor['default'] = str(field.default)
else:
descriptor['default'] = field.default
if field.repeated:
descriptor['items'] = type_info
descriptor['type'] = 'array'
else:
descriptor.update(type_info)
properties[field.name] = descriptor
schema['properties'] = properties
return schema
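# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# A small, hedged example of the workflow described in the module docstring:
# define a protorpc message, register it with add_message(), then read the
# accumulated JSON Schema dictionaries. The Note message is hypothetical.
def _schema_usage_example():
    class Note(messages.Message):
        """A simple note."""
        text = messages.StringField(1, required=True)
        priority = messages.IntegerField(2, variant=messages.Variant.INT32)

    parser = MessageTypeToJsonSchema()
    schema_id = parser.add_message(Note)  # normalized schema name, also usable as a $ref
    assert parser.ref_for_message_type(Note) == schema_id
    return parser.schemas()  # {schema_id: {'id': schema_id, 'type': 'object', 'properties': ...}}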
| 29.573913 | 79 | 0.647898 |
| fffb42c0c3b615663dcfb285e436943fac14b32b | 703 | py | Python | src/python/grpcio_admin/grpc_version.py | bradfol/grpc | 2c2b82e7c0fd82d43cda5a05ab8e1b02f02aacc2 | ["Apache-2.0"] | 1 | 2021-05-09T04:42:15.000Z | 2021-05-09T04:42:15.000Z | src/python/grpcio_admin/grpc_version.py | bradfol/grpc | 2c2b82e7c0fd82d43cda5a05ab8e1b02f02aacc2 | ["Apache-2.0"] | 2 | 2021-12-29T09:10:10.000Z | 2022-01-05T09:28:51.000Z | src/python/grpcio_admin/grpc_version.py | bradfol/grpc | 2c2b82e7c0fd82d43cda5a05ab8e1b02f02aacc2 | ["Apache-2.0"] | 2 | 2018-02-08T04:45:39.000Z | 2018-04-23T03:12:42.000Z |
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_admin/grpc_version.py.template`!!!
VERSION = '1.38.0.dev0'
| 39.055556 | 96 | 0.762447 |
| 7dc9e7d01384727fa7b60afa90ac4cab3e530b3d | 122 | py | Python | pulse_project/settings/__init__.py | ctrl-alt-delete-3308/pulse | 417c065fef2e7947e64de55bcfd79279fd8dcc27 | ["MIT"] | null | null | null | pulse_project/settings/__init__.py | ctrl-alt-delete-3308/pulse | 417c065fef2e7947e64de55bcfd79279fd8dcc27 | ["MIT"] | 8 | 2019-12-05T01:34:03.000Z | 2021-09-22T18:11:08.000Z | pulse_project/settings/__init__.py | ctrl-alt-delete-3308/pulse | 417c065fef2e7947e64de55bcfd79279fd8dcc27 | ["MIT"] | null | null | null |
#!/bin/env python3
"""
Settings module for Pulse Project.
Loads development settings by default.
"""
from .dev import *
| 15.25 | 38 | 0.721311 |
| e3712108562aa53188f885a90468f4d0e259dd4c | 3,262 | py | Python | tests/functional/fkey/primary/test_insert_pk_09.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | ["MIT"] | 1 | 2022-02-05T11:37:13.000Z | 2022-02-05T11:37:13.000Z | tests/functional/fkey/primary/test_insert_pk_09.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | ["MIT"] | 1 | 2021-09-03T11:47:00.000Z | 2021-09-03T12:42:10.000Z | tests/functional/fkey/primary/test_insert_pk_09.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | ["MIT"] | 1 | 2021-06-30T14:14:16.000Z | 2021-06-30T14:14:16.000Z |
#coding:utf-8
#
# id: functional.fkey.primary.insert_pk_09
# title: Check that the fix for foreign keys works correctly
# description: Check foreign key behaviour.
# Master table has a primary key consisting of several fields.
# Master transaction modifies all primary key fields.
# Detail transaction inserts a record into detail_table.
# Expected: error - the primary key in master_table has been changed.
# tracker_id:
# min_versions: []
# versions: 2.5.3
# qmid: functional.fkey.primary.ins_09
import pytest
from firebird.qa import db_factory, python_act, Action
from firebird.driver import DatabaseError, tpb, Isolation
# version: 2.5.3
# resources: None
substitutions_1 = []
init_script_1 = """CREATE TABLE MASTER_TABLE (
ID_1 INTEGER NOT NULL,
ID_2 VARCHAR(20) NOT NULL,
INT_F INTEGER,
PRIMARY KEY (ID_1, ID_2)
);
CREATE TABLE DETAIL_TABLE (
ID INTEGER PRIMARY KEY,
FKEY_1 INTEGER,
FKEY_2 VARCHAR(20)
);
ALTER TABLE DETAIL_TABLE ADD CONSTRAINT FK_DETAIL_TABLE FOREIGN KEY (FKEY_1, FKEY_2) REFERENCES MASTER_TABLE (ID_1, ID_2);
COMMIT;
INSERT INTO MASTER_TABLE (ID_1, ID_2, INT_F) VALUES (1, 'one', 10);
COMMIT;"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
# TPB_master = (
# chr(kdb.isc_tpb_write)
# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version)
# + chr(kdb.isc_tpb_nowait)
# )
# TPB_detail = (
# chr(kdb.isc_tpb_write)
# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version)
# + chr(kdb.isc_tpb_nowait)
# )
# db_conn.begin(tpb=TPB_master)
# cm_1 = db_conn.cursor()
# cm_1.execute("UPDATE MASTER_TABLE SET ID_1=2 WHERE ID_1=1")
# cm_1.execute("UPDATE MASTER_TABLE SET ID_2='two' WHERE ID_2='one'")
#
# #Create second connection for change detail table
# con_detail = kdb.connect(
# dsn=dsn.encode(),
# user=user_name.encode(),
# password=user_password.encode()
# )
#
# try:
# con_detail.begin(tpb=TPB_detail)
# cd = con_detail.cursor()
# cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY_1, FKEY_2) VALUES (1, 1, 'one')")
# con_detail.commit()
# except Exception, e:
# print (e[0])
#---
act_1 = python_act('db_1', substitutions=substitutions_1)
@pytest.mark.version('>=2.5.3')
def test_1(act_1: Action):
with act_1.db.connect() as con:
cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0)
con.begin(cust_tpb)
with con.cursor() as c:
c.execute("UPDATE MASTER_TABLE SET ID_1=2 WHERE ID_1=1")
c.execute("UPDATE MASTER_TABLE SET ID_2='two' WHERE ID_2='one'")
#Create second connection for change detail table
with act_1.db.connect() as con_detail:
con_detail.begin(cust_tpb)
with con_detail.cursor() as cd:
with pytest.raises(DatabaseError,
match='.*violation of FOREIGN KEY constraint "FK_DETAIL_TABLE" on table "DETAIL_TABLE".*'):
cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY_1, FKEY_2) VALUES (1, 1, 'one')")
con_detail.commit()
# Passed.
| 33.979167 | 130 | 0.645616 |
| ae646572d91b19d0f3c1a20e580322adfc6ccd9d | 2,813 | py | Python | FC2/fc2/train.py | zfgao66/deeplearning-mpo-tensorflow | c345b9fea79e16f98f9b50e0b4e0bcaf4ed4c8e6 | ["MIT"] | 24 | 2019-04-30T14:59:43.000Z | 2021-11-16T03:47:38.000Z | FC2/fc2/train.py | HC1022/deeplearning-mpo | c345b9fea79e16f98f9b50e0b4e0bcaf4ed4c8e6 | ["MIT"] | null | null | null | FC2/fc2/train.py | HC1022/deeplearning-mpo | c345b9fea79e16f98f9b50e0b4e0bcaf4ed4c8e6 | ["MIT"] | 9 | 2019-08-14T10:50:37.000Z | 2022-03-15T14:41:52.000Z |
# -*- coding: utf-8 -*-
"""
@author: zfgao
"""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import inference
from hyperprameter import *
BATCH_SIZE=FLAGS.batch_size
TRAINING_STEPS=FLAGS.global_step
LEARNING_RATE_BASE=FLAGS.LEARNING_RATE_BASE
LEARNING_RATE_DECAY=FLAGS.LEARNING_RATE_DECAY
REGULARIZER_RATE=FLAGS.REGULARIZER_RATE
MOVING_DECAY=0.99
#seed =12345
#tf.set_random_seed(seed)
def mnist(inp):
x=tf.placeholder(tf.float32,[None,inference.input_node],name='x-input')
y_=tf.placeholder(tf.float32,[None,inference.output_node],name='y-input')
# regularizer = tf.contrib.layers.l2_regularizer(REGULARIZER_RATE)
y=inference.inference(x)
global_step=tf.Variable(0,trainable=False)
# ema = tf.train.ExponentialMovingAverage(MOVING_DECAY, global_step)
# ema_op = ema.apply(tf.trainable_variables())
ce=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y,labels=tf.argmax(y_,1))
loss=tf.reduce_mean(ce)
loss += tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()]) * REGULARIZER_RATE
# loss = loss + tf.add_n(tf.get_collection('losses'))
learning_rate=tf.train.exponential_decay(LEARNING_RATE_BASE,
global_step,
inp.train.num_examples/BATCH_SIZE,
LEARNING_RATE_DECAY)
train_steps=tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)
# with tf.control_dependencies([train_steps,ema_op]):
# train_op=tf.no_op(name='train')
correct_prediction=tf.equal(tf.argmax(y,1),tf.argmax(y_,1))
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
with tf.Session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
best_acc = 0
for i in range(TRAINING_STEPS):
xs,ys = inp.train.next_batch(BATCH_SIZE)
_,step,lr = sess.run([train_steps,global_step,learning_rate],feed_dict={x:xs,y_:ys})
if i%1000 == 0:
accuracy_score = sess.run(accuracy, feed_dict={x:inp.test.images,y_:inp.test.labels})
print('step={},lr={}'.format(step,lr))
if best_acc< accuracy_score:
best_acc = accuracy_score
print('Accuracy at step %s: %s' % (i, accuracy_score))
accuracy_score=sess.run(accuracy,feed_dict={x:inp.test.images,y_:inp.test.labels})
print("After %s trainning step(s),best accuracy=%g" %(step,best_acc))
def main(argv=None):
inp=input_data.read_data_sets("./data/",validation_size=0,one_hot=True)
mnist(inp)
if __name__=='__main__':
tf.app.run()
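# ---------------------------------------------------------------------------
# Usage note (added for illustration; not part of the original script).
# Run as `python train.py` (TensorFlow 1.x): it reads MNIST into ./data/, builds
# the network from inference.py with the FLAGS defined in hyperprameter.py, and
# trains with plain SGD plus an L2 penalty of REGULARIZER_RATE over all trainable
# variables, printing the learning rate every 1000 steps and the best test
# accuracy at the end.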
| 39.619718 | 104 | 0.658727 |
| 20977113b50f12323bf30a1961e0497b3aa646e5 | 1,729 | py | Python | setup.py | ducminh-phan/CSA-cython | 49fbabc7376f98b3a63e82f31d868ff54bcfdbca | ["MIT"] | null | null | null | setup.py | ducminh-phan/CSA-cython | 49fbabc7376f98b3a63e82f31d868ff54bcfdbca | ["MIT"] | null | null | null | setup.py | ducminh-phan/CSA-cython | 49fbabc7376f98b3a63e82f31d868ff54bcfdbca | ["MIT"] | null | null | null |
from copy import deepcopy
from glob import glob
import numpy as np
from setuptools import setup, find_packages
import setup_utils
try:
from Cython.Build import cythonize
from Cython.Distutils import Extension
# The glob pattern '**/*.pyx' also matches files in subpackages
source_files_patterns = ['**/*.pyx']
except ImportError:
from setuptools import Extension
source_files_patterns = ['**/*.c']
def cythonize(extensions, **__):
module_list = []
for extension in extensions:
# Find all sources from the glob patterns provided in sources
source_files = sum([glob(pattern, recursive=True) for pattern in extension.sources], [])
for file in source_files:
module = deepcopy(extension)
module.name = setup_utils.fully_qualified_name(file)
module.sources = [file]
module_list.append(module)
return module_list
extensions = [Extension('*', source_files_patterns, extra_compile_args=['-O3'])]
directives = {}
opt = True
prof = False
if opt:
directives.update({
'boundscheck': False,
'wraparound': False,
'initializedcheck': False,
'cdivision': True,
})
if prof:
directives['profile'] = True
extensions = cythonize(extensions,
language_level=3,
annotate=True,
compiler_directives=directives,
)
setup(
name="csa",
ext_modules=extensions,
packages=find_packages(),
install_requires=['numpy', 'pandas', 'tqdm'],
include_dirs=[np.get_include()],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
)
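# ---------------------------------------------------------------------------
# Usage note (added for illustration; not part of the original file).
# When Cython is available the .pyx sources are cythonized with the optimization
# directives above; otherwise the fallback cythonize() shim compiles the
# pre-generated .c files. A typical invocation (an assumption, not taken from the
# repository docs) would be:
#
#     python setup.py build_ext --inplace
#
# after which `python setup.py test` runs the pytest suite wired up through
# setup_requires=['pytest-runner'] and tests_require=['pytest'].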
| 25.426471 | 100 | 0.622325 |
| 3559da7f566cf79e86df59f013a53c2c86fb2e51 | 386,362 | py | Python | nova/tests/unit/db/test_db_api.py | cloudbase/nova-virtualbox | cb758ecff10130ce7a06e0ce1c05dfc999af212f | ["Apache-2.0"] | 4 | 2015-04-13T14:52:41.000Z | 2018-02-03T19:32:13.000Z | nova/tests/unit/db/test_db_api.py | cloudbase/nova-virtualbox | cb758ecff10130ce7a06e0ce1c05dfc999af212f | ["Apache-2.0"] | null | null | null | nova/tests/unit/db/test_db_api.py | cloudbase/nova-virtualbox | cb758ecff10130ce7a06e0ce1c05dfc999af212f | ["Apache-2.0"] | 2 | 2015-10-10T05:30:38.000Z | 2020-07-24T01:56:46.000Z |
# encoding=UTF8
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the DB API."""
import copy
import datetime
import types
import uuid as stdlib_uuid
import iso8601
import mock
import netaddr
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import test_base
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
from sqlalchemy import Column
from sqlalchemy.dialects import sqlite
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy.orm import query
from sqlalchemy import sql
from sqlalchemy import Table
from nova import block_device
from nova.compute import arch
from nova.compute import flavors
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy import types as col_types
from nova.db.sqlalchemy import utils as db_utils
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import uuidutils
from nova import quota
from nova import test
from nova.tests.unit import matchers
from nova import utils
CONF = cfg.CONF
CONF.import_opt('reserved_host_memory_mb', 'nova.compute.resource_tracker')
CONF.import_opt('reserved_host_disk_mb', 'nova.compute.resource_tracker')
get_engine = sqlalchemy_api.get_engine
get_session = sqlalchemy_api.get_session
def _reservation_get(context, uuid):
result = sqlalchemy_api.model_query(context, models.Reservation,
read_deleted="no").filter_by(uuid=uuid).first()
if not result:
raise exception.ReservationNotFound(uuid=uuid)
return result
def _quota_reserve(context, project_id, user_id):
"""Create sample Quota, QuotaUsage and Reservation objects.
There is no method db.quota_usage_create(), so we have to use
db.quota_reserve() for creating QuotaUsage objects.
Returns reservations uuids.
"""
def get_sync(resource, usage):
def sync(elevated, project_id, user_id, session):
return {resource: usage}
return sync
quotas = {}
user_quotas = {}
resources = {}
deltas = {}
for i in range(3):
resource = 'resource%d' % i
if i == 2:
# test for project level resources
resource = 'fixed_ips'
quotas[resource] = db.quota_create(context,
project_id, resource, i)
user_quotas[resource] = quotas[resource]
else:
quotas[resource] = db.quota_create(context,
project_id, resource, i)
user_quotas[resource] = db.quota_create(context, project_id,
resource, i,
user_id=user_id)
sync_name = '_sync_%s' % resource
resources[resource] = quota.ReservableResource(
resource, sync_name, 'quota_res_%d' % i)
deltas[resource] = i
setattr(sqlalchemy_api, sync_name, get_sync(resource, i))
sqlalchemy_api.QUOTA_SYNC_FUNCTIONS[sync_name] = getattr(
sqlalchemy_api, sync_name)
return db.quota_reserve(context, resources, quotas, user_quotas, deltas,
timeutils.utcnow(), CONF.until_refresh,
datetime.timedelta(days=1), project_id, user_id)
class DbTestCase(test.TestCase):
def setUp(self):
super(DbTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
def create_instance_with_args(self, **kwargs):
args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1',
'node': 'node1', 'project_id': self.project_id,
'vm_state': 'fake'}
if 'context' in kwargs:
ctxt = kwargs.pop('context')
args['project_id'] = ctxt.project_id
else:
ctxt = self.context
args.update(kwargs)
return db.instance_create(ctxt, args)
def fake_metadata(self, content):
meta = {}
for i in range(0, 10):
meta["foo%i" % i] = "this is %s item %i" % (content, i)
return meta
def create_metadata_for_instance(self, instance_uuid):
meta = self.fake_metadata('metadata')
db.instance_metadata_update(self.context, instance_uuid, meta, False)
sys_meta = self.fake_metadata('system_metadata')
db.instance_system_metadata_update(self.context, instance_uuid,
sys_meta, False)
return meta, sys_meta
class DecoratorTestCase(test.TestCase):
def _test_decorator_wraps_helper(self, decorator):
def test_func():
"""Test docstring."""
decorated_func = decorator(test_func)
self.assertEqual(test_func.func_name, decorated_func.func_name)
self.assertEqual(test_func.__doc__, decorated_func.__doc__)
self.assertEqual(test_func.__module__, decorated_func.__module__)
def test_require_context_decorator_wraps_functions_properly(self):
self._test_decorator_wraps_helper(sqlalchemy_api.require_context)
def test_require_admin_context_decorator_wraps_functions_properly(self):
self._test_decorator_wraps_helper(sqlalchemy_api.require_admin_context)
def test_require_deadlock_retry_wraps_functions_properly(self):
self._test_decorator_wraps_helper(sqlalchemy_api._retry_on_deadlock)
def _get_fake_aggr_values():
return {'name': 'fake_aggregate'}
def _get_fake_aggr_metadata():
return {'fake_key1': 'fake_value1',
'fake_key2': 'fake_value2',
'availability_zone': 'fake_avail_zone'}
def _get_fake_aggr_hosts():
return ['foo.openstack.org']
def _create_aggregate(context=context.get_admin_context(),
values=_get_fake_aggr_values(),
metadata=_get_fake_aggr_metadata()):
return db.aggregate_create(context, values, metadata)
def _create_aggregate_with_hosts(context=context.get_admin_context(),
values=_get_fake_aggr_values(),
metadata=_get_fake_aggr_metadata(),
hosts=_get_fake_aggr_hosts()):
result = _create_aggregate(context=context,
values=values, metadata=metadata)
for host in hosts:
db.aggregate_host_add(context, result['id'], host)
return result
class NotDbApiTestCase(DbTestCase):
def setUp(self):
super(NotDbApiTestCase, self).setUp()
self.flags(connection='notdb://', group='database')
def test_instance_get_all_by_filters_regex_unsupported_db(self):
# Ensure that the 'LIKE' operator is used for unsupported dbs.
self.create_instance_with_args(display_name='test1')
self.create_instance_with_args(display_name='test2')
self.create_instance_with_args(display_name='diff')
result = db.instance_get_all_by_filters(self.context,
{'display_name': 'test'})
self.assertEqual(2, len(result))
result = db.instance_get_all_by_filters(self.context,
{'display_name': 'di'})
self.assertEqual(1, len(result))
def test_instance_get_all_by_filters_paginate(self):
test1 = self.create_instance_with_args(display_name='test1')
test2 = self.create_instance_with_args(display_name='test2')
test3 = self.create_instance_with_args(display_name='test3')
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'},
marker=None)
self.assertEqual(3, len(result))
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'},
sort_dir="asc",
marker=test1['uuid'])
self.assertEqual(2, len(result))
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'},
sort_dir="asc",
marker=test2['uuid'])
self.assertEqual(1, len(result))
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'},
sort_dir="asc",
marker=test3['uuid'])
self.assertEqual(0, len(result))
self.assertRaises(exception.MarkerNotFound,
db.instance_get_all_by_filters,
self.context, {'display_name': '%test%'},
marker=str(stdlib_uuid.uuid4()))
def _assert_equals_inst_order(self, correct_order, filters,
sort_keys=None, sort_dirs=None,
limit=None, marker=None,
match_keys=['uuid', 'vm_state',
'display_name', 'id']):
'''Retrieves instances based on the given filters and sorting
information and verifies that the instances are returned in the
correct sorted order by ensuring that the supplied keys match.
'''
result = db.instance_get_all_by_filters_sort(
self.context, filters, limit=limit, marker=marker,
sort_keys=sort_keys, sort_dirs=sort_dirs)
self.assertEqual(len(correct_order), len(result))
for inst1, inst2 in zip(result, correct_order):
for key in match_keys:
self.assertEqual(inst1.get(key), inst2.get(key))
return result
def test_instance_get_all_by_filters_sort_keys(self):
'''Verifies sort order and direction for multiple instances.'''
# Instances that will reply to the query
test1_active = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ACTIVE)
test1_error = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ERROR)
test1_error2 = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ERROR)
test2_active = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ACTIVE)
test2_error = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ERROR)
test2_error2 = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ERROR)
# Other instances in the DB, will not match name filter
other_error = self.create_instance_with_args(
display_name='other',
vm_state=vm_states.ERROR)
other_active = self.create_instance_with_args(
display_name='other',
vm_state=vm_states.ACTIVE)
filters = {'display_name': '%test%'}
# Verify different sort key/direction combinations
sort_keys = ['display_name', 'vm_state', 'created_at']
sort_dirs = ['asc', 'asc', 'asc']
correct_order = [test1_active, test1_error, test1_error2,
test2_active, test2_error, test2_error2]
self._assert_equals_inst_order(correct_order, filters,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
sort_dirs = ['asc', 'desc', 'asc']
correct_order = [test1_error, test1_error2, test1_active,
test2_error, test2_error2, test2_active]
self._assert_equals_inst_order(correct_order, filters,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
sort_dirs = ['desc', 'desc', 'asc']
correct_order = [test2_error, test2_error2, test2_active,
test1_error, test1_error2, test1_active]
self._assert_equals_inst_order(correct_order, filters,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
# created_at is added by default if not supplied, descending order
sort_keys = ['display_name', 'vm_state']
sort_dirs = ['desc', 'desc']
correct_order = [test2_error2, test2_error, test2_active,
test1_error2, test1_error, test1_active]
self._assert_equals_inst_order(correct_order, filters,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
# Now created_at should be in ascending order (defaults to the first
# sort dir direction)
sort_dirs = ['asc', 'asc']
correct_order = [test1_active, test1_error, test1_error2,
test2_active, test2_error, test2_error2]
self._assert_equals_inst_order(correct_order, filters,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
# Remove name filter, get all instances
correct_order = [other_active, other_error,
test1_active, test1_error, test1_error2,
test2_active, test2_error, test2_error2]
self._assert_equals_inst_order(correct_order, {},
sort_keys=sort_keys,
sort_dirs=sort_dirs)
# Default sorting, 'created_at' then 'id' in desc order
correct_order = [other_active, other_error,
test2_error2, test2_error, test2_active,
test1_error2, test1_error, test1_active]
self._assert_equals_inst_order(correct_order, {})
def test_instance_get_all_by_filters_sort_keys_paginate(self):
'''Verifies sort order with pagination.'''
# Instances that will reply to the query
test1_active = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ACTIVE)
test1_error = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ERROR)
test1_error2 = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ERROR)
test2_active = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ACTIVE)
test2_error = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ERROR)
test2_error2 = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ERROR)
# Other instances in the DB, will not match name filter
self.create_instance_with_args(display_name='other')
self.create_instance_with_args(display_name='other')
filters = {'display_name': '%test%'}
# Common sort information for every query
sort_keys = ['display_name', 'vm_state', 'created_at']
sort_dirs = ['asc', 'desc', 'asc']
# Overall correct instance order based on the sort keys
correct_order = [test1_error, test1_error2, test1_active,
test2_error, test2_error2, test2_active]
# Limits of 1, 2, and 3, verify that the instances returned are in the
# correct sorted order, update the marker to get the next correct page
for limit in range(1, 4):
marker = None
# Include the maximum number of instances (ie, 6) to ensure that
# the last query (with marker pointing to the last instance)
# returns 0 servers
for i in range(0, 7, limit):
if i == len(correct_order):
correct = []
else:
correct = correct_order[i:i + limit]
insts = self._assert_equals_inst_order(
correct, filters,
sort_keys=sort_keys, sort_dirs=sort_dirs,
limit=limit, marker=marker)
if correct:
marker = insts[-1]['uuid']
self.assertEqual(correct[-1]['uuid'], marker)
def test_instance_get_all_by_filters_sort_key_invalid(self):
'''InvalidSortKey raised if an invalid key is given.'''
for keys in [['foo'], ['uuid', 'foo']]:
self.assertRaises(exception.InvalidSortKey,
db.instance_get_all_by_filters_sort,
self.context,
filters={},
sort_keys=keys)
def test_instance_get_deleted_by_filters_sort_keys_paginate(self):
'''Verifies sort order with pagination for deleted instances.'''
ctxt = context.get_admin_context()
# Instances that will reply to the query
test1_active = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ACTIVE)
db.instance_destroy(ctxt, test1_active['uuid'])
test1_error = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ERROR)
db.instance_destroy(ctxt, test1_error['uuid'])
test1_error2 = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ERROR)
db.instance_destroy(ctxt, test1_error2['uuid'])
test2_active = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ACTIVE)
db.instance_destroy(ctxt, test2_active['uuid'])
test2_error = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ERROR)
db.instance_destroy(ctxt, test2_error['uuid'])
test2_error2 = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ERROR)
db.instance_destroy(ctxt, test2_error2['uuid'])
# Other instances in the DB, will not match name filter
self.create_instance_with_args(display_name='other')
self.create_instance_with_args(display_name='other')
filters = {'display_name': '%test%', 'deleted': True}
# Common sort information for every query
sort_keys = ['display_name', 'vm_state', 'created_at']
sort_dirs = ['asc', 'desc', 'asc']
# Overall correct instance order based on the sort keys
correct_order = [test1_error, test1_error2, test1_active,
test2_error, test2_error2, test2_active]
# Limits of 1, 2, and 3, verify that the instances returned are in the
# correct sorted order, update the marker to get the next correct page
for limit in range(1, 4):
marker = None
# Include the maximum number of instances (ie, 6) to ensure that
# the last query (with marker pointing to the last instance)
# returns 0 servers
for i in range(0, 7, limit):
if i == len(correct_order):
correct = []
else:
correct = correct_order[i:i + limit]
insts = self._assert_equals_inst_order(
correct, filters,
sort_keys=sort_keys, sort_dirs=sort_dirs,
limit=limit, marker=marker)
if correct:
marker = insts[-1]['uuid']
self.assertEqual(correct[-1]['uuid'], marker)
def test_convert_objects_related_datetimes(self):
t1 = timeutils.utcnow()
t2 = t1 + datetime.timedelta(seconds=10)
t3 = t2 + datetime.timedelta(hours=1)
t2_utc = t2.replace(tzinfo=iso8601.iso8601.Utc())
t3_utc = t3.replace(tzinfo=iso8601.iso8601.Utc())
datetime_keys = ('created_at', 'deleted_at')
test1 = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
expected_dict = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
sqlalchemy_api.convert_objects_related_datetimes(test1, *datetime_keys)
self.assertEqual(test1, expected_dict)
test2 = {'created_at': t1, 'deleted_at': t2_utc, 'updated_at': t3}
expected_dict = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
sqlalchemy_api.convert_objects_related_datetimes(test2, *datetime_keys)
self.assertEqual(test2, expected_dict)
test3 = {'deleted_at': t2_utc, 'updated_at': t3_utc}
expected_dict = {'deleted_at': t2, 'updated_at': t3_utc}
sqlalchemy_api.convert_objects_related_datetimes(test3, *datetime_keys)
self.assertEqual(test3, expected_dict)
def test_model_query_invalid_arguments(self):
# read_deleted shouldn't accept invalid values
self.assertRaises(ValueError, sqlalchemy_api.model_query,
self.context, models.Instance, read_deleted=False)
self.assertRaises(ValueError, sqlalchemy_api.model_query,
self.context, models.Instance, read_deleted="foo")
# Check model is a valid model
self.assertRaises(TypeError, sqlalchemy_api.model_query,
self.context, "")
@mock.patch.object(sqlalchemy_api, 'get_session')
def test_model_query_use_slave_false(self, mock_get_session):
sqlalchemy_api.model_query(self.context, models.Instance,
use_slave=False)
mock_get_session.assert_called_once_with(use_slave=False)
@mock.patch.object(sqlalchemy_api, 'get_session')
def test_model_query_use_slave_no_slave_connection(self, mock_get_session):
self.flags(slave_connection='', group='database')
sqlalchemy_api.model_query(self.context, models.Instance,
use_slave=True)
mock_get_session.assert_called_once_with(use_slave=False)
@mock.patch.object(sqlalchemy_api, 'get_session')
def test_model_query_use_slave_true(self, mock_get_session):
self.flags(slave_connection='foo://bar', group='database')
sqlalchemy_api.model_query(self.context, models.Instance,
use_slave=True)
mock_get_session.assert_called_once_with(use_slave=True)
@mock.patch.object(sqlalchemy_api, 'get_session')
def test_model_query_lazy_session_default(self, mock_get_session):
sqlalchemy_api.model_query(self.context, models.Instance,
session=mock.MagicMock())
self.assertFalse(mock_get_session.called)
class AggregateDBApiTestCase(test.TestCase):
def setUp(self):
super(AggregateDBApiTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
def test_aggregate_create_no_metadata(self):
result = _create_aggregate(metadata=None)
self.assertEqual(result['name'], 'fake_aggregate')
def test_aggregate_create_avoid_name_conflict(self):
r1 = _create_aggregate(metadata=None)
db.aggregate_delete(context.get_admin_context(), r1['id'])
values = {'name': r1['name']}
metadata = {'availability_zone': 'new_zone'}
r2 = _create_aggregate(values=values, metadata=metadata)
self.assertEqual(r2['name'], values['name'])
self.assertEqual(r2['availability_zone'],
metadata['availability_zone'])
def test_aggregate_create_raise_exist_exc(self):
_create_aggregate(metadata=None)
self.assertRaises(exception.AggregateNameExists,
_create_aggregate, metadata=None)
def test_aggregate_get_raise_not_found(self):
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
self.assertRaises(exception.AggregateNotFound,
db.aggregate_get,
ctxt, aggregate_id)
def test_aggregate_metadata_get_raise_not_found(self):
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
self.assertRaises(exception.AggregateNotFound,
db.aggregate_metadata_get,
ctxt, aggregate_id)
def test_aggregate_create_with_metadata(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(expected_metadata,
matchers.DictMatches(_get_fake_aggr_metadata()))
def test_aggregate_create_delete_create_with_metadata(self):
# test for bug 1052479
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(expected_metadata,
matchers.DictMatches(_get_fake_aggr_metadata()))
db.aggregate_delete(ctxt, result['id'])
result = _create_aggregate(metadata={'availability_zone':
'fake_avail_zone'})
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
self.assertEqual(expected_metadata, {'availability_zone':
'fake_avail_zone'})
def test_aggregate_get(self):
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt)
expected = db.aggregate_get(ctxt, result['id'])
self.assertEqual(_get_fake_aggr_hosts(), expected['hosts'])
self.assertEqual(_get_fake_aggr_metadata(), expected['metadetails'])
def test_aggregate_get_by_host(self):
ctxt = context.get_admin_context()
values2 = {'name': 'fake_aggregate2'}
values3 = {'name': 'fake_aggregate3'}
values4 = {'name': 'fake_aggregate4'}
values5 = {'name': 'fake_aggregate5'}
a1 = _create_aggregate_with_hosts(context=ctxt)
a2 = _create_aggregate_with_hosts(context=ctxt, values=values2)
# a3 has no hosts and should not be in the results.
_create_aggregate(context=ctxt, values=values3)
# a4 has no matching hosts.
_create_aggregate_with_hosts(context=ctxt, values=values4,
hosts=['foo4.openstack.org'])
# a5 has no matching hosts after deleting the only matching host.
a5 = _create_aggregate_with_hosts(context=ctxt, values=values5,
hosts=['foo5.openstack.org', 'foo.openstack.org'])
db.aggregate_host_delete(ctxt, a5['id'],
'foo.openstack.org')
r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org')
self.assertEqual([a1['id'], a2['id']], [x['id'] for x in r1])
def test_aggregate_get_by_host_with_key(self):
ctxt = context.get_admin_context()
values2 = {'name': 'fake_aggregate2'}
values3 = {'name': 'fake_aggregate3'}
values4 = {'name': 'fake_aggregate4'}
a1 = _create_aggregate_with_hosts(context=ctxt,
metadata={'goodkey': 'good'})
_create_aggregate_with_hosts(context=ctxt, values=values2)
_create_aggregate(context=ctxt, values=values3)
_create_aggregate_with_hosts(context=ctxt, values=values4,
hosts=['foo4.openstack.org'], metadata={'goodkey': 'bad'})
# filter result by key
r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org', key='goodkey')
self.assertEqual([a1['id']], [x['id'] for x in r1])
def test_aggregate_metadata_get_by_host(self):
ctxt = context.get_admin_context()
values = {'name': 'fake_aggregate2'}
values2 = {'name': 'fake_aggregate3'}
_create_aggregate_with_hosts(context=ctxt)
_create_aggregate_with_hosts(context=ctxt, values=values)
_create_aggregate_with_hosts(context=ctxt, values=values2,
hosts=['bar.openstack.org'], metadata={'badkey': 'bad'})
r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo.openstack.org')
self.assertEqual(r1['fake_key1'], set(['fake_value1']))
self.assertNotIn('badkey', r1)
def test_aggregate_metadata_get_by_metadata_key(self):
ctxt = context.get_admin_context()
values = {'aggregate_id': 'fake_id',
'name': 'fake_aggregate'}
aggr = _create_aggregate_with_hosts(context=ctxt, values=values,
hosts=['bar.openstack.org'],
metadata={'availability_zone':
'az1'})
r1 = db.aggregate_metadata_get_by_metadata_key(ctxt, aggr['id'],
'availability_zone')
self.assertEqual(r1['availability_zone'], set(['az1']))
self.assertIn('availability_zone', r1)
self.assertNotIn('name', r1)
def test_aggregate_metadata_get_by_host_with_key(self):
ctxt = context.get_admin_context()
values2 = {'name': 'fake_aggregate12'}
values3 = {'name': 'fake_aggregate23'}
a2_hosts = ['foo1.openstack.org', 'foo2.openstack.org']
a2_metadata = {'good': 'value12', 'bad': 'badvalue12'}
a3_hosts = ['foo2.openstack.org', 'foo3.openstack.org']
a3_metadata = {'good': 'value23', 'bad': 'badvalue23'}
_create_aggregate_with_hosts(context=ctxt)
_create_aggregate_with_hosts(context=ctxt, values=values2,
hosts=a2_hosts, metadata=a2_metadata)
a3 = _create_aggregate_with_hosts(context=ctxt, values=values3,
hosts=a3_hosts, metadata=a3_metadata)
r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo2.openstack.org',
key='good')
self.assertEqual(r1['good'], set(['value12', 'value23']))
self.assertNotIn('fake_key1', r1)
self.assertNotIn('bad', r1)
# Delete metadata
db.aggregate_metadata_delete(ctxt, a3['id'], 'good')
r2 = db.aggregate_metadata_get_by_host(ctxt, 'foo3.openstack.org',
key='good')
self.assertNotIn('good', r2)
def test_aggregate_get_by_host_not_found(self):
ctxt = context.get_admin_context()
_create_aggregate_with_hosts(context=ctxt)
self.assertEqual([], db.aggregate_get_by_host(ctxt, 'unknown_host'))
def test_aggregate_delete_raise_not_found(self):
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
self.assertRaises(exception.AggregateNotFound,
db.aggregate_delete,
ctxt, aggregate_id)
def test_aggregate_delete(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
db.aggregate_delete(ctxt, result['id'])
expected = db.aggregate_get_all(ctxt)
self.assertEqual(0, len(expected))
aggregate = db.aggregate_get(ctxt.elevated(read_deleted='yes'),
result['id'])
self.assertEqual(aggregate['deleted'], result['id'])
def test_aggregate_update(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata={'availability_zone':
'fake_avail_zone'})
self.assertEqual(result['availability_zone'], 'fake_avail_zone')
new_values = _get_fake_aggr_values()
new_values['availability_zone'] = 'different_avail_zone'
updated = db.aggregate_update(ctxt, result['id'], new_values)
self.assertNotEqual(result['availability_zone'],
updated['availability_zone'])
def test_aggregate_update_with_metadata(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
values = _get_fake_aggr_values()
values['metadata'] = _get_fake_aggr_metadata()
values['availability_zone'] = 'different_avail_zone'
expected_metadata = copy.deepcopy(values['metadata'])
expected_metadata['availability_zone'] = values['availability_zone']
db.aggregate_update(ctxt, result['id'], values)
metadata = db.aggregate_metadata_get(ctxt, result['id'])
updated = db.aggregate_get(ctxt, result['id'])
self.assertThat(metadata,
matchers.DictMatches(expected_metadata))
self.assertNotEqual(result['availability_zone'],
updated['availability_zone'])
def test_aggregate_update_with_existing_metadata(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
values = _get_fake_aggr_values()
values['metadata'] = _get_fake_aggr_metadata()
values['metadata']['fake_key1'] = 'foo'
expected_metadata = copy.deepcopy(values['metadata'])
db.aggregate_update(ctxt, result['id'], values)
metadata = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(metadata, matchers.DictMatches(expected_metadata))
def test_aggregate_update_zone_with_existing_metadata(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
new_zone = {'availability_zone': 'fake_avail_zone_2'}
metadata = _get_fake_aggr_metadata()
metadata.update(new_zone)
db.aggregate_update(ctxt, result['id'], new_zone)
expected = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_update_raise_not_found(self):
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
new_values = _get_fake_aggr_values()
self.assertRaises(exception.AggregateNotFound,
db.aggregate_update, ctxt, aggregate_id, new_values)
def test_aggregate_update_raise_name_exist(self):
ctxt = context.get_admin_context()
_create_aggregate(context=ctxt, values={'name': 'test1'},
metadata={'availability_zone': 'fake_avail_zone'})
_create_aggregate(context=ctxt, values={'name': 'test2'},
metadata={'availability_zone': 'fake_avail_zone'})
aggregate_id = 1
new_values = {'name': 'test2'}
self.assertRaises(exception.AggregateNameExists,
db.aggregate_update, ctxt, aggregate_id, new_values)
def test_aggregate_get_all(self):
ctxt = context.get_admin_context()
counter = 3
for c in range(counter):
_create_aggregate(context=ctxt,
values={'name': 'fake_aggregate_%d' % c},
metadata=None)
results = db.aggregate_get_all(ctxt)
self.assertEqual(len(results), counter)
def test_aggregate_get_all_non_deleted(self):
ctxt = context.get_admin_context()
add_counter = 5
remove_counter = 2
aggregates = []
for c in range(1, add_counter):
values = {'name': 'fake_aggregate_%d' % c}
aggregates.append(_create_aggregate(context=ctxt,
values=values, metadata=None))
for c in range(1, remove_counter):
db.aggregate_delete(ctxt, aggregates[c - 1]['id'])
results = db.aggregate_get_all(ctxt)
self.assertEqual(len(results), add_counter - remove_counter)
def test_aggregate_metadata_add(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
metadata = _get_fake_aggr_metadata()
db.aggregate_metadata_add(ctxt, result['id'], metadata)
expected = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_add_and_update(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
metadata = _get_fake_aggr_metadata()
key = metadata.keys()[0]
new_metadata = {key: 'foo',
'fake_new_key': 'fake_new_value'}
metadata.update(new_metadata)
db.aggregate_metadata_add(ctxt, result['id'], new_metadata)
expected = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_add_retry(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
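        # The stubbed metadata query raises DBDuplicateEntry on every call,
        # so aggregate_metadata_add keeps retrying until max_retries is
        # exhausted and then re-raises the error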
def counted():
def get_query(context, id, session, read_deleted):
get_query.counter += 1
raise db_exc.DBDuplicateEntry
get_query.counter = 0
return get_query
get_query = counted()
self.stubs.Set(sqlalchemy_api,
'_aggregate_metadata_get_query', get_query)
self.assertRaises(db_exc.DBDuplicateEntry, sqlalchemy_api.
aggregate_metadata_add, ctxt, result['id'], {},
max_retries=5)
self.assertEqual(get_query.counter, 5)
def test_aggregate_metadata_update(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
metadata = _get_fake_aggr_metadata()
key = metadata.keys()[0]
db.aggregate_metadata_delete(ctxt, result['id'], key)
new_metadata = {key: 'foo'}
db.aggregate_metadata_add(ctxt, result['id'], new_metadata)
expected = db.aggregate_metadata_get(ctxt, result['id'])
metadata[key] = 'foo'
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_delete(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
metadata = _get_fake_aggr_metadata()
db.aggregate_metadata_add(ctxt, result['id'], metadata)
db.aggregate_metadata_delete(ctxt, result['id'], metadata.keys()[0])
expected = db.aggregate_metadata_get(ctxt, result['id'])
del metadata[metadata.keys()[0]]
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_remove_availability_zone(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata={'availability_zone':
'fake_avail_zone'})
db.aggregate_metadata_delete(ctxt, result['id'], 'availability_zone')
expected = db.aggregate_metadata_get(ctxt, result['id'])
aggregate = db.aggregate_get(ctxt, result['id'])
self.assertIsNone(aggregate['availability_zone'])
self.assertThat({}, matchers.DictMatches(expected))
def test_aggregate_metadata_delete_raise_not_found(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
self.assertRaises(exception.AggregateMetadataNotFound,
db.aggregate_metadata_delete,
ctxt, result['id'], 'foo_key')
def test_aggregate_host_add(self):
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
expected = db.aggregate_host_get_all(ctxt, result['id'])
self.assertEqual(_get_fake_aggr_hosts(), expected)
def test_aggregate_host_re_add(self):
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
host = _get_fake_aggr_hosts()[0]
db.aggregate_host_delete(ctxt, result['id'], host)
db.aggregate_host_add(ctxt, result['id'], host)
expected = db.aggregate_host_get_all(ctxt, result['id'])
self.assertEqual(len(expected), 1)
def test_aggregate_host_add_duplicate_works(self):
ctxt = context.get_admin_context()
r1 = _create_aggregate_with_hosts(context=ctxt, metadata=None)
r2 = _create_aggregate_with_hosts(ctxt,
values={'name': 'fake_aggregate2'},
metadata={'availability_zone': 'fake_avail_zone2'})
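        # Both aggregates were created with the same host list; adding the
        # same hosts to a second aggregate must not fail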
h1 = db.aggregate_host_get_all(ctxt, r1['id'])
h2 = db.aggregate_host_get_all(ctxt, r2['id'])
self.assertEqual(h1, h2)
def test_aggregate_host_add_duplicate_raise_exist_exc(self):
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
self.assertRaises(exception.AggregateHostExists,
db.aggregate_host_add,
ctxt, result['id'], _get_fake_aggr_hosts()[0])
def test_aggregate_host_add_raise_not_found(self):
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
host = _get_fake_aggr_hosts()[0]
self.assertRaises(exception.AggregateNotFound,
db.aggregate_host_add,
ctxt, aggregate_id, host)
def test_aggregate_host_delete(self):
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
db.aggregate_host_delete(ctxt, result['id'],
_get_fake_aggr_hosts()[0])
expected = db.aggregate_host_get_all(ctxt, result['id'])
self.assertEqual(0, len(expected))
def test_aggregate_host_delete_raise_not_found(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
self.assertRaises(exception.AggregateHostNotFound,
db.aggregate_host_delete,
ctxt, result['id'], _get_fake_aggr_hosts()[0])
class SqlAlchemyDbApiNoDbTestCase(test.NoDBTestCase):
"""No-DB test class for simple test cases that do not require a backend."""
def test_manual_join_columns_immutable_list(self):
# Tests that _manual_join_columns doesn't modify the list passed in.
columns_to_join = ['system_metadata', 'test']
manual_joins, columns_to_join2 = (
sqlalchemy_api._manual_join_columns(columns_to_join))
self.assertEqual(['system_metadata'], manual_joins)
self.assertEqual(['test'], columns_to_join2)
self.assertEqual(['system_metadata', 'test'], columns_to_join)
class SqlAlchemyDbApiTestCase(DbTestCase):
def test_instance_get_all_by_host(self):
ctxt = context.get_admin_context()
self.create_instance_with_args()
self.create_instance_with_args()
self.create_instance_with_args(host='host2')
result = sqlalchemy_api._instance_get_all_uuids_by_host(ctxt, 'host1')
self.assertEqual(2, len(result))
def test_instance_get_all_uuids_by_host(self):
ctxt = context.get_admin_context()
self.create_instance_with_args()
self.create_instance_with_args()
self.create_instance_with_args(host='host2')
result = sqlalchemy_api._instance_get_all_uuids_by_host(ctxt, 'host1')
self.assertEqual(2, len(result))
self.assertEqual(types.UnicodeType, type(result[0]))
def test_instance_get_active_by_window_joined(self):
now = datetime.datetime(2013, 10, 10, 17, 16, 37, 156701)
start_time = now - datetime.timedelta(minutes=10)
now1 = now + datetime.timedelta(minutes=1)
now2 = now + datetime.timedelta(minutes=2)
now3 = now + datetime.timedelta(minutes=3)
ctxt = context.get_admin_context()
# used for testing columns_to_join
network_info = jsonutils.dumps({'ckey': 'cvalue'})
sample_data = {
'metadata': {'mkey1': 'mval1', 'mkey2': 'mval2'},
'system_metadata': {'smkey1': 'smval1', 'smkey2': 'smval2'},
'info_cache': {'network_info': network_info},
}
self.create_instance_with_args(launched_at=now, **sample_data)
self.create_instance_with_args(launched_at=now1, terminated_at=now2,
**sample_data)
self.create_instance_with_args(launched_at=now2, terminated_at=now3,
**sample_data)
self.create_instance_with_args(launched_at=now3, terminated_at=None,
**sample_data)
result = sqlalchemy_api.instance_get_active_by_window_joined(
ctxt, begin=now)
self.assertEqual(4, len(result))
# verify that all default columns are joined
meta = utils.metadata_to_dict(result[0]['metadata'])
self.assertEqual(sample_data['metadata'], meta)
sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
self.assertEqual(sample_data['system_metadata'], sys_meta)
self.assertIn('info_cache', result[0])
result = sqlalchemy_api.instance_get_active_by_window_joined(
ctxt, begin=now3, columns_to_join=['info_cache'])
self.assertEqual(2, len(result))
# verify that only info_cache is loaded
meta = utils.metadata_to_dict(result[0]['metadata'])
self.assertEqual({}, meta)
self.assertIn('info_cache', result[0])
result = sqlalchemy_api.instance_get_active_by_window_joined(
ctxt, begin=start_time, end=now)
self.assertEqual(0, len(result))
result = sqlalchemy_api.instance_get_active_by_window_joined(
ctxt, begin=start_time, end=now2,
columns_to_join=['system_metadata'])
self.assertEqual(2, len(result))
# verify that only system_metadata is loaded
meta = utils.metadata_to_dict(result[0]['metadata'])
self.assertEqual({}, meta)
sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
self.assertEqual(sample_data['system_metadata'], sys_meta)
self.assertNotIn('info_cache', result[0])
result = sqlalchemy_api.instance_get_active_by_window_joined(
ctxt, begin=now2, end=now3,
columns_to_join=['metadata', 'info_cache'])
self.assertEqual(2, len(result))
# verify that only metadata and info_cache are loaded
meta = utils.metadata_to_dict(result[0]['metadata'])
self.assertEqual(sample_data['metadata'], meta)
sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
self.assertEqual({}, sys_meta)
self.assertIn('info_cache', result[0])
self.assertEqual(network_info, result[0]['info_cache']['network_info'])
@mock.patch('nova.db.sqlalchemy.api.instance_get_all_by_filters_sort')
def test_instance_get_all_by_filters_calls_sort(self,
mock_get_all_filters_sort):
'''Verifies instance_get_all_by_filters calls the sort function.'''
        # Sort parameters should be wrapped in a list; all other parameters
        # should be passed through unchanged
ctxt = context.get_admin_context()
sqlalchemy_api.instance_get_all_by_filters(ctxt, {'foo': 'bar'},
'sort_key', 'sort_dir', limit=100, marker='uuid',
columns_to_join='columns', use_slave=True)
mock_get_all_filters_sort.assert_called_once_with(ctxt, {'foo': 'bar'},
limit=100, marker='uuid', columns_to_join='columns',
use_slave=True, sort_keys=['sort_key'], sort_dirs=['sort_dir'])
class ProcessSortParamTestCase(test.TestCase):
def test_process_sort_params_defaults(self):
'''Verifies default sort parameters.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params([], [])
self.assertEqual(['created_at', 'id'], sort_keys)
self.assertEqual(['asc', 'asc'], sort_dirs)
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(None, None)
self.assertEqual(['created_at', 'id'], sort_keys)
self.assertEqual(['asc', 'asc'], sort_dirs)
def test_process_sort_params_override_default_keys(self):
'''Verifies that the default keys can be overridden.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
[], [], default_keys=['key1', 'key2', 'key3'])
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
self.assertEqual(['asc', 'asc', 'asc'], sort_dirs)
def test_process_sort_params_override_default_dir(self):
'''Verifies that the default direction can be overridden.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
[], [], default_dir='dir1')
self.assertEqual(['created_at', 'id'], sort_keys)
self.assertEqual(['dir1', 'dir1'], sort_dirs)
def test_process_sort_params_override_default_key_and_dir(self):
'''Verifies that the default key and dir can be overridden.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
[], [], default_keys=['key1', 'key2', 'key3'],
default_dir='dir1')
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
self.assertEqual(['dir1', 'dir1', 'dir1'], sort_dirs)
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
[], [], default_keys=[], default_dir='dir1')
self.assertEqual([], sort_keys)
self.assertEqual([], sort_dirs)
def test_process_sort_params_non_default(self):
'''Verifies that non-default keys are added correctly.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['key1', 'key2'], ['asc', 'desc'])
self.assertEqual(['key1', 'key2', 'created_at', 'id'], sort_keys)
# First sort_dir in list is used when adding the default keys
self.assertEqual(['asc', 'desc', 'asc', 'asc'], sort_dirs)
def test_process_sort_params_default(self):
'''Verifies that default keys are added correctly.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2'], ['asc', 'desc'])
self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
self.assertEqual(['asc', 'desc', 'asc'], sort_dirs)
# Include default key value, rely on default direction
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2'], [])
self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
self.assertEqual(['asc', 'asc', 'asc'], sort_dirs)
def test_process_sort_params_default_dir(self):
'''Verifies that the default dir is applied to all keys.'''
# Direction is set, ignore default dir
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2'], ['desc'], default_dir='dir')
self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
self.assertEqual(['desc', 'desc', 'desc'], sort_dirs)
# But should be used if no direction is set
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2'], [], default_dir='dir')
self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
self.assertEqual(['dir', 'dir', 'dir'], sort_dirs)
def test_process_sort_params_unequal_length(self):
'''Verifies that a sort direction list is applied correctly.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2', 'key3'], ['desc'])
self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
self.assertEqual(['desc', 'desc', 'desc', 'desc'], sort_dirs)
        # The first direction in the list is used as the default for any
        # keys that do not have one
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2', 'key3'], ['desc', 'asc'])
self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
self.assertEqual(['desc', 'asc', 'desc', 'desc'], sort_dirs)
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2', 'key3'], ['desc', 'asc', 'asc'])
self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
self.assertEqual(['desc', 'asc', 'asc', 'desc'], sort_dirs)
def test_process_sort_params_extra_dirs_lengths(self):
        '''InvalidInput is raised if more directions than keys are given.'''
self.assertRaises(exception.InvalidInput,
sqlalchemy_api.process_sort_params,
['key1', 'key2'],
['asc', 'desc', 'desc'])
def test_process_sort_params_invalid_sort_dir(self):
        '''InvalidInput is raised if invalid sort directions are given.'''
for dirs in [['foo'], ['asc', 'foo'], ['asc', 'desc', 'foo']]:
self.assertRaises(exception.InvalidInput,
sqlalchemy_api.process_sort_params,
['key'],
dirs)
class MigrationTestCase(test.TestCase):
def setUp(self):
super(MigrationTestCase, self).setUp()
self.ctxt = context.get_admin_context()
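        # Build a fixture of migrations in several states and host/node
        # combinations; the tests below filter against this data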
self._create()
self._create()
self._create(status='reverted')
self._create(status='confirmed')
self._create(status='error')
self._create(source_compute='host2', source_node='b',
dest_compute='host1', dest_node='a')
self._create(source_compute='host2', dest_compute='host3')
self._create(source_compute='host3', dest_compute='host4')
def _create(self, status='migrating', source_compute='host1',
source_node='a', dest_compute='host2', dest_node='b',
system_metadata=None):
values = {'host': source_compute}
instance = db.instance_create(self.ctxt, values)
if system_metadata:
db.instance_system_metadata_update(self.ctxt, instance['uuid'],
system_metadata, False)
values = {'status': status, 'source_compute': source_compute,
'source_node': source_node, 'dest_compute': dest_compute,
'dest_node': dest_node, 'instance_uuid': instance['uuid']}
db.migration_create(self.ctxt, values)
def _assert_in_progress(self, migrations):
for migration in migrations:
self.assertNotEqual('confirmed', migration['status'])
self.assertNotEqual('reverted', migration['status'])
self.assertNotEqual('error', migration['status'])
def test_migration_get_in_progress_joins(self):
self._create(source_compute='foo', system_metadata={'foo': 'bar'})
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'foo', 'a')
system_metadata = migrations[0]['instance']['system_metadata'][0]
self.assertEqual(system_metadata['key'], 'foo')
self.assertEqual(system_metadata['value'], 'bar')
def test_in_progress_host1_nodea(self):
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'host1', 'a')
# 2 as source + 1 as dest
self.assertEqual(3, len(migrations))
self._assert_in_progress(migrations)
def test_in_progress_host1_nodeb(self):
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'host1', 'b')
# some migrations are to/from host1, but none with a node 'b'
self.assertEqual(0, len(migrations))
def test_in_progress_host2_nodeb(self):
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'host2', 'b')
# 2 as dest, 1 as source
self.assertEqual(3, len(migrations))
self._assert_in_progress(migrations)
def test_instance_join(self):
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'host2', 'b')
for migration in migrations:
instance = migration['instance']
self.assertEqual(migration['instance_uuid'], instance['uuid'])
def test_get_migrations_by_filters(self):
filters = {"status": "migrating", "host": "host3"}
migrations = db.migration_get_all_by_filters(self.ctxt, filters)
self.assertEqual(2, len(migrations))
for migration in migrations:
self.assertEqual(filters["status"], migration['status'])
hosts = [migration['source_compute'], migration['dest_compute']]
self.assertIn(filters["host"], hosts)
def test_only_admin_can_get_all_migrations_by_filters(self):
user_ctxt = context.RequestContext(user_id=None, project_id=None,
is_admin=False, read_deleted="no",
overwrite=False)
self.assertRaises(exception.AdminRequired,
db.migration_get_all_by_filters, user_ctxt, {})
def test_migration_get_unconfirmed_by_dest_compute(self):
# Ensure no migrations are returned.
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
'fake_host')
self.assertEqual(0, len(results))
# Ensure no migrations are returned.
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
'fake_host2')
self.assertEqual(0, len(results))
updated_at = datetime.datetime(2000, 1, 1, 12, 0, 0)
values = {"status": "finished", "updated_at": updated_at,
"dest_compute": "fake_host2"}
migration = db.migration_create(self.ctxt, values)
        # Ensure a migration for a different host is not returned
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
'fake_host')
self.assertEqual(0, len(results))
# Ensure one migration older than 10 seconds is returned.
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
'fake_host2')
self.assertEqual(1, len(results))
db.migration_update(self.ctxt, migration['id'],
{"status": "CONFIRMED"})
# Ensure the new migration is not returned.
updated_at = timeutils.utcnow()
values = {"status": "finished", "updated_at": updated_at,
"dest_compute": "fake_host2"}
migration = db.migration_create(self.ctxt, values)
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
"fake_host2")
self.assertEqual(0, len(results))
db.migration_update(self.ctxt, migration['id'],
{"status": "CONFIRMED"})
def test_migration_update_not_found(self):
self.assertRaises(exception.MigrationNotFound,
db.migration_update, self.ctxt, 42, {})
class ModelsObjectComparatorMixin(object):
def _dict_from_object(self, obj, ignored_keys):
if ignored_keys is None:
ignored_keys = []
return {k: v for k, v in obj.iteritems()
if k not in ignored_keys}
def _assertEqualObjects(self, obj1, obj2, ignored_keys=None):
obj1 = self._dict_from_object(obj1, ignored_keys)
obj2 = self._dict_from_object(obj2, ignored_keys)
self.assertEqual(len(obj1),
len(obj2),
"Keys mismatch: %s" %
str(set(obj1.keys()) ^ set(obj2.keys())))
for key, value in obj1.iteritems():
self.assertEqual(value, obj2[key])
def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None):
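        # Convert each object to a dict (minus the ignored keys) and sort
        # both lists so the comparison does not depend on ordering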
obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
sort_key = lambda d: [d[k] for k in sorted(d)]
conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), key=sort_key)
self.assertEqual(conv_and_sort(objs1), conv_and_sort(objs2))
def _assertEqualOrderedListOfObjects(self, objs1, objs2,
ignored_keys=None):
obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
conv = lambda obj: map(obj_to_dict, obj)
self.assertEqual(conv(objs1), conv(objs2))
def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2):
self.assertEqual(len(primitives1), len(primitives2))
for primitive in primitives1:
self.assertIn(primitive, primitives2)
for primitive in primitives2:
self.assertIn(primitive, primitives1)
class InstanceSystemMetadataTestCase(test.TestCase):
"""Tests for db.api.instance_system_metadata_* methods."""
def setUp(self):
super(InstanceSystemMetadataTestCase, self).setUp()
values = {'host': 'h1', 'project_id': 'p1',
'system_metadata': {'key': 'value'}}
self.ctxt = context.get_admin_context()
self.instance = db.instance_create(self.ctxt, values)
def test_instance_system_metadata_get(self):
metadata = db.instance_system_metadata_get(self.ctxt,
self.instance['uuid'])
self.assertEqual(metadata, {'key': 'value'})
def test_instance_system_metadata_update_new_pair(self):
db.instance_system_metadata_update(
self.ctxt, self.instance['uuid'],
{'new_key': 'new_value'}, False)
metadata = db.instance_system_metadata_get(self.ctxt,
self.instance['uuid'])
self.assertEqual(metadata, {'key': 'value', 'new_key': 'new_value'})
def test_instance_system_metadata_update_existent_pair(self):
db.instance_system_metadata_update(
self.ctxt, self.instance['uuid'],
{'key': 'new_value'}, True)
metadata = db.instance_system_metadata_get(self.ctxt,
self.instance['uuid'])
self.assertEqual(metadata, {'key': 'new_value'})
def test_instance_system_metadata_update_delete_true(self):
db.instance_system_metadata_update(
self.ctxt, self.instance['uuid'],
{'new_key': 'new_value'}, True)
metadata = db.instance_system_metadata_get(self.ctxt,
self.instance['uuid'])
self.assertEqual(metadata, {'new_key': 'new_value'})
@test.testtools.skip("bug 1189462")
def test_instance_system_metadata_update_nonexistent(self):
self.assertRaises(exception.InstanceNotFound,
db.instance_system_metadata_update,
self.ctxt, 'nonexistent-uuid',
{'key': 'value'}, True)
class ReservationTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.reservation_* methods."""
def setUp(self):
super(ReservationTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
usage = db.quota_usage_get(self.ctxt, 'project1', 'resource1', 'user1')
self.values = {'uuid': 'sample-uuid',
'project_id': 'project1',
'user_id': 'user1',
'resource': 'resource1',
'delta': 42,
'expire': timeutils.utcnow() + datetime.timedelta(days=1),
'usage': {'id': usage.id}}
def test_reservation_commit(self):
expected = {'project_id': 'project1', 'user_id': 'user1',
'resource0': {'reserved': 0, 'in_use': 0},
'resource1': {'reserved': 1, 'in_use': 1},
'fixed_ips': {'reserved': 2, 'in_use': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'))
_reservation_get(self.ctxt, self.reservations[0])
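        # Committing moves each reserved delta into in_use and removes the
        # reservation records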
db.reservation_commit(self.ctxt, self.reservations, 'project1',
'user1')
self.assertRaises(exception.ReservationNotFound,
_reservation_get, self.ctxt, self.reservations[0])
expected = {'project_id': 'project1', 'user_id': 'user1',
'resource0': {'reserved': 0, 'in_use': 0},
'resource1': {'reserved': 0, 'in_use': 2},
'fixed_ips': {'reserved': 0, 'in_use': 4}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'))
def test_reservation_rollback(self):
expected = {'project_id': 'project1', 'user_id': 'user1',
'resource0': {'reserved': 0, 'in_use': 0},
'resource1': {'reserved': 1, 'in_use': 1},
'fixed_ips': {'reserved': 2, 'in_use': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'))
_reservation_get(self.ctxt, self.reservations[0])
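        # Rolling back discards the reserved deltas; in_use keeps its
        # original values and the reservation records are removed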
db.reservation_rollback(self.ctxt, self.reservations, 'project1',
'user1')
self.assertRaises(exception.ReservationNotFound,
_reservation_get, self.ctxt, self.reservations[0])
expected = {'project_id': 'project1', 'user_id': 'user1',
'resource0': {'reserved': 0, 'in_use': 0},
'resource1': {'reserved': 0, 'in_use': 1},
'fixed_ips': {'reserved': 0, 'in_use': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'))
def test_reservation_expire(self):
db.reservation_expire(self.ctxt)
expected = {'project_id': 'project1', 'user_id': 'user1',
'resource0': {'reserved': 0, 'in_use': 0},
'resource1': {'reserved': 0, 'in_use': 1},
'fixed_ips': {'reserved': 0, 'in_use': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'))
class SecurityGroupRuleTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(SecurityGroupRuleTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_base_values(self):
return {
'name': 'fake_sec_group',
'description': 'fake_sec_group_descr',
'user_id': 'fake',
'project_id': 'fake',
'instances': []
}
def _get_base_rule_values(self):
return {
'protocol': "tcp",
'from_port': 80,
'to_port': 8080,
'cidr': None,
'deleted': 0,
'deleted_at': None,
'grantee_group': None,
'updated_at': None
}
def _create_security_group(self, values):
v = self._get_base_values()
v.update(values)
return db.security_group_create(self.ctxt, v)
def _create_security_group_rule(self, values):
v = self._get_base_rule_values()
v.update(values)
return db.security_group_rule_create(self.ctxt, v)
def test_security_group_rule_create(self):
security_group_rule = self._create_security_group_rule({})
self.assertIsNotNone(security_group_rule['id'])
for key, value in self._get_base_rule_values().items():
self.assertEqual(value, security_group_rule[key])
def _test_security_group_rule_get_by_security_group(self, columns=None):
instance = db.instance_create(self.ctxt,
{'system_metadata': {'foo': 'bar'}})
security_group = self._create_security_group({
'instances': [instance]})
security_group_rule = self._create_security_group_rule(
{'parent_group': security_group, 'grantee_group': security_group})
security_group_rule1 = self._create_security_group_rule(
{'parent_group': security_group, 'grantee_group': security_group})
found_rules = db.security_group_rule_get_by_security_group(
self.ctxt, security_group['id'], columns_to_join=columns)
self.assertEqual(len(found_rules), 2)
rules_ids = [security_group_rule['id'], security_group_rule1['id']]
for rule in found_rules:
if columns is None:
self.assertIn('grantee_group', dict(rule.iteritems()))
self.assertIn('instances',
dict(rule.grantee_group.iteritems()))
self.assertIn(
'system_metadata',
dict(rule.grantee_group.instances[0].iteritems()))
self.assertIn(rule['id'], rules_ids)
else:
self.assertNotIn('grantee_group', dict(rule.iteritems()))
def test_security_group_rule_get_by_security_group(self):
self._test_security_group_rule_get_by_security_group()
def test_security_group_rule_get_by_security_group_no_joins(self):
self._test_security_group_rule_get_by_security_group(columns=[])
def test_security_group_rule_get_by_security_group_grantee(self):
security_group = self._create_security_group({})
security_group_rule = self._create_security_group_rule(
{'grantee_group': security_group})
rules = db.security_group_rule_get_by_security_group_grantee(self.ctxt,
security_group['id'])
self.assertEqual(len(rules), 1)
self.assertEqual(rules[0]['id'], security_group_rule['id'])
def test_security_group_rule_destroy(self):
self._create_security_group({'name': 'fake1'})
self._create_security_group({'name': 'fake2'})
security_group_rule1 = self._create_security_group_rule({})
security_group_rule2 = self._create_security_group_rule({})
db.security_group_rule_destroy(self.ctxt, security_group_rule1['id'])
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_rule_get,
self.ctxt, security_group_rule1['id'])
self._assertEqualObjects(db.security_group_rule_get(self.ctxt,
security_group_rule2['id']),
security_group_rule2, ['grantee_group'])
def test_security_group_rule_destroy_not_found_exception(self):
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_rule_destroy, self.ctxt, 100500)
def test_security_group_rule_get(self):
security_group_rule1 = (
self._create_security_group_rule({}))
self._create_security_group_rule({})
real_security_group_rule = db.security_group_rule_get(self.ctxt,
security_group_rule1['id'])
self._assertEqualObjects(security_group_rule1,
real_security_group_rule, ['grantee_group'])
def test_security_group_rule_get_not_found_exception(self):
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_rule_get, self.ctxt, 100500)
def test_security_group_rule_count_by_group(self):
sg1 = self._create_security_group({'name': 'fake1'})
sg2 = self._create_security_group({'name': 'fake2'})
rules_by_group = {sg1: [], sg2: []}
for group in rules_by_group:
rules = rules_by_group[group]
for i in range(0, 10):
rules.append(
self._create_security_group_rule({'parent_group_id':
group['id']}))
db.security_group_rule_destroy(self.ctxt,
rules_by_group[sg1][0]['id'])
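        # One rule was removed from the first group, so the counts should be
        # 9 and 10 respectively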
counted_groups = [db.security_group_rule_count_by_group(self.ctxt,
group['id'])
for group in [sg1, sg2]]
expected = [9, 10]
self.assertEqual(counted_groups, expected)
class SecurityGroupTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(SecurityGroupTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_base_values(self):
return {
'name': 'fake_sec_group',
'description': 'fake_sec_group_descr',
'user_id': 'fake',
'project_id': 'fake',
'instances': []
}
def _create_security_group(self, values):
v = self._get_base_values()
v.update(values)
return db.security_group_create(self.ctxt, v)
def test_security_group_create(self):
security_group = self._create_security_group({})
self.assertIsNotNone(security_group['id'])
for key, value in self._get_base_values().iteritems():
self.assertEqual(value, security_group[key])
def test_security_group_destroy(self):
security_group1 = self._create_security_group({})
security_group2 = \
self._create_security_group({'name': 'fake_sec_group2'})
db.security_group_destroy(self.ctxt, security_group1['id'])
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_get,
self.ctxt, security_group1['id'])
self._assertEqualObjects(db.security_group_get(
self.ctxt, security_group2['id'],
columns_to_join=['instances']), security_group2)
def test_security_group_get(self):
security_group1 = self._create_security_group({})
self._create_security_group({'name': 'fake_sec_group2'})
real_security_group = db.security_group_get(self.ctxt,
security_group1['id'],
columns_to_join=['instances'])
self._assertEqualObjects(security_group1,
real_security_group)
def test_security_group_get_with_instance_columns(self):
instance = db.instance_create(self.ctxt,
{'system_metadata': {'foo': 'bar'}})
secgroup = self._create_security_group({'instances': [instance]})
secgroup = db.security_group_get(
self.ctxt, secgroup['id'],
columns_to_join=['instances.system_metadata'])
inst = secgroup.instances[0]
self.assertIn('system_metadata', dict(inst.iteritems()).keys())
def test_security_group_get_no_instances(self):
instance = db.instance_create(self.ctxt, {})
sid = self._create_security_group({'instances': [instance]})['id']
security_group = db.security_group_get(self.ctxt, sid,
columns_to_join=['instances'])
self.assertIn('instances', security_group.__dict__)
security_group = db.security_group_get(self.ctxt, sid)
self.assertNotIn('instances', security_group.__dict__)
def test_security_group_get_not_found_exception(self):
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_get, self.ctxt, 100500)
def test_security_group_get_by_name(self):
security_group1 = self._create_security_group({'name': 'fake1'})
security_group2 = self._create_security_group({'name': 'fake2'})
real_security_group1 = db.security_group_get_by_name(
self.ctxt,
security_group1['project_id'],
security_group1['name'],
columns_to_join=None)
real_security_group2 = db.security_group_get_by_name(
self.ctxt,
security_group2['project_id'],
security_group2['name'],
columns_to_join=None)
self._assertEqualObjects(security_group1, real_security_group1)
self._assertEqualObjects(security_group2, real_security_group2)
def test_security_group_get_by_project(self):
security_group1 = self._create_security_group(
{'name': 'fake1', 'project_id': 'fake_proj1'})
security_group2 = self._create_security_group(
{'name': 'fake2', 'project_id': 'fake_proj2'})
real1 = db.security_group_get_by_project(
self.ctxt,
security_group1['project_id'])
real2 = db.security_group_get_by_project(
self.ctxt,
security_group2['project_id'])
expected1, expected2 = [security_group1], [security_group2]
self._assertEqualListsOfObjects(expected1, real1,
ignored_keys=['instances'])
self._assertEqualListsOfObjects(expected2, real2,
ignored_keys=['instances'])
def test_security_group_get_by_instance(self):
instance = db.instance_create(self.ctxt, dict(host='foo'))
values = [
{'name': 'fake1', 'instances': [instance]},
{'name': 'fake2', 'instances': [instance]},
{'name': 'fake3', 'instances': []},
]
security_groups = [self._create_security_group(vals)
for vals in values]
real = db.security_group_get_by_instance(self.ctxt,
instance['uuid'])
expected = security_groups[:2]
self._assertEqualListsOfObjects(expected, real,
ignored_keys=['instances'])
def test_security_group_get_all(self):
values = [
{'name': 'fake1', 'project_id': 'fake_proj1'},
{'name': 'fake2', 'project_id': 'fake_proj2'},
]
security_groups = [self._create_security_group(vals)
for vals in values]
real = db.security_group_get_all(self.ctxt)
self._assertEqualListsOfObjects(security_groups, real,
ignored_keys=['instances'])
def test_security_group_in_use(self):
instance = db.instance_create(self.ctxt, dict(host='foo'))
values = [
{'instances': [instance],
'name': 'fake_in_use'},
{'instances': []},
]
security_groups = [self._create_security_group(vals)
for vals in values]
real = []
for security_group in security_groups:
in_use = db.security_group_in_use(self.ctxt,
security_group['id'])
real.append(in_use)
expected = [True, False]
self.assertEqual(expected, real)
def test_security_group_ensure_default(self):
self.ctxt.project_id = 'fake'
self.ctxt.user_id = 'fake'
self.assertEqual(0, len(db.security_group_get_by_project(
self.ctxt,
self.ctxt.project_id)))
db.security_group_ensure_default(self.ctxt)
security_groups = db.security_group_get_by_project(
self.ctxt,
self.ctxt.project_id)
self.assertEqual(1, len(security_groups))
self.assertEqual("default", security_groups[0]["name"])
usage = db.quota_usage_get(self.ctxt,
self.ctxt.project_id,
'security_groups',
self.ctxt.user_id)
self.assertEqual(1, usage.in_use)
@mock.patch.object(db.sqlalchemy.api, '_security_group_get_by_names')
def test_security_group_ensure_default_called_concurrently(self, sg_mock):
        # Make sure NotFound is always raised here to trick Nova into
        # inserting the duplicate security group entry
sg_mock.side_effect = exception.NotFound
# create the first db entry
self.ctxt.project_id = 1
db.security_group_ensure_default(self.ctxt)
security_groups = db.security_group_get_by_project(
self.ctxt,
self.ctxt.project_id)
self.assertEqual(1, len(security_groups))
# create the second one and ensure the exception is handled properly
default_group = db.security_group_ensure_default(self.ctxt)
self.assertEqual('default', default_group.name)
def test_security_group_update(self):
security_group = self._create_security_group({})
new_values = {
'name': 'sec_group1',
'description': 'sec_group_descr1',
'user_id': 'fake_user1',
'project_id': 'fake_proj1',
}
updated_group = db.security_group_update(self.ctxt,
security_group['id'],
new_values,
columns_to_join=['rules.grantee_group'])
for key, value in new_values.iteritems():
self.assertEqual(updated_group[key], value)
self.assertEqual(updated_group['rules'], [])
def test_security_group_update_to_duplicate(self):
self._create_security_group(
{'name': 'fake1', 'project_id': 'fake_proj1'})
security_group2 = self._create_security_group(
{'name': 'fake1', 'project_id': 'fake_proj2'})
self.assertRaises(exception.SecurityGroupExists,
db.security_group_update,
self.ctxt, security_group2['id'],
{'project_id': 'fake_proj1'})
class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.instance_* methods."""
sample_data = {
'project_id': 'project1',
'hostname': 'example.com',
'host': 'h1',
'node': 'n1',
'metadata': {'mkey1': 'mval1', 'mkey2': 'mval2'},
'system_metadata': {'smkey1': 'smval1', 'smkey2': 'smval2'},
'info_cache': {'ckey': 'cvalue'},
}
def setUp(self):
super(InstanceTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _assertEqualInstances(self, instance1, instance2):
self._assertEqualObjects(instance1, instance2,
ignored_keys=['metadata', 'system_metadata', 'info_cache',
'extra'])
def _assertEqualListsOfInstances(self, list1, list2):
self._assertEqualListsOfObjects(list1, list2,
ignored_keys=['metadata', 'system_metadata', 'info_cache',
'extra'])
def create_instance_with_args(self, **kwargs):
if 'context' in kwargs:
context = kwargs.pop('context')
else:
context = self.ctxt
args = self.sample_data.copy()
args.update(kwargs)
return db.instance_create(context, args)
def test_instance_create(self):
instance = self.create_instance_with_args()
self.assertTrue(uuidutils.is_uuid_like(instance['uuid']))
def test_instance_create_with_object_values(self):
values = {
'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
'access_ip_v6': netaddr.IPAddress('::1'),
}
dt_keys = ('created_at', 'deleted_at', 'updated_at',
'launched_at', 'terminated_at', 'scheduled_at')
dt = timeutils.utcnow()
dt_utc = dt.replace(tzinfo=iso8601.iso8601.Utc())
for key in dt_keys:
values[key] = dt_utc
inst = db.instance_create(self.ctxt, values)
self.assertEqual(inst['access_ip_v4'], '1.2.3.4')
self.assertEqual(inst['access_ip_v6'], '::1')
for key in dt_keys:
self.assertEqual(inst[key], dt)
def test_instance_update_with_object_values(self):
values = {
'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
'access_ip_v6': netaddr.IPAddress('::1'),
}
dt_keys = ('created_at', 'deleted_at', 'updated_at',
'launched_at', 'terminated_at', 'scheduled_at')
dt = timeutils.utcnow()
dt_utc = dt.replace(tzinfo=iso8601.iso8601.Utc())
for key in dt_keys:
values[key] = dt_utc
inst = db.instance_create(self.ctxt, {})
inst = db.instance_update(self.ctxt, inst['uuid'], values)
self.assertEqual(inst['access_ip_v4'], '1.2.3.4')
self.assertEqual(inst['access_ip_v6'], '::1')
for key in dt_keys:
self.assertEqual(inst[key], dt)
def test_instance_update_no_metadata_clobber(self):
meta = {'foo': 'bar'}
sys_meta = {'sfoo': 'sbar'}
values = {
'metadata': meta,
'system_metadata': sys_meta,
}
inst = db.instance_create(self.ctxt, {})
inst = db.instance_update(self.ctxt, inst['uuid'], values)
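        # instance_update must not modify the metadata dicts passed in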
self.assertEqual({'foo': 'bar'}, meta)
self.assertEqual({'sfoo': 'sbar'}, sys_meta)
def test_instance_get_all_with_meta(self):
inst = self.create_instance_with_args()
for inst in db.instance_get_all(self.ctxt):
meta = utils.metadata_to_dict(inst['metadata'])
self.assertEqual(meta, self.sample_data['metadata'])
sys_meta = utils.metadata_to_dict(inst['system_metadata'])
self.assertEqual(sys_meta, self.sample_data['system_metadata'])
def test_instance_update(self):
instance = self.create_instance_with_args()
metadata = {'host': 'bar', 'key2': 'wuff'}
system_metadata = {'original_image_ref': 'baz'}
# Update the metadata
db.instance_update(self.ctxt, instance['uuid'], {'metadata': metadata,
'system_metadata': system_metadata})
# Retrieve the user-provided metadata to ensure it was successfully
# updated
self.assertEqual(metadata,
db.instance_metadata_get(self.ctxt, instance['uuid']))
self.assertEqual(system_metadata,
db.instance_system_metadata_get(self.ctxt, instance['uuid']))
def test_instance_update_bad_str_dates(self):
instance = self.create_instance_with_args()
values = {'created_at': '123'}
self.assertRaises(ValueError,
db.instance_update,
self.ctxt, instance['uuid'], values)
def test_instance_update_good_str_dates(self):
instance = self.create_instance_with_args()
values = {'created_at': '2011-01-31T00:00:00.0'}
actual = db.instance_update(self.ctxt, instance['uuid'], values)
expected = datetime.datetime(2011, 1, 31)
self.assertEqual(expected, actual["created_at"])
def test_create_instance_unique_hostname(self):
context1 = context.RequestContext('user1', 'p1')
context2 = context.RequestContext('user2', 'p2')
self.create_instance_with_args(hostname='h1', project_id='p1')
# With scope 'global' any duplicate should fail, be it this project:
self.flags(osapi_compute_unique_server_name_scope='global')
self.assertRaises(exception.InstanceExists,
self.create_instance_with_args,
context=context1,
hostname='h1', project_id='p3')
# or another:
self.assertRaises(exception.InstanceExists,
self.create_instance_with_args,
context=context2,
hostname='h1', project_id='p2')
# With scope 'project' a duplicate in the project should fail:
self.flags(osapi_compute_unique_server_name_scope='project')
self.assertRaises(exception.InstanceExists,
self.create_instance_with_args,
context=context1,
hostname='h1', project_id='p1')
# With scope 'project' a duplicate in a different project should work:
self.flags(osapi_compute_unique_server_name_scope='project')
self.create_instance_with_args(context=context2, hostname='h2')
self.flags(osapi_compute_unique_server_name_scope=None)
@mock.patch('nova.db.sqlalchemy.api.undefer')
@mock.patch('nova.db.sqlalchemy.api.joinedload')
def test_instance_get_all_by_filters_extra_columns(self,
mock_joinedload,
mock_undefer):
db.instance_get_all_by_filters_sort(
self.ctxt, {},
columns_to_join=['info_cache', 'extra.pci_requests'])
mock_joinedload.assert_called_once_with('info_cache')
mock_undefer.assert_called_once_with('extra.pci_requests')
@mock.patch('nova.db.sqlalchemy.api.undefer')
@mock.patch('nova.db.sqlalchemy.api.joinedload')
def test_instance_get_active_by_window_extra_columns(self,
mock_joinedload,
mock_undefer):
now = datetime.datetime(2013, 10, 10, 17, 16, 37, 156701)
db.instance_get_active_by_window_joined(
self.ctxt, now,
columns_to_join=['info_cache', 'extra.pci_requests'])
mock_joinedload.assert_called_once_with('info_cache')
mock_undefer.assert_called_once_with('extra.pci_requests')
def test_instance_get_all_by_filters_with_meta(self):
inst = self.create_instance_with_args()
for inst in db.instance_get_all_by_filters(self.ctxt, {}):
meta = utils.metadata_to_dict(inst['metadata'])
self.assertEqual(meta, self.sample_data['metadata'])
sys_meta = utils.metadata_to_dict(inst['system_metadata'])
self.assertEqual(sys_meta, self.sample_data['system_metadata'])
def test_instance_get_all_by_filters_without_meta(self):
inst = self.create_instance_with_args()
result = db.instance_get_all_by_filters(self.ctxt, {},
columns_to_join=[])
for inst in result:
meta = utils.metadata_to_dict(inst['metadata'])
self.assertEqual(meta, {})
sys_meta = utils.metadata_to_dict(inst['system_metadata'])
self.assertEqual(sys_meta, {})
def test_instance_get_all_by_filters(self):
instances = [self.create_instance_with_args() for i in range(3)]
filtered_instances = db.instance_get_all_by_filters(self.ctxt, {})
self._assertEqualListsOfInstances(instances, filtered_instances)
def test_instance_get_all_by_filters_zero_limit(self):
self.create_instance_with_args()
instances = db.instance_get_all_by_filters(self.ctxt, {}, limit=0)
self.assertEqual([], instances)
def test_instance_metadata_get_multi(self):
uuids = [self.create_instance_with_args()['uuid'] for i in range(3)]
meta = sqlalchemy_api._instance_metadata_get_multi(self.ctxt, uuids)
for row in meta:
self.assertIn(row['instance_uuid'], uuids)
def test_instance_metadata_get_multi_no_uuids(self):
self.mox.StubOutWithMock(query.Query, 'filter')
self.mox.ReplayAll()
sqlalchemy_api._instance_metadata_get_multi(self.ctxt, [])
    def test_instance_system_metadata_get_multi(self):
uuids = [self.create_instance_with_args()['uuid'] for i in range(3)]
sys_meta = sqlalchemy_api._instance_system_metadata_get_multi(
self.ctxt, uuids)
for row in sys_meta:
self.assertIn(row['instance_uuid'], uuids)
def test_instance_system_metadata_get_multi_no_uuids(self):
self.mox.StubOutWithMock(query.Query, 'filter')
self.mox.ReplayAll()
sqlalchemy_api._instance_system_metadata_get_multi(self.ctxt, [])
def test_instance_get_all_by_filters_regex(self):
i1 = self.create_instance_with_args(display_name='test1')
i2 = self.create_instance_with_args(display_name='teeeest2')
self.create_instance_with_args(display_name='diff')
result = db.instance_get_all_by_filters(self.ctxt,
{'display_name': 't.*st.'})
self._assertEqualListsOfInstances(result, [i1, i2])
def test_instance_get_all_by_filters_changes_since(self):
i1 = self.create_instance_with_args(updated_at=
'2013-12-05T15:03:25.000000')
i2 = self.create_instance_with_args(updated_at=
'2013-12-05T15:03:26.000000')
changes_since = iso8601.parse_date('2013-12-05T15:03:25.000000')
result = db.instance_get_all_by_filters(self.ctxt,
{'changes-since':
changes_since})
self._assertEqualListsOfInstances([i1, i2], result)
changes_since = iso8601.parse_date('2013-12-05T15:03:26.000000')
result = db.instance_get_all_by_filters(self.ctxt,
{'changes-since':
changes_since})
self._assertEqualListsOfInstances([i2], result)
def test_instance_get_all_by_filters_exact_match(self):
instance = self.create_instance_with_args(host='host1')
self.create_instance_with_args(host='host12')
result = db.instance_get_all_by_filters(self.ctxt,
{'host': 'host1'})
self._assertEqualListsOfInstances([instance], result)
def test_instance_get_all_by_filters_metadata(self):
instance = self.create_instance_with_args(metadata={'foo': 'bar'})
self.create_instance_with_args()
result = db.instance_get_all_by_filters(self.ctxt,
{'metadata': {'foo': 'bar'}})
self._assertEqualListsOfInstances([instance], result)
def test_instance_get_all_by_filters_system_metadata(self):
instance = self.create_instance_with_args(
system_metadata={'foo': 'bar'})
self.create_instance_with_args()
result = db.instance_get_all_by_filters(self.ctxt,
{'system_metadata': {'foo': 'bar'}})
self._assertEqualListsOfInstances([instance], result)
def test_instance_get_all_by_filters_unicode_value(self):
instance = self.create_instance_with_args(display_name=u'test♥')
result = db.instance_get_all_by_filters(self.ctxt,
{'display_name': u'test'})
self._assertEqualListsOfInstances([instance], result)
def test_instance_get_all_by_filters_tags(self):
instance = self.create_instance_with_args(
metadata={'foo': 'bar'})
self.create_instance_with_args()
# For format 'tag-'
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag-key', 'value': 'foo'},
{'name': 'tag-value', 'value': 'bar'},
]})
self._assertEqualListsOfInstances([instance], result)
# For format 'tag:'
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag:foo', 'value': 'bar'},
]})
self._assertEqualListsOfInstances([instance], result)
# For non-existent tag
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag:foo', 'value': 'barred'},
]})
self.assertEqual([], result)
# Confirm with deleted tags
db.instance_metadata_delete(self.ctxt, instance['uuid'], 'foo')
# For format 'tag-'
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag-key', 'value': 'foo'},
]})
self.assertEqual([], result)
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag-value', 'value': 'bar'}
]})
self.assertEqual([], result)
# For format 'tag:'
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag:foo', 'value': 'bar'},
]})
self.assertEqual([], result)
def test_instance_get_by_uuid(self):
inst = self.create_instance_with_args()
result = db.instance_get_by_uuid(self.ctxt, inst['uuid'])
self._assertEqualInstances(inst, result)
def test_instance_get_by_uuid_join_empty(self):
inst = self.create_instance_with_args()
result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
columns_to_join=[])
meta = utils.metadata_to_dict(result['metadata'])
self.assertEqual(meta, {})
sys_meta = utils.metadata_to_dict(result['system_metadata'])
self.assertEqual(sys_meta, {})
def test_instance_get_by_uuid_join_meta(self):
inst = self.create_instance_with_args()
result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
columns_to_join=['metadata'])
meta = utils.metadata_to_dict(result['metadata'])
self.assertEqual(meta, self.sample_data['metadata'])
sys_meta = utils.metadata_to_dict(result['system_metadata'])
self.assertEqual(sys_meta, {})
def test_instance_get_by_uuid_join_sys_meta(self):
inst = self.create_instance_with_args()
result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
columns_to_join=['system_metadata'])
meta = utils.metadata_to_dict(result['metadata'])
self.assertEqual(meta, {})
sys_meta = utils.metadata_to_dict(result['system_metadata'])
self.assertEqual(sys_meta, self.sample_data['system_metadata'])
def test_instance_get_all_by_filters_deleted(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(reservation_id='b')
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt, {})
self._assertEqualListsOfObjects([inst1, inst2], result,
ignored_keys=['metadata', 'system_metadata',
'deleted', 'deleted_at', 'info_cache',
'pci_devices', 'extra'])
def test_instance_get_all_by_filters_deleted_and_soft_deleted(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
self.create_instance_with_args()
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt,
{'deleted': True})
self._assertEqualListsOfObjects([inst1, inst2], result,
ignored_keys=['metadata', 'system_metadata',
'deleted', 'deleted_at', 'info_cache',
'pci_devices', 'extra'])
def test_instance_get_all_by_filters_deleted_no_soft_deleted(self):
inst1 = self.create_instance_with_args()
self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
self.create_instance_with_args()
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt,
{'deleted': True,
'soft_deleted': False})
self._assertEqualListsOfObjects([inst1], result,
ignored_keys=['deleted', 'deleted_at', 'metadata',
'system_metadata', 'info_cache', 'pci_devices',
'extra'])
def test_instance_get_all_by_filters_alive_and_soft_deleted(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
inst3 = self.create_instance_with_args()
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt,
{'deleted': False,
'soft_deleted': True})
self._assertEqualListsOfInstances([inst2, inst3], result)
def test_instance_get_all_by_filters_not_deleted(self):
inst1 = self.create_instance_with_args()
self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
inst3 = self.create_instance_with_args()
inst4 = self.create_instance_with_args(vm_state=vm_states.ACTIVE)
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt,
{'deleted': False})
self.assertIsNone(inst3.vm_state)
self._assertEqualListsOfInstances([inst3, inst4], result)
def test_instance_get_all_by_filters_cleaned(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(reservation_id='b')
db.instance_update(self.ctxt, inst1['uuid'], {'cleaned': 1})
result = db.instance_get_all_by_filters(self.ctxt, {})
self.assertEqual(2, len(result))
self.assertIn(inst1['uuid'], [result[0]['uuid'], result[1]['uuid']])
self.assertIn(inst2['uuid'], [result[0]['uuid'], result[1]['uuid']])
if inst1['uuid'] == result[0]['uuid']:
self.assertTrue(result[0]['cleaned'])
self.assertFalse(result[1]['cleaned'])
else:
self.assertTrue(result[1]['cleaned'])
self.assertFalse(result[0]['cleaned'])
def test_instance_get_all_by_filters_tag_any(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args()
inst3 = self.create_instance_with_args()
t1 = 'tag1'
t2 = 'tag2'
t3 = 'tag3'
db.instance_tag_set(self.ctxt, inst1.uuid, [t1])
db.instance_tag_set(self.ctxt, inst2.uuid, [t1, t2, t3])
db.instance_tag_set(self.ctxt, inst3.uuid, [t3])
result = db.instance_get_all_by_filters(self.ctxt,
{'tag-any': [t1, t2]})
self._assertEqualListsOfObjects([inst1, inst2], result,
ignored_keys=['deleted', 'deleted_at', 'metadata', 'extra',
'system_metadata', 'info_cache', 'pci_devices'])
def test_instance_get_all_by_filters_tag_any_empty(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args()
t1 = 'tag1'
t2 = 'tag2'
t3 = 'tag3'
t4 = 'tag4'
db.instance_tag_set(self.ctxt, inst1.uuid, [t1])
db.instance_tag_set(self.ctxt, inst2.uuid, [t1, t2])
result = db.instance_get_all_by_filters(self.ctxt,
{'tag-any': [t3, t4]})
self.assertEqual([], result)
def test_instance_get_all_by_filters_tag(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args()
inst3 = self.create_instance_with_args()
t1 = 'tag1'
t2 = 'tag2'
t3 = 'tag3'
db.instance_tag_set(self.ctxt, inst1.uuid, [t1, t3])
db.instance_tag_set(self.ctxt, inst2.uuid, [t1, t2])
db.instance_tag_set(self.ctxt, inst3.uuid, [t1, t2, t3])
result = db.instance_get_all_by_filters(self.ctxt,
{'tag': [t1, t2]})
self._assertEqualListsOfObjects([inst2, inst3], result,
ignored_keys=['deleted', 'deleted_at', 'metadata', 'extra',
'system_metadata', 'info_cache', 'pci_devices'])
def test_instance_get_all_by_filters_tag_empty(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args()
t1 = 'tag1'
t2 = 'tag2'
t3 = 'tag3'
db.instance_tag_set(self.ctxt, inst1.uuid, [t1])
db.instance_tag_set(self.ctxt, inst2.uuid, [t1, t2])
result = db.instance_get_all_by_filters(self.ctxt,
{'tag': [t3]})
self.assertEqual([], result)
def test_instance_get_all_by_filters_tag_any_and_tag(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args()
inst3 = self.create_instance_with_args()
t1 = 'tag1'
t2 = 'tag2'
t3 = 'tag3'
t4 = 'tag4'
db.instance_tag_set(self.ctxt, inst1.uuid, [t1, t2])
db.instance_tag_set(self.ctxt, inst2.uuid, [t1, t2, t4])
db.instance_tag_set(self.ctxt, inst3.uuid, [t2, t3])
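        # Only inst2 has both t1 and t2 and also at least one of t3/t4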
result = db.instance_get_all_by_filters(self.ctxt,
{'tag': [t1, t2],
'tag-any': [t3, t4]})
self._assertEqualListsOfObjects([inst2], result,
ignored_keys=['deleted', 'deleted_at', 'metadata', 'extra',
'system_metadata', 'info_cache', 'pci_devices'])
def test_instance_get_all_by_host_and_node_no_join(self):
instance = self.create_instance_with_args()
result = db.instance_get_all_by_host_and_node(self.ctxt, 'h1', 'n1')
self.assertEqual(result[0]['uuid'], instance['uuid'])
self.assertEqual(result[0]['system_metadata'], [])
def test_instance_get_all_by_host_and_node(self):
instance = self.create_instance_with_args(
system_metadata={'foo': 'bar'})
result = db.instance_get_all_by_host_and_node(
self.ctxt, 'h1', 'n1',
columns_to_join=['system_metadata', 'extra'])
self.assertEqual(instance['uuid'], result[0]['uuid'])
self.assertEqual('bar', result[0]['system_metadata'][0]['value'])
self.assertEqual(instance['uuid'], result[0]['extra']['instance_uuid'])
@mock.patch('nova.db.sqlalchemy.api._instances_fill_metadata')
@mock.patch('nova.db.sqlalchemy.api._instance_get_all_query')
def test_instance_get_all_by_host_and_node_fills_manually(self,
mock_getall,
mock_fill):
db.instance_get_all_by_host_and_node(
self.ctxt, 'h1', 'n1',
columns_to_join=['metadata', 'system_metadata', 'extra', 'foo'])
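        # metadata and system_metadata must be filled in manually, while the
        # remaining columns are joined directly in the query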
self.assertEqual(sorted(['extra', 'foo']),
sorted(mock_getall.call_args[1]['joins']))
self.assertEqual(sorted(['metadata', 'system_metadata']),
sorted(mock_fill.call_args[1]['manual_joins']))
def test_instance_get_all_hung_in_rebooting(self):
# Ensure no instances are returned.
results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
self.assertEqual([], results)
# Ensure one rebooting instance with updated_at older than 10 seconds
# is returned.
instance = self.create_instance_with_args(task_state="rebooting",
updated_at=datetime.datetime(2000, 1, 1, 12, 0, 0))
results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
self._assertEqualListsOfObjects([instance], results,
ignored_keys=['task_state', 'info_cache', 'security_groups',
'metadata', 'system_metadata', 'pci_devices',
'extra'])
db.instance_update(self.ctxt, instance['uuid'], {"task_state": None})
# Ensure the newly rebooted instance is not returned.
instance = self.create_instance_with_args(task_state="rebooting",
updated_at=timeutils.utcnow())
results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
self.assertEqual([], results)
def test_instance_update_with_expected_vm_state(self):
instance = self.create_instance_with_args(vm_state='foo')
db.instance_update(self.ctxt, instance['uuid'], {'host': 'h1',
'expected_vm_state': ('foo', 'bar')})
def test_instance_update_with_unexpected_vm_state(self):
instance = self.create_instance_with_args(vm_state='foo')
self.assertRaises(exception.UnexpectedVMStateError,
db.instance_update, self.ctxt, instance['uuid'],
{'host': 'h1', 'expected_vm_state': ('spam', 'bar')})
def test_instance_update_with_instance_uuid(self):
# test instance_update() works when an instance UUID is passed.
ctxt = context.get_admin_context()
# Create an instance with some metadata
values = {'metadata': {'host': 'foo', 'key1': 'meow'},
'system_metadata': {'original_image_ref': 'blah'}}
instance = db.instance_create(ctxt, values)
# Update the metadata
values = {'metadata': {'host': 'bar', 'key2': 'wuff'},
'system_metadata': {'original_image_ref': 'baz'}}
db.instance_update(ctxt, instance['uuid'], values)
# Retrieve the user-provided metadata to ensure it was successfully
# updated
instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
self.assertEqual('bar', instance_meta['host'])
self.assertEqual('wuff', instance_meta['key2'])
self.assertNotIn('key1', instance_meta)
# Retrieve the system metadata to ensure it was successfully updated
system_meta = db.instance_system_metadata_get(ctxt, instance['uuid'])
self.assertEqual('baz', system_meta['original_image_ref'])
def test_delete_instance_metadata_on_instance_destroy(self):
ctxt = context.get_admin_context()
# Create an instance with some metadata
values = {'metadata': {'host': 'foo', 'key1': 'meow'},
'system_metadata': {'original_image_ref': 'blah'}}
instance = db.instance_create(ctxt, values)
instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
self.assertEqual('foo', instance_meta['host'])
self.assertEqual('meow', instance_meta['key1'])
db.instance_destroy(ctxt, instance['uuid'])
instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
# Make sure instance metadata is deleted as well
self.assertEqual({}, instance_meta)
def test_delete_instance_faults_on_instance_destroy(self):
ctxt = context.get_admin_context()
uuid = str(stdlib_uuid.uuid4())
# Create faults
db.instance_create(ctxt, {'uuid': uuid})
fault_values = {
'message': 'message',
'details': 'detail',
'instance_uuid': uuid,
'code': 404,
'host': 'localhost'
}
fault = db.instance_fault_create(ctxt, fault_values)
# Retrieve the fault to ensure it was successfully added
faults = db.instance_fault_get_by_instance_uuids(ctxt, [uuid])
self.assertEqual(1, len(faults[uuid]))
self._assertEqualObjects(fault, faults[uuid][0])
db.instance_destroy(ctxt, uuid)
faults = db.instance_fault_get_by_instance_uuids(ctxt, [uuid])
# Make sure instance faults is deleted as well
self.assertEqual(0, len(faults[uuid]))
def test_instance_update_with_and_get_original(self):
instance = self.create_instance_with_args(vm_state='building')
(old_ref, new_ref) = db.instance_update_and_get_original(self.ctxt,
instance['uuid'], {'vm_state': 'needscoffee'})
self.assertEqual('building', old_ref['vm_state'])
self.assertEqual('needscoffee', new_ref['vm_state'])
def test_instance_update_and_get_original_metadata(self):
instance = self.create_instance_with_args()
columns_to_join = ['metadata']
(old_ref, new_ref) = db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {'vm_state': 'needscoffee'},
columns_to_join=columns_to_join)
meta = utils.metadata_to_dict(new_ref['metadata'])
self.assertEqual(meta, self.sample_data['metadata'])
sys_meta = utils.metadata_to_dict(new_ref['system_metadata'])
self.assertEqual(sys_meta, {})
def test_instance_update_and_get_original_metadata_none_join(self):
instance = self.create_instance_with_args()
(old_ref, new_ref) = db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {'metadata': {'mk1': 'mv3'}})
meta = utils.metadata_to_dict(new_ref['metadata'])
self.assertEqual(meta, {'mk1': 'mv3'})
def test_instance_update_and_get_original_no_conflict_on_session(self):
session = get_session()
# patch get_session so that we may inspect it outside of the
# method; once enginefacade is implemented, this can be simplified
with mock.patch("nova.db.sqlalchemy.api.get_session", lambda: session):
instance = self.create_instance_with_args()
(old_ref, new_ref) = db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {'metadata': {'mk1': 'mv3'}})
# test some regular persisted fields
self.assertEqual(old_ref.uuid, new_ref.uuid)
self.assertEqual(old_ref.project_id, new_ref.project_id)
# after a copy operation, we can assert:
# 1. the two states have their own InstanceState
old_insp = inspect(old_ref)
new_insp = inspect(new_ref)
self.assertNotEqual(old_insp, new_insp)
# 2. only one of the objects is still in our Session
self.assertIs(new_insp.session, session)
self.assertIsNone(old_insp.session)
# 3. The "new" object remains persistent and ready
# for updates
self.assertTrue(new_insp.persistent)
# 4. the "old" object is detached from this Session.
self.assertTrue(old_insp.detached)
def test_instance_update_unique_name(self):
context1 = context.RequestContext('user1', 'p1')
context2 = context.RequestContext('user2', 'p2')
inst1 = self.create_instance_with_args(context=context1,
project_id='p1',
hostname='fake_name1')
inst2 = self.create_instance_with_args(context=context1,
project_id='p1',
hostname='fake_name2')
inst3 = self.create_instance_with_args(context=context2,
project_id='p2',
hostname='fake_name3')
# osapi_compute_unique_server_name_scope is unset so this should work:
db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_name2'})
db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_name1'})
# With scope 'global' any duplicate should fail.
self.flags(osapi_compute_unique_server_name_scope='global')
self.assertRaises(exception.InstanceExists,
db.instance_update,
context1,
inst2['uuid'],
{'hostname': 'fake_name1'})
self.assertRaises(exception.InstanceExists,
db.instance_update,
context2,
inst3['uuid'],
{'hostname': 'fake_name1'})
# But we should definitely be able to update our name if we aren't
# really changing it.
db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_NAME'})
# With scope 'project' a duplicate in the project should fail:
self.flags(osapi_compute_unique_server_name_scope='project')
self.assertRaises(exception.InstanceExists, db.instance_update,
context1, inst2['uuid'], {'hostname': 'fake_NAME'})
# With scope 'project' a duplicate in a different project should work:
self.flags(osapi_compute_unique_server_name_scope='project')
db.instance_update(context2, inst3['uuid'], {'hostname': 'fake_NAME'})
def _test_instance_update_updates_metadata(self, metadata_type):
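        """Helper asserting that instance_update stores exactly the supplied
        dict for the given metadata field (``metadata`` or ``system_metadata``).
        """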
instance = self.create_instance_with_args()
def set_and_check(meta):
inst = db.instance_update(self.ctxt, instance['uuid'],
{metadata_type: dict(meta)})
_meta = utils.metadata_to_dict(inst[metadata_type])
self.assertEqual(meta, _meta)
meta = {'speed': '88', 'units': 'MPH'}
set_and_check(meta)
meta['gigawatts'] = '1.21'
set_and_check(meta)
del meta['gigawatts']
set_and_check(meta)
def test_security_group_in_use(self):
db.instance_create(self.ctxt, dict(host='foo'))
def test_instance_update_updates_system_metadata(self):
# Ensure that system_metadata is updated during instance_update
self._test_instance_update_updates_metadata('system_metadata')
def test_instance_update_updates_metadata(self):
# Ensure that metadata is updated during instance_update
self._test_instance_update_updates_metadata('metadata')
def test_instance_floating_address_get_all(self):
ctxt = context.get_admin_context()
instance1 = db.instance_create(ctxt, {'host': 'h1', 'hostname': 'n1'})
instance2 = db.instance_create(ctxt, {'host': 'h2', 'hostname': 'n2'})
fixed_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_addresses = ['2.1.1.1', '2.1.1.2', '2.1.1.3']
instance_uuids = [instance1['uuid'], instance1['uuid'],
instance2['uuid']]
for fixed_addr, float_addr, instance_uuid in zip(fixed_addresses,
float_addresses,
instance_uuids):
db.fixed_ip_create(ctxt, {'address': fixed_addr,
'instance_uuid': instance_uuid})
fixed_id = db.fixed_ip_get_by_address(ctxt, fixed_addr)['id']
db.floating_ip_create(ctxt,
{'address': float_addr,
'fixed_ip_id': fixed_id})
        real_float_addresses = db.instance_floating_address_get_all(
            ctxt, instance_uuids[0])
self.assertEqual(set(float_addresses[:2]), set(real_float_addresses))
        real_float_addresses = db.instance_floating_address_get_all(
            ctxt, instance_uuids[2])
self.assertEqual(set([float_addresses[2]]), set(real_float_addresses))
self.assertRaises(exception.InvalidUUID,
db.instance_floating_address_get_all,
ctxt, 'invalid_uuid')
def test_instance_stringified_ips(self):
instance = self.create_instance_with_args()
instance = db.instance_update(
self.ctxt, instance['uuid'],
{'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
'access_ip_v6': netaddr.IPAddress('::1')})
self.assertIsInstance(instance['access_ip_v4'], six.string_types)
self.assertIsInstance(instance['access_ip_v6'], six.string_types)
instance = db.instance_get_by_uuid(self.ctxt, instance['uuid'])
self.assertIsInstance(instance['access_ip_v4'], six.string_types)
self.assertIsInstance(instance['access_ip_v6'], six.string_types)
def test_instance_destroy(self):
ctxt = context.get_admin_context()
values = {
'metadata': {'key': 'value'}
}
inst_uuid = self.create_instance_with_args(**values)['uuid']
db.instance_destroy(ctxt, inst_uuid)
self.assertRaises(exception.InstanceNotFound,
db.instance_get, ctxt, inst_uuid)
self.assertIsNone(db.instance_info_cache_get(ctxt, inst_uuid))
self.assertEqual({}, db.instance_metadata_get(ctxt, inst_uuid))
def test_instance_destroy_already_destroyed(self):
ctxt = context.get_admin_context()
instance = self.create_instance_with_args()
db.instance_destroy(ctxt, instance['uuid'])
self.assertRaises(exception.InstanceNotFound,
db.instance_destroy, ctxt, instance['uuid'])
class InstanceMetadataTestCase(test.TestCase):
"""Tests for db.api.instance_metadata_* methods."""
def setUp(self):
super(InstanceMetadataTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_instance_metadata_get(self):
instance = db.instance_create(self.ctxt, {'metadata':
{'key': 'value'}})
self.assertEqual({'key': 'value'}, db.instance_metadata_get(
self.ctxt, instance['uuid']))
def test_instance_metadata_delete(self):
instance = db.instance_create(self.ctxt,
{'metadata': {'key': 'val',
'key1': 'val1'}})
db.instance_metadata_delete(self.ctxt, instance['uuid'], 'key1')
self.assertEqual({'key': 'val'}, db.instance_metadata_get(
self.ctxt, instance['uuid']))
def test_instance_metadata_update(self):
instance = db.instance_create(self.ctxt, {'host': 'h1',
'project_id': 'p1', 'metadata': {'key': 'value'}})
# This should add new key/value pair
metadata = db.instance_metadata_update(
self.ctxt, instance['uuid'],
{'new_key': 'new_value'}, False)
metadata = db.instance_metadata_get(self.ctxt, instance['uuid'])
self.assertEqual(metadata, {'key': 'value', 'new_key': 'new_value'})
# This should leave only one key/value pair
metadata = db.instance_metadata_update(
self.ctxt, instance['uuid'],
{'new_key': 'new_value'}, True)
metadata = db.instance_metadata_get(self.ctxt, instance['uuid'])
self.assertEqual(metadata, {'new_key': 'new_value'})
class InstanceExtraTestCase(test.TestCase):
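    """Tests for db.api.instance_extra_* methods."""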
def setUp(self):
super(InstanceExtraTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.instance = db.instance_create(self.ctxt, {})
def test_instance_extra_get_by_uuid_instance_create(self):
inst_extra = db.instance_extra_get_by_instance_uuid(
self.ctxt, self.instance['uuid'])
self.assertIsNotNone(inst_extra)
def test_instance_extra_update_by_uuid(self):
db.instance_extra_update_by_uuid(self.ctxt, self.instance['uuid'],
{'numa_topology': 'changed'})
inst_extra = db.instance_extra_get_by_instance_uuid(
self.ctxt, self.instance['uuid'])
self.assertEqual('changed', inst_extra.numa_topology)
def test_instance_extra_get_with_columns(self):
extra = db.instance_extra_get_by_instance_uuid(
self.ctxt, self.instance['uuid'],
columns=['numa_topology', 'vcpu_model'])
self.assertNotIn('pci_requests', extra)
self.assertIn('numa_topology', extra)
self.assertIn('vcpu_model', extra)
class ServiceTestCase(test.TestCase, ModelsObjectComparatorMixin):
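    """Tests for db.api.service_* methods."""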
def setUp(self):
super(ServiceTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_base_values(self):
return {
'host': 'fake_host',
'binary': 'fake_binary',
'topic': 'fake_topic',
'report_count': 3,
'disabled': False
}
def _create_service(self, values):
v = self._get_base_values()
v.update(values)
return db.service_create(self.ctxt, v)
def test_service_create(self):
service = self._create_service({})
self.assertIsNotNone(service['id'])
for key, value in self._get_base_values().iteritems():
self.assertEqual(value, service[key])
def test_service_destroy(self):
service1 = self._create_service({})
service2 = self._create_service({'host': 'fake_host2'})
db.service_destroy(self.ctxt, service1['id'])
self.assertRaises(exception.ServiceNotFound,
db.service_get, self.ctxt, service1['id'])
self._assertEqualObjects(db.service_get(self.ctxt, service2['id']),
service2, ignored_keys=['compute_node'])
def test_service_update(self):
service = self._create_service({})
new_values = {
'host': 'fake_host1',
'binary': 'fake_binary1',
'topic': 'fake_topic1',
'report_count': 4,
'disabled': True
}
db.service_update(self.ctxt, service['id'], new_values)
updated_service = db.service_get(self.ctxt, service['id'])
for key, value in new_values.iteritems():
self.assertEqual(value, updated_service[key])
def test_service_update_not_found_exception(self):
self.assertRaises(exception.ServiceNotFound,
db.service_update, self.ctxt, 100500, {})
def test_service_get(self):
service1 = self._create_service({})
self._create_service({'host': 'some_other_fake_host'})
real_service1 = db.service_get(self.ctxt, service1['id'])
self._assertEqualObjects(service1, real_service1,
ignored_keys=['compute_node'])
def test_service_get_not_found_exception(self):
self.assertRaises(exception.ServiceNotFound,
db.service_get, self.ctxt, 100500)
def test_service_get_by_host_and_topic(self):
service1 = self._create_service({'host': 'host1', 'topic': 'topic1'})
self._create_service({'host': 'host2', 'topic': 'topic2'})
real_service1 = db.service_get_by_host_and_topic(self.ctxt,
host='host1',
topic='topic1')
self._assertEqualObjects(service1, real_service1)
def test_service_get_all(self):
values = [
{'host': 'host1', 'topic': 'topic1'},
{'host': 'host2', 'topic': 'topic2'},
{'disabled': True}
]
services = [self._create_service(vals) for vals in values]
disabled_services = [services[-1]]
non_disabled_services = services[:-1]
compares = [
(services, db.service_get_all(self.ctxt)),
(disabled_services, db.service_get_all(self.ctxt, True)),
(non_disabled_services, db.service_get_all(self.ctxt, False))
]
for comp in compares:
self._assertEqualListsOfObjects(*comp)
def test_service_get_all_by_topic(self):
values = [
{'host': 'host1', 'topic': 't1'},
{'host': 'host2', 'topic': 't1'},
{'disabled': True, 'topic': 't1'},
{'host': 'host3', 'topic': 't2'}
]
services = [self._create_service(vals) for vals in values]
expected = services[:2]
real = db.service_get_all_by_topic(self.ctxt, 't1')
self._assertEqualListsOfObjects(expected, real)
def test_service_get_all_by_host(self):
values = [
{'host': 'host1', 'topic': 't11', 'binary': 'b11'},
{'host': 'host1', 'topic': 't12', 'binary': 'b12'},
{'host': 'host2', 'topic': 't1'},
{'host': 'host3', 'topic': 't1'}
]
services = [self._create_service(vals) for vals in values]
expected = services[:2]
real = db.service_get_all_by_host(self.ctxt, 'host1')
self._assertEqualListsOfObjects(expected, real)
def test_service_get_by_compute_host(self):
values = [
{'host': 'host1', 'topic': CONF.compute_topic},
{'host': 'host2', 'topic': 't1'},
{'host': 'host3', 'topic': CONF.compute_topic}
]
services = [self._create_service(vals) for vals in values]
real_service = db.service_get_by_compute_host(self.ctxt, 'host1')
self._assertEqualObjects(services[0], real_service,
ignored_keys=['compute_node'])
self.assertRaises(exception.ComputeHostNotFound,
db.service_get_by_compute_host,
self.ctxt, 'non-exists-host')
def test_service_get_by_compute_host_not_found(self):
self.assertRaises(exception.ComputeHostNotFound,
db.service_get_by_compute_host,
self.ctxt, 'non-exists-host')
def test_service_get_by_args(self):
values = [
{'host': 'host1', 'binary': 'a'},
{'host': 'host2', 'binary': 'b'}
]
services = [self._create_service(vals) for vals in values]
service1 = db.service_get_by_args(self.ctxt, 'host1', 'a')
self._assertEqualObjects(services[0], service1)
service2 = db.service_get_by_args(self.ctxt, 'host2', 'b')
self._assertEqualObjects(services[1], service2)
def test_service_get_by_args_not_found_exception(self):
self.assertRaises(exception.HostBinaryNotFound,
db.service_get_by_args,
self.ctxt, 'non-exists-host', 'a')
def test_service_binary_exists_exception(self):
db.service_create(self.ctxt, self._get_base_values())
values = self._get_base_values()
values.update({'topic': 'top1'})
self.assertRaises(exception.ServiceBinaryExists, db.service_create,
self.ctxt, values)
def test_service_topic_exists_exceptions(self):
db.service_create(self.ctxt, self._get_base_values())
values = self._get_base_values()
values.update({'binary': 'bin1'})
self.assertRaises(exception.ServiceTopicExists, db.service_create,
self.ctxt, values)
class BaseInstanceTypeTestCase(test.TestCase, ModelsObjectComparatorMixin):
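    """Base fixtures shared by the flavor (instance type) DB API tests."""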
def setUp(self):
super(BaseInstanceTypeTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.user_ctxt = context.RequestContext('user', 'user')
def _get_base_values(self):
return {
'name': 'fake_name',
'memory_mb': 512,
'vcpus': 1,
'root_gb': 10,
'ephemeral_gb': 10,
'flavorid': 'fake_flavor',
'swap': 0,
'rxtx_factor': 0.5,
'vcpu_weight': 1,
'disabled': False,
'is_public': True
}
def _create_flavor(self, values, projects=None):
v = self._get_base_values()
v.update(values)
return db.flavor_create(self.ctxt, v, projects)
class InstanceActionTestCase(test.TestCase, ModelsObjectComparatorMixin):
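    """Tests for db.api.action_* and db.api.action_event_* methods."""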
IGNORED_FIELDS = [
'id',
'created_at',
'updated_at',
'deleted_at',
'deleted'
]
def setUp(self):
super(InstanceActionTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _create_action_values(self, uuid, action='run_instance',
ctxt=None, extra=None):
if ctxt is None:
ctxt = self.ctxt
db.instance_create(ctxt, {'uuid': uuid})
values = {
'action': action,
'instance_uuid': uuid,
'request_id': ctxt.request_id,
'user_id': ctxt.user_id,
'project_id': ctxt.project_id,
'start_time': timeutils.utcnow(),
'message': 'action-message'
}
if extra is not None:
values.update(extra)
return values
def _create_event_values(self, uuid, event='schedule',
ctxt=None, extra=None):
if ctxt is None:
ctxt = self.ctxt
values = {
'event': event,
'instance_uuid': uuid,
'request_id': ctxt.request_id,
'start_time': timeutils.utcnow(),
'host': 'fake-host',
'details': 'fake-details',
}
if extra is not None:
values.update(extra)
return values
def _assertActionSaved(self, action, uuid):
"""Retrieve the action to ensure it was successfully added."""
actions = db.actions_get(self.ctxt, uuid)
self.assertEqual(1, len(actions))
self._assertEqualObjects(action, actions[0])
def _assertActionEventSaved(self, event, action_id):
# Retrieve the event to ensure it was successfully added
events = db.action_events_get(self.ctxt, action_id)
self.assertEqual(1, len(events))
self._assertEqualObjects(event, events[0],
['instance_uuid', 'request_id'])
def test_instance_action_start(self):
"""Create an instance action."""
uuid = str(stdlib_uuid.uuid4())
action_values = self._create_action_values(uuid)
action = db.action_start(self.ctxt, action_values)
ignored_keys = self.IGNORED_FIELDS + ['finish_time']
self._assertEqualObjects(action_values, action, ignored_keys)
self._assertActionSaved(action, uuid)
def test_instance_action_finish(self):
"""Create an instance action."""
uuid = str(stdlib_uuid.uuid4())
action_values = self._create_action_values(uuid)
db.action_start(self.ctxt, action_values)
action_values['finish_time'] = timeutils.utcnow()
action = db.action_finish(self.ctxt, action_values)
self._assertEqualObjects(action_values, action, self.IGNORED_FIELDS)
self._assertActionSaved(action, uuid)
def test_instance_action_finish_without_started_event(self):
"""Create an instance finish action."""
uuid = str(stdlib_uuid.uuid4())
action_values = self._create_action_values(uuid)
action_values['finish_time'] = timeutils.utcnow()
self.assertRaises(exception.InstanceActionNotFound, db.action_finish,
self.ctxt, action_values)
def test_instance_actions_get_by_instance(self):
"""Ensure we can get actions by UUID."""
uuid1 = str(stdlib_uuid.uuid4())
expected = []
action_values = self._create_action_values(uuid1)
action = db.action_start(self.ctxt, action_values)
expected.append(action)
action_values['action'] = 'resize'
action = db.action_start(self.ctxt, action_values)
expected.append(action)
# Create some extra actions
uuid2 = str(stdlib_uuid.uuid4())
ctxt2 = context.get_admin_context()
action_values = self._create_action_values(uuid2, 'reboot', ctxt2)
db.action_start(ctxt2, action_values)
db.action_start(ctxt2, action_values)
# Retrieve the action to ensure it was successfully added
actions = db.actions_get(self.ctxt, uuid1)
self._assertEqualListsOfObjects(expected, actions)
def test_instance_actions_get_are_in_order(self):
"""Ensure retrived actions are in order."""
uuid1 = str(stdlib_uuid.uuid4())
extra = {
'created_at': timeutils.utcnow()
}
action_values = self._create_action_values(uuid1, extra=extra)
action1 = db.action_start(self.ctxt, action_values)
action_values['action'] = 'delete'
action2 = db.action_start(self.ctxt, action_values)
actions = db.actions_get(self.ctxt, uuid1)
self.assertEqual(2, len(actions))
self._assertEqualOrderedListOfObjects([action2, action1], actions)
def test_instance_action_get_by_instance_and_action(self):
"""Ensure we can get an action by instance UUID and action id."""
ctxt2 = context.get_admin_context()
uuid1 = str(stdlib_uuid.uuid4())
uuid2 = str(stdlib_uuid.uuid4())
action_values = self._create_action_values(uuid1)
db.action_start(self.ctxt, action_values)
request_id = action_values['request_id']
# NOTE(rpodolyaka): ensure we use a different req id for the 2nd req
action_values['action'] = 'resize'
action_values['request_id'] = 'req-00000000-7522-4d99-7ff-111111111111'
db.action_start(self.ctxt, action_values)
action_values = self._create_action_values(uuid2, 'reboot', ctxt2)
db.action_start(ctxt2, action_values)
db.action_start(ctxt2, action_values)
action = db.action_get_by_request_id(self.ctxt, uuid1, request_id)
self.assertEqual('run_instance', action['action'])
self.assertEqual(self.ctxt.request_id, action['request_id'])
def test_instance_action_event_start(self):
"""Create an instance action event."""
uuid = str(stdlib_uuid.uuid4())
action_values = self._create_action_values(uuid)
action = db.action_start(self.ctxt, action_values)
event_values = self._create_event_values(uuid)
event = db.action_event_start(self.ctxt, event_values)
event_values['action_id'] = action['id']
ignored = self.IGNORED_FIELDS + ['finish_time', 'traceback', 'result']
self._assertEqualObjects(event_values, event, ignored)
self._assertActionEventSaved(event, action['id'])
def test_instance_action_event_start_without_action(self):
"""Create an instance action event."""
uuid = str(stdlib_uuid.uuid4())
event_values = self._create_event_values(uuid)
self.assertRaises(exception.InstanceActionNotFound,
db.action_event_start, self.ctxt, event_values)
def test_instance_action_event_finish_without_started_event(self):
"""Finish an instance action event."""
uuid = str(stdlib_uuid.uuid4())
db.action_start(self.ctxt, self._create_action_values(uuid))
event_values = {
'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
'result': 'Success'
}
event_values = self._create_event_values(uuid, extra=event_values)
self.assertRaises(exception.InstanceActionEventNotFound,
db.action_event_finish, self.ctxt, event_values)
def test_instance_action_event_finish_without_action(self):
"""Finish an instance action event."""
uuid = str(stdlib_uuid.uuid4())
event_values = {
'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
'result': 'Success'
}
event_values = self._create_event_values(uuid, extra=event_values)
self.assertRaises(exception.InstanceActionNotFound,
db.action_event_finish, self.ctxt, event_values)
def test_instance_action_event_finish_success(self):
"""Finish an instance action event."""
uuid = str(stdlib_uuid.uuid4())
action = db.action_start(self.ctxt, self._create_action_values(uuid))
db.action_event_start(self.ctxt, self._create_event_values(uuid))
event_values = {
'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
'result': 'Success'
}
event_values = self._create_event_values(uuid, extra=event_values)
event = db.action_event_finish(self.ctxt, event_values)
self._assertActionEventSaved(event, action['id'])
action = db.action_get_by_request_id(self.ctxt, uuid,
self.ctxt.request_id)
self.assertNotEqual('Error', action['message'])
def test_instance_action_event_finish_error(self):
"""Finish an instance action event with an error."""
uuid = str(stdlib_uuid.uuid4())
action = db.action_start(self.ctxt, self._create_action_values(uuid))
db.action_event_start(self.ctxt, self._create_event_values(uuid))
event_values = {
'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
'result': 'Error'
}
event_values = self._create_event_values(uuid, extra=event_values)
event = db.action_event_finish(self.ctxt, event_values)
self._assertActionEventSaved(event, action['id'])
action = db.action_get_by_request_id(self.ctxt, uuid,
self.ctxt.request_id)
self.assertEqual('Error', action['message'])
def test_instance_action_and_event_start_string_time(self):
"""Create an instance action and event with a string start_time."""
uuid = str(stdlib_uuid.uuid4())
action = db.action_start(self.ctxt, self._create_action_values(uuid))
event_values = {'start_time': timeutils.strtime(timeutils.utcnow())}
event_values = self._create_event_values(uuid, extra=event_values)
event = db.action_event_start(self.ctxt, event_values)
self._assertActionEventSaved(event, action['id'])
def test_instance_action_events_get_are_in_order(self):
"""Ensure retrived action events are in order."""
uuid1 = str(stdlib_uuid.uuid4())
action = db.action_start(self.ctxt,
self._create_action_values(uuid1))
extra1 = {
'created_at': timeutils.utcnow()
}
extra2 = {
'created_at': timeutils.utcnow() + datetime.timedelta(seconds=5)
}
event_val1 = self._create_event_values(uuid1, 'schedule', extra=extra1)
event_val2 = self._create_event_values(uuid1, 'run', extra=extra1)
event_val3 = self._create_event_values(uuid1, 'stop', extra=extra2)
event1 = db.action_event_start(self.ctxt, event_val1)
event2 = db.action_event_start(self.ctxt, event_val2)
event3 = db.action_event_start(self.ctxt, event_val3)
events = db.action_events_get(self.ctxt, action['id'])
self.assertEqual(3, len(events))
self._assertEqualOrderedListOfObjects([event3, event2, event1], events,
['instance_uuid', 'request_id'])
def test_instance_action_event_get_by_id(self):
"""Get a specific instance action event."""
ctxt2 = context.get_admin_context()
uuid1 = str(stdlib_uuid.uuid4())
uuid2 = str(stdlib_uuid.uuid4())
action = db.action_start(self.ctxt,
self._create_action_values(uuid1))
db.action_start(ctxt2,
self._create_action_values(uuid2, 'reboot', ctxt2))
event = db.action_event_start(self.ctxt,
self._create_event_values(uuid1))
event_values = self._create_event_values(uuid2, 'reboot', ctxt2)
db.action_event_start(ctxt2, event_values)
# Retrieve the event to ensure it was successfully added
saved_event = db.action_event_get_by_id(self.ctxt,
action['id'],
event['id'])
self._assertEqualObjects(event, saved_event,
['instance_uuid', 'request_id'])
class InstanceFaultTestCase(test.TestCase, ModelsObjectComparatorMixin):
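    """Tests for db.api.instance_fault_* methods."""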
def setUp(self):
super(InstanceFaultTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _create_fault_values(self, uuid, code=404):
return {
'message': 'message',
'details': 'detail',
'instance_uuid': uuid,
'code': code,
'host': 'localhost'
}
def test_instance_fault_create(self):
"""Ensure we can create an instance fault."""
uuid = str(stdlib_uuid.uuid4())
        # Ensure no faults are registered for this instance
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
self.assertEqual(0, len(faults[uuid]))
# Create a fault
fault_values = self._create_fault_values(uuid)
db.instance_create(self.ctxt, {'uuid': uuid})
fault = db.instance_fault_create(self.ctxt, fault_values)
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id']
self._assertEqualObjects(fault_values, fault, ignored_keys)
# Retrieve the fault to ensure it was successfully added
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
self.assertEqual(1, len(faults[uuid]))
self._assertEqualObjects(fault, faults[uuid][0])
def test_instance_fault_get_by_instance(self):
"""Ensure we can retrieve faults for instance."""
uuids = [str(stdlib_uuid.uuid4()), str(stdlib_uuid.uuid4())]
fault_codes = [404, 500]
expected = {}
# Create faults
for uuid in uuids:
db.instance_create(self.ctxt, {'uuid': uuid})
expected[uuid] = []
for code in fault_codes:
fault_values = self._create_fault_values(uuid, code)
fault = db.instance_fault_create(self.ctxt, fault_values)
expected[uuid].append(fault)
# Ensure faults are saved
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, uuids)
self.assertEqual(len(expected), len(faults))
for uuid in uuids:
self._assertEqualListsOfObjects(expected[uuid], faults[uuid])
def test_instance_faults_get_by_instance_uuids_no_faults(self):
uuid = str(stdlib_uuid.uuid4())
        # An empty list should be returned for each uuid when no faults exist.
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
expected = {uuid: []}
self.assertEqual(expected, faults)
def test_instance_faults_get_by_instance_uuids_no_uuids(self):
self.mox.StubOutWithMock(query.Query, 'filter')
self.mox.ReplayAll()
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [])
self.assertEqual({}, faults)
class InstanceTypeTestCase(BaseInstanceTypeTestCase):
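    """Tests for db.api.flavor_* methods."""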
def test_flavor_create(self):
flavor = self._create_flavor({})
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at', 'extra_specs']
self.assertIsNotNone(flavor['id'])
self._assertEqualObjects(flavor, self._get_base_values(),
ignored_keys)
def test_flavor_create_with_projects(self):
projects = ['fake-project1', 'fake-project2']
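        # The duplicate 'fake-project2' passed below is expected to be stored
        # only once, so the access rows should match ``projects`` exactly.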
flavor = self._create_flavor({}, projects + ['fake-project2'])
access = db.flavor_access_get_by_flavor_id(self.ctxt,
flavor['flavorid'])
self.assertEqual(projects, [x.project_id for x in access])
def test_flavor_destroy(self):
specs1 = {'a': '1', 'b': '2'}
flavor1 = self._create_flavor({'name': 'name1', 'flavorid': 'a1',
'extra_specs': specs1})
specs2 = {'c': '4', 'd': '3'}
flavor2 = self._create_flavor({'name': 'name2', 'flavorid': 'a2',
'extra_specs': specs2})
db.flavor_destroy(self.ctxt, 'name1')
self.assertRaises(exception.FlavorNotFound,
db.flavor_get, self.ctxt, flavor1['id'])
real_specs1 = db.flavor_extra_specs_get(self.ctxt, flavor1['flavorid'])
self._assertEqualObjects(real_specs1, {})
r_flavor2 = db.flavor_get(self.ctxt, flavor2['id'])
self._assertEqualObjects(flavor2, r_flavor2, 'extra_specs')
def test_flavor_destroy_not_found(self):
self.assertRaises(exception.FlavorNotFound,
db.flavor_destroy, self.ctxt, 'nonexists')
def test_flavor_create_duplicate_name(self):
self._create_flavor({})
self.assertRaises(exception.FlavorExists,
self._create_flavor,
{'flavorid': 'some_random_flavor'})
def test_flavor_create_duplicate_flavorid(self):
self._create_flavor({})
self.assertRaises(exception.FlavorIdExists,
self._create_flavor,
{'name': 'some_random_name'})
def test_flavor_create_with_extra_specs(self):
extra_specs = dict(a='abc', b='def', c='ghi')
flavor = self._create_flavor({'extra_specs': extra_specs})
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at', 'extra_specs']
self._assertEqualObjects(flavor, self._get_base_values(),
ignored_keys)
self._assertEqualObjects(extra_specs, flavor['extra_specs'])
def test_flavor_get_all(self):
# NOTE(boris-42): Remove base instance types
for it in db.flavor_get_all(self.ctxt):
db.flavor_destroy(self.ctxt, it['name'])
flavors = [
{'root_gb': 600, 'memory_mb': 100, 'disabled': True,
'is_public': True, 'name': 'a1', 'flavorid': 'f1'},
{'root_gb': 500, 'memory_mb': 200, 'disabled': True,
'is_public': True, 'name': 'a2', 'flavorid': 'f2'},
{'root_gb': 400, 'memory_mb': 300, 'disabled': False,
'is_public': True, 'name': 'a3', 'flavorid': 'f3'},
{'root_gb': 300, 'memory_mb': 400, 'disabled': False,
'is_public': False, 'name': 'a4', 'flavorid': 'f4'},
{'root_gb': 200, 'memory_mb': 500, 'disabled': True,
'is_public': False, 'name': 'a5', 'flavorid': 'f5'},
{'root_gb': 100, 'memory_mb': 600, 'disabled': True,
'is_public': False, 'name': 'a6', 'flavorid': 'f6'}
]
flavors = [self._create_flavor(it) for it in flavors]
lambda_filters = {
'min_memory_mb': lambda it, v: it['memory_mb'] >= v,
'min_root_gb': lambda it, v: it['root_gb'] >= v,
'disabled': lambda it, v: it['disabled'] == v,
'is_public': lambda it, v: (v is None or it['is_public'] == v)
}
mem_filts = [{'min_memory_mb': x} for x in [100, 350, 550, 650]]
root_filts = [{'min_root_gb': x} for x in [100, 350, 550, 650]]
disabled_filts = [{'disabled': x} for x in [True, False]]
is_public_filts = [{'is_public': x} for x in [True, False, None]]
def assert_multi_filter_flavor_get(filters=None):
if filters is None:
filters = {}
expected_it = flavors
for name, value in filters.iteritems():
filt = lambda it: lambda_filters[name](it, value)
expected_it = filter(filt, expected_it)
real_it = db.flavor_get_all(self.ctxt, filters=filters)
self._assertEqualListsOfObjects(expected_it, real_it)
# no filter
assert_multi_filter_flavor_get()
# test only with one filter
for filt in mem_filts:
assert_multi_filter_flavor_get(filt)
for filt in root_filts:
assert_multi_filter_flavor_get(filt)
for filt in disabled_filts:
assert_multi_filter_flavor_get(filt)
for filt in is_public_filts:
assert_multi_filter_flavor_get(filt)
# test all filters together
for mem in mem_filts:
for root in root_filts:
for disabled in disabled_filts:
for is_public in is_public_filts:
filts = [f.items() for f in
[mem, root, disabled, is_public]]
filts = dict(reduce(lambda x, y: x + y, filts, []))
assert_multi_filter_flavor_get(filts)
def test_flavor_get_all_limit_sort(self):
def assert_sorted_by_key_dir(sort_key, asc=True):
sort_dir = 'asc' if asc else 'desc'
results = db.flavor_get_all(self.ctxt, sort_key='name',
sort_dir=sort_dir)
# Manually sort the results as we would expect them
expected_results = sorted(results,
key=lambda item: item['name'],
reverse=(not asc))
self.assertEqual(expected_results, results)
def assert_sorted_by_key_both_dir(sort_key):
assert_sorted_by_key_dir(sort_key, True)
assert_sorted_by_key_dir(sort_key, False)
for attr in ['memory_mb', 'root_gb', 'deleted_at', 'name', 'deleted',
'created_at', 'ephemeral_gb', 'updated_at', 'disabled',
'vcpus', 'swap', 'rxtx_factor', 'is_public', 'flavorid',
'vcpu_weight', 'id']:
assert_sorted_by_key_both_dir(attr)
def test_flavor_get_all_limit(self):
limited_flavors = db.flavor_get_all(self.ctxt, limit=2)
self.assertEqual(2, len(limited_flavors))
def test_flavor_get_all_list_marker(self):
all_flavors = db.flavor_get_all(self.ctxt)
# Set the 3rd result as the marker
marker_flavorid = all_flavors[2]['flavorid']
marked_flavors = db.flavor_get_all(self.ctxt, marker=marker_flavorid)
# We expect everything /after/ the 3rd result
expected_results = all_flavors[3:]
self.assertEqual(expected_results, marked_flavors)
def test_flavor_get_all_marker_not_found(self):
self.assertRaises(exception.MarkerNotFound,
db.flavor_get_all, self.ctxt, marker='invalid')
def test_flavor_get(self):
flavors = [{'name': 'abc', 'flavorid': '123'},
{'name': 'def', 'flavorid': '456'},
{'name': 'ghi', 'flavorid': '789'}]
flavors = [self._create_flavor(t) for t in flavors]
for flavor in flavors:
flavor_by_id = db.flavor_get(self.ctxt, flavor['id'])
self._assertEqualObjects(flavor, flavor_by_id)
def test_flavor_get_non_public(self):
flavor = self._create_flavor({'name': 'abc', 'flavorid': '123',
'is_public': False})
# Admin can see it
flavor_by_id = db.flavor_get(self.ctxt, flavor['id'])
self._assertEqualObjects(flavor, flavor_by_id)
# Regular user can not
self.assertRaises(exception.FlavorNotFound, db.flavor_get,
self.user_ctxt, flavor['id'])
# Regular user can see it after being granted access
db.flavor_access_add(self.ctxt, flavor['flavorid'],
self.user_ctxt.project_id)
flavor_by_id = db.flavor_get(self.user_ctxt, flavor['id'])
self._assertEqualObjects(flavor, flavor_by_id)
def test_flavor_get_by_name(self):
flavors = [{'name': 'abc', 'flavorid': '123'},
{'name': 'def', 'flavorid': '456'},
{'name': 'ghi', 'flavorid': '789'}]
flavors = [self._create_flavor(t) for t in flavors]
for flavor in flavors:
flavor_by_name = db.flavor_get_by_name(self.ctxt, flavor['name'])
self._assertEqualObjects(flavor, flavor_by_name)
def test_flavor_get_by_name_not_found(self):
self._create_flavor({})
self.assertRaises(exception.FlavorNotFoundByName,
db.flavor_get_by_name, self.ctxt, 'nonexists')
def test_flavor_get_by_name_non_public(self):
flavor = self._create_flavor({'name': 'abc', 'flavorid': '123',
'is_public': False})
# Admin can see it
flavor_by_name = db.flavor_get_by_name(self.ctxt, flavor['name'])
self._assertEqualObjects(flavor, flavor_by_name)
# Regular user can not
self.assertRaises(exception.FlavorNotFoundByName,
db.flavor_get_by_name, self.user_ctxt,
flavor['name'])
# Regular user can see it after being granted access
db.flavor_access_add(self.ctxt, flavor['flavorid'],
self.user_ctxt.project_id)
flavor_by_name = db.flavor_get_by_name(self.user_ctxt, flavor['name'])
self._assertEqualObjects(flavor, flavor_by_name)
def test_flavor_get_by_flavor_id(self):
flavors = [{'name': 'abc', 'flavorid': '123'},
{'name': 'def', 'flavorid': '456'},
{'name': 'ghi', 'flavorid': '789'}]
flavors = [self._create_flavor(t) for t in flavors]
for flavor in flavors:
params = (self.ctxt, flavor['flavorid'])
flavor_by_flavorid = db.flavor_get_by_flavor_id(*params)
self._assertEqualObjects(flavor, flavor_by_flavorid)
def test_flavor_get_by_flavor_not_found(self):
self._create_flavor({})
self.assertRaises(exception.FlavorNotFound,
db.flavor_get_by_flavor_id,
self.ctxt, 'nonexists')
def test_flavor_get_by_flavor_id_non_public(self):
flavor = self._create_flavor({'name': 'abc', 'flavorid': '123',
'is_public': False})
# Admin can see it
flavor_by_fid = db.flavor_get_by_flavor_id(self.ctxt,
flavor['flavorid'])
self._assertEqualObjects(flavor, flavor_by_fid)
# Regular user can not
self.assertRaises(exception.FlavorNotFound,
db.flavor_get_by_flavor_id, self.user_ctxt,
flavor['flavorid'])
# Regular user can see it after being granted access
db.flavor_access_add(self.ctxt, flavor['flavorid'],
self.user_ctxt.project_id)
flavor_by_fid = db.flavor_get_by_flavor_id(self.user_ctxt,
flavor['flavorid'])
self._assertEqualObjects(flavor, flavor_by_fid)
def test_flavor_get_by_flavor_id_deleted(self):
flavor = self._create_flavor({'name': 'abc', 'flavorid': '123'})
db.flavor_destroy(self.ctxt, 'abc')
flavor_by_fid = db.flavor_get_by_flavor_id(self.ctxt,
flavor['flavorid'], read_deleted='yes')
self.assertEqual(flavor['id'], flavor_by_fid['id'])
    def test_flavor_get_by_flavor_id_deleted_and_recreate(self):
        # NOTE(wingwj): Aims to test the difference between MySQL and
        # PostgreSQL for bug 1288636
param_dict = {'name': 'abc', 'flavorid': '123'}
self._create_flavor(param_dict)
db.flavor_destroy(self.ctxt, 'abc')
# Recreate the flavor with the same params
flavor = self._create_flavor(param_dict)
flavor_by_fid = db.flavor_get_by_flavor_id(self.ctxt,
flavor['flavorid'], read_deleted='yes')
self.assertEqual(flavor['id'], flavor_by_fid['id'])
class InstanceTypeExtraSpecsTestCase(BaseInstanceTypeTestCase):
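    """Tests for db.api.flavor_extra_specs_* methods."""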
def setUp(self):
super(InstanceTypeExtraSpecsTestCase, self).setUp()
values = ({'name': 'n1', 'flavorid': 'f1',
'extra_specs': dict(a='a', b='b', c='c')},
{'name': 'n2', 'flavorid': 'f2',
'extra_specs': dict(d='d', e='e', f='f')})
# NOTE(boris-42): We have already tested flavor_create method
# with extra_specs in InstanceTypeTestCase.
self.flavors = [self._create_flavor(v) for v in values]
def test_flavor_extra_specs_get(self):
for it in self.flavors:
real_specs = db.flavor_extra_specs_get(self.ctxt, it['flavorid'])
self._assertEqualObjects(it['extra_specs'], real_specs)
def test_flavor_extra_specs_delete(self):
for it in self.flavors:
specs = it['extra_specs']
key = specs.keys()[0]
del specs[key]
db.flavor_extra_specs_delete(self.ctxt, it['flavorid'], key)
real_specs = db.flavor_extra_specs_get(self.ctxt, it['flavorid'])
self._assertEqualObjects(it['extra_specs'], real_specs)
def test_flavor_extra_specs_delete_failed(self):
for it in self.flavors:
self.assertRaises(exception.FlavorExtraSpecsNotFound,
db.flavor_extra_specs_delete,
self.ctxt, it['flavorid'], 'dummy')
def test_flavor_extra_specs_update_or_create(self):
for it in self.flavors:
current_specs = it['extra_specs']
current_specs.update(dict(b='b1', c='c1', d='d1', e='e1'))
params = (self.ctxt, it['flavorid'], current_specs)
db.flavor_extra_specs_update_or_create(*params)
real_specs = db.flavor_extra_specs_get(self.ctxt, it['flavorid'])
self._assertEqualObjects(current_specs, real_specs)
def test_flavor_extra_specs_update_or_create_flavor_not_found(self):
self.assertRaises(exception.FlavorNotFound,
db.flavor_extra_specs_update_or_create,
self.ctxt, 'nonexists', {})
def test_flavor_extra_specs_update_or_create_retry(self):
def counted():
def get_id(context, flavorid, session):
get_id.counter += 1
raise db_exc.DBDuplicateEntry
get_id.counter = 0
return get_id
get_id = counted()
self.stubs.Set(sqlalchemy_api, '_flavor_get_id_from_flavor', get_id)
self.assertRaises(exception.FlavorExtraSpecUpdateCreateFailed,
sqlalchemy_api.flavor_extra_specs_update_or_create,
self.ctxt, 1, {}, 5)
self.assertEqual(get_id.counter, 5)
class InstanceTypeAccessTestCase(BaseInstanceTypeTestCase):
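    """Tests for db.api.flavor_access_* methods."""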
def _create_flavor_access(self, flavor_id, project_id):
return db.flavor_access_add(self.ctxt, flavor_id, project_id)
def test_flavor_access_get_by_flavor_id(self):
flavors = ({'name': 'n1', 'flavorid': 'f1'},
{'name': 'n2', 'flavorid': 'f2'})
it1, it2 = tuple((self._create_flavor(v) for v in flavors))
access_it1 = [self._create_flavor_access(it1['flavorid'], 'pr1'),
self._create_flavor_access(it1['flavorid'], 'pr2')]
access_it2 = [self._create_flavor_access(it2['flavorid'], 'pr1')]
for it, access_it in zip((it1, it2), (access_it1, access_it2)):
params = (self.ctxt, it['flavorid'])
real_access_it = db.flavor_access_get_by_flavor_id(*params)
self._assertEqualListsOfObjects(access_it, real_access_it)
def test_flavor_access_get_by_flavor_id_flavor_not_found(self):
self.assertRaises(exception.FlavorNotFound,
db.flavor_get_by_flavor_id,
self.ctxt, 'nonexists')
def test_flavor_access_add(self):
flavor = self._create_flavor({'flavorid': 'f1'})
project_id = 'p1'
access = self._create_flavor_access(flavor['flavorid'], project_id)
# NOTE(boris-42): Check that flavor_access_add doesn't fail and
# returns correct value. This is enough because other
# logic is checked by other methods.
self.assertIsNotNone(access['id'])
self.assertEqual(access['instance_type_id'], flavor['id'])
self.assertEqual(access['project_id'], project_id)
def test_flavor_access_add_to_non_existing_flavor(self):
self.assertRaises(exception.FlavorNotFound,
self._create_flavor_access,
'nonexists', 'does_not_matter')
def test_flavor_access_add_duplicate_project_id_flavor(self):
flavor = self._create_flavor({'flavorid': 'f1'})
params = (flavor['flavorid'], 'p1')
self._create_flavor_access(*params)
self.assertRaises(exception.FlavorAccessExists,
self._create_flavor_access, *params)
def test_flavor_access_remove(self):
flavors = ({'name': 'n1', 'flavorid': 'f1'},
{'name': 'n2', 'flavorid': 'f2'})
it1, it2 = tuple((self._create_flavor(v) for v in flavors))
access_it1 = [self._create_flavor_access(it1['flavorid'], 'pr1'),
self._create_flavor_access(it1['flavorid'], 'pr2')]
access_it2 = [self._create_flavor_access(it2['flavorid'], 'pr1')]
db.flavor_access_remove(self.ctxt, it1['flavorid'],
access_it1[1]['project_id'])
for it, access_it in zip((it1, it2), (access_it1[:1], access_it2)):
params = (self.ctxt, it['flavorid'])
real_access_it = db.flavor_access_get_by_flavor_id(*params)
self._assertEqualListsOfObjects(access_it, real_access_it)
def test_flavor_access_remove_flavor_not_found(self):
self.assertRaises(exception.FlavorNotFound,
db.flavor_access_remove,
self.ctxt, 'nonexists', 'does_not_matter')
def test_flavor_access_remove_access_not_found(self):
flavor = self._create_flavor({'flavorid': 'f1'})
params = (flavor['flavorid'], 'p1')
self._create_flavor_access(*params)
self.assertRaises(exception.FlavorAccessNotFound,
db.flavor_access_remove,
self.ctxt, flavor['flavorid'], 'p2')
def test_flavor_access_removed_after_flavor_destroy(self):
flavor1 = self._create_flavor({'flavorid': 'f1', 'name': 'n1'})
flavor2 = self._create_flavor({'flavorid': 'f2', 'name': 'n2'})
values = [
(flavor1['flavorid'], 'p1'),
(flavor1['flavorid'], 'p2'),
(flavor2['flavorid'], 'p3')
]
for v in values:
self._create_flavor_access(*v)
db.flavor_destroy(self.ctxt, flavor1['name'])
p = (self.ctxt, flavor1['flavorid'])
self.assertEqual(0, len(db.flavor_access_get_by_flavor_id(*p)))
p = (self.ctxt, flavor2['flavorid'])
self.assertEqual(1, len(db.flavor_access_get_by_flavor_id(*p)))
db.flavor_destroy(self.ctxt, flavor2['name'])
self.assertEqual(0, len(db.flavor_access_get_by_flavor_id(*p)))
class FixedIPTestCase(BaseInstanceTypeTestCase):
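    """Tests for db.api.fixed_ip_* methods."""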
def _timeout_test(self, ctxt, timeout, multi_host):
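        """Create fixed IPs in several states so that timeout-based
        disassociation can be exercised against them.
        """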
instance = db.instance_create(ctxt, dict(host='foo'))
net = db.network_create_safe(ctxt, dict(multi_host=multi_host,
host='bar'))
old = timeout - datetime.timedelta(seconds=5)
new = timeout + datetime.timedelta(seconds=5)
# should deallocate
db.fixed_ip_create(ctxt, dict(allocated=False,
instance_uuid=instance['uuid'],
network_id=net['id'],
updated_at=old))
# still allocated
db.fixed_ip_create(ctxt, dict(allocated=True,
instance_uuid=instance['uuid'],
network_id=net['id'],
updated_at=old))
# wrong network
db.fixed_ip_create(ctxt, dict(allocated=False,
instance_uuid=instance['uuid'],
network_id=None,
updated_at=old))
# too new
db.fixed_ip_create(ctxt, dict(allocated=False,
instance_uuid=instance['uuid'],
network_id=None,
updated_at=new))
def mock_db_query_first_to_raise_data_error_exception(self):
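        """Stub Query.first so that the next call raises DBError."""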
self.mox.StubOutWithMock(query.Query, 'first')
query.Query.first().AndRaise(db_exc.DBError())
self.mox.ReplayAll()
def test_fixed_ip_disassociate_all_by_timeout_single_host(self):
now = timeutils.utcnow()
self._timeout_test(self.ctxt, now, False)
result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'foo', now)
self.assertEqual(result, 0)
result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'bar', now)
self.assertEqual(result, 1)
def test_fixed_ip_disassociate_all_by_timeout_multi_host(self):
now = timeutils.utcnow()
self._timeout_test(self.ctxt, now, True)
result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'foo', now)
self.assertEqual(result, 1)
result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'bar', now)
self.assertEqual(result, 0)
def test_fixed_ip_get_by_floating_address(self):
fixed_ip = db.fixed_ip_create(self.ctxt, {'address': '192.168.0.2'})
values = {'address': '8.7.6.5',
'fixed_ip_id': fixed_ip['id']}
floating = db.floating_ip_create(self.ctxt, values)['address']
fixed_ip_ref = db.fixed_ip_get_by_floating_address(self.ctxt, floating)
self._assertEqualObjects(fixed_ip, fixed_ip_ref)
def test_fixed_ip_get_by_host(self):
host_ips = {
'host1': ['1.1.1.1', '1.1.1.2', '1.1.1.3'],
'host2': ['1.1.1.4', '1.1.1.5'],
'host3': ['1.1.1.6']
}
for host, ips in host_ips.iteritems():
for ip in ips:
instance_uuid = self._create_instance(host=host)
db.fixed_ip_create(self.ctxt, {'address': ip})
db.fixed_ip_associate(self.ctxt, ip, instance_uuid)
for host, ips in host_ips.iteritems():
ips_on_host = map(lambda x: x['address'],
db.fixed_ip_get_by_host(self.ctxt, host))
self._assertEqualListsOfPrimitivesAsSets(ips_on_host, ips)
def test_fixed_ip_get_by_network_host_not_found_exception(self):
self.assertRaises(
exception.FixedIpNotFoundForNetworkHost,
db.fixed_ip_get_by_network_host,
self.ctxt, 1, 'ignore')
def test_fixed_ip_get_by_network_host_fixed_ip_found(self):
db.fixed_ip_create(self.ctxt, dict(network_id=1, host='host'))
fip = db.fixed_ip_get_by_network_host(self.ctxt, 1, 'host')
self.assertEqual(1, fip['network_id'])
self.assertEqual('host', fip['host'])
def _create_instance(self, **kwargs):
instance = db.instance_create(self.ctxt, kwargs)
return instance['uuid']
def test_fixed_ip_get_by_instance_fixed_ip_found(self):
instance_uuid = self._create_instance()
FIXED_IP_ADDRESS = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS))
ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
self._assertEqualListsOfPrimitivesAsSets([FIXED_IP_ADDRESS],
[ips_list[0].address])
def test_fixed_ip_get_by_instance_multiple_fixed_ips_found(self):
instance_uuid = self._create_instance()
FIXED_IP_ADDRESS_1 = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_1))
FIXED_IP_ADDRESS_2 = '192.168.1.6'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_2))
ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
self._assertEqualListsOfPrimitivesAsSets(
[FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
[ips_list[0].address, ips_list[1].address])
def test_fixed_ip_get_by_instance_inappropriate_ignored(self):
instance_uuid = self._create_instance()
FIXED_IP_ADDRESS_1 = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_1))
FIXED_IP_ADDRESS_2 = '192.168.1.6'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_2))
another_instance = db.instance_create(self.ctxt, {})
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=another_instance['uuid'], address="192.168.1.7"))
ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
self._assertEqualListsOfPrimitivesAsSets(
[FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
[ips_list[0].address, ips_list[1].address])
def test_fixed_ip_get_by_instance_not_found_exception(self):
instance_uuid = self._create_instance()
self.assertRaises(exception.FixedIpNotFoundForInstance,
db.fixed_ip_get_by_instance,
self.ctxt, instance_uuid)
def test_fixed_ips_by_virtual_interface_fixed_ip_found(self):
instance_uuid = self._create_instance()
vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
FIXED_IP_ADDRESS = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS))
ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
self._assertEqualListsOfPrimitivesAsSets([FIXED_IP_ADDRESS],
[ips_list[0].address])
def test_fixed_ips_by_virtual_interface_multiple_fixed_ips_found(self):
instance_uuid = self._create_instance()
vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
FIXED_IP_ADDRESS_1 = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_1))
FIXED_IP_ADDRESS_2 = '192.168.1.6'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_2))
ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
self._assertEqualListsOfPrimitivesAsSets(
[FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
[ips_list[0].address, ips_list[1].address])
def test_fixed_ips_by_virtual_interface_inappropriate_ignored(self):
instance_uuid = self._create_instance()
vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
FIXED_IP_ADDRESS_1 = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_1))
FIXED_IP_ADDRESS_2 = '192.168.1.6'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_2))
another_vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=another_vif.id, address="192.168.1.7"))
ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
self._assertEqualListsOfPrimitivesAsSets(
[FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
[ips_list[0].address, ips_list[1].address])
def test_fixed_ips_by_virtual_interface_no_ip_found(self):
instance_uuid = self._create_instance()
vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
self.assertEqual(0, len(ips_list))
def create_fixed_ip(self, **params):
default_params = {'address': '192.168.0.1'}
default_params.update(params)
return db.fixed_ip_create(self.ctxt, default_params)['address']
def test_fixed_ip_associate_fails_if_ip_not_in_network(self):
instance_uuid = self._create_instance()
self.assertRaises(exception.FixedIpNotFoundForNetwork,
db.fixed_ip_associate,
self.ctxt, None, instance_uuid)
def test_fixed_ip_associate_fails_if_ip_in_use(self):
instance_uuid = self._create_instance()
address = self.create_fixed_ip(instance_uuid=instance_uuid)
self.assertRaises(exception.FixedIpAlreadyInUse,
db.fixed_ip_associate,
self.ctxt, address, instance_uuid)
def test_fixed_ip_associate_succeeds(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip(network_id=network['id'])
db.fixed_ip_associate(self.ctxt, address, instance_uuid,
network_id=network['id'])
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
def test_fixed_ip_associate_succeeds_and_sets_network(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip()
db.fixed_ip_associate(self.ctxt, address, instance_uuid,
network_id=network['id'])
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
self.assertEqual(fixed_ip['network_id'], network['id'])
def test_fixed_ip_associate_succeeds_retry_on_deadlock(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip()
def fake_first():
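            # NOTE: mock_first is looked up only when fake_first runs, i.e.
            # after the mock.patch context manager below has created it, so
            # this forward reference is safe.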
if mock_first.call_count == 1:
raise db_exc.DBDeadlock()
else:
return objects.Instance(id=1, address=address, reserved=False,
instance_uuid=None, network_id=None)
with mock.patch('sqlalchemy.orm.query.Query.first',
side_effect=fake_first) as mock_first:
db.fixed_ip_associate(self.ctxt, address, instance_uuid,
network_id=network['id'])
self.assertEqual(2, mock_first.call_count)
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
self.assertEqual(fixed_ip['network_id'], network['id'])
def test_fixed_ip_associate_succeeds_retry_on_no_rows_updated(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip()
def fake_first():
if mock_first.call_count == 1:
return objects.Instance(id=2, address=address, reserved=False,
instance_uuid=None, network_id=None)
else:
return objects.Instance(id=1, address=address, reserved=False,
instance_uuid=None, network_id=None)
with mock.patch('sqlalchemy.orm.query.Query.first',
side_effect=fake_first) as mock_first:
db.fixed_ip_associate(self.ctxt, address, instance_uuid,
network_id=network['id'])
self.assertEqual(2, mock_first.call_count)
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
self.assertEqual(fixed_ip['network_id'], network['id'])
def test_fixed_ip_associate_succeeds_retry_limit_exceeded(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip()
def fake_first():
return objects.Instance(id=2, address=address, reserved=False,
instance_uuid=None, network_id=None)
with mock.patch('sqlalchemy.orm.query.Query.first',
side_effect=fake_first) as mock_first:
self.assertRaises(exception.FixedIpAssociateFailed,
db.fixed_ip_associate, self.ctxt, address,
instance_uuid, network_id=network['id'])
self.assertEqual(5, mock_first.call_count)
def test_fixed_ip_associate_ip_not_in_network_with_no_retries(self):
instance_uuid = self._create_instance()
with mock.patch('sqlalchemy.orm.query.Query.first',
return_value=None) as mock_first:
self.assertRaises(exception.FixedIpNotFoundForNetwork,
db.fixed_ip_associate,
self.ctxt, None, instance_uuid)
self.assertEqual(1, mock_first.call_count)
def test_fixed_ip_associate_pool_invalid_uuid(self):
instance_uuid = '123'
self.assertRaises(exception.InvalidUUID, db.fixed_ip_associate_pool,
self.ctxt, None, instance_uuid)
def test_fixed_ip_associate_pool_no_more_fixed_ips(self):
instance_uuid = self._create_instance()
self.assertRaises(exception.NoMoreFixedIps, db.fixed_ip_associate_pool,
self.ctxt, None, instance_uuid)
def test_fixed_ip_associate_pool_succeeds(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip(network_id=network['id'])
db.fixed_ip_associate_pool(self.ctxt, network['id'], instance_uuid)
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
def test_fixed_ip_associate_pool_succeeds_fip_ref_network_id_is_none(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
self.create_fixed_ip(network_id=None)
fixed_ip = db.fixed_ip_associate_pool(self.ctxt,
network['id'], instance_uuid)
self.assertEqual(instance_uuid, fixed_ip['instance_uuid'])
self.assertEqual(network['id'], fixed_ip['network_id'])
def test_fixed_ip_associate_pool_succeeds_retry(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip(network_id=network['id'])
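        # The first query.first() call hands back an address that is not
        # really available ('invalid'), forcing a retry; the second call
        # returns the real address.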
def fake_first():
if mock_first.call_count == 1:
return {'network_id': network['id'], 'address': 'invalid',
'instance_uuid': None, 'host': None, 'id': 1}
else:
return {'network_id': network['id'], 'address': address,
'instance_uuid': None, 'host': None, 'id': 1}
with mock.patch('sqlalchemy.orm.query.Query.first',
side_effect=fake_first) as mock_first:
db.fixed_ip_associate_pool(self.ctxt, network['id'], instance_uuid)
self.assertEqual(2, mock_first.call_count)
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(instance_uuid, fixed_ip['instance_uuid'])
def test_fixed_ip_associate_pool_retry_limit_exceeded(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
self.create_fixed_ip(network_id=network['id'])
def fake_first():
return {'network_id': network['id'], 'address': 'invalid',
'instance_uuid': None, 'host': None, 'id': 1}
with mock.patch('sqlalchemy.orm.query.Query.first',
side_effect=fake_first) as mock_first:
self.assertRaises(exception.FixedIpAssociateFailed,
db.fixed_ip_associate_pool, self.ctxt,
network['id'], instance_uuid)
self.assertEqual(5, mock_first.call_count)
def test_fixed_ip_create_same_address(self):
address = '192.168.1.5'
params = {'address': address}
db.fixed_ip_create(self.ctxt, params)
self.assertRaises(exception.FixedIpExists, db.fixed_ip_create,
self.ctxt, params)
def test_fixed_ip_create_success(self):
instance_uuid = self._create_instance()
network_id = db.network_create_safe(self.ctxt, {})['id']
param = {
'reserved': False,
'deleted': 0,
'leased': False,
'host': '127.0.0.1',
'address': '192.168.1.5',
'allocated': False,
'instance_uuid': instance_uuid,
'network_id': network_id,
'virtual_interface_id': None
}
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
fixed_ip_data = db.fixed_ip_create(self.ctxt, param)
self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
def test_fixed_ip_bulk_create_same_address(self):
address_1 = '192.168.1.5'
address_2 = '192.168.1.6'
instance_uuid = self._create_instance()
network_id_1 = db.network_create_safe(self.ctxt, {})['id']
network_id_2 = db.network_create_safe(self.ctxt, {})['id']
params = [
{'reserved': False, 'deleted': 0, 'leased': False,
'host': '127.0.0.1', 'address': address_2, 'allocated': False,
'instance_uuid': instance_uuid, 'network_id': network_id_1,
'virtual_interface_id': None},
{'reserved': False, 'deleted': 0, 'leased': False,
'host': '127.0.0.1', 'address': address_1, 'allocated': False,
'instance_uuid': instance_uuid, 'network_id': network_id_1,
'virtual_interface_id': None},
{'reserved': False, 'deleted': 0, 'leased': False,
'host': 'localhost', 'address': address_2, 'allocated': True,
'instance_uuid': instance_uuid, 'network_id': network_id_2,
'virtual_interface_id': None},
]
self.assertRaises(exception.FixedIpExists, db.fixed_ip_bulk_create,
self.ctxt, params)
        # In this case the transaction is rolled back and none of the ips
        # make it to the database.
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.fixed_ip_get_by_address, self.ctxt, address_1)
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.fixed_ip_get_by_address, self.ctxt, address_2)
def test_fixed_ip_bulk_create_success(self):
address_1 = '192.168.1.5'
address_2 = '192.168.1.6'
instance_uuid = self._create_instance()
network_id_1 = db.network_create_safe(self.ctxt, {})['id']
network_id_2 = db.network_create_safe(self.ctxt, {})['id']
params = [
{'reserved': False, 'deleted': 0, 'leased': False,
'host': '127.0.0.1', 'address': address_1, 'allocated': False,
'instance_uuid': instance_uuid, 'network_id': network_id_1,
'virtual_interface_id': None},
{'reserved': False, 'deleted': 0, 'leased': False,
'host': 'localhost', 'address': address_2, 'allocated': True,
'instance_uuid': instance_uuid, 'network_id': network_id_2,
'virtual_interface_id': None}
]
db.fixed_ip_bulk_create(self.ctxt, params)
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at',
'virtual_interface', 'network', 'floating_ips']
fixed_ip_data = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
        # The incoming data has no `id`, so we cannot use
        # _assertEqualListsOfObjects to compare the incoming data with the
        # received objects.
fixed_ip_data = sorted(fixed_ip_data, key=lambda i: i['network_id'])
params = sorted(params, key=lambda i: i['network_id'])
for param, ip in zip(params, fixed_ip_data):
self._assertEqualObjects(param, ip, ignored_keys)
def test_fixed_ip_disassociate(self):
address = '192.168.1.5'
instance_uuid = self._create_instance()
network_id = db.network_create_safe(self.ctxt, {})['id']
values = {'address': '192.168.1.5', 'instance_uuid': instance_uuid}
vif = db.virtual_interface_create(self.ctxt, values)
param = {
'reserved': False,
'deleted': 0,
'leased': False,
'host': '127.0.0.1',
'address': address,
'allocated': False,
'instance_uuid': instance_uuid,
'network_id': network_id,
'virtual_interface_id': vif['id']
}
db.fixed_ip_create(self.ctxt, param)
db.fixed_ip_disassociate(self.ctxt, address)
fixed_ip_data = db.fixed_ip_get_by_address(self.ctxt, address)
ignored_keys = ['created_at', 'id', 'deleted_at',
'updated_at', 'instance_uuid',
'virtual_interface_id']
self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
self.assertIsNone(fixed_ip_data['instance_uuid'])
self.assertIsNone(fixed_ip_data['virtual_interface_id'])
def test_fixed_ip_get_not_found_exception(self):
self.assertRaises(exception.FixedIpNotFound,
db.fixed_ip_get, self.ctxt, 0)
def test_fixed_ip_get_success2(self):
address = '192.168.1.5'
instance_uuid = self._create_instance()
network_id = db.network_create_safe(self.ctxt, {})['id']
param = {
'reserved': False,
'deleted': 0,
'leased': False,
'host': '127.0.0.1',
'address': address,
'allocated': False,
'instance_uuid': instance_uuid,
'network_id': network_id,
'virtual_interface_id': None
}
fixed_ip_id = db.fixed_ip_create(self.ctxt, param)
self.ctxt.is_admin = False
self.assertRaises(exception.Forbidden, db.fixed_ip_get,
self.ctxt, fixed_ip_id)
def test_fixed_ip_get_success(self):
address = '192.168.1.5'
instance_uuid = self._create_instance()
network_id = db.network_create_safe(self.ctxt, {})['id']
param = {
'reserved': False,
'deleted': 0,
'leased': False,
'host': '127.0.0.1',
'address': address,
'allocated': False,
'instance_uuid': instance_uuid,
'network_id': network_id,
'virtual_interface_id': None
}
db.fixed_ip_create(self.ctxt, param)
fixed_ip_id = db.fixed_ip_get_by_address(self.ctxt, address)['id']
fixed_ip_data = db.fixed_ip_get(self.ctxt, fixed_ip_id)
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
def test_fixed_ip_get_by_address(self):
instance_uuid = self._create_instance()
db.fixed_ip_create(self.ctxt, {'address': '1.2.3.4',
'instance_uuid': instance_uuid,
})
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, '1.2.3.4',
columns_to_join=['instance'])
self.assertIn('instance', fixed_ip.__dict__)
self.assertEqual(instance_uuid, fixed_ip.instance.uuid)
def test_fixed_ip_update_not_found_for_address(self):
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.fixed_ip_update, self.ctxt,
'192.168.1.5', {})
def test_fixed_ip_update(self):
instance_uuid_1 = self._create_instance()
instance_uuid_2 = self._create_instance()
network_id_1 = db.network_create_safe(self.ctxt, {})['id']
network_id_2 = db.network_create_safe(self.ctxt, {})['id']
param_1 = {
'reserved': True, 'deleted': 0, 'leased': True,
'host': '192.168.133.1', 'address': '10.0.0.2',
'allocated': True, 'instance_uuid': instance_uuid_1,
'network_id': network_id_1, 'virtual_interface_id': '123',
}
param_2 = {
'reserved': False, 'deleted': 0, 'leased': False,
'host': '127.0.0.1', 'address': '10.0.0.3', 'allocated': False,
'instance_uuid': instance_uuid_2, 'network_id': network_id_2,
'virtual_interface_id': None
}
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
fixed_ip_addr = db.fixed_ip_create(self.ctxt, param_1)['address']
db.fixed_ip_update(self.ctxt, fixed_ip_addr, param_2)
fixed_ip_after_update = db.fixed_ip_get_by_address(self.ctxt,
param_2['address'])
self._assertEqualObjects(param_2, fixed_ip_after_update, ignored_keys)
class FloatingIpTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(FloatingIpTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_base_values(self):
return {
'address': '1.1.1.1',
'fixed_ip_id': None,
'project_id': 'fake_project',
'host': 'fake_host',
'auto_assigned': False,
'pool': 'fake_pool',
'interface': 'fake_interface',
}
def mock_db_query_first_to_raise_data_error_exception(self):
self.mox.StubOutWithMock(query.Query, 'first')
query.Query.first().AndRaise(db_exc.DBError())
self.mox.ReplayAll()
def _create_floating_ip(self, values):
if not values:
values = {}
vals = self._get_base_values()
vals.update(values)
return db.floating_ip_create(self.ctxt, vals)
def test_floating_ip_get(self):
values = [{'address': '0.0.0.0'}, {'address': '1.1.1.1'}]
floating_ips = [self._create_floating_ip(val) for val in values]
for floating_ip in floating_ips:
real_floating_ip = db.floating_ip_get(self.ctxt, floating_ip['id'])
self._assertEqualObjects(floating_ip, real_floating_ip,
ignored_keys=['fixed_ip'])
def test_floating_ip_get_not_found(self):
self.assertRaises(exception.FloatingIpNotFound,
db.floating_ip_get, self.ctxt, 100500)
def test_floating_ip_get_with_long_id_not_found(self):
self.mock_db_query_first_to_raise_data_error_exception()
self.assertRaises(exception.InvalidID,
db.floating_ip_get, self.ctxt, 123456789101112)
def test_floating_ip_get_pools(self):
values = [
{'address': '0.0.0.0', 'pool': 'abc'},
{'address': '1.1.1.1', 'pool': 'abc'},
{'address': '2.2.2.2', 'pool': 'def'},
{'address': '3.3.3.3', 'pool': 'ghi'},
]
for val in values:
self._create_floating_ip(val)
expected_pools = [{'name': x}
for x in set(map(lambda x: x['pool'], values))]
real_pools = db.floating_ip_get_pools(self.ctxt)
self._assertEqualListsOfPrimitivesAsSets(real_pools, expected_pools)
def test_floating_ip_allocate_address(self):
pools = {
'pool1': ['0.0.0.0', '1.1.1.1'],
'pool2': ['2.2.2.2'],
'pool3': ['3.3.3.3', '4.4.4.4', '5.5.5.5']
}
for pool, addresses in pools.iteritems():
for address in addresses:
vals = {'pool': pool, 'address': address, 'project_id': None}
self._create_floating_ip(vals)
project_id = self._get_base_values()['project_id']
for pool, addresses in pools.iteritems():
alloc_addrs = []
for i in addresses:
float_addr = db.floating_ip_allocate_address(self.ctxt,
project_id, pool)
alloc_addrs.append(float_addr)
self._assertEqualListsOfPrimitivesAsSets(alloc_addrs, addresses)
def test_floating_ip_allocate_auto_assigned(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
float_ips = []
for i in range(0, 2):
float_ips.append(self._create_floating_ip(
{"address": addresses[i]}))
for i in range(2, 4):
float_ips.append(self._create_floating_ip({"address": addresses[i],
"auto_assigned": True}))
for i in range(0, 2):
float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
self.assertFalse(float_ip.auto_assigned)
for i in range(2, 4):
float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
self.assertTrue(float_ip.auto_assigned)
def test_floating_ip_allocate_address_no_more_floating_ips(self):
self.assertRaises(exception.NoMoreFloatingIps,
db.floating_ip_allocate_address,
self.ctxt, 'any_project_id', 'no_such_pool')
def test_floating_ip_allocate_not_authorized(self):
ctxt = context.RequestContext(user_id='a', project_id='abc',
is_admin=False)
self.assertRaises(exception.Forbidden,
db.floating_ip_allocate_address,
ctxt, 'other_project_id', 'any_pool')
def test_floating_ip_allocate_address_succeeds_retry(self):
pool = 'pool0'
address = '0.0.0.0'
vals = {'pool': pool, 'address': address, 'project_id': None}
floating_ip = self._create_floating_ip(vals)
project_id = self._get_base_values()['project_id']
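        # The first query.first() call returns a row whose id will not match
        # anything ('invalid_id'), so allocation retries and succeeds on the
        # second attempt.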
def fake_first():
if mock_first.call_count == 1:
return {'pool': pool, 'project_id': None, 'fixed_ip_id': None,
'address': address, 'id': 'invalid_id'}
else:
return {'pool': pool, 'project_id': None, 'fixed_ip_id': None,
'address': address, 'id': 1}
with mock.patch('sqlalchemy.orm.query.Query.first',
side_effect=fake_first) as mock_first:
float_addr = db.floating_ip_allocate_address(self.ctxt,
project_id, pool)
self.assertEqual(address, float_addr)
self.assertEqual(2, mock_first.call_count)
float_ip = db.floating_ip_get(self.ctxt, floating_ip.id)
self.assertEqual(project_id, float_ip['project_id'])
def test_floating_ip_allocate_address_retry_limit_exceeded(self):
pool = 'pool0'
address = '0.0.0.0'
vals = {'pool': pool, 'address': address, 'project_id': None}
self._create_floating_ip(vals)
project_id = self._get_base_values()['project_id']
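        # Every query.first() call returns an unusable row, so allocation
        # keeps failing until the retry limit (five calls, asserted below)
        # is hit.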
def fake_first():
return {'pool': pool, 'project_id': None, 'fixed_ip_id': None,
'address': address, 'id': 'invalid_id'}
with mock.patch('sqlalchemy.orm.query.Query.first',
side_effect=fake_first) as mock_first:
self.assertRaises(exception.FloatingIpAllocateFailed,
db.floating_ip_allocate_address, self.ctxt,
project_id, pool)
self.assertEqual(5, mock_first.call_count)
def test_floating_ip_allocate_address_no_more_ips_with_no_retries(self):
with mock.patch('sqlalchemy.orm.query.Query.first',
return_value=None) as mock_first:
self.assertRaises(exception.NoMoreFloatingIps,
db.floating_ip_allocate_address,
self.ctxt, 'any_project_id', 'no_such_pool')
self.assertEqual(1, mock_first.call_count)
def _get_existing_ips(self):
return [ip['address'] for ip in db.floating_ip_get_all(self.ctxt)]
def test_floating_ip_bulk_create(self):
expected_ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
result = db.floating_ip_bulk_create(self.ctxt,
map(lambda x: {'address': x}, expected_ips),
want_result=False)
self.assertIsNone(result)
self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(),
expected_ips)
def test_floating_ip_bulk_create_duplicate(self):
ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
prepare_ips = lambda x: {'address': x}
result = db.floating_ip_bulk_create(self.ctxt, map(prepare_ips, ips))
self.assertEqual(ips, [ip.address for ip in result])
self.assertRaises(exception.FloatingIpExists,
db.floating_ip_bulk_create,
self.ctxt, map(prepare_ips, ['1.1.1.5', '1.1.1.4']),
want_result=False)
self.assertRaises(exception.FloatingIpNotFoundForAddress,
db.floating_ip_get_by_address,
self.ctxt, '1.1.1.5')
def test_floating_ip_bulk_destroy(self):
ips_for_delete = []
ips_for_non_delete = []
def create_ips(i, j):
return [{'address': '1.1.%s.%s' % (i, k)} for k in range(1, j + 1)]
        # NOTE(boris-42): Create more than 256 ips to check that
        # _ip_range_splitter works properly.
for i in range(1, 3):
ips_for_delete.extend(create_ips(i, 255))
ips_for_non_delete.extend(create_ips(3, 255))
result = db.floating_ip_bulk_create(self.ctxt,
ips_for_delete + ips_for_non_delete,
want_result=False)
self.assertIsNone(result)
non_bulk_ips_for_delete = create_ips(4, 3)
non_bulk_ips_for_non_delete = create_ips(5, 3)
non_bulk_ips = non_bulk_ips_for_delete + non_bulk_ips_for_non_delete
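        # Reserve and commit quota for the six individually created ips so
        # the usage counters can be checked: in_use should drop from 6 to 3
        # once the three project-owned ips are bulk destroyed.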
project_id = 'fake_project'
reservations = quota.QUOTAS.reserve(self.ctxt,
floating_ips=len(non_bulk_ips),
project_id=project_id)
for dct in non_bulk_ips:
self._create_floating_ip(dct)
quota.QUOTAS.commit(self.ctxt, reservations, project_id=project_id)
self.assertEqual(db.quota_usage_get_all_by_project(
self.ctxt, project_id),
{'project_id': project_id,
'floating_ips': {'in_use': 6, 'reserved': 0}})
ips_for_delete.extend(non_bulk_ips_for_delete)
ips_for_non_delete.extend(non_bulk_ips_for_non_delete)
db.floating_ip_bulk_destroy(self.ctxt, ips_for_delete)
expected_addresses = map(lambda x: x['address'], ips_for_non_delete)
self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(),
expected_addresses)
self.assertEqual(db.quota_usage_get_all_by_project(
self.ctxt, project_id),
{'project_id': project_id,
'floating_ips': {'in_use': 3, 'reserved': 0}})
def test_floating_ip_create(self):
floating_ip = self._create_floating_ip({})
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self.assertIsNotNone(floating_ip['id'])
self._assertEqualObjects(floating_ip, self._get_base_values(),
ignored_keys)
def test_floating_ip_create_duplicate(self):
self._create_floating_ip({})
self.assertRaises(exception.FloatingIpExists,
self._create_floating_ip, {})
def _create_fixed_ip(self, params):
default_params = {'address': '192.168.0.1'}
default_params.update(params)
return db.fixed_ip_create(self.ctxt, default_params)['address']
def test_floating_ip_fixed_ip_associate(self):
float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3']
float_ips = [self._create_floating_ip({'address': address})
for address in float_addresses]
fixed_addrs = [self._create_fixed_ip({'address': address})
for address in fixed_addresses]
for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
fixed_ip = db.floating_ip_fixed_ip_associate(self.ctxt,
float_ip.address,
fixed_addr, 'host')
self.assertEqual(fixed_ip.address, fixed_addr)
updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
self.assertEqual(fixed_ip.id, updated_float_ip.fixed_ip_id)
self.assertEqual('host', updated_float_ip.host)
        # Test that associating an already associated float_ip returns None
result = db.floating_ip_fixed_ip_associate(self.ctxt,
float_addresses[0],
fixed_addresses[0], 'host')
self.assertIsNone(result)
def test_floating_ip_fixed_ip_associate_float_ip_not_found(self):
self.assertRaises(exception.FloatingIpNotFoundForAddress,
db.floating_ip_fixed_ip_associate,
self.ctxt, '10.10.10.10', 'some', 'some')
def test_floating_ip_deallocate(self):
values = {'address': '1.1.1.1', 'project_id': 'fake', 'host': 'fake'}
float_ip = self._create_floating_ip(values)
rows_updated = db.floating_ip_deallocate(self.ctxt, float_ip.address)
self.assertEqual(1, rows_updated)
updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
self.assertIsNone(updated_float_ip.project_id)
self.assertIsNone(updated_float_ip.host)
self.assertFalse(updated_float_ip.auto_assigned)
def test_floating_ip_deallocate_address_not_found(self):
self.assertEqual(0, db.floating_ip_deallocate(self.ctxt, '2.2.2.2'))
def test_floating_ip_destroy(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_ips = [self._create_floating_ip({'address': addr})
for addr in addresses]
expected_len = len(addresses)
for float_ip in float_ips:
db.floating_ip_destroy(self.ctxt, float_ip.address)
self.assertRaises(exception.FloatingIpNotFound,
db.floating_ip_get, self.ctxt, float_ip.id)
expected_len -= 1
if expected_len > 0:
self.assertEqual(expected_len,
len(db.floating_ip_get_all(self.ctxt)))
else:
self.assertRaises(exception.NoFloatingIpsDefined,
db.floating_ip_get_all, self.ctxt)
def test_floating_ip_disassociate(self):
float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3']
float_ips = [self._create_floating_ip({'address': address})
for address in float_addresses]
fixed_addrs = [self._create_fixed_ip({'address': address})
for address in fixed_addresses]
for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
db.floating_ip_fixed_ip_associate(self.ctxt,
float_ip.address,
fixed_addr, 'host')
for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
fixed = db.floating_ip_disassociate(self.ctxt, float_ip.address)
self.assertEqual(fixed.address, fixed_addr)
updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
self.assertIsNone(updated_float_ip.fixed_ip_id)
self.assertIsNone(updated_float_ip.host)
def test_floating_ip_disassociate_not_found(self):
self.assertRaises(exception.FloatingIpNotFoundForAddress,
db.floating_ip_disassociate, self.ctxt,
'11.11.11.11')
def test_floating_ip_get_all(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_ips = [self._create_floating_ip({'address': addr})
for addr in addresses]
self._assertEqualListsOfObjects(float_ips,
db.floating_ip_get_all(self.ctxt),
ignored_keys="fixed_ip")
def test_floating_ip_get_all_associated(self):
instance = db.instance_create(self.ctxt, {'uuid': 'fake'})
float_ip = self._create_floating_ip({'address': '1.1.1.1'})
fixed_ip = self._create_fixed_ip({'address': '2.2.2.2',
'instance_uuid': instance.uuid})
db.floating_ip_fixed_ip_associate(self.ctxt,
float_ip.address,
fixed_ip,
'host')
float_ips = db.floating_ip_get_all(self.ctxt)
self.assertEqual(1, len(float_ips))
self.assertEqual(float_ip.address, float_ips[0].address)
self.assertEqual(fixed_ip, float_ips[0].fixed_ip.address)
self.assertEqual(instance.uuid, float_ips[0].fixed_ip.instance_uuid)
def test_floating_ip_get_all_not_found(self):
self.assertRaises(exception.NoFloatingIpsDefined,
db.floating_ip_get_all, self.ctxt)
def test_floating_ip_get_all_by_host(self):
hosts = {
'host1': ['1.1.1.1', '1.1.1.2'],
'host2': ['2.1.1.1', '2.1.1.2'],
'host3': ['3.1.1.1', '3.1.1.2', '3.1.1.3']
}
hosts_with_float_ips = {}
for host, addresses in hosts.iteritems():
hosts_with_float_ips[host] = []
for address in addresses:
float_ip = self._create_floating_ip({'host': host,
'address': address})
hosts_with_float_ips[host].append(float_ip)
for host, float_ips in hosts_with_float_ips.iteritems():
real_float_ips = db.floating_ip_get_all_by_host(self.ctxt, host)
self._assertEqualListsOfObjects(float_ips, real_float_ips,
ignored_keys="fixed_ip")
def test_floating_ip_get_all_by_host_not_found(self):
self.assertRaises(exception.FloatingIpNotFoundForHost,
db.floating_ip_get_all_by_host,
self.ctxt, 'non_exists_host')
def test_floating_ip_get_all_by_project(self):
projects = {
'pr1': ['1.1.1.1', '1.1.1.2'],
'pr2': ['2.1.1.1', '2.1.1.2'],
'pr3': ['3.1.1.1', '3.1.1.2', '3.1.1.3']
}
projects_with_float_ips = {}
for project_id, addresses in projects.iteritems():
projects_with_float_ips[project_id] = []
for address in addresses:
float_ip = self._create_floating_ip({'project_id': project_id,
'address': address})
projects_with_float_ips[project_id].append(float_ip)
for project_id, float_ips in projects_with_float_ips.iteritems():
real_float_ips = db.floating_ip_get_all_by_project(self.ctxt,
project_id)
self._assertEqualListsOfObjects(float_ips, real_float_ips,
ignored_keys='fixed_ip')
def test_floating_ip_get_all_by_project_not_authorized(self):
ctxt = context.RequestContext(user_id='a', project_id='abc',
is_admin=False)
self.assertRaises(exception.Forbidden,
db.floating_ip_get_all_by_project,
ctxt, 'other_project')
def test_floating_ip_get_by_address(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_ips = [self._create_floating_ip({'address': addr})
for addr in addresses]
for float_ip in float_ips:
real_float_ip = db.floating_ip_get_by_address(self.ctxt,
float_ip.address)
self._assertEqualObjects(float_ip, real_float_ip,
ignored_keys='fixed_ip')
def test_floating_ip_get_by_address_not_found(self):
self.assertRaises(exception.FloatingIpNotFoundForAddress,
db.floating_ip_get_by_address,
self.ctxt, '20.20.20.20')
def test_floating_ip_get_by_invalid_address(self):
self.mock_db_query_first_to_raise_data_error_exception()
self.assertRaises(exception.InvalidIpAddressError,
db.floating_ip_get_by_address,
self.ctxt, 'non_exists_host')
def test_floating_ip_get_by_fixed_address(self):
fixed_float = [
('1.1.1.1', '2.2.2.1'),
('1.1.1.2', '2.2.2.2'),
('1.1.1.3', '2.2.2.3')
]
for fixed_addr, float_addr in fixed_float:
self._create_floating_ip({'address': float_addr})
self._create_fixed_ip({'address': fixed_addr})
db.floating_ip_fixed_ip_associate(self.ctxt, float_addr,
fixed_addr, 'some_host')
for fixed_addr, float_addr in fixed_float:
float_ip = db.floating_ip_get_by_fixed_address(self.ctxt,
fixed_addr)
self.assertEqual(float_addr, float_ip[0]['address'])
def test_floating_ip_get_by_fixed_ip_id(self):
fixed_float = [
('1.1.1.1', '2.2.2.1'),
('1.1.1.2', '2.2.2.2'),
('1.1.1.3', '2.2.2.3')
]
for fixed_addr, float_addr in fixed_float:
self._create_floating_ip({'address': float_addr})
self._create_fixed_ip({'address': fixed_addr})
db.floating_ip_fixed_ip_associate(self.ctxt, float_addr,
fixed_addr, 'some_host')
for fixed_addr, float_addr in fixed_float:
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, fixed_addr)
float_ip = db.floating_ip_get_by_fixed_ip_id(self.ctxt,
fixed_ip['id'])
self.assertEqual(float_addr, float_ip[0]['address'])
def test_floating_ip_update(self):
float_ip = self._create_floating_ip({})
values = {
'project_id': 'some_pr',
'host': 'some_host',
'auto_assigned': True,
'interface': 'some_interface',
'pool': 'some_pool'
}
floating_ref = db.floating_ip_update(self.ctxt, float_ip['address'],
values)
self.assertIsNotNone(floating_ref)
updated_float_ip = db.floating_ip_get(self.ctxt, float_ip['id'])
self._assertEqualObjects(updated_float_ip, values,
ignored_keys=['id', 'address', 'updated_at',
'deleted_at', 'created_at',
'deleted', 'fixed_ip_id',
'fixed_ip'])
def test_floating_ip_update_to_duplicate(self):
float_ip1 = self._create_floating_ip({'address': '1.1.1.1'})
float_ip2 = self._create_floating_ip({'address': '1.1.1.2'})
self.assertRaises(exception.FloatingIpExists,
db.floating_ip_update,
self.ctxt, float_ip2['address'],
{'address': float_ip1['address']})
class InstanceDestroyConstraints(test.TestCase):
def test_destroy_with_equal_any_constraint_met_single_value(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'task_state': 'deleting'})
constraint = db.constraint(task_state=db.equal_any('deleting'))
db.instance_destroy(ctx, instance['uuid'], constraint)
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
ctx, instance['uuid'])
def test_destroy_with_equal_any_constraint_met(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'task_state': 'deleting'})
constraint = db.constraint(task_state=db.equal_any('deleting',
'error'))
db.instance_destroy(ctx, instance['uuid'], constraint)
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
ctx, instance['uuid'])
def test_destroy_with_equal_any_constraint_not_met(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'vm_state': 'resize'})
constraint = db.constraint(vm_state=db.equal_any('active', 'error'))
self.assertRaises(exception.ConstraintNotMet, db.instance_destroy,
ctx, instance['uuid'], constraint)
instance = db.instance_get_by_uuid(ctx, instance['uuid'])
self.assertFalse(instance['deleted'])
def test_destroy_with_not_equal_constraint_met(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'task_state': 'deleting'})
constraint = db.constraint(task_state=db.not_equal('error', 'resize'))
db.instance_destroy(ctx, instance['uuid'], constraint)
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
ctx, instance['uuid'])
def test_destroy_with_not_equal_constraint_not_met(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'vm_state': 'active'})
constraint = db.constraint(vm_state=db.not_equal('active', 'error'))
self.assertRaises(exception.ConstraintNotMet, db.instance_destroy,
ctx, instance['uuid'], constraint)
instance = db.instance_get_by_uuid(ctx, instance['uuid'])
self.assertFalse(instance['deleted'])
class VolumeUsageDBApiTestCase(test.TestCase):
def setUp(self):
super(VolumeUsageDBApiTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.useFixture(test.TimeOverride())
def test_vol_usage_update_no_totals_update(self):
ctxt = context.get_admin_context()
now = timeutils.utcnow()
timeutils.set_time_override(now)
start_time = now - datetime.timedelta(seconds=10)
expected_vol_usages = {
u'1': {'volume_id': u'1',
'instance_uuid': 'fake-instance-uuid1',
'project_id': 'fake-project-uuid1',
'user_id': 'fake-user-uuid1',
'curr_reads': 1000,
'curr_read_bytes': 2000,
'curr_writes': 3000,
'curr_write_bytes': 4000,
'curr_last_refreshed': now,
'tot_reads': 0,
'tot_read_bytes': 0,
'tot_writes': 0,
'tot_write_bytes': 0,
'tot_last_refreshed': None},
u'2': {'volume_id': u'2',
'instance_uuid': 'fake-instance-uuid2',
'project_id': 'fake-project-uuid2',
'user_id': 'fake-user-uuid2',
'curr_reads': 100,
'curr_read_bytes': 200,
'curr_writes': 300,
'curr_write_bytes': 400,
'tot_reads': 0,
'tot_read_bytes': 0,
'tot_writes': 0,
'tot_write_bytes': 0,
'tot_last_refreshed': None}
}
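        # Compare only the keys listed in the expected dict.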
def _compare(vol_usage, expected):
for key, value in expected.items():
self.assertEqual(vol_usage[key], value)
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
self.assertEqual(len(vol_usages), 0)
db.vol_usage_update(ctxt, u'1', rd_req=10, rd_bytes=20,
wr_req=30, wr_bytes=40,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
user_id='fake-user-uuid1',
availability_zone='fake-az')
db.vol_usage_update(ctxt, u'2', rd_req=100, rd_bytes=200,
wr_req=300, wr_bytes=400,
instance_id='fake-instance-uuid2',
project_id='fake-project-uuid2',
user_id='fake-user-uuid2',
availability_zone='fake-az')
db.vol_usage_update(ctxt, u'1', rd_req=1000, rd_bytes=2000,
wr_req=3000, wr_bytes=4000,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
user_id='fake-user-uuid1',
availability_zone='fake-az')
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
self.assertEqual(len(vol_usages), 2)
for usage in vol_usages:
_compare(usage, expected_vol_usages[usage.volume_id])
def test_vol_usage_update_totals_update(self):
ctxt = context.get_admin_context()
now = datetime.datetime(1, 1, 1, 1, 0, 0)
start_time = now - datetime.timedelta(seconds=10)
now1 = now + datetime.timedelta(minutes=1)
now2 = now + datetime.timedelta(minutes=2)
now3 = now + datetime.timedelta(minutes=3)
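        # Updates without update_totals only bump the curr_* counters;
        # updates with update_totals=True fold curr_* into tot_* and reset
        # curr_* back to zero, which is what the assertions below verify.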
timeutils.set_time_override(now)
db.vol_usage_update(ctxt, u'1', rd_req=100, rd_bytes=200,
wr_req=300, wr_bytes=400,
instance_id='fake-instance-uuid',
project_id='fake-project-uuid',
user_id='fake-user-uuid',
availability_zone='fake-az')
current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
self.assertEqual(current_usage['tot_reads'], 0)
self.assertEqual(current_usage['curr_reads'], 100)
timeutils.set_time_override(now1)
db.vol_usage_update(ctxt, u'1', rd_req=200, rd_bytes=300,
wr_req=400, wr_bytes=500,
instance_id='fake-instance-uuid',
project_id='fake-project-uuid',
user_id='fake-user-uuid',
availability_zone='fake-az',
update_totals=True)
current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
self.assertEqual(current_usage['tot_reads'], 200)
self.assertEqual(current_usage['curr_reads'], 0)
timeutils.set_time_override(now2)
db.vol_usage_update(ctxt, u'1', rd_req=300, rd_bytes=400,
wr_req=500, wr_bytes=600,
instance_id='fake-instance-uuid',
project_id='fake-project-uuid',
availability_zone='fake-az',
user_id='fake-user-uuid')
current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
self.assertEqual(current_usage['tot_reads'], 200)
self.assertEqual(current_usage['curr_reads'], 300)
timeutils.set_time_override(now3)
db.vol_usage_update(ctxt, u'1', rd_req=400, rd_bytes=500,
wr_req=600, wr_bytes=700,
instance_id='fake-instance-uuid',
project_id='fake-project-uuid',
user_id='fake-user-uuid',
availability_zone='fake-az',
update_totals=True)
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
expected_vol_usages = {'volume_id': u'1',
'project_id': 'fake-project-uuid',
'user_id': 'fake-user-uuid',
'instance_uuid': 'fake-instance-uuid',
'availability_zone': 'fake-az',
'tot_reads': 600,
'tot_read_bytes': 800,
'tot_writes': 1000,
'tot_write_bytes': 1200,
'tot_last_refreshed': now3,
'curr_reads': 0,
'curr_read_bytes': 0,
'curr_writes': 0,
'curr_write_bytes': 0,
'curr_last_refreshed': now2}
self.assertEqual(1, len(vol_usages))
for key, value in expected_vol_usages.items():
self.assertEqual(vol_usages[0][key], value, key)
def test_vol_usage_update_when_blockdevicestats_reset(self):
ctxt = context.get_admin_context()
now = timeutils.utcnow()
start_time = now - datetime.timedelta(seconds=10)
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
self.assertEqual(len(vol_usages), 0)
db.vol_usage_update(ctxt, u'1',
rd_req=10000, rd_bytes=20000,
wr_req=30000, wr_bytes=40000,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1')
        # The instance rebooted or crashed; block device stats were reset
        # and are now lower than the previous values.
db.vol_usage_update(ctxt, u'1',
rd_req=100, rd_bytes=200,
wr_req=300, wr_bytes=400,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1')
db.vol_usage_update(ctxt, u'1',
rd_req=200, rd_bytes=300,
wr_req=400, wr_bytes=500,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1')
vol_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
expected_vol_usage = {'volume_id': u'1',
'instance_uuid': 'fake-instance-uuid1',
'project_id': 'fake-project-uuid1',
'availability_zone': 'fake-az',
'user_id': 'fake-user-uuid1',
'curr_reads': 200,
'curr_read_bytes': 300,
'curr_writes': 400,
'curr_write_bytes': 500,
'tot_reads': 10000,
'tot_read_bytes': 20000,
'tot_writes': 30000,
'tot_write_bytes': 40000}
for key, value in expected_vol_usage.items():
self.assertEqual(vol_usage[key], value, key)
def test_vol_usage_update_totals_update_when_blockdevicestats_reset(self):
        # This is unlikely to happen, but could occur when a volume is
        # detached right after an instance has rebooted / recovered and
        # before the system has polled and updated the volume usage cache
        # table.
ctxt = context.get_admin_context()
now = timeutils.utcnow()
start_time = now - datetime.timedelta(seconds=10)
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
self.assertEqual(len(vol_usages), 0)
db.vol_usage_update(ctxt, u'1',
rd_req=10000, rd_bytes=20000,
wr_req=30000, wr_bytes=40000,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1')
        # The instance rebooted or crashed; block device stats were reset
        # and are now lower than the previous values.
db.vol_usage_update(ctxt, u'1',
rd_req=100, rd_bytes=200,
wr_req=300, wr_bytes=400,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1',
update_totals=True)
vol_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
expected_vol_usage = {'volume_id': u'1',
'instance_uuid': 'fake-instance-uuid1',
'project_id': 'fake-project-uuid1',
'availability_zone': 'fake-az',
'user_id': 'fake-user-uuid1',
'curr_reads': 0,
'curr_read_bytes': 0,
'curr_writes': 0,
'curr_write_bytes': 0,
'tot_reads': 10100,
'tot_read_bytes': 20200,
'tot_writes': 30300,
'tot_write_bytes': 40400}
for key, value in expected_vol_usage.items():
self.assertEqual(vol_usage[key], value, key)
class TaskLogTestCase(test.TestCase):
def setUp(self):
super(TaskLogTestCase, self).setUp()
self.context = context.get_admin_context()
now = timeutils.utcnow()
self.begin = now - datetime.timedelta(seconds=10)
self.end = now - datetime.timedelta(seconds=5)
self.task_name = 'fake-task-name'
self.host = 'fake-host'
self.message = 'Fake task message'
db.task_log_begin_task(self.context, self.task_name, self.begin,
self.end, self.host, message=self.message)
def test_task_log_get(self):
result = db.task_log_get(self.context, self.task_name, self.begin,
self.end, self.host)
self.assertEqual(result['task_name'], self.task_name)
self.assertEqual(result['period_beginning'], self.begin)
self.assertEqual(result['period_ending'], self.end)
self.assertEqual(result['host'], self.host)
self.assertEqual(result['message'], self.message)
def test_task_log_get_all(self):
result = db.task_log_get_all(self.context, self.task_name, self.begin,
self.end, host=self.host)
self.assertEqual(len(result), 1)
result = db.task_log_get_all(self.context, self.task_name, self.begin,
self.end, host=self.host, state='')
self.assertEqual(len(result), 0)
def test_task_log_begin_task(self):
db.task_log_begin_task(self.context, 'fake', self.begin,
self.end, self.host, task_items=42,
message=self.message)
result = db.task_log_get(self.context, 'fake', self.begin,
self.end, self.host)
self.assertEqual(result['task_name'], 'fake')
def test_task_log_begin_task_duplicate(self):
params = (self.context, 'fake', self.begin, self.end, self.host)
db.task_log_begin_task(*params, message=self.message)
self.assertRaises(exception.TaskAlreadyRunning,
db.task_log_begin_task,
*params, message=self.message)
def test_task_log_end_task(self):
errors = 1
db.task_log_end_task(self.context, self.task_name, self.begin,
self.end, self.host, errors, message=self.message)
result = db.task_log_get(self.context, self.task_name, self.begin,
self.end, self.host)
self.assertEqual(result['errors'], 1)
def test_task_log_end_task_task_not_running(self):
self.assertRaises(exception.TaskNotRunning,
db.task_log_end_task, self.context, 'nonexistent',
self.begin, self.end, self.host, 42,
message=self.message)
class BlockDeviceMappingTestCase(test.TestCase):
def setUp(self):
super(BlockDeviceMappingTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.instance = db.instance_create(self.ctxt, {})
def _create_bdm(self, values):
values.setdefault('instance_uuid', self.instance['uuid'])
values.setdefault('device_name', 'fake_device')
values.setdefault('source_type', 'volume')
values.setdefault('destination_type', 'volume')
block_dev = block_device.BlockDeviceDict(values)
db.block_device_mapping_create(self.ctxt, block_dev, legacy=False)
uuid = block_dev['instance_uuid']
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
for bdm in bdms:
if bdm['device_name'] == values['device_name']:
return bdm
def test_scrub_empty_str_values_no_effect(self):
values = {'volume_size': 5}
expected = copy.copy(values)
sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
self.assertEqual(values, expected)
def test_scrub_empty_str_values_empty_string(self):
values = {'volume_size': ''}
sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
self.assertEqual(values, {})
def test_scrub_empty_str_values_empty_unicode(self):
values = {'volume_size': u''}
sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
self.assertEqual(values, {})
def test_block_device_mapping_create(self):
bdm = self._create_bdm({})
self.assertIsNotNone(bdm)
def test_block_device_mapping_update(self):
bdm = self._create_bdm({})
result = db.block_device_mapping_update(
self.ctxt, bdm['id'], {'destination_type': 'moon'},
legacy=False)
uuid = bdm['instance_uuid']
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(bdm_real[0]['destination_type'], 'moon')
# Also make sure the update call returned correct data
self.assertEqual(dict(bdm_real[0].iteritems()),
dict(result.iteritems()))
def test_block_device_mapping_update_or_create(self):
values = {
'instance_uuid': self.instance['uuid'],
'device_name': 'fake_name',
'source_type': 'volume',
'destination_type': 'volume'
}
# check create
db.block_device_mapping_update_or_create(self.ctxt, values,
legacy=False)
uuid = values['instance_uuid']
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdm_real), 1)
self.assertEqual(bdm_real[0]['device_name'], 'fake_name')
# check update
values['destination_type'] = 'camelot'
db.block_device_mapping_update_or_create(self.ctxt, values,
legacy=False)
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdm_real), 1)
bdm_real = bdm_real[0]
self.assertEqual(bdm_real['device_name'], 'fake_name')
self.assertEqual(bdm_real['destination_type'], 'camelot')
# check create without device_name
bdm1 = dict(values)
bdm1['device_name'] = None
db.block_device_mapping_update_or_create(self.ctxt, bdm1, legacy=False)
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
with_device_name = [b for b in bdms if b['device_name'] is not None]
without_device_name = [b for b in bdms if b['device_name'] is None]
self.assertEqual(len(with_device_name), 1,
'expected 1 bdm with device_name, found %d' %
len(with_device_name))
self.assertEqual(len(without_device_name), 1,
'expected 1 bdm without device_name, found %d' %
len(without_device_name))
# check create multiple devices without device_name
bdm2 = dict(values)
bdm2['device_name'] = None
db.block_device_mapping_update_or_create(self.ctxt, bdm2, legacy=False)
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
with_device_name = [b for b in bdms if b['device_name'] is not None]
without_device_name = [b for b in bdms if b['device_name'] is None]
self.assertEqual(len(with_device_name), 1,
'expected 1 bdm with device_name, found %d' %
len(with_device_name))
self.assertEqual(len(without_device_name), 2,
'expected 2 bdms without device_name, found %d' %
len(without_device_name))
def test_block_device_mapping_update_or_create_multiple_ephemeral(self):
uuid = self.instance['uuid']
values = {
'instance_uuid': uuid,
'source_type': 'blank',
'guest_format': 'myformat',
}
bdm1 = dict(values)
bdm1['device_name'] = '/dev/sdb'
db.block_device_mapping_update_or_create(self.ctxt, bdm1, legacy=False)
bdm2 = dict(values)
bdm2['device_name'] = '/dev/sdc'
db.block_device_mapping_update_or_create(self.ctxt, bdm2, legacy=False)
bdm_real = sorted(
db.block_device_mapping_get_all_by_instance(self.ctxt, uuid),
key=lambda bdm: bdm['device_name']
)
self.assertEqual(len(bdm_real), 2)
for bdm, device_name in zip(bdm_real, ['/dev/sdb', '/dev/sdc']):
self.assertEqual(bdm['device_name'], device_name)
self.assertEqual(bdm['guest_format'], 'myformat')
def test_block_device_mapping_update_or_create_check_remove_virt(self):
uuid = self.instance['uuid']
values = {
'instance_uuid': uuid,
'source_type': 'blank',
'destination_type': 'local',
'guest_format': 'swap',
}
# check that old swap bdms are deleted on create
val1 = dict(values)
val1['device_name'] = 'device1'
db.block_device_mapping_create(self.ctxt, val1, legacy=False)
val2 = dict(values)
val2['device_name'] = 'device2'
db.block_device_mapping_update_or_create(self.ctxt, val2, legacy=False)
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdm_real), 1)
bdm_real = bdm_real[0]
self.assertEqual(bdm_real['device_name'], 'device2')
self.assertEqual(bdm_real['source_type'], 'blank')
self.assertEqual(bdm_real['guest_format'], 'swap')
db.block_device_mapping_destroy(self.ctxt, bdm_real['id'])
def test_block_device_mapping_get_all_by_instance(self):
uuid1 = self.instance['uuid']
uuid2 = db.instance_create(self.ctxt, {})['uuid']
bmds_values = [{'instance_uuid': uuid1,
'device_name': '/dev/vda'},
{'instance_uuid': uuid2,
'device_name': '/dev/vdb'},
{'instance_uuid': uuid2,
'device_name': '/dev/vdc'}]
for bdm in bmds_values:
self._create_bdm(bdm)
bmd = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid1)
self.assertEqual(len(bmd), 1)
self.assertEqual(bmd[0]['device_name'], '/dev/vda')
bmd = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid2)
self.assertEqual(len(bmd), 2)
def test_block_device_mapping_destroy(self):
bdm = self._create_bdm({})
db.block_device_mapping_destroy(self.ctxt, bdm['id'])
bdm = db.block_device_mapping_get_all_by_instance(self.ctxt,
bdm['instance_uuid'])
self.assertEqual(len(bdm), 0)
def test_block_device_mapping_destroy_by_instance_and_volume(self):
vol_id1 = '69f5c254-1a5b-4fff-acf7-cb369904f58f'
vol_id2 = '69f5c254-1a5b-4fff-acf7-cb369904f59f'
self._create_bdm({'device_name': '/dev/vda', 'volume_id': vol_id1})
self._create_bdm({'device_name': '/dev/vdb', 'volume_id': vol_id2})
uuid = self.instance['uuid']
db.block_device_mapping_destroy_by_instance_and_volume(self.ctxt, uuid,
vol_id1)
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdms), 1)
self.assertEqual(bdms[0]['device_name'], '/dev/vdb')
def test_block_device_mapping_destroy_by_instance_and_device(self):
self._create_bdm({'device_name': '/dev/vda'})
self._create_bdm({'device_name': '/dev/vdb'})
uuid = self.instance['uuid']
params = (self.ctxt, uuid, '/dev/vdb')
db.block_device_mapping_destroy_by_instance_and_device(*params)
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdms), 1)
self.assertEqual(bdms[0]['device_name'], '/dev/vda')
def test_block_device_mapping_get_by_volume_id(self):
self._create_bdm({'volume_id': 'fake_id'})
bdm = db.block_device_mapping_get_by_volume_id(self.ctxt, 'fake_id')
self.assertEqual(bdm['volume_id'], 'fake_id')
def test_block_device_mapping_get_by_volume_id_join_instance(self):
self._create_bdm({'volume_id': 'fake_id'})
bdm = db.block_device_mapping_get_by_volume_id(self.ctxt, 'fake_id',
['instance'])
self.assertEqual(bdm['volume_id'], 'fake_id')
self.assertEqual(bdm['instance']['uuid'], self.instance['uuid'])
class AgentBuildTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.agent_build_* methods."""
def setUp(self):
super(AgentBuildTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_agent_build_create_and_get_all(self):
self.assertEqual(0, len(db.agent_build_get_all(self.ctxt)))
agent_build = db.agent_build_create(self.ctxt, {'os': 'GNU/HURD'})
all_agent_builds = db.agent_build_get_all(self.ctxt)
self.assertEqual(1, len(all_agent_builds))
self._assertEqualObjects(agent_build, all_agent_builds[0])
def test_agent_build_get_by_triple(self):
agent_build = db.agent_build_create(self.ctxt, {'hypervisor': 'kvm',
'os': 'FreeBSD', 'architecture': arch.X86_64})
self.assertIsNone(db.agent_build_get_by_triple(self.ctxt, 'kvm',
'FreeBSD', 'i386'))
self._assertEqualObjects(agent_build, db.agent_build_get_by_triple(
self.ctxt, 'kvm', 'FreeBSD', arch.X86_64))
def test_agent_build_destroy(self):
agent_build = db.agent_build_create(self.ctxt, {})
self.assertEqual(1, len(db.agent_build_get_all(self.ctxt)))
db.agent_build_destroy(self.ctxt, agent_build.id)
self.assertEqual(0, len(db.agent_build_get_all(self.ctxt)))
def test_agent_build_update(self):
agent_build = db.agent_build_create(self.ctxt, {'os': 'HaikuOS'})
db.agent_build_update(self.ctxt, agent_build.id, {'os': 'ReactOS'})
self.assertEqual('ReactOS', db.agent_build_get_all(self.ctxt)[0].os)
def test_agent_build_destroy_destroyed(self):
agent_build = db.agent_build_create(self.ctxt, {})
db.agent_build_destroy(self.ctxt, agent_build.id)
self.assertRaises(exception.AgentBuildNotFound,
db.agent_build_destroy, self.ctxt, agent_build.id)
def test_agent_build_update_destroyed(self):
agent_build = db.agent_build_create(self.ctxt, {'os': 'HaikuOS'})
db.agent_build_destroy(self.ctxt, agent_build.id)
self.assertRaises(exception.AgentBuildNotFound,
db.agent_build_update, self.ctxt, agent_build.id, {'os': 'OS/2'})
def test_agent_build_exists(self):
values = {'hypervisor': 'kvm', 'os': 'FreeBSD',
'architecture': arch.X86_64}
db.agent_build_create(self.ctxt, values)
self.assertRaises(exception.AgentBuildExists, db.agent_build_create,
self.ctxt, values)
def test_agent_build_get_all_by_hypervisor(self):
values = {'hypervisor': 'kvm', 'os': 'FreeBSD',
'architecture': arch.X86_64}
created = db.agent_build_create(self.ctxt, values)
actual = db.agent_build_get_all(self.ctxt, hypervisor='kvm')
self._assertEqualListsOfObjects([created], actual)
class VirtualInterfaceTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(VirtualInterfaceTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.instance_uuid = db.instance_create(self.ctxt, {})['uuid']
values = {'host': 'localhost', 'project_id': 'project1'}
self.network = db.network_create_safe(self.ctxt, values)
def _get_base_values(self):
return {
'instance_uuid': self.instance_uuid,
'address': 'fake_address',
'network_id': self.network['id'],
'uuid': str(stdlib_uuid.uuid4())
}
def mock_db_query_first_to_raise_data_error_exception(self):
self.mox.StubOutWithMock(query.Query, 'first')
query.Query.first().AndRaise(db_exc.DBError())
self.mox.ReplayAll()
def _create_virt_interface(self, values):
v = self._get_base_values()
v.update(values)
return db.virtual_interface_create(self.ctxt, v)
def test_virtual_interface_create(self):
vif = self._create_virt_interface({})
self.assertIsNotNone(vif['id'])
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at', 'uuid']
self._assertEqualObjects(vif, self._get_base_values(), ignored_keys)
def test_virtual_interface_create_with_duplicate_address(self):
vif = self._create_virt_interface({})
self.assertRaises(exception.VirtualInterfaceCreateException,
self._create_virt_interface, {"uuid": vif['uuid']})
def test_virtual_interface_get(self):
vifs = [self._create_virt_interface({'address': 'a'}),
self._create_virt_interface({'address': 'b'})]
for vif in vifs:
real_vif = db.virtual_interface_get(self.ctxt, vif['id'])
self._assertEqualObjects(vif, real_vif)
def test_virtual_interface_get_by_address(self):
vifs = [self._create_virt_interface({'address': 'first'}),
self._create_virt_interface({'address': 'second'})]
for vif in vifs:
real_vif = db.virtual_interface_get_by_address(self.ctxt,
vif['address'])
self._assertEqualObjects(vif, real_vif)
def test_virtual_interface_get_by_address_not_found(self):
self.assertIsNone(db.virtual_interface_get_by_address(self.ctxt,
"i.nv.ali.ip"))
def test_virtual_interface_get_by_address_data_error_exception(self):
self.mock_db_query_first_to_raise_data_error_exception()
self.assertRaises(exception.InvalidIpAddressError,
db.virtual_interface_get_by_address,
self.ctxt,
"i.nv.ali.ip")
def test_virtual_interface_get_by_uuid(self):
vifs = [self._create_virt_interface({"address": "address_1"}),
self._create_virt_interface({"address": "address_2"})]
for vif in vifs:
real_vif = db.virtual_interface_get_by_uuid(self.ctxt, vif['uuid'])
self._assertEqualObjects(vif, real_vif)
def test_virtual_interface_get_by_instance(self):
inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
vifs1 = [self._create_virt_interface({'address': 'fake1'}),
self._create_virt_interface({'address': 'fake2'})]
        # multiple NICs on the same instance
vifs2 = [self._create_virt_interface({'address': 'fake3',
'instance_uuid': inst_uuid2}),
self._create_virt_interface({'address': 'fake4',
'instance_uuid': inst_uuid2})]
vifs1_real = db.virtual_interface_get_by_instance(self.ctxt,
self.instance_uuid)
vifs2_real = db.virtual_interface_get_by_instance(self.ctxt,
inst_uuid2)
self._assertEqualListsOfObjects(vifs1, vifs1_real)
self._assertEqualOrderedListOfObjects(vifs2, vifs2_real)
def test_virtual_interface_get_by_instance_and_network(self):
inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
values = {'host': 'localhost', 'project_id': 'project2'}
network_id = db.network_create_safe(self.ctxt, values)['id']
vifs = [self._create_virt_interface({'address': 'fake1'}),
self._create_virt_interface({'address': 'fake2',
'network_id': network_id,
'instance_uuid': inst_uuid2}),
self._create_virt_interface({'address': 'fake3',
'instance_uuid': inst_uuid2})]
for vif in vifs:
params = (self.ctxt, vif['instance_uuid'], vif['network_id'])
r_vif = db.virtual_interface_get_by_instance_and_network(*params)
self._assertEqualObjects(r_vif, vif)
def test_virtual_interface_delete_by_instance(self):
inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
values = [dict(address='fake1'), dict(address='fake2'),
dict(address='fake3', instance_uuid=inst_uuid2)]
for vals in values:
self._create_virt_interface(vals)
db.virtual_interface_delete_by_instance(self.ctxt, self.instance_uuid)
real_vifs1 = db.virtual_interface_get_by_instance(self.ctxt,
self.instance_uuid)
real_vifs2 = db.virtual_interface_get_by_instance(self.ctxt,
inst_uuid2)
self.assertEqual(len(real_vifs1), 0)
self.assertEqual(len(real_vifs2), 1)
def test_virtual_interface_get_all(self):
inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
values = [dict(address='fake1'), dict(address='fake2'),
dict(address='fake3', instance_uuid=inst_uuid2)]
vifs = [self._create_virt_interface(val) for val in values]
real_vifs = db.virtual_interface_get_all(self.ctxt)
self._assertEqualListsOfObjects(vifs, real_vifs)
class NetworkTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.network_* methods."""
def setUp(self):
super(NetworkTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_associated_fixed_ip(self, host, cidr, ip):
network = db.network_create_safe(self.ctxt,
{'project_id': 'project1', 'cidr': cidr})
self.assertFalse(db.network_in_use_on_host(self.ctxt, network.id,
host))
instance = db.instance_create(self.ctxt,
{'project_id': 'project1', 'host': host})
virtual_interface = db.virtual_interface_create(self.ctxt,
{'instance_uuid': instance.uuid, 'network_id': network.id,
'address': ip})
db.fixed_ip_create(self.ctxt, {'address': ip,
'network_id': network.id, 'allocated': True,
'virtual_interface_id': virtual_interface.id})
db.fixed_ip_associate(self.ctxt, ip, instance.uuid,
network.id)
return network, instance
def test_network_get_associated_default_route(self):
network, instance = self._get_associated_fixed_ip('host.net',
'192.0.2.0/30', '192.0.2.1')
network2 = db.network_create_safe(self.ctxt,
{'project_id': 'project1', 'cidr': '192.0.3.0/30'})
ip = '192.0.3.1'
virtual_interface = db.virtual_interface_create(self.ctxt,
{'instance_uuid': instance.uuid, 'network_id': network2.id,
'address': ip})
db.fixed_ip_create(self.ctxt, {'address': ip,
'network_id': network2.id, 'allocated': True,
'virtual_interface_id': virtual_interface.id})
db.fixed_ip_associate(self.ctxt, ip, instance.uuid,
network2.id)
data = db.network_get_associated_fixed_ips(self.ctxt, network.id)
self.assertEqual(1, len(data))
self.assertTrue(data[0]['default_route'])
data = db.network_get_associated_fixed_ips(self.ctxt, network2.id)
self.assertEqual(1, len(data))
self.assertFalse(data[0]['default_route'])
def test_network_get_associated_fixed_ips(self):
network, instance = self._get_associated_fixed_ip('host.net',
'192.0.2.0/30', '192.0.2.1')
data = db.network_get_associated_fixed_ips(self.ctxt, network.id)
self.assertEqual(1, len(data))
self.assertEqual('192.0.2.1', data[0]['address'])
self.assertEqual('192.0.2.1', data[0]['vif_address'])
self.assertEqual(instance.uuid, data[0]['instance_uuid'])
self.assertTrue(data[0]['allocated'])
def test_network_create_safe(self):
values = {'host': 'localhost', 'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
self.assertEqual(36, len(network['uuid']))
db_network = db.network_get(self.ctxt, network['id'])
self._assertEqualObjects(network, db_network)
def test_network_create_with_duplicate_vlan(self):
values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1}
values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 1}
db.network_create_safe(self.ctxt, values1)
self.assertRaises(exception.DuplicateVlan,
db.network_create_safe, self.ctxt, values2)
def test_network_delete_safe(self):
values = {'host': 'localhost', 'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
db.network_get(self.ctxt, network['id'])
values = {'network_id': network['id'], 'address': '192.168.1.5'}
address1 = db.fixed_ip_create(self.ctxt, values)['address']
values = {'network_id': network['id'],
'address': '192.168.1.6',
'allocated': True}
address2 = db.fixed_ip_create(self.ctxt, values)['address']
self.assertRaises(exception.NetworkInUse,
db.network_delete_safe, self.ctxt, network['id'])
db.fixed_ip_update(self.ctxt, address2, {'allocated': False})
network = db.network_delete_safe(self.ctxt, network['id'])
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.fixed_ip_get_by_address, self.ctxt, address1)
ctxt = self.ctxt.elevated(read_deleted='yes')
fixed_ip = db.fixed_ip_get_by_address(ctxt, address1)
self.assertTrue(fixed_ip['deleted'])
def test_network_in_use_on_host(self):
values = {'host': 'foo', 'hostname': 'myname'}
instance = db.instance_create(self.ctxt, values)
values = {'address': '192.168.1.5', 'instance_uuid': instance['uuid']}
vif = db.virtual_interface_create(self.ctxt, values)
values = {'address': '192.168.1.6',
'network_id': 1,
'allocated': True,
'instance_uuid': instance['uuid'],
'virtual_interface_id': vif['id']}
db.fixed_ip_create(self.ctxt, values)
self.assertEqual(db.network_in_use_on_host(self.ctxt, 1, 'foo'), True)
self.assertEqual(db.network_in_use_on_host(self.ctxt, 1, 'bar'), False)
def test_network_update_nonexistent(self):
self.assertRaises(exception.NetworkNotFound,
db.network_update, self.ctxt, 123456, {})
def test_network_update_with_duplicate_vlan(self):
values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1}
values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 2}
network_ref = db.network_create_safe(self.ctxt, values1)
db.network_create_safe(self.ctxt, values2)
self.assertRaises(exception.DuplicateVlan,
db.network_update, self.ctxt,
network_ref["id"], values2)
def test_network_update(self):
network = db.network_create_safe(self.ctxt, {'project_id': 'project1',
'vlan': 1, 'host': 'test.com'})
db.network_update(self.ctxt, network.id, {'vlan': 2})
network_new = db.network_get(self.ctxt, network.id)
self.assertEqual(2, network_new.vlan)
def test_network_set_host_nonexistent_network(self):
self.assertRaises(exception.NetworkNotFound, db.network_set_host,
self.ctxt, 123456, 'nonexistent')
def test_network_set_host_already_set_correct(self):
values = {'host': 'example.com', 'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
self.assertIsNone(db.network_set_host(self.ctxt, network.id,
'example.com'))
def test_network_set_host_already_set_incorrect(self):
values = {'host': 'example.com', 'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
self.assertIsNone(db.network_set_host(self.ctxt, network.id,
'new.example.com'))
def test_network_set_host_with_initially_no_host(self):
values = {'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
db.network_set_host(self.ctxt, network.id, 'example.com')
self.assertEqual('example.com',
db.network_get(self.ctxt, network.id).host)
def test_network_set_host_succeeds_retry_on_deadlock(self):
values = {'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
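        # Stub out Query.update so the first call raises DBDeadlock and the
        # second reports one row updated, exercising the deadlock-retry path
        # of network_set_host.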
def fake_update(params):
if mock_update.call_count == 1:
raise db_exc.DBDeadlock()
else:
return 1
with mock.patch('sqlalchemy.orm.query.Query.update',
side_effect=fake_update) as mock_update:
db.network_set_host(self.ctxt, network.id, 'example.com')
self.assertEqual(2, mock_update.call_count)
def test_network_set_host_succeeds_retry_on_no_rows_updated(self):
values = {'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
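        # Stub out Query.update so the first call reports zero rows updated
        # and the second reports one, forcing exactly one retry.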
def fake_update(params):
if mock_update.call_count == 1:
return 0
else:
return 1
with mock.patch('sqlalchemy.orm.query.Query.update',
side_effect=fake_update) as mock_update:
db.network_set_host(self.ctxt, network.id, 'example.com')
self.assertEqual(2, mock_update.call_count)
def test_network_set_host_failed_with_retry_on_no_rows_updated(self):
values = {'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
with mock.patch('sqlalchemy.orm.query.Query.update',
return_value=0) as mock_update:
self.assertRaises(exception.NetworkSetHostFailed,
db.network_set_host, self.ctxt, network.id,
'example.com')
self.assertEqual(5, mock_update.call_count)
def test_network_get_all_by_host(self):
self.assertEqual([],
db.network_get_all_by_host(self.ctxt, 'example.com'))
host = 'h1.example.com'
# network with host set
net1 = db.network_create_safe(self.ctxt, {'host': host})
self._assertEqualListsOfObjects([net1],
db.network_get_all_by_host(self.ctxt, host))
# network with fixed ip with host set
net2 = db.network_create_safe(self.ctxt, {})
db.fixed_ip_create(self.ctxt, {'host': host, 'network_id': net2.id})
db.network_get_all_by_host(self.ctxt, host)
self._assertEqualListsOfObjects([net1, net2],
db.network_get_all_by_host(self.ctxt, host))
# network with instance with host set
net3 = db.network_create_safe(self.ctxt, {})
instance = db.instance_create(self.ctxt, {'host': host})
db.fixed_ip_create(self.ctxt, {'network_id': net3.id,
'instance_uuid': instance.uuid})
self._assertEqualListsOfObjects([net1, net2, net3],
db.network_get_all_by_host(self.ctxt, host))
def test_network_get_by_cidr(self):
cidr = '192.0.2.0/30'
cidr_v6 = '2001:db8:1::/64'
network = db.network_create_safe(self.ctxt,
{'project_id': 'project1', 'cidr': cidr, 'cidr_v6': cidr_v6})
self._assertEqualObjects(network,
db.network_get_by_cidr(self.ctxt, cidr))
self._assertEqualObjects(network,
db.network_get_by_cidr(self.ctxt, cidr_v6))
def test_network_get_by_cidr_nonexistent(self):
self.assertRaises(exception.NetworkNotFoundForCidr,
db.network_get_by_cidr, self.ctxt, '192.0.2.0/30')
def test_network_get_by_uuid(self):
network = db.network_create_safe(self.ctxt,
{'project_id': 'project_1'})
self._assertEqualObjects(network,
db.network_get_by_uuid(self.ctxt, network.uuid))
def test_network_get_by_uuid_nonexistent(self):
self.assertRaises(exception.NetworkNotFoundForUUID,
db.network_get_by_uuid, self.ctxt, 'non-existent-uuid')
def test_network_get_all_by_uuids_no_networks(self):
self.assertRaises(exception.NoNetworksFound,
db.network_get_all_by_uuids, self.ctxt, ['non-existent-uuid'])
def test_network_get_all_by_uuids(self):
net1 = db.network_create_safe(self.ctxt, {})
net2 = db.network_create_safe(self.ctxt, {})
self._assertEqualListsOfObjects([net1, net2],
db.network_get_all_by_uuids(self.ctxt, [net1.uuid, net2.uuid]))
def test_network_get_all_no_networks(self):
self.assertRaises(exception.NoNetworksFound,
db.network_get_all, self.ctxt)
def test_network_get_all(self):
network = db.network_create_safe(self.ctxt, {})
network_db = db.network_get_all(self.ctxt)
self.assertEqual(1, len(network_db))
self._assertEqualObjects(network, network_db[0])
def test_network_get_all_admin_user(self):
network1 = db.network_create_safe(self.ctxt, {})
network2 = db.network_create_safe(self.ctxt,
{'project_id': 'project1'})
self._assertEqualListsOfObjects([network1, network2],
db.network_get_all(self.ctxt,
project_only=True))
def test_network_get_all_normal_user(self):
normal_ctxt = context.RequestContext('fake', 'fake')
db.network_create_safe(self.ctxt, {})
db.network_create_safe(self.ctxt, {'project_id': 'project1'})
network1 = db.network_create_safe(self.ctxt,
{'project_id': 'fake'})
network_db = db.network_get_all(normal_ctxt, project_only=True)
self.assertEqual(1, len(network_db))
self._assertEqualObjects(network1, network_db[0])
def test_network_get(self):
network = db.network_create_safe(self.ctxt, {})
self._assertEqualObjects(db.network_get(self.ctxt, network.id),
network)
db.network_delete_safe(self.ctxt, network.id)
self.assertRaises(exception.NetworkNotFound,
db.network_get, self.ctxt, network.id)
def test_network_associate(self):
network = db.network_create_safe(self.ctxt, {})
self.assertIsNone(network.project_id)
db.network_associate(self.ctxt, "project1", network.id)
self.assertEqual("project1", db.network_get(self.ctxt,
network.id).project_id)
    def test_network_disassociate(self):
network = db.network_create_safe(self.ctxt,
{'project_id': 'project1', 'host': 'test.net'})
# disassociate project
db.network_disassociate(self.ctxt, network.id, False, True)
self.assertIsNone(db.network_get(self.ctxt, network.id).project_id)
# disassociate host
db.network_disassociate(self.ctxt, network.id, True, False)
self.assertIsNone(db.network_get(self.ctxt, network.id).host)
def test_network_count_reserved_ips(self):
net = db.network_create_safe(self.ctxt, {})
self.assertEqual(0, db.network_count_reserved_ips(self.ctxt, net.id))
db.fixed_ip_create(self.ctxt, {'network_id': net.id,
'reserved': True})
self.assertEqual(1, db.network_count_reserved_ips(self.ctxt, net.id))
class KeyPairTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(KeyPairTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _create_key_pair(self, values):
return db.key_pair_create(self.ctxt, values)
def test_key_pair_create(self):
param = {
'name': 'test_1',
'type': 'ssh',
'user_id': 'test_user_id_1',
'public_key': 'test_public_key_1',
'fingerprint': 'test_fingerprint_1'
}
key_pair = self._create_key_pair(param)
self.assertIsNotNone(key_pair['id'])
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id']
self._assertEqualObjects(key_pair, param, ignored_keys)
def test_key_pair_create_with_duplicate_name(self):
params = {'name': 'test_name', 'user_id': 'test_user_id',
'type': 'ssh'}
self._create_key_pair(params)
self.assertRaises(exception.KeyPairExists, self._create_key_pair,
params)
def test_key_pair_get(self):
params = [
{'name': 'test_1', 'user_id': 'test_user_id_1', 'type': 'ssh'},
{'name': 'test_2', 'user_id': 'test_user_id_2', 'type': 'ssh'},
{'name': 'test_3', 'user_id': 'test_user_id_3', 'type': 'ssh'}
]
key_pairs = [self._create_key_pair(p) for p in params]
for key in key_pairs:
real_key = db.key_pair_get(self.ctxt, key['user_id'], key['name'])
self._assertEqualObjects(key, real_key)
def test_key_pair_get_no_results(self):
param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
self.ctxt, param['user_id'], param['name'])
def test_key_pair_get_deleted(self):
param = {'name': 'test_1', 'user_id': 'test_user_id_1', 'type': 'ssh'}
key_pair_created = self._create_key_pair(param)
db.key_pair_destroy(self.ctxt, param['user_id'], param['name'])
self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
self.ctxt, param['user_id'], param['name'])
ctxt = self.ctxt.elevated(read_deleted='yes')
key_pair_deleted = db.key_pair_get(ctxt, param['user_id'],
param['name'])
ignored_keys = ['deleted', 'created_at', 'updated_at', 'deleted_at']
self._assertEqualObjects(key_pair_deleted, key_pair_created,
ignored_keys)
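        # Soft-deleted rows record the row id in the `deleted` column rather
        # than a boolean flag.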
self.assertEqual(key_pair_deleted['deleted'], key_pair_deleted['id'])
def test_key_pair_get_all_by_user(self):
params = [
{'name': 'test_1', 'user_id': 'test_user_id_1', 'type': 'ssh'},
{'name': 'test_2', 'user_id': 'test_user_id_1', 'type': 'ssh'},
{'name': 'test_3', 'user_id': 'test_user_id_2', 'type': 'ssh'}
]
key_pairs_user_1 = [self._create_key_pair(p) for p in params
if p['user_id'] == 'test_user_id_1']
key_pairs_user_2 = [self._create_key_pair(p) for p in params
if p['user_id'] == 'test_user_id_2']
real_keys_1 = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id_1')
real_keys_2 = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id_2')
self._assertEqualListsOfObjects(key_pairs_user_1, real_keys_1)
self._assertEqualListsOfObjects(key_pairs_user_2, real_keys_2)
def test_key_pair_count_by_user(self):
params = [
{'name': 'test_1', 'user_id': 'test_user_id_1', 'type': 'ssh'},
{'name': 'test_2', 'user_id': 'test_user_id_1', 'type': 'ssh'},
{'name': 'test_3', 'user_id': 'test_user_id_2', 'type': 'ssh'}
]
for p in params:
self._create_key_pair(p)
count_1 = db.key_pair_count_by_user(self.ctxt, 'test_user_id_1')
self.assertEqual(count_1, 2)
count_2 = db.key_pair_count_by_user(self.ctxt, 'test_user_id_2')
self.assertEqual(count_2, 1)
def test_key_pair_destroy(self):
param = {'name': 'test_1', 'user_id': 'test_user_id_1', 'type': 'ssh'}
self._create_key_pair(param)
db.key_pair_destroy(self.ctxt, param['user_id'], param['name'])
self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
self.ctxt, param['user_id'], param['name'])
def test_key_pair_destroy_no_such_key(self):
param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
self.assertRaises(exception.KeypairNotFound,
db.key_pair_destroy, self.ctxt,
param['user_id'], param['name'])
class QuotaTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.quota_* methods."""
def setUp(self):
super(QuotaTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_quota_create(self):
quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
self.assertEqual(quota.resource, 'resource')
self.assertEqual(quota.hard_limit, 99)
self.assertEqual(quota.project_id, 'project1')
def test_quota_get(self):
quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
quota_db = db.quota_get(self.ctxt, 'project1', 'resource')
self._assertEqualObjects(quota, quota_db)
def test_quota_get_all_by_project(self):
for i in range(3):
for j in range(3):
db.quota_create(self.ctxt, 'proj%d' % i, 'resource%d' % j, j)
for i in range(3):
quotas_db = db.quota_get_all_by_project(self.ctxt, 'proj%d' % i)
self.assertEqual(quotas_db, {'project_id': 'proj%d' % i,
'resource0': 0,
'resource1': 1,
'resource2': 2})
def test_quota_get_all_by_project_and_user(self):
for i in range(3):
for j in range(3):
db.quota_create(self.ctxt, 'proj%d' % i, 'resource%d' % j,
j - 1, user_id='user%d' % i)
for i in range(3):
quotas_db = db.quota_get_all_by_project_and_user(self.ctxt,
'proj%d' % i,
'user%d' % i)
self.assertEqual(quotas_db, {'project_id': 'proj%d' % i,
'user_id': 'user%d' % i,
'resource0': -1,
'resource1': 0,
'resource2': 1})
def test_quota_update(self):
db.quota_create(self.ctxt, 'project1', 'resource1', 41)
db.quota_update(self.ctxt, 'project1', 'resource1', 42)
quota = db.quota_get(self.ctxt, 'project1', 'resource1')
self.assertEqual(quota.hard_limit, 42)
self.assertEqual(quota.resource, 'resource1')
self.assertEqual(quota.project_id, 'project1')
def test_quota_update_nonexistent(self):
self.assertRaises(exception.ProjectQuotaNotFound,
db.quota_update, self.ctxt, 'project1', 'resource1', 42)
def test_quota_get_nonexistent(self):
self.assertRaises(exception.ProjectQuotaNotFound,
db.quota_get, self.ctxt, 'project1', 'resource1')
def test_quota_reserve_all_resources(self):
quotas = {}
deltas = {}
reservable_resources = {}
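        # Register a hard limit of 100 for every reservable resource and ask
        # for a distinct delta per resource so reservations can be told apart.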
for i, resource in enumerate(quota.resources):
if isinstance(resource, quota.ReservableResource):
quotas[resource.name] = db.quota_create(self.ctxt, 'project1',
resource.name, 100)
deltas[resource.name] = i
reservable_resources[resource.name] = resource
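        # Create real resources (instances, fixed/floating IPs, security and
        # server groups) so the usage sync reports the in_use counts below.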
usages = {'instances': 3, 'cores': 6, 'ram': 9}
instances = []
for i in range(3):
instances.append(db.instance_create(self.ctxt,
{'vcpus': 2, 'memory_mb': 3,
'project_id': 'project1'}))
usages['fixed_ips'] = 2
network = db.network_create_safe(self.ctxt, {})
for i in range(2):
address = '192.168.0.%d' % i
db.fixed_ip_create(self.ctxt, {'project_id': 'project1',
'address': address,
'network_id': network['id']})
db.fixed_ip_associate(self.ctxt, address,
instances[0].uuid, network['id'])
usages['floating_ips'] = 5
for i in range(5):
db.floating_ip_create(self.ctxt, {'project_id': 'project1'})
usages['security_groups'] = 3
for i in range(3):
db.security_group_create(self.ctxt, {'project_id': 'project1'})
usages['server_groups'] = 4
for i in range(4):
db.instance_group_create(self.ctxt, {'uuid': str(i),
'project_id': 'project1'})
reservations_uuids = db.quota_reserve(self.ctxt, reservable_resources,
quotas, quotas, deltas, None,
None, None, 'project1')
resources_names = reservable_resources.keys()
for reservation_uuid in reservations_uuids:
reservation = _reservation_get(self.ctxt, reservation_uuid)
usage = db.quota_usage_get(self.ctxt, 'project1',
reservation.resource)
self.assertEqual(usage.in_use, usages[reservation.resource],
'Resource: %s' % reservation.resource)
self.assertEqual(usage.reserved, deltas[reservation.resource])
self.assertIn(reservation.resource, resources_names)
resources_names.remove(reservation.resource)
self.assertEqual(len(resources_names), 0)
def test_quota_destroy_all_by_project(self):
reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
db.quota_destroy_all_by_project(self.ctxt, 'project1')
self.assertEqual(db.quota_get_all_by_project(self.ctxt, 'project1'),
{'project_id': 'project1'})
self.assertEqual(db.quota_get_all_by_project_and_user(self.ctxt,
'project1', 'user1'),
{'project_id': 'project1', 'user_id': 'user1'})
self.assertEqual(db.quota_usage_get_all_by_project(
self.ctxt, 'project1'),
{'project_id': 'project1'})
for r in reservations:
self.assertRaises(exception.ReservationNotFound,
_reservation_get, self.ctxt, r)
def test_quota_destroy_all_by_project_and_user(self):
reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
db.quota_destroy_all_by_project_and_user(self.ctxt, 'project1',
'user1')
self.assertEqual(db.quota_get_all_by_project_and_user(self.ctxt,
'project1', 'user1'),
{'project_id': 'project1',
'user_id': 'user1'})
self.assertEqual(db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'),
{'project_id': 'project1',
'user_id': 'user1',
'fixed_ips': {'in_use': 2, 'reserved': 2}})
for r in reservations:
self.assertRaises(exception.ReservationNotFound,
_reservation_get, self.ctxt, r)
def test_quota_usage_get_nonexistent(self):
self.assertRaises(exception.QuotaUsageNotFound, db.quota_usage_get,
self.ctxt, 'p1', 'nonexitent_resource')
def test_quota_usage_get(self):
_quota_reserve(self.ctxt, 'p1', 'u1')
quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'resource0')
expected = {'resource': 'resource0', 'project_id': 'p1',
'in_use': 0, 'reserved': 0, 'total': 0}
for key, value in expected.iteritems():
self.assertEqual(value, quota_usage[key])
def test_quota_usage_get_all_by_project(self):
_quota_reserve(self.ctxt, 'p1', 'u1')
expected = {'project_id': 'p1',
'resource0': {'in_use': 0, 'reserved': 0},
'resource1': {'in_use': 1, 'reserved': 1},
'fixed_ips': {'in_use': 2, 'reserved': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project(
self.ctxt, 'p1'))
def test_quota_usage_get_all_by_project_and_user(self):
_quota_reserve(self.ctxt, 'p1', 'u1')
expected = {'project_id': 'p1',
'user_id': 'u1',
'resource0': {'in_use': 0, 'reserved': 0},
'resource1': {'in_use': 1, 'reserved': 1},
'fixed_ips': {'in_use': 2, 'reserved': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'p1', 'u1'))
def test_quota_usage_update_nonexistent(self):
self.assertRaises(exception.QuotaUsageNotFound, db.quota_usage_update,
self.ctxt, 'p1', 'u1', 'resource', in_use=42)
def test_quota_usage_update(self):
_quota_reserve(self.ctxt, 'p1', 'u1')
db.quota_usage_update(self.ctxt, 'p1', 'u1', 'resource0', in_use=42,
reserved=43)
quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'resource0', 'u1')
expected = {'resource': 'resource0', 'project_id': 'p1',
'user_id': 'u1', 'in_use': 42, 'reserved': 43, 'total': 85}
for key, value in expected.iteritems():
self.assertEqual(value, quota_usage[key])
def test_quota_create_exists(self):
db.quota_create(self.ctxt, 'project1', 'resource1', 41)
self.assertRaises(exception.QuotaExists, db.quota_create, self.ctxt,
'project1', 'resource1', 42)
class QuotaReserveNoDbTestCase(test.NoDBTestCase):
"""Tests quota reserve/refresh operations using mock."""
def test_create_quota_usage_if_missing_not_created(self):
# Tests that QuotaUsage isn't created if it's already in user_usages.
resource = 'fake-resource'
project_id = 'fake-project'
user_id = 'fake_user'
session = mock.sentinel
quota_usage = mock.sentinel
user_usages = {resource: quota_usage}
with mock.patch.object(sqlalchemy_api, '_quota_usage_create') as quc:
self.assertFalse(sqlalchemy_api._create_quota_usage_if_missing(
user_usages, resource, None,
project_id, user_id, session))
self.assertFalse(quc.called)
def _test_create_quota_usage_if_missing_created(self, per_project_quotas):
# Tests that the QuotaUsage is created.
user_usages = {}
if per_project_quotas:
resource = sqlalchemy_api.PER_PROJECT_QUOTAS[0]
else:
resource = 'fake-resource'
project_id = 'fake-project'
user_id = 'fake_user'
session = mock.sentinel
quota_usage = mock.sentinel
with mock.patch.object(sqlalchemy_api, '_quota_usage_create',
return_value=quota_usage) as quc:
self.assertTrue(sqlalchemy_api._create_quota_usage_if_missing(
user_usages, resource, None,
project_id, user_id, session))
self.assertEqual(quota_usage, user_usages[resource])
# Now test if the QuotaUsage was created with a user_id or not.
if per_project_quotas:
quc.assert_called_once_with(
project_id, None, resource, 0, 0, None, session=session)
else:
quc.assert_called_once_with(
project_id, user_id, resource, 0, 0, None, session=session)
def test_create_quota_usage_if_missing_created_per_project_quotas(self):
self._test_create_quota_usage_if_missing_created(True)
def test_create_quota_usage_if_missing_created_user_quotas(self):
self._test_create_quota_usage_if_missing_created(False)
def test_is_quota_refresh_needed_in_use(self):
# Tests when a quota refresh is needed based on the in_use value.
for in_use in range(-1, 1):
            # We have to set until_refresh=None, otherwise mock would
            # auto-create a value for it and we would end up running code
            # paths we don't want to exercise here.
quota_usage = mock.MagicMock(in_use=in_use, until_refresh=None)
if in_use < 0:
self.assertTrue(sqlalchemy_api._is_quota_refresh_needed(
quota_usage, max_age=0))
else:
self.assertFalse(sqlalchemy_api._is_quota_refresh_needed(
quota_usage, max_age=0))
def test_is_quota_refresh_needed_until_refresh_none(self):
quota_usage = mock.MagicMock(in_use=0, until_refresh=None)
self.assertFalse(sqlalchemy_api._is_quota_refresh_needed(quota_usage,
max_age=0))
def test_is_quota_refresh_needed_until_refresh_not_none(self):
# Tests different values for the until_refresh counter.
for until_refresh in range(3):
quota_usage = mock.MagicMock(in_use=0, until_refresh=until_refresh)
refresh = sqlalchemy_api._is_quota_refresh_needed(quota_usage,
max_age=0)
until_refresh -= 1
if until_refresh <= 0:
self.assertTrue(refresh)
else:
self.assertFalse(refresh)
self.assertEqual(until_refresh, quota_usage.until_refresh)
def test_refresh_quota_usages(self):
quota_usage = mock.Mock(spec=models.QuotaUsage)
quota_usage.in_use = 5
quota_usage.until_refresh = None
sqlalchemy_api._refresh_quota_usages(quota_usage, until_refresh=5,
in_use=6)
self.assertEqual(6, quota_usage.in_use)
self.assertEqual(5, quota_usage.until_refresh)
def test_calculate_overquota_no_delta(self):
deltas = {'foo': -1}
user_quotas = {'foo': 10}
overs = sqlalchemy_api._calculate_overquota({}, user_quotas, deltas,
{}, {})
self.assertFalse(overs)
def test_calculate_overquota_unlimited_quota(self):
deltas = {'foo': 1}
project_quotas = {}
user_quotas = {'foo': -1}
project_usages = {}
user_usages = {'foo': 10}
overs = sqlalchemy_api._calculate_overquota(
project_quotas, user_quotas, deltas, project_usages, user_usages)
self.assertFalse(overs)
def _test_calculate_overquota(self, resource, project_usages, user_usages):
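        # Helper: with project and user limits of 10 and a delta of 1, assert
        # that the given resource shows up as over quota for the supplied
        # usage dictionaries.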
deltas = {resource: 1}
project_quotas = {resource: 10}
user_quotas = {resource: 10}
overs = sqlalchemy_api._calculate_overquota(
project_quotas, user_quotas, deltas, project_usages, user_usages)
self.assertEqual(resource, overs[0])
def test_calculate_overquota_per_project_quota_overquota(self):
# In this test, user quotas are fine but project quotas are over.
resource = 'foo'
project_usages = {resource: {'total': 10}}
user_usages = {resource: {'total': 5}}
self._test_calculate_overquota(resource, project_usages, user_usages)
def test_calculate_overquota_per_user_quota_overquota(self):
# In this test, project quotas are fine but user quotas are over.
resource = 'foo'
project_usages = {resource: {'total': 5}}
user_usages = {resource: {'total': 10}}
self._test_calculate_overquota(resource, project_usages, user_usages)
class QuotaClassTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(QuotaClassTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_quota_class_get_default(self):
params = {
'test_resource1': '10',
'test_resource2': '20',
'test_resource3': '30',
}
for res, limit in params.items():
db.quota_class_create(self.ctxt, 'default', res, limit)
defaults = db.quota_class_get_default(self.ctxt)
self.assertEqual(defaults, dict(class_name='default',
test_resource1=10,
test_resource2=20,
test_resource3=30))
def test_quota_class_create(self):
qc = db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
self.assertEqual(qc.class_name, 'class name')
self.assertEqual(qc.resource, 'resource')
self.assertEqual(qc.hard_limit, 42)
def test_quota_class_get(self):
qc = db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
qc_db = db.quota_class_get(self.ctxt, 'class name', 'resource')
self._assertEqualObjects(qc, qc_db)
def test_quota_class_get_nonexistent(self):
self.assertRaises(exception.QuotaClassNotFound, db.quota_class_get,
self.ctxt, 'nonexistent', 'resource')
def test_quota_class_get_all_by_name(self):
for i in range(3):
for j in range(3):
db.quota_class_create(self.ctxt, 'class%d' % i,
'resource%d' % j, j)
for i in range(3):
classes = db.quota_class_get_all_by_name(self.ctxt, 'class%d' % i)
self.assertEqual(classes, {'class_name': 'class%d' % i,
'resource0': 0, 'resource1': 1, 'resource2': 2})
def test_quota_class_update(self):
db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
db.quota_class_update(self.ctxt, 'class name', 'resource', 43)
self.assertEqual(db.quota_class_get(self.ctxt, 'class name',
'resource').hard_limit, 43)
def test_quota_class_update_nonexistent(self):
self.assertRaises(exception.QuotaClassNotFound, db.quota_class_update,
self.ctxt, 'class name', 'resource', 42)
def test_refresh_quota_usages(self):
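        # Smoke test: _refresh_quota_usages should accept a mocked usage
        # object without raising; the attribute updates are asserted in
        # QuotaReserveNoDbTestCase.test_refresh_quota_usages.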
quota_usages = mock.Mock()
sqlalchemy_api._refresh_quota_usages(quota_usages, until_refresh=5,
in_use=6)
class S3ImageTestCase(test.TestCase):
def setUp(self):
super(S3ImageTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.values = [uuidutils.generate_uuid() for i in xrange(3)]
self.images = [db.s3_image_create(self.ctxt, uuid)
for uuid in self.values]
def test_s3_image_create(self):
for ref in self.images:
self.assertTrue(uuidutils.is_uuid_like(ref.uuid))
self.assertEqual(sorted(self.values),
sorted([ref.uuid for ref in self.images]))
def test_s3_image_get_by_uuid(self):
for uuid in self.values:
ref = db.s3_image_get_by_uuid(self.ctxt, uuid)
self.assertTrue(uuidutils.is_uuid_like(ref.uuid))
self.assertEqual(uuid, ref.uuid)
def test_s3_image_get(self):
self.assertEqual(sorted(self.values),
sorted([db.s3_image_get(self.ctxt, ref.id).uuid
for ref in self.images]))
def test_s3_image_get_not_found(self):
self.assertRaises(exception.ImageNotFound, db.s3_image_get, self.ctxt,
100500)
def test_s3_image_get_by_uuid_not_found(self):
self.assertRaises(exception.ImageNotFound, db.s3_image_get_by_uuid,
self.ctxt, uuidutils.generate_uuid())
class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
_ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
def setUp(self):
super(ComputeNodeTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.service_dict = dict(host='host1', binary='nova-compute',
topic=CONF.compute_topic, report_count=1,
disabled=False)
self.service = db.service_create(self.ctxt, self.service_dict)
self.compute_node_dict = dict(vcpus=2, memory_mb=1024, local_gb=2048,
vcpus_used=0, memory_mb_used=0,
local_gb_used=0, free_ram_mb=1024,
free_disk_gb=2048, hypervisor_type="xen",
hypervisor_version=1, cpu_info="",
running_vms=0, current_workload=0,
service_id=self.service['id'],
host=self.service['host'],
disk_available_least=100,
hypervisor_hostname='abracadabra104',
host_ip='127.0.0.1',
supported_instances='',
pci_stats='',
metrics='',
extra_resources='',
stats='', numa_topology='')
# add some random stats
self.stats = dict(num_instances=3, num_proj_12345=2,
num_proj_23456=2, num_vm_building=3)
self.compute_node_dict['stats'] = jsonutils.dumps(self.stats)
self.flags(reserved_host_memory_mb=0)
self.flags(reserved_host_disk_mb=0)
self.item = db.compute_node_create(self.ctxt, self.compute_node_dict)
def test_compute_node_create(self):
self._assertEqualObjects(self.compute_node_dict, self.item,
ignored_keys=self._ignored_keys + ['stats'])
new_stats = jsonutils.loads(self.item['stats'])
self.assertEqual(self.stats, new_stats)
def test_compute_node_get_all(self):
nodes = db.compute_node_get_all(self.ctxt)
self.assertEqual(1, len(nodes))
node = nodes[0]
self._assertEqualObjects(self.compute_node_dict, node,
ignored_keys=self._ignored_keys +
['stats', 'service'])
new_stats = jsonutils.loads(node['stats'])
self.assertEqual(self.stats, new_stats)
def test_compute_node_get_all_deleted_compute_node(self):
# Create a service and compute node and ensure we can find its stats;
# delete the service and compute node when done and loop again
for x in range(2, 5):
# Create a service
service_data = self.service_dict.copy()
service_data['host'] = 'host-%s' % x
service = db.service_create(self.ctxt, service_data)
# Create a compute node
compute_node_data = self.compute_node_dict.copy()
compute_node_data['service_id'] = service['id']
compute_node_data['stats'] = jsonutils.dumps(self.stats.copy())
compute_node_data['hypervisor_hostname'] = 'hypervisor-%s' % x
node = db.compute_node_create(self.ctxt, compute_node_data)
# Ensure the "new" compute node is found
nodes = db.compute_node_get_all(self.ctxt)
self.assertEqual(2, len(nodes))
found = None
for n in nodes:
if n['id'] == node['id']:
found = n
break
self.assertIsNotNone(found)
# Now ensure the match has stats!
self.assertNotEqual(jsonutils.loads(found['stats']), {})
# Now delete the newly-created compute node to ensure the related
# compute node stats are wiped in a cascaded fashion
db.compute_node_delete(self.ctxt, node['id'])
# Clean up the service
db.service_destroy(self.ctxt, service['id'])
def test_compute_node_get_all_mult_compute_nodes_one_service_entry(self):
service_data = self.service_dict.copy()
service_data['host'] = 'host2'
service = db.service_create(self.ctxt, service_data)
existing_node = dict(self.item.iteritems())
expected = [existing_node]
for name in ['bm_node1', 'bm_node2']:
compute_node_data = self.compute_node_dict.copy()
compute_node_data['service_id'] = service['id']
compute_node_data['stats'] = jsonutils.dumps(self.stats)
compute_node_data['hypervisor_hostname'] = name
node = db.compute_node_create(self.ctxt, compute_node_data)
node = dict(node.iteritems())
expected.append(node)
result = sorted(db.compute_node_get_all(self.ctxt),
key=lambda n: n['hypervisor_hostname'])
self._assertEqualListsOfObjects(expected, result,
ignored_keys=['stats'])
def test_compute_node_get_all_by_host_with_distinct_hosts(self):
# Create another service with another node
service2 = self.service_dict.copy()
service2['host'] = 'host2'
db.service_create(self.ctxt, service2)
compute_node_another_host = self.compute_node_dict.copy()
compute_node_another_host['stats'] = jsonutils.dumps(self.stats)
compute_node_another_host['hypervisor_hostname'] = 'node_2'
compute_node_another_host['host'] = 'host2'
node = db.compute_node_create(self.ctxt, compute_node_another_host)
result = db.compute_node_get_all_by_host(self.ctxt, 'host1', False)
self._assertEqualListsOfObjects([self.item], result)
result = db.compute_node_get_all_by_host(self.ctxt, 'host2', False)
self._assertEqualListsOfObjects([node], result)
def test_compute_node_get_all_by_host_with_same_host(self):
# Create another node on top of the same service
compute_node_same_host = self.compute_node_dict.copy()
compute_node_same_host['stats'] = jsonutils.dumps(self.stats)
compute_node_same_host['hypervisor_hostname'] = 'node_3'
node = db.compute_node_create(self.ctxt, compute_node_same_host)
expected = [self.item, node]
result = sorted(db.compute_node_get_all_by_host(
self.ctxt, 'host1', False),
key=lambda n: n['hypervisor_hostname'])
self._assertEqualListsOfObjects(expected, result,
ignored_keys=['stats'])
def test_compute_node_get_all_by_host_not_found(self):
self.assertRaises(exception.ComputeHostNotFound,
db.compute_node_get_all_by_host, self.ctxt, 'wrong')
def test_compute_nodes_get_by_service_id_one_result(self):
expected = [self.item]
result = db.compute_nodes_get_by_service_id(
self.ctxt, self.service['id'])
self._assertEqualListsOfObjects(expected, result,
ignored_keys=['stats'])
def test_compute_nodes_get_by_service_id_multiple_results(self):
# Create another node on top of the same service
compute_node_same_host = self.compute_node_dict.copy()
compute_node_same_host['stats'] = jsonutils.dumps(self.stats)
compute_node_same_host['hypervisor_hostname'] = 'node_2'
node = db.compute_node_create(self.ctxt, compute_node_same_host)
expected = [self.item, node]
result = sorted(db.compute_nodes_get_by_service_id(
self.ctxt, self.service['id']),
key=lambda n: n['hypervisor_hostname'])
self._assertEqualListsOfObjects(expected, result,
ignored_keys=['stats'])
def test_compute_nodes_get_by_service_id_not_found(self):
self.assertRaises(exception.ServiceNotFound,
db.compute_nodes_get_by_service_id, self.ctxt,
'fake')
def test_compute_node_get_by_host_and_nodename(self):
# Create another node on top of the same service
compute_node_same_host = self.compute_node_dict.copy()
compute_node_same_host['stats'] = jsonutils.dumps(self.stats)
compute_node_same_host['hypervisor_hostname'] = 'node_2'
node = db.compute_node_create(self.ctxt, compute_node_same_host)
expected = node
result = db.compute_node_get_by_host_and_nodename(
self.ctxt, 'host1', 'node_2')
self._assertEqualObjects(expected, result)
def test_compute_node_get_by_host_and_nodename_not_found(self):
self.assertRaises(exception.ComputeHostNotFound,
db.compute_node_get_by_host_and_nodename,
self.ctxt, 'host1', 'wrong')
def test_compute_node_get(self):
compute_node_id = self.item['id']
node = db.compute_node_get(self.ctxt, compute_node_id)
self._assertEqualObjects(self.compute_node_dict, node,
ignored_keys=self._ignored_keys + ['stats', 'service'])
new_stats = jsonutils.loads(node['stats'])
self.assertEqual(self.stats, new_stats)
def test_compute_node_update(self):
compute_node_id = self.item['id']
stats = jsonutils.loads(self.item['stats'])
# change some values:
stats['num_instances'] = 8
stats['num_tribbles'] = 1
values = {
'vcpus': 4,
'stats': jsonutils.dumps(stats),
}
item_updated = db.compute_node_update(self.ctxt, compute_node_id,
values)
self.assertEqual(4, item_updated['vcpus'])
new_stats = jsonutils.loads(item_updated['stats'])
self.assertEqual(stats, new_stats)
def test_compute_node_delete(self):
compute_node_id = self.item['id']
db.compute_node_delete(self.ctxt, compute_node_id)
nodes = db.compute_node_get_all(self.ctxt)
self.assertEqual(len(nodes), 0)
def test_compute_node_search_by_hypervisor(self):
nodes_created = []
new_service = copy.copy(self.service_dict)
for i in xrange(3):
new_service['binary'] += str(i)
new_service['topic'] += str(i)
service = db.service_create(self.ctxt, new_service)
self.compute_node_dict['service_id'] = service['id']
self.compute_node_dict['hypervisor_hostname'] = 'testhost' + str(i)
self.compute_node_dict['stats'] = jsonutils.dumps(self.stats)
node = db.compute_node_create(self.ctxt, self.compute_node_dict)
nodes_created.append(node)
nodes = db.compute_node_search_by_hypervisor(self.ctxt, 'host')
self.assertEqual(3, len(nodes))
self._assertEqualListsOfObjects(nodes_created, nodes,
ignored_keys=self._ignored_keys + ['stats', 'service'])
def test_compute_node_statistics(self):
stats = db.compute_node_statistics(self.ctxt)
self.assertEqual(stats.pop('count'), 1)
for k, v in stats.iteritems():
self.assertEqual(v, self.item[k])
def test_compute_node_statistics_disabled_service(self):
serv = db.service_get_by_host_and_topic(
self.ctxt, 'host1', CONF.compute_topic)
db.service_update(self.ctxt, serv['id'], {'disabled': True})
stats = db.compute_node_statistics(self.ctxt)
self.assertEqual(stats.pop('count'), 0)
def test_compute_node_statistics_with_old_service_id(self):
# NOTE(sbauza): This test is only for checking backwards compatibility
# with old versions of compute_nodes not providing host column.
# This test could be removed once we are sure that all compute nodes
# are populating the host field thanks to the ResourceTracker
service2 = self.service_dict.copy()
service2['host'] = 'host2'
db_service2 = db.service_create(self.ctxt, service2)
compute_node_old_host = self.compute_node_dict.copy()
compute_node_old_host['stats'] = jsonutils.dumps(self.stats)
compute_node_old_host['hypervisor_hostname'] = 'node_2'
compute_node_old_host['service_id'] = db_service2['id']
compute_node_old_host.pop('host')
db.compute_node_create(self.ctxt, compute_node_old_host)
stats = db.compute_node_statistics(self.ctxt)
self.assertEqual(2, stats.pop('count'))
def test_compute_node_statistics_with_other_service(self):
other_service = self.service_dict.copy()
other_service['topic'] = 'fake-topic'
other_service['binary'] = 'nova-fake'
db.service_create(self.ctxt, other_service)
stats = db.compute_node_statistics(self.ctxt)
data = {'count': 1,
'vcpus_used': 0,
'local_gb_used': 0,
'memory_mb': 1024,
'current_workload': 0,
'vcpus': 2,
'running_vms': 0,
'free_disk_gb': 2048,
'disk_available_least': 100,
'local_gb': 2048,
'free_ram_mb': 1024,
'memory_mb_used': 0}
for key, value in six.iteritems(data):
self.assertEqual(value, stats.pop(key))
def test_compute_node_not_found(self):
self.assertRaises(exception.ComputeHostNotFound, db.compute_node_get,
self.ctxt, 100500)
def test_compute_node_update_always_updates_updated_at(self):
item_updated = db.compute_node_update(self.ctxt,
self.item['id'], {})
self.assertNotEqual(self.item['updated_at'],
item_updated['updated_at'])
def test_compute_node_update_override_updated_at(self):
# Update the record once so updated_at is set.
first = db.compute_node_update(self.ctxt, self.item['id'],
{'free_ram_mb': '12'})
self.assertIsNotNone(first['updated_at'])
# Update a second time. Make sure that the updated_at value we send
# is overridden.
second = db.compute_node_update(self.ctxt, self.item['id'],
{'updated_at': first.updated_at,
'free_ram_mb': '13'})
self.assertNotEqual(first['updated_at'], second['updated_at'])
def test_service_destroy_with_compute_node(self):
db.service_destroy(self.ctxt, self.service['id'])
self.assertRaises(exception.ComputeHostNotFound,
db.compute_node_get, self.ctxt,
self.item['id'])
def test_service_destroy_with_old_compute_node(self):
# NOTE(sbauza): This test is only for checking backwards compatibility
# with old versions of compute_nodes not providing host column.
# This test could be removed once we are sure that all compute nodes
# are populating the host field thanks to the ResourceTracker
compute_node_old_host_dict = self.compute_node_dict.copy()
compute_node_old_host_dict.pop('host')
item_old = db.compute_node_create(self.ctxt,
compute_node_old_host_dict)
db.service_destroy(self.ctxt, self.service['id'])
self.assertRaises(exception.ComputeHostNotFound,
db.compute_node_get, self.ctxt,
item_old['id'])
class ProviderFwRuleTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(ProviderFwRuleTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.values = self._get_rule_values()
self.rules = [db.provider_fw_rule_create(self.ctxt, rule)
for rule in self.values]
def _get_rule_values(self):
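        # Build one sample firewall rule per CIDR, covering IPv4 and IPv6
        # prefixes, with protocol and ports derived from the loop index.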
cidr_samples = ['192.168.0.0/24', '10.1.2.3/32',
'2001:4f8:3:ba::/64',
'2001:4f8:3:ba:2e0:81ff:fe22:d1f1/128']
values = []
for i in xrange(len(cidr_samples)):
rule = {}
rule['protocol'] = 'foo' + str(i)
rule['from_port'] = 9999 + i
rule['to_port'] = 9898 + i
rule['cidr'] = cidr_samples[i]
values.append(rule)
return values
def test_provider_fw_rule_create(self):
ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
'updated_at']
for i, rule in enumerate(self.values):
self._assertEqualObjects(self.rules[i], rule,
ignored_keys=ignored_keys)
def test_provider_fw_rule_get_all(self):
self._assertEqualListsOfObjects(self.rules,
db.provider_fw_rule_get_all(self.ctxt))
def test_provider_fw_rule_destroy(self):
for rule in self.rules:
db.provider_fw_rule_destroy(self.ctxt, rule.id)
self.assertEqual([], db.provider_fw_rule_get_all(self.ctxt))
class CertificateTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(CertificateTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.created = self._certificates_create()
def _get_certs_values(self):
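        # Return three certificate dicts whose user_id, project_id and
        # file_name are suffixed with 1, 2 and 3 respectively.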
base_values = {
'user_id': 'user',
'project_id': 'project',
'file_name': 'filename'
}
return [{k: v + str(x) for k, v in base_values.iteritems()}
for x in xrange(1, 4)]
def _certificates_create(self):
return [db.certificate_create(self.ctxt, cert)
for cert in self._get_certs_values()]
def test_certificate_create(self):
ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
'updated_at']
for i, cert in enumerate(self._get_certs_values()):
self._assertEqualObjects(self.created[i], cert,
ignored_keys=ignored_keys)
def test_certificate_get_all_by_project(self):
cert = db.certificate_get_all_by_project(self.ctxt,
self.created[1].project_id)
self._assertEqualObjects(self.created[1], cert[0])
def test_certificate_get_all_by_user(self):
cert = db.certificate_get_all_by_user(self.ctxt,
self.created[1].user_id)
self._assertEqualObjects(self.created[1], cert[0])
def test_certificate_get_all_by_user_and_project(self):
cert = db.certificate_get_all_by_user_and_project(self.ctxt,
self.created[1].user_id, self.created[1].project_id)
self._assertEqualObjects(self.created[1], cert[0])
class ConsoleTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(ConsoleTestCase, self).setUp()
self.ctxt = context.get_admin_context()
pools_data = [
{'address': '192.168.10.10',
'username': 'user1',
'password': 'passwd1',
'console_type': 'type1',
'public_hostname': 'public_host1',
'host': 'host1',
'compute_host': 'compute_host1',
},
{'address': '192.168.10.11',
'username': 'user2',
'password': 'passwd2',
'console_type': 'type2',
'public_hostname': 'public_host2',
'host': 'host2',
'compute_host': 'compute_host2',
},
]
self.console_pools = [db.console_pool_create(self.ctxt, val)
for val in pools_data]
instance_uuid = uuidutils.generate_uuid()
db.instance_create(self.ctxt, {'uuid': instance_uuid})
self.console_data = [{'instance_name': 'name' + str(x),
'instance_uuid': instance_uuid,
'password': 'pass' + str(x),
'port': 7878 + x,
'pool_id': self.console_pools[x]['id']}
for x in xrange(len(pools_data))]
self.consoles = [db.console_create(self.ctxt, val)
for val in self.console_data]
def test_console_create(self):
ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
'updated_at']
for console in self.consoles:
self.assertIsNotNone(console['id'])
self._assertEqualListsOfObjects(self.console_data, self.consoles,
ignored_keys=ignored_keys)
def test_console_get_by_id(self):
console = self.consoles[0]
console_get = db.console_get(self.ctxt, console['id'])
self._assertEqualObjects(console, console_get,
ignored_keys=['pool'])
def test_console_get_by_id_uuid(self):
console = self.consoles[0]
console_get = db.console_get(self.ctxt, console['id'],
console['instance_uuid'])
self._assertEqualObjects(console, console_get,
ignored_keys=['pool'])
def test_console_get_by_pool_instance(self):
console = self.consoles[0]
console_get = db.console_get_by_pool_instance(self.ctxt,
console['pool_id'], console['instance_uuid'])
self._assertEqualObjects(console, console_get,
ignored_keys=['pool'])
def test_console_get_all_by_instance(self):
instance_uuid = self.consoles[0]['instance_uuid']
consoles_get = db.console_get_all_by_instance(self.ctxt, instance_uuid)
self._assertEqualListsOfObjects(self.consoles, consoles_get)
def test_console_get_all_by_instance_with_pool(self):
instance_uuid = self.consoles[0]['instance_uuid']
consoles_get = db.console_get_all_by_instance(self.ctxt, instance_uuid,
columns_to_join=['pool'])
self._assertEqualListsOfObjects(self.consoles, consoles_get,
ignored_keys=['pool'])
        self._assertEqualListsOfObjects(self.console_pools,
                                        [c['pool'] for c in consoles_get])
def test_console_get_all_by_instance_empty(self):
consoles_get = db.console_get_all_by_instance(self.ctxt,
uuidutils.generate_uuid())
self.assertEqual(consoles_get, [])
def test_console_delete(self):
console_id = self.consoles[0]['id']
db.console_delete(self.ctxt, console_id)
self.assertRaises(exception.ConsoleNotFound, db.console_get,
self.ctxt, console_id)
def test_console_get_by_pool_instance_not_found(self):
self.assertRaises(exception.ConsoleNotFoundInPoolForInstance,
db.console_get_by_pool_instance, self.ctxt,
self.consoles[0]['pool_id'],
uuidutils.generate_uuid())
def test_console_get_not_found(self):
self.assertRaises(exception.ConsoleNotFound, db.console_get,
self.ctxt, 100500)
def test_console_get_not_found_instance(self):
self.assertRaises(exception.ConsoleNotFoundForInstance, db.console_get,
self.ctxt, self.consoles[0]['id'],
uuidutils.generate_uuid())
class CellTestCase(test.TestCase, ModelsObjectComparatorMixin):
_ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
def setUp(self):
super(CellTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_cell_base_values(self):
return {
'name': 'myname',
'api_url': 'apiurl',
'transport_url': 'transporturl',
'weight_offset': 0.5,
'weight_scale': 1.5,
'is_parent': True,
}
def _cell_value_modify(self, value, step):
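        # Perturb a base cell value by `step` so each generated cell is
        # distinct; bool is tested before int because bool is a subclass of
        # int.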
if isinstance(value, str):
return value + str(step)
elif isinstance(value, float):
return value + step + 0.6
elif isinstance(value, bool):
return bool(step % 2)
elif isinstance(value, int):
return value + step
def _create_cells(self):
test_values = []
for x in xrange(1, 4):
modified_val = {k: self._cell_value_modify(v, x)
for k, v in self._get_cell_base_values().iteritems()}
db.cell_create(self.ctxt, modified_val)
test_values.append(modified_val)
return test_values
def test_cell_create(self):
cell = db.cell_create(self.ctxt, self._get_cell_base_values())
self.assertIsNotNone(cell['id'])
self._assertEqualObjects(cell, self._get_cell_base_values(),
ignored_keys=self._ignored_keys)
def test_cell_update(self):
db.cell_create(self.ctxt, self._get_cell_base_values())
new_values = {
'api_url': 'apiurl1',
'transport_url': 'transporturl1',
'weight_offset': 0.6,
'weight_scale': 1.6,
'is_parent': False,
}
test_cellname = self._get_cell_base_values()['name']
updated_cell = db.cell_update(self.ctxt, test_cellname, new_values)
self._assertEqualObjects(updated_cell, new_values,
ignored_keys=self._ignored_keys + ['name'])
def test_cell_delete(self):
new_cells = self._create_cells()
for cell in new_cells:
test_cellname = cell['name']
db.cell_delete(self.ctxt, test_cellname)
self.assertRaises(exception.CellNotFound, db.cell_get, self.ctxt,
test_cellname)
def test_cell_get(self):
new_cells = self._create_cells()
for cell in new_cells:
cell_get = db.cell_get(self.ctxt, cell['name'])
self._assertEqualObjects(cell_get, cell,
ignored_keys=self._ignored_keys)
def test_cell_get_all(self):
new_cells = self._create_cells()
cells = db.cell_get_all(self.ctxt)
self.assertEqual(len(new_cells), len(cells))
cells_byname = {newcell['name']: newcell
for newcell in new_cells}
for cell in cells:
self._assertEqualObjects(cell, cells_byname[cell['name']],
self._ignored_keys)
def test_cell_get_not_found(self):
self._create_cells()
self.assertRaises(exception.CellNotFound, db.cell_get, self.ctxt,
'cellnotinbase')
def test_cell_update_not_found(self):
self._create_cells()
self.assertRaises(exception.CellNotFound, db.cell_update, self.ctxt,
'cellnotinbase', self._get_cell_base_values())
def test_cell_create_exists(self):
db.cell_create(self.ctxt, self._get_cell_base_values())
self.assertRaises(exception.CellExists, db.cell_create,
self.ctxt, self._get_cell_base_values())
class ConsolePoolTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(ConsolePoolTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.test_console_pool_1 = {
'address': '192.168.2.10',
'username': 'user_1',
'password': 'secret_123',
'console_type': 'type_1',
'public_hostname': 'public_hostname_123',
'host': 'localhost',
'compute_host': '127.0.0.1',
}
self.test_console_pool_2 = {
'address': '192.168.2.11',
'username': 'user_2',
'password': 'secret_1234',
'console_type': 'type_2',
'public_hostname': 'public_hostname_1234',
'host': '127.0.0.1',
'compute_host': 'localhost',
}
self.test_console_pool_3 = {
'address': '192.168.2.12',
'username': 'user_3',
'password': 'secret_12345',
'console_type': 'type_2',
'public_hostname': 'public_hostname_12345',
'host': '127.0.0.1',
'compute_host': '192.168.1.1',
}
def test_console_pool_create(self):
console_pool = db.console_pool_create(
self.ctxt, self.test_console_pool_1)
self.assertIsNotNone(console_pool.get('id'))
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id']
self._assertEqualObjects(
console_pool, self.test_console_pool_1, ignored_keys)
def test_console_pool_create_duplicate(self):
db.console_pool_create(self.ctxt, self.test_console_pool_1)
self.assertRaises(exception.ConsolePoolExists, db.console_pool_create,
self.ctxt, self.test_console_pool_1)
def test_console_pool_get_by_host_type(self):
params = [
self.test_console_pool_1,
self.test_console_pool_2,
]
for p in params:
db.console_pool_create(self.ctxt, p)
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id', 'consoles']
cp = self.test_console_pool_1
db_cp = db.console_pool_get_by_host_type(
self.ctxt, cp['compute_host'], cp['host'], cp['console_type']
)
self._assertEqualObjects(cp, db_cp, ignored_keys)
    def test_console_pool_get_by_host_type_no_results(self):
self.assertRaises(
exception.ConsolePoolNotFoundForHostType,
db.console_pool_get_by_host_type, self.ctxt, 'compute_host',
'host', 'console_type')
def test_console_pool_get_all_by_host_type(self):
params = [
self.test_console_pool_1,
self.test_console_pool_2,
self.test_console_pool_3,
]
for p in params:
db.console_pool_create(self.ctxt, p)
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id', 'consoles']
cp = self.test_console_pool_2
db_cp = db.console_pool_get_all_by_host_type(
self.ctxt, cp['host'], cp['console_type'])
self._assertEqualListsOfObjects(
db_cp, [self.test_console_pool_2, self.test_console_pool_3],
ignored_keys)
def test_console_pool_get_all_by_host_type_no_results(self):
res = db.console_pool_get_all_by_host_type(
self.ctxt, 'cp_host', 'cp_console_type')
self.assertEqual([], res)
class DnsdomainTestCase(test.TestCase):
def setUp(self):
super(DnsdomainTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.domain = 'test.domain'
self.testzone = 'testzone'
self.project = 'fake'
def test_dnsdomain_register_for_zone(self):
db.dnsdomain_register_for_zone(self.ctxt, self.domain, self.testzone)
domain = db.dnsdomain_get(self.ctxt, self.domain)
self.assertEqual(domain['domain'], self.domain)
self.assertEqual(domain['availability_zone'], self.testzone)
self.assertEqual(domain['scope'], 'private')
def test_dnsdomain_register_for_project(self):
db.dnsdomain_register_for_project(self.ctxt, self.domain, self.project)
domain = db.dnsdomain_get(self.ctxt, self.domain)
self.assertEqual(domain['domain'], self.domain)
self.assertEqual(domain['project_id'], self.project)
self.assertEqual(domain['scope'], 'public')
def test_dnsdomain_unregister(self):
db.dnsdomain_register_for_zone(self.ctxt, self.domain, self.testzone)
db.dnsdomain_unregister(self.ctxt, self.domain)
domain = db.dnsdomain_get(self.ctxt, self.domain)
self.assertIsNone(domain)
def test_dnsdomain_get_all(self):
d_list = ['test.domain.one', 'test.domain.two']
db.dnsdomain_register_for_zone(self.ctxt, d_list[0], 'zone')
db.dnsdomain_register_for_zone(self.ctxt, d_list[1], 'zone')
db_list = db.dnsdomain_get_all(self.ctxt)
db_domain_list = [d.domain for d in db_list]
self.assertEqual(sorted(d_list), sorted(db_domain_list))
class BwUsageTestCase(test.TestCase, ModelsObjectComparatorMixin):
_ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
def setUp(self):
super(BwUsageTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.useFixture(test.TimeOverride())
def test_bw_usage_get_by_uuids(self):
now = timeutils.utcnow()
start_period = now - datetime.timedelta(seconds=10)
uuid3_refreshed = now - datetime.timedelta(seconds=5)
expected_bw_usages = {
'fake_uuid1': {'uuid': 'fake_uuid1',
'mac': 'fake_mac1',
'start_period': start_period,
'bw_in': 100,
'bw_out': 200,
'last_ctr_in': 12345,
'last_ctr_out': 67890,
'last_refreshed': now},
'fake_uuid2': {'uuid': 'fake_uuid2',
'mac': 'fake_mac2',
'start_period': start_period,
'bw_in': 200,
'bw_out': 300,
'last_ctr_in': 22345,
'last_ctr_out': 77890,
'last_refreshed': now},
'fake_uuid3': {'uuid': 'fake_uuid3',
'mac': 'fake_mac3',
'start_period': start_period,
'bw_in': 400,
'bw_out': 500,
'last_ctr_in': 32345,
'last_ctr_out': 87890,
'last_refreshed': uuid3_refreshed}
}
bw_usages = db.bw_usage_get_by_uuids(self.ctxt,
['fake_uuid1', 'fake_uuid2'], start_period)
# No matches
self.assertEqual(len(bw_usages), 0)
# Add 3 entries
db.bw_usage_update(self.ctxt, 'fake_uuid1',
'fake_mac1', start_period,
100, 200, 12345, 67890)
db.bw_usage_update(self.ctxt, 'fake_uuid2',
'fake_mac2', start_period,
100, 200, 42, 42)
# Test explicit refreshed time
db.bw_usage_update(self.ctxt, 'fake_uuid3',
'fake_mac3', start_period,
400, 500, 32345, 87890,
last_refreshed=uuid3_refreshed)
# Update 2nd entry
db.bw_usage_update(self.ctxt, 'fake_uuid2',
'fake_mac2', start_period,
200, 300, 22345, 77890)
bw_usages = db.bw_usage_get_by_uuids(self.ctxt,
['fake_uuid1', 'fake_uuid2', 'fake_uuid3'], start_period)
self.assertEqual(len(bw_usages), 3)
for usage in bw_usages:
self._assertEqualObjects(expected_bw_usages[usage['uuid']], usage,
ignored_keys=self._ignored_keys)
def test_bw_usage_get(self):
now = timeutils.utcnow()
start_period = now - datetime.timedelta(seconds=10)
expected_bw_usage = {'uuid': 'fake_uuid1',
'mac': 'fake_mac1',
'start_period': start_period,
'bw_in': 100,
'bw_out': 200,
'last_ctr_in': 12345,
'last_ctr_out': 67890,
'last_refreshed': now}
bw_usage = db.bw_usage_get(self.ctxt, 'fake_uuid1', start_period,
'fake_mac1')
self.assertIsNone(bw_usage)
db.bw_usage_update(self.ctxt, 'fake_uuid1',
'fake_mac1', start_period,
100, 200, 12345, 67890)
bw_usage = db.bw_usage_get(self.ctxt, 'fake_uuid1', start_period,
'fake_mac1')
self._assertEqualObjects(bw_usage, expected_bw_usage,
ignored_keys=self._ignored_keys)
class Ec2TestCase(test.TestCase):
def setUp(self):
super(Ec2TestCase, self).setUp()
self.ctxt = context.RequestContext('fake_user', 'fake_project')
def test_ec2_ids_not_found_are_printable(self):
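        # Each lookup should raise a NotFound whose message includes the id
        # that was asked for.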
def check_exc_format(method, value):
try:
method(self.ctxt, value)
except exception.NotFound as exc:
self.assertIn(six.text_type(value), six.text_type(exc))
check_exc_format(db.get_instance_uuid_by_ec2_id, 123456)
check_exc_format(db.ec2_snapshot_get_by_ec2_id, 123456)
check_exc_format(db.ec2_snapshot_get_by_uuid, 'fake')
def test_ec2_volume_create(self):
vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
self.assertIsNotNone(vol['id'])
self.assertEqual(vol['uuid'], 'fake-uuid')
def test_ec2_volume_get_by_id(self):
vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
vol2 = db.ec2_volume_get_by_id(self.ctxt, vol['id'])
self.assertEqual(vol2['uuid'], vol['uuid'])
def test_ec2_volume_get_by_uuid(self):
vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
vol2 = db.ec2_volume_get_by_uuid(self.ctxt, vol['uuid'])
self.assertEqual(vol2['id'], vol['id'])
def test_ec2_snapshot_create(self):
snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
self.assertIsNotNone(snap['id'])
self.assertEqual(snap['uuid'], 'fake-uuid')
def test_ec2_snapshot_get_by_ec2_id(self):
snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
snap2 = db.ec2_snapshot_get_by_ec2_id(self.ctxt, snap['id'])
self.assertEqual(snap2['uuid'], 'fake-uuid')
def test_ec2_snapshot_get_by_uuid(self):
snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
snap2 = db.ec2_snapshot_get_by_uuid(self.ctxt, 'fake-uuid')
self.assertEqual(snap['id'], snap2['id'])
def test_ec2_snapshot_get_by_ec2_id_not_found(self):
self.assertRaises(exception.SnapshotNotFound,
db.ec2_snapshot_get_by_ec2_id,
self.ctxt, 123456)
def test_ec2_snapshot_get_by_uuid_not_found(self):
self.assertRaises(exception.SnapshotNotFound,
db.ec2_snapshot_get_by_uuid,
self.ctxt, 'fake-uuid')
def test_ec2_instance_create(self):
inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
self.assertIsNotNone(inst['id'])
self.assertEqual(inst['uuid'], 'fake-uuid')
def test_ec2_instance_get_by_uuid(self):
inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
inst2 = db.ec2_instance_get_by_uuid(self.ctxt, 'fake-uuid')
self.assertEqual(inst['id'], inst2['id'])
def test_ec2_instance_get_by_id(self):
inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
inst2 = db.ec2_instance_get_by_id(self.ctxt, inst['id'])
self.assertEqual(inst['id'], inst2['id'])
def test_ec2_instance_get_by_uuid_not_found(self):
self.assertRaises(exception.InstanceNotFound,
db.ec2_instance_get_by_uuid,
self.ctxt, 'uuid-not-present')
def test_ec2_instance_get_by_id_not_found(self):
self.assertRaises(exception.InstanceNotFound,
db.ec2_instance_get_by_uuid,
self.ctxt, 12345)
def test_get_instance_uuid_by_ec2_id(self):
inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
inst_uuid = db.get_instance_uuid_by_ec2_id(self.ctxt, inst['id'])
self.assertEqual(inst_uuid, 'fake-uuid')
def test_get_instance_uuid_by_ec2_id_not_found(self):
self.assertRaises(exception.InstanceNotFound,
db.get_instance_uuid_by_ec2_id,
self.ctxt, 100500)
class FlavorMigrationTestCase(test.TestCase):
def test_augment_flavor_to_migrate_no_extra_specs(self):
flavor = objects.Flavor()
db_flavor = {
'extra_specs': {'foo': 'bar'}}
sqlalchemy_api._augment_flavor_to_migrate(flavor, db_flavor)
self.assertTrue(flavor.obj_attr_is_set('extra_specs'))
self.assertEqual(db_flavor['extra_specs'], flavor.extra_specs)
def test_augment_flavor_to_migrate_extra_specs_merge(self):
flavor = objects.Flavor()
flavor.extra_specs = {'foo': '1', 'bar': '2'}
db_flavor = {
'extra_specs': {'bar': '3', 'baz': '4'}
}
sqlalchemy_api._augment_flavor_to_migrate(flavor, db_flavor)
self.assertEqual({'foo': '1', 'bar': '2', 'baz': '4'},
flavor.extra_specs)
@mock.patch('nova.db.sqlalchemy.api._augment_flavor_to_migrate')
def test_augment_flavors_to_migrate(self, mock_augment):
instance = objects.Instance()
instance.flavor = objects.Flavor(flavorid='foo')
instance.old_flavor = None
instance.new_flavor = None
sqlalchemy_api._augment_flavors_to_migrate(instance,
{'foo': 'bar'})
mock_augment.assert_called_once_with(instance.flavor, 'bar')
@mock.patch('nova.db.sqlalchemy.api._augment_flavor_to_migrate')
@mock.patch('nova.db.sqlalchemy.api.flavor_get_by_flavor_id')
def test_augment_flavors_to_migrate_uses_cache(self, mock_get,
mock_augment):
instance = objects.Instance(context=context.get_admin_context())
instance.flavor = objects.Flavor(flavorid='foo')
instance.old_flavor = objects.Flavor(flavorid='foo')
instance.new_flavor = objects.Flavor(flavorid='bar')
flavor_cache = {'bar': 'bar_flavor'}
mock_get.return_value = 'foo_flavor'
sqlalchemy_api._augment_flavors_to_migrate(instance, flavor_cache)
self.assertIn('foo', flavor_cache)
self.assertEqual('foo_flavor', flavor_cache['foo'])
mock_get.assert_called_once_with(instance._context, 'foo', 'yes')
def test_migrate_flavor(self):
ctxt = context.get_admin_context()
flavor = flavors.get_default_flavor()
sysmeta = flavors.save_flavor_info({}, flavor)
db.flavor_extra_specs_update_or_create(ctxt, flavor.flavorid,
{'new_spec': 'foo'})
values = {'uuid': str(stdlib_uuid.uuid4()),
'system_metadata': sysmeta,
'extra': {'flavor': 'foobar'},
}
db.instance_create(ctxt, values)
values = {'uuid': str(stdlib_uuid.uuid4()),
'system_metadata': sysmeta,
'extra': {'flavor': None},
}
instance = db.instance_create(ctxt, values)
match, done = db.migrate_flavor_data(ctxt, None, {})
self.assertEqual(1, match)
self.assertEqual(1, done)
extra = db.instance_extra_get_by_instance_uuid(ctxt, instance['uuid'],
columns=['flavor'])
flavorinfo = jsonutils.loads(extra.flavor)
self.assertIsNone(flavorinfo['old'])
self.assertIsNone(flavorinfo['new'])
curflavor = obj_base.NovaObject.obj_from_primitive(flavorinfo['cur'])
self.assertEqual(flavor.flavorid, curflavor.flavorid)
def test_migrate_flavor_honors_limit(self):
ctxt = context.get_admin_context()
flavor = flavors.get_default_flavor()
sysmeta = flavors.save_flavor_info({}, flavor)
db.flavor_extra_specs_update_or_create(ctxt, flavor.flavorid,
{'new_spec': 'foo'})
for i in (1, 2, 3, 4, 5):
values = {'uuid': str(stdlib_uuid.uuid4()),
'system_metadata': sysmeta,
'extra': {'flavor': 'foobar'},
}
db.instance_create(ctxt, values)
values = {'uuid': str(stdlib_uuid.uuid4()),
'system_metadata': sysmeta,
'extra': {'flavor': None},
}
db.instance_create(ctxt, values)
match, done = db.migrate_flavor_data(ctxt, 2, {})
self.assertEqual(2, match)
self.assertEqual(2, done)
match, done = db.migrate_flavor_data(ctxt, 1, {})
self.assertEqual(1, match)
self.assertEqual(1, done)
match, done = db.migrate_flavor_data(ctxt, None, {})
self.assertEqual(2, match)
self.assertEqual(2, done)
match, done = db.migrate_flavor_data(ctxt, None, {})
self.assertEqual(0, match)
self.assertEqual(0, done)
def test_migrate_flavor_honors_states(self):
ctxt = context.get_admin_context()
flavor = flavors.get_default_flavor()
sysmeta = flavors.save_flavor_info({}, flavor)
values = {'uuid': str(stdlib_uuid.uuid4()),
'system_metadata': sysmeta,
'extra': {'flavor': None},
}
db.instance_create(ctxt, values)
values = {'uuid': str(stdlib_uuid.uuid4()),
'task_state': task_states.SPAWNING,
'system_metadata': sysmeta,
'extra': {'flavor': None},
}
db.instance_create(ctxt, values)
values = {'uuid': str(stdlib_uuid.uuid4()),
'vm_state': vm_states.RESCUED,
'system_metadata': sysmeta,
'extra': {'flavor': None},
}
db.instance_create(ctxt, values)
values = {'uuid': str(stdlib_uuid.uuid4()),
'vm_state': vm_states.RESIZED,
'system_metadata': sysmeta,
'extra': {'flavor': None},
}
db.instance_create(ctxt, values)
match, done = db.migrate_flavor_data(ctxt, None, {})
self.assertEqual(4, match)
self.assertEqual(1, done)
match, done = db.migrate_flavor_data(ctxt, None, {})
self.assertEqual(3, match)
self.assertEqual(0, done)
class ArchiveTestCase(test.TestCase):
def setUp(self):
super(ArchiveTestCase, self).setUp()
self.context = context.get_admin_context()
self.engine = get_engine()
self.conn = self.engine.connect()
self.instance_id_mappings = sqlalchemyutils.get_table(
self.engine, "instance_id_mappings")
self.shadow_instance_id_mappings = sqlalchemyutils.get_table(
self.engine, "shadow_instance_id_mappings")
self.dns_domains = sqlalchemyutils.get_table(
self.engine, "dns_domains")
self.shadow_dns_domains = sqlalchemyutils.get_table(
self.engine, "shadow_dns_domains")
self.consoles = sqlalchemyutils.get_table(self.engine, "consoles")
self.console_pools = sqlalchemyutils.get_table(
self.engine, "console_pools")
self.shadow_consoles = sqlalchemyutils.get_table(
self.engine, "shadow_consoles")
self.shadow_console_pools = sqlalchemyutils.get_table(
self.engine, "shadow_console_pools")
self.instances = sqlalchemyutils.get_table(self.engine, "instances")
self.shadow_instances = sqlalchemyutils.get_table(
self.engine, "shadow_instances")
self.uuidstrs = []
for unused in range(6):
self.uuidstrs.append(stdlib_uuid.uuid4().hex)
self.ids = []
self.id_tablenames_to_cleanup = set(["console_pools", "consoles"])
self.uuid_tablenames_to_cleanup = set(["instance_id_mappings",
"instances"])
self.domain_tablenames_to_cleanup = set(["dns_domains"])
def test_shadow_tables(self):
metadata = MetaData(bind=self.engine)
metadata.reflect()
for table_name in metadata.tables:
# NOTE(rpodolyaka): migration 209 introduced a few new tables,
# which don't have shadow tables and it's
# completely OK, so we should skip them here
if table_name.startswith("dump_"):
continue
            # NOTE(snikitin): migration 266 introduced a new table 'tags',
            # which has no shadow table, and that's completely OK, so we
            # should skip it here
if table_name == 'tags':
continue
if table_name.startswith("shadow_"):
self.assertIn(table_name[7:], metadata.tables)
continue
self.assertTrue(db_utils.check_shadow_table(self.engine,
table_name))
def test_archive_deleted_rows(self):
# Add 6 rows to table
for uuidstr in self.uuidstrs:
ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
self.conn.execute(ins_stmt)
# Set 4 to deleted
update_statement = self.instance_id_mappings.update().\
where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1)
self.conn.execute(update_statement)
qiim = sql.select([self.instance_id_mappings]).where(self.
instance_id_mappings.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qiim).fetchall()
# Verify we have 6 in main
self.assertEqual(len(rows), 6)
qsiim = sql.select([self.shadow_instance_id_mappings]).\
where(self.shadow_instance_id_mappings.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qsiim).fetchall()
# Verify we have 0 in shadow
self.assertEqual(len(rows), 0)
# Archive 2 rows
db.archive_deleted_rows(self.context, max_rows=2)
rows = self.conn.execute(qiim).fetchall()
# Verify we have 4 left in main
self.assertEqual(len(rows), 4)
rows = self.conn.execute(qsiim).fetchall()
# Verify we have 2 in shadow
self.assertEqual(len(rows), 2)
# Archive 2 more rows
db.archive_deleted_rows(self.context, max_rows=2)
rows = self.conn.execute(qiim).fetchall()
# Verify we have 2 left in main
self.assertEqual(len(rows), 2)
rows = self.conn.execute(qsiim).fetchall()
# Verify we have 4 in shadow
self.assertEqual(len(rows), 4)
# Try to archive more, but there are no deleted rows left.
db.archive_deleted_rows(self.context, max_rows=2)
rows = self.conn.execute(qiim).fetchall()
# Verify we still have 2 left in main
self.assertEqual(len(rows), 2)
rows = self.conn.execute(qsiim).fetchall()
# Verify we still have 4 in shadow
self.assertEqual(len(rows), 4)
def test_archive_deleted_rows_for_every_uuid_table(self):
tablenames = []
for model_class in models.__dict__.itervalues():
if hasattr(model_class, "__tablename__"):
tablenames.append(model_class.__tablename__)
tablenames.sort()
for tablename in tablenames:
ret = self._test_archive_deleted_rows_for_one_uuid_table(tablename)
if ret == 0:
self.uuid_tablenames_to_cleanup.add(tablename)
def _test_archive_deleted_rows_for_one_uuid_table(self, tablename):
""":returns: 0 on success, 1 if no uuid column, 2 if insert failed."""
main_table = sqlalchemyutils.get_table(self.engine, tablename)
if not hasattr(main_table.c, "uuid"):
# Not a uuid table, so skip it.
return 1
shadow_table = sqlalchemyutils.get_table(
self.engine, "shadow_" + tablename)
# Add 6 rows to table
for uuidstr in self.uuidstrs:
ins_stmt = main_table.insert().values(uuid=uuidstr)
try:
self.conn.execute(ins_stmt)
except db_exc.DBError:
# This table has constraints that require a table-specific
# insert, so skip it.
return 2
# Set 4 to deleted
update_statement = main_table.update().\
where(main_table.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1)
self.conn.execute(update_statement)
qmt = sql.select([main_table]).where(main_table.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qmt).fetchall()
# Verify we have 6 in main
self.assertEqual(len(rows), 6)
qst = sql.select([shadow_table]).\
where(shadow_table.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qst).fetchall()
# Verify we have 0 in shadow
self.assertEqual(len(rows), 0)
# Archive 2 rows
db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
# Verify we have 4 left in main
rows = self.conn.execute(qmt).fetchall()
self.assertEqual(len(rows), 4)
# Verify we have 2 in shadow
rows = self.conn.execute(qst).fetchall()
self.assertEqual(len(rows), 2)
# Archive 2 more rows
db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
# Verify we have 2 left in main
rows = self.conn.execute(qmt).fetchall()
self.assertEqual(len(rows), 2)
# Verify we have 4 in shadow
rows = self.conn.execute(qst).fetchall()
self.assertEqual(len(rows), 4)
# Try to archive more, but there are no deleted rows left.
db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
# Verify we still have 2 left in main
rows = self.conn.execute(qmt).fetchall()
self.assertEqual(len(rows), 2)
# Verify we still have 4 in shadow
rows = self.conn.execute(qst).fetchall()
self.assertEqual(len(rows), 4)
return 0
def test_archive_deleted_rows_no_id_column(self):
uuidstr0 = self.uuidstrs[0]
ins_stmt = self.dns_domains.insert().values(domain=uuidstr0)
self.conn.execute(ins_stmt)
update_statement = self.dns_domains.update().\
where(self.dns_domains.c.domain == uuidstr0).\
values(deleted=True)
self.conn.execute(update_statement)
qdd = sql.select([self.dns_domains], self.dns_domains.c.domain ==
uuidstr0)
rows = self.conn.execute(qdd).fetchall()
self.assertEqual(len(rows), 1)
qsdd = sql.select([self.shadow_dns_domains],
self.shadow_dns_domains.c.domain == uuidstr0)
rows = self.conn.execute(qsdd).fetchall()
self.assertEqual(len(rows), 0)
db.archive_deleted_rows(self.context, max_rows=1)
rows = self.conn.execute(qdd).fetchall()
self.assertEqual(len(rows), 0)
rows = self.conn.execute(qsdd).fetchall()
self.assertEqual(len(rows), 1)
def test_archive_deleted_rows_fk_constraint(self):
# consoles.pool_id depends on console_pools.id
# SQLite doesn't enforce foreign key constraints without a pragma.
dialect = self.engine.url.get_dialect()
if dialect == sqlite.dialect:
# We're seeing issues with foreign key support in SQLite 3.6.20
# SQLAlchemy doesn't support it at all with < SQLite 3.6.19
# It works fine in SQLite 3.7.
# So return early to skip this test if running SQLite < 3.7
import sqlite3
tup = sqlite3.sqlite_version_info
if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7):
self.skipTest(
'sqlite version too old for reliable SQLA foreign_keys')
self.conn.execute("PRAGMA foreign_keys = ON")
ins_stmt = self.console_pools.insert().values(deleted=1)
result = self.conn.execute(ins_stmt)
id1 = result.inserted_primary_key[0]
self.ids.append(id1)
ins_stmt = self.consoles.insert().values(deleted=1,
pool_id=id1)
result = self.conn.execute(ins_stmt)
id2 = result.inserted_primary_key[0]
self.ids.append(id2)
# The first try to archive console_pools should fail, due to FK.
num = db.archive_deleted_rows_for_table(self.context, "console_pools")
self.assertEqual(num, 0)
# Then archiving consoles should work.
num = db.archive_deleted_rows_for_table(self.context, "consoles")
self.assertEqual(num, 1)
# Then archiving console_pools should work.
num = db.archive_deleted_rows_for_table(self.context, "console_pools")
self.assertEqual(num, 1)
def test_archive_deleted_rows_2_tables(self):
# Add 6 rows to each table
for uuidstr in self.uuidstrs:
ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
self.conn.execute(ins_stmt)
ins_stmt2 = self.instances.insert().values(uuid=uuidstr)
self.conn.execute(ins_stmt2)
# Set 4 of each to deleted
update_statement = self.instance_id_mappings.update().\
where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1)
self.conn.execute(update_statement)
update_statement2 = self.instances.update().\
where(self.instances.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1)
self.conn.execute(update_statement2)
# Verify we have 6 in each main table
qiim = sql.select([self.instance_id_mappings]).where(
self.instance_id_mappings.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qiim).fetchall()
self.assertEqual(len(rows), 6)
qi = sql.select([self.instances]).where(self.instances.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qi).fetchall()
self.assertEqual(len(rows), 6)
# Verify we have 0 in each shadow table
qsiim = sql.select([self.shadow_instance_id_mappings]).\
where(self.shadow_instance_id_mappings.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qsiim).fetchall()
self.assertEqual(len(rows), 0)
qsi = sql.select([self.shadow_instances]).\
where(self.shadow_instances.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qsi).fetchall()
self.assertEqual(len(rows), 0)
# Archive 7 rows, which should be 4 in one table and 3 in the other.
db.archive_deleted_rows(self.context, max_rows=7)
# Verify we have 5 left in the two main tables combined
iim_rows = self.conn.execute(qiim).fetchall()
i_rows = self.conn.execute(qi).fetchall()
self.assertEqual(len(iim_rows) + len(i_rows), 5)
# Verify we have 7 in the two shadow tables combined.
siim_rows = self.conn.execute(qsiim).fetchall()
si_rows = self.conn.execute(qsi).fetchall()
self.assertEqual(len(siim_rows) + len(si_rows), 7)
# Archive the remaining deleted rows.
db.archive_deleted_rows(self.context, max_rows=1)
# Verify we have 4 total left in both main tables.
iim_rows = self.conn.execute(qiim).fetchall()
i_rows = self.conn.execute(qi).fetchall()
self.assertEqual(len(iim_rows) + len(i_rows), 4)
# Verify we have 8 in shadow
siim_rows = self.conn.execute(qsiim).fetchall()
si_rows = self.conn.execute(qsi).fetchall()
self.assertEqual(len(siim_rows) + len(si_rows), 8)
# Try to archive more, but there are no deleted rows left.
db.archive_deleted_rows(self.context, max_rows=500)
# Verify we have 4 total left in both main tables.
iim_rows = self.conn.execute(qiim).fetchall()
i_rows = self.conn.execute(qi).fetchall()
self.assertEqual(len(iim_rows) + len(i_rows), 4)
# Verify we have 8 in shadow
siim_rows = self.conn.execute(qsiim).fetchall()
si_rows = self.conn.execute(qsi).fetchall()
self.assertEqual(len(siim_rows) + len(si_rows), 8)
class InstanceGroupDBApiTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(InstanceGroupDBApiTestCase, self).setUp()
self.user_id = 'fake_user'
self.project_id = 'fake_project'
self.context = context.RequestContext(self.user_id, self.project_id)
def _get_default_values(self):
return {'name': 'fake_name',
'user_id': self.user_id,
'project_id': self.project_id}
def _create_instance_group(self, context, values, policies=None,
members=None):
return db.instance_group_create(context, values, policies=policies,
members=members)
def test_instance_group_create_no_key(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
ignored_keys = ['id', 'uuid', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result, values, ignored_keys)
self.assertTrue(uuidutils.is_uuid_like(result['uuid']))
def test_instance_group_create_with_key(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result, values, ignored_keys)
def test_instance_group_create_with_same_key(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
self._create_instance_group(self.context, values)
self.assertRaises(exception.InstanceGroupIdExists,
self._create_instance_group, self.context, values)
def test_instance_group_get(self):
values = self._get_default_values()
result1 = self._create_instance_group(self.context, values)
result2 = db.instance_group_get(self.context, result1['uuid'])
self._assertEqualObjects(result1, result2)
def test_instance_group_update_simple(self):
values = self._get_default_values()
result1 = self._create_instance_group(self.context, values)
values = {'name': 'new_name', 'user_id': 'new_user',
'project_id': 'new_project'}
db.instance_group_update(self.context, result1['uuid'],
values)
result2 = db.instance_group_get(self.context, result1['uuid'])
self.assertEqual(result1['uuid'], result2['uuid'])
ignored_keys = ['id', 'uuid', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result2, values, ignored_keys)
def test_instance_group_delete(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
db.instance_group_delete(self.context, result['uuid'])
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_delete, self.context,
result['uuid'])
def test_instance_group_get_nonexistent(self):
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_get,
self.context,
'nonexistent')
def test_instance_group_delete_nonexistent(self):
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_delete,
self.context,
'nonexistent')
def test_instance_group_get_all(self):
groups = db.instance_group_get_all(self.context)
self.assertEqual(0, len(groups))
value = self._get_default_values()
result1 = self._create_instance_group(self.context, value)
groups = db.instance_group_get_all(self.context)
self.assertEqual(1, len(groups))
value = self._get_default_values()
result2 = self._create_instance_group(self.context, value)
groups = db.instance_group_get_all(self.context)
results = [result1, result2]
self._assertEqualListsOfObjects(results, groups)
def test_instance_group_get_all_by_project_id(self):
groups = db.instance_group_get_all_by_project_id(self.context,
'invalid_project_id')
self.assertEqual(0, len(groups))
values = self._get_default_values()
result1 = self._create_instance_group(self.context, values)
groups = db.instance_group_get_all_by_project_id(self.context,
'fake_project')
self.assertEqual(1, len(groups))
values = self._get_default_values()
values['project_id'] = 'new_project_id'
result2 = self._create_instance_group(self.context, values)
groups = db.instance_group_get_all(self.context)
results = [result1, result2]
self._assertEqualListsOfObjects(results, groups)
projects = [{'name': 'fake_project', 'value': [result1]},
{'name': 'new_project_id', 'value': [result2]}]
for project in projects:
groups = db.instance_group_get_all_by_project_id(self.context,
project['name'])
self._assertEqualListsOfObjects(project['value'], groups)
def test_instance_group_update(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
ignored_keys = ['id', 'uuid', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result, values, ignored_keys)
self.assertTrue(uuidutils.is_uuid_like(result['uuid']))
id = result['uuid']
values = self._get_default_values()
values['name'] = 'new_fake_name'
db.instance_group_update(self.context, id, values)
result = db.instance_group_get(self.context, id)
self.assertEqual(result['name'], 'new_fake_name')
        # update members
values = self._get_default_values()
members = ['instance_id1', 'instance_id2']
values['members'] = members
db.instance_group_update(self.context, id, values)
result = db.instance_group_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(result['members'], members)
        # update policies
values = self._get_default_values()
policies = ['policy1', 'policy2']
values['policies'] = policies
db.instance_group_update(self.context, id, values)
result = db.instance_group_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(result['policies'], policies)
# test invalid ID
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_update, self.context,
'invalid_id', values)
def test_instance_group_get_by_instance(self):
values = self._get_default_values()
group1 = self._create_instance_group(self.context, values)
members = ['instance_id1', 'instance_id2']
db.instance_group_members_add(self.context, group1.uuid, members)
group2 = db.instance_group_get_by_instance(self.context,
'instance_id1')
self.assertEqual(group2.uuid, group1.uuid)
class InstanceGroupMembersDBApiTestCase(InstanceGroupDBApiTestCase):
def test_instance_group_members_on_create(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
members = ['instance_id1', 'instance_id2']
result = self._create_instance_group(self.context, values,
members=members)
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result, values, ignored_keys)
self._assertEqualListsOfPrimitivesAsSets(result['members'], members)
def test_instance_group_members_add(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
members = db.instance_group_members_get(self.context, id)
self.assertEqual(members, [])
members2 = ['instance_id1', 'instance_id2']
db.instance_group_members_add(self.context, id, members2)
members = db.instance_group_members_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(members, members2)
def test_instance_group_members_update(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
members2 = ['instance_id1', 'instance_id2']
db.instance_group_members_add(self.context, id, members2)
members = db.instance_group_members_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(members, members2)
# check add with existing keys
members3 = ['instance_id1', 'instance_id2', 'instance_id3']
db.instance_group_members_add(self.context, id, members3)
members = db.instance_group_members_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(members, members3)
def test_instance_group_members_delete(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
members3 = ['instance_id1', 'instance_id2', 'instance_id3']
db.instance_group_members_add(self.context, id, members3)
members = db.instance_group_members_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(members, members3)
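        # Iterate over a copy of the list so items can be removed from
        # members3 while looping.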
for instance_id in members3[:]:
db.instance_group_member_delete(self.context, id, instance_id)
members3.remove(instance_id)
members = db.instance_group_members_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(members, members3)
def test_instance_group_members_invalid_ids(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
id = result['uuid']
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_members_get,
self.context, 'invalid')
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_member_delete, self.context,
'invalidid', 'instance_id1')
members = ['instance_id1', 'instance_id2']
db.instance_group_members_add(self.context, id, members)
self.assertRaises(exception.InstanceGroupMemberNotFound,
db.instance_group_member_delete,
self.context, id, 'invalid_id')
class InstanceGroupPoliciesDBApiTestCase(InstanceGroupDBApiTestCase):
def test_instance_group_policies_on_create(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
policies = ['policy1', 'policy2']
result = self._create_instance_group(self.context, values,
policies=policies)
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result, values, ignored_keys)
self._assertEqualListsOfPrimitivesAsSets(result['policies'], policies)
def test_instance_group_policies_add(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
policies = db.instance_group_policies_get(self.context, id)
self.assertEqual(policies, [])
policies2 = ['policy1', 'policy2']
db.instance_group_policies_add(self.context, id, policies2)
policies = db.instance_group_policies_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(policies, policies2)
def test_instance_group_policies_update(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
policies2 = ['policy1', 'policy2']
db.instance_group_policies_add(self.context, id, policies2)
policies = db.instance_group_policies_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(policies, policies2)
policies3 = ['policy1', 'policy2', 'policy3']
db.instance_group_policies_add(self.context, id, policies3)
policies = db.instance_group_policies_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(policies, policies3)
def test_instance_group_policies_delete(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
policies3 = ['policy1', 'policy2', 'policy3']
db.instance_group_policies_add(self.context, id, policies3)
policies = db.instance_group_policies_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(policies, policies3)
for policy in policies3[:]:
db.instance_group_policy_delete(self.context, id, policy)
policies3.remove(policy)
policies = db.instance_group_policies_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(policies, policies3)
def test_instance_group_policies_invalid_ids(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
id = result['uuid']
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_policies_get,
self.context, 'invalid')
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_policy_delete, self.context,
'invalidid', 'policy1')
policies = ['policy1', 'policy2']
db.instance_group_policies_add(self.context, id, policies)
self.assertRaises(exception.InstanceGroupPolicyNotFound,
db.instance_group_policy_delete,
self.context, id, 'invalid_policy')
class PciDeviceDBApiTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(PciDeviceDBApiTestCase, self).setUp()
self.user_id = 'fake_user'
self.project_id = 'fake_project'
self.context = context.RequestContext(self.user_id, self.project_id)
self.admin_context = context.get_admin_context()
self.ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
def _get_fake_pci_devs(self):
return {'id': 3353,
'compute_node_id': 1,
'address': '0000:0f:08.7',
'vendor_id': '8086',
'product_id': '1520',
'numa_node': 1,
'dev_type': 'type-VF',
'dev_id': 'pci_0000:0f:08.7',
'extra_info': None,
'label': 'label_8086_1520',
'status': 'available',
'instance_uuid': '00000000-0000-0000-0000-000000000010',
'request_id': None,
}, {'id': 3356,
'compute_node_id': 1,
'address': '0000:0f:03.7',
'vendor_id': '8083',
'product_id': '1523',
'numa_node': 0,
'dev_type': 'type-VF',
'dev_id': 'pci_0000:0f:08.7',
'extra_info': None,
'label': 'label_8086_1520',
'status': 'available',
'instance_uuid': '00000000-0000-0000-0000-000000000010',
'request_id': None,
}
def _create_fake_pci_devs(self):
v1, v2 = self._get_fake_pci_devs()
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
db.pci_device_update(self.admin_context, v2['compute_node_id'],
v2['address'], v2)
return (v1, v2)
def test_pci_device_get_by_addr(self):
v1, v2 = self._create_fake_pci_devs()
result = db.pci_device_get_by_addr(self.admin_context, 1,
'0000:0f:08.7')
self._assertEqualObjects(v1, result, self.ignored_keys)
def test_pci_device_get_by_addr_not_found(self):
self._create_fake_pci_devs()
self.assertRaises(exception.PciDeviceNotFound,
db.pci_device_get_by_addr, self.admin_context,
1, '0000:0f:08:09')
def test_pci_device_get_by_addr_low_priv(self):
self._create_fake_pci_devs()
self.assertRaises(exception.AdminRequired,
db.pci_device_get_by_addr,
self.context, 1, '0000:0f:08.7')
def test_pci_device_get_by_id(self):
v1, v2 = self._create_fake_pci_devs()
result = db.pci_device_get_by_id(self.admin_context, 3353)
self._assertEqualObjects(v1, result, self.ignored_keys)
def test_pci_device_get_by_id_not_found(self):
self._create_fake_pci_devs()
self.assertRaises(exception.PciDeviceNotFoundById,
db.pci_device_get_by_id,
self.admin_context, 3354)
def test_pci_device_get_by_id_low_priv(self):
self._create_fake_pci_devs()
self.assertRaises(exception.AdminRequired,
db.pci_device_get_by_id,
self.context, 3553)
def test_pci_device_get_all_by_node(self):
v1, v2 = self._create_fake_pci_devs()
results = db.pci_device_get_all_by_node(self.admin_context, 1)
self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
def test_pci_device_get_all_by_node_empty(self):
v1, v2 = self._get_fake_pci_devs()
results = db.pci_device_get_all_by_node(self.admin_context, 9)
self.assertEqual(len(results), 0)
def test_pci_device_get_all_by_node_low_priv(self):
self._create_fake_pci_devs()
self.assertRaises(exception.AdminRequired,
db.pci_device_get_all_by_node,
self.context, 1)
def test_pci_device_get_by_instance_uuid(self):
v1, v2 = self._get_fake_pci_devs()
v1['status'] = 'allocated'
v2['status'] = 'allocated'
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
db.pci_device_update(self.admin_context, v2['compute_node_id'],
v2['address'], v2)
results = db.pci_device_get_all_by_instance_uuid(
self.context,
'00000000-0000-0000-0000-000000000010')
self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
def test_pci_device_get_by_instance_uuid_check_status(self):
v1, v2 = self._get_fake_pci_devs()
v1['status'] = 'allocated'
v2['status'] = 'claimed'
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
db.pci_device_update(self.admin_context, v2['compute_node_id'],
v2['address'], v2)
results = db.pci_device_get_all_by_instance_uuid(
self.context,
'00000000-0000-0000-0000-000000000010')
self._assertEqualListsOfObjects(results, [v1], self.ignored_keys)
def test_pci_device_update(self):
v1, v2 = self._get_fake_pci_devs()
v1['status'] = 'allocated'
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
result = db.pci_device_get_by_addr(
self.admin_context, 1, '0000:0f:08.7')
self._assertEqualObjects(v1, result, self.ignored_keys)
v1['status'] = 'claimed'
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
result = db.pci_device_get_by_addr(
self.admin_context, 1, '0000:0f:08.7')
self._assertEqualObjects(v1, result, self.ignored_keys)
def test_pci_device_update_low_priv(self):
v1, v2 = self._get_fake_pci_devs()
self.assertRaises(exception.AdminRequired,
db.pci_device_update, self.context,
v1['compute_node_id'], v1['address'], v1)
def test_pci_device_destroy(self):
v1, v2 = self._create_fake_pci_devs()
results = db.pci_device_get_all_by_node(self.admin_context, 1)
self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
db.pci_device_destroy(self.admin_context, v1['compute_node_id'],
v1['address'])
results = db.pci_device_get_all_by_node(self.admin_context, 1)
self._assertEqualListsOfObjects(results, [v2], self.ignored_keys)
def test_pci_device_destroy_exception(self):
v1, v2 = self._get_fake_pci_devs()
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
results = db.pci_device_get_all_by_node(self.admin_context, 1)
self._assertEqualListsOfObjects(results, [v1], self.ignored_keys)
self.assertRaises(exception.PciDeviceNotFound,
db.pci_device_destroy,
self.admin_context,
v2['compute_node_id'],
v2['address'])
class RetryOnDeadlockTestCase(test.TestCase):
def test_without_deadlock(self):
@sqlalchemy_api._retry_on_deadlock
def call_api(*args, **kwargs):
return True
self.assertTrue(call_api())
def test_raise_deadlock(self):
self.attempts = 2
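        # The decorated function below raises DBDeadlock twice before it
        # returns True, so a successful call shows the retry decorator
        # re-invokes it after each deadlock.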
@sqlalchemy_api._retry_on_deadlock
def call_api(*args, **kwargs):
while self.attempts:
self.attempts = self.attempts - 1
raise db_exc.DBDeadlock("fake exception")
return True
self.assertTrue(call_api())
class TestSqlalchemyTypesRepr(test_base.DbTestCase):
def setUp(self):
super(TestSqlalchemyTypesRepr, self).setUp()
meta = MetaData(bind=self.engine)
self.table = Table(
'cidr_tbl',
meta,
Column('id', Integer, primary_key=True),
Column('addr', col_types.CIDR())
)
self.table.create()
self.addCleanup(meta.drop_all)
def test_cidr_repr(self):
addrs = [('192.168.3.0/24', '192.168.3.0/24'),
('2001:db8::/64', '2001:db8::/64'),
('192.168.3.0', '192.168.3.0/32'),
('2001:db8::', '2001:db8::/128'),
(None, None)]
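        # Each tuple pairs the inserted value with the canonical text the CIDR
        # column type is expected to return (bare hosts gain /32 or /128).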
with self.engine.begin() as conn:
for i in addrs:
conn.execute(self.table.insert(), {'addr': i[0]})
query = self.table.select().order_by(self.table.c.id)
result = conn.execute(query)
for idx, row in enumerate(result):
self.assertEqual(addrs[idx][1], row.addr)
class TestMySQLSqlalchemyTypesRepr(TestSqlalchemyTypesRepr,
test_base.MySQLOpportunisticTestCase):
pass
class TestPostgreSQLSqlalchemyTypesRepr(TestSqlalchemyTypesRepr,
test_base.PostgreSQLOpportunisticTestCase):
pass
class TestDBInstanceTags(test.TestCase):
sample_data = {
'project_id': 'project1',
'hostname': 'example.com',
'host': 'h1',
'node': 'n1',
'metadata': {'mkey1': 'mval1', 'mkey2': 'mval2'},
'system_metadata': {'smkey1': 'smval1', 'smkey2': 'smval2'},
'info_cache': {'ckey': 'cvalue'}
}
def setUp(self):
super(TestDBInstanceTags, self).setUp()
self.user_id = 'user1'
self.project_id = 'project1'
self.context = context.RequestContext(self.user_id, self.project_id)
def _create_instance(self):
inst = db.instance_create(self.context, self.sample_data)
return inst['uuid']
def _get_tags_from_resp(self, tag_refs):
return [(t.resource_id, t.tag) for t in tag_refs]
def test_instance_tag_add(self):
uuid = self._create_instance()
tag = 'tag'
tag_ref = db.instance_tag_add(self.context, uuid, tag)
self.assertEqual(uuid, tag_ref.resource_id)
self.assertEqual(tag, tag_ref.tag)
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
# Check the tag for the instance was added
tags = self._get_tags_from_resp(tag_refs)
self.assertEqual([(uuid, tag)], tags)
def test_instance_tag_add_duplication(self):
uuid = self._create_instance()
tag = 'tag'
for x in xrange(5):
db.instance_tag_add(self.context, uuid, tag)
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
        # Check that only one tag was added for the instance
tags = self._get_tags_from_resp(tag_refs)
self.assertEqual([(uuid, tag)], tags)
def test_instance_tag_set(self):
uuid = self._create_instance()
tag1 = 'tag1'
tag2 = 'tag2'
tag3 = 'tag3'
tag4 = 'tag4'
# Set tags to the instance
db.instance_tag_set(self.context, uuid, [tag1, tag2])
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
# Check the tags for the instance were set
tags = self._get_tags_from_resp(tag_refs)
expected = [(uuid, tag1), (uuid, tag2)]
self.assertEqual(expected, tags)
# Set new tags to the instance
db.instance_tag_set(self.context, uuid, [tag3, tag4, tag2])
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
# Check the tags for the instance were replaced
tags = self._get_tags_from_resp(tag_refs)
expected = [(uuid, tag3), (uuid, tag4), (uuid, tag2)]
self.assertEqual(set(expected), set(tags))
def test_instance_tag_get_by_instance_uuid(self):
uuid1 = self._create_instance()
uuid2 = self._create_instance()
tag1 = 'tag1'
tag2 = 'tag2'
tag3 = 'tag3'
db.instance_tag_add(self.context, uuid1, tag1)
db.instance_tag_add(self.context, uuid2, tag1)
db.instance_tag_add(self.context, uuid2, tag2)
db.instance_tag_add(self.context, uuid2, tag3)
# Check the tags for the first instance
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid1)
tags = self._get_tags_from_resp(tag_refs)
expected = [(uuid1, tag1)]
self.assertEqual(expected, tags)
# Check the tags for the second instance
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid2)
tags = self._get_tags_from_resp(tag_refs)
expected = [(uuid2, tag1), (uuid2, tag2), (uuid2, tag3)]
self.assertEqual(expected, tags)
def test_instance_tag_get_by_instance_uuid_no_tags(self):
uuid = self._create_instance()
self.assertEqual([], db.instance_tag_get_by_instance_uuid(self.context,
uuid))
def test_instance_tag_delete(self):
uuid = self._create_instance()
tag1 = 'tag1'
tag2 = 'tag2'
db.instance_tag_add(self.context, uuid, tag1)
db.instance_tag_add(self.context, uuid, tag2)
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
tags = self._get_tags_from_resp(tag_refs)
expected = [(uuid, tag1), (uuid, tag2)]
# Check the tags for the instance were added
self.assertEqual(expected, tags)
db.instance_tag_delete(self.context, uuid, tag1)
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
tags = self._get_tags_from_resp(tag_refs)
expected = [(uuid, tag2)]
self.assertEqual(expected, tags)
def test_instance_tag_delete_non_existent(self):
uuid = self._create_instance()
self.assertRaises(exception.InstanceTagNotFound,
db.instance_tag_delete, self.context, uuid, 'tag')
def test_instance_tag_delete_all(self):
uuid = self._create_instance()
tag1 = 'tag1'
tag2 = 'tag2'
db.instance_tag_add(self.context, uuid, tag1)
db.instance_tag_add(self.context, uuid, tag2)
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
tags = self._get_tags_from_resp(tag_refs)
expected = [(uuid, tag1), (uuid, tag2)]
# Check the tags for the instance were added
self.assertEqual(expected, tags)
db.instance_tag_delete_all(self.context, uuid)
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
tags = self._get_tags_from_resp(tag_refs)
self.assertEqual([], tags)
def test_instance_tag_add_to_non_existing_instance(self):
self.assertRaises(exception.InstanceNotFound, db.instance_tag_add,
self.context, 'fake_uuid', 'tag')
def test_instance_tag_set_to_non_existing_instance(self):
self.assertRaises(exception.InstanceNotFound, db.instance_tag_set,
self.context, 'fake_uuid', ['tag1', 'tag2'])
def test_instance_tag_get_from_non_existing_instance(self):
self.assertRaises(exception.InstanceNotFound,
db.instance_tag_get_by_instance_uuid, self.context,
'fake_uuid')
def test_instance_tag_delete_from_non_existing_instance(self):
self.assertRaises(exception.InstanceNotFound, db.instance_tag_delete,
self.context, 'fake_uuid', 'tag')
def test_instance_tag_delete_all_from_non_existing_instance(self):
self.assertRaises(exception.InstanceNotFound,
db.instance_tag_delete_all,
self.context, 'fake_uuid')
| 45.014797
| 79
| 0.604534
|
7bd5729764be387441d743bb3e42a062e0d7f6f5
| 3,211
|
py
|
Python
|
examples/postgresql/dao/db.py
|
ghga-de/ghga-service-chassis-lib
|
7a4d544386a88bec961601f2f838a6a955df6698
|
[
"Apache-2.0"
] | 1
|
2021-07-20T07:47:24.000Z
|
2021-07-20T07:47:24.000Z
|
examples/postgresql/dao/db.py
|
ghga-de/ghga-service-chassis-lib
|
7a4d544386a88bec961601f2f838a6a955df6698
|
[
"Apache-2.0"
] | 20
|
2021-07-02T14:38:57.000Z
|
2022-02-09T10:42:41.000Z
|
examples/postgresql/dao/db.py
|
ghga-de/ghga-service-chassis-lib
|
7a4d544386a88bec961601f2f838a6a955df6698
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 - 2022 Universität Tübingen, DKFZ and EMBL
# for the German Human Genome-Phenome Archive (GHGA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""db boilerplate"""
from typing import Any, List
from sqlalchemy.future import select
from ghga_service_chassis_lib.postgresql import (
AsyncPostgresqlConnector,
PostgresqlConfigBase,
)
from ghga_service_chassis_lib.utils import AsyncDaoGenericBase
from .. import models
from ..config import CONFIG
from . import db_models
# Since this is just a DAO stub without implementation,
# following pylint error are expected:
# pylint: disable=unused-argument,no-self-use
class DatabaseDao(AsyncDaoGenericBase):
"""
A DAO base class for interacting with the database.
"""
async def add_todo(self, item: models.ToDoItem) -> None:
"""add a todo item"""
...
async def get_all_todos(self) -> List[models.ToDoItem]:
"""get all todo items"""
...
class PostgresDatabase(DatabaseDao):
"""
An implementation of the DatabaseDao interface using a PostgreSQL backend.
"""
def __init__(self, config: PostgresqlConfigBase = CONFIG):
"""initialze DAO implementation"""
        # will be defined in __aenter__:
super().__init__(config=config)
self._config = config
self._session_cm: Any = None
self._session: Any = None
async def __aenter__(self):
"""Setup database connection"""
psql_connector = AsyncPostgresqlConnector(self._config)
self._session_cm = psql_connector.transactional_session()
# pylint: disable=no-member
self._session = await self._session_cm.__aenter__()
return self
async def __aexit__(self, error_type, error_value, error_traceback):
"""Teardown database connection"""
# pylint: disable=no-member
await self._session_cm.__aexit__(error_type, error_value, error_traceback)
async def add_todo(self, item: models.ToDoItem) -> None:
"""add a todo item"""
orm_item = db_models.ToDoItem(**item.dict())
self._session.add(orm_item)
async def get_all_todos(self) -> List[models.ToDoItem]:
"""get all todo items"""
# query all todo items:
query = await self._session.execute(
select(db_models.ToDoItem).order_by(db_models.ToDoItem.id)
)
# translate orm_items to business-logic data models:
items = [
models.ToDoItem(
title=orm_item.title,
description=orm_item.description,
due_date=orm_item.due_date,
)
for orm_item in query.scalars().all()
]
return items
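# Example usage (a minimal sketch, not part of the original module; it assumes
# an async caller, a PostgreSQL instance reachable via CONFIG, and that
# ToDoItem.due_date accepts a date-like value):
#
#     async with PostgresDatabase() as dao:
#         await dao.add_todo(models.ToDoItem(
#             title="write docs",
#             description="document the DAO example",
#             due_date=date(2022, 1, 1),
#         ))
#         todos = await dao.get_all_todos()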
| 31.792079
| 82
| 0.677048
|
7352b1f696f82c74163d9a6f43a2336ab59056e6
| 238
|
py
|
Python
|
setup.py
|
goedge-dev/dropt-util
|
3fa73b765651b21f67dfbf4720150958cbc715c4
|
[
"MIT"
] | null | null | null |
setup.py
|
goedge-dev/dropt-util
|
3fa73b765651b21f67dfbf4720150958cbc715c4
|
[
"MIT"
] | null | null | null |
setup.py
|
goedge-dev/dropt-util
|
3fa73b765651b21f67dfbf4720150958cbc715c4
|
[
"MIT"
] | null | null | null |
from setuptools import setup
with open('README.md', encoding='utf-8') as f:
long_description = f.read()
setup(
use_scm_version=True,
    long_description=long_description,
long_description_content_type='text/markdown'
)
| 18.307692
| 49
| 0.731092
|
4cff7fc0d97bf88ac4d81cfdbdf46289bbb30756
| 17,500
|
py
|
Python
|
optimal_path.py
|
fplk/mcs-scene-generator
|
e9351b39481f4a8afdbb69f1684f87f4d5c3f62a
|
[
"Apache-2.0"
] | null | null | null |
optimal_path.py
|
fplk/mcs-scene-generator
|
e9351b39481f4a8afdbb69f1684f87f4d5c3f62a
|
[
"Apache-2.0"
] | null | null | null |
optimal_path.py
|
fplk/mcs-scene-generator
|
e9351b39481f4a8afdbb69f1684f87f4d5c3f62a
|
[
"Apache-2.0"
] | null | null | null |
import logging
import math
from typing import Any, Dict, List, Optional, Tuple
from shapely.geometry import LineString, Polygon, Point
from shapely.ops import unary_union
import exceptions
from extremitypathfinder.extremitypathfinder import (
PolygonEnvironment as Environment
)
from extremitypathfinder import plotting
from geometry import ROOM_X_MIN, ROOM_X_MAX, ROOM_Z_MIN, ROOM_Z_MAX
from machine_common_sense.controller import MAX_REACH_DISTANCE, \
MOVE_DISTANCE, PERFORMER_CAMERA_Y
from util import PERFORMER_HALF_WIDTH
plotting.EXPORT_SIZE_X = plotting.EXPORT_SIZE_Y
VARIANCE = 0.01
class ShortestPath():
def __init__(
self,
action_list: List[Dict[str, Any]],
position: Tuple[float, float],
rotation: float
):
self.action_list = action_list
self.position = position
self.rotation = rotation
def _dilate_and_unify_object_bounds(
object_bounds_list: List[List[Dict[str, float]]],
dilation_amount: float,
source: Tuple[float, float] = None,
target: Tuple[float, float] = None
) -> Optional[List[Polygon]]:
"""Dilate the given object bounds by the given amount and return the
resulting coordinates. Fall back to the original bounds if the new bounds
would overlap the given source or target point."""
source_point = Point(source) if source else None
target_point = Point(target) if target else None
# Expand the rects by the dilation into bigger polys with 8 points.
poly_list = []
for bounds in object_bounds_list:
poly = Polygon([(point['x'], point['z']) for point in bounds])
logging.debug(f'original poly {poly}')
modified_poly = poly.buffer(dilation_amount, resolution=1, cap_style=3)
logging.debug(f'modified poly {modified_poly}')
# Use original poly if dilation would overlap with source/target.
if ((
source and not poly.contains(source_point) and
modified_poly.contains(source_point)
) or (
target and not poly.contains(target_point) and
modified_poly.contains(target_point)
)):
poly_list.append(poly)
else:
poly_list.append(modified_poly)
# Merge any intersecting polys.
merged_poly_list = (
unary_union(poly_list) if len(poly_list) > 1 else poly_list
)
if isinstance(merged_poly_list, Polygon):
merged_poly_list = [merged_poly_list]
poly_coords_list = [
list(poly.exterior.coords) for poly in merged_poly_list
]
# The polys returned by unary_union have the same first and last point,
# but the shortest path code doesn't want them to have the repeated point.
for coords in poly_coords_list:
if coords[0] == coords[-1]:
del coords[-1]
return poly_coords_list
def _dilate_target_bounds(
target_bounds: List[Dict[str, float]]
) -> List[Dict[str, float]]:
"""Dilate the given target bounds and return the resulting coordinates."""
# Dilate the bounds to account for the performer's reach distance.
# The resulting polygon should always have eight points.
coords = _dilate_and_unify_object_bounds(
[target_bounds],
MAX_REACH_DISTANCE - VARIANCE
)[0]
# Identify if the first two points are a (short) corner or a (long) side.
distance_1 = Point(coords[0]).distance(Point(coords[1]))
distance_2 = Point(coords[1]).distance(Point(coords[2]))
# Add the center points of each of the target's original four sides.
if distance_1 < distance_2:
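        # Rotate the list so that each (i, j) index pair used below spans a
        # full side of the dilated bounds rather than a short corner segment.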
coords.insert(0, coords.pop())
for i, j in [(6, 7), (4, 5), (2, 3), (0, 1)]:
center = LineString([coords[i], coords[j]]).centroid.coords[0]
coords.insert(j, center)
return coords
def _find_target_or_parent_dict(
target_object: Dict[str, Any],
object_list: List[Dict[str, Any]]
) -> Dict[str, Any]:
"""Find and return the target object dict from the given object list."""
    logging.debug(f'target {target_object}')
if 'locationParent' in target_object:
        # Use next() so a missing parent yields None instead of raising an
        # IndexError before the check below.
        parent_object = next(
            (object_dict for object_dict in object_list
             if object_dict['id'] == target_object['locationParent']),
            None
        )
        if parent_object is None:
            raise exceptions.SceneException(
                f'target should have parent {target_object}')
        logging.debug(f'parent {parent_object}')
return parent_object
return target_object
def _generate_path_list(
previous_path: ShortestPath,
next_position_list: List[Tuple[float, float]],
target_position: Tuple[float, float],
pathfinding_environment: Environment
) -> List[ShortestPath]:
"""Generate and return lists of MCS actions that each may be the shortest
path to the given target position. First generate MCS rotate and move
actions to the first element in the given position list, then regenerate
the shortest path from that position to the target position, and then
call recursively."""
logging.debug('----------------------------------------')
if len(next_position_list) == 0:
return [previous_path]
# Generate the MCS rotate and move actions toward just the next position.
next_path_list = _rotate_then_move(previous_path, next_position_list[0])
output_path_list = []
for path in next_path_list:
logging.debug(f'next path action list length {len(path.action_list)}')
logging.debug(f'next path position {path.position}')
logging.debug(f'next path rotation {path.rotation}')
# If the next part of the path didn't have a change in position...
if previous_path.position == path.position:
logging.debug('Path Done: is same position as previous path')
output_path_list.append(path)
continue
# If the next position was near enough to the target position...
if (
math.isclose(target_position[0], next_position_list[0][0]) and
math.isclose(target_position[1], next_position_list[0][1])
):
logging.debug('Path Done: is at target position')
output_path_list.append(path)
continue
# Else generate the path to the NEXT position.
position_list = _generate_shortest_path_position_list(
path.position,
target_position,
pathfinding_environment
)
if position_list:
output_path_list.extend(_generate_path_list(
path,
position_list[1:],
target_position,
pathfinding_environment
))
return output_path_list
def _generate_pathfinding_environment(
object_bounds_list: List[List[Dict[str, float]]],
source: Dict[str, float] = None,
target: Dict[str, float] = None,
save_path_plot_with_name: str = None
) -> Optional[Environment]:
"""Generate and return the pathfinding environment using the given list of
object bounds and the global room bounds. Save plots of the paths to the
local drive if save_path_plot_with_name is not None."""
poly_coords_list = _dilate_and_unify_object_bounds(
object_bounds_list,
        PERFORMER_HALF_WIDTH + VARIANCE,
(source['x'], source['z']) if source else None,
(target['x'], target['z']) if target else None
)
logging.debug(f'poly coords list {poly_coords_list}')
pathfinding_environment = (
plotting.PlottingEnvironment(plotting_dir=save_path_plot_with_name)
if save_path_plot_with_name else Environment()
)
room_bounds = [
(ROOM_X_MAX - VARIANCE, ROOM_Z_MAX - VARIANCE),
(ROOM_X_MIN + VARIANCE, ROOM_Z_MAX - VARIANCE),
(ROOM_X_MIN + VARIANCE, ROOM_Z_MIN + VARIANCE),
(ROOM_X_MAX - VARIANCE, ROOM_Z_MIN + VARIANCE)
]
logging.debug(f'room bounds {room_bounds}')
try:
pathfinding_environment.store(
room_bounds,
poly_coords_list,
validate=True
)
pathfinding_environment.prepare()
except Exception as e:
logging.error('UNEXPECTED ERROR IN ENVIRONMENT')
logging.error(e)
return None
return pathfinding_environment
def _generate_shortest_path_position_list(
starting_position: Tuple[float, float],
goal_position: Tuple[float, float],
pathfinding_environment: Environment
) -> Optional[List[Tuple[float, float]]]:
"""Generate and return the postion list for the shortest path from the
given starting position to the given goal position."""
try:
if not pathfinding_environment.within_map(starting_position):
logging.debug('Starting position not in pathfinding environment.')
return None
if not pathfinding_environment.within_map(goal_position):
logging.debug('Goal position not in pathfinding environment.')
return None
path, length = pathfinding_environment.find_shortest_path(
starting_position,
goal_position
)
except Exception as e:
logging.error('UNEXPECTED ERROR IN PATHFINDING')
logging.error(e)
return None
return path if len(path) > 0 else None
def _remove_duplicate_paths(
path_list: List[ShortestPath]
) -> List[ShortestPath]:
"""Remove each duplicated path from the given list and return a new path
list."""
# Map each unique path to its stringified action list.
unique_path = {}
for path in path_list:
# Stringify the path's MCS action list.
text_action_list = []
for action_data in path.action_list:
text_action = action_data['action']
for key, value in action_data['params'].items():
text_action += ',' + key + '=' + value
text_action_list.append(text_action)
text = ';'.join(text_action_list)
if text not in unique_path:
unique_path[text] = path
return list(unique_path.values())
def _rotate_then_move(
path: ShortestPath,
next_position: Tuple[float, float],
single_best_path: bool = False
) -> List[ShortestPath]:
"""Returns new paths based on the given path that rotates and/or moves to
the given next position."""
if (
math.isclose(path.position[0], next_position[0]) and
math.isclose(path.position[1], next_position[1])
):
return [path]
# Find the degree difference from the path's rotation to the next position.
dx = next_position[0] - path.position[0]
dz = next_position[1] - path.position[1]
theta = math.degrees(math.atan2(dz, dx))
logging.debug(f'path position {path.position}')
logging.debug(f'path rotation {path.rotation}')
logging.debug(f'next position {next_position}')
logging.debug(f'theta {theta}')
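    # Normalize the signed angle difference into (-180, 180] so the shorter
    # turning direction (left vs. right) is chosen below.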
delta = (path.rotation - theta) % 360
if delta > 180:
delta -= 360
rotate_left = (delta < 0)
logging.debug(f'delta {delta}')
# Find how many individual rotate actions are needed.
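    # Each rotate action corresponds to 10 degrees here (see the ±10 applied
    # per action below), so the candidate counts bracket the fractional
    # number of turns.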
remainder, count = math.modf(abs(delta) / 10.0)
count = int(count)
rotate_list = [count] if remainder == 0 else [count, count + 1]
if single_best_path:
rotate_list = [rotate_list[-1]]
elif remainder != 0:
# Try a few other rotations to handle some edge cases.
if remainder <= 0.2:
rotate_list.append(count - 1)
if remainder >= 0.8:
rotate_list.append(count + 2)
# Create a new path for each rotation amount.
intermediate_path_list = []
for amount in rotate_list:
intermediate_path_list.append(ShortestPath(path.action_list.copy() + [{
'action': 'RotateLeft' if rotate_left else 'RotateRight',
"params": {}
}] * amount, path.position, (
path.rotation - ((-10 if rotate_left else 10) * amount)
)))
# Find the move distance from the path's position to the next position.
distance = math.sqrt(dx ** 2 + dz ** 2)
logging.debug(f'distance {distance}')
# Find how many individual move actions are needed.
remainder, count = math.modf(distance / MOVE_DISTANCE)
count = int(count)
move_list = [count] if remainder == 0 else [count, count + 1]
if single_best_path:
move_list = [move_list[-1]]
# Create a new path for each movement amount.
output_path_list = []
for path in intermediate_path_list:
x_increment = MOVE_DISTANCE * math.cos(math.radians(path.rotation))
z_increment = MOVE_DISTANCE * math.sin(math.radians(path.rotation))
for amount in move_list:
output_path_list.append(ShortestPath(path.action_list.copy() + [{
"action": "MoveAhead",
"params": {}
}] * amount, (
path.position[0] + x_increment * amount,
path.position[1] + z_increment * amount
), path.rotation))
# Return len(rotate_list) * len(move_list) paths (unless single_best_path).
return output_path_list
def find_possible_best_path_list(
performer_start: Dict[str, Any],
target_dict: Dict[str, Any],
object_list: List[Dict[str, Any]],
save_path_plot_with_name: str = None
) -> Tuple[List[ShortestPath]]:
"""Find and return lists of MCS actions that each may be the shortest path
to the target object with the given ID. Because rotate and move actions
are rounded, try many paths with rotations and movements of varying
amounts."""
target_or_parent_dict = _find_target_or_parent_dict(
target_dict,
object_list
)
object_bounds_list = [
object_dict['shows'][0]['boundingBox'] for object_dict in object_list
if object_dict['id'] != target_or_parent_dict['id'] and
'locationParent' not in object_dict
]
logging.debug(f'object bounds list {object_bounds_list}')
target_coords = _dilate_target_bounds(
target_or_parent_dict['shows'][0]['boundingBox']
)
logging.debug(f'target coords {target_coords}')
pathfinding_environment = _generate_pathfinding_environment(
object_bounds_list,
performer_start['position'],
target_or_parent_dict['shows'][0]['position'],
save_path_plot_with_name
)
if pathfinding_environment is None:
logging.error('Cannot create pathfinding environment!')
return None
# Create the base path from the performer start position/rotation.
# Note that shapely expects 0=east and 90=north but in MCS it's switched.
base_path = ShortestPath([], (
performer_start['position']['x'],
performer_start['position']['z']
), (90 - performer_start['rotation']['y']))
best_path_list = []
for target in target_coords:
logging.debug('========================================')
logging.debug(f'target {target}')
# Generate the position list for the shortest path to the target point.
position_list = _generate_shortest_path_position_list(
base_path.position,
target,
pathfinding_environment
)
logging.debug(f'position list {position_list}')
if not position_list:
logging.debug(f'Cannot find path to target corner {target}')
continue
# Generate a path of MCS actions for the shortest path's position list.
path_list = _generate_path_list(
base_path,
position_list[1:],
target,
pathfinding_environment
)
        logging.debug(f'path list length {len(path_list)}')
# Add one more set of rotate and move actions to each path.
best_path_list.extend([_rotate_then_move(path, (
target_or_parent_dict['shows'][0]['position']['x'],
target_or_parent_dict['shows'][0]['position']['z']
), single_best_path=True)[0] for path in path_list])
unique_path_list = _remove_duplicate_paths(best_path_list)
    logging.debug(f'output path list length {len(unique_path_list)}')
return sorted(unique_path_list, key=lambda path: len(path.action_list))
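# Usage sketch (illustrative; the dictionary shapes mirror the fields accessed
# above, but every concrete value here is invented):
#
#     performer_start = {'position': {'x': 0.0, 'y': 0.0, 'z': 0.0},
#                        'rotation': {'y': 0.0}}
#     path_list = find_possible_best_path_list(performer_start, target_dict,
#                                              object_list)
#     best = path_list[0] if path_list else None  # fewest MCS actions first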
def look_at_target(
path: ShortestPath,
target_position: Tuple[float, float],
target_height: float
) -> None:
"""Update the given path to look down at the target with the given position
and height."""
grid_distance = math.sqrt(
(target_position[0] - path.position[0]) ** 2 +
(target_position[1] - path.position[1]) ** 2
)
height_distance = PERFORMER_CAMERA_Y - target_height
difference = math.degrees(math.atan2(height_distance, grid_distance))
while difference > 5:
path.action_list.append({
'action': 'LookDown',
'params': {}
})
difference -= 10
def open_container_and_pickup_target(
path: ShortestPath,
target_id: str,
container_dict: Dict[str, Any]
) -> None:
"""Update the given path to open the container with the given data and
pickup the target with the given ID."""
path.action_list.append({
'action': 'OpenObject',
'params': {
'objectId': container_dict['id']
}
})
pickup_target(path, target_id)
def pickup_target(
path: ShortestPath,
target_id: str
) -> None:
"""Update the given path to pickup the target with the given ID."""
path.action_list.append({
'action': 'PickupObject',
'params': {
'objectId': target_id
}
})
| 36.997886
| 79
| 0.6552
|
b785e6edeab7bdf8dd2f97f2989c204e06dc15c2
| 332
|
py
|
Python
|
cm-agent/test/test_rest.py
|
manue1/connectivity-manager-agent
|
32c89e5edc31f264bd0b9d7f49e2a34e97cc99db
|
[
"Apache-2.0"
] | null | null | null |
cm-agent/test/test_rest.py
|
manue1/connectivity-manager-agent
|
32c89e5edc31f264bd0b9d7f49e2a34e97cc99db
|
[
"Apache-2.0"
] | null | null | null |
cm-agent/test/test_rest.py
|
manue1/connectivity-manager-agent
|
32c89e5edc31f264bd0b9d7f49e2a34e97cc99db
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import httplib
__author__ = 'beb'
if __name__ == '__main__':
connection = httplib.HTTPConnection('0.0.0.0:8091')
headers = {'Content-type': 'application/json'}
# Retrieve list of hypervisors
connection.request('GET', '/hosts')
response = connection.getresponse()
print response.read()
| 20.75
| 55
| 0.674699
|
9a83992b4d1e468ee97c27de7316adfa5f90caf5
| 36,569
|
py
|
Python
|
benchmarks/jython/scratch/jython/Lib/os.py
|
PhongNgo/OptimalVerifiedFT
|
ad8d63833d16be9c2f312848a995c52a072ee1ad
|
[
"BSD-3-Clause"
] | 1
|
2021-03-06T13:59:43.000Z
|
2021-03-06T13:59:43.000Z
|
benchmarks/jython/scratch/jython/Lib/os.py
|
PhongNgo/OptimalVerifiedFT
|
ad8d63833d16be9c2f312848a995c52a072ee1ad
|
[
"BSD-3-Clause"
] | null | null | null |
benchmarks/jython/scratch/jython/Lib/os.py
|
PhongNgo/OptimalVerifiedFT
|
ad8d63833d16be9c2f312848a995c52a072ee1ad
|
[
"BSD-3-Clause"
] | null | null | null |
r"""OS routines for Java, with some attempts to support NT, and Posix
functionality.
This exports:
- all functions from posix, nt, dos, os2, mac, or ce, e.g. unlink, stat, etc.
- os.path is one of the modules posixpath, ntpath, macpath, or dospath
- os.name is 'posix', 'nt', 'dos', 'os2', 'mac', 'ce' or 'riscos'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
# CPython os.py __all__
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path",
"SEEK_SET", "SEEK_CUR", "SEEK_END"]
# Would come from the posix/nt/etc. modules on CPython
__all__.extend(['EX_OK', 'F_OK', 'O_APPEND', 'O_CREAT', 'O_EXCL', 'O_RDONLY',
'O_RDWR', 'O_SYNC', 'O_TRUNC', 'O_WRONLY', 'R_OK', 'SEEK_CUR',
'SEEK_END', 'SEEK_SET', 'W_OK', 'X_OK', '_exit', 'access',
'altsep', 'chdir', 'chmod', 'close', 'curdir', 'defpath',
'environ', 'error', 'fdopen', 'fsync', 'getcwd', 'getcwdu',
'getenv', 'getpid', 'isatty', 'linesep', 'listdir', 'lseek',
'lstat', 'makedirs', 'mkdir', 'name', 'open', 'pardir', 'path',
'pathsep', 'popen', 'popen2', 'popen3', 'popen4', 'putenv',
'read', 'remove', 'removedirs', 'rename', 'renames', 'rmdir',
'sep', 'stat', 'stat_result', 'strerror', 'system', 'unlink',
'unsetenv', 'utime', 'walk', 'write'])
import errno
import jarray
import java.lang.System
import time
import stat as _stat
import sys
from java.io import File
from org.python.core.io import FileDescriptors, FileIO, IOBase
from org.python.core.Py import newString as asPyString
try:
from org.python.constantine.platform import Errno
except ImportError:
from com.kenai.constantine.platform import Errno
# Mapping of: os._name: [name list, shell command list]
_os_map = dict(nt=[
['Windows'],
[['cmd.exe', '/c'], ['command.com', '/c']]
],
posix=[
[], # posix is a fallback, instead of matching names
[['/bin/sh', '-c']]
]
)
def get_os_type():
"""Return the name of the type of the underlying OS.
    Returns a value suitable for the os.name variable (though not necessarily
    the value Jython itself uses for os.name). This value may be overridden in
    the Jython registry.
"""
os_name = sys.registry.getProperty('python.os')
if os_name:
return asPyString(os_name)
os_name = asPyString(java.lang.System.getProperty('os.name'))
os_type = None
for type, (patterns, shell_commands) in _os_map.iteritems():
for pattern in patterns:
if os_name.startswith(pattern):
# determine the shell_command later, when it's needed:
# it requires os.path (which isn't setup yet)
return type
return 'posix'
name = 'java'
# WARNING: _name is private: for Jython internal usage only! user code
# should *NOT* use it
_name = get_os_type()
try:
from org.python.posix import JavaPOSIX, POSIXHandler, POSIXFactory
except ImportError:
from org.jruby.ext.posix import JavaPOSIX, POSIXHandler, POSIXFactory
class PythonPOSIXHandler(POSIXHandler):
def error(self, error, msg):
err = getattr(errno, error.name(), None)
if err is None:
raise OSError('%s: %s' % (error, asPyString(msg)))
raise OSError(err, strerror(err), asPyString(msg))
def unimplementedError(self, method_name):
raise NotImplementedError(method_name)
def warn(self, warning_id, msg, rest):
pass # XXX implement
def isVerbose(self):
return False
def getCurrentWorkingDirectory(self):
return File(getcwdu())
def getEnv(self):
return ['%s=%s' % (key, val) for key, val in environ.iteritems()]
def getInputStream(self):
return getattr(java.lang.System, 'in') # XXX handle resetting
def getOutputStream(self):
return java.lang.System.out # XXX handle resetting
def getPID(self):
return 0
def getErrorStream(self):
return java.lang.System.err # XXX handle resetting
_posix = POSIXFactory.getPOSIX(PythonPOSIXHandler(), True)
_native_posix = not isinstance(_posix, JavaPOSIX)
if _name == 'nt':
import ntpath as path
else:
import posixpath as path
sys.modules['os.path'] = _path = path
from os.path import curdir, pardir, sep, pathsep, defpath, extsep, altsep, devnull
linesep = java.lang.System.getProperty('line.separator')
# open for reading only
O_RDONLY = 0x0
# open for writing only
O_WRONLY = 0x1
# open for reading and writing
O_RDWR = 0x2
# set append mode
O_APPEND = 0x8
# synchronous writes
O_SYNC = 0x80
# create if nonexistent
O_CREAT = 0x200
# truncate to zero length
O_TRUNC = 0x400
# error if already exists
O_EXCL = 0x800
# seek variables
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
# test for existence of file
F_OK = 0
# test for execute or search permission
X_OK = 1<<0
# test for write permission
W_OK = 1<<1
# test for read permission
R_OK = 1<<2
# successful termination
EX_OK = 0
# Java class representing the size of a time_t. internal use, lazily set
_time_t = None
class stat_result:
_stat_members = (
('st_mode', _stat.ST_MODE),
('st_ino', _stat.ST_INO),
('st_dev', _stat.ST_DEV),
('st_nlink', _stat.ST_NLINK),
('st_uid', _stat.ST_UID),
('st_gid', _stat.ST_GID),
('st_size', _stat.ST_SIZE),
('st_atime', _stat.ST_ATIME),
('st_mtime', _stat.ST_MTIME),
('st_ctime', _stat.ST_CTIME),
)
def __init__(self, results):
if len(results) != 10:
            raise TypeError("stat_result() takes a 10-sequence")
for (name, index) in stat_result._stat_members:
self.__dict__[name] = results[index]
@classmethod
def from_jnastat(cls, s):
results = []
for meth in (s.mode, s.ino, s.dev, s.nlink, s.uid, s.gid, s.st_size,
s.atime, s.mtime, s.ctime):
try:
results.append(meth())
except NotImplementedError:
results.append(0)
return cls(results)
def __getitem__(self, i):
if i < 0 or i > 9:
raise IndexError(i)
return getattr(self, stat_result._stat_members[i][0])
def __setitem__(self, x, value):
raise TypeError("object doesn't support item assignment")
def __setattr__(self, name, value):
if name in [x[0] for x in stat_result._stat_members]:
raise TypeError(name)
raise AttributeError("readonly attribute")
def __len__(self):
return 10
def __cmp__(self, other):
if not isinstance(other, stat_result):
return 1
return cmp(self.__dict__, other.__dict__)
def __repr__(self):
return repr(tuple(self.__dict__[member[0]] for member
in stat_result._stat_members))
error = OSError
def _exit(n=0):
"""_exit(status)
Exit to the system with specified status, without normal exit
processing.
"""
java.lang.System.exit(n)
def getcwd():
"""getcwd() -> path
Return a string representing the current working directory.
"""
return asPyString(sys.getCurrentWorkingDir())
def getcwdu():
"""getcwd() -> path
Return a unicode string representing the current working directory.
"""
return sys.getCurrentWorkingDir()
def chdir(path):
"""chdir(path)
Change the current working directory to the specified path.
"""
realpath = _path.realpath(path)
if not _path.exists(realpath):
raise OSError(errno.ENOENT, strerror(errno.ENOENT), path)
if not _path.isdir(realpath):
raise OSError(errno.ENOTDIR, strerror(errno.ENOTDIR), path)
sys.setCurrentWorkingDir(realpath)
def listdir(path):
"""listdir(path) -> list_of_strings
Return a list containing the names of the entries in the directory.
path: path of directory to list
The list is in arbitrary order. It does not include the special
entries '.' and '..' even if they are present in the directory.
"""
l = File(sys.getPath(path)).list()
if l is None:
raise OSError(0, 'No such directory', path)
return [asPyString(entry) for entry in l]
def chmod(path, mode):
"""chmod(path, mode)
Change the access permissions of a file.
"""
# XXX no error handling for chmod in jna-posix
# catch not found errors explicitly here, for now
abs_path = sys.getPath(path)
if not File(abs_path).exists():
raise OSError(errno.ENOENT, strerror(errno.ENOENT), path)
_posix.chmod(abs_path, mode)
def mkdir(path, mode='ignored'):
"""mkdir(path [, mode=0777])
Create a directory.
The optional parameter is currently ignored.
"""
# XXX: use _posix.mkdir when we can get the real errno upon failure
fp = File(sys.getPath(path))
if not fp.mkdir():
if fp.isDirectory() or fp.isFile():
err = errno.EEXIST
else:
err = 0
msg = strerror(err) if err else "couldn't make directory"
raise OSError(err, msg, path)
def makedirs(path, mode='ignored'):
"""makedirs(path [, mode=0777])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist.
The optional parameter is currently ignored.
"""
sys_path = sys.getPath(path)
if File(sys_path).mkdirs():
return
# if making a /x/y/z/., java.io.File#mkdirs inexplicably fails. So we need
# to force it
# need to use _path instead of path, because param is hiding
# os.path module in namespace!
head, tail = _path.split(sys_path)
if tail == curdir:
if File(_path.join(head)).mkdirs():
return
raise OSError(0, "couldn't make directories", path)
def remove(path):
"""remove(path)
Remove a file (same as unlink(path)).
"""
if not File(sys.getPath(path)).delete():
raise OSError(0, "couldn't delete file", path)
unlink = remove
def rename(path, newpath):
"""rename(old, new)
Rename a file or directory.
"""
if not File(sys.getPath(path)).renameTo(File(sys.getPath(newpath))):
raise OSError(0, "couldn't rename file", path)
#XXX: copied from CPython 2.5.1
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
    path segments of the old name will be pruned away until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
def rmdir(path):
"""rmdir(path)
Remove a directory."""
f = File(sys.getPath(path))
if not f.exists():
raise OSError(errno.ENOENT, strerror(errno.ENOENT), path)
elif not f.isDirectory():
raise OSError(errno.ENOTDIR, strerror(errno.ENOTDIR), path)
elif not f.delete():
raise OSError(0, "couldn't delete directory", path)
#XXX: copied from CPython 2.5.1
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and empty all intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
__all__.extend(['makedirs', 'renames', 'removedirs'])
def strerror(code):
"""strerror(code) -> string
Translate an error code to a message string.
"""
if not isinstance(code, (int, long)):
raise TypeError('an integer is required')
constant = Errno.valueOf(code)
if constant is Errno.__UNKNOWN_CONSTANT__:
return 'Unknown error: %d' % code
if constant.name() == constant.description():
# XXX: have constantine handle this fallback
# Fake constant or just lacks a description, fallback to Linux's
try:
from org.python.constantine.platform.linux import Errno as LinuxErrno
except ImportError:
from com.kenai.constantine.platform.linux import Errno as LinuxErrno
constant = getattr(LinuxErrno, constant.name(), None)
if not constant:
return 'Unknown error: %d' % code
return asPyString(constant.toString())
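# Illustrative examples (the exact message text depends on the platform's
# errno constants):
#     strerror(errno.ENOENT)   # e.g. 'No such file or directory'
#     strerror(999999)         # e.g. 'Unknown error: 999999'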
def access(path, mode):
"""access(path, mode) -> True if granted, False otherwise
Use the real uid/gid to test for access to a path. Note that most
operations will use the effective uid/gid, therefore this routine can
be used in a suid/sgid environment to test if the invoking user has the
specified access to the path. The mode argument can be F_OK to test
existence, or the inclusive-OR of R_OK, W_OK, and X_OK.
"""
if not isinstance(mode, (int, long)):
raise TypeError('an integer is required')
f = File(sys.getPath(path))
result = True
if not f.exists():
result = False
if mode & R_OK and not f.canRead():
result = False
if mode & W_OK and not f.canWrite():
result = False
if mode & X_OK:
# NOTE: always False without jna-posix stat
try:
result = (stat(path).st_mode & _stat.S_IEXEC) != 0
except OSError:
result = False
return result
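# Illustrative usage: combine the mode flags with bitwise OR to test several
# permissions at once. The path below is invented.
#     if access('/tmp/example.txt', R_OK | W_OK):
#         pass  # path exists and is readable and writable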
def stat(path):
"""stat(path) -> stat result
Perform a stat system call on the given path.
The Java stat implementation only returns a small subset of
the standard fields: size, modification time and change time.
"""
abs_path = sys.getPath(path)
try:
return stat_result.from_jnastat(_posix.stat(abs_path))
except NotImplementedError:
pass
except:
raise
f = File(abs_path)
if not f.exists():
raise OSError(errno.ENOENT, strerror(errno.ENOENT), path)
size = f.length()
mtime = f.lastModified() / 1000.0
mode = 0
if f.isDirectory():
mode = _stat.S_IFDIR
elif f.isFile():
mode = _stat.S_IFREG
if f.canRead():
mode = mode | _stat.S_IREAD
if f.canWrite():
mode = mode | _stat.S_IWRITE
return stat_result((mode, 0, 0, 0, 0, 0, size, mtime, mtime, 0))
def lstat(path):
"""lstat(path) -> stat result
Like stat(path), but do not follow symbolic links.
"""
abs_path = sys.getPath(path)
try:
return stat_result.from_jnastat(_posix.lstat(abs_path))
except NotImplementedError:
pass
except:
raise
f = File(sys.getPath(path))
# XXX: jna-posix implements similar link detection in
# JavaFileStat.calculateSymlink, fallback to that instead when not
# native
abs_parent = f.getAbsoluteFile().getParentFile()
if not abs_parent:
# root isn't a link
return stat(path)
can_parent = abs_parent.getCanonicalFile()
if can_parent.getAbsolutePath() == abs_parent.getAbsolutePath():
# The parent directory's absolute path is canonical..
if f.getAbsolutePath() != f.getCanonicalPath():
# but the file's absolute and canonical paths differ (a
# link)
return stat_result((_stat.S_IFLNK, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# The parent directory's path is not canonical (one of the parent
# directories is a symlink). Build a new path with the parent's
# canonical path and compare the files
f = File(_path.join(can_parent.getAbsolutePath(), f.getName()))
if f.getAbsolutePath() != f.getCanonicalPath():
return stat_result((_stat.S_IFLNK, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Not a link, only now can we determine if it exists (because
# File.exists() returns False for dead links)
if not f.exists():
raise OSError(errno.ENOENT, strerror(errno.ENOENT), path)
return stat(path)
def utime(path, times):
"""utime(path, (atime, mtime))
utime(path, None)
Set the access and modification time of the file to the given values.
If the second form is used, set the access and modification times to the
current time.
Due to Java limitations, on some platforms only the modification time
may be changed.
"""
if path is None:
raise TypeError('path must be specified, not None')
if times is None:
atimeval = mtimeval = None
elif isinstance(times, tuple) and len(times) == 2:
atimeval = _to_timeval(times[0])
mtimeval = _to_timeval(times[1])
else:
raise TypeError('utime() arg 2 must be a tuple (atime, mtime)')
_posix.utimes(path, atimeval, mtimeval)
def _to_timeval(seconds):
"""Convert seconds (with a fraction) from epoch to a 2 item tuple of
seconds, microseconds from epoch as longs
"""
global _time_t
if _time_t is None:
from java.lang import Integer, Long
try:
from org.python.posix.util import Platform
except ImportError:
from org.jruby.ext.posix.util import Platform
_time_t = Integer if Platform.IS_32_BIT else Long
try:
floor = long(seconds)
except TypeError:
raise TypeError('an integer is required')
if not _time_t.MIN_VALUE <= floor <= _time_t.MAX_VALUE:
raise OverflowError('long int too large to convert to int')
# usec can't exceed 1000000
usec = long((seconds - floor) * 1e6)
if usec < 0:
# If rounding gave us a negative number, truncate
usec = 0
return floor, usec
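# Worked example (illustrative): _to_timeval(2.25) returns (2L, 250000L) --
# the whole seconds plus the fractional part expressed as microseconds.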
def close(fd):
"""close(fd)
Close a file descriptor (for low level IO).
"""
rawio = FileDescriptors.get(fd)
_handle_oserror(rawio.close)
def fdopen(fd, mode='r', bufsize=-1):
"""fdopen(fd [, mode='r' [, bufsize]]) -> file_object
Return an open file object connected to a file descriptor.
"""
rawio = FileDescriptors.get(fd)
if (len(mode) and mode[0] or '') not in 'rwa':
raise ValueError("invalid file mode '%s'" % mode)
if rawio.closed():
raise OSError(errno.EBADF, strerror(errno.EBADF))
try:
fp = FileDescriptors.wrap(rawio, mode, bufsize)
except IOError:
raise OSError(errno.EINVAL, strerror(errno.EINVAL))
return fp
def ftruncate(fd, length):
"""ftruncate(fd, length)
Truncate a file to a specified length.
"""
rawio = FileDescriptors.get(fd)
try:
rawio.truncate(length)
except Exception, e:
raise IOError(errno.EBADF, strerror(errno.EBADF))
def lseek(fd, pos, how):
"""lseek(fd, pos, how) -> newpos
Set the current position of a file descriptor.
"""
rawio = FileDescriptors.get(fd)
return _handle_oserror(rawio.seek, pos, how)
def open(filename, flag, mode=0777):
"""open(filename, flag [, mode=0777]) -> fd
Open a file (for low level IO).
"""
reading = flag & O_RDONLY
writing = flag & O_WRONLY
updating = flag & O_RDWR
creating = flag & O_CREAT
truncating = flag & O_TRUNC
exclusive = flag & O_EXCL
sync = flag & O_SYNC
appending = flag & O_APPEND
if updating and writing:
raise OSError(errno.EINVAL, strerror(errno.EINVAL), filename)
if not creating and not path.exists(filename):
raise OSError(errno.ENOENT, strerror(errno.ENOENT), filename)
if not writing:
if updating:
writing = True
else:
reading = True
if truncating and not writing:
# Explicitly truncate, writing will truncate anyway
FileIO(filename, 'w').close()
if exclusive and creating:
try:
if not File(sys.getPath(filename)).createNewFile():
raise OSError(errno.EEXIST, strerror(errno.EEXIST),
filename)
except java.io.IOException, ioe:
raise OSError(ioe)
mode = '%s%s%s%s' % (reading and 'r' or '',
(not appending and writing) and 'w' or '',
(appending and (writing or updating)) and 'a' or '',
updating and '+' or '')
if sync and (writing or updating):
from java.io import FileNotFoundException, RandomAccessFile
try:
fchannel = RandomAccessFile(sys.getPath(filename), 'rws').getChannel()
except FileNotFoundException, fnfe:
if path.isdir(filename):
raise OSError(errno.EISDIR, strerror(errno.EISDIR))
raise OSError(errno.ENOENT, strerror(errno.ENOENT), filename)
return FileIO(fchannel, mode)
return FileIO(filename, mode)
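# Illustrative usage: OR the flag constants defined above to build the open
# mode. The file names below are invented.
#     fd = open('example.txt', O_WRONLY | O_CREAT | O_TRUNC)   # (re)create for writing
#     fd2 = open('existing.log', O_WRONLY | O_APPEND)          # append to an existing file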
def read(fd, buffersize):
"""read(fd, buffersize) -> string
Read a file descriptor.
"""
from org.python.core.util import StringUtil
rawio = FileDescriptors.get(fd)
buf = _handle_oserror(rawio.read, buffersize)
return asPyString(StringUtil.fromBytes(buf))
def write(fd, string):
"""write(fd, string) -> byteswritten
Write a string to a file descriptor.
"""
from java.nio import ByteBuffer
from org.python.core.util import StringUtil
rawio = FileDescriptors.get(fd)
return _handle_oserror(rawio.write,
ByteBuffer.wrap(StringUtil.toBytes(string)))
def _handle_oserror(func, *args, **kwargs):
"""Translate exceptions into OSErrors"""
try:
return func(*args, **kwargs)
except:
raise OSError(errno.EBADF, strerror(errno.EBADF))
def system(command):
"""system(command) -> exit_status
Execute the command (a string) in a subshell.
"""
import subprocess
return subprocess.call(command, shell=True)
def popen(command, mode='r', bufsize=-1):
"""popen(command [, mode='r' [, bufsize]]) -> pipe
Open a pipe to/from a command returning a file object.
"""
import subprocess
if mode == 'r':
return subprocess.Popen(command, bufsize=bufsize, shell=True,
stdout=subprocess.PIPE).stdout
elif mode == 'w':
return subprocess.Popen(command, bufsize=bufsize, shell=True,
stdin=subprocess.PIPE).stdin
else:
raise OSError(errno.EINVAL, strerror(errno.EINVAL))
# os module versions of the popen# methods have different return value
# order than popen2 functions
def popen2(cmd, mode="t", bufsize=-1):
"""Execute the shell command cmd in a sub-process.
On UNIX, 'cmd' may be a sequence, in which case arguments will be
passed directly to the program without shell intervention (as with
os.spawnv()). If 'cmd' is a string it will be passed to the shell
(as with os.system()). If 'bufsize' is specified, it sets the
buffer size for the I/O pipes. The file objects (child_stdin,
child_stdout) are returned.
"""
import popen2
stdout, stdin = popen2.popen2(cmd, bufsize)
return stdin, stdout
def popen3(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process.
On UNIX, 'cmd' may be a sequence, in which case arguments will be
passed directly to the program without shell intervention
(as with os.spawnv()). If 'cmd' is a string it will be passed
to the shell (as with os.system()). If 'bufsize' is specified,
it sets the buffer size for the I/O pipes. The file objects
(child_stdin, child_stdout, child_stderr) are returned.
"""
import popen2
stdout, stdin, stderr = popen2.popen3(cmd, bufsize)
return stdin, stdout, stderr
def popen4(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process.
On UNIX, 'cmd' may be a sequence, in which case arguments will be
passed directly to the program without shell intervention
(as with os.spawnv()). If 'cmd' is a string it will be passed
to the shell (as with os.system()). If 'bufsize' is specified,
it sets the buffer size for the I/O pipes. The file objects
(child_stdin, child_stdout_stderr) are returned.
"""
import popen2
stdout, stdin = popen2.popen4(cmd, bufsize)
return stdin, stdout
def getlogin():
"""getlogin() -> string
Return the actual login name.
"""
return java.lang.System.getProperty("user.name")
#XXX: copied from CPython's release23-maint branch revision 56502
def walk(top, topdown=True, onerror=None):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an os.error instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
from os.path import join, getsize
for root, dirs, files in walk('python/Lib/email'):
print root, "consumes",
print sum([getsize(join(root, name)) for name in files]),
print "bytes in", len(files), "non-directory files"
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
from os.path import join, isdir, islink
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = listdir(top)
except error, err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if not islink(path):
for x in walk(path, topdown, onerror):
yield x
if not topdown:
yield top, dirs, nondirs
__all__.append("walk")
environ = sys.getEnviron()
if _name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE
import UserDict
# But we store them as upper case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
data = self.data
for k, v in environ.items():
data[k.upper()] = v
def __setitem__(self, key, item):
self.data[key.upper()] = item
def __getitem__(self, key):
return self.data[key.upper()]
def __delitem__(self, key):
del self.data[key.upper()]
def has_key(self, key):
return key.upper() in self.data
def __contains__(self, key):
return key.upper() in self.data
def get(self, key, failobj=None):
return self.data.get(key.upper(), failobj)
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
def copy(self):
return dict(self)
environ = _Environ(environ)
def putenv(key, value):
"""putenv(key, value)
Change or add an environment variable.
"""
environ[key] = value
def unsetenv(key):
"""unsetenv(key)
Delete an environment variable.
"""
if key in environ:
del environ[key]
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default."""
return environ.get(key, default)
if _name == 'posix':
def link(src, dst):
"""link(src, dst)
Create a hard link to a file.
"""
_posix.link(sys.getPath(src), sys.getPath(dst))
def symlink(src, dst):
"""symlink(src, dst)
Create a symbolic link pointing to src named dst.
"""
_posix.symlink(src, sys.getPath(dst))
def readlink(path):
"""readlink(path) -> path
Return a string representing the path to which the symbolic link
points.
"""
return _posix.readlink(sys.getPath(path))
def getegid():
"""getegid() -> egid
Return the current process's effective group id."""
return _posix.getegid()
def geteuid():
"""geteuid() -> euid
Return the current process's effective user id."""
return _posix.geteuid()
def getgid():
"""getgid() -> gid
Return the current process's group id."""
return _posix.getgid()
def getlogin():
"""getlogin() -> string
Return the actual login name."""
return _posix.getlogin()
def getpgrp():
"""getpgrp() -> pgrp
Return the current process group id."""
return _posix.getpgrp()
def getppid():
"""getppid() -> ppid
Return the parent's process id."""
return _posix.getppid()
def getuid():
"""getuid() -> uid
Return the current process's user id."""
return _posix.getuid()
def setpgrp():
"""setpgrp()
        Make the current process the leader of its process group."""
return _posix.setpgrp()
def setsid():
"""setsid()
Call the system call setsid()."""
return _posix.setsid()
# This implementation of fork partially works on
# Jython. Diagnosing what works, what doesn't, and fixing it is
# left for another day. In any event, this would only be
# marginally useful.
# def fork():
# """fork() -> pid
#
# Fork a child process.
# Return 0 to child process and PID of child to parent process."""
# return _posix.fork()
def kill(pid, sig):
"""kill(pid, sig)
Kill a process with a signal."""
return _posix.kill(pid, sig)
def wait():
"""wait() -> (pid, status)
Wait for completion of a child process."""
status = jarray.zeros(1, 'i')
res_pid = _posix.wait(status)
if res_pid == -1:
raise OSError(status[0], strerror(status[0]))
return res_pid, status[0]
def waitpid(pid, options):
"""waitpid(pid, options) -> (pid, status)
Wait for completion of a given child process."""
status = jarray.zeros(1, 'i')
res_pid = _posix.waitpid(pid, status, options)
if res_pid == -1:
raise OSError(status[0], strerror(status[0]))
return res_pid, status[0]
def fdatasync(fd):
"""fdatasync(fildes)
force write of file with filedescriptor to disk.
does not force update of metadata.
"""
_fsync(fd, False)
__all__.extend(['link', 'symlink', 'readlink', 'getegid', 'geteuid',
'getgid', 'getlogin', 'getpgrp', 'getppid', 'getuid',
'setpgrp', 'setsid', 'kill', 'wait', 'waitpid',
'fdatasync'])
def fsync(fd):
"""fsync(fildes)
force write of file with filedescriptor to disk.
"""
_fsync(fd, True)
def _fsync(fd, metadata):
"""Internal fsync impl"""
rawio = FileDescriptors.get(fd)
rawio.checkClosed()
from java.nio.channels import FileChannel
channel = rawio.getChannel()
if not isinstance(channel, FileChannel):
raise OSError(errno.EINVAL, strerror(errno.EINVAL))
try:
channel.force(metadata)
except java.io.IOException, ioe:
raise OSError(ioe)
def getpid():
"""getpid() -> pid
Return the current process id."""
return _posix.getpid()
def isatty(fileno):
"""isatty(fd) -> bool
Return True if the file descriptor 'fd' is an open file descriptor
connected to the slave end of a terminal."""
from java.io import FileDescriptor
if isinstance(fileno, int):
if fileno == 0:
fd = getattr(FileDescriptor, 'in')
elif fileno == 1:
fd = FileDescriptor.out
elif fileno == 2:
fd = FileDescriptor.err
else:
            raise NotImplementedError(
                'Integer file descriptor compatibility only '
                'available for stdin, stdout and stderr (0-2)')
return _posix.isatty(fd)
if isinstance(fileno, FileDescriptor):
return _posix.isatty(fileno)
if not isinstance(fileno, IOBase):
raise TypeError('a file descriptor is required')
return fileno.isatty()
def umask(new_mask):
"""umask(new_mask) -> old_mask
Set the current numeric umask and return the previous umask."""
return _posix.umask(int(new_mask))
from java.security import SecureRandom
urandom_source = None
def urandom(n):
global urandom_source
if urandom_source is None:
urandom_source = SecureRandom()
buffer = jarray.zeros(n, 'b')
urandom_source.nextBytes(buffer)
return buffer.tostring()
| 31.79913
| 82
| 0.631765
|
e82181facea4296b532933ba1239327887f80987
| 1,514
|
py
|
Python
|
molssi_devops_uf/molssi_math.py
|
farhadrgh/molssi_devops_uf
|
718ad45ee8deb6d2048f0b370f1025c3541562df
|
[
"BSD-3-Clause"
] | null | null | null |
molssi_devops_uf/molssi_math.py
|
farhadrgh/molssi_devops_uf
|
718ad45ee8deb6d2048f0b370f1025c3541562df
|
[
"BSD-3-Clause"
] | null | null | null |
molssi_devops_uf/molssi_math.py
|
farhadrgh/molssi_devops_uf
|
718ad45ee8deb6d2048f0b370f1025c3541562df
|
[
"BSD-3-Clause"
] | null | null | null |
"""
molssi_math.py
A sample repository for the MOLSSI workshop at UF.
Some math functions.
"""
def mean(num_list):
"""
Calculate the mean/average of a list of numbers.
Parameters
----------
num_list : list
The list to make the average of
Returns
----------
mean_list : float
The mean of the list
"""
# check input type list
if not isinstance(num_list, list):
raise TypeError('Invalid input %s - Input must be a list' % (num_list))
# check list not empty
if num_list == []:
raise ValueError('Cannot apply mean to empty list')
try:
mean_list = sum(num_list) / float(len(num_list))
except TypeError:
raise TypeError('Cannot calculate mean of list - all list elements must be numeric')
return mean_list
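# Example (illustrative): mean([1, 2, 3, 4]) returns 2.5, while mean([]) and
# mean('abc') raise ValueError and TypeError respectively.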
def canvas(with_attribution=True):
"""
Placeholder function to show example docstring (NumPy format)
Replace this function and doc string for your own project
Parameters
----------
with_attribution : bool, Optional, default: True
Set whether or not to display who the quote is from
Returns
-------
quote : str
Compiled string including quote and optional attribution
"""
quote = "The code is but a canvas to our imagination."
if with_attribution:
quote += "\n\t- Adapted from Henry David Thoreau"
return quote
if __name__ == "__main__":
# Do something if this file is invoked on its own
print(canvas())
| 23.292308
| 92
| 0.636724
|
b6e6c6cd8f27e6ff042e5e7f694d63601c81370e
| 522
|
py
|
Python
|
tests/conftest.py
|
Scille/autobahn_sync
|
d75fceff0d1aee61fa6dd0168eb1cd40794ad827
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
Scille/autobahn_sync
|
d75fceff0d1aee61fa6dd0168eb1cd40794ad827
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
Scille/autobahn_sync
|
d75fceff0d1aee61fa6dd0168eb1cd40794ad827
|
[
"MIT"
] | null | null | null |
import pytest
def pytest_addoption(parser):
parser.addoption("--no-router", action='store_true',
help="Don't start WAMP router for the test"
" (must provide one on `ws://localhost:8080/ws` then)")
parser.addoption("--twisted-logs", action='store_true', help="Enable twisted logs output")
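# Example invocations (illustrative, based on the option help text above):
#     pytest                  # starts a WAMP router for the tests
#     pytest --no-router      # expects a router on ws://localhost:8080/ws
#     pytest --twisted-logs   # mirrors twisted log output to stdout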
def pytest_runtest_setup(item):
if item.config.getoption("--twisted-logs"):
import sys
from twisted.python import log
log.startLogging(sys.stdout)
| 32.625
| 94
| 0.637931
|
2fe3b7268d1e92d1b0d08ce73da2fc01f22edcf6
| 263
|
py
|
Python
|
manage.py
|
morgan-county/site
|
86c0cc9d85fecd62e1d20bd3e841d19165ed3956
|
[
"MIT"
] | null | null | null |
manage.py
|
morgan-county/site
|
86c0cc9d85fecd62e1d20bd3e841d19165ed3956
|
[
"MIT"
] | null | null | null |
manage.py
|
morgan-county/site
|
86c0cc9d85fecd62e1d20bd3e841d19165ed3956
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"morgan.settings.local")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 21.916667
| 64
| 0.749049
|
f4b7716d15bf1b4d2e62f6ebbe8a7bfe9de2fd08
| 1,774
|
py
|
Python
|
BenchmarkScripts/2d_evaluation/instances2dict.py
|
giuliano-97/ScanNet
|
8bf7bbc4acfba10750d3797a11d620db3a3b8fce
|
[
"MIT"
] | null | null | null |
BenchmarkScripts/2d_evaluation/instances2dict.py
|
giuliano-97/ScanNet
|
8bf7bbc4acfba10750d3797a11d620db3a3b8fce
|
[
"MIT"
] | null | null | null |
BenchmarkScripts/2d_evaluation/instances2dict.py
|
giuliano-97/ScanNet
|
8bf7bbc4acfba10750d3797a11d620db3a3b8fce
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#
# Convert instances from png files to a dictionary
#
from __future__ import print_function
import os, sys
from instance import Instance
from PIL import Image
import numpy as np
def instances2dict(imageFileList, class_labels, class_ids, verbose=False):
imgCount = 0
instanceDict = {}
label2id = {}
id2label = {}
for i in range(len(class_labels)):
label2id[class_labels[i]] = class_ids[i]
id2label[class_ids[i]] = class_labels[i]
if not isinstance(imageFileList, list):
imageFileList = [imageFileList]
if verbose:
print("Processing {} images...".format(len(imageFileList)))
for imageFileName in imageFileList:
# Load image
img = Image.open(imageFileName)
# Image as numpy array
imgNp = np.array(img)
# Initialize label categories
instances = {}
for label in class_labels:
instances[label] = []
# Loop through all instance ids in instance image
for instanceId in np.unique(imgNp):
instanceObj = Instance(imgNp, instanceId)
if instanceObj.labelID in class_ids:
instances[id2label[instanceObj.labelID]].append(instanceObj.toDict())
imgKey = os.path.abspath(imageFileName)
instanceDict[imgKey] = instances
imgCount += 1
if verbose:
print("\rImages Processed: {}".format(imgCount), end=" ")
sys.stdout.flush()
if verbose:
print("")
return instanceDict
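# Usage sketch (illustrative; the labels, ids and file name are invented):
#     labels = ['chair', 'table']
#     ids = [5, 7]
#     result = instances2dict(['scene0000_00.png'], labels, ids, verbose=True)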
def main(argv):
fileList = []
if len(argv) > 2:
for arg in argv:
if "png" in arg:
fileList.append(arg)
instances2dict(fileList, True)
if __name__ == "__main__":
main(sys.argv[1:])
| 24.985915
| 85
| 0.61894
|
36391889e27d9b4bf2c27ad31f265b0b27b13b0c
| 4,907
|
py
|
Python
|
app.py
|
rlkennye/sqlalchemy-challenge
|
466b333ebbdc0c63706663a144a940d77f4ca7da
|
[
"ADSL"
] | null | null | null |
app.py
|
rlkennye/sqlalchemy-challenge
|
466b333ebbdc0c63706663a144a940d77f4ca7da
|
[
"ADSL"
] | null | null | null |
app.py
|
rlkennye/sqlalchemy-challenge
|
466b333ebbdc0c63706663a144a940d77f4ca7da
|
[
"ADSL"
] | null | null | null |
import numpy as np
import pandas as pd
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
engine = create_engine("sqlite:///Resources/hawaii.sqlite", connect_args={'check_same_thread': False})
Base = automap_base()
Base.prepare(engine, reflect=True)
Base.classes.keys()
Measurement = Base.classes.measurement
Station = Base.classes.station
session = Session(engine)
#weather app
app = Flask(__name__)
Latest_Maximum_Date = (session.query(Measurement.date)
.order_by(Measurement.date.desc())
.first())
Latest_Maximum_Date = list(np.ravel(Latest_Maximum_Date))[0]
Latest_Maximum_Date = dt.datetime.strptime(Latest_Maximum_Date, '%Y-%m-%d')
latestYear = int(dt.datetime.strftime(Latest_Maximum_Date, '%Y'))
latestMonth = int(dt.datetime.strftime(Latest_Maximum_Date, '%m'))
latestDay = int(dt.datetime.strftime(Latest_Maximum_Date, '%d'))
date_1_year_ago = dt.date(latestYear, latestMonth, latestDay) - dt.timedelta(days=365)
date_1_year_ago = dt.datetime.strftime(date_1_year_ago, '%Y-%m-%d')
@app.route("/")
def home():
return (f"Welcome to Surf's Up!: Hawaii Climate API<br/>"
f"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~<br/>"
f"Available Routes:<br/>"
f"/api/v1.0/stations ~~~~~ a list of all weather observation stations<br/>"
            f"/api/v1.0/precipitation ~~ the latest year of precipitation data<br/>"
f"/api/v1.0/temperature ~~ the latest year of temperature data<br/>"
f"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~<br/>"
f"~~~ datesearch (yyyy-mm-dd)<br/>"
f"/api/v1.0/datesearch/2017-06-14 ~~~~~~~~~~~ low, high, and average temp for date given and each date after<br/>"
f"/api/v1.0/datesearch/2017-06-14/2017-06-30 ~~ low, high, and average temp for date given and each date up to and including end date<br/>"
f"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~<br/>"
f"~ data available from 2010-01-01 to 2017-08-23 ~<br/>"
f"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
@app.route("/api/v1.0/stations")
def stations():
results = session.query(Station.name).all()
all_stations = list(np.ravel(results))
return jsonify(all_stations)
@app.route("/api/v1.0/precipitation")
def precipitation():
results = (session.query(Measurement.date, Measurement.prcp, Measurement.station)
.filter(Measurement.date > date_1_year_ago)
.order_by(Measurement.date)
.all())
precipData = []
for result in results:
precipDict = {result.date: result.prcp, "Station": result.station}
precipData.append(precipDict)
return jsonify(precipData)
@app.route("/api/v1.0/temperature")
def temperature():
results = (session.query(Measurement.date, Measurement.tobs, Measurement.station)
.filter(Measurement.date > date_1_year_ago)
.order_by(Measurement.date)
.all())
tempData = []
for result in results:
tempDict = {result.date: result.tobs, "Station": result.station}
tempData.append(tempDict)
return jsonify(tempData)
@app.route('/api/v1.0/datesearch/<startDate>')
def start(startDate):
sel = [Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
results = (session.query(*sel)
.filter(func.strftime("%Y-%m-%d", Measurement.date) >= startDate)
.group_by(Measurement.date)
.all())
dates = []
for result in results:
date_dict = {}
date_dict["Date"] = result[0]
date_dict["Low Temp"] = result[1]
date_dict["Avg Temp"] = result[2]
date_dict["High Temp"] = result[3]
dates.append(date_dict)
return jsonify(dates)
@app.route('/api/v1.0/datesearch/<startDate>/<endDate>')
def startEnd(startDate, endDate):
trip_parms = [Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
results = (session.query(*trip_parms)
.filter(func.strftime("%Y-%m-%d", Measurement.date) >= startDate)
.filter(func.strftime("%Y-%m-%d", Measurement.date) <= endDate)
.group_by(Measurement.date)
.all())
dates = []
for result in results:
date_dict = {}
date_dict["Date"] = result[0]
date_dict["Low Temp"] = result[1]
date_dict["Avg Temp"] = result[2]
date_dict["High Temp"] = result[3]
dates.append(date_dict)
return jsonify(dates)
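# Example requests against a local development server (illustrative; host and
# port assume Flask's defaults):
#     curl http://127.0.0.1:5000/api/v1.0/precipitation
#     curl http://127.0.0.1:5000/api/v1.0/datesearch/2017-06-14/2017-06-30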
if __name__ == "__main__":
app.run(debug=True)
| 36.619403
| 151
| 0.604646
|
f477cefd34f1523ab2f23f12837cd49008ff7172
| 9,712
|
py
|
Python
|
Code/hashtable.py
|
reikamoon/CS_1.3_Core_Data_Structures
|
df7dae34fffda38221e170e3a9c4a410578bcca4
|
[
"MIT"
] | null | null | null |
Code/hashtable.py
|
reikamoon/CS_1.3_Core_Data_Structures
|
df7dae34fffda38221e170e3a9c4a410578bcca4
|
[
"MIT"
] | 6
|
2020-02-15T17:51:18.000Z
|
2020-03-09T23:16:38.000Z
|
Code/hashtable.py
|
reikamoon/CS_1.3_Core_Data_Structures
|
df7dae34fffda38221e170e3a9c4a410578bcca4
|
[
"MIT"
] | null | null | null |
#!python
from linkedlist import LinkedList
class HashTable(object):
def __init__(self, init_size=8):
"""Initialize this hash table with the given initial size."""
self.buckets = [LinkedList() for i in range(init_size)]
self.size = 0 # Number of key-value entries
def __str__(self):
"""Return a formatted string representation of this hash table."""
items = ['{!r}: {!r}'.format(key, val) for key, val in self.items()]
return '{' + ', '.join(items) + '}'
def __repr__(self):
"""Return a string representation of this hash table."""
return 'HashTable({!r})'.format(self.items())
def _bucket_index(self, key):
"""Return the bucket index where the given key would be stored."""
return hash(key) % len(self.buckets)
def load_factor(self):
"""Return the load factor, the ratio of number of entries to buckets.
        Best and worst case running time: O(1), because it only divides two
        values that the hash table already stores."""
        return self.size / len(self.buckets)
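    # Worked example (illustrative): 6 entries spread across 8 buckets give a
    # load factor of 6 / 8 = 0.75; set() below triggers a resize once this
    # value exceeds 0.75.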
def keys(self):
"""Return a list of all keys in this hash table.
        Best and worst case running time: O(n), because every bucket and every
        entry must be visited."""
# Collect all keys in each of the buckets
all_keys = []
for bucket in self.buckets:
for key, value in bucket.items():
all_keys.append(key)
return all_keys
def values(self):
"""Return a list of all values in this hash table.
        Best and worst case running time: O(n), because every bucket and every
        entry must be visited."""
# Collect all values in each of the buckets
all_values = []
for bucket in self.buckets:
for key, value in bucket.items():
all_values.append(value)
return all_values
def items(self):
"""Return a list of all entries (key-value pairs) in this hash table.
        Best and worst case running time: O(n), because every bucket's entries
        must be collected."""
# Collect all pairs of key-value entries in each of the buckets
all_items = []
for bucket in self.buckets:
all_items.extend(bucket.items())
return all_items
def length(self):
"""Return the number of key-value entries by traversing its buckets.
        Best and worst case running time: O(n), because every bucket must be
        traversed to count its entries."""
# Count number of key-value entries in each of the buckets
item_count = 0
for bucket in self.buckets:
item_count += bucket.length()
return item_count
        # Equivalent one-liner, kept for reference only:
        # return sum(bucket.length() for bucket in self.buckets)
def contains(self, key):
"""Return True if this hash table contains the given key, or False.
        Best case running time: O(1), when the matching entry is at the front
        of its bucket.
        Worst case running time: O(l), where l is the length of the bucket's
        chain, when the whole bucket must be scanned or the key is absent."""
# Find the bucket the given key belongs in
index = self._bucket_index(key)
bucket = self.buckets[index]
# Check if an entry with the given key exists in that bucket
entry = bucket.find(lambda key_value: key_value[0] == key)
return entry is not None # True or False
def get(self, key):
"""Return the value associated with the given key, or raise KeyError.
        Best case running time: O(1), when the entry is at the front of its
        bucket.
        Worst case running time: O(l), where l is the length of the bucket's
        chain, when the whole bucket must be scanned or the key is absent."""
# Find the bucket the given key belongs in
index = self._bucket_index(key)
bucket = self.buckets[index]
# Find the entry with the given key in that bucket, if one exists
entry = bucket.find(lambda key_value: key_value[0] == key)
if entry is not None: # Found
# Return the given key's associated value
assert isinstance(entry, tuple)
assert len(entry) == 2
return entry[1]
else: # Not found
raise KeyError('Key not found: {}'.format(key))
def set(self, key, value):
"""Insert or update the given key with its associated value.
        Best case running time: O(1), when the bucket is empty or the matching
        entry is found immediately.
        Worst case running time: O(l) to scan the bucket, plus O(n) when the
        insertion pushes the load factor over the threshold and triggers a
        resize."""
# Find the bucket the given key belongs in
index = self._bucket_index(key)
bucket = self.buckets[index]
# Find the entry with the given key in that bucket, if one exists
# Check if an entry with the given key exists in that bucket
entry = bucket.find(lambda key_value: key_value[0] == key)
if entry is not None: # Found
# In this case, the given key's value is being updated
# Remove the old key-value entry from the bucket first
bucket.delete(entry)
else:
self.size += 1
# Insert the new key-value entry into the bucket in either case
bucket.append((key, value))
        # Resize automatically once the load factor exceeds the 0.75 threshold
        if self.load_factor() > 0.75:
            self._resize()
def delete(self, key):
"""Delete the given key and its associated value, or raise KeyError.
        Best case running time: O(1), when the entry is at the front of its
        bucket.
        Worst case running time: O(l), where l is the length of the bucket's
        chain, when the whole bucket must be scanned or the key is absent."""
# Find the bucket the given key belongs in
index = self._bucket_index(key)
bucket = self.buckets[index]
# Find the entry with the given key in that bucket, if one exists
entry = bucket.find(lambda key_value: key_value[0] == key)
if entry is not None: # Found
# Remove the key-value entry from the bucket
bucket.delete(entry)
self.size -= 1
else: # Not found
raise KeyError('Key not found: {}'.format(key))
def _resize(self, new_size=None):
"""Resize this hash table's buckets and rehash all key-value entries.
Should be called automatically when load factor exceeds a threshold
such as 0.75 after an insertion (when set is called with a new key).
        Best and worst case running time: O(n), because every entry must be
        rehashed and re-inserted.
        Best and worst case space usage: O(n + b), for the temporary list of
        all entries plus the new list of b buckets."""
# If unspecified, choose new size dynamically based on current size
if new_size is None:
new_size = len(self.buckets) * 2 # Double size
# Option to reduce size if buckets are sparsely filled (low load factor)
        elif new_size == 0:
new_size = len(self.buckets) // 2 # Half size
        # Temporarily hold all current key-value entries
        keyvalues = self.items()
        # Create a new list of new_size empty linked list buckets
        self.buckets = [LinkedList() for _ in range(new_size)]
        self.size = 0
        # Re-insert every entry, which rehashes it into a bucket index based
        # on the new number of buckets
        for key, value in keyvalues:
            self.set(key, value)
def test_hash_table():
ht = HashTable(4)
print('HashTable: ' + str(ht))
print('Setting entries:')
ht.set('I', 1)
print('set(I, 1): ' + str(ht))
ht.set('V', 5)
print('set(V, 5): ' + str(ht))
print('size: ' + str(ht.size))
print('length: ' + str(ht.length()))
print('buckets: ' + str(len(ht.buckets)))
print('load_factor: ' + str(ht.load_factor()))
ht.set('X', 10)
print('set(X, 10): ' + str(ht))
ht.set('L', 50) # Should trigger resize
print('set(L, 50): ' + str(ht))
print('size: ' + str(ht.size))
print('length: ' + str(ht.length()))
print('buckets: ' + str(len(ht.buckets)))
print('load_factor: ' + str(ht.load_factor()))
print('Getting entries:')
print('get(I): ' + str(ht.get('I')))
print('get(V): ' + str(ht.get('V')))
print('get(X): ' + str(ht.get('X')))
print('get(L): ' + str(ht.get('L')))
print('contains(X): ' + str(ht.contains('X')))
print('contains(Z): ' + str(ht.contains('Z')))
print('Deleting entries:')
ht.delete('I')
print('delete(I): ' + str(ht))
ht.delete('V')
print('delete(V): ' + str(ht))
ht.delete('X')
print('delete(X): ' + str(ht))
ht.delete('L')
print('delete(L): ' + str(ht))
print('contains(X): ' + str(ht.contains('X')))
print('size: ' + str(ht.size))
print('length: ' + str(ht.length()))
print('buckets: ' + str(len(ht.buckets)))
print('load_factor: ' + str(ht.load_factor()))
if __name__ == '__main__':
test_hash_table()
| 43.164444
| 134
| 0.609143
|
397b737d56d33842bfa377e583062226973af017
| 918
|
py
|
Python
|
yotta/test/test_subcommand.py
|
headlessme/yotta
|
947ab074b629c8f18ca91ab84ebaa29096b011c6
|
[
"Apache-2.0"
] | 176
|
2015-01-02T07:31:59.000Z
|
2022-03-21T12:40:02.000Z
|
yotta/test/test_subcommand.py
|
headlessme/yotta
|
947ab074b629c8f18ca91ab84ebaa29096b011c6
|
[
"Apache-2.0"
] | 549
|
2015-01-05T16:19:54.000Z
|
2021-01-15T13:46:42.000Z
|
yotta/test/test_subcommand.py
|
headlessme/yotta
|
947ab074b629c8f18ca91ab84ebaa29096b011c6
|
[
"Apache-2.0"
] | 84
|
2015-01-10T21:01:00.000Z
|
2022-03-24T16:04:42.000Z
|
#!/usr/bin/env python
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import unittest
# module to test:
from yotta import test_subcommand
class TestTestSubcommandModule(unittest.TestCase):
def test_moduleFromDirname(self):
self.assertTrue(test_subcommand.moduleFromDirname('ym/b/ym/c/d', {'b':'b', 'c':'c'}, 'a') == 'c')
self.assertTrue(test_subcommand.moduleFromDirname('ym/b/q/c/d', {'b':'b', 'c':'c'}, 'a') == 'b')
self.assertTrue(test_subcommand.moduleFromDirname('z/b/q/c/d', {'b':'b', 'c':'c'}, 'a') == 'a')
self.assertTrue(test_subcommand.moduleFromDirname('ym/e/d', {'b':'b', 'c':'c'}, 'a') == 'a')
self.assertTrue(test_subcommand.moduleFromDirname('ym/e/d', {'b':'b', 'c':'c', 'e':'e'}, 'a') == 'e')
# see also yotta/test/cli/test.py for cli-driven testing
| 35.307692
| 109
| 0.636166
|
1123e3421fd48ffe2ee33fa6ec18e3e96c11cdef
| 575
|
py
|
Python
|
nexusdash2/dashboardperdevice/views.py
|
fmichalo/n9k-programmability
|
3a359df5f048ea8c7695e47e9014ffdfe03835f4
|
[
"Apache-2.0"
] | null | null | null |
nexusdash2/dashboardperdevice/views.py
|
fmichalo/n9k-programmability
|
3a359df5f048ea8c7695e47e9014ffdfe03835f4
|
[
"Apache-2.0"
] | null | null | null |
nexusdash2/dashboardperdevice/views.py
|
fmichalo/n9k-programmability
|
3a359df5f048ea8c7695e47e9014ffdfe03835f4
|
[
"Apache-2.0"
] | null | null | null |
from django.views.generic import TemplateView
from hostnames.models import HostNames
from django.http import Http404
class DashboardPerDeviceView(TemplateView):
template_name = "dashboardperdevice/dashboardperdevice.html"
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
hostname = kwargs.get('hostname')
if len(HostNames.objects.filter(hostname=hostname)) == 0:
raise Http404
return self.render_to_response(context)
dashboard_view = DashboardPerDeviceView.as_view()
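# A hypothetical URLconf entry (not part of this file; the exact syntax depends
# on the Django version in use) showing how `dashboard_view` could be wired up
# to receive the `hostname` keyword argument it expects:
#
#   from django.urls import path
#   from dashboardperdevice.views import dashboard_view
#
#   urlpatterns = [
#       path('dashboard/<str:hostname>/', dashboard_view, name='dashboardperdevice'),
#   ]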
| 35.9375
| 66
| 0.716522
|
be5dd91382a4a69ab4b31a1ed12cc2c537a5841d
| 189
|
py
|
Python
|
segar/sim/__init__.py
|
fgolemo/segar
|
8e21f8ee01bc72adb84dec7998b014d11d2b1fbe
|
[
"MIT"
] | 19
|
2022-02-16T18:45:12.000Z
|
2022-03-25T10:42:19.000Z
|
segar/sim/__init__.py
|
microsoft/segar
|
78463968238482ae035121504458dd0909107e10
|
[
"MIT"
] | 4
|
2022-02-16T22:58:00.000Z
|
2022-03-02T23:11:10.000Z
|
segar/sim/__init__.py
|
fgolemo/segar
|
8e21f8ee01bc72adb84dec7998b014d11d2b1fbe
|
[
"MIT"
] | 5
|
2022-02-17T01:46:18.000Z
|
2022-03-21T21:21:19.000Z
|
__copyright__ = "Copyright (c) Microsoft Corporation and Mila - Quebec AI Institute"
__license__ = "MIT"
"""There is only one sim.
"""
from .sim import Simulator
__all__ = ["Simulator"]
| 18.9
| 84
| 0.719577
|
215840f32257f3d3f346aa15cb08052c1feb60fa
| 3,650
|
py
|
Python
|
simulations/2021.MAY/20210525-PXSTvsPrimRST/pruner03/PrimRST_Rpruner.py
|
GiliardGodoi/steiner-problem-with-evol
|
6b34f0342b791ae6c65b6d016c37a4d45ab5cdad
|
[
"MIT"
] | null | null | null |
simulations/2021.MAY/20210525-PXSTvsPrimRST/pruner03/PrimRST_Rpruner.py
|
GiliardGodoi/steiner-problem-with-evol
|
6b34f0342b791ae6c65b6d016c37a4d45ab5cdad
|
[
"MIT"
] | null | null | null |
simulations/2021.MAY/20210525-PXSTvsPrimRST/pruner03/PrimRST_Rpruner.py
|
GiliardGodoi/steiner-problem-with-evol
|
6b34f0342b791ae6c65b6d016c37a4d45ab5cdad
|
[
"MIT"
] | null | null | null |
from os import path
from ga4stpg.condition import BestKnownReached, Stagnation
from ga4stpg.customevol import GeneticEvolution as Evolution
from ga4stpg.customevol import GeneticPopulation as GPopulation
from ga4stpg.graph import ReaderORLibrary
from ga4stpg.graph.util import is_steiner_tree
from ga4stpg.normalization import normalize
from ga4stpg.selector import roullete
from ga4stpg.tracker import DataTracker
from ga4stpg.tree.evaluation import EvaluateTreeGraph
from ga4stpg.tree.generate import GenerateBasedPrimRST
from ga4stpg.tree.mstcrossover import CrossoverPrimRST
from ga4stpg.tree.mutate import (PrimBasedMutation, Prunning,
ReplaceByRandomEdge)
from ga4stpg.util import STEIN_B, display, update_best, update_generation
def simulation(simulation_name, params):
datasets_folder = path.join("datasets", "ORLibrary")
filename = path.join(datasets_folder, params["dataset"])
STPG = ReaderORLibrary().parser(filename)
print("STPG information", '\n', 10*'- ','\n')
print("Trial: ", parameters['runtrial'])
print('Instance: ', STPG.name)
print('Best Known cost: ', params['global_optimum'])
print("Nro. Node:", STPG.nro_nodes)
print("Nro. Edges:", STPG.nro_edges)
print("Nro. Terminals:", STPG.nro_terminals)
# print("Terminals: \n", STPG.terminals)
output_folder = path.join("data", simulation_name, STPG.name)
tracker = DataTracker(params['runtrial'],target=output_folder)
generator = GenerateBasedPrimRST(STPG)
evaluator = EvaluateTreeGraph(STPG)
crossover = CrossoverPrimRST(STPG)
prunner = Prunning(STPG)
### mut_prim = PrimBasedMutation(STPG)
replace_random = ReplaceByRandomEdge(STPG)
population = (GPopulation(
chromosomes=[ generator() for _ in range(params["population_size"])],
eval_function=evaluator,
maximize=True)
.evaluate()
.normalize(norm_function=normalize)
.callback(update_best))
evol = (Evolution()
.evaluate()
.normalize(norm_function=normalize)
.callback(update_best)
.callback(tracker.log_evaluation)
.select(selection_func=roullete)
.crossover(combiner=crossover)
.mutate(mutate_function=replace_random, probability=0.3)
.mutate(mutate_function=prunner, probability=0.3)
.callback(update_generation)
.callback(display, every=100))
with Stagnation(interval=params["stagnation_interval"]), \
BestKnownReached(global_optimum=params['global_optimum']):
result = population.evolve(evol, n=params["n_iterations"])
tracker.log_simulation(params, STPG, result)
best_overall = result.documented_best
test, response = is_steiner_tree(best_overall.chromosome, STPG)
tracker.log_bestIndividual(best_overall, test, response)
tracker.report()
if __name__ == "__main__":
parameters = {
'runtrial' : 0,
'dataset' : 'steinb1.txt',
'global_optimum' : 82,
'population_size' : 100,
'tx_mutation' : 0.2,
'tx_crossover' : 1.0,
'n_iterations' : 4_000,
'stagnation_interval' : 500,
}
for dataset, value in STEIN_B:
print('='*10,'\n', dataset)
print('global optimum ', value)
print('='*10, '\n')
parameters['dataset'] = dataset
parameters['global_optimum'] = value
for i in range(50):
parameters['runtrial'] = i + 1
simulation("PrimRST_Rpruner", parameters)
| 35.096154
| 78
| 0.663836
|
dc6cee0240c3153afddafc203ec51956b8ca0fa6
| 3,488
|
py
|
Python
|
tests/test_boosted_particle_output.py
|
wilds9/fbpic
|
902c3bc8757545496b8cbb772401de6b0974a3dc
|
[
"BSD-3-Clause-LBNL"
] | 2
|
2020-11-21T07:39:05.000Z
|
2020-11-21T14:00:32.000Z
|
tests/test_boosted_particle_output.py
|
RemiLehe/fbpic
|
f0d55048eb669081c26eff28fee39891b62aaeb2
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
tests/test_boosted_particle_output.py
|
RemiLehe/fbpic
|
f0d55048eb669081c26eff28fee39891b62aaeb2
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
# Copyright 2017, FBPIC contributors
# Authors: Remi Lehe
# License: 3-Clause-BSD-LBNL
"""
This test file is part of FB-PIC (Fourier-Bessel Particle-In-Cell).
It tests the boosted-frame particle output routines.
This is done by initializing a set of known particles and making sure that
they are all retrieved by the boosted-frame diagnostics.
"""
# -------
# Imports
# -------
import os, shutil
import numpy as np
from scipy.constants import c
# Import the relevant structures in FBPIC
from fbpic.main import Simulation
from fbpic.lpa_utils.boosted_frame import BoostConverter
from fbpic.lpa_utils.bunch import add_elec_bunch_gaussian
from fbpic.openpmd_diag import BackTransformedParticleDiagnostic
# Import openPMD-viewer for checking output files
from openpmd_viewer import OpenPMDTimeSeries
# ----------
# Parameters
# ----------
use_cuda = True
def test_boosted_output( gamma_boost=10. ):
"""
# TODO
Parameters
----------
gamma_boost: float
The Lorentz factor of the frame in which the simulation is carried out.
"""
# The simulation box
Nz = 500 # Number of gridpoints along z
zmax_lab = 0.e-6 # Length of the box along z (meters)
zmin_lab = -20.e-6
Nr = 10 # Number of gridpoints along r
rmax = 10.e-6 # Length of the box along r (meters)
Nm = 2 # Number of modes used
# Number of timesteps
N_steps = 500
diag_period = 20 # Period of the diagnostics in number of timesteps
dt_lab = (zmax_lab - zmin_lab)/Nz * 1./c
T_sim_lab = N_steps * dt_lab
# Move into directory `tests`
os.chdir('./tests')
# Initialize the simulation object
sim = Simulation( Nz, zmax_lab, Nr, rmax, Nm, dt_lab,
0, 0, # No electrons get created because we pass p_zmin=p_zmax=0
0, rmax, 1, 1, 4,
n_e=0, zmin=zmin_lab, initialize_ions=False, gamma_boost=gamma_boost,
v_comoving=-0.9999*c, boundaries='open', use_cuda=use_cuda )
sim.set_moving_window( v=c )
# Remove the electron species
sim.ptcl = []
# Add a Gaussian electron bunch
# Note: the total charge is 0 so all fields should remain 0
# throughout the simulation. As a consequence, the motion of the beam
# is a mere translation.
N_particles = 3000
add_elec_bunch_gaussian( sim, sig_r=1.e-6, sig_z=1.e-6, n_emit=0.,
gamma0=100, sig_gamma=0., Q=0., N=N_particles,
zf=0.5*(zmax_lab+zmin_lab), boost=BoostConverter(gamma_boost) )
sim.ptcl[0].track( sim.comm )
# openPMD diagnostics
sim.diags = [
BackTransformedParticleDiagnostic( zmin_lab, zmax_lab, v_lab=c,
dt_snapshots_lab=T_sim_lab/3., Ntot_snapshots_lab=3,
gamma_boost=gamma_boost, period=diag_period, fldobject=sim.fld,
species={"bunch": sim.ptcl[0]}, comm=sim.comm) ]
# Run the simulation
sim.step( N_steps )
# Check consistency of the back-transformed openPMD diagnostics:
    # Make sure that all the particles were retrieved by checking particle IDs
ts = OpenPMDTimeSeries('./lab_diags/hdf5/')
ref_pid = np.sort( sim.ptcl[0].tracker.id )
for iteration in ts.iterations:
pid, = ts.get_particle( ['id'], iteration=iteration )
pid = np.sort( pid )
assert len(pid) == N_particles
assert np.all( ref_pid == pid )
# Remove openPMD files
shutil.rmtree('./lab_diags/')
os.chdir('../')
# Run the tests
if __name__ == '__main__':
test_boosted_output()
| 33.538462
| 79
| 0.671732
|
aae9d25cff1766f9675b8fa1bd7d15ba97301773
| 4,566
|
py
|
Python
|
ansible/modules/storage/purestorage/purefa_hg.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1
|
2021-04-02T08:08:39.000Z
|
2021-04-02T08:08:39.000Z
|
ansible/modules/storage/purestorage/purefa_hg.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
ansible/modules/storage/purestorage/purefa_hg.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1
|
2020-05-03T01:13:16.000Z
|
2020-05-03T01:13:16.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefa_hg
version_added: '2.4'
short_description: Manage hostgroups on Pure Storage FlashArrays
description:
- Create, delete or modify hostgroups on Pure Storage FlashArrays.
author:
- Simon Dodsley (@sdodsley)
options:
hostgroup:
description:
- The name of the hostgroup.
required: true
state:
description:
- Define whether the hostgroup should exist or not.
default: present
choices: [ absent, present ]
host:
description:
- List of existing hosts to add to hostgroup.
volume:
description:
- List of existing volumes to add to hostgroup.
extends_documentation_fragment:
- purestorage
'''
EXAMPLES = r'''
- name: Create new hostgroup
purefa_hg:
hostgroup: foo
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
# This will disconnect all hosts and volumes in the hostgroup
- name: Delete hostgroup
purefa_hg:
hostgroup: foo
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: absent
- name: Create host group with hosts and volumes
purefa_hg:
hostgroup: bar
host:
- host1
- host2
volume:
- vol1
- vol2
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_system, purefa_argument_spec
try:
from purestorage import purestorage
HAS_PURESTORAGE = True
except ImportError:
HAS_PURESTORAGE = False
def get_hostgroup(module, array):
hostgroup = None
for h in array.list_hgroups():
if h["name"] == module.params['hostgroup']:
hostgroup = h
break
return hostgroup
def make_hostgroup(module, array):
changed = True
if not module.check_mode:
host = array.create_hgroup(module.params['hostgroup'])
if module.params['host']:
array.set_hgroup(module.params['hostgroup'], hostlist=module.params['host'])
if module.params['volume']:
for v in module.params['volume']:
array.connect_hgroup(module.params['hostgroup'], v)
module.exit_json(changed=changed)
def update_hostgroup(module, array):
changed = False
hostgroup = module.params['hostgroup']
module.exit_json(changed=changed)
def delete_hostgroup(module, array):
changed = True
if not module.check_mode:
for vol in array.list_hgroup_connections(module.params['hostgroup']):
array.disconnect_hgroup(module.params['hostgroup'], vol["vol"])
host = array.get_hgroup(module.params['hostgroup'])
array.set_hgroup(module.params['hostgroup'], remhostlist=host['hosts'])
array.delete_hgroup(module.params['hostgroup'])
module.exit_json(changed=changed)
def main():
argument_spec = purefa_argument_spec()
argument_spec.update(dict(
hostgroup=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
host=dict(type='list'),
volume=dict(type='list'),
))
module = AnsibleModule(argument_spec, supports_check_mode=True)
if not HAS_PURESTORAGE:
        module.fail_json(msg='purestorage sdk is required for this module')
state = module.params['state']
array = get_system(module)
hostgroup = get_hostgroup(module, array)
if module.params['host']:
try:
for h in module.params['host']:
array.get_host(h)
except:
module.fail_json(msg='Host not found')
if module.params['volume']:
try:
for v in module.params['volume']:
array.get_volume(v)
except:
module.fail_json(msg='Volume not found')
if hostgroup and state == 'present':
update_hostgroup(module, array)
elif hostgroup and state == 'absent':
delete_hostgroup(module, array)
elif hostgroup is None and state == 'absent':
module.exit_json(changed=False)
else:
make_hostgroup(module, array)
if __name__ == '__main__':
main()
| 26.546512
| 92
| 0.661191
|
a24d04128b3fe4749056eacfe1f2fb7752c24191
| 31,473
|
py
|
Python
|
pypy/module/pyexpat/interp_pyexpat.py
|
DinrusGroup/PyPy
|
9fb17e23a17e3cf511cf9c4d11408393df4748c2
|
[
"Apache-2.0",
"OpenSSL"
] | 1
|
2018-12-27T20:40:49.000Z
|
2018-12-27T20:40:49.000Z
|
pypy/module/pyexpat/interp_pyexpat.py
|
GabriellaUwa/pypy
|
2ede3b557a25cb49db969e942ca5a7f8a9eae0d4
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
pypy/module/pyexpat/interp_pyexpat.py
|
GabriellaUwa/pypy
|
2ede3b557a25cb49db969e942ca5a7f8a9eae0d4
|
[
"Apache-2.0",
"OpenSSL"
] | 1
|
2021-10-10T13:53:32.000Z
|
2021-10-10T13:53:32.000Z
|
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.typedef import TypeDef, GetSetProperty
from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault
from pypy.interpreter.error import OperationError, oefmt
from rpython.rlib import rgc, jit
from rpython.rlib.objectmodel import specialize
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rtyper.tool import rffi_platform
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.translator.platform import platform
import sys
import weakref
import py
if sys.platform == "win32":
libname = 'libexpat'
pre_include_bits = ["#define XML_STATIC"]
else:
libname = 'expat'
pre_include_bits = []
eci = ExternalCompilationInfo(
libraries=[libname],
library_dirs=platform.preprocess_library_dirs([]),
includes=['expat.h'],
include_dirs=platform.preprocess_include_dirs([]),
pre_include_bits = pre_include_bits,
)
eci = rffi_platform.configure_external_library(
libname, eci,
[dict(prefix='expat-',
include_dir='lib', library_dir='win32/bin/release'),
])
XML_Content_Ptr = lltype.Ptr(lltype.ForwardReference())
XML_Parser = rffi.COpaquePtr(typedef='XML_Parser')
xml_error_list = [
"XML_ERROR_NO_MEMORY",
"XML_ERROR_SYNTAX",
"XML_ERROR_NO_ELEMENTS",
"XML_ERROR_INVALID_TOKEN",
"XML_ERROR_UNCLOSED_TOKEN",
"XML_ERROR_PARTIAL_CHAR",
"XML_ERROR_TAG_MISMATCH",
"XML_ERROR_DUPLICATE_ATTRIBUTE",
"XML_ERROR_JUNK_AFTER_DOC_ELEMENT",
"XML_ERROR_PARAM_ENTITY_REF",
"XML_ERROR_UNDEFINED_ENTITY",
"XML_ERROR_RECURSIVE_ENTITY_REF",
"XML_ERROR_ASYNC_ENTITY",
"XML_ERROR_BAD_CHAR_REF",
"XML_ERROR_BINARY_ENTITY_REF",
"XML_ERROR_ATTRIBUTE_EXTERNAL_ENTITY_REF",
"XML_ERROR_MISPLACED_XML_PI",
"XML_ERROR_UNKNOWN_ENCODING",
"XML_ERROR_INCORRECT_ENCODING",
"XML_ERROR_UNCLOSED_CDATA_SECTION",
"XML_ERROR_EXTERNAL_ENTITY_HANDLING",
"XML_ERROR_NOT_STANDALONE",
"XML_ERROR_UNEXPECTED_STATE",
"XML_ERROR_ENTITY_DECLARED_IN_PE",
"XML_ERROR_FEATURE_REQUIRES_XML_DTD",
"XML_ERROR_CANT_CHANGE_FEATURE_ONCE_PARSING",
# Added in Expat 1.95.7.
"XML_ERROR_UNBOUND_PREFIX",
# Added in Expat 1.95.8.
"XML_ERROR_UNDECLARING_PREFIX",
"XML_ERROR_INCOMPLETE_PE",
"XML_ERROR_XML_DECL",
"XML_ERROR_TEXT_DECL",
"XML_ERROR_PUBLICID",
"XML_ERROR_SUSPENDED",
"XML_ERROR_NOT_SUSPENDED",
"XML_ERROR_ABORTED",
"XML_ERROR_FINISHED",
"XML_ERROR_SUSPEND_PE",
]
xml_model_list = [
"XML_CTYPE_EMPTY",
"XML_CTYPE_ANY",
"XML_CTYPE_MIXED",
"XML_CTYPE_NAME",
"XML_CTYPE_CHOICE",
"XML_CTYPE_SEQ",
"XML_CQUANT_NONE",
"XML_CQUANT_OPT",
"XML_CQUANT_REP",
"XML_CQUANT_PLUS",
]
class CConfigure:
_compilation_info_ = eci
XML_Content = rffi_platform.Struct('XML_Content', [
('numchildren', rffi.UINT),
('children', XML_Content_Ptr),
('name', rffi.CCHARP),
('type', rffi.INT),
('quant', rffi.INT),
])
XML_Encoding = rffi_platform.Struct('XML_Encoding', [
('map', rffi.CFixedArray(rffi.INT, 1)),
('data', rffi.VOIDP),
('convert', rffi.VOIDP),
('release', rffi.VOIDP),
])
for name in ['XML_PARAM_ENTITY_PARSING_NEVER',
'XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE',
'XML_PARAM_ENTITY_PARSING_ALWAYS']:
locals()[name] = rffi_platform.ConstantInteger(name)
XML_MAJOR_VERSION = rffi_platform.ConstantInteger('XML_MAJOR_VERSION')
XML_MINOR_VERSION = rffi_platform.ConstantInteger('XML_MINOR_VERSION')
XML_MICRO_VERSION = rffi_platform.ConstantInteger('XML_MICRO_VERSION')
XML_FALSE = rffi_platform.ConstantInteger('XML_FALSE')
XML_TRUE = rffi_platform.ConstantInteger('XML_TRUE')
for name in xml_error_list:
locals()[name] = rffi_platform.ConstantInteger(name)
for name in xml_model_list:
locals()[name] = rffi_platform.ConstantInteger(name)
XML_Parser_SIZE = rffi_platform.SizeOf("XML_Parser")
for k, v in rffi_platform.configure(CConfigure).items():
globals()[k] = v
XML_COMBINED_VERSION = 10000*XML_MAJOR_VERSION+100*XML_MINOR_VERSION+XML_MICRO_VERSION
XML_Content_Ptr.TO.become(rffi.CArray(XML_Content))
XML_Encoding_Ptr = lltype.Ptr(XML_Encoding)
def expat_external(*a, **kw):
kw['compilation_info'] = eci
return rffi.llexternal(*a, **kw)
INTERNED_CCHARP = "INTERNED"
HANDLERS = dict(
StartElementHandler = [INTERNED_CCHARP, rffi.CCHARPP],
EndElementHandler = [INTERNED_CCHARP],
ProcessingInstructionHandler = [INTERNED_CCHARP, INTERNED_CCHARP],
CharacterDataHandler = [rffi.CCHARP, rffi.INT],
UnparsedEntityDeclHandler = [INTERNED_CCHARP] * 5,
NotationDeclHandler = [INTERNED_CCHARP] * 4,
StartNamespaceDeclHandler = [INTERNED_CCHARP, INTERNED_CCHARP],
EndNamespaceDeclHandler = [INTERNED_CCHARP],
CommentHandler = [rffi.CCHARP],
StartCdataSectionHandler = [],
EndCdataSectionHandler = [],
DefaultHandler = [rffi.CCHARP, rffi.INT],
DefaultHandlerExpand = [rffi.CCHARP, rffi.INT],
NotStandaloneHandler = [],
ExternalEntityRefHandler = [rffi.CCHARP] + [INTERNED_CCHARP] * 3,
StartDoctypeDeclHandler = [INTERNED_CCHARP, INTERNED_CCHARP,
INTERNED_CCHARP, rffi.INT],
EndDoctypeDeclHandler = [],
EntityDeclHandler = [INTERNED_CCHARP, rffi.INT, rffi.CCHARP, rffi.INT,
INTERNED_CCHARP, INTERNED_CCHARP, INTERNED_CCHARP,
INTERNED_CCHARP],
XmlDeclHandler = [rffi.CCHARP, rffi.CCHARP, rffi.INT],
ElementDeclHandler = [INTERNED_CCHARP, lltype.Ptr(XML_Content)],
AttlistDeclHandler = [INTERNED_CCHARP, INTERNED_CCHARP,
rffi.CCHARP, rffi.CCHARP, rffi.INT],
)
if XML_COMBINED_VERSION >= 19504:
HANDLERS['SkippedEntityHandler'] = [INTERNED_CCHARP, rffi.INT]
NB_HANDLERS = len(HANDLERS)
class Storage:
"Store objects under a non moving ID"
def __init__(self):
self.clear()
def clear(self):
self.next_id = 0
self._last_object_id = -1
self._last_object = None
self.storage = {}
@staticmethod
def get_nonmoving_id(obj, id=-1):
if id < 0:
id = global_storage.next_id
global_storage.next_id += 1
global_storage.storage[id] = obj
return id
@staticmethod
def get_object(id):
if id == global_storage._last_object_id:
return global_storage._last_object
result = global_storage.storage[id]
global_storage._last_object_id = id
global_storage._last_object = result
return result
@staticmethod
def free_nonmoving_id(id):
if id == global_storage._last_object_id:
global_storage._last_object = None
global_storage._last_object_id = -1
del global_storage.storage[id]
global_storage = Storage()
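# Usage sketch (illustrative only): Storage hands out a stable integer id that
# survives GC moves, so it can be passed through C callbacks as opaque user data
# and resolved back to the Python-level object later, e.g.
#
#   data_id = Storage.get_nonmoving_id(callback_data)
#   # ... pass rffi.cast(rffi.VOIDP, data_id) to the C library as user data ...
#   callback_data = Storage.get_object(data_id)
#   Storage.free_nonmoving_id(data_id)   # once the owner is finalized
#
# This is the pattern W_XMLParserType follows below with its `id` attribute.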
class CallbackData(W_Root):
def __init__(self, space, parser):
self.space = space
self.parser = weakref.ref(parser)
SETTERS = {}
for index, (name, params) in enumerate(HANDLERS.items()):
arg_names = ['arg%d' % (i,) for i in range(len(params))]
warg_names = ['w_arg%d' % (i,) for i in range(len(params))]
converters = []
real_params = []
for i, ARG in enumerate(params):
# Some custom argument conversions
if name == "StartElementHandler" and i == 1:
converters.append(
'w_arg%d = parser.w_convert_attributes(space, arg%d)' % (i, i))
elif name in ["CharacterDataHandler", "DefaultHandlerExpand", "DefaultHandler"] and i == 0:
converters.append(
'w_arg%d = parser.w_convert_charp_n(space, arg%d, arg%d)' % (i, i, i+1))
del warg_names[i+1]
elif name in ["EntityDeclHandler"] and i == 2:
converters.append(
'w_arg%d = parser.w_convert_charp_n(space, arg%d, arg%d)' % (i, i, i+1))
del warg_names[i+1]
# the standard conversions
elif ARG == rffi.CCHARP:
converters.append(
'w_arg%d = parser.w_convert_charp(space, arg%d)' % (i, i))
elif ARG == INTERNED_CCHARP:
converters.append(
'w_arg%d = parser.w_convert_interned(space, arg%d)' % (i, i))
ARG = rffi.CCHARP
elif ARG == lltype.Ptr(XML_Content):
converters.append(
'w_arg%d = parser.w_convert_model(space, arg%d)' % (i, i))
converters.append(
'XML_FreeContentModel(parser.itself, arg%d)' % (i,))
elif ARG == rffi.INT:
converters.append("w_arg%d = space.newint(arg%d)" % (i, i))
else:
assert 0, "missing conversion case"
real_params.append(ARG)
converters = '; '.join(converters)
args = ', '.join(arg_names)
wargs = ', '.join(warg_names)
if name in ['ExternalEntityRefHandler',
'NotStandaloneHandler']:
result_type = rffi.INT
result_converter = "rffi.cast(rffi.INT, space.int_w(w_result))"
result_error = "rffi.cast(rffi.INT, 0)"
else:
result_type = lltype.Void
result_converter = "None"
result_error = "None"
if name == 'CharacterDataHandler':
pre_code = 'if parser.buffer_string(space, w_arg0, arg1): return'
else:
pre_code = 'parser.flush_character_buffer(space)'
if name == 'ExternalEntityRefHandler':
first_arg = 'll_parser'
first_lltype = XML_Parser
ll_id = 'XML_GetUserData(ll_parser)'
post_code = 'if space.is_w(w_result, space.w_None): return 0'
else:
first_arg = 'll_userdata'
first_lltype = rffi.VOIDP
ll_id = 'll_userdata'
post_code = ''
src = py.code.Source("""
@jit.jit_callback('XML:%(name)s')
def %(name)s_callback(%(first_arg)s, %(args)s):
id = rffi.cast(lltype.Signed, %(ll_id)s)
userdata = global_storage.get_object(id)
space = userdata.space
parser = userdata.parser()
handler = parser.handlers[%(index)s]
if not handler:
return %(result_error)s
try:
%(converters)s
%(pre_code)s
w_result = space.call_function(handler, %(wargs)s)
%(post_code)s
except OperationError, e:
if not parser._exc_info: # don't override an existing exception
parser._exc_info = e
XML_StopParser(parser.itself, XML_FALSE)
return %(result_error)s
return %(result_converter)s
callback = %(name)s_callback
""" % locals())
exec src.compile()
c_name = 'XML_Set' + name
callback_type = lltype.Ptr(lltype.FuncType(
[first_lltype] + real_params, result_type))
func = expat_external(c_name,
[XML_Parser, callback_type], lltype.Void)
SETTERS[name] = (index, func, callback)
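# For illustration only: for a simple handler such as EndElementHandler, whose
# single parameter is an interned char pointer, the template above expands to
# (roughly) the following RPython callback:
#
#   @jit.jit_callback('XML:EndElementHandler')
#   def EndElementHandler_callback(ll_userdata, arg0):
#       id = rffi.cast(lltype.Signed, ll_userdata)
#       userdata = global_storage.get_object(id)
#       space = userdata.space
#       parser = userdata.parser()
#       handler = parser.handlers[index]   # numeric index of EndElementHandler
#       if not handler:
#           return None
#       try:
#           w_arg0 = parser.w_convert_interned(space, arg0)
#           parser.flush_character_buffer(space)
#           w_result = space.call_function(handler, w_arg0)
#       except OperationError, e:
#           if not parser._exc_info:   # don't override an existing exception
#               parser._exc_info = e
#           XML_StopParser(parser.itself, XML_FALSE)
#           return None
#       return None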
# special case for UnknownEncodingHandlerData:
# XML_SetUnknownEncodingHandler() needs an additional argument,
# and it's not modifiable via user code anyway
def UnknownEncodingHandlerData_callback(ll_userdata, name, info):
id = rffi.cast(lltype.Signed, ll_userdata)
userdata = global_storage.get_object(id)
space = userdata.space
parser = userdata.parser()
name = rffi.charp2str(name)
try:
parser.UnknownEncodingHandler(space, name, info)
except OperationError as e:
if not parser._exc_info:
parser._exc_info = e
XML_StopParser(parser.itself, XML_FALSE)
result = 0
else:
result = 1
return rffi.cast(rffi.INT, result)
callback_type = lltype.Ptr(lltype.FuncType(
[rffi.VOIDP, rffi.CCHARP, XML_Encoding_Ptr], rffi.INT))
XML_SetUnknownEncodingHandler = expat_external(
'XML_SetUnknownEncodingHandler',
[XML_Parser, callback_type, rffi.VOIDP], lltype.Void)
# Declarations of external functions
XML_ParserCreate = expat_external(
'XML_ParserCreate', [rffi.CCHARP], XML_Parser)
XML_ParserCreateNS = expat_external(
'XML_ParserCreateNS', [rffi.CCHARP, rffi.CHAR], XML_Parser)
XML_ParserFree = expat_external(
'XML_ParserFree', [XML_Parser], lltype.Void, releasegil=False)
XML_SetUserData = expat_external(
'XML_SetUserData', [XML_Parser, rffi.VOIDP], lltype.Void)
def XML_GetUserData(parser):
# XXX is this always true?
return rffi.cast(rffi.VOIDPP, parser)[0]
XML_Parse = expat_external(
'XML_Parse', [XML_Parser, rffi.CCHARP, rffi.INT, rffi.INT], rffi.INT)
XML_StopParser = expat_external(
'XML_StopParser', [XML_Parser, rffi.INT], lltype.Void)
XML_SetReturnNSTriplet = expat_external(
'XML_SetReturnNSTriplet', [XML_Parser, rffi.INT], lltype.Void)
XML_GetSpecifiedAttributeCount = expat_external(
'XML_GetSpecifiedAttributeCount', [XML_Parser], rffi.INT)
XML_SetParamEntityParsing = expat_external(
'XML_SetParamEntityParsing', [XML_Parser, rffi.INT], lltype.Void)
XML_SetBase = expat_external(
'XML_SetBase', [XML_Parser, rffi.CCHARP], lltype.Void)
if XML_COMBINED_VERSION >= 19505:
XML_UseForeignDTD = expat_external(
'XML_UseForeignDTD', [XML_Parser, rffi.INT], lltype.Void)
XML_GetErrorCode = expat_external(
'XML_GetErrorCode', [XML_Parser], rffi.INT)
XML_ErrorString = expat_external(
'XML_ErrorString', [rffi.INT],
rffi.CCHARP)
XML_GetCurrentLineNumber = expat_external(
'XML_GetCurrentLineNumber', [XML_Parser], rffi.INT)
XML_GetErrorLineNumber = XML_GetCurrentLineNumber
XML_GetCurrentColumnNumber = expat_external(
'XML_GetCurrentColumnNumber', [XML_Parser], rffi.INT)
XML_GetErrorColumnNumber = XML_GetCurrentColumnNumber
XML_GetCurrentByteIndex = expat_external(
'XML_GetCurrentByteIndex', [XML_Parser], rffi.INT)
XML_GetErrorByteIndex = XML_GetCurrentByteIndex
XML_FreeContentModel = expat_external(
'XML_FreeContentModel', [XML_Parser, lltype.Ptr(XML_Content)], lltype.Void)
XML_ExternalEntityParserCreate = expat_external(
'XML_ExternalEntityParserCreate', [XML_Parser, rffi.CCHARP, rffi.CCHARP],
XML_Parser)
XML_ExpatVersion = expat_external(
'XML_ExpatVersion', [], rffi.CCHARP)
def get_expat_version(space):
return space.newtext(rffi.charp2str(XML_ExpatVersion()))
def get_expat_version_info(space):
return space.newtuple([
space.newint(XML_MAJOR_VERSION),
space.newint(XML_MINOR_VERSION),
space.newint(XML_MICRO_VERSION)])
class Cache:
def __init__(self, space):
self.w_error = space.new_exception_class("pyexpat.ExpatError")
class W_XMLParserType(W_Root):
id = -1
def __init__(self, space, parser, w_intern):
self.itself = parser
self.register_finalizer(space)
self.w_intern = w_intern
self.returns_unicode = True
self.ordered_attributes = False
self.specified_attributes = False
self.ns_prefixes = False
self.handlers = [None] * NB_HANDLERS
self.buffer = None
self.buffer_size = 8192
self.buffer_used = 0
self.w_character_data_handler = None
self._exc_info = None
# Set user data for callback function
self.id = global_storage.get_nonmoving_id(
CallbackData(space, self))
XML_SetUserData(self.itself, rffi.cast(rffi.VOIDP, self.id))
def _finalize_(self):
if XML_ParserFree: # careful with CPython interpreter shutdown
if self.itself:
XML_ParserFree(self.itself)
self.itself = lltype.nullptr(XML_Parser.TO)
if global_storage and self.id >= 0:
try:
global_storage.free_nonmoving_id(self.id)
except KeyError:
pass # maybe global_storage.clear() was already called
self.id = -1
@unwrap_spec(flag=int)
def SetParamEntityParsing(self, space, flag):
"""SetParamEntityParsing(flag) -> success
Controls parsing of parameter entities (including the external DTD
subset). Possible flag values are XML_PARAM_ENTITY_PARSING_NEVER,
XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE and
XML_PARAM_ENTITY_PARSING_ALWAYS. Returns true if setting the flag
was successful."""
XML_SetParamEntityParsing(self.itself, flag)
@unwrap_spec(w_flag=WrappedDefault(True))
def UseForeignDTD(self, space, w_flag):
"""UseForeignDTD([flag])
Allows the application to provide an artificial external subset if one is
not specified as part of the document instance. This readily allows the
use of a 'default' document type controlled by the application, while still
getting the advantage of providing document type information to the parser.
'flag' defaults to True if not provided."""
flag = space.is_true(w_flag)
XML_UseForeignDTD(self.itself, flag)
# Handlers management
def w_convert(self, space, s):
if self.returns_unicode:
from pypy.interpreter.unicodehelper import decode_utf8
return space.newunicode(decode_utf8(space, s))
else:
return space.newtext(s)
def w_convert_charp(self, space, data):
if data:
return self.w_convert(space, rffi.charp2str(data))
else:
return space.w_None
def w_convert_interned(self, space, data):
if not data:
return space.w_None
w_data = self.w_convert_charp(space, data)
if not self.w_intern:
return w_data
try:
return space.getitem(self.w_intern, w_data)
except OperationError as e:
if not e.match(space, space.w_KeyError):
raise
space.setitem(self.w_intern, w_data, w_data)
return w_data
def w_convert_charp_n(self, space, data, length):
ll_length = rffi.cast(lltype.Signed, length)
if data:
return self.w_convert(space, rffi.charp2strn(data, ll_length))
else:
return space.w_None
def w_convert_attributes(self, space, attrs):
if self.specified_attributes:
maxindex = XML_GetSpecifiedAttributeCount(self.itself)
else:
maxindex = 0
while attrs[maxindex]:
maxindex += 2 # copied
if self.ordered_attributes:
w_attrs = space.newlist([
self.w_convert_charp(space, attrs[i])
for i in range(maxindex)])
else:
w_attrs = space.newdict()
for i in range(0, maxindex, 2):
space.setitem(
w_attrs,
self.w_convert_charp(space, attrs[i]),
self.w_convert_charp(space, attrs[i + 1]))
return w_attrs
def w_convert_model(self, space, model):
children = [self.w_convert_model(space, model.c_children[i])
for i in range(model.c_numchildren)]
return space.newtuple([
space.newint(model.c_type),
space.newint(model.c_quant),
self.w_convert_charp(space, model.c_name),
space.newtuple(children)])
def buffer_string(self, space, w_string, length):
ll_length = rffi.cast(lltype.Signed, length)
if self.buffer is not None:
if self.buffer_used + ll_length > self.buffer_size:
self.flush_character_buffer(space)
# handler might have changed; drop the rest on the floor
# if there isn't a handler anymore
if self.w_character_data_handler is None:
return True
if ll_length <= self.buffer_size:
self.buffer.append(w_string)
self.buffer_used += ll_length
return True
else:
self.buffer = []
self.buffer_used = 0
return False
def gethandler(self, space, name, index):
if name == 'CharacterDataHandler':
return self.w_character_data_handler or space.w_None
return self.handlers[index]
@specialize.arg(2)
def sethandler(self, space, name, w_handler, index, setter, handler):
if name == 'CharacterDataHandler':
self.flush_character_buffer(space)
if space.is_w(w_handler, space.w_None):
self.w_character_data_handler = None
else:
self.w_character_data_handler = w_handler
#
self.handlers[index] = w_handler
setter(self.itself, handler)
all_chars = ''.join(chr(i) for i in range(256))
def UnknownEncodingHandler(self, space, name, info):
# Yes, supports only 8bit encodings
translationmap = space.unicode_w(
space.call_method(
space.newbytes(self.all_chars), "decode",
space.newtext(name), space.newtext("replace")))
if len(translationmap) != 256:
raise oefmt(space.w_ValueError,
"multi-byte encodings are not supported")
for i in range(256):
c = translationmap[i]
if c == u'\ufffd':
info.c_map[i] = rffi.cast(rffi.INT, -1)
else:
info.c_map[i] = rffi.cast(rffi.INT, c)
info.c_data = lltype.nullptr(rffi.VOIDP.TO)
info.c_convert = lltype.nullptr(rffi.VOIDP.TO)
info.c_release = lltype.nullptr(rffi.VOIDP.TO)
return True
@staticmethod
def _make_property(name):
index, setter, handler = SETTERS[name]
#
def descr_get_property(self, space):
return self.gethandler(space, name, index)
#
def descr_set_property(self, space, w_value):
return self.sethandler(space, name, w_value,
index, setter, handler)
#
return GetSetProperty(descr_get_property,
descr_set_property,
cls=W_XMLParserType)
def get_namespace_prefixes(self, space):
return space.newbool(self.ns_prefixes)
def set_namespace_prefixes(self, space, w_value):
self.ns_prefixes = space.bool_w(w_value)
XML_SetReturnNSTriplet(self.itself, self.ns_prefixes)
# Parse methods
@unwrap_spec(data='text', isfinal=bool)
def Parse(self, space, data, isfinal=False):
"""Parse(data[, isfinal])
Parse XML data. `isfinal' should be true at end of input."""
res = XML_Parse(self.itself, data, len(data), isfinal)
if self._exc_info:
e = self._exc_info
self._exc_info = None
raise e
elif res == 0:
exc = self.set_error(space, XML_GetErrorCode(self.itself))
raise exc
self.flush_character_buffer(space)
return space.newint(res)
def ParseFile(self, space, w_file):
"""ParseFile(file)
Parse XML data from file-like object."""
eof = False
while not eof:
w_data = space.call_method(w_file, 'read', space.newint(2048))
data = space.text_w(w_data)
eof = len(data) == 0
w_res = self.Parse(space, data, isfinal=eof)
return w_res
@unwrap_spec(base='text')
def SetBase(self, space, base):
XML_SetBase(self.itself, base)
def ExternalEntityParserCreate(self, space, w_context, w_encoding=None):
"""ExternalEntityParserCreate(context[, encoding])
Create a parser for parsing an external entity based on the
information passed to the ExternalEntityRefHandler."""
if space.is_w(w_context, space.w_None):
context = None
else:
context = space.text_w(w_context)
if space.is_none(w_encoding):
encoding = None
else:
encoding = space.text_w(w_encoding)
xmlparser = XML_ExternalEntityParserCreate(
self.itself, context, encoding)
if not xmlparser:
raise MemoryError
parser = W_XMLParserType(space, xmlparser, self.w_intern)
# copy handlers from self
for i in range(NB_HANDLERS):
parser.handlers[i] = self.handlers[i]
return parser
def flush_character_buffer(self, space):
if not self.buffer:
return
w_data = space.call_function(
space.getattr(space.newtext(''), space.newtext('join')),
space.newlist(self.buffer))
self.buffer = []
self.buffer_used = 0
if self.w_character_data_handler:
space.call_function(self.w_character_data_handler, w_data)
# Error management
def set_error(self, space, code):
err = rffi.charp2strn(XML_ErrorString(code), 200)
lineno = XML_GetCurrentLineNumber(self.itself)
colno = XML_GetCurrentColumnNumber(self.itself)
msg = "%s: line %d, column %d" % (err, lineno, colno)
w_errorcls = space.fromcache(Cache).w_error
w_error = space.call_function(w_errorcls, space.newtext(msg))
space.setattr(w_error, space.newtext("code"), space.newint(code))
space.setattr(w_error, space.newtext("offset"), space.newint(colno))
space.setattr(w_error, space.newtext("lineno"), space.newint(lineno))
self.w_error = w_error
return OperationError(w_errorcls, w_error)
def descr_ErrorCode(self, space):
return space.newint(XML_GetErrorCode(self.itself))
def descr_ErrorLineNumber(self, space):
return space.newint(XML_GetErrorLineNumber(self.itself))
def descr_ErrorColumnNumber(self, space):
return space.newint(XML_GetErrorColumnNumber(self.itself))
def descr_ErrorByteIndex(self, space):
return space.newint(XML_GetErrorByteIndex(self.itself))
def get_buffer_size(self, space):
return space.newint(self.buffer_size)
def set_buffer_size(self, space, w_value):
value = space.getindex_w(w_value, space.w_TypeError)
if value <= 0:
raise oefmt(space.w_ValueError,
"buffer_size must be greater than zero")
self.flush_character_buffer(space)
self.buffer_size = value
def get_buffer_text(self, space):
return space.newbool(self.buffer is not None)
def set_buffer_text(self, space, w_value):
if space.is_true(w_value):
self.buffer = []
self.buffer_used = 0
else:
self.flush_character_buffer(space)
self.buffer = None
def get_intern(self, space):
if self.w_intern:
return self.w_intern
else:
return space.w_None
def bool_property(name, cls, doc=None):
def fget(space, obj):
return space.newbool(getattr(obj, name))
def fset(space, obj, value):
setattr(obj, name, space.bool_w(value))
return GetSetProperty(fget, fset, cls=cls, doc=doc)
XMLParser_methods = ['Parse', 'ParseFile', 'SetBase', 'SetParamEntityParsing',
'ExternalEntityParserCreate']
if XML_COMBINED_VERSION >= 19505:
XMLParser_methods.append('UseForeignDTD')
_XMLParser_extras = {}
for name in XMLParser_methods:
_XMLParser_extras[name] = interp2app(getattr(W_XMLParserType, name))
for name in SETTERS:
_XMLParser_extras[name] = W_XMLParserType._make_property(name)
W_XMLParserType.typedef = TypeDef(
"pyexpat.XMLParserType",
__doc__ = "XML parser",
namespace_prefixes = GetSetProperty(W_XMLParserType.get_namespace_prefixes,
W_XMLParserType.set_namespace_prefixes,
cls=W_XMLParserType),
returns_unicode = bool_property('returns_unicode', W_XMLParserType),
ordered_attributes = bool_property('ordered_attributes', W_XMLParserType),
specified_attributes = bool_property('specified_attributes', W_XMLParserType),
intern = GetSetProperty(W_XMLParserType.get_intern, cls=W_XMLParserType),
buffer_size = GetSetProperty(W_XMLParserType.get_buffer_size,
W_XMLParserType.set_buffer_size,
cls=W_XMLParserType),
buffer_text = GetSetProperty(W_XMLParserType.get_buffer_text,
W_XMLParserType.set_buffer_text, cls=W_XMLParserType),
ErrorCode = GetSetProperty(W_XMLParserType.descr_ErrorCode, cls=W_XMLParserType),
ErrorLineNumber = GetSetProperty(W_XMLParserType.descr_ErrorLineNumber, cls=W_XMLParserType),
ErrorColumnNumber = GetSetProperty(W_XMLParserType.descr_ErrorColumnNumber, cls=W_XMLParserType),
ErrorByteIndex = GetSetProperty(W_XMLParserType.descr_ErrorByteIndex, cls=W_XMLParserType),
CurrentLineNumber = GetSetProperty(W_XMLParserType.descr_ErrorLineNumber, cls=W_XMLParserType),
CurrentColumnNumber = GetSetProperty(W_XMLParserType.descr_ErrorColumnNumber, cls=W_XMLParserType),
CurrentByteIndex = GetSetProperty(W_XMLParserType.descr_ErrorByteIndex, cls=W_XMLParserType),
**_XMLParser_extras
)
def ParserCreate(space, w_encoding=None, w_namespace_separator=None,
w_intern=None):
"""ParserCreate([encoding[, namespace_separator]]) -> parser
Return a new XML parser object."""
if space.is_none(w_encoding):
encoding = None
elif space.isinstance_w(w_encoding, space.w_text):
encoding = space.text_w(w_encoding)
else:
raise oefmt(space.w_TypeError,
"ParserCreate() argument 1 must be string or None, not %T",
w_encoding)
if space.is_none(w_namespace_separator):
namespace_separator = -1
elif space.isinstance_w(w_namespace_separator, space.w_text):
separator = space.text_w(w_namespace_separator)
if len(separator) == 0:
namespace_separator = 0
elif len(separator) == 1:
namespace_separator = ord(separator[0])
else:
raise oefmt(space.w_ValueError,
"namespace_separator must be at most one character, "
"omitted, or None")
else:
raise oefmt(space.w_TypeError,
"ParserCreate() argument 2 must be string or None, not %T",
w_namespace_separator)
# Explicitly passing None means no interning is desired.
# Not passing anything means that a new dictionary is used.
if w_intern is None:
w_intern = space.newdict()
elif space.is_w(w_intern, space.w_None):
w_intern = None
if namespace_separator >= 0:
xmlparser = XML_ParserCreateNS(
encoding,
rffi.cast(rffi.CHAR, namespace_separator))
else:
xmlparser = XML_ParserCreate(encoding)
# Currently this is just the size of the pointer and some estimated bytes.
# The struct isn't actually defined in expat.h - it is in xmlparse.c
# XXX: find a good estimate of the XML_ParserStruct
if not xmlparser:
raise oefmt(space.w_RuntimeError, "XML_ParserCreate failed")
parser = W_XMLParserType(space, xmlparser, w_intern)
rgc.add_memory_pressure(XML_Parser_SIZE + 300, parser)
XML_SetUnknownEncodingHandler(
parser.itself, UnknownEncodingHandlerData_callback,
rffi.cast(rffi.VOIDP, parser.id))
return parser
@unwrap_spec(code=int)
def ErrorString(space, code):
"""ErrorString(errno) -> string
Returns string error for given number."""
return space.newtext(rffi.charp2str(XML_ErrorString(code)))
| 36.639115
| 103
| 0.658469
|
abf8e6a969c3ed03faa595b8927ac6bc650e0065
| 11,830
|
py
|
Python
|
nevergrad/benchmark/xpbase.py
|
CarolaDoerr/nevergrad
|
dfeda5cafd52536b9d523c0730193240841d8fb1
|
[
"MIT"
] | null | null | null |
nevergrad/benchmark/xpbase.py
|
CarolaDoerr/nevergrad
|
dfeda5cafd52536b9d523c0730193240841d8fb1
|
[
"MIT"
] | null | null | null |
nevergrad/benchmark/xpbase.py
|
CarolaDoerr/nevergrad
|
dfeda5cafd52536b9d523c0730193240841d8fb1
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import time
import random
import warnings
import traceback
from typing import Dict, Union, Any, Optional, Iterator, Type, Callable, Tuple
import torch
import numpy as np
from ..common import decorators
from .. import instrumentation as instru
from ..functions import utils as futils
from ..optimization import base
from ..optimization.optimizerlib import registry as optimizer_registry # import from optimizerlib so as to fill it
from . import execution
registry = decorators.Registry[Callable[..., Iterator['Experiment']]]()
class IFuncWrapper(execution.PostponedObject):
"""Simple wrapper to use encapsulate relevant parts of an InstrumentedFunction
Parameter
---------
func: Callable
the callable to wrap
"""
def __init__(self, func: instru.InstrumentedFunction) -> None:
self.func = func
def __call__(self, *args: Any, **kwargs: Any) -> Any:
return self.func.function(*args, **kwargs) # compute *before* updating num calls
def get_postponing_delay(self, args: Tuple[Any, ...], kwargs: Dict[str, Any], value: float) -> float:
"""Propagate subfunction delay
"""
if isinstance(self.func, execution.PostponedObject):
return self.func.get_postponing_delay(args, kwargs, value)
return 1.
class OptimizerSettings:
"""Handle for optimizer settings (name, num_workers etc)
Optimizers can be instantiated through this class, providing the optimization space dimension.
Note
----
Eventually, this class should be moved to be directly used for defining experiments.
"""
def __init__(self, optimizer: Union[str, base.OptimizerFamily], budget: int, num_workers: int = 1, batch_mode: bool = True) -> None:
self._setting_names = [x for x in locals() if x != "self"]
if isinstance(optimizer, str):
assert optimizer in optimizer_registry, f"{optimizer} is not registered"
self.optimizer = optimizer
self.budget = budget
self.num_workers = num_workers
self.executor = execution.MockedTimedExecutor(batch_mode)
@property
def name(self) -> str:
return self.optimizer if isinstance(self.optimizer, str) else repr(self.optimizer)
@property
def batch_mode(self) -> bool:
return self.executor.batch_mode
def __repr__(self) -> str:
return f"Experiment: {self.name}<budget={self.budget}, num_workers={self.num_workers}, batch_mode={self.batch_mode}>"
def _get_factory(self) -> Union[Type[base.Optimizer], base.OptimizerFamily]:
return optimizer_registry[self.optimizer] if isinstance(self.optimizer, str) else self.optimizer
@property
def is_incoherent(self) -> bool:
"""Flags settings which are known to be impossible to process.
Currently, this means we flag:
- no_parallelization optimizers for num_workers > 1
"""
# flag no_parallelization when num_workers greater than 1
return self._get_factory().no_parallelization and bool(self.num_workers > 1)
def instantiate(self, instrumentation: instru.Instrumentation) -> base.Optimizer:
"""Instantiate an optimizer, providing the optimization space dimension
"""
return self._get_factory()(instrumentation=instrumentation, budget=self.budget, num_workers=self.num_workers)
def get_description(self) -> Dict[str, Any]:
"""Returns a dictionary describing the optimizer settings
"""
descr = {x: getattr(self, x) for x in self._setting_names if x != "optimizer"}
descr["optimizer_name"] = self.name
return descr
def __eq__(self, other: Any) -> bool:
if isinstance(other, self.__class__):
return all(getattr(self, attr) == getattr(other, attr) for attr in self._setting_names)
return False
def create_seed_generator(seed: Optional[int]) -> Iterator[Optional[int]]:
"""Create a stream of seeds, independent from the standard random stream.
    This is designed to be used in experiment plan generators, for reproducibility.
Parameter
---------
seed: int or None
the initial seed
Yields
------
int or None
potential new seeds, or None if the initial seed was None
"""
generator = None if seed is None else np.random.RandomState(seed=seed)
while True:
yield None if generator is None else generator.randint(2**32, dtype=np.uint32)
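# Usage sketch (illustrative): an experiment plan generator typically draws one
# seed per yielded experiment so the whole plan stays reproducible, e.g.
#
#   seedg = create_seed_generator(seed)
#   for budget in [25, 50, 100]:
#       yield Experiment(some_instrumented_function, "OnePlusOne",
#                        budget=budget, num_workers=1, seed=next(seedg))
#
# where `some_instrumented_function` stands for any InstrumentedFunction and
# "OnePlusOne" for any registered optimizer name.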
class Experiment:
"""Specifies an experiment which can be run in benchmarks.
Parameters
----------
function: InstrumentedFunction
the function to run the experiment on. It must inherit from InstrumentedFunction to implement
descriptors for the function.
Note
----
- "run" method catches error but forwards stderr so that errors are not completely hidden
- "run" method outputs the description of the experiment, which is a set of figures/names from the functions
settings (dimension, etc...), the optimization settings (budget, etc...) and the results (loss, etc...)
"""
# pylint: disable=too-many-arguments
def __init__(self, function: instru.InstrumentedFunction,
optimizer: Union[str, base.OptimizerFamily], budget: int, num_workers: int = 1,
batch_mode: bool = True, seed: Optional[int] = None,
cheap_constraint_checker: Optional[Callable[[Any], Any]] = None,
) -> None:
assert isinstance(function, instru.InstrumentedFunction), ("All experiment functions should derive from InstrumentedFunction")
self.function = function
self.seed = seed # depending on the inner workings of the function, the experiment may not be repeatable
self.optimsettings = OptimizerSettings(optimizer=optimizer, num_workers=num_workers, budget=budget, batch_mode=batch_mode)
self.result = {"loss": np.nan, "elapsed_budget": np.nan, "elapsed_time": np.nan, "error": ""}
self.recommendation: Optional[base.Candidate] = None
self._optimizer: Optional[base.Optimizer] = None # to be able to restore stopped/checkpointed optimizer
self._cheap_constraint_checker = cheap_constraint_checker
def __repr__(self) -> str:
return f"Experiment: {self.optimsettings} (dim={self.function.dimension}) on {self.function}"
@property
def is_incoherent(self) -> bool:
"""Flags settings which are known to be impossible to process.
Currently, this means we flag:
- no_parallelization optimizers for num_workers > 1
"""
return self.optimsettings.is_incoherent
def run(self) -> Dict[str, Any]:
"""Run an experiment with the provided settings
Returns
-------
dict
A dict containing all the information about the experiments (optimizer/function settings + results)
Note
----
        This function catches errors (but forwards stderr). It fills up the "error" ("" if no error, else the error name),
"loss", "elapsed_time" and "elapsed_budget" of the experiment.
"""
try:
self._run_with_error()
except Exception as e: # pylint: disable=broad-except
# print the case and the traceback
self.result["error"] = e.__class__.__name__
print(f"Error when applying {self}:", file=sys.stderr)
traceback.print_exc()
print("\n", file=sys.stderr)
return self.get_description()
def _log_results(self, t0: float, num_calls: int) -> None:
"""Internal method for logging results before handling the error
"""
self.result["elapsed_time"] = time.time() - t0
self.result["pseudotime"] = self.optimsettings.executor.time
# make a final evaluation with oracle (no noise, but function may still be stochastic)
assert self.recommendation is not None
reco = self.recommendation
if isinstance(self.function, futils.NoisyBenchmarkFunction):
self.result["loss"] = self.function.noisefree_function(*reco.args, **reco.kwargs)
else:
self.result["loss"] = self.function.function(*reco.args, **reco.kwargs)
self.result["elapsed_budget"] = num_calls
if num_calls > self.optimsettings.budget:
raise RuntimeError(f"Too much elapsed budget {num_calls} for {self.optimsettings.name} on {self.function}")
def _run_with_error(self, callbacks: Optional[Dict[str, base._OptimCallBack]] = None) -> None:
"""Run an experiment with the provided artificial function and optimizer
Parameter
---------
callbacks: dict
a dictionary of callbacks to register on the optimizer with key "ask" and/or "tell" (see base Optimizer class).
This is only for easier debugging.
"""
instrumentation = self.function.instrumentation.copy() # make sure it is not shared
if self.seed is not None and self._optimizer is None:
# Note: when resuming a job (if optimizer is not None), seeding is pointless (reproducibility is lost)
np.random.seed(self.seed) # seeds both functions and instrumentation (for which random state init is lazy)
random.seed(self.seed)
torch.manual_seed(self.seed) # type: ignore
# optimizer instantiation can be slow and is done only here to make xp iterators very fast
if self._optimizer is None:
self._optimizer = self.optimsettings.instantiate(instrumentation=instrumentation)
if self._cheap_constraint_checker:
self._optimizer.instrumentation.set_cheap_constraint_checker(self._cheap_constraint_checker)
if callbacks is not None:
for name, func in callbacks.items():
self._optimizer.register_callback(name, func)
assert self._optimizer.budget is not None, "A budget must be provided"
t0 = time.time()
func = IFuncWrapper(self.function) # probably useless now (= num_ask) but helps being 100% sure
executor = self.optimsettings.executor
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=base.InefficientSettingsWarning) # benchmark do not need to be efficient
try:
                # call the actual Optimizer.minimize method because overloaded versions could alter the workflow
# and provide unfair comparisons (especially for parallelized settings)
self.recommendation = base.Optimizer.minimize(self._optimizer, func, batch_mode=executor.batch_mode, executor=executor)
except Exception as e: # pylint: disable=broad-except
self.recommendation = self._optimizer.provide_recommendation() # get the recommendation anyway
self._log_results(t0, self._optimizer.num_ask)
raise e
self._log_results(t0, self._optimizer.num_ask)
def get_description(self) -> Dict[str, Union[str, float, bool]]:
"""Return the description of the experiment, as a dict.
"run" must be called beforehand in order to have non-nan values for the loss.
"""
summary = dict(self.result, seed=-1 if self.seed is None else self.seed)
summary.update(self.function.descriptors)
summary.update(self.optimsettings.get_description())
return summary
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Experiment):
return False
return self.function == other.function and self.optimsettings == other.optimsettings
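# Usage sketch (illustrative): running a single experiment directly, assuming
# `func` is an InstrumentedFunction:
#
#   xp = Experiment(func, optimizer="OnePlusOne", budget=300, num_workers=1, seed=12)
#   summary = xp.run()   # dict with optimizer/function settings plus the
#                        # "loss", "elapsed_budget", "elapsed_time" and "error" fields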
| 45.852713
| 136
| 0.675993
|
84aa5b0e98b5e413405dbb5f28c3220d777491e8
| 16,451
|
py
|
Python
|
hammer/library/aws/rds.py
|
MrBakalo/hammer
|
3d2143e5288c4272f6aae701668da32026099d9b
|
[
"Apache-2.0"
] | null | null | null |
hammer/library/aws/rds.py
|
MrBakalo/hammer
|
3d2143e5288c4272f6aae701668da32026099d9b
|
[
"Apache-2.0"
] | 3
|
2021-05-20T20:29:51.000Z
|
2022-02-26T09:28:08.000Z
|
hammer/library/aws/rds.py
|
MrBakalo/hammer
|
3d2143e5288c4272f6aae701668da32026099d9b
|
[
"Apache-2.0"
] | null | null | null |
import logging
from botocore.exceptions import ClientError
from library.aws.utility import convert_tags
from collections import namedtuple
from library.utility import timeit
# structure which describes an RDS instance
RDSInstance = namedtuple('RDSInstance', [
# instance ID
'id',
# DB engine
'engine',
# instance arn
'arn',
# status of db instance (available or not)
'status',
# boolean if RDS instance is public access or not
'public'
])
class RDSOperations:
@classmethod
@timeit
def get_rds_instance_details_of_sg_associated(cls, rds_client, group_id):
""" Retrieve rds instances meta data with security group attached
:param rds_client: boto3 rds client
:param group_id: security group id
:return: list with rds instance details
"""
# describe rds instances with security group attached
rds_instances = []
# this will include both DB and Cluster instances
rds_response = rds_client.describe_db_instances()
for db_instance in rds_response["DBInstances"]:
active_security_groups = [ sg["VpcSecurityGroupId"] for sg in db_instance['VpcSecurityGroups'] if sg["Status"] == "active" ]
if group_id in active_security_groups:
rds_instances.append(RDSInstance(
id=db_instance["DBInstanceIdentifier"],
engine=db_instance["Engine"],
arn=db_instance["DBInstanceArn"],
status=db_instance["DBInstanceStatus"],
public=db_instance["PubliclyAccessible"],
))
return rds_instances
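    # Usage sketch (illustrative), with a plain boto3 client and a made-up
    # security group id:
    #
    #   import boto3
    #   rds_client = boto3.client("rds", region_name="us-east-1")
    #   instances = RDSOperations.get_rds_instance_details_of_sg_associated(
    #       rds_client, "sg-0123456789abcdef0")
    #   public_instances = [i for i in instances if i.public]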
class RdsSnapshotOperations(object):
@staticmethod
def make_private(rds_client, engine, snapshot_id):
"""
Change RDS snapshot to be private.
:param rds_client: RDS boto3 client
:param engine: The name of the database engine to modify snapshot attribute for (aurora, aurora-*, mariadb, mysql, ...)
:param snapshot_id: The identifier for the DB snapshot to make private
:return: nothing
"""
snapshot = RdsClusterSnapshot if engine.startswith("aurora") else RdsInstanceSnapshot
args = {
snapshot.snapshot_id_field: snapshot_id,
'AttributeName': 'restore',
'ValuesToRemove': [ 'all' ]
}
# TODO: error handling
getattr(rds_client, snapshot.modify_attribute_method)(
**args
)
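    # Usage sketch (illustrative): for a snapshot discovered as public by the
    # RdsSnapshotsChecker further below, e.g.
    #
    #   RdsSnapshotOperations.make_private(
    #       account.client("rds"), snapshot.engine, snapshot.name)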
@staticmethod
def make_public(rds_client, engine, snapshot_id):
"""
Change RDS snapshot to be public.
:param rds_client: RDS boto3 client
:param engine: The name of the database engine to modify snapshot attribute for (aurora, aurora-*, mariadb, mysql, ...)
:param snapshot_id: The identifier for the DB snapshot to make private
:return: nothing
"""
snapshot = RdsClusterSnapshot if engine.startswith("aurora") else RdsInstanceSnapshot
args = {
snapshot.snapshot_id_field: snapshot_id,
'AttributeName': 'restore',
'ValuesToAdd': [ 'all' ]
}
# TODO: error handling
getattr(rds_client, snapshot.modify_attribute_method)(
**args
)
class RdsSnapshot(object):
"""
Parent class for RDS snapshot (DB or Cluster). Child classes must define methods and fields to work with DB or Cluster instances.
Encapsulates `DB[Cluster]SnapshotIdentifier`/`DB[Cluster]SnapshotArn`/`DB[Cluster]InstanceIdentifier/Engine` and attributes.
"""
### all these static fields must be defined by child classes
# method which returns information about DB snapshots
describe_method = None
# field in `describe_method` response which specifies info about DB snapshots
response_field = None
# field in `response_field` which specifies DB snapshot identifier
snapshot_id_field = None
# field in `response_field` which specifies DB snapshot ARN
snapshot_arn_field = None
# field in `response_field` which specifies DB instance identifier
db_id_field = None
    # method to use for modifying snapshot attributes
modify_attribute_method = None
def __init__(self, account, source):
"""
        :param account: `Account` instance where the RDS snapshot is present
:param source: dict with RDS snapshot properties (as `describe_method` returns)
"""
self.account = account
# use snapshot ARN as id
self.id = source.get(self.snapshot_arn_field, None)
# DB instance identifier of the DB instance this DB snapshot was created from
self.db = source.get(self.db_id_field, None)
# snapshot name
self.name = source.get(self.snapshot_id_field, None)
self.source = source
# must be set later by creator
self.attributes = []
# name of the database engine
self.engine = source.get('Engine', None)
# tags placeholder
self._tags = {}
def __str__(self):
return f"{self.__class__.__name__}(Id={self.id}, db={self.db}, " \
f"engine={self.engine})"
@property
def tags(self):
""" :return: dict with tags associated with snapshot """
return self._tags
@tags.setter
def tags(self, value):
"""
        Set AWS tags for the snapshot, converting from the AWS format to a plain dict first
        :param value: AWS tags as returned by AWS
"""
self._tags = convert_tags(value)
class RdsDB(object):
"""
Parent class for RDS database (Instance or Cluster). Child classes must define methods and fields to work with Instance or Cluster instances.
Encapsulates `DB[Cluster]InstanceIdentifier`/`DB[Cluster]InstanceArn`/`DB[Cluster]InstanceIdentifier/Engine` and attributes.
"""
### all these static fields must be defined by child classes
# method which returns information about DB instances
describe_method = None
# field in `describe_method` response which specifies info about DB instances
response_field = None
# field in `response_field` which specifies DB instance identifier
instance_id_field = None
# field in `response_field` which specifies DB instance ARN
instance_arn_field = None
# field in `response_field` which specifies DB instance storage encryption
storage_encryption_field = None
def __init__(self, account, source):
"""
        :param account: `Account` instance where the RDS instance is present
:param source: dict with RDS instance properties (as `describe_method` returns)
"""
self.account = account
# use instance ARN as id
self.id = source.get(self.instance_arn_field, None)
# instance name
self.name = source.get(self.instance_id_field, None)
self.source = source
# must be set later by creator
self.attributes = []
# name of the database engine
self.engine = source.get('Engine', None)
# tags placeholder
self._tags = {}
def __str__(self):
return f"{self.__class__.__name__}(Id={self.id}, engine={self.engine})"
@property
def tags(self):
""" :return: dict with tags associated with instance """
return self._tags
@tags.setter
def tags(self, value):
"""
Set AWS tags for instance with prior converting from AWS format to simple dict
:param value: AWS tags as AWS returns
"""
self._tags = convert_tags(value)
class RdsInstanceSnapshot(RdsSnapshot):
describe_method = "describe_db_snapshots"
response_field = "DBSnapshots"
snapshot_id_field = "DBSnapshotIdentifier"
snapshot_arn_field = "DBSnapshotArn"
db_id_field = "DBInstanceIdentifier"
modify_attribute_method = "modify_db_snapshot_attribute"
class RdsClusterSnapshot(RdsSnapshot):
describe_method = "describe_db_cluster_snapshots"
response_field = "DBClusterSnapshots"
snapshot_id_field = "DBClusterSnapshotIdentifier"
snapshot_arn_field = "DBClusterSnapshotArn"
db_id_field = "DBClusterIdentifier"
modify_attribute_method = "modify_db_cluster_snapshot_attribute"
class RdsInstance(RdsDB):
describe_method = "describe_db_instances"
response_field = "DBInstances"
instance_id_field = "DBInstanceIdentifier"
instance_arn_field = "DBInstanceArn"
storage_encryption_field = "StorageEncrypted"
class RdsCluster(RdsDB):
describe_method = "describe_db_clusters"
response_field = "DBClusters"
instance_id_field = "DBClusterIdentifier"
instance_arn_field = "DBClusterArn"
storage_encryption_field = "StorageEncrypted"
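# --- Added illustrative sketch (not part of the original module) ---
# The class-level field names above are what let a single code path work with
# both DB and cluster resources. A minimal sketch of that pattern, assuming
# `rds_client` is a boto3 RDS client (pagination via 'Marker' is omitted here):
def _example_generic_describe(rds_client, resource_cls=RdsInstance):
    """Return raw records for `resource_cls` using its declared field mapping."""
    response = getattr(rds_client, resource_cls.describe_method)()
    return response[resource_cls.response_field]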
class RdsSnapshotsChecker(object):
"""
Basic class for checking RDS snapshots in account/region.
Encapsulates discovered RDS snapshots.
"""
def __init__(self, account):
"""
:param account: `Account` instance with RDS snapshots to check
"""
self.account = account
self.snapshots = []
def get_snapshot(self, id):
"""
:return: `RdsInstanceSnapshot`/`RdsClusterSnapshot` by id (ARN)
"""
for snapshot in self.snapshots:
if snapshot.id == id:
return snapshot
return None
def collect_public_rds_snapshots(self, account, snapshot_cls):
"""
Walk through public RDS snapshots (DB or Cluster, depending on `snapshot_cls`) in the account.
Filter snapshots owned by current account in current region.
Put all gathered snapshots to `self.snapshots`.
:param account: `Account` instance where RDS snapshot is present
:param snapshot_cls: `RdsInstanceSnapshot` or `RdsClusterSnapshot`
:return: boolean. True - if check was successful,
False - otherwise
"""
marker = None
while True:
# ask AWS to return only public snapshots
args = {
'SnapshotType': 'public',
'IncludePublic': True
}
if marker:
args['Marker'] = marker
try:
# describe public snapshots
response = getattr(self.account.client("rds"), snapshot_cls.describe_method)(**args)
except ClientError as err:
if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]:
logging.error(f"Access denied in {self.account} "
f"(rds:{err.operation_name})")
else:
logging.exception(f"Failed to collect rds snapshots in {self.account}")
return False
for db_snapshot in response[snapshot_cls.response_field]:
# create RdsInstanceSnapshot/RdsClusterSnapshot instance
snapshot = snapshot_cls(
account=account,
source=db_snapshot
)
# filter from all public snapshots only snapshots owned by current account in current region
if snapshot.id.startswith(f"arn:aws:rds:{account.region}:{account.id}:"):
self.snapshots.append(snapshot)
if "Marker" in response:
marker = response["Marker"]
else:
break
# collect tags for all public snapshots
for snapshot in self.snapshots:
try:
snapshot.tags = self.account.client("rds").list_tags_for_resource(
ResourceName=snapshot.id
)['TagList']
except ClientError as err:
if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]:
logging.error(f"Access denied in {self.account} "
f"(rds:{err.operation_name})")
else:
logging.exception(f"Failed to describe db snapshot '{snapshot.id}' tags in {self.account}")
continue
return True
def check(self):
"""
Walk through public DB and Cluster RDS snapshots in the account.
:return: boolean. True - if check was successful,
False - otherwise
"""
instance = self.collect_public_rds_snapshots(
account=self.account,
snapshot_cls=RdsInstanceSnapshot
)
cluster = self.collect_public_rds_snapshots(
account=self.account,
snapshot_cls=RdsClusterSnapshot
)
return instance and cluster
class RdsEncryptionChecker(object):
"""
Basic class for checking RDS instances in account/region.
Encapsulates discovered RDS instances.
"""
def __init__(self, account):
"""
:param account: `Account` instance with RDS instances to check
"""
self.account = account
self.instances = []
def get_instance(self, id):
"""
:return: `RdsInstance`/`RdsCluster` by id (ARN)
"""
for instance in self.instances:
if instance.id == id:
return instance
return None
def collect_unencrypted_rds_instances(self, account, instance_cls):
"""
        Walk through RDS instances (DB or Cluster, depending on `instance_cls`) in the account.
        Filter unencrypted instances owned by the current account in the current region.
Put all gathered instances to `self.instances`.
:param account: `Account` instance where RDS instance is present
:param instance_cls: `RdsInstance` or `RdsCluster`
:return: boolean. True - if check was successful,
False - otherwise
"""
marker = None
while True:
args = {}
if marker:
args['Marker'] = marker
try:
# describe instances
response = getattr(self.account.client("rds"), instance_cls.describe_method)(**args)
except ClientError as err:
if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]:
logging.error(f"Access denied in {self.account} "
f"(rds:{err.operation_name})")
else:
logging.exception(f"Failed to collect rds instance in {self.account}")
return False
for db_instance in response[instance_cls.response_field]:
# create RdsInstance/RdsCluster instance
instance = instance_cls(
account=account,
source=db_instance
)
# filter from all un-encrypted instances only instances owned by current account in current region
if instance.id.startswith(f"arn:aws:rds:{account.region}:{account.id}:") and (not db_instance[instance_cls.storage_encryption_field]):
self.instances.append(instance)
if "Marker" in response:
marker = response["Marker"]
else:
break
# collect tags for all un-encrypted instances
for instance in self.instances:
try:
instance.tags = self.account.client("rds").list_tags_for_resource(
ResourceName=instance.id
)['TagList']
except ClientError as err:
if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]:
logging.error(f"Access denied in {self.account} "
f"(rds:{err.operation_name})")
else:
logging.exception(f"Failed to describe db instnaces '{instance.id}' tags in {self.account}")
continue
return True
def check(self):
"""
        Walk through DB and Cluster RDS instances in the account and collect the unencrypted ones.
:return: boolean. True - if check was successful,
False - otherwise
"""
instance = self.collect_unencrypted_rds_instances(
account=self.account,
instance_cls=RdsInstance
)
cluster = self.collect_unencrypted_rds_instances(
account=self.account,
instance_cls=RdsCluster
)
return instance and cluster
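# --- Added illustrative usage sketch (not part of the original module) ---
# `account` is assumed to be the project's `Account` wrapper used throughout
# this file, i.e. an object exposing `client("rds")`, `region` and `id`.
def _example_run_rds_checks(account):
    snapshot_checker = RdsSnapshotsChecker(account=account)
    encryption_checker = RdsEncryptionChecker(account=account)
    if snapshot_checker.check():
        for snapshot in snapshot_checker.snapshots:
            print(f"public snapshot owned by the account: {snapshot}")
    if encryption_checker.check():
        for instance in encryption_checker.instances:
            print(f"unencrypted instance: {instance}")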
| 37.13544
| 150
| 0.618564
|
f56a60ee4425a5839f9815cf4ef1f6eb25ba43e8
| 649
|
pyde
|
Python
|
new2.0/list_63/list_63.pyde
|
klimenkodasha/2019-fall-polytech-cs
|
19777f6adf6848af257a74db751e96f914a5f7e6
|
[
"MIT"
] | null | null | null |
new2.0/list_63/list_63.pyde
|
klimenkodasha/2019-fall-polytech-cs
|
19777f6adf6848af257a74db751e96f914a5f7e6
|
[
"MIT"
] | null | null | null |
new2.0/list_63/list_63.pyde
|
klimenkodasha/2019-fall-polytech-cs
|
19777f6adf6848af257a74db751e96f914a5f7e6
|
[
"MIT"
] | null | null | null |
xCoordinate = []
def setup():
size(500, 500)
smooth()
noStroke()
myInit()
def myInit():
print('New coordinates:')
for i in range(30):
xCoordinate.append(250+ int(random(-100,100)))
print(xCoordinate[i])
def draw():
global xCoordinate
background(30)
for i in range(len(xCoordinate)):
fill(20)
ellipse(xCoordinate[i], 250, 5, 5)
fill(250, 40)
ellipse(xCoordinate[i], 250, 10*i, 10*i)
if mouseX > 250:
xCoordinate = []
myInit()
def keyPressed():
if key == 's':
saveFrame('63.png')
| 20.935484
| 55
| 0.510015
|
15c019b6867068339fc1d27d149b61f34e958fd6
| 2,078
|
py
|
Python
|
sdap/tools/migrations/0002_auto_20190807_1139.py
|
umr1085-irset/reproGenomicsViewer
|
187ea320668e567d01572bfbf9497bebd691569a
|
[
"MIT"
] | null | null | null |
sdap/tools/migrations/0002_auto_20190807_1139.py
|
umr1085-irset/reproGenomicsViewer
|
187ea320668e567d01572bfbf9497bebd691569a
|
[
"MIT"
] | 1
|
2020-02-16T10:48:55.000Z
|
2020-02-16T11:06:36.000Z
|
sdap/tools/migrations/0002_auto_20190807_1139.py
|
umr1085-irset/reproGenomicsViewer
|
187ea320668e567d01572bfbf9497bebd691569a
|
[
"MIT"
] | 4
|
2019-11-04T15:00:55.000Z
|
2020-03-02T13:36:17.000Z
|
# Generated by Django 2.0.13 on 2019-08-07 11:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tools', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Argument',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(max_length=200)),
('parameter', models.CharField(blank=True, max_length=10, null=True)),
('multiple', models.BooleanField(default=False)),
('user_filled', models.BooleanField(default=True)),
('optional', models.BooleanField(default=True)),
('order', models.PositiveIntegerField()),
],
options={
'ordering': ('order',),
},
),
migrations.CreateModel(
name='ArgumentType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(default='TEXT', max_length=20)),
],
),
migrations.AddField(
model_name='tool',
name='form_name',
field=models.CharField(default='default_form', max_length=100),
),
migrations.AddField(
model_name='argument',
name='argument_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tools.ArgumentType'),
),
migrations.AddField(
model_name='argument',
name='tool',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='arguments', to='tools.Tool'),
),
migrations.AddField(
model_name='tool',
name='argument_types',
field=models.ManyToManyField(through='tools.Argument', to='tools.ArgumentType'),
),
]
| 36.45614
| 124
| 0.565448
|
6b86249ed635c8fdaee33b4f29aec0441a6c8b6f
| 31,998
|
py
|
Python
|
api/audit/utils.py
|
gpiechnik2/senter
|
6f64f5410fe02a5215ba148553dec45feaadcc09
|
[
"CC0-1.0"
] | 2
|
2021-12-08T19:38:33.000Z
|
2022-01-26T15:02:57.000Z
|
api/audit/utils.py
|
gpiechnik2/senter
|
6f64f5410fe02a5215ba148553dec45feaadcc09
|
[
"CC0-1.0"
] | null | null | null |
api/audit/utils.py
|
gpiechnik2/senter
|
6f64f5410fe02a5215ba148553dec45feaadcc09
|
[
"CC0-1.0"
] | 1
|
2021-12-08T19:38:39.000Z
|
2021-12-08T19:38:39.000Z
|
import requests
import os
from bs4 import BeautifulSoup
from urllib.parse import urlsplit, urljoin, urlparse
import re
import json
import random
import string
def analysis(url, user_agent):
results = []
#declare globally lists to avoid crawling website url multiple times
global crawled_urls
global urls_to_crawl
crawled_urls = []
urls_to_crawl = []
urls_to_crawl.append(url)
#until there are urls to "crawl"
while True:
#if there are no urls, break while loop
if not urls_to_crawl:
break
for url in urls_to_crawl:
#check if url is checked or not
checkedUrl = checkIfInCrawled(url)
if checkedUrl == True:
urls_to_crawl.remove(url)
continue
else:
#if url to crawl is not in crawled_urls, call website analysis
urlAnalysis = website_analysis(url, user_agent)
crawled_urls.append(url)
results.append(urlAnalysis)
continue
return results
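# --- Added illustrative usage sketch (not part of the original module) ---
# The URL and user agent below are placeholders. `analysis` keeps crawling the
# internal links it discovers, so it returns one result dict per crawled page.
def _example_run_audit():
    results = analysis("https://example.com", "Mozilla/5.0 (compatible; AuditBot/1.0)")
    for page in results:
        print(page['url'], page['url_status'], page['keyword'])
    return results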
def checkIfInCrawled(url):
#if url to crawl is in crawled_urls, remove crawled url
for curl in crawled_urls:
if url == curl:
return True
else:
continue
else:
return False
def checkIfInQueryToCrawl(url):
#check if url is in the query to crawl
for curl in urls_to_crawl:
if url == curl:
return True
else:
continue
else:
return False
def website_analysis(url, user_agent):
#declare headers
headers = {
"User-agent": user_agent
}
#create request
soup = make_soup(url, headers)
#get basic info
url_status = check_status(url)
ssl = check_ssl(url)
meta_robots_info = get_meta_robots_info(soup)
response_time = get_response_time(url, headers)
titles = get_title_tag(soup)
descriptions = get_meta_descriptions(soup)
h1 = get_h1(soup)
h2 = get_h2(soup)
other_h = get_other_h(soup)
url_title = get_url_title(url)
#get keyword based on data above
keyword = get_keyword(
titles[0] if titles else None,
descriptions[0] if descriptions else None,
h1[0] if h1 else None,
url_title)
#get main title based on keyword and len of title
main_title = get_main_title(keyword, titles)
#get main h1 based on keyword and len of h1
main_h1 = get_main_h1(keyword, h1)
#get main description based on keyword and len of descriptions
main_description = get_main_description(keyword, descriptions)
#internal and external links
urls_info = get_internal_and_external_links(url, soup)
external_links = urls_info['external_links']
internal_links = urls_info['internal_links']
#append internal links to urls_to_crawl
#but first, check if internal url is in crawled_urls
for interurl in internal_links:
#but first, check if internal url is in crawled_urls
checkedUrl = checkIfInCrawled(interurl)
if checkedUrl == True:
continue
#check if internal url is already in query to crawl
inQueryUrl = checkIfInQueryToCrawl(interurl)
if inQueryUrl == True:
continue
#add to urls to crawl
urls_to_crawl.append(interurl)
#images and keyword extraction
images = get_images(url, soup)
images_links = get_links_images(url, soup, images)
#analyze data
responseTimeAnalysis = response_time_analysis(response_time)
titleAnalysis = title_list_analysis(keyword, titles)
h1_analysis = h1_list_analysis(keyword, h1)
h2_analysis = h2_list_analysis(h2)
other_h_analysis = other_h_list_analysis(other_h)
descriptions_analysis = descriptions_list_analysis(keyword, descriptions)
urlTitleAnalysis = url_title_analysis(keyword, url_title)
#external_links_status = get_links_status(external_links)
#internal_links_status = get_links_status(internal_links)
externalLinksAnalysis = external_links_analysis(external_links)
internalLinksAnalysis = internal_links_analysis(internal_links)
images_analysis = get_images_analysis(url, soup)
#Append all stuff to competitions
results = {
'url' : url,
'url_status': url_status,
'ssl': {
'analysis': ssl
},
'meta_robots': meta_robots_info,
'response_time': {
'time': response_time * 1000,
'analysis': responseTimeAnalysis
},
'keyword': keyword,
'title' : {
'title_count': len(titles),
'main_title': main_title,
'titles': titles,
'analysis': titleAnalysis
},
'description': {
'descriptions_count': len(descriptions),
'main_description': main_description,
'descriptions': descriptions,
'analysis': descriptions_analysis
},
'url_title': {
'url_title': url_title,
'analysis': urlTitleAnalysis
},
'h1': {
'h1_count': len(h1),
'main_h1': main_h1,
'h1': h1,
'analysis': h1_analysis
},
'h2': {
'h2_count': len(h2),
'h2': h2,
'analysis': h2_analysis
},
'other_h': {
'other_h_count': len(other_h),
'other_h': other_h,
'analysis': other_h_analysis
},
'external_links': {
'external_links_count': len(external_links),
'external_links': external_links,
#'status': external_links_status,
'analysis': externalLinksAnalysis
},
'internal_links': {
'internal_links_count': len(internal_links),
'internal_links': internal_links,
#'status': internal_links_status,
'analysis': internalLinksAnalysis
},
'images': {
'images_counts': len(images),
'images': images_links,
'analysis': images_analysis
}
}
return results
def make_soup(url, headers):
r = requests.get(url, headers = headers, verify = False).content
soup = BeautifulSoup(r, 'html.parser', from_encoding='utf-8')
return soup
def get_response_time(url, headers):
r = requests.get(url, headers = headers)
response_time = r.elapsed.total_seconds()
return response_time
def response_time_analysis(response_time):
#convert seconds to milliseconds
response_time = response_time * 1000
#round to two decimal places
response_time = round(response_time, 2)
if response_time <= 0.00:
return [{
"status": "Invalid",
"message": "Czas odpowiedzi storny jest zbyt niski i wynosi: {:.2f} milisekund.".format(response_time)
}]
if 0.00 < response_time <= 200.00:
return [{
"status": "Valid",
"message": "Czas odpowiedzi strony jest niski i wynosi: {:.2f} milisekund.".format(response_time)
}]
elif response_time > 200.00:
return [{
"status": "Invalid",
"message": "Czas odpowiedzi strony jest zbyt wysoki i wynosi: {:.2f} milisekund.".format(response_time)
}]
def check_robots_txt(url, headers):
base_url = get_base_url(url)
#check robots.txt
result = os.popen("curl " + base_url + "robots.txt").read()
result_data_set = {"Disallowed":[], "Allowed":[]}
for line in result.split("\n"):
if line.startswith('Allow'): # this is for allowed url
result_data_set["Allowed"].append(base_url + line.split(': ')[1].split(' ')[0])
elif line.startswith('Disallow'): # this is for disallowed url
result_data_set["Disallowed"].append(base_url + line.split(': ')[1].split(' ')[0])
if result_data_set["Allowed"]:
return result_data_set["Allowed"]
else:
return False
def is_valid(url):
"""
Checks whether `url` is a valid URL.
"""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
def get_meta_robots_info(soup):
meta_robots = soup.find("meta", content = "noindex")
if meta_robots:
return [{
"status": "Invalid",
"message": "Strona posiada atrybutu meta({}) z określonym nieindeksowaniem jej w wyszukiwarkach.".format(meta)
}]
else:
return [{
"status": "Valid",
"message": "Strona nie posiada atrybutu meta blokującego indeksowanie strony."
}]
def get_h1(soup):
h1 = soup.find_all('h1')
results = []
if h1:
temp_result = (h.text for h in h1)
for h in temp_result:
h = h.replace('\r\n', '').replace('\n', '').replace('\r', '').replace(' ', '').replace(' ', '').replace(' ', '').replace('. ','.').replace('\xa0', ' ')
results.append(h)
else:
pass
return results
def get_h2(soup):
h2 = soup.find_all('h2')
results = []
if h2:
temp_result = (h.text for h in h2)
for h in temp_result:
h = h.replace('\r\n', '').replace('\n', '').replace('\r', '').replace(' ', '').replace(' ', '').replace(' ', '').replace('. ','.').replace('\xa0', ' ')
results.append(h)
else:
pass
return results
def get_other_h(soup):
h_others = soup.find_all(re.compile('^h[3-6]$'))
results = []
if h_others:
temp_result = (h.text for h in h_others)
for h in temp_result:
h = h.replace('\r\n', '').replace('\n', '').replace('\r', '').replace(' ', '').replace(' ', '').replace(' ', '').replace('. ','.').replace('\xa0', ' ')
results.append(h)
else:
pass
return results
def get_title_tag(soup):
results = []
for title in soup.findAll("title"):
if title:
title_text = (title.text).replace('\r\n', '').replace('\n', '').replace('\r', '').replace(' ', '').replace(' ', '').replace(' ', '').replace('. ','.').replace('\xa0', ' ')
results.append(str(title_text))
else:
pass
if not results:
pass
return results
def get_meta_descriptions(soup):
results = []
for meta in soup.findAll("meta"):
if meta:
metaname = meta.get('name', '').lower()
metaprop = meta.get('property', '').lower()
if 'description' == metaname or metaprop.find("description")>0:
desc = meta['content'].strip()
results.append(desc)
else:
pass
if not results:
pass
return results
def get_images(url, soup):
images = soup.find_all('img')
return images
def get_links_images(url, soup, images):
results = []
base_url = get_base_url(url)
for image in images:
image_url = base_url + image.get('src')
results.append(image_url)
return results
def get_images_analysis(url, soup):
results = []
base_url = get_base_url(url)
images = soup.find_all('img')
    #open every image in a new request and check its attributes and size
    for pic in images:
        r = requests.get(base_url + pic.get('src'))
        path, file_ = os.path.split('{}'.format(pic.get('src')))
        image_size = round(len(r.content)/(1024))
        image_alt = pic.get('alt')
if image_size < 200:
results.append({
"url": r.url,
"file_name": file_,
"status": "Valid",
"message": "Rozmiar zdjęcia jest poprawny i wynosi {}.".format(image_size)
})
if image_size >= 200:
results.append({
"url": r.url,
"file_name": file_,
"status": "Invalid",
"message": "Rozmiar zdjęcia jest zbyt duży. Obecny: {}. Sugerowany: 200MB.".format(image_size)
})
if not image_alt:
results.append({
"url": r.url,
"file_name": file_,
"status": "Invalid",
"message": "Zdjęcie nie posiada atrybutu alt."
})
if image_alt is None:
results.append({
"url": r.url,
"file_name": file_,
"status": "Invalid",
"message": "Zdjęcie posiada pusty atrybut alt."
})
if image_alt:
results.append({
"url": r.url,
"file_name": file_,
"status": "Valid",
"message": "Zdjęcie posiada poprawny atrybut alt({}).".format(image_alt)
})
#check alt attribute length
if 115 < len(image_alt) < 135:
results.append({
"url": r.url,
"file_name": file_,
"status": "Valid",
"message": "Atrybut alt({}) zdjęcia jest odpowiedniej długości.".format(image_alt)
})
elif len(image_alt) <= 115:
free_characters = 115 - len(image_alt)
results.append({
"url": r.url,
"file_name": file_,
"status": "Valid",
"message": "Atrybut alt({}) zdjęcia jest zbyt krótki. Dodaj do niego przynajmniej {} znaki.".format(image_alt, free_characters)
})
elif len(image_alt) > 135:
free_characters = len(image_alt) - 135
results.append({
"url": r.url,
"file_name": file_,
"status": "Valid",
"message": "Atrybut alt({}) zdjęcia jest zbyt długo. Skróć go o przynajmniej {} znaki.".format(image_alt, free_characters)
})
return results
def get_base_url(url):
url_splited = urlsplit(url)
base_url = url_splited.scheme + "://" + url_splited.netloc + "/"
return base_url
def get_internal_and_external_links(url, soup):
internal_links = []
external_links = []
base_url = get_base_url(url)
for a_tag in soup.findAll("a"):
href = a_tag.attrs.get("href")
if href == "" or href is None:
# href empty tag
continue
# join the URL if it's relative (not absolute link)
href = urljoin(url, href)
parsed_href = urlparse(href)
# remove URL GET parameters, URL fragments, etc.
href = parsed_href.scheme + "://" + parsed_href.netloc + parsed_href.path
if not is_valid(href):
# not a valid URL
continue
if href in internal_links:
# already in the set
continue
if 'javascript://void(0)' in href:
#remove all void hrefs
continue
if 'mailto://' in href:
#and mail hrefs
continue
if 'call://' in href:
#and call hrefs
continue
if 'tel:' in href:
#and call hrefs
continue
if base_url not in href:
# check if url is not in external link
if href not in external_links:
external_links.append(href)
continue
internal_links.append(href)
context = {
'external_links' : external_links,
'internal_links' : internal_links,
}
return context
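# --- Added illustrative sketch (not part of the original module) ---
# Link classification on an in-memory snippet (the markup below is made up):
# hrefs under the page's base URL land in `internal_links`, everything else
# valid lands in `external_links`, and tel:/mailto:/javascript hrefs are skipped.
def _example_classify_links():
    html = (
        '<a href="/kontakt">internal</a>'
        '<a href="https://external.example.org/page">external</a>'
        '<a href="tel:+48123456789">skipped</a>'
    )
    soup = BeautifulSoup(html, 'html.parser')
    links = get_internal_and_external_links("https://example.com/", soup)
    # links['internal_links'] -> ['https://example.com/kontakt']
    # links['external_links'] -> ['https://external.example.org/page']
    return links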
def get_url_title(url):
title = urlparse(url).path
if str(title) == "/":
return None
return title
def get_keyword(title, meta_desc, h1, url_title):
#if one of parameters is None, match to it random hash
if title is not None:
title = title.lower()
else:
title = get_random_string(8)
if meta_desc is not None:
meta_desc = meta_desc.lower()
else:
meta_desc = get_random_string(8)
if h1 is not None:
h1 = h1.lower()
else:
h1 = get_random_string(8)
if url_title is not None:
url_title = url_title.lower()
else:
url_title = get_random_string(8)
#hierarchy of elements:
# 1 url_title
# 2 title
# 3 meta_desc
# 4 h1_list
#some of combinations of keyword extraction
if url_title in title:
if url_title in meta_desc:
return url_title
if url_title in title:
if title in meta_desc:
return title
if url_title in title:
if url_title in h1:
return url_title
if url_title in title:
if h1 in url_title:
return h1
if url_title in meta_desc:
if url_title in h1:
return url_title
if url_title in meta_desc:
if h1 in url_title:
return h1
if title in meta_desc:
if title in h1:
return title
if title in meta_desc:
if h1 in title:
return h1
if h1 in url_title:
if h1 in title:
return h1
if h1 in url_title:
if h1 in meta_desc:
return h1
if h1 in title:
if h1 in meta_desc:
return h1
if title:
return title
if meta_desc:
return meta_desc
if h1:
return h1
return None
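# --- Added worked example (not part of the original module) ---
# The hierarchy above prefers the URL slug when it is confirmed by the other
# elements. The strings below are made up purely to illustrate the selection.
def _example_keyword_choice():
    keyword = get_keyword(
        title="Audyt SEO krok po kroku",
        meta_desc="Darmowy audyt SEO Twojej strony",
        h1="Jak wykonac audyt",
        url_title="audyt seo",
    )
    # url_title is contained in both the title and the meta description,
    # so the first branch returns it: keyword == "audyt seo"
    return keyword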
def get_random_string(length):
# put your letters in the following string
sample_letters = 'abcdefghijklmnopqrstuvwxyz'
result_str = ''.join((random.choice(sample_letters) for i in range(length)))
return result_str
def check_status(url):
r = requests.get(url)
status = r.status_code
return status
def check_ssl(url):
if "https" in str(url):
return [{
"status": "Valid",
"message": "Strona posiada certyfikat ssl."
}]
else:
return [{
"status": "Invalid",
"message": "Strona nie posiada certyfikatu ssl."
}]
def get_links_status(urls):
results = []
for url in urls:
status = check_status(url)
if status == 200:
results.append({
"url": url,
"url_status": status,
"status": "Valid",
"message": "Poprawny status."
})
else:
results.append({
"url": url,
"url_status": status,
"status": "Invalid",
"message": "Status strony jest niepoprawny."
})
return results
def get_main_h1(keyword, h1):
if len(h1) == 1:
return h1[0]
elif len(h1) < 1:
return None
else:
#clean h1 to get h1 with keyword inside only
h1_with_keyword = []
for tag in h1:
if keyword.lower() in tag.lower():
h1_with_keyword.append(tag)
else:
continue
#return the shortest h1, null or the only h1
if len(h1_with_keyword) > 1:
#we assume, that if there are two the same titles, min_desc is None
min_desc = min(h1_with_keyword, key = len)
if min_desc:
return min_desc
else:
return h1_with_keyword[0]
# if there is 1 title, return him or get first from h1 list
elif len(h1_with_keyword) == 1:
return h1_with_keyword[0]
#remove duplicates if exists, and return first element
elif len(set(h1)) == 1:
return h1[0]
return None
def get_main_title(keyword, titles):
if len(titles) == 1:
return titles[0]
elif len(titles) < 1:
return None
else:
#clean h1 to get title with keyword inside only
titles_with_keyword = []
for tag in titles:
if keyword.lower() in tag.lower():
titles_with_keyword.append(tag)
else:
continue
#return the shortest title, null or the only title
if len(titles_with_keyword) > 1:
#we assume, that if there are two the same titles, min_desc is None
min_desc = min(titles_with_keyword, key = len)
if min_desc:
return min_desc
else:
return titles_with_keyword[0]
# if there is 1 title, return him or get first from h1 list
elif len(titles_with_keyword) == 1:
return titles_with_keyword[0]
#remove duplicates if exists, and return first element
elif len(set(titles)) == 1:
return titles[0]
return None
def get_main_description(keyword, descriptions):
if len(descriptions) == 1:
return descriptions[0]
elif len(descriptions) < 1:
return None
else:
#clean descriptions to get descriptions with keyword inside only
descriptions_with_keyword = []
for tag in descriptions:
if keyword.lower() in tag.lower():
descriptions_with_keyword.append(tag)
else:
continue
#return the shortest descriptions, null or the only descriptions
if len(descriptions_with_keyword) > 1:
#we assume, that if there are two the same descriptions, min_desc is None
min_desc = min(descriptions_with_keyword, key = len)
if min_desc:
return min_desc
else:
return descriptions_with_keyword[0]
# if there is 1 title, return him or get first from h1 list
elif len(descriptions_with_keyword) == 1:
return descriptions_with_keyword[0]
#remove duplicates if exists, and return first element
elif len(set(descriptions)) == 1:
return descriptions[0]
return None
def descriptions_list_analysis(keyword, descriptions):
if len(descriptions) < 1:
return [{
"status": "Invalid",
"message": "Brak znacznika description na stronie."
}]
elif len(descriptions) == 1:
results = [{
"status": "Valid",
"message": "Znaleziony znacznik description na stronie."
}]
analysis = description_analysis(keyword, descriptions[0])
results.extend(analysis)
return results
elif len(descriptions) > 1:
results = [{
"status": "Invalid",
"message": "Ilość znacznika description na stronie jest za duża."
}]
for tag in descriptions:
analysis = description_analysis(keyword, tag)
results.extend(analysis)
return results
def description_analysis(keyword, description):
results = []
if keyword is None:
keyword = get_random_string(8)
#check if keyword exists in description tag
if keyword.lower() in description.lower():
results.append({
"status": "Valid",
"message": "Znacznik description({}) zawiera w sobie słowo bądź frazę kluczową.".format(description)
})
else:
results.append({
"status": "Invalid",
"message": "Znacznik description({}) nie zawiera w sobie słowa bądź frazy kluczowej.".format(description)
})
#check length of description
    if 50 <= len(description) <= 160:
results.append({
"status": "Valid",
"message": "Znacznik description({}) jest poprawnej długości.".format(description)
})
elif len(description) < 50:
free_characters = 50 - len(description)
results.append({
"status": "Invalid",
"message": "Znacznik description({}) jest zbyt krótki. Dodaj do niego {} znaków.".format(description, free_characters)
})
else:
free_characters = len(description) - 160
results.append({
"status": "Invalid",
"message": "Znacznik description({}) jest zbyt długi. Skróć go o {} znaków.".format(description, free_characters)
})
return results
def title_list_analysis(keyword, titles):
if len(titles) < 1:
return [{
"status": "Invalid",
"message": "Brak znacznika title na stronie."
}]
elif len(titles) == 1:
results = [{
"status": "Valid",
"message": "Znaleziony znacznik title na stronie."
}]
analysis = title_analysis(keyword, titles[0])
results.extend(analysis)
return results
elif len(titles) > 1:
results = [{
"status": "Invalid",
"message": "Ilość znacznika title na stronie jest za duża."
}]
for tag in titles:
analysis = title_analysis(keyword, tag)
results.extend(analysis)
return results
def title_analysis(keyword, title):
results = []
#if keyword is None, match to it random string
if keyword is None:
keyword = get_random_string(8)
#check length of title
    if 50 <= len(title) <= 60:
results.append({
"status": "Valid",
"message": "Znacznik title({}) jest poprawnej długości.".format(title)
})
elif len(title) < 50:
free_characters = 50 - len(title)
results.append({
"status": "Invalid",
"message": "Znacznik title({}) jest zbyt krótki. Dodaj do niego {} znaków.".format(title, free_characters)
})
else:
free_characters = len(title)- 60
results.append({
"status": "Invalid",
"message": "Znacznik title({}) jest zbyt długi. Skróć go o {} znaków.".format(title, free_characters)
})
return results
def url_title_analysis(keyword, url_title):
results = []
#if keyword is None, match to it random string
if keyword is None:
keyword = get_random_string(8)
#if url_title is None, match to it random string
if url_title is None:
return None
#check if keyword exists in url_title
if keyword.lower() in url_title.lower().replace('-', ' ').replace('_', ' '):
results.append({
"status": "Valid",
"message": "Url title({}) zawiera w sobie słowo bądź frazę kluczową.".format(url_title)
})
else:
results.append({
"status": "Invalid",
"message": "Url title({}) nie zawiera w sobie słowa bądź frazy kluczowej.".format(url_title)
})
return results
def h1_list_analysis(keyword, h1):
if len(h1) < 1:
return [{
"status": "Invalid",
"message": "Brak znacznika h1 na stronie."
}]
elif len(h1) == 1:
results = [{
"status": "Valid",
"message": "Znaleziony znacznik h1 na stronie."
}]
analysis = h1_analysis(keyword, h1[0])
results.extend(analysis)
return results
elif len(h1) > 1:
results = [{
"status": "Invalid",
"message": "Ilość znacznika h1 na stronie jest za duża."
}]
for tag in h1:
analysis = h1_analysis(keyword, tag)
results.extend(analysis)
return results
def h1_analysis(keyword, h1):
results = []
#if keyword is None, match to it random string
if keyword is None:
keyword = get_random_string(8)
#check if keyword exists in h1 tag
if keyword.lower() in h1.lower():
results.append({
"status": "Valid",
"message": "Znacznik h1({}) zawiera w sobie słowo bądź frazę kluczową.".format(h1)
})
else:
results.append({
"status": "Invalid",
"message": "Znacznik h1({}) nie zawiera w sobie słowa bądź frazy kluczowej.".format(h1)
})
#check length of h1
    if 20 <= len(h1) <= 70:
results.append({
"status": "Valid",
"message": "Znacznik h1({}) jest poprawnej długości.".format(h1)
})
elif len(h1) < 20:
free_characters = 20 - len(h1)
results.append({
"status": "Invalid",
"message": "Znacznik h1({}) jest zbyt krótki. Dodaj do niego {} znaków.".format(h1, free_characters)
})
else:
free_characters = len(h1)- 70
results.append({
"status": "Invalid",
"message": "Znacznik h1({}) jest zbyt długi. Skróć go o {} znaków.".format(h1, free_characters)
})
return results
def h2_list_analysis(h2):
if len(h2) < 1:
return [{
"status": "Invalid",
"message": "Brak znaczników h2 na stronie."
}]
elif len(h2) == 1:
free_h2_tags = 4 - len(h2)
results = [{
"status": "Invalid",
"message": "Zbyt mała ilość znaczników h2 na stronie. Dodaj przynajmniej {} znaczniki.".format(free_h2_tags)
}]
analysis = h2_analysis(h2[0])
results.extend(analysis)
return results
elif len(h2) >= 2:
results = [{
"status": "Valid",
"message": "Poprawna ilość znaczników h2 na stronie."
}]
for tag in h2:
analysis = h2_analysis(tag)
results.extend(analysis)
return results
def h2_analysis(h2):
results = []
#check length of h1
if 20 <= len(h2) <= 70:
results.append({
"status": "Valid",
"message": "Znacznik h2({}) jest poprawnej długości.".format(h2)
})
elif len(h2) < 20:
free_characters = 20 - len(h2)
results.append({
"status": "Invalid",
"message": "Znacznik h2({}) jest zbyt krótki. Dodaj do niego {} znaków.".format(h2, free_characters)
})
else:
free_characters = len(h2)- 70
results.append({
"status": "Invalid",
"message": "Znacznik h2({}) jest zbyt długi. Skróć go o {} znaków.".format(h2, free_characters)
})
return results
def other_h_list_analysis(other_h):
    if len(other_h) < 1:
return [{
"status": "Invalid",
"message": "Brak znaczników h3-h6 na stronie."
}]
elif len(other_h) < 4:
free_other_h_tags = 4 - len(other_h)
return [{
"status": "Invalid",
"message": "Zbyt mała ilość znaczników h3-h6 na stronie. Dodaj przynajmniej {} znaczniki.".format(free_other_h_tags)
}]
elif len(other_h) >= 4:
return [{
"status": "Valid",
"message": "Poprawna ilość znaczników h3-h6 na stronie."
}]
def external_links_analysis(external_links):
    if len(external_links) < 1:
return [{
"status": "Invalid",
"message": "Brak zewnętrznych linków na stronie."
}]
elif len(external_links) < 3:
free_external_links = 3 - len(external_links)
results = [{
"status": "Invalid",
"message": "Zbyt mała ilość zewnętrznych linków na stronie. Dodaj przynajmniej {} linki.".format(free_external_links)
}]
return results
elif len(external_links) >= 3:
results = [{
"status": "Valid",
"message": "Poprawna ilość zewnętrznych linków na stronie."
}]
return results
def internal_links_analysis(internal_links):
    if len(internal_links) < 1:
return [{
"status": "Invalid",
"message": "Brak wewnętrznych linków na stronie."
}]
elif len(internal_links) < 10:
free_internal_links = 10 - len(internal_links)
results = [{
"status": "Invalid",
"message": "Zbyt mała ilość wewnętrznych linków na stronie. Dodaj przynajmniej {} linki.".format(free_internal_links)
}]
return results
elif len(internal_links) >= 10:
results = [{
"status": "Valid",
"message": "Poprawna ilość wewnętrznych linków na stronie."
}]
return results
| 27.679931
| 189
| 0.561848
|
5af4fb422ffca37da6567defca8e5a12f82b9944
| 1,275
|
py
|
Python
|
setup.py
|
ociule/django-users-admin
|
817590dc7a6c1e3657f31202d4068fd25002a30a
|
[
"BSD-3-Clause"
] | 1
|
2019-03-30T15:28:36.000Z
|
2019-03-30T15:28:36.000Z
|
setup.py
|
ociule/django-users-admin
|
817590dc7a6c1e3657f31202d4068fd25002a30a
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
ociule/django-users-admin
|
817590dc7a6c1e3657f31202d4068fd25002a30a
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-users-admin',
version='0.1',
packages=['users_admin'],
install_requires=['django>=2.0'],
include_package_data=True,
license='BSD License', # example license
description='Hacking Django\'s admin to make views for the users',
long_description=README,
url='https://github.com/ociule/django-users-admin',
author='Ovidiu Ciule',
author_email='ovidiu.ciule@gmail.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 2.1',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| 34.459459
| 78
| 0.636078
|
a087bf148491563199fa46426f3156befdb6f790
| 6,404
|
py
|
Python
|
build/android/pylib/host_driven/setup.py
|
nagineni/chromium-crosswalk
|
5725642f1c67d0f97e8613ec1c3e8107ab53fdf8
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 19
|
2015-02-19T21:08:27.000Z
|
2021-11-19T07:16:49.000Z
|
build/android/pylib/host_driven/setup.py
|
j4ckfrost/android_external_chromium_org
|
a1a3dad8b08d1fcf6b6b36c267158ed63217c780
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 8
|
2015-08-31T06:39:59.000Z
|
2021-12-04T14:53:28.000Z
|
build/android/pylib/host_driven/setup.py
|
j4ckfrost/android_external_chromium_org
|
a1a3dad8b08d1fcf6b6b36c267158ed63217c780
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 10
|
2015-08-28T16:44:03.000Z
|
2019-07-17T17:37:34.000Z
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Setup for instrumentation host-driven tests."""
import logging
import os
import sys
import types
import test_case
import test_info_collection
import test_runner
def _GetPythonFiles(root, files):
"""Returns all files from |files| that end in 'Test.py'.
Args:
root: A directory name with python files.
files: A list of file names.
Returns:
A list with all python files that match the testing naming scheme.
"""
return [os.path.join(root, f) for f in files if f.endswith('Test.py')]
def _InferImportNameFromFile(python_file):
"""Given a file, infer the import name for that file.
Example: /usr/foo/bar/baz.py -> baz.
Args:
python_file: Path to the Python file, ostensibly to import later.
Returns:
The module name for the given file.
"""
return os.path.splitext(os.path.basename(python_file))[0]
def _GetTestModules(host_driven_test_root, is_official_build):
"""Retrieve a list of python modules that match the testing naming scheme.
Walks the location of host-driven tests, imports them, and provides the list
of imported modules to the caller.
Args:
host_driven_test_root: The path to walk, looking for the
pythonDrivenTests or host_driven_tests directory
is_official_build: Whether to run only those tests marked 'official'
Returns:
A list of python modules under |host_driven_test_root| which match the
testing naming scheme. Each module should define one or more classes that
derive from HostDrivenTestCase.
"""
# By default run all host-driven tests under pythonDrivenTests or
# host_driven_tests.
host_driven_test_file_list = []
for root, _, files in os.walk(host_driven_test_root):
if (root.endswith('host_driven_tests') or
root.endswith('pythonDrivenTests') or
(is_official_build and (root.endswith('pythonDrivenTests/official') or
root.endswith('host_driven_tests/official')))):
host_driven_test_file_list += _GetPythonFiles(root, files)
host_driven_test_file_list.sort()
test_module_list = [_GetModuleFromFile(test_file)
for test_file in host_driven_test_file_list]
return test_module_list
def _GetModuleFromFile(python_file):
"""Gets the python module associated with a file by importing it.
Args:
python_file: File to import.
Returns:
The module object.
"""
sys.path.append(os.path.dirname(python_file))
import_name = _InferImportNameFromFile(python_file)
return __import__(import_name)
def _GetTestsFromClass(test_case_class, **kwargs):
"""Returns one test object for each test method in |test_case_class|.
Test methods are methods on the class which begin with 'test'.
Args:
test_case_class: Class derived from HostDrivenTestCase which contains zero
or more test methods.
kwargs: Keyword args to pass into the constructor of test cases.
Returns:
A list of test case objects, each initialized for a particular test method.
"""
test_names = [m for m in dir(test_case_class)
if _IsTestMethod(m, test_case_class)]
return [test_case_class(name, **kwargs) for name in test_names]
def _GetTestsFromModule(test_module, **kwargs):
"""Gets a list of test objects from |test_module|.
Args:
test_module: Module from which to get the set of test methods.
kwargs: Keyword args to pass into the constructor of test cases.
Returns:
A list of test case objects each initialized for a particular test method
defined in |test_module|.
"""
tests = []
for name in dir(test_module):
attr = getattr(test_module, name)
if _IsTestCaseClass(attr):
tests.extend(_GetTestsFromClass(attr, **kwargs))
return tests
def _IsTestCaseClass(test_class):
return (type(test_class) is types.TypeType and
issubclass(test_class, test_case.HostDrivenTestCase) and
test_class is not test_case.HostDrivenTestCase)
def _IsTestMethod(attrname, test_case_class):
"""Checks whether this is a valid test method.
Args:
attrname: The method name.
test_case_class: The test case class.
Returns:
True if test_case_class.'attrname' is callable and it starts with 'test';
False otherwise.
"""
attr = getattr(test_case_class, attrname)
return callable(attr) and attrname.startswith('test')
def _GetAllTests(test_root, is_official_build, **kwargs):
"""Retrieve a list of host-driven tests defined under |test_root|.
Args:
test_root: Path which contains host-driven test files.
is_official_build: Whether this is an official build.
kwargs: Keyword args to pass into the constructor of test cases.
Returns:
List of test case objects, one for each available test method.
"""
if not test_root:
return []
all_tests = []
test_module_list = _GetTestModules(test_root, is_official_build)
for module in test_module_list:
all_tests.extend(_GetTestsFromModule(module, **kwargs))
return all_tests
def InstrumentationSetup(host_driven_test_root, official_build,
instrumentation_options):
"""Creates a list of host-driven instrumentation tests and a runner factory.
Args:
host_driven_test_root: Directory where the host-driven tests are.
official_build: True if this is an official build.
instrumentation_options: An InstrumentationOptions object.
Returns:
A tuple of (TestRunnerFactory, tests).
"""
test_collection = test_info_collection.TestInfoCollection()
all_tests = _GetAllTests(
host_driven_test_root, official_build,
instrumentation_options=instrumentation_options)
test_collection.AddTests(all_tests)
available_tests = test_collection.GetAvailableTests(
instrumentation_options.annotations,
instrumentation_options.exclude_annotations,
instrumentation_options.test_filter)
logging.debug('All available tests: ' + str(
[t.tagged_name for t in available_tests]))
def TestRunnerFactory(device, shard_index):
return test_runner.HostDrivenTestRunner(
device, shard_index,
instrumentation_options.tool,
instrumentation_options.push_deps,
instrumentation_options.cleanup_test_files)
return (TestRunnerFactory, available_tests)
| 31.546798
| 79
| 0.738445
|
7bae4a9a763e7c7746b1c5de1c6d177aaab735dd
| 13,726
|
py
|
Python
|
venv/Lib/site-packages/transformers/utils/dummy_flax_objects.py
|
GuilhermeJC13/storIA
|
eeecbe9030426f70c6aa73ca0ce8382860c8495c
|
[
"MIT"
] | 4
|
2021-07-27T23:39:02.000Z
|
2021-09-23T04:17:08.000Z
|
venv/Lib/site-packages/transformers/utils/dummy_flax_objects.py
|
GuilhermeJC13/storIA
|
eeecbe9030426f70c6aa73ca0ce8382860c8495c
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/transformers/utils/dummy_flax_objects.py
|
GuilhermeJC13/storIA
|
eeecbe9030426f70c6aa73ca0ce8382860c8495c
|
[
"MIT"
] | 3
|
2021-07-27T17:33:58.000Z
|
2021-07-29T12:46:59.000Z
|
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..file_utils import requires_backends
class FlaxForcedBOSTokenLogitsProcessor:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxForcedEOSTokenLogitsProcessor:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxLogitsProcessor:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxLogitsProcessorList:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxLogitsWarper:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxMinLengthLogitsProcessor:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxTemperatureLogitsWarper:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxTopKLogitsWarper:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxTopPLogitsWarper:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = None
FLAX_MODEL_FOR_MASKED_LM_MAPPING = None
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None
FLAX_MODEL_FOR_PRETRAINING_MAPPING = None
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = None
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None
FLAX_MODEL_MAPPING = None
class FlaxAutoModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxAutoModelForCausalLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxAutoModelForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxAutoModelForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxAutoModelForNextSentencePrediction:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxAutoModelForPreTraining:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxAutoModelForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxAutoModelForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxAutoModelForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxBartForConditionalGeneration:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxBartForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxBartForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxBartModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxBertForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxBertForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxBertForNextSentencePrediction:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBertForPreTraining:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBertForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxBertForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxBertForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxBertModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxBertPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxBigBirdForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxBigBirdForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxBigBirdForPreTraining:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBigBirdForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxBigBirdForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxBigBirdForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxBigBirdModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxBigBirdPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxCLIPModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxCLIPPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxCLIPTextModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxCLIPVisionModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxElectraForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxElectraForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxElectraForPreTraining:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxElectraForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxElectraForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxElectraForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxElectraModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxElectraPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxGPT2LMHeadModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxGPT2Model:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxRobertaForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxRobertaForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxRobertaForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxRobertaForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxRobertaForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxRobertaModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxRobertaPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxViTForImageClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxViTModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
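# --- Added note (not part of the autogenerated file) ---
# Each placeholder above only calls `requires_backends`, which raises an
# ImportError telling the user to install flax; once flax is available the
# real classes are imported instead. A hedged usage sketch:
def _example_dummy_behaviour():
    try:
        FlaxBertModel()
    except ImportError as err:
        print(err)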
| 24.467023
| 75
| 0.662465
|
889e211e39999e991ca9d49e06dcf1081eeeaabe
| 6,305
|
py
|
Python
|
pipelitools/models/models.py
|
nastiag67/tools
|
9989c98acd67ff8fda900d0a2ae3a6d21e2bbd88
|
[
"MIT"
] | 1
|
2020-12-20T14:58:35.000Z
|
2020-12-20T14:58:35.000Z
|
pipelitools/models/models.py
|
nastiag67/tools
|
9989c98acd67ff8fda900d0a2ae3a6d21e2bbd88
|
[
"MIT"
] | 5
|
2021-07-13T15:10:39.000Z
|
2021-07-26T14:44:21.000Z
|
pipelitools/models/models.py
|
nastiag67/tools
|
9989c98acd67ff8fda900d0a2ae3a6d21e2bbd88
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import random
import pickle
import os
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from pipelitools.models import metrics as m
def test_models():
""" """
print('test_models: ok')
class Model:
"""Runs a model, plots confusion matrix, calculates the metrics and outputs the reports in a folder.
Parameters
----------
X_train : pd.DataFrame
Features used in training.
y_train : pd.Series
Labels for training (1D vector).
X_test : pd.DataFrame
Features used in testing.
y_test : pd.Series
Labels for testing (1D vector).
"""
def __init__(self, X_train, y_train, X_test, y_test):
self.X_train = X_train
self.y_train = y_train
self.X_test = X_test
self.y_test = y_test
def checkmodel(self,
name,
model,
steps=[],
parameters={},
average='binary',
multiclass=False,
metric='accuracy',
randomized_search=False,
nfolds=5,
n_jobs=None,
save_pickle=True,
verbose=0
):
""" Calculates the model based on the pipeline and hyperparameter grid.
Then, evaluates metrics (f1-score, accuracy, precision, recall) and plots a confusion matrix.
Can save the final fitted model with pickle to load later.
Parameters
----------
name : str
Name of the model.
model : abc.ABCMeta
Machine learning model.
steps : list, optional (default = [])
Steps of the preprocessing pipeline.
parameters : dict, optional (default = {})
Parameters of the model.
average : str, optional (default = 'binary')
This parameter is required for multiclass/multilabel targets. If None, the scores for each class are
returned. Otherwise, this determines the type of averaging performed on the data
multiclass : bool, optional (default = False)
True if the classification is multiclass.
metric : str, optional (default = 'accuracy')
Metric which should be used to select the best model.
randomized_search : bool, optional (default = False)
True if randomized search.
nfolds : int, optional (default = 5)
Number of folds in CV.
n_jobs : int, optional (default = None)
The number of parallel jobs to run.
save_pickle : bool, optional (default=True)
Save the best fitted model with pickle.
To load do:
            loaded_model = pickle.load(open('./temp_pickle_models/<name>.sav', 'rb'))
verbose : int, optional (default = 0)
Verbose CV.
Returns
-------
cv : sklearn.model_selection._search.GridSearchCV
The fitted model.
y_pred : np.ndarray
predicted values.
Figures are saved in a separate folder.
"""
assert ' ' not in name, "Parameter 'name' must be specified without space inside."
assert isinstance(self.y_train, pd.Series), "y_train must be of type pd.Series."
assert isinstance(self.y_test, pd.Series), "y_test must be of type pd.Series."
if len(parameters) != 0:
random_parameter = random.choice(list(parameters.keys()))
assert '__' in random_parameter and name in random_parameter, \
f"Parameters should be presented in a dictionary in the following way: \n\
'{name}__parameter': [parameter_value]"
steps_model = steps[:]
# Create the pipeline
if multiclass:
from imblearn.pipeline import Pipeline
else:
from sklearn.pipeline import Pipeline
steps_model.append((name, model))
pipeline = Pipeline(steps_model)
if multiclass:
cv_metric = metric + '_'+average
else:
cv_metric = metric
if randomized_search:
cv = RandomizedSearchCV(estimator=pipeline,
param_distributions=parameters,
cv=nfolds,
                                    # refit=cv_metric,
scoring=cv_metric,
# n_iter=10,
verbose=verbose,
n_jobs=n_jobs,
random_state=42)
else:
cv = GridSearchCV(estimator=pipeline,
param_grid=parameters,
cv=nfolds,
                              # refit=cv_metric,
scoring=cv_metric,
verbose=verbose,
n_jobs=n_jobs)
# Fit to the training set
cv.fit(self.X_train, self.y_train)
# Mean cross-validated score of the best_estimator
print(f"Mean cross-validated score of the best_estimator: {round(cv.best_score_, 4)}")
# Parameter setting that gave the best results on the validation data
if len(parameters) != 0:
df_tuned = pd.DataFrame(cv.best_params_, index=[0]).transpose().reset_index().rename(
columns={'index': 'Parameter', 0: 'Tuned value'})
df_tuned['Parameter'] = df_tuned.Parameter.str.partition('__').iloc[:, -1]
print(df_tuned, '\n')
# Predict the labels of the test set
y_pred = cv.predict(self.X_test)
# METRICS
m.metrics_report(cv, name, self.X_test, self.y_test, self.y_train, data='validation')
# SAVE MODEL USING PICKLE
if save_pickle:
if os.path.exists("./temp_pickle_models/") is False:
os.mkdir("./temp_pickle_models/")
pickle.dump(cv, open(f"./temp_pickle_models/{name}.sav", 'wb'))
return cv, y_pred
def evaluate(self, model, name, X_test, y_test, y_train):
m.metrics_report(model, name, X_test, y_test, y_train, data='test')
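# Illustrative usage sketch (not part of the original module): the estimator, the
# 'clf' step name, and the parameter grid below are assumptions chosen to satisfy
# checkmodel's naming check (grid keys must look like '<name>__<param>'); note that
# y_train / y_test must be pd.Series.
def _example_checkmodel_usage(X_train, y_train, X_test, y_test):
    from sklearn.linear_model import LogisticRegression
    from sklearn.preprocessing import StandardScaler
    mod = Model(X_train, y_train, X_test, y_test)
    cv, y_pred = mod.checkmodel(
        name='clf',
        model=LogisticRegression(max_iter=1000),
        steps=[('scaler', StandardScaler())],
        parameters={'clf__C': [0.1, 1.0, 10.0]},
        metric='accuracy',
        nfolds=5,
        save_pickle=False,
    )
    return cv, y_pred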
if __name__ == '__main__':
test_models()
| 36.028571
| 112
| 0.557494
|
e479e639614e60e7b1de3d0106bf6634d17743b1
| 10,803
|
py
|
Python
|
scripts/fgi/kube.py
|
sitalkedia/aptos-core-1
|
14f0983143c4b16951586ebe11cbb0a6e102cd60
|
[
"Apache-2.0"
] | null | null | null |
scripts/fgi/kube.py
|
sitalkedia/aptos-core-1
|
14f0983143c4b16951586ebe11cbb0a6e102cd60
|
[
"Apache-2.0"
] | null | null | null |
scripts/fgi/kube.py
|
sitalkedia/aptos-core-1
|
14f0983143c4b16951586ebe11cbb0a6e102cd60
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Aptos
# SPDX-License-Identifier: Apache-2.0
import json
import os
import random
import subprocess
import tempfile
import time
FORGE_K8S_CLUSTERS = [
"forge-0",
"forge-1",
]
WORKSPACE_CHART_BUCKETS = {
"forge-0": "aptos-testnet-forge-0-helm-312428ba",
"forge-1": "aptos-testnet-forge-1-helm-a2b65112",
"forge-dev": "aptos-testnet-forge-dev-helm-8d0a5291",
}
AWS_ACCOUNT = (
subprocess.check_output(
["aws", "sts", "get-caller-identity",
"--query", "Account", "--output", "text"],
stderr=subprocess.DEVNULL,
encoding="UTF-8",
).strip()
if not os.getenv("AWS_ACCOUNT")
else os.getenv("AWS_ACCOUNT")
)
# ================ Kube job ================
def create_forge_job(context, user, tag, base_tag, timeout_secs, forge_envs, forge_args):
job_name = f"forge-{user}-{int(time.time())}"
job_name = job_name.replace("_", "-") # underscore not allowed in pod name
cluster_name = get_cluster_name_from_context(context)
# job template to spin up. Edit this in place
template = json.loads(
subprocess.check_output(
[
"kubectl",
"-o=json",
f"--context={context}",
"get",
"job",
"--selector=app.kubernetes.io/name=forge-debug",
],
stderr=subprocess.DEVNULL,
encoding="UTF-8",
)
)
if len(template["items"]) != 1:
print("ERROR: there must be exactly one forge-debug job")
return None
template = template["items"][0]
# delete old spec details
del template["spec"]["selector"]["matchLabels"]["controller-uid"]
del template["spec"]["template"]["metadata"]["labels"]["controller-uid"]
del template["spec"]["template"]["metadata"]["labels"]["job-name"]
# change job name, labels, and backoff limit
template["metadata"]["name"] = job_name
template["metadata"]["labels"]["app.kubernetes.io/name"] = "forge"
template["spec"]["template"]["metadata"]["labels"][
"app.kubernetes.io/name"
] = "forge"
template["spec"]["backoffLimit"] = 0
# change startup command with timeout and extra args
cmd = template["spec"]["template"]["spec"]["containers"][0]["command"][2]
template["spec"]["template"]["spec"]["containers"][0]["command"][2] = cmd.replace(
"tail -f /dev/null",
f"timeout {timeout_secs} forge {' '.join(forge_args)} test k8s-swarm --cluster-name {cluster_name} --image-tag {tag} --base-image-tag {base_tag}".strip(),
)
# additional environment variables
for env_var in forge_envs:
name, value = env_var.split("=")
template["spec"]["template"]["spec"]["containers"][0]["env"].append(
{"name": name, "value": value}
)
# new image tag
image_repo, _ = template["spec"]["template"]["spec"]["containers"][0][
"image"
].split(":")
template["spec"]["template"]["spec"]["containers"][0][
"image"
] = f"{image_repo}:{tag}"
return job_name, template
# ================ Kube queries ================
def get_cluster_context(cluster_name):
return f"arn:aws:eks:us-west-2:{AWS_ACCOUNT}:cluster/aptos-{cluster_name}"
def get_cluster_name_from_context(context):
return context.split("/")[1]
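# Illustrative example (the AWS account id is hypothetical): with
# AWS_ACCOUNT = "123456789012",
#   get_cluster_context("forge-0")
#     -> "arn:aws:eks:us-west-2:123456789012:cluster/aptos-forge-0"
# and splitting that context on "/" recovers the EKS cluster name, keeping the
# "aptos-" prefix:
#   get_cluster_name_from_context("arn:aws:eks:...:cluster/aptos-forge-0") -> "aptos-forge-0"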
def kube_ensure_cluster(clusters):
attempts = 360
for attempt in range(attempts):
for cluster in clusters:
context = get_cluster_context(cluster)
running_pods = get_forge_pods_by_phase(context, "Running")
pending_pods = get_forge_pods_by_phase(context, "Pending")
monitoring_pods = get_monitoring_pod(context)
# check pod status
num_running_pods = len(running_pods["items"])
num_pending_pods = len(pending_pods["items"])
for pod in monitoring_pods["items"]:
pod_name = pod["metadata"]["name"]
healthy = pod["status"]["phase"] == "Running"
if not healthy:
print(
f"{cluster} has an unhealthy monitoring pod {pod_name}. Skipping."
)
continue
if num_running_pods > 0:
print(
f"{cluster} has {num_running_pods} running forge pods. Skipping.")
elif num_pending_pods > 0:
print(
f"{cluster} has {num_pending_pods} pending forge pods. Skipping.")
else:
return cluster
print(
f"All clusters have jobs running on them. Retrying in 10 secs. Attempt: {attempt}/{attempts}"
)
time.sleep(10)
print("Failed to schedule forge pod. All clusters are busy")
return None
# randomly select a cluster that is free based on its pod status:
# - no other forge pods currently Running or Pending
# - all monitoring pods are ready
def kube_select_cluster():
shuffled_clusters = random.sample(
FORGE_K8S_CLUSTERS, len(FORGE_K8S_CLUSTERS))
return kube_ensure_cluster(shuffled_clusters)
def kube_wait_job(job_name, context):
attempts = 360
for _ in range(attempts):
try:
phase = get_forge_job_phase(job_name, context)
except subprocess.CalledProcessError:
print(f"kubectl get pod {job_name} failed. Retrying.")
continue
# pod is either Running, Succeeded, or assume it's working
# https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase
if phase in ["Running", "Succeeded", "Unknown"]:
print(f"{job_name} reached phase: {phase}")
return 0
if phase in ["Failed"]:
print(f"{job_name} reached phase: {phase}")
return 1
# error pulling the image
ret = subprocess.call(
f"kubectl --context='{context}' get pod --selector=job-name={job_name} | grep -i -e ImagePullBackOff -e "
f"InvalidImageName -e ErrImagePull",
shell=True,
# stdout=subprocess.DEVNULL,
# stderr=subprocess.DEVNULL,
)
if ret == 0:
image_name = get_forge_image_name(job_name, context)
print(
f"Job {job_name} failed to be scheduled because there was an error pulling the image: {image_name}"
)
subprocess.call(
["kubectl", f"--context={context}", "delete", "job", job_name]
)
return 1
print(
f"Waiting for {job_name} to be scheduled. Current phase: {phase}")
time.sleep(1)
print(f"Failed to schedule job: {job_name}")
return 1
# init the kube context for each available cluster
def kube_init_context(workspace=None):
try:
subprocess.run(
[
"aws",
"eks",
"--region",
"us-west-2",
"describe-cluster",
"--name",
f"aptos-{FORGE_K8S_CLUSTERS[0]}",
],
stdout=subprocess.DEVNULL,
)
except subprocess.CalledProcessError:
print("Failed to access EKS, try awsmfa?")
raise
# preserve the kube context by updating kubeconfig for the specified workspace
clusters = FORGE_K8S_CLUSTERS + \
[workspace] if workspace else FORGE_K8S_CLUSTERS
for cluster in clusters:
subprocess.run(
[
"aws",
"eks",
"--region",
"us-west-2",
"update-kubeconfig",
"--name",
f"aptos-{cluster}",
]
)
# ================ Internal helpers ================
def get_forge_pods_by_phase(context, phase):
try:
return json.loads(
subprocess.check_output(
[
"kubectl",
"-o=json",
f"--context={context}",
"get",
"pods",
"--selector=app.kubernetes.io/name=forge",
f"--field-selector=status.phase=={phase}",
],
stderr=subprocess.STDOUT,
encoding="UTF-8",
)
)
except subprocess.CalledProcessError as e:
print(e.output)
def get_monitoring_pod(context):
return json.loads(
subprocess.check_output(
[
"kubectl",
"-o=json",
f"--context={context}",
"get",
"pods",
"--selector=app.kubernetes.io/name=monitoring",
],
stderr=subprocess.DEVNULL,
encoding="UTF-8",
)
)
def get_forge_image_name(job_name, context):
return get_forge_job_jsonpath(
job_name, context, "{.items[0].spec.containers[0].image}"
)
def get_forge_job_phase(job_name, context):
return get_forge_job_jsonpath(job_name, context, "{.items[0].status.phase}")
def get_forge_job_jsonpath(job_name, context, jsonpath):
return subprocess.check_output(
[
"kubectl",
f"--context={context}",
"get",
"pod",
f"--selector=job-name={job_name}",
"-o",
f"jsonpath={jsonpath}",
],
encoding="UTF-8",
)
def helm_s3_init(workspace):
bucket_url = WORKSPACE_CHART_BUCKETS[workspace]
subprocess.run(
f"helm plugin install https://github.com/hypnoglow/helm-s3.git || true",
shell=True,
check=True
)
subprocess.run(
["helm", "s3", "init", f"s3://{bucket_url}/charts"],
check=True
)
subprocess.run(
["helm", "repo", "add",
f"testnet-{workspace}", f"s3://{bucket_url}/charts"],
check=True
)
def helm_package_push(chart_path, chart_name, workspace, dir):
subprocess.run(
[
"helm",
"package",
chart_path,
"-d",
dir,
"--app-version",
"1.0.0",
"--version",
"1.0.0"
],
check=True
)
subprocess.run(
f"helm s3 push --force {dir}/{chart_name}-*.tgz testnet-{workspace}",
shell=True,
check=True,
)
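# Illustrative call (sketch only; the temp dir is an assumption): for workspace
# "forge-dev",
#   helm_package_push("terraform/helm/fullnode", "aptos-fullnode", "forge-dev", "/tmp/charts")
# packages the chart at chart/app version 1.0.0 into /tmp/charts and force-pushes
# the resulting .tgz to the testnet-forge-dev helm-s3 repo added by helm_s3_init.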
def push_helm_charts(workspace):
helm_s3_init(workspace)
tempdir = tempfile.mkdtemp()
helm_package_push("terraform/testnet/testnet",
"testnet", workspace, tempdir)
helm_package_push("terraform/helm/validator",
"aptos-validator", workspace, tempdir)
helm_package_push("terraform/helm/fullnode",
"aptos-fullnode", workspace, tempdir)
| 31.132565
| 162
| 0.552069
|
660bcbe5329eacb36a97e88e2c41bc8389190625
| 8,758
|
py
|
Python
|
scripts/libxsmm_specialized.py
|
mjanderson09/libxsmm
|
d8359effcbe456e1e7625c0b9b0cf5533ee0e370
|
[
"BSD-3-Clause"
] | 1
|
2018-06-29T03:07:54.000Z
|
2018-06-29T03:07:54.000Z
|
scripts/libxsmm_specialized.py
|
qq332982511/libxsmm
|
e376c569252c042193ad2215857c35cfb598ec45
|
[
"BSD-3-Clause"
] | 4
|
2018-03-19T18:18:22.000Z
|
2018-07-05T05:09:09.000Z
|
scripts/libxsmm_specialized.py
|
qq332982511/libxsmm
|
e376c569252c042193ad2215857c35cfb598ec45
|
[
"BSD-3-Clause"
] | 5
|
2017-06-28T21:48:18.000Z
|
2018-04-10T04:07:38.000Z
|
#!/usr/bin/env python
###############################################################################
# Copyright (c) 2014-2018, Intel Corporation #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# 1. Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# 2. Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in the #
# documentation and/or other materials provided with the distribution. #
# 3. Neither the name of the copyright holder nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED #
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR #
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING #
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS #
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
###############################################################################
# Hans Pabst (Intel Corp.)
###############################################################################
import sys
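# Usage sketch inferred from the argument parsing below (not documented upstream):
#   libxsmm_specialized.py <precision> <m> <n> <k> <prefetch>
# precision selects the generated kernels (1: single precision only, 2: double
# precision only, any other value: both); prefetch > 0 adds the pa/pb/pc prefetch
# pointer arguments, e.g.: python libxsmm_specialized.py 0 32 32 32 1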
if __name__ == "__main__":
argc = len(sys.argv)
if (6 == argc):
precision = int(sys.argv[1])
m, n, k = int(sys.argv[2]), int(sys.argv[3]), int(sys.argv[4])
prefetch = int(sys.argv[5])
mnkstr = str(m) + "_" + str(n) + "_" + str(k)
optional = ["", ", ..."][0 > prefetch]
signature = ["a, b, c", "a, b, c, pa, pb, pc"][0 < prefetch]
if (2 != precision):
pfsig = [optional + ")", "\n"
", const float* pa"
", const float* pb"
", const float* pc)"][0 < prefetch]
            print("")
            print("")
print("LIBXSMM_API void libxsmm_smm_" + mnkstr +
"(const float* a, const float* b, float* c" + pfsig)
print("{")
print("#if defined(__AVX512F__) && "
"defined(LIBXSMM_GENTARGET_skx_sp) && \\")
print(" !(defined(__AVX512PF__) && defined(__AVX512ER__))")
print(" libxsmm_smm_" + mnkstr + "_skx(" + signature + ");")
print("#elif defined(__AVX512F__) && "
"defined(LIBXSMM_GENTARGET_knl_sp)")
print(" libxsmm_smm_" + mnkstr + "_knl(" + signature + ");")
print("#elif defined(__AVX2__) && "
"defined(LIBXSMM_GENTARGET_hsw_sp)")
print(" libxsmm_smm_" + mnkstr + "_hsw(" + signature + ");")
print("#elif defined(__AVX__) && "
"defined(LIBXSMM_GENTARGET_snb_sp)")
print(" libxsmm_smm_" + mnkstr + "_snb(" + signature + ");")
print("#elif defined(__SSE3__) && "
"defined(LIBXSMM_GENTARGET_wsm_sp)")
print(" libxsmm_smm_" + mnkstr + "_wsm(" + signature + ");")
print("#elif defined(__MIC__) && "
"defined(LIBXSMM_GENTARGET_knc_sp)")
print(" libxsmm_smm_" + mnkstr + "_knc(" + signature + ");")
print("#else")
print(" const char transa = (0 == (LIBXSMM_GEMM_FLAG_TRANS_A & "
"LIBXSMM_FLAGS) ? 'N' : 'T');")
print(" const char transb = (0 == (LIBXSMM_GEMM_FLAG_TRANS_B & "
"LIBXSMM_FLAGS) ? 'N' : 'T');")
print(" const float alpha = LIBXSMM_ALPHA, beta = LIBXSMM_BETA;")
print(" const libxsmm_blasint "
"m = " + str(m) + ", "
"n = " + str(n) + ", "
"k = " + str(k) + ";")
if (0 < prefetch):
print(" LIBXSMM_UNUSED(pa);"
" LIBXSMM_UNUSED(pb);"
" LIBXSMM_UNUSED(pc);")
print(" LIBXSMM_INLINE_XGEMM(float, float, &transa, &transb,"
" &m, &n, &k, &alpha, a, &m, b, &k, &beta, c, &m);")
print("#endif")
print("}")
            print("")
            print("")
print("LIBXSMM_API void LIBXSMM_FSYMBOL(libxsmm_smm_" + mnkstr +
")(const float* a, const float* b, float* c" +
pfsig + ";")
print("LIBXSMM_API void LIBXSMM_FSYMBOL(libxsmm_smm_" +
mnkstr + ")(const float* a, const float* b, float* c" +
pfsig)
print("{")
print(" libxsmm_smm_" + mnkstr + "(" + signature + ");")
print("}")
if (1 != precision):
pfsig = [optional + ")", "\n"
", const double* pa"
", const double* pb"
", const double* pc)"][0 < prefetch]
            print("")
            print("")
print("LIBXSMM_API void libxsmm_dmm_" + mnkstr +
"(const double* a, const double* b, double* c" + pfsig)
print("{")
print("#if defined(__AVX512F__) && "
"defined(LIBXSMM_GENTARGET_skx_dp) && \\")
print(" !(defined(__AVX512PF__) && defined(__AVX512ER__))")
print(" libxsmm_dmm_" + mnkstr + "_skx(" + signature + ");")
print("#elif defined(__AVX512F__) && "
"defined(LIBXSMM_GENTARGET_knl_dp)")
print(" libxsmm_dmm_" + mnkstr + "_knl(" + signature + ");")
print("#elif defined(__AVX2__) && "
"defined(LIBXSMM_GENTARGET_hsw_dp)")
print(" libxsmm_dmm_" + mnkstr + "_hsw(" + signature + ");")
print("#elif defined(__AVX__) && "
"defined(LIBXSMM_GENTARGET_snb_dp)")
print(" libxsmm_dmm_" + mnkstr + "_snb(" + signature + ");")
print("#elif defined(__SSE3__) && "
"defined(LIBXSMM_GENTARGET_wsm_dp)")
print(" libxsmm_dmm_" + mnkstr + "_wsm(" + signature + ");")
print("#elif defined(__MIC__) && "
"defined(LIBXSMM_GENTARGET_knc_dp)")
print(" libxsmm_dmm_" + mnkstr + "_knc(" + signature + ");")
print("#else")
print(" const char transa = (0 == (LIBXSMM_GEMM_FLAG_TRANS_A & "
"LIBXSMM_FLAGS) ? 'N' : 'T');")
print(" const char transb = (0 == (LIBXSMM_GEMM_FLAG_TRANS_B & "
"LIBXSMM_FLAGS) ? 'N' : 'T');")
print(" const double alpha = LIBXSMM_ALPHA, beta = LIBXSMM_BETA;")
print(" const libxsmm_blasint "
"m = " + str(m) + ", "
"n = " + str(n) + ", "
"k = " + str(k) + ";")
if (0 < prefetch):
print(" LIBXSMM_UNUSED(pa);"
" LIBXSMM_UNUSED(pb);"
" LIBXSMM_UNUSED(pc);")
print(" LIBXSMM_INLINE_XGEMM(double, double, &transa, &transb,"
" &m, &n, &k, &alpha, a, &m, b, &k, &beta, c, &m);")
print("#endif")
print("}")
            print("")
            print("")
print("LIBXSMM_API void LIBXSMM_FSYMBOL(libxsmm_dmm_" + mnkstr +
")(const double* a, const double* b, double* c" +
pfsig + ";")
print("LIBXSMM_API void LIBXSMM_FSYMBOL(libxsmm_dmm_" +
mnkstr + ")(const double* a, const double* b, double* c" +
pfsig)
print("{")
print(" libxsmm_dmm_" + mnkstr + "(" + signature + ");")
print("}")
else:
sys.tracebacklimit = 0
raise ValueError(sys.argv[0] + ": wrong number of arguments!")
| 53.402439
| 79
| 0.479333
|
850e287d3b7d0f9410f9b2bf9e18b2cdfbae003c
| 1,391
|
py
|
Python
|
examples/get_posts.py
|
michaelwalkerfl/facebook-sdk
|
8f63102079a30e7bd8ed51a48bccdf7aa0858452
|
[
"Apache-2.0"
] | 1,330
|
2016-04-04T06:24:55.000Z
|
2022-03-27T20:12:29.000Z
|
examples/get_posts.py
|
michaelwalkerfl/facebook-sdk
|
8f63102079a30e7bd8ed51a48bccdf7aa0858452
|
[
"Apache-2.0"
] | 188
|
2016-04-03T03:49:35.000Z
|
2022-03-09T04:10:24.000Z
|
examples/get_posts.py
|
michaelwalkerfl/facebook-sdk
|
8f63102079a30e7bd8ed51a48bccdf7aa0858452
|
[
"Apache-2.0"
] | 619
|
2016-04-02T23:12:27.000Z
|
2022-03-04T14:53:20.000Z
|
"""
A simple example script to get all posts on a user's timeline.
Originally created by Mitchell Stewart.
<https://gist.github.com/mylsb/10294040>
"""
import facebook
import requests
def some_action(post):
"""Here you might want to do something with each post. E.g. grab the
post's message (post['message']) or the post's picture (post['picture']).
In this implementation we just print the post's created time.
"""
print(post["created_time"])
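# An alternative action, as a sketch (assumes the post carries a 'message' field,
# which not every post does, hence the .get() default):
def print_message(post):
    print(post.get("message", "<no message>"))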
# You'll need an access token here to do anything. You can get a temporary one
# here: https://developers.facebook.com/tools/explorer/
access_token = ""
# Look at Bill Gates's profile for this example by using his Facebook id.
user = "BillGates"
graph = facebook.GraphAPI(access_token)
profile = graph.get_object(user)
posts = graph.get_connections(profile["id"], "posts")
# Wrap this block in a while loop so we can keep paginating requests until
# finished.
while True:
try:
# Perform some action on each post in the collection we receive from
# Facebook.
[some_action(post=post) for post in posts["data"]]
# Attempt to make a request to the next page of data, if it exists.
posts = requests.get(posts["paging"]["next"]).json()
except KeyError:
# When there are no more pages (['paging']['next']), break from the
# loop and end the script.
break
| 33.926829
| 79
| 0.691589
|
11909fca0125d3cc06632973ca2dab517aa3cee8
| 7,556
|
py
|
Python
|
tools/visualisations/model_and_dataloader_visualiser.py
|
kiyoon/PyVideoAI
|
c4d3ba7a69723aeae7da48245989ae11cbdb1f8b
|
[
"MIT"
] | 22
|
2021-06-01T07:40:01.000Z
|
2022-03-14T07:09:01.000Z
|
tools/visualisations/model_and_dataloader_visualiser.py
|
kiyoon/PyVideoAI
|
c4d3ba7a69723aeae7da48245989ae11cbdb1f8b
|
[
"MIT"
] | null | null | null |
tools/visualisations/model_and_dataloader_visualiser.py
|
kiyoon/PyVideoAI
|
c4d3ba7a69723aeae7da48245989ae11cbdb1f8b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
'''
cfg needs dataloader_shape_to_model_input_shape(inputs) function
model_cfg needs model_input_shape_to_NTHWC(inputs) function
model_cfg needs input_std, input_mean, input_bgr, input_normalise
'''
import argparse
import logging
import numpy as np
import os
import sys
import coloredlogs, verboselogs
logger = verboselogs.VerboseLogger(__name__)
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
import torchvision
from experiment_utils.argparse_utils import add_exp_arguments
from experiment_utils.telegram_post import send_numpy_video_as_gif, send_numpy_photo
from experiment_utils import ExperimentBuilder
import dataset_configs, model_configs, exp_configs
import time
from pyvideoai import config
from pyvideoai.utils import misc
import git
def main():
parser = argparse.ArgumentParser(
description='Randomly sample some training videos and send to Telegram/TensorBoard as GIF. Run with no GPUs (CUDA_VISIBLE_DEVICES=)')
add_exp_arguments(parser,
root_default=config.DEFAULT_EXPERIMENT_ROOT, dataset_default='something_v1', model_default='trn_resnet50', name_default='largejit_nosched',
dataset_channel_choices=dataset_configs.available_channels, model_channel_choices=model_configs.available_channels, exp_channel_choices=exp_configs.available_channels)
parser.add_argument("--seed", type=int, default=12, help="Random seed for np, torch, torch.cuda, DALI. Actual seed will be seed+rank.")
parser.add_argument("--telegram", action='store_true', help="Send to Telegram instead of TensorBoard (default: TensorBoard)")
parser.add_argument("-B", "--telegram_bot_idx", type=int, default=0, help="Which Telegram bot to use defined in key.ini?")
parser.add_argument("-w", "--dataloader_num_workers", type=int, default=4, help="num_workers for PyTorch Dataset loader.")
parser.add_argument("-s", "--split", type=str, default='train', choices=['train', 'val', 'multicropval'], help="Which split to use")
parser.add_argument("-b", "--batch_size", type=int, help="How many videos to visualise")
parser.add_argument("--jpgs", action='store_true', help="Send as JPGs instead of GIFs")
args = parser.parse_args()
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
coloredlogs.install(fmt='%(name)s: %(lineno)4d - %(levelname)s - %(message)s', level='INFO')
logging.getLogger('slowfast.utils.checkpoint').setLevel(logging.WARNING)
cfg = exp_configs.load_cfg(args.dataset, args.model, args.experiment_name, args.dataset_channel, args.model_channel, args.experiment_channel)
metrics = cfg.dataset_cfg.task.get_metrics(cfg)
summary_fieldnames, summary_fieldtypes = ExperimentBuilder.return_fields_from_metrics(metrics)
exp = ExperimentBuilder(args.experiment_root, args.dataset, args.model, args.experiment_name, summary_fieldnames = summary_fieldnames, summary_fieldtypes = summary_fieldtypes, version = -1, telegram_key_ini = config.KEY_INI_PATH, telegram_bot_idx = args.telegram_bot_idx)
exp.make_dirs_for_training()
try:
if not args.telegram:
writer = SummaryWriter(os.path.join(exp.tensorboard_runs_dir, 'train_model_and_data'), comment='train_model_and_data')
# Network
# Construct the model
model = cfg.load_model()
if hasattr(cfg, 'get_optim_policies'):
policies = cfg.get_optim_policies(model)
elif hasattr(cfg.model_cfg, 'get_optim_policies'):
policies = cfg.model_cfg.get_optim_policies(model)
else:
policies = model.parameters()
misc.log_model_info(model)
#criterion = cfg.dataset_cfg.task.get_criterion(cfg)
#optimiser = cfg.optimiser(policies)
# Construct dataloader
dataset = cfg.get_torch_dataset(args.split)
data_unpack_func = cfg.get_data_unpack_func(args.split)
if args.batch_size is None:
batch_size = cfg.batch_size() if callable(cfg.batch_size) else cfg.batch_size
else:
batch_size = args.batch_size
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True if args.split=='train' else False, sampler=None, num_workers=args.dataloader_num_workers, pin_memory=True, drop_last=False)
class_keys = cfg.dataset_cfg.class_keys
if args.telegram:
message = 'Video from dataloader: reshaped to fit the model shape, and reshaped back to video'
logger.info(message)
exp.tg_send_text_with_expname(message)
data = next(iter(dataloader))
inputs, uids, labels, _ = data_unpack_func(data)
inputs = cfg.get_input_reshape_func(args.split)(inputs)
if not args.telegram:
# Write model graph
# if inputs is a list(or tuple), add_graph will consider it as args and unpack the list.
# So we wrap it as a tuple
writer.add_graph(model, (inputs,))
inputs = cfg.model_cfg.model_input_shape_to_NTHWC(inputs)
inputs *= torch.tensor(cfg.model_cfg.input_std)
inputs += torch.tensor(cfg.model_cfg.input_mean)
if cfg.model_cfg.input_normalise:
inputs *= 255
        if cfg.model_cfg.input_bgr:
            # torch tensors do not support negative-step slicing; flip the channel axis instead
            inputs = torch.flip(inputs, dims=[-1])
inputs = inputs.to(torch.uint8)
uids = uids.numpy()
labels = labels.numpy()
for idx, (video, uid, label) in enumerate(zip(inputs, uids, labels)):
# T, H, W, C
class_key = class_keys[label]
caption = f"uid: {uid}, label: {label}, class_key: {class_key}, video shape: {video.shape}"
logger.info(caption)
if args.jpgs:
if args.telegram:
for jpg in video:
# Telegram
send_numpy_photo(exp.tg_token, exp.tg_chat_id, jpg.numpy(), caption=caption)
caption=None # only send caption on the first frame
else:
# Tensorboard
# Add in grid format
writer.add_images('dataloader_imgs', video, global_step = idx, dataformats='NHWC')
writer.add_text('dataloader_imgs', caption, global_step=idx)
# Also, add in series format
tag = f'dataloader_imgs-uid{uid}'
for frame_idx, jpg in enumerate(video):
writer.add_image(tag, jpg, global_step=frame_idx, dataformats='HWC')
else:
if args.telegram:
# Telegram
send_numpy_video_as_gif(exp.tg_token, exp.tg_chat_id, video.numpy(), caption=caption)
else:
# Tensorboard
# T, H, W, C -> 1, T, C, H, W
video_tensorboard = video.unsqueeze(0).permute((0,1,4,2,3))
writer.add_video('dataloader_gif', video_tensorboard, global_step=idx)
writer.add_text('dataloader_gif', caption, global_step=idx)
if args.telegram:
logger.success('Finished. Visualisation sent on Telegram')
else:
logger.success('Finished. Visualisation sent on TensorBoard')
except Exception as e:
logger.exception("Exception occurred")
exp.tg_send_text_with_expname('Exception occurred\n\n' + repr(e))
if __name__ == '__main__':
main()
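# Example invocation sketch (relies on the dataset/model/experiment defaults that
# add_exp_arguments registers; only flags defined in this file are shown):
#   CUDA_VISIBLE_DEVICES= python model_and_dataloader_visualiser.py -s train -b 4 --telegram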
| 44.710059
| 275
| 0.667284
|
42dc000c050ddde65000aa1b170c42b9f6cb8c0b
| 309
|
py
|
Python
|
setup.py
|
nyukhalov/srt-text
|
12c4599da053394f2799e922ddcddf543fdab39d
|
[
"MIT"
] | 3
|
2019-04-25T20:30:07.000Z
|
2021-12-20T23:27:31.000Z
|
setup.py
|
nyukhalov/srt-text
|
12c4599da053394f2799e922ddcddf543fdab39d
|
[
"MIT"
] | null | null | null |
setup.py
|
nyukhalov/srt-text
|
12c4599da053394f2799e922ddcddf543fdab39d
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(
name='srt-text',
version='1.0.1',
url='https://github.com/nyukhalov/srt-text',
author='Roman Niukhalov',
license='MIT',
packages=['srttext'],
entry_points={
'console_scripts': [
'srt-text=srttext.app:main'
]
}
)
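# Usage sketch (assumes standard setuptools console_scripts behaviour): after
# `pip install .`, an `srt-text` command becomes available that dispatches to
# srttext.app:main; its arguments are defined in that module, not here.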
| 19.3125
| 48
| 0.572816
|
f25a84763f304abcf0d21174fb73771320ed5478
| 24,910
|
py
|
Python
|
Oasis1115.py
|
shimdol22/2021-2-OSSProj-TEAM-MMS-6
|
2d157432d8c6f042caebd8935c7851bc40f14d98
|
[
"MIT"
] | null | null | null |
Oasis1115.py
|
shimdol22/2021-2-OSSProj-TEAM-MMS-6
|
2d157432d8c6f042caebd8935c7851bc40f14d98
|
[
"MIT"
] | null | null | null |
Oasis1115.py
|
shimdol22/2021-2-OSSProj-TEAM-MMS-6
|
2d157432d8c6f042caebd8935c7851bc40f14d98
|
[
"MIT"
] | null | null | null |
from typing import get_origin
import pygame
import random
import time
from datetime import datetime
from pygame.constants import VIDEORESIZE
from math import ceil
# sx, sy => the object's x and y position
# x, y => the plane's width and height
# 1. Initialize the game
# 2. Set game window options
# 2-1 Fixed screen size
# size = [700,800]
# screen = pygame.display.set_mode(size)
# 2-2 Screen size matched to the player's computer environment
infoObject = pygame.display.Info()
# 896 * 1020
size = [infoObject.current_w,infoObject.current_h]
screen = pygame.display.set_mode(size,pygame.RESIZABLE)
class Move:
    # Left movement key
    left_go = False
    # Right movement key
    right_go = False
    # Up movement key
    up_go = False
    # Down movement key
    down_go = False
    # Missile fire key
    space_go = False
    # FPS of the game
    FPS = 60
    # Flag marking that object positions/sizes have changed (window resize)
    position = False
    # Boundary value used to decide whether objects have left the screen
    boundary = 70
class Color:
    # RGB black
    black = (0,0,0)
    # RGB white
    white = (255,255,255)
    red = (255,0,0)
    purple = (100,40,225)
    yellow = (255,255,0)
class Size:
    # x,y size of the player craft
a_xsize = size[0]//9
a_ysize = size[1]//8
    # x,y size of a missile
m_xsize = size[0]//30
m_ysize = size[1]//20
    # Size limits (minimum, maximum)
min_size = ceil((sum(size)//50)*2)
max_size = ceil((sum(size)//30)*2)
block_max_size = size[0]//5
    # Numbers used to split values into halves / thirds
half_split_num = 2
third_split_num = 3
three_five = 2/5
m_rand_size = 10
x_resize_rate = 1
y_resize_rate = 1
err_x = 400
err_y = 500
standard_size = 30
rand_min_size = 1
x = 0
y = 1
restart_middle = 290
class Speed:
    # Missile speed
    m_speed = 0  # initial value
    m_initiate_speed_30 = 30
    m_initiate_speed_25 = 25
    m_initiate_speed_15 = 15
    # Maximum missile speed
    m_max_speed = 6
    # Craft speed
    s_speed =5
    # Missile firing-rate control
    k=0
    create_rate_r = 0.995
    create_rate_c = 0.98
    # Missile speed initial value, fixed at 15
    speed_initializing_15 = 15
    # Initial speeds
    a_init_speed = 2
    m_init_speed = 2
    b_init_speed = 2
    speed_end_point = 0
class Util:
    # List where missile objects are stored when fired
m_list = []
    # List where enemy objects are stored when they spawn
a_list = []
    # List where obstacle objects are stored
block_list=[]
    # Number of enemies hit with missiles
kill = 0
    # Number of enemies missed (escaped off-screen without being killed)
loss = 0
    # Current score earned so far
score = 0
# Game Over
GO = 0
    # Load the high score
f = open('Oasisscore.txt', 'r')
x = f.read()
highscore = int(x)
score_10 = 10
score_100 = 100
score_200 = 200
score_300 = 300
score_400 = 400
m_loc_10 = 10
a_loc_10 = 10
start_loc = (0,0)
kill_score_cal = 5
loss_score_cal = 8
missile_rate = 1
obj_num = 1
time_sleep = 1
end_point = 0
class FontSize:
size_start = 20
lensize_start = 50
size_kill_loss = sum(size) // 85
size_gameover = sum(size) // 40
size_restart = 15
lensize_gameover = 65
len_for_time = size[0] // 6
len_for_time_ysize = 5
loc_kill_loss = (10,5)
class Sound:
m_sound = 0.1
crash1_sound = 0.1
crash2_sound = 0.1
game_over_sound = 0.3
background_sound = 0.1
class Resizing:
a_xsize = 9
a_ysize = 8
m_xsize = 30
m_ysize = 20
min_size_rel = 50
max_size_rel = 30
min_size = 2
max_size = 2
block_max_size = 5
size_kill_loss = 85
size_gameover = 47
len_for_time = 6
class obj:
def __init__(self):
self.x =0
self.y=0
self.move =0
def put_img(self,address):
        # when the file is a png,
        # it has to go through convert_alpha() because pngs need converting
if address[-3:] == "png":
self.img = pygame.image.load(address).convert_alpha()
else:
self.img = pygame.image.load(address)
self.sx, self.sy = self.img.get_size()
    # Resize the object's image
def change_size(self,sx,sy):
        self.img = pygame.transform.scale(self.img,(sx,sy)) # rescale the image
self.sx, self.sy = self.img.get_size()
def show(self):
screen.blit(self.img,(self.x,self.y))
pygame.init()
title = "My Game"
pygame.display.set_caption(title) # window title bar option
# 3. Settings needed inside the game
clock = pygame.time.Clock()
# pygame background music
pygame.mixer.init()
pygame.mixer.music.load("SourceCode/Sound/ariant.mp3")
# Missile sound effects
missile1 = pygame.mixer.Sound("SourceCode/Sound/weapon-sound8.ogg")
missile1.set_volume(Sound.m_sound)
missile2 = pygame.mixer.Sound("SourceCode/Sound/weapon-sound9 .ogg")
missile2.set_volume(Sound.m_sound)
missile3 = pygame.mixer.Sound("SourceCode/Sound/weapon-sound16.ogg")
missile3.set_volume(Sound.m_sound)
# Sound effect when an enemy is destroyed
monster1 = pygame.mixer.Sound("SourceCode/Sound/monster-sound7.ogg")
monster1.set_volume(Sound.crash1_sound)
# Sound effect when an enemy collides with the craft
boom1 = pygame.mixer.Sound("SourceCode/Sound/weapon-sound9 .ogg")
boom1.set_volume(Sound.crash2_sound)
# Game-over sound effect
game_over = pygame.mixer.Sound("SourceCode/Sound/gameover.wav")
game_over.set_volume(Sound.game_over_sound)
# Function that checks whether a collision has happened!
# Its return value is of boolean type
# Judges that a collision happened based on rectangle shapes
def crash(a,b):
    # a collision happens when b is inside this range
if (a.x-b.sx <=b.x) and (b.x<=a.x + a.sx):
if(a.y-b.sy <= b.y) and (b.y <= a.y+a.sy):
return True
else:
return False
else:
return False
# Unlike the existing check, build a collision function that uses the x,y values of every edge so a collision is only reported when the objects actually overlap
# Fixes the problem of the rectangle-based check reporting a collision even when the sprites did not actually touch
def crash2(a,b):
a_mask = pygame.mask.from_surface(a.img)
b_mask = pygame.mask.from_surface(b.img)
offset = (int(b.x - a.x), int(b.y - a.y))
collision = a_mask.overlap(b_mask, offset)
if collision:
return True
else:
return False
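# Minimal self-contained sketch of the mask test crash2 relies on (hypothetical
# 10x10 opaque surfaces, not game assets): an overlapping offset returns a hit
# point, a fully separated offset returns None.
def _crash2_mask_demo():
    surf = pygame.Surface((10, 10), pygame.SRCALPHA)
    surf.fill(Color.white)
    mask = pygame.mask.from_surface(surf)
    assert mask.overlap(mask, (5, 5)) is not None   # partial overlap -> hit point
    assert mask.overlap(mask, (20, 0)) is None      # no overlapping pixels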
def cal_score(kill,loss):
Util.score = (Util.kill * Util.kill_score_cal - Util.loss * Util.loss_score_cal)
def change_size_rate(size):
Size.a_xsize = size[Size.x] // Resizing.a_xsize
Size.a_ysize = size[Size.y] // Resizing.a_ysize
Size.m_xsize = size[Size.x] // Resizing.m_xsize
Size.m_ysize = size[Size.y] // Resizing.m_ysize
Size.min_size = ceil((sum(size) // Resizing.min_size_rel ) * Resizing.min_size)
Size.max_size = ceil((sum(size) // Resizing.max_size_rel ) * Resizing.max_size)
Size.block_max_size = size[Size.x] // Resizing.block_max_size
FontSize.size_kill_loss = sum(size) // Resizing.size_kill_loss
FontSize.size_gameover = sum(size) // Resizing.size_gameover
FontSize.len_for_time = size[Size.x] // Resizing.len_for_time
# # 오른쪽 끝 선에서 크기를 줄일 시 객체가 화면 밖으로 못나가게 제한 함
# if ss.x + ss.sx > size[0]:
# ss.x = size[0]- ss.sx
# # 바닥 선에서 크기를 줄일 시 객체가 화면 밖으로 못나가게 제한 함
# if ss.y + ss.sy >size[1]:
# ss.y = size[1] - ss.sy
# 비행체 객체의 사이즈 변경
try:
ss.put_img("SourceCode/Image/DesrtCar.png")
ss.change_size(Size.a_xsize, Size.a_ysize)
ss.x*=Size.x_resize_rate
ss.y*=Size.y_resize_rate
except :
pass
try:
# 지금 현재 미사일을 발생시키지 않는 상태 일 수도 있기 때문에 try, except구문 사용
for i in Util.m_list:
i.change_size(int(i.sx*Size.x_resize_rate),int(i.sy*Size.y_resize_rate))
except :
pass
# 선인장 장애물의 resizing
# 선인장이 나타나지 않았을때 resizing 했을 수도 있으므로 try except로 error 잡아줌
try:
for i in Util.block_list:
i.change_size(int(i.sx*Size.x_resize_rate),int(i.sy*Size.y_resize_rate))
i.x*=Size.x_resize_rate
i.y*=Size.y_resize_rate
except :
pass
try:
for i in Util.a_list:
i.change_size(ceil(i.sx*Size.x_resize_rate),ceil(i.sy*Size.y_resize_rate))
if a.sx > Size.err_x or a.sy > Size.err_y:
i.change_size(Size.standard_size,Size.standard_size)
# print(a.sx,a.sy)
i.x*=Size.x_resize_rate
i.y*=Size.y_resize_rate
except :
pass
# FPS도 리사이징이 됨에따라 변화시켜주고 속도제어
Move.FPS = int(Move.FPS*(Size.x_resize_rate+Size.y_resize_rate)/Size.half_split_num)
pygame.display.flip()
# 4-0 Game-start waiting screen (small event)
# SB=0
# while SB==0:
# clock.tick(Move.FPS)
# for event in pygame.event.get(): # 이벤트가 있다면
# if event.type == pygame.KEYDOWN: # 그 이벤트가 어떤 버튼을 누르는 것이라면
# if event.key == pygame.K_SPACE: # 그 버튼이 스페이스 버튼이라면?
# SB=1
# elif event.type == pygame.VIDEORESIZE:
# width, height = event.w, event.h
# size =[width,height]
# window = pygame.display.set_mode(size, pygame.RESIZABLE)
# screen.fill(Color.black)
# font = pygame.font.Font("SourceCode/Font/DXHanlgrumStd-Regular.otf",FontSize.size_start)
# text_kill = font.render("PRESS \"SPACE\" KEY TO START THE GAME",True,Color.white) # 폰트가지고 랜더링 하는데 표시할 내용, True는 글자가 잘 안깨지게 하는 거임 걍 켜두기, 글자의 색깔
# screen.blit(text_kill,(size[0]//Size.half_split_num-(size[0]//Size.half_split_num)//Size.half_split_num,round((size[1]/Size.half_split_num)-FontSize.lensize_start))) # 이미지화 한 텍스트라 이미지를 보여준다고 생각하면 됨
# pygame.display.flip() # 그려왔던게 화면에 업데이트가 됨
# 객체 생성
ss = obj()
# 우리들이 움직여야할 물체
ss.put_img("SourceCode/Image/DesrtCar.png")
# 그림(비행체)의 크기를 조정
ss.change_size(Size.a_xsize,Size.a_ysize)
# 비행체의 위치를 하단의 중앙으로 바꾸기위해!
# x값의 절반에서 피사체의 길이의 절반만큼 왼쪽으로 이동해야 정확히 가운데임
ss.x = round(size[Size.x]/Size.half_split_num - ss.sx/Size.half_split_num)
# 맨 밑에서 피사체의 y길이만큼 위로 올라와야함
ss.y = size[Size.y] - ss.sy
# 비행체가 움직이는 속도를 결정함
ss.move = Speed.s_speed
# 게임의 배경화면 설정
background_image_desert = pygame.image.load("SourceCode/Image/DESERT.jpeg")
background_image_desert = pygame.transform.scale(background_image_desert,size) # 그림의 크기를 조정한다.
# 4. Main event loop
# Play the desert map background music
pygame.mixer.music.play(-1)
pygame.mixer.music.set_volume(Sound.background_sound)
# Store the time when the code was first run
start_time = datetime.now()
SB = False
while not SB:
    # 4-1. FPS setting
    # Set the FPS to 60
clock.tick(Move.FPS)
    # 4-2. Detect the various inputs
for event in pygame.event.get(): # 어떤 동작을 했을때 그 동작을 받아옴
if event.type == pygame.QUIT: # x버튼을 눌렀을때!
SB = True # SB 가 1이되면 while문을 빠져나오게 된다!
if event.type == pygame.KEYDOWN: # 어떤 키를 눌렀을때!(키보드가 눌렸을 때)
# 키를 누르고있는 상태 : True
# 키를 떼고있는 상태 : False
if event.key == pygame.K_LEFT: # 만약 누른 키가 왼쪽 방향키 라면?
Move.left_go = True
if event.key == pygame.K_RIGHT: # 만약 누른 키가 오른쪽 방향키 라면?
Move.right_go = True
if event.key == pygame.K_SPACE: # 만약 누른키가 space키 라면?
Move.space_go = True
# 속도를 1/6으로 낮췄는데 누를때마다도 한번씩 발사하고싶어서 누르면 k=0으로 초기화시킴 -> while문 조건 통과하기위해
# k=0
if event.key == pygame.K_UP :
Move.up_go = True
if event.key == pygame.K_DOWN:
Move.down_go = True
elif event.type == pygame.KEYUP: # 키를 누르는것을 뗐을때!
if event.key == pygame.K_LEFT: # 키를 뗐다면 그 키가 왼쪽 방향키 인가?
Move.left_go = False
elif event.key == pygame.K_RIGHT: # 키를 뗐다면 그 키가 오른쪽 방향키 인가?
Move.right_go = False
elif event.key == pygame.K_SPACE: # 키를 뗐다면 그 키가 스페이스 키인가?
Move.space_go = False
elif event.key == pygame.K_UP:
Move.up_go = False
elif event.key == pygame.K_DOWN:
Move.down_go = False
elif event.type == pygame.VIDEORESIZE:
width, height = event.w, event.h
Size.x_resize_rate = width / size[Size.x]
Size.y_resize_rate = height / size[Size.y]
size =[width,height]
window = pygame.display.set_mode(size, pygame.RESIZABLE)
Move.position = True
# 마우스로 인해 화면이 작아지면 다른 객체들의 사이즈도 전부 변경
if Move.position is True:
change_size_rate(size)
    # 4-3. Changes based on input and elapsed time
now_time = datetime.now()
# 코드실행 시점에서 현재시간과릐 차이를 초로 바꿈
delta_time = (now_time - start_time).total_seconds()
# 버튼을 꾹 길게 눌렀을때 움직이게 하기
# 왼쪽 방향키를 눌렀을 때
if Move.left_go == True:
ss.x -= ss.move
# 물체가 왼쪽 끝 경계값으로 이동하면 더이상 나가지 않게끔 만듬!
# 배경이 뭐냐에 따라 달라질 듯 !
if ss.x < Move.boundary:
# 더 이상 나가지 못하도록 0 으로 막아줌
ss.x = Move.boundary
# 오른쪽 방향키를 눌렀을 때
elif Move.right_go == True:
ss.x += ss.move
# 오른쪽 끝에서 비행선의 가로크기만큼 빼줘야한다
if ss.x >= size[Size.x] - (ss.sx+Move.boundary):
# 더 이상 오른쪽 바깥으로 못나가게 오른쪽 끝값으로 초기화
ss.x = size[Size.x] - (ss.sx+Move.boundary)
# 윗 방향키를 눌렀을때
# 윗 방향키를 elif에서 if로 시작
# 좌우와 상하가 독립된 상태로 구분됨
if Move.up_go == True:
ss.y -= ss.move
# 게임화면 위쪽 화면으로 나가는 경우
if ss.y < Move.boundary:
# 더이상 나가지 못하게 위치값 고정
ss.y = Move.boundary
# 아래 방향키를 눌렀을때
elif Move.down_go == True:
ss.y += ss.move
# 게임화면 위쪽 화면으로 나가는 경우
if ss.y >= size[Size.y] - ss.sy:
# 더이상 나가지 못하게 위치값 고정
ss.y = size[Size.y] - ss.sy
# 미사일의 속도 조정
if Speed.m_initiate_speed_30-(Util.score // Util.score_10)>=Speed.m_max_speed:
m_speed = Speed.m_initiate_speed_30 - (Util.score // Util.score_10)
else:
m_speed = Speed.m_max_speed
# 점수와 관련해서 미사일의 속도를 바꾸면 좋을듯 !
# k%6 이면 미사일의 발생 확률을 1/6으로 낮춤!
if (Move.space_go == True) and Speed.k % m_speed == Speed.speed_end_point:
# 미사일 객체 생성
mm = obj()
# 미사일의 사진
mm.put_img('SourceCode/Image/MISSILE_2.png')
# 미사일의 크기 조정
# m_xsize = 5, m_ysize = 15
mm.change_size(Size.m_xsize,Size.m_ysize)
# 미사일 생성시 효과음
missile1.play()
# 미사일의 x값 (위치)
if Util.score < Util.score_200:
mm.x = round(ss.x + ss.sx / Size.half_split_num - mm.sx / Size.half_split_num)
# 미사일의 위치 = 비행기의 위치 - 미사일의 y크기
mm.y = ss.y - mm.sy - Util.m_loc_10
elif Util.score >= Util.score_200 and Util.score < Util.score_400:
mm.x = round(ss.x + ss.sx / Size.third_split_num - mm.sx / Size.half_split_num)
# 미사일의 위치 = 비행기의 위치 - 미사일의 y크기
mm.y = ss.y - mm.sy - Util.m_loc_10
elif Util.score >= Util.score_400:
mm.x = round(ss.x + ss.sx / Size.half_split_num - mm.sx / Size.half_split_num)
mm.y = ss.y - mm.sy - Util.m_loc_10
# 미사일의 움직이는 속도를 결정함
mm.move = Speed.m_initiate_speed_25
# 미사일의 객체를 리스트에 저장한다.
Util.m_list.append(mm)
# 점수가 200점 이상이라면 미사일이 한개 더 늘어남
# 점수가 400점 이상이라면 미사일의 발사 형태가 바뀜
if (Move.space_go == True) and (Speed.k%m_speed == Speed.speed_end_point) and Util.score >= Util.score_200:
# 두번째 미사일 객체 생성
missile1.stop()
missile2.play()
mm2 = obj()
mm2.put_img('SourceCode/Image/MISSILE_2.png')
mm2.change_size(Size.m_xsize, Size.m_ysize)
mm2.x = round(ss.x +(ss.sx * Size.half_split_num) / Size.third_split_num - mm.sx / Size.half_split_num)
mm2.y = ss.y - mm2.sy - Util.m_loc_10
mm2.move = Speed.m_initiate_speed_15
Util.m_list.append(mm2)
# 미사일의 발생 빈도 조절
Speed.k += Util.missile_rate
# 피사체의 리스트를 초기화함
# delete list
d_list = []
for i in range(len(Util.m_list)):
# i 번째 미사일
m = Util.m_list[i]
# 미사일 속도만큼 미사일이 y축방향으로 빠져나간다.
m.y -= m.move
if Util.score > Util.score_400:
missile2.stop()
missile3.play()
# 점수가 400점 이상이면 미사일이 꼬여서 나가는것 처럼 보이게 함
m.x+= random.uniform(-Util.m_loc_10,Util.m_loc_10)
# 미사일의 사이즈만큼 나갔을때 지워준다.
if m.y < -m.sx:
d_list.append(i)
d_list.reverse()
for d in d_list:
del Util.m_list[d]
# score 400점마다 비행체의 속도 1씩 증가
Speed.s_speed = Speed.s_speed + Util.score // Util.score_400
# score 가 10점 증가함에따라 피사체 발생 개수 0.01확률 증가
if random.random() > Speed.create_rate_c - (Util.score//Util.score_200)/Util.score_100:
# 피사체 객체 생성
aa = obj()
aa.put_img("SourceCode/Image/Scorphion.png")
# 피사체의 그림 크기 조정
random_size = random.randint(Size.min_size,Size.max_size)
# print("Size.min_size : {} Size.max_size : {} ss.x : {} ss.y : {} ss.sx : {} ss.sy : {} size : {} aa.sx : {} aa.sy : {}".format(Size.min_size, Size.max_size,ss.x,ss.y,ss.sx,ss.sy,size,aa.sx,aa.sy))
# 정사각형 모양의 피사체
# 이미 사이즈가 한번 바뀌었으므로 다시 바뀔 필요가 없음 또 바꾸면 오류 발생
if Move.position is not True:
aa.change_size(random_size,random_size)
aa.change_size(random_size,random_size)
# 0부터 오른쪽 끝까지의 랜덤변수인데 비행기크기보다 작으므로 미사일을 안맞는 외계인도 고려해야함(비행선크기/2 를 뺴줘야함)
aa.x = random.randrange(Size.rand_min_size, size[Size.x] - aa.sx - round(ss.sx/Size.half_split_num))
aa.y = Util.a_loc_10
aa.move = Speed.a_init_speed + (Util.score//Util.score_300)
Util.a_list.append(aa)
# 장애물 등장
if random.random() > Speed.create_rate_r:
# 장애물 객체 생성
block = obj()
block.put_img('SourceCode/Image/Catus.png')
random_size = random.randint(Size.min_size,Size.block_max_size)
block.change_size(random_size, random_size)
# block.change_size(Size.block_size, Size.block_size)
block.x = Util.start_loc[Size.x] - block.sx
block.y = random.randint(Size.rand_min_size, size[Size.x] - block.sx - round(ss.sx/Size.half_split_num))
block.move = Speed.b_init_speed + (Util.score//Util.score_100)
Util.block_list.append(block)
d2_list=[]
for i in range(len(Util.block_list)):
b = Util.block_list[i]
b.x += b.move
if b.x >= size[Size.x]:
d2_list.append(i)
d2_list.reverse()
for d2 in d2_list:
del Util.block_list[d2]
# 살생부 리스트 초기화
d_list = []
for i in range(len(Util.a_list)):
a = Util.a_list[i]
a.y += a.move
# 외계인이 화면 밖으로 나갔다면 지워준다.
if a.y >= size[Size.y]:
d_list.append(i)
# 메모리 효율을 위해 삭제
# 앞에서 부터 지워지면 리스트가 앞당겨져서 오류가 일어나기때문에 reverse해주고 지워준다.
d_list.reverse()
for d in d_list:
del Util.a_list[d]
# 외계인이 화면 밖으로 나간 횟수
Util.loss += Util.obj_num
dm_list = []
da_list = []
for i in range(len(Util.m_list)):
for j in range(len(Util.a_list)):
m = Util.m_list[i]
a = Util.a_list[j]
if crash2(m,a) is True:
dm_list.append(i)
da_list.append(j)
# 미사일2개와 외계인 1개가 같이 만나는 경우가 있을 수도 있으니까 배제하기위해 중복제거를 해준다.
dm_list = list(set(dm_list))
da_list = list(set(da_list))
# reverse 하지않고 지우면 앞에서 부터 지워지고 앞에서부터지워지면 index의 변화가 일어나서 reverse를 해야함
dm_list.reverse()
da_list.reverse()
# del로 미사일과 외계인 삭제하기
try:
for dm in dm_list:
del Util.m_list[dm]
except :
pass
try:
for da in da_list:
del Util.a_list[da]
# 피사체 사망시 효과음
monster1.play()
# 피사체를 파괴한 횟수
Util.kill += Util.obj_num
except :
pass
for i in range(len(Util.a_list)):
a = Util.a_list[i]
# 만약 외계인이 ss 와 부딛치면 게임 종료
if crash2(a,ss) is True:
# 부딛칠 때 효과음
boom1.play()
# 1초뒤에 꺼지도록 함
time.sleep(Util.time_sleep)
# while 문이 종료되도록 하는 key
SB = True
# Go 가 0 인상태로 while문을 빠져나왔다면 x버튼으로 빠져나온것
Util.GO = True
for i in range(len(Util.block_list)):
b = Util.block_list[i]
# 만약 장애물과 ss가 부딛치면 게임 종료시킴
if crash2(b,ss) is True:
# 부딛칠 때 효과음
boom1.play()
time.sleep(Util.time_sleep)
# while문 종료 키
SB = True
Util.GO = True
# score 가 0 점이 되면 프로그램 종료
if Util.score < Util.end_point:
SB = True
Util.GO = True
    # 4-4. Drawing
    # Rescale the background image as the window is resized with the mouse
background_image_desert = pygame.image.load("SourceCode/Image/DESERT.jpeg")
background_image_desert = pygame.transform.scale(background_image_desert, size)
screen.blit(background_image_desert, Util.start_loc)
# 비행체 보여주기
ss.show()
# 미사일 보여주기
for m in Util.m_list:
m.show()
# 피사체 보여주기
for a in Util.a_list:
# print(a.sx,a.sy)
if (a.sx > Size.err_x) or (a.sy > Size.err_y):
a.put_img("SourceCode/Image/Scorphion.png")
a.change_size(Size.standard_size,Size.standard_size)
a.show()
# 선인장 장애물 보여주기
for d in Util.block_list:
d.show()
# 점수 산정
# Util.score = (Util.kill*5 - Util.loss*8)
# 점수산정을 메소드화 하였음
cal_score(Util.kill, Util.loss)
font = pygame.font.Font("SourceCode/Font/DXHanlgrumStd-Regular.otf", FontSize.size_kill_loss)
text_kill = font.render("Killed : {} Loss : {} Score : {} HighScore : {}".format(Util.kill, Util.loss, Util.score, Util.highscore), True, Color.yellow) # 폰트가지고 랜더링 하는데 표시할 내용, True는 글자가 잘 안깨지게 하는 거임 걍 켜두기, 글자의 색깔
screen.blit(text_kill,FontSize.loc_kill_loss) # 이미지화 한 텍스트라 이미지를 보여준다고 생각하면 됨
# 현재 흘러간 시간
text_time = font.render("Time : {:.2f}".format(delta_time), True, Color.purple)
screen.blit(text_time,(size[Size.x]-FontSize.len_for_time, FontSize.len_for_time_ysize))
    # 4-5. Update
    pygame.display.flip() # everything drawn so far gets updated onto the screen
Move.position = False
def restart():
    pygame.quit()
import Main
# 5. Game end (1. the game ended because the x button was pressed, 2. the game ended because the player died)
# This is the game-over situation!
game_over.play()
while Util.GO:
clock.tick(Move.FPS)
for event in pygame.event.get(): # 이벤트가 있다면
if event.type == pygame.KEYDOWN: # 키보드가 눌렸음
if event.key == pygame.K_r: # r버튼이 눌림
restart()
if event.type == pygame.QUIT:
Util.GO = False
if event.type == pygame.VIDEORESIZE:
width, height = event.w, event.h
Size.x_resize_rate = width / size[Size.x]
Size.y_resize_rate = height / size[Size.y]
size =[width, height]
window = pygame.display.set_mode(size, pygame.RESIZABLE)
Move.position = True
    # Update the high score
if Util.score > Util.highscore:
d = open('Oasisscore.txt', 'w')
d.write(str(Util.score))
d.close()
background_image_desert = pygame.transform.scale(background_image_desert, size)
screen.blit(background_image_desert, Util.start_loc)
FontSize.size_gameover = sum(size) // Resizing.size_gameover
font = pygame.font.Font("SourceCode/Font/DXHanlgrumStd-Regular.otf", FontSize.size_gameover)
Rfont = pygame.font.Font("SourceCode/Font/DXHanlgrumStd-Regular.otf", FontSize.size_restart)
text_kill = font.render("GAME OVER", True, Color.red) # 폰트가지고 랜더링 하는데 표시할 내용, True는 글자가 잘 안깨지게 하는 거임 걍 켜두기, 글자의 색깔
text_restart = Rfont.render("Restart >> Press R", True, Color.yellow)
# screen.blit(text_kill,(size[0] // Size.half_split_num - (size[0] // Size.half_split_num) // Size.half_split_num + FontSize.lensize_gameover, round((size[1] / Size.half_split_num) - FontSize.lensize_gameover))) # 이미지화 한 텍스트라 이미지를 보여준다고 생각하면 됨
screen.blit(text_kill, (size[Size.x] * Size.three_five - FontSize.size_gameover, size[Size.x]//Size.half_split_num ))
screen.blit(text_restart, (Size.restart_middle, size[Size.y]//Size.half_split_num ))
pygame.display.flip() # 그려왔던게 화면에 업데이트가 됨
Move.position = False
pygame.quit()
# How to display text (content, position, font, size, color)
# 1. Set the font
# font = pygame.font.Font(address,size)
# 2. Create a Surface (render the text to an image)
# text = font.render(contents,True,color)
# 3. Show the Surface on screen
# screen.blit(text,position)
# These three lines of code always go together
# As the score goes up, let smaller enemies appear too!
# Chaegyun
# Title screen interface
# Different enemy images per score
# Different missile sounds per score
# Different missile images per score
# Different craft images per score
# Clean up variables
# Horizontal obstacles (cannot be shot down, must be dodged)
# Above 1000 points, spawn lots of them horizontally and vertically (assignments, open source) 10~15
# Increase craft speed by 1 for every 400 points of score
# Create cactus obstacles
# TODO
# As the score increases, the cactus size also increases
# When shrinking/enlarging the window, object positions must also move (only the horizontally spawned cactus is not reflected immediately)
# Record a video of window resizing
# Send the remaining to-do list
# Previously, size changes were only applied to non-player objects when they were newly created; now change_size_rate applies the current state directly to every object via a for loop so resizing takes effect immediately while the game runs
# Resize the title screen
# Collision detection
# Fix screen artifacts when resizing,
# Fix teleporting when resizing
| 30.564417
| 249
| 0.58065
|
110e439b9319770232a746de970510823a92e77b
| 9,051
|
py
|
Python
|
nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py
|
bclau/nova
|
ec260a937d9c5e3d19cd3de1972615e5322cf477
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py
|
bclau/nova
|
ec260a937d9c5e3d19cd3de1972615e5322cf477
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py
|
bclau/nova
|
ec260a937d9c5e3d19cd3de1972615e5322cf477
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 SINA Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova.api.openstack.compute.contrib import attach_interfaces
from nova.compute import api as compute_api
from nova import context
from nova import exception
from nova.network import api as network_api
from nova.openstack.common import jsonutils
from nova import test
import webob
from webob import exc
CONF = cfg.CONF
FAKE_UUID1 = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUID2 = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
FAKE_PORT_ID1 = '11111111-1111-1111-1111-111111111111'
FAKE_PORT_ID2 = '22222222-2222-2222-2222-222222222222'
FAKE_PORT_ID3 = '33333333-3333-3333-3333-333333333333'
FAKE_NET_ID1 = '44444444-4444-4444-4444-444444444444'
FAKE_NET_ID2 = '55555555-5555-5555-5555-555555555555'
FAKE_NET_ID3 = '66666666-6666-6666-6666-666666666666'
port_data1 = {
"id": FAKE_PORT_ID1,
"network_id": FAKE_NET_ID1,
"admin_state_up": True,
"status": "ACTIVE",
"mac_address": "aa:aa:aa:aa:aa:aa",
"fixed_ips": ["10.0.1.2"],
"device_id": FAKE_UUID1,
}
port_data2 = {
"id": FAKE_PORT_ID2,
"network_id": FAKE_NET_ID2,
"admin_state_up": True,
"status": "ACTIVE",
"mac_address": "bb:bb:bb:bb:bb:bb",
"fixed_ips": ["10.0.2.2"],
"device_id": FAKE_UUID1,
}
port_data3 = {
"id": FAKE_PORT_ID3,
"network_id": FAKE_NET_ID3,
"admin_state_up": True,
"status": "ACTIVE",
"mac_address": "bb:bb:bb:bb:bb:bb",
"fixed_ips": ["10.0.2.2"],
"device_id": '',
}
fake_networks = [FAKE_NET_ID1, FAKE_NET_ID2]
ports = [port_data1, port_data2, port_data3]
def fake_list_ports(self, *args, **kwargs):
result = []
for port in ports:
if port['device_id'] == kwargs['device_id']:
result.append(port)
return {'ports': result}
def fake_show_port(self, context, port_id, **kwargs):
for port in ports:
if port['id'] == port_id:
return {'port': port}
def fake_attach_interface(self, context, instance, network_id, port_id,
requested_ip='192.168.1.3'):
if not network_id:
# if no network_id is given when add a port to an instance, use the
# first default network.
network_id = fake_networks[0]
if not port_id:
port_id = ports[fake_networks.index(network_id)]['id']
network_info = [
{'bridge': 'br-100',
'id': network_id,
'cidr': '192.168.1.0/24',
'vlan': '101',
'injected': 'False',
'multi_host': 'False',
'bridge_interface': 'bridge_interface'
},
{'label': 'fake_network',
'broadcast': '192.168.1.255',
'mac': '11:22:33:11:22:33',
'vif_uuid': port_id,
'rxtx_cap': 0,
'dns': '8.8.8.8',
'dhcp_server': '192.168.1.1',
'ips': {'ip': requested_ip,
'enabled': 1,
'netmask': '255.255.255.0',
'gateway': '192.168.1.254'}
}
]
return network_info
def fake_detach_interface(self, context, instance, port_id):
for port in ports:
if port['id'] == port_id:
return
raise exception.PortNotFound(port_id=port_id)
def fake_get_instance(self, context, intance_id):
return {}
class InterfaceAttachTests(test.NoDBTestCase):
def setUp(self):
super(InterfaceAttachTests, self).setUp()
self.flags(neutron_auth_strategy=None)
self.flags(neutron_url='http://anyhost/')
self.flags(neutron_url_timeout=30)
self.stubs.Set(network_api.API, 'show_port', fake_show_port)
self.stubs.Set(network_api.API, 'list_ports', fake_list_ports)
self.stubs.Set(compute_api.API, 'get', fake_get_instance)
self.context = context.get_admin_context()
self.expected_show = {'interfaceAttachment':
{'net_id': FAKE_NET_ID1,
'port_id': FAKE_PORT_ID1,
'mac_addr': port_data1['mac_address'],
'port_state': port_data1['status'],
'fixed_ips': port_data1['fixed_ips'],
}}
def test_show(self):
attachments = attach_interfaces.InterfaceAttachmentController()
req = webob.Request.blank('/v2/fake/os-interfaces/show')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = attachments.show(req, FAKE_UUID1, FAKE_PORT_ID1)
self.assertEqual(self.expected_show, result)
def test_show_invalid(self):
attachments = attach_interfaces.InterfaceAttachmentController()
req = webob.Request.blank('/v2/fake/os-interfaces/show')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPNotFound,
attachments.show, req, FAKE_UUID2, FAKE_PORT_ID1)
def test_delete(self):
self.stubs.Set(compute_api.API, 'detach_interface',
fake_detach_interface)
attachments = attach_interfaces.InterfaceAttachmentController()
req = webob.Request.blank('/v2/fake/os-interfaces/delete')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = attachments.delete(req, FAKE_UUID1, FAKE_PORT_ID1)
self.assertEqual('202 Accepted', result.status)
def test_delete_interface_not_found(self):
self.stubs.Set(compute_api.API, 'detach_interface',
fake_detach_interface)
attachments = attach_interfaces.InterfaceAttachmentController()
req = webob.Request.blank('/v2/fake/os-interfaces/delete')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPNotFound,
attachments.delete,
req,
FAKE_UUID1,
'invaid-port-id')
def test_attach_interface_without_network_id(self):
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
attachments = attach_interfaces.InterfaceAttachmentController()
req = webob.Request.blank('/v2/fake/os-interfaces/attach')
req.method = 'POST'
body = jsonutils.dumps({'port_id': FAKE_PORT_ID1})
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = attachments.create(req, FAKE_UUID1, jsonutils.loads(req.body))
self.assertEqual(result['interfaceAttachment']['net_id'],
FAKE_NET_ID1)
def test_attach_interface_with_network_id(self):
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
attachments = attach_interfaces.InterfaceAttachmentController()
req = webob.Request.blank('/v2/fake/os-interfaces/attach')
req.method = 'POST'
req.body = jsonutils.dumps({'interfaceAttachment':
{'net_id': FAKE_NET_ID2}})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = attachments.create(req, FAKE_UUID1, jsonutils.loads(req.body))
self.assertEqual(result['interfaceAttachment']['net_id'],
FAKE_NET_ID2)
def test_attach_interface_with_port_and_network_id(self):
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
attachments = attach_interfaces.InterfaceAttachmentController()
req = webob.Request.blank('/v2/fake/os-interfaces/attach')
req.method = 'POST'
req.body = jsonutils.dumps({'interfaceAttachment':
{'port_id': FAKE_PORT_ID1,
'net_id': FAKE_NET_ID2}})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPBadRequest,
attachments.create, req, FAKE_UUID1,
jsonutils.loads(req.body))
| 36.792683
| 79
| 0.632195
|
936587c4ceb3c01939531325c8c851115ac8e17f
| 47
|
py
|
Python
|
whalesharkM2M/datautil/__init__.py
|
theprismdata/WhaleShark_IIoT
|
da3448b25e87155fde438e427f4bdf92091ceb1b
|
[
"Apache-2.0"
] | 3
|
2020-11-09T02:15:33.000Z
|
2021-01-11T07:43:11.000Z
|
whalesharkM2M/datautil/__init__.py
|
theprismdata/WhaleShark_IIoT
|
da3448b25e87155fde438e427f4bdf92091ceb1b
|
[
"Apache-2.0"
] | null | null | null |
whalesharkM2M/datautil/__init__.py
|
theprismdata/WhaleShark_IIoT
|
da3448b25e87155fde438e427f4bdf92091ceb1b
|
[
"Apache-2.0"
] | null | null | null |
"""
whalesharkM2M
"""
__version__ = '0.0.1'
| 11.75
| 21
| 0.574468
|
99ef52f8cde21cc2da317ab96127be9e001f4508
| 804
|
py
|
Python
|
01_process_reads/at_singles_ex39/03_merge_class_fs.py
|
ddingding/coevolution_mechanism
|
4a03f7a105a2606e812dd8a4fa88272519ddab86
|
[
"MIT"
] | 1
|
2022-01-16T09:32:33.000Z
|
2022-01-16T09:32:33.000Z
|
01_process_reads/at_singles_ex39/03_merge_class_fs.py
|
ddingding/coevolution_mechanism
|
4a03f7a105a2606e812dd8a4fa88272519ddab86
|
[
"MIT"
] | null | null | null |
01_process_reads/at_singles_ex39/03_merge_class_fs.py
|
ddingding/coevolution_mechanism
|
4a03f7a105a2606e812dd8a4fa88272519ddab86
|
[
"MIT"
] | null | null | null |
# concatenate classified read files for 2 machines
from constants import (
AT_SINGLE_CALLED_DIR_M1,
AT_SINGLE_CALLED_DIR_M2,
AT_SINGLE_CALLED_DIR_BOTH,
)
from os import listdir
from pipelineTools import concat_files
at_fs1 = sorted(
[f for f in listdir(AT_SINGLE_CALLED_DIR_M1) if f.endswith("class.csv")]
)
at_fs2 = sorted(
[f for f in listdir(AT_SINGLE_CALLED_DIR_M2) if f.endswith("class.csv")]
)
list_fs_concat = list(zip(at_fs1, at_fs2))
for fs in list_fs_concat:
f1, f2 = fs
f_id = f1[: len("180716Lau_D18-6083")]
# print(f1, f2)
assert f1[: len("180716Lau_D18-6083")] == f2[: len("180716Lau_D18-6083")]
concat_files(
[AT_SINGLE_CALLED_DIR_M1 + f1, AT_SINGLE_CALLED_DIR_M2 + f2],
AT_SINGLE_CALLED_DIR_BOTH + f_id + "_class.csv",
)
| 25.125
| 77
| 0.705224
|
10a59f81aa255d1a755fdf8305f17ccc5c8738b0
| 2,831
|
py
|
Python
|
2019/14/chemlab.py
|
GeoffRiley/AdventOfCode
|
27fe8670a1923cb3b0675784f5e855ad18c29c93
|
[
"Unlicense"
] | 2
|
2020-12-12T03:18:45.000Z
|
2021-12-17T00:35:33.000Z
|
2019/14/chemlab.py
|
GeoffRiley/AdventOfCode
|
27fe8670a1923cb3b0675784f5e855ad18c29c93
|
[
"Unlicense"
] | null | null | null |
2019/14/chemlab.py
|
GeoffRiley/AdventOfCode
|
27fe8670a1923cb3b0675784f5e855ad18c29c93
|
[
"Unlicense"
] | null | null | null |
from collections import defaultdict
from math import ceil
from typing import List
class Reagent(object):
@classmethod
def factory(cls, reagent_str: str):
res = []
for r in reagent_str.split(','):
res.append(cls(r))
return res
def __init__(self, chem: str, qty: int = None):
if qty is None:
q, c = chem.split()
self._qty, self._chem = int(q), c
else:
self._qty, self._chem = int(qty), chem
@property
def chem(self):
return self._chem
@property
def qty(self):
return self._qty
def __repr__(self):
return f'{self.__class__.__name__}({self.chem} * {self.qty})'
class Reactor(dict):
def __init__(self):
super().__init__()
self.requirements = defaultdict(int)
reaction_list = defaultdict(List[Reagent])
reagent_min_order = defaultdict(int)
reagent_stock = defaultdict(int)
reagent_used = defaultdict(int)
def calc_min_ore(chem: str, req_qty: int):
# first shift existing stock
if reagent_stock[chem] > 0:
s = min(reagent_stock[chem], req_qty)
reagent_stock[chem] -= s
req_qty -= s
if req_qty > 0:
# if we need more, then a full unit or multiple thereof must be requested
units = ceil(req_qty / reagent_min_order[chem])
order = reagent_min_order[chem] * units
reagent_stock[chem] += order - req_qty
reagent_used[chem] += order
# get raw materials to cover
for r in reaction_list[chem]:
if r.chem == 'ORE':
reagent_used[r.chem] += r.qty * units
else:
calc_min_ore(r.chem, r.qty * units)
def ore_calc(reaction_equ: str, units: int) -> int:
reagent_stock.clear()
reagent_used.clear()
for reaction in reaction_equ.splitlines(keepends=False):
reagents, produce = reaction.split('=>')
chem_min_qty, chem_name = produce.split()
reaction_list[chem_name] = Reagent.factory(reagents)
reagent_min_order[chem_name] = int(chem_min_qty)
calc_min_ore('FUEL', units)
return reagent_used['ORE']
def max_fuel_for_ore(reaction_equ: str, target: int) -> int:
est_range = [0, 1]
while ore_calc(reaction_equ, est_range[1]) < target:
est_range[0] = est_range[1]
est_range[1] *= 2
while est_range[0] + 1 < est_range[1]:
av = sum(est_range) // 2
if ore_calc(reaction_equ, av) > target:
est_range[1] = av
else:
est_range[0] = av
return est_range[0]
if __name__ == '__main__':
with open('input') as f:
reaction_str = f.read()
print(f'PART 1: {ore_calc(reaction_str, 1)}')
print(f'PART 2: {max_fuel_for_ore(reaction_str, 1000000000000)}')
# PART 1: 1582325
# PART 2: 2267486
| 28.59596
| 81
| 0.612151
|
7500df34971c315cf858147fc8c55e8bc27fbb27
| 345
|
py
|
Python
|
Darlington/phase2/LIST/day 42 solution/qtn1.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 6
|
2020-05-23T19:53:25.000Z
|
2021-05-08T20:21:30.000Z
|
Darlington/phase2/LIST/day 42 solution/qtn1.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 8
|
2020-05-14T18:53:12.000Z
|
2020-07-03T00:06:20.000Z
|
Darlington/phase2/LIST/day 42 solution/qtn1.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 39
|
2020-05-10T20:55:02.000Z
|
2020-09-12T17:40:59.000Z
|
# program to count the number of elements in a list within a specified range.
def count_range_in_list(li, min, max):
ctr = 0
for x in li:
if min <= x <= max:
ctr += 1
return ctr
list1 = [10,20,30,40,40,40,70,80,99]
print(count_range_in_list(list1, 40, 100))
list2 = ['a','b','c','d','e','f']
print(count_range_in_list(list2, 'a', 'e'))
| 26.538462
| 77
| 0.649275
|
c7446d6d25eeb57157f678a1a57b55312b0a3b00
| 3,272
|
py
|
Python
|
src/metricrule/agent/mrrecorder.py
|
MetricRule/metricrule-agent-python
|
e56399625d3c1587d82d9fe78cae97a022b851fc
|
[
"Apache-2.0"
] | 1
|
2021-06-23T10:27:51.000Z
|
2021-06-23T10:27:51.000Z
|
src/metricrule/agent/mrrecorder.py
|
MetricRule/metricrule-agent-python
|
e56399625d3c1587d82d9fe78cae97a022b851fc
|
[
"Apache-2.0"
] | 1
|
2021-06-21T10:31:20.000Z
|
2021-06-21T10:31:20.000Z
|
src/metricrule/agent/mrrecorder.py
|
MetricRule/metricrule-agent-python
|
e56399625d3c1587d82d9fe78cae97a022b851fc
|
[
"Apache-2.0"
] | null | null | null |
"""Recorder of metrics for request and response payloads.
"""
import json
from typing import MutableSequence, Optional, Tuple, Union
from ..config_gen.metric_configuration_pb2 import SidecarConfig # pylint: disable=relative-beyond-top-level
from .mrmetric import get_context_labels, get_metric_instances, MetricContext, MetricInstrumentSpec
from .mrotel import Instrument
InstrumentMap = dict[MetricInstrumentSpec, Instrument]
MutableLabelSequence = Optional[MutableSequence[Tuple[Tuple[str, str], ...]]]
def log_request_metrics(config: SidecarConfig,
input_instruments: InstrumentMap,
request_body: Union[str, bytes],
context_label_sink: MutableLabelSequence = None) -> None:
"""Logs metrics for a request payload.
Args:
config: A populated config proto.
input_instruments: A map of instrument specifications to their
equivalent initialized instruments.
request_body: Content of the request payload received.
context_label_sink: A mutable sequence to which any context labels
will be appended.
"""
try:
json_obj = json.loads(request_body)
except ValueError:
return
# TODO(jishnu): Cache these labels to use with response.
context_labels = get_context_labels(
config, json_obj, MetricContext.INPUT)
metric_instances = get_metric_instances(
config, json_obj, MetricContext.INPUT)
for spec, instances in metric_instances.items():
instrument = input_instruments[spec]
for instance in instances:
labels = {label[0]: label[1] for label in instance.labels}
labels.update({label[0]: label[1] for label in context_labels})
_ = [instrument.record(val, labels)
for val in instance.metricValues]
if context_label_sink is not None:
context_label_sink.append(context_labels)
def log_response_metrics(config: SidecarConfig,
output_instruments: InstrumentMap,
response_body: Union[str, bytes],
context_label_source: MutableLabelSequence = None) -> None:
"""Logs metrics for a response payload.
Args:
config: A populated config proto.
output_instruments: A map of instrument specifications to their
equivalent initialized instruments.
response_body: Content of the response payload sent.
context_label_source: A mutable source from which any context labels
will be popped.
"""
try:
json_obj = json.loads(response_body)
except ValueError:
return
metric_instances = get_metric_instances(
config, json_obj, MetricContext.OUTPUT)
for spec, instances in metric_instances.items():
instrument = output_instruments[spec]
for instance in instances:
labels = {label[0]: label[1] for label in instance.labels}
if context_label_source is not None and len(context_label_source) > 0:
context_labels = context_label_source.pop()
labels.update({label[0]: label[1] for label in context_labels})
_ = [instrument.record(val, labels)
for val in instance.metricValues]
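# A minimal call-pattern sketch (assumption, not part of the original module):
# constructing a real SidecarConfig proto and the instrument maps depends on
# the config_gen and mrotel modules, so those objects are taken as given here.
# Only the sink/source hand-off described in the docstrings is illustrated.
#
#   shared_labels = []  # MutableLabelSequence shared by both calls
#   log_request_metrics(config, input_instruments, request_body,
#                       context_label_sink=shared_labels)
#   ...serve the request, obtain response_body...
#   log_response_metrics(config, output_instruments, response_body,
#                        context_label_source=shared_labels)
#
# Context labels appended while logging the request are popped again when the
# matching response is logged, so both calls must share the same list object.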
| 41.417722
| 108
| 0.67879
|
741aa180b3fdbd3dfb75d1f8754b90aeecd32519
| 1,520
|
py
|
Python
|
Media/common/Interface/FrameXML/MvPlayerInfo.py
|
dmacka/MultiverseClientServer
|
b64d7d754a0b2b1a3e5acabd4d6ebb80ab1d9379
|
[
"MIT"
] | 5
|
2020-04-29T19:14:57.000Z
|
2022-02-18T08:48:37.000Z
|
Media/common/Interface/FrameXML/MvPlayerInfo.py
|
dmacka/MultiverseClientServer
|
b64d7d754a0b2b1a3e5acabd4d6ebb80ab1d9379
|
[
"MIT"
] | null | null | null |
Media/common/Interface/FrameXML/MvPlayerInfo.py
|
dmacka/MultiverseClientServer
|
b64d7d754a0b2b1a3e5acabd4d6ebb80ab1d9379
|
[
"MIT"
] | 2
|
2021-03-09T06:53:30.000Z
|
2021-03-27T12:02:39.000Z
|
import ClientAPI
import MarsSkill
def MvPlayerInfoFrame_OnLoad(frame):
frame.SetBackdropColor(0, 0, 0)
frame.RegisterEvent("SKILL_LIST_UPDATE")
def MvPlayerInfoFrame_OnEvent(frame, event):
if event.eventType == "SKILL_LIST_UPDATE":
UpdateSkillInfo()
def GetUnitProperty(obj, prop, default):
if obj is None:
return default
if not obj.PropertyExists(prop):
return default
return obj.GetProperty(prop)
def LoadPlayerValues():
player = ClientAPI.GetPlayerObject()
MvPlayerNameValue.SetText(player.Name)
MvPlayerProfessionValue.SetText(str(GetUnitProperty(player, "class", 0)))
MvPlayerLevelValue.SetText(str(GetUnitProperty(player, "level", 0)))
strength = GetUnitProperty(player, "strength", 0)
MvPlayerStrengthValue.SetText(str(strength))
MvPlayerDexterityValue.SetText(str(GetUnitProperty(player, "dexterity", 0)))
MvPlayerWisdomValue.SetText(str(GetUnitProperty(player, "wisdom", 0)))
MvPlayerIntelligenceValue.SetText(str(GetUnitProperty(player, "intelligence", 0)))
UpdateSkillInfo()
def UpdateSkillInfo():
numSkills = MarsSkill.GetNumSkills()
if numSkills > 0:
#Skill.SetText(MarsSkill.GetSkillName(1))
for i in range(1,numSkills+1):
frame = getglobal("Skill" + str(i))
frame.SetText(MarsSkill.GetSkillName(i))
frame = getglobal("SkillRank" + str(i))
frame.SetText(str(MarsSkill.GetSkillRank(i)))
| 35.348837
| 83
| 0.685526
|
0a001203c23d336884b82ac475104fb7d5f83faf
| 816
|
py
|
Python
|
var/spack/repos/builtin/packages/xhost/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-06-25T15:25:29.000Z
|
2020-06-25T15:25:29.000Z
|
var/spack/repos/builtin/packages/xhost/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2018-07-06T19:11:46.000Z
|
2018-07-06T19:12:28.000Z
|
var/spack/repos/builtin/packages/xhost/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-09-21T14:35:49.000Z
|
2020-09-21T14:35:49.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Xhost(AutotoolsPackage):
"""xhost is used to manage the list of host names or user names
allowed to make connections to the X server."""
homepage = "http://cgit.freedesktop.org/xorg/app/xhost"
url = "https://www.x.org/archive/individual/app/xhost-1.0.7.tar.gz"
version('1.0.7', sha256='8dd1b6245dfbdef45a64a18ea618f233f77432c2f30881b1db9dc40d510d9490')
depends_on('libx11')
depends_on('libxmu')
depends_on('libxau')
depends_on('xproto@7.0.22:', type='build')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
| 32.64
| 95
| 0.714461
|
178b0d6bfdb3970cddc38087206c4d4fea388e1f
| 193
|
py
|
Python
|
casepy/eulerRuO1/nNoh64x64/chars.py
|
will-iam/Variant
|
5b6732134fd51cf6c2b90b51b7976be0693ba28d
|
[
"MIT"
] | 8
|
2017-05-04T07:50:02.000Z
|
2019-05-17T02:27:20.000Z
|
casepy/eulerRuO1/nNoh64x64/chars.py
|
will-iam/Variant
|
5b6732134fd51cf6c2b90b51b7976be0693ba28d
|
[
"MIT"
] | null | null | null |
casepy/eulerRuO1/nNoh64x64/chars.py
|
will-iam/Variant
|
5b6732134fd51cf6c2b90b51b7976be0693ba28d
|
[
"MIT"
] | null | null | null |
# Domain properties
lx = 1.0
ly = 1.0
Nx = 64
Ny = 64
# Scheme execution options
T = 0.6
CFL = 0.5
gamma = 5./3.
BCtype = 'N'
BClayer = 1
quantityList = ['rho', 'rhou_x', 'rhou_y', 'rhoE']
| 11.352941
| 50
| 0.595855
|
19f89acf5ea34028d2d3316de2dce429fa38559e
| 1,715
|
py
|
Python
|
parlai/utils/pickle.py
|
zl930216/ParlAI
|
abf0ad6d1779af0f8ce0b5aed00d2bab71416684
|
[
"MIT"
] | 552
|
2020-09-24T18:16:09.000Z
|
2022-03-25T06:21:55.000Z
|
parlai/utils/pickle.py
|
zl930216/ParlAI
|
abf0ad6d1779af0f8ce0b5aed00d2bab71416684
|
[
"MIT"
] | 722
|
2020-09-24T19:48:44.000Z
|
2022-03-31T17:42:41.000Z
|
parlai/utils/pickle.py
|
zl930216/ParlAI
|
abf0ad6d1779af0f8ce0b5aed00d2bab71416684
|
[
"MIT"
] | 442
|
2020-09-24T14:24:21.000Z
|
2022-03-25T10:40:16.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
ParlAI's custom unpickler.
As modules move around or are renamed, old torch model files become invalid,
since they look for modules in all the wrong places. Furthermore, we occasionally
use APEX for performance reasons, but we don't want to outright die if the user
has not installed it.
This module is to handle both of these issues. It is used like this:
>>> import parlai.utils.pickle
>>> state_dict = torch.load(filename, pickle_module=parlai.utils.pickle)
"""
import pickle
class FakeAPEXClass:
pass
class Unpickler(pickle._Unpickler): # type: ignore
"""
Custom unpickler to handle moved classes and optional libraries.
"""
def find_class(self, module, name):
try:
return super().find_class(module, name)
except (ModuleNotFoundError, AttributeError):
if module.startswith('apex.'):
# user doesn't have apex installed. We'll deal with this later.
return FakeAPEXClass
else:
if (
module == 'parlai.core.utils' or module == 'parlai.utils.misc'
) and name == 'Opt':
from parlai.core.opt import Opt
return Opt
if module == 'parlai.core.dict' and name == '_BPEHelper':
from parlai.utils.bpe import SubwordBPEHelper as _BPEHelper
return _BPEHelper
raise
def load(*args, **kwargs):
return Unpickler(*args, **kwargs).load()
| 30.087719
| 82
| 0.634402
|
5423d894600a5581316235dea0017bc149e526f9
| 107
|
py
|
Python
|
main.py
|
stanford-rc/gcp-flask-stanford
|
d0da1b5650792582ada90fac63796ee974805c17
|
[
"MIT"
] | 3
|
2020-07-28T21:23:29.000Z
|
2021-07-14T17:37:02.000Z
|
main.py
|
stanford-rc/gcp-flask-stanford
|
d0da1b5650792582ada90fac63796ee974805c17
|
[
"MIT"
] | 2
|
2020-07-22T22:07:43.000Z
|
2020-07-24T20:22:56.000Z
|
main.py
|
stanford-rc/gcp-flask-stanford
|
d0da1b5650792582ada90fac63796ee974805c17
|
[
"MIT"
] | null | null | null |
from gcpflask import app
if __name__ == "__main__":
app.run(host="localhost", port=8080, debug=True)
| 17.833333
| 52
| 0.700935
|
9f3c47e67f9c708df8933726a129baea298e66cf
| 1,276
|
py
|
Python
|
PyTorch-cSAWGAN/Components.py
|
mshaikh2/sagan
|
c978d18b0400eddecde303e30900107093029876
|
[
"MIT"
] | null | null | null |
PyTorch-cSAWGAN/Components.py
|
mshaikh2/sagan
|
c978d18b0400eddecde303e30900107093029876
|
[
"MIT"
] | null | null | null |
PyTorch-cSAWGAN/Components.py
|
mshaikh2/sagan
|
c978d18b0400eddecde303e30900107093029876
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
class SelfAttention(nn.Module):
def __init__(self, in_channels):
super(SelfAttention, self).__init__()
self.query_op = nn.utils.spectral_norm(nn.Conv2d(in_channels=in_channels, out_channels=in_channels//8, kernel_size=1))
self.key_op = nn.utils.spectral_norm(nn.Conv2d(in_channels=in_channels, out_channels=in_channels//8, kernel_size=1))
self.value_op = nn.utils.spectral_norm(nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=1))
self.safm_op = nn.utils.spectral_norm(nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=1))
self.gamma = nn.Parameter(torch.zeros(1))
def forward(self, x):
n, c, h, w = x.shape
query = self.query_op(x).view(n, -1, h*w).permute(0, 2, 1)
key = self.key_op(x).view(n, -1, h*w)
value = self.value_op(x).view(n, -1, h*w)
attn_map = F.softmax(torch.bmm(query, key), dim=-1)
        # bmm expects (B, C, N) x (B, N, N); transpose the attention map before mixing values
        safm = self.safm_op(torch.bmm(value, attn_map.permute(0, 2, 1)).view(n, c, h, w))
return x + self.gamma*safm, attn_map
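# A minimal shape-check sketch (assumption, not part of the original module):
# it runs the layer on a random NCHW tensor and verifies the output keeps the
# input shape while the attention map spans all h*w spatial positions.
if __name__ == "__main__":
    layer = SelfAttention(in_channels=64)
    x = torch.randn(2, 64, 16, 16)
    out, attn_map = layer(x)
    assert out.shape == (2, 64, 16, 16)
    assert attn_map.shape == (2, 16 * 16, 16 * 16)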
| 49.076923
| 126
| 0.700627
|
c3ccaf1c0ddb98829cad0ce3ffae4907e33184a7
| 2,622
|
py
|
Python
|
mlonmcu/flow/tflite/framework.py
|
PhilippvK/mlonmcu
|
6b5ed9b2abe8d3caa18c20a604547513e8097b49
|
[
"Apache-2.0"
] | null | null | null |
mlonmcu/flow/tflite/framework.py
|
PhilippvK/mlonmcu
|
6b5ed9b2abe8d3caa18c20a604547513e8097b49
|
[
"Apache-2.0"
] | null | null | null |
mlonmcu/flow/tflite/framework.py
|
PhilippvK/mlonmcu
|
6b5ed9b2abe8d3caa18c20a604547513e8097b49
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2022 TUM Department of Electrical and Computer Engineering.
#
# This file is part of MLonMCU.
# See https://github.com/tum-ei-eda/mlonmcu.git for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Definitions for TFLiteFramework."""
from pathlib import Path
from mlonmcu.flow.framework import Framework
from mlonmcu.flow.tflite import TFLiteBackend
class TFLiteFramework(Framework):
"""TFLite Framework specialization."""
name = "tflite"
FEATURES = ["muriscvnn", "cmsisnn"]
DEFAULTS = {
"optimized_kernel": None,
"optimized_kernel_inc_dirs": [],
"optimized_kernel_libs": [],
}
REQUIRED = ["tf.src_dir"]
backends = TFLiteBackend.registry
def __init__(self, features=None, config=None):
super().__init__(features=features, config=config)
@property
def tf_src(self):
return Path(self.config["tf.src_dir"])
@property
def optimized_kernel(self):
return self.config["optimized_kernel"]
@property
def optimized_kernel_libs(self):
return self.config["optimized_kernel_libs"]
@property
def optimized_kernel_inc_dirs(self):
return self.config["optimized_kernel_inc_dirs"]
def get_cmake_args(self):
args = super().get_cmake_args()
args.append(f"-DTF_SRC={self.tf_src}")
if self.optimized_kernel:
args.append(f"-DTFLM_OPTIMIZED_KERNEL={self.optimized_kernel}")
if self.optimized_kernel_inc_dirs:
temp = "\;".join(self.optimized_kernel_inc_dirs)
args.append(f"-DTFLM_OPTIMIZED_KERNEL_INCLUDE_DIR={temp}")
if self.optimized_kernel_libs:
temp = "\;".join(self.optimized_kernel_libs)
args.append(f"-DTFLM_OPTIMIZED_KERNEL_LIB={temp}")
return args
    # TODO: get_cmake_args -> get_platform_vars (dict instead of list of strings)
def get_espidf_defs(self):
if self.extra_incs or self.extra_libs:
raise NotImplementedError("Extra incs or libs are currently not supported for esp-idf")
return {"TF_DIR": str(self.tf_src)}
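# A sketch of the expected mapping (assumptions, not part of the original
# file): config handling is plumbed through the parent Framework class, so the
# keys below are only indicative, and <kernel>, <dir>, <lib> are placeholders.
#
#   config = {"tf.src_dir": "/path/to/tensorflow",
#             "optimized_kernel": "<kernel>",
#             "optimized_kernel_inc_dirs": ["<dir1>", "<dir2>"],
#             "optimized_kernel_libs": ["<lib1>"]}
#   # get_cmake_args() would then extend the base arguments roughly with:
#   #   -DTF_SRC=/path/to/tensorflow
#   #   -DTFLM_OPTIMIZED_KERNEL=<kernel>
#   #   -DTFLM_OPTIMIZED_KERNEL_INCLUDE_DIR=<dir1>\;<dir2>
#   #   -DTFLM_OPTIMIZED_KERNEL_LIB=<lib1>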
| 32.37037
| 99
| 0.694508
|
4d96e6a6572edc3763dd617d04c13b5f86a6abe5
| 2,244
|
py
|
Python
|
Parser-hybrid/nparser/scripts/compression_ratio.py
|
sb-b/BOUN-PARSE
|
2b529924897d8e2613c4d2193a67796a895da40b
|
[
"Apache-2.0"
] | 12
|
2020-03-04T17:36:12.000Z
|
2021-09-26T14:02:49.000Z
|
Parser-hybrid/nparser/scripts/compression_ratio.py
|
sb-b/BOUN-PARSE
|
2b529924897d8e2613c4d2193a67796a895da40b
|
[
"Apache-2.0"
] | 1
|
2020-12-09T08:21:11.000Z
|
2020-12-09T08:21:11.000Z
|
Parser-hybrid/nparser/scripts/compression_ratio.py
|
sb-b/BOUN-PARSE
|
2b529924897d8e2613c4d2193a67796a895da40b
|
[
"Apache-2.0"
] | 3
|
2020-11-18T09:53:42.000Z
|
2020-12-17T23:04:59.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 Timothy Dozat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import re
import argparse
import codecs
from backports import lzma
import numpy as np
from numpy.linalg import inv
import matplotlib.pyplot as plt
from collections import Counter
#***************************************************************
if __name__ == '__main__':
""" """
parser = argparse.ArgumentParser()
parser.add_argument('-k', '--k_trials', type=int, default=100)
parser.add_argument('-n', '--n_words', type=int, default=5000)
parser.add_argument('files', nargs='+')
args = parser.parse_args()
type_counter = Counter()
for filename in args.files:
with codecs.open(filename, encoding='utf-8', errors='ignore') as f:
for line in f:
line = line.strip()
if line:
if not re.match('#|[0-9]+[-.][0-9]+', line):
type_counter[line.split('\t')[1]] += 1
types = list(type_counter.keys())
total = sum(type_counter.values())
probs = [type_counter[type_] / total for type_ in types]
trials = []
n_words = min(args.n_words, len(types)) or len(types)
for _ in range(args.k_trials):
chosen_types = np.random.choice(types, size=n_words, replace=False, p=probs)
with codecs.open('uncompressed.txt', 'w', encoding='utf-8', errors='ignore') as f:
f.write('\n'.join(chosen_types))
with lzma.open('compressed.txt.xz', 'wb') as f:
f.write('\n'.join(chosen_types).encode('utf-8', 'ignore'))
trials.append(os.path.getsize('compressed.txt.xz')/os.path.getsize('uncompressed.txt'))
os.remove('uncompressed.txt')
os.remove('compressed.txt.xz')
print(np.mean(trials),file=sys.stderr)
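# A typical invocation sketch (assumption, not part of the original script),
# based on the argparse options defined above; the file paths are placeholders
# for tab-separated treebank-style files:
#   python compression_ratio.py -k 100 -n 5000 path/to/treebank/*.conllu
# This samples up to 5000 word types 100 times and prints the mean xz
# compression ratio to stderr.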
| 32.521739
| 91
| 0.666221
|
624e699d8e5c7a844b8afc85bf8cf1c68b081ad6
| 16,321
|
py
|
Python
|
Python/grn_solver.py
|
regulomics/expansion-network
|
93c8cf5b2a8512c1bf83404bc33a36ce045c4c60
|
[
"MIT"
] | 1
|
2021-04-07T10:35:03.000Z
|
2021-04-07T10:35:03.000Z
|
Python/grn_solver.py
|
regulomics/expansion-network
|
93c8cf5b2a8512c1bf83404bc33a36ce045c4c60
|
[
"MIT"
] | null | null | null |
Python/grn_solver.py
|
regulomics/expansion-network
|
93c8cf5b2a8512c1bf83404bc33a36ce045c4c60
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from grn_inference import *
from utils import *
from time import time
#######################
## MODEL PROCESSING ##
#######################
#' Search the value of present regulator bit-vector variable
#'
#' @param C set of genes/nodes
#' @param regulatorsVar list of present regulator bit-vector variables
#' @param M model returned by the solver
#' @return res list of pairs (bit-vector variable, value in model @M)
def getPresentRegulators(C, regulatorsVar, M):
return([[v, filterNone([ifthenelse(simplify(testValue(M[v], i, true)), C[i]) for i in range(len(C))])] for v in regulatorsVar])
#' Give the list of perturbed genes in the experiments
#'
#' @param C set of genes/nodes
#' @param chi dictionary of perturbed genes
#' @param typeP type of considered perturbation (KO or FE)
#' @return res list of known perturbed genes in the experiments
def getPerturbedGenes(C, chi, typeP):
if (chi):
return([[typeP + "(" + C[i] + ")", typeP + "(" + C[i] + ")"] for i in chi.keys()])
return([])
#' Generate a model from a solution returned by the solver
#'
#' @param C set of genes/nodes
#' @param length maximum length of experiment
#' @param Iopt set of optional interactions
#' @param ngenes number of genes/nodes
#' @param t timestamp of computation start
#' @param s solver
#' @param intVar list containing Is, Is is the bit-vector variable
#' associated with the selected optional interactions
#' @param regVar list of #nodes Rs_v, Rs_v is the bit-vector variable
#' associated with the selected regulation condition for node v
#' @param stateVar list of list of state variables at each step for each experiment
#' @param regulators list of present regulator bit-vector for each gene
#' @param chiDOWN dictionary associated with KO perturbations
#' @param chiUP dictionary associated with FE perturbations
#' @param R list of allowed regulation conditions for each node
#' @param uniqueness string for uniqueness condition to find the next solution:
#' "interaction", "full", or "paths"
#' @param verbose logical for printing messages
#' @param resList current list of model solutions
#' @param solNB number of the currently processed solution
#' @param stopSol logical for not applying the uniqueness condition at the end of the processing
#' @param printSolutions logical for printing message about the processed solution
#' @return res list of updated solution list, new timestamp, updated solver with uniqueness
#' condition, logical indicating if the solver should stop looking for conditions
def getSolution(C, length, Iopt, ngenes, t, s, intVar, regVar, stateVar, regulators, chiDOWN, chiUP, R, uniqueness, verbose, resList=[], solNB=1, stopSol=False, printSolutions=True):
lenIopt = len(Iopt)
verboseIt("\nTIME = %.2f" % (time()-t) + " sec\n", printSolutions)
t = time()
s.check()
verboseIt("CHECKING TIME = %.2f" % (time()-t) + " sec\n", printSolutions)
noMoreModel = False
M = None
try:
M = s.model()
except:
if (resList):
verboseIt("No other model found.\n", printSolutions)
else:
verboseIt("No model found.\n", True)
noMoreModel = True
return([resList, t, s, noMoreModel])
if (M):
dec = lambda lsVar, size : [[v, getBinaryDec(M[v], size)] for v in lsVar]
intSol = dec(intVar, lenIopt) if (lenIopt) else []
stateSol = dec(stateVar, ngenes)
regSol = dec(regVar, None)
actVar, repVar = [x[0] for x in regulators], [x[1] for x in regulators]
actSol = getPresentRegulators(C, actVar, M)
repSol = getPresentRegulators(C, repVar, M)
koSol = getPerturbedGenes(C, chiDOWN, "KO")
feSol = getPerturbedGenes(C, chiUP, "FE")
solution = intSol + regSol + stateSol + actSol + repSol + koSol + feSol
resList.append(solution)
verboseIt("Model no. " + str(solNB) + " found:", printSolutions)
if (lenIopt and printSolutions):
verboseIt("> Interaction vector: ", True)
verboseIt(reduce(lambda x,y: x+y, [str(x) for x in rev(Iopt)]), True)
printPretty(intSol)
if (printSolutions):
verboseIt("> GRFs: ", True)
printPretty(regSol)
if (verbose and printSolutions):
verboseIt("> States: ", True)
for n in range(len(stateSol)/(length+1)):
verboseIt(">> Experiment: ", True)
verboseIt(reduce(lambda x,y: x+y, rev(C)), True)
printPretty(stateSol[n*(length+1):(n+1)*(length+1)])
verboseIt("____________________________________\n", printSolutions)
if (stopSol):
return([resList, t, s, True])
## Uniqueness of models: interactions ##
## Add condition newIs != Is ##
## (Is != value of Is in model M) ##
## "interactions": unique interactions ##
## "full": unique interactions OR unique set of ##
## regulation conditions ##
## "paths": unique interactions OR unique set of ##
## regulation conditions OR set of trajectories ##
cond1 = diff_interactions(s, intVar, M)
if (uniqueness == "interactions"):
s.add(cond1)
return([resList, t, s, noMoreModel])
cond2 = different(s, regVar, M)
if (uniqueness == "full"):
s.add(Or(cond2, cond1))
return([resList, t, s, noMoreModel])
if (uniqueness == "paths"):
cond3 = different(s, stateVar, M)
s.add(Or(cond1, Or(cond2, cond3)))
return([resList, t, s, noMoreModel])
verboseIt("MSG: Warning! No correct uniqueness condition detected.", True)
verboseIt("MSG: Current uniqueness condition: \'" + uniqueness + "\'.", True)
verboseIt("MSG: Uniqueness conditions can only be in [\'interactions\', \'full\', \'paths\'].\n", True)
noMoreModel = True
return([resList, t, s, noMoreModel])
#######################
## NETWORK INFERENCE ##
#######################
#' Solve an instance of the GRN inference problem
#'
#' Given the network topology and the regulation
#' function types for each node of the network,
#' find a subset of optional interactions and
#' of regulation function types that comply with
#' the provided experiments
#'
#' @param C node names in the network
#' (character string list)
#' @param CRM for each node (eventually) the gene
#' it directly regulates (if the node is a CRM)
#' @param length maximum experiment length
#' (integer)
#' @param Idef set of definite interactions
#' (list of lists of character strings)
#' @param Iopt set of optional interactions
#' (list of lists of character strings)
#' @param R set of set of possible regulation
#' conditions for each node
#' (list of lists of integers)
#' @param E set of experiments:
#' list of [experiment name, list of (step x gene x GE value)]
#' (list of string x (list of (integer x character string x binary integer)))
#' @param typeT either sync (synchronous) or async (asynchronous)
#' transition
#' (character string)
#' @param solmax maximal number of solutions to return
#' (integer)
#' @param KO knock-down perturbations
#' (list of [experiment name, list of (step x KO(gene) x binary)])
#' @param FE forced expression perturbations
#' (list of [experiment name, list of (step x FE(gene) x binary)])
#' @param uniqueness character string (see getSolution function)
#' @param limreg character string (to limit regulation functions)
#' @param P perturbation list
#' (list of size |C| containing lists with 0 element, or "-" or "+" or both)
#' @param Fixpoint fixpoint constraints
#' (list of [start step of fix point state, experiment name])
#' @param verbose boolean: prints status messages if set to True
#' @return resList list of models where Is and Rs are
#' the instanciated constrained ABN
#' that agree with all the experiments (+ solver)
def grn_solver(C, CRM, length, Idef, Iopt, R, E, typeT, solmax, KO, FE, uniqueness, limreg, P, Fixpoint, verbose=False, printSolutions=True, printmain=True):
## Selected interaction number limit ##
interaction_limit = 0
if (not interaction_limit and Iopt):
interaction_limit = len(Iopt)
#____________________________________________________#
# Initialization of constants and variables #
#____________________________________________________#
if (not solmax):
solmax = 10
s = Solver()
ngenes = len(C)
zero, one = buildZERO(ngenes), buildONE(ngenes)
mustHaveActivator = []
UP, DOWN = [], []
chiUP, chiDOWN = dict(), dict()
for i in range(len(P)):
if ("-" in P[i]):
DOWN.append(C[i])
chiDOWN.setdefault(i, len(DOWN)-1)
if ("+" in P[i]):
UP.append(C[i])
chiUP.setdefault(i, len(UP)-1)
if ("!" in P[i]):
mustHaveActivator.append(i)
## Variable for the subset of optional interactions ##
Is = BitVec("selected_interactions", len(Iopt)) if (Iopt) else false
intVar = [Is]
## Variables for the regulation functions ##
Rs = [BitVec("grf_%s" % node, 5) for node in C]
regVar = Rs
## Variables for regulators ##
regulators = [[BitVec("activators_%s" % gene, ngenes),
BitVec("repressors_%s" % gene, ngenes)] for gene in C]
## Variables for perturbations ##
ko = [BitVec("ko_%s" % e[0], len(DOWN)) for e in E] if (DOWN) else []
fe = [BitVec("fe_%s" % e[0], len(UP)) for e in E] if (UP) else []
regInt = []
stateVar = []
exp_names = [e[0] for e in E]
t = time()
#____________________________________________________#
# Conditions on regulation functions #
#____________________________________________________#
verboseIt("Conditions on regulation functions", verbose)
s = regulation_condition(s, Rs, R, ngenes)
#____________________________________________________#
# Conditions on perturbations #
#____________________________________________________#
s = perturbation_condition(s, KO, ko, exp_names, DOWN, "KO") if (ko and KO) else s
s = perturbation_condition(s, FE, fe, exp_names, UP, "FE") if (fe and FE) else s
#____________________________________________________#
# Conditions on regulators #
#____________________________________________________#
verboseIt("Computation of interactions", verbose)
if (any([len(c) > 0 for c in CRM])):
s = crmInteractions_condition(s, Is, Idef, Iopt, CRM, C)
if (Iopt):
s = interaction_condition(s, interaction_limit, Iopt, Is)
for gene in C:
idx = C.index(gene)
## Build lists of (indices of) ##
## activators and ##
## repressors for input gene ##
## For interaction edges in Iopt ##
## Keep track of the regulatory ##
## interactions for the gene ##
regIntActivators, regIntRepressors = None, None
if (Iopt):
[regIntActivators, regIntRepressors] = buildInteractionDict(Iopt, gene, C, opt=True)
## For interaction edges in Idef ##
if (Idef):
[regIntActivators, regIntRepressors] = buildInteractionDict(Idef, gene, C,
regIntActivators=regIntActivators, regIntRepressors=regIntRepressors)
regInt.append([regIntActivators, regIntRepressors])
## Potential regulator is a ##
## regulator iff. the interaction ##
## where it is involved is ##
## selected or definite ##
activators, repressors = regulators[idx]
s = regulators_condition(s, Is, activators, regIntActivators, default)
s = regulators_condition(s, Is, repressors, regIntRepressors, default)
if (idx in mustHaveActivator):
s = mustHaveActivator_condition(s, activators, ngenes)
regulatorList = lambda regulators, regInt : [regulators+
" of gene "+gene+": "]+["gene "+C[i]+", interaction "
+ str(regInt[i]) + "; " for i in regInt.keys()]
verboseIt(strList2Str(regulatorList("ACTIVATORS", regIntActivators)), verbose)
verboseIt(strList2Str(regulatorList("REPRESSORS", regIntRepressors)), verbose)
verboseIt("Computation of GRFs", verbose)
prepreComputation = [prepreCompute(regulators[ci][0], regulators[ci][1]) for ci in range(len(C))]
#____________________________________________________#
# Conditions on experiments #
#____________________________________________________#
verboseIt("Conditions on experiments", verbose)
for exp in E:
verboseIt("--------- EXPERIMENT \'" + exp[0] + "\'", verbose=printmain)
## State variables ##
q = [BitVec(getState(n, exp[0]), ngenes) for n in range(length+1)]
stateVar += q
## Adding KO and FE constraints ##
if (KO and FE and ko and fe):
[ko_e, s] = pert2full(s, ko[exp_names.index(exp[0])], chiDOWN, "ko_" + exp[0] + "_f", ngenes)
[fe_e, s] = pert2full(s, fe[exp_names.index(exp[0])], chiUP, "fe_" + exp[0] + "_f", ngenes)
res = lambda x : (x & ~ko_e) | fe_e
elif (KO and ko):
[ko_e, s] = pert2full(s, ko[exp_names.index(exp[0])], chiDOWN, "ko_" + exp[0] + "_f", ngenes)
res = lambda x : x & ~ko_e
elif (FE and fe):
[fe_e, s] = pert2full(s, fe[exp_names.index(exp[0])], chiUP, "fe_" + exp[0] + "_f", ngenes)
res = lambda x : x | fe_e
else:
res = lambda x : x
#____________________________________#
## States must define a trajectory ##
## in the search space ##
#____________________________________#
existsFixpoint = filter(lambda x : x[1] == exp[0], Fixpoint)
## Finds the starting step point ##
## for fix point ##
if (existsFixpoint):
sstep = existsFixpoint[0][0]
else:
sstep = None
## Enforces constraint for all i, ##
## 0 <= i < sstep, T(q[i],q[i+1]) ##
s = to_next_state(s, exp[0], prepreComputation, 0, ifthenelse(sstep == None, length, sstep), q, typeT, length, regulators, ngenes, R, Rs, res, verbose)
## Fixpoint constraint ##
## for all i, sstep <= i <= length ##
## T(q[i], q[i+1]) (synchronous) and##
## q[i] = q[i+1] ##
s = fixpoint_condition(s, exp[0], prepreComputation, ifthenelse(sstep==None, length+1, sstep), q, typeT, length, regulators, ngenes, R, Rs, res, verbose)
#____________________________________#
## Experiment values should be ##
## satisfied ##
#____________________________________#
## For each observation in e ##
## ee = { n, gene, value } ##
for [n, gene, value] in exp[1]:
verboseIt("Experiment=\'" + exp[0] + "\', Step="
+ str(n) + ": grf(" + gene + ")=" + str(value), verbose)
s = experiment_condition(s, q, n, C, gene, value)
#____________________________________#
## Solution processing ##
#____________________________________#
[resList, t, s, res] = getSolution(C, length, Iopt, ngenes, t, s, intVar,
regVar, stateVar, regulators, chiDOWN, chiUP, R, uniqueness, verbose,
resList=[], stopSol=(solmax==1), printSolutions=printSolutions)
if (not len(resList)):
return([resList, s, regInt])
sol = 2
while (sol <= solmax and not res):
[resList, t, s, res] = getSolution(C, length, Iopt, ngenes, t, s, intVar,
regVar, stateVar, regulators, chiDOWN, chiUP, R, uniqueness,
verbose, resList=resList, solNB=sol, stopSol=(solmax == sol), printSolutions=printSolutions)
if (res):
return([resList, s, regInt])
sol += 1
if (sol == solmax+1):
verboseIt("Maximum number of solutions reached.\n", printSolutions)
if (sol > 0):
verboseIt("There are solutions.\n", printSolutions)
return([resList, s, regInt])
| 48.144543
| 182
| 0.619876
|
9f5c6eada76e6d79d583ad2f5bda0af3016ecc52
| 399
|
py
|
Python
|
website/doctype/website_script/website_script.py
|
gangadhar-kadam/sapphite_lib
|
90cecbd53f781747d09d967ef5794fc8d77d3fe0
|
[
"MIT"
] | null | null | null |
website/doctype/website_script/website_script.py
|
gangadhar-kadam/sapphite_lib
|
90cecbd53f781747d09d967ef5794fc8d77d3fe0
|
[
"MIT"
] | null | null | null |
website/doctype/website_script/website_script.py
|
gangadhar-kadam/sapphite_lib
|
90cecbd53f781747d09d967ef5794fc8d77d3fe0
|
[
"MIT"
] | 1
|
2018-10-26T01:06:38.000Z
|
2018-10-26T01:06:38.000Z
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# MIT License. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import webnotes
class DocType:
def __init__(self, d, dl):
self.doc, self.doclist = d, dl
def on_update(self):
# make js and css
from website.doctype.website_settings.make_web_include_files import make
make()
| 24.9375
| 74
| 0.756892
|
12ac8d4ac52981d06bb556696882e346654cd6d5
| 2,063
|
py
|
Python
|
tests/helpers.py
|
kissmikijr/hammurabi
|
42c7d73a68d434a941b69ccc919a688193ef4990
|
[
"Apache-2.0"
] | 12
|
2020-03-15T22:53:20.000Z
|
2021-12-06T13:35:07.000Z
|
tests/helpers.py
|
kissmikijr/hammurabi
|
42c7d73a68d434a941b69ccc919a688193ef4990
|
[
"Apache-2.0"
] | 304
|
2020-01-14T08:55:29.000Z
|
2022-03-17T08:00:58.000Z
|
tests/helpers.py
|
kissmikijr/hammurabi
|
42c7d73a68d434a941b69ccc919a688193ef4990
|
[
"Apache-2.0"
] | 2
|
2020-05-29T13:14:31.000Z
|
2021-01-07T14:03:51.000Z
|
from typing import Any
from hammurabi.mixins import GitHubMixin, GitMixin, PullRequestHelperMixin
from hammurabi.rules.base import Precondition, Rule
class ExamplePrecondition(Precondition):
def task(self) -> bool:
return self.param
class ExampleRule(Rule):
"""ExampleRule docstring"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.made_changes = True
def task(self) -> Any:
"""ExampleRule task docstring"""
return self.param
class ExampleExceptionRule(Rule):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.made_changes = False
def task(self) -> Any:
raise Exception(self.param)
class ExampleGitMixinRule(ExampleRule, GitMixin):
pass
class ExamplePullRequestHelperMixinRule(ExampleRule, GitMixin, PullRequestHelperMixin):
pass
class ExampleGitHubMixinRule(ExampleRule, GitHubMixin):
pass
class ExampleMROFaultyPrecondition(Precondition):
"""
This example precondition exists to ensure MRO related cases like
https://github.com/gabor-boros/hammurabi/issues/216 are tested.
"""
def __init__(self, param=None) -> None:
stacks = self.validate(param, required=True)
super().__init__(None, stacks)
def task(self) -> bool:
return self.param
PASSING_PRECONDITION = ExamplePrecondition(name="Passing", param=True)
FAILING_PRECONDITION = ExamplePrecondition(name="Failing", param=False)
def get_passing_rule(name: str = "Passing"):
return ExampleRule(name=name, param="passing rule")
def get_failing_rule():
return ExampleExceptionRule(name="Failing", param="raise exception")
def get_git_mixin_consumer():
return ExampleGitMixinRule(name="Passing", param="passing rule")
def get_pull_request_helper_mixin_consumer():
return ExamplePullRequestHelperMixinRule(name="Passing", param="passing rule")
def get_github_mixin_consumer():
return ExampleGitHubMixinRule(name="Passing", param="passing rule")
| 25.469136
| 87
| 0.719825
|
cbc2386f7995105b0f42836e291a232ef609ff62
| 6,940
|
py
|
Python
|
reinvent_chemistry/library_design/fragment_reaction_slice_enumerator.py
|
MolecularAI/reinvent-chemistry
|
bf0235bc2b1168b1db54c1e04bdba04b166ab7bf
|
[
"MIT"
] | null | null | null |
reinvent_chemistry/library_design/fragment_reaction_slice_enumerator.py
|
MolecularAI/reinvent-chemistry
|
bf0235bc2b1168b1db54c1e04bdba04b166ab7bf
|
[
"MIT"
] | null | null | null |
reinvent_chemistry/library_design/fragment_reaction_slice_enumerator.py
|
MolecularAI/reinvent-chemistry
|
bf0235bc2b1168b1db54c1e04bdba04b166ab7bf
|
[
"MIT"
] | 1
|
2022-03-22T15:24:13.000Z
|
2022-03-22T15:24:13.000Z
|
from collections import OrderedDict
from typing import List, Tuple, Set
from rdkit.Chem.rdchem import Mol
from reinvent_chemistry import TransformationTokens, Conversions
from reinvent_chemistry.library_design import FragmentFilter
from reinvent_chemistry.library_design.dtos import FilteringConditionDTO, ReactionDTO
from reinvent_chemistry.library_design.fragment_reactions import FragmentReactions
from reinvent_chemistry.library_design.fragmented_molecule import FragmentedMolecule
class FragmentReactionSliceEnumerator:
def __init__(self, chemical_reactions: List[ReactionDTO],
scaffold_conditions: List[FilteringConditionDTO],
decoration_conditions: List[FilteringConditionDTO]):
"""
Class to enumerate slicings given certain conditions.
        :param chemical_reactions: A list of ReactionDTO objects.
:param scaffold_conditions: Conditions to use when filtering scaffolds obtained from slicing molecules (see FragmentFilter).
:param decoration_conditions: Conditions to use when filtering decorations obtained from slicing molecules.
"""
self._tockens = TransformationTokens()
self._chemical_reactions = chemical_reactions
self._scaffold_filter = FragmentFilter(scaffold_conditions)
self._decoration_filter = FragmentFilter(decoration_conditions)
self._reactions = FragmentReactions()
self._conversions = Conversions()
def enumerate(self, molecule: Mol, cuts: int) -> List[FragmentedMolecule]:
"""
Enumerates all possible combination of slicings of a molecule given a number of cuts.
:param molecule: A mol object with the molecule to slice.
:param cuts: The number of cuts to perform.
:return : A list with all the possible (scaffold, decorations) pairs as SlicedMol objects.
"""
original_smiles = self._conversions.mol_to_smiles(molecule)
sliced_mols = set()
for cut in range(1, cuts + 1):
if cut == 1:
fragment_pairs = self._reactions.slice_molecule_to_fragments(molecule, self._chemical_reactions)
for pair in fragment_pairs:
for indx, _ in enumerate(pair):
decorations = self._select_all_except(pair, indx)
decoration = self._conversions.copy_mol(decorations[0])
labeled_decoration = OrderedDict()
labeled_decoration[0] = decoration # [ for decoration in decorations]
scaffold = self._conversions.copy_mol(pair[indx])
labeled_scaffold = self._label_scaffold(scaffold)
# TODO: filtering should take place after scaffold is generated
sliced_mol = FragmentedMolecule(labeled_scaffold, labeled_decoration, original_smiles)
if sliced_mol.original_smiles == sliced_mol.reassembled_smiles:
sliced_mols.add(sliced_mol)
else:
for slice in sliced_mols:
to_add = self._scaffold_slicing(slice, cut)
sliced_mols = sliced_mols.union(to_add)
return list(filter(self._filter, sliced_mols))
def _scaffold_slicing(self, slice: FragmentedMolecule, cut: int) -> Set[FragmentedMolecule]:
to_add = set()
if slice.decorations_count() == cut - 1:
fragment_pairs = self._reactions.slice_molecule_to_fragments(slice.scaffold, self._chemical_reactions)
for pair in fragment_pairs:
scaffold, decoration = self._split_scaffold_from_decorations(pair, cut)
if scaffold:
labeled_scaffold = self._label_scaffold(scaffold)
labeled_scaffold = self._conversions.copy_mol(labeled_scaffold)
decoration = self._conversions.copy_mol(decoration)
sliced_mol = self._create_sliced_molecule(slice, labeled_scaffold, decoration)
if sliced_mol.original_smiles == sliced_mol.reassembled_smiles:
to_add.add(sliced_mol)
return to_add
def _select_all_except(self, fragments: Tuple[Mol], to_exclude: int) -> List[Mol]:
return [fragment for indx, fragment in enumerate(fragments) if indx != to_exclude]
def _filter(self, sliced_mol: FragmentedMolecule) -> bool:
return self._scaffold_filter.filter(sliced_mol.scaffold) \
and all(self._decoration_filter.filter(dec) for dec in sliced_mol.decorations.values())
def _split_scaffold_from_decorations(self, pair: Tuple[Mol], cuts: int) -> Tuple[Mol, Mol]:
decoration = None
scaffold = None
for frag in pair:
num_att = len(
[atom for atom in frag.GetAtoms() if atom.GetSymbol() == self._tockens.ATTACHMENT_POINT_TOKEN])
# detect whether there is one fragment with as many attachment points as cuts (scaffold)
# the rest are decorations
if num_att == cuts and not scaffold:
scaffold = frag
if num_att == 1:
decoration = frag
if decoration and scaffold:
return scaffold, decoration
else:
return (None, None)
def _label_scaffold(self, scaffold: Mol) -> Mol:
highest_number = self._find_highest_number(scaffold)
for atom in scaffold.GetAtoms():
if atom.GetSymbol() == self._tockens.ATTACHMENT_POINT_TOKEN:
try:
atom_number = int(atom.GetProp("molAtomMapNumber"))
except:
highest_number += 1
num = atom.GetIsotope()
atom.SetIsotope(0)
atom.SetProp("molAtomMapNumber", str(highest_number))
scaffold.UpdatePropertyCache()
return scaffold
def _find_highest_number(self, cut_mol: Mol) -> int:
highest_number = -1
for atom in cut_mol.GetAtoms():
if atom.GetSymbol() == self._tockens.ATTACHMENT_POINT_TOKEN:
try:
atom_number = int(atom.GetProp("molAtomMapNumber"))
if highest_number < atom_number:
highest_number = atom_number
except:
pass
return highest_number
def _create_sliced_molecule(self, original_sliced_mol: FragmentedMolecule, scaffold: Mol,
decoration: Mol) -> FragmentedMolecule:
old_decorations = OrderedDict()
for k, v in original_sliced_mol.decorations.items():
old_decorations[k] = v
old_decorations[original_sliced_mol.decorations_count()] = decoration
sliced_mol = FragmentedMolecule(scaffold, old_decorations, original_sliced_mol.original_smiles)
return sliced_mol
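# A minimal usage sketch (assumptions, not part of the original module): the
# reaction and filtering DTO lists are placeholders whose construction is
# defined elsewhere in reinvent_chemistry.library_design, and the SMILES
# string is arbitrary.
#
#   from rdkit import Chem
#   enumerator = FragmentReactionSliceEnumerator(chemical_reactions=reactions,
#                                                scaffold_conditions=[],
#                                                decoration_conditions=[])
#   sliced = enumerator.enumerate(Chem.MolFromSmiles("CCOc1ccccc1"), cuts=2)
#   # each FragmentedMolecule pairs a numbered scaffold with its decorations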
| 48.531469
| 132
| 0.646974
|
f1e0b7f719a2a9afc23ff8b1d006e4f17a2cdfe5
| 1,092
|
py
|
Python
|
tests/kafka_cluster_manager/decommission_test.py
|
EladLeev/kafka-utils
|
74831206648512db1a29426c6ebb428b33820d04
|
[
"Apache-2.0"
] | 302
|
2016-05-18T02:05:04.000Z
|
2022-03-28T21:36:28.000Z
|
tests/kafka_cluster_manager/decommission_test.py
|
EladLeev/kafka-utils
|
74831206648512db1a29426c6ebb428b33820d04
|
[
"Apache-2.0"
] | 135
|
2016-05-17T23:15:16.000Z
|
2021-11-04T13:35:51.000Z
|
tests/kafka_cluster_manager/decommission_test.py
|
EladLeev/kafka-utils
|
74831206648512db1a29426c6ebb428b33820d04
|
[
"Apache-2.0"
] | 133
|
2016-05-18T10:23:05.000Z
|
2022-01-29T17:24:17.000Z
|
from __future__ import unicode_literals
from argparse import Namespace
import mock
import pytest
from kafka_utils.kafka_cluster_manager.cluster_info \
.partition_count_balancer import PartitionCountBalancer
from kafka_utils.kafka_cluster_manager.cmds import decommission
from tests.kafka_cluster_manager.helper import broker_range
@pytest.fixture
def command_instance():
cmd = decommission.DecommissionCmd()
cmd.args = mock.Mock(spec=Namespace)
cmd.args.force_progress = False
cmd.args.broker_ids = []
cmd.args.auto_max_movement_size = True
cmd.args.max_partition_movements = 10
cmd.args.max_leader_changes = 10
return cmd
def test_decommission_no_partitions_to_move(command_instance, create_cluster_topology):
cluster_one_broker_empty = create_cluster_topology(
assignment={('topic', 0): [0, 1]},
brokers=broker_range(3),
)
    command_instance.args.broker_ids = [2]
balancer = PartitionCountBalancer(cluster_one_broker_empty, command_instance.args)
command_instance.run_command(cluster_one_broker_empty, balancer)
| 32.117647
| 87
| 0.789377
|
acba9566186785c294f2716683db9b89227de2fd
| 941
|
py
|
Python
|
setup.py
|
raph92/forkexplorer
|
cef85857e1d7034bbe03e0b87c444969f0c60b4e
|
[
"Unlicense"
] | null | null | null |
setup.py
|
raph92/forkexplorer
|
cef85857e1d7034bbe03e0b87c444969f0c60b4e
|
[
"Unlicense"
] | null | null | null |
setup.py
|
raph92/forkexplorer
|
cef85857e1d7034bbe03e0b87c444969f0c60b4e
|
[
"Unlicense"
] | null | null | null |
import re
from os import path
from setuptools import setup
from io import open as io_open
here = path.abspath(path.dirname(__file__))
with open("requirements.txt") as f:
dependencies = f.read().splitlines()
def readall(*args):
with io_open(path.join(here, *args), encoding="utf-8") as fp:
return fp.read()
metadata = dict(
re.findall(r"""__([a-z]+)__ = "([^"]+)""", readall("forkexplorer", "__init__.py"))
)
setup(
name='forkexplorer',
version=metadata["version"],
packages=['forkexplorer', 'forkexplorer.tests'],
url='https://github.com/raph92/forkexplorer',
license="GPLv3",
author='Raphael N',
author_email='rtnanje@gmail.com',
maintainer="Raphael N",
description='Easily get the latest fork of a Github repo',
entry_points={
'console_scripts': ['forkexplorer=forkexplorer.cli:main'],
},
install_requires=dependencies,
platforms=["linux", "linux2"],
)
| 26.138889
| 86
| 0.664187
|
b9a6c4e456dd3c421f4f5568dc0ce5512fc32191
| 267
|
py
|
Python
|
bp_content/themes/default/config/localhost.py
|
chuycepeda/mboilerplate
|
1fad3f10d491e5f6e051ff615370073e38dba1fe
|
[
"MIT"
] | 5
|
2016-02-23T17:18:16.000Z
|
2016-08-05T22:26:29.000Z
|
bp_content/themes/default/config/localhost.py
|
chuycepeda/mboilerplate
|
1fad3f10d491e5f6e051ff615370073e38dba1fe
|
[
"MIT"
] | 1
|
2016-03-19T02:02:32.000Z
|
2016-05-09T05:43:36.000Z
|
bp_content/themes/default/config/localhost.py
|
chuycepeda/mboilerplate
|
1fad3f10d491e5f6e051ff615370073e38dba1fe
|
[
"MIT"
] | 9
|
2016-02-19T18:56:18.000Z
|
2019-01-13T16:50:05.000Z
|
config = {
# This config file will be detected in localhost environment and values defined here will overwrite those in config.py
'environment': "localhost",
# ----> ADD MORE CONFIGURATION OPTIONS HERE <----
'app_name': "app @localhost"
}
| 24.272727
| 122
| 0.655431
|
d99d8fa4eba56eb2c2825341dda5b316813dfdf5
| 526
|
py
|
Python
|
jobs/migrations/0010_auto_20160127_2118.py
|
Santiago-vdk/jabbs
|
66d03d1033e0a6b1a2770bb0ddbadec78f2570df
|
[
"Apache-2.0"
] | 1
|
2018-05-02T11:06:52.000Z
|
2018-05-02T11:06:52.000Z
|
jobs/migrations/0010_auto_20160127_2118.py
|
Santiago-vdk/jabbs
|
66d03d1033e0a6b1a2770bb0ddbadec78f2570df
|
[
"Apache-2.0"
] | null | null | null |
jobs/migrations/0010_auto_20160127_2118.py
|
Santiago-vdk/jabbs
|
66d03d1033e0a6b1a2770bb0ddbadec78f2570df
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-28 03:18
from __future__ import unicode_literals
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('jobs', '0009_post'),
]
operations = [
migrations.DeleteModel(
name='Post',
),
migrations.AddField(
model_name='job',
name='description',
field=ckeditor.fields.RichTextField(blank=True),
),
]
| 21.04
| 60
| 0.593156
|
39fb2f97c54543fb098611803480460ee06e6b19
| 1,255
|
py
|
Python
|
custom_components/rika_firenet/entity.py
|
antibill51/rika-firenet-custom-component
|
ad81d12a466d953148ebbb1440f5fd8d81edd1d2
|
[
"MIT"
] | 2
|
2022-02-05T11:53:23.000Z
|
2022-02-19T23:55:56.000Z
|
custom_components/rika_firenet/entity.py
|
antibill51/rika-firenet-custom-component
|
ad81d12a466d953148ebbb1440f5fd8d81edd1d2
|
[
"MIT"
] | null | null | null |
custom_components/rika_firenet/entity.py
|
antibill51/rika-firenet-custom-component
|
ad81d12a466d953148ebbb1440f5fd8d81edd1d2
|
[
"MIT"
] | null | null | null |
import logging
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN, NAME, DEFAULT_NAME, VERSION
from .core import RikaFirenetStove, RikaFirenetCoordinator
_LOGGER = logging.getLogger(__name__)
class RikaFirenetEntity(CoordinatorEntity):
def __init__(self, config_entry, stove: RikaFirenetStove, coordinator: RikaFirenetCoordinator, suffix=None):
super().__init__(coordinator)
self._config_entry = config_entry
self._stove = stove
self._u_id = stove.get_id()
if suffix is not None:
self._name = f"{stove.get_name()} {suffix}"
self._unique_id = f"{suffix} {stove.get_name()}"
else:
self._name = stove.get_name()
self._unique_id = stove.get_id()
_LOGGER.info('RikaFirenetEntity creation with name: ' + self._name + ' unique_id: ' + self._unique_id)
@property
def unique_id(self):
return self._unique_id
@property
def name(self):
return self._name
@property
def device_info(self):
return {
"identifiers": {(DOMAIN, self._u_id)},
"name": NAME,
"model": VERSION,
"manufacturer": DEFAULT_NAME,
}
| 29.880952
| 112
| 0.645418
|
8b4770d61d6649128ea7bc884dcd302a6d7bad24
| 518
|
py
|
Python
|
tests/test/algorithms/convert/QuaToSM/test.py
|
Bestfast/reamberPy
|
91b76ca6adf11fbe8b7cee7c186481776a4d7aaa
|
[
"MIT"
] | null | null | null |
tests/test/algorithms/convert/QuaToSM/test.py
|
Bestfast/reamberPy
|
91b76ca6adf11fbe8b7cee7c186481776a4d7aaa
|
[
"MIT"
] | null | null | null |
tests/test/algorithms/convert/QuaToSM/test.py
|
Bestfast/reamberPy
|
91b76ca6adf11fbe8b7cee7c186481776a4d7aaa
|
[
"MIT"
] | null | null | null |
import unittest
from reamber.algorithms.convert.QuaToSM import QuaToSM
from reamber.quaver.QuaMap import QuaMap
from tests.test.RSC_PATHS import *
# import logging
#
# logging.basicConfig(filename="event.log", filemode="w+", level=logging.DEBUG)
class TestQuaToSM(unittest.TestCase):
# @profile
def test(self):
# Complex BPM Points
qua = QuaMap.readFile(QUA_NEURO_CLOUD)
sm = QuaToSM.convert(qua)
# sm.writeFile("out.sm")
if __name__ == '__main__':
unittest.main()
| 19.923077
| 79
| 0.696911
|
8af2408b5022b26d56507aa9e0d8a01808636797
| 2,291
|
py
|
Python
|
tests/test_ops/test_voxelization.py
|
LiuXiaoxuanPKU/mmcv
|
479cf3a088e2b1eb708f4a29db24423eddf8ee0e
|
[
"Apache-2.0"
] | 1
|
2022-02-17T04:42:51.000Z
|
2022-02-17T04:42:51.000Z
|
tests/test_ops/test_voxelization.py
|
LiuXiaoxuanPKU/mmcv
|
479cf3a088e2b1eb708f4a29db24423eddf8ee0e
|
[
"Apache-2.0"
] | 1
|
2022-01-23T13:28:49.000Z
|
2022-01-23T13:28:49.000Z
|
tests/test_ops/test_voxelization.py
|
LiuXiaoxuanPKU/mmcv
|
479cf3a088e2b1eb708f4a29db24423eddf8ee0e
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pytest
import torch
from mmcv.ops import Voxelization
def _get_voxel_points_indices(points, coors, voxel):
result_form = np.equal(coors, voxel)
return result_form[:, 0] & result_form[:, 1] & result_form[:, 2]
@pytest.mark.parametrize('device_type', [
'cpu',
pytest.param(
'cuda:0',
marks=pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support'))
])
def test_voxelization(device_type):
voxel_size = [0.5, 0.5, 0.5]
point_cloud_range = [0, -40, -3, 70.4, 40, 1]
voxel_dict = np.load(
'tests/data/for_3d_ops/test_voxel.npy', allow_pickle=True).item()
expected_coors = voxel_dict['coors']
expected_voxels = voxel_dict['voxels']
expected_num_points_per_voxel = voxel_dict['num_points_per_voxel']
points = voxel_dict['points']
points = torch.tensor(points)
max_num_points = -1
dynamic_voxelization = Voxelization(voxel_size, point_cloud_range,
max_num_points)
max_num_points = 1000
hard_voxelization = Voxelization(voxel_size, point_cloud_range,
max_num_points)
device = torch.device(device_type)
# test hard_voxelization on cpu/gpu
points = points.contiguous().to(device)
coors, voxels, num_points_per_voxel = hard_voxelization.forward(points)
coors = coors.cpu().detach().numpy()
voxels = voxels.cpu().detach().numpy()
num_points_per_voxel = num_points_per_voxel.cpu().detach().numpy()
assert np.all(coors == expected_coors)
assert np.all(voxels == expected_voxels)
assert np.all(num_points_per_voxel == expected_num_points_per_voxel)
# test dynamic_voxelization on cpu/gpu
coors = dynamic_voxelization.forward(points)
coors = coors.cpu().detach().numpy()
points = points.cpu().detach().numpy()
for i in range(expected_voxels.shape[0]):
indices = _get_voxel_points_indices(points, coors, expected_voxels[i])
num_points_current_voxel = points[indices].shape[0]
assert num_points_current_voxel > 0
assert np.all(
points[indices] == expected_coors[i][:num_points_current_voxel])
assert num_points_current_voxel == expected_num_points_per_voxel[i]
| 36.951613
| 78
| 0.683544
|
db88383115b013bc6cb96c2e14d3b685ce65168c
| 104,301
|
py
|
Python
|
superset/viz.py
|
rijojoseph07/superset
|
94893ff280e1db3998cf71dffb7731a6a3689b33
|
[
"Apache-2.0"
] | null | null | null |
superset/viz.py
|
rijojoseph07/superset
|
94893ff280e1db3998cf71dffb7731a6a3689b33
|
[
"Apache-2.0"
] | 47
|
2021-02-17T18:43:18.000Z
|
2022-02-23T10:29:47.000Z
|
superset/viz.py
|
rc-ontruck/incubator-superset
|
5757b84857b100606871e8159a7eb5e701b33a26
|
[
"Apache-2.0"
] | 1
|
2022-01-14T18:00:04.000Z
|
2022-01-14T18:00:04.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
"""This module contains the 'Viz' objects
These objects represent the backend of all the visualizations that
Superset can render.
"""
import copy
import inspect
import logging
import math
import re
from collections import defaultdict, OrderedDict
from datetime import date, datetime, timedelta
from itertools import product
from typing import (
Any,
Callable,
cast,
Dict,
List,
Optional,
Set,
Tuple,
Type,
TYPE_CHECKING,
Union,
)
import geohash
import numpy as np
import pandas as pd
import polyline
import simplejson as json
from dateutil import relativedelta as rdelta
from flask import request
from flask_babel import lazy_gettext as _
from geopy.point import Point
from pandas.tseries.frequencies import to_offset
from superset import app, db, is_feature_enabled
from superset.constants import NULL_STRING
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.exceptions import (
CacheLoadError,
NullValueException,
QueryObjectValidationError,
SpatialException,
)
from superset.extensions import cache_manager, security_manager
from superset.models.cache import CacheKey
from superset.models.helpers import QueryResult
from superset.typing import QueryObjectDict, VizData, VizPayload
from superset.utils import core as utils
from superset.utils.cache import set_and_log_cache
from superset.utils.core import (
DTTM_ALIAS,
JS_MAX_INTEGER,
merge_extra_filters,
QueryMode,
to_adhoc,
)
from superset.utils.date_parser import get_since_until, parse_past_timedelta
from superset.utils.dates import datetime_to_epoch
from superset.utils.hashing import md5_sha_from_str
import dataclasses # isort:skip
if TYPE_CHECKING:
from superset.connectors.base.models import BaseDatasource
config = app.config
stats_logger = config["STATS_LOGGER"]
relative_start = config["DEFAULT_RELATIVE_START_TIME"]
relative_end = config["DEFAULT_RELATIVE_END_TIME"]
logger = logging.getLogger(__name__)
METRIC_KEYS = [
"metric",
"metrics",
"percent_metrics",
"metric_2",
"secondary_metric",
"x",
"y",
"size",
]
# This regex is to get user defined filter column name, which is the first param in the filter_values function.
# see the definition of filter_values template:
# https://github.com/apache/superset/blob/24ad6063d736c1f38ad6f962e586b9b1a21946af/superset/jinja_context.py#L63
FILTER_VALUES_REGEX = re.compile(r"filter_values\(['\"](\w+)['\"]\,")
class BaseViz:
"""All visualizations derive this base class"""
viz_type: Optional[str] = None
verbose_name = "Base Viz"
credits = ""
is_timeseries = False
cache_type = "df"
enforce_numerical_metrics = True
def __init__(
self,
datasource: "BaseDatasource",
form_data: Dict[str, Any],
force: bool = False,
force_cached: bool = False,
) -> None:
if not datasource:
raise QueryObjectValidationError(_("Viz is missing a datasource"))
self.datasource = datasource
self.request = request
self.viz_type = form_data.get("viz_type")
self.form_data = form_data
self.query = ""
self.token = utils.get_form_data_token(form_data)
self.groupby: List[str] = self.form_data.get("groupby") or []
self.time_shift = timedelta()
self.status: Optional[str] = None
self.error_msg = ""
self.results: Optional[QueryResult] = None
self.errors: List[Dict[str, Any]] = []
self.force = force
self._force_cached = force_cached
self.from_dttm: Optional[datetime] = None
self.to_dttm: Optional[datetime] = None
self._extra_chart_data: List[Tuple[str, pd.DataFrame]] = []
self.process_metrics()
self.applied_filters: List[Dict[str, str]] = []
self.rejected_filters: List[Dict[str, str]] = []
@property
def force_cached(self) -> bool:
return self._force_cached
def process_metrics(self) -> None:
# metrics in Viz is order sensitive, so metric_dict should be
# OrderedDict
self.metric_dict = OrderedDict()
fd = self.form_data
for mkey in METRIC_KEYS:
val = fd.get(mkey)
if val:
if not isinstance(val, list):
val = [val]
for o in val:
label = utils.get_metric_name(o)
self.metric_dict[label] = o
# Cast to list needed to return serializable object in py3
self.all_metrics = list(self.metric_dict.values())
self.metric_labels = list(self.metric_dict.keys())
@staticmethod
def handle_js_int_overflow(
data: Dict[str, List[Dict[str, Any]]]
) -> Dict[str, List[Dict[str, Any]]]:
for d in data.get("records", {}):
for k, v in list(d.items()):
if isinstance(v, int):
                    # if an int is too big for JavaScript to handle
# convert it to a string
if abs(v) > JS_MAX_INTEGER:
d[k] = str(v)
return data
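    # A minimal sketch of the conversion above (the payload is hypothetical):
    # any int whose absolute value exceeds JS_MAX_INTEGER is stringified so the
    # JavaScript frontend does not silently lose precision.
    #
    #   payload = {"records": [{"id": 2 ** 60, "name": "a"}]}
    #   BaseViz.handle_js_int_overflow(payload)
    #   # -> {"records": [{"id": "1152921504606846976", "name": "a"}]}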
def run_extra_queries(self) -> None:
"""Lifecycle method to use when more than one query is needed
In rare-ish cases, a visualization may need to execute multiple
queries. That is the case for FilterBox or for time comparison
in Line chart for instance.
In those cases, we need to make sure these queries run before the
main `get_payload` method gets called, so that the overall caching
metadata can be right. The way it works here is that if any of
the previous `get_df_payload` calls hit the cache, the main
payload's metadata will reflect that.
The multi-query support may need more work to become a first class
use case in the framework, and for the UI to reflect the subtleties
(show that only some of the queries were served from cache for
instance). In the meantime, since multi-query is rare, we treat
it with a bit of a hack. Note that the hack became necessary
when moving from caching the visualization's data itself, to caching
the underlying query(ies).
"""
pass
def apply_rolling(self, df: pd.DataFrame) -> pd.DataFrame:
fd = self.form_data
rolling_type = fd.get("rolling_type")
rolling_periods = int(fd.get("rolling_periods") or 0)
min_periods = int(fd.get("min_periods") or 0)
if rolling_type in ("mean", "std", "sum") and rolling_periods:
kwargs = dict(window=rolling_periods, min_periods=min_periods)
if rolling_type == "mean":
df = df.rolling(**kwargs).mean()
elif rolling_type == "std":
df = df.rolling(**kwargs).std()
elif rolling_type == "sum":
df = df.rolling(**kwargs).sum()
elif rolling_type == "cumsum":
df = df.cumsum()
if min_periods:
df = df[min_periods:]
if df.empty:
raise QueryObjectValidationError(
_(
"Applied rolling window did not return any data. Please make sure "
"the source query satisfies the minimum periods defined in the "
"rolling window."
)
)
return df
def get_samples(self) -> List[Dict[str, Any]]:
query_obj = self.query_obj()
query_obj.update(
{
"is_timeseries": False,
"groupby": [],
"metrics": [],
"orderby": [],
"row_limit": config["SAMPLES_ROW_LIMIT"],
"columns": [o.column_name for o in self.datasource.columns],
}
)
df = self.get_df_payload(query_obj)["df"] # leverage caching logic
return df.to_dict(orient="records")
def get_df(self, query_obj: Optional[QueryObjectDict] = None) -> pd.DataFrame:
"""Returns a pandas dataframe based on the query object"""
if not query_obj:
query_obj = self.query_obj()
if not query_obj:
return pd.DataFrame()
self.error_msg = ""
timestamp_format = None
if self.datasource.type == "table":
granularity_col = self.datasource.get_column(query_obj["granularity"])
if granularity_col:
timestamp_format = granularity_col.python_date_format
# The datasource here can be different backend but the interface is common
self.results = self.datasource.query(query_obj)
self.query = self.results.query
self.status = self.results.status
self.errors = self.results.errors
df = self.results.df
# Transform the timestamp we received from database to pandas supported
# datetime format. If no python_date_format is specified, the pattern will
# be considered as the default ISO date format
# If the datetime format is unix, the parse will use the corresponding
# parsing logic.
if not df.empty:
df = utils.normalize_dttm_col(
df=df,
timestamp_format=timestamp_format,
offset=self.datasource.offset,
time_shift=self.time_shift,
)
if self.enforce_numerical_metrics:
self.df_metrics_to_num(df)
df.replace([np.inf, -np.inf], np.nan, inplace=True)
return df
def df_metrics_to_num(self, df: pd.DataFrame) -> None:
"""Converting metrics to numeric when pandas.read_sql cannot"""
metrics = self.metric_labels
for col, dtype in df.dtypes.items():
if dtype.type == np.object_ and col in metrics:
df[col] = pd.to_numeric(df[col], errors="coerce")
def process_query_filters(self) -> None:
utils.convert_legacy_filters_into_adhoc(self.form_data)
merge_extra_filters(self.form_data)
utils.split_adhoc_filters_into_base_filters(self.form_data)
def query_obj(self) -> QueryObjectDict:
"""Building a query object"""
form_data = self.form_data
self.process_query_filters()
gb = self.groupby
metrics = self.all_metrics or []
columns = form_data.get("columns") or []
# merge list and dedup while preserving order
groupby = list(OrderedDict.fromkeys(gb + columns))
is_timeseries = self.is_timeseries
if DTTM_ALIAS in groupby:
groupby.remove(DTTM_ALIAS)
is_timeseries = True
granularity = form_data.get("granularity") or form_data.get("granularity_sqla")
limit = int(form_data.get("limit") or 0)
timeseries_limit_metric = form_data.get("timeseries_limit_metric")
row_limit = int(form_data.get("row_limit") or config["ROW_LIMIT"])
# default order direction
order_desc = form_data.get("order_desc", True)
try:
since, until = get_since_until(
relative_start=relative_start,
relative_end=relative_end,
time_range=form_data.get("time_range"),
since=form_data.get("since"),
until=form_data.get("until"),
)
except ValueError as ex:
raise QueryObjectValidationError(str(ex))
time_shift = form_data.get("time_shift", "")
self.time_shift = parse_past_timedelta(time_shift)
from_dttm = None if since is None else (since - self.time_shift)
to_dttm = None if until is None else (until - self.time_shift)
if from_dttm and to_dttm and from_dttm > to_dttm:
raise QueryObjectValidationError(
_("From date cannot be larger than to date")
)
self.from_dttm = from_dttm
self.to_dttm = to_dttm
# extras are used to query elements specific to a datasource type
# for instance the extra where clause that applies only to Tables
extras = {
"druid_time_origin": form_data.get("druid_time_origin", ""),
"having": form_data.get("having", ""),
"having_druid": form_data.get("having_filters", []),
"time_grain_sqla": form_data.get("time_grain_sqla"),
"time_range_endpoints": form_data.get("time_range_endpoints"),
"where": form_data.get("where", ""),
}
return {
"granularity": granularity,
"from_dttm": from_dttm,
"to_dttm": to_dttm,
"is_timeseries": is_timeseries,
"groupby": groupby,
"metrics": metrics,
"row_limit": row_limit,
"filter": self.form_data.get("filters", []),
"timeseries_limit": limit,
"extras": extras,
"timeseries_limit_metric": timeseries_limit_metric,
"order_desc": order_desc,
}
@property
def cache_timeout(self) -> int:
if self.form_data.get("cache_timeout") is not None:
return int(self.form_data["cache_timeout"])
if self.datasource.cache_timeout is not None:
return self.datasource.cache_timeout
if (
hasattr(self.datasource, "database")
            and self.datasource.database.cache_timeout is not None
        ):
return self.datasource.database.cache_timeout
if config["DATA_CACHE_CONFIG"].get("CACHE_DEFAULT_TIMEOUT") is not None:
return config["DATA_CACHE_CONFIG"]["CACHE_DEFAULT_TIMEOUT"]
return config["CACHE_DEFAULT_TIMEOUT"]
def get_json(self) -> str:
return json.dumps(
self.get_payload(), default=utils.json_int_dttm_ser, ignore_nan=True
)
def cache_key(self, query_obj: QueryObjectDict, **extra: Any) -> str:
"""
The cache key is made out of the key/values in `query_obj`, plus any
other key/values in `extra`.
We remove datetime bounds that are hard values, and replace them with
        the user-provided inputs to bounds, which may be time-relative (as in
"5 days ago" or "now").
The `extra` arguments are currently used by time shift queries, since
        different time shifts will differ only in the `from_dttm`, `to_dttm`,
`inner_from_dttm`, and `inner_to_dttm` values which are stripped.
"""
cache_dict = copy.copy(query_obj)
cache_dict.update(extra)
for k in ["from_dttm", "to_dttm", "inner_from_dttm", "inner_to_dttm"]:
if k in cache_dict:
del cache_dict[k]
cache_dict["time_range"] = self.form_data.get("time_range")
cache_dict["datasource"] = self.datasource.uid
cache_dict["extra_cache_keys"] = self.datasource.get_extra_cache_keys(query_obj)
cache_dict["rls"] = (
security_manager.get_rls_ids(self.datasource)
if is_feature_enabled("ROW_LEVEL_SECURITY")
and self.datasource.is_rls_supported
else []
)
cache_dict["changed_on"] = self.datasource.changed_on
json_data = self.json_dumps(cache_dict, sort_keys=True)
return md5_sha_from_str(json_data)
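    # A minimal sketch of the caching behaviour above (the `viz` instance and its
    # form_data are assumed, not defined here): because hard datetime bounds are
    # stripped before hashing, a relative time_range such as "Last week" yields
    # the same cache key on different days, while literal bounds would not.
    #
    #   query_obj = viz.query_obj()
    #   key = viz.cache_key(query_obj, time_compare="1 week ago")
    #   # `time_compare` lands in `extra`, so the shifted query gets its own key.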
def get_payload(self, query_obj: Optional[QueryObjectDict] = None) -> VizPayload:
"""Returns a payload of metadata and data"""
self.run_extra_queries()
payload = self.get_df_payload(query_obj)
df = payload.get("df")
if self.status != utils.QueryStatus.FAILED:
payload["data"] = self.get_data(df)
if "df" in payload:
del payload["df"]
filters = self.form_data.get("filters", [])
filter_columns = [flt.get("col") for flt in filters]
columns = set(self.datasource.column_names)
filter_values_columns = []
# if using virtual datasource, check filter_values
if self.datasource.sql:
filter_values_columns = (
re.findall(FILTER_VALUES_REGEX, self.datasource.sql)
) or []
applied_time_extras = self.form_data.get("applied_time_extras", {})
applied_time_columns, rejected_time_columns = utils.get_time_filter_status(
self.datasource, applied_time_extras
)
payload["applied_filters"] = [
{"column": col}
for col in filter_columns
if col in columns or col in filter_values_columns
] + applied_time_columns
payload["rejected_filters"] = [
{"reason": "not_in_datasource", "column": col}
for col in filter_columns
if col not in columns and col not in filter_values_columns
] + rejected_time_columns
return payload
def get_df_payload(
self, query_obj: Optional[QueryObjectDict] = None, **kwargs: Any
) -> Dict[str, Any]:
"""Handles caching around the df payload retrieval"""
if not query_obj:
query_obj = self.query_obj()
cache_key = self.cache_key(query_obj, **kwargs) if query_obj else None
cache_value = None
logger.info("Cache key: {}".format(cache_key))
is_loaded = False
stacktrace = None
df = None
if cache_key and cache_manager.data_cache and not self.force:
cache_value = cache_manager.data_cache.get(cache_key)
if cache_value:
stats_logger.incr("loading_from_cache")
try:
df = cache_value["df"]
self.query = cache_value["query"]
self.status = utils.QueryStatus.SUCCESS
is_loaded = True
stats_logger.incr("loaded_from_cache")
except Exception as ex:
logger.exception(ex)
logger.error(
"Error reading cache: " + utils.error_msg_from_exception(ex)
)
logger.info("Serving from cache")
if query_obj and not is_loaded:
if self.force_cached:
logger.warning(
f"force_cached (viz.py): value not found for cache key {cache_key}"
)
raise CacheLoadError(_("Cached value not found"))
try:
invalid_columns = [
col
for col in (query_obj.get("columns") or [])
+ (query_obj.get("groupby") or [])
+ utils.get_column_names_from_metrics(
cast(
List[Union[str, Dict[str, Any]]], query_obj.get("metrics"),
)
)
if col not in self.datasource.column_names
]
if invalid_columns:
raise QueryObjectValidationError(
_(
"Columns missing in datasource: %(invalid_columns)s",
invalid_columns=invalid_columns,
)
)
df = self.get_df(query_obj)
if self.status != utils.QueryStatus.FAILED:
stats_logger.incr("loaded_from_source")
if not self.force:
stats_logger.incr("loaded_from_source_without_force")
is_loaded = True
except QueryObjectValidationError as ex:
error = dataclasses.asdict(
SupersetError(
message=str(ex),
level=ErrorLevel.ERROR,
error_type=SupersetErrorType.VIZ_GET_DF_ERROR,
)
)
self.errors.append(error)
self.status = utils.QueryStatus.FAILED
except Exception as ex:
logger.exception(ex)
error = dataclasses.asdict(
SupersetError(
message=str(ex),
level=ErrorLevel.ERROR,
error_type=SupersetErrorType.VIZ_GET_DF_ERROR,
)
)
self.errors.append(error)
self.status = utils.QueryStatus.FAILED
stacktrace = utils.get_stacktrace()
if is_loaded and cache_key and self.status != utils.QueryStatus.FAILED:
set_and_log_cache(
cache_manager.data_cache,
cache_key,
{"df": df, "query": self.query},
self.cache_timeout,
self.datasource.uid,
)
return {
"cache_key": cache_key,
"cached_dttm": cache_value["dttm"] if cache_value is not None else None,
"cache_timeout": self.cache_timeout,
"df": df,
"errors": self.errors,
"form_data": self.form_data,
"is_cached": cache_value is not None,
"query": self.query,
"from_dttm": self.from_dttm,
"to_dttm": self.to_dttm,
"status": self.status,
"stacktrace": stacktrace,
"rowcount": len(df.index) if df is not None else 0,
}
def json_dumps(self, obj: Any, sort_keys: bool = False) -> str:
return json.dumps(
obj, default=utils.json_int_dttm_ser, ignore_nan=True, sort_keys=sort_keys
)
def has_error(self, payload: VizPayload) -> bool:
return (
payload.get("status") == utils.QueryStatus.FAILED
or payload.get("error") is not None
or bool(payload.get("errors"))
)
def payload_json_and_has_error(self, payload: VizPayload) -> Tuple[str, bool]:
return self.json_dumps(payload), self.has_error(payload)
@property
def data(self) -> Dict[str, Any]:
"""This is the data object serialized to the js layer"""
content = {
"form_data": self.form_data,
"token": self.token,
"viz_name": self.viz_type,
"filter_select_enabled": self.datasource.filter_select_enabled,
}
return content
def get_csv(self) -> Optional[str]:
df = self.get_df_payload()["df"] # leverage caching logic
include_index = not isinstance(df.index, pd.RangeIndex)
return df.to_csv(index=include_index, **config["CSV_EXPORT"])
def get_data(self, df: pd.DataFrame) -> VizData:
return df.to_dict(orient="records")
@property
def json_data(self) -> str:
return json.dumps(self.data)
def raise_for_access(self) -> None:
"""
Raise an exception if the user cannot access the resource.
:raises SupersetSecurityException: If the user cannot access the resource
"""
security_manager.raise_for_access(viz=self)
class TableViz(BaseViz):
"""A basic html table that is sortable and searchable"""
viz_type = "table"
verbose_name = _("Table View")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
enforce_numerical_metrics = False
def process_metrics(self) -> None:
"""Process form data and store parsed column configs.
1. Determine query mode based on form_data params.
- Use `query_mode` if it has a valid value
- Set as RAW mode if `all_columns` is set
- Otherwise defaults to AGG mode
2. Determine output columns based on query mode.
"""
# Verify form data first: if not specifying query mode, then cannot have both
# GROUP BY and RAW COLUMNS.
fd = self.form_data
if (
not fd.get("query_mode")
and fd.get("all_columns")
and (fd.get("groupby") or fd.get("metrics") or fd.get("percent_metrics"))
):
raise QueryObjectValidationError(
_(
"You cannot use [Columns] in combination with "
"[Group By]/[Metrics]/[Percentage Metrics]. "
"Please choose one or the other."
)
)
super().process_metrics()
self.query_mode: QueryMode = QueryMode.get(fd.get("query_mode")) or (
# infer query mode from the presence of other fields
QueryMode.RAW
if len(fd.get("all_columns") or []) > 0
else QueryMode.AGGREGATE
)
columns: List[str] = [] # output columns sans time and percent_metric column
percent_columns: List[str] = [] # percent columns that needs extra computation
if self.query_mode == QueryMode.RAW:
columns = utils.get_metric_names(fd.get("all_columns") or [])
else:
columns = utils.get_metric_names(self.groupby + (fd.get("metrics") or []))
percent_columns = utils.get_metric_names(fd.get("percent_metrics") or [])
self.columns = columns
self.percent_columns = percent_columns
self.is_timeseries = self.should_be_timeseries()
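    # A minimal sketch of the query-mode inference above (form_data values are
    # hypothetical): without an explicit `query_mode`, a non-empty `all_columns`
    # selects RAW mode, otherwise AGGREGATE mode is used.
    #
    #   {"all_columns": ["name", "ds"]}                 -> QueryMode.RAW
    #   {"groupby": ["country"], "metrics": ["count"]}  -> QueryMode.AGGREGATE
    #   {"query_mode": "aggregate", "all_columns": []}  -> QueryMode.AGGREGATE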
def should_be_timeseries(self) -> bool:
fd = self.form_data
# TODO handle datasource-type-specific code in datasource
conditions_met = (fd.get("granularity") and fd.get("granularity") != "all") or (
fd.get("granularity_sqla") and fd.get("time_grain_sqla")
)
if fd.get("include_time") and not conditions_met:
raise QueryObjectValidationError(
_("Pick a granularity in the Time section or " "uncheck 'Include Time'")
)
return bool(fd.get("include_time"))
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
fd = self.form_data
if self.query_mode == QueryMode.RAW:
d["columns"] = fd.get("all_columns")
order_by_cols = fd.get("order_by_cols") or []
d["orderby"] = [json.loads(t) for t in order_by_cols]
# must disable groupby and metrics in raw mode
d["groupby"] = []
d["metrics"] = []
# raw mode does not support timeseries queries
d["timeseries_limit_metric"] = None
d["timeseries_limit"] = None
d["is_timeseries"] = None
else:
sort_by = fd.get("timeseries_limit_metric")
if sort_by:
sort_by_label = utils.get_metric_name(sort_by)
if sort_by_label not in d["metrics"]:
d["metrics"].append(sort_by)
d["orderby"] = [(sort_by, not fd.get("order_desc", True))]
elif d["metrics"]:
# Legacy behavior of sorting by first metric by default
first_metric = d["metrics"][0]
d["orderby"] = [(first_metric, not fd.get("order_desc", True))]
return d
def get_data(self, df: pd.DataFrame) -> VizData:
"""
Transform the query result to the table representation.
:param df: The interim dataframe
:returns: The table visualization data
        The interim dataframe consists of the group-by and non-group-by columns and
the union of the metrics representing the non-percent and percent metrics. Note
the percent metrics have yet to be transformed.
"""
# Transform the data frame to adhere to the UI ordering of the columns and
# metrics whilst simultaneously computing the percentages (via normalization)
# for the percent metrics.
if df.empty:
return None
columns, percent_columns = self.columns, self.percent_columns
if DTTM_ALIAS in df and self.is_timeseries:
columns = [DTTM_ALIAS] + columns
df = pd.concat(
[
df[columns],
(df[percent_columns].div(df[percent_columns].sum()).add_prefix("%")),
],
axis=1,
)
return self.handle_js_int_overflow(
dict(records=df.to_dict(orient="records"), columns=list(df.columns))
)
def json_dumps(self, obj: Any, sort_keys: bool = False) -> str:
return json.dumps(
obj, default=utils.json_iso_dttm_ser, sort_keys=sort_keys, ignore_nan=True
)
class TimeTableViz(BaseViz):
"""A data table with rich time-series related columns"""
viz_type = "time_table"
verbose_name = _("Time Table View")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
fd = self.form_data
if not fd.get("metrics"):
raise QueryObjectValidationError(_("Pick at least one metric"))
if fd.get("groupby") and len(fd["metrics"]) > 1:
raise QueryObjectValidationError(
_("When using 'Group By' you are limited to use a single metric")
)
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
columns = None
values: Union[List[str], str] = self.metric_labels
if fd.get("groupby"):
values = self.metric_labels[0]
columns = fd.get("groupby")
pt = df.pivot_table(index=DTTM_ALIAS, columns=columns, values=values)
pt.index = pt.index.map(str)
pt = pt.sort_index()
return dict(
records=pt.to_dict(orient="index"),
columns=list(pt.columns),
is_group_by=len(fd.get("groupby", [])) > 0,
)
class PivotTableViz(BaseViz):
"""A pivot table view, define your rows, columns and metrics"""
viz_type = "pivot_table"
verbose_name = _("Pivot Table")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
enforce_numerical_metrics = False
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
groupby = self.form_data.get("groupby")
columns = self.form_data.get("columns")
metrics = self.form_data.get("metrics")
transpose = self.form_data.get("transpose_pivot")
if not columns:
columns = []
if not groupby:
groupby = []
if not groupby:
raise QueryObjectValidationError(
_("Please choose at least one 'Group by' field ")
)
if transpose and not columns:
raise QueryObjectValidationError(
_(
(
"Please choose at least one 'Columns' field when "
"select 'Transpose Pivot' option"
)
)
)
if not metrics:
raise QueryObjectValidationError(_("Please choose at least one metric"))
if set(groupby) & set(columns):
raise QueryObjectValidationError(_("Group By' and 'Columns' can't overlap"))
return d
@staticmethod
def get_aggfunc(
metric: str, df: pd.DataFrame, form_data: Dict[str, Any]
) -> Union[str, Callable[[Any], Any]]:
aggfunc = form_data.get("pandas_aggfunc") or "sum"
if pd.api.types.is_numeric_dtype(df[metric]):
# Ensure that Pandas's sum function mimics that of SQL.
if aggfunc == "sum":
                return lambda x: x.sum(min_count=1)
            return aggfunc
        # only min and max work properly for non-numerics
        return aggfunc if aggfunc in ("min", "max") else "max"
@staticmethod
def _format_datetime(value: Union[pd.Timestamp, datetime, date, str]) -> str:
"""
Format a timestamp in such a way that the viz will be able to apply
the correct formatting in the frontend.
:param value: the value of a temporal column
:return: formatted timestamp if it is a valid timestamp, otherwise
the original value
"""
tstamp: Optional[pd.Timestamp] = None
if isinstance(value, pd.Timestamp):
tstamp = value
if isinstance(value, datetime) or isinstance(value, date):
tstamp = pd.Timestamp(value)
if isinstance(value, str):
try:
tstamp = pd.Timestamp(value)
except ValueError:
pass
if tstamp:
return f"__timestamp:{datetime_to_epoch(tstamp)}"
# fallback in case something incompatible is returned
return cast(str, value)
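    # A minimal sketch of the formatting above (inputs are hypothetical, and it
    # assumes datetime_to_epoch returns the epoch in milliseconds): temporal
    # cells are rewritten to the "__timestamp:<epoch>" form the frontend expects.
    #
    #   PivotTableViz._format_datetime(pd.Timestamp("2021-01-01"))
    #   # -> "__timestamp:1609459200000.0"
    #   PivotTableViz._format_datetime("not a date")
    #   # -> "not a date"  (returned unchanged)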
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
if self.form_data.get("granularity") == "all" and DTTM_ALIAS in df:
del df[DTTM_ALIAS]
metrics = [utils.get_metric_name(m) for m in self.form_data["metrics"]]
aggfuncs: Dict[str, Union[str, Callable[[Any], Any]]] = {}
for metric in metrics:
aggfuncs[metric] = self.get_aggfunc(metric, df, self.form_data)
groupby = self.form_data.get("groupby") or []
columns = self.form_data.get("columns") or []
for column_name in groupby + columns:
column = self.datasource.get_column(column_name)
if column and column.is_temporal:
ts = df[column_name].apply(self._format_datetime)
df[column_name] = ts
if self.form_data.get("transpose_pivot"):
groupby, columns = columns, groupby
df = df.pivot_table(
index=groupby,
columns=columns,
values=metrics,
aggfunc=aggfuncs,
margins=self.form_data.get("pivot_margins"),
)
# Re-order the columns adhering to the metric ordering.
df = df[metrics]
# Display metrics side by side with each column
if self.form_data.get("combine_metric"):
df = df.stack(0).unstack()
return dict(
columns=list(df.columns),
html=df.to_html(
na_rep="null",
classes=(
"dataframe table table-striped table-bordered "
"table-condensed table-hover"
).split(" "),
),
)
class TreemapViz(BaseViz):
"""Tree map visualisation for hierarchical data."""
viz_type = "treemap"
verbose_name = _("Treemap")
credits = '<a href="https://d3js.org">d3.js</a>'
is_timeseries = False
def _nest(self, metric: str, df: pd.DataFrame) -> List[Dict[str, Any]]:
nlevels = df.index.nlevels
if nlevels == 1:
result = [{"name": n, "value": v} for n, v in zip(df.index, df[metric])]
else:
result = [
{"name": l, "children": self._nest(metric, df.loc[l])}
for l in df.index.levels[0]
]
return result
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
df = df.set_index(self.form_data.get("groupby"))
chart_data = [
{"name": metric, "children": self._nest(metric, df)}
for metric in df.columns
]
return chart_data
class CalHeatmapViz(BaseViz):
"""Calendar heatmap."""
viz_type = "cal_heatmap"
verbose_name = _("Calendar Heatmap")
credits = "<a href=https://github.com/wa0x6e/cal-heatmap>cal-heatmap</a>"
is_timeseries = True
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
form_data = self.form_data
data = {}
records = df.to_dict("records")
for metric in self.metric_labels:
values = {}
for obj in records:
v = obj[DTTM_ALIAS]
if hasattr(v, "value"):
v = v.value
values[str(v / 10 ** 9)] = obj.get(metric)
data[metric] = values
try:
start, end = get_since_until(
relative_start=relative_start,
relative_end=relative_end,
time_range=form_data.get("time_range"),
since=form_data.get("since"),
until=form_data.get("until"),
)
except ValueError as ex:
raise QueryObjectValidationError(str(ex))
if not start or not end:
raise QueryObjectValidationError(
"Please provide both time bounds (Since and Until)"
)
domain = form_data.get("domain_granularity")
diff_delta = rdelta.relativedelta(end, start)
diff_secs = (end - start).total_seconds()
if domain == "year":
range_ = diff_delta.years + 1
elif domain == "month":
range_ = diff_delta.years * 12 + diff_delta.months + 1
elif domain == "week":
range_ = diff_delta.years * 53 + diff_delta.weeks + 1
elif domain == "day":
range_ = diff_secs // (24 * 60 * 60) + 1 # type: ignore
else:
range_ = diff_secs // (60 * 60) + 1 # type: ignore
return {
"data": data,
"start": start,
"domain": domain,
"subdomain": form_data.get("subdomain_granularity"),
"range": range_,
}
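    # A worked example of the `range_` arithmetic above (dates are hypothetical):
    # for bounds 2021-01-01 .. 2021-03-15 with domain "month", relativedelta
    # gives years=0, months=2, so range_ = 0 * 12 + 2 + 1 = 3 month-sized
    # domains for the calendar heatmap.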
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
fd = self.form_data
d["metrics"] = fd.get("metrics")
return d
class NVD3Viz(BaseViz):
"""Base class for all nvd3 vizs"""
credits = '<a href="http://nvd3.org/">NVD3.org</a>'
viz_type: Optional[str] = None
verbose_name = "Base NVD3 Viz"
is_timeseries = False
class BubbleViz(NVD3Viz):
"""Based on the NVD3 bubble chart"""
viz_type = "bubble"
verbose_name = _("Bubble Chart")
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
form_data = self.form_data
d = super().query_obj()
d["groupby"] = [form_data.get("entity")]
if form_data.get("series"):
d["groupby"].append(form_data.get("series"))
# dedup groupby if it happens to be the same
d["groupby"] = list(dict.fromkeys(d["groupby"]))
self.x_metric = form_data["x"]
self.y_metric = form_data["y"]
self.z_metric = form_data["size"]
self.entity = form_data.get("entity")
self.series = form_data.get("series") or self.entity
d["row_limit"] = form_data.get("limit")
d["metrics"] = [self.z_metric, self.x_metric, self.y_metric]
if len(set(self.metric_labels)) < 3:
raise QueryObjectValidationError(_("Please use 3 different metric labels"))
if not all(d["metrics"] + [self.entity]):
raise QueryObjectValidationError(_("Pick a metric for x, y and size"))
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
df["x"] = df[[utils.get_metric_name(self.x_metric)]]
df["y"] = df[[utils.get_metric_name(self.y_metric)]]
df["size"] = df[[utils.get_metric_name(self.z_metric)]]
df["shape"] = "circle"
df["group"] = df[[self.series]]
series: Dict[Any, List[Any]] = defaultdict(list)
for row in df.to_dict(orient="records"):
series[row["group"]].append(row)
chart_data = []
for k, v in series.items():
chart_data.append({"key": k, "values": v})
return chart_data
class BulletViz(NVD3Viz):
"""Based on the NVD3 bullet chart"""
viz_type = "bullet"
verbose_name = _("Bullet Chart")
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
form_data = self.form_data
d = super().query_obj()
self.metric = form_data["metric"]
d["metrics"] = [self.metric]
if not self.metric:
raise QueryObjectValidationError(_("Pick a metric to display"))
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
df["metric"] = df[[utils.get_metric_name(self.metric)]]
values = df["metric"].values
return {
"measures": values.tolist(),
}
class BigNumberViz(BaseViz):
"""Put emphasis on a single metric with this big number viz"""
viz_type = "big_number"
verbose_name = _("Big Number with Trendline")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
metric = self.form_data.get("metric")
if not metric:
raise QueryObjectValidationError(_("Pick a metric!"))
d["metrics"] = [self.form_data.get("metric")]
self.form_data["metric"] = metric
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
df = df.pivot_table(
index=DTTM_ALIAS,
columns=[],
values=self.metric_labels,
dropna=False,
aggfunc=np.min, # looking for any (only) value, preserving `None`
)
df = self.apply_rolling(df)
df[DTTM_ALIAS] = df.index
return super().get_data(df)
class BigNumberTotalViz(BaseViz):
"""Put emphasis on a single metric with this big number viz"""
viz_type = "big_number_total"
verbose_name = _("Big Number")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
metric = self.form_data.get("metric")
if not metric:
raise QueryObjectValidationError(_("Pick a metric!"))
d["metrics"] = [self.form_data.get("metric")]
self.form_data["metric"] = metric
# Limiting rows is not required as only one cell is returned
d["row_limit"] = None
return d
class NVD3TimeSeriesViz(NVD3Viz):
"""A rich line chart component with tons of options"""
viz_type = "line"
verbose_name = _("Time Series - Line Chart")
sort_series = False
is_timeseries = True
pivot_fill_value: Optional[int] = None
def to_series(
self, df: pd.DataFrame, classed: str = "", title_suffix: str = ""
) -> List[Dict[str, Any]]:
cols = []
for col in df.columns:
if col == "":
cols.append("N/A")
elif col is None:
cols.append("NULL")
else:
cols.append(col)
df.columns = cols
series = df.to_dict("series")
chart_data = []
for name in df.T.index.tolist():
ys = series[name]
if df[name].dtype.kind not in "biufc":
continue
series_title: Union[List[str], str, Tuple[str, ...]]
if isinstance(name, list):
series_title = [str(title) for title in name]
elif isinstance(name, tuple):
series_title = tuple(str(title) for title in name)
else:
series_title = str(name)
if (
isinstance(series_title, (list, tuple))
and len(series_title) > 1
and len(self.metric_labels) == 1
):
# Removing metric from series name if only one metric
series_title = series_title[1:]
if title_suffix:
if isinstance(series_title, str):
series_title = (series_title, title_suffix)
elif isinstance(series_title, list):
series_title = series_title + [title_suffix]
elif isinstance(series_title, tuple):
series_title = series_title + (title_suffix,)
values = []
non_nan_cnt = 0
for ds in df.index:
if ds in ys:
d = {"x": ds, "y": ys[ds]}
if not np.isnan(ys[ds]):
non_nan_cnt += 1
else:
d = {}
values.append(d)
if non_nan_cnt == 0:
continue
d = {"key": series_title, "values": values}
if classed:
d["classed"] = classed
chart_data.append(d)
return chart_data
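    # A minimal sketch of the output shape above (values are hypothetical): each
    # appended dict follows the NVD3 series form
    #
    #   {"key": <series title>, "values": [{"x": <dttm>, "y": <value>}, ...]}
    #
    # plus an optional "classed" entry; with a single metric the metric name is
    # trimmed from multi-level titles, and all-NaN series are skipped entirely.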
def process_data(self, df: pd.DataFrame, aggregate: bool = False) -> VizData:
fd = self.form_data
if fd.get("granularity") == "all":
raise QueryObjectValidationError(
_("Pick a time granularity for your time series")
)
if df.empty:
return df
if aggregate:
df = df.pivot_table(
index=DTTM_ALIAS,
columns=fd.get("groupby"),
values=self.metric_labels,
fill_value=0,
aggfunc=sum,
)
else:
df = df.pivot_table(
index=DTTM_ALIAS,
columns=fd.get("groupby"),
values=self.metric_labels,
fill_value=self.pivot_fill_value,
)
rule = fd.get("resample_rule")
method = fd.get("resample_method")
if rule and method:
df = getattr(df.resample(rule), method)()
if self.sort_series:
dfs = df.sum()
dfs.sort_values(ascending=False, inplace=True)
df = df[dfs.index]
df = self.apply_rolling(df)
if fd.get("contribution"):
dft = df.T
df = (dft / dft.sum()).T
return df
def run_extra_queries(self) -> None:
fd = self.form_data
time_compare = fd.get("time_compare") or []
# backwards compatibility
if not isinstance(time_compare, list):
time_compare = [time_compare]
for option in time_compare:
query_object = self.query_obj()
try:
delta = parse_past_timedelta(option)
except ValueError as ex:
raise QueryObjectValidationError(str(ex))
query_object["inner_from_dttm"] = query_object["from_dttm"]
query_object["inner_to_dttm"] = query_object["to_dttm"]
if not query_object["from_dttm"] or not query_object["to_dttm"]:
raise QueryObjectValidationError(
_(
"An enclosed time range (both start and end) must be specified "
"when using a Time Comparison."
)
)
query_object["from_dttm"] -= delta
query_object["to_dttm"] -= delta
df2 = self.get_df_payload(query_object, time_compare=option).get("df")
if df2 is not None and DTTM_ALIAS in df2:
label = "{} offset".format(option)
df2[DTTM_ALIAS] += delta
df2 = self.process_data(df2)
self._extra_chart_data.append((label, df2))
def get_data(self, df: pd.DataFrame) -> VizData:
fd = self.form_data
comparison_type = fd.get("comparison_type") or "values"
df = self.process_data(df)
if comparison_type == "values":
# Filter out series with all NaN
chart_data = self.to_series(df.dropna(axis=1, how="all"))
for i, (label, df2) in enumerate(self._extra_chart_data):
chart_data.extend(
self.to_series(
df2, classed="time-shift-{}".format(i), title_suffix=label
)
)
else:
chart_data = []
for i, (label, df2) in enumerate(self._extra_chart_data):
                # reindex df2 onto the union of both indexes, then back onto df's index
combined_index = df.index.union(df2.index)
df2 = (
df2.reindex(combined_index)
.interpolate(method="time")
.reindex(df.index)
)
if comparison_type == "absolute":
diff = df - df2
elif comparison_type == "percentage":
diff = (df - df2) / df2
elif comparison_type == "ratio":
diff = df / df2
else:
raise QueryObjectValidationError(
"Invalid `comparison_type`: {0}".format(comparison_type)
)
# remove leading/trailing NaNs from the time shift difference
diff = diff[diff.first_valid_index() : diff.last_valid_index()]
chart_data.extend(
self.to_series(
diff, classed="time-shift-{}".format(i), title_suffix=label
)
)
if not self.sort_series:
chart_data = sorted(chart_data, key=lambda x: tuple(x["key"]))
return chart_data
class MultiLineViz(NVD3Viz):
"""Pile on multiple line charts"""
viz_type = "line_multi"
verbose_name = _("Time Series - Multiple Line Charts")
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
return {}
def get_data(self, df: pd.DataFrame) -> VizData:
multiline_fd = self.form_data
# Late import to avoid circular import issues
from superset.charts.dao import ChartDAO
axis1_chart_ids = multiline_fd.get("line_charts", [])
axis2_chart_ids = multiline_fd.get("line_charts_2", [])
all_charts = {
chart.id: chart
for chart in ChartDAO.find_by_ids(axis1_chart_ids + axis2_chart_ids)
}
axis1_charts = [all_charts[chart_id] for chart_id in axis1_chart_ids]
axis2_charts = [all_charts[chart_id] for chart_id in axis2_chart_ids]
filters = multiline_fd.get("filters", [])
add_prefix = multiline_fd.get("prefix_metric_with_slice_name", False)
data = []
min_x, max_x = None, None
for chart, y_axis in [(chart, 1) for chart in axis1_charts] + [
(chart, 2) for chart in axis2_charts
]:
prefix = f"{chart.chart}: " if add_prefix else ""
chart_fd = chart.form_data
chart_fd["filters"] = chart_fd.get("filters", []) + filters
if "extra_filters" in multiline_fd:
chart_fd["extra_filters"] = multiline_fd["extra_filters"]
if "time_range" in multiline_fd:
chart_fd["time_range"] = multiline_fd["time_range"]
viz_obj = viz_types[chart.viz_type](
chart.datasource,
form_data=chart_fd,
force=self.force,
force_cached=self.force_cached,
)
df = viz_obj.get_df_payload()["df"]
chart_series = viz_obj.get_data(df) or []
for series in chart_series:
x_values = [value["x"] for value in series["values"]]
min_x = min(x_values + ([min_x] if min_x is not None else []))
max_x = max(x_values + ([max_x] if max_x is not None else []))
data.append(
{
"key": prefix + ", ".join(series["key"]),
"type": "line",
"values": series["values"],
"yAxis": y_axis,
}
)
bounds = []
if min_x is not None:
bounds.append({"x": min_x, "y": None})
if max_x is not None:
bounds.append({"x": max_x, "y": None})
for series in data:
series["values"].extend(bounds)
return data
class NVD3DualLineViz(NVD3Viz):
"""A rich line chart with dual axis"""
viz_type = "dual_line"
verbose_name = _("Time Series - Dual Axis Line Chart")
sort_series = False
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
m1 = self.form_data.get("metric")
m2 = self.form_data.get("metric_2")
d["metrics"] = [m1, m2]
if not m1:
raise QueryObjectValidationError(_("Pick a metric for left axis!"))
if not m2:
raise QueryObjectValidationError(_("Pick a metric for right axis!"))
if m1 == m2:
raise QueryObjectValidationError(
_("Please choose different metrics" " on left and right axis")
)
return d
def to_series(self, df: pd.DataFrame, classed: str = "") -> List[Dict[str, Any]]:
cols = []
for col in df.columns:
if col == "":
cols.append("N/A")
elif col is None:
cols.append("NULL")
else:
cols.append(col)
df.columns = cols
series = df.to_dict("series")
chart_data = []
metrics = [self.form_data["metric"], self.form_data["metric_2"]]
for i, m in enumerate(metrics):
m = utils.get_metric_name(m)
ys = series[m]
if df[m].dtype.kind not in "biufc":
continue
series_title = m
d = {
"key": series_title,
"classed": classed,
"values": [
{"x": ds, "y": ys[ds] if ds in ys else None} for ds in df.index
],
"yAxis": i + 1,
"type": "line",
}
chart_data.append(d)
return chart_data
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
if self.form_data.get("granularity") == "all":
raise QueryObjectValidationError(
_("Pick a time granularity for your time series")
)
metric = utils.get_metric_name(fd["metric"])
metric_2 = utils.get_metric_name(fd["metric_2"])
df = df.pivot_table(index=DTTM_ALIAS, values=[metric, metric_2])
chart_data = self.to_series(df)
return chart_data
class NVD3TimeSeriesBarViz(NVD3TimeSeriesViz):
"""A bar chart where the x axis is time"""
viz_type = "bar"
sort_series = True
verbose_name = _("Time Series - Bar Chart")
class NVD3TimePivotViz(NVD3TimeSeriesViz):
"""Time Series - Periodicity Pivot"""
viz_type = "time_pivot"
sort_series = True
verbose_name = _("Time Series - Period Pivot")
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
d["metrics"] = [self.form_data.get("metric")]
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
df = self.process_data(df)
freq = to_offset(fd.get("freq"))
try:
freq = type(freq)(freq.n, normalize=True, **freq.kwds)
except ValueError:
freq = type(freq)(freq.n, **freq.kwds)
df.index.name = None
df[DTTM_ALIAS] = df.index.map(freq.rollback)
df["ranked"] = df[DTTM_ALIAS].rank(method="dense", ascending=False) - 1
df.ranked = df.ranked.map(int)
df["series"] = "-" + df.ranked.map(str)
df["series"] = df["series"].str.replace("-0", "current")
rank_lookup = {
row["series"]: row["ranked"] for row in df.to_dict(orient="records")
}
max_ts = df[DTTM_ALIAS].max()
max_rank = df["ranked"].max()
df[DTTM_ALIAS] = df.index + (max_ts - df[DTTM_ALIAS])
df = df.pivot_table(
index=DTTM_ALIAS,
columns="series",
values=utils.get_metric_name(fd["metric"]),
)
chart_data = self.to_series(df)
for serie in chart_data:
serie["rank"] = rank_lookup[serie["key"]]
serie["perc"] = 1 - (serie["rank"] / (max_rank + 1))
return chart_data
class NVD3CompareTimeSeriesViz(NVD3TimeSeriesViz):
"""A line chart component where you can compare the % change over time"""
viz_type = "compare"
verbose_name = _("Time Series - Percent Change")
class NVD3TimeSeriesStackedViz(NVD3TimeSeriesViz):
"""A rich stack area chart"""
viz_type = "area"
verbose_name = _("Time Series - Stacked")
sort_series = True
pivot_fill_value = 0
class HistogramViz(BaseViz):
"""Histogram"""
viz_type = "histogram"
verbose_name = _("Histogram")
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
"""Returns the query object for this visualization"""
d = super().query_obj()
d["row_limit"] = self.form_data.get("row_limit", int(config["VIZ_ROW_LIMIT"]))
numeric_columns = self.form_data.get("all_columns_x")
if numeric_columns is None:
raise QueryObjectValidationError(
_("Must have at least one numeric column specified")
)
self.columns = numeric_columns
d["columns"] = numeric_columns + self.groupby
# override groupby entry to avoid aggregation
d["groupby"] = []
return d
def labelify(self, keys: Union[List[str], str], column: str) -> str:
if isinstance(keys, str):
keys = [keys]
# removing undesirable characters
labels = [re.sub(r"\W+", r"_", k) for k in keys]
if len(self.columns) > 1 or not self.groupby:
# Only show numeric column in label if there are many
labels = [column] + labels
return "__".join(labels)
def get_data(self, df: pd.DataFrame) -> VizData:
"""Returns the chart data"""
if df.empty:
return None
chart_data = []
if len(self.groupby) > 0:
groups = df.groupby(self.groupby)
else:
groups = [((), df)]
for keys, data in groups:
chart_data.extend(
[
{
"key": self.labelify(keys, column),
"values": data[column].tolist(),
}
for column in self.columns
]
)
return chart_data
class DistributionBarViz(BaseViz):
"""A good old bar chart"""
viz_type = "dist_bar"
verbose_name = _("Distribution - Bar Chart")
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
fd = self.form_data
if len(d["groupby"]) < len(fd.get("groupby") or []) + len(
fd.get("columns") or []
):
raise QueryObjectValidationError(
_("Can't have overlap between Series and Breakdowns")
)
if not fd.get("metrics"):
raise QueryObjectValidationError(_("Pick at least one metric"))
if not fd.get("groupby"):
raise QueryObjectValidationError(_("Pick at least one field for [Series]"))
d["orderby"] = [(metric, False) for metric in d["metrics"]]
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
metrics = self.metric_labels
columns = fd.get("columns") or []
# pandas will throw away nulls when grouping/pivoting,
# so we substitute NULL_STRING for any nulls in the necessary columns
filled_cols = self.groupby + columns
df = df.copy()
df[filled_cols] = df[filled_cols].fillna(value=NULL_STRING)
row = df.groupby(self.groupby).sum()[metrics[0]].copy()
row.sort_values(ascending=False, inplace=True)
pt = df.pivot_table(index=self.groupby, columns=columns, values=metrics)
if fd.get("contribution"):
pt = pt.T
pt = (pt / pt.sum()).T
pt = pt.reindex(row.index)
# Re-order the columns adhering to the metric ordering.
pt = pt[metrics]
chart_data = []
for name, ys in pt.items():
if pt[name].dtype.kind not in "biufc" or name in self.groupby:
continue
if isinstance(name, str):
series_title = name
else:
offset = 0 if len(metrics) > 1 else 1
series_title = ", ".join([str(s) for s in name[offset:]])
values = []
for i, v in ys.items():
x = i
if isinstance(x, (tuple, list)):
x = ", ".join([str(s) for s in x])
else:
x = str(x)
values.append({"x": x, "y": v})
d = {"key": series_title, "values": values}
chart_data.append(d)
return chart_data
class SunburstViz(BaseViz):
"""A multi level sunburst chart"""
viz_type = "sunburst"
verbose_name = _("Sunburst")
is_timeseries = False
credits = (
"Kerry Rodden "
'@<a href="https://bl.ocks.org/kerryrodden/7090426">bl.ocks.org</a>'
)
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = copy.deepcopy(self.form_data)
cols = fd.get("groupby") or []
cols.extend(["m1", "m2"])
metric = utils.get_metric_name(fd["metric"])
secondary_metric = (
utils.get_metric_name(fd["secondary_metric"])
if "secondary_metric" in fd
else None
)
if metric == secondary_metric or secondary_metric is None:
df.rename(columns={df.columns[-1]: "m1"}, inplace=True)
df["m2"] = df["m1"]
else:
df.rename(columns={df.columns[-2]: "m1"}, inplace=True)
df.rename(columns={df.columns[-1]: "m2"}, inplace=True)
# Re-order the columns as the query result set column ordering may differ from
# that listed in the hierarchy.
df = df[cols]
return df.to_numpy().tolist()
def query_obj(self) -> QueryObjectDict:
qry = super().query_obj()
fd = self.form_data
qry["metrics"] = [fd["metric"]]
secondary_metric = fd.get("secondary_metric")
if secondary_metric and secondary_metric != fd["metric"]:
qry["metrics"].append(secondary_metric)
if self.form_data.get("sort_by_metric", False):
qry["orderby"] = [(qry["metrics"][0], False)]
return qry
class SankeyViz(BaseViz):
"""A Sankey diagram that requires a parent-child dataset"""
viz_type = "sankey"
verbose_name = _("Sankey")
is_timeseries = False
credits = '<a href="https://www.npmjs.com/package/d3-sankey">d3-sankey on npm</a>'
def query_obj(self) -> QueryObjectDict:
qry = super().query_obj()
if len(qry["groupby"]) != 2:
raise QueryObjectValidationError(
_("Pick exactly 2 columns as [Source / Target]")
)
qry["metrics"] = [self.form_data["metric"]]
if self.form_data.get("sort_by_metric", False):
qry["orderby"] = [(qry["metrics"][0], False)]
return qry
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
source, target = self.groupby
(value,) = self.metric_labels
df.rename(
columns={source: "source", target: "target", value: "value",}, inplace=True,
)
df["source"] = df["source"].astype(str)
df["target"] = df["target"].astype(str)
recs = df.to_dict(orient="records")
hierarchy: Dict[str, Set[str]] = defaultdict(set)
for row in recs:
hierarchy[row["source"]].add(row["target"])
def find_cycle(g: Dict[str, Set[str]]) -> Optional[Tuple[str, str]]:
"""Whether there's a cycle in a directed graph"""
path = set()
def visit(vertex: str) -> Optional[Tuple[str, str]]:
path.add(vertex)
for neighbour in g.get(vertex, ()):
if neighbour in path or visit(neighbour):
return (vertex, neighbour)
path.remove(vertex)
return None
for v in g:
cycle = visit(v)
if cycle:
return cycle
return None
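        # A minimal sketch of find_cycle above (hierarchies are hypothetical):
        # it is a plain DFS that reports the first back-edge it meets, e.g.
        #   find_cycle({"a": {"b"}, "b": {"a"}})  ->  ("b", "a")
        #   find_cycle({"a": {"b"}, "b": {"c"}})  ->  None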
cycle = find_cycle(hierarchy)
if cycle:
raise QueryObjectValidationError(
_(
"There's a loop in your Sankey, please provide a tree. "
"Here's a faulty link: {}"
).format(cycle)
)
return recs
class DirectedForceViz(BaseViz):
"""An animated directed force layout graph visualization"""
viz_type = "directed_force"
verbose_name = _("Directed Force Layout")
credits = 'd3noob @<a href="http://bl.ocks.org/d3noob/5141278">bl.ocks.org</a>'
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
qry = super().query_obj()
if len(self.form_data["groupby"]) != 2:
raise QueryObjectValidationError(_("Pick exactly 2 columns to 'Group By'"))
qry["metrics"] = [self.form_data["metric"]]
if self.form_data.get("sort_by_metric", False):
qry["orderby"] = [(qry["metrics"][0], False)]
return qry
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
df.columns = ["source", "target", "value"]
return df.to_dict(orient="records")
class ChordViz(BaseViz):
"""A Chord diagram"""
viz_type = "chord"
verbose_name = _("Directed Force Layout")
credits = '<a href="https://github.com/d3/d3-chord">Bostock</a>'
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
qry = super().query_obj()
fd = self.form_data
qry["groupby"] = [fd.get("groupby"), fd.get("columns")]
qry["metrics"] = [fd.get("metric")]
if self.form_data.get("sort_by_metric", False):
qry["orderby"] = [(qry["metrics"][0], False)]
return qry
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
df.columns = ["source", "target", "value"]
        # Preparing a symmetrical matrix like d3.chords calls for
nodes = list(set(df["source"]) | set(df["target"]))
matrix = {}
for source, target in product(nodes, nodes):
matrix[(source, target)] = 0
for source, target, value in df.to_records(index=False):
matrix[(source, target)] = value
m = [[matrix[(n1, n2)] for n1 in nodes] for n2 in nodes]
return {"nodes": list(nodes), "matrix": m}
class CountryMapViz(BaseViz):
"""A country centric"""
viz_type = "country_map"
verbose_name = _("Country Map")
is_timeseries = False
credits = "From bl.ocks.org By john-guerra"
def query_obj(self) -> QueryObjectDict:
qry = super().query_obj()
qry["metrics"] = [self.form_data["metric"]]
qry["groupby"] = [self.form_data["entity"]]
return qry
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
cols = [fd.get("entity")]
metric = self.metric_labels[0]
cols += [metric]
ndf = df[cols]
df = ndf
df.columns = ["country_id", "metric"]
d = df.to_dict(orient="records")
return d
class WorldMapViz(BaseViz):
"""A country centric world map"""
viz_type = "world_map"
verbose_name = _("World Map")
is_timeseries = False
credits = 'datamaps on <a href="https://www.npmjs.com/package/datamaps">npm</a>'
def query_obj(self) -> QueryObjectDict:
qry = super().query_obj()
qry["groupby"] = [self.form_data["entity"]]
if self.form_data.get("sort_by_metric", False):
qry["orderby"] = [(qry["metrics"][0], False)]
return qry
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
from superset.examples import countries
fd = self.form_data
cols = [fd.get("entity")]
metric = utils.get_metric_name(fd["metric"])
secondary_metric = (
utils.get_metric_name(fd["secondary_metric"])
if "secondary_metric" in fd
else None
)
columns = ["country", "m1", "m2"]
if metric == secondary_metric:
ndf = df[cols]
ndf["m1"] = df[metric]
ndf["m2"] = ndf["m1"]
else:
if secondary_metric:
cols += [metric, secondary_metric]
else:
cols += [metric]
columns = ["country", "m1"]
ndf = df[cols]
df = ndf
df.columns = columns
d = df.to_dict(orient="records")
for row in d:
country = None
if isinstance(row["country"], str):
if "country_fieldtype" in fd:
country = countries.get(fd["country_fieldtype"], row["country"])
if country:
row["country"] = country["cca3"]
row["latitude"] = country["lat"]
row["longitude"] = country["lng"]
row["name"] = country["name"]
else:
row["country"] = "XXX"
return d
class FilterBoxViz(BaseViz):
"""A multi filter, multi-choice filter box to make dashboards interactive"""
viz_type = "filter_box"
verbose_name = _("Filters")
is_timeseries = False
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
cache_type = "get_data"
filter_row_limit = 1000
def query_obj(self) -> QueryObjectDict:
return {}
def run_extra_queries(self) -> None:
qry = super().query_obj()
filters = self.form_data.get("filter_configs") or []
qry["row_limit"] = self.filter_row_limit
self.dataframes = {}
for flt in filters:
col = flt.get("column")
if not col:
raise QueryObjectValidationError(
_("Invalid filter configuration, please select a column")
)
qry["groupby"] = [col]
metric = flt.get("metric")
qry["metrics"] = [metric] if metric else []
df = self.get_df_payload(query_obj=qry).get("df")
self.dataframes[col] = df
def get_data(self, df: pd.DataFrame) -> VizData:
filters = self.form_data.get("filter_configs") or []
d = {}
for flt in filters:
col = flt.get("column")
metric = flt.get("metric")
df = self.dataframes.get(col)
if df is not None and not df.empty:
if metric:
df = df.sort_values(
utils.get_metric_name(metric), ascending=flt.get("asc")
)
d[col] = [
{"id": row[0], "text": row[0], "metric": row[1]}
for row in df.itertuples(index=False)
]
else:
df = df.sort_values(col, ascending=flt.get("asc"))
d[col] = [
{"id": row[0], "text": row[0]}
for row in df.itertuples(index=False)
]
else:
                d[col] = []  # no data came back for this filter column
return d
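    # Shape of the returned payload (illustrative values): one entry per
    # configured filter column, e.g.
    #   {"gender": [{"id": "boy", "text": "boy", "metric": 48}, ...],
    #    "state":  [{"id": "CA", "text": "CA"}, ...]}
    # Columns whose query returned no data map to an empty list.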
class ParallelCoordinatesViz(BaseViz):
"""Interactive parallel coordinate implementation
Uses this amazing javascript library
https://github.com/syntagmatic/parallel-coordinates
"""
viz_type = "para"
verbose_name = _("Parallel Coordinates")
credits = (
'<a href="https://syntagmatic.github.io/parallel-coordinates/">'
"Syntagmatic's library</a>"
)
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
fd = self.form_data
d["groupby"] = [fd.get("series")]
return d
def get_data(self, df: pd.DataFrame) -> VizData:
return df.to_dict(orient="records")
class HeatmapViz(BaseViz):
"""A nice heatmap visualization that support high density through canvas"""
viz_type = "heatmap"
verbose_name = _("Heatmap")
is_timeseries = False
credits = (
'inspired from mbostock @<a href="http://bl.ocks.org/mbostock/3074470">'
"bl.ocks.org</a>"
)
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
fd = self.form_data
d["metrics"] = [fd.get("metric")]
d["groupby"] = [fd.get("all_columns_x"), fd.get("all_columns_y")]
if self.form_data.get("sort_by_metric", False):
d["orderby"] = [(d["metrics"][0], False)]
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
x = fd.get("all_columns_x")
y = fd.get("all_columns_y")
v = self.metric_labels[0]
if x == y:
df.columns = ["x", "y", "v"]
else:
df = df[[x, y, v]]
df.columns = ["x", "y", "v"]
norm = fd.get("normalize_across")
overall = False
max_ = df.v.max()
min_ = df.v.min()
if norm == "heatmap":
overall = True
else:
gb = df.groupby(norm, group_keys=False)
if len(gb) <= 1:
overall = True
else:
df["perc"] = gb.apply(
lambda x: (x.v - x.v.min()) / (x.v.max() - x.v.min())
)
df["rank"] = gb.apply(lambda x: x.v.rank(pct=True))
if overall:
df["perc"] = (df.v - min_) / (max_ - min_)
df["rank"] = df.v.rank(pct=True)
return {"records": df.to_dict(orient="records"), "extents": [min_, max_]}
class HorizonViz(NVD3TimeSeriesViz):
"""Horizon chart
https://www.npmjs.com/package/d3-horizon-chart
"""
viz_type = "horizon"
verbose_name = _("Horizon Charts")
credits = (
'<a href="https://www.npmjs.com/package/d3-horizon-chart">'
"d3-horizon-chart</a>"
)
class MapboxViz(BaseViz):
"""Rich maps made with Mapbox"""
viz_type = "mapbox"
verbose_name = _("Mapbox")
is_timeseries = False
credits = "<a href=https://www.mapbox.com/mapbox-gl-js/api/>Mapbox GL JS</a>"
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
fd = self.form_data
label_col = fd.get("mapbox_label")
if not fd.get("groupby"):
if fd.get("all_columns_x") is None or fd.get("all_columns_y") is None:
raise QueryObjectValidationError(
_("[Longitude] and [Latitude] must be set")
)
d["columns"] = [fd.get("all_columns_x"), fd.get("all_columns_y")]
if label_col and len(label_col) >= 1:
if label_col[0] == "count":
raise QueryObjectValidationError(
_(
"Must have a [Group By] column to have 'count' as the "
+ "[Label]"
)
)
d["columns"].append(label_col[0])
if fd.get("point_radius") != "Auto":
d["columns"].append(fd.get("point_radius"))
d["columns"] = list(set(d["columns"]))
else:
# Ensuring columns chosen are all in group by
if (
label_col
and len(label_col) >= 1
and label_col[0] != "count"
and label_col[0] not in fd["groupby"]
):
raise QueryObjectValidationError(
_("Choice of [Label] must be present in [Group By]")
)
if (
fd.get("point_radius") != "Auto"
and fd.get("point_radius") not in fd["groupby"]
):
raise QueryObjectValidationError(
_("Choice of [Point Radius] must be present in [Group By]")
)
if (
fd.get("all_columns_x") not in fd["groupby"]
or fd.get("all_columns_y") not in fd["groupby"]
):
raise QueryObjectValidationError(
_(
"[Longitude] and [Latitude] columns must be present in "
+ "[Group By]"
)
)
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
label_col = fd.get("mapbox_label")
has_custom_metric = label_col is not None and len(label_col) > 0
metric_col = [None] * len(df.index)
if has_custom_metric:
if label_col[0] == fd.get("all_columns_x"): # type: ignore
metric_col = df[fd.get("all_columns_x")]
elif label_col[0] == fd.get("all_columns_y"): # type: ignore
metric_col = df[fd.get("all_columns_y")]
else:
metric_col = df[label_col[0]] # type: ignore
point_radius_col = (
[None] * len(df.index)
if fd.get("point_radius") == "Auto"
else df[fd.get("point_radius")]
)
# limiting geo precision as long decimal values trigger issues
# around json-bignumber in Mapbox
GEO_PRECISION = 10
# using geoJSON formatting
geo_json = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"properties": {"metric": metric, "radius": point_radius},
"geometry": {
"type": "Point",
"coordinates": [
round(lon, GEO_PRECISION),
round(lat, GEO_PRECISION),
],
},
}
for lon, lat, metric, point_radius in zip(
df[fd.get("all_columns_x")],
df[fd.get("all_columns_y")],
metric_col,
point_radius_col,
)
],
}
x_series, y_series = df[fd.get("all_columns_x")], df[fd.get("all_columns_y")]
south_west = [x_series.min(), y_series.min()]
north_east = [x_series.max(), y_series.max()]
return {
"geoJSON": geo_json,
"hasCustomMetric": has_custom_metric,
"mapboxApiKey": config["MAPBOX_API_KEY"],
"mapStyle": fd.get("mapbox_style"),
"aggregatorName": fd.get("pandas_aggfunc"),
"clusteringRadius": fd.get("clustering_radius"),
"pointRadiusUnit": fd.get("point_radius_unit"),
"globalOpacity": fd.get("global_opacity"),
"bounds": [south_west, north_east],
"renderWhileDragging": fd.get("render_while_dragging"),
"tooltip": fd.get("rich_tooltip"),
"color": fd.get("mapbox_color"),
}
class DeckGLMultiLayer(BaseViz):
"""Pile on multiple DeckGL layers"""
viz_type = "deck_multi"
verbose_name = _("Deck.gl - Multiple Layers")
is_timeseries = False
credits = '<a href="https://uber.github.io/deck.gl/">deck.gl</a>'
def query_obj(self) -> QueryObjectDict:
return {}
def get_data(self, df: pd.DataFrame) -> VizData:
fd = self.form_data
# Late imports to avoid circular import issues
from superset import db
from superset.models.slice import Slice
slice_ids = fd.get("deck_slices")
slices = db.session.query(Slice).filter(Slice.id.in_(slice_ids)).all()
return {
"mapboxApiKey": config["MAPBOX_API_KEY"],
"slices": [slc.data for slc in slices],
}
class BaseDeckGLViz(BaseViz):
"""Base class for deck.gl visualizations"""
is_timeseries = False
credits = '<a href="https://uber.github.io/deck.gl/">deck.gl</a>'
spatial_control_keys: List[str] = []
def get_metrics(self) -> List[str]:
self.metric = self.form_data.get("size")
return [self.metric] if self.metric else []
def process_spatial_query_obj(self, key: str, group_by: List[str]) -> None:
group_by.extend(self.get_spatial_columns(key))
def get_spatial_columns(self, key: str) -> List[str]:
spatial = self.form_data.get(key)
if spatial is None:
raise ValueError(_("Bad spatial key"))
if spatial.get("type") == "latlong":
return [spatial.get("lonCol"), spatial.get("latCol")]
elif spatial.get("type") == "delimited":
return [spatial.get("lonlatCol")]
elif spatial.get("type") == "geohash":
return [spatial.get("geohashCol")]
return []
@staticmethod
def parse_coordinates(s: Any) -> Optional[Tuple[float, float]]:
if not s:
return None
try:
p = Point(s)
return (p.latitude, p.longitude)
except Exception:
raise SpatialException(_("Invalid spatial point encountered: %s" % s))
@staticmethod
def reverse_geohash_decode(geohash_code: str) -> Tuple[str, str]:
lat, lng = geohash.decode(geohash_code)
return (lng, lat)
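    # Illustrative example (assuming the python-geohash bindings, where
    # geohash.decode returns (lat, lng)): decoding "u4pruydqqvj" gives roughly
    # (57.649, 10.407), and this helper returns it flipped as (10.407, 57.649)
    # to match the (lng, lat) ordering used elsewhere in this module.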
@staticmethod
def reverse_latlong(df: pd.DataFrame, key: str) -> None:
df[key] = [tuple(reversed(o)) for o in df[key] if isinstance(o, (list, tuple))]
def process_spatial_data_obj(self, key: str, df: pd.DataFrame) -> pd.DataFrame:
spatial = self.form_data.get(key)
if spatial is None:
raise ValueError(_("Bad spatial key"))
if spatial.get("type") == "latlong":
df[key] = list(
zip(
pd.to_numeric(df[spatial.get("lonCol")], errors="coerce"),
pd.to_numeric(df[spatial.get("latCol")], errors="coerce"),
)
)
elif spatial.get("type") == "delimited":
lon_lat_col = spatial.get("lonlatCol")
df[key] = df[lon_lat_col].apply(self.parse_coordinates)
del df[lon_lat_col]
elif spatial.get("type") == "geohash":
df[key] = df[spatial.get("geohashCol")].map(self.reverse_geohash_decode)
del df[spatial.get("geohashCol")]
if spatial.get("reverseCheckbox"):
self.reverse_latlong(df, key)
if df.get(key) is None:
raise NullValueException(
_(
"Encountered invalid NULL spatial entry, \
please consider filtering those out"
)
)
return df
def add_null_filters(self) -> None:
fd = self.form_data
spatial_columns = set()
for key in self.spatial_control_keys:
for column in self.get_spatial_columns(key):
spatial_columns.add(column)
if fd.get("adhoc_filters") is None:
fd["adhoc_filters"] = []
line_column = fd.get("line_column")
if line_column:
spatial_columns.add(line_column)
for column in sorted(spatial_columns):
filter_ = to_adhoc({"col": column, "op": "IS NOT NULL", "val": ""})
fd["adhoc_filters"].append(filter_)
def query_obj(self) -> QueryObjectDict:
fd = self.form_data
# add NULL filters
if fd.get("filter_nulls", True):
self.add_null_filters()
d = super().query_obj()
gb: List[str] = []
for key in self.spatial_control_keys:
self.process_spatial_query_obj(key, gb)
if fd.get("dimension"):
gb += [fd["dimension"]]
if fd.get("js_columns"):
gb += fd.get("js_columns") or []
metrics = self.get_metrics()
gb = list(set(gb))
if metrics:
d["groupby"] = gb
d["metrics"] = metrics
d["columns"] = []
first_metric = d["metrics"][0]
d["orderby"] = [(first_metric, not fd.get("order_desc", True))]
else:
d["columns"] = gb
return d
def get_js_columns(self, d: Dict[str, Any]) -> Dict[str, Any]:
cols = self.form_data.get("js_columns") or []
return {col: d.get(col) for col in cols}
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
# Processing spatial info
for key in self.spatial_control_keys:
df = self.process_spatial_data_obj(key, df)
features = []
for d in df.to_dict(orient="records"):
feature = self.get_properties(d)
extra_props = self.get_js_columns(d)
if extra_props:
feature["extraProps"] = extra_props
features.append(feature)
return {
"features": features,
"mapboxApiKey": config["MAPBOX_API_KEY"],
"metricLabels": self.metric_labels,
}
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
raise NotImplementedError()
class DeckScatterViz(BaseDeckGLViz):
"""deck.gl's ScatterLayer"""
viz_type = "deck_scatter"
verbose_name = _("Deck.gl - Scatter plot")
spatial_control_keys = ["spatial"]
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
fd = self.form_data
self.is_timeseries = bool(fd.get("time_grain_sqla") or fd.get("granularity"))
self.point_radius_fixed = fd.get("point_radius_fixed") or {
"type": "fix",
"value": 500,
}
return super().query_obj()
def get_metrics(self) -> List[str]:
self.metric = None
if self.point_radius_fixed.get("type") == "metric":
self.metric = self.point_radius_fixed["value"]
return [self.metric]
return []
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
return {
"metric": d.get(self.metric_label) if self.metric_label else None,
"radius": self.fixed_value
if self.fixed_value
else d.get(self.metric_label)
if self.metric_label
else None,
"cat_color": d.get(self.dim) if self.dim else None,
"position": d.get("spatial"),
DTTM_ALIAS: d.get(DTTM_ALIAS),
}
def get_data(self, df: pd.DataFrame) -> VizData:
fd = self.form_data
self.metric_label = utils.get_metric_name(self.metric) if self.metric else None
self.point_radius_fixed = fd.get("point_radius_fixed")
self.fixed_value = None
self.dim = self.form_data.get("dimension")
if self.point_radius_fixed and self.point_radius_fixed.get("type") != "metric":
self.fixed_value = self.point_radius_fixed.get("value")
return super().get_data(df)
class DeckScreengrid(BaseDeckGLViz):
"""deck.gl's ScreenGridLayer"""
viz_type = "deck_screengrid"
verbose_name = _("Deck.gl - Screen Grid")
spatial_control_keys = ["spatial"]
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
fd = self.form_data
self.is_timeseries = bool(fd.get("time_grain_sqla") or fd.get("granularity"))
return super().query_obj()
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
return {
"position": d.get("spatial"),
"weight": (d.get(self.metric_label) if self.metric_label else None) or 1,
"__timestamp": d.get(DTTM_ALIAS) or d.get("__time"),
}
def get_data(self, df: pd.DataFrame) -> VizData:
self.metric_label = utils.get_metric_name(self.metric) if self.metric else None
return super().get_data(df)
class DeckGrid(BaseDeckGLViz):
"""deck.gl's DeckLayer"""
viz_type = "deck_grid"
verbose_name = _("Deck.gl - 3D Grid")
spatial_control_keys = ["spatial"]
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
return {
"position": d.get("spatial"),
"weight": (d.get(self.metric_label) if self.metric_label else None) or 1,
}
def get_data(self, df: pd.DataFrame) -> VizData:
self.metric_label = utils.get_metric_name(self.metric) if self.metric else None
return super().get_data(df)
def geohash_to_json(geohash_code: str) -> List[List[float]]:
p = geohash.bbox(geohash_code)
return [
[p.get("w"), p.get("n")],
[p.get("e"), p.get("n")],
[p.get("e"), p.get("s")],
[p.get("w"), p.get("s")],
[p.get("w"), p.get("n")],
]
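# Illustrative example (hypothetical bounding box): if geohash.bbox(...) gives
# {"w": 10.3, "e": 10.5, "s": 57.5, "n": 57.7}, the function returns the closed
# lon/lat ring
#   [[10.3, 57.7], [10.5, 57.7], [10.5, 57.5], [10.3, 57.5], [10.3, 57.7]]
# with the first corner repeated so the polygon is closed.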
class DeckPathViz(BaseDeckGLViz):
"""deck.gl's PathLayer"""
viz_type = "deck_path"
verbose_name = _("Deck.gl - Paths")
deck_viz_key = "path"
is_timeseries = True
deser_map = {
"json": json.loads,
"polyline": polyline.decode,
"geohash": geohash_to_json,
}
def query_obj(self) -> QueryObjectDict:
fd = self.form_data
self.is_timeseries = bool(fd.get("time_grain_sqla") or fd.get("granularity"))
d = super().query_obj()
self.metric = fd.get("metric")
line_col = fd.get("line_column")
if d["metrics"]:
self.has_metrics = True
d["groupby"].append(line_col)
else:
self.has_metrics = False
d["columns"].append(line_col)
return d
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
fd = self.form_data
line_type = fd["line_type"]
deser = self.deser_map[line_type]
line_column = fd["line_column"]
path = deser(d[line_column])
if fd.get("reverse_long_lat"):
path = [(o[1], o[0]) for o in path]
d[self.deck_viz_key] = path
if line_type != "geohash":
del d[line_column]
d["__timestamp"] = d.get(DTTM_ALIAS) or d.get("__time")
return d
def get_data(self, df: pd.DataFrame) -> VizData:
self.metric_label = utils.get_metric_name(self.metric) if self.metric else None
return super().get_data(df)
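    # Sketch of the three supported line encodings (values illustrative):
    #   json:     '[[10.4, 57.6], [10.5, 57.7]]'  -> json.loads -> list of [lon, lat] pairs
    #   polyline: an encoded Google polyline str  -> polyline.decode -> list of (lat, lng) tuples
    #   geohash:  a geohash cell string           -> geohash_to_json -> closed bbox ring
    # When reverse_long_lat is set, each decoded point is flipped to (o[1], o[0]).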
class DeckPolygon(DeckPathViz):
"""deck.gl's Polygon Layer"""
viz_type = "deck_polygon"
deck_viz_key = "polygon"
verbose_name = _("Deck.gl - Polygon")
def query_obj(self) -> QueryObjectDict:
fd = self.form_data
self.elevation = fd.get("point_radius_fixed") or {"type": "fix", "value": 500}
return super().query_obj()
def get_metrics(self) -> List[str]:
metrics = [self.form_data.get("metric")]
if self.elevation.get("type") == "metric":
metrics.append(self.elevation.get("value"))
return [metric for metric in metrics if metric]
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
super().get_properties(d)
fd = self.form_data
elevation = fd["point_radius_fixed"]["value"]
type_ = fd["point_radius_fixed"]["type"]
d["elevation"] = (
d.get(utils.get_metric_name(elevation)) if type_ == "metric" else elevation
)
return d
class DeckHex(BaseDeckGLViz):
"""deck.gl's DeckLayer"""
viz_type = "deck_hex"
verbose_name = _("Deck.gl - 3D HEX")
spatial_control_keys = ["spatial"]
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
return {
"position": d.get("spatial"),
"weight": (d.get(self.metric_label) if self.metric_label else None) or 1,
}
def get_data(self, df: pd.DataFrame) -> VizData:
self.metric_label = utils.get_metric_name(self.metric) if self.metric else None
return super(DeckHex, self).get_data(df)
class DeckGeoJson(BaseDeckGLViz):
"""deck.gl's GeoJSONLayer"""
viz_type = "deck_geojson"
verbose_name = _("Deck.gl - GeoJSON")
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
d["columns"] += [self.form_data.get("geojson")]
d["metrics"] = []
d["groupby"] = []
return d
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
geojson = d[self.form_data["geojson"]]
return json.loads(geojson)
class DeckArc(BaseDeckGLViz):
"""deck.gl's Arc Layer"""
viz_type = "deck_arc"
verbose_name = _("Deck.gl - Arc")
spatial_control_keys = ["start_spatial", "end_spatial"]
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
fd = self.form_data
self.is_timeseries = bool(fd.get("time_grain_sqla") or fd.get("granularity"))
return super().query_obj()
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
dim = self.form_data.get("dimension")
return {
"sourcePosition": d.get("start_spatial"),
"targetPosition": d.get("end_spatial"),
"cat_color": d.get(dim) if dim else None,
DTTM_ALIAS: d.get(DTTM_ALIAS),
}
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
d = super().get_data(df)
return {
"features": d["features"], # type: ignore
"mapboxApiKey": config["MAPBOX_API_KEY"],
}
class EventFlowViz(BaseViz):
"""A visualization to explore patterns in event sequences"""
viz_type = "event_flow"
verbose_name = _("Event flow")
credits = 'from <a href="https://github.com/williaster/data-ui">@data-ui</a>'
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
query = super().query_obj()
form_data = self.form_data
event_key = form_data["all_columns_x"]
entity_key = form_data["entity"]
meta_keys = [
col
for col in form_data["all_columns"] or []
if col != event_key and col != entity_key
]
query["columns"] = [event_key, entity_key] + meta_keys
if form_data["order_by_entity"]:
query["orderby"] = [(entity_key, True)]
return query
def get_data(self, df: pd.DataFrame) -> VizData:
return df.to_dict(orient="records")
class PairedTTestViz(BaseViz):
"""A table displaying paired t-test values"""
viz_type = "paired_ttest"
verbose_name = _("Time Series - Paired t-test")
sort_series = False
is_timeseries = True
def get_data(self, df: pd.DataFrame) -> VizData:
"""
Transform received data frame into an object of the form:
{
'metric1': [
{
groups: ('groupA', ... ),
values: [ {x, y}, ... ],
}, ...
], ...
}
"""
if df.empty:
return None
fd = self.form_data
groups = fd.get("groupby")
metrics = self.metric_labels
df = df.pivot_table(index=DTTM_ALIAS, columns=groups, values=metrics)
cols = []
        # Get rid of falsy keys
for col in df.columns:
if col == "":
cols.append("N/A")
elif col is None:
cols.append("NULL")
else:
cols.append(col)
df.columns = cols
data: Dict[str, List[Dict[str, Any]]] = {}
series = df.to_dict("series")
for nameSet in df.columns:
# If no groups are defined, nameSet will be the metric name
hasGroup = not isinstance(nameSet, str)
Y = series[nameSet]
d = {
"group": nameSet[1:] if hasGroup else "All",
"values": [{"x": t, "y": Y[t] if t in Y else None} for t in df.index],
}
key = nameSet[0] if hasGroup else nameSet
if key in data:
data[key].append(d)
else:
data[key] = [d]
return data
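    # Example of the grouping above (illustrative form data): with
    # metrics == ["count"] and groupby == ["gender"], the pivoted columns are
    # tuples such as ("count", "boy"); nameSet[0] ("count") becomes the key in
    # `data` and nameSet[1:] (("boy",)) becomes that series' "group" value.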
class RoseViz(NVD3TimeSeriesViz):
viz_type = "rose"
verbose_name = _("Time Series - Nightingale Rose Chart")
sort_series = False
is_timeseries = True
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
data = super().get_data(df)
result: Dict[str, List[Dict[str, str]]] = {}
for datum in data: # type: ignore
key = datum["key"]
for val in datum["values"]:
timestamp = val["x"].value
if not result.get(timestamp):
result[timestamp] = []
value = 0 if math.isnan(val["y"]) else val["y"]
result[timestamp].append(
{
"key": key,
"value": value,
"name": ", ".join(key) if isinstance(key, list) else key,
"time": val["x"],
}
)
return result
class PartitionViz(NVD3TimeSeriesViz):
"""
A hierarchical data visualization with support for time series.
"""
viz_type = "partition"
verbose_name = _("Partition Diagram")
def query_obj(self) -> QueryObjectDict:
query_obj = super().query_obj()
time_op = self.form_data.get("time_series_option", "not_time")
# Return time series data if the user specifies so
query_obj["is_timeseries"] = time_op != "not_time"
return query_obj
def levels_for(
self, time_op: str, groups: List[str], df: pd.DataFrame
) -> Dict[int, pd.Series]:
"""
Compute the partition at each `level` from the dataframe.
"""
levels = {}
for i in range(0, len(groups) + 1):
agg_df = df.groupby(groups[:i]) if i else df
levels[i] = (
agg_df.mean()
if time_op == "agg_mean"
else agg_df.sum(numeric_only=True)
)
return levels
def levels_for_diff(
self, time_op: str, groups: List[str], df: pd.DataFrame
) -> Dict[int, pd.DataFrame]:
# Obtain a unique list of the time grains
times = list(set(df[DTTM_ALIAS]))
times.sort()
until = times[len(times) - 1]
since = times[0]
# Function describing how to calculate the difference
func = {
"point_diff": [pd.Series.sub, lambda a, b, fill_value: a - b],
"point_factor": [pd.Series.div, lambda a, b, fill_value: a / float(b)],
"point_percent": [
lambda a, b, fill_value=0: a.div(b, fill_value=fill_value) - 1,
lambda a, b, fill_value: a / float(b) - 1,
],
}[time_op]
agg_df = df.groupby(DTTM_ALIAS).sum()
levels = {
0: pd.Series(
{
m: func[1](agg_df[m][until], agg_df[m][since], 0)
for m in agg_df.columns
}
)
}
for i in range(1, len(groups) + 1):
agg_df = df.groupby([DTTM_ALIAS] + groups[:i]).sum()
levels[i] = pd.DataFrame(
{
m: func[0](agg_df[m][until], agg_df[m][since], fill_value=0)
for m in agg_df.columns
}
)
return levels
def levels_for_time(
self, groups: List[str], df: pd.DataFrame
) -> Dict[int, VizData]:
procs = {}
for i in range(0, len(groups) + 1):
self.form_data["groupby"] = groups[:i]
df_drop = df.drop(groups[i:], 1)
procs[i] = self.process_data(df_drop, aggregate=True)
self.form_data["groupby"] = groups
return procs
def nest_values(
self,
levels: Dict[int, pd.DataFrame],
level: int = 0,
metric: Optional[str] = None,
dims: Optional[List[str]] = None,
) -> List[Dict[str, Any]]:
"""
Nest values at each level on the back-end with
access and setting, instead of summing from the bottom.
"""
if dims is None:
dims = []
if not level:
return [
{
"name": m,
"val": levels[0][m],
"children": self.nest_values(levels, 1, m),
}
for m in levels[0].index
]
if level == 1:
metric_level = levels[1][metric]
return [
{
"name": i,
"val": metric_level[i],
"children": self.nest_values(levels, 2, metric, [i]),
}
for i in metric_level.index
]
if level >= len(levels):
return []
dim_level = levels[level][metric][[dims[0]]]
return [
{
"name": i,
"val": dim_level[i],
"children": self.nest_values(levels, level + 1, metric, dims + [i]),
}
for i in dim_level.index
]
def nest_procs(
self,
procs: Dict[int, pd.DataFrame],
level: int = -1,
dims: Optional[Tuple[str, ...]] = None,
time: Any = None,
) -> List[Dict[str, Any]]:
if dims is None:
dims = ()
if level == -1:
return [
{"name": m, "children": self.nest_procs(procs, 0, (m,))}
for m in procs[0].columns
]
if not level:
return [
{
"name": t,
"val": procs[0][dims[0]][t],
"children": self.nest_procs(procs, 1, dims, t),
}
for t in procs[0].index
]
if level >= len(procs):
return []
return [
{
"name": i,
"val": procs[level][dims][i][time],
"children": self.nest_procs(procs, level + 1, dims + (i,), time),
}
for i in procs[level][dims].columns
]
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
groups = fd.get("groupby", [])
time_op = fd.get("time_series_option", "not_time")
if not len(groups):
raise ValueError("Please choose at least one groupby")
if time_op == "not_time":
levels = self.levels_for("agg_sum", groups, df)
elif time_op in ["agg_sum", "agg_mean"]:
levels = self.levels_for(time_op, groups, df)
elif time_op in ["point_diff", "point_factor", "point_percent"]:
levels = self.levels_for_diff(time_op, groups, df)
elif time_op == "adv_anal":
procs = self.levels_for_time(groups, df)
return self.nest_procs(procs)
else:
levels = self.levels_for("agg_sum", [DTTM_ALIAS] + groups, df)
return self.nest_values(levels)
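    # Worked sketch of nest_values (illustrative data): with one metric "m"
    # and groups == ["a"],
    #   levels[0] == pd.Series({"m": 30})             # grand total
    #   levels[1] == the groupby("a") sum, e.g. m: {"x": 10, "y": 20}
    # yields
    #   [{"name": "m", "val": 30, "children": [
    #        {"name": "x", "val": 10, "children": []},
    #        {"name": "y", "val": 20, "children": []}]}]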
def get_subclasses(cls: Type[BaseViz]) -> Set[Type[BaseViz]]:
return set(cls.__subclasses__()).union(
[sc for c in cls.__subclasses__() for sc in get_subclasses(c)]
)
viz_types = {
o.viz_type: o
for o in get_subclasses(BaseViz)
if o.viz_type not in config["VIZ_TYPE_DENYLIST"]
}
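# The registry above maps each viz_type string to its class, e.g.
# viz_types["heatmap"] is HeatmapViz, so a chart's form_data["viz_type"] can be
# resolved with a plain dict lookup; types listed in VIZ_TYPE_DENYLIST are
# simply absent from the mapping.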
| avg_line_length: 34.400066 | max_line_length: 112 | alphanum_fraction: 0.557339 |

| hexsha: 9bd59433f5900071908bb936a5c6cda3205cecfc | size: 471 | ext: py | lang: Python | repo_path: produtos/migrations/0004_produto_upload.py | repo_name: Moisestuli/karrata | repo_head_hexsha: 962ce0c573214bfc83720727c9cacae823a8c372 | licenses: ["MIT"] | stars/issues/forks counts: null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-11-06 11:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('produtos', '0003_auto_20170903_1435'),
]
operations = [
migrations.AddField(
model_name='produto',
name='upload',
field=models.FileField(blank=True, upload_to='produtos/'),
),
]
| avg_line_length: 22.428571 | max_line_length: 70 | alphanum_fraction: 0.619958 |

| hexsha: 047fa7da49042a25297fa58405125352debf1c22 | size: 1,868 | ext: py | lang: Python | repo_path: sympy/polys/__init__.py | repo_name: pernici/sympy | repo_head_hexsha: 5e6e3b71da777f5b85b8ca2d16f33ed020cf8a41 | licenses: ["BSD-3-Clause"] | stars/issues/forks counts: null |
"""Polynomial manipulation algorithms and algebraic objects. """
from polytools import (
Poly, poly,
poly_from_expr,
parallel_poly_from_expr,
degree, degree_list,
LC, LM, LT,
pdiv, prem, pquo, pexquo,
div, rem, quo, exquo,
half_gcdex, gcdex, invert,
subresultants,
resultant, discriminant,
terms_gcd, cofactors,
gcd, gcd_list,
lcm, lcm_list,
trunc,
monic, content, primitive,
compose, decompose,
sturm,
gff_list, gff,
sqf_norm, sqf_part, sqf_list, sqf,
factor_list, factor,
intervals, refine_root, count_roots,
real_roots, nroots, ground_roots,
cancel,
reduced, groebner,
)
from polyfuncs import (
symmetrize, horner, interpolate, viete,
)
from rationaltools import (
together,
)
from polyerrors import (
OperationNotSupported,
ExactQuotientFailed,
ComputationFailed,
UnificationFailed,
GeneratorsNeeded,
RefinementFailed,
PolynomialError,
CoercionFailed,
NotInvertible,
NotReversible,
NotAlgebraic,
DomainError,
)
from numberfields import (
minimal_polynomial, minpoly,
primitive_element, primelt,
field_isomorphism,
to_number_field,
AlgebraicNumber,
isolate,
)
from monomialtools import (
Monomial, monomials, monomial_count,
)
from rootoftools import (
RootOf, RootSum,
)
from polyroots import (
roots,
)
from domains import (
FF, GF, ZZ, QQ, RR, EX,
)
from constructor import (
construct_domain,
)
from specialpolys import (
swinnerton_dyer_poly,
interpolating_poly,
cyclotomic_poly,
symmetric_poly,
random_poly,
)
from orthopolys import (
chebyshevt_poly,
chebyshevu_poly,
hermite_poly,
legendre_poly,
laguerre_poly,
)
from partfrac import (
apart,
)
from polyoptions import Options
import polycontext as ctx
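# Minimal usage sketch of the re-exported API (assuming a standard SymPy
# installation where x and y come from sympy.symbols):
#
#   >>> from sympy import symbols
#   >>> from sympy.polys import Poly, factor
#   >>> x, y = symbols('x y')
#   >>> Poly(x**2 + 2*x + 1, x)
#   Poly(x**2 + 2*x + 1, x, domain='ZZ')
#   >>> factor(x**2 - y**2)
#   (x - y)*(x + y)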
| avg_line_length: 17.961538 | max_line_length: 64 | alphanum_fraction: 0.694325 |

| hexsha: 859144b1e8625ba4764818ed1b76b930373ec530 | size: 102,843 | ext: py | lang: Python | repo_path: plenum/server/consensus/ordering_service.py | repo_name: Toktar/indy-plenum | repo_head_hexsha: 2f1f838332b0506f8dd8837ac341cba0cd3f7ff4 | licenses: ["Apache-2.0"] | stars/issues/forks counts: null |
import itertools
import logging
import time
from collections import defaultdict, OrderedDict, deque
from functools import partial
from typing import Tuple, List, Set, Optional, Dict, Iterable
import math
from orderedset._orderedset import OrderedSet
from sortedcontainers import SortedList
from common.exceptions import PlenumValueError, LogicError
from common.serializers.serialization import state_roots_serializer, invalid_index_serializer
from crypto.bls.bls_bft_replica import BlsBftReplica
from plenum.common.config_util import getConfig
from plenum.common.constants import POOL_LEDGER_ID, SEQ_NO_DB_LABEL, ReplicaHooks, AUDIT_LEDGER_ID, TXN_TYPE, \
LAST_SENT_PP_STORE_LABEL, AUDIT_TXN_PP_SEQ_NO, AUDIT_TXN_VIEW_NO, AUDIT_TXN_PRIMARIES, PREPREPARE, PREPARE, COMMIT, \
DOMAIN_LEDGER_ID, TS_LABEL
from plenum.common.event_bus import InternalBus, ExternalBus
from plenum.common.exceptions import SuspiciousNode, InvalidClientMessageException, SuspiciousPrePrepare, \
UnknownIdentifier
from plenum.common.ledger import Ledger
from plenum.common.messages.internal_messages import HookMessage, RequestPropagates, BackupSetupLastOrdered, \
RaisedSuspicion, ViewChangeStarted, NewViewCheckpointsApplied
from plenum.common.messages.node_messages import PrePrepare, Prepare, Commit, Reject, ThreePhaseKey, Ordered, \
CheckpointState, MessageReq
from plenum.common.metrics_collector import MetricsName, MetricsCollector, NullMetricsCollector, measure_time
from plenum.common.request import Request
from plenum.common.router import Subscription
from plenum.common.stashing_router import StashingRouter, PROCESS, DISCARD
from plenum.common.timer import TimerService, RepeatingTimer
from plenum.common.txn_util import get_payload_digest, get_payload_data, get_seq_no
from plenum.common.types import f
from plenum.common.util import compare_3PC_keys, updateNamedTuple, SortedDict, getMaxFailures, mostCommonElement, \
get_utc_epoch, max_3PC_key
from plenum.server.batch_handlers.three_pc_batch import ThreePcBatch
from plenum.server.consensus.consensus_shared_data import ConsensusSharedData, BatchID
from plenum.server.consensus.metrics_decorator import measure_consensus_time
from plenum.server.consensus.msg_validator import ThreePCMsgValidator
from plenum.server.models import Prepares, Commits
from plenum.server.replica_helper import PP_APPLY_REJECT_WRONG, PP_APPLY_WRONG_DIGEST, PP_APPLY_WRONG_STATE, \
PP_APPLY_ROOT_HASH_MISMATCH, PP_APPLY_HOOK_ERROR, PP_SUB_SEQ_NO_WRONG, PP_NOT_FINAL, PP_APPLY_AUDIT_HASH_MISMATCH, \
PP_REQUEST_ALREADY_ORDERED, PP_CHECK_NOT_FROM_PRIMARY, PP_CHECK_TO_PRIMARY, PP_CHECK_DUPLICATE, \
PP_CHECK_INCORRECT_POOL_STATE_ROOT, PP_CHECK_OLD, PP_CHECK_REQUEST_NOT_FINALIZED, PP_CHECK_NOT_NEXT, \
PP_CHECK_WRONG_TIME, Stats, ConsensusDataHelper, OrderedTracker, TPCStat
from plenum.server.replica_freshness_checker import FreshnessChecker
from plenum.server.replica_helper import replica_batch_digest
from plenum.server.replica_validator_enums import INCORRECT_INSTANCE, INCORRECT_PP_SEQ_NO, ALREADY_ORDERED, \
STASH_VIEW, FUTURE_VIEW, OLD_VIEW, GREATER_PREP_CERT, STASH_CATCH_UP, CATCHING_UP, OUTSIDE_WATERMARKS, \
STASH_WATERMARKS
from plenum.server.request_managers.write_request_manager import WriteRequestManager
from plenum.server.suspicion_codes import Suspicions
from stp_core.common.log import getlogger
class OrderingService:
def __init__(self,
data: ConsensusSharedData,
timer: TimerService,
bus: InternalBus,
network: ExternalBus,
write_manager: WriteRequestManager,
bls_bft_replica: BlsBftReplica,
freshness_checker: FreshnessChecker,
stasher=None,
get_current_time=None,
get_time_for_3pc_batch=None,
metrics: MetricsCollector = NullMetricsCollector()):
self.metrics = metrics
self._data = data
self._requests = self._data.requests
self._timer = timer
self._bus = bus
self._network = network
self._write_manager = write_manager
self._name = self._data.name
self.get_time_for_3pc_batch = get_time_for_3pc_batch or get_utc_epoch
        # Flag the node sets when it has chosen new primaries and needs to send a batch
self.primaries_batch_needed = False
self._config = getConfig()
self._logger = getlogger()
# TODO: Change just to self._stasher = stasher
self._stasher = stasher
self._subscription = Subscription()
self._validator = ThreePCMsgValidator(self._data)
self.get_current_time = get_current_time or self._timer.get_current_time
self._out_of_order_repeater = RepeatingTimer(self._timer,
self._config.PROCESS_STASHED_OUT_OF_ORDER_COMMITS_INTERVAL,
self._process_stashed_out_of_order_commits,
active=False)
"""
Maps from legacy replica code
"""
self._state_root_serializer = state_roots_serializer
# Keeps a map of PRE-PREPAREs which did not satisfy timestamp
# criteria, they can be accepted if >f PREPAREs are encountered.
# This is emptied on view change. With each PRE-PREPARE, a flag is
# stored which indicates whether there are sufficient acceptable
# PREPAREs or not
self.pre_prepares_stashed_for_incorrect_time = {}
# Time of the last PRE-PREPARE which satisfied all validation rules
# (time, digest, roots were all correct). This time is not to be
# reverted even if the PRE-PREPAREs are not ordered. This implies that
# the next primary would have seen all accepted PRE-PREPAREs or another
# view change will happen
self.last_accepted_pre_prepare_time = None
# Tracks for which keys PRE-PREPAREs have been requested.
# Cleared in `gc`
# type: Dict[Tuple[int, int], Optional[Tuple[str, str, str]]]
self.requested_pre_prepares = {}
# Tracks for which keys PREPAREs have been requested.
# Cleared in `gc`
# type: Dict[Tuple[int, int], Optional[Tuple[str, str, str]]]
self.requested_prepares = {}
# Tracks for which keys COMMITs have been requested.
# Cleared in `gc`
# type: Dict[Tuple[int, int], Optional[Tuple[str, str, str]]]
self.requested_commits = {}
        # PRE-PREPARE timestamps stored by a non-primary replica to check
        # obsolescence of incoming PrePrepares. Pre-prepares with the same
        # 3PC key are not merged since we need to keep incoming timestamps
        # for each new PP from every node separately.
# Dictionary:
# key: Tuple[pp.viewNo, pp.seqNo]
# value: Dict[Tuple[PrePrepare, sender], timestamp]
self.pre_prepare_tss = defaultdict(dict)
# PRE-PREPAREs that are waiting to be processed but do not have the
# corresponding request finalised. Happens when replica has not been
# forwarded the request by the node but is getting 3 phase messages.
        # The value is a list since a malicious entity might send a PRE-PREPARE
        # with a different digest, and since we don't have the request finalised
        # yet, we store all PRE-PREPAREs
# type: List[Tuple[PrePrepare, str, Set[Tuple[str, int]]]]
self.prePreparesPendingFinReqs = []
# PrePrepares waiting for previous PrePrepares, key being tuple of view
# number and pre-prepare sequence numbers and value being tuple of
# PrePrepare and sender
# TODO: Since pp_seq_no will start from 1 in each view, the comparator
# of SortedDict needs to change
self.prePreparesPendingPrevPP = SortedDict(lambda k: (k[0], k[1]))
# PREPAREs that are stored by non primary replica for which it has not
# got any PRE-PREPARE. Dictionary that stores a tuple of view no and
# prepare sequence number as key and a deque of PREPAREs as value.
# This deque is attempted to be flushed on receiving every
# PRE-PREPARE request.
self.preparesWaitingForPrePrepare = {}
# Defines if there was a batch after last catchup
self.first_batch_after_catchup = False
self._lastPrePrepareSeqNo = self._data.low_watermark # type: int
# COMMITs that are stored for which there are no PRE-PREPARE or PREPARE
# received
self.commitsWaitingForPrepare = {}
# type: Dict[Tuple[int, int], deque]
# Dictionary of sent PRE-PREPARE that are stored by primary replica
# which it has broadcasted to all other non primary replicas
# Key of dictionary is a 2 element tuple with elements viewNo,
# pre-prepare seqNo and value is the received PRE-PREPARE
self.sentPrePrepares = SortedDict(lambda k: (k[0], k[1]))
# type: Dict[Tuple[int, int], PrePrepare]
# Dictionary of received PRE-PREPAREs. Key of dictionary is a 2
# element tuple with elements viewNo, pre-prepare seqNo and value
# is the received PRE-PREPARE
self.prePrepares = SortedDict(lambda k: (k[0], k[1]))
# type: Dict[Tuple[int, int], PrePrepare]
# Dictionary of received Prepare requests. Key of dictionary is a 2
# element tuple with elements viewNo, seqNo and value is a 2 element
# tuple containing request digest and set of sender node names(sender
# replica names in case of multiple protocol instances)
# (viewNo, seqNo) -> ((identifier, reqId), {senders})
self.prepares = Prepares()
# type: Dict[Tuple[int, int], Tuple[Tuple[str, int], Set[str]]]
self.commits = Commits()
# type: Dict[Tuple[int, int], Tuple[Tuple[str, int], Set[str]]]
        # Dictionary to keep track of which replica was primary during each
# view. Key is the view no and value is the name of the primary
# replica during that view
self.primary_names = OrderedDict() # type: OrderedDict[int, str]
# Indicates name of the primary replica of this protocol instance.
# None in case the replica does not know who the primary of the
# instance is
self._primary_name = None # type: Optional[str]
        # Did we log a message about getting a request while there was no primary
self.warned_no_primary = False
self.requestQueues = {} # type: Dict[int, OrderedSet]
self.stats = Stats(TPCStat)
self.batches = OrderedDict() # type: OrderedDict[Tuple[int, int]]
self.l_bls_bft_replica = bls_bft_replica
# Set of tuples to keep track of ordered requests. Each tuple is
# (viewNo, ppSeqNo).
self.ordered = OrderedTracker()
self.lastBatchCreated = self.get_current_time()
# Commits which are not being ordered since commits with lower
# sequence numbers have not been ordered yet. Key is the
# viewNo and value a map of pre-prepare sequence number to commit
# type: Dict[int,Dict[int,Commit]]
self.stashed_out_of_order_commits = {}
self._freshness_checker = freshness_checker
self._skip_send_3pc_ts = None
self._consensus_data_helper = ConsensusDataHelper(self._data)
self._subscription.subscribe(self._stasher, PrePrepare, self.process_preprepare)
self._subscription.subscribe(self._stasher, Prepare, self.process_prepare)
self._subscription.subscribe(self._stasher, Commit, self.process_commit)
self._subscription.subscribe(self._stasher, NewViewCheckpointsApplied, self.process_new_view_checkpoints_applied)
self._subscription.subscribe(self._bus, ViewChangeStarted, self.process_view_change_started)
# Dict to keep PrePrepares from old view to be re-ordered in the new view
# key is (viewNo, ppDigest) tuple, and value is a PrePrepare
self.old_view_preprepares = {}
def cleanup(self):
self._subscription.unsubscribe_all()
def __repr__(self):
return self.name
@measure_consensus_time(MetricsName.PROCESS_PREPARE_TIME,
MetricsName.BACKUP_PROCESS_PREPARE_TIME)
def process_prepare(self, prepare: Prepare, sender: str):
"""
Validate and process the PREPARE specified.
If validation is successful, create a COMMIT and broadcast it.
:param prepare: a PREPARE msg
:param sender: name of the node that sent the PREPARE
"""
result, reason = self._validate(prepare)
if result != PROCESS:
return result, reason
key = (prepare.viewNo, prepare.ppSeqNo)
self._logger.debug("{} received PREPARE{} from {}".format(self, key, sender))
# TODO move this try/except up higher
try:
if self._validate_prepare(prepare, sender):
self._add_to_prepares(prepare, sender)
self.stats.inc(TPCStat.PrepareRcvd)
self._logger.debug("{} processed incoming PREPARE {}".format(
self, (prepare.viewNo, prepare.ppSeqNo)))
else:
# TODO let's have isValidPrepare throw an exception that gets
# handled and possibly logged higher
self._logger.trace("{} cannot process incoming PREPARE".format(self))
except SuspiciousNode as ex:
self.report_suspicious_node(ex)
return None, None
def _validate_prepare(self, prepare: Prepare, sender: str) -> bool:
"""
Return whether the PREPARE specified is valid.
:param prepare: the PREPARE to validate
:param sender: the name of the node that sent the PREPARE
:return: True if PREPARE is valid, False otherwise
"""
key = (prepare.viewNo, prepare.ppSeqNo)
primaryStatus = self._is_primary_for_msg(prepare)
ppReq = self.get_preprepare(*key)
# If a non primary replica and receiving a PREPARE request before a
# PRE-PREPARE request, then proceed
# PREPARE should not be sent from primary
if self._is_msg_from_primary(prepare, sender):
self.report_suspicious_node(SuspiciousNode(sender, Suspicions.PR_FRM_PRIMARY, prepare))
return False
# If non primary replica
if primaryStatus is False:
if self.prepares.hasPrepareFrom(prepare, sender):
self.report_suspicious_node(SuspiciousNode(
sender, Suspicions.DUPLICATE_PR_SENT, prepare))
return False
# If PRE-PREPARE not received for the PREPARE, might be slow
# network
if not ppReq:
self._enqueue_prepare(prepare, sender)
self.l_setup_last_ordered_for_non_master()
return False
# If primary replica
if primaryStatus is True:
if self.prepares.hasPrepareFrom(prepare, sender):
self.report_suspicious_node(SuspiciousNode(
sender, Suspicions.DUPLICATE_PR_SENT, prepare))
return False
# If PRE-PREPARE was not sent for this PREPARE, certainly
# malicious behavior
elif not ppReq:
self.report_suspicious_node(SuspiciousNode(
sender, Suspicions.UNKNOWN_PR_SENT, prepare))
return False
if primaryStatus is None and not ppReq:
self._enqueue_prepare(prepare, sender)
self.l_setup_last_ordered_for_non_master()
return False
if prepare.digest != ppReq.digest:
self.report_suspicious_node(SuspiciousNode(sender, Suspicions.PR_DIGEST_WRONG, prepare))
return False
elif prepare.stateRootHash != ppReq.stateRootHash:
self.report_suspicious_node(SuspiciousNode(sender, Suspicions.PR_STATE_WRONG,
prepare))
return False
elif prepare.txnRootHash != ppReq.txnRootHash:
self.report_suspicious_node(SuspiciousNode(sender, Suspicions.PR_TXN_WRONG,
prepare))
return False
elif prepare.auditTxnRootHash != ppReq.auditTxnRootHash:
self.report_suspicious_node(SuspiciousNode(sender, Suspicions.PR_AUDIT_TXN_ROOT_HASH_WRONG,
prepare))
return False
# BLS multi-sig:
self.l_bls_bft_replica.validate_prepare(prepare, sender)
return True
"""Method from legacy code"""
def _enqueue_prepare(self, pMsg: Prepare, sender: str):
key = (pMsg.viewNo, pMsg.ppSeqNo)
self._logger.debug("{} queueing prepare due to unavailability of PRE-PREPARE. "
"Prepare {} for key {} from {}".format(self, pMsg, key, sender))
if key not in self.preparesWaitingForPrePrepare:
self.preparesWaitingForPrePrepare[key] = deque()
self.preparesWaitingForPrePrepare[key].append((pMsg, sender))
if key not in self.pre_prepares_stashed_for_incorrect_time:
if self.is_master or self.last_ordered_3pc[1] != 0:
self._request_pre_prepare_for_prepare(key)
else:
self._process_stashed_pre_prepare_for_time_if_possible(key)
def _process_stashed_pre_prepare_for_time_if_possible(
self, key: Tuple[int, int]):
"""
Check if any PRE-PREPAREs that were stashed since their time was not
acceptable, can now be accepted since enough PREPAREs are received
"""
self._logger.debug('{} going to process stashed PRE-PREPAREs with '
'incorrect times'.format(self))
q = self._data.quorums.f
if len(self.preparesWaitingForPrePrepare[key]) > q:
times = [pr.ppTime for (pr, _) in
self.preparesWaitingForPrePrepare[key]]
most_common_time, freq = mostCommonElement(times)
if self._data.quorums.timestamp.is_reached(freq):
self._logger.debug('{} found sufficient PREPAREs for the '
'PRE-PREPARE{}'.format(self, key))
stashed_pp = self.pre_prepares_stashed_for_incorrect_time
pp, sender, done = stashed_pp[key]
if done:
self._logger.debug('{} already processed PRE-PREPARE{}'.format(self, key))
return True
# True is set since that will indicate to `is_pre_prepare_time_acceptable`
# that sufficient PREPAREs are received
stashed_pp[key] = (pp, sender, True)
self._network.process_incoming(pp, sender)
return True
return False
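    # Illustrative numbers (assuming a 4-node pool, so f == 1): a PRE-PREPARE
    # stashed for an unacceptable timestamp is re-processed once more than
    # f == 1 PREPAREs are queued for its 3PC key and the most common ppTime
    # among them reaches the timestamp quorum.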
def _request_pre_prepare_for_prepare(self, three_pc_key) -> bool:
"""
        Check whether an acceptable PRE-PREPARE is already stashed; if not,
        check the count of PREPAREs, make sure >f consistent PREPAREs are found,
        and store the acceptable PREPARE state (digest, roots) for verification
        of the received PRE-PREPARE
"""
if three_pc_key in self.prePreparesPendingPrevPP:
self._logger.debug('{} not requesting a PRE-PREPARE since already found '
'stashed for {}'.format(self, three_pc_key))
return False
if len(
self.preparesWaitingForPrePrepare[three_pc_key]) < self._data.quorums.prepare.value:
self._logger.debug(
'{} not requesting a PRE-PREPARE because does not have'
' sufficient PREPAREs for {}'.format(
self, three_pc_key))
return False
digest, state_root, txn_root, _ = \
self._get_acceptable_stashed_prepare_state(three_pc_key)
# Choose a better data structure for `prePreparesPendingFinReqs`
pre_prepares = [pp for pp, _, _ in self.prePreparesPendingFinReqs
if (pp.viewNo, pp.ppSeqNo) == three_pc_key]
if pre_prepares:
if [pp for pp in pre_prepares if (pp.digest, pp.stateRootHash, pp.txnRootHash) == (digest, state_root, txn_root)]:
self._logger.debug('{} not requesting a PRE-PREPARE since already '
'found stashed for {}'.format(self, three_pc_key))
return False
self._request_pre_prepare(three_pc_key,
stash_data=(digest, state_root, txn_root))
return True
def _get_acceptable_stashed_prepare_state(self, three_pc_key):
prepares = {s: (m.digest, m.stateRootHash, m.txnRootHash) for m, s in
self.preparesWaitingForPrePrepare[three_pc_key]}
acceptable, freq = mostCommonElement(prepares.values())
return (*acceptable, {s for s, state in prepares.items()
if state == acceptable})
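    # Example (hypothetical senders): if the stashed PREPAREs map to states
    # {"A": (d1, s1, t1), "B": (d1, s1, t1), "C": (d2, s2, t2)}, the most
    # common state (d1, s1, t1) is returned along with the senders that
    # reported it, here {"A", "B"}.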
def _is_primary_for_msg(self, msg) -> Optional[bool]:
"""
Return whether this replica is primary if the request's view number is
        equal to this replica's view number and a primary has been selected for
the current view.
Return None otherwise.
:param msg: message
"""
return self._data.is_primary if self._is_msg_for_current_view(msg) \
else self._is_primary_in_view(msg.viewNo)
def _is_primary_in_view(self, viewNo: int) -> Optional[bool]:
"""
Return whether this replica was primary in the given view
"""
if viewNo not in self.primary_names:
return False
return self.primary_names[viewNo] == self.name
@measure_consensus_time(MetricsName.PROCESS_COMMIT_TIME,
MetricsName.BACKUP_PROCESS_COMMIT_TIME)
def process_commit(self, commit: Commit, sender: str):
"""
Validate and process the COMMIT specified.
If validation is successful, return the message to the node.
:param commit: an incoming COMMIT message
:param sender: name of the node that sent the COMMIT
"""
result, reason = self._validate(commit)
if result != PROCESS:
return result, reason
self._logger.debug("{} received COMMIT{} from {}".format(
self, (commit.viewNo, commit.ppSeqNo), sender))
if self._validate_commit(commit, sender):
self.stats.inc(TPCStat.CommitRcvd)
self._add_to_commits(commit, sender)
self._logger.debug("{} processed incoming COMMIT{}".format(
self, (commit.viewNo, commit.ppSeqNo)))
return result, reason
def _validate_commit(self, commit: Commit, sender: str) -> bool:
"""
Return whether the COMMIT specified is valid.
:param commit: the COMMIT to validate
        :return: True if the COMMIT is valid, False otherwise
"""
key = (commit.viewNo, commit.ppSeqNo)
if not self._has_prepared(key):
self._enqueue_commit(commit, sender)
return False
if self.commits.hasCommitFrom(commit, sender):
self.report_suspicious_node(SuspiciousNode(sender, Suspicions.DUPLICATE_CM_SENT, commit))
return False
# BLS multi-sig:
pre_prepare = self.get_preprepare(commit.viewNo, commit.ppSeqNo)
why_not = self.l_bls_bft_replica.validate_commit(commit, sender, pre_prepare)
if why_not == BlsBftReplica.CM_BLS_SIG_WRONG:
self._logger.warning("{} discard Commit message from "
"{}:{}".format(self, sender, commit))
self.report_suspicious_node(SuspiciousNode(sender,
Suspicions.CM_BLS_SIG_WRONG,
commit))
return False
elif why_not is not None:
self._logger.warning("Unknown error code returned for bls commit "
"validation {}".format(why_not))
return True
def _enqueue_commit(self, request: Commit, sender: str):
key = (request.viewNo, request.ppSeqNo)
self._logger.debug("{} - Queueing commit due to unavailability of PREPARE. "
"Request {} with key {} from {}".format(self, request, key, sender))
if key not in self.commitsWaitingForPrepare:
self.commitsWaitingForPrepare[key] = deque()
self.commitsWaitingForPrepare[key].append((request, sender))
@measure_consensus_time(MetricsName.PROCESS_PREPREPARE_TIME,
MetricsName.BACKUP_PROCESS_PREPREPARE_TIME)
def process_preprepare(self, pre_prepare: PrePrepare, sender: str):
"""
Validate and process provided PRE-PREPARE, create and
broadcast PREPARE for it.
:param pre_prepare: message
:param sender: name of the node that sent this message
"""
pp_key = (pre_prepare.viewNo, pre_prepare.ppSeqNo)
# the same PrePrepare might come here multiple times
if (pp_key and (pre_prepare, sender) not in self.pre_prepare_tss[pp_key]):
# TODO more clean solution would be to set timestamps
# earlier (e.g. in zstack)
self.pre_prepare_tss[pp_key][pre_prepare, sender] = self.get_time_for_3pc_batch()
result, reason = self._validate(pre_prepare)
if result != PROCESS:
return result, reason
key = (pre_prepare.viewNo, pre_prepare.ppSeqNo)
self._logger.debug("{} received PRE-PREPARE{} from {}".format(self, key, sender))
# TODO: should we still do it?
# Converting each req_idrs from list to tuple
req_idrs = {f.REQ_IDR.nm: [key for key in pre_prepare.reqIdr]}
pre_prepare = updateNamedTuple(pre_prepare, **req_idrs)
def report_suspicious(reason):
ex = SuspiciousNode(sender, reason, pre_prepare)
self.report_suspicious_node(ex)
why_not = self._can_process_pre_prepare(pre_prepare, sender)
if why_not is None:
why_not_applied = \
self._process_valid_preprepare(pre_prepare, sender)
if why_not_applied is not None:
if why_not_applied == PP_APPLY_REJECT_WRONG:
report_suspicious(Suspicions.PPR_REJECT_WRONG)
elif why_not_applied == PP_APPLY_WRONG_DIGEST:
report_suspicious(Suspicions.PPR_DIGEST_WRONG)
elif why_not_applied == PP_APPLY_WRONG_STATE:
report_suspicious(Suspicions.PPR_STATE_WRONG)
elif why_not_applied == PP_APPLY_ROOT_HASH_MISMATCH:
report_suspicious(Suspicions.PPR_TXN_WRONG)
elif why_not_applied == PP_APPLY_HOOK_ERROR:
report_suspicious(Suspicions.PPR_PLUGIN_EXCEPTION)
elif why_not_applied == PP_SUB_SEQ_NO_WRONG:
report_suspicious(Suspicions.PPR_SUB_SEQ_NO_WRONG)
elif why_not_applied == PP_NOT_FINAL:
# this is fine, just wait for another
return None, None
elif why_not_applied == PP_APPLY_AUDIT_HASH_MISMATCH:
report_suspicious(Suspicions.PPR_AUDIT_TXN_ROOT_HASH_WRONG)
elif why_not_applied == PP_REQUEST_ALREADY_ORDERED:
report_suspicious(Suspicions.PPR_WITH_ORDERED_REQUEST)
elif why_not == PP_CHECK_NOT_FROM_PRIMARY:
report_suspicious(Suspicions.PPR_FRM_NON_PRIMARY)
elif why_not == PP_CHECK_TO_PRIMARY:
report_suspicious(Suspicions.PPR_TO_PRIMARY)
elif why_not == PP_CHECK_DUPLICATE:
report_suspicious(Suspicions.DUPLICATE_PPR_SENT)
elif why_not == PP_CHECK_INCORRECT_POOL_STATE_ROOT:
report_suspicious(Suspicions.PPR_POOL_STATE_ROOT_HASH_WRONG)
elif why_not == PP_CHECK_OLD:
self._logger.info("PRE-PREPARE {} has ppSeqNo lower "
"then the latest one - ignoring it".format(key))
elif why_not == PP_CHECK_REQUEST_NOT_FINALIZED:
absents = set()
non_fin = set()
non_fin_payload = set()
for key in pre_prepare.reqIdr:
req = self._requests.get(key)
if req is None:
absents.add(key)
elif not req.finalised:
non_fin.add(key)
non_fin_payload.add(req.request.payload_digest)
absent_str = ', '.join(str(key) for key in absents)
non_fin_str = ', '.join(
'{} ({} : {})'.format(str(key),
str(len(self._requests[key].propagates)),
', '.join(self._requests[key].propagates.keys())) for key in non_fin)
self._logger.warning(
"{} found requests in the incoming pp, of {} ledger, that are not finalized. "
"{} of them don't have propagates: [{}]. "
"{} of them don't have enough propagates: [{}].".format(self, pre_prepare.ledgerId,
len(absents), absent_str,
len(non_fin), non_fin_str))
def signal_suspicious(req):
self._logger.info("Request digest {} already ordered. Discard {} "
"from {}".format(req, pre_prepare, sender))
report_suspicious(Suspicions.PPR_WITH_ORDERED_REQUEST)
# checking for payload digest is more effective
for payload_key in non_fin_payload:
if self.db_manager.get_store(SEQ_NO_DB_LABEL).get_by_payload_digest(payload_key) != (None, None):
signal_suspicious(payload_key)
return None, None
# for absents we can only check full digest
for full_key in absents:
if self.db_manager.get_store(SEQ_NO_DB_LABEL).get_by_full_digest(full_key) is not None:
signal_suspicious(full_key)
return None, None
bad_reqs = absents | non_fin
self._enqueue_pre_prepare(pre_prepare, sender, bad_reqs)
# TODO: An optimisation might be to not request PROPAGATEs
# if some PROPAGATEs are present or a client request is
# present and sufficient PREPAREs and PRE-PREPARE are present,
# then the digest can be compared but this is expensive as the
# PREPARE and PRE-PREPARE contain a combined digest
self._schedule(partial(self._request_propagates_if_needed, bad_reqs, pre_prepare),
self._config.PROPAGATE_REQUEST_DELAY)
elif why_not == PP_CHECK_NOT_NEXT:
pp_view_no = pre_prepare.viewNo
pp_seq_no = pre_prepare.ppSeqNo
last_pp_view_no, last_pp_seq_no = self.__last_pp_3pc
if pp_view_no >= last_pp_view_no and (
self.is_master or self.last_ordered_3pc[1] != 0):
seq_frm = last_pp_seq_no + 1 if pp_view_no == last_pp_view_no else 1
seq_to = pp_seq_no - 1
if seq_to >= seq_frm >= pp_seq_no - self._config.CHK_FREQ + 1:
self._logger.warning(
"{} missing PRE-PREPAREs from {} to {}, "
"going to request".format(self, seq_frm, seq_to))
self._request_missing_three_phase_messages(
pp_view_no, seq_frm, seq_to)
self._enqueue_pre_prepare(pre_prepare, sender)
self.l_setup_last_ordered_for_non_master()
elif why_not == PP_CHECK_WRONG_TIME:
key = (pre_prepare.viewNo, pre_prepare.ppSeqNo)
item = (pre_prepare, sender, False)
self.pre_prepares_stashed_for_incorrect_time[key] = item
report_suspicious(Suspicions.PPR_TIME_WRONG)
elif why_not == BlsBftReplica.PPR_BLS_MULTISIG_WRONG:
report_suspicious(Suspicions.PPR_BLS_MULTISIG_WRONG)
else:
self._logger.warning("Unknown PRE-PREPARE check status: {}".format(why_not))
return None, None
"""Properties from legacy code"""
@property
def view_no(self):
return self._data.view_no
@property
def last_ordered_3pc(self):
return self._data.last_ordered_3pc
@last_ordered_3pc.setter
def last_ordered_3pc(self, lo_tuple):
self._data.last_ordered_3pc = lo_tuple
self._logger.info('{} set last ordered as {}'.format(self, lo_tuple))
@property
def last_preprepare(self):
last_3pc = (0, 0)
lastPp = None
if self.sentPrePrepares:
(v, s), pp = self.sentPrePrepares.peekitem(-1)
last_3pc = (v, s)
lastPp = pp
if self.prePrepares:
(v, s), pp = self.prePrepares.peekitem(-1)
if compare_3PC_keys(last_3pc, (v, s)) > 0:
lastPp = pp
return lastPp
@property
def __last_pp_3pc(self):
last_pp = self.last_preprepare
if not last_pp:
return self.last_ordered_3pc
last_3pc = (last_pp.viewNo, last_pp.ppSeqNo)
if compare_3PC_keys(self.last_ordered_3pc, last_3pc) > 0:
return last_3pc
return self.last_ordered_3pc
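    # Example (hypothetical keys, taking compare_3PC_keys(a, b) > 0 to mean
    # b is the later key, as its other uses in this class imply): with
    # last_ordered_3pc == (0, 5) and a last PRE-PREPARE at (0, 7), the
    # property reports (0, 7) as the most recent known 3PC key.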
@property
def db_manager(self):
return self._write_manager.database_manager
@property
def is_master(self):
return self._data.is_master
@property
def primary_name(self):
"""
Name of the primary replica of this replica's instance
:return: Returns name if primary is known, None otherwise
"""
return self._data.primary_name
@property
def name(self):
return self._data.name
@name.setter
def name(self, n):
self._data._name = n
@property
def f(self):
return getMaxFailures(self._data.total_nodes)
def gc(self, till3PCKey):
self._logger.info("{} cleaning up till {}".format(self, till3PCKey))
tpcKeys = set()
reqKeys = set()
for key3PC, pp in itertools.chain(
self.sentPrePrepares.items(),
self.prePrepares.items()
):
if compare_3PC_keys(till3PCKey, key3PC) <= 0:
tpcKeys.add(key3PC)
for reqKey in pp.reqIdr:
reqKeys.add(reqKey)
for key3PC, pp_dict in self.pre_prepare_tss.items():
if compare_3PC_keys(till3PCKey, key3PC) <= 0:
tpcKeys.add(key3PC)
# TODO INDY-1983: was found that it adds additional
# requests to clean, need to explore why
# for (pp, _) in pp_dict:
# for reqKey in pp.reqIdr:
# reqKeys.add(reqKey)
self._logger.trace("{} found {} 3-phase keys to clean".
format(self, len(tpcKeys)))
self._logger.trace("{} found {} request keys to clean".
format(self, len(reqKeys)))
to_clean_up = (
self.pre_prepare_tss,
self.sentPrePrepares,
self.prePrepares,
self.prepares,
self.commits,
self.batches,
self.requested_pre_prepares,
self.requested_prepares,
self.requested_commits,
self.pre_prepares_stashed_for_incorrect_time,
self.old_view_preprepares
)
for request_key in tpcKeys:
for coll in to_clean_up:
coll.pop(request_key, None)
for request_key in reqKeys:
self._requests.free(request_key)
for ledger_id, keys in self.requestQueues.items():
if request_key in keys:
self.discard_req_key(ledger_id, request_key)
self._logger.trace('{} freed request {} from previous checkpoints'
.format(self, request_key))
# ToDo: do we need ordered messages there?
self.ordered.clear_below_view(self.view_no - 1)
# BLS multi-sig:
self.l_bls_bft_replica.gc(till3PCKey)
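    # Illustrative example (derived from the compare_3PC_keys check above):
    # gc((0, 100)) drops every tracked PRE-PREPARE/PREPARE/COMMIT and batch
    # whose 3PC key is at or below (view 0, seq 100), frees the request keys
    # they referenced, and lets the BLS component clean up as well.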
def discard_req_key(self, ledger_id, req_key):
self.requestQueues[ledger_id].discard(req_key)
def _clear_prev_view_pre_prepares(self):
to_remove = []
for idx, (pp, _, _) in enumerate(self.prePreparesPendingFinReqs):
if pp.viewNo < self.view_no:
to_remove.insert(0, idx)
for idx in to_remove:
self.prePreparesPendingFinReqs.pop(idx)
for (v, p) in list(self.prePreparesPendingPrevPP.keys()):
if v < self.view_no:
self.prePreparesPendingPrevPP.pop((v, p))
def report_suspicious_node(self, ex: SuspiciousNode):
self._bus.send(RaisedSuspicion(inst_id=self._data.inst_id,
ex=ex))
def _validate(self, msg):
return self._validator.validate(msg)
"""Method from legacy code"""
def l_compact_primary_names(self):
min_allowed_view_no = self.view_no - 1
views_to_remove = []
for view_no in self.primary_names:
if view_no >= min_allowed_view_no:
break
views_to_remove.append(view_no)
for view_no in views_to_remove:
self.primary_names.pop(view_no)
def _can_process_pre_prepare(self, pre_prepare: PrePrepare, sender: str):
"""
Decide whether this replica is eligible to process a PRE-PREPARE.
:param pre_prepare: a PRE-PREPARE msg to process
:param sender: the name of the node that sent the PRE-PREPARE msg
"""
# TODO: Check whether it is rejecting PRE-PREPARE from previous view
# PRE-PREPARE should not be sent from non primary
if not self._is_msg_from_primary(pre_prepare, sender):
return PP_CHECK_NOT_FROM_PRIMARY
# Already has a PRE-PREPARE with same 3 phase key
if (pre_prepare.viewNo, pre_prepare.ppSeqNo) in self.prePrepares:
return PP_CHECK_DUPLICATE
if not self._is_pre_prepare_time_acceptable(pre_prepare, sender):
return PP_CHECK_WRONG_TIME
if compare_3PC_keys((pre_prepare.viewNo, pre_prepare.ppSeqNo),
self.__last_pp_3pc) > 0:
return PP_CHECK_OLD # ignore old pre-prepare
if self._non_finalised_reqs(pre_prepare.reqIdr):
return PP_CHECK_REQUEST_NOT_FINALIZED
if not self._is_next_pre_prepare(pre_prepare.viewNo,
pre_prepare.ppSeqNo):
return PP_CHECK_NOT_NEXT
if f.POOL_STATE_ROOT_HASH.nm in pre_prepare and \
pre_prepare.poolStateRootHash != self.get_state_root_hash(POOL_LEDGER_ID):
return PP_CHECK_INCORRECT_POOL_STATE_ROOT
# BLS multi-sig:
status = self.l_bls_bft_replica.validate_pre_prepare(pre_prepare,
sender)
if status is not None:
return status
return None
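    # Note (summarising the dispatch earlier in this class): each non-None
    # status returned here maps to a recovery action in the caller, e.g.
    # PP_CHECK_NOT_NEXT triggers a request for the missing PRE-PREPAREs and
    # stashes this one, while PP_CHECK_WRONG_TIME stashes it and reports
    # Suspicions.PPR_TIME_WRONG.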
def _schedule(self, func, delay):
self._timer.schedule(delay, func)
def _process_valid_preprepare(self, pre_prepare: PrePrepare, sender: str):
self.first_batch_after_catchup = False
old_state_root = self.get_state_root_hash(pre_prepare.ledgerId, to_str=False)
old_txn_root = self.get_txn_root_hash(pre_prepare.ledgerId)
if self.is_master:
self._logger.debug('{} state root before processing {} is {}, {}'.format(
self,
pre_prepare,
old_state_root,
old_txn_root))
# 1. APPLY
reqs, invalid_indices, rejects, suspicious = self._apply_pre_prepare(pre_prepare)
# 2. CHECK IF MORE CHUNKS NEED TO BE APPLIED FURTHER BEFORE VALIDATION
if pre_prepare.sub_seq_no != 0:
return PP_SUB_SEQ_NO_WRONG
if not pre_prepare.final:
return PP_NOT_FINAL
# 3. VALIDATE APPLIED
invalid_from_pp = invalid_index_serializer.deserialize(pre_prepare.discarded)
if suspicious:
why_not_applied = PP_REQUEST_ALREADY_ORDERED
else:
why_not_applied = self._validate_applied_pre_prepare(pre_prepare,
reqs, invalid_indices, invalid_from_pp)
# 4. IF NOT VALID AFTER APPLYING - REVERT
if why_not_applied is not None:
if self.is_master:
self._revert(pre_prepare.ledgerId,
old_state_root,
len(pre_prepare.reqIdr) - len(invalid_indices))
return why_not_applied
# 5. EXECUTE HOOK
if self.is_master:
try:
self.l_execute_hook(ReplicaHooks.APPLY_PPR, pre_prepare)
except Exception as ex:
self._logger.warning('{} encountered exception in replica '
'hook {} : {}'.
format(self, ReplicaHooks.APPLY_PPR, ex))
self._revert(pre_prepare.ledgerId,
old_state_root,
len(pre_prepare.reqIdr) - len(invalid_from_pp))
return PP_APPLY_HOOK_ERROR
# 6. TRACK APPLIED
if rejects:
for reject in rejects:
self._network.send(reject)
self._add_to_pre_prepares(pre_prepare)
if self.is_master:
# BLS multi-sig:
self.l_bls_bft_replica.process_pre_prepare(pre_prepare, sender)
self._logger.trace("{} saved shared multi signature for "
"root".format(self, old_state_root))
if not self.is_master:
self.db_manager.get_store(LAST_SENT_PP_STORE_LABEL).store_last_sent_pp_seq_no(
self._data.inst_id, pre_prepare.ppSeqNo)
self._track_batches(pre_prepare, old_state_root)
key = (pre_prepare.viewNo, pre_prepare.ppSeqNo)
self._logger.debug("{} processed incoming PRE-PREPARE{}".format(self, key),
extra={"tags": ["processing"]})
return None
def _enqueue_pre_prepare(self, pre_prepare: PrePrepare, sender: str,
nonFinReqs: Set = None):
if nonFinReqs:
self._logger.info("{} - Queueing pre-prepares due to unavailability of finalised "
"requests. PrePrepare {} from {}".format(self, pre_prepare, sender))
self.prePreparesPendingFinReqs.append((pre_prepare, sender, nonFinReqs))
else:
            # Possible exploit: a malicious party can send an invalid
            # pre-prepare and overwrite the correct one?
self._logger.info("Queueing pre-prepares due to unavailability of previous pre-prepares. {} from {}".
format(pre_prepare, sender))
self.prePreparesPendingPrevPP[pre_prepare.viewNo, pre_prepare.ppSeqNo] = (pre_prepare, sender)
def _request_propagates_if_needed(self, bad_reqs: list, pre_prepare: PrePrepare):
if any(pre_prepare is pended[0] for pended in self.prePreparesPendingFinReqs):
self._bus.send(RequestPropagates(bad_reqs))
def _request_missing_three_phase_messages(self, view_no: int, seq_frm: int, seq_to: int) -> None:
for pp_seq_no in range(seq_frm, seq_to + 1):
key = (view_no, pp_seq_no)
self._request_pre_prepare(key)
self._request_prepare(key)
self._request_commit(key)
def _request_three_phase_msg(self, three_pc_key: Tuple[int, int],
stash: Dict[Tuple[int, int], Optional[Tuple[str, str, str]]],
msg_type: str,
recipients: Optional[List[str]] = None,
stash_data: Optional[Tuple[str, str, str]] = None) -> bool:
if three_pc_key in stash:
self._logger.debug('{} not requesting {} since already '
'requested for {}'.format(self, msg_type, three_pc_key))
return False
# TODO: Using a timer to retry would be a better thing to do
self._logger.trace('{} requesting {} for {} from {}'.format(
self, msg_type, three_pc_key, recipients))
# An optimisation can be to request PRE-PREPARE from f+1 or
# f+x (f+x<2f) nodes only rather than 2f since only 1 correct
# PRE-PREPARE is needed.
self._request_msg(msg_type, {f.INST_ID.nm: self._data.inst_id,
f.VIEW_NO.nm: three_pc_key[0],
f.PP_SEQ_NO.nm: three_pc_key[1]},
frm=recipients)
stash[three_pc_key] = stash_data
return True
def _request_pre_prepare(self, three_pc_key: Tuple[int, int],
stash_data: Optional[Tuple[str, str, str]] = None) -> bool:
"""
Request preprepare
"""
recipients = self.primary_name
return self._request_three_phase_msg(three_pc_key,
self.requested_pre_prepares,
PREPREPARE,
recipients,
stash_data)
def _request_prepare(self, three_pc_key: Tuple[int, int],
recipients: List[str] = None,
stash_data: Optional[Tuple[str, str, str]] = None) -> bool:
"""
        Request prepare
"""
if recipients is None:
recipients = self._network.connecteds.copy()
primary_name = self.primary_name[:self.primary_name.rfind(":")]
if primary_name in recipients:
recipients.remove(primary_name)
return self._request_three_phase_msg(three_pc_key, self.requested_prepares, PREPARE, recipients, stash_data)
def _request_commit(self, three_pc_key: Tuple[int, int],
recipients: List[str] = None) -> bool:
"""
Request commit
"""
if recipients is None:
recipients = self._network.connecteds.copy()
return self._request_three_phase_msg(three_pc_key, self.requested_commits, COMMIT, recipients)
@measure_time(MetricsName.SEND_MESSAGE_REQ_TIME)
def _request_msg(self, typ, params: Dict, frm: List[str] = None):
self._send(MessageReq(**{
f.MSG_TYPE.nm: typ,
f.PARAMS.nm: params
}), dst=frm)
"""Method from legacy code"""
def l_setup_last_ordered_for_non_master(self):
"""
Since last ordered view_no and pp_seq_no are only communicated for
master instance, backup instances use this method for restoring
`last_ordered_3pc`
:return:
"""
if not self.is_master and self.first_batch_after_catchup and \
not self._data.is_primary:
            # If not the master instance, choose the last ordered seq no to be
            # 1 less than the lowest prepared certificate in this view
lowest_prepared = self.l_get_lowest_probable_prepared_certificate_in_view(
self.view_no)
if lowest_prepared is not None:
                # After catch-up last_ordered_3pc[1] is 0; advance it to
                # lowest_prepared - 1 so that this backup can resume ordering
                self.last_ordered_3pc = (self.view_no, lowest_prepared - 1)
                self._logger.info('{} Setting last ordered for non-master as {}'.
                                  format(self, self.last_ordered_3pc))
self._bus.send(BackupSetupLastOrdered(inst_id=self._data.inst_id))
self.first_batch_after_catchup = False
def get_state_root_hash(self, ledger_id: str, to_str=True, committed=False):
return self.db_manager.get_state_root_hash(ledger_id, to_str, committed) \
if self.is_master \
else None
def get_txn_root_hash(self, ledger_id: str, to_str=True):
return self.db_manager.get_txn_root_hash(ledger_id, to_str) \
if self.is_master \
else None
def _is_msg_from_primary(self, msg, sender: str) -> bool:
"""
Return whether this message was from primary replica
:param msg:
:param sender:
:return:
"""
if self._is_msg_for_current_view(msg):
return self.primary_name == sender
try:
return self.primary_names[msg.viewNo] == sender
except KeyError:
return False
def _is_msg_for_current_view(self, msg):
"""
Return whether this request's view number is equal to the current view
number of this replica.
"""
viewNo = getattr(msg, "viewNo", None)
return viewNo == self.view_no
def _is_pre_prepare_time_correct(self, pp: PrePrepare, sender: str) -> bool:
"""
        Check that this PRE-PREPARE is not older than the last accepted
        PRE-PREPARE (greater-than is not required since batches may be sent
        less than 1 second apart) and that its timestamp is within an
        acceptable range of the local clock's UTC time.
:param pp:
:return:
"""
tpcKey = (pp.viewNo, pp.ppSeqNo)
if (self.last_accepted_pre_prepare_time and
pp.ppTime < self.last_accepted_pre_prepare_time):
return False
elif ((tpcKey not in self.pre_prepare_tss) or
((pp, sender) not in self.pre_prepare_tss[tpcKey])):
return False
else:
return (
abs(pp.ppTime - self.pre_prepare_tss[tpcKey][pp, sender]) <=
self._config.ACCEPTABLE_DEVIATION_PREPREPARE_SECS
)
def _is_pre_prepare_time_acceptable(self, pp: PrePrepare, sender: str) -> bool:
"""
        Returns True or False depending on whether the time in the PRE-PREPARE
        is acceptable. Can return True even if the time is not acceptable,
        provided sufficient PREPAREs are found to support the PRE-PREPARE.
:param pp:
:return:
"""
key = (pp.viewNo, pp.ppSeqNo)
if key in self.requested_pre_prepares:
# Special case for requested PrePrepares
return True
correct = self._is_pre_prepare_time_correct(pp, sender)
if not correct:
if key in self.pre_prepares_stashed_for_incorrect_time and \
self.pre_prepares_stashed_for_incorrect_time[key][-1]:
self._logger.debug('{} marking time as correct for {}'.format(self, pp))
correct = True
else:
self._logger.warning('{} found {} to have incorrect time.'.format(self, pp))
return correct
def _non_finalised_reqs(self, reqKeys: List[Tuple[str, int]]):
"""
        Check if there are any requests which are not finalised, i.e. for
        which there are not enough PROPAGATEs
"""
return {key for key in reqKeys if not self._requests.is_finalised(key)}
def _is_next_pre_prepare(self, view_no: int, pp_seq_no: int):
if view_no == self.view_no and pp_seq_no == 1:
# First PRE-PREPARE in a new view
return True
(last_pp_view_no, last_pp_seq_no) = self.__last_pp_3pc
if last_pp_view_no > view_no:
return False
if last_pp_view_no < view_no:
if view_no != self.view_no:
return False
last_pp_seq_no = 0
if pp_seq_no - last_pp_seq_no > 1:
return False
return True
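    # Illustrative example of the checks above: if the last PRE-PREPARE key is
    # (view 0, seq 5) and view_no == 0, then pp_seq_no 6 is accepted as "next"
    # while pp_seq_no 8 is not (a gap greater than 1); the first batch of the
    # current view, (self.view_no, 1), is always accepted.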
def _apply_pre_prepare(self, pre_prepare: PrePrepare):
"""
Applies (but not commits) requests of the PrePrepare
to the ledger and state
"""
reqs = []
idx = 0
rejects = []
invalid_indices = []
suspicious = False
# 1. apply each request
for req_key in pre_prepare.reqIdr:
req = self._requests[req_key].finalised
try:
self._process_req_during_batch(req,
pre_prepare.ppTime)
except (InvalidClientMessageException, UnknownIdentifier, SuspiciousPrePrepare) as ex:
self._logger.warning('{} encountered exception {} while processing {}, '
'will reject'.format(self, ex, req))
rejects.append((req.key, Reject(req.identifier, req.reqId, ex)))
invalid_indices.append(idx)
if isinstance(ex, SuspiciousPrePrepare):
suspicious = True
finally:
reqs.append(req)
idx += 1
# 2. call callback for the applied batch
if self.is_master:
three_pc_batch = ThreePcBatch.from_pre_prepare(pre_prepare,
state_root=self.get_state_root_hash(pre_prepare.ledgerId,
to_str=False),
txn_root=self.get_txn_root_hash(pre_prepare.ledgerId,
to_str=False),
primaries=[],
valid_digests=self._get_valid_req_ids_from_all_requests(
reqs, invalid_indices))
self.post_batch_creation(three_pc_batch)
return reqs, invalid_indices, rejects, suspicious
def _get_valid_req_ids_from_all_requests(self, reqs, invalid_indices):
return [req.key for idx, req in enumerate(reqs) if idx not in invalid_indices]
def _validate_applied_pre_prepare(self, pre_prepare: PrePrepare,
reqs, invalid_indices, invalid_from_pp) -> Optional[int]:
if len(invalid_indices) != len(invalid_from_pp):
return PP_APPLY_REJECT_WRONG
digest = self.replica_batch_digest(reqs)
if digest != pre_prepare.digest:
return PP_APPLY_WRONG_DIGEST
if self.is_master:
if pre_prepare.stateRootHash != self.get_state_root_hash(pre_prepare.ledgerId):
return PP_APPLY_WRONG_STATE
if pre_prepare.txnRootHash != self.get_txn_root_hash(pre_prepare.ledgerId):
return PP_APPLY_ROOT_HASH_MISMATCH
# TODO: move this kind of validation to batch handlers
if f.AUDIT_TXN_ROOT_HASH.nm in pre_prepare and pre_prepare.auditTxnRootHash != self.get_txn_root_hash(AUDIT_LEDGER_ID):
return PP_APPLY_AUDIT_HASH_MISMATCH
return None
"""Method from legacy code"""
def l_get_lowest_probable_prepared_certificate_in_view(
self, view_no) -> Optional[int]:
"""
Return lowest pp_seq_no of the view for which can be prepared but
choose from unprocessed PRE-PREPAREs and PREPAREs.
"""
        # TODO: Naive implementation, we don't need to iterate over the
        # complete data structures; fix this later
seq_no_pp = SortedList() # pp_seq_no of PRE-PREPAREs
# pp_seq_no of PREPAREs with count of PREPAREs for each
seq_no_p = set()
for (v, p) in self.prePreparesPendingPrevPP:
if v == view_no:
seq_no_pp.add(p)
if v > view_no:
break
for (v, p), pr in self.preparesWaitingForPrePrepare.items():
if v == view_no and len(pr) >= self._data.quorums.prepare.value:
seq_no_p.add(p)
for n in seq_no_pp:
if n in seq_no_p:
return n
return None
def _revert(self, ledgerId, stateRootHash, reqCount):
# A batch should only be reverted if all batches that came after it
# have been reverted
ledger = self.db_manager.get_ledger(ledgerId)
state = self.db_manager.get_state(ledgerId)
self._logger.info('{} reverting {} txns and state root from {} to {} for ledger {}'
.format(self, reqCount, Ledger.hashToStr(state.headHash),
Ledger.hashToStr(stateRootHash), ledgerId))
state.revertToHead(stateRootHash)
ledger.discardTxns(reqCount)
self.post_batch_rejection(ledgerId)
"""Method from legacy code"""
def l_execute_hook(self, hook_id, *args):
# ToDo: need to receive results from hooks
self._bus.send(HookMessage(hook=hook_id,
args=args))
def _track_batches(self, pp: PrePrepare, prevStateRootHash):
# pp.discarded indicates the index from where the discarded requests
# starts hence the count of accepted requests, prevStateRoot is
# tracked to revert this PRE-PREPARE
self._logger.trace('{} tracking batch for {} with state root {}'.format(
self, pp, prevStateRootHash))
if self.is_master:
self.metrics.add_event(MetricsName.THREE_PC_BATCH_SIZE, len(pp.reqIdr))
else:
self.metrics.add_event(MetricsName.BACKUP_THREE_PC_BATCH_SIZE, len(pp.reqIdr))
self.batches[(pp.viewNo, pp.ppSeqNo)] = [pp.ledgerId, pp.discarded,
pp.ppTime, prevStateRootHash, len(pp.reqIdr)]
@property
def lastPrePrepareSeqNo(self):
return self._lastPrePrepareSeqNo
@lastPrePrepareSeqNo.setter
def lastPrePrepareSeqNo(self, n):
"""
        This will set _lastPrePrepareSeqNo only to values greater than its
        previous value; otherwise it leaves it unchanged. To forcefully
        override, as in the case of `revert`, directly set
        `self._lastPrePrepareSeqNo`
"""
if n > self._lastPrePrepareSeqNo:
self._lastPrePrepareSeqNo = n
else:
self._logger.debug(
'{} cannot set lastPrePrepareSeqNo to {} as its '
'already {}'.format(
self, n, self._lastPrePrepareSeqNo))
def _add_to_pre_prepares(self, pp: PrePrepare) -> None:
"""
Add the specified PRE-PREPARE to this replica's list of received
PRE-PREPAREs and try sending PREPARE
:param pp: the PRE-PREPARE to add to the list
"""
key = (pp.viewNo, pp.ppSeqNo)
# ToDo:
self.prePrepares[key] = pp
self._consensus_data_helper.preprepare_batch(pp)
self.lastPrePrepareSeqNo = pp.ppSeqNo
self.last_accepted_pre_prepare_time = pp.ppTime
self._dequeue_prepares(*key)
self._dequeue_commits(*key)
self.stats.inc(TPCStat.PrePrepareRcvd)
self.try_prepare(pp)
def _dequeue_prepares(self, viewNo: int, ppSeqNo: int):
key = (viewNo, ppSeqNo)
if key in self.preparesWaitingForPrePrepare:
i = 0
# Keys of pending prepares that will be processed below
while self.preparesWaitingForPrePrepare[key]:
prepare, sender = self.preparesWaitingForPrePrepare[
key].popleft()
self._logger.debug("{} popping stashed PREPARE{}".format(self, key))
self._network.process_incoming(prepare, sender)
i += 1
self.preparesWaitingForPrePrepare.pop(key)
self._logger.debug("{} processed {} PREPAREs waiting for PRE-PREPARE for"
" view no {} and seq no {}".format(self, i, viewNo, ppSeqNo))
def _dequeue_commits(self, viewNo: int, ppSeqNo: int):
key = (viewNo, ppSeqNo)
if key in self.commitsWaitingForPrepare:
if not self._has_prepared(key):
self._logger.debug('{} has not pre-prepared {}, will dequeue the '
'COMMITs later'.format(self, key))
return
i = 0
            # Keys of pending commits that will be processed below
while self.commitsWaitingForPrepare[key]:
commit, sender = self.commitsWaitingForPrepare[
key].popleft()
self._logger.debug("{} popping stashed COMMIT{}".format(self, key))
self._network.process_incoming(commit, sender)
i += 1
self.commitsWaitingForPrepare.pop(key)
self._logger.debug("{} processed {} COMMITs waiting for PREPARE for"
" view no {} and seq no {}".format(self, i, viewNo, ppSeqNo))
def try_prepare(self, pp: PrePrepare):
"""
Try to send the Prepare message if the PrePrepare message is ready to
be passed into the Prepare phase.
"""
rv, msg = self._can_prepare(pp)
if rv:
self._do_prepare(pp)
else:
self._logger.debug("{} cannot send PREPARE since {}".format(self, msg))
def _can_prepare(self, ppReq) -> (bool, str):
"""
Return whether the batch of requests in the PRE-PREPARE can
proceed to the PREPARE step.
:param ppReq: any object with identifier and requestId attributes
"""
if self.prepares.hasPrepareFrom(ppReq, self.name):
return False, 'has already sent PREPARE for {}'.format(ppReq)
return True, ''
@measure_consensus_time(MetricsName.SEND_PREPARE_TIME,
MetricsName.BACKUP_SEND_PREPARE_TIME)
def _do_prepare(self, pp: PrePrepare):
self._logger.debug("{} Sending PREPARE{} at {}".format(
self, (pp.viewNo, pp.ppSeqNo), self.get_current_time()))
params = [self._data.inst_id,
pp.viewNo,
pp.ppSeqNo,
pp.ppTime,
pp.digest,
pp.stateRootHash,
pp.txnRootHash]
if f.AUDIT_TXN_ROOT_HASH.nm in pp:
params.append(pp.auditTxnRootHash)
# BLS multi-sig:
params = self.l_bls_bft_replica.update_prepare(params, pp.ledgerId)
prepare = Prepare(*params)
if self.is_master:
rv = self.l_execute_hook(ReplicaHooks.CREATE_PR, prepare, pp)
prepare = rv if rv is not None else prepare
self._send(prepare, stat=TPCStat.PrepareSent)
self._add_to_prepares(prepare, self.name)
def _has_prepared(self, key):
if not self.get_preprepare(*key):
return False
if ((key not in self.prepares and key not in self.sentPrePrepares) and
(key not in self.preparesWaitingForPrePrepare)):
return False
return True
def get_preprepare(self, viewNo, ppSeqNo):
key = (viewNo, ppSeqNo)
if key in self.sentPrePrepares:
return self.sentPrePrepares[key]
if key in self.prePrepares:
return self.prePrepares[key]
return None
def _add_to_prepares(self, prepare: Prepare, sender: str):
"""
Add the specified PREPARE to this replica's list of received
PREPAREs and try sending COMMIT
:param prepare: the PREPARE to add to the list
"""
# BLS multi-sig:
self.l_bls_bft_replica.process_prepare(prepare, sender)
self.prepares.addVote(prepare, sender)
self._dequeue_commits(prepare.viewNo, prepare.ppSeqNo)
self._try_commit(prepare)
def _try_commit(self, prepare: Prepare):
"""
Try to commit if the Prepare message is ready to be passed into the
commit phase.
"""
rv, reason = self._can_commit(prepare)
if rv:
pp = self.get_preprepare(prepare.viewNo, prepare.ppSeqNo)
self._consensus_data_helper.prepare_batch(pp)
self._do_commit(prepare)
else:
self._logger.debug("{} cannot send COMMIT since {}".format(self, reason))
@measure_consensus_time(MetricsName.SEND_COMMIT_TIME,
MetricsName.BACKUP_SEND_COMMIT_TIME)
def _do_commit(self, p: Prepare):
"""
Create a commit message from the given Prepare message and trigger the
commit phase
:param p: the prepare message
"""
key_3pc = (p.viewNo, p.ppSeqNo)
self._logger.debug("{} Sending COMMIT{} at {}".format(self, key_3pc, self.get_current_time()))
params = [
self._data.inst_id, p.viewNo, p.ppSeqNo
]
pre_prepare = self.get_preprepare(*key_3pc)
# BLS multi-sig:
if p.stateRootHash is not None:
pre_prepare = self.get_preprepare(*key_3pc)
params = self.l_bls_bft_replica.update_commit(params, pre_prepare)
commit = Commit(*params)
self._send(commit, stat=TPCStat.CommitSent)
self._add_to_commits(commit, self.name)
def _add_to_commits(self, commit: Commit, sender: str):
"""
Add the specified COMMIT to this replica's list of received
commit requests.
:param commit: the COMMIT to add to the list
:param sender: the name of the node that sent the COMMIT
"""
# BLS multi-sig:
self.l_bls_bft_replica.process_commit(commit, sender)
self.commits.addVote(commit, sender)
self._try_order(commit)
def _try_order(self, commit: Commit):
"""
Try to order if the Commit message is ready to be ordered.
"""
canOrder, reason = self._can_order(commit)
if canOrder:
self._logger.trace("{} returning request to node".format(self))
self._do_order(commit)
else:
self._logger.debug("{} cannot return request to node: {}".format(self, reason))
return canOrder
def _do_order(self, commit: Commit):
key = (commit.viewNo, commit.ppSeqNo)
self._logger.debug("{} ordering COMMIT {}".format(self, key))
return self._order_3pc_key(key)
@measure_consensus_time(MetricsName.ORDER_3PC_BATCH_TIME,
MetricsName.BACKUP_ORDER_3PC_BATCH_TIME)
def _order_3pc_key(self, key):
pp = self.get_preprepare(*key)
if pp is None:
raise ValueError(
"{} no PrePrepare with a 'key' {} found".format(self, key)
)
self._freshness_checker.update_freshness(ledger_id=pp.ledgerId,
ts=pp.ppTime)
self._add_to_ordered(*key)
invalid_indices = invalid_index_serializer.deserialize(pp.discarded)
invalid_reqIdr = []
valid_reqIdr = []
for ind, reqIdr in enumerate(pp.reqIdr):
if ind in invalid_indices:
invalid_reqIdr.append(reqIdr)
else:
valid_reqIdr.append(reqIdr)
self._requests.ordered_by_replica(reqIdr)
ordered = Ordered(self._data.inst_id,
pp.viewNo,
valid_reqIdr,
invalid_reqIdr,
pp.ppSeqNo,
pp.ppTime,
pp.ledgerId,
pp.stateRootHash,
pp.txnRootHash,
pp.auditTxnRootHash if f.AUDIT_TXN_ROOT_HASH.nm in pp else None,
self._get_primaries_for_ordered(pp))
if self.is_master:
rv = self.l_execute_hook(ReplicaHooks.CREATE_ORD, ordered, pp)
ordered = rv if rv is not None else ordered
self._discard_ordered_req_keys(pp)
self._bus.send(ordered)
ordered_msg = "{} ordered batch request, view no {}, ppSeqNo {}, ledger {}, " \
"state root {}, txn root {}, audit root {}".format(self, pp.viewNo, pp.ppSeqNo, pp.ledgerId,
pp.stateRootHash, pp.txnRootHash,
pp.auditTxnRootHash)
self._logger.debug("{}, requests ordered {}, discarded {}".
format(ordered_msg, valid_reqIdr, invalid_reqIdr))
self._logger.info("{}, requests ordered {}, discarded {}".
format(ordered_msg, len(valid_reqIdr), len(invalid_reqIdr)))
if self.is_master:
self.metrics.add_event(MetricsName.ORDERED_BATCH_SIZE, len(valid_reqIdr) + len(invalid_reqIdr))
self.metrics.add_event(MetricsName.ORDERED_BATCH_INVALID_COUNT, len(invalid_reqIdr))
else:
self.metrics.add_event(MetricsName.BACKUP_ORDERED_BATCH_SIZE, len(valid_reqIdr))
# BLS multi-sig:
self.l_bls_bft_replica.process_order(key, self._data.quorums, pp)
return True
def _add_to_ordered(self, view_no: int, pp_seq_no: int):
self.ordered.add(view_no, pp_seq_no)
self.last_ordered_3pc = (view_no, pp_seq_no)
self.requested_pre_prepares.pop((view_no, pp_seq_no), None)
self.requested_prepares.pop((view_no, pp_seq_no), None)
self.requested_commits.pop((view_no, pp_seq_no), None)
def _get_primaries_for_ordered(self, pp):
ledger = self.db_manager.get_ledger(AUDIT_LEDGER_ID)
for index, txn in enumerate(ledger.get_uncommitted_txns()):
payload_data = get_payload_data(txn)
if pp.ppSeqNo == payload_data[AUDIT_TXN_PP_SEQ_NO] and \
pp.viewNo == payload_data[AUDIT_TXN_VIEW_NO]:
txn_primaries = payload_data[AUDIT_TXN_PRIMARIES]
if isinstance(txn_primaries, Iterable):
return txn_primaries
elif isinstance(txn_primaries, int):
last_primaries_seq_no = get_seq_no(txn) - txn_primaries
return get_payload_data(
ledger.get_by_seq_no_uncommitted(last_primaries_seq_no))[AUDIT_TXN_PRIMARIES]
break
else:
return self._data.primaries
def _discard_ordered_req_keys(self, pp: PrePrepare):
for k in pp.reqIdr:
            # Using discard since the key may not be present: in the case of
            # the primary the key was popped out while creating the
            # PRE-PREPARE, and a node that is catching up does not validate
            # PRE-PREPAREs or PREPAREs but only validates the number of
            # COMMITs and their consistency with the PRE-PREPARE and PREPAREs
self.discard_req_key(pp.ledgerId, k)
def _can_order(self, commit: Commit) -> Tuple[bool, Optional[str]]:
"""
Return whether the specified commitRequest can be returned to the node.
        Decision criteria:
        - If exactly n-f COMMITs have been received, return the request to the
          node
        - If fewer than n-f COMMITs have been received, there is probably no
          consensus on the request yet; don't return it to the node
        - If more than n-f have been received, the request has already been
          returned to the node; don't return it again
:param commit: the COMMIT
"""
quorum = self._data.quorums.commit.value
if not self.commits.hasQuorum(commit, quorum):
return False, "no quorum ({}): {} commits where f is {}". \
format(quorum, commit, self.f)
key = (commit.viewNo, commit.ppSeqNo)
if self._validator.has_already_ordered(*key):
return False, "already ordered"
if commit.ppSeqNo > 1 and not self._all_prev_ordered(commit):
viewNo, ppSeqNo = commit.viewNo, commit.ppSeqNo
if viewNo not in self.stashed_out_of_order_commits:
self.stashed_out_of_order_commits[viewNo] = {}
self.stashed_out_of_order_commits[viewNo][ppSeqNo] = commit
self._out_of_order_repeater.start()
return False, "stashing {} since out of order". \
format(commit)
return True, None
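    # Worked example (assuming the usual BFT commit quorum of n - f): with
    # n = 4 nodes and f = 1, self._data.quorums.commit.value is 3, so a batch
    # is ordered once COMMITs from 3 replicas (including this one) have been
    # collected and every earlier batch in the view has been ordered.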
def _process_stashed_out_of_order_commits(self):
# This method is called periodically to check for any commits that
# were stashed due to lack of commits before them and orders them if it
# can
if not self.can_order_commits():
return
self._logger.debug('{} trying to order from out of order commits. '
'Len(stashed_out_of_order_commits) == {}'
.format(self, len(self.stashed_out_of_order_commits)))
if self.last_ordered_3pc:
lastOrdered = self.last_ordered_3pc
vToRemove = set()
for v in self.stashed_out_of_order_commits:
if v < lastOrdered[0]:
self._logger.debug(
"{} found commits {} from previous view {}"
" that were not ordered but last ordered"
" is {}".format(
self, self.stashed_out_of_order_commits[v], v, lastOrdered))
vToRemove.add(v)
continue
pToRemove = set()
for p, commit in self.stashed_out_of_order_commits[v].items():
if (v, p) in self.ordered or \
self._validator.has_already_ordered(*(commit.viewNo, commit.ppSeqNo)):
pToRemove.add(p)
continue
if (v == lastOrdered[0] and lastOrdered == (v, p - 1)) or \
(v > lastOrdered[0] and self._is_lowest_commit_in_view(commit)):
self._logger.debug("{} ordering stashed commit {}".format(self, commit))
if self._try_order(commit):
lastOrdered = (v, p)
pToRemove.add(p)
for p in pToRemove:
del self.stashed_out_of_order_commits[v][p]
if not self.stashed_out_of_order_commits[v]:
vToRemove.add(v)
for v in vToRemove:
del self.stashed_out_of_order_commits[v]
if not self.stashed_out_of_order_commits:
self._out_of_order_repeater.stop()
else:
            self._logger.debug('{} last_ordered_3pc is False. '
'Len(stashed_out_of_order_commits) == {}'
.format(self, len(self.stashed_out_of_order_commits)))
def _is_lowest_commit_in_view(self, commit):
view_no = commit.viewNo
if view_no > self.view_no:
self._logger.debug('{} encountered {} which belongs to a later view'.format(self, commit))
return False
return commit.ppSeqNo == 1
def _all_prev_ordered(self, commit: Commit):
"""
Return True if all previous COMMITs have been ordered
"""
# TODO: This method does a lot of work, choose correct data
# structures to make it efficient.
viewNo, ppSeqNo = commit.viewNo, commit.ppSeqNo
if self.last_ordered_3pc == (viewNo, ppSeqNo - 1):
# Last ordered was in same view as this COMMIT
return True
# if some PREPAREs/COMMITs were completely missed in the same view
toCheck = set()
toCheck.update(set(self.sentPrePrepares.keys()))
toCheck.update(set(self.prePrepares.keys()))
toCheck.update(set(self.prepares.keys()))
toCheck.update(set(self.commits.keys()))
for (v, p) in toCheck:
if v < viewNo and (v, p) not in self.ordered:
# Have commits from previous view that are unordered.
return False
if v == viewNo and p < ppSeqNo and (v, p) not in self.ordered:
# If unordered commits are found with lower ppSeqNo then this
# cannot be ordered.
return False
return True
def _can_commit(self, prepare: Prepare) -> (bool, str):
"""
Return whether the specified PREPARE can proceed to the Commit
step.
Decision criteria:
        - If this replica has received exactly n-f-1 PREPAREs, send a COMMIT
        - If fewer than n-f-1 PREPAREs have been received, there is probably
          no consensus on the request yet; don't commit
        - If more than n-f-1 have been received, a COMMIT has already been
          sent; don't commit again
:param prepare: the PREPARE
"""
quorum = self._data.quorums.prepare.value
if not self.prepares.hasQuorum(prepare, quorum):
return False, 'does not have prepare quorum for {}'.format(prepare)
if self._has_committed(prepare):
return False, 'has already sent COMMIT for {}'.format(prepare)
return True, ''
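    # Worked example (per the n-f-1 criterion in the docstring above): with
    # n = 4 and f = 1 the PREPARE quorum is 2, so two matching PREPAREs are
    # enough for this replica to proceed to the COMMIT phase.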
def _has_committed(self, request) -> bool:
return self.commits.hasCommitFrom(ThreePhaseKey(
request.viewNo, request.ppSeqNo), self.name)
def post_batch_creation(self, three_pc_batch: ThreePcBatch):
"""
        A batch of requests has been created and applied, but not yet
        committed to the ledger and state.
        :param three_pc_batch: the applied batch (carries the ledger id and
            the state root after the batch creation)
:return:
"""
ledger_id = three_pc_batch.ledger_id
if ledger_id != POOL_LEDGER_ID and not three_pc_batch.primaries:
three_pc_batch.primaries = self._write_manager.future_primary_handler.get_last_primaries() or self._data.primaries
if self._write_manager.is_valid_ledger_id(ledger_id):
self._write_manager.post_apply_batch(three_pc_batch)
else:
self._logger.debug('{} did not know how to handle for ledger {}'.format(self, ledger_id))
def post_batch_rejection(self, ledger_id):
"""
        A batch of requests has been rejected; revert the uncommitted changes
        made for this ledger.
        :param ledger_id:
:return:
"""
if self._write_manager.is_valid_ledger_id(ledger_id):
self._write_manager.post_batch_rejected(ledger_id)
else:
self._logger.debug('{} did not know how to handle for ledger {}'.format(self, ledger_id))
def _ledger_id_for_request(self, request: Request):
if request.operation.get(TXN_TYPE) is None:
raise ValueError(
"{} TXN_TYPE is not defined for request {}".format(self, request)
)
typ = request.operation[TXN_TYPE]
return self._write_manager.type_to_ledger_id[typ]
def _do_dynamic_validation(self, request: Request, req_pp_time: int):
"""
State based validation
"""
# Digest validation
# TODO implicit caller's context: request is processed by (master) replica
# as part of PrePrepare 3PC batch
ledger_id, seq_no = self.db_manager.get_store(SEQ_NO_DB_LABEL).get_by_payload_digest(request.payload_digest)
if ledger_id is not None and seq_no is not None:
raise SuspiciousPrePrepare('Trying to order already ordered request')
ledger = self.db_manager.get_ledger(self._ledger_id_for_request(request))
for txn in ledger.uncommittedTxns:
if get_payload_digest(txn) == request.payload_digest:
raise SuspiciousPrePrepare('Trying to order already ordered request')
# TAA validation
# For now, we need to call taa_validation not from dynamic_validation because
# req_pp_time is required
self._write_manager.do_taa_validation(request, req_pp_time, self._config)
self._write_manager.dynamic_validation(request)
@measure_consensus_time(MetricsName.REQUEST_PROCESSING_TIME,
MetricsName.BACKUP_REQUEST_PROCESSING_TIME)
def _process_req_during_batch(self,
req: Request,
cons_time: int):
"""
This method will do dynamic validation and apply requests.
If there is any errors during validation it would be raised
"""
if self.is_master:
self._do_dynamic_validation(req, cons_time)
self._write_manager.apply_request(req, cons_time)
def can_send_3pc_batch(self):
if not self._data.is_primary:
return False
if not self._data.is_participating:
return False
# ToDo: is pre_view_change_in_progress needed?
# if self.replica.node.pre_view_change_in_progress:
# return False
if self.view_no < self.last_ordered_3pc[0]:
return False
if self.view_no == self.last_ordered_3pc[0]:
if self._lastPrePrepareSeqNo < self.last_ordered_3pc[1]:
return False
# This check is done for current view only to simplify logic and avoid
# edge cases between views, especially taking into account that we need
# to send a batch in new view as soon as possible
if self._config.Max3PCBatchesInFlight is not None:
batches_in_flight = self._lastPrePrepareSeqNo - self.last_ordered_3pc[1]
if batches_in_flight >= self._config.Max3PCBatchesInFlight:
if self._can_log_skip_send_3pc():
self._logger.info("{} not creating new batch because there already {} in flight out of {} allowed".
format(self.name, batches_in_flight, self._config.Max3PCBatchesInFlight))
return False
self._skip_send_3pc_ts = None
return True
def _can_log_skip_send_3pc(self):
current_time = time.perf_counter()
if self._skip_send_3pc_ts is None:
self._skip_send_3pc_ts = current_time
return True
if current_time - self._skip_send_3pc_ts > self._config.Max3PCBatchWait:
self._skip_send_3pc_ts = current_time
return True
return False
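    # Example (hypothetical config value): with Max3PCBatchWait = 10 seconds,
    # the "not creating new batch" log line above is emitted at most once per
    # 10 s while the Max3PCBatchesInFlight limit keeps can_send_3pc_batch()
    # returning False.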
def can_order_commits(self):
if self._data.is_participating:
return True
if self._data.is_synced and self._data.legacy_vc_in_progress:
return True
return False
@staticmethod
def generateName(node_name: str, inst_id: int):
"""
Create and return the name for a replica using its nodeName and
instanceId.
Ex: Alpha:1
"""
if isinstance(node_name, str):
# Because sometimes it is bytes (why?)
if ":" in node_name:
# Because in some cases (for requested messages) it
# already has ':'. This should be fixed.
return node_name
return "{}:{}".format(node_name, inst_id)
def dequeue_pre_prepares(self):
"""
Dequeue any received PRE-PREPAREs that did not have finalized requests
or the replica was missing any PRE-PREPAREs before it
:return:
"""
ppsReady = []
# Check if any requests have become finalised belonging to any stashed
# PRE-PREPAREs.
for i, (pp, sender, reqIds) in enumerate(
self.prePreparesPendingFinReqs):
finalised = set()
for r in reqIds:
if self._requests.is_finalised(r):
finalised.add(r)
diff = reqIds.difference(finalised)
# All requests become finalised
if not diff:
ppsReady.append(i)
self.prePreparesPendingFinReqs[i] = (pp, sender, diff)
for i in sorted(ppsReady, reverse=True):
pp, sender, _ = self.prePreparesPendingFinReqs.pop(i)
self.prePreparesPendingPrevPP[pp.viewNo, pp.ppSeqNo] = (pp, sender)
r = 0
while self.prePreparesPendingPrevPP and self._is_next_pre_prepare(
*self.prePreparesPendingPrevPP.iloc[0]):
_, (pp, sender) = self.prePreparesPendingPrevPP.popitem(last=False)
if not self._can_pp_seq_no_be_in_view(pp.viewNo, pp.ppSeqNo):
self._discard(pp, "Pre-Prepare from a previous view",
self._logger.debug)
continue
self._logger.info("{} popping stashed PREPREPARE{} "
"from sender {}".format(self, (pp.viewNo, pp.ppSeqNo), sender))
self._network.process_incoming(pp, sender)
r += 1
return r
# TODO: Convert this into a free function?
def _discard(self, msg, reason, logMethod=logging.error, cliOutput=False):
"""
Discard a message and log a reason using the specified `logMethod`.
:param msg: the message to discard
:param reason: the reason why this message is being discarded
:param logMethod: the logging function to be used
:param cliOutput: if truthy, informs a CLI that the logged msg should
be printed
"""
reason = "" if not reason else " because {}".format(reason)
logMethod("{} discarding message {}{}".format(self, msg, reason),
extra={"cli": cliOutput})
def _can_pp_seq_no_be_in_view(self, view_no, pp_seq_no):
"""
Checks if the `pp_seq_no` could have been in view `view_no`. It will
return False when the `pp_seq_no` belongs to a later view than
`view_no` else will return True
:return:
"""
if view_no > self.view_no:
raise PlenumValueError(
'view_no', view_no,
"<= current view_no {}".format(self.view_no),
prefix=self
)
return view_no == self.view_no or (view_no < self.view_no and self._data.legacy_last_prepared_before_view_change and
compare_3PC_keys((view_no, pp_seq_no),
self._data.legacy_last_prepared_before_view_change) >= 0)
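    # Example: if the current view is 3, view_no == 3 is always acceptable;
    # view_no == 2 is acceptable only for keys up to
    # legacy_last_prepared_before_view_change; view_no == 4 raises
    # PlenumValueError because it is later than the current view.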
def send_3pc_batch(self):
if not self.can_send_3pc_batch():
return 0
sent_batches = set()
# 1. send 3PC batches with requests for every ledger
self._send_3pc_batches_for_ledgers(sent_batches)
# 2. for every ledger we haven't just sent a 3PC batch check if it's not fresh enough,
# and send an empty 3PC batch to update the state if needed
self._send_3pc_freshness_batch(sent_batches)
# 3. send 3PC batch if new primaries elected
self.l_send_3pc_primaries_batch(sent_batches)
# 4. update ts of last sent 3PC batch
if len(sent_batches) > 0:
self.lastBatchCreated = self.get_current_time()
return len(sent_batches)
def l_send_3pc_primaries_batch(self, sent_batches):
        # As we've selected new primaries, we need to send a 3PC batch
        # so that these primaries can be saved in the audit ledger
if not sent_batches and self.primaries_batch_needed:
self._logger.debug("Sending a 3PC batch to propagate newly selected primaries")
self.primaries_batch_needed = False
sent_batches.add(self._do_send_3pc_batch(ledger_id=DOMAIN_LEDGER_ID))
def _send_3pc_freshness_batch(self, sent_batches):
if not self._config.UPDATE_STATE_FRESHNESS:
return
if not self.is_master:
return
# Update freshness for all outdated ledgers sequentially without any waits
# TODO: Consider sending every next update in Max3PCBatchWait only
outdated_ledgers = self._freshness_checker.check_freshness(self.get_time_for_3pc_batch())
for ledger_id, ts in outdated_ledgers.items():
if ledger_id in sent_batches:
self._logger.debug("Ledger {} is not updated for {} seconds, "
"but a 3PC for this ledger has been just sent".format(ledger_id, ts))
continue
self._logger.info("Ledger {} is not updated for {} seconds, "
"so its freshness state is going to be updated now".format(ledger_id, ts))
sent_batches.add(
self._do_send_3pc_batch(ledger_id=ledger_id))
def _send_3pc_batches_for_ledgers(self, sent_batches):
# TODO: Consider sending every next update in Max3PCBatchWait only
for ledger_id, q in self.requestQueues.items():
if len(q) == 0:
continue
queue_full = len(q) >= self._config.Max3PCBatchSize
timeout = self.lastBatchCreated + self._config.Max3PCBatchWait < self.get_current_time()
if not queue_full and not timeout:
continue
sent_batches.add(
self._do_send_3pc_batch(ledger_id=ledger_id))
def _do_send_3pc_batch(self, ledger_id):
oldStateRootHash = self.get_state_root_hash(ledger_id, to_str=False)
pre_prepare = self.create_3pc_batch(ledger_id)
self.send_pre_prepare(pre_prepare)
if not self.is_master:
self.db_manager.get_store(LAST_SENT_PP_STORE_LABEL).store_last_sent_pp_seq_no(
self._data.inst_id, pre_prepare.ppSeqNo)
self._consensus_data_helper.preprepare_batch(pre_prepare)
self._track_batches(pre_prepare, oldStateRootHash)
return ledger_id
@measure_consensus_time(MetricsName.CREATE_3PC_BATCH_TIME,
MetricsName.BACKUP_CREATE_3PC_BATCH_TIME)
def create_3pc_batch(self, ledger_id):
pp_seq_no = self.lastPrePrepareSeqNo + 1
pool_state_root_hash = self.get_state_root_hash(POOL_LEDGER_ID)
self._logger.debug("{} creating batch {} for ledger {} with state root {}".format(
self, pp_seq_no, ledger_id,
self.get_state_root_hash(ledger_id, to_str=False)))
if self.last_accepted_pre_prepare_time is None:
last_ordered_ts = self._get_last_timestamp_from_state(ledger_id)
if last_ordered_ts:
self.last_accepted_pre_prepare_time = last_ordered_ts
# DO NOT REMOVE `view_no` argument, used while replay
# tm = self.utc_epoch
tm = self._get_utc_epoch_for_preprepare(self._data.inst_id, self.view_no,
pp_seq_no)
reqs, invalid_indices, rejects = self._consume_req_queue_for_pre_prepare(
ledger_id, tm, self.view_no, pp_seq_no)
if self.is_master:
three_pc_batch = ThreePcBatch(ledger_id=ledger_id,
inst_id=self._data.inst_id,
view_no=self.view_no,
pp_seq_no=pp_seq_no,
pp_time=tm,
state_root=self.get_state_root_hash(ledger_id, to_str=False),
txn_root=self.get_txn_root_hash(ledger_id, to_str=False),
primaries=[],
valid_digests=self._get_valid_req_ids_from_all_requests(
reqs, invalid_indices))
self.post_batch_creation(three_pc_batch)
digest = self.replica_batch_digest(reqs)
state_root_hash = self.get_state_root_hash(ledger_id)
audit_txn_root_hash = self.get_txn_root_hash(AUDIT_LEDGER_ID)
"""TODO: for now default value for fields sub_seq_no is 0 and for final is True"""
params = [
self._data.inst_id,
self.view_no,
pp_seq_no,
tm,
[req.digest for req in reqs],
invalid_index_serializer.serialize(invalid_indices, toBytes=False),
digest,
ledger_id,
state_root_hash,
self.get_txn_root_hash(ledger_id),
0,
True,
pool_state_root_hash,
audit_txn_root_hash
]
# BLS multi-sig:
params = self.l_bls_bft_replica.update_pre_prepare(params, ledger_id)
pre_prepare = PrePrepare(*params)
if self.is_master:
rv = self.l_execute_hook(ReplicaHooks.CREATE_PPR, pre_prepare)
pre_prepare = rv if rv is not None else pre_prepare
self._logger.trace('{} created a PRE-PREPARE with {} requests for ledger {}'.format(
self, len(reqs), ledger_id))
self.lastPrePrepareSeqNo = pp_seq_no
self.last_accepted_pre_prepare_time = tm
if self.is_master and rejects:
for reject in rejects:
self._network.send(reject)
return pre_prepare
def _get_last_timestamp_from_state(self, ledger_id):
if ledger_id == DOMAIN_LEDGER_ID:
ts_store = self.db_manager.get_store(TS_LABEL)
if ts_store:
last_timestamp = ts_store.get_last_key()
if last_timestamp:
last_timestamp = int(last_timestamp.decode())
self._logger.debug("Last ordered timestamp from store is : {}"
"".format(last_timestamp))
return last_timestamp
return None
# This is to enable replaying, inst_id, view_no and pp_seq_no are used
# while replaying
def _get_utc_epoch_for_preprepare(self, inst_id, view_no, pp_seq_no):
tm = self.get_time_for_3pc_batch()
if self.last_accepted_pre_prepare_time and \
tm < self.last_accepted_pre_prepare_time:
tm = self.last_accepted_pre_prepare_time
return tm
def _consume_req_queue_for_pre_prepare(self, ledger_id, tm,
view_no, pp_seq_no):
reqs = []
rejects = []
invalid_indices = []
idx = 0
while len(reqs) < self._config.Max3PCBatchSize \
and self.requestQueues[ledger_id]:
key = self.requestQueues[ledger_id].pop(0)
if key in self._requests:
fin_req = self._requests[key].finalised
malicious_req = False
try:
self._process_req_during_batch(fin_req,
tm)
except (
InvalidClientMessageException,
UnknownIdentifier
) as ex:
self._logger.warning('{} encountered exception {} while processing {}, '
'will reject'.format(self, ex, fin_req))
rejects.append((fin_req.key, Reject(fin_req.identifier, fin_req.reqId, ex)))
invalid_indices.append(idx)
except SuspiciousPrePrepare:
malicious_req = True
finally:
if not malicious_req:
reqs.append(fin_req)
if not malicious_req:
idx += 1
else:
self._logger.debug('{} found {} in its request queue but the '
'corresponding request was removed'.format(self, key))
return reqs, invalid_indices, rejects
@measure_consensus_time(MetricsName.SEND_PREPREPARE_TIME,
MetricsName.BACKUP_SEND_PREPREPARE_TIME)
def send_pre_prepare(self, ppReq: PrePrepare):
self.sentPrePrepares[ppReq.viewNo, ppReq.ppSeqNo] = ppReq
self._send(ppReq, stat=TPCStat.PrePrepareSent)
def _send(self, msg, dst=None, stat=None) -> None:
"""
Send a message to the node on which this replica resides.
:param stat:
        :param dst: destination(s) of the message (sends to all recipients if None)
:param msg: the message to send
"""
if stat:
self.stats.inc(stat)
self._network.send(msg, dst=dst)
def revert_unordered_batches(self):
"""
Revert changes to ledger (uncommitted) and state made by any requests
that have not been ordered.
"""
i = 0
for key in sorted(self.batches.keys(), reverse=True):
if compare_3PC_keys(self.last_ordered_3pc, key) > 0:
ledger_id, discarded, _, prevStateRoot, len_reqIdr = self.batches.pop(key)
discarded = invalid_index_serializer.deserialize(discarded)
self._logger.debug('{} reverting 3PC key {}'.format(self, key))
self._revert(ledger_id, prevStateRoot, len_reqIdr - len(discarded))
i += 1
else:
break
self._logger.info('{} reverted {} batches before starting catch up'.format(self, i))
return i
def l_last_prepared_certificate_in_view(self) -> Optional[Tuple[int, int]]:
        # Pick the latest 3PC key in the view that has a prepared certificate
        # (i.e. a quorum of PREPAREs).
# TODO: Consider stashed messages too?
if not self.is_master:
raise LogicError("{} is not a master".format(self))
keys = []
quorum = self._data.quorums.prepare.value
for key in self.prepares.keys():
if self.prepares.hasQuorum(ThreePhaseKey(*key), quorum):
keys.append(key)
return max_3PC_key(keys) if keys else None
def _caught_up_till_3pc(self, last_caught_up_3PC):
self.last_ordered_3pc = last_caught_up_3PC
self._remove_till_caught_up_3pc(last_caught_up_3PC)
def catchup_clear_for_backup(self):
if not self._data.is_primary:
self.last_ordered_3pc = (self._data.view_no, 0)
self.batches.clear()
self.sentPrePrepares.clear()
self.prePrepares.clear()
self.prepares.clear()
self.commits.clear()
self._data.prepared.clear()
self._data.preprepared.clear()
self.first_batch_after_catchup = True
def _remove_till_caught_up_3pc(self, last_caught_up_3PC):
"""
Remove any 3 phase messages till the last ordered key and also remove
any corresponding request keys
"""
outdated_pre_prepares = {}
for key, pp in self.prePrepares.items():
if compare_3PC_keys(key, last_caught_up_3PC) >= 0:
outdated_pre_prepares[key] = pp
for key, pp in self.sentPrePrepares.items():
if compare_3PC_keys(key, last_caught_up_3PC) >= 0:
outdated_pre_prepares[key] = pp
self._logger.trace('{} going to remove messages for {} 3PC keys'.format(
self, len(outdated_pre_prepares)))
for key, pp in outdated_pre_prepares.items():
self.batches.pop(key, None)
self.sentPrePrepares.pop(key, None)
self.prePrepares.pop(key, None)
self.prepares.pop(key, None)
self.commits.pop(key, None)
self._discard_ordered_req_keys(pp)
self._consensus_data_helper.clear_batch(pp)
def get_sent_preprepare(self, viewNo, ppSeqNo):
key = (viewNo, ppSeqNo)
return self.sentPrePrepares.get(key)
def get_sent_prepare(self, viewNo, ppSeqNo):
key = (viewNo, ppSeqNo)
if key in self.prepares:
prepare = self.prepares[key].msg
if self.prepares.hasPrepareFrom(prepare, self.name):
return prepare
return None
def get_sent_commit(self, viewNo, ppSeqNo):
key = (viewNo, ppSeqNo)
if key in self.commits:
commit = self.commits[key].msg
if self.commits.hasCommitFrom(commit, self.name):
return commit
return None
def replica_batch_digest(self, reqs):
return replica_batch_digest(reqs)
def process_view_change_started(self, msg: ViewChangeStarted):
# 1. update shared data
self._data.preprepared = []
self._data.prepared = []
# 2. save existing PrePrepares
new_old_view_preprepares = {(pp.ppSeqNo, pp.digest): pp
for pp in itertools.chain(self.prePrepares.values(), self.sentPrePrepares.values())}
self.old_view_preprepares.update(new_old_view_preprepares)
# 3. revert unordered transactions
if self.is_master:
self.revert_unordered_batches()
# 4. Clear the 3PC log
self.prePrepares.clear()
self.prepares.clear()
self.commits.clear()
self.requested_pre_prepares.clear()
self.requested_prepares.clear()
self.requested_commits.clear()
self.pre_prepare_tss.clear()
self.prePreparesPendingFinReqs.clear()
self.prePreparesPendingPrevPP.clear()
self.sentPrePrepares.clear()
self.batches.clear()
self.ordered.clear_below_view(msg.view_no)
return PROCESS, None
def process_new_view_checkpoints_applied(self, msg: NewViewCheckpointsApplied):
result, reason = self._validate(msg)
if result != PROCESS:
return result, reason
if not self.is_master:
return DISCARD, "not master"
for batch_id in msg.batches:
# TODO: take into account original view no
pp = self.old_view_preprepares.get((batch_id.pp_seq_no, batch_id.pp_digest))
if pp is None:
# TODO: implement correct re-sending logic
# self._request_pre_prepare(three_pc_key=(batch_id.view_no, batch_id.pp_seq_no))
continue
if self._validator.has_already_ordered(batch_id.view_no, batch_id.pp_seq_no):
self._add_to_pre_prepares(pp)
else:
sender = self.generateName(self._data.primary_name, self._data.inst_id)
# TODO: route it through the bus?
self.process_preprepare(pp, sender)
# TODO: this needs to be removed
self._data.preprepared = [BatchID(view_no=msg.view_no, pp_seq_no=batch_id.pp_seq_no,
pp_digest=batch_id.pp_digest)
for batch_id in msg.batches]
self._data.prepared = []
return PROCESS, None
avg_line_length: 44.138627 | max_line_length: 131 | alphanum_fraction: 0.610387
hexsha: 67a176ae9657b5feff0b25a402b4d91393f926f2 | size: 7,261 | ext: py | lang: Python
max_stars_repo_path: povary/apps/profiles/migrations/0008_auto__add_field_profile_registration_ip__add_field_profile_last_login_.py
max_stars_repo_name: TorinAsakura/cooking | max_stars_repo_head_hexsha: cf0c78f613fa9ce0fcd4ec7a397ab880d9dd631a
max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: povary/apps/profiles/migrations/0008_auto__add_field_profile_registration_ip__add_field_profile_last_login_.py
max_issues_repo_name: TorinAsakura/cooking | max_issues_repo_head_hexsha: cf0c78f613fa9ce0fcd4ec7a397ab880d9dd631a
max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: povary/apps/profiles/migrations/0008_auto__add_field_profile_registration_ip__add_field_profile_last_login_.py
max_forks_repo_name: TorinAsakura/cooking | max_forks_repo_head_hexsha: cf0c78f613fa9ce0fcd4ec7a397ab880d9dd631a
max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Profile.registration_ip'
db.add_column('profiles_profile', 'registration_ip',
self.gf('django.db.models.fields.IPAddressField')(max_length=15, null=True, blank=True),
keep_default=False)
# Adding field 'Profile.last_login_ip'
db.add_column('profiles_profile', 'last_login_ip',
self.gf('django.db.models.fields.IPAddressField')(max_length=15, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Profile.registration_ip'
db.delete_column('profiles_profile', 'registration_ip')
# Deleting field 'Profile.last_login_ip'
db.delete_column('profiles_profile', 'last_login_ip')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'gallery.gallery': {
'Meta': {'object_name': 'Gallery'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'profiles.award': {
'Meta': {'object_name': 'Award'},
'icon': ('filebrowser.fields.FileBrowseField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'profiles.profile': {
'Meta': {'object_name': 'Profile'},
'about': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'added_recipes_num': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'avatar': ('filebrowser.fields.FileBrowseField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'awards': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.Award']", 'symmetrical': 'False'}),
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'books': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'cake_master': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'cook': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cookery_in_life': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fb_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'gallery': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['gallery.Gallery']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_login_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'rating': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'registration_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'twitter_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'vk_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['profiles']
| 71.186275
| 182
| 0.564661
|
24d0f4ca646c3cd4ebea5754be6620933dbb2c13
| 5,918
|
py
|
Python
|
examples/calibration/plot_calibration_curve.py
|
liamdp12/scikit-learn
|
a42a7c7998bff42288221bc57f7b27be5bcbedc1
|
[
"BSD-3-Clause"
] | 2
|
2019-02-21T10:43:16.000Z
|
2019-07-30T04:56:37.000Z
|
scikit-learn-master/examples/calibration/plot_calibration_curve.py
|
lqkweb/learnMLflow
|
13c5decaebba95b1b90f92021be35e343b4764af
|
[
"Apache-2.0"
] | null | null | null |
scikit-learn-master/examples/calibration/plot_calibration_curve.py
|
lqkweb/learnMLflow
|
13c5decaebba95b1b90f92021be35e343b4764af
|
[
"Apache-2.0"
] | null | null | null |
"""
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to visualize
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue, as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: its calibration curve has a sigmoid shape, which is typical of
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.model_selection import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(max_iter=10000), "SVC", 2)
plt.show()
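# --- Editor's sketch (not part of the original example) ----------------------
# A plot-free version of the same comparison, reusing the X_train/X_test split
# defined above, to make the Brier-score and reliability-curve evaluation explicit.
raw_nb = GaussianNB().fit(X_train, y_train)
iso_nb = CalibratedClassifierCV(GaussianNB(), cv=2, method='isotonic').fit(X_train, y_train)
for label, model in [("raw NB", raw_nb), ("isotonic NB", iso_nb)]:
    prob = model.predict_proba(X_test)[:, 1]
    frac_pos, mean_pred = calibration_curve(y_test, prob, n_bins=10)
    print("%s Brier: %1.3f" % (label, brier_score_loss(y_test, prob)))
    print("  reliability points:", list(zip(mean_pred, frac_pos)))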
| 43.837037
| 79
| 0.708347
|
92488d407e528c7769a5294a55f529cd3cf943d9
| 381
|
py
|
Python
|
bmcore/tests/utils.py
|
hchockarprasad/bmdjango
|
a978e4bca264eaa5a1f21df332f7da06f9f69ee5
|
[
"MIT"
] | 3
|
2017-10-29T13:37:58.000Z
|
2017-11-06T15:31:35.000Z
|
bmcore/tests/utils.py
|
hchockarprasad/bmdjango
|
a978e4bca264eaa5a1f21df332f7da06f9f69ee5
|
[
"MIT"
] | null | null | null |
bmcore/tests/utils.py
|
hchockarprasad/bmdjango
|
a978e4bca264eaa5a1f21df332f7da06f9f69ee5
|
[
"MIT"
] | null | null | null |
from os import walk
from django.core import management
# Utility functions for testing
def load_initial_data():
f = []
for (dir_path, dir_name, file_name) in walk('bmcore/fixtures/'):
f.extend(file_name)
break
f.sort(key=lambda x: x.split()[-1])
for item in f:
management.call_command('loaddata', 'bmcore/fixtures/{0}'.format(item))
| 19.05
| 79
| 0.653543
|
ce9f2ba1d839fbcf833b655e68da46871ddbf896
| 3,389
|
py
|
Python
|
tests/functional/test_config.py
|
koneksys/aras-oslc
|
92adb87b884014df5b82a1c5402592aabc916bc0
|
[
"MIT"
] | 3
|
2021-03-19T22:25:51.000Z
|
2021-03-20T19:34:28.000Z
|
tests/functional/test_config.py
|
koneksys/aras-oslc
|
92adb87b884014df5b82a1c5402592aabc916bc0
|
[
"MIT"
] | null | null | null |
tests/functional/test_config.py
|
koneksys/aras-oslc
|
92adb87b884014df5b82a1c5402592aabc916bc0
|
[
"MIT"
] | null | null | null |
import logging
from oslc_api.auth import login
from oslc_api.auth.models import User
log = logging.getLogger(__name__)
def test_components(oslc_api, source_base_uri, access_token, item_values,
mocker, load_item_types_test, load_items_test):
@login.request_loader
def load_user_from_request(request):
return User(username='admin', access_token=access_token)
item_type = item_values[0]
config_id = item_values[1]
if 'localhost' in source_base_uri:
mocker.patch(
'oslc_api.aras.resources.load_item_types',
return_value=load_item_types_test
)
mocker.patch(
'oslc_api.aras.resources.load_items',
return_value=load_items_test
)
res = oslc_api.get_components(item_type)
assert res is not None
assert res.status_code == 200, 'The request was not successful'
assert config_id.encode('ascii') in res.data, 'The response does not contain the config id'
def test_component(oslc_api, source_base_uri, access_token, item_values,
mocker, load_item_types_test, load_items_test, load_validate_configs_test):
@login.request_loader
def load_user_from_request(request):
return User(username='admin', access_token=access_token)
item_type = item_values[0]
config_id = item_values[1]
if 'localhost' in source_base_uri:
mocker.patch(
'oslc_api.aras.resources.load_item_types',
return_value=load_item_types_test
)
mocker.patch(
'oslc_api.aras.resources.load_items',
return_value=load_items_test
)
mocker.patch(
'oslc_api.aras.resources.validate_config_id',
return_value=load_validate_configs_test
)
res = oslc_api.get_component(item_type, config_id)
assert res is not None
assert res.status_code == 200, 'The request was not successful'
assert config_id.encode('ascii') in res.data, 'The response does not contain the config id'
assert b'oslc_config:configurations' in res.data
def test_configurations(oslc_api, source_base_uri, access_token, item_values,
mocker, load_item_types_test, load_items_test, load_validate_configs_test,
load_resource_shape_test):
@login.request_loader
def load_user_from_request(request):
return User(username='admin', access_token=access_token)
item_type = item_values[0]
config_id = item_values[1]
if 'localhost' in source_base_uri:
mocker.patch(
'oslc_api.aras.resources.load_item_types',
return_value=load_item_types_test
)
mocker.patch(
'oslc_api.aras.resources.load_items',
return_value=load_items_test
)
mocker.patch(
'oslc_api.aras.resources.validate_config_id',
return_value=load_resource_shape_test
)
mocker.patch(
'oslc_api.aras.resources.load_streams',
return_value=load_validate_configs_test
)
res = oslc_api.get_configurations(item_type, config_id)
assert res is not None
assert res.status_code == 200, 'The request was not successful'
assert config_id.encode('ascii') in res.data, 'The response does not contain the config id'
assert b'rdfs:member' in res.data
| 32.902913
| 98
| 0.678961
|
bb022ecdcc855cb9638e50e4c3a78590843e3528
| 303
|
py
|
Python
|
Chapter10/ordered_vs_normal_dict.py
|
kaushalkumarshah/Learn-Python-in-7-Days
|
2663656767c8959ace836f0c0e272f3e501bbe6e
|
[
"MIT"
] | 12
|
2018-07-09T16:20:31.000Z
|
2022-03-21T22:52:15.000Z
|
Chapter10/ordered_vs_normal_dict.py
|
kaushalkumarshah/Learn-Python-in-7-Days
|
2663656767c8959ace836f0c0e272f3e501bbe6e
|
[
"MIT"
] | null | null | null |
Chapter10/ordered_vs_normal_dict.py
|
kaushalkumarshah/Learn-Python-in-7-Days
|
2663656767c8959ace836f0c0e272f3e501bbe6e
|
[
"MIT"
] | 19
|
2018-01-09T12:49:06.000Z
|
2021-11-23T08:05:55.000Z
|
import collections
print 'Regular Dictionary'
d = {}
d['a']= 'SAS'
d['b']= 'PYTHON'
d['c']= 'R'
for k,v in d.items():
print k, ":",v
print '\n Ordered dictionary'
d1 = collections.OrderedDict()
d1['a']= 'SAS'
d1['b']= 'PYTHON'
d1['c']= 'R'
for k,v in d1.items():
print k, ":",v
| 15.947368
| 31
| 0.537954
|
8b868b2bc7177a465125d045d785b99ac38074a6
| 4,860
|
py
|
Python
|
tests/backends/gaussian/test_preparations.py
|
antalszava/piquasso
|
7ebff83145cfab44929114437c250852dff5f9a5
|
[
"Apache-2.0"
] | 12
|
2021-09-12T15:51:45.000Z
|
2022-03-05T22:25:47.000Z
|
tests/backends/gaussian/test_preparations.py
|
antalszava/piquasso
|
7ebff83145cfab44929114437c250852dff5f9a5
|
[
"Apache-2.0"
] | 36
|
2021-09-13T08:01:27.000Z
|
2022-03-21T11:53:30.000Z
|
tests/backends/gaussian/test_preparations.py
|
antalszava/piquasso
|
7ebff83145cfab44929114437c250852dff5f9a5
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2021 Budapest Quantum Computing Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import piquasso as pq
from piquasso.api.errors import InvalidParameter, InvalidState
def test_Mean_is_automatically_scaled_by_hbar():
config = pq.Config(hbar=42)
xpxp_mean_vector = np.array([1, 2])
with pq.Program() as program:
pq.Q() | pq.Mean(xpxp_mean_vector)
simulator = pq.GaussianSimulator(d=1, config=config)
result = simulator.execute(program)
assert np.allclose(
result.state.xpxp_mean_vector, xpxp_mean_vector * np.sqrt(config.hbar)
)
def test_Covariance_is_automatically_scaled_by_hbar():
config = pq.Config(hbar=42)
xpxp_covariance_matrix = np.array(
[
[2, 1],
[1, 2],
]
)
with pq.Program() as program:
pq.Q() | pq.Covariance(xpxp_covariance_matrix)
simulator = pq.GaussianSimulator(d=1, config=config)
result = simulator.execute(program)
assert np.allclose(
result.state.xpxp_covariance_matrix, xpxp_covariance_matrix * config.hbar
)
def test_Thermal_is_automatically_scaled_by_hbar():
config = pq.Config(hbar=42)
mean_photon_numbers = np.array([1, 2])
with pq.Program() as program:
pq.Q() | pq.Thermal(mean_photon_numbers)
simulator = pq.GaussianSimulator(d=2, config=config)
result = simulator.execute(program)
assert np.allclose(
result.state.xpxp_covariance_matrix,
config.hbar
* np.array(
[
[3, 0, 0, 0],
[0, 3, 0, 0],
[0, 0, 5, 0],
[0, 0, 0, 5],
]
),
)
def test_Thermal_with_zero_mean_photon_numbers_yields_Vacuum():
config = pq.Config(hbar=42)
mean_photon_numbers = np.array([0, 0])
with pq.Program() as thermal_program:
pq.Q() | pq.Thermal(mean_photon_numbers)
with pq.Program() as vacuum_program:
pq.Q() | pq.Vacuum()
simulator = pq.GaussianSimulator(d=2, config=config)
thermal_state = simulator.execute(thermal_program).state
vacuum_state = simulator.execute(vacuum_program).state
assert thermal_state == vacuum_state
def test_state_initialization_with_misshaped_mean():
misshaped_mean = np.array(
[
[1, 2],
[3, 4],
]
)
with pq.Program() as program:
pq.Q() | pq.Mean(misshaped_mean)
simulator = pq.GaussianSimulator(d=1)
with pytest.raises(InvalidState):
simulator.execute(program)
def test_state_initialization_with_misshaped_covariance():
misshaped_cov = np.array(
[
[1, 2, 10000],
[1, 1, 10000],
]
)
with pq.Program() as program:
pq.Q() | pq.Covariance(misshaped_cov)
simulator = pq.GaussianSimulator(d=1)
with pytest.raises(InvalidState):
simulator.execute(program)
def test_state_initialization_with_nonsymmetric_covariance():
nonsymmetric_cov = np.array(
[
[1, 2],
[1, 1],
]
)
with pq.Program() as program:
pq.Q() | pq.Covariance(nonsymmetric_cov)
simulator = pq.GaussianSimulator(d=1)
with pytest.raises(InvalidState):
simulator.execute(program)
def test_state_initialization_with_nonpositive_covariance():
nonpositive_cov = np.array(
[
[1, 0],
[0, -1],
]
)
with pq.Program() as program:
pq.Q() | pq.Covariance(nonpositive_cov)
simulator = pq.GaussianSimulator(d=1)
with pytest.raises(InvalidState):
simulator.execute(program)
def test_Thermal_with_negative_mean_photon_numbers_raises_InvalidParameter():
mean_photon_numbers = np.array([-1, 1])
with pytest.raises(InvalidParameter):
pq.Q() | pq.Thermal(mean_photon_numbers)
def test_vacuum_resets_the_state(state):
with pq.Program() as program:
pq.Q() | pq.Vacuum()
simulator = pq.GaussianSimulator(d=state.d)
new_state = simulator.execute(program, initial_state=state).state
assert np.allclose(
new_state.xpxp_mean_vector,
np.zeros(2 * new_state.d),
)
assert np.allclose(
new_state.xpxp_covariance_matrix,
np.identity(2 * new_state.d) * simulator.config.hbar,
)
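# --- Editor's sketch (convention inferred from the tests above, not library code) ---
# The Thermal test expects diag(3, 3, 5, 5) * hbar for mean photon numbers (1, 2),
# i.e. each mode contributes (2 * n + 1) * hbar to its x and p diagonal entries.
def expected_thermal_covariance(mean_photon_numbers, hbar):
    diagonal = np.repeat(2 * np.asarray(mean_photon_numbers) + 1, 2) * hbar
    return np.diag(diagonal)

assert np.allclose(
    expected_thermal_covariance([1, 2], hbar=42),
    42 * np.diag([3, 3, 5, 5]),
)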
| 24.3
| 81
| 0.64856
|
58a4f05a345f8f48034347bd3a3c07ebb780bb08
| 4,272
|
py
|
Python
|
omspy/multi.py
|
webclinic017/omspy
|
a3982fba41fb7c8bbe5759bafba21556ae62df0d
|
[
"MIT"
] | 7
|
2021-10-10T16:07:06.000Z
|
2022-03-18T18:07:41.000Z
|
omspy/multi.py
|
webclinic017/omspy
|
a3982fba41fb7c8bbe5759bafba21556ae62df0d
|
[
"MIT"
] | 15
|
2021-10-30T15:33:23.000Z
|
2022-03-25T13:50:56.000Z
|
omspy/multi.py
|
webclinic017/omspy
|
a3982fba41fb7c8bbe5759bafba21556ae62df0d
|
[
"MIT"
] | 1
|
2021-10-30T19:29:15.000Z
|
2021-10-30T19:29:15.000Z
|
"""
Module for multi-user multi-broker implementation
"""
from pydantic import BaseModel
from omspy.base import Broker
from omspy.order import Order
from typing import Dict, List, Optional, Type
from collections import defaultdict
import logging
import uuid
class User(BaseModel):
"""
A basic user class for multi user environment
"""
broker: Broker
scale: float = 1.0
name: Optional[str]
client_id: Optional[str]
exclude: Optional[Dict]
class Config:
underscore_attrs_are_private = True
arbitrary_types_allowed = True
class UserOrder(BaseModel):
order: Order
user: User
class Config:
arbitrary_types_allowed = True
class MultiUser:
"""
    Multi-user implementation
"""
def __init__(self, users: List[User]):
self._users: List[User] = users
self._orders: defaultdict(list) = {}
def add(self, user: User):
"""
Add a user
"""
self._users.append(user)
@property
def users(self) -> List[User]:
return self._users
@property
def orders(self) -> Dict[str, Order]:
return self._orders
@property
def count(self) -> int:
return len(self.users)
class MultiOrder(Order):
_orders: List[UserOrder] = []
def __init__(self, **data) -> None:
super().__init__(**data)
self.pseudo_id = uuid.uuid4().hex
self.is_multi = True
@property
def orders(self) -> List[UserOrder]:
return self._orders
@property
def count(self) -> int:
"""
Return the number of orders
"""
return len(self.orders)
def create(self, users: Optional[MultiUser]) -> List[UserOrder]:
# Clear existing list
self._orders.clear()
for user in users.users:
order2 = self.clone()
order2.quantity = int(user.scale * self.quantity)
order2.pseudo_id = self.pseudo_id
order2.save_to_db()
m_order = UserOrder(order=order2, user=user)
self._orders.append(m_order)
self.save_to_db()
return self.orders
def save_to_db(self) -> bool:
"""
save or update the order to db
"""
if self.connection:
values = []
values.append(self.dict(exclude=self._exclude_fields))
for order in self.orders:
values.append(order.order.dict(exclude=self._exclude_fields))
self.connection["orders"].upsert_all(values, pk="id")
return True
else:
logging.info("No valid database connection")
return False
def execute(self, broker: MultiUser, **kwargs):
"""
Execute order on all users
broker
A Multi User instance
name is retained as broker so it is compatible
with the original Order interface
"""
if self.count == 0:
self.create(users=broker)
for order in self.orders:
order.order.execute(order.user.broker, **kwargs)
def modify(self, **kwargs):
"""
modify all orders
"""
for k, v in kwargs.items():
if hasattr(self, k):
setattr(self, k, v)
if "quantity" in kwargs:
kwargs.pop("quantity")
for order in self.orders:
quantity = int(self.quantity * order.user.scale)
order.order.quantity = quantity
order.order.modify(order.user.broker, quantity=quantity, **kwargs)
def cancel(self, **kwargs):
"""
cancel all existing orders
"""
for order in self.orders:
order.order.cancel(order.user.broker)
def update(self, data: Dict[str, Dict]):
"""
Update order based on information received from broker
data
data to update as dictionary; key should be the broker order_id
returns True if update is done
"""
keys = data.keys()
for order in self._orders:
order_id = order.order.order_id
order_details = data.get(order_id)
if order_details:
order.order.update(order_details, save=False)
self.save_to_db()
| 26.7
| 78
| 0.582865
|
6f3448f1af494f3d3b8d2ecfed57593633aeb0f0
| 2,799
|
py
|
Python
|
tests/test_khmer_config.py
|
jiarong/khmer
|
9f3b68f3a281b68544effa0f815cbfa781228983
|
[
"BSD-3-Clause"
] | 1
|
2015-04-30T01:50:30.000Z
|
2015-04-30T01:50:30.000Z
|
tests/test_khmer_config.py
|
jiarong/khmer
|
9f3b68f3a281b68544effa0f815cbfa781228983
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_khmer_config.py
|
jiarong/khmer
|
9f3b68f3a281b68544effa0f815cbfa781228983
|
[
"BSD-3-Clause"
] | null | null | null |
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2013. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt. Contact: ctb@msu.edu
#
"""
Tests various aspects of the wrapper for the C++ API configuration interface.
"""
import khmer
# NOTE: Currently the wrapper only supports a config singleton.
# In the future, manipulation of multiple configs may be allowed.
# The following alias is a hedge against the future.
from khmer import get_config as get_active_config
def test_EXISTENCE_has_extra_sanity_checks( ):
"""
Verify that 'has_extra_sanity_checks' exists.
An exception should be thrown if a config object cannot be obtained.
"""
config = get_active_config( )
assert "has_extra_sanity_checks" in dir( config )
def check_attribute_exists( config, attr_name ):
"""
Helper function for testing attribute existence.
"""
assert True == hasattr( config, attr_name ), attr_name
def test_EXISTENCE_OTHERS( ):
"""
Verify that all of the various attributes exist.
"""
config = get_active_config( )
for attr_name in \
[
"set_number_of_threads", "get_number_of_threads",
"get_reads_input_buffer_size", "set_reads_input_buffer_size",
]:
yield check_attribute_exists, config, attr_name
#def test_1_ARGS_set_number_of_threads( ):
# """
# Verify that the number of threads cannot be set to a negative number.
# """
# config = get_active_config( )
# if config.is_threaded( ):
# try: config.set_number_of_threads( -1 );
# except: pass
# else: assert False, "config.set_number_of_threads( -1 )"
#def test_2_ARGS_set_number_of_threads( ):
# """
# Verify that the number of threads cannot be set to zero.
# """
# config = get_active_config( )
# if config.is_threaded( ):
# try: config.set_number_of_threads( 0 );
# except: pass
# else: assert False, "config.set_number_of_threads( 0 )"
def test_USE_set_number_of_threads( ):
"""
Verify that the number of threads set is what is reported.
"""
config = get_active_config( )
tnum = config.get_number_of_threads( )
config.set_number_of_threads( 8 )
assert 8 == config.get_number_of_threads( )
config.set_number_of_threads( tnum )
assert tnum == config.get_number_of_threads( )
def test_USE_set_reads_input_buffer_size( ):
"""
Verify that the reads file chunk size is what is reported.
"""
config = get_active_config( )
bufsz = config.get_reads_input_buffer_size( )
config.set_reads_input_buffer_size( 123456789L )
assert 123456789L == config.get_reads_input_buffer_size( )
config.set_reads_input_buffer_size( bufsz )
assert bufsz == config.get_reads_input_buffer_size( )
# vim: set ft=python sts=4 sw=4 tw=79:
| 29.776596
| 74
| 0.719543
|
605506d2d13978e2c5d7039c7cea1ef82476a239
| 6,410
|
py
|
Python
|
ionoscloud/models/kubernetes_clusters.py
|
ionos-cloud/ionos-cloud-sdk-python
|
3c5804697c262898e6f6a438dc40e1b45a4bb5c9
|
[
"Apache-2.0"
] | null | null | null |
ionoscloud/models/kubernetes_clusters.py
|
ionos-cloud/ionos-cloud-sdk-python
|
3c5804697c262898e6f6a438dc40e1b45a4bb5c9
|
[
"Apache-2.0"
] | null | null | null |
ionoscloud/models/kubernetes_clusters.py
|
ionos-cloud/ionos-cloud-sdk-python
|
3c5804697c262898e6f6a438dc40e1b45a4bb5c9
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
CLOUD API
IONOS Enterprise-grade Infrastructure as a Service (IaaS) solutions can be managed through the Cloud API, in addition or as an alternative to the \"Data Center Designer\" (DCD) browser-based tool. Both methods employ consistent concepts and features, deliver similar power and flexibility, and can be used to perform a multitude of management tasks, including adding servers, volumes, configuring networks, and so on. # noqa: E501
The version of the OpenAPI document: 6.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from ionoscloud.configuration import Configuration
class KubernetesClusters(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'type': 'str',
'href': 'str',
'items': 'list[KubernetesCluster]',
}
attribute_map = {
'id': 'id',
'type': 'type',
'href': 'href',
'items': 'items',
}
def __init__(self, id=None, type=None, href=None, items=None, local_vars_configuration=None): # noqa: E501
"""KubernetesClusters - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._type = None
self._href = None
self._items = None
self.discriminator = None
if id is not None:
self.id = id
if type is not None:
self.type = type
if href is not None:
self.href = href
if items is not None:
self.items = items
@property
def id(self):
"""Gets the id of this KubernetesClusters. # noqa: E501
A unique representation of the Kubernetes cluster as a resource collection. # noqa: E501
:return: The id of this KubernetesClusters. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this KubernetesClusters.
A unique representation of the Kubernetes cluster as a resource collection. # noqa: E501
:param id: The id of this KubernetesClusters. # noqa: E501
:type id: str
"""
self._id = id
@property
def type(self):
"""Gets the type of this KubernetesClusters. # noqa: E501
The type of resource within a collection. # noqa: E501
:return: The type of this KubernetesClusters. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this KubernetesClusters.
The type of resource within a collection. # noqa: E501
:param type: The type of this KubernetesClusters. # noqa: E501
:type type: str
"""
allowed_values = ["collection"] # noqa: E501
if self.local_vars_configuration.client_side_validation and type not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def href(self):
"""Gets the href of this KubernetesClusters. # noqa: E501
URL to the collection representation (absolute path). # noqa: E501
:return: The href of this KubernetesClusters. # noqa: E501
:rtype: str
"""
return self._href
@href.setter
def href(self, href):
"""Sets the href of this KubernetesClusters.
URL to the collection representation (absolute path). # noqa: E501
:param href: The href of this KubernetesClusters. # noqa: E501
:type href: str
"""
self._href = href
@property
def items(self):
"""Gets the items of this KubernetesClusters. # noqa: E501
Array of items in the collection. # noqa: E501
:return: The items of this KubernetesClusters. # noqa: E501
:rtype: list[KubernetesCluster]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this KubernetesClusters.
Array of items in the collection. # noqa: E501
:param items: The items of this KubernetesClusters. # noqa: E501
:type items: list[KubernetesCluster]
"""
self._items = items
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, KubernetesClusters):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, KubernetesClusters):
return True
return self.to_dict() != other.to_dict()
| 29.004525
| 438
| 0.584711
|
75d8633362a2331904f0667299b9d3eee157b1be
| 3,567
|
py
|
Python
|
sis2.py
|
pdsteele/DES-Python
|
20e605c77eee5e6019ca3427ca27f5b41a13a8b8
|
[
"MIT"
] | 6
|
2016-04-17T03:49:09.000Z
|
2022-03-17T12:57:34.000Z
|
sis2.py
|
macrodatascience/DES-Python
|
20e605c77eee5e6019ca3427ca27f5b41a13a8b8
|
[
"MIT"
] | null | null | null |
sis2.py
|
macrodatascience/DES-Python
|
20e605c77eee5e6019ca3427ca27f5b41a13a8b8
|
[
"MIT"
] | 12
|
2015-10-07T21:16:59.000Z
|
2022-03-17T12:57:37.000Z
|
# -------------------------------------------------------------------------
# * This program - an extension of program sis1.c - simulates a simple (s,S)
# * inventory system using Equilikely distributed demands.
# *
# * Name : sis2.c (Simple Inventory System, version 2)
# * Authors : Steve Park & Dave Geyer
# * Language : ANSI C
# * Latest Revision : 8-28-97
# Translated by : Philip Steele
# Language : Python 3.3
# Latest Revision : 3/26/14
# * -------------------------------------------------------------------------
# */
#include <stdio.h>
#include "rng.h"
from rng import random, putSeed
MINIMUM = 20 # 's' inventory policy parameter */
MAXIMUM = 80 # 'S' inventory policy parameter */
STOP = 100 # number of time intervals */
class sumOf:
setup = 0.0 #setup instances
holding = 0.0 #inventory held (+)
shortage = 0.0 #inventory held (-)
order = 0.0 #orders
demand = 0.0 #demands
def sqr(x):
return((x)*(x))
def Equilikely(a,b):
#===================================================================
#Returns an equilikely distributed integer between a and b inclusive.
#NOTE: use a < b
#===================================================================
return (a + int((b - a + 1) * random()))
def GetDemand():
# ------------------------
# * generate the next demand
# * ------------------------
# */
return (Equilikely(10, 50))
###########################Main Program###############################
index = 0 # time interval index */
inventory = MAXIMUM # current inventory level */
demand = -1 # amount of demand */
order = -1 # amount of order */
sum = sumOf()
putSeed(123456789)
while (index < STOP):
index += 1
if (inventory < MINIMUM): # place an order */
order = MAXIMUM - inventory
sum.setup += 1
sum.order += order
else: # no order */
order = 0
inventory += order # there is no delivery lag */
demand = GetDemand()
sum.demand += demand
if (inventory > demand):
sum.holding += (inventory - 0.5 * demand)
else:
sum.holding += sqr(inventory) / (2.0 * demand)
sum.shortage += sqr(demand - inventory) / (2.0 * demand)
inventory -= demand
#EndWhile
if (inventory < MAXIMUM): # force the final inventory to */
order = MAXIMUM - inventory # match the initial inventory */
sum.setup += 1
sum.order += order
inventory += order
print("\nfor {0:1d} time intervals with an average demand of {1:6.2f}".format(index, (sum.demand/index)))
print("and policy parameters (s, S) = ({0}, {1})\n".format(MINIMUM, MAXIMUM))
print(" average order ............ = {0:6.2f}".format(sum.order / index))
print(" setup frequency .......... = {0:6.2f}".format(sum.setup / index))
print(" average holding level .... = {0:6.2f}".format(sum.holding / index))
print(" average shortage level ... = {0:6.2f}".format(sum.shortage / index))
#C output:
# for 100 time intervals with an average demand of 27.68
# and policy parameters (s, S) = (20, 80)
# average order ............ = 27.68
# setup frequency .......... = 0.36
# average holding level .... = 44.81
# average shortage level ... = 0.14
| 34.631068
| 105
| 0.478273
|
cd59e9c3574f3b459b2d3039466a6a379df5b06f
| 390
|
py
|
Python
|
scripts/external_libs/simpy-3.0.10/simpy/resources/__init__.py
|
timgates42/trex-core
|
efe94752fcb2d0734c83d4877afe92a3dbf8eccd
|
[
"Apache-2.0"
] | 956
|
2015-06-24T15:04:55.000Z
|
2022-03-30T06:25:04.000Z
|
scripts/external_libs/simpy-3.0.10/simpy/resources/__init__.py
|
angelyouyou/trex-core
|
fddf78584cae285d9298ef23f9f5c8725e16911e
|
[
"Apache-2.0"
] | 782
|
2015-09-20T15:19:00.000Z
|
2022-03-31T23:52:05.000Z
|
scripts/external_libs/simpy-3.0.10/simpy/resources/__init__.py
|
angelyouyou/trex-core
|
fddf78584cae285d9298ef23f9f5c8725e16911e
|
[
"Apache-2.0"
] | 429
|
2015-06-27T19:34:21.000Z
|
2022-03-23T11:02:51.000Z
|
"""
SimPy implements three types of resources that can be used to synchronize
processes or to model congestion points:
.. currentmodule:: simpy.resources
.. autosummary::
resource
container
store
They are derived from the base classes defined in the
:mod:`~simpy.resources.base` module. These classes are also meant to support
the implementation of custom resource types.
"""
| 21.666667
| 76
| 0.764103
|
26bd86aceba78660085a0f8f59a7c8d4616dd7c1
| 6,295
|
py
|
Python
|
src/pretalx/agenda/views/schedule.py
|
h3xstream/pretalx
|
0600d08b03e60342dfe7f21c3eee9dda450e4a1d
|
[
"Apache-2.0"
] | null | null | null |
src/pretalx/agenda/views/schedule.py
|
h3xstream/pretalx
|
0600d08b03e60342dfe7f21c3eee9dda450e4a1d
|
[
"Apache-2.0"
] | null | null | null |
src/pretalx/agenda/views/schedule.py
|
h3xstream/pretalx
|
0600d08b03e60342dfe7f21c3eee9dda450e4a1d
|
[
"Apache-2.0"
] | null | null | null |
import hashlib
from datetime import timedelta
from urllib.parse import unquote
import pytz
from django.http import (
Http404, HttpResponse, HttpResponseNotModified, HttpResponsePermanentRedirect,
)
from django.urls import resolve, reverse
from django.utils.functional import cached_property
from django.utils.timezone import now
from django.views.generic import TemplateView
from pretalx.common.mixins.views import PermissionRequired
from pretalx.common.signals import register_data_exporters
class ScheduleDataView(PermissionRequired, TemplateView):
template_name = 'agenda/schedule.html'
permission_required = 'agenda.view_schedule'
def get_permission_object(self):
return self.request.event
@cached_property
def version(self):
if 'version' in self.kwargs:
return unquote(self.kwargs['version'])
return None
def dispatch(self, request, *args, **kwargs):
if 'version' in request.GET:
kwargs['version'] = request.GET['version']
return HttpResponsePermanentRedirect(
reverse(
f'agenda:versioned-{request.resolver_match.url_name}',
args=args,
kwargs=kwargs,
)
)
return super().dispatch(request, *args, **kwargs)
def get_object(self):
if self.version:
return self.request.event.schedules.filter(
version__iexact=self.version
).first()
if self.request.event.current_schedule:
return self.request.event.current_schedule
return None
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
schedule = self.get_object()
event = self.request.event
if not schedule and self.version:
context['version'] = self.version
context['error'] = f'Schedule "{self.version}" not found.'
return context
if not schedule:
context['error'] = 'Schedule not found.'
return context
context['schedule'] = schedule
context['schedules'] = event.schedules.filter(
published__isnull=False
).values_list('version')
return context
class ExporterView(ScheduleDataView):
def get_exporter(self, request):
url = resolve(request.path_info)
if url.url_name == 'export':
exporter = url.kwargs.get('name') or unquote(
self.request.GET.get('exporter')
)
else:
exporter = url.url_name
exporter = exporter.lstrip('export.')
responses = register_data_exporters.send(request.event)
for _, response in responses:
ex = response(request.event)
if ex.identifier == exporter:
if ex.public or request.is_orga:
return ex
return None
def get(self, request, *args, **kwargs):
exporter = self.get_exporter(request)
if not exporter:
raise Http404()
try:
exporter.schedule = self.get_object()
exporter.is_orga = getattr(self.request, 'is_orga', False)
file_name, file_type, data = exporter.render()
etag = hashlib.sha1(str(data).encode()).hexdigest()
if 'HTTP_IF_NONE_MATCH' in request.META:
if request.META['HTTP_IF_NONE_MATCH'] == etag:
return HttpResponseNotModified()
resp = HttpResponse(data, content_type=file_type)
resp['ETag'] = etag
if file_type not in ['application/json', 'text/xml']:
resp['Content-Disposition'] = f'attachment; filename="{file_name}"'
return resp
except Exception as export_exception:
print(export_exception)
raise Http404()
class ScheduleView(ScheduleDataView):
template_name = 'agenda/schedule.html'
permission_required = 'agenda.view_schedule'
def get_permission_object(self):
return self.request.event
def get_object(self):
if self.version == 'wip' and self.request.user.has_perm(
'orga.view_schedule', self.request.event
):
return self.request.event.wip_schedule
return super().get_object()
def get_context_data(self, **kwargs):
from pretalx.schedule.exporters import ScheduleData
context = super().get_context_data(**kwargs)
context['exporters'] = list(
exporter(self.request.event)
for _, exporter in register_data_exporters.send(self.request.event)
)
timezone = pytz.timezone(self.request.event.timezone)
if 'schedule' not in context:
return context
context['data'] = ScheduleData(
event=self.request.event, schedule=context['schedule']
).data
context['search'] = self.request.GET.get('q', '').lower()
for date in context['data']:
if date.get('first_start') and date.get('last_end'):
start = (
date.get('first_start')
.astimezone(timezone)
.replace(second=0, minute=0)
)
end = date.get('last_end').astimezone(timezone)
date['height'] = int((end - start).total_seconds() / 60 * 2)
date['hours'] = []
step = start
while step < end:
date['hours'].append(step.strftime('%H:%M'))
step += timedelta(hours=1)
for room in date['rooms']:
for talk in room.get('talks', []):
talk.top = int(
(talk.start.astimezone(timezone) - start).total_seconds()
/ 60
* 2
)
talk.height = int(talk.duration * 2)
talk.is_active = talk.start <= now() <= talk.real_end
return context
class ChangelogView(PermissionRequired, TemplateView):
template_name = 'agenda/changelog.html'
permission_required = 'agenda.view_schedule'
def get_permission_object(self):
return self.request.event
| 35.971429
| 85
| 0.587133
|
e682a45b3c9512c7dc12b72c9bd458d99574540c
| 1,845
|
py
|
Python
|
database/covid19_data_aggregator/rivm/aggregate_netherlands_rivm_data.py
|
datamike/covid19
|
76fd702b4d126642aba3a610d29541c197a7a9a5
|
[
"MIT"
] | 33
|
2020-03-11T00:42:28.000Z
|
2020-08-25T20:01:42.000Z
|
database/covid19_data_aggregator/rivm/aggregate_netherlands_rivm_data.py
|
datamike/covid19
|
76fd702b4d126642aba3a610d29541c197a7a9a5
|
[
"MIT"
] | 17
|
2020-03-11T01:15:43.000Z
|
2020-03-25T17:33:01.000Z
|
database/covid19_data_aggregator/rivm/aggregate_netherlands_rivm_data.py
|
datamike/covid19
|
76fd702b4d126642aba3a610d29541c197a7a9a5
|
[
"MIT"
] | 23
|
2020-03-11T06:07:03.000Z
|
2020-04-20T21:58:20.000Z
|
import requests
from bs4 import BeautifulSoup
import pandas as pd
from datetime import datetime
from os import path
from io import StringIO
import re
def download_netherlands_rivm_data(target_directory):
"""
Download the Netherlands Rijksinstituut voor Volksgezondheid
en Milieu data
:param target_directory: str
:return None
"""
    # The url of the Rijksinstituut data
    rivm_url = "https://www.rivm.nl/coronavirus-kaart-van-nederland#node-coronavirus-covid-19-meldingen"
    # Fetch the data
    response = requests.get(rivm_url)
    soup = BeautifulSoup(response.text, "html.parser")
    raw_data = soup.find_all("div", id="csvData")[0].string
    raw_data_rows = [r for r in raw_data.split("\n") if r != ""]
    # @TODO: confirm whether second column and fourth column are sane guesses
    # List columns
    columns = raw_data_rows[0].split(";")
    # Parse out the comment rows indicated by rows starting with negative integers
    exp = r"-\d+;"
    comments = {x: y for x, y in enumerate(raw_data_rows[1:]) if re.match(exp, y)}
    # Clean the negative integer out
    comments = {x: re.sub(exp, "", y) for x, y in comments.items()}
    # Select the data rows: all rows except header row and comment rows
    data_idx = 1 + len(comments)
    data = raw_data_rows[data_idx:]
    df = pd.read_csv(StringIO("\n".join(data)), sep=";", names=columns, header=0, skiprows=data_idx)
    year, day, month = (datetime.now().year, datetime.now().day, datetime.now().month)
    df["Fetch date"] = f"{day}.{month}.{year}"
    df["Comments"] = "\n".join(list(comments.values()))
#Save file
file_path = path.join(target_directory,"Netherlands_RIVM_data.csv")
print(df)
df.to_csv(file_path, index=False)
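# --- Editor's sketch (toy data; the rows and column names below are made up) --
# Illustrates the comment-row filtering used above: rows starting with a
# negative integer are treated as comments and stripped of that prefix.
def _demo_comment_filtering():
    rows = [
        "Gemnr;Gemeente;Aantal",                 # header (illustrative column names)
        "-1;Toelichting: woonplaats onbekend",   # comment row
        "363;Amsterdam;190",                     # data row (made-up numbers)
    ]
    exp = r"-\d+;"
    comments = {x: re.sub(exp, "", y) for x, y in enumerate(rows[1:]) if re.match(exp, y)}
    data = rows[1 + len(comments):]
    return comments, data   # ({0: 'Toelichting: woonplaats onbekend'}, ['363;Amsterdam;190'])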
| 34.811321
| 107
| 0.650407
|
477343e13cdb3c7a2006058367d02e07885a0487
| 512
|
py
|
Python
|
exercises/pt/exc_02_07.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/pt/exc_02_07.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/pt/exc_02_07.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp("Berlin looks like a nice city")
# Iterar nos tokens
token_texts = [token.text for token in doc]
pos_tags = [token.pos_ for token in doc]
for index, pos in enumerate(pos_tags):
# Verifica se o token atual é um substantivo próprio.
if pos == "PROPN":
# Verifica se o próximo token é um verbo
if pos_tags[index + 1] == "VERB":
result = token_texts[index]
print("Found proper noun before a verb:", result)
| 30.117647
| 61
| 0.658203
|
28262c4f35e0f78f25af435920fa1b1b488599d7
| 4,362
|
py
|
Python
|
tasks.py
|
luan0ap/thg-framework-template-plugin
|
3fbf4937ab39c7fdd7631be091e9e9c5234457d5
|
[
"MIT"
] | null | null | null |
tasks.py
|
luan0ap/thg-framework-template-plugin
|
3fbf4937ab39c7fdd7631be091e9e9c5234457d5
|
[
"MIT"
] | null | null | null |
tasks.py
|
luan0ap/thg-framework-template-plugin
|
3fbf4937ab39c7fdd7631be091e9e9c5234457d5
|
[
"MIT"
] | 1
|
2019-06-17T19:11:32.000Z
|
2019-06-17T19:11:32.000Z
|
#
# -*- coding: utf-8 -*-
"""Development related tasks to be run with 'invoke'"""
import os
import shutil
import invoke
# shared function
def rmrf(items, verbose=True):
"Silently remove a list of directories or files"
if isinstance(items, str):
items = [items]
for item in items:
if verbose:
print("Removing {}".format(item))
shutil.rmtree(item, ignore_errors=True)
# rmtree doesn't remove bare files
try:
os.remove(item)
except FileNotFoundError:
pass
# create namespaces
namespace = invoke.Collection()
namespace_clean = invoke.Collection('clean')
namespace.add_collection(namespace_clean, 'clean')
#####
#
# pytest, tox, pylint, and codecov
#
#####
@invoke.task
def pytest(context):
"Run tests and code coverage using pytest"
context.run("pytest --cov=thg_test")
namespace.add_task(pytest)
@invoke.task
def pytest_clean(context):
"Remove pytest cache and code coverage files and directories"
#pylint: disable=unused-argument
dirs = ['.pytest_cache', '.cache', '.coverage']
rmrf(dirs)
namespace_clean.add_task(pytest_clean, 'pytest')
@invoke.task
def tox(context):
"Run unit and integration tests on multiple python versions using tox"
context.run("tox")
namespace.add_task(tox)
@invoke.task
def tox_clean(context):
"Remove tox virtualenvs and logs"
#pylint: disable=unused-argument
rmrf('.tox')
namespace_clean.add_task(tox_clean, 'tox')
@invoke.task
def pylint(context):
"Check code quality using pylint"
context.run('pylint --rcfile=thg_test/pylintrc thg_test')
namespace.add_task(pylint)
@invoke.task
def pylint_tests(context):
"Check code quality of test suite using pylint"
context.run('pylint --rcfile=tests/pylintrc tests')
namespace.add_task(pylint_tests)
#####
#
# build and distribute
#
#####
BUILDDIR = 'build'
DISTDIR = 'dist'
@invoke.task
def build_clean(context):
"Remove the build directory"
#pylint: disable=unused-argument
rmrf(BUILDDIR)
namespace_clean.add_task(build_clean, 'build')
@invoke.task
def dist_clean(context):
"Remove the dist directory"
#pylint: disable=unused-argument
rmrf(DISTDIR)
namespace_clean.add_task(dist_clean, 'dist')
@invoke.task
def eggs_clean(context):
"Remove egg directories"
#pylint: disable=unused-argument
dirs = set()
dirs.add('.eggs')
for name in os.listdir(os.curdir):
if name.endswith('.egg-info'):
dirs.add(name)
if name.endswith('.egg'):
dirs.add(name)
rmrf(dirs)
namespace_clean.add_task(eggs_clean, 'eggs')
@invoke.task
def bytecode_clean(context):
"Remove __pycache__ directories and *.pyc files"
#pylint: disable=unused-argument
dirs = set()
for root, dirnames, files in os.walk(os.curdir):
if '__pycache__' in dirnames:
dirs.add(os.path.join(root, '__pycache__'))
for file in files:
if file.endswith(".pyc"):
dirs.add(os.path.join(root,file))
print("Removing __pycache__ directories and .pyc files")
rmrf(dirs, verbose=False)
namespace_clean.add_task(bytecode_clean, 'bytecode')
#
# make a dummy clean task which runs all the tasks in the clean namespace
clean_tasks = list(namespace_clean.tasks.values())
@invoke.task(pre=list(namespace_clean.tasks.values()), default=True)
def clean_all(context):
"Run all clean tasks"
#pylint: disable=unused-argument
pass
namespace_clean.add_task(clean_all, 'all')
@invoke.task(pre=[clean_all])
def sdist(context):
"Create a source distribution"
context.run('python setup.py sdist')
namespace.add_task(sdist)
@invoke.task(pre=[clean_all])
def wheel(context):
"Build a wheel distribution"
context.run('python setup.py bdist_wheel')
namespace.add_task(wheel)
#
# these two tasks are commented out so you don't
# accidentally run them and upload this template to pypi
#
# @invoke.task(pre=[sdist, wheel])
# def pypi(context):
# "Build and upload a distribution to pypi"
# context.run('twine upload dist/*')
# namespace.add_task(pypi)
# @invoke.task(pre=[sdist, wheel])
# def pypi_test(context):
# "Build and upload a distribution to https://test.pypi.org"
# context.run('twine upload --repository-url https://test.pypi.org/legacy/ dist/*')
# namespace.add_task(pypi_test)
| 26.277108
| 87
| 0.693489
|
7d491b1fa20b94149c174f35c76be256b737e3d7
| 7,999
|
py
|
Python
|
pollbot/telegram/keyboard/vote.py
|
tobikrs/ultimate-poll-bot
|
eaa190ba1fec852c1a7d12c8a4633245f00c435f
|
[
"MIT"
] | 1
|
2020-03-22T05:49:44.000Z
|
2020-03-22T05:49:44.000Z
|
pollbot/telegram/keyboard/vote.py
|
RuslanBitcash/ultimate-poll-bot
|
33bc71b56f79453359043bd0e778cd153d3a83a3
|
[
"MIT"
] | null | null | null |
pollbot/telegram/keyboard/vote.py
|
RuslanBitcash/ultimate-poll-bot
|
33bc71b56f79453359043bd0e778cd153d3a83a3
|
[
"MIT"
] | 1
|
2021-01-29T17:10:11.000Z
|
2021-01-29T17:10:11.000Z
|
"""Reply keyboards."""
import string
from telegram import (
InlineKeyboardMarkup,
InlineKeyboardButton,
)
from sqlalchemy.orm import joinedload
from pollbot.models import Vote
from pollbot.i18n import i18n
from pollbot.config import config
from pollbot.helper import poll_allows_cumulative_votes
from pollbot.db import get_session
from pollbot.helper.enums import (
CallbackType,
CallbackResult,
OptionSorting,
PollType,
StartAction,
)
from pollbot.telegram.keyboard import get_start_button_payload
from pollbot.helper.option import get_sorted_options
from pollbot.display.poll.indices import get_option_indices
from .management import get_back_to_management_button
IGNORE_PAYLOAD = f'{CallbackType.ignore.value}:0:0'
def get_vote_keyboard(poll, user, show_back=False, summary=False):
"""Get a plain vote keyboard."""
buttons = []
# If the poll is not closed yet, add the vote buttons and the button
# to add new options for new users (if enabled)
if not poll.closed:
buttons = get_vote_buttons(poll, user, show_back)
if poll.allow_new_options:
bot_name = config['telegram']['bot_name']
payload = get_start_button_payload(poll, StartAction.new_option)
url = f'http://t.me/{bot_name}?start={payload}'
buttons.append([InlineKeyboardButton(
i18n.t('keyboard.new_option', locale=poll.locale), url=url)])
    # Add a button for showing the summary, if the poll is too long for a single message
if summary:
payload = get_start_button_payload(poll, StartAction.show_results)
bot_name = config['telegram']['bot_name']
url = f'http://t.me/{bot_name}?start={payload}'
row = [InlineKeyboardButton(i18n.t('keyboard.show_results', locale=poll.locale), url=url)]
buttons.append(row)
# Add a button to go back to the management interface (admin overview)
if show_back:
buttons.append([get_back_to_management_button(poll)])
return InlineKeyboardMarkup(buttons)
def get_vote_buttons(poll, user=None, show_back=False):
"""Get the keyboard for actual voting."""
locale = poll.locale
if poll_allows_cumulative_votes(poll):
buttons = get_cumulative_buttons(poll)
elif poll.poll_type == PollType.doodle.name:
buttons = get_doodle_buttons(poll)
elif poll.is_priority():
buttons = get_priority_buttons(poll, user)
else:
buttons = get_normal_buttons(poll)
return buttons
def get_normal_buttons(poll):
"""Get the normal keyboard with one vote button per option."""
buttons = []
vote_button_type = CallbackType.vote.value
options = poll.options
if poll.option_sorting == OptionSorting.option_name.name:
options = get_sorted_options(poll)
for option in options:
option_name = option.get_formatted_name()
result = CallbackResult.vote.value
payload = f'{vote_button_type}:{option.id}:{result}'
if poll.should_show_result() and poll.show_option_votes:
text = i18n.t('keyboard.vote_with_count',
option_name=option_name,
count=len(option.votes),
locale=poll.locale)
else:
text = option_name
buttons.append([InlineKeyboardButton(text, callback_data=payload)])
return buttons
def get_cumulative_buttons(poll):
"""Get the cumulative keyboard with two buttons per option."""
vote_button_type = CallbackType.vote.value
vote_yes = CallbackResult.yes.value
vote_no = CallbackResult.no.value
options = poll.options
if poll.option_sorting == OptionSorting.option_name:
options = get_sorted_options(poll)
buttons = []
for option in options:
option_name = option.get_formatted_name()
yes_payload = f'{vote_button_type}:{option.id}:{vote_yes}'
no_payload = f'{vote_button_type}:{option.id}:{vote_no}'
buttons.append([
InlineKeyboardButton(f'- {option_name}', callback_data=no_payload),
InlineKeyboardButton(f'+ {option_name}', callback_data=yes_payload),
])
return buttons
def get_priority_buttons(poll, user):
"""Create the keyboard for priority poll. Only show the deeplink, if not in a direct conversation."""
if user is None:
bot_name = config['telegram']['bot_name']
payload = get_start_button_payload(poll, StartAction.vote)
url = f'http://t.me/{bot_name}?start={payload}'
buttons = [[InlineKeyboardButton(
i18n.t('keyboard.vote', locale=poll.locale), url=url)]]
return buttons
buttons = []
options = get_sorted_options(poll)
vote_button_type = CallbackType.vote.value
vote_increase = CallbackResult.increase_priority.value
vote_decrease = CallbackResult.decrease_priority.value
session = get_session()
votes = session.query(Vote) \
.filter(Vote.poll == poll) \
.filter(Vote.user == user) \
.order_by(Vote.priority.asc()) \
.options(joinedload(Vote.poll_option)) \
.all()
indices = get_option_indices(options)
for index, vote in enumerate(votes):
option = vote.poll_option
if not poll.compact_buttons:
name_row = [
InlineKeyboardButton(
f"{option.name}",
callback_data=IGNORE_PAYLOAD
)
]
buttons.append(name_row)
name_hint_payload = f'{CallbackType.show_option_name.value}:{poll.id}:{option.id}'
increase_payload = f'{vote_button_type}:{option.id}:{vote_increase}'
decrease_payload = f'{vote_button_type}:{option.id}:{vote_decrease}'
ignore_payload = f'{CallbackType.ignore.value}:0:0'
vote_row = []
if poll.compact_buttons:
vote_row.append(InlineKeyboardButton(f"{indices[index]})", callback_data=name_hint_payload))
if index != len(votes) - 1:
vote_row.append(InlineKeyboardButton('▼', callback_data=decrease_payload))
else:
vote_row.append(InlineKeyboardButton(' ', callback_data=ignore_payload))
if index != 0:
vote_row.append(InlineKeyboardButton('▲', callback_data=increase_payload))
else:
vote_row.append(InlineKeyboardButton(' ', callback_data=ignore_payload))
buttons.append(vote_row)
return buttons
def get_doodle_buttons(poll):
"""Get the doodle keyboard with yes, maybe and no button per option."""
show_option_name = CallbackType.show_option_name.value
vote_button_type = CallbackType.vote.value
vote_yes = CallbackResult.yes.value
vote_maybe = CallbackResult.maybe.value
vote_no = CallbackResult.no.value
options = get_sorted_options(poll)
buttons = []
indices = get_option_indices(options)
for index, option in enumerate(options):
name_hint_payload = f'{show_option_name}:{poll.id}:{option.id}'
yes_payload = f'{vote_button_type}:{option.id}:{vote_yes}'
maybe_payload = f'{vote_button_type}:{option.id}:{vote_maybe}'
no_payload = f'{vote_button_type}:{option.id}:{vote_no}'
        # If we don't have the compact button view, display the option name on its own button row
if not poll.compact_buttons:
option_row = [InlineKeyboardButton(option.get_formatted_name(),
callback_data=name_hint_payload)]
buttons.append(option_row)
option_row = []
else:
option_row = [InlineKeyboardButton(f'{indices[index]})', callback_data=name_hint_payload)]
vote_row = [
InlineKeyboardButton('✅', callback_data=yes_payload),
InlineKeyboardButton('❔', callback_data=maybe_payload),
InlineKeyboardButton('❌', callback_data=no_payload),
]
buttons.append(option_row + vote_row)
return buttons
| 36.031532
| 105
| 0.668709
|
40b465b0e675e324c26e3161f146d6ee4874a930
| 547
|
py
|
Python
|
segme/backbone/core/__init__.py
|
shkarupa-alex/segme
|
d5bc0043f9e709c8ccaf8949d662bc6fd6144006
|
[
"MIT"
] | 2
|
2021-05-25T18:53:00.000Z
|
2021-05-26T12:11:41.000Z
|
segme/backbone/core/__init__.py
|
shkarupa-alex/segme
|
d5bc0043f9e709c8ccaf8949d662bc6fd6144006
|
[
"MIT"
] | null | null | null |
segme/backbone/core/__init__.py
|
shkarupa-alex/segme
|
d5bc0043f9e709c8ccaf8949d662bc6fd6144006
|
[
"MIT"
] | 2
|
2021-11-21T02:39:37.000Z
|
2021-12-08T07:26:56.000Z
|
from .densenet import DenseNet121, DenseNet169, DenseNet201
from .efficientnet import EfficientNetB0, EfficientNetB1, EfficientNetB2
from .efficientnet import EfficientNetB3, EfficientNetB4, EfficientNetB5
from .efficientnet import EfficientNetB6, EfficientNetB7
from .inception import InceptionV3, InceptionResNetV2, Xception
from .mobilenet import MobileNet, MobileNetV2, MobileNetV3Small, MobileNetV3Large
from .resnet import ResNet50, ResNet101, ResNet152
from .resnet import ResNet50V2, ResNet101V2, ResNet152V2
from .vgg import VGG16, VGG19
| 54.7
| 81
| 0.855576
|
34760c9533ec7c3899c9a3fbbbbdf7a39876e35a
| 121,562
|
py
|
Python
|
Lib/test/test_io.py
|
kimixuchen/Python-2.7.11_dict
|
7759702954316800de26705d17e80ba1126157eb
|
[
"PSF-2.0"
] | 2
|
2018-03-29T10:56:36.000Z
|
2020-12-12T15:28:14.000Z
|
Lib/test/test_io.py
|
ilanschnell/python27
|
82bc2eb71d4b3864998cca059ed407e66c99930c
|
[
"PSF-2.0"
] | 1
|
2016-05-18T01:27:28.000Z
|
2016-05-18T05:00:36.000Z
|
Lib/test/test_io.py
|
ilanschnell/python27
|
82bc2eb71d4b3864998cca059ed407e66c99930c
|
[
"PSF-2.0"
] | 3
|
2019-05-13T09:41:33.000Z
|
2021-04-09T12:12:38.000Z
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import time
import array
import random
import unittest
import weakref
import warnings
import abc
import signal
import errno
from itertools import cycle, count
from collections import deque
from UserList import UserList
from test import test_support as support
import contextlib
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
try:
import fcntl
except ImportError:
fcntl = None
__metaclass__ = type
bytes = support.py3k_bytes
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with io.open(__file__, "r", encoding="latin1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return MockRawIO.write(self, b) * 2
def read(self, n=None):
return MockRawIO.read(self, n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
MockRawIO.readinto(self, buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise IOError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super(MockFileIO, self).__init__(data)
def read(self, n=None):
res = super(MockFileIO, self).read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super(MockFileIO, self).readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = bytearray(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(data, b" worl")
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(bytearray(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(bytearray()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
self.assertEqual(f.seek(self.LARGE), self.LARGE)
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(IOError, fp.read)
self.assertRaises(IOError, fp.readline)
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(IOError, fp.write, b"blah")
self.assertRaises(IOError, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(IOError, fp.write, "blah")
self.assertRaises(IOError, fp.writelines, ["blah\n"])
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
        # On Windows and Mac OS X this test consumes large resources; it takes
        # a long time to build the >2GB file and takes >2GB of disk space,
        # therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1 // 0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1 // 0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super(MyFileIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyFileIO, self).close()
def flush(self):
record.append(3)
super(MyFileIO, self).flush()
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super(MyIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super(MyIO, self).close()
def flush(self):
record.append(self.on_flush)
super(MyIO, self).flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array(b'i', range(10))
n = len(a.tostring())
with self.open(support.TESTFN, "wb", 0) as f:
self.assertEqual(f.write(a), n)
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.write(a), n)
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2GB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite failed flush
# and that flush() is called before file closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise IOError()
f.flush = bad_flush
self.assertRaises(IOError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(support.TESTFN, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(support.TESTFN, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default RawIOBase.read() implementation (which calls
# readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with warnings.catch_warnings(record=True) as recorded:
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
support.gc_collect()
self.assertEqual(recorded, [])
def test_invalid_newline(self):
with warnings.catch_warnings(record=True) as recorded:
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
support.gc_collect()
self.assertEqual(recorded, [])
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
test_array_writes = unittest.skip(
"len(array.array) returns number of elements rather than bytelength"
)(IOTest.test_array_writes)
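# A minimal sketch (not part of the original test suite) of the dual-implementation
# pattern described in the "ATTENTION TEST WRITERS" header comment: a base class
# refers to the type under test only through attributes, and thin subclasses bind
# those attributes to the C (io) and pure-Python (_pyio) implementations.  In the
# real tests the attributes are supplied elsewhere in this file; the explicit class
# attributes below are purely illustrative.
class ExampleSketchTest:
    def test_getvalue_roundtrip(self):
        buf = self.BytesIO()  # bound by the concrete subclasses below
        buf.write(b"data")
        self.assertEqual(buf.getvalue(), b"data")
class CExampleSketchTest(ExampleSketchTest, unittest.TestCase):
    BytesIO = io.BytesIO      # C implementation
class PyExampleSketchTest(ExampleSketchTest, unittest.TestCase):
    BytesIO = pyio.BytesIO    # pure-Python implementation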
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
@unittest.skip('test having existential crisis')
def test_no_fileno(self):
# XXX will we always have fileno() function? If so, kill
# this test. Else, write it.
pass
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 3)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super(MyBufferedIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyBufferedIO, self).close()
def flush(self):
record.append(3)
super(MyBufferedIO, self).flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
writable = bufio.writable()
del bufio
support.gc_collect()
if writable:
self.assertEqual(record, [1, 2, 3])
else:
self.assertEqual(record, [1, 2])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__name__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name=u'dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
def test_flush_error_on_close(self):
# Test that buffered file is closed despite failed flush
# and that flush() is called before file closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise IOError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(IOError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise IOError('flush')
def bad_close():
raise IOError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(IOError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises((AttributeError, TypeError)):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
# Invalid args
self.assertRaises(ValueError, bufio.read1, -1)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading differently, so
        # checking this is not so easy.
self.assertRaises(IOError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
        # The Python version has __del__, so it ends up in gc.garbage instead
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents,
b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
self.assertRaises(IOError, bufio.write, b"abcdef")
def test_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise IOError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(IOError, b.close) # exception not swallowed
self.assertTrue(b.closed)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
        # The Python version has __del__, so it ends up in gc.garbage instead
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(IOError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(IOError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
def test_readinto(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = bytearray(5)
self.assertEqual(pair.readinto(data), 5)
self.assertEqual(data, b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
pair.write(b"def")
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
            # Overwriting should rewind the raw stream if it needs to
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
class CBufferedRandomTest(CBufferedReaderTest, CBufferedWriterTest,
BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are space-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == '.':
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin1", newline="\r\n")
self.assertEqual(t.encoding, "latin1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf8", line_buffering=True)
self.assertEqual(t.encoding, "utf8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO())
self.assertEqual(t.read(0), u'')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with support.check_py3k_warnings():
self.TextIOWrapper(b, encoding="hex_codec")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=u'dummy' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf8")
self.assertEqual(t.encoding, "utf8")
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(True)),
("", testdata.decode("ascii").splitlines(True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super(MyTextIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyTextIO, self).close()
def flush(self):
record.append(3)
super(MyTextIO, self).flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin1", "utf8" :# , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(IOError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
f = self.open(support.TESTFN, "wb")
f.write(line*2)
f.close()
f = self.open(support.TESTFN, "r", encoding="utf-8")
s = f.read(prefix_size)
self.assertEqual(s, prefix.decode("ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
f = self.open(support.TESTFN, "wb")
f.write(data)
f.close()
f = self.open(support.TESTFN, "r", encoding="utf-8")
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
#Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(IOError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with support.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
# Test that text file is closed despite failed flush
# and that flush() is called before file closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise IOError()
txt.flush = bad_flush
self.assertRaises(IOError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises((AttributeError, TypeError)):
txt.buffer = buf
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
class NonbytesStream(self.StringIO):
read1 = self.StringIO.read
class NonbytesStream(self.StringIO):
read1 = self.StringIO.read
t = self.TextIOWrapper(NonbytesStream('a'))
with self.maybeRaises(TypeError):
t.read(1)
t = self.TextIOWrapper(NonbytesStream('a'))
with self.maybeRaises(TypeError):
t.readline()
t = self.TextIOWrapper(NonbytesStream('a'))
self.assertEqual(t.read(), u'a')
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri_codec")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri_codec")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
with support.check_py3k_warnings():
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.read(1)
with support.check_py3k_warnings():
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.readline()
with support.check_py3k_warnings():
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.read()
#else:
#t = _make_illegal_wrapper()
#self.assertRaises(TypeError, t.read, 1)
#t = _make_illegal_wrapper()
#self.assertRaises(TypeError, t.readline)
#t = _make_illegal_wrapper()
#self.assertRaises(TypeError, t.read)
class CTextIOWrapperTest(TextIOWrapperTest):
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.read)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
maybeRaises = unittest.TestCase.assertRaises
class PyTextIOWrapperTest(TextIOWrapperTest):
@contextlib.contextmanager
def maybeRaises(self, *args, **kwds):
yield
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(b))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
self.assertRaises(TypeError, self.BlockingIOError)
self.assertRaises(TypeError, self.BlockingIOError, 1)
self.assertRaises(TypeError, self.BlockingIOError, 1, 2, 3, 4)
self.assertRaises(TypeError, self.BlockingIOError, 1, "", None)
b = self.BlockingIOError(1, "")
self.assertEqual(b.characters_written, 0)
class C(unicode):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
def _set_non_blocking(self, fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
self.assertNotEqual(flags, -1)
res = fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
self.assertEqual(res, 0)
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
self._set_non_blocking(r)
self._set_non_blocking(w)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
class CMiscIOTest(MiscIOTest):
io = io
shutdown_error = "RuntimeError: could not find io module state"
class PyMiscIOTest(MiscIOTest):
io = pyio
shutdown_error = "LookupError: unknown encoding: ascii"
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1 // 0
@unittest.skipUnless(threading, 'Threading required for this test.')
@unittest.skipIf(sys.platform in ('freebsd5', 'freebsd6', 'freebsd7'),
'issue #12429: skip test on FreeBSD <= 7')
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
signal.alarm(1)
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
try:
with self.assertRaises(ZeroDivisionError):
wio.write(item * (support.PIPE_MAX_SIZE // len(item) + 1))
finally:
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1//0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
rio.close()
os.close(w)
os.close(r)
def test_interrupterd_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupterd_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = [None]
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
error[0] = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
self.assertEqual(N, wio.write(item * N))
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error[0])
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
def test_interrupterd_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupterd_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def test_main():
tests = (CIOTest, PyIOTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockRawIOWithoutRead)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = dict((name, getattr(io, name)) for name in all_members)
py_io_ns = dict((name, getattr(pyio, name)) for name in all_members)
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
| 36.982659
| 89
| 0.573156
|
d644e7a75a3f6b74ae886c98ad37524cdd96d5b7
| 941
|
py
|
Python
|
examples/sharepoint/folders/download_folder.py
|
wreiner/Office365-REST-Python-Client
|
476bbce4f5928a140b4f5d33475d0ac9b0783530
|
[
"MIT"
] | null | null | null |
examples/sharepoint/folders/download_folder.py
|
wreiner/Office365-REST-Python-Client
|
476bbce4f5928a140b4f5d33475d0ac9b0783530
|
[
"MIT"
] | null | null | null |
examples/sharepoint/folders/download_folder.py
|
wreiner/Office365-REST-Python-Client
|
476bbce4f5928a140b4f5d33475d0ac9b0783530
|
[
"MIT"
] | null | null | null |
import os
import tempfile
from settings import settings
from office365.runtime.auth.client_credential import ClientCredential
from office365.sharepoint.client_context import ClientContext
ctx = ClientContext(settings['url']).with_credentials(
ClientCredential(settings['client_credentials']['client_id'],
settings['client_credentials']['client_secret']))
# retrieve files from library
files = ctx.web.lists.get_by_title("Documents").rootFolder.files
ctx.load(files)
ctx.execute_query()
download_path = tempfile.mkdtemp()
for file in files:
print("Downloading file: {0} ...".format(file.properties["ServerRelativeUrl"]))
download_file_name = os.path.join(download_path, os.path.basename(file.properties["Name"]))
with open(download_file_name, "wb") as local_file:
file.download(local_file)
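        # download() only queues the request on the context; the execute_query() call below performs the actual transfer into local_file (behaviour assumed from this library's deferred-query pattern)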
ctx.execute_query()
print("[Ok] file has been downloaded: {0}".format(download_file_name))
| 37.64
| 95
| 0.752391
|
6357a3cc4d68ce1722eefb84898797ca19d167f9
| 7,539
|
py
|
Python
|
tods/tests/feature_analysis/test_DatetimeFeatureExtract.py
|
zylMozart/tods
|
a4c0192b43b438276d2228306c0e9c896d9e3809
|
[
"Apache-2.0"
] | null | null | null |
tods/tests/feature_analysis/test_DatetimeFeatureExtract.py
|
zylMozart/tods
|
a4c0192b43b438276d2228306c0e9c896d9e3809
|
[
"Apache-2.0"
] | null | null | null |
tods/tests/feature_analysis/test_DatetimeFeatureExtract.py
|
zylMozart/tods
|
a4c0192b43b438276d2228306c0e9c896d9e3809
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from d3m import container, utils
from d3m.metadata import base as metadata_base
from tods.feature_analysis import DatetimeFeatureExtract
class DatetimeFeatureExtractTestCase(unittest.TestCase):
def test_basic(self):
self.maxDiff = None
main = container.DataFrame(
{
"timestamp": [
"2021-02-17 13:46:24",
"2021-02-17 15:45:52",
"2021-02-17 16:02:56",
"2021-02-17 18:19:28",
"2021-02-17 23:01:04",
"2021-02-18 02:30:08",
"2021-02-18 03:42:40",
"2021-02-18 04:59:28",
"2021-02-18 05:25:04",
"2021-02-18 06:03:28",
]
},
columns=["timestamp"],
generate_metadata=True,
)
self.assertEqual(
utils.to_json_structure(main.metadata.to_internal_simple_structure()),
[
{
"selector": [],
"metadata": {
"schema": "https://metadata.datadrivendiscovery.org/schemas/v0/container.json",
"structural_type": "d3m.container.pandas.DataFrame",
"semantic_types": [
"https://metadata.datadrivendiscovery.org/types/Table"
],
"dimension": {
"name": "rows",
"semantic_types": [
"https://metadata.datadrivendiscovery.org/types/TabularRow"
],
"length": 10,
},
},
},
{
"selector": ["__ALL_ELEMENTS__"],
"metadata": {
"dimension": {
"name": "columns",
"semantic_types": [
"https://metadata.datadrivendiscovery.org/types/TabularColumn"
],
"length": 1,
}
},
},
{
"selector": ["__ALL_ELEMENTS__", 0],
"metadata": {"structural_type": "str", "name": "timestamp"},
},
],
)
hyperparams_class = (
DatetimeFeatureExtract.DatetimeFeatureExtractPrimitive.metadata.get_hyperparams()
)
hp = hyperparams_class.defaults().replace(
{
"use_columns": [0],
"use_semantic_types": False,
# 'window_size':2
}
)
primitive = DatetimeFeatureExtract.DatetimeFeatureExtractPrimitive(
hyperparams=hp
)
output_main = primitive._produce(inputs=main).value
print(output_main)
expected_output = container.DataFrame(
{
"year": [2021, 2021, 2021, 2021, 2021, 2021, 2021, 2021, 2021, 2021],
"month": [2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
"day": [17, 17, 17, 17, 17, 18, 18, 18, 18, 18],
"weekday": [2, 2, 2, 2, 2, 3, 3, 3, 3, 3],
"hour": [13, 15, 16, 18, 23, 2, 3, 4, 5, 6],
},
columns=["year", "month", "day", "weekday", "hour"],
)
self.assertEqual(
output_main[["year", "month", "day", "weekday", "hour"]].values.tolist(),
expected_output[
["year", "month", "day", "weekday", "hour"]
].values.tolist(),
)
self.assertEqual(
utils.to_json_structure(
output_main.metadata.to_internal_simple_structure()
),
[
{
"selector": [],
"metadata": {
"schema": "https://metadata.datadrivendiscovery.org/schemas/v0/container.json",
"structural_type": "d3m.container.pandas.DataFrame",
"semantic_types": [
"https://metadata.datadrivendiscovery.org/types/Table"
],
"dimension": {
"name": "rows",
"semantic_types": [
"https://metadata.datadrivendiscovery.org/types/TabularRow"
],
"length": 10,
},
},
},
{
"selector": ["__ALL_ELEMENTS__"],
"metadata": {
"dimension": {
"name": "columns",
"semantic_types": [
"https://metadata.datadrivendiscovery.org/types/TabularColumn"
],
"length": 6,
}
},
},
{
"selector": ["__ALL_ELEMENTS__", 0],
"metadata": {"structural_type": "str", "name": "timestamp"},
},
{
"selector": ["__ALL_ELEMENTS__", 1],
"metadata": {
"name": "year",
"structural_type": "numpy.int64",
"semantic_types": [
"https://metadata.datadrivendiscovery.org/types/Attribute"
],
},
},
{
"selector": ["__ALL_ELEMENTS__", 2],
"metadata": {
"name": "month",
"structural_type": "numpy.int64",
"semantic_types": [
"https://metadata.datadrivendiscovery.org/types/Attribute"
],
},
},
{
"selector": ["__ALL_ELEMENTS__", 3],
"metadata": {
"name": "day",
"structural_type": "numpy.int64",
"semantic_types": [
"https://metadata.datadrivendiscovery.org/types/Attribute"
],
},
},
{
"selector": ["__ALL_ELEMENTS__", 4],
"metadata": {
"name": "weekday",
"structural_type": "numpy.int64",
"semantic_types": [
"https://metadata.datadrivendiscovery.org/types/Attribute"
],
},
},
{
"selector": ["__ALL_ELEMENTS__", 5],
"metadata": {
"name": "hour",
"structural_type": "numpy.int64",
"semantic_types": [
"https://metadata.datadrivendiscovery.org/types/Attribute"
],
},
},
],
)
params = primitive.get_params()
primitive.set_params(params=params)
if __name__ == "__main__":
unittest.main()
| 37.321782
| 103
| 0.37631
|
e81596dfe5c0dec52ee30c1ee187de1a302e3c53
| 8,372
|
py
|
Python
|
Old_20190104/Multispec_server/multispec_runner_ctypes.py
|
ColdMatter/PhotonBEC
|
c6bcf9bdefd267c8adde0d299cf5920b010c5022
|
[
"MIT"
] | null | null | null |
Old_20190104/Multispec_server/multispec_runner_ctypes.py
|
ColdMatter/PhotonBEC
|
c6bcf9bdefd267c8adde0d299cf5920b010c5022
|
[
"MIT"
] | null | null | null |
Old_20190104/Multispec_server/multispec_runner_ctypes.py
|
ColdMatter/PhotonBEC
|
c6bcf9bdefd267c8adde0d299cf5920b010c5022
|
[
"MIT"
] | null | null | null |
#NOTE: Uses pi_controller class, which is currently based in "CavityLock" folder, but will be moved to PythonPackages
#This class re-uses a lot of code from /Control/CavityLock/stabiliser_class.py
import sys
from socket import gethostname
if gethostname()=="ph-rnyman-01":
sys.path.append("D:\\Control\\PythonPackages\\")
elif gethostname()=="ph-photonbec2":
sys.path.append("Y:\\Control\\PythonPackages\\")
elif gethostname()=="ph-photonbec3":
sys.path.append("D:\\Control\\PythonPackages\\")
from hene_utils import *
from pbec_experiment_multispec import *
import pbec_experiment_multispec
import pbec_analysis_multispec
from pi_controller import PI_control
import threading
import traceback
import ctypes
from ctypes import pointer, c_char, sizeof, c_ushort
from ctypes import c_bool, c_short, c_uint, c_int8
from ctypes import c_double, c_int, Structure, c_uint32, c_float
from time import sleep
from avantes_datatypes import DarkCorrectionType, SmoothingType, TriggerType, ControlSettingsType, avs_id, detector_type, meas_config_type
#----------------------------------
#COMPUTER SPECIFIC SETTINGS HERE
if gethostname()=="ph-photonbec":
default_spec_int_time = 20
default_spec_nAverage = 1
default_lamb_range = (540,610) #Restrict range analysed, which might make acquisition faster
elif gethostname()=="ph-photonbec2":
default_spec_int_time = 40
default_spec_nAverage = 1
default_lamb_range = (555,585) #Restrict range analysed, which might make acquisition faster
elif gethostname()=="ph-photonbec3":
default_lamb_range = (540,600) #Restrict range analysed, which might make acquisition faster
only_one_spec = True
if only_one_spec:
default_spec_n_averages = [1]
default_no_spectrometers = 1
spectrometer_list = ['newbie'] #These have to be correct and in the order that avs_spectro deems fit to open them.
default_spec_int_times = [2]
else:
default_spec_n_averages = [1,1,1]
default_no_spectrometers = 3
spectrometer_list = ['grey','newbie','black'] #These have to be correct and in the order that avs_spectro deems fit to open them.
default_spec_int_times = [3,10,3]
#----------------------------------
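# Background acquisition thread: while not paused it repeatedly reads every
# spectrometer owned by its parent MultiSpectrometers object and appends a
# timestamped entry to the parent's rolling results buffer.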
class _MultiSpectrometerThread(threading.Thread):
def __init__(self,parent):
threading.Thread.__init__(self)
self.parent=parent
self.daemon = True
self.running = False
self.paused =False
self.parent.results=[]
def run(self):
self.running = True
parent = self.parent
spectros = parent.spectros #assumes spectrometer is setup
fail_num = 0
#try:
while self.running:
#why the double loop? so it can be paused and unpaused
time.sleep(1e-3) #when paused, while loop not going mental
while not self.paused:
#insert the controller actions here
try:
                    temp_spectrum = []
                    for spectro in parent.spectros:
                        spectro.get_data()
                        spectro.ts = pbec_analysis.make_timestamp(3)
                        temp_spectrum.append(spectro.spec)
                    parent.spectrum = temp_spectrum #Split into two lines so parent.spectrum can be accessed by the graph
                    if parent.spectrum == None:
                        print('get_data() == None in spectrometer_stabiliser_class.py')
                    #------------------
                    parent.ts = pbec_analysis.make_timestamp(3) #Don't update the ts if acquisition fails!
fail_num = 0
except IOError:
if fail_num < 4:
time.sleep(1)
fail_num+=1
else:
self.parent.start_acquisition()
fail_num=0
except Exception as e:
traceback.print_exc()
self.parent.error_message = e
print "Spectrometer acquisition error. Re-using previous data"
#self.parent.stop_acquisition()
#self.parent.start_acquisition()
#
time.sleep(parent.bandwidth_throttle)
#Update spectrometer integration time if need be
#HERE BE BUGS: TEST PROPERLY PLEASE
#Gather the outputs
r = {"ts":parent.ts}
if parent.print_frequency > 0:
if len(parent.results) % parent.print_frequency == 0:
print r["ts"]
#Now output a voltage from PI_controller
parent.results.append(r)
#Turn this into a reasonable-length buffer, rather than a dump
buffer_len=2500 #minimum 2000 for GUI plot
if len(parent.results)>buffer_len:
del parent.results[0]
#finally:
# spectro.close()
print("Finished\n")
class ctype_Spectrometer():
    def __init__(self, parent, index=0):
        self.dll = parent.dll  #keep a reference to the DLL so the other methods can call it
        self.serial = parent.serials[index]
        self.handle = self.dll.AVS_Activate(pointer(parent.avs_id_list[index]))
        num_pixels_temp = c_ushort()
        self.dll.AVS_GetNumPixels(self.handle, pointer(num_pixels_temp))
self.num_pixels = num_pixels_temp.value
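        # Fixed-length C double buffers; the DLL writes the wavelength axis and
        # each newly acquired spectrum into these in place.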
self.lamb_temp = (c_double*self.num_pixels)()
self.spec_temp = (c_double*self.num_pixels)()
self.time_label = c_uint32()
        self.dll.AVS_GetLambda(self.handle, pointer(self.lamb_temp))
self.lamb = [x for x in self.lamb_temp] #Make a more pythonic array
    def start_measure(self, int_time, n_averages):
#SET UP MEASUREMENT CONFIGURATION
self.measureConfig = meas_config_type()
ctypes.memset(ctypes.addressof(self.measureConfig), 0, ctypes.sizeof(self.measureConfig))
startPixel = c_ushort(0)
        stopPixel = c_ushort(self.num_pixels - 1)
intTime = c_float(int_time)
nAverages = c_uint32(n_averages)
self.measureConfig.m_StartPixel = startPixel
self.measureConfig.m_StopPixel = stopPixel
self.measureConfig.m_IntegrationTime = intTime
self.measureConfig.m_IntegrationDelay = 1
self.measureConfig.m_NrAverages = nAverages
        n_measure = c_short(-1) #Number of measurements to make. -1 means infinity.
        self.err_prepare = self.dll.AVS_PrepareMeasure(self.handle, pointer(self.measureConfig))
        self.err_measure = self.dll.AVS_Measure(self.handle, None, n_measure)
        sleep(0.5)
        self.err_poll = self.dll.AVS_PollScan(self.handle)
def get_data(self):
        err_data = self.dll.AVS_GetScopeData(self.handle, pointer(self.time_label), pointer(self.spec_temp))
self.spec = [x for x in self.spec_temp]
def stop_measure(self):
        self.dll.AVS_StopMeasure(self.handle)
class MultiSpectrometers():
def __init__(self, do_setup=True):
self.error_message = None
self.bandwidth_throttle=0.001 #slows down acquisition so other devices can use USB
self.print_frequency =0#for diagnostic purposes
self.spec_int_times = default_spec_int_times
self.spec_n_averages = default_spec_n_averages
self.lamb_range = default_lamb_range
self.min_spec_int_times = [min_int_time_spectrometerLabel_map[name] for name in spectrometer_list]
if do_setup:
self.dll = ctypes.WinDLL("D://Control/spectrometer/AS5216.dll")
self.dll.AVS_Init(0)
self.num_spectrometers = self.dll.AVS_GetNrOfDevices()
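            # Fetch an identifier record for each attached device so that each one can be activated individually.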
            n_devices = self.num_spectrometers
            self.avs_id_list = (avs_id * n_devices)()
            ctypes.memset(ctypes.addressof(self.avs_id_list), 0, ctypes.sizeof(self.avs_id_list))
            size = c_uint32(sizeof(avs_id))
            total_size = c_uint32(sizeof(avs_id * n_devices))
            print n_devices, " devices found"
            self.dll.AVS_GetList(c_uint(n_devices*size.value), pointer(total_size), pointer(self.avs_id_list))
self.serials = []
self.device_mapping = {}
self.statuses = []
for i in range(n_devices):
                self.serials.append(self.avs_id_list[i].m_aSerialId)
                self.device_mapping[self.avs_id_list[i].m_aSerialId] = i
                self.statuses.append(self.avs_id_list[i].m_Status)
#--------------------
self.results = []
#self.lamb, self.spectrum = [],[]
self.ts = None
        self.spectros = [ctype_Spectrometer(self, index=i) for i in range(self.num_spectrometers)]
buffer_length=10 #
print "Initialising thred"
self.initialise_thread()
def initialise_thread(self):
self.thread = _MultiSpectrometerThread(self) #don't start the thread until you want to acquire
print "Initial"
self.thread.paused=True
print "Paused"
self.thread.start()
print "Started"
def start_acquisition(self):
#self.spectro.setup() #This step is really slow
try:
for i,spectro in enumerate(self.spectros):
spectro.start_measure(self.spec_int_times[i], self.spec_n_averages[i])
except IOError:
self.stop_acquisition()
self.start_acquisition()
#self.lamb = copy(self.spectros[0].lamb)
self.thread.paused=False
def stop_acquisition(self):
self.thread.paused=True
time.sleep(0.5)#avoid race condition. Should really use mutex
print "Stopping measure"
for spectro in self.spectros:
spectro.stop_measure()
#spectros[-1].closedll()
def close_acquisition(self):
self.stop_acquisition()
self.thread.running=False
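#----------------------------------
# Hypothetical usage sketch (assumes the Avantes AS5216.dll and at least one
# spectrometer from spectrometer_list are actually connected):
#   ms = MultiSpectrometers()
#   ms.start_acquisition()
#   time.sleep(5)
#   print ms.results[-1]["ts"]  #timestamp of the most recent acquisition
#   ms.close_acquisition()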
| 35.474576
| 138
| 0.736025
|
ae765662aa969539ff9f40ac9842d80dbebfe295
| 1,523
|
py
|
Python
|
plotly/validators/layout/scene/xaxis/_titlefont.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/layout/scene/xaxis/_titlefont.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 1
|
2020-12-15T16:56:11.000Z
|
2020-12-15T16:56:11.000Z
|
plotly/validators/layout/scene/xaxis/_titlefont.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class TitlefontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self,
plotly_name='titlefont',
parent_name='layout.scene.xaxis',
**kwargs
):
super(TitlefontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop('data_class_str', 'Titlefont'),
data_docs=kwargs.pop(
'data_docs', """
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
"""
),
**kwargs
)
| 36.261905
| 73
| 0.556796
|
28dae2079243a6e4c8f0165e4f89e2000016c33a
| 851
|
py
|
Python
|
tools/pythonpkg/tests/fast/arrow/test_multiple_reads.py
|
lokax/duckdb
|
c2581dfebccaebae9468c924c2c722fcf0306944
|
[
"MIT"
] | 1
|
2021-12-13T06:00:18.000Z
|
2021-12-13T06:00:18.000Z
|
tools/pythonpkg/tests/fast/arrow/test_multiple_reads.py
|
lokax/duckdb
|
c2581dfebccaebae9468c924c2c722fcf0306944
|
[
"MIT"
] | 32
|
2021-09-24T23:50:09.000Z
|
2022-03-29T09:37:26.000Z
|
tools/pythonpkg/tests/fast/arrow/test_multiple_reads.py
|
lokax/duckdb
|
c2581dfebccaebae9468c924c2c722fcf0306944
|
[
"MIT"
] | null | null | null |
import duckdb
import os
try:
import pyarrow
import pyarrow.parquet
can_run = True
except:
can_run = False
class TestArrowReads(object):
def test_multiple_queries_same_relation(self, duckdb_cursor):
if not can_run:
return
parquet_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),'data','userdata1.parquet')
cols = 'id, first_name, last_name, email, gender, ip_address, cc, country, birthdate, salary, title, comments'
userdata_parquet_table = pyarrow.parquet.read_table(parquet_filename)
userdata_parquet_table.validate(full=True)
rel = duckdb.from_arrow(userdata_parquet_table)
assert(rel.aggregate("(avg(salary))::INT").execute().fetchone()[0] == 149005)
assert(rel.aggregate("(avg(salary))::INT").execute().fetchone()[0] == 149005)
| 38.681818
| 118
| 0.695652
|
6b8c3463e4c7a74b179359ee7df91266167de022
| 1,442
|
py
|
Python
|
Semana 6/ParcialB1.py
|
mrgatik/Estructura_de_Datos_2
|
12aad1f56c1ff17a6f9900d001d085f955a3d08c
|
[
"MIT"
] | null | null | null |
Semana 6/ParcialB1.py
|
mrgatik/Estructura_de_Datos_2
|
12aad1f56c1ff17a6f9900d001d085f955a3d08c
|
[
"MIT"
] | null | null | null |
Semana 6/ParcialB1.py
|
mrgatik/Estructura_de_Datos_2
|
12aad1f56c1ff17a6f9900d001d085f955a3d08c
|
[
"MIT"
] | null | null | null |
class Nodo:
    def __init__(self, nombre=None, apellido=None, telefono=None, direccion=None, izq=None, der=None):
        self.nombre = nombre
        self.apellido = apellido
        self.telefono = telefono
        self.direccion = direccion
        self.izq = izq
        self.der = der
    def __str__(self):
        return "%s %s %s %s" % (self.nombre, self.apellido, self.telefono, self.direccion)
class aBinarios:
    def __init__(self):
        self.raiz = None
    def agregar(self, elemento):
        # Insert a node ordered by surname (apellido): greater or equal goes right, smaller goes left
        if self.raiz == None:
            self.raiz = elemento
        else:
            aux = self.raiz
            padre = None
            while aux != None:
                padre = aux
                if elemento.apellido >= aux.apellido:
                    aux = aux.der
                else:
                    aux = aux.izq
            if elemento.apellido >= padre.apellido:
                padre.der = elemento
            else:
                padre.izq = elemento
    def nombre(self, elemento):
        # Pre-order traversal printing every contact
        if elemento != None:
            print(elemento)
            self.nombre(elemento.izq)
            self.nombre(elemento.der)
    def apellido(self, elemento):
        if elemento != None:
            print(elemento)
            self.apellido(elemento.izq)
            self.apellido(elemento.der)
    def telefono(self, elemento):
        if elemento != None:
            print(elemento)
            self.telefono(elemento.izq)
            self.telefono(elemento.der)
    def direccion(self, elemento):
        if elemento != None:
            print(elemento)
            self.direccion(elemento.izq)
            self.direccion(elemento.der)
    def getRaiz(self):
        return self.raiz
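# Hypothetical usage sketch (not part of the original exercise): build a small
# contact tree and print it; the sample names and numbers below are invented.
if __name__ == "__main__":
    arbol = aBinarios()
    arbol.agregar(Nodo("Ana", "Lopez", "0991111111", "Quito"))
    arbol.agregar(Nodo("Juan", "Castro", "0992222222", "Guayaquil"))
    arbol.agregar(Nodo("Maria", "Zambrano", "0993333333", "Cuenca"))
    arbol.nombre(arbol.getRaiz())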
| 29.428571
| 116
| 0.635922
|
6e1f4a6648b8bc9d53f27bb1d31bf8a226c6b640
| 738
|
py
|
Python
|
core/api/controller/reading_controller.py
|
rits-dajare/DaaS
|
ab8483250a1a2b2838c316ba71fdaf748130dff1
|
[
"MIT"
] | 7
|
2020-07-20T12:03:06.000Z
|
2021-05-22T15:57:18.000Z
|
core/api/controller/reading_controller.py
|
averak/DaaS
|
ab8483250a1a2b2838c316ba71fdaf748130dff1
|
[
"MIT"
] | 19
|
2020-08-28T10:23:53.000Z
|
2021-11-17T23:48:45.000Z
|
core/api/controller/reading_controller.py
|
averak/DaaS
|
ab8483250a1a2b2838c316ba71fdaf748130dff1
|
[
"MIT"
] | 2
|
2020-08-08T21:20:01.000Z
|
2021-05-20T01:37:46.000Z
|
from fastapi import APIRouter, Depends, HTTPException
from core.service.dajare_service import DajareService
from core.api.request.reading_request import ReadingRequest
from core.api.response.reading_response import ReadingResponse
dajare_service = DajareService()
router = APIRouter()
@router.get('/', status_code=200, response_model=ReadingResponse, include_in_schema=False)
@router.get('', status_code=200, response_model=ReadingResponse)
async def reading_dajare(request: ReadingRequest = Depends()):
# convert reading
try:
dajare = dajare_service.convert_reading(request.dajare)
except Exception:
raise HTTPException(status_code=500)
return ReadingResponse(
reading=dajare.reading,
)
| 30.75
| 90
| 0.777778
|
1dd38fe47e27e4bfb337611c66c5a1c33243c3a5
| 2,507
|
py
|
Python
|
examples/experimental/trial/resnet50_tf_keras/native_impl.py
|
ybt195/determined
|
913fdc3b81ef33c2760bdb128c8ce9179e4ab9b2
|
[
"Apache-2.0"
] | 3
|
2020-04-30T03:56:15.000Z
|
2020-04-30T04:01:24.000Z
|
examples/experimental/trial/resnet50_tf_keras/native_impl.py
|
ybt195/determined
|
913fdc3b81ef33c2760bdb128c8ce9179e4ab9b2
|
[
"Apache-2.0"
] | 1
|
2022-02-10T07:31:44.000Z
|
2022-02-10T07:31:44.000Z
|
examples/experimental/trial/resnet50_tf_keras/native_impl.py
|
ybt195/determined
|
913fdc3b81ef33c2760bdb128c8ce9179e4ab9b2
|
[
"Apache-2.0"
] | 2
|
2020-07-10T23:08:23.000Z
|
2021-01-13T10:01:59.000Z
|
import argparse
import pathlib
from official.vision.image_classification import common
from official.vision.image_classification import imagenet_preprocessing
from official.vision.image_classification import resnet_model
import determined as det
from determined import experimental
from determined.experimental import keras
import data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--mode", dest="mode", help="Specifies test mode or submit mode.", default="submit"
)
args = parser.parse_args()
config = {
"description": "Resnet50 Imagenet TF Keras",
"searcher": {
"name": "single",
"metric": "val_loss",
"max_steps": 1,
"smaller_is_better": True,
},
"min_validation_period": 1,
"hyperparameters": {
"global_batch_size": det.Constant(value=32),
"learning_rate": det.Constant(value=0.1),
},
}
ctx = keras.init(
config=config, mode=experimental.Mode(args.mode), context_dir=str(pathlib.Path.cwd())
)
lr_schedule = ctx.get_hparam("learning_rate")
if ctx.get_data_config().get("use_tensor_lr", False):
lr_schedule = common.PiecewiseConstantDecayWithWarmup(
batch_size=ctx.get_per_slot_batch_size(),
epoch_size=imagenet_preprocessing.NUM_IMAGES["train"],
warmup_epochs=common.LR_SCHEDULE[0][1],
boundaries=[p[1] for p in common.LR_SCHEDULE[1:]],
multipliers=[p[0] for p in common.LR_SCHEDULE],
compute_lr_on_cpu=True,
)
optimizer = common.get_optimizer(lr_schedule)
model = resnet_model.resnet50(num_classes=imagenet_preprocessing.NUM_CLASSES)
model = ctx.wrap_model(model)
model.compile(
loss="sparse_categorical_crossentropy",
optimizer=optimizer,
metrics=(["sparse_categorical_accuracy"]),
)
data_shape = (
ctx.get_per_slot_batch_size(),
imagenet_preprocessing.DEFAULT_IMAGE_SIZE,
imagenet_preprocessing.DEFAULT_IMAGE_SIZE,
imagenet_preprocessing.NUM_CHANNELS,
)
labels_shape = (ctx.get_per_slot_batch_size(),)
model.fit(
data.SyntheticData(ctx.get_per_slot_batch_size(), data_shape, labels_shape),
epochs=1,
steps_per_epoch=1,
validation_steps=1,
validation_data=data.SyntheticData(ctx.get_per_slot_batch_size(), data_shape, labels_shape),
verbose=2,
)
| 32.986842
| 100
| 0.670523
|
1fc2f22fe34ac78033eb1b4fcce4a12b61b959b9
| 542
|
py
|
Python
|
examples/galgebra/matrix_latex.py
|
lidavidm/sympy
|
971aa94ee6d0774eacfb4aed6965195c4a59e104
|
[
"BSD-3-Clause"
] | 1
|
2020-03-12T02:52:16.000Z
|
2020-03-12T02:52:16.000Z
|
examples/galgebra/matrix_latex.py
|
shashank-agg/sympy
|
ecf69893c0b9927ea7192113b2421d639aee6ffb
|
[
"BSD-3-Clause"
] | null | null | null |
examples/galgebra/matrix_latex.py
|
shashank-agg/sympy
|
ecf69893c0b9927ea7192113b2421d639aee6ffb
|
[
"BSD-3-Clause"
] | 1
|
2015-04-16T17:35:45.000Z
|
2015-04-16T17:35:45.000Z
|
#!/usr/bin/env python
from __future__ import print_function
from sympy import symbols, Matrix
from sympy.galgebra import xdvi
from sympy.galgebra import Format
def main():
Format()
a = Matrix( 2, 2, ( 1, 2, 3, 4 ) )
b = Matrix( 2, 1, ( 5, 6 ) )
c = a * b
print(a, b, '=', c)
x, y = symbols( 'x, y' )
d = Matrix( 1, 2, ( x ** 3, y ** 3 ))
e = Matrix( 2, 2, ( x ** 2, 2 * x * y, 2 * x * y, y ** 2 ) )
f = d * e
print('%', d, e, '=', f)
xdvi()
return
if __name__ == "__main__":
main()
| 18.689655
| 64
| 0.49262
|
ef1aff466c8c4381521fd90438deb24458a2e199
| 1,140
|
py
|
Python
|
build/preferences.py
|
deathaxe/sublime-a-file-icon
|
aeb58da4737fc5bd567c7a4ba62d72f407723f5f
|
[
"MIT"
] | 204
|
2018-12-19T00:09:36.000Z
|
2022-03-28T10:44:34.000Z
|
build/preferences.py
|
Ouyang-ui/AFileIcon
|
373027558a9ee2465a6ab293859676939e71b2d6
|
[
"MIT"
] | 58
|
2018-12-18T15:12:52.000Z
|
2022-01-30T12:46:50.000Z
|
build/preferences.py
|
Ouyang-ui/AFileIcon
|
373027558a9ee2465a6ab293859676939e71b2d6
|
[
"MIT"
] | 48
|
2018-12-19T16:30:39.000Z
|
2022-03-22T23:33:46.000Z
|
import os
from textwrap import dedent
def create_preferences(icons):
template = (
dedent(
"""
<?xml version="1.0" encoding="UTF-8"?>
<plist version="1.0">
<dict>
<key>scope</key>
<string>{scope}</string>
<key>settings</key>
<dict>
<key>icon</key>
<string>{name}</string>
</dict>
</dict>
</plist>
"""
)
.lstrip()
.replace(" ", "\t")
)
package_root = os.path.dirname(os.path.dirname(__file__))
for name, data in icons.items():
scopes = set()
for keys in ("aliases", "syntaxes"):
for syntax in data.get(keys, []):
for scope in syntax["scope"].split(","):
scopes.add(scope.strip())
if scopes:
with open(
os.path.join(package_root, "preferences", name + ".tmPreferences"), "w"
) as out:
out.write(template.format(name=name, scope=", ".join(sorted(scopes))))
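if __name__ == "__main__":
    # A hypothetical invocation showing only the expected shape of the icons
    # mapping (the name and scopes are invented); output is written to the
    # package-level "preferences" directory, which must already exist.
    create_preferences({
        "file_type_example": {
            "syntaxes": [{"scope": "source.example, text.example.config"}],
            "aliases": [{"scope": "source.example-alias"}],
        },
    })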
| 27.142857
| 87
| 0.442982
|
beffd6e767f26b666fcb15b0524a7874180d97d0
| 5,190
|
py
|
Python
|
tests/case/view/manage.py
|
mbeko/moztrap
|
db75e1f8756ef2c0c39652a66302b19c8afa0256
|
[
"BSD-2-Clause"
] | 1
|
2015-02-10T15:09:42.000Z
|
2015-02-10T15:09:42.000Z
|
tests/case/view/manage.py
|
mbeko/moztrap
|
db75e1f8756ef2c0c39652a66302b19c8afa0256
|
[
"BSD-2-Clause"
] | null | null | null |
tests/case/view/manage.py
|
mbeko/moztrap
|
db75e1f8756ef2c0c39652a66302b19c8afa0256
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Utility base TestCase classes for testing manage views.
"""
from datetime import datetime
from . import base
class ListViewTestCase(base.FormViewTestCase, base.ListViewTestCase):
"""Base class for testing manage list views."""
# subclasses should specify these:
perm = None # required management permission codename
def assertActionRequiresPermission(self, action, permission=None):
"""Assert that the given list action requires the given permission."""
if permission is None:
permission = self.perm
o = self.factory.create()
form = self.get_form()
name = "action-{0}".format(action)
# action button not shown to the user
self.assertTrue(name not in form.fields)
# ...but if they cleverly submit it anyway they get a 403...
res = self.post(
{
name: str(o.id),
"csrfmiddlewaretoken":
form.fields.get("csrfmiddlewaretoken")[0].value
},
status=403,
)
# ...with a message about permissions.
res.mustcontain("permission")
def test_delete(self):
"""Can delete objects from list."""
self.add_perm(self.perm)
o = self.factory.create()
self.get_form().submit(
name="action-delete",
index=0,
headers={"X-Requested-With": "XMLHttpRequest"}
)
self.assertTrue(bool(self.refresh(o).deleted_on))
def test_delete_requires_permission(self):
"""Deleting requires appropriate permission."""
self.assertActionRequiresPermission("delete")
def test_create_link(self):
"""With proper perm, create link is there."""
self.add_perm(self.perm)
res = self.get()
self.assertElement(res.html, "a", "create")
def test_create_link_requires_perms(self):
"""Without proper perm, create link is not there."""
res = self.get()
self.assertElement(res.html, "a", "create", count=0)
class MTModelListTests(object):
"""Additional manage list view tests for MTModels."""
def test_clone(self):
"""Can clone objects in list."""
self.add_perm(self.perm)
self.factory.create()
res = self.get_form().submit(
name="action-clone",
index=0,
headers={"X-Requested-With": "XMLHttpRequest"},
)
self.assertElement(
res.json["html"], "h3", "title", count=2)
def test_clone_requires_permission(self):
"""Cloning requires appropriate permission."""
self.assertActionRequiresPermission("clone")
def test_filter_by_creator(self):
"""Can filter by creator."""
self.factory.create(name="Foo 1", user=self.user)
self.factory.create(name="Foo 2")
res = self.get(params={"filter-creator": self.user.id})
self.assertInList(res, "Foo 1")
self.assertNotInList(res, "Foo 2")
def test_default_sort_by_last_created(self):
"""Default sort is by latest created first."""
self.factory.create(
name="Foo 1", created_on=datetime(2012, 1, 21))
self.factory.create(
name="Foo 2", created_on=datetime(2012, 1, 22))
res = self.get()
self.assertOrderInList(res, "Foo 2", "Foo 1")
class StatusListTests(object):
"""Extra tests for manage lists with activated/deactivate actions."""
def test_activate(self):
"""Can activate objects in list."""
self.add_perm(self.perm)
s = self.factory.create(status="draft")
self.get_form().submit(
name="action-activate",
index=0,
headers={"X-Requested-With": "XMLHttpRequest"},
)
self.assertEqual(self.refresh(s).status, "active")
def test_activate_requires_permission(self):
"""Activating requires appropriate permission."""
self.assertActionRequiresPermission("activate", self.perm)
def test_draft(self):
"""Can make-draft objects in list."""
self.add_perm(self.perm)
s = self.factory.create(status="active")
self.get_form().submit(
name="action-draft",
index=0,
headers={"X-Requested-With": "XMLHttpRequest"},
)
self.assertEqual(self.refresh(s).status, "draft")
def test_draft_requires_permission(self):
"""Resetting to draft requires appropriate permission."""
self.assertActionRequiresPermission("draft", self.perm)
def test_deactivate(self):
"""Can deactivate objects in list."""
self.add_perm(self.perm)
s = self.factory.create(status="active")
self.get_form().submit(
name="action-deactivate",
index=0,
headers={"X-Requested-With": "XMLHttpRequest"},
)
self.assertEqual(self.refresh(s).status, "disabled")
def test_deactivate_requires_permission(self):
"""Deactivating requires appropriate permission."""
self.assertActionRequiresPermission("deactivate", self.perm)
| 27.606383
| 78
| 0.604239
|
731dae44a82896569f53896af2eb4c079af55b2c
| 30,538
|
py
|
Python
|
liquidhandling/SoloSoft.py
|
brettin/liquidhandling
|
7a96e2881ffaa0326514cf5d97ba49d65ad42a14
|
[
"MIT"
] | null | null | null |
liquidhandling/SoloSoft.py
|
brettin/liquidhandling
|
7a96e2881ffaa0326514cf5d97ba49d65ad42a14
|
[
"MIT"
] | null | null | null |
liquidhandling/SoloSoft.py
|
brettin/liquidhandling
|
7a96e2881ffaa0326514cf5d97ba49d65ad42a14
|
[
"MIT"
] | 1
|
2021-03-25T13:47:42.000Z
|
2021-03-25T13:47:42.000Z
|
import json
from liquidhandling import Properties
STEP_DELIMITER = "!@#$"
class SoloSoft:
def __init__(self, filename=None, plateList=None, pipeline=None):
self.filename = None
self.plateList = []
self.pipeline = []
# *Open protocol file for editing
try:
if filename != None:
self.setFile(filename)
except Exception as error:
print("Error creating SoloSoft protocol with filename %s" % filename)
print(error)
return
# *Set plate list
try:
if plateList != None:
self.setPlates(plateList)
else:
self.setPlates(
[
"Empty",
"Empty",
"Empty",
"Empty",
"Empty",
"Empty",
"Empty",
"Empty",
]
)
except Exception as error:
print("Error setting Plate List")
print(error)
return
# *Set pipeline, if we're expanding on an existing pipeline
try:
if pipeline != None:
self.setPipeline(pipeline)
else:
self.initializePipeline()
except Exception as error:
print("Error setting pipeline")
print(error)
def setFile(self, filename):
if not isinstance(filename, str):
raise TypeError("filename must be a string.")
else:
self.filename = filename
def setPlates(self, plateList):
if not isinstance(plateList, list):
raise TypeError("plateList must be a list of strings.")
else:
self.plateList = plateList
def setPipeline(self, pipeline):
if not isinstance(pipeline, list):
raise TypeError("pipeline should be a list")
else:
self.pipeline = pipeline
def initializePipeline(self):
self.setPipeline([])
    def removeStep(self, position=-1):
        try:
            self.pipeline.pop(position)
        except IndexError:
            print("Error removing step at position %i in pipeline" % position)
def savePipeline(self, filename=None, CRLF=True):
if filename == None:
if self.filename != None:
filename = self.filename
else:
raise BaseException("Need to specify a file to save pipeline")
if CRLF:
newline = "\r\n"
else:
newline = ""
with open(filename, "w", newline=newline) as file:
for plate in self.plateList:
file.write(str(plate) + "\n")
for step in self.pipeline:
for item in step:
if isinstance(item, list):
if len(item) > 0 and isinstance(item[0], list):
for line in item:
for number in line[:-1]:
file.write(str(number))
file.write(",")
file.write(str(line[-1]))
file.write("\n")
else:
for number in item:
file.write(str(number) + "\n")
else:
file.write(str(item) + "\n")
def pipelineToJSON(self, json_file=None, pipeline=None, plateList=None):
if pipeline != None:
if not isinstance(pipeline, list):
raise TypeError("pipeline should be a list")
else:
pipeline = self.pipeline
if plateList != None:
if not isinstance(plateList, list):
raise TypeError("platelist should be a list")
else:
plateList = self.plateList
json_data = {}
json_data["metadata"] = {"spec_version": Properties.SPEC_VERSION}
json_data["pipeline_type"] = "SoloSoft"
json_data["platelist"] = plateList
steps = []
for step in pipeline:
# step = pipeline[28]
# if True:
step_extraction_function = self.jsonify[step[0]]
step_data = {}
step_data["step_definition"] = step_extraction_function(self, step)
steps.append(step_data)
json_data["steps"] = steps
return json_data
def jsonToPipeline(self, json_data, inplace=True):
if isinstance(json_data, str):
json_local = json.loads(json_data)
elif isinstance(json_data, dict):
json_local = json_data
else:
print("json needs to be either a dict or string.")
return
steps = []
for step in json_local["steps"]:
params = {}
step_function = None
for key in step["step_definition"]:
if key == "step_type":
step_function = self.pipelinify[step["step_definition"][key]]
else:
params[key] = step["step_definition"][key]
steps.append(step_function(self=self, inplace=False, **params))
params = {}
        if inplace:
            self.setPipeline(steps)
return steps
# * SOLOSoft Pipeline Functions
def getTip(
self,
position="Position1",
disposal="TipDisposal",
num_tips=8,
auto_tip_selection=True,
count_tips_from_last_channel=False,
index=None,
inplace=True,
):
properties_list = ["GetTip", position, disposal, num_tips]
if auto_tip_selection:
properties_list.append(1)
else:
properties_list.append(0)
properties_list.extend([0, count_tips_from_last_channel, STEP_DELIMITER])
if inplace:
if index != None:
self.pipeline.insert(index, properties_list)
else:
self.pipeline.append(properties_list)
return properties_list
def jsonifyGetTip(self, step):
json_data = {}
json_data["step_type"] = "GetTip"
json_data["position"] = step[1]
json_data["disposal"] = step[2]
json_data["num_tips"] = step[3]
json_data["auto_tip_selection"] = step[4]
json_data["count_tips_from_last_channel"] = step[6]
return json_data
def shuckTip(self, disposal="TipDisposal", index=None, inplace=True):
properties_list = ["ShuckTip", disposal, STEP_DELIMITER]
if inplace:
if index != None:
self.pipeline.insert(index, properties_list)
else:
self.pipeline.append(properties_list)
return properties_list
def jsonifyShuckTip(self, step):
json_data = {}
json_data["step_type"] = "ShuckTip"
json_data["disposal"] = step[1]
return json_data
def loop(self, iterations=-1, index=None, inplace=True):
properties_list = ["Loop", iterations, STEP_DELIMITER]
if inplace:
if index != None:
self.pipeline.insert(index, properties_list)
else:
self.pipeline.append(properties_list)
return properties_list
def jsonifyLoop(self, step):
json_data = {}
json_data["step_type"] = "Loop"
json_data["iterations"] = step[1]
return json_data
def endLoop(self, index=None, inplace=True):
properties_list = ["EndLoop", STEP_DELIMITER]
if inplace:
if index != None:
self.pipeline.insert(index, properties_list)
else:
self.pipeline.append(properties_list)
return properties_list
def jsonifyEndLoop(self, step):
json_data = {}
json_data["step_type"] = "EndLoop"
return json_data
def aspirate(
self,
position="Position1",
aspirate_volume_to_named_point=False,
aspirate_volume_single=0,
syringe_speed=100,
start_by_emptying_syringe=True,
increment_column_order=False,
aspirate_point="Position1",
aspirate_shift=[0, 0, 0],
do_tip_touch=False,
tip_touch_shift=[0, 0, 0],
file_data_path="",
multiple_wells=1,
backlash=0,
pre_aspirate=0,
mix_at_start=False,
mix_cycles=0,
mix_volume=0,
dispense_height=0,
delay_after_dispense=0,
aspirate_volumes=None,
dwell_after_aspirate=0,
find_bottom_of_vessel=False,
reverse_order=False,
post_aspirate=0,
move_while_pipetting=False,
move_distance=[0, 0, 0],
index=None,
inplace=True,
):
properties_list = [
"Aspirate",
position,
aspirate_volume_single,
2,
syringe_speed,
]
if start_by_emptying_syringe:
properties_list.append(1)
else:
properties_list.append(0)
if aspirate_volume_to_named_point:
properties_list.extend([False, True])
else:
properties_list.extend([True, False])
if increment_column_order:
properties_list.extend([False, True])
else:
properties_list.extend([True, False])
properties_list.extend([aspirate_point, aspirate_shift])
if do_tip_touch:
properties_list.append(1)
else:
properties_list.append(0)
properties_list.extend(
[tip_touch_shift, file_data_path, multiple_wells, backlash, pre_aspirate]
)
if mix_at_start:
properties_list.append(1)
else:
properties_list.append(0)
properties_list.extend(
[mix_cycles, mix_volume, "a", 0, 0, dispense_height, delay_after_dispense]
)
if aspirate_volumes != None:
properties_list.append(aspirate_volumes)
else:
properties_list.append(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
)
properties_list.append(dwell_after_aspirate)
if find_bottom_of_vessel:
properties_list.append(1)
else:
properties_list.append(0)
        properties_list.append(5)  # ? Mysterious 1 or 2 digit integer
if reverse_order:
properties_list.append(1)
else:
properties_list.append(0)
properties_list.append(post_aspirate)
if move_while_pipetting:
properties_list.append(1)
else:
properties_list.append(0)
properties_list.extend([move_distance, STEP_DELIMITER])
if inplace:
if index != None:
self.pipeline.insert(index, properties_list)
else:
self.pipeline.append(properties_list)
return properties_list
def jsonifyAspirate(self, step):
json_data = {}
json_data["step_type"] = "Aspirate"
json_data["position"] = step[1]
json_data["aspirate_volume_single"] = step[2]
json_data["syringe_speed"] = step[4]
json_data["start_by_emptying_syringe"] = step[5]
json_data["aspirate_volume_to_named_point"] = step[7]
json_data["increment_column_order"] = step[9]
json_data["aspirate_point"] = step[10]
json_data["aspirate_shift"] = step[11]
json_data["do_tip_touch"] = step[12]
json_data["tip_touch_shift"] = step[13]
json_data["file_data_path"] = step[14]
json_data["multiple_wells"] = step[15]
json_data["backlash"] = step[16]
json_data["pre_aspirate"] = step[17]
json_data["mix_at_start"] = step[18]
json_data["mix_cycles"] = step[19]
json_data["mix_volume"] = step[20]
json_data["dispense_height"] = step[24]
json_data["delay_after_dispense"] = step[25]
json_data["aspirate_volumes"] = step[26]
json_data["dwell_after_aspirate"] = step[27]
json_data["find_bottom_of_vessel"] = step[28]
json_data["reverse_order"] = step[30]
json_data["post_aspirate"] = step[31]
json_data["move_while_pipetting"] = step[32]
json_data["move_distance"] = step[33]
return json_data
def dispense(
self,
position="Position1",
dispense_volume_single=0,
syringe_speed=100,
backlash=0,
dispense_volume_to_named_point=False,
increment_column_order=False,
dispense_point="Position1",
dispense_shift=[0, 0, 0],
do_tip_touch=False,
tip_touch_shift=[0, 0, 0],
file_data_path="",
multiple_wells=1,
dwell_after_dispense=0,
blowoff=0,
mix_at_finish=False,
mix_cycles=0,
mix_volume=0,
aspirate_height=0,
delay_after_aspirate=0,
dispense_volumes=None,
reverse_order=False,
move_while_pipetting=False,
move_distance=[0, 0, 0],
index=None,
inplace=True,
):
properties_list = [
"Dispense",
position,
dispense_volume_single,
2,
syringe_speed,
backlash,
]
if dispense_volume_to_named_point:
properties_list.extend([False, True])
else:
properties_list.extend([True, False])
if increment_column_order:
properties_list.extend([False, True])
else:
properties_list.extend([True, False])
properties_list.extend([dispense_point, dispense_shift])
if do_tip_touch:
properties_list.append(1)
else:
properties_list.append(0)
properties_list.extend(
[
tip_touch_shift,
file_data_path,
multiple_wells,
dwell_after_dispense,
blowoff,
]
)
if mix_at_finish:
properties_list.append(1)
else:
properties_list.append(0)
properties_list.extend(
[mix_cycles, mix_volume, "a", aspirate_height, delay_after_aspirate]
)
if dispense_volumes != None:
properties_list.append(dispense_volumes)
else:
properties_list.append(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
)
if reverse_order:
properties_list.append(1)
else:
properties_list.append(0)
if move_while_pipetting:
properties_list.append(1)
else:
properties_list.append(0)
properties_list.extend([move_distance, STEP_DELIMITER])
if inplace:
if index != None:
self.pipeline.insert(index, properties_list)
else:
self.pipeline.append(properties_list)
return properties_list
def jsonifyDispense(self, step):
json_data = {}
json_data["step_type"] = "Dispense"
json_data["position"] = step[1]
json_data["dispense_volume_single"] = step[2]
json_data["syringe_speed"] = step[4]
json_data["backlash"] = step[5]
json_data["dispense_volume_to_named_point"] = step[7]
json_data["increment_column_order"] = step[9]
json_data["dispense_point"] = step[10]
json_data["dispense_shift"] = step[11]
json_data["do_tip_touch"] = step[12]
json_data["tip_touch_shift"] = step[13]
json_data["file_data_path"] = step[14]
json_data["multiple_wells"] = step[15]
json_data["dwell_after_dispense"] = step[16]
json_data["blowoff"] = step[17]
json_data["mix_at_finish"] = step[18]
json_data["mix_cycles"] = step[19]
json_data["mix_volume"] = step[20]
json_data["dispense_height"] = step[22]
json_data["delay_after_aspirate"] = step[23]
json_data["dispense_volumes"] = step[24]
json_data["reverse_order"] = step[25]
json_data["move_while_pipetting"] = step[26]
json_data["move_distance"] = step[27]
return json_data
def prime(
self,
position="Position1",
syringe_speed=100,
fill_syringe=False,
empty_syringe=True,
aspirate_volume=False,
dispense_volume=False,
volume=0,
index=None,
inplace=True,
):
properties_list = [
"Prime",
syringe_speed,
True, # ? Unclear what this is
False, # ? Unclear what this is
False, # ? Unclear what this is
0, # ? Unclear what this is
"a", # ? Unclear what this is
2, # ? Unclear what this is
True, # ? Unclear what this is
1, # ? Unclear what this is
"*", # ? Unclear what this is
volume,
fill_syringe,
empty_syringe,
aspirate_volume,
dispense_volume,
"*",
"*",
STEP_DELIMITER,
]
if inplace:
if index != None:
self.pipeline.insert(index, properties_list)
else:
self.pipeline.append(properties_list)
return properties_list
def jsonifyPrime(self, step):
json_data = {}
json_data["step_type"] = "Prime"
json_data["syringe_speed"] = step[1]
json_data["volume"] = step[11]
json_data["fill_syringe"] = step[12]
json_data["empty_syringe"] = step[13]
json_data["aspirate_volume"] = step[14]
json_data["dispense_volume"] = step[15]
return json_data
def pause(
self,
pause_message="",
allow_end_run=False,
auto_continue_after=False,
wait_seconds=0,
index=None,
inplace=True,
):
properties_list = ["Pause", pause_message]
if allow_end_run or auto_continue_after:
properties_list.append(1)
else:
properties_list.append(0)
if auto_continue_after:
properties_list.append(1)
else:
properties_list.append(0)
properties_list.extend([wait_seconds, STEP_DELIMITER])
if inplace:
if index != None:
self.pipeline.insert(index, properties_list)
else:
self.pipeline.append(properties_list)
return properties_list
def jsonifyPause(self, step):
json_data = {}
json_data["step_type"] = "Pause"
json_data["pause_message"] = step[1]
json_data["allow_end_run"] = step[2]
json_data["auto_continue_after"] = step[3]
json_data["wait_seconds"] = step[4]
return json_data
def getBottom(
self,
position="Position1",
increment_row_order=True,
increment_column_order=False,
output_file_path="",
wells_per_pass=1, # * -1 for all
search_start_distance=0,
well_list=None,
index=None,
inplace=True,
):
properties_list = [
"GetBottom",
position,
increment_row_order,
increment_column_order,
output_file_path,
wells_per_pass,
search_start_distance,
5, # ? Unclear what this is
5, # ? Unclear what this is
"*", # ? Unclear what this is
"*", # ? Unclear what this is
]
if well_list != None:
properties_list.append(well_list)
else:
properties_list.append(
[
[
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
],
[
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
],
[
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
],
[
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
],
[
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
],
[
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
],
[
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
],
[
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
],
]
)
properties_list.append(STEP_DELIMITER)
if inplace:
if index != None:
self.pipeline.insert(index, properties_list)
else:
self.pipeline.append(properties_list)
return properties_list
def jsonifyGetBottom(self, step):
json_data = {}
json_data["step_type"] = "GetBottom"
json_data["position"] = step[1]
json_data["increment_row_order"] = step[2]
json_data["increment_column_order"] = step[3]
json_data["output_file_path"] = step[4]
json_data["wells_per_pass"] = step[5]
json_data["search_start_distance"] = step[6]
json_data["well_list"] = step[11]
return json_data
def setSpeed(self, xyz_speed=100, index=None, inplace=True):
properties_list = ["SetSpeed", xyz_speed, STEP_DELIMITER]
if inplace:
if index != None:
self.pipeline.insert(index, properties_list)
else:
self.pipeline.append(properties_list)
return properties_list
def jsonifySetSpeed(self, step):
json_data = {}
json_data["step_type"] = "SetSpeed"
json_data["xyz_speed"] = step[1]
return json_data
def moveArm(
self,
destination="TipDisposal",
xyz_speed=100,
move_z_at_start=True,
index=None,
inplace=True,
):
properties_list = ["MoveArm", destination, xyz_speed]
if move_z_at_start:
properties_list.append(1)
else:
properties_list.append(0)
properties_list.append(STEP_DELIMITER)
if inplace:
if index != None:
self.pipeline.insert(index, properties_list)
else:
self.pipeline.append(properties_list)
return properties_list
def jsonifyMoveArm(self, step):
json_data = {}
json_data["step_type"] = "MoveArm"
json_data["destination"] = step[1]
json_data["xyz_speed"] = step[2]
json_data["move_z_at_start"] = step[3]
return json_data
def movePlate(
self,
source_position="Position1",
target_position="Position2",
grip_offset=0,
index=None,
inplace=True,
):
properties_list = [
"MovePlate",
source_position,
target_position,
grip_offset,
STEP_DELIMITER,
]
if inplace:
if index != None:
self.pipeline.insert(index, properties_list)
else:
self.pipeline.append(properties_list)
return properties_list
def jsonifyMovePlate(self, step):
json_data = {}
json_data["step_type"] = "MovePlate"
json_data["source_position"] = step[1]
json_data["target_position"] = step[2]
json_data["grip_offset"] = step[3]
return json_data
# * Currently only supports Hot/Cold Nest
def operateAccessory(
self,
unit2=False,
desiredTemperature=24.0,
dontWait=False,
waitUntilActualEqualsStartPoint=False,
waitUntilTemperatureIsStable=False,
turnNestOffNow=False,
turnNestOffAtTemperature=False,
index=None,
inplace=True,
):
properties_list = [
"Accessory",
1,
5475,
5600,
960,
1080,
desiredTemperature,
]
if turnNestOffAtTemperature:
properties_list.append(1)
else:
properties_list.append(0)
properties_list.extend(
[
dontWait,
waitUntilActualEqualsStartPoint,
waitUntilTemperatureIsStable,
0,
60,
]
)
if turnNestOffNow:
properties_list.append(1)
else:
properties_list.append(0)
properties_list.extend(
[
0,
unit2,
"*",
"*",
"*",
"",
1,
"",
1,
7,
1,
"",
1,
1,
0,
100,
1,
25,
"N W S E",
0,
100,
1,
25,
"N W S E",
0,
100,
1,
25,
"N W S E",
0,
100,
1,
25,
"N W S E",
1,
True,
0,
STEP_DELIMITER,
]
)
if inplace:
if index != None:
self.pipeline.insert(index, properties_list)
else:
self.pipeline.append(properties_list)
return properties_list
def jsonifyOperateAccessory(self, step):
json_data = {}
json_data["step_type"] = "OperateAccessory"
json_data["desired_temperature"] = step[6]
json_data["turnNestOffAtTemperature"] = step[7]
json_data["dontWait"] = step[8]
json_data["waitUntilActualEqualsStartPoint"] = step[9]
json_data["waitUntilTemperatureIsStable"] = step[10]
json_data["turnNestOffNow"] = step[13]
json_data["unit2"] = step[15]
return json_data
jsonify = {
"GetTip": jsonifyGetTip,
"ShuckTip": jsonifyShuckTip,
"Loop": jsonifyLoop,
"EndLoop": jsonifyEndLoop,
"Aspirate": jsonifyAspirate,
"Dispense": jsonifyDispense,
"GetBottom": jsonifyGetBottom,
"Prime": jsonifyPrime,
"Pause": jsonifyPause,
"MoveArm": jsonifyMoveArm,
"MovePlate": jsonifyMovePlate,
"SetSpeed": jsonifySetSpeed,
"OperateAccessory": jsonifyOperateAccessory,
}
pipelinify = {
"GetTip": getTip,
"ShuckTip": shuckTip,
"Loop": loop,
"EndLoop": endLoop,
"Aspirate": aspirate,
"Dispense": dispense,
"GetBottom": getBottom,
"Prime": prime,
"Pause": pause,
"MoveArm": moveArm,
"MovePlate": movePlate,
"SetSpeed": setSpeed,
"OperateAccessory": operateAccessory,
}
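if __name__ == "__main__":
    # A minimal usage sketch; the file name, positions and volumes below are
    # illustrative only, not taken from a real SOLOSoft protocol.
    soloSoft = SoloSoft(filename="example_protocol.hso")
    soloSoft.getTip()
    soloSoft.aspirate(position="Position1", aspirate_volume_single=100)
    soloSoft.dispense(position="Position2", dispense_volume_single=100)
    soloSoft.shuckTip()
    soloSoft.savePipeline()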
| 32.044071
| 86
| 0.484609
|