Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | dtype | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 – 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 – 209 |
| max_stars_repo_name | string | length 5 – 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 ⌀ | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 – 209 |
| max_issues_repo_name | string | length 5 – 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 ⌀ | 1 – 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 – 209 |
| max_forks_repo_name | string | length 5 – 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 ⌀ | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 4 – 1.02M |
| avg_line_length | float64 | 1.07 – 66.1k |
| max_line_length | int64 | 4 – 266k |
| alphanum_fraction | float64 | 0.01 – 1 |

Each record below lists the repository metadata, then the file content, then the per-file statistics.
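As a minimal sketch of how rows with this schema could be inspected locally (assumptions: the dump has been exported to a Parquet file at a hypothetical path; the actual dataset name and location are not given here):

```python
import pandas as pd

# Hypothetical local export of the rows described by the schema above.
df = pd.read_parquet("rows.parquet")

# Column names mirror the schema table; filter to small, mostly-alphanumeric Python files.
py_rows = df[(df["ext"] == "py") & (df["alphanum_fraction"] > 0.5)]
for _, row in py_rows.head(3).iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"],
          row["size"], row["max_line_length"])
```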
hexsha: 35a7747ea1cb3ed56e89664ac38bc9c07fde6d82 | size: 1,059 | ext: py | lang: Python
max_stars: pymongoshell/drop_collection.py in jdrumgoole/mongodbutils @ 9e74ce94e56f5b1a0eaa558c282b6c6659ebf1da, licenses ["Apache-2.0"], stars 2 (2019-01-17T21:32:08.000Z to 2019-01-17T22:14:41.000Z)
max_issues: pymongoshell/drop_collection.py in jdrumgoole/mongodbutils @ 9e74ce94e56f5b1a0eaa558c282b6c6659ebf1da, licenses ["Apache-2.0"], issues 5 (2019-04-02T22:49:15.000Z to 2020-04-13T23:02:21.000Z)
max_forks: pymongoshell/drop_collection.py in jdrumgoole/mongodbutils @ 9e74ce94e56f5b1a0eaa558c282b6c6659ebf1da, licenses ["Apache-2.0"], forks 1 (2017-04-29T19:32:24.000Z to 2017-04-29T19:32:24.000Z)
content:
#!/usr/bin/env python3
import sys
import argparse

import pymongo

if __name__ == "__main__":
    """
    Drop a collection.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", default='mongodb://localhost:27017',
                        help="mongodb URI [default: %(default)s]")
    parser.add_argument("--database", default=None,
                        help="database name [default: %(default)s]")
    parser.add_argument("--collection", default=None,
                        help="collection name [default: %(default)s]")
    args = parser.parse_args()

    client = pymongo.MongoClient(host=args.host)
    if args.database in client.list_database_names():
        if args.collection in client[args.database].list_collection_names():
            client[args.database].drop_collection(args.collection)
            print(f"Dropped: '{args.database}.{args.collection}'")
        else:
            print(f"Collection '{args.database}.{args.collection}' does not exist")
    else:
        print(f"Database '{args.database}' does not exist")
avg_line_length: 29.416667 | max_line_length: 83 | alphanum_fraction: 0.621341

hexsha: 013699f8226a3ac9b2a5ca4e279c26b0c899cd81 | size: 1,479 | ext: py | lang: Python
max_stars: desktop/core/ext-py/urllib3-1.25.8/test/test_compatibility.py in e11it/hue-1 @ 436704c40b5fa6ffd30bd972bf50ffeec738d091, licenses ["Apache-2.0"], stars 5,079 (2015-01-01T03:39:46.000Z to 2022-03-31T07:38:22.000Z)
max_issues: desktop/core/ext-py/urllib3-1.25.8/test/test_compatibility.py in e11it/hue-1 @ 436704c40b5fa6ffd30bd972bf50ffeec738d091, licenses ["Apache-2.0"], issues 1,623 (2015-01-01T08:06:24.000Z to 2022-03-30T19:48:52.000Z)
max_forks: desktop/core/ext-py/urllib3-1.25.8/test/test_compatibility.py in e11it/hue-1 @ 436704c40b5fa6ffd30bd972bf50ffeec738d091, licenses ["Apache-2.0"], forks 2,033 (2015-01-04T07:18:02.000Z to 2022-03-28T19:55:47.000Z)
content:
import warnings
import pytest
from urllib3.connection import HTTPConnection
from urllib3.response import HTTPResponse
from urllib3.packages.six.moves import http_cookiejar, urllib
class TestVersionCompatibility(object):
def test_connection_strict(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# strict=True is deprecated in Py33+
HTTPConnection("localhost", 12345, strict=True)
if w:
pytest.fail(
"HTTPConnection raised warning on strict=True: %r" % w[0].message
)
def test_connection_source_address(self):
try:
# source_address does not exist in Py26-
HTTPConnection("localhost", 12345, source_address="127.0.0.1")
except TypeError as e:
pytest.fail("HTTPConnection raised TypeError on source_address: %r" % e)
class TestCookiejar(object):
def test_extract(self):
request = urllib.request.Request("http://google.com")
cookiejar = http_cookiejar.CookieJar()
response = HTTPResponse()
cookies = [
"sessionhash=abcabcabcabcab; path=/; HttpOnly",
"lastvisit=1348253375; expires=Sat, 21-Sep-2050 18:49:35 GMT; path=/",
]
for c in cookies:
response.headers.add("set-cookie", c)
cookiejar.extract_cookies(response, request)
assert len(cookiejar) == len(cookies)
avg_line_length: 32.866667 | max_line_length: 85 | alphanum_fraction: 0.640297

hexsha: 29cbce1e1f69d1e2e1fe82ede27f2c0ec105d9b9 | size: 4,310 | ext: py | lang: Python
max_stars: userbot/plugins/thumbnail.py in indianSammy07/Wolfuserbots @ 9c56dde1f81cec9eb4dd85a369f3a1f8b99f0763, licenses ["MIT"], stars 9 (2021-05-16T23:40:05.000Z to 2022-03-26T02:08:17.000Z)
max_issues: userbot/plugins/thumbnail.py in praveen368/CatUserbot @ 4b0cd970551ffaf86b9fdd5da584c1b3882821ff, licenses ["MIT"], issues 1 (2021-02-08T20:47:53.000Z to 2021-02-08T20:47:53.000Z)
max_forks: userbot/plugins/thumbnail.py in praveen368/CatUserbot @ 4b0cd970551ffaf86b9fdd5da584c1b3882821ff, licenses ["MIT"], forks 47 (2021-03-16T17:16:25.000Z to 2022-03-29T12:59:36.000Z)
content:
"""Thumbnail Utilities, © @AnyDLBot
Available Commands:
.savethumbnail
.clearthumbnail
.getthumbnail"""
import os
import subprocess
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
from PIL import Image
from telethon import events
from userbot.utils import admin_cmd
thumb_image_path = Config.TMP_DOWNLOAD_DIRECTORY + "/thumb_image.jpg"
def get_video_thumb(file, output=None, width=320):
output = file + ".jpg"
metadata = extractMetadata(createParser(file))
p = subprocess.Popen([
'ffmpeg', '-i', file,
'-ss', str(int((0, metadata.get('duration').seconds)[metadata.has('duration')] / 2)),
# '-filter:v', 'scale={}:-1'.format(width),
'-vframes', '1',
output,
], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
p.communicate()
if not p.returncode and os.path.lexists(file):
os.remove(file)
return output
@borg.on(admin_cmd(pattern="savethumbnail"))
async def _(event):
if event.fwd_from:
return
await event.edit("Processing ...")
if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
if event.reply_to_msg_id:
downloaded_file_name = await borg.download_media(
await event.get_reply_message(),
Config.TMP_DOWNLOAD_DIRECTORY,
progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
progress(d, t, mone, c_time, "trying to download")
)
)
if downloaded_file_name.endswith(".mp4"):
downloaded_file_name = get_video_thumb(
downloaded_file_name
)
metadata = extractMetadata(createParser(downloaded_file_name))
height = 0
if metadata.has("height"):
height = metadata.get("height")
# resize image
# ref: https://t.me/PyrogramChat/44663
# https://stackoverflow.com/a/21669827/4723940
Image.open(downloaded_file_name).convert("RGB").save(downloaded_file_name)
img = Image.open(downloaded_file_name)
# https://stackoverflow.com/a/37631799/4723940
# img.thumbnail((320, 320))
img.resize((320, height))
img.save(thumb_image_path, "JPEG")
# https://pillow.readthedocs.io/en/3.1.x/reference/Image.html#create-thumbnails
os.remove(downloaded_file_name)
await event.edit(
"Custom video / file thumbnail saved. " + \
"This image will be used in the upload, till `.clearthumbnail`."
)
else:
await event.edit("Reply to a photo to save custom thumbnail")
@borg.on(admin_cmd(pattern="clearthumbnail"))
async def _(event):
if event.fwd_from:
return
if os.path.exists(thumb_image_path):
os.remove(thumb_image_path)
await event.edit("✅ Custom thumbnail cleared succesfully.")
@borg.on(admin_cmd(pattern="getthumbnail"))
async def _(event):
if event.fwd_from:
return
if event.reply_to_msg_id:
r = await event.get_reply_message()
try:
a = await borg.download_media(
r.media.document.thumbs[0],
Config.TMP_DOWNLOAD_DIRECTORY,
progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
progress(d, t, mone, c_time, "trying to download")
)
)
except Exception as e:
await event.edit(str(e))
try:
await borg.send_file(
event.chat_id,
a,
force_document=False,
allow_cache=False,
reply_to=event.reply_to_msg_id,
)
os.remove(a)
await event.delete()
except Exception as e:
await event.edit(str(e))
elif os.path.exists(thumb_image_path):
caption_str = "Currently Saved Thumbnail. Clear with `.clearthumbnail`"
await borg.send_file(
event.chat_id,
thumb_image_path,
caption=caption_str,
force_document=False,
allow_cache=False,
reply_to=event.message.id
)
await event.edit(caption_str)
else:
await event.edit("Reply `.gethumbnail` as a reply to a media")
avg_line_length: 33.937008 | max_line_length: 93 | alphanum_fraction: 0.618097

hexsha: 107a32637e45ee17150637b86228dc03768df537 | size: 382 | ext: py | lang: Python
max_stars: app/email.py in Anne56njeri/Blog @ 59a1ee653165b3d83f8c284ecef020285ada57cc, licenses ["Unlicense", "MIT"], stars null (no event dates)
max_issues: app/email.py in Anne56njeri/Blog @ 59a1ee653165b3d83f8c284ecef020285ada57cc, licenses ["Unlicense", "MIT"], issues null (no event dates)
max_forks: app/email.py in Anne56njeri/Blog @ 59a1ee653165b3d83f8c284ecef020285ada57cc, licenses ["Unlicense", "MIT"], forks null (no event dates)
content:
from flask_mail import Message
from flask import render_template
from . import mail


def mail_message(subject, template, to, **kwargs):
    sender_email = 'trendytina77@gmail.com'
    email = Message(subject, sender=sender_email, recipients=[to])
    email.body = render_template(template + ".txt", **kwargs)
    email.html = render_template(template + ".html", **kwargs)
    mail.send(email)
avg_line_length: 31.833333 | max_line_length: 67 | alphanum_fraction: 0.748691

hexsha: 7100f5158cbaad8761cd715a1d5a24992e6d8be3 | size: 2,551 | ext: py | lang: Python
max_stars: src/main/python/programmingtheiot/cda/sim/BaseActuatorSimTask.py in Yao-Miao/IoT-constrained-device-app @ 2eeb4bbb4a37e18636fd8db3d216edaa5d334b10, licenses ["MIT"], stars null (no event dates)
max_issues: src/main/python/programmingtheiot/cda/sim/BaseActuatorSimTask.py in Yao-Miao/IoT-constrained-device-app @ 2eeb4bbb4a37e18636fd8db3d216edaa5d334b10, licenses ["MIT"], issues null (no event dates)
max_forks: src/main/python/programmingtheiot/cda/sim/BaseActuatorSimTask.py in Yao-Miao/IoT-constrained-device-app @ 2eeb4bbb4a37e18636fd8db3d216edaa5d334b10, licenses ["MIT"], forks null (no event dates)
content:
#####
#
# This class is part of the Programming the Internet of Things project.
#
# It is provided as a simple shell to guide the student and assist with
# implementation for the Programming the Internet of Things exercises,
# and designed to be modified by the student as needed.
#
import logging
import random
from programmingtheiot.data.ActuatorData import ActuatorData
import programmingtheiot.common.ConfigConst as ConfigConst
class BaseActuatorSimTask():
"""
Shell representation of class for student implementation.
"""
def __init__(self, actuatorType: int = ActuatorData.DEFAULT_ACTUATOR_TYPE, simpleName: str = "Actuator", actuatorName = ConfigConst.NOT_SET):
"""
Initialization of class.
Create an instance of BaseActuatorSimTask
"""
self.actuatorType = actuatorType
self.simpleName = simpleName
self.latestAd = ActuatorData(name=actuatorName)
def activateActuator(self, val: float) -> bool:
"""
Activate the Actuator
@return bool
"""
logging.info("---> Emulating %s actuator ON:", str(self.getActuatorTypeName()))
print('*******')
print('* O N *')
print('*******')
print(self.getActuatorTypeName() + ' VALUE -> ' + str(val))
self.latestAd.setCommand(ActuatorData.COMMAND_ON)
return True
def deactivateActuator(self) -> bool:
"""
Deactivate the Actuator
@return bool
"""
logging.info("---> Emulating %s actuator OFF: ", str(self.getActuatorTypeName()))
print('*******')
print('* OFF *')
print('*******')
self.latestAd.setCommand(ActuatorData.COMMAND_OFF)
return True
def getActuatorType(self):
"""
Get the ActuatorType of the instance
@return str
"""
return self.actuatorType
def getLatestActuatorResponse(self) -> ActuatorData:
"""
Get the LatestActuatorResponse of the instance
@return ActuatorData
"""
return self.latestAd
def getSimpleName(self) -> str:
"""
Get the SimpleName of the instance
@return str
"""
return self.simpleName
def updateActuator(self, data: ActuatorData) -> bool:
"""
Update the Actuator
@return bool
"""
if data:
if data.getCommand() == ActuatorData.COMMAND_ON :
self.activateActuator(data.getValue())
else :
self.deactivateActuator()
self.latestAd._handleUpdateData(data)
self.latestAd.setAsResponse()
return True
def getActuatorTypeName(self):
"""
Get the ActuatorTypeName of the instance
@return str
"""
if self.actuatorType == 1:
return 'HVAC'
if self.actuatorType == 2:
return 'HUMIDIFIER'
return 'Unknown'
avg_line_length: 23.190909 | max_line_length: 142 | alphanum_fraction: 0.697374
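A minimal usage sketch for the BaseActuatorSimTask sample above; it is not part of the dataset row, assumes the programmingtheiot package from that repository is importable, and relies only on the ActuatorData calls already visible in the file (the constructor keyword, setCommand, COMMAND_ON, getValue):

```python
from programmingtheiot.data.ActuatorData import ActuatorData
from programmingtheiot.cda.sim.BaseActuatorSimTask import BaseActuatorSimTask

# actuatorType 1 maps to 'HVAC' in getActuatorTypeName()
task = BaseActuatorSimTask(actuatorType=1, simpleName="HVAC", actuatorName="HvacActuator")

cmd = ActuatorData(name="HvacActuator")
cmd.setCommand(ActuatorData.COMMAND_ON)   # updateActuator() routes COMMAND_ON to activateActuator()
task.updateActuator(cmd)

print(task.getLatestActuatorResponse())   # latest ActuatorData, marked as a response
```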
hexsha: b5b5192fece48115e8efc42d1394b894a79d66aa | size: 4,651 | ext: py | lang: Python
max_stars: tensorflow_datasets/image/breastpathq.py in ziyuan-shen/datasets @ 6d9fe398c069e1b0d7a95800a88eaa88deaa2ffc, licenses ["Apache-2.0"], stars null (no event dates)
max_issues: tensorflow_datasets/image/breastpathq.py in ziyuan-shen/datasets @ 6d9fe398c069e1b0d7a95800a88eaa88deaa2ffc, licenses ["Apache-2.0"], issues null (no event dates)
max_forks: tensorflow_datasets/image/breastpathq.py in ziyuan-shen/datasets @ 6d9fe398c069e1b0d7a95800a88eaa88deaa2ffc, licenses ["Apache-2.0"], forks null (no event dates)
content:
"""Breast cancer whole slide image dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import csv
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
_URL = "http://spiechallenges.cloudapp.net/competitions/14#participate"
# BibTeX citation
_CITATION = """\
@article{peikari2017automatic,
title={Automatic cellularity assessment from \
post-treated breast surgical specimens},
author={Peikari, Mohammad and Salama, Sherine and \
Nofech-Mozes, Sharon and Martel, Anne L},
journal={Cytometry Part A},
volume={91},
number={11},
pages={1078--1087},
year={2017},
publisher={Wiley Online Library}
}
"""
_DESCRIPTION = """\
The dataset's training/validation set consists of \
2578 patches extracted from 96 breast cancer \
whole slide images (WSI). Each patch is labelled \
by a tumor cellularity score. The testing set \
contains 1121 patches from 25 WSIs. Labels for \
testing data are not provided so far. \
The dataset can be used to develop an automated method \
for evaluating cancer cellularity from \
histology patches extracted from WSIs. The method \
is aimed to increase reproducibility of cancer \
cellularity scores and enhance tumor burden assessment.
"""
_IMAGE_SHAPE = (512, 512, 3)
def _load_tif(path):
with tf.io.gfile.GFile(path, "rb") as fp:
image = tfds.core.lazy_imports.PIL_Image.open(fp)
rgb_img = image.convert("RGB")
return np.array(rgb_img)
class Breastpathq(tfds.core.GeneratorBasedBuilder):
"""Breast cancer whole slide image dataset."""
# Set up version.
VERSION = tfds.core.Version('0.1.0')
def _info(self):
# Specifies the tfds.core.DatasetInfo object
return tfds.core.DatasetInfo(
builder=self,
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# tfds.features.FeatureConnectors
features=tfds.features.FeaturesDict({
# These are the features of your dataset like images, labels ...
"image": tfds.features.Image(),
"label": tf.float32
}),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=("image", "label"),
# Homepage of the dataset for documentation
urls=[_URL],
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# Downloads the data and defines the splits
# dl_manager is a tfds.download.DownloadManager that can be used to
# download and extract URLs
# manual download is required for this dataset
download_path = dl_manager.manual_dir
train_file_list = list(filter(lambda x: 'breastpathq.zip' in x, tf.io.gfile.listdir(download_path)))
test_file_list = list(filter(lambda x: 'breastpathq-test.zip' in x, tf.io.gfile.listdir(download_path)))
if len(train_file_list)==0 or len(test_file_list)==0:
msg = "You must download the dataset files manually and place them in: "
msg += dl_manager.manual_dir
msg += " as .zip files. See testing/test_data/fake_examples/breastpathq "
raise AssertionError(msg)
train_dir = dl_manager.extract(os.path.join(download_path, train_file_list[0]))
test_dir = dl_manager.extract(os.path.join(download_path, test_file_list[0]))
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"images_dir_path": os.path.join(train_dir, \
"breastpathq/datasets/train"),
"labels": os.path.join(train_dir, \
"breastpathq/datasets/train_labels.csv"),
},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
"images_dir_path": os.path.join(train_dir, \
"breastpathq/datasets/validation"),
"labels": os.path.join(test_dir, \
"breastpathq-test/val_labels.csv"),
}
),
]
def _generate_examples(self, images_dir_path, labels):
"""Yields examples."""
# Yields (key, example) tuples from the dataset
with tf.io.gfile.GFile(labels, "r") as f:
dataset = csv.DictReader(f)
for row in dataset:
image_id = row['slide']+'_'+row['rid']
yield image_id, {
"image": _load_tif(os.path.join(images_dir_path, image_id+'.tif')),
'label': row['y'],
}
avg_line_length: 34.451852 | max_line_length: 108 | alphanum_fraction: 0.671038

hexsha: 0c490e1d842e720f73178bdb63382b7c029abfca | size: 16,871 | ext: py | lang: Python
max_stars: sift_pyx12/segment.py in sifthealthcare/sift-pyx12 @ 97b38fd8b681f149570ac9d5cae0fe868a7722f2, licenses ["BSD-3-Clause"], stars null (no event dates)
max_issues: sift_pyx12/segment.py in sifthealthcare/sift-pyx12 @ 97b38fd8b681f149570ac9d5cae0fe868a7722f2, licenses ["BSD-3-Clause"], issues 2 (2021-11-12T18:35:11.000Z to 2021-11-12T18:35:31.000Z)
max_forks: sift_pyx12/segment.py in sifthealthcare/sift-pyx12 @ 97b38fd8b681f149570ac9d5cae0fe868a7722f2, licenses ["BSD-3-Clause"], forks null (no event dates)
content:
######################################################################
# Copyright
# John Holland <john@zoner.org>
# All rights reserved.
#
# This software is licensed as described in the file LICENSE.txt, which
# you should have received as part of this distribution.
#
######################################################################
"""
Implements an interface to a x12 segment.
A segment is comprised of a segment identifier and a sequence of elements.
An element can be a simple element or a composite. A simple element is
treated as a composite element with one sub-element.
All indexing is zero based.
"""
import re
import sift_pyx12.path
from sift_pyx12.errors import EngineError
rec_seg_id = re.compile('^[A-Z][A-Z0-9]{1,2}$', re.S)
class Element(object):
"""
Holds a simple element, which is just a simple string.
"""
def __init__(self, ele_str):
"""
@param ele_str: 1::2
@type ele_str: string
"""
self.value = ele_str if ele_str is not None else ''
def __eq__(self, other):
if isinstance(other, Element):
return self.value == other.value
return NotImplemented
def __ne__(self, other):
res = type(self).__eq__(self, other)
if res is NotImplemented:
return res
return not res
def __lt__(self, other):
return NotImplemented
__le__ = __lt__
__le__ = __lt__
__gt__ = __lt__
__ge__ = __lt__
__hash__ = None
def __len__(self):
"""
@rtype: int
"""
return 1
def __repr__(self):
"""
@rtype: string
"""
return self.value
def format(self):
"""
@rtype: string
"""
return self.value
def get_value(self):
"""
@rtype: string
"""
return self.value
def set_value(self, elem_str):
"""
@param elem_str: Element string value
@type elem_str: string
"""
self.value = elem_str if elem_str is not None else ''
def is_composite(self):
"""
@rtype: boolean
"""
return False
def is_element(self):
"""
@rtype: boolean
"""
return True
def is_empty(self):
"""
@rtype: boolean
"""
if self.value is not None and self.value != '':
return False
else:
return True
# return ''.join([`num` for num in xrange(loop_count)])
# def has_invalid_character(self,
class Composite(object):
"""
Can be a simple element or a composite.
A simple element is treated as a composite element with one sub-element.
"""
# Attributes:
# Operations
def __init__(self, ele_str, subele_term=None):
"""
@type ele_str: string
@raise EngineError: If a terminator is None and no default
"""
if subele_term is None or len(subele_term) != 1:
raise EngineError('The sub-element terminator must be a single character, is %s' % (subele_term))
self.subele_term = subele_term
self.subele_term_orig = subele_term
if ele_str is None:
raise EngineError('Element string is None')
members = ele_str.split(self.subele_term)
self.elements = []
for elem in members:
self.elements.append(Element(elem))
def __eq__(self, other):
if isinstance(other, Composite):
if len(self.elements) != len(other.elements):
return False
for i in range(len(self.elements)):
if self.elements[i] != other.elements[i]:
return False
return True
return NotImplemented
def __ne__(self, other):
res = type(self).__eq__(self, other)
if res is NotImplemented:
return res
return not res
def __lt__(self, other):
return NotImplemented
__le__ = __lt__
__le__ = __lt__
__gt__ = __lt__
__ge__ = __lt__
__hash__ = None
def __getitem__(self, idx):
"""
returns Element instance for idx
"""
return self.elements[idx]
def __setitem__(self, idx, val):
"""
1 based index
[0] throws exception
sets element value for idx
"""
self.elements[idx] = val
def __len__(self):
"""
@rtype: int
"""
return len(self.elements)
def __repr__(self):
"""
@rtype: string
"""
return self.format(self.subele_term)
def format(self, subele_term=None):
"""
Format a composite
@return: string
@raise EngineError: If terminator is None and no default
"""
if subele_term is None:
subele_term = self.subele_term
if subele_term is None:
raise EngineError('subele_term is None')
for i in range(len(self.elements) - 1, -1, -1):
if not self.elements[i].is_empty():
break
return subele_term.join([Element.__repr__(x) for x in self.elements[:i + 1]])
def get_value(self):
"""
Get value of simple element
"""
if len(self.elements) == 1:
return self.elements[0].get_value()
else:
raise IndexError('value of composite is undefined')
def set_subele_term(self, subele_term):
"""
@param subele_term: Sub-element terminator value
@type subele_term: string
"""
self.subele_term = subele_term
def is_composite(self):
"""
@rtype: boolean
"""
if len(self.elements) > 1:
return True
else:
return False
def is_element(self):
"""
@rtype: boolean
"""
if len(self.elements) == 1:
return True
else:
return False
def is_empty(self):
"""
@rtype: boolean
"""
for ele in self.elements:
if not ele.is_empty():
return False
return True
def values_iterator(self):
for j in range(len(self.elements)):
if not self.elements[j].is_empty():
subele_ord = '{comp}'.format(comp=j+1)
yield (subele_ord, self.elements[j].get_value())
class Segment(object):
"""
Encapsulates a X12 segment. Contains composites.
"""
# Attributes:
# Operations
def __init__(self, seg_str, seg_term, ele_term, subele_term, repetition_term='^'):
"""
"""
self.seg_term = seg_term
self.seg_term_orig = seg_term
self.ele_term = ele_term
self.ele_term_orig = ele_term
self.subele_term = subele_term
self.subele_term_orig = subele_term
self.repetition_term = repetition_term
self.seg_id = None
self.elements = []
if seg_str is None or seg_str == '':
return
if seg_str[-1] == seg_term:
elems = seg_str[:-1].split(self.ele_term)
else:
elems = seg_str.split(self.ele_term)
if elems:
self.seg_id = elems[0]
for ele in elems[1:]:
if self.seg_id == 'ISA':
#Special handling for ISA segment
#guarantee subele_term will not be matched
self.elements.append(Composite(ele, ele_term))
else:
self.elements.append(Composite(ele, subele_term))
def __eq__(self, other):
if isinstance(other, Segment):
if self.seg_id != other.seg_id:
return False
if len(self.elements) != len(other.elements):
return False
for i in range(len(self.elements)):
if self.elements[i] != other.elements[i]:
return False
return True
return NotImplemented
def __ne__(self, other):
res = type(self).__eq__(self, other)
if res is NotImplemented:
return res
return not res
def __lt__(self, other):
return NotImplemented
__le__ = __lt__
__le__ = __lt__
__gt__ = __lt__
__ge__ = __lt__
__hash__ = None
def __repr__(self):
"""
@rtype: string
"""
return self.format(self.seg_term, self.ele_term, self.subele_term)
def append(self, val):
"""
Append a composite to the segment
@param val: String value of composite
@type val: string
"""
self.elements.append(Composite(val, self.subele_term))
def __len__(self):
"""
@rtype: int
"""
return len(self.elements)
def get_seg_id(self):
"""
@rtype: string
"""
return self.seg_id
def _parse_refdes(self, ref_des):
"""
Format of ref_des:
- a simple element: TST02
- a composite: TST03 where TST03 is a composite
- a sub-element: TST03-2
- or any of the above with the segment ID omitted (02, 03, 03-1)
@param ref_des: X12 Reference Designator
@type ref_des: string
@rtype: tuple(ele_idx, subele_idx)
@raise EngineError: If the given ref_des does not match the segment ID
or if the indexes are not valid integers
"""
xp = sift_pyx12.path.X12Path(ref_des)
if xp.seg_id is not None and xp.seg_id != self.seg_id:
err_str = 'Invalid Reference Designator: %s, seg_id: %s' \
% (ref_des, self.seg_id)
raise EngineError(err_str)
ele_idx = xp.ele_idx - 1 if xp.ele_idx is not None else None
comp_idx = xp.subele_idx - 1 if xp.subele_idx is not None else None
return (ele_idx, comp_idx)
def get(self, ref_des):
"""
@param ref_des: X12 Reference Designator
@type ref_des: string
@return: Element or Composite
@rtype: L{segment.Composite}
@raise IndexError: If ref_des does not contain a valid element index
"""
(ele_idx, comp_idx) = self._parse_refdes(ref_des)
if ele_idx is None:
raise IndexError('{} is not a valid element index'.format(ref_des))
if ele_idx >= self.__len__():
return None
if comp_idx is None:
return self.elements[ele_idx]
else:
if comp_idx >= self.elements[ele_idx].__len__():
return None
return self.elements[ele_idx][comp_idx]
def get_value(self, ref_des):
"""
@param ref_des: X12 Reference Designator
@type ref_des: string
"""
comp1 = self.get(ref_des)
if comp1 is None:
return None
else:
return comp1.format()
def get_value_by_ref_des(self, ref_des):
"""
@param ref_des: X12 Reference Designator
@type ref_des: string
@attention: Deprecated - use get_value
"""
raise DeprecationWarning('Use Segment.get_value')
def set(self, ref_des, val):
"""
Set the value of an element or subelement identified by the
Reference Designator
@param ref_des: X12 Reference Designator
@type ref_des: string
@param val: New value
@type val: string
"""
(ele_idx, comp_idx) = self._parse_refdes(ref_des)
while len(self.elements) <= ele_idx:
# insert blank values before our value if needed
self.elements.append(Composite('', self.subele_term))
if self.seg_id == 'ISA' and ele_idx == 15:
#Special handling for ISA segment
#guarantee subele_term will not be matched
self.elements[ele_idx] = Composite(val, self.ele_term)
return
if comp_idx is None:
self.elements[ele_idx] = Composite(val, self.subele_term)
else:
while len(self.elements[ele_idx]) <= comp_idx:
# insert blank values before our value if needed
self.elements[ele_idx].elements.append(Element(''))
self.elements[ele_idx][comp_idx] = Element(val)
def is_element(self, ref_des):
"""
@param ref_des: X12 Reference Designator
@type ref_des: string
"""
ele_idx = self._parse_refdes(ref_des)[0]
return self.elements[ele_idx].is_element()
def is_composite(self, ref_des):
"""
@param ref_des: X12 Reference Designator
@type ref_des: string
"""
ele_idx = self._parse_refdes(ref_des)[0]
return self.elements[ele_idx].is_composite()
def ele_len(self, ref_des):
"""
@param ref_des: X12 Reference Designator
@type ref_des: string
@return: number of sub-elements in an element or composite
@rtype: int
"""
ele_idx = self._parse_refdes(ref_des)[0]
return len(self.elements[ele_idx])
def set_seg_term(self, seg_term):
"""
@param seg_term: Segment terminator
@type seg_term: string
"""
self.seg_term = seg_term
def set_ele_term(self, ele_term):
"""
@param ele_term: Element terminator
@type ele_term: string
"""
self.ele_term = ele_term
def set_subele_term(self, subele_term):
"""
@param subele_term: Sub-element terminator
@type subele_term: string
"""
self.subele_term = subele_term
def format(self, seg_term=None, ele_term=None, subele_term=None):
"""
@rtype: string
@raise EngineError: If a terminator is None and no default
"""
if seg_term is None:
seg_term = self.seg_term
if ele_term is None:
ele_term = self.ele_term
if subele_term is None:
subele_term = self.subele_term
if seg_term is None:
raise EngineError('seg_term is None')
if ele_term is None:
raise EngineError('ele_term is None')
if subele_term is None:
raise EngineError('subele_term is None')
str_elems = []
# get index of last non-empty element
i = 0
for i in range(len(self.elements) - 1, -1, -1):
if not self.elements[i].is_empty():
break
for ele in self.elements[:i + 1]:
str_elems.append(ele.format(subele_term))
return '%s%s%s%s' % (self.seg_id, ele_term, ele_term.join(str_elems), seg_term)
def format_ele_list(self, str_elems, subele_term=None):
"""
Modifies the parameter str_elems
Strips trailing empty composites
"""
if subele_term is None:
subele_term = self.subele_term
# Find last non-empty composite
for i in range(len(self.elements) - 1, -1, -1):
if not self.elements[i].is_empty():
break
for ele in self.elements[:i + 1]:
str_elems.append(ele.format(subele_term))
def is_empty(self):
"""
@rtype: boolean
"""
if len(self.elements) == 0:
return True
for ele in self.elements:
if not ele.is_empty():
return False
return True
def is_seg_id_valid(self):
"""
Is the Segment identifier valid?
EBNF:
<seg_id> ::= <letter_or_digit> <letter_or_digit> [<letter_or_digit>]
@rtype: boolean
"""
if not self.seg_id or len(self.seg_id) < 2 or len(self.seg_id) > 3:
return False
else:
m = rec_seg_id.search(self.seg_id)
if not m:
return False # Invalid char matched
return True
def copy(self):
return self.__copy__()
def __copy__(self):
return Segment(self.format(), self.seg_term, self.ele_term, self.subele_term)
def values_iterator(self):
"""
Enumerate over the values in the segment, adding the path, element index and sub-element index
"""
for i in range(len(self.elements)):
if self.elements[i].is_composite():
for (comp_ord, val) in self.elements[i].values_iterator():
ele_ord = '{idx:0>2}'.format(idx=i+1)
refdes = '{segid}{ele_ord}-{comp_ord}'.format(segid=self.seg_id, ele_ord=ele_ord, comp_ord=comp_ord)
yield (refdes, ele_ord, comp_ord, val)
else:
if not self.elements[i].is_empty():
ele_ord = '{idx:0>2}'.format(idx=i+1)
refdes = '{segid}{ele_ord}'.format(segid=self.seg_id, ele_ord=ele_ord)
yield (refdes, ele_ord, None, self.elements[i].get_value())
avg_line_length: 29.650264 | max_line_length: 120 | alphanum_fraction: 0.559303
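The segment.py module docstring above describes an X12 segment as a segment identifier followed by simple or composite elements, addressed by reference designators such as TST02 or TST03-2. A minimal sketch of that interface, assuming sift_pyx12 is installed and its X12Path parses standard reference designators as the methods above suggest:

```python
from sift_pyx12.segment import Segment

# seg_term '~', ele_term '*', subele_term ':' are typical X12 delimiters.
seg = Segment("TST*AA:1*BB*5~", "~", "*", ":")

print(seg.get_seg_id())          # 'TST'
print(seg.get_value("TST02"))    # 'BB'  (simple element)
print(seg.get_value("TST01-2"))  # '1'   (sub-element of the composite in TST01)

seg.set("TST03", "42")           # overwrite an element by reference designator
print(seg.format())              # 'TST*AA:1*BB*42~'
```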
hexsha: 6088c95fcb6703defe1575c92ace84258115fbad | size: 2,222 | ext: py | lang: Python
max_stars: keras_image_helper/preprocessors.py in alexeygrigorev/keras-image-helper @ 1fea06a0e1343133f7c2ecdc7d7a9390d0503ae5, licenses ["WTFPL"], stars 4 (2020-10-31T15:53:34.000Z to 2021-11-27T10:35:21.000Z)
max_issues: keras_image_helper/preprocessors.py in alexeygrigorev/keras-image-helper @ 1fea06a0e1343133f7c2ecdc7d7a9390d0503ae5, licenses ["WTFPL"], issues null (no event dates)
max_forks: keras_image_helper/preprocessors.py in alexeygrigorev/keras-image-helper @ 1fea06a0e1343133f7c2ecdc7d7a9390d0503ae5, licenses ["WTFPL"], forks null (no event dates)
content:
from keras_image_helper.base import BasePreprocessor


def tf_preprocessing(x):
    x /= 127.5
    x -= 1.0
    return x


def caffe_preprocessing(x):
    # 'RGB'->'BGR'
    x = x[..., ::-1]
    mean = [103.939, 116.779, 123.68]
    x[..., 0] -= mean[0]
    x[..., 1] -= mean[1]
    x[..., 2] -= mean[2]
    return x


class ResnetPreprocessor(BasePreprocessor):
    # sources:
    #
    # https://github.com/keras-team/keras-applications/blob/master/keras_applications/resnet50.py
    # preprocess_input:
    # imagenet_utils.preprocess_input (ResNet50 uses the 'caffe' mode)
    #
    # https://github.com/keras-team/keras-applications/blob/master/keras_applications/imagenet_utils.py
    # _preprocess_numpy_input, mode == 'caffe'
    #
    def preprocess(self, X):
        return caffe_preprocessing(X)


class XceptionPreprocessor(BasePreprocessor):
    # sources:
    #
    # https://github.com/keras-team/keras-applications/blob/master/keras_applications/xception.py
    # preprocess_input:
    # imagenet_utils.preprocess_input(x, mode='tf', **kwargs)
    #
    # https://github.com/keras-team/keras-applications/blob/master/keras_applications/imagenet_utils.py
    # _preprocess_numpy_input, mode == 'tf'
    #
    def preprocess(self, x):
        return tf_preprocessing(x)


class VGGPreprocessor(BasePreprocessor):
    # sources:
    #
    # https://github.com/keras-team/keras-applications/blob/master/keras_applications/vgg16.py
    # preprocess_input = imagenet_utils.preprocess_input
    #
    # https://github.com/keras-team/keras-applications/blob/master/keras_applications/imagenet_utils.py
    # _preprocess_numpy_input, mode == 'caffe'
    #
    def preprocess(self, x):
        return caffe_preprocessing(x)


class InceptionPreprocessor(BasePreprocessor):
    # sources:
    #
    # https://github.com/keras-team/keras-applications/blob/master/keras_applications/inception_v3.py
    # imagenet_utils.preprocess_input(x, mode='tf', **kwargs)
    #
    # https://github.com/keras-team/keras-applications/blob/master/keras_applications/imagenet_utils.py
    # _preprocess_numpy_input, mode == 'tf'
    #
    def preprocess(self, x):
        return tf_preprocessing(x)
avg_line_length: 28.126582 | max_line_length: 103 | alphanum_fraction: 0.672817
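The comment blocks in the preprocessors.py sample above tie each model class to either the 'tf' or the 'caffe' preprocessing mode. A small numeric check of the two helper functions defined in that file (a sketch; it assumes the package is importable and that numpy is available, which the module itself does not import):

```python
import numpy as np

from keras_image_helper.preprocessors import tf_preprocessing, caffe_preprocessing

x = np.full((1, 2, 2, 3), 127.5, dtype=np.float32)

print(tf_preprocessing(x.copy()).ravel()[:3])     # ~[0. 0. 0.]: scaled from [0, 255] to [-1, 1]
print(caffe_preprocessing(x.copy()).ravel()[:3])  # channels flipped to BGR, ImageNet means subtracted
```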
hexsha: ddda8d32fcaf59c03b7d79fee12e86b980acab79 | size: 187 | ext: py | lang: Python
max_stars: payu/exceptions.py in GearPlug/payu-python @ 47ec5c9fc89f1f89a53ec0a68c84f358bbe3394e, licenses ["MIT"], stars 1 (2019-11-11T20:06:27.000Z to 2019-11-11T20:06:27.000Z)
max_issues: payu/exceptions.py in GearPlug/payu-python @ 47ec5c9fc89f1f89a53ec0a68c84f358bbe3394e, licenses ["MIT"], issues 1 (2020-04-20T15:46:28.000Z to 2020-04-20T15:46:28.000Z)
max_forks: payu/exceptions.py in GearPlug/payu-python @ 47ec5c9fc89f1f89a53ec0a68c84f358bbe3394e, licenses ["MIT"], forks 1 (2020-06-17T23:34:32.000Z to 2020-06-17T23:34:32.000Z)
content:
class BaseError(Exception):
    pass


class FranchiseUnavailableError(BaseError):
    pass


class CVVRequiredError(BaseError):
    pass


class InvalidCountryError(BaseError):
    pass
avg_line_length: 12.466667 | max_line_length: 43 | alphanum_fraction: 0.754011

hexsha: fcb9505d4b169ca7e4a3af1a400145e367341181 | size: 3,016 | ext: py | lang: Python
max_stars: tempest/scenario/test_swift_basic_ops.py in pcaruana/tempest @ 907ed711f265305316fdc80acd16e9657cb2c0b4, licenses ["Apache-2.0"], stars null (no event dates)
max_issues: tempest/scenario/test_swift_basic_ops.py in pcaruana/tempest @ 907ed711f265305316fdc80acd16e9657cb2c0b4, licenses ["Apache-2.0"], issues null (no event dates)
max_forks: tempest/scenario/test_swift_basic_ops.py in pcaruana/tempest @ 907ed711f265305316fdc80acd16e9657cb2c0b4, licenses ["Apache-2.0"], forks 1 (2019-02-14T23:36:55.000Z to 2019-02-14T23:36:55.000Z)
content:
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common import http
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestSwiftBasicOps(manager.SwiftScenarioTest):
"""
Test swift basic ops.
* get swift stat.
* create container.
* upload a file to the created container.
* list container's objects and assure that the uploaded file is present.
* download the object and check the content
* delete object from container.
* list container's objects and assure that the deleted file is gone.
* delete a container.
* list containers and assure that the deleted container is gone.
* change ACL of the container and make sure it works successfully
"""
@test.services('object_storage')
def test_swift_basic_ops(self):
self.get_swift_stat()
container_name = self.create_container()
obj_name, obj_data = self.upload_object_to_container(container_name)
self.list_and_check_container_objects(container_name,
present_obj=[obj_name])
self.download_and_verify(container_name, obj_name, obj_data)
self.delete_object(container_name, obj_name)
self.list_and_check_container_objects(container_name,
not_present_obj=[obj_name])
self.delete_container(container_name)
@test.services('object_storage')
def test_swift_acl_anonymous_download(self):
"""This test will cover below steps:
1. Create container
2. Upload object to the new container
3. Change the ACL of the container
4. Check if the object can be download by anonymous user
5. Delete the object and container
"""
container_name = self.create_container()
obj_name, _ = self.upload_object_to_container(container_name)
obj_url = '%s/%s/%s' % (self.object_client.base_url,
container_name, obj_name)
http_client = http.ClosingHttp()
resp, _ = http_client.request(obj_url, 'GET')
self.assertEqual(resp.status, 401)
self.change_container_acl(container_name, '.r:*')
resp, _ = http_client.request(obj_url, 'GET')
self.assertEqual(resp.status, 200)
avg_line_length: 40.756757 | max_line_length: 78 | alphanum_fraction: 0.682692

hexsha: 46f0442219d60971301d5279ef2e203b6b7982ea | size: 90,604 | ext: py | lang: Python
max_stars: Metrics/Depth.py in payamsiyari/evo-lexis @ 2f333f705229b449dc04552dbe4bd78d7bcde24c, licenses ["MIT"], stars null (no event dates)
max_issues: Metrics/Depth.py in payamsiyari/evo-lexis @ 2f333f705229b449dc04552dbe4bd78d7bcde24c, licenses ["MIT"], issues null (no event dates)
max_forks: Metrics/Depth.py in payamsiyari/evo-lexis @ 2f333f705229b449dc04552dbe4bd78d7bcde24c, licenses ["MIT"], forks 2 (2019-01-12T22:52:05.000Z to 2020-02-19T05:34:09.000Z)
content:
# -*- coding: utf-8 -*-
# Prints depth statistics of a DAG.
# USAGE: Depth.py -l -t s -q <path-to-dag-file>
# OUTPUT: number of intermediate nodes, number of sources, average depth of DAG
# NOTE: The code includes a lot of redundant material for historical reasons; it will be cleaned up in the future.
"""
@author: Payam Siyari
"""
from __future__ import division
import os
from os import listdir
from os.path import isfile, join
import random
from collections import defaultdict
from bisect import bisect_left
import fileinput
import sys
import getopt
import operator
import time
import subprocess
import networkx as nx
import copy
import numpy as np
class SequenceType:
Character, Integer, SpaceSeparated = ('c', 'i', 's')
class CostFunction:
ConcatenationCost, EdgeCost = ('c', 'e')
class RepeatClass:
Repeat, MaximalRepeat, LargestMaximalRepeat, SuperMaximalRepeat = ('r', 'mr', 'lmr', 'smr')
class LogFlag:
ConcatenationCostLog, EdgeCostLog = range(2)
class DAG(object):
# preprocessedInput = [] #Original input as a sequence of integers
# dic = {} #Dictionary for correspondence of integers to original chars (only when charSeq = 'c','s')
# DAG = {} #Adjacency list of DAG
# DAGGraph = nx.MultiDiGraph()
# DAGStrings = {}#Strings corresponding to each node in DAG
#
# concatenatedDAG = [] #Concatenated DAG nodes with seperatorInts
# concatenatedNTs = [] #For each DAG node, alongside the concatenated DAG
# separatorInts = set([]) #Used for seperating DAG nodes in the concatenatedDAG
# separatorIntsIndices = set([]) #Indices of separatorInts in the concatenated DAG
# nextNewInt = 0 #Used for storing ints of repeat symbols and separators in odd numbers
#
# quietLog = False #if true, disables logging
# iterations = 0
def __init__(self, inputFile, loadDAGFlag, chFlag = SequenceType.Character, noNewLineFlag = True):
self.preprocessedInput = [] # Original input as a sequence of integers
self.dic = {} # Dictionary for correspondence of integers to original chars (only when charSeq = 'c','s')
self.DAG = {} # Adjacency list of DAG
self.DAGGraph = nx.MultiDiGraph()
self.DAGStrings = {} # Strings corresponding to each node in DAG
self.concatenatedDAG = [] # Concatenated DAG nodes with seperatorInts
self.concatenatedNTs = [] # For each DAG node, alongside the concatenated DAG
self.separatorInts = set([]) # Used for seperating DAG nodes in the concatenatedDAG
self.separatorIntsIndices = set([]) # Indices of separatorInts in the concatenated DAG
self.nextNewInt = 0 # Used for storing ints of repeat symbols and separators in odd numbers
self.quietLog = False # if true, disables logging
self.iterations = 0
if loadDAGFlag:
self.initFromDAG(inputFile)
else:
self.initFromStrings(inputFile, chFlag, noNewLineFlag)
#Initializes (an unoptimized) DAG from inputFile. charSeq tells if inputFile is a char sequence, int sequence or space-separated sequence
def initFromStrings(self, inputFile, chFlag = SequenceType.Character, noNewLineFlag = True):
(self.preprocessedInput, self.dic) = self.preprocessInput(inputFile, charSeq = chFlag, noNewLineFlag = noNewLineFlag)
allLetters = set(map(int,self.preprocessedInput.split()))
#Setting odd and even values for nextNewInt and nextNewContextInt
self.nextNewInt = max(allLetters)+1
if self.nextNewInt % 2 == 0:
self.nextNewInt += 1
#Initializing the concatenated DAG
for line in self.preprocessedInput.split('\n'):
line = line.rstrip('\n')
self.concatenatedDAG.extend(map(int,line.split()))
self.concatenatedDAG.append(self.nextNewInt)
self.concatenatedNTs.extend(0 for j in range(len(map(int,line.split()))))
self.concatenatedNTs.append(self.nextNewInt)
self.separatorInts.add(self.nextNewInt)
self.separatorIntsIndices.add(len(self.concatenatedDAG)-1)
self.nextNewInt += 2
#Loads the DAG from an external file (The file should start from 'N0' line, without cost logs)
def initFromDAG(self, inputFile):
def RepresentsInt(s):
try:
int(s)
return True
except ValueError:
return False
textFile = inputFile.read().splitlines()
maxInt = 0
for line in textFile[2:]:
nt = int(line.split(' -> ')[0][1:])
self.dic[nt] = nt
if maxInt < nt:
maxInt = nt
maxInt += 1
reverseDic = {}
for line in textFile[2:]:
nt = int(line.split(' -> ')[0][1:])
rhs = line.split(' -> ')[1].split()
for w in rhs:
# sys.stderr.write(w + "\n")
if w.startswith('N') and RepresentsInt(int(w[1:])):
ntInt = int(w[1:])
self.concatenatedDAG.append(ntInt)
self.concatenatedNTs.append(nt)
else:
word = w
if word not in reverseDic:
reverseDic[word] = maxInt
self.dic[maxInt] = word
self.concatenatedDAG.append(maxInt)
self.concatenatedNTs.append(nt)
maxInt += 1
else:
self.concatenatedDAG.append(reverseDic[word])
self.concatenatedNTs.append(nt)
self.concatenatedDAG.append(-1)
self.concatenatedNTs.append(-1)
self.separatorIntsIndices.add(len(self.concatenatedDAG) - 1)
self.nextNewInt = maxInt + 1
for i in self.separatorIntsIndices:
self.concatenatedDAG[i] = self.nextNewInt
self.concatenatedNTs[i] = self.nextNewInt
self.separatorInts.add(self.nextNewInt)
self.nextNewInt += 1
# wordDict = {}
# counterDict = {}
# counter = 0
# textFile = inputFile.read().splitlines()
# tmpnode = []
# for line in textFile:
# # if len(line.split(' -> ')) < 2:
# # tmpnode = ['\n'] + line.split(' ')
# # newnode = []
# # for w in tmpnode:
# # if w not in counterDict:
# # wordDict[counter] = w
# # counterDict[w] = counter
# # counter += 1
# # newnode.append(counterDict[w])
# # self.DAG[newNt] += newnode
# # continue
# # else:
# nt = int(line.split(' -> ')[0][1:])
# if counter % 2 == 0:
# if counter != 0:
# counter += 1
# if nt not in counterDict:
# wordDict[counter] = nt
# counterDict[nt] = counter
# counter += 1
# newNt = counterDict[nt]
# node = line.split(' -> ')[1].split(' ')
# newnode = []
# for w in node:
# if w[0] == 'N':
# if w not in counterDict:
# wordDict[counter] = w[1:]
# counterDict[w[1:]] = counter
# counter += 1
# newnode.append(counterDict[w[1:]])
# else:
# if w not in counterDict:
# wordDict[counter] = w
# counterDict[w] = counter
# counter += 1
# newnode.append(counterDict[w])
# if newNt == 0:
# if newNt in self.DAG:
# self.DAG[newNt].append(newnode)
# else:
# self.DAG[newNt] = [newnode]
# else:
# self.DAG[newNt] = newnode
# self.dic = wordDict
# self.nextNewInt = counter
# if self.nextNewInt % 2 == 0:
# self.nextNewContextInt = self.nextNewInt
# self.nextNewInt += 1
# else:
# self.nextNewContextInt = self.nextNewInt + 1
# for nt in self.DAG:
# self.concatenatedDAG.extend(self.DAG[nt])
# self.concatenatedDAG.append(self.nextNewInt)
# self.concatenatedNTs.extend(nt for j in range(len(self.DAG[nt])))
# self.concatenatedNTs.append(self.nextNewInt)
# self.separatorInts.add(self.nextNewInt)
# self.separatorIntsIndices.add(len(self.concatenatedDAG)-1)
# self.nextNewInt += 2
# print self.DAG
# print self.dic
self.createAdjacencyList()
# print 'self dag'
# print self.DAG
self.createDAGGraph()
# print 'self graph'
# print self.DAGGraph
# print self.DAGGraph.nodes()
# print self.DAGGraph.edges()
self.nodeStringsGenerate()
# print 'self strings'
# print self.DAGStrings
#...........Main G-Lexis Algorithm Functions........
def GLexis(self, quiet, normalRepeatType, costFunction):
# print self.concatenatedDAG
self.quietLog = quiet
while True: #Main loop
#Logging DAG Cost
self.logViaFlag(LogFlag.ConcatenationCostLog)
self.logViaFlag(LogFlag.EdgeCostLog)
#Extracting Maximum-Gain Repeat
(maximumRepeatGainValue, selectedRepeatOccs) = self.retreiveMaximumGainRepeat(normalRepeatType, CostFunction.EdgeCost)
if maximumRepeatGainValue == -1:
break #No repeats, hence terminate
self.logMessage('maxR ' + str(maximumRepeatGainValue) + ' : ' + str(self.concatenatedDAG[selectedRepeatOccs[1][0]:selectedRepeatOccs[1][0]+selectedRepeatOccs[0]]) + '\n')
if maximumRepeatGainValue > 0:
odd = True
self.replaceRepeat(selectedRepeatOccs) #Replacing the chosen repeat
self.iterations += 1
self.logMessage('---------------')
self.logMessage('Number of Iterations: ' + str(self.iterations))
self.createAdjacencyList()
self.createDAGGraph()
self.nodeStringsGenerate()
#Returns the cost of the DAG according to the selected costFunction
def DAGCost(self, costFunction):
if costFunction == CostFunction.ConcatenationCost:
return len(self.concatenatedDAG)-2*len(self.separatorInts)
if costFunction == CostFunction.EdgeCost:
return len(self.concatenatedDAG)-len(self.separatorInts)
#Replaces a repeat's occurrences with a new symbol and creates a new node in the DAG
def replaceRepeat(self,(repeatLength, (repeatOccs))):
repeat = self.concatenatedDAG[repeatOccs[0]:repeatOccs[0]+repeatLength]
newTmpConcatenatedDAG = []
newTmpConcatenatedNTs = []
prevIndex = 0
for i in repeatOccs:
newTmpConcatenatedDAG += self.concatenatedDAG[prevIndex:i] + [self.nextNewInt]
newTmpConcatenatedNTs += self.concatenatedNTs[prevIndex:i] + [self.concatenatedNTs[i]]
prevIndex = i+repeatLength
self.concatenatedDAG = newTmpConcatenatedDAG + self.concatenatedDAG[prevIndex:]
self.concatenatedNTs = newTmpConcatenatedNTs + self.concatenatedNTs[prevIndex:]
self.concatenatedDAG = self.concatenatedDAG + repeat
self.concatenatedNTs = self.concatenatedNTs + [self.nextNewInt for j in range(repeatLength)]
self.logMessage('Added Node: ' + str(self.nextNewInt))
self.nextNewInt += 2
self.concatenatedDAG = self.concatenatedDAG + [self.nextNewInt]
self.concatenatedNTs = self.concatenatedNTs + [self.nextNewInt]
self.separatorInts.add(self.nextNewInt)
self.separatorIntsIndices = set([])
for i in range(len(self.concatenatedDAG)):
if self.concatenatedDAG[i] in self.separatorInts:
self.separatorIntsIndices.add(i)
self.nextNewInt += 2
#Retrieves the maximum-gain repeat (randomizes within ties).
#Output is a tuple: "(RepeatGain, (RepeatLength, (RepeatOccurrences)))"
#1st entry of output is the maximum repeat gain value
#2nd entry of output is a tuple of form: "(selectedRepeatLength, selectedRepeatOccsList)"
def retreiveMaximumGainRepeat(self, repeatClass, costFunction):
repeats = self.extractRepeats(repeatClass)
maxRepeatGain = 0
candidateRepeats = []
for r in repeats: #Extracting maximum repeat
repeatStats = r.split()
repeatOccs = self.extractNonoverlappingRepeatOccurrences(int(repeatStats[0]),map(int,repeatStats[2][1:-1].split(',')))
if maxRepeatGain < self.repeatGain(int(repeatStats[0]), len(repeatOccs), costFunction):
maxRepeatGain = self.repeatGain(int(repeatStats[0]), len(repeatOccs), costFunction)
candidateRepeats = [(int(repeatStats[0]),len(repeatOccs),repeatOccs)]
else:
if maxRepeatGain > 0 and maxRepeatGain == self.repeatGain(int(repeatStats[0]), len(repeatOccs), costFunction):
candidateRepeats.append((int(repeatStats[0]),len(repeatOccs),repeatOccs))
if(len(candidateRepeats) == 0):
return (-1, (0, []))
#Randomizing between candidates with maximum gain
#selectedRepeatStats = candidateRepeats[random.randrange(len(candidateRepeats))]
selectedRepeatStats = candidateRepeats[0]
selectedRepeatLength = selectedRepeatStats[0]
selectedRepeatOccs = sorted(selectedRepeatStats[2])
return (maxRepeatGain, (selectedRepeatLength, selectedRepeatOccs))
#Returns the repeat gain, according to the chosen cost function
def repeatGain(self, repeatLength, repeatOccsLength, costFunction):
# if costFunction == CostFunction.ConcatenationCost:
return (repeatLength-1)*(repeatOccsLength-1)
# if costFunction == CostFunction.EdgeCost:
# return (repeatLength-1)*(repeatOccsLength-1)-1
#Extracts the designated class of repeats (Assumes ./repeats binary being in the same directory)
#Output is a string, each line containing: "RepeatLength NumberOfOccurrence (CommaSeparatedOccurrenceIndices)"
def extractRepeats(self, repeatClass):
process = subprocess.Popen(["./repeats1/repeats11", "-i", "-r"+repeatClass, "-n2", "-psol"],stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
process.stdin.write(' '.join(map(str,self.concatenatedDAG)))
text_file = ''
while process.poll() is None:
output = process.communicate()[0].rstrip()
text_file += output
process.wait()
repeats=[]
firstLine = False
for line in text_file.splitlines():
if firstLine == False:
firstLine = True
continue
repeats.append(line.rstrip('\n'))
return repeats
#Extracts the non-overlapping occurrences of a repeat from a list of occurrences (scans from left to right)
def extractNonoverlappingRepeatOccurrences(self, repeatLength, occurrencesList):
nonoverlappingIndices = []
for i in range(len(occurrencesList)):
if len(nonoverlappingIndices) > 0:
if (nonoverlappingIndices[-1] + repeatLength <= occurrencesList[i]):#Not already covered
nonoverlappingIndices += [occurrencesList[i]]
else:
nonoverlappingIndices += [occurrencesList[i]]
return nonoverlappingIndices
#Creates the adjacency list
def createAdjacencyList(self):
separatorPassed = False
for i in range(len(self.concatenatedDAG)):
if i not in self.separatorIntsIndices:
node = self.concatenatedNTs[i]
if separatorPassed and node == 0:
self.DAG[node].append([])
separatorPassed = False
if node not in self.DAG:
if node == 0:#Target node
self.DAG[node] = [[self.concatenatedDAG[i]]]
else:
self.DAG[node] = [self.concatenatedDAG[i]]
else:
if node == 0:#Target node
self.DAG[node][-1].append(self.concatenatedDAG[i])
else:
self.DAG[node].append(self.concatenatedDAG[i])
else:
separatorPassed = True
#Creates the DAG graph object (adjacency list should already be processed)
def createDAGGraph(self):
for node in self.DAG:
self.DAGGraph.add_node(node)
if node == 0:
for l in self.DAG[node]:
for n in l:
self.DAGGraph.add_node(n)
self.DAGGraph.add_edge(n, node)
else:
for n in self.DAG[node]:
self.DAGGraph.add_node(n)
self.DAGGraph.add_edge(n, node)
#Stores the strings corresponding to each DAG node
def nodeStringsGenerate(self):
for node in nx.nodes(self.DAGGraph):
if self.DAGGraph.in_degree(node) == 0:
# if self.dic == {}:
self.DAGStrings[node] = str(node)
# else:
# self.DAGStrings[node] = str(self.dic[node])
else:
if node == 0:
self.DAGStrings[node] = []
else:
self.DAGStrings[node] = ''
self. nodeStringsHelper(0)
# Helper recursive function
def nodeStringsHelper(self, n):
if self.DAGStrings[n] != [] and self.DAGStrings[n] != '':
return
if n == 0:
for l in self.DAG[n]:
self.DAGStrings[n].append('')
for i in range(len(l)):
subnode = l[i]
self.nodeStringsHelper(subnode)
# if self.dic == {}:
self.DAGStrings[n][-1] += ' ' + self.DAGStrings[subnode]
# else:
# self.DAGStrings[n][-1] += self.DAGStrings[subnode] + ' '
else:
for i in range(len(self.DAG[n])):
subnode = self.DAG[n][i]
self.nodeStringsHelper(subnode)
# if self.dic == {}:
self.DAGStrings[n] += ' ' + self.DAGStrings[subnode]
# else:
# self.DAGStrings[n] += self.DAGStrings[subnode] + ' '
#Returns node's corresponding string
def getNodeString(self, n):
if n == 0:
result = []
for l in self.DAGStrings[n]:
result.append(' '.join(l.split()))
return result
return ' '.join(self.DAGStrings[n].split())
# ...........Path-Centrality Functions........
#Returns a list of strings, corresponding to the nodes removed from DAG, according to greedy core identification algorithm, based on the threshold of edge removal tau
def greedyCoreID_ByTau(self, tau):
cycle = False
for p in nx.simple_cycles(self.DAGGraph):
print p
cycle = True
if cycle:
print 'CYCLE!'
return
numberOfUpwardPaths = {}
numberOfDownwardPaths = {}
sources = []
targets = []
for node in nx.nodes(self.DAGGraph):
if self.DAGGraph.in_degree(node) == 0:
sources.append(node)
if self.DAGGraph.out_degree(node) == 0:
targets.append(node)
numberOfUpwardPaths[node] = 0
numberOfDownwardPaths[node] = 0
self.calculateNumberOfUpwardPaths(sources, targets, numberOfUpwardPaths)
self.calculateNumberOfDownwardPaths(sources, targets, numberOfDownwardPaths)
allPaths = 0
# print targets
for n in targets:
allPaths += numberOfDownwardPaths[n]
# return
orig_targets = targets[:]
for t in targets:
numberOfUpwardPaths[t] = 0
# for s in sources:
# numberOfDownwardPaths[s] = 0
number_of_initial_paths = numberOfDownwardPaths[0]
number_of_current_paths = numberOfDownwardPaths[0]
listOfCentralNodes = []
listOfCentralNodes_cents = []
centralities = self.calculateCentralities(numberOfUpwardPaths, numberOfDownwardPaths)
centsDic = {k:(v,v2) for k,v,v2 in centralities}
# count = 1
# for node in sorted(sources, key = lambda x : centsDic[x], reverse= True):
# sys.stderr.write(str(count) + '\n')
# count += 1
# nodeString = str(self.getListNodeStrings([node])[0])
# print nodeString + '\t' + str(centsDic[node]) + '\t',
# parentNodes = []
# for n in nx.nodes(self.DAGGraph):
# if n != 0:
# nString = str(self.getListNodeStrings([n])[0])
# if nString != nodeString and nString.find(nodeString) >= 0:
# parentNodes.append(n)
# parentNodes = sorted(parentNodes, key = lambda x : centsDic[x], reverse=True)
# if len(parentNodes) > 0:
# if centsDic[node] > centsDic[parentNodes[0]]:
# print 'Y\t',
# else:
# print 'N\t',
# else:
# print 'Y\t',
# for n in parentNodes:
# nString = str(self.getListNodeStrings([n])[0])
# print nString + '\t' + str(centsDic[n]) + '\t',
# print
# return
# for node in nx.nodes(self.DAGGraph):
# if node not in targets:
# print str(numberOfDownwardPaths[node]) + '\t' + str(numberOfUpwardPaths[node]) + '\t' + str(centsDic[node])
# return
# for node in nx.nodes(self.DAGGraph):
# if node not in tagets:
# print str(self.DAGGraph.out_degree(node)) + '\t' + str(numberOfDownwardPaths[node]) + '\t' + str(numberOfUpwardPaths[node])+ '\t' + str(centsDic[node])
# return
# print len(nx.nodes(self.DAGGraph)) - 1
# print allPaths
# return
orig_cents = {k:(v,v2) for k,v,v2 in centralities}
topCentralNodeInfo = max(centralities, key=lambda x:x[1])
# print topCentralNodeInfo[1], topCentralNodeInfo[2][0], topCentralNodeInfo[2][1]
# return
top_num = 20
import math
top10CentsSorted = sorted(centralities, key=lambda x:x[1], reverse = True)[:top_num]
# print top10CentsSorted[int(top_num / 2)][1], top10CentsSorted[int(top_num / 2)][2][0], top10CentsSorted[int(top_num / 2)][2][1]
# return
# print top10CentsSorted[0][1],
# top10CentsSorted = sorted(centralities, key=lambda x: x[2][0], reverse=True)[:top_num]
# print top10CentsSorted[0][2][0],
# top10CentsSorted = sorted(centralities, key=lambda x: x[2][1] if x[2][0] > 1 else 0, reverse=True)[:top_num]
# print top10CentsSorted[0][2][1]
# return
allMaxes = [k for k in centralities if k[1] == topCentralNodeInfo[1]]
allMaxes = [allMaxes[0]]
# print '-------------', topCentralNodeInfo[1], len(allMaxes)
while topCentralNodeInfo[1] > 0 and float(number_of_current_paths)/float(number_of_initial_paths) > 1-tau:#Node with positive centrality exists
for nodeToBeRemoved in allMaxes:
nodeToBeRemoved = nodeToBeRemoved[0]
self.DAGGraph.remove_node(nodeToBeRemoved)
if nodeToBeRemoved in sources:
sources.remove(nodeToBeRemoved)
listOfCentralNodes.append(nodeToBeRemoved)
listOfCentralNodes_cents.append(topCentralNodeInfo[1])
print topCentralNodeInfo[1]
numberOfUpwardPaths = {}
numberOfDownwardPaths = {}
for node in nx.nodes(self.DAGGraph):
numberOfUpwardPaths[node] = 0
numberOfDownwardPaths[node] = 0
self.calculateNumberOfUpwardPaths(sources, targets, numberOfUpwardPaths)
self.calculateNumberOfDownwardPaths(sources, targets, numberOfDownwardPaths)
# for t in targets:
# numberOfUpwardPaths[t] = 0
# for s in sources:
# numberOfDownwardPaths[s] = 0
centralities = self.calculateCentralities(numberOfUpwardPaths, numberOfDownwardPaths)
topCentralNodeInfo = max(centralities, key=lambda x: x[1])
allMaxes = [k for k in centralities if k[1] == topCentralNodeInfo[1]]
allMaxes = [allMaxes[0]]
number_of_current_paths = numberOfDownwardPaths[0]
self.DAGGraph = nx.MultiGraph()
self.createDAGGraph()#Reconstructing the DAG graph
core = []
# print str(len(nx.nodes(self.DAGGraph)) - len(targets) - len(sources)) + '\t' + str(allPaths)
arrayOfStrings = self.getListNodeStrings(listOfCentralNodes)
for i in range(len(arrayOfStrings)):
nodeString = arrayOfStrings[i]
core.append(nodeString.rstrip())
print nodeString.rstrip() + '\t' + str(listOfCentralNodes_cents[i]) + '\t' + str(orig_cents[listOfCentralNodes[i]])
# print str(listOfCentralNodes_cents[i])
# print len(nodeString.rstrip().split())
# break
return core, listOfCentralNodes_cents
def getListNodeStrings(self, listNodes):
arrayOfStrings = []
for i in range(len(listNodes)):
if listNodes[i] == 0:
arrayOfStrings.append('N0')
continue
nodeStringInt = self.getNodeString(listNodes[i])
nodeString = ''
for w in nodeStringInt.split():
nodeString += self.dic[int(w)] + ' '
arrayOfStrings.append(nodeString)
# arrayOfStrings = sorted(arrayOfStrings, key = lambda x : len(x.split()))
return arrayOfStrings
def greedyCoreID_ByTau_knee(self, age, tau, node_dic, knee_threshold):
listOfNodes = []
for node in nx.nodes(self.DAGGraph):
if node != 0:
nodeString = self.getListNodeStrings(list([node]))[0]
if nodeString not in node_dic:
node_dic[nodeString] = {'age_index':age, 'core_index': 0, 'core_count': 0}
cycle = False
for p in nx.simple_cycles(self.DAGGraph):
print p
cycle = True
if cycle:
print 'CYCLE!'
return
numberOfUpwardPaths = {}
numberOfDownwardPaths = {}
sources = []
targets = []
for node in nx.nodes(self.DAGGraph):
if self.DAGGraph.in_degree(node) == 0:
sources.append(node)
if self.DAGGraph.out_degree(node) == 0:
targets.append(node)
numberOfUpwardPaths[node] = 0
numberOfDownwardPaths[node] = 0
self.calculateNumberOfUpwardPaths(sources, targets, numberOfUpwardPaths)
self.calculateNumberOfDownwardPaths(sources, targets, numberOfDownwardPaths)
allPaths = 0
# print targets
allNodes = len(nx.nodes(self.DAGGraph))-len(targets)-len(sources)
for n in targets:
allPaths += numberOfDownwardPaths[n]
# return
for t in targets:
numberOfUpwardPaths[t] = 0
# for s in sources:
# numberOfDownwardPaths[s] = 0
number_of_initial_paths = numberOfDownwardPaths[0]
number_of_current_paths = numberOfDownwardPaths[0]
listOfCentralNodes = []
listOfCentralNodes_cents = []
centralities = self.calculateCentralities(numberOfUpwardPaths, numberOfDownwardPaths)
centsDic = {k: v for k, v, _ in centralities} # calculateCentralities returns (node, centrality, (up, down)) triples
# for node in nx.nodes(self.DAGGraph):
# if node not in targets:
# print str(numberOfDownwardPaths[node]) + '\t' + str(numberOfUpwardPaths[node]) + '\t' + str(centsDic[node])
# return
orig_cents = {k: v for k, v, _ in centralities}
topCentralNodeInfo = max(centralities, key=lambda x:x[1])
allMaxes = [k for k in centralities if k[1] == topCentralNodeInfo[1]]
# print '-------------', topCentralNodeInfo[1], len(allMaxes)
number_of_prev_covered_paths = 0
number_of_current_covered_paths = 0
while topCentralNodeInfo[1] > 0 and float(number_of_current_paths)/float(number_of_initial_paths) > 1-tau:#Node with positive centrality exists
for nodeToBeRemoved in allMaxes:
nodeToBeRemoved = nodeToBeRemoved[0]
self.DAGGraph.remove_node(nodeToBeRemoved)
if nodeToBeRemoved in sources:
sources.remove(nodeToBeRemoved)
listOfCentralNodes.append(nodeToBeRemoved)
listOfCentralNodes_cents.append(topCentralNodeInfo[1])
number_of_current_covered_paths += topCentralNodeInfo[1]
# print (float(number_of_current_covered_paths - number_of_prev_covered_paths)/float(allPaths)) / (float(len(allMaxes))/float(allNodes))
if (float(number_of_current_covered_paths - number_of_prev_covered_paths)/float(allPaths)) / (float(len(allMaxes))/float(allNodes)) < knee_threshold:
break
number_of_prev_covered_paths = number_of_current_covered_paths
numberOfUpwardPaths = {}
numberOfDownwardPaths = {}
for node in nx.nodes(self.DAGGraph):
numberOfUpwardPaths[node] = 0
numberOfDownwardPaths[node] = 0
self.calculateNumberOfUpwardPaths(sources, targets, numberOfUpwardPaths)
self.calculateNumberOfDownwardPaths(sources, targets, numberOfDownwardPaths)
for t in targets:
numberOfUpwardPaths[t] = 0
# for s in sources:
# numberOfDownwardPaths[s] = 0
centralities = self.calculateCentralities(numberOfUpwardPaths, numberOfDownwardPaths)
topCentralNodeInfo = max(centralities, key=lambda x: x[1])
allMaxes = [k for k in centralities if k[1] == topCentralNodeInfo[1]]
number_of_current_paths = numberOfDownwardPaths[0]
self.DAGGraph = nx.MultiGraph()
self.createDAGGraph()#Reconstructing the DAG graph
core = []
# print str(len(nx.nodes(self.DAGGraph)) - len(targets) - len(sources)) + '\t' + str(allPaths)
arrayOfStrings = self.getListNodeStrings(listOfCentralNodes)
for i in range(len(arrayOfStrings)):
nodeString = arrayOfStrings[i]
if node_dic[nodeString]['core_index'] == 0:
node_dic[nodeString]['core_index'] = age
node_dic[nodeString]['core_count'] += 1
core.append(nodeString.rstrip())
# print nodeString.rstrip() + '\t' + str(listOfCentralNodes_cents[i]) + '\t' + str(orig_cents[listOfCentralNodes[i]])
return core, listOfCentralNodes_cents
# Returns a list of strings, corresponding to the nodes removed from DAG, according to greedy core identification algorithm, based on the cardinality of the extracted set
def greedyCoreID_ByCardinality(self, k):
numberOfUpwardPaths = {}
numberOfDownwardPaths = {}
sources = []
targets = []
for node in nx.nodes(self.DAGGraph):
if self.DAGGraph.in_degree(node) == 0:
sources.append(node)
if self.DAGGraph.out_degree(node) == 0:
targets.append(node)
numberOfUpwardPaths[node] = 0
numberOfDownwardPaths[node] = 0
self.calculateNumberOfUpwardPaths(sources, targets, numberOfUpwardPaths)
self.calculateNumberOfDownwardPaths(sources, targets, numberOfDownwardPaths)
for t in targets:
numberOfUpwardPaths[t] = 0
for s in sources:
numberOfDownwardPaths[s] = 0
number_of_initial_paths = numberOfDownwardPaths[0]
number_of_current_paths = numberOfDownwardPaths[0]
listOfCentralNodes = []
centralities = self.calculateCentralities(numberOfUpwardPaths, numberOfDownwardPaths)
topCentralNodeInfo = max(centralities, key=lambda x: x[1])
allMaxes = [c for c in centralities if c[1] == topCentralNodeInfo[1]] # fresh loop variable so the cardinality parameter k is not shadowed
while topCentralNodeInfo[1] > 0 and len(listOfCentralNodes) <= k: # Node with positive centrality exists
for nodeToBeRemoved in allMaxes:
nodeToBeRemoved = nodeToBeRemoved[0]
self.DAGGraph.remove_node(nodeToBeRemoved)
listOfCentralNodes.append(nodeToBeRemoved)
numberOfUpwardPaths = {}
numberOfDownwardPaths = {}
for node in nx.nodes(self.DAGGraph):
numberOfUpwardPaths[node] = 0
numberOfDownwardPaths[node] = 0
self.calculateNumberOfUpwardPaths(sources, targets, numberOfUpwardPaths)
self.calculateNumberOfDownwardPaths(sources, targets, numberOfDownwardPaths)
for t in targets:
numberOfUpwardPaths[t] = 0
for s in sources:
numberOfDownwardPaths[s] = 0
centralities = self.calculateCentralities(numberOfUpwardPaths, numberOfDownwardPaths)
topCentralNodeInfo = max(centralities, key=lambda x: x[1])
allMaxes = [c for c in centralities if c[1] == topCentralNodeInfo[1]]
number_of_current_paths = numberOfDownwardPaths[0]
self.DAGGraph = nx.MultiGraph()
self.createDAGGraph() # Reconstructing the DAG graph
core = []
for i in range(len(listOfCentralNodes)):
core.append(self.getNodeString(listOfCentralNodes[i]))
return core
#Calculates the centralities for all nodes
def Length(self, node):
return len(self.getNodeString(node).split()) # length of the node's expansion (nx.label is not a networkx function)
def calculateCentralities(self, numberOfUpwardPaths, numberOfDownwardPaths):
result = []
for node in nx.nodes(self.DAGGraph):
result.append((node, numberOfUpwardPaths[node] * numberOfDownwardPaths[node],(numberOfUpwardPaths[node],numberOfDownwardPaths[node])))
# result.append((node, numberOfUpwardPaths[node] * Length[node]))
return result
#Calculates the number of Upward paths for all nodes
def calculateNumberOfUpwardPaths(self, sources, targets, numberOfUpwardPaths):
for n in sources:
self.dfsUpward(n, sources, targets, numberOfUpwardPaths)
# Helper recursive function
def dfsUpward(self, n, sources, targets, numberOfUpwardPaths):
if self.DAGGraph.out_degree(n) == 0:
# if n in targets:
numberOfUpwardPaths[n] = 1
return
# else:
# numberOfUpwardPaths[n] = 0
# return
elif numberOfUpwardPaths[n] > 0:
return
else:
for o in self.DAGGraph.out_edges(n):
self.dfsUpward(o[1], sources, targets, numberOfUpwardPaths)
numberOfUpwardPaths[n] += numberOfUpwardPaths[o[1]]
# Calculates the number of Downward paths for all nodes
def calculateNumberOfDownwardPaths(self, sources, targets, numberOfDownwardPaths):
for n in targets:
self.dfsDownward(n, sources, targets, numberOfDownwardPaths)
# Helper recursive function
def dfsDownward(self, n, sources, targets, numberOfDownwardPaths):
if self.DAGGraph.in_degree(n) == 0:
# if n in sources:
numberOfDownwardPaths[n] = 1
return
# else:
# numberOfDownwardPaths[n] = 0
# return
elif numberOfDownwardPaths[n] > 0:
return
else:
for o in self.DAGGraph.in_edges(n):
self.dfsDownward(o[0], sources, targets, numberOfDownwardPaths)
numberOfDownwardPaths[n] += numberOfDownwardPaths[o[0]]
# Calculates the number of Downward paths for all nodes
def calculateNumber_and_LengthOfDownwardPaths(self, sources, targets, numberOfDownwardPaths, lengths):
for n in targets:
self.dfsDownward_length(n, sources, targets, numberOfDownwardPaths, lengths)
# Helper recursive function
def dfsDownward_length(self, n, sources, targets, numberOfDownwardPaths, lengths):
if self.DAGGraph.in_degree(n) == 0:
numberOfDownwardPaths[n] = 1
lengths[n] = [0]
return
elif numberOfDownwardPaths[n] > 0:
return
else:
for o in self.DAGGraph.in_edges(n):
self.dfsDownward_length(o[0], sources, targets, numberOfDownwardPaths, lengths)
numberOfDownwardPaths[n] += numberOfDownwardPaths[o[0]]
for l in lengths[o[0]]:
lengths[n].append(1+l)
# ...........Printing Functions........
# Prints the DAG, optionally in integer form if intDAGPrint==True
def printDAG(self, intDAGPrint):
# self.logMessage('DAGCost(Concats): ' + str(self.DAGCost(CostFunction.ConcatenationCost)))
# self.logMessage('DAGCost(Edges):' + str(self.DAGCost(CostFunction.EdgeCost)))
print 'DAGCost(Concats): ' + str(self.DAGCost(CostFunction.ConcatenationCost))
print 'DAGCost(Edges):' + str(self.DAGCost(CostFunction.EdgeCost))
# return
DAG = self.concatenatedDAG
# print 'dag'
# print DAG
NTs = self.concatenatedNTs
# print 'nts'
# print NTs
separatorInts = self.separatorInts
Dic = self.dic
nodes = {}
ntDic = {}
counter = 1
NTsSorted = set([])
for i in range(len(NTs)):
if NTs[i] not in ntDic and NTs[i] not in separatorInts:
NTsSorted.add(NTs[i])
# ntDic[NTs[i]] = 'N'+str(counter)
# nodes['N'+str(counter)] = ''
ntDic[NTs[i]] = 'N' + str(NTs[i])
nodes['N' + str(NTs[i])] = ''
counter += 1
for i in range(len(DAG)):
if DAG[i] not in NTsSorted:
# sys.stderr.write(str(DAG[i]) + ' ')
if DAG[i] not in separatorInts:
if not intDAGPrint:
try:
nodes[ntDic[NTs[i]]] = str(nodes[ntDic[NTs[i]]]) + ' ' + str(Dic[DAG[i]])
except:
print DAG[i], NTs[i]
raise
else:
nodes[ntDic[NTs[i]]] = str(nodes[ntDic[NTs[i]]]) + ' ' + str(DAG[i])
else:
nodes[ntDic[NTs[i - 1]]] = str(nodes[ntDic[NTs[i - 1]]]) + ' ||'
else:
if not intDAGPrint:
try:
nodes[ntDic[NTs[i]]] = str(nodes[ntDic[NTs[i]]]) + ' ' + str(ntDic[DAG[i]])
except:
print DAG[i], NTs[i]
raise
else:
nodes[ntDic[NTs[i]]] = str(nodes[ntDic[NTs[i]]]) + ' ' + str(ntDic[DAG[i]])
NTsSorted = sorted(list(NTsSorted))
nodeCounter = 0
for nt in NTsSorted:
if intDAGPrint:
subnodes = nodes[ntDic[nt]].rstrip(' ||').split(' ||')
for s in subnodes:
print ntDic[nt] + ' ->' + s
else:
subnodes = nodes[ntDic[nt]].rstrip(' ||').split(' ||')
for s in subnodes:
print ntDic[nt] + ' -> ' + s
nodeCounter += 1
def printDAG_toFile(self, outFile, intDAGPrint):
# self.logMessage('DAGCost(Concats): ' + str(self.DAGCost(CostFunction.ConcatenationCost)))
# self.logMessage('DAGCost(Edges):' + str(self.DAGCost(CostFunction.EdgeCost)))
outFile.write('DAGCost(Concats): ' + str(self.DAGCost(CostFunction.ConcatenationCost)) + '\n')
outFile.write('DAGCost(Edges):' + str(self.DAGCost(CostFunction.EdgeCost)) + '\n')
DAG = self.concatenatedDAG
# print 'dag'
# print DAG
NTs = self.concatenatedNTs
# print 'nts'
# print NTs
separatorInts = self.separatorInts
Dic = self.dic
nodes = {}
ntDic = {}
counter = 1
NTsSorted = set([])
for i in range(len(NTs)):
if NTs[i] not in ntDic and NTs[i] not in separatorInts:
NTsSorted.add(NTs[i])
# ntDic[NTs[i]] = 'N'+str(counter)
# nodes['N'+str(counter)] = ''
ntDic[NTs[i]] = 'N' + str(NTs[i])
nodes['N' + str(NTs[i])] = ''
counter += 1
# print DAG
# print separatorInts
# print NTs
# print NTsSorted
for i in range(len(DAG)):
if DAG[i] not in NTsSorted:
# sys.stderr.write(str(DAG[i]) + ' ')
if DAG[i] not in separatorInts:
if not intDAGPrint:
try:
nodes[ntDic[NTs[i]]] = str(nodes[ntDic[NTs[i]]]) + ' ' + str(Dic[DAG[i]])
except:
print DAG[i], NTs[i]
raise
else:
nodes[ntDic[NTs[i]]] = str(nodes[ntDic[NTs[i]]]) + ' ' + str(DAG[i])
else:
# print i - 1
# print NTs[i - 1]
# print ntDic[NTs[i - 1]]
# print nodes[ntDic[NTs[i - 1]]]
nodes[ntDic[NTs[i - 1]]] = str(nodes[ntDic[NTs[i - 1]]]) + ' ||'
else:
if not intDAGPrint:
try:
nodes[ntDic[NTs[i]]] = str(nodes[ntDic[NTs[i]]]) + ' ' + str(ntDic[DAG[i]])
except:
print DAG[i], NTs[i]
raise
else:
nodes[ntDic[NTs[i]]] = str(nodes[ntDic[NTs[i]]]) + ' ' + str(ntDic[DAG[i]])
NTsSorted = sorted(list(NTsSorted))
nodeCounter = 0
for nt in NTsSorted:
if intDAGPrint:
subnodes = nodes[ntDic[nt]].rstrip(' ||').split(' ||')
for s in subnodes:
outFile.write(ntDic[nt] + ' ->' + s + '\n')
else:
subnodes = nodes[ntDic[nt]].rstrip(' ||').split(' ||')
for s in subnodes:
outFile.write(ntDic[nt] + ' -> ' + s + '\n')
nodeCounter += 1
outFile.close()
# Log via flags
def logViaFlag(self, flag):
if not self.quietLog:
if flag == LogFlag.ConcatenationCostLog:
sys.stderr.write('DAGCost(Concats): ' + str(self.DAGCost(CostFunction.ConcatenationCost)) + '\n')
print(str('DAGCost(Concats): ' + str(self.DAGCost(CostFunction.ConcatenationCost))))
if flag == LogFlag.EdgeCostLog:
sys.stderr.write('DAGCost(Edges): ' + str(self.DAGCost(CostFunction.EdgeCost)) + '\n')
print(str('DAGCost(Edges): ' + str(self.DAGCost(CostFunction.EdgeCost))))
# Log custom message
def logMessage(self, message):
if not self.quietLog:
sys.stderr.write(message + '\n')
print(str(message))
# ...........Utility Functions........
# Converts the input data into an integer sequence, returns the integer sequence and the dictionary for recovering original letters
def preprocessInput(self, inputFile, charSeq=SequenceType.Character, noNewLineFlag=True):
if charSeq == SequenceType.Character: # Building an integer-spaced sequence from the input string
letterDict = {}
counterDict = {}
i = 0
counter = 1
newContents = ''
if noNewLineFlag:
line = inputFile.read()
for i in range(len(line)):
if line[i] not in counterDict:
letterDict[counter] = line[i]
counterDict[line[i]] = counter
counter += 1
newContents += str(counterDict[line[i]]) + ' '
else:
for line in inputFile:
line = line.rstrip('\n')
for i in range(len(line)):
if line[i] not in counterDict:
letterDict[counter] = line[i]
counterDict[line[i]] = counter
counter += 1
newContents += str(counterDict[line[i]]) + ' '
newContents += '\n'
return (newContents.rstrip('\n'), letterDict)
if charSeq == SequenceType.Integer: # input is space-separated integers
newContents = ''
dict = {}
for l in inputFile.read().splitlines():
line = l.split()
for i in range(len(line)):
if not line[i].lstrip('-').isdigit(): # validate the token itself; the original isinstance check could never fail because int() raises first
raise ValueError('Input file is not in space-separated integer form.')
else:
dict[int(line[i])] = line[i]
newContents += l + '\n'
return (newContents.rstrip('\n'), dict)
if charSeq == SequenceType.SpaceSeparated: # input is space-separated words
wordDict = {}
counterDict = {}
i = 0
counter = 1
newContents = ''
for line in inputFile:
line = line.rstrip('\n')
for w in line.split():
if w not in counterDict:
wordDict[counter] = w
counterDict[w] = counter
counter += 1
newContents += str(counterDict[w]) + ' '
newContents += '\n'
return (newContents.rstrip('\n'), wordDict)
def calcPaths(self):
# for p in nx.simple_cycles(self.DAGGraph):
# print p
# return
numberOfDownwardPaths = {}
lengths = {}
sources = []
targets = []
for node in nx.nodes(self.DAGGraph):
if self.DAGGraph.in_degree(node) == 0:
sources.append(node)
if self.DAGGraph.out_degree(node) == 0:
targets.append(node)
numberOfDownwardPaths[node] = 0
lengths[node] = []
print '\t' + '\t' + '\t' + '\t' + '\t' + '\t' + str(len(sources))
return
self.calculateNumber_and_LengthOfDownwardPaths(sources, targets, numberOfDownwardPaths, lengths)
centralNodes, cents = self.greedyCoreID_ByTau(0.9)
# print centralNodes
# print cents
max = 0
sum = 0
num = 0
for n in targets:
for l in lengths[n]:
if max < l:
max = l
sum += l
num += 1
print '\t' + str(len(nx.nodes(self.DAGGraph))-len(targets)-len(sources)) + '\t' + str(max) + '\t' + str(float(sum)/float(num)) + '\t' + str(len(centralNodes)) + '\t' + str(cents[0]) + '\t' + str(1-float(len(centralNodes))/float(len(sources)))
def stats(self):
# sources = []
# targets = []
# for node in nx.nodes(self.DAGGraph):
# if self.DAGGraph.in_degree(node) == 0:
# sources.append(node)
# if self.DAGGraph.out_degree(node) == 0:
# targets.append(node)
# print '\t' + str(len(nx.nodes(self.DAGGraph))-len(targets)-len(sources))
self.calcPaths()
def structureParenthesization(grammarFile):
counter = 1
grammar = {}
for line in open(grammarFile,'r').readlines():
if counter <= 2:
counter += 1
continue
nt = line.split(' -> ')[0]
rule = line.split(' -> ')[1].split()
if nt not in grammar:
grammar[nt] = []
grammar[nt].append(rule)
parentheses = []
for rule in grammar['N0']:
ruleParenthesesList = []
parenthesisIndex = 0
stack = copy.deepcopy(rule)
ntStack = ['N0']
ntText = []
while len(stack) > 0:
c = stack.pop(0)
if c == -2: # Repeat NT recursion
ntStack.pop()
ntText.append(-2)
continue
if c in grammar:
ntStack.append(c)
stack = grammar[c][0] + [-2] + stack
ntText.append(-1)
else:
ntText.append(c)
# print ntText
trueIndex = 0
parentheses.append(set([]))
parenthStack = []
for i in range(0,len(ntText)):
if ntText[i] == -1:
parenthStack.append(trueIndex)
elif ntText[i] == -2:
parentheses[-1].add((parenthStack.pop(), trueIndex)) # pop the most recent open index so nested spans pair up correctly
else:
trueIndex += 1
return parentheses
def structSimTest(file1, file2):
csParenth = structureParenthesization(file1)
testParenth = structureParenthesization(file2)
meanDistance = 0.0
for i in range(0,len(csParenth)):
p1 = csParenth[i]
p2 = testParenth[i]
# print 'p1', p1
# print 'p2', p2
# break
if len(p1) == 0 and len(p2) == 0:
meanDistance += 1
else:
meanDistance += 2.0 * len(p1.intersection(p2)) / float(len(p1) + len(p2)) # float division; Python 2 would otherwise truncate
meanDistance /= len(csParenth)
return meanDistance
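# Illustrative usage sketch (file names below are hypothetical, not from this repository):
# structSimTest returns the mean Dice overlap between the parenthesizations induced by two
# grammar files, so values near 1 indicate structurally similar grammars.
# sim = structSimTest('grammars/gram-A.txt', 'grammars/gram-B.txt')
# print sim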
#Sets the value of parameters
def processParams(argv):
chFlag = SequenceType.Character #if false, accepts integer sequence
printIntsDAG = False #if true, prints the DAG in integer sequence format
quietLog = False #if true, disables logging
rFlag = 'mr' #repeat type (for normal repeat replacements)
functionFlag = 'e' #cost function to be optimized
noNewLineFlag = True #consider each line as a separate string
loadDAGFlag = False
usage = """Usage: ./python Lexis.py [-t (c | i | s) | -p (i) | -q | -r (r | mr | lmr | smr) | -f (c | e) | -m | -l] <filename>
[-t]: choosing between character sequence, integer sequence or space-separated sequence
c - character sequence
i - integer sequence
s - space-separated sequence
[-p]: specifies DAG printing option (for debugging purposes)
i - prints the DAG in integer sequence format
[-q]: disables logging
[-r]: repeat type (for normal repeat replacements)
r - repeat
mr - maximal repeat (default)
lmr - largest-maximal repeat
smr - super-maximal repeat
[-f]: cost function to be optimized
c - concatenation cost
e - edge cost (default)
[-m]: consider each line of the input file as a separate target string
[-l]: load a DAG file (will override -r -t -m options)
"""
if len(argv) == 1 or (len(argv) == 2 and argv[1] == '-h'):
sys.stderr.write('Invalid input\n')
sys.stderr.write(usage + '\n')
sys.exit()
optlist,args = getopt.getopt(argv[1:], 't:p:qr:f:ml')
for opt,arg in optlist:
if opt == '-t':
for ch in arg:
if ch == 'c' or ch == 'i' or ch == 's':
chFlag = ch
else:
sys.stderr.write('Invalid input in ' + '-t' + ' flag\n')
sys.stderr.write(usage + '\n')
sys.exit()
if opt == '-p':
for ch in arg:
if ch == 'i':
printIntsDAG = True
else:
sys.stderr.write('Invalid input in ' + '-p' + ' flag\n')
sys.stderr.write(usage + '\n')
sys.exit()
if opt == '-q':
quietLog = True
if opt == '-r':
if arg == 'r' or arg == 'mr' or arg == 'lmr' or arg == 'smr':
rFlag = arg
else:
sys.stderr.write('Invalid input in ' + '-r' + ' flag\n')
sys.stderr.write(usage + '\n')
sys.exit()
if opt == '-f':
if arg == 'c' or arg == 'e':
functionFlag = arg
else:
sys.stderr.write('Invalid input in ' + '-f' + ' flag\n')
sys.stderr.write(usage + '\n')
sys.exit()
if opt == '-m':
noNewLineFlag = False
if opt == '-l':
loadDAGFlag = True
return (chFlag, printIntsDAG, quietLog, rFlag, functionFlag, noNewLineFlag, loadDAGFlag)
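# Hypothetical command lines matching the flags parsed above (for illustration only):
# python Lexis.py -t s -f e -r mr -m corpus.txt # space-separated targets, edge cost, maximal repeats, one target per line
# python Lexis.py -t c -q input.txt # character-sequence input with logging disabled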
def rand_input_q_analysis(rand_inputs_path, nodes_dic_path, num_rands):
import pickle
import os
nodes_dic = pickle.load(open(nodes_dic_path, "rb"))
nodes_rands = []
edges_rand = []
sources_rand = []
intermediate_nodes_rand = []
rand_inputs = [f for f in os.listdir(rand_inputs_path) if os.path.isfile(join(rand_inputs_path, f)) and not f.startswith('.')]
# get rand input stats
counter = 1
tmp_dag = DAG(open(rand_inputs_path + rand_inputs[0], 'r'), True, SequenceType.SpaceSeparated, noNewLineFlag)
sources = [];targets = []
for node in nx.nodes(tmp_dag.DAGGraph):
if tmp_dag.DAGGraph.in_degree(node) == 0: sources.append(node)
if tmp_dag.DAGGraph.out_degree(node) == 0: targets.append(node)
for f in rand_inputs:
sys.stderr.write(str(counter) + '\n')
counter += 1
tmp_dag = DAG(open(rand_inputs_path + f, 'r'), True, SequenceType.SpaceSeparated, noNewLineFlag)
nodes_rands.append(len(nx.nodes(tmp_dag.DAGGraph))); edges_rand.append(len(nx.edges(tmp_dag.DAGGraph)));
sources_rand.append((len(sources)));intermediate_nodes_rand.append((len(nx.nodes(tmp_dag.DAGGraph)) - len(sources)))
print str(np.mean(nodes_rands)) + '±' + str(np.std(nodes_rands)) + '\t',
print str(np.mean(edges_rand)) + '±' + str(np.std(edges_rand)) + '\t',
print str(np.mean(sources_rand)) + '±' + str(np.std(sources_rand)) + '\t',
print str(np.mean(intermediate_nodes_rand)) + '±' + str(np.std(intermediate_nodes_rand))
print
print
for q in [0.001, 0.01] + list(np.arange(0.1, 1, 0.1)) + [0.95, 0.99, 0.999]:
sys.stderr.write(str(q) + '\n')
q_nodes_rands = []; q_edges_rand = []; q_sources_rand = []; q_intermediate_nodes_rand = [];
q_num_of_pruned_nodes_rand = []; q_frac_of_pruned_nodes_rand = []; q_num_nonsource_pruned_rand = [];
for f in rand_inputs:
tmp_dag = DAG(open(rand_inputs_path + f, 'r'), True, SequenceType.SpaceSeparated, noNewLineFlag)
new_tmp_dag = pruneDAG_withNodeDic(nodes_dic, num_rands, tmp_dag, q)
new_sources = [];
new_targets = []
for node in nx.nodes(new_tmp_dag.DAGGraph):
if new_tmp_dag.DAGGraph.in_degree(node) == 0:
new_sources.append(node)
if new_tmp_dag.DAGGraph.out_degree(node) == 0:
new_targets.append(node)
q_nodes_rands.append(len(nx.nodes(new_tmp_dag.DAGGraph)));
q_edges_rand.append(len(nx.edges(new_tmp_dag.DAGGraph)));
q_sources_rand.append((len(new_sources)));
q_intermediate_nodes_rand.append((len(nx.nodes(new_tmp_dag.DAGGraph)) - len(new_sources)))
q_num_of_pruned_nodes_rand.append(len(nx.nodes(tmp_dag.DAGGraph))-len(nx.nodes(new_tmp_dag.DAGGraph)))
q_frac_of_pruned_nodes_rand.append(float(len(nx.nodes(tmp_dag.DAGGraph))-len(nx.nodes(new_tmp_dag.DAGGraph)))/float(len(nx.nodes(tmp_dag.DAGGraph))))
q_num_nonsource_pruned_rand.append(len(nx.nodes(tmp_dag.DAGGraph))-len(nx.nodes(new_tmp_dag.DAGGraph))-len(sources))
print str(np.mean(q_nodes_rands)) + '±' + str(np.std(q_nodes_rands)) + '\t',
print str(np.mean(q_edges_rand)) + '±' + str(np.std(q_edges_rand)) + '\t',
print str(np.mean(q_sources_rand)) + '±' + str(np.std(q_sources_rand)) + '\t',
print str(np.mean(q_intermediate_nodes_rand)) + '±' + str(np.std(q_intermediate_nodes_rand)) + '\t',
print str(np.mean(q_num_of_pruned_nodes_rand)) + '±' + str(np.std(q_num_of_pruned_nodes_rand)) + '\t',
print str(np.mean(q_frac_of_pruned_nodes_rand)) + '±' + str(np.std(q_frac_of_pruned_nodes_rand)) + '\t',
print str(np.mean(q_num_nonsource_pruned_rand)) + '±' + str(np.std(q_num_nonsource_pruned_rand))
return
def calc_max_prob_diff(file1, file2):
ranks1 = [l.split('\t')[1] for l in open(file1, 'r').readlines()]
ranks2 = [l.split('\t')[1] for l in open(file2, 'r').readlines()]
max_diff = 0
for i in range(len(ranks1)):
if max_diff < abs(float(ranks1[i])-float(ranks2[i])):
max_diff = abs(float(ranks1[i])-float(ranks2[i]))
print max_diff
def calc_rank_corr(file1, file2):
import scipy.stats
ranks1 = [l.split('\t')[0] for l in open(file1,'r').readlines()]
ranks2 = [l.split('\t')[0] for l in open(file2,'r').readlines()]
print scipy.stats.spearmanr(ranks1, ranks2)
print scipy.stats.kendalltau(ranks1, ranks2)
def listProbs(dag, num_rands):
import pickle
nodes_dic = pickle.load(open("nodes-dic/nodes-dic"+str(num_rands)+".p", "rb"))
nodeStrings = dag.getListNodeStrings(nx.nodes(dag.DAGGraph))
node_list = list(nx.nodes(dag.DAGGraph))
probs = {}
for i in range(len(nodeStrings)):
node = node_list[i]
node_str = nodeStrings[i]
probs[node_str] = float(nodes_dic[node_str]) / float(num_rands)
probs = {k:probs[k] for k in probs if len(k.split()) > 1}
probs_list = sorted(probs.iteritems(), key = lambda x : (x[1],x[0]), reverse = True)
for i in range(len(probs_list)):
print str(probs_list[i][0]) + '\t' + str(probs_list[i][1])
def create_nodes_prob_dic(origFile, name, folderName, num_rand):
NUMBER_OF_RANDOMIZATIONS = num_rand
import pickle
offset = 1000
# nodes_dic = pickle.load(open("nodes-dic/nodes-dic"+str(NUMBER_OF_RANDOMIZATIONS-offset)+".p", "rb"))
# start_index = NUMBER_OF_RANDOMIZATIONS - offset
start_index = 0
nodes_dic = defaultdict(int)
totalFileString = ''
lengths = []
# origFileObj = open(folderName+'/'+origFile+'.txt','r')
for l in origFile.readlines():
totalFileString += l
lengths.append(len(l.split()))
orig_totalFileString_list = totalFileString.split()
for r in range(start_index, NUMBER_OF_RANDOMIZATIONS):
sys.stderr.write('rand' + str(r) + '\n')
if True:
totalFileString_list = orig_totalFileString_list
random.shuffle(totalFileString_list)
new_strings = []
for l in lengths:
new_strings.append(' '.join(totalFileString_list[:l]))
totalFileString_list = totalFileString_list[l:]
tmp_file = open(folderName +'/' + name+ '-rand' +'/' + name+ '-rand' + str(r) + '.txt', 'w')
tmp_file.write('\n'.join(new_strings))
tmp_file.close()
tmp_dag = DAG(open(folderName +'/' + name+ '-rand' +'/' + name+ '-rand' + str(r) + '.txt', 'r'), False, SequenceType.SpaceSeparated,
noNewLineFlag)
tmp_dag.GLexis(True, RepeatClass.MaximalRepeat, CostFunction.EdgeCost)
tmp_dag.printDAG_toFile(open(folderName +'/' + name+ '-rand' +'/' + name+ '-gram-rand' + str(r) + '.txt', 'w'), False)
else:
tmp_dag = DAG(open(folderName + '/gram-rand' + ("%05d" % (r,)) + '.txt', 'r'), True,
SequenceType.SpaceSeparated, noNewLineFlag)
tmp_nodeStrings = tmp_dag.getListNodeStrings(nx.nodes(tmp_dag.DAGGraph))
for i in range(len(tmp_nodeStrings)):
node = nx.nodes(tmp_dag.DAGGraph)[i]
node_str = tmp_nodeStrings[i]
if node != 0:
nodes_dic[node_str] += 1
import pickle
pickle.dump(nodes_dic, open(folderName +'/' + name+ '-rand' +'/' + name+ '-nodes-dic' + str(NUMBER_OF_RANDOMIZATIONS) + ".p", "wb"))
# nodes_dic = pickle.load(open("nodes-dic"+str(NUMBER_OF_RANDOMIZATIONS)+".p", "rb"))
return nodes_dic
def pruneDAG(origFile, name, folderName, dag, q, num_rand):
NUMBER_OF_RANDOMIZATIONS = num_rand
# nodes_dic = create_nodes_prob_dic(origFile, folderName, dag, num_rand)
nodes_dic = create_nodes_prob_dic(origFile, name, folderName, num_rand)
removed_nodes = set([])
nodeStrings = dag.getListNodeStrings(nx.nodes(dag.DAGGraph))
for i in range(len(nodeStrings)):
node = list(nx.nodes(dag.DAGGraph))[i]
node_str = nodeStrings[i]
if float(nodes_dic[node_str])/float(NUMBER_OF_RANDOMIZATIONS) > q:
removed_nodes.add(node)
for nodeToBeRemoved in removed_nodes:
dag.DAGGraph.remove_node(nodeToBeRemoved)
return removed_nodes, dag
def pruneDAG_withNodeDic(nodes_dic, num_rand, input_dag, q):
removed_nodes = set([])
import copy
dag = copy.deepcopy(input_dag)
nodeStrings = dag.getListNodeStrings(nx.nodes(dag.DAGGraph))
for i in range(len(nodeStrings)):
node = list(nx.nodes(dag.DAGGraph))[i]
node_str = nodeStrings[i]
if float(nodes_dic[node_str])/float(num_rand) > q:
removed_nodes.add(node)
for nodeToBeRemoved in removed_nodes:
dag.DAGGraph.remove_node(nodeToBeRemoved)
return dag
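# Illustrative pruning sketch (the pickle path and counts below are assumptions, not from this script):
# a node is dropped when it appears in more than a fraction q of the randomized grammars, i.e. when its
# empirical probability under randomization exceeds q.
# nodes_dic = pickle.load(open('nodes-dic/nodes-dic1000.p', 'rb'))
# pruned = pruneDAG_withNodeDic(nodes_dic, 1000, g, 0.05)
# print len(nx.nodes(g.DAGGraph)) - len(nx.nodes(pruned.DAGGraph)) # number of pruned nodes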
def convertGMLtoDAG(gmlFile):
pass
def printNodeStats2(graph, gr_nodes):
sources = []
targets = []
for node in nx.nodes(graph):
if graph.in_degree(node) == 0:
sources.append(node)
if graph.out_degree(node) == 0:
targets.append(node)
centsDic, numberOfDownwardPaths, numberOfUpwardPaths = getAllNodesCentralities(graph)
for node in nx.nodes(graph):
if node not in targets:
print str(len(node.split())) + '\t' + str(graph.out_degree(node)) + '\t' + str(numberOfDownwardPaths[node]) + '\t' + str(numberOfUpwardPaths[node])+ '\t' + str(centsDic[node]) + '\t',
if node in gr_nodes:
print 1
else:
print 0
def printNodeStats(graph, filePath):
sources = []
targets = []
for node in nx.nodes(graph):
if graph.in_degree(node) == 0:
sources.append(node)
if graph.out_degree(node) == 0:
targets.append(node)
centsDic, numberOfDownwardPaths, numberOfUpwardPaths = getAllNodesCentralities(graph)
# print str(len(nx.nodes(graph))) + '\t' + str(len(nx.edges(graph))) + '\t' + str(len(sources)) + '\t' + str(len(nx.nodes(graph))-len(sources))
# return
outFile = open(filePath,'w')
for node in nx.nodes(graph):
if node not in targets:
# print str(len(node.split())) + '\t' + str(graph.out_degree(node)) + '\t' + str(numberOfDownwardPaths[node]) + '\t' + str(numberOfUpwardPaths[node])+ '\t' + str(centsDic[node])
# outFile.write(str(node) + '\t' + str(len(node.split())) + '\t' + str(graph.out_degree(node)) + '\t' + str(numberOfDownwardPaths[node]) + '\t' + str(numberOfUpwardPaths[node]) + '\t' + str(centsDic[node]) + '\n')
outFile.write(str(len(node.split())) + '\t' + str(graph.out_degree(node)) + '\t' + str(
numberOfDownwardPaths[node]) + '\t' + str(numberOfUpwardPaths[node]) + '\t' + str(
centsDic[node]) + '\n')
# print str(node) + '\t' + str(graph.out_degree(node)) + '\t' + str(numberOfDownwardPaths[node]) + '\t' + str(numberOfUpwardPaths[node]) + '\t' + str(centsDic[node])
outFile.close()
def getAllNodesCentralities(graph):
numberOfUpwardPaths = {}
numberOfDownwardPaths = {}
sources = []
targets = []
for node in nx.nodes(graph):
if graph.in_degree(node) == 0:
sources.append(node)
if graph.out_degree(node) == 0:
targets.append(node)
numberOfUpwardPaths[node] = 0
numberOfDownwardPaths[node] = 0
calculateNumberOfUpwardPaths(graph, sources, targets, numberOfUpwardPaths)
calculateNumberOfDownwardPaths(graph, sources, targets, numberOfDownwardPaths)
centralities = calculateCentralities(graph, numberOfUpwardPaths, numberOfDownwardPaths)
return {k: v for k, v in centralities}, numberOfDownwardPaths, numberOfUpwardPaths
def calculateCentralities(graph, numberOfUpwardPaths, numberOfDownwardPaths):
result = []
for node in nx.nodes(graph):
result.append((node, numberOfUpwardPaths[node] * numberOfDownwardPaths[node]))
# result.append((node, numberOfUpwardPaths[node] * len(node.split())))
# result.append((node, len(node.split()) * numberOfDownwardPaths[node]))
return result
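# Worked example of the path centrality used here (numbers are made up): a node lying on
# 3 upward paths (toward targets) and 4 downward paths (toward sources) scores 3 * 4 = 12,
# i.e. it participates in 12 source-to-target paths.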
#Calculates the number of Upward paths for all nodes
def calculateNumberOfUpwardPaths(graph, sources, targets, numberOfUpwardPaths):
for n in sources:
dfsUpward(graph, n, sources, targets, numberOfUpwardPaths)
# Helper recursive function
def dfsUpward(graph, n, sources, targets, numberOfUpwardPaths):
if graph.out_degree(n) == 0:
numberOfUpwardPaths[n] = 1
return
elif numberOfUpwardPaths[n] > 0:
return
else:
for o in graph.out_edges(n):
dfsUpward(graph, o[1], sources, targets, numberOfUpwardPaths)
numberOfUpwardPaths[n] += numberOfUpwardPaths[o[1]]
# Calculates the number of Downward paths for all nodes
def calculateNumberOfDownwardPaths(graph, sources, targets, numberOfDownwardPaths):
for n in targets:
dfsDownward(graph, n, sources, targets, numberOfDownwardPaths)
# Helper recursive function
def dfsDownward(graph, n, sources, targets, numberOfDownwardPaths):
if graph.in_degree(n) == 0:
numberOfDownwardPaths[n] = 1
return
elif numberOfDownwardPaths[n] > 0:
return
else:
for o in graph.in_edges(n):
dfsDownward(graph, o[0], sources, targets, numberOfDownwardPaths)
numberOfDownwardPaths[n] += numberOfDownwardPaths[o[0]]
def coreID_byTau(graph, tau):
cycle = False
for p in nx.simple_cycles(graph):
print p
cycle = True
if cycle:
print 'CYCLE!'
return
numberOfUpwardPaths = {}
numberOfDownwardPaths = {}
sources = []
targets = []
for node in nx.nodes(graph):
if graph.in_degree(node) == 0:
sources.append(node)
if graph.out_degree(node) == 0:
targets.append(node)
numberOfUpwardPaths[node] = 0
numberOfDownwardPaths[node] = 0
calculateNumberOfUpwardPaths(graph, sources, targets, numberOfUpwardPaths)
calculateNumberOfDownwardPaths(graph, sources, targets, numberOfDownwardPaths)
allPaths = 0
# print targets
for n in targets:
allPaths += numberOfDownwardPaths[n]
# return
lenNodes = len(nx.nodes(graph))
lenAllPaths = allPaths
lenSources = len([x for x in nx.nodes(graph) if graph.in_degree(x) == 0])
# print str(len(nx.nodes(graph))) + '\t' + str(allPaths)
for t in targets:
numberOfUpwardPaths[t] = 0
# for s in sources:
# numberOfDownwardPaths[s] = 0
number_of_initial_paths = allPaths
number_of_current_paths = allPaths
listOfCentralNodes = []
listOfCentralNodes_cents = []
centralities = calculateCentralities(graph, numberOfUpwardPaths, numberOfDownwardPaths)
centsDic = {k: v for k, v in centralities}
orig_cents = {k: v for k, v in centralities}
topCentralNodeInfo = max(centralities, key=lambda x: x[1])
allMaxes = [k for k in centralities if k[1] == topCentralNodeInfo[1]]
allMaxes = [allMaxes[0]]
# print '-------------', topCentralNodeInfo[1], len(allMaxes)
while topCentralNodeInfo[1] > 0 and float(number_of_current_paths) / float(
number_of_initial_paths) > 1 - tau: # Node with positive centrality exists
for nodeToBeRemoved in allMaxes:
nodeToBeRemoved = nodeToBeRemoved[0]
graph.remove_node(nodeToBeRemoved)
if nodeToBeRemoved in sources:
sources.remove(nodeToBeRemoved)
listOfCentralNodes.append(nodeToBeRemoved)
listOfCentralNodes_cents.append((topCentralNodeInfo[1], orig_cents[nodeToBeRemoved]))
numberOfUpwardPaths = {}
numberOfDownwardPaths = {}
for node in nx.nodes(graph):
numberOfUpwardPaths[node] = 0
numberOfDownwardPaths[node] = 0
calculateNumberOfUpwardPaths(graph, sources, targets, numberOfUpwardPaths)
calculateNumberOfDownwardPaths(graph, sources, targets, numberOfDownwardPaths)
for t in targets:
numberOfUpwardPaths[t] = 0
# for s in sources:
# numberOfDownwardPaths[s] = 0
centralities = calculateCentralities(graph, numberOfUpwardPaths, numberOfDownwardPaths)
topCentralNodeInfo = max(centralities, key=lambda x: x[1])
allMaxes = [k for k in centralities if k[1] == topCentralNodeInfo[1]]
allMaxes = [allMaxes[0]]
allPaths_tmp = 0
for n in sources:
allPaths_tmp += numberOfUpwardPaths[n]
number_of_current_paths = allPaths_tmp
core = []
# print str(len(nx.nodes(self.DAGGraph)) - len(targets) - len(sources)) + '\t' + str(allPaths)
return listOfCentralNodes, listOfCentralNodes_cents, lenNodes, lenAllPaths, lenSources
# arrayOfStrings = self.getListNodeStrings(listOfCentralNodes)
# for i in range(len(arrayOfStrings)):
# nodeString = arrayOfStrings[i]
# core.append(nodeString.rstrip())
# print nodeString.rstrip() + '\t' + str(listOfCentralNodes_cents[i]) + '\t' + str(
# orig_cents[listOfCentralNodes[i]])
# return core, listOfCentralNodes_cents
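# Illustrative call (the GML file name is hypothetical): coreID_byTau keeps removing the most
# path-central node until a fraction tau of the source-to-target paths is covered, and also
# returns the original node/path/source counts for reporting.
# gr = nx.read_gml('some-grammar.gml')
# nodes, cents, nNodes, nPaths, nSources = coreID_byTau(gr.copy(), 0.9)
# print len(nodes), nNodes, nPaths, nSources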
def matrixGenerator(N, alpha, beta):
M = np.zeros((N, N))
B = np.zeros(N)
# NUM_IT = np.power(N,3)
NUM_IT = 1
for it in range(NUM_IT):
tmp_M = np.zeros((N, N))
for j in range(N):
B[j] = np.nextafter(np.power(float(j+1),-beta), float('inf'))
B = B/np.sum(B)
for i in range(N):
# if beta == 0:
# P = range(N)
# else:
P = np.random.choice(np.arange(N), p=B, size=N, replace=False)
for j in range(N):
tmp_M[i,P[j]] = np.nextafter(np.power(float(j+1),-alpha), float('inf'))
tmp_M[i, :] = tmp_M[i,:] / np.sum(tmp_M[i,:])
M += tmp_M
# print B
# print tmp_M
M /= NUM_IT
# print M
# col_sum = np.sum(M,axis=0)
# print 'sum0', col_sum/np.sum(col_sum)
# print 'sum1', np.sum(M, axis=1)
return M
def stringGeneratorFromMatrix(M, lengthArray):
targetSet = []
N = np.shape(M)[0]
abundance = np.sum(M,axis=0)
abundance = abundance/np.sum(abundance)
next_char = np.random.choice(np.arange(N), p=abundance, size=1, replace=False)[0]
for l in lengthArray:
tmp_target = ''
for i in range(l):
tmp_target += str(next_char) + ' '
next_char = np.random.choice(np.arange(N), p=M[next_char,:], size=1, replace=False)[0]
targetSet.append(tmp_target)
return targetSet
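# Minimal generation sketch (parameter values are made up): build a Zipf-biased transition
# matrix with matrixGenerator and sample two strings of length 100 from it.
# M = matrixGenerator(50, 1.0, 0.5)
# T = stringGeneratorFromMatrix(M, [100, 100])
# print T[0]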
def empiricalMatrixGenerator(T, outPath):
part_freq = defaultdict(int)
for t in T:
for p in t.split():
part_freq[p] += 1
N = len(part_freq.keys())
two_gram_matrix = np.zeros((N, N))
part_to_index = {x[1]:x[0] for x in enumerate(part_freq.keys())}
for t in T:
prev_char = ' '
for p in t.split():
if prev_char != ' ':
two_gram_matrix[part_to_index[prev_char]][part_to_index[p]] += 1.
# two_gram_matrix[part_to_index[p]][part_to_index[prev_char]] += 1.
prev_char = p
# for row in range(N):
# if np.sum(two_gram_matrix[row, :]) != 0:
# two_gram_matrix[row, :] = two_gram_matrix[row, :] / np.sum(two_gram_matrix[row, :])
f = open(outPath, 'w')
for row in range(N):
for col in range(N):
# print str(two_gram_matrix[row, col]) + '\t',
f.write(str(two_gram_matrix[row, col]) + '\t')
# print
f.write('\n')
f.close()
def chopByiGEM(T, outPath):
igem_data = open('igem.txt','r').readlines()
igem_data = igem_data + igem_data + igem_data
# print len(igem_data)
parts = T.split()
file = open(outPath, 'w')
while len(parts) > 0:
tmp_device = ''
length = len(igem_data.pop(0).split())
for p in range(length):
tmp_device += parts.pop(0) + ' '
if len(parts) == 0:
break
file.write(tmp_device + '\n')
file.close()
def clusteringiGEM(T):
part_dic = {}
new_igem = []
count = 1000
for t in T:
tt = t.split()
tmp_t = ''
for p in tt:
if p not in part_dic:
part_dic[p] = unichr(count)
count += 1
tmp_t += part_dic[p]
new_igem.append(tmp_t)
if count > 1200:
break
from jellyfish import jaro_distance
def d(coord):
i, j = coord
return 1 - jaro_distance(new_igem[i], new_igem[j])
import numpy as np
utg = np.triu_indices(len(new_igem), 1)
dist_utg = np.apply_along_axis(d, 0, utg)
import scipy.cluster.hierarchy
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
pp = PdfPages('testyyy1.pdf')
Z = scipy.cluster.hierarchy.linkage(dist_utg)
plt.figure(figsize=(25, 10))
plt.title('Hierarchical Clustering Dendrogram')
plt.xlabel('sample index')
plt.ylabel('distance')
scipy.cluster.hierarchy.dendrogram(Z,count_sort=True)
pp.savefig()
pp.close()
plt.show()
if __name__ == "__main__":
(chFlag, printIntsDAG, quietLog, rFlag, functionFlag, noNewLineFlag, loadDAGFlag) = processParams(sys.argv)
#########Get iGEM Age-Reuse scatter out of CS and INC full profiles
# def igemAgeReuseScatter():
# from collections import defaultdict
# agePath = sys.argv[-2]
# targetAgeDic = {}
# targetAges = open(agePath, 'r').readlines()
# for i in range(len(targetAges)):
# targetAgeDic[i+1] = int(targetAges[i].split('\t')[1])
# # iGEM fullprofile path
# path = sys.argv[-1]
# filesTMP = os.listdir(path)[1:]
# # filesTMP.remove('.DS_Store')
# # print filesTMP
# # print len(filesTMP)
# # return
# # print filesTMP[0]
# # print filesTMP[0].split('gram')[-1].split('.txt.txt')[0]
# files = sorted(filesTMP, key=lambda x: int(x.split('gram')[-1].split('.txt.txt')[0]))
# reuseDic = defaultdict(int)
# ageDic = defaultdict(int)
# for i in range(len(files)):
# sys.stderr.write(str(i) + '\n')
# # for i in [3923,3924]:
# age = 59+425-targetAgeDic[int(files[i].split('gram')[-1].split('.txt.txt')[0])]
# d = DAG(open(sys.argv[-1]+files[i], 'r'), loadDAGFlag, chFlag, noNewLineFlag)
# nodeStrs = d.getListNodeStrings(nx.nodes(d.DAGGraph))
# for n in nodeStrs:
# if n not in ageDic:
# ageDic[n] = age
# # print ageDic
# if i == len(files) - 1:
# stats = getAllNodesCentralities(d.DAGGraph)
# for n in stats[2]:
# if stats[1][n] == 1 or stats[2][n] == 1:
# continue
# nodeStr = ''
# for w in d.getNodeString(n).split():
# nodeStr += d.dic[int(w)] + ' '
# reuseDic[nodeStr] = stats[0][n]
# ageReuseScatter = [(ageDic[n],reuseDic[n],n) for n in reuseDic if n in ageDic]
# for e in sorted(ageReuseScatter, key = lambda x : (x[0],x[1])):
# print str(e[0]) + '\t' + str(e[1]) + '\t' + str(e[2])
# igemAgeReuseScatter()
#######Run G-Lexis on stream
# path = 'genSeqs-Final/' + sys.argv[-1]
# data = open(path, 'r').readlines()
# tmp_dataset = []
# prev_len_tmp_dataset = 0
# data_increment = 0.05
# for i in range(len(data)):
# t = data[i]
# tmp_dataset.append(t)
# if len(tmp_dataset)-prev_len_tmp_dataset > data_increment * float(len(data)) or i == len(data) - 1:
# #run Lexis on tmp_dataset
# sys.stderr.write(str((float(len(tmp_dataset))/float(len(data)))*100) + '%\n')
# f = open('genSeqs-Final/streamData/'+'tmp-' +sys.argv[-1].split('.')[0]+'-'+str(int((float(len(tmp_dataset))/float(len(data)))*100))+'.txt', 'w')
# for tt in tmp_dataset:
# f.write(tt)
# f.close()
# g = DAG(open('genSeqs-Final/streamData/'+'tmp-' +sys.argv[-1].split('.')[0]+'-'+str(int((float(len(tmp_dataset))/float(len(data)))*100))+'.txt', 'r'), loadDAGFlag, chFlag, noNewLineFlag)
# g.GLexis(quietLog, rFlag, functionFlag)
# # g.printDAG(printIntsDAG)
# g.printDAG_toFile(open('genSeqs-Final/DAGs/'+sys.argv[-1].split('.')[0]+'-'+str(int((float(len(tmp_dataset))/float(len(data)))*100))+'.txt','w'), printIntsDAG)
# prev_len_tmp_dataset = len(tmp_dataset)
# clusteringiGEM(open('igem.txt','r').readlines())
#######Chopping iGEM to 500 Alphbet - Extract Abundance and (most abundant) Digram Frequencies
# igem_data = open('igem.txt','r').readlines()
# part_freq = defaultdict(int)
# digrams = {}
# extracted_lines = []
# chop_alph_size = 100
# count = 1
# count_up = 50
# # while len(part_freq.keys()) < chop_alph_size:
# while count <= count_up:
# for l in igem_data:
# parts = l.split()
# prev_char = ' '
# for p in parts:
# part_freq[p] += 1
# if prev_char != ' ':
# if prev_char not in digrams:
# digrams[prev_char] = defaultdict(int)
# digrams[prev_char][p] += 1
# prev_char = p
# # if len(part_freq.keys()) == chop_alph_size:
# # break
# extracted_lines.append(l)
# if len(part_freq.keys()) >= chop_alph_size:
# # f = open('igem_' + str(chop_alph_size) + '-' + str(count) + '.txt', 'w')
# # for l in extracted_lines:
# # f.write(l)
# # f.close()
# # for p in part_freq:
# # print str(part_freq[p]) + '\t',
# # print
# most_abundant = min(part_freq.items(), key=lambda x: x[1])[0]
# if most_abundant in digrams:
# for p in digrams[most_abundant]:
# print str(digrams[most_abundant][p]) + '\t',
# print
# part_freq = defaultdict(int)
# extracted_lines = []
# count += 1
# if count == count_up:
# break
# sys.exit()
#######Plain run of Lexis
# g = DAG(open(sys.argv[-1], 'r'), loadDAGFlag, chFlag, noNewLineFlag)
# g.GLexis(quietLog, rFlag, functionFlag)
# g.printDAG(printIntsDAG)
# # print nx.dag_longest_path(g.DAGGraph)
# # print nx.dag_longest_path_length(g.DAGGraph)
# # print len(nx.nodes(g.DAGGraph))
# sys.exit()
# #######Generator matrix
# iterations = 1
# mean_cost = 0
# costs = []
# for i in range(iterations):
# sys.stderr.write(str(i) + '\n')
# M = matrixGenerator(int(sys.argv[-3]), float(sys.argv[-2]), float(sys.argv[-1]))
# print np.sum(M,0)
# T = stringGeneratorFromMatrix(M, [50000])
# # freqs = defaultdict(float)
# # for ch in T[0]:
# # if ch != ' ':
# # freqs[ch] += 1
# # sumFreqs = 0
# # for ch in freqs:
# # sumFreqs += freqs[ch]
# # for ch in freqs:
# # freqs[ch] /= sumFreqs
# # for ch in sorted(list(freqs.items()),key=lambda x: x[1]):
# # print ch[1],
# # print
# # T = []
# # t = ''
# # for i in range(23943):
# # next_char = np.random.choice(np.arange(int(sys.argv[-3])), p=[float(1)/int(sys.argv[-3]) for i in range(int(sys.argv[-3]))], size=1, replace=False)[0]
# # t += str(next_char) + ' '
# # T.append(t)
# f = open('genSeqs3/tmp'+sys.argv[-2]+sys.argv[-1]+'-'+str(i)+'.txt','w')
# for t in T:
# f.writelines(t)
# f.close()
# # g = DAG(open('tmp'+sys.argv[-2]+sys.argv[-1]+'.txt', 'r'), loadDAGFlag, chFlag, noNewLineFlag)
# # g.GLexis(quietLog, rFlag, functionFlag)
# # costs.append(g.DAGCost(CostFunction.EdgeCost))
# # print np.mean(costs), np.std(costs)
# # sys.stderr.write(sys.argv[-1].split('gram')[1].split('.txt')[0]+'\n')
# # print sys.argv[-1].split('gram')[1].split('.txt')[0]
# # sys.exit()
#######Generating empirical generator matrix
# # names = ['tmp'+sys.argv[-2]+sys.argv[-1]+'-0']
# names = ['tmp00-0']
# # names = ['igem_100-50']
# # names = ['igem_50']
# for name in names:
# print name
# # chopByiGEM(open('genSeqs3/' + name + '.txt','r').readlines()[0],'genSeqs3/' + name + '-chopped.txt')
# empiricalMatrixGenerator(open('/Users/payamsiyari/Desktop/topo_genSeqs/20k/data/' + name + '-chopped.txt', 'r').readlines(), '/Users/payamsiyari/Desktop/topo_genSeqs/20k/data/' + name + '-matrix.txt')
# # empiricalMatrixGenerator(open('igem_100/' + name + '.txt', 'r').readlines(),'igem_100/' + name + '-matrix.txt')
# sys.exit()
#######Dynamic age in the core
# node_dic = {}
# files = [f for f in listdir(sys.argv[-1]) if isfile(join(sys.argv[-1], f)) and not f.startswith('.')]
# files = sorted(files, key=lambda x : int(x.split('gram')[1].split('.txt')[0]))
# for f in files:
# age = int(join(sys.argv[-1], f).split('gram')[1].split('.txt')[0])
# sys.stderr.write(str(age) + '\n')
# g = DAG(open(join(sys.argv[-1], f),'r'), loadDAGFlag, chFlag, noNewLineFlag)
# centralNodes = g.greedyCoreID_ByTau_knee(age, 1, node_dic, 0.75)
# # centralNodes = g.greedyCoreID_ByTau(1)
# # print len(centralNodes[0])
# for node, k in sorted(node_dic.iteritems(), key=lambda x: x[1]['age_index']):
# print str(node) + '\t' + str(k['age_index']) + '\t' + str(k['core_index']) + '\t' + str(k['core_count'])
#######Plain central nodes extraction
# g = DAG(open(sys.argv[-1], 'r'), loadDAGFlag, chFlag, noNewLineFlag)
# centralNodes = g.greedyCoreID_ByTau(0.99)
# print centralNodes[1]
# # print len(centralNodes[0])
#######DAG Pruning
# import pickle
# import os
# # num_rand = int(sys.argv[-2])
# NUMBER_OF_RANDOMIZATIONS = 10
# fileName = sys.argv[-1].split('/')[-1].split('.')[0]
# randFolder = '/'.join(sys.argv[-1].split('/')[:-2]) + '/' + fileName + '-rand/'
# nodes_dic = pickle.load(open(randFolder + fileName + '-nodes-dic' + str(NUMBER_OF_RANDOMIZATIONS) + ".p", "rb"))
# g = DAG(open(sys.argv[-1], 'r'), loadDAGFlag, chFlag, noNewLineFlag)
# # for q in [0.001]:
# prunStats = []
# prunStats.append(['Orig',len(nx.nodes(g.DAGGraph)),len(nx.edges(g.DAGGraph)),len([x for x in nx.nodes(g.DAGGraph) if g.DAGGraph.in_degree(x) == 0]),len(nx.nodes(g.DAGGraph))-len([x for x in nx.nodes(g.DAGGraph) if g.DAGGraph.in_degree(x) == 0])])
# for q in [0.001, 0.01] + list(np.arange(0.1, 1, 0.1)) + [0.95,0.99,0.999]:
# # g.GLexis(quietLog, rFlag, functionFlag)
# # g.printDAG(printIntsDAG)
# d = pruneDAG_withNodeDic(nodes_dic, NUMBER_OF_RANDOMIZATIONS, g, q)
# # _, d = pruneDAG(open(sys.argv[-3],'r'),sys.argv[-1], sys.argv[-2], g, 0.001, 10)
# nodes = nx.nodes(d.DAGGraph)
# nodesStrings = d.getListNodeStrings(nodes)
# labels = {nodes[i]:nodesStrings[i] for i in range(len(nodes))}
# nx.relabel_nodes(d.DAGGraph, labels, copy=False)
# nx.write_gml(d.DAGGraph, randFolder + fileName + '-prun' + str(q) + '.gml')
# printNodeStats(d.DAGGraph, randFolder + fileName + '-prun' + str(q) + '-stats.txt')
# prunStats.append([q,
# len(nx.nodes(d.DAGGraph)), len(nx.edges(d.DAGGraph)),
# len([x for x in nx.nodes(d.DAGGraph) if d.DAGGraph.in_degree(x) == 0]),
# len(nx.nodes(d.DAGGraph)) - len([x for x in nx.nodes(d.DAGGraph) if d.DAGGraph.in_degree(x) == 0]),
# len(nx.nodes(g.DAGGraph))-len(nx.nodes(d.DAGGraph)),
# float(len(nx.nodes(g.DAGGraph)) - len(nx.nodes(d.DAGGraph)))/len(nx.nodes(g.DAGGraph)),
# len(nx.nodes(g.DAGGraph)) - len(nx.nodes(d.DAGGraph))-len([x for x in nx.nodes(g.DAGGraph) if g.DAGGraph.in_degree(x) == 0])
# ])
# pStats = open(randFolder + fileName + '-prun-stats.txt', 'w')
# for l in prunStats:
# for e in l:
# pStats.write(str(e) + '\t')
# pStats.write('\n')
# pStats.close()
#######Plain centrality and contribution
# gr = nx.read_gml(sys.argv[-1])
# fileName = sys.argv[-1].split('/')[-1].split('.')[0]
# folder = '/'.join(sys.argv[-1].split('/')[:-1]) + '/'
# centralNodes, cents, lenNodes, lenAllPaths, lenSources = coreID_byTau(gr.copy(), 1)
# lenAllPaths2 = 0
# for i in range(len(centralNodes)):
# lenAllPaths2 += cents[i][0]
# f = open(folder + fileName + '-coreID.txt', 'w')
# f.write('\t'.join(map(str,
# [lenNodes, lenAllPaths, lenAllPaths2, lenSources])
# )+ '\n')
# sumPaths = 0
# for i in range(len(centralNodes)):
# # print str(centralNodes[i]) + '\t' + str(cents[i][0]) + '\t' + str(cents[i][1])
# sumPaths += cents[i][0]
# # f.write(str(centralNodes[i]) + '\t' + str(cents[i][0]) + '\t' + str(cents[i][1]) + '\t' + str(float(sumPaths)/lenAllPaths) + '\t' + str(float(sumPaths)/lenAllPaths2) + '\t' + str(1-float((i+1))/lenSources) +'\n')
# f.write('\t'.join(map(str,
# [centralNodes[i],
# cents[i][0],
# cents[i][1],
# float(sumPaths) / lenAllPaths,
# float(sumPaths) / lenAllPaths2,
# 1 - (float((i + 1)) / float(lenSources))])
# ) + '\n')
# f.close()
#######Static stats of nodes
# g = DAG(open(sys.argv[-2], 'r'), loadDAGFlag, chFlag, noNewLineFlag)
# nodes = nx.nodes(g.DAGGraph)
# nodesStrings = g.getListNodeStrings(nodes)
# labels = {nodes[i]: nodesStrings[i] for i in range(len(nodes))}
# nx.relabel_nodes(g.DAGGraph, labels, copy=False)
# # gr = nx.read_gml(sys.argv[-1])
# # gr_nodes = set(nx.nodes(gr))
# graph = g.DAGGraph
#
# # printNodeStats2(g.DAGGraph, gr_nodes)
# printNodeStats(graph, sys.argv[-1])
# create_nodes_prob_dic(open(sys.argv[-3], 'r'), sys.argv[-2], int(sys.argv[-1]))
# g = DAG(open(sys.argv[-1], 'r'), loadDAGFlag, chFlag, noNewLineFlag)
# listProbs(g, int(sys.argv[-2]))
# calc_rank_corr(sys.argv[-2], sys.argv[-1])
# calc_max_prob_diff(sys.argv[-2], sys.argv[-1])
# rand_input_q_analysis(sys.argv[-3], sys.argv[-2], int(sys.argv[-1]))
# g = nx.read_gml(sys.argv[-1])
# gr = nx.read_gml(sys.argv[-2])
# g_nodes = set(nx.nodes(g))
# gr_nodes = set(nx.nodes(gr))
# print len(g_nodes)
# print len(gr_nodes)
# for node in g_nodes.difference(gr_nodes):
# if len(node.split()) > 1:
# print node
# printNodeStats(gr)
# nx.relabel_nodes(d.DAGGraph, labels, copy=False)
# for n in centralNodes:
# print labels[n]
#If desired to see the central nodes, please uncomment the lines below
# centralNodes = g.greedyCoreID_ByTau(1)
# print
# print 'Central Nodes:'
# for i in range(len(centralNodes)):
# print centralNodes[i]
# g.calcPaths()
# numData = sys.argv[-1].split('gram')[1].split('.txt.txt')[0]
# print numData
# print structSimTest('/Users/payamsiyari/Desktop/inc/cs-fullProfile/clean-slate full profile/gram' + str(numData) + '.txt', sys.argv[-1])
# from collections import Counter
# dic = Counter()
# f=open(sys.argv[-1],'r').readlines()
# for l in f:
# for c in l.split():
# dic[c] += 1
# for c in sorted([(c,dic[c]) for c in dic],key=lambda x:x[1], reverse = True):
# # print str(c[0]) + '\t' + str(c[1])
# print str(c[1])
g = DAG(open(sys.argv[-1], 'r'), loadDAGFlag, chFlag, noNewLineFlag)
sources = []
targets = []
for node in nx.nodes(g.DAGGraph):
if g.DAGGraph.in_degree(node) == 0:
sources.append(node)
if g.DAGGraph.out_degree(node) == 0:
targets.append(node)
# tendril_paths = 0
# for s in sources:
# for t in targets:
# ps = nx.all_simple_paths(g.DAGGraph, source=s, target=t)
# plens = map(len,ps)
# if len(plens) > 0:
# tendril_paths += sum([i==min(plens) for i in plens])
# print 'tau', float(200000-tendril_paths)/200000
# centralNodes = g.greedyCoreID_ByTau(float(200000-tendril_paths)/200000)
# print centralNodes[1]
# print len(centralNodes[0])
psum = 0
pnum = 0
num_all_paths = 0
listOfSources = []
for s in sources:
for t in targets:
ps = nx.all_simple_paths(g.DAGGraph, source=s, target=t)
for p in ps:
psum += len(p)-1
pnum += 1
listOfSources.append((s, pnum))
num_all_paths += pnum
# pnum = 0
# print pnum
# print psum
print "{}\t{}\t{}".format(len(nx.nodes(g.DAGGraph))-len(sources)-1, len(sources), float(psum)/pnum)
| 45.875443
| 252
| 0.571928
|
3fc172f53a55cb1817b555ff99fea9e45295c6d6
| 18,590
|
py
|
Python
|
Lib/test/test_hashlib.py
|
heamon7/CPython-2.7.8
|
a23fc8e4419ab0034029cbf4c5c3b3a765d2fcbe
|
[
"PSF-2.0"
] | 9
|
2015-04-15T10:58:49.000Z
|
2018-09-24T09:11:33.000Z
|
Lib/test/test_hashlib.py
|
odsod/cpython-internals-course
|
55fffca28e83ac0f30029c60113a3110451dfa08
|
[
"PSF-2.0"
] | 2
|
2020-02-17T22:31:09.000Z
|
2020-02-18T04:31:55.000Z
|
Lib/test/test_hashlib.py
|
odsod/cpython-internals-course
|
55fffca28e83ac0f30029c60113a3110451dfa08
|
[
"PSF-2.0"
] | 3
|
2018-03-26T17:41:40.000Z
|
2019-06-28T12:53:47.000Z
|
# Test hashlib module
#
# $Id$
#
# Copyright (C) 2005-2010 Gregory P. Smith (greg@krypto.org)
# Licensed to PSF under a Contributor Agreement.
#
import array
import hashlib
import itertools
import sys
try:
import threading
except ImportError:
threading = None
import unittest
import warnings
from binascii import unhexlify
from test import test_support
from test.test_support import _4G, precisionbigmemtest
# Were we compiled --with-pydebug or with #define Py_DEBUG?
COMPILED_WITH_PYDEBUG = hasattr(sys, 'gettotalrefcount')
def hexstr(s):
import string
h = string.hexdigits
r = ''
for c in s:
i = ord(c)
r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
return r
class HashLibTestCase(unittest.TestCase):
supported_hash_names = ( 'md5', 'MD5', 'sha1', 'SHA1',
'sha224', 'SHA224', 'sha256', 'SHA256',
'sha384', 'SHA384', 'sha512', 'SHA512' )
_warn_on_extension_import = COMPILED_WITH_PYDEBUG
def _conditional_import_module(self, module_name):
"""Import a module and return a reference to it or None on failure."""
try:
exec('import '+module_name)
except ImportError, error:
if self._warn_on_extension_import:
warnings.warn('Did a C extension fail to compile? %s' % error)
return locals().get(module_name)
def __init__(self, *args, **kwargs):
algorithms = set()
for algorithm in self.supported_hash_names:
algorithms.add(algorithm.lower())
self.constructors_to_test = {}
for algorithm in algorithms:
self.constructors_to_test[algorithm] = set()
# For each algorithm, test the direct constructor and the use
# of hashlib.new given the algorithm name.
for algorithm, constructors in self.constructors_to_test.items():
constructors.add(getattr(hashlib, algorithm))
def _test_algorithm_via_hashlib_new(data=None, _alg=algorithm):
if data is None:
return hashlib.new(_alg)
return hashlib.new(_alg, data)
constructors.add(_test_algorithm_via_hashlib_new)
_hashlib = self._conditional_import_module('_hashlib')
if _hashlib:
# These two algorithms should always be present when this module
# is compiled. If not, something was compiled wrong.
assert hasattr(_hashlib, 'openssl_md5')
assert hasattr(_hashlib, 'openssl_sha1')
for algorithm, constructors in self.constructors_to_test.items():
constructor = getattr(_hashlib, 'openssl_'+algorithm, None)
if constructor:
constructors.add(constructor)
_md5 = self._conditional_import_module('_md5')
if _md5:
self.constructors_to_test['md5'].add(_md5.new)
_sha = self._conditional_import_module('_sha')
if _sha:
self.constructors_to_test['sha1'].add(_sha.new)
_sha256 = self._conditional_import_module('_sha256')
if _sha256:
self.constructors_to_test['sha224'].add(_sha256.sha224)
self.constructors_to_test['sha256'].add(_sha256.sha256)
_sha512 = self._conditional_import_module('_sha512')
if _sha512:
self.constructors_to_test['sha384'].add(_sha512.sha384)
self.constructors_to_test['sha512'].add(_sha512.sha512)
super(HashLibTestCase, self).__init__(*args, **kwargs)
def test_hash_array(self):
a = array.array("b", range(10))
constructors = self.constructors_to_test.itervalues()
for cons in itertools.chain.from_iterable(constructors):
c = cons(a)
c.hexdigest()
def test_algorithms_attribute(self):
self.assertEqual(hashlib.algorithms,
tuple([_algo for _algo in self.supported_hash_names if
_algo.islower()]))
def test_unknown_hash(self):
self.assertRaises(ValueError, hashlib.new, 'spam spam spam spam spam')
self.assertRaises(TypeError, hashlib.new, 1)
def test_get_builtin_constructor(self):
get_builtin_constructor = hashlib.__dict__[
'__get_builtin_constructor']
self.assertRaises(ValueError, get_builtin_constructor, 'test')
try:
import _md5
except ImportError:
pass
# This forces an ImportError for "import _md5" statements
sys.modules['_md5'] = None
try:
self.assertRaises(ValueError, get_builtin_constructor, 'md5')
finally:
if '_md5' in locals():
sys.modules['_md5'] = _md5
else:
del sys.modules['_md5']
self.assertRaises(TypeError, get_builtin_constructor, 3)
def test_hexdigest(self):
for name in self.supported_hash_names:
h = hashlib.new(name)
self.assertTrue(hexstr(h.digest()) == h.hexdigest())
def test_large_update(self):
aas = 'a' * 128
bees = 'b' * 127
cees = 'c' * 126
abcs = aas + bees + cees
for name in self.supported_hash_names:
m1 = hashlib.new(name)
m1.update(aas)
m1.update(bees)
m1.update(cees)
m2 = hashlib.new(name)
m2.update(abcs)
self.assertEqual(m1.digest(), m2.digest(), name+' update problem.')
m3 = hashlib.new(name, abcs)
self.assertEqual(m1.digest(), m3.digest(), name+' new problem.')
def check(self, name, data, digest):
constructors = self.constructors_to_test[name]
# 2 is for hashlib.name(...) and hashlib.new(name, ...)
self.assertGreaterEqual(len(constructors), 2)
for hash_object_constructor in constructors:
computed = hash_object_constructor(data).hexdigest()
self.assertEqual(
computed, digest,
"Hash algorithm %s constructed using %s returned hexdigest"
" %r for %d byte input data that should have hashed to %r."
% (name, hash_object_constructor,
computed, len(data), digest))
def check_update(self, name, data, digest):
constructors = self.constructors_to_test[name]
# 2 is for hashlib.name(...) and hashlib.new(name, ...)
self.assertGreaterEqual(len(constructors), 2)
for hash_object_constructor in constructors:
h = hash_object_constructor()
h.update(data)
computed = h.hexdigest()
self.assertEqual(
computed, digest,
"Hash algorithm %s using %s when updated returned hexdigest"
" %r for %d byte input data that should have hashed to %r."
% (name, hash_object_constructor,
computed, len(data), digest))
def check_unicode(self, algorithm_name):
        # In Python 2, ASCII-only unicode input is auto-encoded to the default
        # encoding, so hashing u'spam' must match hashing str(u'spam').
expected = hashlib.new(algorithm_name, str(u'spam')).hexdigest()
self.check(algorithm_name, u'spam', expected)
def test_unicode(self):
# In python 2.x unicode is auto-encoded to the system default encoding
# when passed to hashlib functions.
self.check_unicode('md5')
self.check_unicode('sha1')
self.check_unicode('sha224')
self.check_unicode('sha256')
self.check_unicode('sha384')
self.check_unicode('sha512')
def test_case_md5_0(self):
self.check('md5', '', 'd41d8cd98f00b204e9800998ecf8427e')
def test_case_md5_1(self):
self.check('md5', 'abc', '900150983cd24fb0d6963f7d28e17f72')
def test_case_md5_2(self):
self.check('md5', 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789',
'd174ab98d277d9f5a5611c2c9f419d9f')
@unittest.skipIf(sys.maxsize < _4G + 5, 'test cannot run on 32-bit systems')
@precisionbigmemtest(size=_4G + 5, memuse=1, dry_run=False)
def test_case_md5_huge(self, size):
self.check('md5', 'A'*size, 'c9af2dff37468ce5dfee8f2cfc0a9c6d')
@unittest.skipIf(sys.maxsize < _4G + 5, 'test cannot run on 32-bit systems')
@precisionbigmemtest(size=_4G + 5, memuse=1, dry_run=False)
def test_case_md5_huge_update(self, size):
self.check_update('md5', 'A'*size, 'c9af2dff37468ce5dfee8f2cfc0a9c6d')
@unittest.skipIf(sys.maxsize < _4G - 1, 'test cannot run on 32-bit systems')
@precisionbigmemtest(size=_4G - 1, memuse=1, dry_run=False)
def test_case_md5_uintmax(self, size):
self.check('md5', 'A'*size, '28138d306ff1b8281f1a9067e1a1a2b3')
# use the three examples from Federal Information Processing Standards
# Publication 180-1, Secure Hash Standard, 1995 April 17
# http://www.itl.nist.gov/div897/pubs/fip180-1.htm
def test_case_sha1_0(self):
self.check('sha1', "",
"da39a3ee5e6b4b0d3255bfef95601890afd80709")
def test_case_sha1_1(self):
self.check('sha1', "abc",
"a9993e364706816aba3e25717850c26c9cd0d89d")
def test_case_sha1_2(self):
self.check('sha1', "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
"84983e441c3bd26ebaae4aa1f95129e5e54670f1")
def test_case_sha1_3(self):
self.check('sha1', "a" * 1000000,
"34aa973cd4c4daa4f61eeb2bdbad27316534016f")
@precisionbigmemtest(size=_4G + 5, memuse=1)
def test_case_sha1_huge(self, size):
if size == _4G + 5:
try:
self.check('sha1', 'A'*size,
'87d745c50e6b2879ffa0fb2c930e9fbfe0dc9a5b')
except OverflowError:
pass # 32-bit arch
@precisionbigmemtest(size=_4G + 5, memuse=1)
def test_case_sha1_huge_update(self, size):
if size == _4G + 5:
try:
self.check_update('sha1', 'A'*size,
'87d745c50e6b2879ffa0fb2c930e9fbfe0dc9a5b')
except OverflowError:
pass # 32-bit arch
# use the examples from Federal Information Processing Standards
# Publication 180-2, Secure Hash Standard, 2002 August 1
# http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf
def test_case_sha224_0(self):
self.check('sha224', "",
"d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f")
def test_case_sha224_1(self):
self.check('sha224', "abc",
"23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7")
def test_case_sha224_2(self):
self.check('sha224',
"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
"75388b16512776cc5dba5da1fd890150b0c6455cb4f58b1952522525")
def test_case_sha224_3(self):
self.check('sha224', "a" * 1000000,
"20794655980c91d8bbb4c1ea97618a4bf03f42581948b2ee4ee7ad67")
def test_case_sha256_0(self):
self.check('sha256', "",
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
def test_case_sha256_1(self):
self.check('sha256', "abc",
"ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad")
def test_case_sha256_2(self):
self.check('sha256',
"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
"248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1")
def test_case_sha256_3(self):
self.check('sha256', "a" * 1000000,
"cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0")
def test_case_sha384_0(self):
self.check('sha384', "",
"38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da"+
"274edebfe76f65fbd51ad2f14898b95b")
def test_case_sha384_1(self):
self.check('sha384', "abc",
"cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed"+
"8086072ba1e7cc2358baeca134c825a7")
def test_case_sha384_2(self):
self.check('sha384',
"abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"+
"hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu",
"09330c33f71147e83d192fc782cd1b4753111b173b3b05d22fa08086e3b0f712"+
"fcc7c71a557e2db966c3e9fa91746039")
def test_case_sha384_3(self):
self.check('sha384', "a" * 1000000,
"9d0e1809716474cb086e834e310a4a1ced149e9c00f248527972cec5704c2a5b"+
"07b8b3dc38ecc4ebae97ddd87f3d8985")
def test_case_sha512_0(self):
self.check('sha512', "",
"cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce"+
"47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e")
def test_case_sha512_1(self):
self.check('sha512', "abc",
"ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a"+
"2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f")
def test_case_sha512_2(self):
self.check('sha512',
"abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"+
"hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu",
"8e959b75dae313da8cf4f72814fc143f8f7779c6eb9f7fa17299aeadb6889018"+
"501d289e4900f7e4331b99dec4b5433ac7d329eeb6dd26545e96e55b874be909")
def test_case_sha512_3(self):
self.check('sha512', "a" * 1000000,
"e718483d0ce769644e2e42c7bc15b4638e1f98b13b2044285632a803afa973eb"+
"de0ff244877ea60a4cb0432ce577c31beb009c5c2c49aa2e4eadb217ad8cc09b")
@unittest.skipUnless(threading, 'Threading required for this test.')
@test_support.reap_threads
def test_threaded_hashing(self):
# Updating the same hash object from several threads at once
# using data chunk sizes containing the same byte sequences.
#
# If the internal locks are working to prevent multiple
# updates on the same object from running at once, the resulting
# hash will be the same as doing it single threaded upfront.
hasher = hashlib.sha1()
num_threads = 5
smallest_data = 'swineflu'
data = smallest_data*200000
expected_hash = hashlib.sha1(data*num_threads).hexdigest()
def hash_in_chunks(chunk_size, event):
index = 0
while index < len(data):
hasher.update(data[index:index+chunk_size])
index += chunk_size
event.set()
events = []
for threadnum in xrange(num_threads):
chunk_size = len(data) // (10**threadnum)
assert chunk_size > 0
assert chunk_size % len(smallest_data) == 0
event = threading.Event()
events.append(event)
threading.Thread(target=hash_in_chunks,
args=(chunk_size, event)).start()
for event in events:
event.wait()
self.assertEqual(expected_hash, hasher.hexdigest())
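# Illustrative sketch (not part of the upstream test suite; the helper name is
# hypothetical): the property exercised by test_large_update above is that
# feeding data to a hash object in chunks yields the same digest as hashing the
# whole buffer in a single call.
def _example_incremental_update(data=b'a' * 128 + b'b' * 127 + b'c' * 126):
    import hashlib
    whole = hashlib.sha256(data).hexdigest()
    piecewise = hashlib.sha256()
    for start in range(0, len(data), 64):
        piecewise.update(data[start:start + 64])
    assert piecewise.hexdigest() == whole
    return whole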
class KDFTests(unittest.TestCase):
pbkdf2_test_vectors = [
(b'password', b'salt', 1, None),
(b'password', b'salt', 2, None),
(b'password', b'salt', 4096, None),
# too slow, it takes over a minute on a fast CPU.
#(b'password', b'salt', 16777216, None),
(b'passwordPASSWORDpassword', b'saltSALTsaltSALTsaltSALTsaltSALTsalt',
4096, -1),
(b'pass\0word', b'sa\0lt', 4096, 16),
]
pbkdf2_results = {
"sha1": [
            # official test vectors from RFC 6070
(unhexlify('0c60c80f961f0e71f3a9b524af6012062fe037a6'), None),
(unhexlify('ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957'), None),
(unhexlify('4b007901b765489abead49d926f721d065a429c1'), None),
#(unhexlify('eefe3d61cd4da4e4e9945b3d6ba2158c2634e984'), None),
(unhexlify('3d2eec4fe41c849b80c8d83662c0e44a8b291a964c'
'f2f07038'), 25),
(unhexlify('56fa6aa75548099dcc37d7f03425e0c3'), None),],
"sha256": [
(unhexlify('120fb6cffcf8b32c43e7225256c4f837'
'a86548c92ccc35480805987cb70be17b'), None),
(unhexlify('ae4d0c95af6b46d32d0adff928f06dd0'
'2a303f8ef3c251dfd6e2d85a95474c43'), None),
(unhexlify('c5e478d59288c841aa530db6845c4c8d'
'962893a001ce4e11a4963873aa98134a'), None),
#(unhexlify('cf81c66fe8cfc04d1f31ecb65dab4089'
# 'f7f179e89b3b0bcb17ad10e3ac6eba46'), None),
(unhexlify('348c89dbcbd32b2f32d814b8116e84cf2b17'
'347ebc1800181c4e2a1fb8dd53e1c635518c7dac47e9'), 40),
(unhexlify('89b69d0516f829893c696226650a8687'), None),],
"sha512": [
(unhexlify('867f70cf1ade02cff3752599a3a53dc4af34c7a669815ae5'
'd513554e1c8cf252c02d470a285a0501bad999bfe943c08f'
'050235d7d68b1da55e63f73b60a57fce'), None),
(unhexlify('e1d9c16aa681708a45f5c7c4e215ceb66e011a2e9f004071'
'3f18aefdb866d53cf76cab2868a39b9f7840edce4fef5a82'
'be67335c77a6068e04112754f27ccf4e'), None),
(unhexlify('d197b1b33db0143e018b12f3d1d1479e6cdebdcc97c5c0f8'
'7f6902e072f457b5143f30602641b3d55cd335988cb36b84'
'376060ecd532e039b742a239434af2d5'), None),
(unhexlify('8c0511f4c6e597c6ac6315d8f0362e225f3c501495ba23b8'
'68c005174dc4ee71115b59f9e60cd9532fa33e0f75aefe30'
'225c583a186cd82bd4daea9724a3d3b8'), 64),
(unhexlify('9d9e9c4cd21fe4be24d5b8244c759665'), None),],
}
def test_pbkdf2_hmac(self):
for digest_name, results in self.pbkdf2_results.items():
for i, vector in enumerate(self.pbkdf2_test_vectors):
password, salt, rounds, dklen = vector
expected, overwrite_dklen = results[i]
if overwrite_dklen:
dklen = overwrite_dklen
out = hashlib.pbkdf2_hmac(
digest_name, password, salt, rounds, dklen)
self.assertEqual(out, expected,
(digest_name, password, salt, rounds, dklen))
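# Illustrative sketch (not part of the upstream test suite; the helper name is
# hypothetical): a direct call to hashlib.pbkdf2_hmac with the first RFC 6070
# vector, assuming a Python build where pbkdf2_hmac is available (2.7.8+/3.4+).
def _example_pbkdf2_hmac():
    import binascii
    import hashlib
    dk = hashlib.pbkdf2_hmac('sha1', b'password', b'salt', 1)
    # expected hexdigest: 0c60c80f961f0e71f3a9b524af6012062fe037a6
    return binascii.hexlify(dk)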
def test_main():
test_support.run_unittest(HashLibTestCase, KDFTests)
if __name__ == "__main__":
test_main()
| 40.767544
| 91
| 0.635933
|
336bf4a3d1cd67cf92c993461da61b6f674c4edb
| 4,492
|
py
|
Python
|
test/functional/test_framework/muhash.py
|
syglee7/zenacoin-ver2
|
90079b95bdf0ea2b7fce644c56d2a9626526e5e4
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/muhash.py
|
syglee7/zenacoin-ver2
|
90079b95bdf0ea2b7fce644c56d2a9626526e5e4
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/muhash.py
|
syglee7/zenacoin-ver2
|
90079b95bdf0ea2b7fce644c56d2a9626526e5e4
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020 Pieter Wuille
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Native Python MuHash3072 implementation."""
import hashlib
import unittest
from .util import modinv
def rot32(v, bits):
"""Rotate the 32-bit value v left by bits bits."""
bits %= 32 # Make sure the term below does not throw an exception
return ((v << bits) & 0xffffffff) | (v >> (32 - bits))
def chacha20_doubleround(s):
"""Apply a ChaCha20 double round to 16-element state array s.
See https://cr.yp.to/chacha/chacha-20080128.pdf and https://tools.ietf.org/html/rfc8439
"""
QUARTER_ROUNDS = [(0, 4, 8, 12),
(1, 5, 9, 13),
(2, 6, 10, 14),
(3, 7, 11, 15),
(0, 5, 10, 15),
(1, 6, 11, 12),
(2, 7, 8, 13),
(3, 4, 9, 14)]
for a, b, c, d in QUARTER_ROUNDS:
s[a] = (s[a] + s[b]) & 0xffffffff
s[d] = rot32(s[d] ^ s[a], 16)
s[c] = (s[c] + s[d]) & 0xffffffff
s[b] = rot32(s[b] ^ s[c], 12)
s[a] = (s[a] + s[b]) & 0xffffffff
s[d] = rot32(s[d] ^ s[a], 8)
s[c] = (s[c] + s[d]) & 0xffffffff
s[b] = rot32(s[b] ^ s[c], 7)
def chacha20_32_to_384(key32):
"""Specialized ChaCha20 implementation with 32-byte key, 0 IV, 384-byte output."""
# See RFC 8439 section 2.3 for chacha20 parameters
CONSTANTS = [0x61707865, 0x3320646e, 0x79622d32, 0x6b206574]
key_bytes = [0]*8
for i in range(8):
key_bytes[i] = int.from_bytes(key32[(4 * i):(4 * (i+1))], 'little')
INITIALIZATION_VECTOR = [0] * 4
init = CONSTANTS + key_bytes + INITIALIZATION_VECTOR
out = bytearray()
for counter in range(6):
init[12] = counter
s = init.copy()
for _ in range(10):
chacha20_doubleround(s)
for i in range(16):
out.extend(((s[i] + init[i]) & 0xffffffff).to_bytes(4, 'little'))
return bytes(out)
def data_to_num3072(data):
"""Hash a 32-byte array data to a 3072-bit number using 6 Chacha20 operations."""
bytes384 = chacha20_32_to_384(data)
return int.from_bytes(bytes384, 'little')
class MuHash3072:
"""Class representing the MuHash3072 computation of a set.
See https://cseweb.ucsd.edu/~mihir/papers/inchash.pdf and https://lists.linuxfoundation.org/pipermail/zenacoin-dev/2017-May/014337.html
"""
MODULUS = 2**3072 - 1103717
def __init__(self):
"""Initialize for an empty set."""
self.numerator = 1
self.denominator = 1
def insert(self, data):
"""Insert a byte array data in the set."""
data_hash = hashlib.sha256(data).digest()
self.numerator = (self.numerator * data_to_num3072(data_hash)) % self.MODULUS
def remove(self, data):
"""Remove a byte array from the set."""
data_hash = hashlib.sha256(data).digest()
self.denominator = (self.denominator * data_to_num3072(data_hash)) % self.MODULUS
def digest(self):
"""Extract the final hash. Does not modify this object."""
val = (self.numerator * modinv(self.denominator, self.MODULUS)) % self.MODULUS
bytes384 = val.to_bytes(384, 'little')
return hashlib.sha256(bytes384).digest()
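# Illustrative sketch (not part of the original module; the helper name is
# hypothetical): MuHash3072 behaves like a hash of an unordered multiset, so the
# digest depends only on which elements were inserted, not on insertion order.
def _example_muhash_order_independence():
    a, b = b'\x01' * 32, b'\x02' * 32
    m1 = MuHash3072()
    m1.insert(a)
    m1.insert(b)
    m2 = MuHash3072()
    m2.insert(b)
    m2.insert(a)
    assert m1.digest() == m2.digest()
    return m1.digest()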
class TestFrameworkMuhash(unittest.TestCase):
def test_muhash(self):
muhash = MuHash3072()
muhash.insert(b'\x00' * 32)
muhash.insert((b'\x01' + b'\x00' * 31))
muhash.remove((b'\x02' + b'\x00' * 31))
finalized = muhash.digest()
# This mirrors the result in the C++ MuHash3072 unit test
self.assertEqual(finalized[::-1].hex(), "10d312b100cbd32ada024a6646e40d3482fcff103668d2625f10002a607d5863")
def test_chacha20(self):
def chacha_check(key, result):
self.assertEqual(chacha20_32_to_384(key)[:64].hex(), result)
# Test vectors from https://tools.ietf.org/html/draft-agl-tls-chacha20poly1305-04#section-7
# Since the nonce is hardcoded to 0 in our function we only use those vectors.
chacha_check([0]*32, "76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7da41597c5157488d7724e03fb8d84a376a43b8f41518a11cc387b669b2ee6586")
chacha_check([0]*31 + [1], "4540f05a9f1fb296d7736e7b208e3c96eb4fe1834688d2604f450952ed432d41bbe2a0b6ea7566d2a5d1e7e20d42af2c53d792b1c43fea817e9ad275ae546963")
| 39.752212
| 166
| 0.622663
|
1a5c0feb40363c37d267f58ece23d2fd2353befb
| 804
|
py
|
Python
|
clinicadl/scripts/preprocessing/orig/run_qc_similarity.py
|
yogeshmj/AD-DL
|
76b9b564061581effe8f3698992bfea3ffb055fa
|
[
"MIT"
] | 112
|
2019-10-21T14:50:35.000Z
|
2022-03-29T03:15:47.000Z
|
clinicadl/scripts/preprocessing/orig/run_qc_similarity.py
|
921974496/AD-DL
|
9a0303579a665800633024bdab1ac44f794a0c38
|
[
"MIT"
] | 136
|
2019-10-17T17:40:55.000Z
|
2021-06-30T14:53:29.000Z
|
clinicadl/scripts/preprocessing/orig/run_qc_similarity.py
|
921974496/AD-DL
|
9a0303579a665800633024bdab1ac44f794a0c38
|
[
"MIT"
] | 49
|
2019-11-26T13:57:52.000Z
|
2022-03-20T13:17:42.000Z
|
####################
from Code.image_preprocessing.quality_check_image_similarity import quality_check_image_similarity
## run the pipeline
# for test
##
caps_directory= '/teams/ARAMIS/PROJECTS/CLINICA/CLINICA_datasets/temp/CAPS_ADNI_DL'
tsv= '/teams/ARAMIS/PROJECTS/junhao.wen/PhD/ADNI_classification/gitlabs/AD-DL/tsv_files/ADNI_MCI_T1_rest.tsv'
working_dir = '/teams/ARAMIS/PROJECTS/junhao.wen/PhD/ADNI_classification/gitlabs/AD-DL/Results/working_dir'
ref_template = '/teams/ARAMIS/PROJECTS/junhao.wen/PhD/ADNI_classification/gitlabs/AD-DL/Data/mni_icbm152_nlin_sym_09c_nifti/mni_icbm152_nlin_sym_09c/mni_icbm152_t1_tal_nlin_sym_09c.nii'
wf = quality_check_image_similarity(caps_directory, tsv, ref_template, working_directory=working_dir)
wf.run(plugin='MultiProc', plugin_args={'n_procs': 8})
| 50.25
| 185
| 0.822139
|
1f5f201e5d7a046ac8f54b6657d1404e56791a64
| 10,682
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_firewall_addrgrp_tagging.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_firewall_addrgrp_tagging.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_firewall_addrgrp_tagging.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2021 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_firewall_addrgrp_tagging
short_description: Config object tagging.
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
    - Running in workspace locking mode is supported in this FortiManager module; the top
      level parameters workspace_locking_adom and workspace_locking_timeout control it.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
    - Normally, running one module can fail when a non-zero rc is returned. You can also override
      the conditions to fail or succeed with the parameters rc_failed and rc_succeeded.
options:
enable_log:
description: Enable/Disable logging for task
required: false
type: bool
default: false
proposed_method:
description: The overridden method for the underlying Json RPC request
required: false
type: str
choices:
- update
- set
- add
bypass_validation:
        description: only set to True when the module schema differs from the FortiManager API structure; the module then continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
        description: the adom to lock for FortiManager running in workspace mode; the value can be global or any other adom name, including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
        description: the rc codes list with which the conditions to succeed will be overridden
type: list
required: false
rc_failed:
        description: the rc codes list with which the conditions to fail will be overridden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
addrgrp:
description: the parameter (addrgrp) in requested url
type: str
required: true
firewall_addrgrp_tagging:
description: the top level parameters set
required: false
type: dict
suboptions:
category:
type: str
description: 'Tag category.'
name:
type: str
description: 'Tagging entry name.'
tags:
description: no description
type: str
'''
EXAMPLES = '''
- hosts: fortimanager00
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: Config object tagging.
fmgr_firewall_addrgrp_tagging:
bypass_validation: False
adom: ansible
addrgrp: 'ansible-addrgrp4' # name
state: present
firewall_addrgrp_tagging:
category: 'ansible-category'
name: 'ansible-test'
tags: ['ansible1', 'ansible2']
- name: gathering fortimanager facts
hosts: fortimanager00
gather_facts: no
connection: httpapi
collections:
- fortinet.fortimanager
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: retrieve all the taggings in the IPv4 address group
fmgr_fact:
facts:
selector: 'firewall_addrgrp_tagging'
params:
adom: 'ansible'
addrgrp: 'ansible-addrgrp4' # name
tagging: ''
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/pm/config/adom/{adom}/obj/firewall/addrgrp/{addrgrp}/tagging',
'/pm/config/global/obj/firewall/addrgrp/{addrgrp}/tagging'
]
perobject_jrpc_urls = [
'/pm/config/adom/{adom}/obj/firewall/addrgrp/{addrgrp}/tagging/{tagging}',
'/pm/config/global/obj/firewall/addrgrp/{addrgrp}/tagging/{tagging}'
]
url_params = ['adom', 'addrgrp']
module_primary_key = 'name'
module_arg_spec = {
'enable_log': {
'type': 'bool',
'required': False,
'default': False
},
'forticloud_access_token': {
'type': 'str',
'required': False,
'no_log': True
},
'proposed_method': {
'type': 'str',
'required': False,
'choices': [
'set',
'update',
'add'
]
},
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'state': {
'type': 'str',
'required': True,
'choices': [
'present',
'absent'
]
},
'adom': {
'required': True,
'type': 'str'
},
'addrgrp': {
'required': True,
'type': 'str'
},
'firewall_addrgrp_tagging': {
'required': False,
'type': 'dict',
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'options': {
'category': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'name': {
'required': True,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'tags': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'firewall_addrgrp_tagging'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False)
connection.set_option('forticloud_access_token',
module.params['forticloud_access_token'] if 'forticloud_access_token' in module.params else None)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_curd(argument_specs=module_arg_spec)
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
| 32.078078
| 153
| 0.549054
|
164a77da39e9f1b83c7e9092cc72ae585421c92a
| 7,459
|
py
|
Python
|
caserec/recommenders/item_recommendation/base_item_recommendation.py
|
andlzanon/CaseRecommender
|
f7f436996057298bfe76ee4eceeba634a74b69cd
|
[
"MIT"
] | 407
|
2018-06-25T18:00:11.000Z
|
2022-03-24T17:16:17.000Z
|
caserec/recommenders/item_recommendation/base_item_recommendation.py
|
andlzanon/CaseRecommender
|
f7f436996057298bfe76ee4eceeba634a74b69cd
|
[
"MIT"
] | 23
|
2018-06-27T21:19:04.000Z
|
2022-01-24T04:20:40.000Z
|
caserec/recommenders/item_recommendation/base_item_recommendation.py
|
andlzanon/CaseRecommender
|
f7f436996057298bfe76ee4eceeba634a74b69cd
|
[
"MIT"
] | 77
|
2018-06-24T19:04:18.000Z
|
2021-11-29T10:12:36.000Z
|
# coding=utf-8
""""
This class is base for item recommendation algorithms.
"""
# © 2019. Case Recommender (MIT License)
from scipy.spatial.distance import squareform, pdist
import numpy as np
from caserec.evaluation.item_recommendation import ItemRecommendationEvaluation
from caserec.utils.extra_functions import print_header
from caserec.utils.process_data import ReadFile, WriteFile
__author__ = 'Arthur Fortes <fortes.arthur@gmail.com>'
class BaseItemRecommendation(object):
def __init__(self, train_file, test_file, output_file=None, as_binary=False, rank_length=10,
similarity_metric="cosine", sep='\t', output_sep='\t'):
"""
        This class is the base for all item recommendation algorithms. It implements and adds
        common methods and attributes for ranking approaches.
:param train_file: File which contains the train set. This file needs to have at least 3 columns
(user item feedback_value).
:type train_file: str
:param test_file: File which contains the test set. This file needs to have at least 3 columns
(user item feedback_value).
:type test_file: str, default None
:param output_file: File with dir to write the final predictions
:type output_file: str, default None
:param similarity_metric:
:type similarity_metric: str, default cosine
:param rank_length: Size of the rank that must be generated by the predictions of the recommender algorithm
:type rank_length: int, default 10
        :param as_binary: If True, the explicit feedback will be transformed to binary
:type as_binary: bool, default False
:param sep: Delimiter for input files
:type sep: str, default '\t'
:param output_sep: Delimiter for output file
:type output_sep: str, default '\t'
"""
self.train_file = train_file
self.test_file = test_file
self.as_binary = as_binary
self.similarity_metric = similarity_metric
self.output_file = output_file
self.rank_length = rank_length
self.sep = sep
self.output_sep = output_sep
# internal vars
self.item_to_item_id = {}
self.item_id_to_item = {}
self.user_to_user_id = {}
self.user_id_to_user = {}
self.train_set = None
self.test_set = None
self.users = None
self.items = None
self.matrix = None
self.evaluation_results = None
self.recommender_name = None
self.extra_info_header = None
self.ranking = []
def read_files(self):
"""
Method to initialize recommender algorithm.
"""
self.train_set = ReadFile(self.train_file, sep=self.sep, as_binary=self.as_binary).read()
if self.test_file is not None:
self.test_set = ReadFile(self.test_file, sep=self.sep).read()
self.users = sorted(set(list(self.train_set['users']) + list(self.test_set['users'])))
self.items = sorted(set(list(self.train_set['items']) + list(self.test_set['items'])))
else:
self.users = self.train_set['users']
self.items = self.train_set['items']
for i, item in enumerate(self.items):
self.item_to_item_id.update({item: i})
self.item_id_to_item.update({i: item})
for u, user in enumerate(self.users):
self.user_to_user_id.update({user: u})
self.user_id_to_user.update({u: user})
def create_matrix(self):
"""
Method to create a feedback matrix
"""
self.matrix = np.zeros((len(self.users), len(self.items)))
for user in self.train_set['users']:
for item in self.train_set['feedback'][user]:
self.matrix[self.user_to_user_id[user]][self.item_to_item_id[item]] = \
self.train_set['feedback'][user][item]
def compute_similarity(self, transpose=False):
"""
        Method to compute a similarity matrix from the original feedback matrix
:param transpose: If True, calculate the similarity in a transpose matrix
:type transpose: bool, default False
"""
# Calculate distance matrix
if transpose:
similarity_matrix = np.float32(squareform(pdist(self.matrix.T, self.similarity_metric)))
else:
similarity_matrix = np.float32(squareform(pdist(self.matrix, self.similarity_metric)))
# Remove NaNs
similarity_matrix[np.isnan(similarity_matrix)] = 1.0
        # transform distances into similarities. Values in the matrix range from 0 to 1
similarity_matrix = (similarity_matrix.max() - similarity_matrix) / similarity_matrix.max()
return similarity_matrix
def evaluate(self, metrics, verbose=True, as_table=False, table_sep='\t', n_ranks=None):
"""
Method to evaluate the final ranking
:param metrics: List of evaluation metrics
:type metrics: list, default ('Prec', 'Recall', 'MAP, 'NDCG')
:param verbose: Print the evaluation results
:type verbose: bool, default True
:param as_table: Print the evaluation results as table
:type as_table: bool, default False
:param table_sep: Delimiter for print results (only work with verbose=True and as_table=True)
:type table_sep: str, default '\t'
:param n_ranks: List of positions to evaluate the ranking
:type n_ranks: list, None
"""
self.evaluation_results = {}
if metrics is None:
metrics = list(['PREC', 'RECALL', 'MAP', 'NDCG'])
if n_ranks is None:
n_ranks = list([1, 3, 5, 10])
results = ItemRecommendationEvaluation(verbose=verbose, as_table=as_table, table_sep=table_sep,
metrics=metrics, n_ranks=n_ranks)
self.evaluation_results = results.evaluate_recommender(predictions=self.ranking, test_set=self.test_set)
def write_ranking(self):
"""
Method to write final ranking
"""
if self.output_file is not None:
WriteFile(self.output_file, data=self.ranking, sep=self.sep).write()
def compute(self, verbose=True):
"""
Method to run the recommender algorithm
:param verbose: Print the information about recommender
:type verbose: bool, default True
"""
# read files
self.read_files()
# initialize empty ranking (Don't remove: important to Cross Validation)
self.ranking = []
if verbose:
test_info = None
main_info = {
'title': 'Item Recommendation > ' + self.recommender_name,
'n_users': len(self.train_set['users']),
'n_items': len(self.train_set['items']),
'n_interactions': self.train_set['number_interactions'],
'sparsity': self.train_set['sparsity']
}
if self.test_file is not None:
test_info = {
'n_users': len(self.test_set['users']),
'n_items': len(self.test_set['items']),
'n_interactions': self.test_set['number_interactions'],
'sparsity': self.test_set['sparsity']
}
print_header(main_info, test_info)
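# Illustrative sketch (not part of the original class; the helper name and toy
# ratings matrix are hypothetical): it mirrors compute_similarity by rescaling
# scipy pdist distances so that identical rows score 1.0 and the most distant
# pair scores 0.0.
def _example_distance_to_similarity():
    ratings = np.array([[5.0, 0.0, 3.0],
                        [4.0, 0.0, 3.0],
                        [0.0, 5.0, 1.0]])
    distances = np.float32(squareform(pdist(ratings, 'cosine')))
    distances[np.isnan(distances)] = 1.0
    return (distances.max() - distances) / distances.max()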
| 35.018779
| 115
| 0.624346
|
1fd4776f588cbe6feb6765677dab45d0fde13711
| 23,334
|
py
|
Python
|
sbibm/third_party/kgof/ex/ex1_vary_n.py
|
michaeldeistler/sbibm-1
|
8e9875f79beb828c07fbf4820b30413914d1ceca
|
[
"MIT"
] | 2
|
2021-05-06T06:19:27.000Z
|
2022-02-20T19:49:55.000Z
|
sbibm/third_party/kgof/ex/ex1_vary_n.py
|
mackelab/sbibm
|
b9781c610a1a80d2de014ee46a29cf061fb6074a
|
[
"MIT"
] | null | null | null |
sbibm/third_party/kgof/ex/ex1_vary_n.py
|
mackelab/sbibm
|
b9781c610a1a80d2de014ee46a29cf061fb6074a
|
[
"MIT"
] | 1
|
2022-01-23T15:54:06.000Z
|
2022-01-23T15:54:06.000Z
|
"""Simulation to test the test power vs increasing sample size"""
__author__ = "wittawat"
import logging
import math
import os
import sys
import time
# import numpy as np
import autograd.numpy as np
# need independent_jobs package
# https://github.com/karlnapf/independent-jobs
# The independent_jobs and kgof have to be in the global search path (.bashrc)
import independent_jobs as inj
from independent_jobs.aggregators.SingleResultAggregator import SingleResultAggregator
from independent_jobs.engines.BatchClusterParameters import BatchClusterParameters
from independent_jobs.engines.SerialComputationEngine import SerialComputationEngine
from independent_jobs.engines.SlurmComputationEngine import SlurmComputationEngine
from independent_jobs.jobs.IndependentJob import IndependentJob
from independent_jobs.results.SingleResult import SingleResult
from independent_jobs.tools.Log import logger
import sbibm.third_party.kgof as kgof
import sbibm.third_party.kgof.data as data
import sbibm.third_party.kgof.density as density
import sbibm.third_party.kgof.glo as glo
import sbibm.third_party.kgof.goftest as gof
import sbibm.third_party.kgof.intertst as tgof
import sbibm.third_party.kgof.kernel as kernel
import sbibm.third_party.kgof.mmd as mgof
import sbibm.third_party.kgof.util as util
"""
All the job functions return a dictionary with the following keys:
- goftest: test object. (may or may not return)
- test_result: the result from calling perform_test(te).
- time_secs: run time in seconds
"""
def job_fssdJ1q_med(p, data_source, tr, te, r, J=1, null_sim=None):
"""
FSSD test with a Gaussian kernel, where the test locations are randomized,
and the Gaussian width is set with the median heuristic. Use full sample.
No training/testing splits.
p: an UnnormalizedDensity
data_source: a DataSource
tr, te: Data
r: trial number (positive integer)
"""
if null_sim is None:
null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=r)
# full data
data = tr + te
X = data.data()
with util.ContextTimer() as t:
# median heuristic
med = util.meddistance(X, subsample=1000)
k = kernel.KGauss(med ** 2)
V = util.fit_gaussian_draw(X, J, seed=r + 1)
fssd_med = gof.FSSD(p, k, V, null_sim=null_sim, alpha=alpha)
fssd_med_result = fssd_med.perform_test(data)
return {"test_result": fssd_med_result, "time_secs": t.secs}
def job_fssdJ5q_med(p, data_source, tr, te, r):
"""
FSSD. J=5
"""
return job_fssdJ1q_med(p, data_source, tr, te, r, J=5)
def job_fssdJ1q_opt(p, data_source, tr, te, r, J=1, null_sim=None):
"""
FSSD with optimization on tr. Test on te. Use a Gaussian kernel.
"""
if null_sim is None:
null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=r)
Xtr = tr.data()
with util.ContextTimer() as t:
# Use grid search to initialize the gwidth
n_gwidth_cand = 5
gwidth_factors = 2.0 ** np.linspace(-3, 3, n_gwidth_cand)
med2 = util.meddistance(Xtr, 1000) ** 2
k = kernel.KGauss(med2)
# fit a Gaussian to the data and draw to initialize V0
V0 = util.fit_gaussian_draw(Xtr, J, seed=r + 1, reg=1e-6)
list_gwidth = np.hstack(((med2) * gwidth_factors))
besti, objs = gof.GaussFSSD.grid_search_gwidth(p, tr, V0, list_gwidth)
gwidth = list_gwidth[besti]
assert util.is_real_num(gwidth), "gwidth not real. Was %s" % str(gwidth)
assert gwidth > 0, "gwidth not positive. Was %.3g" % gwidth
logging.info("After grid search, gwidth=%.3g" % gwidth)
ops = {
"reg": 1e-2,
"max_iter": 30,
"tol_fun": 1e-5,
"disp": True,
"locs_bounds_frac": 30.0,
"gwidth_lb": 1e-1,
"gwidth_ub": 1e4,
}
V_opt, gwidth_opt, info = gof.GaussFSSD.optimize_locs_widths(
p, tr, gwidth, V0, **ops
)
# Use the optimized parameters to construct a test
k_opt = kernel.KGauss(gwidth_opt)
fssd_opt = gof.FSSD(p, k_opt, V_opt, null_sim=null_sim, alpha=alpha)
fssd_opt_result = fssd_opt.perform_test(te)
return {
"test_result": fssd_opt_result,
"time_secs": t.secs,
"goftest": fssd_opt,
"opt_info": info,
}
def job_fssdJ5q_opt(p, data_source, tr, te, r):
return job_fssdJ1q_opt(p, data_source, tr, te, r, J=5)
def job_fssdJ10q_opt(p, data_source, tr, te, r):
return job_fssdJ1q_opt(p, data_source, tr, te, r, J=10)
def job_fssdJ1q_imq_optv(p, data_source, tr, te, r, J=1, null_sim=None):
"""
FSSD with optimization on tr. Test on te. Use an inverse multiquadric
kernel (IMQ). Optimize only the test locations (V). Fix the kernel
parameters to b = -0.5, c=1. These are the recommended values from
Measuring Sample Quality with Kernels
Jackson Gorham, Lester Mackey
"""
if null_sim is None:
null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=r)
Xtr = tr.data()
with util.ContextTimer() as t:
# IMQ kernel parameters
b = -0.5
c = 1.0
# fit a Gaussian to the data and draw to initialize V0
V0 = util.fit_gaussian_draw(Xtr, J, seed=r + 1, reg=1e-6)
ops = {
"reg": 1e-2,
"max_iter": 40,
"tol_fun": 1e-4,
"disp": True,
"locs_bounds_frac": 10.0,
}
V_opt, info = gof.IMQFSSD.optimize_locs(p, tr, b, c, V0, **ops)
k_imq = kernel.KIMQ(b=b, c=c)
# Use the optimized parameters to construct a test
fssd_imq = gof.FSSD(p, k_imq, V_opt, null_sim=null_sim, alpha=alpha)
fssd_imq_result = fssd_imq.perform_test(te)
return {
"test_result": fssd_imq_result,
"time_secs": t.secs,
"goftest": fssd_imq,
"opt_info": info,
}
def job_fssdJ5q_imq_optv(p, data_source, tr, te, r):
return job_fssdJ1q_imq_optv(p, data_source, tr, te, r, J=5)
def job_me_opt(p, data_source, tr, te, r, J=5):
"""
ME test of Jitkrittum et al., 2016 used as a goodness-of-fit test.
Gaussian kernel. Optimize test locations and Gaussian width.
"""
data = tr + te
X = data.data()
with util.ContextTimer() as t:
# median heuristic
# pds = p.get_datasource()
# datY = pds.sample(data.sample_size(), seed=r+294)
# Y = datY.data()
# XY = np.vstack((X, Y))
# med = util.meddistance(XY, subsample=1000)
op = {
"n_test_locs": J,
"seed": r + 5,
"max_iter": 40,
"batch_proportion": 1.0,
"locs_step_size": 1.0,
"gwidth_step_size": 0.1,
"tol_fun": 1e-4,
"reg": 1e-4,
}
# optimize on the training set
me_opt = tgof.GaussMETestOpt(
p, n_locs=J, tr_proportion=tr_proportion, alpha=alpha, seed=r + 111
)
me_result = me_opt.perform_test(data, op)
return {"test_result": me_result, "time_secs": t.secs}
def job_kstein_med(p, data_source, tr, te, r):
"""
Kernel Stein discrepancy test of Liu et al., 2016 and Chwialkowski et al.,
2016. Use full sample. Use Gaussian kernel.
"""
# full data
data = tr + te
X = data.data()
with util.ContextTimer() as t:
# median heuristic
med = util.meddistance(X, subsample=1000)
k = kernel.KGauss(med ** 2)
kstein = gof.KernelSteinTest(p, k, alpha=alpha, n_simulate=1000, seed=r)
kstein_result = kstein.perform_test(data)
return {"test_result": kstein_result, "time_secs": t.secs}
def job_kstein_imq(p, data_source, tr, te, r):
"""
Kernel Stein discrepancy test of Liu et al., 2016 and Chwialkowski et al.,
2016. Use full sample. Use the inverse multiquadric kernel (IMQ) studied
in
Measuring Sample Quality with Kernels
Gorham and Mackey 2017.
    Parameters are fixed to the recommended values: beta = b = -0.5, c = 1.
"""
# full data
data = tr + te
X = data.data()
with util.ContextTimer() as t:
k = kernel.KIMQ(b=-0.5, c=1.0)
kstein = gof.KernelSteinTest(p, k, alpha=alpha, n_simulate=1000, seed=r)
kstein_result = kstein.perform_test(data)
return {"test_result": kstein_result, "time_secs": t.secs}
def job_lin_kstein_med(p, data_source, tr, te, r):
"""
Linear-time version of the kernel Stein discrepancy test of Liu et al.,
2016 and Chwialkowski et al., 2016. Use full sample.
"""
# full data
data = tr + te
X = data.data()
with util.ContextTimer() as t:
# median heuristic
med = util.meddistance(X, subsample=1000)
k = kernel.KGauss(med ** 2)
lin_kstein = gof.LinearKernelSteinTest(p, k, alpha=alpha, seed=r)
lin_kstein_result = lin_kstein.perform_test(data)
return {"test_result": lin_kstein_result, "time_secs": t.secs}
def job_mmd_med(p, data_source, tr, te, r):
"""
MMD test of Gretton et al., 2012 used as a goodness-of-fit test.
Require the ability to sample from p i.e., the UnnormalizedDensity p has
to be able to return a non-None from get_datasource()
"""
# full data
data = tr + te
X = data.data()
with util.ContextTimer() as t:
# median heuristic
pds = p.get_datasource()
datY = pds.sample(data.sample_size(), seed=r + 294)
Y = datY.data()
XY = np.vstack((X, Y))
# If p, q differ very little, the median may be very small, rejecting H0
# when it should not?
medx = util.meddistance(X, subsample=1000)
medy = util.meddistance(Y, subsample=1000)
medxy = util.meddistance(XY, subsample=1000)
med_avg = (medx + medy + medxy) / 3.0
k = kernel.KGauss(med_avg ** 2)
mmd_test = mgof.QuadMMDGof(p, k, n_permute=400, alpha=alpha, seed=r)
mmd_result = mmd_test.perform_test(data)
return {"test_result": mmd_result, "time_secs": t.secs}
def job_mmd_opt(p, data_source, tr, te, r):
"""
MMD test of Gretton et al., 2012 used as a goodness-of-fit test.
Require the ability to sample from p i.e., the UnnormalizedDensity p has
to be able to return a non-None from get_datasource()
With optimization. Gaussian kernel.
"""
data = tr + te
X = data.data()
with util.ContextTimer() as t:
# median heuristic
pds = p.get_datasource()
datY = pds.sample(data.sample_size(), seed=r + 294)
Y = datY.data()
XY = np.vstack((X, Y))
med = util.meddistance(XY, subsample=1000)
# Construct a list of kernels to try based on multiples of the median
# heuristic
# list_gwidth = np.hstack( (np.linspace(20, 40, 10), (med**2)
# *(2.0**np.linspace(-2, 2, 20) ) ) )
list_gwidth = (med ** 2) * (2.0 ** np.linspace(-4, 4, 30))
list_gwidth.sort()
candidate_kernels = [kernel.KGauss(gw2) for gw2 in list_gwidth]
mmd_opt = mgof.QuadMMDGofOpt(p, n_permute=300, alpha=alpha, seed=r)
mmd_result = mmd_opt.perform_test(
data,
candidate_kernels=candidate_kernels,
tr_proportion=tr_proportion,
reg=1e-3,
)
return {"test_result": mmd_result, "time_secs": t.secs}
def job_mmd_dgauss_opt(p, data_source, tr, te, r):
"""
MMD test of Gretton et al., 2012 used as a goodness-of-fit test.
Require the ability to sample from p i.e., the UnnormalizedDensity p has
to be able to return a non-None from get_datasource()
With optimization. Diagonal Gaussian kernel where there is one Gaussian width
for each dimension.
"""
data = tr + te
X = data.data()
d = X.shape[1]
with util.ContextTimer() as t:
# median heuristic
pds = p.get_datasource()
datY = pds.sample(data.sample_size(), seed=r + 294)
Y = datY.data()
XY = np.vstack((X, Y))
# Get the median heuristic for each dimension
meds = np.zeros(d)
for i in range(d):
medi = util.meddistance(XY[:, [i]], subsample=1000)
meds[i] = medi
# Construct a list of kernels to try based on multiples of the median
# heuristic
med_factors = 2.0 ** np.linspace(-4, 4, 20)
candidate_kernels = []
for i in range(len(med_factors)):
ki = kernel.KDiagGauss((meds ** 2) * med_factors[i])
candidate_kernels.append(ki)
mmd_opt = mgof.QuadMMDGofOpt(p, n_permute=300, alpha=alpha, seed=r + 56)
mmd_result = mmd_opt.perform_test(
data,
candidate_kernels=candidate_kernels,
tr_proportion=tr_proportion,
reg=1e-3,
)
return {"test_result": mmd_result, "time_secs": t.secs}
# Define our custom Job, which inherits from base class IndependentJob
class Ex1Job(IndependentJob):
def __init__(self, aggregator, p, data_source, prob_label, rep, job_func, n):
# walltime = 60*59*24
walltime = 60 * 59
memory = int(tr_proportion * n * 1e-2) + 50
IndependentJob.__init__(self, aggregator, walltime=walltime, memory=memory)
# p: an UnnormalizedDensity
self.p = p
self.data_source = data_source
self.prob_label = prob_label
self.rep = rep
self.job_func = job_func
self.n = n
# we need to define the abstract compute method. It has to return an instance
# of JobResult base class
def compute(self):
p = self.p
data_source = self.data_source
r = self.rep
n = self.n
job_func = self.job_func
data = data_source.sample(n, seed=r)
with util.ContextTimer() as t:
tr, te = data.split_tr_te(tr_proportion=tr_proportion, seed=r + 21)
prob_label = self.prob_label
logger.info(
"computing. %s. prob=%s, r=%d,\
n=%d"
% (job_func.__name__, prob_label, r, n)
)
job_result = job_func(p, data_source, tr, te, r)
# create ScalarResult instance
result = SingleResult(job_result)
# submit the result to my own aggregator
self.aggregator.submit_result(result)
func_name = job_func.__name__
logger.info(
"done. ex2: %s, prob=%s, r=%d, n=%d. Took: %.3g s "
% (func_name, prob_label, r, n, t.secs)
)
# save result
fname = "%s-%s-n%d_r%d_a%.3f_trp%.2f.p" % (
prob_label,
func_name,
n,
r,
alpha,
tr_proportion,
)
glo.ex_save_result(ex, job_result, prob_label, fname)
# This import is needed so that pickle knows about the class Ex1Job.
# pickle is used when collecting the results from the submitted jobs.
from sbibm.third_party.kgof.ex.ex1_vary_n import (
Ex1Job,
job_fssdJ1q_imq_optv,
job_fssdJ1q_med,
job_fssdJ1q_opt,
job_fssdJ5q_imq_optv,
job_fssdJ5q_med,
job_fssdJ5q_opt,
job_fssdJ10q_opt,
job_kstein_imq,
job_kstein_med,
job_lin_kstein_med,
job_me_opt,
job_mmd_dgauss_opt,
job_mmd_med,
job_mmd_opt,
)
# --- experimental setting -----
ex = 1
# significance level of the test
alpha = 0.05
# Proportion of training sample relative to the full sample size n
tr_proportion = 0.2
# repetitions for each sample size
reps = 200
# tests to try
method_job_funcs = [
job_fssdJ5q_opt,
# job_fssdJ5q_med,
# job_me_opt,
# job_kstein_med,
# job_lin_kstein_med,
job_mmd_opt,
# job_fssdJ5q_imq_optv,
# job_fssdJ10q_opt,
# job_kstein_imq,
# job_mmd_dgauss_opt,
# job_mmd_med,
]
# If is_rerun==False, do not rerun the experiment if a result file for the current
# setting of (ni, r) already exists.
is_rerun = False
# ---------------------------
def gbrbm_perturb(var_perturb_B, dx=50, dh=10):
"""
Get a Gaussian-Bernoulli RBM problem where the first entry of the B matrix
(the matrix linking the latent and the observation) is perturbed.
- var_perturb_B: Gaussian noise variance for perturbing B.
- dx: observed dimension
- dh: latent dimension
Return p (density), data source
"""
with util.NumpySeedContext(seed=10):
B = np.random.randint(0, 2, (dx, dh)) * 2 - 1.0
b = np.random.randn(dx)
c = np.random.randn(dh)
p = density.GaussBernRBM(B, b, c)
B_perturb = np.copy(B)
if var_perturb_B > 1e-7:
B_perturb[0, 0] = B_perturb[0, 0] + np.random.randn(1) * np.sqrt(
var_perturb_B
)
ds = data.DSGaussBernRBM(B_perturb, b, c, burnin=2000)
return p, ds
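# Illustrative sketch (not part of the original script; the helper name is
# hypothetical): contrasting the unperturbed H0 setting with a perturbed H1
# setting built by gbrbm_perturb, using the dx=20, dh=10 dimensions that appear
# in the problem definitions below.
def _example_gbrbm_h0_vs_h1():
    p_h0, ds_h0 = gbrbm_perturb(var_perturb_B=0, dx=20, dh=10)
    p_h1, ds_h1 = gbrbm_perturb(var_perturb_B=0.1, dx=20, dh=10)
    return (p_h0, ds_h0), (p_h1, ds_h1)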
def get_ns_pqsource(prob_label):
"""
    Return (ns, p, ds), a tuple where
- ns: a list of sample sizes
- p: a Density representing the distribution p
    - ds: a DataSource corresponding to one parameter setting.
    The DataSource generates samples from q.
"""
gmd_p01_d10_ns = [1000, 3000, 5000]
# gb_rbm_dx50_dh10_vars = [0, 1e-3, 2e-3, 3e-3]
prob2tuples = {
# vary d. P = N(0, I), Q = N( (c,..0), I)
"gmd_p03_d10_ns": (
gmd_p01_d10_ns,
density.IsotropicNormal(np.zeros(10), 1),
data.DSIsotropicNormal(np.hstack((0.03, np.zeros(10 - 1))), 1),
),
# Gaussian Bernoulli RBM. dx=50, dh=10
# Perturbation variance to B[0, 0] is 0.1
"gbrbm_dx50_dh10_vp1": ([i * 1000 for i in range(1, 4 + 1)],) +
# ([1000, 5000], ) +
gbrbm_perturb(var_perturb_B=0.1, dx=50, dh=10),
# Gaussian Bernoulli RBM. dx=50, dh=40
# Perturbation variance to B[0, 0] is 0.1
"gbrbm_dx50_dh40_vp1": ([i * 1000 for i in range(1, 4 + 1)],) +
# ([1000, 5000], ) +
gbrbm_perturb(var_perturb_B=0.1, dx=50, dh=40),
# Gaussian Bernoulli RBM. dx=50, dh=10
# No perturbation
"gbrbm_dx50_dh10_h0": ([i * 1000 for i in range(1, 4 + 1)],) +
# ([1000, 5000], ) +
gbrbm_perturb(var_perturb_B=0, dx=50, dh=10),
# Gaussian Bernoulli RBM. dx=50, dh=40
# No perturbation
"gbrbm_dx50_dh40_h0": ([i * 1000 for i in range(1, 4 + 1)],) +
# ([1000, 5000], ) +
gbrbm_perturb(var_perturb_B=0, dx=50, dh=40),
# Gaussian Bernoulli RBM. dx=20, dh=10
# Perturbation variance to B[0, 0] is 0.1
"gbrbm_dx20_dh10_vp1": ([i * 1000 for i in range(2, 5 + 1)],)
+ gbrbm_perturb(var_perturb_B=0.1, dx=20, dh=10),
# Gaussian Bernoulli RBM. dx=20, dh=10
# No perturbation
"gbrbm_dx20_dh10_h0": ([i * 1000 for i in range(2, 5 + 1)],)
+ gbrbm_perturb(var_perturb_B=0, dx=20, dh=10),
}
if prob_label not in prob2tuples:
raise ValueError(
"Unknown problem label. Need to be one of %s" % str(prob2tuples.keys())
)
return prob2tuples[prob_label]
def run_problem(prob_label):
"""Run the experiment"""
ns, p, ds = get_ns_pqsource(prob_label)
# /////// submit jobs //////////
# create folder name string
# result_folder = glo.result_folder()
from sbibm.third_party.kgof.config import expr_configs
tmp_dir = expr_configs["scratch_path"]
foldername = os.path.join(tmp_dir, "kgof_slurm", "e%d" % ex)
logger.info("Setting engine folder to %s" % foldername)
# create parameter instance that is needed for any batch computation engine
logger.info("Creating batch parameter instance")
batch_parameters = BatchClusterParameters(
foldername=foldername, job_name_base="e%d_" % ex, parameter_prefix=""
)
# Use the following line if Slurm queue is not used.
# engine = SerialComputationEngine()
# engine = SlurmComputationEngine(batch_parameters, partition='wrkstn,compute')
engine = SlurmComputationEngine(batch_parameters)
n_methods = len(method_job_funcs)
# repetitions x len(ns) x #methods
aggregators = np.empty((reps, len(ns), n_methods), dtype=object)
for r in range(reps):
for ni, n in enumerate(ns):
for mi, f in enumerate(method_job_funcs):
# name used to save the result
func_name = f.__name__
fname = "%s-%s-n%d_r%d_a%.3f_trp%.2f.p" % (
prob_label,
func_name,
n,
r,
alpha,
tr_proportion,
)
if not is_rerun and glo.ex_file_exists(ex, prob_label, fname):
logger.info("%s exists. Load and return." % fname)
job_result = glo.ex_load_result(ex, prob_label, fname)
sra = SingleResultAggregator()
sra.submit_result(SingleResult(job_result))
aggregators[r, ni, mi] = sra
else:
# result not exists or rerun
# p: an UnnormalizedDensity object
job = Ex1Job(SingleResultAggregator(), p, ds, prob_label, r, f, n)
agg = engine.submit_job(job)
aggregators[r, ni, mi] = agg
# let the engine finish its business
logger.info("Wait for all call in engine")
engine.wait_for_all()
# ////// collect the results ///////////
logger.info("Collecting results")
job_results = np.empty((reps, len(ns), n_methods), dtype=object)
for r in range(reps):
for ni, n in enumerate(ns):
for mi, f in enumerate(method_job_funcs):
logger.info("Collecting result (%s, r=%d, n=%rd)" % (f.__name__, r, n))
# let the aggregator finalize things
aggregators[r, ni, mi].finalize()
# aggregators[i].get_final_result() returns a SingleResult instance,
# which we need to extract the actual result
job_result = aggregators[r, ni, mi].get_final_result().result
job_results[r, ni, mi] = job_result
# func_names = [f.__name__ for f in method_job_funcs]
# func2labels = exglobal.get_func2label_map()
# method_labels = [func2labels[f] for f in func_names if f in func2labels]
# save results
results = {
"job_results": job_results,
"data_source": ds,
"alpha": alpha,
"repeats": reps,
"ns": ns,
"p": p,
"tr_proportion": tr_proportion,
"method_job_funcs": method_job_funcs,
"prob_label": prob_label,
}
# class name
fname = "ex%d-%s-me%d_rs%d_nmi%d_nma%d_a%.3f_trp%.2f.p" % (
ex,
prob_label,
n_methods,
reps,
min(ns),
max(ns),
alpha,
tr_proportion,
)
glo.ex_save_result(ex, results, fname)
logger.info("Saved aggregated results to %s" % fname)
def main():
if len(sys.argv) != 2:
print("Usage: %s problem_label" % sys.argv[0])
sys.exit(1)
prob_label = sys.argv[1]
run_problem(prob_label)
if __name__ == "__main__":
main()
| 33.525862
| 87
| 0.609754
|
6d3f9e56cdce852aeb7534f93a3184020e6ad210
| 3,957
|
py
|
Python
|
coord_utils.py
|
JunwookHeo/YOLO-OT
|
7004f25ce858acb7253bfcbc6fabeb915d8747a3
|
[
"MIT"
] | null | null | null |
coord_utils.py
|
JunwookHeo/YOLO-OT
|
7004f25ce858acb7253bfcbc6fabeb915d8747a3
|
[
"MIT"
] | null | null | null |
coord_utils.py
|
JunwookHeo/YOLO-OT
|
7004f25ce858acb7253bfcbc6fabeb915d8747a3
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
class coord_utils:
@staticmethod
def location_to_probability_map(size, loc):
        # loc is a normalized (cx, cy, w, h[, conf]) location in [0, 1], scaled below to the map size
promap_vec = torch.zeros([size, size], dtype=torch.float32)
try:
conf = loc[4]
except IndexError:
conf = 1.0
cx = loc[0]*size
cy = loc[1]*size
w = loc[2]*size
h = loc[3]*size
[x1, y1, x2, y2] = [(cx - w/2.).int(), (cy - h/2.).int(), (cx + w/2.).int(), (cy + h/2.).int()]
if x1 == x2: x2 += 1
if y1 == y2: y2 += 1
x1 = x1.clamp(0, size)
y1 = y1.clamp(0, size)
x2 = x2.clamp(0, size)
y2 = y2.clamp(0, size)
for y in range(y1, y2):
for x in range(x1, x2):
promap_vec[y][x] = conf
return promap_vec
@staticmethod
def locations_to_probability_maps(size, locs):
pms = []
for loc in locs:
pm = coord_utils.location_to_probability_map(size, loc)
pms.append(pm.view(-1))
return torch.stack(pms, dim=0)
@staticmethod
def probability_map_to_location(size, pmap):
# probability map to location (cx, cy, w, h)
pmap = pmap.view(size, size)
xlist = []
ylist = []
for y in range(size):
for x in range(size):
if(pmap[y][x] >= 0.5):
xlist.append(x+0.5)
ylist.append(y+0.5)
if len(xlist) == 0 or len(ylist) == 0:
return torch.zeros(4, dtype=torch.float32)
ax = np.array(xlist)
ay = np.array(ylist)
x1 = ax.mean()
y1 = ay.mean()
k = 3.5 #np.sqrt(2)
w = ax.std() * k + 0.5
h = ay.std() * k + 0.5
loc = torch.tensor([x1/size, y1/size, w/size, h/size], dtype=torch.float32)
return loc
@staticmethod
def normal_to_location(wid, ht, location):
# Normalized location to coordinate
wid *= 1.0
ht *= 1.0
location[0] *= wid
location[1] *= ht
location[2] *= wid
location[3] *= ht
return location
@staticmethod
def location_to_normal(wid, ht, location):
# Coordinates to normalized location
wid *= 1.0
ht *= 1.0
location[0] /= wid
location[1] /= ht
location[2] /= wid
location[3] /= ht
return location
@staticmethod
def bbox_iou(box1, box2, x1y1x2y2=True):
"""
Returns the IoU of two bounding boxes
"""
if not x1y1x2y2: # (cx, cy, w, h)
# Transform from center and width to exact coordinates
b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
else:
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
        # get the coordinates of the intersection rectangle
inter_rect_x1 = torch.max(b1_x1, b2_x1)
inter_rect_y1 = torch.max(b1_y1, b2_y1)
inter_rect_x2 = torch.min(b1_x2, b2_x2)
inter_rect_y2 = torch.min(b1_y2, b2_y2)
# Intersection area
inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(
inter_rect_y2 - inter_rect_y1 + 1, min=0
)
# Union Area
b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)
return iou
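# Illustrative sketch (not part of the original module; the helper name and box
# values are hypothetical): computing the IoU of two overlapping boxes given in
# corner (x1, y1, x2, y2) format with bbox_iou.
def _example_bbox_iou():
    box1 = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
    box2 = torch.tensor([[5.0, 5.0, 15.0, 15.0]])
    return coord_utils.bbox_iou(box1, box2, x1y1x2y2=True)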
| 32.170732
| 103
| 0.503159
|
0057a854b922b0044747744070c98956a051205b
| 537
|
py
|
Python
|
features/environment.py
|
sanjay051099/try2-
|
2ee165320476184a7390898c7a873844fc79e77a
|
[
"MIT"
] | 199
|
2019-12-14T02:25:05.000Z
|
2022-03-31T11:26:12.000Z
|
features/environment.py
|
sajib1066/opensource-job-portal
|
1288046e32f009c38742a28e4552ffafafabf684
|
[
"MIT"
] | 91
|
2019-12-12T12:19:34.000Z
|
2022-03-25T05:52:04.000Z
|
features/environment.py
|
sajib1066/opensource-job-portal
|
1288046e32f009c38742a28e4552ffafafabf684
|
[
"MIT"
] | 131
|
2019-12-13T06:26:06.000Z
|
2022-03-29T19:45:18.000Z
|
import django
import os
# Configure Django before importing anything that touches models.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jobsp.settings_server")
django.setup()
from django.core.management import call_command
from splinter.browser import Browser
from features.helpers import initiate_test_data
from peeldb.models import User
def before_all(context):
User.objects.filter(email="test@mp.com").delete()
context.browser = Browser("firefox")
context.server_url = "http://test.peeljobs.com:8000"
def after_all(context):
context.browser.quit()
context.browser = None
| 24.409091
| 72
| 0.772812
|
6045d45495bfdcece29e0d2d868d4548d3d6cf48
| 499
|
py
|
Python
|
bibbutler_web/migrations/0005_auto_20160615_0446.py
|
dolonnen/bibbuttler
|
a9f672d0321fa6d060e204ecc952ed333edc1d81
|
[
"MIT"
] | null | null | null |
bibbutler_web/migrations/0005_auto_20160615_0446.py
|
dolonnen/bibbuttler
|
a9f672d0321fa6d060e204ecc952ed333edc1d81
|
[
"MIT"
] | null | null | null |
bibbutler_web/migrations/0005_auto_20160615_0446.py
|
dolonnen/bibbuttler
|
a9f672d0321fa6d060e204ecc952ed333edc1d81
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-15 04:46
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('bibbutler_web', '0004_auto_20160604_1223'),
]
operations = [
migrations.RemoveField(
model_name='entry',
name='addendum',
),
migrations.RemoveField(
model_name='entry',
name='note',
),
]
| 20.791667
| 53
| 0.583166
|
94534a254feda103939435d3ca17bc4c5d949d71
| 1,222
|
py
|
Python
|
django_modelapiview/Token.py
|
TiphaineLAURENT/Django_APIView
|
83cf33f7af7263ab8a8451b867ea8f38b392cb6a
|
[
"BSD-2-Clause"
] | null | null | null |
django_modelapiview/Token.py
|
TiphaineLAURENT/Django_APIView
|
83cf33f7af7263ab8a8451b867ea8f38b392cb6a
|
[
"BSD-2-Clause"
] | 2
|
2020-08-15T21:37:38.000Z
|
2020-08-18T19:25:02.000Z
|
django_modelapiview/Token.py
|
TiphaineLAURENT/Django_APIView
|
83cf33f7af7263ab8a8451b867ea8f38b392cb6a
|
[
"BSD-2-Clause"
] | null | null | null |
from django.core import signing
from datetime import timedelta
class StillSigned(ValueError):
def __init__(self):
super().__init__("Token still signed")
class Token(object):
"""
Represents a token used when authenticating a request toward an APIView with authentication set to True
"""
_max_age = timedelta(hours=2)
_body = None
_signed_data = None
def __init__(self, body:dict=None, signed_data:str=None):
self._body = body
self._signed_data = signed_data
def __str__(self) -> str:
return self._signed_data if self.is_signed() else str(self._body)
def __repr__(self) -> str:
return f"<Token id({id(self)}): {'S' if self.is_signed() else 'Not s'}igned>"
def sign(self) -> None:
self._signed_data = signing.dumps(self._body)
def unsign(self) -> None:
self._body = signing.loads(self._signed_data, max_age=self._max_age)
def is_signed(self) -> bool:
return self._signed_data is not None
def is_unsigned(self) -> bool:
return self._body is not None
@property
def uid(self):
if not self.is_unsigned():
raise StillSigned
return self._body['uid']
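A hedged usage sketch of the sign/unsign round trip, assuming a configured Django settings module (django.core.signing needs a SECRET_KEY):

token = Token(body={"uid": 42})
token.sign()                  # serialise and sign the body with django.core.signing
signed = str(token)           # __str__ returns the signed payload once signed

received = Token(signed_data=signed)
received.unsign()             # raises signing.SignatureExpired once _max_age (2 hours) has passed
assert received.uid == 42     # accessing uid before unsign() would raise StillSigned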
| 25.458333
| 109
| 0.648936
|
2f86245c66691d6825000eeb2df122c6fc198da4
| 2,409
|
py
|
Python
|
qdarkstyle/palette.py
|
oscargus/QDarkStyleSheet
|
6aef1de7e97227899c478a5634d136d80991123e
|
[
"CC-BY-4.0"
] | 7
|
2020-08-11T16:30:04.000Z
|
2022-03-25T08:50:19.000Z
|
qdarkstyle/palette.py
|
oscargus/QDarkStyleSheet
|
6aef1de7e97227899c478a5634d136d80991123e
|
[
"CC-BY-4.0"
] | 1
|
2020-06-24T04:50:29.000Z
|
2020-06-24T17:40:02.000Z
|
qdarkstyle/palette.py
|
oscargus/QDarkStyleSheet
|
6aef1de7e97227899c478a5634d136d80991123e
|
[
"CC-BY-4.0"
] | 1
|
2021-09-15T16:28:46.000Z
|
2021-09-15T16:28:46.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""QDarkStyle default palette."""
# Standard library imports
from collections import OrderedDict
class DarkPalette(object):
"""Theme variables."""
# Color
COLOR_BACKGROUND_LIGHT = '#505F69'
COLOR_BACKGROUND_NORMAL = '#32414B'
COLOR_BACKGROUND_DARK = '#19232D'
COLOR_FOREGROUND_LIGHT = '#F0F0F0'
COLOR_FOREGROUND_NORMAL = '#AAAAAA'
COLOR_FOREGROUND_DARK = '#787878'
COLOR_SELECTION_LIGHT = '#148CD2'
COLOR_SELECTION_NORMAL = '#1464A0'
COLOR_SELECTION_DARK = '#14506E'
OPACITY_TOOLTIP = 230
# Size
SIZE_BORDER_RADIUS = '4px'
# Borders
BORDER_LIGHT = '1px solid $COLOR_BACKGROUND_LIGHT'
BORDER_NORMAL = '1px solid $COLOR_BACKGROUND_NORMAL'
BORDER_DARK = '1px solid $COLOR_BACKGROUND_DARK'
BORDER_SELECTION_LIGHT = '1px solid $COLOR_SELECTION_LIGHT'
BORDER_SELECTION_NORMAL = '1px solid $COLOR_SELECTION_NORMAL'
BORDER_SELECTION_DARK = '1px solid $COLOR_SELECTION_DARK'
# Example of additional widget specific variables
W_STATUS_BAR_BACKGROUND_COLOR = COLOR_SELECTION_DARK
# Paths
PATH_RESOURCES = "':/qss_icons'"
@classmethod
def to_dict(cls, colors_only=False):
"""Convert variables to dictionary."""
order = [
'COLOR_BACKGROUND_LIGHT',
'COLOR_BACKGROUND_NORMAL',
'COLOR_BACKGROUND_DARK',
'COLOR_FOREGROUND_LIGHT',
'COLOR_FOREGROUND_NORMAL',
'COLOR_FOREGROUND_DARK',
'COLOR_SELECTION_LIGHT',
'COLOR_SELECTION_NORMAL',
'COLOR_SELECTION_DARK',
'OPACITY_TOOLTIP',
'SIZE_BORDER_RADIUS',
'BORDER_LIGHT',
'BORDER_NORMAL',
'BORDER_DARK',
'BORDER_SELECTION_LIGHT',
'BORDER_SELECTION_NORMAL',
'BORDER_SELECTION_DARK',
'W_STATUS_BAR_BACKGROUND_COLOR',
'PATH_RESOURCES',
]
dic = OrderedDict()
for var in order:
value = getattr(cls, var)
if colors_only:
if not var.startswith('COLOR'):
value = None
if value:
dic[var] = value
return dic
@classmethod
def color_palette(cls):
"""Return the ordered colored palette dictionary."""
return cls.to_dict(colors_only=True)
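Both class methods only build ordered dictionaries, so the palette can be inspected without any Qt dependency; a minimal sketch:

colors = DarkPalette.color_palette()
assert colors['COLOR_BACKGROUND_DARK'] == '#19232D'   # colours only
full = DarkPalette.to_dict()                          # also opacity, borders, sizes and paths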
| 28.011628
| 65
| 0.624741
|
1feec120e68e91f5d8adde46fdf0fdd47cbf72d7
| 1,880
|
py
|
Python
|
tests/test_GBM.py
|
tercenya/funcsim
|
36fb837cfc7f8ab7ff0c216c8c4ab3a476e4ca5c
|
[
"BSD-3-Clause"
] | 1
|
2021-12-08T03:40:26.000Z
|
2021-12-08T03:40:26.000Z
|
tests/test_GBM.py
|
tercenya/funcsim
|
36fb837cfc7f8ab7ff0c216c8c4ab3a476e4ca5c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_GBM.py
|
tercenya/funcsim
|
36fb837cfc7f8ab7ff0c216c8c4ab3a476e4ca5c
|
[
"BSD-3-Clause"
] | null | null | null |
import math
import numpy as np
from scipy import stats
import xarray as xr
import funcsim as fs
def gbm(s0, dt, mu, sig, eps):
# update a variable via a standard geometric Brownian motion
return s0 * math.exp((mu - 0.5 * sig**2) * dt + eps * sig * dt ** 0.5)
def step(data, draw):
# take one step through time
# value of p in previous period
pLag1 = fs.recall(data, "p", lag=1)
# uniform draw --> standard normal draw
u = next(draw)
eps = stats.norm.ppf(u)
# update all intermediate variables
pNew = gbm(s0=pLag1, dt=1.0 / 12.0, mu=0.05, sig=0.10, eps=eps)
cNew = max(0.0, pNew - 1.0)
# return updated price history
dataNew = fs.chron(data, {"p": pNew, "c": cNew})
return dataNew
def data0():
# set up existing/historical data
steps = [0, 1, 2]
variables = ["p", "c"]
a = np.array([[1.0, np.nan], [1.01, np.nan], [0.99, np.nan]])
d0 = xr.DataArray(data=a, coords=(('steps', steps),
('variables', variables)))
return d0
def test_0(): # basic
out = fs.recdyn(step=step, data0=data0(), steps=10, trials=500)
assert type(out) == xr.DataArray
print(out)
print(out[:, 0, 10].mean())
assert abs(float(out[:, 0, 10].mean()) - 1.0234) < 0.01
def test_1(): # use multi
out = fs.recdyn(step=step, data0=data0(), steps=10, trials=500, multi=True)
assert type(out) == xr.DataArray
assert abs(float(out[:, 0, 10].mean()) - 1.0234) < 0.01
def test_2(): # alternative seed
out = fs.recdyn(step=step, data0=data0(), steps=10, trials=500, seed=123)
assert type(out) == xr.DataArray
assert abs(float(out[:, 0, 10].mean()) - 1.0234) < 0.01
def test_3(): # many steps (check that recursion does not bust stack)
out = fs.recdyn(step=step, data0=data0(), steps=2000, trials=10)
assert type(out) == xr.DataArray
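With eps = 0 the gbm() update above reduces to the drift term s0 * exp((mu - 0.5*sig**2) * dt), which makes a quick hand check possible; this sketch is not part of the test suite:

# exp(0.045 / 12) is roughly 1.003757
assert abs(gbm(s0=1.0, dt=1.0 / 12.0, mu=0.05, sig=0.10, eps=0.0) - 1.003757) < 1e-6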
| 28.923077
| 79
| 0.601596
|
346d01f0f3a8fd7eaf779872da28f885775c3432
| 710
|
py
|
Python
|
prototype/solver/cls_quant_eval_solver.py
|
ModelTC/mqbench-paper
|
8d25a3b63c0cde4d904f77439fc435b49b0b33d4
|
[
"Apache-2.0"
] | 6
|
2021-09-26T03:24:26.000Z
|
2022-03-17T09:19:18.000Z
|
prototype/solver/cls_quant_eval_solver.py
|
ModelTC/mqbench-paper
|
8d25a3b63c0cde4d904f77439fc435b49b0b33d4
|
[
"Apache-2.0"
] | null | null | null |
prototype/solver/cls_quant_eval_solver.py
|
ModelTC/mqbench-paper
|
8d25a3b63c0cde4d904f77439fc435b49b0b33d4
|
[
"Apache-2.0"
] | 2
|
2021-11-14T18:38:22.000Z
|
2022-03-17T09:19:22.000Z
|
from .cls_quant_new_solver import ClsNewSolver
from prototype.utils.dist import link_dist
import argparse
class ClsEvalSolver(ClsNewSolver):
def __init__(self, config_file):
super(ClsEvalSolver, self).__init__(config_file)
def train(self):
self.initialize(0, 1, flag=True, calib_bn=True)
@link_dist
def main():
parser = argparse.ArgumentParser(description='Classification Solver')
parser.add_argument('--config', required=True, type=str)
parser.add_argument('--evaluate', action='store_true')
args = parser.parse_args()
# build solver
solver = ClsEvalSolver(args.config)
# evaluate or train
solver.train()
if __name__ == '__main__':
main()
| 22.903226
| 73
| 0.712676
|
297c619572896b747fe8a8a263a9da6a80ae0347
| 2,869
|
py
|
Python
|
src/preprocess/feature_selection.py
|
joaopfonseca/remote_sensing
|
1c3704e2ea4de6ec803e6b42a1557660768ab03b
|
[
"MIT"
] | null | null | null |
src/preprocess/feature_selection.py
|
joaopfonseca/remote_sensing
|
1c3704e2ea4de6ec803e6b42a1557660768ab03b
|
[
"MIT"
] | null | null | null |
src/preprocess/feature_selection.py
|
joaopfonseca/remote_sensing
|
1c3704e2ea4de6ec803e6b42a1557660768ab03b
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
#from imblearn.under_sampling.base import BaseCleaningSampler
from sklearn.ensemble import RandomForestClassifier
#from sklearn.utils.validation import check_is_fitted
from sklearn.feature_selection import SelectFromModel
from scipy.stats import spearmanr, pearsonr
from sklearn.base import BaseEstimator, MetaEstimatorMixin
from sklearn.feature_selection.base import SelectorMixin
from sklearn.model_selection import train_test_split
from rfpimp import importances as permutation_importances
class PermutationRF(BaseEstimator, SelectorMixin, MetaEstimatorMixin):
def __init__(self, n_estimators=100, test_size=.2, max_features=None, n_jobs=-1, random_state=None):
super().__init__()
self.n_estimators = n_estimators
self.n_jobs = n_jobs
self.random_state = random_state
self.test_size = test_size
self.max_features = max_features
self.model = RandomForestClassifier(n_estimators=self.n_estimators, n_jobs=self.n_jobs,
random_state=self.random_state)
def fit(self, X, y):
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=self.test_size, random_state=self.random_state)
self.model.fit(X_tr,y_tr)
self.permutation_importances_ = permutation_importances(
self.model,
pd.DataFrame(X_te, columns=np.arange(X_te.shape[-1]).astype(str)),
pd.Series(y_te)
)
return self
def transform(self, X, y=None):
if self.max_features:
col_ids = self.permutation_importances_\
.iloc[:self.max_features]\
.index.values\
.astype(int)
else:
col_ids = self.permutation_importances_\
.index.values\
[self.permutation_importances_['Importance']>0]\
.astype(int)
return X[:,col_ids]
def fit_transform(self, X, y):
self.fit(X, y)
return self.transform(X,y)
def _get_support_mask(self):
return None
class CorrelationBasedFeatureSelection(BaseEstimator, SelectorMixin, MetaEstimatorMixin):
def __init__(self, corr_type='pearson', threshold=.6):
super().__init__()
self.corr_type = corr_type
self.threshold = threshold
def fit(self, X, y):
corr = pd.DataFrame(X).corr(self.corr_type).abs()
corr_tril = pd.DataFrame(np.tril(corr, -1))
unstacked = corr_tril.unstack().reset_index()
self.dropped_features_ = unstacked['level_1'][unstacked[0]>=self.threshold].drop_duplicates().values
return self
def transform(self, X, y=None):
return np.delete(X, self.dropped_features_, axis=1)
def fit_transform(self, X, y):
self.fit(X, y)
return self.transform(X,y)
def _get_support_mask(self):
return None
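A hedged usage sketch of the correlation filter on synthetic data (assuming the module's own imports, e.g. rfpimp, resolve); the numbers are illustrative:

import numpy as np

rng = np.random.RandomState(0)
a, b = rng.normal(size=100), rng.normal(size=100)
X = np.column_stack([a, b, 2.0 * a])   # third column perfectly correlated with the first
y = np.zeros(100)

selector = CorrelationBasedFeatureSelection(corr_type='pearson', threshold=0.9)
X_reduced = selector.fit_transform(X, y)
assert X_reduced.shape == (100, 2)     # the duplicated column is dropped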
| 35.8625
| 113
| 0.676891
|
abba15346eac131b5bc18a7cd6706d8ed89f8c13
| 3,519
|
py
|
Python
|
src/Update/update_install.py
|
DBrianKimmel/PyHouse_Install
|
9c7ff397299e0f2e63782d4a955d2f8bf840ef6f
|
[
"MIT"
] | 1
|
2015-10-13T15:01:48.000Z
|
2015-10-13T15:01:48.000Z
|
src/Update/update_install.py
|
DBrianKimmel/PyHouse_Install
|
9c7ff397299e0f2e63782d4a955d2f8bf840ef6f
|
[
"MIT"
] | null | null | null |
src/Update/update_install.py
|
DBrianKimmel/PyHouse_Install
|
9c7ff397299e0f2e63782d4a955d2f8bf840ef6f
|
[
"MIT"
] | null | null | null |
"""
@name: PyHouse_Install/src/Update/update_install.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2015-2016 by D. Brian Kimmel
@license: MIT License
@note: Created on Oct 14, 2015
@Summary:
This is run as root by the shell script update_install which is run as bin/update_install
"""
__updated__ = '2016-08-28'
# Import system type stuff
import os
import pwd
import shutil
import stat
# Import PyHouseInstall files and modules.
# from Install.Utility import Utilities as utilUtil
HOME_DIR = '/home/pyhouse/'
WORKSPACE_DIR = HOME_DIR + 'workspace/'
HOME_BIN_DIR = HOME_DIR + 'bin/'
INSTALL_DIR = WORKSPACE_DIR + 'PyHouse_Install/'
INSTALL_BIN_DIR = INSTALL_DIR + 'bin/'
class Utilities(object):
"""
"""
@staticmethod
def get_user_ids(p_user_name):
l_user = pwd.getpwnam(p_user_name)
l_uid = l_user.pw_uid
l_gid = l_user.pw_gid
return l_uid, l_gid
@staticmethod
def is_dir(p_path):
return os.path.isdir(p_path)
@staticmethod
def MakeDir(p_dir_name, p_user_name):
l_uid, l_gid = Utilities.get_user_ids(p_user_name)
if not os.path.isdir(p_dir_name):
print('Creating a directory {}'.format(p_dir_name))
os.makedirs(p_dir_name)
os.chown(p_dir_name, l_uid, l_gid)
def Install(self):
""" Install or update the repositories,
"""
class Api(object):
"""
"""
def make_pyhouse_dialout(self):
pass
def make_etc_dir(self):
Utilities.MakeDir('/etc/pyhouse/', 'pyhouse')
def make_log_dir(self):
Utilities.MakeDir('/var/log/pyhouse/', 'pyhouse')
def make_HOME_BIN_DIR(self):
Utilities.MakeDir('bin', 'pyhouse')
l_user = pwd.getpwnam('pyhouse')
for l_entry in os.listdir(INSTALL_BIN_DIR):
l_file = os.path.join(INSTALL_BIN_DIR, l_entry)
l_target = os.path.join(HOME_BIN_DIR, l_entry)
shutil.copy(l_file, HOME_BIN_DIR)
try:
os.chmod(l_target, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
os.chown(l_target, l_user.pw_uid, l_user.pw_gid)
print(' Installed file {}'.format(l_target))
except Exception as e_err:
print('Error in changing {} - {}'.format(l_target, e_err))
def copy_pyhouse_service(self):
""" Install in ~/.config/systemd/user/pyhouse.service
sudo systemctl daemon-reload
"""
l_user = pwd.getpwnam('pyhouse')
l_file = 'pyhouse.service'
l_dir = HOME_DIR + '.config/systemd/user/'
l_src = INSTALL_DIR + 'src/files/' + l_file
l_dest = os.path.join(l_dir, l_file)
if not os.path.isdir(l_dir):
print('Creating a directory {}'.format(l_dir))
os.makedirs(l_dir)
os.chown(l_dir, l_user.pw_uid, l_user.pw_gid)
shutil.copy(l_src, l_dir)
os.chown(l_dest, l_user.pw_uid, l_user.pw_gid)
print(' Copied file "{}" to "{}"'.format(l_src, l_dest))
def copy_autologin(self):
"""
"""
def update(self):
self.make_HOME_BIN_DIR()
self.copy_pyhouse_service()
self.make_etc_dir()
self.make_log_dir()
if __name__ == "__main__":
print('---Running Update/update_install.py ...')
l_api = Api()
l_api.update()
print('---Finished update_install.py\n')
# ## END DBK
| 27.492188
| 138
| 0.623757
|
6f023686943e41eb11ab19f279c2f702df707d37
| 2,050
|
py
|
Python
|
vanzare/facturacion/forms.py
|
avilaroman/vanzare
|
3dacef370b21eee008b8883c95e777862c402dfe
|
[
"MIT"
] | null | null | null |
vanzare/facturacion/forms.py
|
avilaroman/vanzare
|
3dacef370b21eee008b8883c95e777862c402dfe
|
[
"MIT"
] | 12
|
2018-06-19T03:20:27.000Z
|
2020-10-07T17:28:40.000Z
|
vanzare/facturacion/forms.py
|
avilaroman/vanzare
|
3dacef370b21eee008b8883c95e777862c402dfe
|
[
"MIT"
] | 4
|
2018-05-31T14:26:45.000Z
|
2019-07-06T06:42:05.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.forms import ModelForm, inlineformset_factory
from cliente.models import Cliente, Recaudo, Recibido
from producto.models import Producto, ProductoBase
from .models import Gasto
class RecibidoForm(ModelForm):
class Meta:
model = Recibido
fields = ['cliente', 'descuento', 'abono']
exclude = ['saldo', 'subtotal']
class GastoForm(ModelForm):
class Meta:
model = Gasto
fields = ['elemento_gasto', 'valor']
class ProductoForm(ModelForm):
class Meta:
model = Producto
fields = [
'producto_base', 'cantidad', 'detalle', 'ancho', 'alto', 'total'
]
ProductoFormSet = inlineformset_factory(
Recibido,
Producto,
form=ProductoForm,
can_delete=False,
extra=0,
min_num=1,
max_num=None)
class RecaudoForm(ModelForm):
class Meta:
model = Recaudo
fields = ['recibido', 'valor']
class ClienteForm(ModelForm):
class Meta:
model = Cliente
fields = [
'nombre_completo', 'tipo_identificacion', 'numero_identificacion'
]
class ProductoBaseForm(ModelForm):
class Meta:
model = ProductoBase
fields = [
'nombre', 'valor', 'valor_cantidad', 'factor', 'especificacion',
'opciones_cobro'
]
class RegistroForm(UserCreationForm):
first_name = forms.CharField(
max_length=30, required=False, help_text='Opcional.')
last_name = forms.CharField(
max_length=30, required=False, help_text='Opcional.')
email = forms.EmailField(
max_length=254,
help_text=
'Requerido. Informar una dirección de correo electrónico válida.')
class Meta:
model = User
fields = [
'username', 'first_name', 'last_name', 'email', 'password1',
'password2'
]
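A hedged sketch of how RecibidoForm and its inline ProductoFormSet might be wired together in a view; the view name, URL name and template path are illustrative, not taken from the app:

from django.shortcuts import redirect, render

from .forms import ProductoFormSet, RecibidoForm


def crear_recibido(request):
    form = RecibidoForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        recibido = form.save()
        formset = ProductoFormSet(request.POST, instance=recibido)
        if formset.is_valid():
            formset.save()
            return redirect('recibido_detalle', pk=recibido.pk)
    return render(request, 'facturacion/recibido_form.html', {'form': form})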
| 24.117647
| 77
| 0.637073
|
f3b5a00bc182cd7c902656c0a046d3c44e867181
| 43
|
py
|
Python
|
src/luamb/__main__.py
|
un-def/luamb
|
eec3969eda1b225e26a138c25720182754e9ec54
|
[
"MIT"
] | 16
|
2016-07-20T03:33:45.000Z
|
2021-07-23T06:05:00.000Z
|
src/luamb/__main__.py
|
un-def/luamb
|
eec3969eda1b225e26a138c25720182754e9ec54
|
[
"MIT"
] | 2
|
2016-08-20T02:47:26.000Z
|
2017-08-29T16:27:42.000Z
|
src/luamb/__main__.py
|
un-def/luamb
|
eec3969eda1b225e26a138c25720182754e9ec54
|
[
"MIT"
] | 1
|
2016-07-20T10:25:55.000Z
|
2016-07-20T10:25:55.000Z
|
from luamb._entrypoint import main
main()
| 10.75
| 34
| 0.790698
|
5dc7ed2bc616310ae5f106b8f5bf7b086e41fedf
| 14,877
|
py
|
Python
|
pygeoapi/provider/postgresql.py
|
allixender/pygeoapi
|
42a3424ae8bea2089337e1ed480a07c54d75302a
|
[
"MIT"
] | null | null | null |
pygeoapi/provider/postgresql.py
|
allixender/pygeoapi
|
42a3424ae8bea2089337e1ed480a07c54d75302a
|
[
"MIT"
] | null | null | null |
pygeoapi/provider/postgresql.py
|
allixender/pygeoapi
|
42a3424ae8bea2089337e1ed480a07c54d75302a
|
[
"MIT"
] | null | null | null |
# =================================================================
#
# Authors: Jorge Samuel Mendes de Jesus <jorge.dejesus@protonmail.com>
# Tom Kralidis <tomkralidis@gmail.com>
# Mary Bucknell <mbucknell@usgs.gov>
#
# Copyright (c) 2018 Jorge Samuel Mendes de Jesus
# Copyright (c) 2019 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
# Testing local docker:
# docker run --name "postgis" \
# -v postgres_data:/var/lib/postgresql -p 5432:5432 \
# -e ALLOW_IP_RANGE=0.0.0.0/0 \
# -e POSTGRES_USER=postgres \
# -e POSTGRES_PASS=postgres \
# -e POSTGRES_DBNAME=test \
# -d -t kartoza/postgis
# Import dump:
# gunzip < tests/data/hotosm_bdi_waterways.sql.gz |
# psql -U postgres -h 127.0.0.1 -p 5432 test
import logging
import json
import psycopg2
from psycopg2.sql import SQL, Identifier, Literal
from pygeoapi.provider.base import BaseProvider, \
ProviderConnectionError, ProviderQueryError, ProviderItemNotFoundError
from psycopg2.extras import RealDictCursor
LOGGER = logging.getLogger(__name__)
class DatabaseConnection:
"""Database connection class to be used as 'with' statement.
The class returns a connection object.
"""
def __init__(self, conn_dic, table, context="query"):
"""
DatabaseConnection class constructor
:param conn: dictionary with connection parameters
to be used by psycopg2
dbname – the database name (database is a deprecated alias)
user – user name used to authenticate
password – password used to authenticate
host – database host address
(defaults to UNIX socket if not provided)
port – connection port number
(defaults to 5432 if not provided)
search_path – search path to be used (by order) , normally
data is in the public schema, [public],
or in a specific schema ["osm", "public"].
Note: First we should have the schema
being used and then public
:param table: table name containing the data. This variable is used to
assemble column information
:param context: query or hits, if query then it will determine
table column otherwise will not do it
:returns: psycopg2.extensions.connection
"""
self.conn_dic = conn_dic
self.table = table
self.context = context
self.columns = None
self.fields = {} # Dict of columns. Key is col name, value is type
self.conn = None
def __enter__(self):
try:
search_path = self.conn_dic.pop('search_path', ['public'])
if search_path != ['public']:
self.conn_dic["options"] = '-c \
search_path={}'.format(",".join(search_path))
LOGGER.debug('Using search path: {} '.format(search_path))
self.conn = psycopg2.connect(**self.conn_dic)
self.conn.set_client_encoding('utf8')
except psycopg2.OperationalError:
LOGGER.error("Couldn't connect to Postgis using:{}".format(
str(self.conn_dic)))
raise ProviderConnectionError()
self.cur = self.conn.cursor()
if self.context == 'query':
# Getting columns
query_cols = "SELECT column_name, udt_name FROM information_schema.columns \
WHERE table_name = '{}' and udt_name != 'geometry';".format(
self.table)
self.cur.execute(query_cols)
result = self.cur.fetchall()
self.columns = SQL(', ').join(
[Identifier(item[0]) for item in result]
)
self.fields = dict(result)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# some logic to commit/rollback
self.conn.close()
class PostgreSQLProvider(BaseProvider):
"""Generic provider for Postgresql based on psycopg2
using sync approach and server side
cursor (using support class DatabaseCursor)
"""
def __init__(self, provider_def):
"""
PostgreSQLProvider Class constructor
:param provider_def: provider definitions from yml pygeoapi-config.
data,id_field, name set in parent class
data contains the connection information
for class DatabaseCursor
:returns: pygeoapi.provider.base.PostgreSQLProvider
"""
BaseProvider.__init__(self, provider_def)
self.table = provider_def['table']
self.id_field = provider_def['id_field']
self.conn_dic = provider_def['data']
self.geom = provider_def.get('geom_field', 'geom')
LOGGER.debug('Setting Postgresql properties:')
LOGGER.debug('Connection String:{}'.format(
",".join(("{}={}".format(*i) for i in self.conn_dic.items()))))
LOGGER.debug('Name:{}'.format(self.name))
LOGGER.debug('ID_field:{}'.format(self.id_field))
LOGGER.debug('Table:{}'.format(self.table))
LOGGER.debug('Get available fields/properties')
self.get_fields()
def get_fields(self):
"""
Get fields from PostgreSQL table (columns are fields)
:returns: dict of fields
"""
if not self.fields:
with DatabaseConnection(self.conn_dic, self.table) as db:
self.fields = db.fields
return self.fields
def __get_where_clauses(self, properties=[], bbox=[]):
"""
Generates WHERE conditions to be used in the query.
Private method mainly associated with query method
:param properties: list of tuples (name, value)
:param bbox: bounding box [minx,miny,maxx,maxy]
:returns: psycopg2.sql.Composed or psycopg2.sql.SQL
"""
where_conditions = []
if properties:
property_clauses = [SQL('{} = {}').format(
Identifier(k), Literal(v)) for k, v in properties]
where_conditions += property_clauses
if bbox:
bbox_clause = SQL('{} && ST_MakeEnvelope({})').format(
Identifier(self.geom), SQL(', ').join(
[Literal(bbox_coord) for bbox_coord in bbox]))
where_conditions.append(bbox_clause)
if where_conditions:
where_clause = SQL(' WHERE {}').format(
SQL(' AND ').join(where_conditions))
else:
where_clause = SQL('')
return where_clause
def query(self, startindex=0, limit=10, resulttype='results',
bbox=[], datetime_=None, properties=[], sortby=[]):
"""
Query Postgis for all the content.
e,g: http://localhost:5000/collections/hotosm_bdi_waterways/items?
limit=1&resulttype=results
:param startindex: starting record to return (default 0)
:param limit: number of records to return (default 10)
:param resulttype: return results or hit limit (default results)
:param bbox: bounding box [minx,miny,maxx,maxy]
:param datetime_: temporal (datestamp or extent)
:param properties: list of tuples (name, value)
:param sortby: list of dicts (property, order)
:returns: GeoJSON FeaturesCollection
"""
LOGGER.debug('Querying PostGIS')
if resulttype == 'hits':
with DatabaseConnection(self.conn_dic,
self.table, context="hits") as db:
cursor = db.conn.cursor(cursor_factory=RealDictCursor)
where_clause = self.__get_where_clauses(
properties=properties, bbox=bbox)
sql_query = SQL("SELECT COUNT(*) as hits from {} {}").\
format(Identifier(self.table), where_clause)
try:
cursor.execute(sql_query)
except Exception as err:
LOGGER.error('Error executing sql_query: {}: {}'.format(
sql_query.as_string(cursor), err))
raise ProviderQueryError()
hits = cursor.fetchone()["hits"]
return self.__response_feature_hits(hits)
end_index = startindex + limit
with DatabaseConnection(self.conn_dic, self.table) as db:
cursor = db.conn.cursor(cursor_factory=RealDictCursor)
where_clause = self.__get_where_clauses(
properties=properties, bbox=bbox)
sql_query = SQL("DECLARE \"geo_cursor\" CURSOR FOR \
SELECT DISTINCT {},ST_AsGeoJSON({}) FROM {}{}").\
format(db.columns,
Identifier(self.geom),
Identifier(self.table),
where_clause)
LOGGER.debug('SQL Query: {}'.format(sql_query.as_string(cursor)))
LOGGER.debug('Start Index: {}'.format(startindex))
LOGGER.debug('End Index: {}'.format(end_index))
try:
cursor.execute(sql_query)
for index in [startindex, limit]:
cursor.execute("fetch forward {} from geo_cursor"
.format(index))
except Exception as err:
LOGGER.error('Error executing sql_query: {}'.format(
sql_query.as_string(cursor)))
LOGGER.error(err)
raise ProviderQueryError()
row_data = cursor.fetchall()
feature_collection = {
'type': 'FeatureCollection',
'features': []
}
for rd in row_data:
feature_collection['features'].append(
self.__response_feature(rd))
return feature_collection
def get_previous(self, cursor, identifier):
"""
Query previous ID given current ID
:param identifier: feature id
:returns: feature id
"""
sql = 'SELECT {} AS id FROM {} WHERE {}<%s ORDER BY {} DESC LIMIT 1'
cursor.execute(SQL(sql).format(
Identifier(self.id_field),
Identifier(self.table),
Identifier(self.id_field),
Identifier(self.id_field),
), (identifier,))
item = cursor.fetchall()
id_ = item[0]['id'] if item else identifier
return id_
def get_next(self, cursor, identifier):
"""
Query next ID given current ID
:param identifier: feature id
:returns: feature id
"""
sql = 'SELECT {} AS id FROM {} WHERE {}>%s ORDER BY {} LIMIT 1'
cursor.execute(SQL(sql).format(
Identifier(self.id_field),
Identifier(self.table),
Identifier(self.id_field),
Identifier(self.id_field),
), (identifier,))
item = cursor.fetchall()
id_ = item[0]['id'] if item else identifier
return id_
def get(self, identifier):
"""
Query the provider for a specific
feature id e.g: /collections/hotosm_bdi_waterways/items/13990765
:param identifier: feature id
:returns: GeoJSON FeaturesCollection
"""
LOGGER.debug('Get item from Postgis')
with DatabaseConnection(self.conn_dic, self.table) as db:
cursor = db.conn.cursor(cursor_factory=RealDictCursor)
sql_query = SQL("SELECT {},ST_AsGeoJSON({}) \
from {} WHERE {}=%s").format(db.columns,
Identifier(self.geom),
Identifier(self.table),
Identifier(self.id_field))
LOGGER.debug('SQL Query: {}'.format(sql_query.as_string(db.conn)))
LOGGER.debug('Identifier: {}'.format(identifier))
try:
cursor.execute(sql_query, (identifier, ))
except Exception as err:
LOGGER.error('Error executing sql_query: {}'.format(
sql_query.as_string(cursor)))
LOGGER.error(err)
raise ProviderQueryError()
results = cursor.fetchall()
row_data = None
if results:
row_data = results[0]
feature = self.__response_feature(row_data)
if feature:
feature['prev'] = self.get_previous(cursor, identifier)
feature['next'] = self.get_next(cursor, identifier)
return feature
else:
err = 'item {} not found'.format(identifier)
LOGGER.error(err)
raise ProviderItemNotFoundError(err)
def __response_feature(self, row_data):
"""
Assembles GeoJSON output from DB query
:param row_data: DB row result
:returns: `dict` of GeoJSON Feature
"""
if row_data:
rd = dict(row_data)
feature = {
'type': 'Feature'
}
feature["geometry"] = json.loads(
rd.pop('st_asgeojson'))
feature['properties'] = rd
feature['id'] = feature['properties'].get(self.id_field)
return feature
else:
return None
def __response_feature_hits(self, hits):
"""Assembles GeoJSON/Feature number
e.g: http://localhost:5000/collections/
hotosm_bdi_waterways/items?resulttype=hits
:returns: GeoJSON FeaturesCollection
"""
feature_collection = {"features": [],
"type": "FeatureCollection"}
feature_collection['numberMatched'] = hits
return feature_collection
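A hedged sketch of constructing the provider directly; the table name comes from the demo dump in the header comments, while the credentials, id_field value and any extra keys BaseProvider may require in a given pygeoapi version are assumptions. Note that instantiation already opens a connection (get_fields):

provider_def = {
    'name': 'PostgreSQL',
    'data': {
        'host': '127.0.0.1',
        'port': 5432,
        'dbname': 'test',
        'user': 'postgres',
        'password': 'postgres',
        'search_path': ['osm', 'public'],
    },
    'id_field': 'osm_id',            # illustrative
    'table': 'hotosm_bdi_waterways',
    'geom_field': 'geom',
}

provider = PostgreSQLProvider(provider_def)
features = provider.query(limit=2, resulttype='results')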
| 36.374083
| 88
| 0.582577
|
7b5c12f0a924e65e7599911af7ff9aa50d7e0f1d
| 41
|
py
|
Python
|
sdk/exception/business_exception.py
|
CLG0125/elemesdk
|
344466398bad7cf026e082e47c77d3ca98621ef3
|
[
"MIT"
] | 1
|
2021-04-03T05:11:29.000Z
|
2021-04-03T05:11:29.000Z
|
sdk/exception/business_exception.py
|
CLG0125/elemesdk
|
344466398bad7cf026e082e47c77d3ca98621ef3
|
[
"MIT"
] | null | null | null |
sdk/exception/business_exception.py
|
CLG0125/elemesdk
|
344466398bad7cf026e082e47c77d3ca98621ef3
|
[
"MIT"
] | null | null | null |
class BusinessException(Exception):pass
| 13.666667
| 39
| 0.853659
|
87ddec2639e46c91e4ab1b6d06e74bccbc2e54b5
| 1,261
|
py
|
Python
|
tc_pipelines/components/ml/pytorch_trainer.py
|
cabukela/iorek-byrnison
|
a683d0e07f6de6ca568a0941ced6570feb2a6aa4
|
[
"Apache-2.0"
] | 1
|
2022-03-18T11:00:18.000Z
|
2022-03-18T11:00:18.000Z
|
tc_pipelines/components/ml/pytorch_trainer.py
|
cabukela/iorek-byrnison
|
a683d0e07f6de6ca568a0941ced6570feb2a6aa4
|
[
"Apache-2.0"
] | null | null | null |
tc_pipelines/components/ml/pytorch_trainer.py
|
cabukela/iorek-byrnison
|
a683d0e07f6de6ca568a0941ced6570feb2a6aa4
|
[
"Apache-2.0"
] | 2
|
2022-03-18T11:00:21.000Z
|
2022-03-30T04:08:05.000Z
|
from kfp.v2.dsl import component, Input, Dataset
@component(
base_image="python:3.7",
packages_to_install=["google-cloud-aiplatform==1.7.1"],
)
def train_and_deploy(
project: str,
location: str,
container_uri: str,
serving_container_uri: str,
training_dataset: Input[Dataset],
validation_dataset: Input[Dataset],
staging_bucket: str,
):
from google.cloud import aiplatform
aiplatform.init(
project=project, location=location, staging_bucket=staging_bucket
)
job = aiplatform.CustomContainerTrainingJob(
display_name="telco_churn_kfp_training_pytorch",
container_uri=container_uri,
command=[
"python",
"train.py",
f"--training_dataset_path={training_dataset.uri}",
f"--validation_dataset_path={validation_dataset.uri}",
'--batch_size', '64',
'--num_epochs', '15',
],
staging_bucket=staging_bucket,
model_serving_container_image_uri=serving_container_uri,
)
model = job.run(replica_count=1, model_display_name="telco_churn_kfp_model_pytorch")
#endpoint = model.deploy(
# traffic_split={"0": 80, '13785': 20},
# machine_type="n1-standard-2",
#)
| 30.756098
| 88
| 0.654243
|
a15c054a9f629e4229cd6dea1c4506dac574c6b1
| 45,555
|
py
|
Python
|
nova/tests/unit/compute/test_shelve.py
|
gyliu513/nova
|
14e974a5f77c72a9bb44c6801746abb2eda8e91d
|
[
"Apache-2.0"
] | 1
|
2016-07-18T22:05:01.000Z
|
2016-07-18T22:05:01.000Z
|
nova/tests/unit/compute/test_shelve.py
|
gyliu513/nova
|
14e974a5f77c72a9bb44c6801746abb2eda8e91d
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/compute/test_shelve.py
|
gyliu513/nova
|
14e974a5f77c72a9bb44c6801746abb2eda8e91d
|
[
"Apache-2.0"
] | 1
|
2021-11-12T03:55:41.000Z
|
2021-11-12T03:55:41.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from nova.compute import api as compute_api
from nova.compute import claims
from nova.compute import instance_actions
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
from nova.db import api as db
from nova import exception
from nova.network.neutronv2 import api as neutron_api
from nova import objects
from nova import test
from nova.tests.unit.compute import test_compute
from nova.tests.unit.image import fake as fake_image
CONF = nova.conf.CONF
def _fake_resources():
resources = {
'memory_mb': 2048,
'memory_mb_used': 0,
'free_ram_mb': 2048,
'local_gb': 20,
'local_gb_used': 0,
'free_disk_gb': 20,
'vcpus': 2,
'vcpus_used': 0
}
return objects.ComputeNode(**resources)
class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_terminate_volume_connections')
@mock.patch.object(nova.virt.fake.SmallFakeDriver, 'power_off')
@mock.patch.object(nova.virt.fake.SmallFakeDriver, 'snapshot')
@mock.patch.object(nova.compute.manager.ComputeManager, '_get_power_state')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_notify_about_instance_usage')
@mock.patch('nova.compute.utils.notify_about_instance_action')
def _shelve_instance(self, shelved_offload_time, mock_notify,
mock_notify_instance_usage, mock_get_power_state,
mock_snapshot, mock_power_off, mock_terminate,
mock_get_bdms, clean_shutdown=True,
guest_power_state=power_state.RUNNING):
mock_get_power_state.return_value = 123
CONF.set_override('shelved_offload_time', shelved_offload_time)
host = 'fake-mini'
instance = self._create_fake_instance_obj(
params={'host': host, 'power_state': guest_power_state})
image_id = 'fake_image_id'
host = 'fake-mini'
self.useFixture(utils_fixture.TimeFixture())
instance.task_state = task_states.SHELVING
instance.save()
fake_bdms = None
if shelved_offload_time == 0:
fake_bdms = objects.BlockDeviceMappingList()
mock_get_bdms.return_value = fake_bdms
tracking = {'last_state': instance.vm_state}
def check_save(expected_task_state=None):
self.assertEqual(123, instance.power_state)
if tracking['last_state'] == vm_states.ACTIVE:
if CONF.shelved_offload_time == 0:
self.assertEqual(task_states.SHELVING_OFFLOADING,
instance.task_state)
else:
self.assertIsNone(instance.task_state)
self.assertEqual(vm_states.SHELVED, instance.vm_state)
self.assertEqual([task_states.SHELVING,
task_states.SHELVING_IMAGE_UPLOADING],
expected_task_state)
self.assertIn('shelved_at', instance.system_metadata)
self.assertEqual(image_id,
instance.system_metadata['shelved_image_id'])
self.assertEqual(host,
instance.system_metadata['shelved_host'])
tracking['last_state'] = instance.vm_state
elif (tracking['last_state'] == vm_states.SHELVED and
CONF.shelved_offload_time == 0):
self.assertIsNone(instance.task_state)
self.assertEqual(vm_states.SHELVED_OFFLOADED,
instance.vm_state)
self.assertEqual([task_states.SHELVING,
task_states.SHELVING_OFFLOADING],
expected_task_state)
tracking['last_state'] = instance.vm_state
elif (tracking['last_state'] == vm_states.SHELVED_OFFLOADED and
CONF.shelved_offload_time == 0):
self.assertIsNone(instance.host)
self.assertIsNone(instance.node)
self.assertIsNone(expected_task_state)
else:
self.fail('Unexpected save!')
with test.nested(
mock.patch.object(instance, 'save'),
mock.patch.object(self.compute.network_api,
'cleanup_instance_network_on_host')) as (
mock_save, mock_cleanup
):
mock_save.side_effect = check_save
self.compute.shelve_instance(self.context, instance,
image_id=image_id,
clean_shutdown=clean_shutdown)
mock_notify.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
action='shelve', phase='start', bdms=fake_bdms),
mock.call(self.context, instance, 'fake-mini',
action='shelve', phase='end', bdms=fake_bdms)])
# prepare expect call lists
mock_notify_instance_usage_call_list = [
mock.call(self.context, instance, 'shelve.start'),
mock.call(self.context, instance, 'shelve.end')]
mock_power_off_call_list = []
mock_get_power_state_call_list = [
mock.call(self.context, instance)]
mock_cleanup_call_list = []
if clean_shutdown:
if guest_power_state == power_state.PAUSED:
mock_power_off_call_list.append(mock.call(instance, 0, 0))
else:
mock_power_off_call_list.append(
mock.call(instance, CONF.shutdown_timeout,
CONF.compute.shutdown_retry_interval))
else:
mock_power_off_call_list.append(mock.call(instance, 0, 0))
if CONF.shelved_offload_time == 0:
mock_notify_instance_usage_call_list.extend([
mock.call(self.context, instance, 'shelve_offload.start'),
mock.call(self.context, instance, 'shelve_offload.end')])
mock_power_off_call_list.append(mock.call(instance, 0, 0))
mock_get_power_state_call_list.append(mock.call(self.context,
instance))
# instance.host is replaced with host because
# original instance.host is cleared after
# ComputeManager.shelve_instance executes with
# shelved_offload_time == 0
mock_cleanup_call_list.append(mock.call(self.context, instance,
host))
mock_notify_instance_usage.assert_has_calls(
mock_notify_instance_usage_call_list)
mock_power_off.assert_has_calls(mock_power_off_call_list)
mock_cleanup.assert_has_calls(mock_cleanup_call_list)
mock_snapshot.assert_called_once_with(self.context, instance,
'fake_image_id', mock.ANY)
mock_get_power_state.assert_has_calls(mock_get_power_state_call_list)
if CONF.shelved_offload_time == 0:
self.assertTrue(mock_terminate.called)
def test_shelve(self):
self._shelve_instance(-1)
def test_shelve_forced_shutdown(self):
self._shelve_instance(-1, clean_shutdown=False)
def test_shelve_and_offload(self):
self._shelve_instance(0)
def test_shelve_paused_instance(self):
self._shelve_instance(-1, guest_power_state=power_state.PAUSED)
@mock.patch.object(nova.virt.fake.SmallFakeDriver, 'power_off')
def test_shelve_offload(self, mock_power_off):
instance = self._shelve_offload()
mock_power_off.assert_called_once_with(instance,
CONF.shutdown_timeout, CONF.compute.shutdown_retry_interval)
@mock.patch.object(nova.virt.fake.SmallFakeDriver, 'power_off')
def test_shelve_offload_forced_shutdown(self, mock_power_off):
instance = self._shelve_offload(clean_shutdown=False)
mock_power_off.assert_called_once_with(instance, 0, 0)
@mock.patch.object(compute_utils, 'EventReporter')
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_terminate_volume_connections')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'delete_allocation_for_shelve_offloaded_instance')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_update_resource_tracker')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_get_power_state', return_value=123)
@mock.patch.object(nova.compute.manager.ComputeManager,
'_notify_about_instance_usage')
@mock.patch('nova.compute.utils.notify_about_instance_action')
def _shelve_offload(self, mock_notify, mock_notify_instance_usage,
mock_get_power_state, mock_update_resource_tracker,
mock_delete_alloc, mock_terminate, mock_get_bdms,
mock_event, clean_shutdown=True):
host = 'fake-mini'
instance = self._create_fake_instance_obj(params={'host': host})
instance.task_state = task_states.SHELVING
instance.save()
self.useFixture(utils_fixture.TimeFixture())
fake_bdms = objects.BlockDeviceMappingList()
mock_get_bdms.return_value = fake_bdms
def stub_instance_save(inst, *args, **kwargs):
# If the vm_state is changed to SHELVED_OFFLOADED make sure we
# have already freed up allocations in placement.
if inst.vm_state == vm_states.SHELVED_OFFLOADED:
self.assertTrue(mock_delete_alloc.called,
'Allocations must be deleted before the '
'vm_status can change to shelved_offloaded.')
self.stub_out('nova.objects.Instance.save', stub_instance_save)
self.compute.shelve_offload_instance(self.context, instance,
clean_shutdown=clean_shutdown)
mock_notify.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
action='shelve_offload', phase='start',
bdms=fake_bdms),
mock.call(self.context, instance, 'fake-mini',
action='shelve_offload', phase='end',
bdms=fake_bdms)])
self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
self.assertIsNone(instance.task_state)
self.assertTrue(mock_terminate.called)
# prepare expect call lists
mock_notify_instance_usage_call_list = [
mock.call(self.context, instance, 'shelve_offload.start'),
mock.call(self.context, instance, 'shelve_offload.end')]
mock_notify_instance_usage.assert_has_calls(
mock_notify_instance_usage_call_list)
# instance.host is replaced with host because
# original instance.host is cleared after
# ComputeManager.shelve_offload_instance executes
mock_get_power_state.assert_called_once_with(
self.context, instance)
mock_update_resource_tracker.assert_called_once_with(self.context,
instance)
mock_delete_alloc.assert_called_once_with(self.context, instance)
mock_event.assert_called_once_with(self.context,
'compute_shelve_offload_instance',
CONF.host,
instance.uuid)
return instance
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_notify_about_instance_usage')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_prep_block_device', return_value='fake_bdm')
@mock.patch.object(nova.virt.fake.SmallFakeDriver, 'spawn')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_get_power_state', return_value=123)
@mock.patch.object(neutron_api.API, 'setup_instance_network_on_host')
def test_unshelve(self, mock_setup_network,
mock_get_power_state, mock_spawn,
mock_prep_block_device, mock_notify_instance_usage,
mock_notify_instance_action,
mock_get_bdms):
mock_bdms = mock.Mock()
mock_get_bdms.return_value = mock_bdms
instance = self._create_fake_instance_obj()
instance.task_state = task_states.UNSHELVING
instance.save()
image = {'id': uuids.image_id}
node = test_compute.NODENAME
limits = {}
filter_properties = {'limits': limits}
host = 'fake-mini'
cur_time = timeutils.utcnow()
# Adding shelved_* keys in system metadata to verify
# whether those are deleted after unshelve call.
sys_meta = dict(instance.system_metadata)
sys_meta['shelved_at'] = cur_time.isoformat()
sys_meta['shelved_image_id'] = image['id']
sys_meta['shelved_host'] = host
instance.system_metadata = sys_meta
self.deleted_image_id = None
def fake_delete(self2, ctxt, image_id):
self.deleted_image_id = image_id
def fake_claim(context, instance, node, allocations, limits):
instance.host = self.compute.host
requests = objects.InstancePCIRequests(requests=[])
return claims.Claim(context, instance, test_compute.NODENAME,
self.rt, _fake_resources(),
requests)
tracking = {
'last_state': instance.task_state,
'spawned': False,
}
def check_save(expected_task_state=None):
if tracking['last_state'] == task_states.UNSHELVING:
if tracking['spawned']:
self.assertIsNone(instance.task_state)
else:
self.assertEqual(task_states.SPAWNING, instance.task_state)
tracking['spawned'] = True
tracking['last_state'] = instance.task_state
elif tracking['last_state'] == task_states.SPAWNING:
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
tracking['last_state'] = instance.task_state
else:
self.fail('Unexpected save!')
fake_image.stub_out_image_service(self)
self.stub_out('nova.tests.unit.image.fake._FakeImageService.delete',
fake_delete)
with mock.patch.object(self.rt, 'instance_claim',
side_effect=fake_claim), \
mock.patch.object(instance, 'save') as mock_save:
mock_save.side_effect = check_save
self.compute.unshelve_instance(
self.context, instance, image=image,
filter_properties=filter_properties,
node=node, request_spec=objects.RequestSpec())
mock_notify_instance_action.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
action='unshelve', phase='start', bdms=mock_bdms),
mock.call(self.context, instance, 'fake-mini',
action='unshelve', phase='end', bdms=mock_bdms)])
# prepare expect call lists
mock_notify_instance_usage_call_list = [
mock.call(self.context, instance, 'unshelve.start'),
mock.call(self.context, instance, 'unshelve.end')]
mock_notify_instance_usage.assert_has_calls(
mock_notify_instance_usage_call_list)
mock_prep_block_device.assert_called_once_with(self.context,
instance, mock.ANY)
mock_setup_network.assert_called_once_with(self.context, instance,
self.compute.host)
mock_spawn.assert_called_once_with(self.context, instance,
test.MatchType(objects.ImageMeta), injected_files=[],
admin_password=None, allocations={}, network_info=[],
block_device_info='fake_bdm')
self.mock_get_allocs.assert_called_once_with(self.context,
instance.uuid)
mock_get_power_state.assert_called_once_with(self.context, instance)
self.assertNotIn('shelved_at', instance.system_metadata)
self.assertNotIn('shelved_image_id', instance.system_metadata)
self.assertNotIn('shelved_host', instance.system_metadata)
self.assertEqual(image['id'], self.deleted_image_id)
self.assertEqual(instance.host, self.compute.host)
self.assertEqual(123, instance.power_state)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertIsNone(instance.task_state)
self.assertIsNone(instance.key_data)
self.assertEqual(self.compute.host, instance.host)
self.assertFalse(instance.auto_disk_config)
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@mock.patch.object(nova.compute.resource_tracker.ResourceTracker,
'instance_claim')
@mock.patch.object(neutron_api.API, 'setup_instance_network_on_host')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_get_power_state', return_value=123)
@mock.patch.object(nova.virt.fake.SmallFakeDriver, 'spawn')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_prep_block_device', return_value='fake_bdm')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_notify_about_instance_usage')
@mock.patch('nova.utils.get_image_from_system_metadata')
def test_unshelve_volume_backed(self, mock_image_meta,
mock_notify_instance_usage,
mock_prep_block_device, mock_spawn,
mock_get_power_state,
mock_setup_network, mock_instance_claim,
mock_notify_instance_action,
mock_get_bdms):
mock_bdms = mock.Mock()
mock_get_bdms.return_value = mock_bdms
instance = self._create_fake_instance_obj()
node = test_compute.NODENAME
limits = {}
filter_properties = {'limits': limits}
instance.task_state = task_states.UNSHELVING
instance.save()
image_meta = {'properties': {'base_image_ref': uuids.image_id}}
mock_image_meta.return_value = image_meta
tracking = {'last_state': instance.task_state}
def fake_claim(context, instance, node, allocations, limits):
instance.host = self.compute.host
requests = objects.InstancePCIRequests(requests=[])
return claims.Claim(context, instance, test_compute.NODENAME,
self.rt, _fake_resources(),
requests)
mock_instance_claim.side_effect = fake_claim
def check_save(expected_task_state=None):
if tracking['last_state'] == task_states.UNSHELVING:
self.assertEqual(task_states.SPAWNING, instance.task_state)
tracking['last_state'] = instance.task_state
elif tracking['last_state'] == task_states.SPAWNING:
self.assertEqual(123, instance.power_state)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertIsNone(instance.task_state)
self.assertIsNone(instance.key_data)
self.assertFalse(instance.auto_disk_config)
self.assertIsNone(instance.task_state)
tracking['last_state'] = instance.task_state
else:
self.fail('Unexpected save!')
with mock.patch.object(instance, 'save') as mock_save:
mock_save.side_effect = check_save
self.compute.unshelve_instance(self.context, instance, image=None,
filter_properties=filter_properties, node=node,
request_spec=objects.RequestSpec())
mock_notify_instance_action.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
action='unshelve', phase='start', bdms=mock_bdms),
mock.call(self.context, instance, 'fake-mini',
action='unshelve', phase='end', bdms=mock_bdms)])
# prepare expect call lists
mock_notify_instance_usage_call_list = [
mock.call(self.context, instance, 'unshelve.start'),
mock.call(self.context, instance, 'unshelve.end')]
mock_notify_instance_usage.assert_has_calls(
mock_notify_instance_usage_call_list)
mock_prep_block_device.assert_called_once_with(self.context, instance,
mock.ANY)
mock_setup_network.assert_called_once_with(self.context, instance,
self.compute.host)
mock_instance_claim.assert_called_once_with(self.context, instance,
test_compute.NODENAME,
{}, limits)
mock_spawn.assert_called_once_with(self.context, instance,
test.MatchType(objects.ImageMeta),
injected_files=[], admin_password=None,
allocations={}, network_info=[], block_device_info='fake_bdm')
self.mock_get_allocs.assert_called_once_with(self.context,
instance.uuid)
mock_get_power_state.assert_called_once_with(self.context, instance)
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@mock.patch.object(nova.compute.resource_tracker.ResourceTracker,
'instance_claim')
@mock.patch.object(neutron_api.API, 'setup_instance_network_on_host')
@mock.patch.object(nova.virt.fake.SmallFakeDriver, 'spawn',
side_effect=test.TestingException('oops!'))
@mock.patch.object(nova.compute.manager.ComputeManager,
'_prep_block_device', return_value='fake_bdm')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_notify_about_instance_usage')
@mock.patch('nova.utils.get_image_from_system_metadata')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_terminate_volume_connections')
def test_unshelve_spawn_fails_cleanup_volume_connections(
self, mock_terminate_volume_connections, mock_image_meta,
mock_notify_instance_usage, mock_prep_block_device, mock_spawn,
mock_setup_network, mock_instance_claim,
mock_notify_instance_action, mock_get_bdms):
"""Tests error handling when a instance fails to unshelve and makes
sure that volume connections are cleaned up from the host
and that the host/node values are unset on the instance.
"""
mock_bdms = mock.Mock()
mock_get_bdms.return_value = mock_bdms
instance = self._create_fake_instance_obj()
node = test_compute.NODENAME
limits = {}
filter_properties = {'limits': limits}
instance.task_state = task_states.UNSHELVING
instance.save()
image_meta = {'properties': {'base_image_ref': uuids.image_id}}
mock_image_meta.return_value = image_meta
tracking = {'last_state': instance.task_state}
def fake_claim(context, instance, node, allocations, limits):
instance.host = self.compute.host
instance.node = node
requests = objects.InstancePCIRequests(requests=[])
return claims.Claim(context, instance, node,
self.rt, _fake_resources(),
requests, limits=limits)
mock_instance_claim.side_effect = fake_claim
def check_save(expected_task_state=None):
if tracking['last_state'] == task_states.UNSHELVING:
# This is before we've failed.
self.assertEqual(task_states.SPAWNING, instance.task_state)
tracking['last_state'] = instance.task_state
elif tracking['last_state'] == task_states.SPAWNING:
# This is after we've failed.
self.assertIsNone(instance.host)
self.assertIsNone(instance.node)
self.assertIsNone(instance.task_state)
tracking['last_state'] = instance.task_state
else:
self.fail('Unexpected save!')
with mock.patch.object(instance, 'save') as mock_save:
mock_save.side_effect = check_save
self.assertRaises(test.TestingException,
self.compute.unshelve_instance,
self.context, instance, image=None,
filter_properties=filter_properties, node=node,
request_spec=objects.RequestSpec())
mock_notify_instance_action.assert_called_once_with(
self.context, instance, 'fake-mini', action='unshelve',
phase='start', bdms=mock_bdms)
mock_notify_instance_usage.assert_called_once_with(
self.context, instance, 'unshelve.start')
mock_prep_block_device.assert_called_once_with(
self.context, instance, mock_bdms)
mock_setup_network.assert_called_once_with(self.context, instance,
self.compute.host)
mock_instance_claim.assert_called_once_with(self.context, instance,
test_compute.NODENAME,
{}, limits)
mock_spawn.assert_called_once_with(
self.context, instance, test.MatchType(objects.ImageMeta),
injected_files=[], admin_password=None,
allocations={}, network_info=[], block_device_info='fake_bdm')
mock_terminate_volume_connections.assert_called_once_with(
self.context, instance, mock_bdms)
@mock.patch.object(objects.InstanceList, 'get_by_filters')
def test_shelved_poll_none_offloaded(self, mock_get_by_filters):
# Test instances are not offloaded when shelved_offload_time is -1
self.flags(shelved_offload_time=-1)
self.compute._poll_shelved_instances(self.context)
self.assertEqual(0, mock_get_by_filters.call_count)
@mock.patch('oslo_utils.timeutils.is_older_than')
def test_shelved_poll_none_exist(self, mock_older):
self.flags(shelved_offload_time=1)
mock_older.return_value = False
with mock.patch.object(self.compute, 'shelve_offload_instance') as soi:
self.compute._poll_shelved_instances(self.context)
self.assertFalse(soi.called)
@mock.patch('oslo_utils.timeutils.is_older_than')
def test_shelved_poll_not_timedout(self, mock_older):
mock_older.return_value = False
self.flags(shelved_offload_time=1)
shelved_time = timeutils.utcnow()
time_fixture = self.useFixture(utils_fixture.TimeFixture(shelved_time))
time_fixture.advance_time_seconds(CONF.shelved_offload_time - 1)
instance = self._create_fake_instance_obj()
instance.vm_state = vm_states.SHELVED
instance.task_state = None
instance.host = self.compute.host
sys_meta = instance.system_metadata
sys_meta['shelved_at'] = shelved_time.isoformat()
instance.save()
with mock.patch.object(self.compute, 'shelve_offload_instance') as soi:
self.compute._poll_shelved_instances(self.context)
self.assertFalse(soi.called)
self.assertTrue(mock_older.called)
def test_shelved_poll_timedout(self):
self.flags(shelved_offload_time=1)
shelved_time = timeutils.utcnow()
time_fixture = self.useFixture(utils_fixture.TimeFixture(shelved_time))
time_fixture.advance_time_seconds(CONF.shelved_offload_time + 1)
instance = self._create_fake_instance_obj()
instance.vm_state = vm_states.SHELVED
instance.task_state = None
instance.host = self.compute.host
sys_meta = instance.system_metadata
sys_meta['shelved_at'] = shelved_time.isoformat()
instance.save()
data = []
def fake_soi(context, instance, **kwargs):
data.append(instance.uuid)
with mock.patch.object(self.compute, 'shelve_offload_instance') as soi:
soi.side_effect = fake_soi
self.compute._poll_shelved_instances(self.context)
self.assertTrue(soi.called)
self.assertEqual(instance.uuid, data[0])
@mock.patch('oslo_utils.timeutils.is_older_than')
@mock.patch('oslo_utils.timeutils.parse_strtime')
def test_shelved_poll_filters_task_state(self, mock_parse, mock_older):
self.flags(shelved_offload_time=1)
mock_older.return_value = True
instance1 = self._create_fake_instance_obj()
instance1.task_state = task_states.SPAWNING
instance1.vm_state = vm_states.SHELVED
instance1.host = self.compute.host
instance1.system_metadata = {'shelved_at': ''}
instance1.save()
instance2 = self._create_fake_instance_obj()
instance2.task_state = None
instance2.vm_state = vm_states.SHELVED
instance2.host = self.compute.host
instance2.system_metadata = {'shelved_at': ''}
instance2.save()
data = []
def fake_soi(context, instance, **kwargs):
data.append(instance.uuid)
with mock.patch.object(self.compute, 'shelve_offload_instance') as soi:
soi.side_effect = fake_soi
self.compute._poll_shelved_instances(self.context)
self.assertTrue(soi.called)
self.assertEqual([instance2.uuid], data)
@mock.patch('oslo_utils.timeutils.is_older_than')
@mock.patch('oslo_utils.timeutils.parse_strtime')
def test_shelved_poll_checks_task_state_on_save(self, mock_parse,
mock_older):
self.flags(shelved_offload_time=1)
mock_older.return_value = True
instance = self._create_fake_instance_obj()
instance.task_state = None
instance.vm_state = vm_states.SHELVED
instance.host = self.compute.host
instance.system_metadata = {'shelved_at': ''}
instance.save()
def fake_parse_hook(timestring):
instance.task_state = task_states.SPAWNING
instance.save()
mock_parse.side_effect = fake_parse_hook
with mock.patch.object(self.compute, 'shelve_offload_instance') as soi:
self.compute._poll_shelved_instances(self.context)
self.assertFalse(soi.called)
class ShelveComputeAPITestCase(test_compute.BaseTestCase):
def _get_vm_states(self, exclude_states=None):
vm_state = set([vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.RESCUED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED,
vm_states.DELETED, vm_states.ERROR, vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED])
if not exclude_states:
exclude_states = set()
return vm_state - exclude_states
def _test_shelve(self, vm_state=vm_states.ACTIVE, boot_from_volume=False,
clean_shutdown=True):
# Ensure instance can be shelved.
params = dict(task_state=None, vm_state=vm_state, display_name='vm01')
fake_instance = self._create_fake_instance_obj(params=params)
instance = fake_instance
self.assertIsNone(instance['task_state'])
with test.nested(
mock.patch.object(compute_utils, 'is_volume_backed_instance',
return_value=boot_from_volume),
mock.patch.object(compute_utils, 'create_image',
return_value=dict(id='fake-image-id')),
mock.patch.object(instance, 'save'),
mock.patch.object(self.compute_api, '_record_action_start'),
mock.patch.object(self.compute_api.compute_rpcapi,
'shelve_instance'),
mock.patch.object(self.compute_api.compute_rpcapi,
'shelve_offload_instance')
) as (
volume_backed_inst, create_image, instance_save,
record_action_start, rpcapi_shelve_instance,
rpcapi_shelve_offload_instance
):
self.compute_api.shelve(self.context, instance,
clean_shutdown=clean_shutdown)
self.assertEqual(instance.task_state, task_states.SHELVING)
# assert our mock calls
volume_backed_inst.assert_called_once_with(
self.context, instance)
instance_save.assert_called_once_with(expected_task_state=[None])
record_action_start.assert_called_once_with(
self.context, instance, instance_actions.SHELVE)
if boot_from_volume:
rpcapi_shelve_offload_instance.assert_called_once_with(
self.context, instance=instance,
clean_shutdown=clean_shutdown)
else:
rpcapi_shelve_instance.assert_called_once_with(
self.context, instance=instance, image_id='fake-image-id',
clean_shutdown=clean_shutdown)
db.instance_destroy(self.context, instance['uuid'])
def test_shelve(self):
self._test_shelve()
def test_shelves_stopped(self):
self._test_shelve(vm_state=vm_states.STOPPED)
def test_shelves_paused(self):
self._test_shelve(vm_state=vm_states.PAUSED)
def test_shelves_suspended(self):
self._test_shelve(vm_state=vm_states.SUSPENDED)
def test_shelves_boot_from_volume(self):
self._test_shelve(boot_from_volume=True)
def test_shelve_forced_shutdown(self):
self._test_shelve(clean_shutdown=False)
def test_shelve_boot_from_volume_forced_shutdown(self):
self._test_shelve(boot_from_volume=True,
clean_shutdown=False)
def _test_shelve_invalid_state(self, vm_state):
params = dict(vm_state=vm_state)
fake_instance = self._create_fake_instance_obj(params=params)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.shelve,
self.context, fake_instance)
def test_shelve_fails_invalid_states(self):
invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE,
vm_states.STOPPED,
vm_states.PAUSED,
vm_states.SUSPENDED]))
for state in invalid_vm_states:
self._test_shelve_invalid_state(state)
def _test_shelve_offload(self, clean_shutdown=True):
params = dict(task_state=None, vm_state=vm_states.SHELVED)
fake_instance = self._create_fake_instance_obj(params=params)
with test.nested(
mock.patch.object(fake_instance, 'save'),
mock.patch.object(self.compute_api.compute_rpcapi,
'shelve_offload_instance'),
mock.patch('nova.compute.api.API._record_action_start')
) as (
instance_save, rpcapi_shelve_offload_instance, record
):
self.compute_api.shelve_offload(self.context, fake_instance,
clean_shutdown=clean_shutdown)
# assert field values set on the instance object
self.assertEqual(task_states.SHELVING_OFFLOADING,
fake_instance.task_state)
instance_save.assert_called_once_with(expected_task_state=[None])
rpcapi_shelve_offload_instance.assert_called_once_with(
self.context, instance=fake_instance,
clean_shutdown=clean_shutdown)
record.assert_called_once_with(self.context, fake_instance,
instance_actions.SHELVE_OFFLOAD)
def test_shelve_offload(self):
self._test_shelve_offload()
def test_shelve_offload_forced_shutdown(self):
self._test_shelve_offload(clean_shutdown=False)
def _test_shelve_offload_invalid_state(self, vm_state):
params = dict(vm_state=vm_state)
fake_instance = self._create_fake_instance_obj(params=params)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.shelve_offload,
self.context, fake_instance)
def test_shelve_offload_fails_invalid_states(self):
invalid_vm_states = self._get_vm_states(set([vm_states.SHELVED]))
for state in invalid_vm_states:
self._test_shelve_offload_invalid_state(state)
def _get_specify_state_instance(self, vm_state):
# Ensure instance can be unshelved.
instance = self._create_fake_instance_obj()
self.assertIsNone(instance['task_state'])
self.compute_api.shelve(self.context, instance)
instance.task_state = None
instance.vm_state = vm_state
instance.save()
return instance
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
def test_unshelve(self, get_by_instance_uuid):
# Ensure instance can be unshelved.
instance = self._get_specify_state_instance(vm_states.SHELVED)
fake_spec = objects.RequestSpec()
get_by_instance_uuid.return_value = fake_spec
with mock.patch.object(self.compute_api.compute_task_api,
'unshelve_instance') as unshelve:
self.compute_api.unshelve(self.context, instance)
get_by_instance_uuid.assert_called_once_with(self.context,
instance.uuid)
unshelve.assert_called_once_with(self.context, instance, fake_spec)
self.assertEqual(instance.task_state, task_states.UNSHELVING)
db.instance_destroy(self.context, instance['uuid'])
@mock.patch('nova.availability_zones.get_availability_zones',
return_value=['az1', 'az2'])
@mock.patch.object(objects.RequestSpec, 'save')
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
    def test_specified_az_unshelve_invalid_request(
            self, get_by_instance_uuid, mock_save, mock_availability_zones):
# Ensure instance can be unshelved.
instance = self._get_specify_state_instance(
vm_states.SHELVED_OFFLOADED)
new_az = "fake-new-az"
fake_spec = objects.RequestSpec()
fake_spec.availability_zone = "fake-old-az"
get_by_instance_uuid.return_value = fake_spec
exc = self.assertRaises(exception.InvalidRequest,
self.compute_api.unshelve,
self.context, instance, new_az=new_az)
self.assertEqual("The requested availability zone is not available",
exc.format_message())
@mock.patch('nova.availability_zones.get_availability_zones',
return_value=['az1', 'az2'])
@mock.patch.object(objects.RequestSpec, 'save')
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
def test_specified_az_unshelve_invalid_state(self, get_by_instance_uuid,
mock_save,
mock_availability_zones):
# Ensure instance can be unshelved.
instance = self._get_specify_state_instance(vm_states.SHELVED)
new_az = "az1"
fake_spec = objects.RequestSpec()
fake_spec.availability_zone = "fake-old-az"
get_by_instance_uuid.return_value = fake_spec
self.assertRaises(exception.UnshelveInstanceInvalidState,
self.compute_api.unshelve,
self.context, instance, new_az=new_az)
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid',
new_callable=mock.NonCallableMock)
@mock.patch('nova.availability_zones.get_availability_zones')
def test_validate_unshelve_az_cross_az_attach_true(
self, mock_get_azs, mock_get_bdms):
"""Tests a case where the new AZ to unshelve does not match the volume
attached to the server but cross_az_attach=True so it's not an error.
"""
# Ensure instance can be unshelved.
instance = self._create_fake_instance_obj(
params=dict(vm_state=vm_states.SHELVED_OFFLOADED))
new_az = "west_az"
mock_get_azs.return_value = ["west_az", "east_az"]
self.flags(cross_az_attach=True, group='cinder')
self.compute_api._validate_unshelve_az(self.context, instance, new_az)
mock_get_azs.assert_called_once_with(
self.context, self.compute_api.host_api, get_only_available=True)
@mock.patch('nova.volume.cinder.API.get')
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch('nova.availability_zones.get_availability_zones')
def test_validate_unshelve_az_cross_az_attach_false(
self, mock_get_azs, mock_get_bdms, mock_get):
"""Tests a case where the new AZ to unshelve does not match the volume
attached to the server and cross_az_attach=False so it's an error.
"""
# Ensure instance can be unshelved.
instance = self._create_fake_instance_obj(
params=dict(vm_state=vm_states.SHELVED_OFFLOADED))
new_az = "west_az"
mock_get_azs.return_value = ["west_az", "east_az"]
bdms = [objects.BlockDeviceMapping(destination_type='volume',
volume_id=uuids.volume_id)]
mock_get_bdms.return_value = bdms
volume = {'id': uuids.volume_id, 'availability_zone': 'east_az'}
mock_get.return_value = volume
self.flags(cross_az_attach=False, group='cinder')
self.assertRaises(exception.MismatchVolumeAZException,
self.compute_api._validate_unshelve_az,
self.context, instance, new_az)
mock_get_azs.assert_called_once_with(
self.context, self.compute_api.host_api, get_only_available=True)
mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
mock_get.assert_called_once_with(self.context, uuids.volume_id)
@mock.patch.object(compute_api.API, '_validate_unshelve_az')
@mock.patch.object(objects.RequestSpec, 'save')
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
def test_specified_az_unshelve(self, get_by_instance_uuid,
mock_save, mock_validate_unshelve_az):
# Ensure instance can be unshelved.
instance = self._get_specify_state_instance(
vm_states.SHELVED_OFFLOADED)
new_az = "west_az"
fake_spec = objects.RequestSpec()
fake_spec.availability_zone = "fake-old-az"
get_by_instance_uuid.return_value = fake_spec
self.compute_api.unshelve(self.context, instance, new_az=new_az)
mock_save.assert_called_once_with()
self.assertEqual(new_az, fake_spec.availability_zone)
mock_validate_unshelve_az.assert_called_once_with(
self.context, instance, new_az)
| 47.502607
| 79
| 0.637427
|
e04215af045e2800d79542eac81f35f756b83a9f
| 16,281
|
py
|
Python
|
src/sage/schemes/curves/zariski_vankampen.py
|
vbraun/sage
|
07d6c37d18811e2b377a9689790a7c5e24da16ba
|
[
"BSL-1.0"
] | 3
|
2016-06-19T14:48:31.000Z
|
2022-01-28T08:46:01.000Z
|
src/sage/schemes/curves/zariski_vankampen.py
|
vbraun/sage
|
07d6c37d18811e2b377a9689790a7c5e24da16ba
|
[
"BSL-1.0"
] | null | null | null |
src/sage/schemes/curves/zariski_vankampen.py
|
vbraun/sage
|
07d6c37d18811e2b377a9689790a7c5e24da16ba
|
[
"BSL-1.0"
] | 7
|
2021-11-08T10:01:59.000Z
|
2022-03-03T11:25:52.000Z
|
r"""
Zariski-Van Kampen method implementation
This file contains functions to compute the fundamental group of
the complement of a curve in the complex affine or projective plane,
using Zariski-Van Kampen approach. It depends on the package ``sirocco``.
The current implementation makes it possible to compute a presentation of
the fundamental group of curves over the rationals or number fields with
a fixed embedding in `\QQbar`.
Instead of computing a representation of the braid monodromy, we
choose several base points and a system of paths joining them that
generate all the necessary loops around the points of the discriminant.
The group is generated by the free groups over these points, and
braids over these paths give relations between these generators.
This big group presentation is simplified at the end.
.. TODO::
Implement the complete braid monodromy approach.
AUTHORS:
- Miguel Marco (2015-09-30): Initial version
EXAMPLES::
sage: from sage.schemes.curves.zariski_vankampen import fundamental_group # optional - sirocco
sage: R.<x,y> = QQ[]
sage: f = y^3 + x^3 -1
sage: fundamental_group(f) # optional - sirocco
Finitely presented group < x0 | >
"""
#*****************************************************************************
# Copyright (C) 2015 Miguel Marco <mmarco@unizar.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import division, absolute_import
from sage.groups.braid import BraidGroup
from sage.groups.perm_gps.permgroup_named import SymmetricGroup
from sage.rings.rational_field import QQ
from sage.rings.qqbar import QQbar
from sage.rings.all import CC, CIF
from sage.parallel.decorate import parallel
from sage.misc.flatten import flatten
from sage.groups.free_group import FreeGroup
from sage.misc.misc_c import prod
from sage.rings.complex_field import ComplexField
from sage.rings.complex_interval_field import ComplexIntervalField
from sage.combinat.permutation import Permutation
from sage.functions.generalized import sign
def braid_from_piecewise(strands):
r"""
Compute the braid corresponding to the piecewise linear curves strands.
INPUT:
- ``strands`` -- a list of lists of tuples ``(t, c)``, where ``t``
      is a number between 0 and 1, and ``c`` is a complex number
OUTPUT:
The braid formed by the piecewise linear strands.
EXAMPLES::
sage: from sage.schemes.curves.zariski_vankampen import braid_from_piecewise # optional - sirocco
sage: paths = [[(0, I), (0.2, -1 - 0.5*I), (0.8, -1), (1, -I)],
....: [(0, -1), (0.5, -I), (1, 1)],
....: [(0, 1), (0.5, 1 + I), (1, I)]]
sage: braid_from_piecewise(paths) # optional - sirocco
s0*s1
"""
L = strands
i = min(val[1][0] for val in L)
totalpoints = [[[a[0][1].real(), a[0][1].imag()]] for a in L]
indices = [1 for a in range(len(L))]
while i < 1:
for j, val in enumerate(L):
if val[indices[j]][0] > i:
xaux = val[indices[j] - 1][1]
yaux = val[indices[j]][1]
aaux = val[indices[j] - 1][0]
baux = val[indices[j]][0]
interpola = xaux + (yaux - xaux)*(i - aaux)/(baux - aaux)
totalpoints[j].append([interpola.real(), interpola.imag()])
else:
totalpoints[j].append([val[indices[j]][1].real(), val[indices[j]][1].imag()])
indices[j] = indices[j] + 1
i = min(val[indices[k]][0] for k,val in enumerate(L))
for j, val in enumerate(L):
totalpoints[j].append([val[-1][1].real(), val[-1][1].imag()])
braid = []
G = SymmetricGroup(len(totalpoints))
def sgn(x, y): # Opposite sign of cmp
if x < y:
return 1
if x > y:
return -1
return 0
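    # For every pair of consecutive sample times, sort the strands by their
    # position at the first time and record each crossing (an inversion of that
    # order at the second time) together with the parameter t at which it
    # happens and its sign.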
for i in range(len(totalpoints[0]) - 1):
l1 = [totalpoints[j][i] for j in range(len(L))]
l2 = [totalpoints[j][i+1] for j in range(len(L))]
M = [[l1[s], l2[s]] for s in range(len(l1))]
M.sort()
l1 = [a[0] for a in M]
l2 = [a[1] for a in M]
cruces = []
for j in range(len(l2)):
for k in range(j):
if l2[j] < l2[k]:
t = (l1[j][0] - l1[k][0])/(l2[k][0] - l1[k][0] + l1[j][0] - l2[j][0])
s = sgn(l1[k][1]*(1 - t) + t*l2[k][1], l1[j][1]*(1 - t) + t*l2[j][1])
cruces.append([t, k, j, s])
if cruces:
cruces.sort()
P = G(Permutation([]))
while cruces:
                # select the crossings that occur at the same parameter t
crucesl = [c for c in cruces if c[0]==cruces[0][0]]
crossesl = [(P(c[2]+1) - P(c[1]+1),c[1],c[2],c[3]) for c in crucesl]
cruces = cruces[len(crucesl):]
while crossesl:
crossesl.sort()
c = crossesl.pop(0)
braid.append(c[3]*min(map(P, [c[1] + 1, c[2] + 1])))
P = G(Permutation([(c[1] + 1, c[2] + 1)]))*P
crossesl = [(P(c[2]+1) - P(c[1]+1),c[1],c[2],c[3]) for c in crossesl]
B = BraidGroup(len(L))
return B(braid)
def discrim(f):
r"""
Return the points in the discriminant of ``f``.
The result is the set of values of the first variable for which
two roots in the second variable coincide.
INPUT:
- ``f`` -- a polynomial in two variables with coefficients in a
number field with a fixed embedding in `\QQbar`
OUTPUT:
A list with the values of the discriminant in `\QQbar`.
EXAMPLES::
sage: from sage.schemes.curves.zariski_vankampen import discrim # optional - sirocco
sage: R.<x,y> = QQ[]
sage: f = (y^3 + x^3 - 1) * (x + y)
sage: discrim(f) # optional - sirocco
[1,
-0.500000000000000? - 0.866025403784439?*I,
-0.500000000000000? + 0.866025403784439?*I]
"""
x, y = f.variables()
F = f.base_ring()
disc = F[x](f.discriminant(y).resultant(f, y)).roots(QQbar, multiplicities=False)
return disc
def segments(points):
"""
Return the bounded segments of the Voronoi diagram of the given points.
INPUT:
- ``points`` -- a list of complex points
OUTPUT:
A list of pairs ``(p1, p2)``, where ``p1`` and ``p2`` are the
endpoints of the segments in the Voronoi diagram.
EXAMPLES::
sage: from sage.schemes.curves.zariski_vankampen import discrim, segments # optional - sirocco
sage: R.<x,y> = QQ[]
sage: f = y^3 + x^3 - 1
sage: disc = discrim(f) # optional - sirocco
sage: segments(disc) # optional - sirocco # abs tol 1e-15
[(-2.84740787203333 - 2.84740787203333*I,
-2.14285714285714 + 1.11022302462516e-16*I),
(-2.84740787203333 + 2.84740787203333*I,
-2.14285714285714 + 1.11022302462516e-16*I),
(2.50000000000000 + 2.50000000000000*I,
1.26513881334184 + 2.19128470333546*I),
(2.50000000000000 + 2.50000000000000*I,
2.50000000000000 - 2.50000000000000*I),
(1.26513881334184 + 2.19128470333546*I, 0.000000000000000),
(0.000000000000000, 1.26513881334184 - 2.19128470333546*I),
(2.50000000000000 - 2.50000000000000*I,
1.26513881334184 - 2.19128470333546*I),
(-2.84740787203333 + 2.84740787203333*I,
1.26513881334184 + 2.19128470333546*I),
(-2.14285714285714 + 1.11022302462516e-16*I, 0.000000000000000),
(-2.84740787203333 - 2.84740787203333*I,
1.26513881334184 - 2.19128470333546*I)]
"""
from numpy import array, vstack
from scipy.spatial import Voronoi
discpoints = array([(CC(a).real(), CC(a).imag()) for a in points])
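    # Add four auxiliary points far away from the discriminant so that every
    # Voronoi cell around a discriminant point is bounded; only the bounded
    # ridges (those not touching the vertex at infinity) are kept.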
added_points = 3 * abs(discpoints).max() + 1.0
configuration = vstack([discpoints, array([[added_points, 0], [-added_points, 0],
[0, added_points], [0, -added_points]])])
V = Voronoi(configuration)
res = []
for rv in V.ridge_vertices:
if not -1 in rv:
p1 = CC(list(V.vertices[rv[0]]))
p2 = CC(list(V.vertices[rv[1]]))
res.append((p1, p2))
return res
def followstrand(f, x0, x1, y0a, prec=53):
r"""
    Return a piecewise linear approximation of the homotopy continuation
of the root ``y0a`` from ``x0`` to ``x1``.
INPUT:
- ``f`` -- a polynomial in two variables
- ``x0`` -- a complex value, where the homotopy starts
- ``x1`` -- a complex value, where the homotopy ends
- ``y0a`` -- an approximate solution of the polynomial `F(y) = f(x_0, y)`
- ``prec`` -- the precision to use
OUTPUT:
A list of values `(t, y_{tr}, y_{ti})` such that:
- ``t`` is a real number between zero and one
- `f(t \cdot x_1 + (1-t) \cdot x_0, y_{tr} + I \cdot y_{ti})`
      is zero (or a good enough approximation)
- the piecewise linear path determined by the points has a tubular
neighborhood where the actual homotopy continuation path lies, and
no other root intersects it.
EXAMPLES::
sage: from sage.schemes.curves.zariski_vankampen import followstrand # optional - sirocco
sage: R.<x,y> = QQ[]
sage: f = x^2 + y^3
sage: x0 = CC(1, 0)
sage: x1 = CC(1, 0.5)
sage: followstrand(f, x0, x1, -1.0) # optional - sirocco # abs tol 1e-15
[(0.0, -1.0, 0.0),
(0.7500000000000001, -1.015090921153253, -0.24752813818386948),
(1.0, -1.026166099551513, -0.32768940253604323)]
"""
CIF = ComplexIntervalField(prec)
CC = ComplexField(prec)
G = f.change_ring(QQbar).change_ring(CIF)
(x, y) = G.variables()
g = G.subs({x: (1-x)*CIF(x0) + x*CIF(x1)})
coefs = []
deg = g.total_degree()
for d in range(deg + 1):
for i in range(d + 1):
c = CIF(g.coefficient({x: d-i, y: i}))
cr = c.real()
ci = c.imag()
coefs += list(cr.endpoints())
coefs += list(ci.endpoints())
yr = CC(y0a).real()
yi = CC(y0a).imag()
from sage.libs.sirocco import contpath, contpath_mp
try:
if prec == 53:
points = contpath(deg, coefs, yr, yi)
else:
points = contpath_mp(deg, coefs, yr, yi, prec)
return points
    except Exception:
return followstrand(f, x0, x1, y0a, 2*prec)
@parallel
def braid_in_segment(f, x0, x1):
"""
Return the braid formed by the `y` roots of ``f`` when `x` moves
from ``x0`` to ``x1``.
INPUT:
- ``f`` -- a polynomial in two variables
- ``x0`` -- a complex number
- ``x1`` -- a complex number
OUTPUT:
A braid.
EXAMPLES::
sage: from sage.schemes.curves.zariski_vankampen import braid_in_segment # optional - sirocco
sage: R.<x,y> = QQ[]
sage: f = x^2 + y^3
sage: x0 = CC(1,0)
sage: x1 = CC(1, 0.5)
sage: braid_in_segment(f, x0, x1) # optional - sirocco
s1
"""
CC = ComplexField(64)
(x, y) = f.variables()
I = QQbar.gen()
X0 = QQ(x0.real()) + I*QQ(x0.imag())
X1 = QQ(x1.real()) + I*QQ(x1.imag())
F0 = QQbar[y](f(X0, y))
y0s = F0.roots(multiplicities=False)
strands = [followstrand(f, x0, x1, CC(a)) for a in y0s]
complexstrands = [[(a[0], CC(a[1], a[2])) for a in b] for b in strands]
centralbraid = braid_from_piecewise(complexstrands)
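    # The central braid follows the strands numerically from x0 to x1; it is
    # padded on both sides with small braids matching the approximate endpoints
    # of the strands to the exact algebraic roots over x0 and x1.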
initialstrands = []
y0aps = [c[0][1] for c in complexstrands]
used = []
for y0ap in y0aps:
distances = [((y0ap - y0).norm(), y0) for y0 in y0s]
y0 = sorted(distances)[0][1]
if y0 in used:
raise ValueError("different roots are too close")
used.append(y0)
initialstrands.append([(0, CC(y0)), (1, y0ap)])
initialbraid = braid_from_piecewise(initialstrands)
F1 = QQbar[y](f(X1,y))
y1s = F1.roots(multiplicities=False)
finalstrands = []
y1aps = [c[-1][1] for c in complexstrands]
used = []
for y1ap in y1aps:
distances = [((y1ap - y1).norm(), y1) for y1 in y1s]
y1 = sorted(distances)[0][1]
if y1 in used:
raise ValueError("different roots are too close")
used.append(y1)
finalstrands.append([(0, y1ap), (1, CC(y1))])
    finalbraid = braid_from_piecewise(finalstrands)
    return initialbraid * centralbraid * finalbraid
def fundamental_group(f, simplified=True, projective=False):
r"""
Return a presentation of the fundamental group of the complement of
the algebraic set defined by the polynomial ``f``.
INPUT:
- ``f`` -- a polynomial in two variables, with coefficients in either
the rationals or a number field with a fixed embedding in `\QQbar`
- ``simplified`` -- boolean (default: ``True``); if set to ``True`` the
presentation will be simplified (see below)
- ``projective`` -- boolean (default: ``False``); if set to ``True``,
the fundamental group of the complement of the projective completion
of the curve will be computed, otherwise, the fundamental group of
the complement in the affine plane will be computed
    If ``simplified`` is ``False``, the returned presentation has as
    many generators as the degree of the polynomial times the number of
    points in the base used to create the segments that surround the
    discriminant. In this case, the generators are guaranteed to be
    meridians of the curve.
OUTPUT:
A presentation of the fundamental group of the complement of the
curve defined by ``f``.
EXAMPLES::
sage: from sage.schemes.curves.zariski_vankampen import fundamental_group # optional - sirocco
sage: R.<x,y> = QQ[]
sage: f = x^2 + y^3
sage: fundamental_group(f) # optional - sirocco
Finitely presented group < ... >
sage: fundamental_group(f, simplified=False) # optional - sirocco
Finitely presented group < ... >
::
sage: from sage.schemes.curves.zariski_vankampen import fundamental_group # optional - sirocco
sage: R.<x,y> = QQ[]
sage: f = y^3 + x^3
sage: fundamental_group(f) # optional - sirocco
Finitely presented group < ... >
It is also possible to have coefficients in a number field with a
fixed embedding in `\QQbar`::
sage: from sage.schemes.curves.zariski_vankampen import fundamental_group # optional - sirocco
sage: zeta = QQbar['x']('x^2+x+1').roots(multiplicities=False)[0]
sage: zeta
-0.50000000000000000? - 0.866025403784439?*I
sage: F = NumberField(zeta.minpoly(), 'zeta', embedding=zeta)
sage: F.inject_variables()
Defining zeta
sage: R.<x,y> = F[]
sage: f = y^3 + x^3 +zeta *x + 1
sage: fundamental_group(f) # optional - sirocco
Finitely presented group < x0 | >
"""
(x, y) = f.variables()
F = f.base_ring()
g = f.factor().radical().prod()
d = g.degree(y)
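    # Apply the change of coordinates x -> x + y until the coefficient of y^d
    # lies in the base field (and, in the projective case, the degree in y
    # equals the total degree), so that the projection to the x-axis behaves
    # well.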
while not g.coefficient(y**d) in F or (projective and g.total_degree() > d):
g = g.subs({x: x + y})
d = g.degree(y)
disc = discrim(g)
segs = segments(disc)
vertices = list(set(flatten(segs)))
Faux = FreeGroup(d)
F = FreeGroup(d * len(vertices))
rels = []
if projective:
rels.append(prod(F.gen(i) for i in range(d)))
braidscomputed = braid_in_segment([(g, seg[0], seg[1]) for seg in segs])
for braidcomputed in braidscomputed:
seg = (braidcomputed[0][0][1], braidcomputed[0][0][2])
b = braidcomputed[1]
i = vertices.index(seg[0])
j = vertices.index(seg[1])
for k in range(d):
el1 = Faux([k + 1]) * b.inverse()
el2 = k + 1
w1 = F([sign(a)*d*i + a for a in el1.Tietze()])
w2 = F([d*j + el2])
rels.append(w1/w2)
G = F / rels
if simplified:
return G.simplified()
else:
return G
| 36.341518
| 105
| 0.586819
|
0603ce718275356081cc7d9b2e215aa1069792be
| 320
|
py
|
Python
|
string_algorithms/search/naive_search.py
|
ocozalp/Algorithms
|
0eed59b2e8f110bfe036ddc3de7762948473ecc9
|
[
"Apache-2.0"
] | 5
|
2017-01-10T07:42:15.000Z
|
2020-02-16T19:39:25.000Z
|
string_algorithms/search/naive_search.py
|
ocozalp/Algorithms
|
0eed59b2e8f110bfe036ddc3de7762948473ecc9
|
[
"Apache-2.0"
] | null | null | null |
string_algorithms/search/naive_search.py
|
ocozalp/Algorithms
|
0eed59b2e8f110bfe036ddc3de7762948473ecc9
|
[
"Apache-2.0"
] | null | null | null |
def naive_search(text, pattern, start_index=0):
for i in xrange(start_index, len(text) - len(pattern) + 1):
found = True
for j in xrange(len(pattern)):
if text[i + j] != pattern[j]:
found = False
break
if found:
return i
return -1
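# Illustrative usage (not part of the original module): naive_search returns
# the first 0-based index at which ``pattern`` occurs in ``text`` at or after
# ``start_index``, or -1 if there is no occurrence.
if __name__ == '__main__':
    print(naive_search("abracadabra", "cad"))         # 4
    print(naive_search("abracadabra", "zzz"))         # -1
    print(naive_search("aaaa", "aa", start_index=1))  # 1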
| 26.666667
| 63
| 0.50625
|
5145260fc63fbaab8ee85b98a5638a3318d03f49
| 8,879
|
py
|
Python
|
sdk/python/pulumi_azure/network/get_subnet.py
|
aangelisc/pulumi-azure
|
71dd9c75403146e16f7480e5a60b08bc0329660e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/network/get_subnet.py
|
aangelisc/pulumi-azure
|
71dd9c75403146e16f7480e5a60b08bc0329660e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/network/get_subnet.py
|
aangelisc/pulumi-azure
|
71dd9c75403146e16f7480e5a60b08bc0329660e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetSubnetResult',
'AwaitableGetSubnetResult',
'get_subnet',
]
@pulumi.output_type
class GetSubnetResult:
"""
A collection of values returned by getSubnet.
"""
def __init__(__self__, address_prefix=None, address_prefixes=None, enforce_private_link_endpoint_network_policies=None, enforce_private_link_service_network_policies=None, id=None, name=None, network_security_group_id=None, resource_group_name=None, route_table_id=None, service_endpoints=None, virtual_network_name=None):
if address_prefix and not isinstance(address_prefix, str):
raise TypeError("Expected argument 'address_prefix' to be a str")
pulumi.set(__self__, "address_prefix", address_prefix)
if address_prefixes and not isinstance(address_prefixes, list):
raise TypeError("Expected argument 'address_prefixes' to be a list")
pulumi.set(__self__, "address_prefixes", address_prefixes)
if enforce_private_link_endpoint_network_policies and not isinstance(enforce_private_link_endpoint_network_policies, bool):
raise TypeError("Expected argument 'enforce_private_link_endpoint_network_policies' to be a bool")
pulumi.set(__self__, "enforce_private_link_endpoint_network_policies", enforce_private_link_endpoint_network_policies)
if enforce_private_link_service_network_policies and not isinstance(enforce_private_link_service_network_policies, bool):
raise TypeError("Expected argument 'enforce_private_link_service_network_policies' to be a bool")
pulumi.set(__self__, "enforce_private_link_service_network_policies", enforce_private_link_service_network_policies)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network_security_group_id and not isinstance(network_security_group_id, str):
raise TypeError("Expected argument 'network_security_group_id' to be a str")
pulumi.set(__self__, "network_security_group_id", network_security_group_id)
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
pulumi.set(__self__, "resource_group_name", resource_group_name)
if route_table_id and not isinstance(route_table_id, str):
raise TypeError("Expected argument 'route_table_id' to be a str")
pulumi.set(__self__, "route_table_id", route_table_id)
if service_endpoints and not isinstance(service_endpoints, list):
raise TypeError("Expected argument 'service_endpoints' to be a list")
pulumi.set(__self__, "service_endpoints", service_endpoints)
if virtual_network_name and not isinstance(virtual_network_name, str):
raise TypeError("Expected argument 'virtual_network_name' to be a str")
pulumi.set(__self__, "virtual_network_name", virtual_network_name)
@property
@pulumi.getter(name="addressPrefix")
def address_prefix(self) -> str:
"""
(Deprecated) The address prefix used for the subnet.
"""
return pulumi.get(self, "address_prefix")
@property
@pulumi.getter(name="addressPrefixes")
def address_prefixes(self) -> Sequence[str]:
"""
The address prefixes for the subnet.
"""
return pulumi.get(self, "address_prefixes")
@property
@pulumi.getter(name="enforcePrivateLinkEndpointNetworkPolicies")
def enforce_private_link_endpoint_network_policies(self) -> bool:
"""
Enable or Disable network policies for the private link endpoint on the subnet.
"""
return pulumi.get(self, "enforce_private_link_endpoint_network_policies")
@property
@pulumi.getter(name="enforcePrivateLinkServiceNetworkPolicies")
def enforce_private_link_service_network_policies(self) -> bool:
"""
Enable or Disable network policies for the private link service on the subnet.
"""
return pulumi.get(self, "enforce_private_link_service_network_policies")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkSecurityGroupId")
def network_security_group_id(self) -> str:
"""
The ID of the Network Security Group associated with the subnet.
"""
return pulumi.get(self, "network_security_group_id")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> str:
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="routeTableId")
def route_table_id(self) -> str:
"""
The ID of the Route Table associated with this subnet.
"""
return pulumi.get(self, "route_table_id")
@property
@pulumi.getter(name="serviceEndpoints")
def service_endpoints(self) -> Sequence[str]:
"""
A list of Service Endpoints within this subnet.
"""
return pulumi.get(self, "service_endpoints")
@property
@pulumi.getter(name="virtualNetworkName")
def virtual_network_name(self) -> str:
return pulumi.get(self, "virtual_network_name")
class AwaitableGetSubnetResult(GetSubnetResult):
# pylint: disable=using-constant-test
def __await__(self):
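        # The unreachable ``yield`` keeps this method a generator, so the
        # result can be awaited while the plain GetSubnetResult values are
        # returned immediately.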
if False:
yield self
return GetSubnetResult(
address_prefix=self.address_prefix,
address_prefixes=self.address_prefixes,
enforce_private_link_endpoint_network_policies=self.enforce_private_link_endpoint_network_policies,
enforce_private_link_service_network_policies=self.enforce_private_link_service_network_policies,
id=self.id,
name=self.name,
network_security_group_id=self.network_security_group_id,
resource_group_name=self.resource_group_name,
route_table_id=self.route_table_id,
service_endpoints=self.service_endpoints,
virtual_network_name=self.virtual_network_name)
def get_subnet(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
virtual_network_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSubnetResult:
"""
Use this data source to access information about an existing Subnet within a Virtual Network.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.network.get_subnet(name="backend",
virtual_network_name="production",
resource_group_name="networking")
pulumi.export("subnetId", example.id)
```
:param str name: Specifies the name of the Subnet.
:param str resource_group_name: Specifies the name of the resource group the Virtual Network is located in.
:param str virtual_network_name: Specifies the name of the Virtual Network this Subnet is located within.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['virtualNetworkName'] = virtual_network_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure:network/getSubnet:getSubnet', __args__, opts=opts, typ=GetSubnetResult).value
return AwaitableGetSubnetResult(
address_prefix=__ret__.address_prefix,
address_prefixes=__ret__.address_prefixes,
enforce_private_link_endpoint_network_policies=__ret__.enforce_private_link_endpoint_network_policies,
enforce_private_link_service_network_policies=__ret__.enforce_private_link_service_network_policies,
id=__ret__.id,
name=__ret__.name,
network_security_group_id=__ret__.network_security_group_id,
resource_group_name=__ret__.resource_group_name,
route_table_id=__ret__.route_table_id,
service_endpoints=__ret__.service_endpoints,
virtual_network_name=__ret__.virtual_network_name)
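# Illustrative follow-up (not part of the generated module): any property
# defined on GetSubnetResult can be read off the result in the same way the
# docstring example exports ``id``, for instance::
#
#     subnet = azure.network.get_subnet(name="backend",
#                                       virtual_network_name="production",
#                                       resource_group_name="networking")
#     pulumi.export("subnetAddressPrefixes", subnet.address_prefixes)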
| 43.955446
| 326
| 0.713819
|
4578c4e980b69e691ef510ee173aaefb08a0f2ce
| 144,797
|
py
|
Python
|
src/sage/combinat/diagram_algebras.py
|
ChamanAgrawal/sage
|
5f6d56ba247b352d7d46442e88fa3a027e9f222d
|
[
"BSL-1.0"
] | 2
|
2019-06-02T03:16:59.000Z
|
2019-06-15T10:17:18.000Z
|
src/sage/combinat/diagram_algebras.py
|
ChamanAgrawal/sage
|
5f6d56ba247b352d7d46442e88fa3a027e9f222d
|
[
"BSL-1.0"
] | null | null | null |
src/sage/combinat/diagram_algebras.py
|
ChamanAgrawal/sage
|
5f6d56ba247b352d7d46442e88fa3a027e9f222d
|
[
"BSL-1.0"
] | 1
|
2019-06-02T03:16:55.000Z
|
2019-06-02T03:16:55.000Z
|
r"""
Diagram and Partition Algebras
AUTHORS:
- Mike Hansen (2007): Initial version
- Stephen Doty, Aaron Lauve, George H. Seelinger (2012): Implementation of
partition, Brauer, Temperley--Lieb, and ideal partition algebras
- Stephen Doty, Aaron Lauve, George H. Seelinger (2015): Implementation of
``*Diagram`` classes and other methods to improve diagram algebras.
- Mike Zabrocki (2018): Implementation of individual element diagram classes
- Aaron Lauve, Mike Zabrocki (2018): Implementation of orbit basis for Partition algebra.
"""
#*****************************************************************************
# Copyright (C) 2007 Mike Hansen <mhansen@gmail.com>,
# 2012 Stephen Doty <doty@math.luc.edu>,
# Aaron Lauve <lauve@math.luc.edu>,
# George H. Seelinger <ghseeli@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#****************************************************************************
# python3
from __future__ import division
from six.moves import range
from sage.categories.associative_algebras import AssociativeAlgebras
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
from sage.arith.power import generic_power
from sage.combinat.free_module import CombinatorialFreeModule
from sage.structure.parent import Parent
from sage.structure.unique_representation import UniqueRepresentation
from sage.combinat.combinat import bell_number, catalan_number
from sage.structure.global_options import GlobalOptions
from sage.combinat.combinat_cython import set_partition_iterator, perfect_matchings_iterator
from sage.combinat.set_partition import SetPartitions, AbstractSetPartition
from sage.combinat.symmetric_group_algebra import SymmetricGroupAlgebra_n
from sage.combinat.permutation import Permutations
from sage.graphs.graph import Graph
from sage.misc.cachefunc import cached_method
from sage.misc.lazy_attribute import lazy_attribute
from sage.misc.flatten import flatten
from sage.misc.misc_c import prod
from sage.rings.all import ZZ, QQ
from sage.functions.other import ceil
import itertools
def partition_diagrams(k):
r"""
Return a generator of all partition diagrams of order ``k``.
    A partition diagram of order `k \in \ZZ` is a set partition of
    `\{1, \ldots, k, -1, \ldots, -k\}`. If we have `k - 1/2 \in \ZZ`, then
a partition diagram of order `k \in 1/2 \ZZ` is a set partition of
`\{1, \ldots, k+1/2, -1, \ldots, -(k+1/2)\}` with `k+1/2` and `-(k+1/2)`
in the same block. See [HR2005]_.
INPUT:
- ``k`` -- the order of the partition diagrams
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: [SetPartition(p) for p in da.partition_diagrams(2)]
[{{-2, -1, 1, 2}},
{{-2, 1, 2}, {-1}},
{{-2}, {-1, 1, 2}},
{{-2, -1}, {1, 2}},
{{-2}, {-1}, {1, 2}},
{{-2, -1, 1}, {2}},
{{-2, 1}, {-1, 2}},
{{-2, 1}, {-1}, {2}},
{{-2, 2}, {-1, 1}},
{{-2, -1, 2}, {1}},
{{-2, 2}, {-1}, {1}},
{{-2}, {-1, 1}, {2}},
{{-2}, {-1, 2}, {1}},
{{-2, -1}, {1}, {2}},
{{-2}, {-1}, {1}, {2}}]
sage: [SetPartition(p) for p in da.partition_diagrams(3/2)]
[{{-2, -1, 1, 2}},
{{-2, 1, 2}, {-1}},
{{-2, 2}, {-1, 1}},
{{-2, -1, 2}, {1}},
{{-2, 2}, {-1}, {1}}]
"""
if k in ZZ:
S = set_partition_iterator(list(range(1, k+1)) + list(range(-k,0)))
for p in S:
yield p
elif k + ZZ(1)/ZZ(2) in ZZ: # Else k in 1/2 ZZ
k = ZZ(k + ZZ(1) / ZZ(2))
S = set_partition_iterator(list(range(1, k+1)) + list(range(-k+1,0)))
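        # For half-integer order, the points k+1/2 and -(k+1/2) must lie in the
        # same block: enumerate set partitions without the last negative point
        # and append it to the block containing the last positive point.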
for p in S:
yield [b + [-k] if k in b else b for b in p]
else:
raise ValueError("argument %s must be a half-integer"%k)
def brauer_diagrams(k):
r"""
Return a generator of all Brauer diagrams of order ``k``.
A Brauer diagram of order `k` is a partition diagram of order `k`
with block size 2.
INPUT:
- ``k`` -- the order of the Brauer diagrams
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: [SetPartition(p) for p in da.brauer_diagrams(2)]
[{{-2, -1}, {1, 2}}, {{-2, 1}, {-1, 2}}, {{-2, 2}, {-1, 1}}]
sage: [SetPartition(p) for p in da.brauer_diagrams(5/2)]
[{{-3, 3}, {-2, -1}, {1, 2}},
{{-3, 3}, {-2, 1}, {-1, 2}},
{{-3, 3}, {-2, 2}, {-1, 1}}]
"""
if k in ZZ:
s = list(range(1,k+1)) + list(range(-k,0))
for p in perfect_matchings_iterator(k):
yield [(s[a],s[b]) for a,b in p]
elif k + ZZ(1) / ZZ(2) in ZZ: # Else k in 1/2 ZZ
k = ZZ(k + ZZ(1) / ZZ(2))
s = list(range(1, k)) + list(range(-k+1,0))
for p in perfect_matchings_iterator(k-1):
yield [(s[a],s[b]) for a,b in p] + [[k, -k]]
def temperley_lieb_diagrams(k):
r"""
Return a generator of all Temperley--Lieb diagrams of order ``k``.
A Temperley--Lieb diagram of order `k` is a partition diagram of order `k`
with block size 2 and is planar.
INPUT:
- ``k`` -- the order of the Temperley--Lieb diagrams
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: [SetPartition(p) for p in da.temperley_lieb_diagrams(2)]
[{{-2, -1}, {1, 2}}, {{-2, 2}, {-1, 1}}]
sage: [SetPartition(p) for p in da.temperley_lieb_diagrams(5/2)]
[{{-3, 3}, {-2, -1}, {1, 2}}, {{-3, 3}, {-2, 2}, {-1, 1}}]
"""
for i in brauer_diagrams(k):
if is_planar(i):
yield i
def planar_diagrams(k):
r"""
Return a generator of all planar diagrams of order ``k``.
A planar diagram of order `k` is a partition diagram of order `k`
that has no crossings.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: all_diagrams = da.partition_diagrams(2)
sage: [SetPartition(p) for p in all_diagrams if p not in da.planar_diagrams(2)]
[{{-2, 1}, {-1, 2}}]
sage: all_diagrams = da.partition_diagrams(5/2)
sage: [SetPartition(p) for p in all_diagrams if p not in da.planar_diagrams(5/2)]
[{{-3, -1, 3}, {-2, 1, 2}},
{{-3, -2, 1, 3}, {-1, 2}},
{{-3, -1, 1, 3}, {-2, 2}},
{{-3, 1, 3}, {-2, -1, 2}},
{{-3, 1, 3}, {-2, 2}, {-1}},
{{-3, 1, 3}, {-2}, {-1, 2}},
{{-3, -1, 2, 3}, {-2, 1}},
{{-3, 3}, {-2, 1}, {-1, 2}},
{{-3, -1, 3}, {-2, 1}, {2}},
{{-3, -1, 3}, {-2, 2}, {1}}]
"""
for i in partition_diagrams(k):
if is_planar(i):
yield i
def ideal_diagrams(k):
r"""
Return a generator of all "ideal" diagrams of order ``k``.
An ideal diagram of order `k` is a partition diagram of order `k` with
propagating number less than `k`.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: all_diagrams = da.partition_diagrams(2)
sage: [SetPartition(p) for p in all_diagrams if p not in da.ideal_diagrams(2)]
[{{-2, 1}, {-1, 2}}, {{-2, 2}, {-1, 1}}]
sage: all_diagrams = da.partition_diagrams(3/2)
sage: [SetPartition(p) for p in all_diagrams if p not in da.ideal_diagrams(3/2)]
[{{-2, 2}, {-1, 1}}]
"""
for i in partition_diagrams(k):
if propagating_number(i) < k:
yield i
class AbstractPartitionDiagram(AbstractSetPartition):
r"""
Abstract base class for partition diagrams.
This class represents a single partition diagram, that is used as a
basis key for a diagram algebra element. A partition diagram should
be a partition of the set `\{1, \ldots, k, -1, \ldots, -k\}`. Each
such set partition is regarded as a graph on nodes
`\{1, \ldots, k, -1, \ldots, -k\}` arranged in two rows, with nodes
`1, \ldots, k` in the top row from left to right and with nodes
`-1, \ldots, -k` in the bottom row from left to right, and an edge
connecting two nodes if and only if the nodes lie in the same
subset of the set partition.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: pd = da.AbstractPartitionDiagrams(2)
sage: pd1 = da.AbstractPartitionDiagram(pd, [[1,2],[-1,-2]])
sage: pd2 = da.AbstractPartitionDiagram(pd, [[1,2],[-1,-2]])
sage: pd1
{{-2, -1}, {1, 2}}
sage: pd1 == pd2
True
sage: pd1 == [[1,2],[-1,-2]]
True
sage: pd1 == ((-2,-1),(2,1))
True
sage: pd1 == SetPartition([[1,2],[-1,-2]])
True
sage: pd3 = da.AbstractPartitionDiagram(pd, [[1,-2],[-1,2]])
sage: pd1 == pd3
False
sage: pd4 = da.AbstractPartitionDiagram(pd, [[1,2],[3,4]])
Traceback (most recent call last):
...
ValueError: {{1, 2}, {3, 4}} does not represent two rows of vertices of order 2
"""
def __init__(self, parent, d):
r"""
Initialize ``self``.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: pd = da.AbstractPartitionDiagrams(2)
sage: pd1 = da.AbstractPartitionDiagram(pd, ((-2,-1),(1,2)) )
"""
self._base_diagram = tuple(sorted(tuple(sorted(i)) for i in d))
super(AbstractPartitionDiagram, self).__init__(parent, self._base_diagram)
def check(self):
r"""
Check the validity of the input for the diagram.
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: pd = da.AbstractPartitionDiagrams(2)
sage: pd1 = da.AbstractPartitionDiagram(pd, [[1,2],[-1,-2]]) # indirect doctest
sage: pd2 = da.AbstractPartitionDiagram(pd, [[1,2],[3,4]]) # indirect doctest
Traceback (most recent call last):
...
ValueError: {{1, 2}, {3, 4}} does not represent two rows of vertices of order 2
sage: pd2 = da.AbstractPartitionDiagram(pd, [[1],[-1]]) # indirect doctest
Traceback (most recent call last):
...
ValueError: {{-1}, {1}} does not represent two rows of vertices of order 2
sage: pd2 = da.AbstractPartitionDiagram(pd, [[[1,2],[-1,-2]]]) # indirect doctest
Traceback (most recent call last):
...
TypeError: unhashable type: 'list'
"""
if self._base_diagram:
tst = frozenset(e for B in self._base_diagram for e in B)
if tst != self.parent()._set:
raise ValueError("{} does not represent two rows of vertices of order {}".format(
self, self.parent().order))
def __hash__(self):
"""
Return the hash of ``self``.
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: pd = da.AbstractPartitionDiagrams(2)
sage: pd1 = da.AbstractPartitionDiagram(pd, [[1,2],[-1,-2]])
sage: pd2 = da.AbstractPartitionDiagram(pd, [[1,2],[-1,-2]])
sage: hash(pd1) == hash(pd2)
True
sage: hash(pd1) == hash( ((-2,-1), (1,2)) )
True
"""
return hash(self._base_diagram)
def __eq__(self, other):
r"""
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: pd = da.AbstractPartitionDiagrams(2)
sage: pd1 = da.AbstractPartitionDiagram(pd, [[1,2],[-1,-2]])
sage: pd2 = da.AbstractPartitionDiagram(pd, [[1,2],[-1,-2]])
sage: pd1 == pd2
True
sage: pd1 == [[1,2],[-1,-2]]
True
sage: pd1 == ((-2,-1),(2,1))
True
sage: pd1 == SetPartition([[1,2],[-1,-2]])
True
sage: pd3 = da.AbstractPartitionDiagram(pd, [[1,-2],[-1,2]])
sage: pd1 == pd3
False
Check the inherited inequality::
sage: pd1 = da.AbstractPartitionDiagram(pd, [[1,2],[-1,-2]])
sage: pd2 = da.AbstractPartitionDiagram(pd, [[1,-2],[-1,2]])
sage: pd1 != pd2
True
sage: pd1 != ((-2,-1),(2,1))
False
"""
try:
return self._base_diagram == other._base_diagram
except AttributeError:
pass
try:
other2 = self.parent(other)
return self._base_diagram == other2._base_diagram
except (TypeError, ValueError, AttributeError):
return False
def __lt__(self, other):
"""
Compare less than.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: pd = da.AbstractPartitionDiagrams(2)
sage: pd1 = da.AbstractPartitionDiagram(pd, [[1,2],[-1,-2]])
sage: pd2 = da.AbstractPartitionDiagram(pd, [[1,-2],[-1,2]])
sage: pd1 < pd2
True
sage: pd2 < pd1
False
sage: pd2 > pd1
True
sage: pd1 > pd2
False
"""
if not isinstance(other, AbstractPartitionDiagram):
return False
return self._base_diagram < other._base_diagram
def base_diagram(self):
r"""
Return the underlying implementation of the diagram.
OUTPUT:
- tuple of tuples of integers
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: pd = da.AbstractPartitionDiagrams(2)
sage: pd([[1,2],[-1,-2]]).base_diagram() == ((-2,-1),(1,2))
True
"""
return self._base_diagram # note, this works because self._base_diagram is immutable
diagram = base_diagram
def set_partition(self):
r"""
Return the underlying implementation of the diagram as a set of sets.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: pd = da.AbstractPartitionDiagrams(2)
sage: X = pd([[1,2],[-1,-2]]).set_partition(); X
{{-2, -1}, {1, 2}}
sage: X.parent()
Set partitions
"""
return SetPartitions()(self)
def compose(self, other):
r"""
Compose ``self`` with ``other``.
The composition of two diagrams `X` and `Y` is given by placing
`X` on top of `Y` and removing all loops.
OUTPUT:
A tuple where the first entry is the composite diagram and the
        second entry is how many loops were removed.
.. NOTE::
This is not really meant to be called directly, but it works
to call it this way if desired.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: pd = da.AbstractPartitionDiagrams(2)
sage: pd([[1,2],[-1,-2]]).compose(pd([[1,2],[-1,-2]]))
({{-2, -1}, {1, 2}}, 1)
"""
(composite_diagram, loops_removed) = set_partition_composition(self._base_diagram, other._base_diagram)
return (self.__class__(self.parent(), composite_diagram), loops_removed)
def propagating_number(self):
r"""
Return the propagating number of the diagram.
The propagating number is the number of blocks with both a
positive and negative number.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: pd = da.AbstractPartitionDiagrams(2)
sage: d1 = pd([[1,-2],[2,-1]])
sage: d1.propagating_number()
2
sage: d2 = pd([[1,2],[-2,-1]])
sage: d2.propagating_number()
0
"""
return ZZ(sum(1 for part in self._base_diagram if min(part) < 0 and max(part) > 0))
def count_blocks_of_size(self, n):
r"""
Count the number of blocks of a given size.
INPUT:
- ``n`` -- a positive integer
EXAMPLES::
sage: from sage.combinat.diagram_algebras import PartitionDiagram
sage: pd = PartitionDiagram([[1,-3,-5],[2,4],[3,-1,-2],[5],[-4]])
sage: pd.count_blocks_of_size(1)
2
sage: pd.count_blocks_of_size(2)
1
sage: pd.count_blocks_of_size(3)
2
"""
return sum(ZZ(len(block) == n) for block in self)
def order(self):
r"""
Return the maximum entry in the diagram element.
A diagram element will be a partition of the set
`\{-1, -2, \ldots, -k, 1, 2, \ldots, k\}`. The order of
the diagram element is the value `k`.
EXAMPLES::
sage: from sage.combinat.diagram_algebras import PartitionDiagram
sage: PartitionDiagram([[1,-1],[2,-2,-3],[3]]).order()
3
sage: PartitionDiagram([[1,-1]]).order()
1
sage: PartitionDiagram([[1,-3,-5],[2,4],[3,-1,-2],[5],[-4]]).order()
5
"""
return self.parent().order
def is_planar(self):
r"""
Test if the diagram ``self`` is planar.
A diagram element is planar if the graph of the nodes is planar.
EXAMPLES::
sage: from sage.combinat.diagram_algebras import BrauerDiagram
sage: BrauerDiagram([[1,-2],[2,-1]]).is_planar()
False
sage: BrauerDiagram([[1,-1],[2,-2]]).is_planar()
True
"""
return is_planar(self)
class IdealDiagram(AbstractPartitionDiagram):
r"""
    The element class for an ideal diagram.
An ideal diagram for an integer `k` is a partition of the set
`\{1, \ldots, k, -1, \ldots, -k\}` where the propagating number is
strictly smaller than the order.
EXAMPLES::
sage: from sage.combinat.diagram_algebras import IdealDiagrams as IDs
sage: IDs(2)
Ideal diagrams of order 2
sage: IDs(2).list()
[{{-2, -1, 1, 2}},
{{-2, 1, 2}, {-1}},
{{-2}, {-1, 1, 2}},
{{-2, -1}, {1, 2}},
{{-2}, {-1}, {1, 2}},
{{-2, -1, 1}, {2}},
{{-2, 1}, {-1}, {2}},
{{-2, -1, 2}, {1}},
{{-2, 2}, {-1}, {1}},
{{-2}, {-1, 1}, {2}},
{{-2}, {-1, 2}, {1}},
{{-2, -1}, {1}, {2}},
{{-2}, {-1}, {1}, {2}}]
sage: from sage.combinat.diagram_algebras import PartitionDiagrams as PDs
sage: PDs(4).cardinality() == factorial(4) + IDs(4).cardinality()
True
"""
@staticmethod
def __classcall_private__(cls, diag):
"""
Normalize input to initialize diagram.
The order of the diagram element is the maximum value found in
the list of lists.
EXAMPLES::
sage: from sage.combinat.diagram_algebras import IdealDiagram
sage: IdealDiagram([[1],[-1]])
{{-1}, {1}}
sage: IdealDiagram([[1], [-1]]).parent()
Ideal diagrams of order 1
"""
order = max(v for p in diag for v in p)
return IdealDiagrams(order)(diag)
def check(self):
r"""
Check the validity of the input for ``self``.
TESTS::
sage: from sage.combinat.diagram_algebras import IdealDiagram
sage: pd1 = IdealDiagram([[1,2],[-1,-2]]) # indirect doctest
sage: pd2 = IdealDiagram([[1,-2],[2,-1]]) # indirect doctest
Traceback (most recent call last):
...
ValueError: the diagram {{-2, 1}, {-1, 2}} must have a propagating number smaller than the order
sage: pd3 = IdealDiagram([[1,2,-1,-3]]) # indirect doctest
Traceback (most recent call last):
...
ValueError: {{-3, -1, 1, 2}} does not represent two rows of vertices of order 2
sage: pd4 = IdealDiagram([[1,-2,-1],[2]]) # indirect doctest
"""
super(IdealDiagram, self).check()
if self.propagating_number() >= self.order():
raise ValueError("the diagram %s must have a propagating number smaller than the order"%(self))
class PlanarDiagram(AbstractPartitionDiagram):
r"""
The element class for a planar diagram.
A planar diagram for an integer `k` is a partition of the set
`\{1, \ldots, k, -1, \ldots, -k\}` so that the diagram is non-crossing.
EXAMPLES::
sage: from sage.combinat.diagram_algebras import PlanarDiagrams
sage: PlanarDiagrams(2)
Planar diagrams of order 2
sage: PlanarDiagrams(2).list()
[{{-2, -1, 1, 2}},
{{-2, 1, 2}, {-1}},
{{-2}, {-1, 1, 2}},
{{-2, -1}, {1, 2}},
{{-2}, {-1}, {1, 2}},
{{-2, -1, 1}, {2}},
{{-2, 1}, {-1}, {2}},
{{-2, 2}, {-1, 1}},
{{-2, -1, 2}, {1}},
{{-2, 2}, {-1}, {1}},
{{-2}, {-1, 1}, {2}},
{{-2}, {-1, 2}, {1}},
{{-2, -1}, {1}, {2}},
{{-2}, {-1}, {1}, {2}}]
"""
@staticmethod
def __classcall_private__(cls, diag):
"""
Normalize input to initialize diagram.
The order of the diagram element is the maximum value found in
the list of lists.
EXAMPLES::
sage: from sage.combinat.diagram_algebras import PlanarDiagram
sage: PlanarDiagram([[1,-1]])
{{-1, 1}}
sage: PlanarDiagram([[1, -1]]).parent()
Planar diagrams of order 1
"""
order = max(v for p in diag for v in p)
PD = PlanarDiagrams(order)
return PD(diag)
def check(self):
r"""
Check the validity of the input for ``self``.
TESTS::
sage: from sage.combinat.diagram_algebras import PlanarDiagram
sage: pd1 = PlanarDiagram([[1,2],[-1,-2]]) # indirect doctest
sage: pd2 = PlanarDiagram([[1,-2],[2,-1]]) # indirect doctest
Traceback (most recent call last):
...
ValueError: the diagram {{-2, 1}, {-1, 2}} must be planar
sage: pd3 = PlanarDiagram([[1,2,-1,-3]]) # indirect doctest
Traceback (most recent call last):
...
ValueError: {{-3, -1, 1, 2}} does not represent two rows of vertices of order 2
sage: pd4 = PlanarDiagram([[1,-2,-1],[2]]) # indirect doctest
"""
super(PlanarDiagram, self).check()
if not self.is_planar():
raise ValueError("the diagram %s must be planar"%(self))
class TemperleyLiebDiagram(AbstractPartitionDiagram):
r"""
The element class for a Temperley-Lieb diagram.
A Temperley-Lieb diagram for an integer `k` is a partition of the set
`\{1, \ldots, k, -1, \ldots, -k\}` so that the blocks are all of size
2 and the diagram is planar.
EXAMPLES::
sage: from sage.combinat.diagram_algebras import TemperleyLiebDiagrams
sage: TemperleyLiebDiagrams(2)
Temperley Lieb diagrams of order 2
sage: TemperleyLiebDiagrams(2).list()
[{{-2, -1}, {1, 2}}, {{-2, 2}, {-1, 1}}]
"""
@staticmethod
def __classcall_private__(cls, diag):
"""
Normalize input to initialize diagram.
The order of the diagram element is the maximum value found in
the list of lists.
EXAMPLES::
sage: from sage.combinat.diagram_algebras import TemperleyLiebDiagram
sage: TemperleyLiebDiagram([[1,-1]])
{{-1, 1}}
sage: TemperleyLiebDiagram([[1, -1]]).parent()
Temperley Lieb diagrams of order 1
"""
order = max(v for p in diag for v in p)
TLD = TemperleyLiebDiagrams(order)
return TLD(diag)
def check(self):
r"""
Check the validity of the input for ``self``.
TESTS::
sage: from sage.combinat.diagram_algebras import TemperleyLiebDiagram
sage: pd1 = TemperleyLiebDiagram([[1,2],[-1,-2]]) # indirect doctest
sage: pd2 = TemperleyLiebDiagram([[1,-2],[2,-1]]) # indirect doctest
Traceback (most recent call last):
...
ValueError: the diagram {{-2, 1}, {-1, 2}} must be planar
sage: pd3 = TemperleyLiebDiagram([[1,2,-1,-3]]) # indirect doctest
Traceback (most recent call last):
...
ValueError: {{-3, -1, 1, 2}} does not represent two rows of vertices of order 2
sage: pd4 = TemperleyLiebDiagram([[1,-2,-1],[2]]) # indirect doctest
Traceback (most recent call last):
...
ValueError: all blocks of {{-2, -1, 1}, {2}} must be of size 2
"""
super(TemperleyLiebDiagram, self).check()
if any(len(block) != 2 for block in self):
raise ValueError("all blocks of %s must be of size 2"%(self))
if not self.is_planar():
raise ValueError("the diagram %s must be planar"%(self))
class PartitionDiagram(AbstractPartitionDiagram):
r"""
The element class for a partition diagram.
A partition diagram for an integer `k` is a partition of the set
`\{1, \ldots, k, -1, \ldots, -k\}`
EXAMPLES::
sage: from sage.combinat.diagram_algebras import PartitionDiagram, PartitionDiagrams
sage: PartitionDiagrams(1)
Partition diagrams of order 1
sage: PartitionDiagrams(1).list()
[{{-1, 1}}, {{-1}, {1}}]
sage: PartitionDiagram([[1,-1]])
{{-1, 1}}
sage: PartitionDiagram(((1,-2),(2,-1))).parent()
Partition diagrams of order 2
"""
@staticmethod
def __classcall_private__(cls, diag):
"""
Normalize input to initialize diagram.
The order of the diagram element is the maximum value found in
the list of lists.
EXAMPLES::
sage: from sage.combinat.diagram_algebras import PartitionDiagram
sage: PartitionDiagram([[1],[-1]])
{{-1}, {1}}
sage: PartitionDiagram([[1],[-1]]).parent()
Partition diagrams of order 1
"""
order = max(v for p in diag for v in p)
PD = PartitionDiagrams(order)
return PD(diag)
class BrauerDiagram(AbstractPartitionDiagram):
r"""
A Brauer diagram.
A Brauer diagram for an integer `k` is a partition of the set
`\{1, \ldots, k, -1, \ldots, -k\}` with block size 2.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: bd = da.BrauerDiagrams(2)
sage: bd1 = bd([[1,2],[-1,-2]])
sage: bd2 = bd([[1,2,-1,-2]])
Traceback (most recent call last):
...
ValueError: all blocks of {{-2, -1, 1, 2}} must be of size 2
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: bd = da.BrauerDiagrams(2)( ((-2,-1),(1,2)) )
sage: TestSuite(bd).run()
"""
@staticmethod
def __classcall_private__(cls, diag):
"""
Normalize input to initialize diagram.
The order of the diagram element is the maximum value found in
the list of lists.
EXAMPLES::
sage: from sage.combinat.diagram_algebras import BrauerDiagram
sage: bd = BrauerDiagram([[1,-1]]); bd
{{-1, 1}}
sage: bd.parent()
Brauer diagrams of order 1
"""
order = max(v for p in diag for v in p)
BD = BrauerDiagrams(order)
return BD(diag)
def check(self):
r"""
Check the validity of the input for ``self``.
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: bd = da.BrauerDiagrams(2)
sage: bd1 = bd([[1,2],[-1,-2]]) # indirect doctest
sage: bd2 = bd([[1,2,-1,-2]]) # indirect doctest
Traceback (most recent call last):
...
ValueError: all blocks of {{-2, -1, 1, 2}} must be of size 2
"""
super(BrauerDiagram, self).check()
if any(len(i) != 2 for i in self):
raise ValueError("all blocks of %s must be of size 2"%(self))
def _repr_(self):
r"""
Return a string representation of a Brauer diagram.
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: bd = da.BrauerDiagrams(2)
sage: bd1 = bd([[1,2],[-1,-2]]); bd1
{{-2, -1}, {1, 2}}
"""
return self.parent().options._dispatch(self, '_repr_', 'display')
# add options to class
class options(GlobalOptions):
r"""
Set and display the global options for Brauer diagram (algebras). If no
parameters are set, then the function returns a copy of the options
dictionary.
The ``options`` to diagram algebras can be accessed as the method
:obj:`BrauerAlgebra.options` of :class:`BrauerAlgebra` and
related classes.
@OPTIONS@
The compact representation ``[A/B;pi]`` of the Brauer algebra diagram
(see [GL1996]_) has the following components:
- ``A`` -- is a list of pairs of positive elements (upper row) that
are connected,
- ``B`` -- is a list of pairs of negative elements (lower row) that
are connected, and
- ``pi`` -- is a permutation that is to be interpreted as the relative
order of the remaining elements in the top row and the bottom row.
EXAMPLES::
sage: R.<q> = QQ[]
sage: BA = BrauerAlgebra(2, q)
sage: E = BA([[1,2],[-1,-2]])
sage: E
B{{-2, -1}, {1, 2}}
sage: BA8 = BrauerAlgebra(8, q)
sage: BA8([[1,-4],[2,4],[3,8],[-7,-2],[5,7],[6,-1],[-3,-5],[-6,-8]])
B{{-8, -6}, {-7, -2}, {-5, -3}, {-4, 1}, {-1, 6}, {2, 4}, {3, 8}, {5, 7}}
sage: BrauerAlgebra.options.display = "compact"
sage: E
B[12/12;]
sage: BA8([[1,-4],[2,4],[3,8],[-7,-2],[5,7],[6,-1],[-3,-5],[-6,-8]])
B[24.38.57/35.27.68;21]
sage: BrauerAlgebra.options._reset()
"""
NAME = 'Brauer diagram'
module = 'sage.combinat.diagram_algebras'
option_class='BrauerDiagram'
display = dict(default="normal",
description='Specifies how the Brauer diagrams should be printed',
values=dict(normal="Using the normal representation",
compact="Using the compact representation"),
case_sensitive=False)
def _repr_normal(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: bd = da.BrauerDiagrams(2)
sage: bd([[1,2],[-1,-2]])._repr_normal()
'{{-2, -1}, {1, 2}}'
"""
return super(BrauerDiagram, self)._repr_()
def _repr_compact(self):
"""
Return a compact string representation of ``self``.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: bd = da.BrauerDiagrams(2)
sage: bd([[1,2],[-1,-2]])._repr_compact()
'[12/12;]'
sage: bd([[1,-2],[2,-1]])._repr_compact()
'[/;21]'
sage: bd = da.BrauerDiagrams(7)
sage: bd([[1,4],[6,7], [-2,-6],[-5,-7], [2,-4],[3,-1],[5,-3]])._repr_compact()
'[14.67/26.57;312]'
"""
(top, bot, thru) = self.involution_permutation_triple()
bot.reverse()
s1 = ".".join("".join(str(b) for b in block) for block in top)
s2 = ".".join("".join(str(abs(k)) for k in sorted(block,reverse=True))
for block in bot)
s3 = "".join(str(x) for x in thru)
return "[{}/{};{}]".format(s1,s2,s3)
def involution_permutation_triple(self, curt=True):
r"""
Return the involution permutation triple of ``self``.
From Graham-Lehrer (see :class:`BrauerDiagrams`), a Brauer diagram
is a triple `(D_1, D_2, \pi)`, where:
- `D_1` is a partition of the top nodes;
- `D_2` is a partition of the bottom nodes;
- `\pi` is the induced permutation on the free nodes.
INPUT:
- ``curt`` -- (default: ``True``) if ``True``, return the bijection
on the free nodes in one-line notation (standardized to look like a
permutation); otherwise return the honest mapping, a list of pairs
`(i, -j)` describing the bijection on the free nodes
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: bd = da.BrauerDiagrams(3)
sage: elm = bd([[1,2],[-2,-3],[3,-1]])
sage: elm.involution_permutation_triple()
([(1, 2)], [(-3, -2)], [1])
sage: elm.involution_permutation_triple(curt=False)
([(1, 2)], [(-3, -2)], [[3, -1]])
"""
diagram = self.diagram()
top = []
bottom = []
for v in diagram:
if min(v)>0:
top+=[v]
if max(v)<0:
bottom+=[v]
if curt:
perm = self.perm()
else:
perm = self.bijection_on_free_nodes()
return (top,bottom,perm)
def bijection_on_free_nodes(self, two_line=False):
r"""
Return the induced bijection, as a list of `(x, f(x))` values,
from the free nodes at the top of the Brauer diagram to the free
nodes at the bottom of ``self``.
OUTPUT:
If ``two_line`` is ``True``, then the output is the induced
bijection as a two-row list ``(inputs, outputs)``.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: bd = da.BrauerDiagrams(3)
sage: elm = bd([[1,2],[-2,-3],[3,-1]])
sage: elm.bijection_on_free_nodes()
[[3, -1]]
sage: elm2 = bd([[1,-2],[2,-3],[3,-1]])
sage: elm2.bijection_on_free_nodes(two_line=True)
[[1, 2, 3], [-2, -3, -1]]
"""
terms = sorted(sorted(list(v), reverse=True) for v in self.diagram()
if max(v) > 0 and min(v) < 0)
if two_line:
terms = [[t[i] for t in terms] for i in range(2)]
return terms
def perm(self):
r"""
Return the induced bijection on the free nodes of ``self`` in
one-line notation, re-indexed and treated as a permutation.
.. SEEALSO::
:meth:`bijection_on_free_nodes`
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: bd = da.BrauerDiagrams(3)
sage: elm = bd([[1,2],[-2,-3],[3,-1]])
sage: elm.perm()
[1]
"""
long_form = self.bijection_on_free_nodes()
if not long_form:
return long_form
short_form = [abs(v[1]) for v in long_form]
# given any list [i1,i2,...,ir] with distinct positive integer entries,
# return naturally associated permutation of [r].
# probably already defined somewhere in Permutations/Compositions/list/etc.
std = list(range(1, len(short_form) + 1))
j = 0
for i in range(max(short_form)+1):
if i in short_form:
j += 1
std[short_form.index(i)] = j
return std
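# Illustrative trace of the standardization loop above (sketch, not a doctest):
# for short_form = [3, 1] the loop hits i = 1 first (j becomes 1, std[1] = 1)
# and then i = 3 (j becomes 2, std[0] = 2), so std = [2, 1], the relative
# order of the entries of short_form.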
def is_elementary_symmetric(self):
r"""
Check if ``self`` is elementary symmetric.
Let `(D_1, D_2, \pi)` be the Graham-Lehrer representation
of the Brauer diagram `d`. We say `d` is *elementary symmetric*
if `D_1 = D_2` and `\pi` is the identity.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: bd = da.BrauerDiagrams(3)
sage: elm = bd([[1,2],[-1,-2],[3,-3]])
sage: elm.is_elementary_symmetric()
True
sage: elm2 = bd([[1,2],[-1,-3],[3,-2]])
sage: elm2.is_elementary_symmetric()
False
"""
(D1,D2,pi) = self.involution_permutation_triple()
D1 = sorted(sorted(abs(y) for y in x) for x in D1)
D2 = sorted(sorted(abs(y) for y in x) for x in D2)
return D1 == D2 and pi == list(range(1,len(pi)+1))
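# Sketch of the check above on the first doctest (illustrative): for
# {{1, 2}, {-1, -2}, {3, -3}} the triple gives D1 = [[1, 2]] and D2 = [[1, 2]]
# after taking absolute values, and pi = [1] is the identity, so the method
# returns True.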
class AbstractPartitionDiagrams(Parent, UniqueRepresentation):
r"""
This is an abstract base class for partition diagrams.
The primary use of this class is to serve as basis keys for
diagram algebras, but diagrams also have properties in their
own right. Furthermore, this class is meant to be extended to
create more efficient contains methods.
INPUT:
- ``order`` -- integer or integer `+ 1/2`; the order of the diagrams
- ``category`` -- (default: ``FiniteEnumeratedSets()``); the category
All concrete classes should implement attributes
- ``_name`` -- the name of the class
- ``_diagram_func`` -- an iterator function that takes the order
as its only input
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: pd = da.PartitionDiagrams(2)
sage: pd
Partition diagrams of order 2
sage: pd.an_element() in pd
True
sage: elm = pd([[1,2],[-1,-2]])
sage: elm in pd
True
"""
Element = AbstractPartitionDiagram
def __init__(self, order, category=None):
r"""
Initialize ``self``.
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: pd = da.AbstractPartitionDiagrams(2)
sage: pd.category()
Category of finite enumerated sets
sage: pd = da.AbstractPartitionDiagrams(2, Sets().Finite())
sage: pd.category()
Category of finite sets
sage: pd = da.PartitionDiagrams(2)
sage: TestSuite(pd).run()
sage: bd = da.BrauerDiagrams(2)
sage: TestSuite(bd).run()
sage: td = da.TemperleyLiebDiagrams(2)
sage: TestSuite(td).run()
sage: pld = da.PlanarDiagrams(2)
sage: TestSuite(pld).run()
sage: id = da.IdealDiagrams(2)
sage: TestSuite(id).run()
"""
if category is None:
category = FiniteEnumeratedSets()
Parent.__init__(self, category=category)
if order in ZZ:
self.order = ZZ(order)
base_set = frozenset(list(range(1,order+1)) + list(range(-order,0)))
else:
#order is a half-integer.
self.order = QQ(order)
base_set = frozenset(list(range(1,ZZ(ZZ(1)/ZZ(2) + order)+1))
+ list(range(ZZ(-ZZ(1)/ZZ(2) - order),0)))
self._set = base_set
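# Illustrative note (sketch): for order 3/2 the half-integer branch above
# builds the base set {1, 2, -1, -2}, i.e. a half-integer order k + 1/2 uses
# k + 1 nodes per row; the doctests below show that in the Brauer and
# Temperley-Lieb cases the extra node k + 1 is always matched with -(k + 1).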
def _repr_(self):
r"""
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: da.PartitionDiagrams(2)
Partition diagrams of order 2
"""
return "{} diagrams of order {}".format(self._name, self.order)
def __iter__(self):
r"""
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: list(da.PartitionDiagrams(2))
[{{-2, -1, 1, 2}},
{{-2, 1, 2}, {-1}},
{{-2}, {-1, 1, 2}},
{{-2, -1}, {1, 2}},
{{-2}, {-1}, {1, 2}},
{{-2, -1, 1}, {2}},
{{-2, 1}, {-1, 2}},
{{-2, 1}, {-1}, {2}},
{{-2, 2}, {-1, 1}},
{{-2, -1, 2}, {1}},
{{-2, 2}, {-1}, {1}},
{{-2}, {-1, 1}, {2}},
{{-2}, {-1, 2}, {1}},
{{-2, -1}, {1}, {2}},
{{-2}, {-1}, {1}, {2}}]
sage: list(da.PartitionDiagrams(3/2))
[{{-2, -1, 1, 2}},
{{-2, 1, 2}, {-1}},
{{-2, 2}, {-1, 1}},
{{-2, -1, 2}, {1}},
{{-2, 2}, {-1}, {1}}]
sage: list(da.BrauerDiagrams(5/2))
[{{-3, 3}, {-2, -1}, {1, 2}},
{{-3, 3}, {-2, 1}, {-1, 2}},
{{-3, 3}, {-2, 2}, {-1, 1}}]
sage: list(da.BrauerDiagrams(2))
[{{-2, -1}, {1, 2}}, {{-2, 1}, {-1, 2}}, {{-2, 2}, {-1, 1}}]
sage: list(da.TemperleyLiebDiagrams(5/2))
[{{-3, 3}, {-2, -1}, {1, 2}}, {{-3, 3}, {-2, 2}, {-1, 1}}]
sage: list(da.TemperleyLiebDiagrams(2))
[{{-2, -1}, {1, 2}}, {{-2, 2}, {-1, 1}}]
sage: list(da.PlanarDiagrams(3/2))
[{{-2, -1, 1, 2}},
{{-2, 1, 2}, {-1}},
{{-2, 2}, {-1, 1}},
{{-2, -1, 2}, {1}},
{{-2, 2}, {-1}, {1}}]
sage: list(da.PlanarDiagrams(2))
[{{-2, -1, 1, 2}},
{{-2, 1, 2}, {-1}},
{{-2}, {-1, 1, 2}},
{{-2, -1}, {1, 2}},
{{-2}, {-1}, {1, 2}},
{{-2, -1, 1}, {2}},
{{-2, 1}, {-1}, {2}},
{{-2, 2}, {-1, 1}},
{{-2, -1, 2}, {1}},
{{-2, 2}, {-1}, {1}},
{{-2}, {-1, 1}, {2}},
{{-2}, {-1, 2}, {1}},
{{-2, -1}, {1}, {2}},
{{-2}, {-1}, {1}, {2}}]
sage: list(da.IdealDiagrams(3/2))
[{{-2, -1, 1, 2}},
{{-2, 1, 2}, {-1}},
{{-2, -1, 2}, {1}},
{{-2, 2}, {-1}, {1}}]
sage: list(da.IdealDiagrams(2))
[{{-2, -1, 1, 2}},
{{-2, 1, 2}, {-1}},
{{-2}, {-1, 1, 2}},
{{-2, -1}, {1, 2}},
{{-2}, {-1}, {1, 2}},
{{-2, -1, 1}, {2}},
{{-2, 1}, {-1}, {2}},
{{-2, -1, 2}, {1}},
{{-2, 2}, {-1}, {1}},
{{-2}, {-1, 1}, {2}},
{{-2}, {-1, 2}, {1}},
{{-2, -1}, {1}, {2}},
{{-2}, {-1}, {1}, {2}}]
"""
# The _diagram_func gets set as a method, but we want to
# treat it like an attribute, so we call the underlying
# __func__.
for i in self._diagram_func.__func__(self.order):
yield self.element_class(self, i)
def __contains__(self, obj):
r"""
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: pd = da.PartitionDiagrams(2)
sage: pd.an_element() in pd
True
sage: elm = pd([[1,2],[-1,-2]])
sage: elm in pd # indirect doctest
True
"""
if not hasattr(obj, '_base_diagram'):
try:
obj = self._element_constructor_(obj)
except (ValueError, TypeError):
return False
if obj.base_diagram():
tst = sorted(flatten(obj.base_diagram()))
if len(tst) % 2 or tst != list(range(-len(tst)//2,0)) + list(range(1,len(tst)//2+1)):
return False
return True
return self.order == 0
def _element_constructor_(self, d):
r"""
Construct an element of ``self``.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: pd = da.AbstractPartitionDiagrams(2)
sage: elm = pd( [[1,2], [-1,-2]] ); elm
{{-2, -1}, {1, 2}}
sage: pd( [{1,2}, {-1,-2}] ) == elm
True
sage: pd( ((1,2), (-1,-2)) ) == elm
True
sage: pd( SetPartition([[1,2], [-1,-2]]) ) == elm
True
sage: bd = da.BrauerDiagrams(2)
sage: bd( [[1,2],[-1,-2]] )
{{-2, -1}, {1, 2}}
"""
return self.element_class(self, d)
class PartitionDiagrams(AbstractPartitionDiagrams):
r"""
This class represents all partition diagrams of integer or integer
`+ 1/2` order.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: pd = da.PartitionDiagrams(1); pd
Partition diagrams of order 1
sage: pd.list()
[{{-1, 1}}, {{-1}, {1}}]
sage: pd = da.PartitionDiagrams(3/2); pd
Partition diagrams of order 3/2
sage: pd.list()
[{{-2, -1, 1, 2}},
{{-2, 1, 2}, {-1}},
{{-2, 2}, {-1, 1}},
{{-2, -1, 2}, {1}},
{{-2, 2}, {-1}, {1}}]
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: pd = da.PartitionDiagrams(3)
sage: pd.an_element() in pd
True
sage: pd.cardinality() == len(pd.list())
True
sage: pd = da.PartitionDiagrams(5/2)
sage: pd.an_element() in pd
True
sage: pd.cardinality() == len(pd.list())
True
"""
Element = PartitionDiagram
_name = "Partition"
_diagram_func = partition_diagrams
def cardinality(self):
r"""
The cardinality of partition diagrams of integer or half-integer
order `n` is the `2n`-th Bell number.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: pd = da.PartitionDiagrams(3)
sage: pd.cardinality()
203
sage: pd = da.PartitionDiagrams(7/2)
sage: pd.cardinality()
877
"""
return bell_number(ZZ(2 * self.order))
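# Worked arithmetic for the doctests above (illustrative): order 3 gives
# bell_number(6) = 203 and order 7/2 gives bell_number(7) = 877.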
class BrauerDiagrams(AbstractPartitionDiagrams):
r"""
This class represents all Brauer diagrams of integer or integer
`+1/2` order. For more information on Brauer diagrams,
see :class:`BrauerAlgebra`.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: bd = da.BrauerDiagrams(2); bd
Brauer diagrams of order 2
sage: bd.list()
[{{-2, -1}, {1, 2}}, {{-2, 1}, {-1, 2}}, {{-2, 2}, {-1, 1}}]
sage: bd = da.BrauerDiagrams(5/2); bd
Brauer diagrams of order 5/2
sage: bd.list()
[{{-3, 3}, {-2, -1}, {1, 2}},
{{-3, 3}, {-2, 1}, {-1, 2}},
{{-3, 3}, {-2, 2}, {-1, 1}}]
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: bd = da.BrauerDiagrams(3)
sage: bd.an_element() in bd
True
sage: bd.cardinality() == len(bd.list())
True
sage: bd = da.BrauerDiagrams(5/2)
sage: bd.an_element() in bd
True
sage: bd.cardinality() == len(bd.list())
True
These diagrams also come equipped with a compact representation based
on their bipartition triple representation. See the
:meth:`from_involution_permutation_triple` method for more information.
::
sage: bd = da.BrauerDiagrams(3)
sage: bd.options.display="compact"
sage: bd.list()
[[12/12;1],
[13/12;1],
[23/12;1],
[23/13;1],
[23/23;1],
[/;132],
[/;231],
[/;321],
[13/13;1],
[12/13;1],
[12/23;1],
[13/23;1],
[/;312],
[/;213],
[/;123]]
sage: bd.options._reset()
"""
Element = BrauerDiagram
options = BrauerDiagram.options
_name = "Brauer"
_diagram_func = brauer_diagrams
def __contains__(self, obj):
r"""
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: bd = da.BrauerDiagrams(2)
sage: bd.an_element() in bd
True
sage: bd([[1,2],[-1,-2]]) in bd
True
sage: [[1,2,-1,-2]] in bd
False
sage: bd = da.BrauerDiagrams(3/2)
sage: bd.an_element() in bd
True
"""
if self.order in ZZ:
r = ZZ(self.order)
else:
r = ZZ(self.order + ZZ(1)/ZZ(2))
return super(BrauerDiagrams, self).__contains__(obj) and [len(i) for i in obj] == [2]*r
def cardinality(self):
r"""
Return the cardinality of ``self``.
The number of Brauer diagrams of integer order `k` is `(2k-1)!!`;
for half-integer order `k + 1/2` it is again `(2k-1)!!`.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: bd = da.BrauerDiagrams(3)
sage: bd.cardinality()
15
sage: bd = da.BrauerDiagrams(7/2)
sage: bd.cardinality()
15
"""
if self.order in ZZ:
return (2 * ZZ(self.order) -1 ).multifactorial(2)
else:
return (2 * ZZ(self.order - 1/2) - 1).multifactorial(2)
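# Worked arithmetic for the doctests above (illustrative): order 3 gives
# (2*3 - 1)!! = 5*3*1 = 15, and order 7/2 reduces to the order-3 count
# because the extra node 4 must be matched with -4, giving 15 again.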
def symmetric_diagrams(self, l=None, perm=None):
r"""
Return the list of Brauer diagrams with symmetric placement of `l` arcs,
and with free nodes permuted according to `perm`.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: bd = da.BrauerDiagrams(4)
sage: bd.symmetric_diagrams(l=1, perm=[2,1])
[{{-4, -2}, {-3, 1}, {-1, 3}, {2, 4}},
{{-4, -3}, {-2, 1}, {-1, 2}, {3, 4}},
{{-4, -1}, {-3, 2}, {-2, 3}, {1, 4}},
{{-4, 2}, {-3, -1}, {-2, 4}, {1, 3}},
{{-4, 3}, {-3, 4}, {-2, -1}, {1, 2}},
{{-4, 1}, {-3, -2}, {-1, 4}, {2, 3}}]
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: bd = da.BrauerDiagrams(3/2)
sage: bd.symmetric_diagrams(l=1, perm=[2,1])
Traceback (most recent call last):
...
NotImplementedError: only implemented for integer order, not for order 3/2
"""
# perm = permutation on free nodes
# l = number of arcs
if self.order not in ZZ:
raise NotImplementedError("only implemented for integer order,"
" not for order %s" % (self.order))
n = ZZ(self.order)
if l is None:
l = 0
if perm is None:
perm = list(range(1, n+1-2*l))
out = []
partition_shape = [2]*l + [1]*(n-2*l)
for sp in SetPartitions(n, partition_shape):
sp0 = [block for block in sp if len(block) == 2]
diag = self.from_involution_permutation_triple((sp0,sp0,perm))
out.append(diag)
return out
def from_involution_permutation_triple(self, D1_D2_pi):
r"""
Construct a Brauer diagram of ``self`` from an involution
permutation triple.
A Brauer diagram can be represented as a triple where the first
entry is a list of arcs on the top row of the diagram, the second
entry is a list of arcs on the bottom row of the diagram, and the
third entry is a permutation on the remaining nodes. This triple
is called the *involution permutation triple*. For more
information, see [GL1996]_.
INPUT:
- ``D1_D2_pi`` -- a list or tuple where the first entry is a list of
arcs on the top of the diagram, the second entry is a list of arcs
on the bottom of the diagram, and the third entry is a permutation
on the free nodes.
REFERENCES:
.. [GL1996] \J.J. Graham and G.I. Lehrer, Cellular algebras.
Inventiones mathematicae 123 (1996), 1--34.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: bd = da.BrauerDiagrams(4)
sage: bd.from_involution_permutation_triple([[[1,2]],[[3,4]],[2,1]])
{{-4, -3}, {-2, 3}, {-1, 4}, {1, 2}}
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: bd = da.BrauerDiagrams(5/2)
sage: bd.from_involution_permutation_triple([[[1,2]],[[3,4]],[2,1]])
Traceback (most recent call last):
...
NotImplementedError: only implemented for integer order, not for order 5/2
"""
if self.order not in ZZ:
raise NotImplementedError("only implemented for integer order,"
" not for order %s" % (self.order))
try:
(D1,D2,pi) = tuple(D1_D2_pi)
except ValueError:
raise ValueError("argument %s not in correct form; must be a tuple (D1, D2, pi)" % D1_D2_pi)
D1 = [[abs(x) for x in b] for b in D1 if len(b) == 2] # not needed if argument correctly passed at outset.
D2 = [[abs(x) for x in b] for b in D2 if len(b) == 2] # ditto.
nD2 = [[-i for i in b] for b in D2]
pi = list(pi)
nn = set(range(1, self.order+1))
dom = sorted(nn.difference(flatten([list(x) for x in D1])))
rng = sorted(nn.difference(flatten([list(x) for x in D2])))
SP0 = D1 + nD2
if len(pi) != len(dom) or pi not in Permutations():
raise ValueError("in the tuple (D1, D2, pi)={}, pi must be a permutation of {} (indicating a permutation on the free nodes of the diagram)".format(
(D1,D2,pi), self.order-2*len(D1)))
Perm = [[dom[i], -rng[val-1]] for i,val in enumerate(pi)]
SP = SP0 + Perm
return self(SP) # could pass 'SetPartition' ?
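# Illustrative trace of the doctest above (sketch): with D1 = [[1, 2]],
# D2 = [[3, 4]] and pi = [2, 1] in BrauerDiagrams(4), the free nodes are
# dom = [3, 4] on top and rng = [1, 2] on the bottom, so Perm becomes
# [[3, -2], [4, -1]] and the diagram is {{-4, -3}, {-2, 3}, {-1, 4}, {1, 2}}.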
class TemperleyLiebDiagrams(AbstractPartitionDiagrams):
r"""
All Temperley-Lieb diagrams of integer or integer `+1/2` order.
For more information on Temperley-Lieb diagrams, see
:class:`TemperleyLiebAlgebra`.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: td = da.TemperleyLiebDiagrams(3); td
Temperley Lieb diagrams of order 3
sage: td.list()
[{{-3, 3}, {-2, -1}, {1, 2}},
{{-3, 1}, {-2, -1}, {2, 3}},
{{-3, -2}, {-1, 1}, {2, 3}},
{{-3, -2}, {-1, 3}, {1, 2}},
{{-3, 3}, {-2, 2}, {-1, 1}}]
sage: td = da.TemperleyLiebDiagrams(5/2); td
Temperley Lieb diagrams of order 5/2
sage: td.list()
[{{-3, 3}, {-2, -1}, {1, 2}}, {{-3, 3}, {-2, 2}, {-1, 1}}]
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: td = da.TemperleyLiebDiagrams(3)
sage: td.an_element() in td
True
sage: td.cardinality() == len(td.list())
True
sage: td = da.TemperleyLiebDiagrams(7/2)
sage: td.an_element() in td
True
sage: td.cardinality() == len(td.list())
True
"""
Element = TemperleyLiebDiagram
_name = "Temperley Lieb"
_diagram_func = temperley_lieb_diagrams
def cardinality(self):
r"""
Return the cardinality of ``self``.
The number of Temperley--Lieb diagrams of integer order `k` is the
`k`-th Catalan number; for half-integer order `k + 1/2` it is again
the `k`-th Catalan number.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: td = da.TemperleyLiebDiagrams(3)
sage: td.cardinality()
5
"""
if self.order in ZZ:
return catalan_number(ZZ(self.order))
else:
return catalan_number(ZZ(self.order - 1/2))
def __contains__(self, obj):
r"""
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: td = da.TemperleyLiebDiagrams(2)
sage: td.an_element() in td
True
sage: td([[1,2],[-1,-2]]) in td
True
sage: [[1,2],[-1,-2]] in td
True
sage: [[1,-2],[-1,2]] in td
False
"""
if not hasattr(obj, '_base_diagram'):
try:
obj = self._element_constructor_(obj)
except (ValueError, TypeError):
return False
return obj in BrauerDiagrams(self.order) and obj.is_planar()
class PlanarDiagrams(AbstractPartitionDiagrams):
r"""
All planar diagrams of integer or integer `+1/2` order.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: pld = da.PlanarDiagrams(1); pld
Planar diagrams of order 1
sage: pld.list()
[{{-1, 1}}, {{-1}, {1}}]
sage: pld = da.PlanarDiagrams(3/2); pld
Planar diagrams of order 3/2
sage: pld.list()
[{{-2, -1, 1, 2}},
{{-2, 1, 2}, {-1}},
{{-2, 2}, {-1, 1}},
{{-2, -1, 2}, {1}},
{{-2, 2}, {-1}, {1}}]
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: pld = da.PlanarDiagrams(3)
sage: pld.an_element() in pld
True
sage: pld.cardinality() == len(pld.list())
True
sage: pld = da.PlanarDiagrams(5/2)
sage: pld.an_element() in pld
True
sage: pld.cardinality() == len(pld.list())
True
"""
Element = PlanarDiagram
_name = "Planar"
_diagram_func = planar_diagrams
def cardinality(self):
r"""
Return the cardinality of ``self``.
The number of all planar diagrams of order `k` is the
`2k`-th Catalan number.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: pld = da.PlanarDiagrams(3)
sage: pld.cardinality()
132
"""
return catalan_number(2*self.order)
def __contains__(self, obj):
r"""
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: pld = da.PlanarDiagrams(2)
sage: pld.an_element() in pld
True
sage: pld([[1,2],[-1,-2]]) in pld
True
sage: [[1,2],[-1,-2]] in pld
True
sage: [[1,-2],[-1,2]] in pld
False
"""
if not hasattr(obj, '_base_diagram'):
try:
obj = self._element_constructor_(obj)
except (ValueError, TypeError):
return False
return super(PlanarDiagrams, self).__contains__(obj)
class IdealDiagrams(AbstractPartitionDiagrams):
r"""
All "ideal" diagrams of integer or integer `+1/2` order.
If `k` is an integer then an ideal diagram of order `k` is a partition
diagram of order `k` with propagating number less than `k`.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: id = da.IdealDiagrams(3)
sage: id.an_element() in id
True
sage: id.cardinality() == len(id.list())
True
sage: da.IdealDiagrams(3/2).list()
[{{-2, -1, 1, 2}},
{{-2, 1, 2}, {-1}},
{{-2, -1, 2}, {1}},
{{-2, 2}, {-1}, {1}}]
"""
Element = IdealDiagram
_name = "Ideal"
_diagram_func = ideal_diagrams
def __contains__(self, obj):
r"""
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: id = da.IdealDiagrams(2)
sage: id.an_element() in id
True
sage: id([[1,2],[-1,-2]]) in id
True
sage: [[1,2],[-1,-2]] in id
True
sage: [[1,-2],[-1,2]] in id
False
"""
if not hasattr(obj, '_base_diagram'):
try:
obj = self._element_constructor_(obj)
except (ValueError, TypeError):
return False
return super(IdealDiagrams, self).__contains__(obj) and obj.propagating_number() < self.order
class DiagramAlgebra(CombinatorialFreeModule):
r"""
Abstract base class for diagram algebras; it is not designed to be
used directly.
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: R.<x> = QQ[]
sage: D = da.DiagramAlgebra(2, x, R, 'P', da.PartitionDiagrams(2))
sage: list(D.basis())
[P{{-2, -1, 1, 2}},
P{{-2, 1, 2}, {-1}},
P{{-2}, {-1, 1, 2}},
P{{-2, -1}, {1, 2}},
P{{-2}, {-1}, {1, 2}},
P{{-2, -1, 1}, {2}},
P{{-2, 1}, {-1, 2}},
P{{-2, 1}, {-1}, {2}},
P{{-2, 2}, {-1, 1}},
P{{-2, -1, 2}, {1}},
P{{-2, 2}, {-1}, {1}},
P{{-2}, {-1, 1}, {2}},
P{{-2}, {-1, 2}, {1}},
P{{-2, -1}, {1}, {2}},
P{{-2}, {-1}, {1}, {2}}]
"""
def __init__(self, k, q, base_ring, prefix, diagrams, category=None):
r"""
Initialize ``self``.
INPUT:
- ``k`` -- the rank
- ``q`` -- the deformation parameter
- ``base_ring`` -- the base ring
- ``prefix`` -- the prefix of our monomials
- ``diagrams`` -- the object representing all the diagrams
(i.e. indices for the basis elements)
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: R.<x> = QQ[]
sage: D = da.DiagramBasis(2, x, R, 'P', da.PartitionDiagrams(2))
sage: TestSuite(D).run()
"""
self._prefix = prefix
self._q = base_ring(q)
self._k = k
self._base_diagrams = diagrams
cat = AssociativeAlgebras(base_ring.category()).FiniteDimensional().WithBasis()
if isinstance(self, UnitDiagramMixin):
cat = cat.Unital()
category = cat.or_subcategory(category)
CombinatorialFreeModule.__init__(self, base_ring, diagrams,
category=category, prefix=prefix, bracket=False)
def _element_constructor_(self, set_partition):
r"""
Construct an element of ``self``.
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: R.<x> = QQ[]
sage: D = da.DiagramAlgebra(2, x, R, 'P', da.PartitionDiagrams(2))
sage: sp = da.to_set_partition( [[1,2], [-1,-2]] )
sage: b_elt = D(sp); b_elt
P{{-2, -1}, {1, 2}}
sage: b_elt in D
True
sage: D([[1,2],[-1,-2]]) == b_elt
True
sage: D([{1,2},{-1,-2}]) == b_elt
True
sage: S = SymmetricGroupAlgebra(R,2)
sage: D(S([2,1]))
P{{-2, 1}, {-1, 2}}
sage: D2 = da.DiagramAlgebra(2, x, R, 'P', da.PlanarDiagrams(2))
sage: D2(S([1,2]))
P{{-2, 2}, {-1, 1}}
sage: D2(S([2,1]))
Traceback (most recent call last):
...
ValueError: the diagram {{-2, 1}, {-1, 2}} must be planar
"""
if self.basis().keys().is_parent_of(set_partition):
return self.basis()[set_partition]
if isinstance(set_partition, SymmetricGroupAlgebra_n.Element):
return self._apply_module_morphism(set_partition, self._perm_to_Blst, self)
sp = self._base_diagrams(set_partition) # attempt conversion
if sp in self.basis().keys():
return self.basis()[sp]
raise ValueError("invalid input of {0}".format(set_partition))
def __getitem__(self, d):
"""
Get the basis item of ``self`` indexed by ``d``.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: R.<x> = QQ[]
sage: D = da.DiagramAlgebra(2, x, R, 'P', da.PartitionDiagrams(2))
sage: sp = da.PartitionDiagrams(2)( [[1,2], [-1,-2]] )
sage: D[sp]
P{{-2, -1}, {1, 2}}
sage: D[[1,-1,2,-2]]
P{{-2, -1, 1, 2}}
sage: D3 = da.DiagramAlgebra(3, x, R, 'P', da.PartitionDiagrams(3))
sage: da.PartitionDiagrams(3)( [[1,2], [-1,-2]] )
Traceback (most recent call last):
...
ValueError: {{-2, -1}, {1, 2}} does not represent two rows of vertices of order 3
sage: D3[sp]
P{{-3, 3}, {-2, -1}, {1, 2}}
sage: D3[[1,-1,2,-2]]
P{{-3, 3}, {-2, -1, 1, 2}}
sage: D3[[1,2,-2]]
P{{-3, 3}, {-2, 1, 2}, {-1}}
sage: P = PartitionAlgebra(3,x)
sage: P[[1]]
P{{-3, 3}, {-2, 2}, {-1}, {1}}
"""
if isinstance(d, (list, tuple)) and all(a in ZZ for a in d):
d = [d]
d = self._base_diagrams(to_set_partition(d, self._k))
if d in self.basis().keys():
return self.basis()[d]
raise ValueError("{0} is not an index of a basis element".format(d))
def _perm_to_Blst(self, w):
"""
Convert the permutation ``w`` to an element of ``self``.
EXAMPLES::
sage: R.<x> = QQ[]
sage: S = SymmetricGroupAlgebra(R,2)
sage: import sage.combinat.diagram_algebras as da
sage: D2 = da.DiagramAlgebra(2, x, R, 'P', da.PlanarDiagrams(2))
sage: D2._perm_to_Blst([2,1])
Traceback (most recent call last):
...
ValueError: the diagram {{-2, 1}, {-1, 2}} must be planar
"""
## 'w' is a permutation in one-line notation;
## turn it into an expression suitable for the element constructor.
u = sorted(w)
p = [[u[i],-x] for i,x in enumerate(w)]
if len(u) < self.order():
p1 = [[j,-j] for j in range(len(u)+1,self.order()+1)]
p.extend(p1)
return self[p]
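# Rough trace (illustrative): for w = [2, 1] in an order-3 algebra, u = [1, 2],
# p = [[1, -2], [2, -1]], and the padding step appends [3, -3], so the
# returned basis element is indexed by {{-3, 3}, {-2, 1}, {-1, 2}}.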
def _diag_to_Blst(self, d):
r"""
Return an element of ``self`` from the input ``d``.
If ``d`` is a partial diagram of `\{1,2,\ldots,k,-1,-2,\ldots,-k\}`
then the set partition is filled in by adding the parts `\{i,-i\}`
if possible, and singleton sets for the remaining parts.
INPUT:
- ``d`` -- an iterable that behaves like
:class:`AbstractPartitionDiagram` or :class:`Permutation`
EXAMPLES::
sage: R.<x> = QQ[]
sage: PartitionAlgebra(3, x, R)._diag_to_Blst([[1,2], [-3,-1]])
P{{-3, -1}, {-2}, {1, 2}, {3}}
sage: BrauerAlgebra(4, x, R)._diag_to_Blst([3,1,2])
B{{-4, 4}, {-3, 1}, {-2, 3}, {-1, 2}}
sage: import sage.combinat.diagram_algebras as da
sage: D3 = da.DiagramAlgebra(3, x, R, 'P', da.PlanarDiagrams(3))
sage: D3._diag_to_Blst([[1, 2], [-2,-1]])
P{{-3, 3}, {-2, -1}, {1, 2}}
sage: D3._diag_to_Blst([[-1,2], [-2,1]])
Traceback (most recent call last):
...
ValueError: the diagram {{-3, 3}, {-2, 1}, {-1, 2}} must be planar
sage: D3._diag_to_Blst([[-1,2], [-3,1]])
Traceback (most recent call last):
...
ValueError: the diagram {{-3, 1}, {-2}, {-1, 2}, {3}} must be planar
"""
d = list(d)
if not d:
return self.one()
if d[0] in ZZ:
return self._perm_to_Blst(d)
d = to_set_partition(d, self._k)
return self[self._base_diagrams(d)]
def order(self):
r"""
Return the order of ``self``.
The order of a partition algebra is defined as half of the number
of nodes in the diagrams.
EXAMPLES::
sage: q = var('q')
sage: PA = PartitionAlgebra(2, q)
sage: PA.order()
2
"""
return self._k
def set_partitions(self):
r"""
Return the collection of underlying set partitions indexing the
basis elements of a given diagram algebra.
.. TODO:: Is this really necessary? deprecate?
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: R.<x> = QQ[]
sage: D = da.DiagramAlgebra(2, x, R, 'P', da.PartitionDiagrams(2))
sage: list(D.set_partitions()) == list(da.PartitionDiagrams(2))
True
"""
return self.basis().keys()
def _latex_term(self, diagram):
r"""
Return `\LaTeX` representation of ``diagram`` to draw
diagram algebra element in latex using tikz.
EXAMPLES::
sage: R.<x> = ZZ[]
sage: P = PartitionAlgebra(2, x, R)
sage: latex(P([[1,2],[-2,-1]])) # indirect doctest
\begin{tikzpicture}[scale = 0.5,thick, baseline={(0,-1ex/2)}]
\tikzstyle{vertex} = [shape = circle, minimum size = 7pt, inner sep = 1pt]
\node[vertex] (G--2) at (1.5, -1) [shape = circle, draw] {};
\node[vertex] (G--1) at (0.0, -1) [shape = circle, draw] {};
\node[vertex] (G-1) at (0.0, 1) [shape = circle, draw] {};
\node[vertex] (G-2) at (1.5, 1) [shape = circle, draw] {};
\draw (G--2) .. controls +(-0.5, 0.5) and +(0.5, 0.5) .. (G--1);
\draw (G-1) .. controls +(0.5, -0.5) and +(-0.5, -0.5) .. (G-2);
\end{tikzpicture}
sage: latex(P.orbit_basis()([[1,2],[-2,-1]])) # indirect doctest
\begin{tikzpicture}[scale = 0.5,thick, baseline={(0,-1ex/2)}]
\tikzstyle{vertex} = [shape = circle, minimum size = 7pt, inner sep = 1pt]
\node[vertex] (G--2) at (1.5, -1) [shape = circle, draw, fill] {};
\node[vertex] (G--1) at (0.0, -1) [shape = circle, draw, fill] {};
\node[vertex] (G-1) at (0.0, 1) [shape = circle, draw, fill] {};
\node[vertex] (G-2) at (1.5, 1) [shape = circle, draw, fill] {};
\draw (G--2) .. controls +(-0.5, 0.5) and +(0.5, 0.5) .. (G--1);
\draw (G-1) .. controls +(0.5, -0.5) and +(-0.5, -0.5) .. (G-2);
\end{tikzpicture}
"""
# these allow the view command to work (maybe move them somewhere more appropriate?)
from sage.misc.latex import latex
latex.add_to_mathjax_avoid_list('tikzpicture')
latex.add_package_to_preamble_if_available('tikz')
if hasattr(self, '_fill'):
filled_str = ", fill"
else:
filled_str = ""
# Define the sign function
def sgn(x):
if x > 0:
return 1
if x < 0:
return -1
return 0
l1 = [] # list of blocks
l2 = [] # list of nodes
for i in list(diagram):
l1.append(list(i))
for j in list(i):
l2.append(j)
output = "\\begin{tikzpicture}[scale = 0.5,thick, baseline={(0,-1ex/2)}] \n\\tikzstyle{vertex} = [shape = circle, minimum size = 7pt, inner sep = 1pt] \n" #setup beginning of picture
for i in l2: #add nodes
output = output + "\\node[vertex] (G-{}) at ({}, {}) [shape = circle, draw{}] {{}}; \n".format(i, (abs(i)-1)*1.5, sgn(i), filled_str)
for i in l1: #add edges
if len(i) > 1:
l4 = list(i)
posList = []
negList = []
for i in l4: #sort list so rows are grouped together
if i > 0:
posList.append(i)
elif i < 0:
negList.append(i)
posList.sort()
negList.sort()
l4 = posList + negList
l5 = l4[:] # copy of l4, to be cyclically shifted below
for j in range(len(l5)):
l5[j-1] = l4[j] #create a permuted list
if len(l4) == 2:
l4.pop()
l5.pop() #pops to prevent duplicating edges
for j in zip(l4, l5):
xdiff = abs(j[1])-abs(j[0])
y1 = sgn(j[0])
y2 = sgn(j[1])
if y2-y1 == 0 and abs(xdiff) < 5: #if nodes are close to each other on same row
diffCo = (0.5+0.1*(abs(xdiff)-1)) # grows as the nodes get farther apart; ranges from 0.5 up to 0.8 here since abs(xdiff) < 5
outVec = (sgn(xdiff)*diffCo, -1*diffCo*y1)
inVec = (-1*diffCo*sgn(xdiff), -1*diffCo*y2)
elif y2-y1 != 0 and abs(xdiff) == 1: #if nodes are close enough curviness looks bad.
outVec = (sgn(xdiff)*0.75, -1*y1)
inVec = (-1*sgn(xdiff)*0.75, -1*y2)
else:
outVec = (sgn(xdiff)*1, -1*y1)
inVec = (-1*sgn(xdiff), -1*y2)
output = output + "\\draw (G-{}) .. controls +{} and +{} .. (G-{}); \n".format(j[0], outVec, inVec, j[1])
output = output + "\\end{tikzpicture} \n" #end picture
return output
# The following subclass provides a few additional methods for
# (sub)partition algebra elements.
class Element(CombinatorialFreeModule.Element):
r"""
An element of a diagram algebra.
This subclass provides a few additional methods for
partition algebra elements. Most element methods are
already implemented elsewhere.
"""
def diagram(self):
r"""
Return the underlying diagram of ``self`` if ``self`` is a basis
element. Raises an error if ``self`` is not a basis element.
EXAMPLES::
sage: R.<x> = ZZ[]
sage: P = PartitionAlgebra(2, x, R)
sage: elt = 3*P([[1,2],[-2,-1]])
sage: elt.diagram()
{{-2, -1}, {1, 2}}
"""
if len(self) != 1:
raise ValueError("this is only defined for basis elements")
PA = self.parent()
ans = self.support_of_term()
if ans not in PA.basis().keys():
raise ValueError("element should be keyed by a diagram")
return ans
def diagrams(self):
r"""
Return the diagrams in the support of ``self``.
EXAMPLES::
sage: R.<x> = ZZ[]
sage: P = PartitionAlgebra(2, x, R)
sage: elt = 3*P([[1,2],[-2,-1]]) + P([[1,2],[-2], [-1]])
sage: sorted(elt.diagrams(), key=str)
[{{-2, -1}, {1, 2}}, {{-2}, {-1}, {1, 2}}]
"""
return self.support()
class UnitDiagramMixin(object):
"""
Mixin class for diagram algebras that have the unit indexed by
the :func:`identity_set_partition`.
"""
@cached_method
def one_basis(self):
r"""
Return the basis index of the identity element of ``self``.
It is not called directly; instead one should use ``DA.one()`` if
``DA`` is a defined diagram algebra.
EXAMPLES::
sage: R.<x> = QQ[]
sage: P = PartitionAlgebra(2, x, R)
sage: P.one_basis()
{{-2, 2}, {-1, 1}}
"""
return self._base_diagrams(identity_set_partition(self._k))
class DiagramBasis(DiagramAlgebra):
"""
Abstract base class for diagram algebras in the diagram basis.
"""
def product_on_basis(self, d1, d2):
r"""
Return the product `D_{d_1} D_{d_2}` of two basis diagrams.
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: R.<x> = QQ[]
sage: D = da.DiagramBasis(2, x, R, 'P', da.PartitionDiagrams(2))
sage: sp = da.PartitionDiagrams(2)([[1,2],[-1,-2]])
sage: D.product_on_basis(sp, sp)
x*P{{-2, -1}, {1, 2}}
"""
if not self._indices.is_parent_of(d1):
d1 = self._indices(d1)
if not self._indices.is_parent_of(d2):
d2 = self._indices(d2)
(composite_diagram, loops_removed) = d1.compose(d2)
return self.term(composite_diagram, self._q**loops_removed)
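# Sketch of the doctest above (illustrative): stacking sp = {{1, 2}, {-1, -2}}
# on top of itself closes one loop in the middle row, so compose() returns
# (sp, 1) and the product is q**1 * P{sp}, i.e. x*P{{-2, -1}, {1, 2}}.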
class PartitionAlgebra(DiagramBasis, UnitDiagramMixin):
r"""
A partition algebra.
A partition algebra of rank `k` over a given ground ring `R` is an
algebra with (`R`-module) basis indexed by the collection of set
partitions of `\{1, \ldots, k, -1, \ldots, -k\}`. Each such set
partition can be represented by a graph on nodes `\{1, \ldots, k, -1,
\ldots, -k\}` arranged in two rows, with nodes `1, \ldots, k` in the
top row from left to right and with nodes `-1, \ldots, -k` in the
bottom row from left to right, and edges drawn such that the connected
components of the graph are precisely the parts of the set partition.
(This choice of edges is often not unique, and so there are often many
graphs representing one and the same set partition; the representation
nevertheless is useful and vivid. We often speak of "diagrams" to mean
graphs up to such equivalence of choices of edges; of course, we could
just as well speak of set partitions.)
There is not just one partition algebra of given rank over a given
ground ring, but rather a whole family of them, indexed by the
elements of `R`. More precisely, for every `q \in R`, the partition
algebra of rank `k` over `R` with parameter `q` is defined to be the
`R`-algebra with basis the collection of all set partitions of
`\{1, \ldots, k, -1, \ldots, -k\}`, where the product of two basis
elements is given by the rule
.. MATH::
a \cdot b = q^N (a \circ b),
where `a \circ b` is the composite set partition obtained by placing
the diagram (i.e., graph) of `a` above the diagram of `b`, identifying
the bottom row nodes of `a` with the top row nodes of `b`, and
omitting any closed "loops" in the middle. The number `N` is the
number of connected components formed by the omitted loops.
The parameter `q` is a deformation parameter. Taking `q = 1` produces
the semigroup algebra (over the base ring) of the partition monoid,
in which the product of two set partitions is simply given by their
composition.
The Iwahori--Hecke algebra of type `A` (with a single parameter) is
naturally a subalgebra of the partition algebra.
The partition algebra is regarded as an example of a "diagram algebra"
due to the fact that its natural basis is given by certain graphs
often called diagrams.
An excellent reference for partition algebras and their various
subalgebras (Brauer algebra, Temperley--Lieb algebra, etc) is the
paper [HR2005]_.
INPUT:
- ``k`` -- rank of the algebra
- ``q`` -- the deformation parameter `q`
OPTIONAL ARGUMENTS:
- ``base_ring`` -- (default ``None``) a ring containing ``q``; if
``None``, then Sage automatically chooses the parent of ``q``
- ``prefix`` -- (default ``"P"``) a label for the basis elements
EXAMPLES:
The following shorthand simultaneously defines the univariate polynomial
ring over the rationals as well as the variable ``x``::
sage: R.<x> = PolynomialRing(QQ)
sage: R
Univariate Polynomial Ring in x over Rational Field
sage: x
x
sage: x.parent() is R
True
We now define the partition algebra of rank `2` with parameter ``x``
over `\ZZ` in the usual (diagram) basis::
sage: R.<x> = ZZ[]
sage: A2 = PartitionAlgebra(2, x, R)
sage: A2
Partition Algebra of rank 2 with parameter x
over Univariate Polynomial Ring in x over Integer Ring
sage: A2.basis().keys()
Partition diagrams of order 2
sage: A2.basis().keys()([[-2, 1, 2], [-1]])
{{-2, 1, 2}, {-1}}
sage: A2.basis().list()
[P{{-2, -1, 1, 2}}, P{{-2, 1, 2}, {-1}},
P{{-2}, {-1, 1, 2}}, P{{-2, -1}, {1, 2}},
P{{-2}, {-1}, {1, 2}}, P{{-2, -1, 1}, {2}},
P{{-2, 1}, {-1, 2}}, P{{-2, 1}, {-1}, {2}},
P{{-2, 2}, {-1, 1}}, P{{-2, -1, 2}, {1}},
P{{-2, 2}, {-1}, {1}}, P{{-2}, {-1, 1}, {2}},
P{{-2}, {-1, 2}, {1}}, P{{-2, -1}, {1}, {2}},
P{{-2}, {-1}, {1}, {2}}]
sage: E = A2([[1,2],[-2,-1]]); E
P{{-2, -1}, {1, 2}}
sage: E in A2.basis().list()
True
sage: E^2
x*P{{-2, -1}, {1, 2}}
sage: E^5
x^4*P{{-2, -1}, {1, 2}}
sage: (A2([[2,-2],[-1,1]]) - 2*A2([[1,2],[-1,-2]]))^2
(4*x-4)*P{{-2, -1}, {1, 2}} + P{{-2, 2}, {-1, 1}}
Next, we construct an element::
sage: a2 = A2.an_element(); a2
3*P{{-2}, {-1, 1, 2}} + 2*P{{-2, -1, 1, 2}} + 2*P{{-2, 1, 2}, {-1}}
There is a natural embedding into partition algebras on more
elements, by adding identity strands::
sage: A4 = PartitionAlgebra(4, x, R)
sage: A4(a2)
3*P{{-4, 4}, {-3, 3}, {-2}, {-1, 1, 2}}
+ 2*P{{-4, 4}, {-3, 3}, {-2, -1, 1, 2}}
+ 2*P{{-4, 4}, {-3, 3}, {-2, 1, 2}, {-1}}
Thus, the empty partition corresponds to the identity::
sage: A4([])
P{{-4, 4}, {-3, 3}, {-2, 2}, {-1, 1}}
sage: A4(5)
5*P{{-4, 4}, {-3, 3}, {-2, 2}, {-1, 1}}
The group algebra of the symmetric group is a subalgebra::
sage: S3 = SymmetricGroupAlgebra(ZZ, 3)
sage: s3 = S3.an_element(); s3
[1, 2, 3] + 2*[1, 3, 2] + 3*[2, 1, 3] + [3, 1, 2]
sage: A4(s3)
P{{-4, 4}, {-3, 1}, {-2, 3}, {-1, 2}}
+ 2*P{{-4, 4}, {-3, 2}, {-2, 3}, {-1, 1}}
+ 3*P{{-4, 4}, {-3, 3}, {-2, 1}, {-1, 2}}
+ P{{-4, 4}, {-3, 3}, {-2, 2}, {-1, 1}}
sage: A4([2,1])
P{{-4, 4}, {-3, 3}, {-2, 1}, {-1, 2}}
Be careful not to confuse the embedding of the group algebra of
the symmetric group with the embedding of partial set partitions.
The latter are embedded by adding the parts `\{i,-i\}` if
possible, and singleton sets for the remaining parts::
sage: A4([[2,1]])
P{{-4, 4}, {-3, 3}, {-2}, {-1}, {1, 2}}
sage: A4([[-1,3],[-2,-3,1]])
P{{-4, 4}, {-3, -2, 1}, {-1, 3}, {2}}
Another subalgebra is the Brauer algebra, which has perfect
matchings as basis elements. The group algebra of the
symmetric group is in fact a subalgebra of the Brauer algebra::
sage: B3 = BrauerAlgebra(3, x, R)
sage: b3 = B3(s3); b3
B{{-3, 1}, {-2, 3}, {-1, 2}} + 2*B{{-3, 2}, {-2, 3}, {-1, 1}}
+ 3*B{{-3, 3}, {-2, 1}, {-1, 2}} + B{{-3, 3}, {-2, 2}, {-1, 1}}
An important basis of the partition algebra is the
:meth:`orbit basis <orbit_basis>`::
sage: O2 = A2.orbit_basis()
sage: o2 = O2([[1,2],[-1,-2]]) + O2([[1,2,-1,-2]]); o2
O{{-2, -1}, {1, 2}} + O{{-2, -1, 1, 2}}
The diagram basis element corresponds to the sum of all orbit
basis elements indexed by coarser set partitions::
sage: A2(o2)
P{{-2, -1}, {1, 2}}
We can convert back from the orbit basis to the diagram basis::
sage: o2 = O2.an_element(); o2
3*O{{-2}, {-1, 1, 2}} + 2*O{{-2, -1, 1, 2}} + 2*O{{-2, 1, 2}, {-1}}
sage: A2(o2)
3*P{{-2}, {-1, 1, 2}} - 3*P{{-2, -1, 1, 2}} + 2*P{{-2, 1, 2}, {-1}}
One can work with partition algebras using a symbol for the parameter,
leaving the base ring unspecified. This implies that the underlying
base ring is Sage's symbolic ring.
::
sage: q = var('q')
sage: PA = PartitionAlgebra(2, q); PA
Partition Algebra of rank 2 with parameter q over Symbolic Ring
sage: PA([[1,2],[-2,-1]])^2 == q*PA([[1,2],[-2,-1]])
True
sage: (PA([[2, -2], [1, -1]]) - 2*PA([[-2, -1], [1, 2]]))^2 == (4*q-4)*PA([[1, 2], [-2, -1]]) + PA([[2, -2], [1, -1]])
True
The identity element of the partition algebra is the set
partition `\{\{1,-1\}, \{2,-2\}, \ldots, \{k,-k\}\}`::
sage: P = PA.basis().list()
sage: PA.one()
P{{-2, 2}, {-1, 1}}
sage: PA.one() * P[7] == P[7]
True
sage: P[7] * PA.one() == P[7]
True
We now give some further examples of the use of the other arguments.
One may wish to "specialize" the parameter to a chosen element of
the base ring::
sage: R.<q> = RR[]
sage: PA = PartitionAlgebra(2, q, R, prefix='B')
sage: PA
Partition Algebra of rank 2 with parameter q over
Univariate Polynomial Ring in q over Real Field with 53 bits of precision
sage: PA([[1,2],[-1,-2]])
1.00000000000000*B{{-2, -1}, {1, 2}}
sage: PA = PartitionAlgebra(2, 5, base_ring=ZZ, prefix='B')
sage: PA
Partition Algebra of rank 2 with parameter 5 over Integer Ring
sage: (PA([[2, -2], [1, -1]]) - 2*PA([[-2, -1], [1, 2]]))^2 == 16*PA([[-2, -1], [1, 2]]) + PA([[2, -2], [1, -1]])
True
Symmetric group algebra elements and elements from other subalgebras
of the partition algebra (e.g., ``BrauerAlgebra`` and
``TemperleyLiebAlgebra``) can also be coerced into the partition algebra::
sage: S = SymmetricGroupAlgebra(SR, 2)
sage: B = BrauerAlgebra(2, x, SR)
sage: A = PartitionAlgebra(2, x, SR)
sage: S([2,1])*A([[1,-1],[2,-2]])
P{{-2, 1}, {-1, 2}}
sage: B([[-1,-2],[2,1]]) * A([[1],[-1],[2,-2]])
P{{-2}, {-1}, {1, 2}}
sage: A([[1],[-1],[2,-2]]) * B([[-1,-2],[2,1]])
P{{-2, -1}, {1}, {2}}
The same is true if the elements come from a subalgebra of a partition
algebra of smaller order, or if they are defined over a different
base ring::
sage: R = FractionField(ZZ['q']); q = R.gen()
sage: S = SymmetricGroupAlgebra(ZZ, 2)
sage: B = BrauerAlgebra(2, q, ZZ[q])
sage: A = PartitionAlgebra(3, q, R)
sage: S([2,1])*A([[1,-1],[2,-3],[3,-2]])
P{{-3, 1}, {-2, 3}, {-1, 2}}
sage: A(B([[-1,-2],[2,1]]))
P{{-3, 3}, {-2, -1}, {1, 2}}
TESTS:
A computation that returned an incorrect result until :trac:`15958`::
sage: A = PartitionAlgebra(1,17)
sage: g = SetPartitionsAk(1).list()
sage: a = A[g[1]]
sage: a
P{{-1}, {1}}
sage: a*a
17*P{{-1}, {1}}
Shorthands for working with basis elements are as follows::
sage: S = SymmetricGroupAlgebra(ZZ, 3)
sage: A = PartitionAlgebra(3, x, SR)
sage: A([[1,3],[-1],[-3]]) # pair up the omitted nodes as `{-i, i}`, if possible
P{{-3}, {-2, 2}, {-1}, {1, 3}}
sage: A([[1,3],[-1],[-3]]) == A[[1,3],[-1],[-3]]
True
sage: A([[1,2]])
P{{-3, 3}, {-2}, {-1}, {1, 2}}
sage: A([[1,2]]) == A[[1,2]]
True
sage: A([2,3,1]) # permutations in one-line notation are imported as well
P{{-3, 2}, {-2, 1}, {-1, 3}}
sage: A([2,3,1]) == A(S([2,3,1]))
True
REFERENCES:
.. [HR2005] Tom Halverson and Arun Ram, *Partition algebras*. European
Journal of Combinatorics **26** (2005), 869--921.
"""
@staticmethod
def __classcall_private__(cls, k, q, base_ring=None, prefix="P"):
r"""
Standardize the input by getting the base ring from the parent of
the parameter ``q`` if no ``base_ring`` is given.
TESTS::
sage: R.<q> = QQ[]
sage: PA1 = PartitionAlgebra(2, q)
sage: PA2 = PartitionAlgebra(2, q, R, 'P')
sage: PA1 is PA2
True
"""
if base_ring is None:
base_ring = q.parent()
return super(PartitionAlgebra, cls).__classcall__(cls, k, q, base_ring, prefix)
# The following is the basic constructor method for the class.
# The purpose of the "prefix" is to label the basis elements
def __init__(self, k, q, base_ring, prefix):
r"""
Initialize ``self``.
TESTS::
sage: R.<q> = QQ[]
sage: PA = PartitionAlgebra(2, q, R)
sage: TestSuite(PA).run()
"""
self._k = k
self._prefix = prefix
self._q = base_ring(q)
DiagramAlgebra.__init__(self, k, q, base_ring, prefix, PartitionDiagrams(k))
def _element_constructor_(self, x):
r"""
Construct an element of ``self``.
TESTS::
sage: import sage.combinat.diagram_algebras as da
sage: R.<x> = QQ[]
sage: PA = PartitionAlgebra(2, x, R, 'P')
sage: PA([]) == PA.one()
True
sage: D = da.DiagramAlgebra(2, x, R, 'P', da.PartitionDiagrams(2))
sage: D([]) == D.one()
Traceback (most recent call last):
...
ValueError: invalid input of []
sage: sp = da.to_set_partition( [[1,2], [-1,-2]] )
sage: b_elt = D(sp); b_elt
P{{-2, -1}, {1, 2}}
sage: b_elt in D
True
sage: D([[1,2],[-1,-2]]) == b_elt
True
sage: D([{1,2},{-1,-2}]) == b_elt
True
sage: S = SymmetricGroupAlgebra(R,2)
sage: D(S([2,1]))
P{{-2, 1}, {-1, 2}}
sage: D2 = da.DiagramAlgebra(2, x, R, 'P', da.PlanarDiagrams(2))
sage: D2(S([1,2]))
P{{-2, 2}, {-1, 1}}
sage: D2(S([2,1]))
Traceback (most recent call last):
...
ValueError: the diagram {{-2, 1}, {-1, 2}} must be planar
"""
# coercion from basis keys
if self.basis().keys().is_parent_of(x):
return self.basis()[x]
# conversion from (smaller) diagram or permutation
if isinstance(x, (AbstractPartitionDiagram, list, tuple, Permutations.Element)):
return self._diag_to_Blst(x)
# conversion from orbit basis
if (isinstance(x, OrbitBasis.Element)
and self.base_ring().has_coerce_map_from(x.parent().base_ring())):
return self(x.parent().to_diagram_basis(x))
# conversion from SubPartitionAlgebra
if (isinstance(x, (PartitionAlgebra.Element, SubPartitionAlgebra.Element))
and self.has_coerce_map_from(x.parent().base_ring())):
return sum(a * self._diag_to_Blst(d) for (d,a) in x)
return super(PartitionAlgebra, self)._element_constructor_(x)
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: R.<q> = QQ[]
sage: PartitionAlgebra(2, q, R)
Partition Algebra of rank 2 with parameter q
over Univariate Polynomial Ring in q over Rational Field
"""
return "Partition Algebra of rank {} with parameter {} over {}".format(
self._k, self._q, self.base_ring())
def _coerce_map_from_(self, R):
"""
Return a coerce map from ``R`` if one exists and ``None`` otherwise.
.. TODO::
- Refactor some of these generic morphisms as compositions
of morphisms.
- Allow for coercion if base_rings and parameters are the same,
up to relabeling/isomorphism?
EXAMPLES::
sage: R.<x> = QQ[]
sage: S = SymmetricGroupAlgebra(R, 4)
sage: A = PartitionAlgebra(4, x, R)
sage: O = A.orbit_basis()
sage: A._coerce_map_from_(S)
Generic morphism:
From: Symmetric group algebra of order 4 over Univariate Polynomial Ring in x over Rational Field
To: Partition Algebra of rank 4 with parameter x over Univariate Polynomial Ring in x over Rational Field
sage: A._coerce_map_from_(O)
Generic morphism:
From: Orbit basis of Partition Algebra of rank 4 with parameter x over Univariate Polynomial Ring in x over Rational Field
To: Partition Algebra of rank 4 with parameter x over Univariate Polynomial Ring in x over Rational Field
sage: Sp3 = SymmetricGroupAlgebra(ZZ, 3)
sage: A._coerce_map_from_(Sp3)
Generic morphism:
From: Symmetric group algebra of order 3 over Integer Ring
To: Partition Algebra of rank 4 with parameter x over Univariate Polynomial Ring in x over Rational Field
sage: B3 = BrauerAlgebra(3, x, R)
sage: A._coerce_map_from_(B3)
Generic morphism:
From: Brauer Algebra of rank 3 with parameter x over Univariate Polynomial Ring in x over Rational Field
To: Partition Algebra of rank 4 with parameter x over Univariate Polynomial Ring in x over Rational Field
sage: A3 = PartitionAlgebra(3, x, R)
sage: A._coerce_map_from_(A3)
Generic morphism:
From: Partition Algebra of rank 3 with parameter x over Univariate Polynomial Ring in x over Rational Field
To: Partition Algebra of rank 4 with parameter x over Univariate Polynomial Ring in x over Rational Field
sage: O3 = A3.orbit_basis()
sage: A._coerce_map_from_(O3)
Generic morphism:
From: Orbit basis of Partition Algebra of rank 3 with parameter x over Univariate Polynomial Ring in x over Rational Field
To: Partition Algebra of rank 4 with parameter x over Univariate Polynomial Ring in x over Rational Field
TESTS::
sage: elt = O3.an_element(); elt
2*O{{-3, -2, -1, 1, 2, 3}} + 2*O{{-3, -2, 1, 2, 3}, {-1}}
+ 3*O{{-3, -1, 1, 2, 3}, {-2}}
sage: A._coerce_map_from_(O3)(elt)
-3*P{{-4, 4}, {-3, -2, -1, 1, 2, 3}}
+ 2*P{{-4, 4}, {-3, -2, 1, 2, 3}, {-1}}
+ 3*P{{-4, 4}, {-3, -1, 1, 2, 3}, {-2}}
"""
# coerce from Orbit basis.
if isinstance(R, OrbitBasis):
if R._k <= self._k and self.base_ring().has_coerce_map_from(R.base_ring()):
return R.module_morphism(self._orbit_to_diagram_on_basis, codomain=self)
return None
# coerce from sub-partition algebras.
if isinstance(R, (PartitionAlgebra, SubPartitionAlgebra)):
if R._k <= self._k and self.base_ring().has_coerce_map_from(R.base_ring()):
return R.module_morphism(self._diag_to_Blst, codomain=self)
return None
# coerce from Symmetric group algebras.
if isinstance(R, SymmetricGroupAlgebra_n):
if R.n <= self._k and self.base_ring().has_coerce_map_from(R.base_ring()):
return R.module_morphism(self._perm_to_Blst, codomain=self)
return None
return super(PartitionAlgebra, self)._coerce_map_from_(R)
def orbit_basis(self):
r"""
Return the orbit basis of ``self``.
EXAMPLES::
sage: R.<x> = QQ[]
sage: P2 = PartitionAlgebra(2, x, R)
sage: O2 = P2.orbit_basis(); O2
Orbit basis of Partition Algebra of rank 2 with parameter x over
Univariate Polynomial Ring in x over Rational Field
sage: pp = 7 * P2[{-1}, {-2, 1, 2}] - 2 * P2[{-2}, {-1, 1}, {2}]; pp
-2*P{{-2}, {-1, 1}, {2}} + 7*P{{-2, 1, 2}, {-1}}
sage: op = pp.to_orbit_basis(); op
-2*O{{-2}, {-1, 1}, {2}} - 2*O{{-2}, {-1, 1, 2}}
- 2*O{{-2, -1, 1}, {2}} + 5*O{{-2, -1, 1, 2}}
+ 7*O{{-2, 1, 2}, {-1}} - 2*O{{-2, 2}, {-1, 1}}
sage: op == O2(op)
True
sage: pp * op.leading_term()
4*P{{-2}, {-1, 1}, {2}} - 4*P{{-2, -1, 1}, {2}}
+ 14*P{{-2, -1, 1, 2}} - 14*P{{-2, 1, 2}, {-1}}
"""
return OrbitBasis(self)
def _orbit_to_diagram_on_basis(self, d):
r"""
Return the orbit basis element, indexed by the partition
diagram ``d``, in the diagram basis of the partition algebra.
EXAMPLES::
sage: R.<x> = QQ[]
sage: P2 = PartitionAlgebra(2, x, R)
sage: from sage.combinat.diagram_algebras import PartitionDiagrams
sage: PD = PartitionDiagrams(2)
sage: P2._orbit_to_diagram_on_basis(PD([[1,2,-2],[-1]]))
-P{{-2, -1, 1, 2}} + P{{-2, 1, 2}, {-1}}
"""
# Mobius inversion in the poset of coarsenings of ``d``
SPd = SetPartitions(len(d))
return self.sum((-1)**(len(d)-len(sp)) * prod(ZZ(len(p)-1).factorial() for p in sp)
* self([sum((list(d[i-1]) for i in p),[]) for p in sp])
for sp in SPd)
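# Sketch of the alternating sum above for the docstring example (illustrative):
# d = {{1, 2, -2}, {-1}} has two blocks, so SPd runs over the set partitions
# of {1, 2}.  The partition {{1}, {2}} keeps d with coefficient +1, while
# {{1, 2}} merges both blocks into {{-2, -1, 1, 2}} with coefficient -1,
# giving P{{-2, 1, 2}, {-1}} - P{{-2, -1, 1, 2}}.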
class Element(DiagramBasis.Element):
def to_orbit_basis(self):
"""
Return ``self`` in the orbit basis of the associated
partition algebra.
EXAMPLES::
sage: R.<x> = QQ[]
sage: P = PartitionAlgebra(2, x, R)
sage: pp = P.an_element();
sage: pp.to_orbit_basis()
3*O{{-2}, {-1, 1, 2}} + 7*O{{-2, -1, 1, 2}} + 2*O{{-2, 1, 2}, {-1}}
sage: pp = (3*P([[-2], [-1, 1, 2]]) + 2*P([[-2, -1, 1, 2]])
....: + 2*P([[-2, 1, 2], [-1]])); pp
3*P{{-2}, {-1, 1, 2}} + 2*P{{-2, -1, 1, 2}} + 2*P{{-2, 1, 2}, {-1}}
sage: pp.to_orbit_basis()
3*O{{-2}, {-1, 1, 2}} + 7*O{{-2, -1, 1, 2}} + 2*O{{-2, 1, 2}, {-1}}
"""
OP = self.parent().orbit_basis()
return OP(self)
class OrbitBasis(DiagramAlgebra):
r"""
The orbit basis of the partition algebra.
Let `D_\pi` denote the diagram basis element indexed by the set
partition `\pi`. Then (see equations (2.14), (2.17) and (2.18) of [BH2017]_)
.. MATH::
D_\pi = \sum_{\tau \geq \pi} O_\tau,
where the sum is over all partitions `\tau` which are coarser than `\pi`
and `O_\tau` is the orbit basis element indexed by the partition `\tau`.
If `\mu_{2k}(\pi,\tau)` denotes the Möbius function of the partition
lattice, then
.. MATH::
O_\pi = \sum_{\tau \geq \pi} \mu_{2k}(\pi, \tau) D_\tau.
If `\tau` is a partition with `\ell` blocks and the `i^{th}` block of
`\tau` is a union of `b_i` blocks of `\pi`, then
.. MATH::
\mu_{2k}(\pi, \tau) = \prod_{i=1}^\ell (-1)^{b_i-1} (b_i-1)! .
EXAMPLES::
sage: R.<x> = QQ[]
sage: P2 = PartitionAlgebra(2, x, R)
sage: O2 = P2.orbit_basis(); O2
Orbit basis of Partition Algebra of rank 2 with parameter x over
Univariate Polynomial Ring in x over Rational Field
sage: oa = O2([[1],[-1],[2,-2]]); ob = O2([[-1,-2,2],[1]]); oa, ob
(O{{-2, 2}, {-1}, {1}}, O{{-2, -1, 2}, {1}})
sage: oa * ob
(x-2)*O{{-2, -1, 2}, {1}}
We can convert between the two bases::
sage: pa = P2(oa); pa
2*P{{-2, -1, 1, 2}} - P{{-2, -1, 2}, {1}} - P{{-2, 1, 2}, {-1}}
+ P{{-2, 2}, {-1}, {1}} - P{{-2, 2}, {-1, 1}}
sage: pa * ob
(-x+2)*P{{-2, -1, 1, 2}} + (x-2)*P{{-2, -1, 2}, {1}}
sage: _ == pa * P2(ob)
True
sage: O2(pa * ob)
(x-2)*O{{-2, -1, 2}, {1}}
Note that the unit in the orbit basis is not a single diagram,
in contrast to the natural diagram basis::
sage: P2.one()
P{{-2, 2}, {-1, 1}}
sage: O2.one()
O{{-2, -1, 1, 2}} + O{{-2, 2}, {-1, 1}}
sage: O2.one() == P2.one()
True
TESTS:
Check that going between the two bases is the identity::
sage: R.<x> = QQ[]
sage: P2 = PartitionAlgebra(2, x, R)
sage: O2 = P2.orbit_basis(); O2
Orbit basis of Partition Algebra of rank 2 with parameter x over
Univariate Polynomial Ring in x over Rational Field
sage: PD = P2.basis().keys()
sage: all(O2(P2(O2(m))) == O2(m) for m in PD)
True
sage: all(P2(O2(P2(m))) == P2(m) for m in PD)
True
"""
@staticmethod
def __classcall_private__(cls, *args):
"""
Normalize input to ensure a unique representation.
INPUT:
Either:
- ``A`` -- an abstract diagram algebra
or the arguments to construct a diagram algebra:
- ``k`` -- the rank
- ``q`` -- the parameter
- ``R`` -- the base ring
EXAMPLES::
sage: R.<x> = QQ[]
sage: P2 = PartitionAlgebra(2, x, R)
sage: from sage.combinat.diagram_algebras import OrbitBasis
sage: O2a = P2.orbit_basis()
sage: O2b = OrbitBasis(P2)
sage: O2c = OrbitBasis(2, x, R)
sage: O2a is O2b and O2a is O2c
True
sage: O2d = OrbitBasis(2, x, QQ[x])
sage: O2a is O2d
True
"""
if len(args) == 1:
PA = args[0]
if not isinstance(PA, DiagramAlgebra):
raise ValueError("{} is not a partition algebra".format(PA))
alg = PA
elif len(args) != 3:
raise ValueError("expected 1 or 3 arguments, received %s: %s"%(len(args), args))
else:
(k,q,R) = args
q = R(q)
alg = PartitionAlgebra(k, q, R)
return super(OrbitBasis, cls).__classcall__(cls, alg)
def __init__(self, alg):
"""
Initialize ``self``.
EXAMPLES::
sage: O2 = PartitionAlgebra(2, -1, QQ).orbit_basis()
sage: TestSuite(O2).run()
"""
base_ring = alg.base_ring()
k = alg._k
q = alg._q
diagrams = alg._base_diagrams
# TODO: should we add additional categories?
category = alg.category()
DiagramAlgebra.__init__(self, k, q, base_ring, "O", diagrams, category)
self._fill = True
self._alg = alg
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: PartitionAlgebra(2, -1, QQ).orbit_basis()
Orbit basis of Partition Algebra of rank 2 with parameter -1 over Rational Field
"""
return "Orbit basis of {}".format(self._alg)
def _element_constructor_(self, x):
"""
Convert ``x`` into ``self``.
EXAMPLES::
sage: R.<x> = QQ[]
sage: P2 = PartitionAlgebra(2, x, R)
sage: O2 = P2.orbit_basis()
sage: O2(P2([]))
O{{-2, -1, 1, 2}} + O{{-2, 2}, {-1, 1}}
sage: O2(3).to_diagram_basis() == 3 * P2.one()
True
sage: O2(P2([[1,2,-2],[-1]]))
O{{-2, -1, 1, 2}} + O{{-2, 1, 2}, {-1}}
"""
if isinstance(x, (PartitionAlgebra.Element, SubPartitionAlgebra.Element)):
return self._alg(x).to_orbit_basis()
d = self._alg._diag_to_Blst(x).diagram()
return CombinatorialFreeModule._element_constructor_(self, d)
def _coerce_map_from_(self, R):
r"""
Return a coerce map from ``R`` if one exists and ``None`` otherwise.
EXAMPLES::
sage: R.<x> = QQ[]
sage: P2 = PartitionAlgebra(2, x, R)
sage: O2 = P2.orbit_basis()
sage: O2(P2([]))
O{{-2, -1, 1, 2}} + O{{-2, 2}, {-1, 1}}
sage: O2(3)
3*O{{-2, -1, 1, 2}} + 3*O{{-2, 2}, {-1, 1}}
sage: O2([[1,2,-2],[-1]])
O{{-2, 1, 2}, {-1}}
"""
if R is self._alg:
return self._alg.module_morphism(self._diagram_to_orbit_on_basis, codomain=self)
if self._alg.coerce_map_from(R):
return self._coerce_map_via([self._alg], R)
return super(OrbitBasis, self)._coerce_map_from_(R)
@cached_method
def one(self):
"""
Return the element `1` of the partition algebra in the orbit basis.
EXAMPLES::
sage: R.<x> = QQ[]
sage: P2 = PartitionAlgebra(2, x, R)
sage: O2 = P2.orbit_basis()
sage: O2.one()
O{{-2, -1, 1, 2}} + O{{-2, 2}, {-1, 1}}
"""
PDs = self._base_diagrams
base = SetPartitions()(identity_set_partition(self._k))
brone = self.base_ring().one()
return self._from_dict({PDs(d): brone for d in base.coarsenings()},
coerce=False, remove_zeros=False)
def diagram_basis(self):
"""
Return the associated partition algebra of ``self``
in the diagram basis.
EXAMPLES::
sage: R.<x> = QQ[]
sage: O2 = PartitionAlgebra(2, x, R).orbit_basis()
sage: P2 = O2.diagram_basis(); P2
Partition Algebra of rank 2 with parameter x over Univariate
Polynomial Ring in x over Rational Field
sage: o2 = O2.an_element(); o2
3*O{{-2}, {-1, 1, 2}} + 2*O{{-2, -1, 1, 2}} + 2*O{{-2, 1, 2}, {-1}}
sage: P2(o2)
3*P{{-2}, {-1, 1, 2}} - 3*P{{-2, -1, 1, 2}} + 2*P{{-2, 1, 2}, {-1}}
TESTS::
sage: R.<x> = QQ[]
sage: P2 = PartitionAlgebra(2, x, R)
sage: O2 = P2.orbit_basis()
sage: op = O2([]); op
O{{-2, 2}, {-1, 1}}
sage: PA = O2.diagram_basis()
sage: P2 == PA
True
sage: PA([]) == P2.one()
True
sage: PA(op)
-P{{-2, -1, 1, 2}} + P{{-2, 2}, {-1, 1}}
sage: op == PA(op).to_orbit_basis()
True
"""
return self._alg
def _diagram_to_orbit_on_basis(self, diag):
"""
Return the element ``diag`` in the orbit basis.
EXAMPLES::
sage: R.<x> = QQ[]
sage: P2 = PartitionAlgebra(2, x, R)
sage: O2 = P2.orbit_basis()
sage: from sage.combinat.diagram_algebras import PartitionDiagrams
sage: PD = PartitionDiagrams(2)
sage: O2._diagram_to_orbit_on_basis(PD([[1,2,-2],[-1]]))
O{{-2, -1, 1, 2}} + O{{-2, 1, 2}, {-1}}
sage: P2.one().to_orbit_basis()
O{{-2, -1, 1, 2}} + O{{-2, 2}, {-1, 1}}
sage: pp = P2[{-2}, {-1, 1}, {2}]
sage: O2(pp)
O{{-2}, {-1, 1}, {2}} + O{{-2}, {-1, 1, 2}} + O{{-2, -1, 1}, {2}}
+ O{{-2, -1, 1, 2}} + O{{-2, 2}, {-1, 1}}
TESTS::
sage: P2([]).to_orbit_basis() == O2.one()
True
sage: O2([]) == O2.one()
False
sage: op = O2.an_element()
sage: op == op.to_diagram_basis().to_orbit_basis()
True
"""
PDs = PartitionDiagrams(self._alg._k)
one = self.base_ring().one()
return self._from_dict({PDs(d): one for d in diag.set_partition().coarsenings()},
coerce=False, remove_zeros=False)
def product_on_basis(self, d1, d2):
r"""
Return the product `O_{d_1} O_{d_2}` of two elements
in the orbit basis ``self``.
EXAMPLES::
sage: R.<x> = QQ[]
sage: OP = PartitionAlgebra(2, x, R).orbit_basis()
sage: SP = OP.basis().keys(); sp = SP([[-2, -1, 1, 2]])
sage: OP.product_on_basis(sp, sp)
O{{-2, -1, 1, 2}}
sage: o1 = OP.one(); o2 = OP([]); o3 = OP.an_element()
sage: o2 == o1
False
sage: o1 * o1 == o1
True
sage: o3 * o1 == o1 * o3 and o3 * o1 == o3
True
sage: o4 = (3*OP([[-2, -1, 1], [2]]) + 2*OP([[-2, -1, 1, 2]])
....: + 2*OP([[-2, -1, 2], [1]]))
sage: o4 * o4
6*O{{-2, -1, 1}, {2}} + 4*O{{-2, -1, 1, 2}} + 4*O{{-2, -1, 2}, {1}}
We compute Examples 4.5 in [BH2017]_::
sage: R.<x> = QQ[]
sage: P = PartitionAlgebra(3,x); O = P.orbit_basis()
sage: O[[1,2,3],[-1,-2,-3]] * O[[1,2,3],[-1,-2,-3]]
(x-2)*O{{-3, -2, -1}, {1, 2, 3}} + (x-1)*O{{-3, -2, -1, 1, 2, 3}}
sage: P = PartitionAlgebra(4,x); O = P.orbit_basis()
sage: O[[1],[-1],[2,3],[4,-2],[-3,-4]] * O[[1],[2,-2],[3,4],[-1,-3],[-4]]
(x^2-11*x+30)*O{{-4}, {-3, -1}, {-2, 4}, {1}, {2, 3}}
+ (x^2-9*x+20)*O{{-4}, {-3, -1, 1}, {-2, 4}, {2, 3}}
+ (x^2-9*x+20)*O{{-4}, {-3, -1, 2, 3}, {-2, 4}, {1}}
+ (x^2-9*x+20)*O{{-4, 1}, {-3, -1}, {-2, 4}, {2, 3}}
+ (x^2-7*x+12)*O{{-4, 1}, {-3, -1, 2, 3}, {-2, 4}}
+ (x^2-9*x+20)*O{{-4, 2, 3}, {-3, -1}, {-2, 4}, {1}}
+ (x^2-7*x+12)*O{{-4, 2, 3}, {-3, -1, 1}, {-2, 4}}
sage: O[[1,-1],[2,-2],[3],[4,-3],[-4]] * O[[1,-2],[2],[3,-1],[4],[-3],[-4]]
(x-6)*O{{-4}, {-3}, {-2, 1}, {-1, 4}, {2}, {3}}
+ (x-5)*O{{-4}, {-3, 3}, {-2, 1}, {-1, 4}, {2}}
+ (x-5)*O{{-4, 3}, {-3}, {-2, 1}, {-1, 4}, {2}}
sage: P = PartitionAlgebra(6,x); O = P.orbit_basis()
sage: (O[[1,-2,-3],[2,4],[3,5,-6],[6],[-1],[-4,-5]]
....: * O[[1,-2],[2,3],[4],[5],[6,-4,-5,-6],[-1,-3]])
0
sage: (O[[1,-2],[2,-3],[3,5],[4,-5],[6,-4],[-1],[-6]]
....: * O[[1,-2],[2,-1],[3,-4],[4,-6],[5,-3],[6,-5]])
O{{-6, 6}, {-5}, {-4, 2}, {-3, 4}, {-2}, {-1, 1}, {3, 5}}
TESTS:
Check that multiplication agrees with the multiplication in the
partition algebra::
sage: R.<x> = QQ[]
sage: OP = PartitionAlgebra(2, x).orbit_basis()
sage: P = OP.diagram_basis()
sage: o1 = OP.one(); o2 = OP([]); o3 = OP.an_element()
sage: p1 = P(o1); p2 = P(o2); p3 = P(o3)
sage: (p2 * p3).to_orbit_basis() == o2 * o3
True
sage: (3*p3 * (p1 - 2*p2)).to_orbit_basis() == 3*o3 * (o1 - 2*o2)
True
sage: R.<x> = QQ[]
sage: P = PartitionAlgebra(2,x); O = P.orbit_basis()
sage: all(b * bp == OP(P(b) * P(bp)) for b in OP.basis() # long time
....: for bp in OP.basis())
True
REFERENCES:
- [BH2017]_
"""
## According to Corollary 4.12 in [BH2017]_, product is zero unless the
## stacked diagrams "exactly match" in the middle.
pi_1 = [frozenset([-i for i in part if i < 0]) for part in d1]
pi_2 = [frozenset([i for i in part if i > 0]) for part in d2]
if set([part for part in pi_1 if part]) != set([part for part in pi_2 if part]):
return self.zero()
q = self._q
R = q.parent()
PDs = self._base_diagrams
def matchings(A, B):
for i in range(min(len(A), len(B))+1):
for X in itertools.combinations(A, i):
restA = list(A.difference(X))
for Y in itertools.combinations(B, i):
restB = list(B.difference(Y))
for sigma in Permutations(Y):
yield [x.union(y) for x,y in zip(X, sigma)] + restA + restB
D, removed = d1.compose(d2)
only_top = set([frozenset(part) for part in d1 if all(i > 0 for i in part)])
only_bottom = set([frozenset(part) for part in d2 if all(i < 0 for i in part)])
only_both = only_top.union(only_bottom)
restD = [P for P in D if frozenset(P) not in only_both]
term_dict = {PDs(restD + X):
R.prod(q - t for t in range(len(X)+len(restD),
len(X)+len(restD)+removed))
for X in matchings(only_top, only_bottom)}
return self._from_dict(term_dict)
class Element(PartitionAlgebra.Element):
def to_diagram_basis(self):
"""
Expand ``self`` in the natural diagram basis of the
partition algebra.
EXAMPLES::
sage: R.<x> = QQ[]
sage: P = PartitionAlgebra(2, x, R)
sage: O = P.orbit_basis()
sage: elt = O.an_element(); elt
3*O{{-2}, {-1, 1, 2}} + 2*O{{-2, -1, 1, 2}} + 2*O{{-2, 1, 2}, {-1}}
sage: elt.to_diagram_basis()
3*P{{-2}, {-1, 1, 2}} - 3*P{{-2, -1, 1, 2}} + 2*P{{-2, 1, 2}, {-1}}
sage: pp = P.an_element(); pp
3*P{{-2}, {-1, 1, 2}} + 2*P{{-2, -1, 1, 2}} + 2*P{{-2, 1, 2}, {-1}}
sage: op = pp.to_orbit_basis(); op
3*O{{-2}, {-1, 1, 2}} + 7*O{{-2, -1, 1, 2}} + 2*O{{-2, 1, 2}, {-1}}
sage: pp == op.to_diagram_basis()
True
"""
# use _coerce_map_from_
return self.parent()._alg(self)
#return self._alg.coerce_map_from(self)
class SubPartitionAlgebra(DiagramBasis):
"""
A subalgebra of the partition algebra in the diagram basis indexed
by a subset of the diagrams.
"""
def __init__(self, k, q, base_ring, prefix, diagrams, category=None):
"""
Initialize ``self`` by adding a coercion to the ambient space.
EXAMPLES::
sage: R.<x> = QQ[]
sage: BA = BrauerAlgebra(2, x, R)
sage: BA.ambient().has_coerce_map_from(BA)
True
"""
DiagramBasis.__init__(self, k, q, base_ring, prefix, diagrams, category)
#These methods allow for a subalgebra to be correctly identified in a partition algebra
def ambient(self):
r"""
Return the partition algebra ``self`` is a sub-algebra of.
EXAMPLES::
sage: x = var('x')
sage: BA = BrauerAlgebra(2, x)
sage: BA.ambient()
Partition Algebra of rank 2 with parameter x over Symbolic Ring
"""
return self.lift.codomain()
@lazy_attribute
def lift(self):
r"""
Return the lift map from diagram subalgebra to the ambient space.
EXAMPLES::
sage: R.<x> = QQ[]
sage: BA = BrauerAlgebra(2, x, R)
sage: E = BA([[1,2],[-1,-2]])
sage: lifted = BA.lift(E); lifted
B{{-2, -1}, {1, 2}}
sage: lifted.parent() is BA.ambient()
True
"""
amb = PartitionAlgebra(self._k, self._q, self.base_ring(), prefix=self._prefix)
phi = self.module_morphism(amb.monomial, codomain=amb, category=self.category())
phi.register_as_coercion()
return phi
def retract(self, x):
r"""
Retract an appropriate partition algebra element to the
corresponding element in the partition subalgebra.
EXAMPLES::
sage: R.<x> = QQ[]
sage: BA = BrauerAlgebra(2, x, R)
sage: PA = BA.ambient()
sage: E = PA([[1,2], [-1,-2]])
sage: BA.retract(E) in BA
True
"""
if ( x not in self.ambient()
or any(i not in self._indices for i in x.support()) ):
raise ValueError("{0} cannot retract to {1}".format(x, self))
return self._from_dict(x._monomial_coefficients, remove_zeros=False)
class Element(DiagramBasis.Element):
def to_orbit_basis(self):
"""
Return ``self`` in the orbit basis of the associated
ambient partition algebra.
EXAMPLES::
sage: R.<x> = QQ[]
sage: B = BrauerAlgebra(2, x, R)
sage: bb = B([[-2, -1], [1, 2]]); bb
B{{-2, -1}, {1, 2}}
sage: bb.to_orbit_basis()
O{{-2, -1}, {1, 2}} + O{{-2, -1, 1, 2}}
"""
P = self.parent().lift.codomain()
OP = P.orbit_basis()
return OP(P(self))
class BrauerAlgebra(SubPartitionAlgebra, UnitDiagramMixin):
r"""
A Brauer algebra.
The Brauer algebra of rank `k` is an algebra with basis indexed by the
collection of set partitions of `\{1, \ldots, k, -1, \ldots, -k\}`
with block size 2.
This algebra is a subalgebra of the partition algebra.
For more information, see :class:`PartitionAlgebra`.
INPUT:
- ``k`` -- rank of the algebra
- ``q`` -- the deformation parameter `q`
OPTIONAL ARGUMENTS:
- ``base_ring`` -- (default ``None``) a ring containing ``q``; if ``None``
then just takes the parent of ``q``
- ``prefix`` -- (default ``"B"``) a label for the basis elements
EXAMPLES:
We now define the Brauer algebra of rank `2` with parameter ``x``
over `\ZZ`::
sage: R.<x> = ZZ[]
sage: B = BrauerAlgebra(2, x, R)
sage: B
Brauer Algebra of rank 2 with parameter x
over Univariate Polynomial Ring in x over Integer Ring
sage: B.basis()
Lazy family (Term map from Brauer diagrams of order 2 to Brauer Algebra
of rank 2 with parameter x over Univariate Polynomial Ring in x
over Integer Ring(i))_{i in Brauer diagrams of order 2}
sage: B.basis().keys()
Brauer diagrams of order 2
sage: B.basis().keys()([[-2, 1], [2, -1]])
{{-2, 1}, {-1, 2}}
sage: b = B.basis().list(); b
[B{{-2, -1}, {1, 2}}, B{{-2, 1}, {-1, 2}}, B{{-2, 2}, {-1, 1}}]
sage: b[0]
B{{-2, -1}, {1, 2}}
sage: b[0]^2
x*B{{-2, -1}, {1, 2}}
sage: b[0]^5
x^4*B{{-2, -1}, {1, 2}}
Note, also that since the symmetric group algebra is contained in
the Brauer algebra, there is also a conversion between the two. ::
sage: R.<x> = ZZ[]
sage: B = BrauerAlgebra(2, x, R)
sage: S = SymmetricGroupAlgebra(R, 2)
sage: S([2,1])*B([[1,-1],[2,-2]])
B{{-2, 1}, {-1, 2}}
"""
@staticmethod
def __classcall_private__(cls, k, q, base_ring=None, prefix="B"):
r"""
Standardize the input by getting the base ring from the parent of
the parameter ``q`` if no ``base_ring`` is given.
TESTS::
sage: R.<q> = QQ[]
sage: BA1 = BrauerAlgebra(2, q)
sage: BA2 = BrauerAlgebra(2, q, R, 'B')
sage: BA1 is BA2
True
"""
if base_ring is None:
base_ring = q.parent()
return super(BrauerAlgebra, cls).__classcall__(cls, k, q, base_ring, prefix)
def __init__(self, k, q, base_ring, prefix):
r"""
Initialize ``self``.
TESTS::
sage: R.<q> = QQ[]
sage: BA = BrauerAlgebra(2, q, R)
sage: TestSuite(BA).run()
"""
SubPartitionAlgebra.__init__(self, k, q, base_ring, prefix, BrauerDiagrams(k))
options = BrauerDiagram.options
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: R.<q> = QQ[]
sage: BrauerAlgebra(2, q, R)
Brauer Algebra of rank 2 with parameter q
over Univariate Polynomial Ring in q over Rational Field
"""
return "Brauer Algebra of rank {} with parameter {} over {}".format(
self._k, self._q, self.base_ring())
# TODO: Make a mixin class for diagram algebras that have coercions from SGA?
def _coerce_map_from_(self, R):
"""
Return a coerce map from ``R`` if one exists and ``None`` otherwise.
EXAMPLES::
sage: R.<x> = QQ[]
sage: S = SymmetricGroupAlgebra(R, 4)
sage: A = BrauerAlgebra(4, x, R)
sage: A._coerce_map_from_(S)
Generic morphism:
From: Symmetric group algebra of order 4 over Univariate Polynomial Ring in x over Rational Field
To: Brauer Algebra of rank 4 with parameter x over Univariate Polynomial Ring in x over Rational Field
sage: Sp = SymmetricGroupAlgebra(QQ, 4)
sage: A._coerce_map_from_(Sp)
Generic morphism:
From: Symmetric group algebra of order 4 over Rational Field
To: Brauer Algebra of rank 4 with parameter x over Univariate Polynomial Ring in x over Rational Field
sage: Sp3 = SymmetricGroupAlgebra(QQ, 3)
sage: A._coerce_map_from_(Sp3)
Generic morphism:
From: Symmetric group algebra of order 3 over Rational Field
To: Brauer Algebra of rank 4 with parameter x over Univariate Polynomial Ring in x over Rational Field
"""
if isinstance(R, SymmetricGroupAlgebra_n):
if R.n <= self._k and self.base_ring().has_coerce_map_from(R.base_ring()):
return R.module_morphism(self._perm_to_Blst, codomain=self)
return None
return super(BrauerAlgebra, self)._coerce_map_from_(R)
def _element_constructor_(self, set_partition):
r"""
Construct an element of ``self``.
EXAMPLES::
sage: R.<q> = QQ[]
sage: BA = BrauerAlgebra(2, q, R)
sage: sp = SetPartition([[1,2], [-1,-2]])
sage: b_elt = BA(sp); b_elt
B{{-2, -1}, {1, 2}}
sage: b_elt in BA
True
sage: BA([[1,2],[-1,-2]]) == b_elt
True
sage: BA([{1,2},{-1,-2}]) == b_elt
True
"""
set_partition = to_Brauer_partition(set_partition, k=self.order())
return DiagramAlgebra._element_constructor_(self, set_partition)
def jucys_murphy(self, j):
r"""
Return the ``j``-th generalized Jucys-Murphy element of ``self``.
The `j`-th Jucys-Murphy element of a Brauer algebra is simply
the `j`-th Jucys-Murphy element of the symmetric group algebra
with an extra `(z-1)/2` term, where ``z`` is the parameter
of the Brauer algebra.
REFERENCES:
.. [Naz96] Maxim Nazarov, Young's Orthogonal Form for Brauer's
Centralizer Algebra. Journal of Algebra 182 (1996), 664--693.
EXAMPLES::
sage: z = var('z')
sage: B = BrauerAlgebra(3,z)
sage: B.jucys_murphy(1)
(1/2*z-1/2)*B{{-3, 3}, {-2, 2}, {-1, 1}}
sage: B.jucys_murphy(3)
-B{{-3, -2}, {-1, 1}, {2, 3}} - B{{-3, -1}, {-2, 2}, {1, 3}}
+ B{{-3, 1}, {-2, 2}, {-1, 3}} + B{{-3, 2}, {-2, 3}, {-1, 1}}
+ (1/2*z-1/2)*B{{-3, 3}, {-2, 2}, {-1, 1}}
"""
if j < 1:
raise ValueError("Jucys-Murphy index must be positive")
k = self.order()
if j > k:
raise ValueError("Jucys-Murphy index cannot be greater than the order of the algebra")
I = lambda x: self._indices(to_Brauer_partition(x, k=k))
R = self.base_ring()
one = R.one()
d = {self.one_basis(): R( (self._q-1) / 2 )}
for i in range(1,j):
d[I([[i,-j],[j,-i]])] = one
d[I([[i,j],[-i,-j]])] = -one
return self._from_dict(d, remove_zeros=True)
class TemperleyLiebAlgebra(SubPartitionAlgebra, UnitDiagramMixin):
r"""
A Temperley--Lieb algebra.
The Temperley--Lieb algebra of rank `k` is an algebra with basis
indexed by the collection of planar set partitions of
`\{1, \ldots, k, -1, \ldots, -k\}` with block size 2.
This algebra is thus a subalgebra of the partition algebra.
For more information, see :class:`PartitionAlgebra`.
INPUT:
- ``k`` -- rank of the algebra
- ``q`` -- the deformation parameter `q`
OPTIONAL ARGUMENTS:
- ``base_ring`` -- (default ``None``) a ring containing ``q``; if ``None``
then just takes the parent of ``q``
- ``prefix`` -- (default ``"T"``) a label for the basis elements
EXAMPLES:
We define the Temperley--Lieb algebra of rank `2` with parameter
`x` over `\ZZ`::
sage: R.<x> = ZZ[]
sage: T = TemperleyLiebAlgebra(2, x, R); T
Temperley-Lieb Algebra of rank 2 with parameter x
over Univariate Polynomial Ring in x over Integer Ring
sage: T.basis()
Lazy family (Term map from Temperley Lieb diagrams of order 2
to Temperley-Lieb Algebra of rank 2 with parameter x over
Univariate Polynomial Ring in x over Integer
Ring(i))_{i in Temperley Lieb diagrams of order 2}
sage: T.basis().keys()
Temperley Lieb diagrams of order 2
sage: T.basis().keys()([[-1, 1], [2, -2]])
{{-2, 2}, {-1, 1}}
sage: b = T.basis().list(); b
[T{{-2, -1}, {1, 2}}, T{{-2, 2}, {-1, 1}}]
sage: b[0]
T{{-2, -1}, {1, 2}}
sage: b[0]^2 == x*b[0]
True
sage: b[0]^5 == x^4*b[0]
True
"""
@staticmethod
def __classcall_private__(cls, k, q, base_ring=None, prefix="T"):
r"""
Standardize the input by getting the base ring from the parent of
the parameter ``q`` if no ``base_ring`` is given.
TESTS::
sage: R.<q> = QQ[]
sage: T1 = TemperleyLiebAlgebra(2, q)
sage: T2 = TemperleyLiebAlgebra(2, q, R, 'T')
sage: T1 is T2
True
"""
if base_ring is None:
base_ring = q.parent()
return super(TemperleyLiebAlgebra, cls).__classcall__(cls, k, q, base_ring, prefix)
def __init__(self, k, q, base_ring, prefix):
r"""
Initialize ``self``.
TESTS::
sage: R.<q> = QQ[]
sage: TL = TemperleyLiebAlgebra(2, q, R)
sage: TestSuite(TL).run()
"""
SubPartitionAlgebra.__init__(self, k, q, base_ring, prefix, TemperleyLiebDiagrams(k))
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: R.<q> = QQ[]
sage: TemperleyLiebAlgebra(2, q, R)
Temperley-Lieb Algebra of rank 2 with parameter q
over Univariate Polynomial Ring in q over Rational Field
"""
return "Temperley-Lieb Algebra of rank {} with parameter {} over {}".format(
self._k, self._q, self.base_ring())
def _element_constructor_(self, set_partition):
r"""
Construct an element of ``self``.
EXAMPLES::
sage: R.<q> = QQ[]
sage: TL = TemperleyLiebAlgebra(2, q, R)
sage: sp = SetPartition([[1,2], [-1,-2]])
sage: b_elt = TL(sp); b_elt
T{{-2, -1}, {1, 2}}
sage: b_elt in TL
True
sage: TL([[1,2],[-1,-2]]) == b_elt
True
sage: TL([{1,2},{-1,-2}]) == b_elt
True
sage: S = SymmetricGroupAlgebra(R, 2)
sage: TL(S([1,2]))
T{{-2, 2}, {-1, 1}}
sage: TL(S([2,1]))
Traceback (most recent call last):
...
ValueError: the diagram {{-2, 1}, {-1, 2}} must be planar
"""
if isinstance(set_partition, SymmetricGroupAlgebra_n.Element):
return SubPartitionAlgebra._element_constructor_(self, set_partition)
set_partition = to_Brauer_partition(set_partition, k=self.order())
return SubPartitionAlgebra._element_constructor_(self, set_partition)
class PlanarAlgebra(SubPartitionAlgebra, UnitDiagramMixin):
r"""
A planar algebra.
The planar algebra of rank `k` is an algebra with basis indexed by the
collection of all planar set partitions of
`\{1, \ldots, k, -1, \ldots, -k\}`.
This algebra is thus a subalgebra of the partition algebra. For more
information, see :class:`PartitionAlgebra`.
INPUT:
- ``k`` -- rank of the algebra
- ``q`` -- the deformation parameter `q`
OPTIONAL ARGUMENTS:
- ``base_ring`` -- (default ``None``) a ring containing ``q``; if ``None``
then just takes the parent of ``q``
- ``prefix`` -- (default ``"Pl"``) a label for the basis elements
EXAMPLES:
We define the planar algebra of rank `2` with parameter
`x` over `\ZZ`::
sage: R.<x> = ZZ[]
sage: Pl = PlanarAlgebra(2, x, R); Pl
Planar Algebra of rank 2 with parameter x over Univariate Polynomial Ring in x over Integer Ring
sage: Pl.basis().keys()
Planar diagrams of order 2
sage: Pl.basis().keys()([[-1, 1], [2, -2]])
{{-2, 2}, {-1, 1}}
sage: Pl.basis().list()
[Pl{{-2, -1, 1, 2}},
Pl{{-2, 1, 2}, {-1}},
Pl{{-2}, {-1, 1, 2}},
Pl{{-2, -1}, {1, 2}},
Pl{{-2}, {-1}, {1, 2}},
Pl{{-2, -1, 1}, {2}},
Pl{{-2, 1}, {-1}, {2}},
Pl{{-2, 2}, {-1, 1}},
Pl{{-2, -1, 2}, {1}},
Pl{{-2, 2}, {-1}, {1}},
Pl{{-2}, {-1, 1}, {2}},
Pl{{-2}, {-1, 2}, {1}},
Pl{{-2, -1}, {1}, {2}},
Pl{{-2}, {-1}, {1}, {2}}]
sage: E = Pl([[1,2],[-1,-2]])
sage: E^2 == x*E
True
sage: E^5 == x^4*E
True
"""
@staticmethod
def __classcall_private__(cls, k, q, base_ring=None, prefix="Pl"):
r"""
Standardize the input by getting the base ring from the parent of
the parameter ``q`` if no ``base_ring`` is given.
TESTS::
sage: R.<q> = QQ[]
sage: Pl1 = PlanarAlgebra(2, q)
sage: Pl2 = PlanarAlgebra(2, q, R, 'Pl')
sage: Pl1 is Pl2
True
"""
if base_ring is None:
base_ring = q.parent()
return super(PlanarAlgebra, cls).__classcall__(cls, k, q, base_ring, prefix)
def __init__(self, k, q, base_ring, prefix):
r"""
Initialize ``self``.
TESTS::
sage: R.<q> = QQ[]
sage: PlA = PlanarAlgebra(2, q, R)
sage: TestSuite(PlA).run()
"""
SubPartitionAlgebra.__init__(self, k, q, base_ring, prefix, PlanarDiagrams(k))
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: R.<x> = ZZ[]
sage: Pl = PlanarAlgebra(2, x, R); Pl
Planar Algebra of rank 2 with parameter x
over Univariate Polynomial Ring in x over Integer Ring
"""
return "Planar Algebra of rank {} with parameter {} over {}".format(self._k,
self._q, self.base_ring())
class PropagatingIdeal(SubPartitionAlgebra):
r"""
A propagating ideal.
The propagating ideal of rank `k` is a non-unital algebra with basis
indexed by the collection of ideal set partitions of `\{1, \ldots, k, -1,
\ldots, -k\}`. We say a set partition is *ideal* if its propagating
number is less than `k`.
This algebra is a non-unital subalgebra and an ideal of the partition
algebra.
For more information, see :class:`PartitionAlgebra`.
EXAMPLES:
We now define the propagating ideal of rank `2` with parameter
`x` over `\ZZ`::
sage: R.<x> = QQ[]
sage: I = PropagatingIdeal(2, x, R); I
Propagating Ideal of rank 2 with parameter x
over Univariate Polynomial Ring in x over Rational Field
sage: I.basis().keys()
Ideal diagrams of order 2
sage: I.basis().list()
[I{{-2, -1, 1, 2}},
I{{-2, 1, 2}, {-1}},
I{{-2}, {-1, 1, 2}},
I{{-2, -1}, {1, 2}},
I{{-2}, {-1}, {1, 2}},
I{{-2, -1, 1}, {2}},
I{{-2, 1}, {-1}, {2}},
I{{-2, -1, 2}, {1}},
I{{-2, 2}, {-1}, {1}},
I{{-2}, {-1, 1}, {2}},
I{{-2}, {-1, 2}, {1}},
I{{-2, -1}, {1}, {2}},
I{{-2}, {-1}, {1}, {2}}]
sage: E = I([[1,2],[-1,-2]])
sage: E^2 == x*E
True
sage: E^5 == x^4*E
True
"""
@staticmethod
def __classcall_private__(cls, k, q, base_ring=None, prefix="I"):
r"""
Standardize the input by getting the base ring from the parent of
the parameter ``q`` if no ``base_ring`` is given.
TESTS::
sage: R.<q> = QQ[]
sage: IA1 = PropagatingIdeal(2, q)
sage: IA2 = PropagatingIdeal(2, q, R, 'I')
sage: IA1 is IA2
True
"""
if base_ring is None:
base_ring = q.parent()
return super(PropagatingIdeal, cls).__classcall__(cls, k, q, base_ring, prefix)
def __init__(self, k, q, base_ring, prefix):
r"""
Initialize ``self``.
TESTS::
sage: R.<q> = QQ[]
sage: I = PropagatingIdeal(2, q, R)
sage: TestSuite(I).run()
"""
category = AssociativeAlgebras(base_ring.category()).FiniteDimensional().WithBasis()
SubPartitionAlgebra.__init__(self, k, q, base_ring, prefix,
IdealDiagrams(k), category)
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: R.<x> = QQ[]
sage: PropagatingIdeal(2, x, R)
Propagating Ideal of rank 2 with parameter x over Univariate
Polynomial Ring in x over Rational Field
"""
return "Propagating Ideal of rank {} with parameter {} over {}".format(
self._k, self._q, self.base_ring())
class Element(SubPartitionAlgebra.Element):
"""
An element of a propagating ideal.
We need to take care of exponents since we are not unital.
"""
def __pow__(self, n):
"""
Return ``self`` to the `n`-th power.
INPUT:
- ``n`` -- a positive integer
EXAMPLES::
sage: R.<x> = QQ[]
sage: I = PropagatingIdeal(2, x, R)
sage: E = I([[1,2],[-1,-2]])
sage: E^2
x*I{{-2, -1}, {1, 2}}
sage: E^0
Traceback (most recent call last):
...
ValueError: can only take positive integer powers
"""
if n <= 0:
raise ValueError("can only take positive integer powers")
return generic_power(self, n)
#########################################################################
# START BORROWED CODE
#########################################################################
# Borrowed from Mike Hansen's original code -- global methods for dealing
# with partition diagrams, compositions of partition diagrams, and so on.
# --> CHANGED 'identity' to 'identity_set_partition' for enhanced clarity.
#########################################################################
def is_planar(sp):
r"""
Return ``True`` if the diagram corresponding to the set partition ``sp``
is planar; otherwise, return ``False``.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: da.is_planar( da.to_set_partition([[1,-2],[2,-1]]))
False
sage: da.is_planar( da.to_set_partition([[1,-1],[2,-2]]))
True
"""
#Singletons don't affect planarity
to_consider = [x for x in map(list, sp) if len(x) > 1]
n = len(to_consider)
for i in range(n):
#Get the positive and negative entries of this part
ap = [x for x in to_consider[i] if x > 0]
an = [abs(x) for x in to_consider[i] if x < 0]
#Check if a includes numbers in both the top and bottom rows
if ap and an:
for j in range(n):
if i == j:
continue
#Get the positive and negative entries of this part
bp = [x for x in to_consider[j] if x > 0]
bn = [abs(x) for x in to_consider[j] if x < 0]
#Skip the ones that don't involve numbers in both
#the bottom and top rows
if not bn or not bp:
continue
                #If part j reaches further right than part i in the top row
                #(max(bp) > max(ap)), it must not reach further left in the bottom row
if max(bp) > max(ap):
if min(bn) < min(an):
return False
#Go through the bottom and top rows
for row in [ap, an]:
if len(row) > 1:
row.sort()
for s in range(len(row)-1):
if row[s] + 1 == row[s+1]:
#No gap, continue on
continue
rng = list(range(row[s] + 1, row[s+1]))
#Go through and make sure any parts that
#contain numbers in this range are completely
#contained in this range
for j in range(n):
if i == j:
continue
#Make sure we make the numbers negative again
#if we are in the bottom row
if row is ap:
sr = set(rng)
else:
sr = set((-1*x for x in rng))
sj = set(to_consider[j])
intersection = sr.intersection(sj)
if intersection:
if sj != intersection:
return False
return True
def to_graph(sp):
r"""
Return a graph representing the set partition ``sp``.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: g = da.to_graph( da.to_set_partition([[1,-2],[2,-1]])); g
Graph on 4 vertices
sage: g.vertices()
[-2, -1, 1, 2]
sage: g.edges()
[(-2, 1, None), (-1, 2, None)]
"""
g = Graph()
for part in sp:
part_list = list(part)
if len(part_list) > 0:
g.add_vertex(part_list[0])
for i in range(1, len(part_list)):
g.add_vertex(part_list[i])
g.add_edge(part_list[i-1], part_list[i])
return g
def pair_to_graph(sp1, sp2):
r"""
Return a graph consisting of the disjoint union of the graphs of set
partitions ``sp1`` and ``sp2`` along with edges joining the bottom
row (negative numbers) of ``sp1`` to the top row (positive numbers)
of ``sp2``.
The vertices of the graph ``sp1`` appear in the result as pairs
``(k, 1)``, whereas the vertices of the graph ``sp2`` appear as
pairs ``(k, 2)``.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: sp1 = da.to_set_partition([[1,-2],[2,-1]])
sage: sp2 = da.to_set_partition([[1,-2],[2,-1]])
sage: g = da.pair_to_graph( sp1, sp2 ); g
Graph on 8 vertices
sage: g.vertices()
[(-2, 1), (-2, 2), (-1, 1), (-1, 2), (1, 1), (1, 2), (2, 1), (2, 2)]
sage: g.edges()
[((-2, 1), (1, 1), None), ((-2, 1), (2, 2), None),
((-2, 2), (1, 2), None), ((-1, 1), (1, 2), None),
((-1, 1), (2, 1), None), ((-1, 2), (2, 2), None)]
Another example which used to be wrong until :trac:`15958`::
sage: sp3 = da.to_set_partition([[1, -1], [2], [-2]])
sage: sp4 = da.to_set_partition([[1], [-1], [2], [-2]])
sage: g = da.pair_to_graph( sp3, sp4 ); g
Graph on 8 vertices
sage: g.vertices()
[(-2, 1), (-2, 2), (-1, 1), (-1, 2), (1, 1), (1, 2), (2, 1), (2, 2)]
sage: g.edges()
[((-2, 1), (2, 2), None), ((-1, 1), (1, 1), None),
((-1, 1), (1, 2), None)]
"""
g = Graph()
#Add the first set partition to the graph
for part in sp1:
part_list = list(part)
if part_list:
g.add_vertex( (part_list[0], 1) )
#Add the edge to the second part of the graph
if part_list[0] < 0:
g.add_edge( (part_list[0], 1), (abs(part_list[0]), 2) )
for i in range(1, len(part_list)):
g.add_vertex( (part_list[i], 1) )
#Add the edge to the second part of the graph
if part_list[i] < 0:
g.add_edge( (part_list[i], 1), (abs(part_list[i]), 2) )
#Add the edge between adjacent elements of a part
g.add_edge( (part_list[i-1], 1), (part_list[i], 1) )
#Add the second set partition to the graph
for part in sp2:
part_list = list(part)
if part_list:
g.add_vertex( (part_list[0], 2) )
for i in range(1, len(part_list)):
g.add_vertex( (part_list[i], 2) )
g.add_edge( (part_list[i-1], 2), (part_list[i], 2) )
return g
def propagating_number(sp):
r"""
Return the propagating number of the set partition ``sp``.
The propagating number is the number of blocks with both a positive and
negative number.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: sp1 = da.to_set_partition([[1,-2],[2,-1]])
sage: sp2 = da.to_set_partition([[1,2],[-2,-1]])
sage: da.propagating_number(sp1)
2
sage: da.propagating_number(sp2)
0
"""
pn = 0
for part in sp:
if min(part) < 0 and max(part) > 0:
pn += 1
return pn
def to_set_partition(l, k=None):
r"""
    Convert input to a set partition of `\{1, \ldots, k, -1, \ldots, -k\}`.
    Convert a list of lists of numbers to a set partition. Each list
of numbers in the outer list specifies the numbers contained in one
of the blocks in the set partition.
If `k` is specified, then the set partition will be a set partition
of `\{1, \ldots, k, -1, \ldots, -k\}`. Otherwise, `k` will default to
the minimum number needed to contain all of the specified numbers.
INPUT:
- ``l`` - a list of lists of integers
- ``k`` - integer (optional, default ``None``)
OUTPUT:
- a list of sets
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: f = lambda sp: SetPartition(da.to_set_partition(sp))
sage: f([[1,-1],[2,-2]]) == SetPartition(da.identity_set_partition(2))
True
sage: da.to_set_partition([[1]])
[{1}, {-1}]
sage: da.to_set_partition([[1,-1],[-2,3]],9/2)
[{-1, 1}, {-2, 3}, {2}, {-4, 4}, {-5, 5}, {-3}]
"""
if k is None:
if l == []:
return []
else:
k = max( (max( map(abs, x) ) for x in l) )
to_be_added = set( list(range(1, ceil(k+1))) + [-1*x for x in range(1, ceil(k+1))] )
sp = []
for part in l:
spart = set(part)
to_be_added -= spart
sp.append(spart)
while to_be_added:
i = to_be_added.pop()
if -i in to_be_added:
to_be_added.remove(-i)
sp.append(set([i,-i]))
else:
sp.append(set([i]))
return sp
def to_Brauer_partition(l, k=None):
r"""
Same as :func:`to_set_partition` but assumes omitted elements are
connected straight through.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: f = lambda sp: SetPartition(da.to_Brauer_partition(sp))
sage: f([[1,2],[-1,-2]]) == SetPartition([[1,2],[-1,-2]])
True
sage: f([[1,3],[-1,-3]]) == SetPartition([[1,3],[-3,-1],[2,-2]])
True
sage: f([[1,-4],[-3,-1],[3,4]]) == SetPartition([[-3,-1],[2,-2],[1,-4],[3,4]])
True
sage: p = SetPartition([[1,2],[-1,-2],[3,-3],[4,-4]])
sage: SetPartition(da.to_Brauer_partition([[1,2],[-1,-2]], k=4)) == p
True
"""
L = to_set_partition(l, k=k)
L2 = []
paired = []
not_paired = []
for i in L:
L2.append(list(i))
for i in L2:
if len(i) > 2:
raise ValueError("blocks must have size at most 2, but {} has {}".format(i, len(i)))
if len(i) == 2:
paired.append(i)
if len(i) == 1:
not_paired.append(i)
    for part in not_paired:
        if any(part[0] in j or -1*part[0] in j for j in paired):
            raise ValueError("unable to convert {} to a Brauer partition due to the invalid block {}".format(l, part))
for i in not_paired:
if [-i[0]] in not_paired:
not_paired.remove([-i[0]])
paired.append([i[0], -i[0]])
return to_set_partition(paired)
def identity_set_partition(k):
r"""
Return the identity set partition `\{\{1, -1\}, \ldots, \{k, -k\}\}`.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: SetPartition(da.identity_set_partition(2))
{{-2, 2}, {-1, 1}}
"""
if k in ZZ:
return [[i,-i] for i in range(1, k + 1)]
# Else k in 1/2 ZZ
return [[i, -i] for i in range(1, k + ZZ(3)/ZZ(2))]
def set_partition_composition(sp1, sp2):
r"""
Return a tuple consisting of the composition of the set partitions
``sp1`` and ``sp2`` and the number of components removed from the middle
rows of the graph.
EXAMPLES::
sage: import sage.combinat.diagram_algebras as da
sage: sp1 = da.to_set_partition([[1,-2],[2,-1]])
sage: sp2 = da.to_set_partition([[1,-2],[2,-1]])
sage: p, c = da.set_partition_composition(sp1, sp2)
sage: (SetPartition(p), c) == (SetPartition(da.identity_set_partition(2)), 0)
True
"""
g = pair_to_graph(sp1, sp2)
connected_components = g.connected_components()
res = []
total_removed = 0
for cc in connected_components:
#Remove the vertices that live in the middle two rows
new_cc = [x for x in cc if not ( (x[0]<0 and x[1] == 1) or (x[0]>0 and x[1]==2))]
if new_cc == []:
if len(cc) > 1:
total_removed += 1
else:
res.append( set((x[0] for x in new_cc)) )
return (res, total_removed)
##########################################################################
# END BORROWED CODE
##########################################################################
| 35.273325
| 190
| 0.508526
|
a573c6e8292b2f4561a31884e1e822569fa6aa98
| 123,820
|
py
|
Python
|
app/solrXMLWebLoad/solrXMLPEPWebLoad.py
|
lhorne-gavant/OpenPubArchive-Content-Server-1
|
2b7c02417a8bb37f5a627343fab7fa05dc532bf7
|
[
"Apache-2.0"
] | null | null | null |
app/solrXMLWebLoad/solrXMLPEPWebLoad.py
|
lhorne-gavant/OpenPubArchive-Content-Server-1
|
2b7c02417a8bb37f5a627343fab7fa05dc532bf7
|
[
"Apache-2.0"
] | null | null | null |
app/solrXMLWebLoad/solrXMLPEPWebLoad.py
|
lhorne-gavant/OpenPubArchive-Content-Server-1
|
2b7c02417a8bb37f5a627343fab7fa05dc532bf7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=C0321,C0103,C0301,E1101,C0303,E1004,C0330,R0915,R0914,W0703,C0326
print(
"""
OPAS - Open Publications-Archive Software - Document, Authors, and References Core Loader
Load articles into one Solr core and extract individual references from
the bibliography into a second Solr core.
This data loader is specific to PEP Data and Bibliography schemas but can
serve as a model or framework for other schemas
Example Invocation:
$ python solrXMLPEPWebLoad.py
Use -h for help on arguments.
(Requires Python 3.7)
"""
)
__author__ = "Neil R. Shapiro"
__copyright__ = "Copyright 2020, Psychoanalytic Electronic Publishing"
__license__ = "Apache 2.0"
__version__ = "2020.07.23"
__status__ = "Development"
#Revision Notes:
#2019.0516 # Added command line options to specify a different path for PEPSourceInfo.json
# Added error logging using python's built-in logging library default INFO level - nrs
#2019.0605 # added int fields for year as they are needed to do faceting ranges (though that's
# beginning to be unclear) - nrs
#2019.0705 # added citedCounts processing to load the Solr core from the mySQL database
# table which calculates these. - nrs
#2019.1231 # Support remote database tunnel. Fix use of SQL when not using SQLite.
#2020.0105 # Some fields marked xml were being loaded as text...fixed.
#2020.0223 # Populate biblio table.
#2020.0224 # Insert a summary excerpt to new Solr field art_excerpt, so server doesn't have to spend time extracting at runtime.
# currently, it can be either XML or HTML but should change it to one or the other to make it more consistent.
# (though the server now handles it this way fine, converting to HTML on output when necessary.)
# Starting to convert this to snake_case, per "pythonic-style". But will put in more effort
# on this later
#2020.0303 # Complete code to populate new api_articles and api_biblioxml tables which replace the ones
# copied from the XML processing pepa1db tables. This eliminates the need for the filetracking table
# since the file tracking can now be done in the api_articles table. Run through fixes with new
             # database load, adjusting schema to accommodate the sometimes oddball data in the instances.
# Finished new code to extract first page summaries, the old algorithm wasn't working properly
             # across the database. In this case, the new method using libxml's parseXML had some of its
# own issues because it automatically translated all entities, making it unsafe output for HTML.
# Fixed though easily by catching those characters in the event framework.
# Will remove the now unused code next build after it gets put into the repository for longer term
# storage and study.
#2020.0305 # Optimize performance, especially file discovery. Set up tp compare against Solr dates rather than
# MySQL because that way you make sure the articles are in Solr, which is most important.
# General cleanup and another pass at camelCase to snake_case conversion
#2020.0326 # Needed to set lang attribute from p up the ancestors in order to find all paragraphs in a particular
# language when the lang attribute could be local or inherited at intermediate (e.g., section), levels
# even. It's a shame it's not done by default in lxml.
#2020.0330 # Removing the bibliocore code (commented out for now). Instead the biblio will be managed simply in the
# database
#2020.0413 # Changed the way it checks for modified files...was in MySQL records but set it to use Solr file date instead
# because it makes less sense to have one mySQL database per solr database. This way, we can actually check
# if that version of the file is in Solr. (You don't know by checking mysql, especially if the database/solr
# relation isn't one-to-one.)
#
# Also: Added new options:
# --before
# looks at file date, was it created before this date (use YYYY-MM-DD format)
# --after
# looks at file date, was it created after this date (use YYYY-MM-DD format)
# --reloadbefore
# looks at updated (load) date IN SOLR, was it loaded into solr before this date (use YYYY-MM-DD format)
# --reloadafter
# looks at updated (load) date IN SOLR, was it loaded into solr after this date (use YYYY-MM-DD format)
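             # Illustrative invocations for these date filters (a sketch only; the dates are
             # placeholders and nothing beyond the behavior described above is implied):
             #   $ python solrXMLPEPWebLoad.py --after 2020-04-01
             #   $ python solrXMLPEPWebLoad.py --reloadbefore 2020-03-15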
#2020.0423 # Changes to excerpting in opasXMLHelper which affect loading, since that's when excerpts happen.
#2020.0424 # Added stop words to highlighted terms list. Stop words in file opasConfig.HIGHLIGHT_STOP_WORDS_FILE
# Changed it so highlighted terms would have tagging stripped.
# Verbose option -v removed. Must use --verbose instead
#2020.0425 # Changed from highlighted words collecting <i> and <b> to collecting <impx>, which are glossary terms.
#2020.0425 # Added --only option to match other PEP processing software for single file mode. (-d still works as well)
# fixed variable name problem in the error report when the file wasn't found.
#2020.0507 # Added art_pgcount and art_isbn to masterPEPWebDocsSchema and manually added them to Solr to match.
# Added code to load these data.
#2020.0529 # Added updates(!) to the docs database, allowing views to be included and updated with the weekly
             # Use the -v option to turn it on. It will add data for any document that was viewed in the last month
# during updates. No user option to force the whole database, but it can be done easily by a one line
# code tweak.
#
# Note that Solr 8.5 is very tricky about updating db's with child documents that started after 8.2,
# but it works as long as you just give the art_id (I tried adding art_level when I first got the error,
# but that didn't work on 8.5. To get it to work, I had to more closely follow the definitions for
# the schema, with basically only doc_values defined.
#2020.0603 # Moved the options out of main function to the implied main area at the end of the file
# (start of code processing).
# Added a highlighted area to show what database and core will be updated, and prompt the user
# to ensure they want to continue. This was done to prevent doing updates accidentally to
# a production system when in fact it was meant for staging.
# Fixed a bug in the solr lookup of filenames to determine updates needed: was not escaping filename chars
# that need to be escaped for solr, like ( and )
#2020.0709 # Updates for schema changes
#2020.0723 # Change author core field authors (multivalued) to art_author_id_list. The author record
# is for a specific author, but this is the set of authors for that paper.
#2020.0814 # Changes to schema reflected herein...particularly the bib fields in the solrdocscore, since
# those are mainly for faceting. Also, docvalues=true was causing fields to show up in results
# when I wanted those fields hidden, so went to uninvertible instead.
# Disable many annoying pylint messages, warning me about variable naming for example.
# yes, in my Solr code I'm caught between two worlds of snake_case and camelCase.
import sys
sys.path.append('../libs')
sys.path.append('../config')
sys.path.append('../libs/configLib')
import re
import os
import os.path
# import ntpath # note: if dealing with Windows path names on Linux, use ntpath instead)
import time
import string
import logging
logger = logging.getLogger(__name__)
import urllib.request, urllib.parse, urllib.error
import random
import pysolr
import modelsOpasCentralPydantic
# Python 3 code in this block
from io import StringIO
# used this name because later we needed to refer to the module, and datetime is also the name
# of the import from datetime.
import datetime as dtime
# import datetime
from datetime import datetime
import dateutil
import dateutil.parser
from optparse import OptionParser
from lxml import etree
#now uses pysolr exclusively!
# import solrpy as solr
import pymysql
import config
import opasConfig
import opasCoreConfig
from opasCoreConfig import solr_authors, solr_gloss
# from OPASFileTrackerMySQL import FileTracker, FileTrackingInfo
import opasXMLHelper as opasxmllib
import opasCentralDBLib
import opasGenSupportLib as opasgenlib
import localsecrets
def read_stopwords():
with open(opasConfig.HIGHLIGHT_STOP_WORDS_FILE) as f:
stopWordList = f.read().splitlines()
stopPattern = "<[ib]>[A-Z]</[ib]>"
for n in stopWordList:
stopPattern += f"|<[ib]>{n}</[ib]>"
ret_val = re.compile(stopPattern, re.IGNORECASE)
return ret_val
# Module Globals
gCitedTable = dict() # large table of citation counts, too slow to run one at a time.
bib_total_reference_count = 0
rc_stopword_match = read_stopwords() # returns compile re for matching stopwords
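# Illustrative behavior of the compiled matcher (assuming common words such as "and" appear in the
# stop-word file; see remove_values_from_terms_highlighted_list below for the actual use):
#   rc_stopword_match.match("<i>and</i>")                          -> match, so the term is dropped
#   rc_stopword_match.match("<i>The Interpretation of Dreams</i>") -> no match, so the term is kept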
def strip_tags(value, compiled_tag_pattern):
"""
Strip tags matching the compiled_tag_pattern.
"""
ret_val = value
m = compiled_tag_pattern.match(value)
if m:
ret_val = m.group("word")
if ret_val == None:
ret_val = "pagebreak"
ret_val = ret_val.translate(str.maketrans('','', '!"#$%&\'()*+,./:;<=>?@[\\]^_`{|}~'))
return ret_val
def remove_values_from_terms_highlighted_list(the_list, remove_stop_words=True, start_tag_pattern = "<(i|b|bi|bui|fi|impx[^>]*?)>", end_tag_pattern="</(i|b|bi|bui|impx|fi)>"):
"""
Using the list of stop words read in at initialization, remove these from the words used for highlighted lists.
>> remove_values_from_terms_highlighted_list(["<i>not</i>","<i>is</i>","<i>and</i>", "<i>she</i>", "<i>The Interpretation of Dreams</i>", "<i>will</i>", "<i>I</i>", "<i>be</i>" ])
['The Interpretation of Dreams']
"""
stripPattern = f".*<pb>.*|({start_tag_pattern}[\s\n\t]*)+(?P<word>[^<]+?)[\s\n]*({end_tag_pattern})+"
cStripPattern = re.compile(stripPattern, re.IGNORECASE)
# passing the compiled pattern saves from recompiling for every value in function
if remove_stop_words:
return [strip_tags(value, compiled_tag_pattern = cStripPattern) for value in the_list if not rc_stopword_match.match(value)]
else:
return [strip_tags(value, compiled_tag_pattern = cStripPattern) for value in the_list]
def find_all(name_pat, path):
result = []
name_patc = re.compile(name_pat, re.IGNORECASE)
for root, dirs, files in os.walk(path):
for filename in files:
if name_patc.match(filename):
result.append(os.path.join(root, filename))
return result
def non_empty_string(strval):
try:
return strval if strval != "" else None
except Exception as e:
return None
class NewFileTracker(object):
"""
>>> ocd = opasCentralDBLib.opasCentralDB()
>>> ft = NewFileTracker(ocd)
>>> fsel = r"X:\\_PEPA1\\_PEPa1v\\_PEPArchive\\IJP\\043\\IJP.043.0306A(bEXP_ARCH1).XML"
>>> ft.load_fs_fileinfo(fsel)
>>> ft.is_refresh_needed(filename=fsel)
False
>>> ft.is_refresh_needed(filename=fsel, before_date="2020-03-01")
True
>>> ft.is_refresh_needed(filename=fsel, after_date="2019-01-01")
True
>>> ft.is_refresh_needed(filename=fsel, after_date="2020-03-01")
False
>>> ft.is_refresh_needed(filename=fsel, before_date="2019-01-01")
False
>>> ft.is_refresh_needed(filename=fsel, after_date="2020-03-01")
False
>>> ft.is_refresh_needed(filename=fsel, before_date="2019-01-01")
False
"""
#----------------------------------------------------------------------------------------
def __init__(self, ocd):
self.filename = ""
self.ocd = ocd
self.fileModDate = None
self.fileSize = 0
self.buildDate = None
self.conn = self.ocd.open_connection(caller_name="FileTracker")
self.fullfileset = {}
self.file_set = {}
#----------------------------------------------------------------------------------------
def close(self):
self.conn = self.ocd.close_connection(caller_name="FileTracker")
#------------------------------------------------------------------------------------------------------
def load_fs_fileinfo(self, filename):
"""
Load key info for file of interest
"""
if os.path.exists(filename):
self.filename = filename
self.base_filename = os.path.basename(filename)
self.timestamp_str = datetime.utcfromtimestamp(os.path.getmtime(filename)).strftime(localsecrets.TIME_FORMAT_STR)
self.timestamp_obj = datetime.strptime(self.timestamp_str, localsecrets.TIME_FORMAT_STR)
self.fileSize = os.path.getsize(filename)
self.buildDate = time.time()
#------------------------------------------------------------------------------------------------------
def is_refresh_needed(self, filename, before_date=None, after_date=None):
"""
Compare the file's date with whichever is specified
1) before_date: is it before a specified date
2) after_date: is it after a specified date
if neither of those specified:
3) db_date: is the file newer
"""
ret_val = False
self.load_fs_fileinfo(filename)
if before_date is not None:
before_obj = dateutil.parser.parse(before_date)
if self.timestamp_obj < before_obj:
ret_val = True
if after_date is not None:
after_obj = dateutil.parser.parse(after_date)
if self.timestamp_obj > after_obj:
ret_val = True
if before_date is None and after_date is None: # check database updated and
getFileInfoSQL = """
SELECT
art_id,
filename,
filedatetime,
updated
FROM api_articles
WHERE filename = %s
"""
try:
c = self.ocd.db.cursor(pymysql.cursors.DictCursor)
c.execute(getFileInfoSQL, self.base_filename)
row = c.fetchone()
if row == None:
# no database record here, so newer by default
ret_val = True
else:
self.db_fileinfo = row
except pymysql.Error as e:
print(e)
ret_val = False
else:
if self.db_fileinfo["filedatetime"] < self.timestamp_str:
ret_val = True
c.close() # close cursor
return ret_val
#------------------------------------------------------------------------------------------------------
def is_load_date_before_or_after(self, before=None, after=None):
"""
To allow updating (reloading) files before or after a date, compare the date updated
in the database to the before or after time passed in from args.
Note that this uses the timestamp on the database record rather than the file mod date.
"""
ret_val = False
# lookup the current file from the fullset (no db access needed)
files_db_record = self.fullfileset.get(self.base_filename, None)
if before is not None:
before_obj = dateutil.parser.parse(before)
before_time = True
else:
before_time = False
if after is not None:
after_obj = dateutil.parser.parse(after)
after_time = True
else:
after_time = False
if files_db_record is not None:
# it's in the database
db_timestamp_obj = files_db_record.get("updated", None)
# stored format is human readable, UTC time, eg. 2020-02-24T00:41:53Z, per localsecrets.TIME_FORMAT_STR
if db_timestamp_obj is not None:
# default return False, not modified
ret_val = False
if before_time:
if db_timestamp_obj < before_obj:
# file is modified
# print ("File is considered modified: %s. %s > %s" % (curr_fileinfo.filename, curr_fileinfo.timestamp_str, db_timestamp_str))
ret_val = True
if after_time:
if db_timestamp_obj > after_obj:
# file is modified
# print ("File is considered modified: %s. %s > %s" % (curr_fileinfo.filename, curr_fileinfo.timestamp_str, db_timestamp_str))
ret_val = True
else: # db record has no value for timestamp, so consider modified
ret_val = True
else:
ret_val = True # no record of it, so effectively modified.
return ret_val
class ExitOnExceptionHandler(logging.StreamHandler):
"""
Allows us to exit on serious error.
"""
def emit(self, record):
super().emit(record)
if record.levelno in (logging.ERROR, logging.CRITICAL):
raise SystemExit(-1)
class BiblioEntry(object):
"""
    An entry from a document's bibliography.
Used to populate the MySQL table api_biblioxml for statistical gathering
and the Solr core pepwebrefs for searching in special cases.
"""
def __init__(self, artInfo, ref):
self.ref_entry_xml = etree.tostring(ref, with_tail=False)
if self.ref_entry_xml is not None:
self.ref_entry_xml = self.ref_entry_xml.decode("utf8") # convert from bytes
self.ref_entry_text = opasxmllib.xml_elem_or_str_to_text(ref)
self.art_id = artInfo.art_id
self.art_year_int = artInfo.art_year_int
self.ref_local_id= opasxmllib.xml_get_element_attr(ref, "id")
self.ref_id = artInfo.art_id + "." + self.ref_local_id
self.ref_title = opasxmllib.xml_get_subelement_textsingleton(ref, "t")
self.ref_title = self.ref_title[:1023]
self.pgrg = opasxmllib.xml_get_subelement_textsingleton(ref, "pp")
self.pgrg = opasgenlib.first_item_grabber(self.pgrg, re_separator_ptn=";|,", def_return=self.pgrg)
self.pgrg = self.pgrg[:23]
self.rx = opasxmllib.xml_get_element_attr(ref, "rx", default_return=None)
self.rxcf = opasxmllib.xml_get_element_attr(ref, "rxcf", default_return=None) # related rx
if self.rx is not None:
self.rx_sourcecode = re.search("(.*?)\.", self.rx, re.IGNORECASE).group(1)
else:
self.rx_sourcecode = None
self.volume = opasxmllib.xml_get_subelement_textsingleton(ref, "v")
self.volume = self.volume[:23]
self.source_title = opasxmllib.xml_get_subelement_textsingleton(ref, "j")
self.publishers = opasxmllib.xml_get_subelement_textsingleton(ref, "bp")
self.publishers = self.publishers[:254]
if self.publishers != "":
self.source_type = "book"
else:
self.source_type = "journal"
if self.source_type == "book":
self.year_of_publication = opasxmllib.xml_get_subelement_textsingleton(ref, "bpd")
if self.year_of_publication == "":
self.year_of_publication = opasxmllib.xml_get_subelement_textsingleton(ref, "y")
if self.source_title is None or self.source_title == "":
# sometimes has markup
self.source_title = opasxmllib.xml_get_direct_subnode_textsingleton(ref, "bst") # book title
else:
self.year_of_publication = opasxmllib.xml_get_subelement_textsingleton(ref, "y")
if self.year_of_publication != "":
# make sure it's not a range or list of some sort. Grab first year
self.year_of_publication = opasgenlib.year_grabber(self.year_of_publication)
else:
# try to match
try:
self.year_of_publication = re.search(r"\(([A-z]*\s*,?\s*)?([12][0-9]{3,3}[abc]?)\)", self.ref_entry_xml).group(2)
except Exception as e:
logger.warning("no match %s/%s/%s" % (self.year_of_publication, ref, e))
self.year_of_publication_int = 0
if self.year_of_publication != "" and self.year_of_publication is not None:
self.year_of_publication = re.sub("[^0-9]", "", self.year_of_publication)
if self.year_of_publication != "" and self.year_of_publication is not None:
try:
self.year_of_publication_int = int(self.year_of_publication[0:4])
except ValueError as e:
logger.warning("Error converting year_of_publication to int: %s / %s. (%s)" % (self.year_of_publication, self.ref_entry_xml, e))
except Exception as e:
logger.warning("Error trying to find untagged bib year in %s (%s)" % (self.ref_entry_xml, e))
else:
logger.warning("Non-numeric year of pub: %s" % (self.ref_entry_xml))
self.year = self.year_of_publication
if self.year != "" and self.year is not None:
self.year_int = int(self.year)
else:
self.year_int = "Null"
self.author_name_list = [etree.tostring(x, with_tail=False).decode("utf8") for x in ref.findall("a") if x is not None]
self.authors_xml = '; '.join(self.author_name_list)
self.authors_xml = self.authors_xml[:2040]
self.author_list = [opasxmllib.xml_elem_or_str_to_text(x) for x in ref.findall("a") if opasxmllib.xml_elem_or_str_to_text(x) is not None] # final if x gets rid of any None entries which can rarely occur.
self.author_list_str = '; '.join(self.author_list)
self.author_list_str = self.author_list_str[:2040]
#if artInfo.file_classification == opasConfig.DOCUMENT_ACCESS_OFFSITE: # "pepoffsite":
## certain fields should not be stored in returnable areas. So full-text searchable special field for that.
#self.ref_offsite_entry = self.bibRefEntry
#self.bibRefEntry = None
#else:
#self.ref_offsite_entry = None
# setup for Solr load
#self.thisRef = {
#"id" : self.ref_id,
#"art_id" : artInfo.art_id,
#"file_last_modified" : artInfo.filedatetime,
#"file_classification" : artInfo.file_classification,
#"file_size" : artInfo.file_size,
#"file_name" : artInfo.filename,
#"timestamp" : artInfo.processed_datetime, # When batch was entered into core
#"art_title" : artInfo.art_title,
#"art_sourcecode" : artInfo.src_code,
#"art_sourcetitleabbr" : artInfo.src_title_abbr,
#"art_sourcetitlefull" : artInfo.src_title_full,
#"art_sourcetype" : artInfo.src_type,
#"art_authors" : artInfo.art_all_authors,
#"reference_count" :artInfo.ref_count, # would be the same for each reference in article, but could still be useful
#"art_year" : artInfo.art_year,
#"art_year_int" : artInfo.art_year_int,
#"art_vol" : artInfo.art_vol,
#"art_pgrg" : artInfo.art_pgrg,
#"art_lang" : artInfo.art_lang,
#"art_citeas_xml" : artInfo.art_citeas_xml,
#"text_ref" : self.ref_entry_xml,
#"text_offsite_ref": self.ref_offsite_entry,
#"authors" : self.author_list_str,
#"title" : self.ref_title,
#"bib_articletitle" : self.ref_title,
#"bib_sourcetitle" : self.source_title,
#"bib_authors_xml" : self.authors_xml,
#"bib_ref_id" : self.ref_id,
#"bib_ref_rx" : self.rx,
#"bib_ref_rxcf" : self.rxcf, # the not
#"bib_ref_rx_sourcecode" : self.rx_sourcecode,
#"bib_sourcetype" : self.source_type,
#"bib_pgrg" : self.pgrg,
#"bib_year" : self.year_of_publication,
#"bib_year_int" : self.year_int,
#"bib_volume" : self.volume,
#"bib_publisher" : self.publishers
#}
class ArticleInfo(object):
"""
    An entry from a document's metadata.
Used to populate the MySQL table api_articles for relational type querying
and the Solr core pepwebdocs for full-text searching (and the majority of
    client searches).
"""
def __init__(self, sourceinfodb_data, pepxml, art_id, logger):
# let's just double check artid!
self.art_id = None
self.art_id_from_filename = art_id # file name will always already be uppercase (from caller)
self.bk_subdoc = None
self.bk_seriestoc = None
# Just init these. Creator will set based on filename
self.file_classification = None
self.file_size = 0
self.filedatetime = ""
self.filename = ""
# now, the rest of the variables we can set from the data
self.processed_datetime = datetime.utcfromtimestamp(time.time()).strftime(localsecrets.TIME_FORMAT_STR)
try:
self.art_id = opasxmllib.xml_xpath_return_textsingleton(pepxml, "//artinfo/@id", None)
if self.art_id is None:
self.art_id = self.art_id_from_filename
else:
# just to watch for xml keying or naming errors
if self.art_id_from_filename != self.art_id:
logger.warning("File name ID tagged and artID disagree. %s vs %s", self.art_id, self.art_id_from_filename)
self.art_id = self.art_id_from_filename
# make sure it's uppercase
self.art_id = self.art_id.upper()
except Exception as err:
logger.warning("Issue reading file's article id. (%s)", err)
# Containing Article data
#<!-- Common fields -->
#<!-- Article front matter fields -->
#---------------------------------------------
artinfo_xml = pepxml.xpath("//artinfo")[0] # grab full artinfo node, so it can be returned in XML easily.
self.artinfo_xml = etree.tostring(artinfo_xml).decode("utf8")
self.src_code = pepxml.xpath("//artinfo/@j")[0]
try:
self.src_code = self.src_code.upper() # 20191115 - To make sure this is always uppercase
self.src_title_abbr = sourceinfodb_data[self.src_code].get("sourcetitleabbr", None)
self.src_title_full = sourceinfodb_data[self.src_code].get("sourcetitlefull", None)
self.src_type = sourceinfodb_data[self.src_code].get("product_type", None) # journal, book, video...
self.src_embargo = sourceinfodb_data[self.src_code].get("wall", None)
except KeyError as err:
self.src_title_abbr = None
self.src_title_full = None
if self.src_code in ["ZBK"]:
self.src_type = "book"
else:
self.src_type = None
self.src_embargo = None
logger.warning("Error: PEP Source %s not found in source info db. Use the 'PEPSourceInfo export' after fixing the issn table in MySQL DB", self.src_code)
except Exception as err:
logger.error("Error: Problem with this files source info. File skipped. (%s)", err)
#processingErrorCount += 1
return
vol_actual = opasxmllib.xml_xpath_return_textsingleton(pepxml, '//artinfo/artvol/@actual', default_return=None)
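# The artvol 'actual' attribute, when present, carries the corrected volume number; it is used
# below as a fallback when the element content can't be parsed, and finally replaces art_vol_str.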
self.art_vol_str = opasxmllib.xml_xpath_return_textsingleton(pepxml, '//artinfo/artvol/node()', default_return=None)
m = re.match("(\d+)([A-Z]*)", self.art_vol_str)
if m is None:
logger.error(f"Bad Vol # in element content: {self.art_vol_str}")
m = re.match("(\d+)([A-z\-\s]*)", vol_actual)
if m is not None:
self.art_vol_int = m.group(1)
logger.error(f"Recovered Vol # from actual attr: {self.art_vol_int}")
else:
raise ValueError("Severe Error in art_vol")
else:
self.art_vol_int = m.group(1)
if len(m.groups()) == 2:
art_vol_suffix = m.group(2)
# now convert to int
try:
self.art_vol_int = int(self.art_vol_int)
except ValueError:
logger.warning(f"Can't convert art_vol to int: {self.art_vol_int} Error: {e}")
art_vol_suffix = self.art_vol_int[-1]
art_vol_ints = re.findall(r'\d+', self.art_vol_str)
if len(art_vol_ints) >= 1:
self.art_vol_int = art_vol_ints[1]
self.art_vol_int = int(self.art_vol_int)
except Exception as e:
logger.warning(f"Can't convert art_vol to int: {self.art_vol_int} Error: {e}")
if vol_actual is not None:
self.art_vol_str = vol_actual
self.art_issue = opasxmllib.xml_xpath_return_textsingleton(pepxml, '//artinfo/artiss/node()', default_return=None)
self.art_issue_title = opasxmllib.xml_xpath_return_textsingleton(pepxml, '//artinfo/artissinfo/isstitle/node()', default_return=None)
self.art_year_str = opasxmllib.xml_xpath_return_textsingleton(pepxml, '//artinfo/artyear/node()', default_return=None)
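# Year pattern: a 4-digit year with an optional letter suffix (e.g., 1995a), optionally
# followed by a second year when the element holds a range.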
m = re.match("(?P<yearint>[0-9]{4,4})(?P<yearsuffix>[a-zA-Z])?(\s*\-\s*)?((?P<year2int>[0-9]{4,4})(?P<year2suffix>[a-zA-Z])?)?", self.art_year_str)
if m is not None:
self.art_year = m.group("yearint")
self.art_year_int = int(m.group("yearint"))
else:
# no 4-digit year matched; fall back to the raw year string
self.art_year = self.art_year_str
try:
art_year_for_int = re.sub("[^0-9]", "", self.art_year_str)
self.art_year_int = int(art_year_for_int)
except ValueError as err:
logger.warning("Error converting art_year to int: %s", self.art_year_str)
self.art_year_int = 0
artInfoNode = pepxml.xpath('//artinfo')[0]
self.art_type = opasxmllib.xml_get_element_attr(artInfoNode, "arttype", default_return=None)
self.art_vol_title = opasxmllib.xml_xpath_return_textsingleton(pepxml, '//artinfo/artvolinfo/voltitle/node()', default_return=None)
if self.art_vol_title is None:
# try attribute for value (lower priority than element above)
self.art_vol_title = opasxmllib.xml_get_element_attr(artInfoNode, "voltitle", default_return=None)
# m = re.match("(?P<volint>[0-9]+)(?P<volsuffix>[a-zA-Z])", self.art_vol)
m = re.match("(?P<volint>[0-9]+)(?P<volsuffix>[a-zA-Z])?(\s*\-\s*)?((?P<vol2int>[0-9]+)(?P<vol2suffix>[a-zA-Z])?)?", str(self.art_vol_str))
if m is not None:
self.art_vol_suffix = m.group("volsuffix")
# self.art_vol = m.group("volint")
else:
self.art_vol_suffix = None
if self.art_vol_title is not None:
print (f" ...Volume title: {self.art_vol_title}")
if self.art_issue_title is not None:
print (f" ...Issue title: {self.art_issue_title}")
self.art_doi = opasxmllib.xml_get_element_attr(artInfoNode, "doi", default_return=None)
self.art_issn = opasxmllib.xml_get_element_attr(artInfoNode, "ISSN", default_return=None)
self.art_isbn = opasxmllib.xml_get_element_attr(artInfoNode, "ISBN", default_return=None)
self.art_orig_rx = opasxmllib.xml_get_element_attr(artInfoNode, "origrx", default_return=None)
self.start_sectname = opasxmllib.xml_get_element_attr(artInfoNode, "newsecnm", default_return=None)
if self.start_sectname is None:
# look in newer, tagged, data
self.start_sectname = opasxmllib.xml_xpath_return_textsingleton(pepxml, '//artsectinfo/secttitle/node()', default_return=None)
self.art_pgrg = opasxmllib.xml_get_subelement_textsingleton(artInfoNode, "artpgrg", default_return=None) # note: getSingleSubnodeText(pepxml, "artpgrg")
self.art_pgstart, self.art_pgend = opasgenlib.pgrg_splitter(self.art_pgrg)
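# pgrg_splitter splits the page range (e.g., "1-22") into start/end pages; pgnum_splitter below
# further separates any non-numeric prefix/suffix from each page number (see opasgenlib).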
try:
self.art_pgcount = int(pepxml.xpath("count(//pb)")) # 20200506
except Exception as e:
self.art_pgcount = 0
if self.art_pgstart is not None:
self.art_pgstart_prefix, self.art_pgstart, self.pgstart_suffix = opasgenlib.pgnum_splitter(self.art_pgstart)
else:
self.art_pgstart_prefix, self.art_pgstart, self.pgstart_suffix = (None, None, None)
if self.art_pgend is not None:
self.pgend_prefix, self.art_pgend, self.pgend_suffix = opasgenlib.pgnum_splitter(self.art_pgend)
else:
self.pgend_prefix, self.art_pgend, self.pgend_suffix = (None, None, None)
self.art_title = opasxmllib.xml_get_subelement_textsingleton(artInfoNode, "arttitle", skip_tags=["ftnx"])
if self.art_title == "-": # weird title in ANIJP-CHI
self.art_title = ""
self.art_subtitle = opasxmllib.xml_get_subelement_textsingleton(artInfoNode, 'artsub')
if self.art_subtitle == "":
pass
elif self.art_subtitle is None:
self.art_subtitle = ""
else:
#self.artSubtitle = ''.join(etree.fromstring(self.artSubtitle).itertext())
if self.art_title != "":
self.art_subtitle = ": " + self.art_subtitle
self.art_title = self.art_title + self.art_subtitle
else:
self.art_title = self.art_subtitle
self.art_subtitle = ""
self.art_lang = pepxml.xpath('//pepkbd3/@lang')
if self.art_lang == []:
self.art_lang = ['EN']
self.author_xml_list = pepxml.xpath('//artinfo/artauth/aut')
self.author_xml = opasxmllib.xml_xpath_return_xmlsingleton(pepxml, '//artinfo/artauth')
self.authors_bibliographic, self.author_list = opasxmllib.authors_citation_from_xmlstr(self.author_xml, listed=True)
self.art_auth_citation = self.authors_bibliographic
# ToDo: I think I should add an author ID to bib aut too. But that will have
# to wait until later.
self.art_author_id_list = opasxmllib.xml_xpath_return_textlist(pepxml, '//artinfo/artauth/aut[@listed="true"]/@authindexid')
self.art_authors_count = len(self.author_list)
if self.art_author_id_list == []: # no authindexid
logger.warning("This document %s does not have an author list; may be missing authindexids" % art_id)
self.art_author_id_list = self.author_list
self.author_ids_str = ", ".join(self.art_author_id_list)
self.art_auth_mast, self.art_auth_mast_list = opasxmllib.author_mast_from_xmlstr(self.author_xml, listed=True)
self.art_auth_mast_unlisted_str, self.art_auth_mast_unlisted_list = opasxmllib.author_mast_from_xmlstr(self.author_xml, listed=False)
self.art_auth_count = len(self.author_xml_list)
self.art_author_lastnames = opasxmllib.xml_xpath_return_textlist(pepxml, '//artinfo/artauth/aut[@listed="true"]/nlast')
self.art_all_authors = self.art_auth_mast + " (" + self.art_auth_mast_unlisted_str + ")"
self.art_kwds = opasxmllib.xml_xpath_return_textsingleton(pepxml, "//artinfo/artkwds/node()", None)
# Usually we put the abbreviated title here, but that won't always work here.
self.art_citeas_xml = u"""<p class="citeas"><span class="authors">%s</span> (<span class="year">%s</span>) <span class="title">%s</span>. <span class="sourcetitle">%s</span> <span class="pgrg">%s</span>:<span class="pgrg">%s</span></p>""" \
% (self.authors_bibliographic,
self.art_year,
self.art_title,
self.src_title_full,
self.art_vol_int,
self.art_pgrg)
self.art_citeas_text = opasxmllib.xml_elem_or_str_to_text(self.art_citeas_xml)
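# art_qual identifies the containing ("parent") document for book sections, taken from
# artqual/@rx or, failing that, artbkinfo/@extract; if it differs from this article's own id,
# the instance is flagged as a book sub-document (bk_subdoc).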
art_qual_node = pepxml.xpath("//artinfo/artqual")
if art_qual_node != []:
self.art_qual = opasxmllib.xml_get_element_attr(art_qual_node[0], "rx", default_return=None)
else:
self.art_qual = pepxml.xpath("//artbkinfo/@extract")
if self.art_qual == []:
self.art_qual = None
# will be None if not a book extract
# self.art_qual = None
if self.art_qual is not None:
if isinstance(self.art_qual, list):
self.art_qual = str(self.art_qual[0])
if self.art_qual != self.art_id:
self.bk_subdoc = True
else:
self.bk_subdoc = False
else:
self.bk_subdoc = False
refs = pepxml.xpath("/pepkbd3//be")
self.bib_authors = []
self.bib_rx = []
self.bib_title = []
self.bib_journaltitle = []
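# Walk the be (bibliography entry) elements: collect rx links plus lowercased,
# punctuation-stripped titles, source titles and author names for faceting.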
for x in refs:
try:
if x.attrib["rx"] is not None:
self.bib_rx.append(x.attrib["rx"])
except:
pass
journal = x.find("j")
if journal is not None:
journal_lc = opasxmllib.xml_elem_or_str_to_text(journal).lower()
journal_lc = journal_lc.translate(str.maketrans('', '', string.punctuation))
self.bib_journaltitle.append(journal_lc)
title = x.find("t")
# bib article titles for faceting, get rid of punctuation variations
if title is not None:
bib_title = opasxmllib.xml_elem_or_str_to_text(title)
bib_title = bib_title.lower()
bib_title = bib_title.translate(str.maketrans('', '', string.punctuation))
self.bib_title.append(opasxmllib.xml_elem_or_str_to_text(title))
title = x.find("bst")
# bib source titles for faceting, get rid of punctuation variations
# cumulate these together with article title
if title is not None:
bib_title = opasxmllib.xml_elem_or_str_to_text(title)
bib_title = bib_title.lower()
bib_title = bib_title.translate(str.maketrans('', '', string.punctuation))
self.bib_title.append(bib_title)
auths = x.findall("a")
for y in auths:
if opasxmllib.xml_elem_or_str_to_text(x) is not None:
self.bib_authors.append(opasxmllib.xml_elem_or_str_to_text(y))
self.ref_count = len(refs)
# clear it, we aren't saving it.
refs = None
self.bk_info_xml = opasxmllib.xml_xpath_return_xmlsingleton(pepxml, "/pepkbd3//artbkinfo") # all book info in instance
# break it down a bit for the database
self.main_toc_id = opasxmllib.xml_xpath_return_textsingleton(pepxml, "/pepkbd3//artbkinfo/@extract", None)
self.bk_title = opasxmllib.xml_xpath_return_textsingleton(pepxml, "/pepkbd3//bktitle", None)
self.bk_publisher = opasxmllib.xml_xpath_return_textsingleton(pepxml, "/pepkbd3//bkpubandloc", None)
self.bk_seriestoc = opasxmllib.xml_xpath_return_textsingleton(pepxml, "/pepkbd3//artbkinfo/@seriestoc", None)
self.bk_next_id = opasxmllib.xml_xpath_return_textsingleton(pepxml, "//artbkinfo/@next", None)
# hard code special cases SE/GW if they are not covered by the instances
if self.bk_seriestoc is None:
if self.src_code == "SE":
self.bk_seriestoc = "SE.000.0000A"
if self.src_code == "GW":
self.bk_seriestoc = "GW.000.0000A"
#------------------------------------------------------------------------------------------------------------
# Support functions
#------------------------------------------------------------------------------------------------------------
def process_article_for_doc_core(pepxml, artInfo, solrcon, file_xml_contents):
"""
Extract and load data for the full-text core. Whereas in the Refs core each
Solr document is a reference, here each Solr document is a PEP Article.
This core contains bib entries too, but not subfields.
TODO: Originally, this core supported each bibliography record in its own
json formatted structure, saved in a single field. However, when
the code was switched from PySolr to Solrpy this had to be removed,
since Solrpy prohibits this for some reason. Need to raise this
as a case on the issues board for Solrpy.
"""
#------------------------------------------------------------------------------------------------------
# global gCitedTable
print(" ...Processing main file content for the %s core." % opasCoreConfig.SOLR_DOCS)
art_lang = pepxml.xpath('//@lang')
if art_lang == []:
art_lang = ['EN']
# see if this is an offsite article
if artInfo.file_classification == opasConfig.DOCUMENT_ACCESS_OFFSITE:
# certain fields should not be stored in returnable areas. So full-text searchable special field for that.
offsite_contents = True
offsite_ref = """<p>This article or book is available online on a non-PEP website.
Click <a href="//www.doi.org/%s" target="_blank">here</a> to open that website
in another window or tab.
</p>
""" % urllib.parse.quote(artInfo.art_doi)
summaries_xml = f"""<abs>
{offsite_ref}
</abs>
"""
excerpt = excerpt_xml = abstracts_xml = summaries_xml
else:
offsite_contents = False
summaries_xml = opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//summaries", default_return=None)
abstracts_xml = opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//abs", default_return=None)
# multiple data fields, not needed, search children instead, which allows search by para
excerpt = None
excerpt_xml = None
if artInfo.art_type == opasConfig.ARTINFO_ARTTYPE_TOC_INSTANCE: # "TOC"
# put the whole thing in the abstract! Requires some extra processing though
#heading = opasxmllib.get_running_head( source_title=artInfo.src_title_abbr,
#pub_year=artInfo.art_year,
#vol=artInfo.art_vol,
#issue=artInfo.art_issue,
#pgrg=artInfo.art_pgrg,
#ret_format="HTML"
#)
#pepxml.remove(pepxml.find('artinfo'))
#pepxml.remove(pepxml.find('meta'))
excerpt_xml = pepxml
excerpt = opasxmllib.xml_str_to_html(excerpt_xml, transformer_name=opasConfig.TRANSFORMER_XMLTOHTML_EXCERPT)
# excerpt = re.sub("\[\[RunningHead\]\]", f"{heading}", excerpt, count=1)
else:
# copy abstract or summary to excerpt, if not there, then generate it.
# this is so that an app can rely on excerpt to have the abstract or excerpt (or summary)
# TODO: later consider we could just put the excerpt in abstract instead, and make abstract always HTML.
# but for now, I like to be able to distinguish an original abstract from a generated one.
if abstracts_xml is not None:
excerpt_xml = abstracts_xml[0]
elif summaries_xml is not None:
excerpt_xml = summaries_xml[0]
else:
excerpt_xml = opasxmllib.get_first_page_excerpt_from_doc_root(pepxml)
excerpt = opasxmllib.xml_str_to_html(excerpt_xml)
excerpt_xml = opasxmllib.xml_elem_or_str_to_xmlstring(excerpt_xml, None)
#art_authors_unlisted = pepxml.xpath(r'//artinfo/artauth/aut[@listed="false"]/@authindexid')
cited_counts = gCitedTable.get(artInfo.art_id, modelsOpasCentralPydantic.MostCitedArticles())
# cited counts: how often this article is cited, counted from anywhere in the citing docs.
children = doc_children() # new instance, reset child counter suffix
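# Each add_children call below indexes one class of paragraph (body, headings, quotes, dreams,
# poems, notes, dialogs, panels, captions, bib entries, appendixes, summaries/abstracts) as
# nested Solr child documents, tagged with parent_tag so each group is separately searchable by para.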
children.add_children(stringlist=opasxmllib.xml_xpath_return_xmlstringlist_withinheritance(pepxml, "//body//p|//body//p2", attr_to_find="lang"),
parent_id=artInfo.art_id,
parent_tag="p_body",
default_lang=art_lang[0])
children.add_children(stringlist=opasxmllib.xml_xpath_return_xmlstringlist_withinheritance(pepxml, "//h1|//h2|//h3|//h4|//h5|//h6", attr_to_find="lang"),
parent_id=artInfo.art_id,
parent_tag="p_heading",
default_lang=art_lang[0])
children.add_children(stringlist=opasxmllib.xml_xpath_return_xmlstringlist_withinheritance(pepxml, "//quote//p|//quote//p2", attr_to_find="lang"),
parent_id=artInfo.art_id,
parent_tag="p_quote",
default_lang=art_lang[0])
children.add_children(stringlist=opasxmllib.xml_xpath_return_xmlstringlist_withinheritance(pepxml, "//dream//p|//dream//p2", attr_to_find="lang"),
parent_id=artInfo.art_id,
parent_tag="p_dream",
default_lang=art_lang[0])
children.add_children(stringlist=opasxmllib.xml_xpath_return_xmlstringlist_withinheritance(pepxml, "//poem//p|//poem//p2", attr_to_find="lang"),
parent_id=artInfo.art_id,
parent_tag="p_poem",
default_lang=art_lang[0])
children.add_children(stringlist=opasxmllib.xml_xpath_return_xmlstringlist_withinheritance(pepxml, "//note//p|//note//p2", attr_to_find="lang"),
parent_id=artInfo.art_id,
parent_tag="p_note",
default_lang=art_lang[0])
children.add_children(stringlist=opasxmllib.xml_xpath_return_xmlstringlist_withinheritance(pepxml, "//dialog//p|//dialog//p2", attr_to_find="lang"),
parent_id=artInfo.art_id,
parent_tag="p_dialog",
default_lang=art_lang[0])
children.add_children(stringlist=opasxmllib.xml_xpath_return_xmlstringlist_withinheritance(pepxml, "//panel//p|//panel//p2", attr_to_find="lang"),
parent_id=artInfo.art_id,
parent_tag="p_panel",
default_lang=art_lang[0])
children.add_children(stringlist=opasxmllib.xml_xpath_return_xmlstringlist_withinheritance(pepxml, "//caption//p", attr_to_find="lang"),
parent_id=artInfo.art_id,
parent_tag="p_caption",
default_lang=art_lang[0])
children.add_children(stringlist=opasxmllib.xml_xpath_return_xmlstringlist_withinheritance(pepxml, "//bib//be|//binc", attr_to_find="lang"),
parent_id=artInfo.art_id,
parent_tag="p_bib",
default_lang=art_lang[0])
children.add_children(stringlist=opasxmllib.xml_xpath_return_xmlstringlist_withinheritance(pepxml, "//appxs//p|//appxs//p2", attr_to_find="lang"),
parent_id=artInfo.art_id,
parent_tag="p_appxs",
default_lang=art_lang[0])
# summaries and abstracts
children.add_children(stringlist=opasxmllib.xml_xpath_return_xmlstringlist_withinheritance(pepxml, "//summaries//p|//summaries//p2|//abs//p|//abs//p2", attr_to_find="lang"),
parent_id=artInfo.art_id,
parent_tag="p_summaries",
default_lang=art_lang[0])
# indented status
print (f" ...Adding children, tags/counts: {children.tag_counts}")
art_kwds_str = opasgenlib.string_to_list(artInfo.art_kwds)
terms_highlighted = opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//body/*/b|//body/*/i|//body/*/bi|//body/*/bui")
#opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//body/*/i")
terms_highlighted = remove_values_from_terms_highlighted_list(terms_highlighted)
# include pep dictionary marked words
glossary_terms_list = opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//body/*/impx")
# strip the tags, but keep stop words
glossary_terms_list = remove_values_from_terms_highlighted_list(glossary_terms_list, remove_stop_words=False)
glossary_group_terms = pepxml.xpath("//body/*/impx/@grpname")
glossary_group_terms_list = []
if glossary_group_terms is not None:
for n in glossary_group_terms:
glossary_group_terms_list += opasgenlib.string_to_list(n, sep=";")
freuds_italics = opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//body/*/fi", default_return=None)
if freuds_italics is not None:
freuds_italics = remove_values_from_terms_highlighted_list(freuds_italics)
new_rec = {
"id": artInfo.art_id, # important = note this is unique id for every reference
"art_id" : artInfo.art_id, # important
"title" : artInfo.art_title, # important
"art_title_xml" : opasxmllib.xml_xpath_return_xmlsingleton(pepxml, "//arttitle", default_return = None),
"art_sourcecode" : artInfo.src_code, # important
"art_sourcetitleabbr" : artInfo.src_title_abbr,
"art_sourcetitlefull" : artInfo.src_title_full,
"art_sourcetype" : artInfo.src_type,
# abstract_xml and summaries_xml should not be searched, but useful for display without extracting
"abstract_xml" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//abs", default_return = None),
"summaries_xml" : summaries_xml,
"art_excerpt" : excerpt,
"art_excerpt_xml" : excerpt_xml,
# very important field for displaying the whole document or extracting parts
"text_xml" : file_xml_contents, # important
"art_offsite" : offsite_contents, # true if it's offsite
"author_bio_xml" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//nbio", default_return = None),
"author_aff_xml" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//autaff", default_return = None),
"bk_title_xml" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//artbkinfo/bktitle", default_return = None),
"bk_subdoc" : artInfo.bk_subdoc,
"art_info_xml" : artInfo.artinfo_xml,
"bk_alsoknownas_xml" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//artbkinfo/bkalsoknownas", default_return = None),
"bk_editors_xml" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//bkeditors", default_return = None),
"bk_seriestitle_xml" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//bktitle", default_return = None),
"bk_series_toc_id" : artInfo.bk_seriestoc,
"bk_main_toc_id" : artInfo.main_toc_id,
"bk_next_id" : artInfo.bk_next_id,
"caption_text_xml" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml,"//caption", default_return = None),
"caption_title_xml" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//ctitle", default_return = None),
"headings_xml" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//h1|//h2|//h3|//h4|//h5|//h6", default_return = None), # reinstated 2020-08-14
"meta_xml" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//meta", default_return = None),
"text_xml" : file_xml_contents,
"timestamp" : artInfo.processed_datetime, # important
"file_last_modified" : artInfo.filedatetime,
"file_classification" : non_empty_string(artInfo.file_classification),
"file_size" : artInfo.file_size,
"file_name" : artInfo.filename,
"art_subtitle_xml" : opasxmllib.xml_xpath_return_xmlsingleton(pepxml, "//artsubtitle", default_return = None),
"art_citeas_xml" : artInfo.art_citeas_xml,
"art_cited_all" : cited_counts.countAll,
"art_cited_5" : cited_counts.count5,
"art_cited_10" : cited_counts.count10,
"art_cited_20" : cited_counts.count20,
#"art_body_xml" : bodyXml,
"authors" : artInfo.author_list, # artInfo.art_all_authors,
"art_authors" : artInfo.author_list,
"art_authors_count" : artInfo.art_authors_count,
"art_authors_mast" : non_empty_string(artInfo.art_auth_mast),
"art_authors_citation" : non_empty_string(artInfo.art_auth_citation),
"art_authors_unlisted" : non_empty_string(artInfo.art_auth_mast_unlisted_str),
"art_authors_xml" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//aut", default_return = None),
"art_year" : non_empty_string(artInfo.art_year),
"art_year_int" : artInfo.art_year_int,
"art_vol" : artInfo.art_vol_int,
"art_vol_suffix" : non_empty_string(artInfo.art_vol_suffix),
"art_vol_title" : non_empty_string(artInfo.art_vol_title),
"art_pgrg" : non_empty_string(artInfo.art_pgrg),
"art_pgcount" : artInfo.art_pgcount,
"art_iss" : artInfo.art_issue,
"art_iss_title" : artInfo.art_issue_title,
"art_doi" : artInfo.art_doi,
"art_lang" : artInfo.art_lang,
"art_issn" : artInfo.art_issn,
"art_isbn" : artInfo.art_isbn,
"art_origrx" : artInfo.art_orig_rx,
"art_qual" : artInfo.art_qual,
"art_kwds" : artInfo.art_kwds, # pure search field, but maybe not as good as str
"art_kwds_str" : art_kwds_str, # list, multivalue field for faceting
"glossary_terms": glossary_terms_list,
"glossary_group_terms": glossary_group_terms_list,
"freuds_italics": freuds_italics,
"art_type" : artInfo.art_type,
"art_newsecnm" : artInfo.start_sectname,
"terms_xml" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//impx[@type='TERM2']", default_return=None),
"terms_highlighted" : terms_highlighted,
"dialogs_spkr" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//dialog/spkr/node()", default_return=None),
"panels_spkr" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//panel/spkr", default_return=None),
"poems_src" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//poem/src/node()", default_return=None), # multi
"dialogs_xml" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//dialog", default_return=None), # multi
"dreams_xml" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//dream", default_return=None), # multi
"notes_xml" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//note", default_return=None),
"panels_xml" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//panel", default_return=None),
"poems_xml" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//poem", default_return=None), # multi
"quotes_xml" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//quote", default_return=None), # multi
"reference_count" : artInfo.ref_count,
"references_xml" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//be|binc", default_return=None), # multi
"tables_xml" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//tbl", default_return=None), # multi
"bk_pubyear" : opasxmllib.xml_xpath_return_xmlstringlist(pepxml, "//bkpubyear/node()", default_return=None), # multi
"bib_authors" : artInfo.bib_authors,
"bib_title" : artInfo.bib_title,
"bib_journaltitle" : artInfo.bib_journaltitle,
"bib_rx" : artInfo.bib_rx,
"art_level" : 1,
#"art_para" : parasxml,
"_doc" : children.child_list
}
#experimental paras save
# parasxml_update(parasxml, solrcon, artInfo)
# format for pysolr (rather than solrpy, supports nesting)
try:
solrcon.add([new_rec], commit=False)
except Exception as err:
#processingErrorCount += 1
errStr = "Solr call exception for save doc on %s: %s" % (artInfo.art_id, err)
print (errStr)
return
class doc_children(object):
"""
Create a list of child strings to be used as the Solr nested documents.
The parent_tag allows different groups of subelements to be added and made separately searchable.
"""
def __init__(self):
self.count = 0
self.child_list = []
self.tag_counts = {}
def add_children(self, stringlist, parent_id, parent_tag=None, level=2, default_lang=None):
"""
params:
- stringlist is typically going to be the return of an xpath expression on an xml instance
- parent_id is typically going to be the Solr ID of the parent, and this is suffixed
to produce a similar but unique id for the child
- parent_tag for indicating where this was located in the main instance, e.g., references, dreams, etc.
- level for creating children at different levels (even if in the same object)
"""
for n in stringlist:
self.count += 1
try:
self.tag_counts[parent_tag] += 1
except: # initialize
self.tag_counts[parent_tag] = 1
# special attr handling. Later look and see if this is slowing us down...
currelem = etree.fromstring(n)
lang = currelem.attrib.get("lang", default_lang)
para_lgrid = currelem.attrib.get("lgrid", None)
para_lgrx = currelem.attrib.get("lgrx", None)
if para_lgrx is not None:
para_lgrx = [item.strip() for item in para_lgrx.split(',')]
self.child_list.append({"id": parent_id + f".{self.count}",
"art_level": level,
"parent_tag": parent_tag,
"lang": lang,
"para": n,
"para_lgrid" : para_lgrid,
"para_lgrx" : para_lgrx
})
return self.count
#------------------------------------------------------------------------------------------------------
def process_info_for_author_core(pepxml, artInfo, solrAuthor):
"""
Get author data and write a record for each author in each document. Hence an author
of multiple articles will be listed multiple times, once for each article. But
this core will let us research by individual author, including facets.
"""
#------------------------------------------------------------------------------------------------------
# update author data
#<!-- ID = PEP articleID + authorID -->
try:
# Save author info in database
authorPos = 0
for author in artInfo.author_xml_list:
authorID = author.attrib.get('authindexid', None)
if authorID is None:
authorID = opasxmllib.authors_citation_from_xmlstr(author)
try:
authorID = authorID[0]
except:
authorID = "GenID" + "%05d" % random.randint(1, 5000)
authorListed = author.attrib.get('listed', "true")
if authorListed.lower() == "true":
authorPos += 1
authorRole = author.attrib.get('role', None)
authorXML = opasxmllib.xml_elem_or_str_to_xmlstring(author)
authorDocid = artInfo.art_id + "." + ''.join(e for e in authorID if e.isalnum())
authorBio = opasxmllib.xml_xpath_return_textsingleton(author, "nbio")
try:
authorAffID = author.attrib['affid']
except KeyError as e:
authorAffil = None # see if the add still takes!
else:
authorAffil = pepxml.xpath('//artinfo/artauth/autaff[@affid="%s"]' % authorAffID)
authorAffil = etree.tostring(authorAffil[0])
try:
response_update = solrAuthor.add(id = authorDocid, # important = note this is unique id for every author + artid
art_id = artInfo.art_id,
title = artInfo.art_title,
authors = artInfo.art_author_id_list,
art_author_id = authorID,
art_author_listed = authorListed,
art_author_pos_int = authorPos,
art_author_role = authorRole,
art_author_bio = authorBio,
art_author_affil_xml = authorAffil,
art_year_int = artInfo.art_year_int,
art_sourcetype = artInfo.src_type,
art_sourcetitlefull = artInfo.src_title_full,
art_citeas_xml = artInfo.art_citeas_xml,
art_author_xml = authorXML,
file_last_modified = artInfo.filedatetime,
file_classification = artInfo.file_classification,
file_name = artInfo.filename,
timestamp = artInfo.processed_datetime # When batch was entered into core
)
if not re.search('"status">0</int>', response_update):
print (response_update)
except Exception as err:
#processingErrorCount += 1
errStr = "Error for %s: %s" % (artInfo.art_id, err)
print (errStr)
config.logger.error(errStr)
except Exception as err:
#processingErrorCount += 1
errStr = "Error for %s: %s" % (artInfo.art_id, err)
print (errStr)
config.logger.error(errStr)
#------------------------------------------------------------------------------------------------------
#def processBibForReferencesCore(pepxml, artInfo, solrbib):
#"""
#Adds the bibliography data from a single document to the core per the pepwebrefs solr schema
#"""
#print((" ...Processing %s references for the references database." % (artInfo.ref_count)))
##------------------------------------------------------------------------------------------------------
##<!-- biblio section fields -->
##Note: currently, this does not include footnotes or biblio include tagged data in document (binc)
#bibReferences = pepxml.xpath("/pepkbd3//be") # this is the second time we do this (also in artinfo, but not sure or which is better per space vs time considerations)
#retVal = artInfo.ref_count
##processedFilesCount += 1
#bib_total_reference_count = 0
#allRefs = []
#for ref in bibReferences:
## bib_entry = BiblioEntry(artInfo, ref)
#bib_total_reference_count += 1
#bibRefEntry = etree.tostring(ref, with_tail=False)
#bibRefID = opasxmllib.xml_get_element_attr(ref, "id")
#refID = artInfo.art_id + "." + bibRefID
#bibSourceTitle = opasxmllib.xml_get_subelement_textsingleton(ref, "j")
#bibPublishers = opasxmllib.xml_get_subelement_textsingleton(ref, "bp")
#if bibPublishers != "":
#bibSourceType = "book"
#else:
#bibSourceType = "journal"
#if bibSourceType == "book":
#bibYearofPublication = opasxmllib.xml_get_subelement_textsingleton(ref, "bpd")
#if bibYearofPublication == "":
#bibYearofPublication = opasxmllib.xml_get_subelement_textsingleton(ref, "y")
#if bibSourceTitle is None or bibSourceTitle == "":
## sometimes has markup
#bibSourceTitle = opasxmllib.xml_get_direct_subnode_textsingleton(ref, "bst") # book title
#else:
#bibYearofPublication = opasxmllib.xml_get_subelement_textsingleton(ref, "y")
#if bibYearofPublication == "":
## try to match
#try:
#bibYearofPublication = re.search(r"\(([A-z]*\s*,?\s*)?([12][0-9]{3,3}[abc]?)\)", bibRefEntry).group(2)
#except Exception as e:
#logger.warning("no match %s/%s/%s" % (bibYearofPublication, ref, e))
#try:
#bibYearofPublication = re.sub("[^0-9]", "", bibYearofPublication)
#bibYearofPublicationInt = int(bibYearofPublication[0:4])
#except ValueError as e:
#logger.warning("Error converting bibYearofPublication to int: %s / %s. (%s)" % (bibYearofPublication, bibRefEntry, e))
#bibYearofPublicationInt = 0
#except Exception as e:
#logger.warning("Error trying to find untagged bib year in %s (%s)" % (bibRefEntry, e))
#bibYearofPublicationInt = 0
#bibAuthorNameList = [etree.tostring(x, with_tail=False).decode("utf8") for x in ref.findall("a") if x is not None]
#bibAuthorsXml = '; '.join(bibAuthorNameList)
##Note: Changed to is not None since if x gets warning - FutureWarning: The behavior of this method will change in future versions. Use specific 'len(elem)' or 'elem is not None' test instead
#authorList = [opasxmllib.xml_elem_or_str_to_text(x) for x in ref.findall("a") if opasxmllib.xml_elem_or_str_to_text(x) is not None] # final if x gets rid of any None entries which can rarely occur.
#authorList = '; '.join(authorList)
#bibRefRxCf = opasxmllib.xml_get_element_attr(ref, "rxcf", default_return=None)
#bibRefRx = opasxmllib.xml_get_element_attr(ref, "rx", default_return=None)
#if bibRefRx is not None:
#bibRefRxSourceCode = re.search("(.*?)\.", bibRefRx, re.IGNORECASE).group(1)
#else:
#bibRefRxSourceCode = None
## see if this is an offsite article
#if artInfo.file_classification == opasConfig.DOCUMENT_ACCESS_OFFSITE: # "pepoffsite":
## certain fields should not be stored in returnable areas. So full-text searchable special field for that.
#bibRefOffsiteEntry = bibRefEntry
##bibEntryXMLContents = """<html>
##<p>This reference is in an article or book where text is not is available on PEP.
##Click <a href="//www.doi.org/%s" target="_blank">here</a> to show the article on another website
##in another window or tab.
##</p>
##</html>
##""" % urllib.quote(artInfo.artDOI)
## should we trust clients, or remove this data? For now, remove. Need to probably do this in biblio core too
#bibRefEntry = None
#else:
#bibRefOffsiteEntry = None
#thisRef = {
#"id" : refID,
#"art_id" : artInfo.art_id,
#"file_last_modified" : artInfo.filedatetime,
#"file_classification" : artInfo.file_classification,
#"file_size" : artInfo.file_size,
#"file_name" : artInfo.filename,
#"timestamp" : artInfo.processed_datetime, # When batch was entered into core
#"art_title" : artInfo.art_title,
#"art_sourcecode" : artInfo.src_code,
#"art_sourcetitleabbr" : artInfo.art_source_title_abbr,
#"art_sourcetitlefull" : artInfo.art_source_title_full,
#"art_sourcetype" : artInfo.art_source_type,
#"art_authors" : artInfo.artAllAuthors,
#"reference_count" :artInfo.ref_count, # would be the same for each reference in article, but could still be useful
#"art_year" : artInfo.art_year,
#"art_year_int" : artInfo.art_year_int,
#"art_vol" : artInfo.art_vol,
#"art_pgrg" : artInfo.art_pgrg,
#"art_lang" : artInfo.art_lang,
#"art_citeas_xml" : artInfo.art_citeas_xml,
#"text_ref" : bibRefEntry,
#"text_offsite_ref": bibRefOffsiteEntry,
#"authors" : authorList,
#"title" : opasxmllib.xml_get_subelement_textsingleton(ref, "t"),
#"bib_authors_xml" : bibAuthorsXml,
#"bib_ref_id" : bibRefID,
#"bib_ref_rx" : bibRefRx,
#"bib_ref_rxcf" : bibRefRxCf, # the not
#"bib_ref_rx_sourcecode" : bibRefRxSourceCode,
#"bib_articletitle" : opasxmllib.xml_get_subelement_textsingleton(ref, "t"),
#"bib_sourcetype" : bibSourceType,
#"bib_sourcetitle" : bibSourceTitle,
#"bib_pgrg" : opasxmllib.xml_get_subelement_textsingleton(ref, "pp"),
#"bib_year" : bibYearofPublication,
#"bib_year_int" : bibYearofPublicationInt,
#"bib_volume" : opasxmllib.xml_get_subelement_textsingleton(ref, "v"),
#"bib_publisher" : bibPublishers
#}
#allRefs.append(thisRef)
## We collected all the references. Now lets save the whole shebang
#try:
#response_update = solrbib.add_many(allRefs) # lets hold off on the , _commit=True)
#if not re.search('"status">0</int>', response_update):
#print (response_update)
#except Exception as err:
##processingErrorCount += 1
#config.logger.error("Solr call exception %s", err)
#return retVal # return the bibRefCount
#------------------------------------------------------------------------------------------------------
def add_reference_to_biblioxml_table(ocd, artInfo, bib_entry):
"""
Adds the bibliography data from a single document to the biblioxml table in mysql database opascentral.
This database table is used as the basis for the cited_crosstab views, which show most cited articles
by period. It replaces fullbiblioxml which was being imported from the non-OPAS document database
pepa1db, which is generated during document conversion from KBD3 to EXP_ARCH1. That was being used
as an easy bridge to start up OPAS.
Note: This data is in addition to the Solr pepwebrefs (biblio) core which is added elsewhere. The SQL table is
primarily used for the cross-tabs, since the Solr core is more easily joined with
other Solr cores in queries. (TODO: Could later experiment with bridging Solr/SQL.)
Note: More info than needed for crosstabs is captured to this table, but that serves
as a bridge to potential future uses.
TODO: Finish redefining crosstab queries to use this base table.
"""
ret_val = False
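# Note: REPLACE (rather than INSERT) is used so reprocessing the same file overwrites any
# existing row with the same key instead of failing or duplicating it.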
insert_if_not_exists = r"""REPLACE
INTO api_biblioxml (
art_id,
bib_local_id,
art_year,
bib_rx,
bib_sourcecode,
bib_rxcf,
bib_authors,
bib_authors_xml,
bib_articletitle,
bib_sourcetype,
bib_sourcetitle,
bib_pgrg,
bib_year,
bib_year_int,
bib_volume,
bib_publisher,
full_ref_xml,
full_ref_text
)
values (%(art_id)s,
%(ref_local_id)s,
%(art_year_int)s,
%(rx)s,
%(rx_sourcecode)s,
%(rxcf)s,
%(author_list_str)s,
%(authors_xml)s,
%(ref_title)s,
%(source_type)s,
%(source_title)s,
%(pgrg)s,
%(year_of_publication)s,
%(year_of_publication_int)s,
%(volume)s,
%(publishers)s,
%(ref_entry_xml)s,
%(ref_entry_text)s
);
"""
query_param_dict = bib_entry.__dict__
try:
res = ocd.do_action_query(querytxt=insert_if_not_exists, queryparams=query_param_dict)
except Exception as e:
print (f"Error {e}")
else:
ret_val = True
return ret_val # return True for success
#------------------------------------------------------------------------------------------------------
def add_article_to_api_articles_table(ocd, art_info):
"""
Adds the article data from a single document to the api_articles table in mysql database opascentral.
This database table is used as the basis for relational type querying and the crosstab reports.
Note: This data is in addition to the Solr pepwebdocs core which is added elsewhere. The SQL table is
currently primarily used for the crosstabs rather than API queries, since the Solr core is more
easily joined with other Solr cores in queries. (TODO: Could later experiment with bridging Solr/SQL.)
"""
ret_val = False
ocdconn = ocd.open_connection(caller_name="processArticles")
insert_if_not_exists = r"""REPLACE
INTO api_articles (
art_id,
art_doi,
art_type,
art_lang,
art_kwds,
art_auth_mast,
art_auth_citation,
art_title,
src_title_abbr,
src_code,
art_year,
art_vol,
art_vol_str,
art_vol_suffix,
art_issue,
art_pgrg,
art_pgstart,
art_pgend,
main_toc_id,
start_sectname,
bk_info_xml,
bk_title,
bk_publisher,
art_citeas_xml,
art_citeas_text,
ref_count,
filename,
filedatetime
)
values (
%(art_id)s,
%(art_doi)s,
%(art_type)s,
%(art_lang)s,
%(art_kwds)s,
%(art_auth_mast)s,
%(art_auth_citation)s,
%(art_title)s,
%(src_title_abbr)s,
%(src_code)s,
%(art_year)s,
%(art_vol_int)s,
%(art_vol_str)s,
%(art_vol_suffix)s,
%(art_issue)s,
%(art_pgrg)s,
%(art_pgstart)s,
%(art_pgend)s,
%(main_toc_id)s,
%(start_sectname)s,
%(bk_info_xml)s,
%(bk_title)s,
%(bk_publisher)s,
%(art_citeas_xml)s,
%(art_citeas_text)s,
%(ref_count)s,
%(filename)s,
%(filedatetime)s
);
"""
# string entries above must match an attr of the art_info instance.
query_param_dict = art_info.__dict__.copy()
# the element objects in the author_xml_list cause an error in the action query
# even though that dict entry is not used. So removed in a copy.
query_param_dict["author_xml_list"] = None
try:
res = ocd.do_action_query(querytxt=insert_if_not_exists, queryparams=query_param_dict)
except Exception as e:
print (f"art_articles table insert error {e}")
else:
ret_val = True
try:
ocd.db.commit()
ocdconn = ocd.close_connection(caller_name="processArticles")
except pymysql.Error as e:
print("SQL Database -- Commit failed!", e)
return ret_val # return True for success
#------------------------------------------------------------------------------------------------------
def update_views_data(solrcon, view_period=0):
"""
Use in-place updates to update the views data
"""
ocd = opasCentralDBLib.opasCentralDB()
# viewed last calendar year, default
if view_period < 0 or view_period > 4:
view_period = 0
count, most_viewed = ocd.get_most_viewed_crosstab()
print ("Crosstab data downloaded. Starting to update the Solr database with the views data.")
update_count = 0
if most_viewed is not None:
for n in most_viewed:
doc_id = n.get("document_id", None)
count_lastcalyear = n.get("lastcalyear", None)
count_last12mos = n.get("last12months", None)
count_last6mos = n.get("last6months", None)
count_last1mos = n.get("lastmonth", None)
count_lastweek = n.get("lastweek", None)
update_if_count = count_last6mos
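# Only push an update when the article has been viewed in the last 6 months; the other
# view windows ride along on the same update.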
if doc_id is not None and update_if_count > 0:
update_count += 1
upd_rec = {
"id":doc_id,
"art_views_lastcalyear": count_lastcalyear,
"art_views_last12mos": count_last12mos,
"art_views_last6mos": count_last6mos,
"art_views_last1mos": count_last1mos,
"art_views_lastweek": count_lastweek
}
try:
solrcon.add([upd_rec], fieldUpdates={"art_views_lastcalyear": 'set',
"art_views_last12mos": 'set',
"art_views_last6mos": 'set',
"art_views_last1mos": 'set',
"art_views_lastweek": 'set',
}, commit=True)
except Exception as err:
errStr = "Solr call exception for views update on %s: %s" % (doc_id, err)
print (errStr)
print (f"Finished updating Solr database with {update_count} article views/downloads.")
#------------------------------------------------------------------------------------------------------
def process_glossary_core(solr_glossary_core):
"""
Process the special PEP Glossary documents. These are linked to terms in the document
as popups.
Unlike the other cores processing, this has a limited document set so it runs
through them all as a single pass, in a single call to this function.
Note: Moved code 2019/11/30 from separate solrXMLGlossaryLoad program. It was separate
because the glossary isn't updated frequently. However, it was felt that
a completely separate program was harder to keep in sync, so it was folded in here.
"""
global options
countFiles = 0
countTerms = 0
ret_val = (countFiles, countTerms) # File count, entry count
# find the Glossary (bEXP_ARCH1) files (processed with links already) in path
processedDateTime = datetime.utcfromtimestamp(time.time()).strftime(localsecrets.TIME_FORMAT_STR)
pat = r"ZBK.069(.*)\(bEXP_ARCH1\)\.(xml|XML)$"
filePatternMatch = re.compile(pat)
filenames = []
if options.singleFilePath is not None:
if os.path.exists(options.singleFilePath):
folderStart = options.singleFilePath
else:
print(f"Error: Single file mode name: {options.singleFilePath} does not exist.")
else:
folderStart = options.rootFolder
if options.subFolder is not None:
folderStart = os.path.join(folderStart, options.subFolder)
for root, d_names, f_names in os.walk(folderStart):
for f in f_names:
if filePatternMatch.match(f):
countFiles += 1
filenames.append(os.path.join(root, f))
print (f"Ready to import glossary records from {countFiles} files at path: {folderStart}")
gloss_fileTimeStart = time.time()
for n in filenames:
f = open(n, encoding='utf8')
fileXMLContents = f.read()
# get file basename without build (which is in paren)
base = os.path.basename(n)
artID = os.path.splitext(base)[0]
m = re.match(r"(.*)\(.*\)", artID)
artID = m.group(1)
# all IDs to upper case.
artID = artID.upper()
fileTimeStamp = processedDateTime
# import into lxml
# root = etree.fromstring(fileXMLContents)
root = etree.fromstring(opasxmllib.remove_encoding_string(fileXMLContents))
pepxml = root[0]
# Containing Article data
#<!-- Common fields -->
#<!-- Article front matter fields -->
#---------------------------------------------
# Usually we put the abbreviated title here, but that won't always work here.
#<!-- biblio section fields -->
#Note: currently, this does not include footnotes or biblio include tagged data in document (binc)
glossaryGroups = pepxml.xpath("/pepkbd3//dictentrygrp")
groupCount = len(glossaryGroups)
print("File %s has %s groups." % (base, groupCount))
# processedFilesCount += 1
allDictEntries = []
for glossaryGroup in glossaryGroups:
glossaryGroupXML = etree.tostring(glossaryGroup, with_tail=False)
glossaryGroupID = opasxmllib.xml_get_element_attr(glossaryGroup, "id")
glossaryGroupTerm = opasxmllib.xml_get_subelement_textsingleton(glossaryGroup, "term")
glossaryGroupAlso = opasxmllib.xml_get_subelement_xmlsingleton(glossaryGroup, "dictalso")
if glossaryGroupAlso == "":
glossaryGroupAlso = None
print ("Processing Term: %s" % glossaryGroupTerm)
countTerms += 1
dictEntries = glossaryGroup.xpath("dictentry")
groupTermCount = len(dictEntries)
counter = 0
for dictEntry in dictEntries:
counter += 1
thisDictEntry = {}
dictEntryID = glossaryGroupID + ".{:03d}".format(counter)
dictEntryTerm = opasxmllib.xml_get_subelement_textsingleton(dictEntry, "term")
if dictEntryTerm == "":
dictEntryTerm = glossaryGroupTerm
dictEntryTermType = dictEntry.xpath("term/@type")
if dictEntryTermType != []:
dictEntryTermType = dictEntryTermType[0]
else:
dictEntryTermType = "term"
dictEntrySrc = opasxmllib.xml_get_subelement_textsingleton(dictEntry, "src")
dictEntryAlso = opasxmllib.xml_get_subelement_xmlsingleton(dictEntry, "dictalso")
if dictEntryAlso == "":
dictEntryAlso = None
dictEntryDef = opasxmllib.xml_get_subelement_xmlsingleton(dictEntry, "def")
dictEntryDefRest = opasxmllib.xml_get_subelement_xmlsingleton(dictEntry, "defrest")
thisDictEntry = {
"term_id" : dictEntryID,
"group_id" : glossaryGroupID,
"art_id" : artID,
"term" : dictEntryTerm,
"term_type" : dictEntryTermType,
"term_source" : dictEntrySrc,
"term_also" : dictEntryAlso,
"term_def_xml" : dictEntryDef,
"term_def_rest_xml" : dictEntryDefRest,
"group_name" : glossaryGroupTerm,
"group_also" : glossaryGroupAlso,
"group_term_count" : groupTermCount,
"text" : str(glossaryGroupXML, "utf8"),
"file_name" : base,
"timestamp" : processedDateTime,
"file_last_modified" : fileTimeStamp
}
allDictEntries.append(thisDictEntry)
# We collected all the dictentries for the group. Now lets save the whole shebang
try:
response_update = solr_glossary_core.add_many(allDictEntries) # lets hold off on the , _commit=True)
if not re.search('"status">0</int>', response_update):
print (response_update)
except Exception as err:
logger.error("Solr call exception %s", err)
f.close()
solr_glossary_core.commit()
gloss_fileTimeEnd = time.time()
elapsed_seconds = gloss_fileTimeEnd-gloss_fileTimeStart # actual processing time going through files
elapsed_minutes = elapsed_seconds / 60
msg2 = f"Imported {countFiles} glossary documents and {countTerms} terms. Glossary load time: {elapsed_seconds} secs ({elapsed_minutes} minutes)"
print(msg2)
if countFiles > 0:
print(f"...Files per Min: {countFiles/elapsed_minutes:.4f}")
ret_val = (countFiles, countTerms) # File count, entry count
return ret_val
def collect_citation_counts(ocd):
citation_table = dict()
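# Builds a dict mapping cited_document_id -> MostCitedArticles counts (count5/10/20/All,
# presumably trailing 5/10/20-year and all-time windows) from vw_stat_cited_crosstab;
# process_article_for_doc_core uses it to fill the art_cited_* fields.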
print ("Collecting citation counts from cross-tab in biblio database...this will take a minute or two...")
try:
ocd.open_connection()
# Get citation lookup table
try:
cursor = ocd.db.cursor(pymysql.cursors.DictCursor)
sql = """
SELECT cited_document_id, count5, count10, count20, countAll from vw_stat_cited_crosstab;
"""
success = cursor.execute(sql)
if success:
for n in cursor.fetchall():
row = modelsOpasCentralPydantic.MostCitedArticles(**n)
citation_table[row.cited_document_id] = row
cursor.close()
else:
logger.error("Cursor execution failed. Can't fetch.")
except MemoryError as e:
print(("Memory error loading table: {}".format(e)))
except Exception as e:
print(("Table Query Error: {}".format(e)))
ocd.close_connection()
except Exception as e:
print(("Database Connect Error: {}".format(e)))
citation_table["dummy"] = modelsOpasCentralPydantic.MostCitedArticles()
return citation_table
#------------------------------------------------------------------------------------------------------
def file_was_created_before(before_date, filename):
ret_val = False
try:
timestamp_str = datetime.utcfromtimestamp(os.path.getmtime(filename)).strftime(localsecrets.TIME_FORMAT_STR)
if timestamp_str < before_date:
ret_val = True
else:
ret_val = False
except Exception as e:
ret_val = False # not found or error, return False
return ret_val
#------------------------------------------------------------------------------------------------------
def file_was_created_after(after_date, filename):
ret_val = False
try:
timestamp_str = datetime.utcfromtimestamp(os.path.getmtime(filename)).strftime(localsecrets.TIME_FORMAT_STR)
if timestamp_str > after_date:
ret_val = True
else:
ret_val = False
except Exception as e:
ret_val = False # not found or error, return False
return ret_val
#------------------------------------------------------------------------------------------------------
def file_was_loaded_before(solrcore, before_date, filename):
ret_val = False
try:
result = get_file_dates_solr(solrcore, filename)
if result[0]["timestamp"] < before_date:
ret_val = True
else:
ret_val = False
except Exception as e:
ret_val = True # not found or error, return true
return ret_val
#------------------------------------------------------------------------------------------------------
def file_was_loaded_after(solrcore, after_date, filename):
ret_val = False
try:
result = get_file_dates_solr(solrcore, filename)
if result[0]["timestamp"] > after_date:
ret_val = True
else:
ret_val = False
except Exception as e:
ret_val = True # not found or error, return true
return ret_val
#------------------------------------------------------------------------------------------------------
def file_is_same_as_in_solr(solrcore, filename):
ret_val = False
try:
timestamp_str = datetime.utcfromtimestamp(os.path.getmtime(filename)).strftime(localsecrets.TIME_FORMAT_STR)
result = get_file_dates_solr(solrcore, filename)
if result[0]["file_last_modified"] == timestamp_str:
ret_val = True
else:
ret_val = False
except Exception as e:
ret_val = False # error, return false so it's loaded anyway.
return ret_val
#------------------------------------------------------------------------------------------------------
def get_file_dates_solr(solrcore, filename=None):
"""
Fetch the article dates
"""
ret_val = {}
max_rows = 1000000
basename = os.path.basename(filename)
# these legal file name chars are special chars to Solr, so escape them!
b_escaped = basename.translate(str.maketrans({"(": r"\(",
")": r"\)",
"-": r"\-",
":": r"\:",
}))
getFileInfoSOLR = f'art_level:1 && file_name:"{b_escaped}"'
try:
results = solrcore.search(getFileInfoSOLR, fl="art_id, file_name, file_last_modified, timestamp", rows=max_rows)
except Exception as e:
logger.error(f"Solr Query Error {e}")
# let me know whatever the logging is!
print (f"Warning: Solr Query Error: {e}")
else:
if results.hits > 0:
ret_val = results.docs
else:
ret_val = {}
return ret_val
#------------------------------------------------------------------------------------------------------
def main():
global options # so the information can be used in support functions
global gCitedTable
cumulative_file_time_start = time.time()
# scriptSourcePath = os.path.dirname(os.path.realpath(__file__))
processed_files_count = 0
ocd = opasCentralDBLib.opasCentralDB()
# Python 3 did not like the following...
#logging.basicConfig(handlers=[ExitOnExceptionHandler()], filename=logFilename, level=options.logLevel)
logging.basicConfig(filename=logFilename, level=options.logLevel)
logger = config.logger = logging.getLogger(programNameShort)
logger.info('Started at %s', datetime.today().strftime('%Y-%m-%d %H:%M:%S'))
solrurl_docs = None
#solrurl_refs = None
solrurl_authors = None
solrurl_glossary = None
if options.biblio_update or options.fulltext_core_update or options.glossary_core_update:
try:
solrurl_docs = localsecrets.SOLRURL + opasCoreConfig.SOLR_DOCS # e.g., http://localhost:8983/solr/ + pepwebdocs'
#solrurl_refs = localsecrets.SOLRURL + opasConfig.SOLR_REFS # e.g., http://localhost:8983/solr/ + pepwebrefs'
solrurl_authors = localsecrets.SOLRURL + opasCoreConfig.SOLR_AUTHORS
solrurl_glossary = localsecrets.SOLRURL + opasCoreConfig.SOLR_GLOSSARY
print("Logfile: ", logFilename)
if options.singleFilePath is not None:
print (f"Single file only mode: {options.singleFilePath} will be processed.")
else:
print("Input data Root: ", options.rootFolder)
print("Input data Subfolder: ", options.subFolder)
print("Reset Core Data: ", options.resetCoreData)
print(80*"*")
print(f"Database Location: {localsecrets.DBHOST}")
if options.fulltext_core_update:
print("Solr Full-Text Core will be updated: ", solrurl_docs)
print("Solr Authors Core will be updated: ", solrurl_authors)
if options.glossary_core_update:
print("Solr Glossary Core will be updated: ", solrurl_glossary)
#**********************************
#Not used at this time
#if options.biblio_update:
#print("Solr References Core will be updated: ", solrurl_refs)
#**********************************
print(80*"*")
if not options.no_check:
cont = input ("The above databases will be updated. Do you want to continue (y/n)?")
if cont.lower() == "n":
print ("User requested exit. No data changed.")
sys.exit(0)
except Exception as e:
msg = f"cores specification error ({e})."
print((len(msg)*"-"))
print (msg)
print((len(msg)*"-"))
sys.exit(0)
else:
msg = "No cores requested for update. Use -f or -b to update the full-text and biblio cores respectively"
print((len(msg)*"-"))
print (msg)
print((len(msg)*"-"))
sys.exit(0)
timeStart = time.time()
print (f"Processing started at ({time.ctime()})..")
if options.singleFilePath is not None:
singleFileMode = True
folderStart = options.singleFilePath
else:
singleFileMode = False
folderStart = options.rootFolder
if options.subFolder is not None:
folderStart = os.path.join(folderStart, options.subFolder)
# import data about the PEP codes for journals and books.
# Codes are like APA, PAH, ... and special codes like ZBK000 for a particular book
sourceDB = opasCentralDBLib.SourceInfoDB()
solr_docs2 = None
#TODO: Try without the None test, the library should not try to use None as user name or password, so only the first case may be needed
# The connection call is to solrpy (import was just solr)
#if options.httpUserID is not None and options.httpPassword is not None:
if localsecrets.SOLRUSER is not None and localsecrets.SOLRPW is not None:
if options.fulltext_core_update:
solr_docs2 = pysolr.Solr(solrurl_docs, auth=(localsecrets.SOLRUSER, localsecrets.SOLRPW))
# fulltext update always includes authors
# solr_docs = None
# this is now done in opasCoreConfig
#solr_authors = solr.SolrConnection(solrurl_authors, http_user=localsecrets.SOLRUSER, http_pass=localsecrets.SOLRPW)
#if options.glossary_core_update:
# this is now done in opasCoreConfig
#solr_gloss = solr.SolrConnection(solrurl_glossary, http_user=localsecrets.SOLRUSER, http_pass=localsecrets.SOLRPW)
else: # no user and password needed
solr_docs2 = pysolr.Solr(solrurl_docs)
# fulltext update always includes authors
# disconnect the other
# solr_docs = None
# this is now done in opasCoreConfig
#solr_authors = solr.SolrConnection(solrurl_authors)
#if options.glossary_core_update:
# this is now done in opasCoreConfig
#solr_gloss = solr.SolrConnection(solrurl_glossary)
# Reset core's data if requested (mainly for early development)
if options.resetCoreData:
if options.fulltext_core_update:
print ("*** Deleting all data from the docs and author cores ***")
#solrcore_docs.delete_query("*:*")
solr_docs2.delete(q='*:*')
solr_docs2.commit()
#solrcore_docs.commit()
solr_authors.delete_query("*:*")
solr_authors.commit()
if options.glossary_core_update:
print ("*** Deleting all data from the Glossary core ***")
solr_gloss.delete_query("*:*")
solr_gloss.commit()
else:
# check for missing files and delete them from the core, since we didn't empty the core above
pass
if options.views_update:
print(("Update 'View Counts' in Solr selected. Counts to be updated for all files viewed in the last month."))
# Glossary Processing only
if options.glossary_core_update:
# this option will process all files in the glossary core.
glossary_file_count, glossary_terms = process_glossary_core(solr_gloss)
processed_files_count += glossary_file_count
# Docs, Authors and References go through a full set of regular XML files
bib_total_reference_count = 0 # zero this here, it's checked at the end whether references are processed or not
if options.biblio_update or options.fulltext_core_update:
if options.forceRebuildAllFiles == True:
print ("Forced Rebuild - All files added, regardless of whether they were marked in the as already added.")
# find all processed XML files where build is (bEXP_ARCH1) in path
# glob.glob doesn't unfortunately work to do this in Py2.7.x
skipped_files = 0
new_files = 0
total_files = 0
if options.file_key != None:
#selQry = "select distinct filename from articles where articleID
#New for 2021 - built TOCs as "Series TOC rather than hard coding them."
print (f"File Key Specified: {options.file_key}")
pat = fr"({options.file_key}.*)\(bEXP_ARCH1|bSeriesTOC\)\.(xml|XML)$"
file_pattern_match = re.compile(pat)
filenames = find_all(pat, folderStart)
else:
pat = r"(.*)\(bEXP_ARCH1|bSeriesTOC\)\.(xml|XML)$"
file_pattern_match = re.compile(pat)
filenames = []
#all_solr_docs = get_file_dates_solr(solrcore_docs2)
if singleFileMode: # re.match(".*\.xml$", folderStart, re.IGNORECASE):
# single file mode.
if os.path.exists(options.singleFilePath):
filenames.append(options.singleFilePath)
total_files = 1
new_files = 1
else:
print(f"Error: Single file mode name: {options.singleFilePath} does not exist.")
elif filenames != []:
total_files = len(filenames)
new_files = len(filenames)
else:
# get a list of all the XML files that are new
singleFileMode = False
currentfile_info = NewFileTracker(ocd)
for root, d_names, f_names in os.walk(folderStart):
for f in f_names:
if file_pattern_match.match(f):
total_files += 1
#needed only if the check is very slow, not now.
#if totalFiles % 1000 == 0:
#print (f"{totalFiles} files checked so far") # print a dot to show progress, no CR
filename = os.path.join(root, f)
# by default, build all files, but will check modified during actual run
is_modified = True
# this is quick, but if you use the same database for multiple solr installs, it's not accurate
# is_modified = currentfile_info.is_refresh_needed(filename, before_date=options.created_before, after_date=options.created_after)
# don't check solr database here, check to see when actually loading. Takes to long to check each one
# is_modified = file_is_same_as_in_solr(solrcore_docs2, filename=filename)
# look at file date only (no database or solr, compare to create option)
if options.created_after is not None:
is_modified = file_was_created_after(after_date=options.created_after, filename=filename)
#is_modified =\
# currentfile_info.is_load_date_before_or_after(after=options.created_after)
if options.created_before is not None:
is_modified = file_was_created_before(before_date=options.created_before, filename=filename)
#is_modified =\
#currentfile_info.is_load_date_before_or_after(before=options.created_before)
if not is_modified:
# file seen before, need to compare.
#print "File is the same! Skipping..."
skipped_files += 1
continue
else:
new_files += 1
#print "File is NOT the same! Scanning the data..."
filenames.append(filename)
# clear fileTracker it takes a lot of Memory
currentfile_info.close() # close the database
currentfile_info = None
print((80*"-"))
if singleFileMode:
print(f"Single File Mode Selected. Only file {options.singleFilePath} will be imported")
else:
if options.forceRebuildAllFiles:
print(f"Ready to import records from {new_files} files of {total_files} at path {folderStart}")
else:
print(f"Ready to import {new_files} files of {total_files} *if modified* at path: {folderStart}")
print(f"{skipped_files} Skipped files (excluded by date options)")
print((80*"-"))
precommit_file_count = 0
skipped_files = 0
cumulative_file_time_start = time.time()
if new_files > 0:
gCitedTable = collect_citation_counts(ocd)
if options.run_in_reverse:
print ("-r option selected. Running the files found in reverse order.")
filenames.reverse()
# ----------------------------------------------------------------------
# Now walk through all the filenames selected
# ----------------------------------------------------------------------
print (f"Load process started ({time.ctime()}). Examining files.")
for n in filenames:
fileTimeStart = time.time()
if not options.forceRebuildAllFiles:
if not options.display_verbose and skipped_files % 100 == 0 and skipped_files != 0:
print (f"Skipped {skipped_files} so far...loaded {processed_files_count} out of {new_files} possible." )
if options.reload_before_date is not None:
if not file_was_loaded_before(solr_docs2, before_date=options.reload_before_date, filename=n):
skipped_files += 1
if options.display_verbose:
print (f"Skipped - Not loaded before {options.reload_before_date} - {n}.")
continue
if options.reload_after_date is not None:
if not file_was_loaded_before(solr_docs2, after_date=options.reload_after_date, filename=n):
skipped_files += 1
if options.display_verbose:
print (f"Skipped - Not loaded after {options.reload_after_date} - {n}.")
continue
if file_is_same_as_in_solr(solr_docs2, filename=n):
skipped_files += 1
if options.display_verbose:
print (f"Skipped - No refresh needed for {n}")
continue
# get mod date/time, filesize, etc. for mysql database insert/update
processed_files_count += 1
f = open(n, encoding="utf-8")
fileXMLContents = f.read()
# get file basename without build (which is in paren)
base = os.path.basename(n)
artID = os.path.splitext(base)[0]
m = re.match(r"(.*)\(.*\)", artID)
# Update this file in the database as "processed"
file_info = opasgenlib.FileInfo(n)
#currentfile_info.loadForFile(n)
#fileTracker.setFileDatabaseRecord(currFileInfo)
# fileTimeStamp = datetime.utcfromtimestamp(currentfile_info.fileModDate).strftime(localsecrets.TIME_FORMAT_STR)
print(("Processing file #%s of %s: %s (%s bytes)." % (processed_files_count, new_files, base, file_info.fileSize)))
# Note: We could also get the artID from the XML, but since it's also important
# the file names are correct, we'll do it here. Also, it "could" have been left out
# of the artinfo (attribute), whereas the filename is always there.
artID = m.group(1)
# all IDs to upper case.
artID = artID.upper()
# import into lxml
root = etree.fromstring(opasxmllib.remove_encoding_string(fileXMLContents))
pepxml = root
# save common document (article) field values into artInfo instance for both databases
artInfo = ArticleInfo(sourceDB.sourceData, pepxml, artID, logger)
artInfo.filedatetime = file_info.timestamp_str
artInfo.filename = base
artInfo.file_size = file_info.fileSize
try:
artInfo.file_classification = re.search("(current|archive|future|free|offsite)", n, re.IGNORECASE).group(1)
# set it to lowercase for ease of matching later
artInfo.file_classification = artInfo.file_classification.lower()
except Exception as e:
logging.warning("Could not determine file classification for %s (%s)" % (n, e))
# walk through bib section and add to refs core database
precommit_file_count += 1
if precommit_file_count > config.COMMITLIMIT:
print(("Committing info for %s documents/articles" % config.COMMITLIMIT))
# input to the full-text code
if options.fulltext_core_update:
# this option will also load the authors cores.
process_article_for_doc_core(pepxml, artInfo, solr_docs2, fileXMLContents)
process_info_for_author_core(pepxml, artInfo,
solr_authors)
add_article_to_api_articles_table(ocd, artInfo)
if precommit_file_count > config.COMMITLIMIT:
precommit_file_count = 0
solr_docs2.commit()
solr_authors.commit()
#fileTracker.commit()
# input to the references core
if options.biblio_update:
if artInfo.ref_count > 0:
                    bibReferences = pepxml.xpath("/pepkbd3//be") # this is the second time we do this (also in artinfo, but not sure which is better per space vs time considerations)
if 1: # options.display_verbose:
print((" ...Processing %s references for the references database." % (artInfo.ref_count)))
#processedFilesCount += 1
bib_total_reference_count = 0
ocd.open_connection(caller_name="processBibliographies")
for ref in bibReferences:
bib_total_reference_count += 1
bib_entry = BiblioEntry(artInfo, ref)
add_reference_to_biblioxml_table(ocd, artInfo, bib_entry)
try:
ocd.db.commit()
except pymysql.Error as e:
print("SQL Database -- Biblio Commit failed!", e)
ocd.close_connection(caller_name="processBibliographies")
# process_bibliographies(pepxml, artInfo, solrcore_references)
#if preCommitFileCount > config.COMMITLIMIT:
#preCommitFileCount = 0
#solrcore_references.commit()
#fileTracker.commit()
#preCommitFileCount += 1
# close the file, and do the next
f.close()
if 1: # options.display_verbose:
print((" ...Time: %s seconds." % (time.time() - fileTimeStart)))
# all done with the files. Do a final commit.
#try:
#if options.biblio_update:
#solrcore_references.commit()
## fileTracker.commit()
#except Exception as e:
#print(("Exception: ", e))
# if called for with the -v option, do an update on all the views data,
# it takes about 5 minutes to remote update 400 records on AWS
if options.views_update:
print (f"Updating Views Data Starting ({time.ctime()}).")
update_views_data(solr_docs2)
print (f"Load process complete ({time.ctime()}).")
if processed_files_count > 0:
try:
print ("Performing final commit.")
if options.fulltext_core_update:
solr_docs2.commit()
solr_authors.commit()
# fileTracker.commit()
except Exception as e:
print(("Exception: ", e))
# end of docs, authors, and/or references Adds
# ---------------------------------------------------------
# Closing time
# ---------------------------------------------------------
timeEnd = time.time()
#currentfile_info.close()
# for logging
msg = msg2 = None
if (options.biblio_update or options.fulltext_core_update) == True:
elapsed_seconds = timeEnd-cumulative_file_time_start # actual processing time going through files
elapsed_minutes = elapsed_seconds / 60
if bib_total_reference_count > 0:
msg = f"Finished! Imported {processed_files_count} documents and {bib_total_reference_count} references. Total file inspection/load time: {elapsed_seconds:.2f} secs ({elapsed_minutes:.2f} minutes.) "
print(msg)
else:
msg = f"Finished! Imported {processed_files_count} documents. Total file load time: {elapsed_seconds:.2f} secs ({elapsed_minutes:.2f} minutes.)"
print(msg)
if processed_files_count > 0:
print(f"...Files loaded per Min: {processed_files_count/elapsed_minutes:.4f}")
print(f"...Files evaluated per Min: {len(filenames)/elapsed_minutes:.4f}")
elapsed_seconds = timeEnd-timeStart # actual processing time going through files
elapsed_minutes = elapsed_seconds / 60
print (f"Note: File load time is not total elapsed time. Total elapsed time is: {elapsed_seconds:.2f} secs ({elapsed_minutes:.2f} minutes.)")
if processed_files_count > 0:
print(f"Files per elapsed min: {processed_files_count/elapsed_minutes:.4f}")
if msg:
config.logger.info(msg)
if msg2:
        config.logger.info(msg2)
#if processingWarningCount + processingErrorCount > 0:
#print (" Issues found. Warnings: %s, Errors: %s. See log file %s" % (processingWarningCount, processingErrorCount, logFilename))
# -------------------------------------------------------------------------------------------------------
# run it!
if __name__ == "__main__":
global options # so the information can be used in support functions
options = None
programNameShort = "solrXMLPEPWebLoad" # used for log file
logFilename = programNameShort + "_" + datetime.today().strftime('%Y-%m-%d') + ".log"
parser = OptionParser(usage="%prog [options] - PEP Solr Reference Text Data Loader", version="%prog ver. 0.1.14")
parser.add_option("-a", "--allfiles", action="store_true", dest="forceRebuildAllFiles", default=False,
help="Option to force all files to be updated on the specified cores. This does not reset the file tracker but updates it as files are processed.")
parser.add_option("-b", "--biblioupdate", dest="biblio_update", action="store_true", default=False,
help="Whether to update the biblio table in the mysql database (used to be a core)")
parser.add_option("-d", "--dataroot", dest="rootFolder", default=config.DEFAULTDATAROOT,
help="Root folder path where input data is located")
parser.add_option("--only", dest="singleFilePath", default=None,
help="Full path (including filename) of single file to process")
parser.add_option("-f", "--fulltextcoreupdate", dest="fulltext_core_update", action="store_true", default=False,
help="Whether to update the full-text and authors core. Use -d option to specify file folder root path.")
parser.add_option("--key", dest="file_key", default=None,
help="Key for a single file to process")
parser.add_option("-l", "--loglevel", dest="logLevel", default=logging.INFO,
help="Level at which events should be logged")
parser.add_option("--logfile", dest="logfile", default=logFilename,
help="Logfile name with full path where events should be logged")
parser.add_option("--resetcore",
action="store_true", dest="resetCoreData", default=False,
help="reset the data in the selected cores. (authorscore is reset with the fulltext core)")
parser.add_option("-g", "--glossarycoreupdate", dest="glossary_core_update", action="store_true", default=False,
help="Whether to update the glossary core. Use -d option to specify glossary file folder root path.")
parser.add_option("--pw", dest="httpPassword", default=None,
help="Password for the server")
parser.add_option("-q", "--quickload", dest="quickload", action="store_true", default=False,
help="Load the full-set of database file records for a full solr db reload")
parser.add_option("-r", "--reverse", dest="run_in_reverse", action="store_true", default=False,
help="Whether to run the files selected in reverse")
#parser.add_option("-t", "--trackerdb", dest="fileTrackerDBPath", default=None,
#help="Full path and database name where the File Tracking Database is located (sqlite3 db)")
parser.add_option("--sub", dest="subFolder", default=None,
help="Sub folder of root folder specified via -d to process")
parser.add_option("--test", dest="testmode", action="store_true", default=False,
help="Run Doctests")
parser.add_option("-v", "--viewsupdate", dest="views_update", action="store_true", default=False,
help="Whether to update the view count data in Solr when updating documents (adds about 5 minutes)")
#parser.add_option("-u", "--url",
#dest="solrURL", default=config.DEFAULTSOLRHOME,
#help="Base URL of Solr api (without core), e.g., http://localhost:8983/solr/", metavar="URL")
parser.add_option("--verbose", action="store_true", dest="display_verbose", default=False,
help="Display status and operational timing info as load progresses.")
parser.add_option("--nocheck", action="store_true", dest="no_check", default=False,
help="Display status and check whether to proceed.")
parser.add_option("--userid", dest="httpUserID", default=None,
help="UserID for the server")
parser.add_option("--config", dest="config_info", default="Local",
help="UserID for the server")
parser.add_option("--before", dest="created_before", default=None,
help="Load files created before this datetime (use YYYY-MM-DD format)")
parser.add_option("--after", dest="created_after", default=None,
help="Load files created after this datetime (use YYYY-MM-DD format)")
parser.add_option("--reloadbefore", dest="reload_before_date", default=None,
help="Reload files added to Solr before this datetime (use YYYY-MM-DD format)")
parser.add_option("--reloadafter", dest="reload_after_date", default=None,
help="Reload files added to Solr after this datetime (use YYYY-MM-DD format)")
(options, args) = parser.parse_args()
if options.testmode:
import doctest
doctest.testmod()
print ("Fini. SolrXMLPEPWebLoad Tests complete.")
sys.exit()
main()
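    # Example invocations (a sketch only; the script filename and data paths are
    # assumptions, while the flags shown are defined by the add_option calls above):
    #   python solrXMLPEPWebLoad.py -d /path/to/pep-xml -f -b --verbose
    #   python solrXMLPEPWebLoad.py --key IJP.100 -f --after 2021-01-01 --verbose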
| 52.377327
| 248
| 0.565975
|
2f98b564bc26190922cf676bd67c4622d7816386
| 1,161
|
py
|
Python
|
fbchat/models.py
|
rawbeen72/fbchat-bot
|
0f47d23ca21fdc8c337c7b0a2ff652caf13255b5
|
[
"BSD-3-Clause"
] | 1
|
2022-03-14T04:24:49.000Z
|
2022-03-14T04:24:49.000Z
|
fbchat/models.py
|
abdul97233/demobot
|
857718f1307dd48c99a7af8a3a001beaffd9ea52
|
[
"BSD-3-Clause"
] | null | null | null |
fbchat/models.py
|
abdul97233/demobot
|
857718f1307dd48c99a7af8a3a001beaffd9ea52
|
[
"BSD-3-Clause"
] | 1
|
2022-03-20T03:11:02.000Z
|
2022-03-20T03:11:02.000Z
|
# -*- coding: UTF-8 -*-
"""This file is here to maintain backwards compatability, and to re-export our models
into the global module (see `__init__.py`).
A common pattern was to use `from fbchat.models import *`, hence we need this while
transitioning to a better code structure.
"""
from __future__ import unicode_literals
from ._core import Enum
from ._exception import FBchatException, FBchatFacebookError, FBchatUserError
from ._thread import ThreadType, ThreadLocation, ThreadColor, Thread
from ._user import TypingStatus, User, ActiveStatus
from ._group import Group, Room
from ._page import Page
from ._message import EmojiSize, MessageReaction, Mention, Message
from ._attachment import Attachment, UnsentMessage, ShareAttachment
from ._sticker import Sticker
from ._location import LocationAttachment, LiveLocationAttachment
from ._file import FileAttachment, AudioAttachment, ImageAttachment, VideoAttachment
from ._quick_reply import (
QuickReply,
QuickReplyText,
QuickReplyLocation,
QuickReplyPhoneNumber,
QuickReplyEmail,
)
from ._poll import Poll, PollOption
from ._plan import GuestStatus, Plan
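# A minimal sketch of the legacy import pattern this module preserves (the class
# names come from the re-exports above; the message text is illustrative only):
#
#   from fbchat.models import *
#   message = Message(text="Hello")
#   error = FBchatException("something went wrong")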
| 38.7
| 86
| 0.783807
|
42f38f3cc3d8c583425fb2e7a18cee009bde1537
| 555
|
py
|
Python
|
python3/numpy/ndarray.py
|
Nahid-Hassan/code-snippets
|
24bd4b81564887822a0801a696001fcbeb6a7a75
|
[
"MIT"
] | 2
|
2020-09-29T04:09:41.000Z
|
2020-10-18T13:33:36.000Z
|
python3/numpy/ndarray.py
|
Nahid-Hassan/code-snippets
|
24bd4b81564887822a0801a696001fcbeb6a7a75
|
[
"MIT"
] | null | null | null |
python3/numpy/ndarray.py
|
Nahid-Hassan/code-snippets
|
24bd4b81564887822a0801a696001fcbeb6a7a75
|
[
"MIT"
] | 1
|
2021-12-26T04:55:55.000Z
|
2021-12-26T04:55:55.000Z
|
"""
Created on Sun Mar 17 19:15:33 2019
@author: nahid
"""
import numpy as np
# Creating array object
arr = np.array( [[ 1, 2, 3],
[ 4, 2, 5]] )
# Printing type of arr object
print("Array is of type: ", type(arr))
# Printing array dimensions (axes)
print("No. of dimensions: ", arr.ndim)
# Printing shape of array
print("Shape of array: ", arr.shape)
# Printing size (total number of elements) of array
print("Size of array: ", arr.size)
# Printing type of elements in array
print("Array stores elements of type: ", arr.dtype)
| 23.125
| 51
| 0.655856
|
068afabb0c42c187cb4a11a36a58079fe6fcf1c8
| 1,138
|
py
|
Python
|
src/app/transform.py
|
zgpio/travis
|
2f540c2d00a2573b8b694a4d9e512f379cd5ea86
|
[
"BSD-3-Clause"
] | 1
|
2019-10-09T15:00:28.000Z
|
2019-10-09T15:00:28.000Z
|
src/app/transform.py
|
zgpio/travis
|
2f540c2d00a2573b8b694a4d9e512f379cd5ea86
|
[
"BSD-3-Clause"
] | null | null | null |
src/app/transform.py
|
zgpio/travis
|
2f540c2d00a2573b8b694a4d9e512f379cd5ea86
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
from pathlib import Path
self_dir = Path(__file__).parent.resolve()
with open(f"{self_dir}/icons.nerdfont.json", 'r') as f:
load_dict = json.load(f)
# print(load_dict)
# NOTE: mac builtin terminal: webpack/vue output shows up as mojibake (garbled characters)
with open(f"{self_dir}/transform.cpp", "w") as f:
for name, val in load_dict.items():
if name == 'icons':
f.write('enum Icon {\n')
for k in val.keys():
line = f' {k},\n'
f.write(line)
f.write('};\n')
f.write('pair<string, string> icons[] = {\n')
for k, v in val.items():
code = v['code']
# TODO: remove #
color = v['color']
print(k, code, color)
line = f' {{"{code}", "{color}"}},\n'
f.write(line)
f.write('};\n')
else:
f.write('\n')
f.write(f'unordered_map<string, Icon> {name} = {{\n')
for k, v in val.items():
line = f' {{ "{k}", {v} }},\n'
f.write(line)
f.write('};\n')
| 30.756757
| 65
| 0.44464
|
b46ff10288ce2ba7f1cfab63674caa76a4daf1ef
| 6,760
|
py
|
Python
|
src/support/azext_support/custom.py
|
michimune/azure-cli-extensions
|
697e2c674e5c0825d44c72d714542fe01331e107
|
[
"MIT"
] | 1
|
2022-03-22T15:02:32.000Z
|
2022-03-22T15:02:32.000Z
|
src/support/azext_support/custom.py
|
michimune/azure-cli-extensions
|
697e2c674e5c0825d44c72d714542fe01331e107
|
[
"MIT"
] | 1
|
2021-02-10T22:04:59.000Z
|
2021-02-10T22:04:59.000Z
|
src/support/azext_support/custom.py
|
michimune/azure-cli-extensions
|
697e2c674e5c0825d44c72d714542fe01331e107
|
[
"MIT"
] | 1
|
2021-06-03T19:31:10.000Z
|
2021-06-03T19:31:10.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=unused-argument
# pylint: disable=too-many-locals
import json
from datetime import date, datetime, timedelta
from azext_support._utils import (get_bearer_token, is_quota_ticket,
is_technical_ticket, parse_support_area_path)
from knack.log import get_logger
logger = get_logger(__name__)
def list_support_tickets(cmd, client, filters=None):
if filters is None:
filters = "CreatedDate ge " + str(date.today() - timedelta(days=7))
return client.list(top=100, filter=filters)
def get_support_tickets(cmd, client, ticket_name=None):
return client.get(support_ticket_name=ticket_name)
def update_support_tickets(cmd, client,
ticket_name=None,
severity=None,
contact_first_name=None,
contact_last_name=None,
contact_method=None,
contact_email=None,
contact_additional_emails=None,
contact_phone_number=None,
contact_timezone=None,
contact_country=None,
contact_language=None):
body = {}
body["first_name"] = contact_first_name
body["last_name"] = contact_last_name
body["preferred_contact_method"] = contact_method
body["primary_email_address"] = contact_email
body["additional_email_addresses"] = contact_additional_emails
body["phone_number"] = contact_phone_number
body["preferred_time_zone"] = contact_timezone
body["country"] = contact_country
body["preferred_support_language"] = contact_language
return client.update(support_ticket_name=ticket_name, severity=severity, contact_details=body)
def list_support_tickets_communications(cmd, client, ticket_name=None, filters=None):
return client.list(support_ticket_name=ticket_name, filter=filters)
def get_support_tickets_communications(cmd, client, ticket_name=None, communication_name=None):
return client.get(support_ticket_name=ticket_name, communication_name=communication_name)
def create_support_tickets(cmd, client,
ticket_name=None,
problem_classification=None,
title=None,
description=None,
severity=None,
start_time=None,
require_24_by_7_response=None,
contact_first_name=None,
contact_last_name=None,
contact_method=None,
contact_email=None,
contact_additional_emails=None,
contact_phone_number=None,
contact_timezone=None,
contact_country=None,
contact_language=None,
technical_resource=None,
quota_change_version=None,
quota_change_subtype=None,
quota_change_regions=None,
quota_change_payload=None,
partner_tenant_id=None):
service_name = parse_support_area_path(problem_classification)["service_name"]
service = "/providers/Microsoft.Support/services/{0}".format(service_name)
contactBody = {}
contactBody["first_name"] = contact_first_name
contactBody["last_name"] = contact_last_name
contactBody["preferred_contact_method"] = contact_method
contactBody["primary_email_address"] = contact_email
contactBody["additional_email_addresses"] = contact_additional_emails
contactBody["phone_number"] = contact_phone_number
contactBody["preferred_time_zone"] = contact_timezone
contactBody["country"] = contact_country
contactBody["preferred_support_language"] = contact_language
body = {}
body["description"] = description
body["problem_classification_id"] = problem_classification
body["severity"] = severity
body["contact_details"] = contactBody
body["title"] = title
body["service_id"] = service
body["require24_x7_response"] = require_24_by_7_response if require_24_by_7_response is not None else False
start_date_time = start_time if start_time is not None else datetime.now()
start_date_time = start_date_time.strftime("%Y-%m-%dT%H:%M:%SZ")
body["problem_start_time"] = start_date_time
if is_quota_ticket(service):
quotaBody = {}
quotaBody["quota_change_request_sub_type"] = quota_change_subtype
quotaBody["quota_change_request_version"] = quota_change_version
quota_change_requests = []
if quota_change_regions is not None and quota_change_payload is not None:
for (region, payload) in zip(quota_change_regions, quota_change_payload):
quota_change_requests.append({"region": region, "payload": payload})
quotaBody["quota_change_requests"] = quota_change_requests
body["quota_ticket_details"] = quotaBody
if is_technical_ticket(service):
body["technical_ticket_details"] = {"resource_id": technical_resource}
logger.debug("Sending create request with below payload: ")
logger.debug(json.dumps(body, indent=4))
custom_headers = {}
if partner_tenant_id is not None:
custom_headers["x-ms-authorization-auxiliary"] = get_bearer_token(cmd, partner_tenant_id)
return client.create(support_ticket_name=ticket_name, create_support_ticket_parameters=body,
custom_headers=custom_headers)
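# Illustrative note (values are hypothetical): quota_change_regions and
# quota_change_payload are zipped pairwise above, so
#   quota_change_regions = ["EastUS", "WestUS"]
#   quota_change_payload = ['{"VMFamily": "Dv3", "NewLimit": 30}', '{"VMFamily": "Dv3", "NewLimit": 50}']
# yields quota_change_requests entries
#   {"region": "EastUS", "payload": '{"VMFamily": "Dv3", "NewLimit": 30}'} and
#   {"region": "WestUS", "payload": '{"VMFamily": "Dv3", "NewLimit": 50}'}.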
def create_support_tickets_communications(cmd, client,
ticket_name=None,
communication_name=None,
communication_body=None,
communication_subject=None,
communication_sender=None):
body = {}
body["sender"] = communication_sender
body["subject"] = communication_subject
body["body"] = communication_body
return client.create(support_ticket_name=ticket_name, communication_name=communication_name,
create_communication_parameters=body)
| 45.066667
| 111
| 0.622485
|
bbd3bcb20a59ab6125ba058efc244a5b35365371
| 630
|
py
|
Python
|
setup.py
|
ludios/Securetypes
|
fe6d93b5083e63e4a6b91428ab15a667293208bd
|
[
"PSF-2.0"
] | 2
|
2016-10-23T10:01:52.000Z
|
2021-11-29T09:03:54.000Z
|
setup.py
|
ludios/Securetypes
|
fe6d93b5083e63e4a6b91428ab15a667293208bd
|
[
"PSF-2.0"
] | null | null | null |
setup.py
|
ludios/Securetypes
|
fe6d93b5083e63e4a6b91428ab15a667293208bd
|
[
"PSF-2.0"
] | null | null | null |
#!/usr/bin/env python
from distutils.core import setup
import securetypes
setup(
name='Securetypes',
version=securetypes.__version__,
description="securedict implementation, a dict that uses secure " +
"hashing to stop algorithmic complexity attacks",
url="https://github.com/ludios/Securetypes",
author="Ivan Kozik",
author_email="ivan@ludios.org",
classifiers=[
'Programming Language :: Python :: 2',
'Development Status :: 3 - Alpha',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
],
py_modules=['securetypes', 'test_securetypes'],
)
| 26.25
| 68
| 0.725397
|
074ddd8524b9a17b7313a83da0e0a9621647bedf
| 15,819
|
py
|
Python
|
sunshine_conversations_client/model/content.py
|
Dima2022/sunshine-conversations-python
|
8085a82dc320d97f09bb0174d11dd1865a65404a
|
[
"Apache-2.0"
] | 4
|
2020-09-27T14:28:25.000Z
|
2022-02-02T13:51:29.000Z
|
sunshine_conversations_client/model/content.py
|
Dima2022/sunshine-conversations-python
|
8085a82dc320d97f09bb0174d11dd1865a65404a
|
[
"Apache-2.0"
] | 3
|
2021-09-30T18:18:58.000Z
|
2021-12-04T07:55:23.000Z
|
sunshine_conversations_client/model/content.py
|
Dima2022/sunshine-conversations-python
|
8085a82dc320d97f09bb0174d11dd1865a65404a
|
[
"Apache-2.0"
] | 5
|
2020-11-07T02:08:18.000Z
|
2021-12-07T17:10:23.000Z
|
# coding: utf-8
"""
Sunshine Conversations API
The version of the OpenAPI document: 9.4.5
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from sunshine_conversations_client.configuration import Configuration
from sunshine_conversations_client.undefined import Undefined
class Content(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'text': 'str',
'actions': 'list[ActionSubset]',
'items': 'list[Item]',
'display_settings': 'CarouselMessageDisplaySettings',
'media_url': 'str',
'media_size': 'float',
'media_type': 'str',
'alt_text': 'str',
'block_chat_input': 'bool',
'fields': 'list[Field]',
'text_fallback': 'str',
'coordinates': 'LocationMessageCoordinates',
'location': 'LocationMessageLocation',
'template': 'object'
}
attribute_map = {
'type': 'type',
'text': 'text',
'actions': 'actions',
'items': 'items',
'display_settings': 'displaySettings',
'media_url': 'mediaUrl',
'media_size': 'mediaSize',
'media_type': 'mediaType',
'alt_text': 'altText',
'block_chat_input': 'blockChatInput',
'fields': 'fields',
'text_fallback': 'textFallback',
'coordinates': 'coordinates',
'location': 'location',
'template': 'template'
}
nulls = set()
discriminator_value_class_map = {
}
def __init__(self, type='template', text=None, actions=None, items=None, display_settings=None, media_url=None, media_size=None, media_type=None, alt_text=None, block_chat_input=None, fields=None, text_fallback=None, coordinates=None, location=None, template=None, local_vars_configuration=None): # noqa: E501
"""Content - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._type = None
self._text = None
self._actions = None
self._items = None
self._display_settings = None
self._media_url = None
self._media_size = None
self._media_type = None
self._alt_text = None
self._block_chat_input = None
self._fields = None
self._text_fallback = None
self._coordinates = None
self._location = None
self._template = None
self.discriminator = 'type'
self.type = type
if text is not None:
self.text = text
if actions is not None:
self.actions = actions
self.items = items
if display_settings is not None:
self.display_settings = display_settings
self.media_url = media_url
if media_size is not None:
self.media_size = media_size
if media_type is not None:
self.media_type = media_type
if alt_text is not None:
self.alt_text = alt_text
if block_chat_input is not None:
self.block_chat_input = block_chat_input
self.fields = fields
if text_fallback is not None:
self.text_fallback = text_fallback
self.coordinates = coordinates
if location is not None:
self.location = location
self.template = template
@property
def type(self):
"""Gets the type of this Content. # noqa: E501
The type of message. # noqa: E501
:return: The type of this Content. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this Content.
The type of message. # noqa: E501
:param type: The type of this Content. # noqa: E501
:type: str
"""
self._type = type
@property
def text(self):
"""Gets the text of this Content. # noqa: E501
The fallback text message used when location messages are not supported by the channel. # noqa: E501
:return: The text of this Content. # noqa: E501
:rtype: str
"""
return self._text
@text.setter
def text(self, text):
"""Sets the text of this Content.
The fallback text message used when location messages are not supported by the channel. # noqa: E501
:param text: The text of this Content. # noqa: E501
:type: str
"""
self._text = text
@property
def actions(self):
"""Gets the actions of this Content. # noqa: E501
An array of objects representing the actions associated with the message. The array length is limited by the third party channel. # noqa: E501
:return: The actions of this Content. # noqa: E501
:rtype: list[ActionSubset]
"""
return self._actions
@actions.setter
def actions(self, actions):
"""Sets the actions of this Content.
An array of objects representing the actions associated with the message. The array length is limited by the third party channel. # noqa: E501
:param actions: The actions of this Content. # noqa: E501
:type: list[ActionSubset]
"""
self._actions = actions
@property
def items(self):
"""Gets the items of this Content. # noqa: E501
An array of objects representing the items associated with the message. Only present in carousel and list type messages. # noqa: E501
:return: The items of this Content. # noqa: E501
:rtype: list[Item]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this Content.
An array of objects representing the items associated with the message. Only present in carousel and list type messages. # noqa: E501
:param items: The items of this Content. # noqa: E501
:type: list[Item]
"""
self._items = items
@property
def display_settings(self):
"""Gets the display_settings of this Content. # noqa: E501
:return: The display_settings of this Content. # noqa: E501
:rtype: CarouselMessageDisplaySettings
"""
return self._display_settings
@display_settings.setter
def display_settings(self, display_settings):
"""Sets the display_settings of this Content.
:param display_settings: The display_settings of this Content. # noqa: E501
:type: CarouselMessageDisplaySettings
"""
self._display_settings = display_settings
@property
def media_url(self):
"""Gets the media_url of this Content. # noqa: E501
The URL for media, such as an image, attached to the message. # noqa: E501
:return: The media_url of this Content. # noqa: E501
:rtype: str
"""
return self._media_url
@media_url.setter
def media_url(self, media_url):
"""Sets the media_url of this Content.
The URL for media, such as an image, attached to the message. # noqa: E501
:param media_url: The media_url of this Content. # noqa: E501
:type: str
"""
self._media_url = media_url
@property
def media_size(self):
"""Gets the media_size of this Content. # noqa: E501
The size of the media. # noqa: E501
:return: The media_size of this Content. # noqa: E501
:rtype: float
"""
return self._media_size
@media_size.setter
def media_size(self, media_size):
"""Sets the media_size of this Content.
The size of the media. # noqa: E501
:param media_size: The media_size of this Content. # noqa: E501
:type: float
"""
self._media_size = media_size
@property
def media_type(self):
"""Gets the media_type of this Content. # noqa: E501
The media type of the file. # noqa: E501
:return: The media_type of this Content. # noqa: E501
:rtype: str
"""
return self._media_type
@media_type.setter
def media_type(self, media_type):
"""Sets the media_type of this Content.
The media type of the file. # noqa: E501
:param media_type: The media_type of this Content. # noqa: E501
:type: str
"""
self._media_type = media_type
@property
def alt_text(self):
"""Gets the alt_text of this Content. # noqa: E501
An optional description of the image for accessibility purposes. The field will be saved by default with the file name as the value. # noqa: E501
:return: The alt_text of this Content. # noqa: E501
:rtype: str
"""
return self._alt_text
@alt_text.setter
def alt_text(self, alt_text):
"""Sets the alt_text of this Content.
An optional description of the image for accessibility purposes. The field will be saved by default with the file name as the value. # noqa: E501
:param alt_text: The alt_text of this Content. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
alt_text is not None and len(alt_text) > 128):
raise ValueError("Invalid value for `alt_text`, length must be less than or equal to `128`") # noqa: E501
self._alt_text = alt_text
@property
def block_chat_input(self):
"""Gets the block_chat_input of this Content. # noqa: E501
true if the message should block the chat input on Web Messenger. # noqa: E501
:return: The block_chat_input of this Content. # noqa: E501
:rtype: bool
"""
return self._block_chat_input
@block_chat_input.setter
def block_chat_input(self, block_chat_input):
"""Sets the block_chat_input of this Content.
true if the message should block the chat input on Web Messenger. # noqa: E501
:param block_chat_input: The block_chat_input of this Content. # noqa: E501
:type: bool
"""
self._block_chat_input = block_chat_input
@property
def fields(self):
"""Gets the fields of this Content. # noqa: E501
Array of field objects that contain the submitted fields. # noqa: E501
:return: The fields of this Content. # noqa: E501
:rtype: list[Field]
"""
return self._fields
@fields.setter
def fields(self, fields):
"""Sets the fields of this Content.
Array of field objects that contain the submitted fields. # noqa: E501
:param fields: The fields of this Content. # noqa: E501
:type: list[Field]
"""
self._fields = fields
@property
def text_fallback(self):
"""Gets the text_fallback of this Content. # noqa: E501
A string containing the `label: value` of all fields, each separated by a newline character. # noqa: E501
:return: The text_fallback of this Content. # noqa: E501
:rtype: str
"""
return self._text_fallback
@text_fallback.setter
def text_fallback(self, text_fallback):
"""Sets the text_fallback of this Content.
A string containing the `label: value` of all fields, each separated by a newline character. # noqa: E501
:param text_fallback: The text_fallback of this Content. # noqa: E501
:type: str
"""
self._text_fallback = text_fallback
@property
def coordinates(self):
"""Gets the coordinates of this Content. # noqa: E501
:return: The coordinates of this Content. # noqa: E501
:rtype: LocationMessageCoordinates
"""
return self._coordinates
@coordinates.setter
def coordinates(self, coordinates):
"""Sets the coordinates of this Content.
:param coordinates: The coordinates of this Content. # noqa: E501
:type: LocationMessageCoordinates
"""
self._coordinates = coordinates
@property
def location(self):
"""Gets the location of this Content. # noqa: E501
:return: The location of this Content. # noqa: E501
:rtype: LocationMessageLocation
"""
return self._location
@location.setter
def location(self, location):
"""Sets the location of this Content.
:param location: The location of this Content. # noqa: E501
:type: LocationMessageLocation
"""
self._location = location
@property
def template(self):
"""Gets the template of this Content. # noqa: E501
The whatsapp template message to send. For more information, consult the [guide](https://docs.smooch.io/guide/whatsapp#sending-message-templates). `schema` must be set to `whatsapp`. # noqa: E501
:return: The template of this Content. # noqa: E501
:rtype: object
"""
return self._template
@template.setter
def template(self, template):
"""Sets the template of this Content.
The whatsapp template message to send. For more information, consult the [guide](https://docs.smooch.io/guide/whatsapp#sending-message-templates). `schema` must be set to `whatsapp`. # noqa: E501
:param template: The template of this Content. # noqa: E501
:type: object
"""
self._template = template
def get_real_child_model(self, data):
"""Returns the real base class specified by the discriminator"""
if self.discriminator is None:
return
discriminator_key = self.attribute_map[self.discriminator]
discriminator_value = data[discriminator_key]
return self.discriminator_value_class_map.get(discriminator_value)
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Content):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Content):
return True
return self.to_dict() != other.to_dict()
| 30.421154
| 314
| 0.608951
|
f06a9fc4dad757542ea4c7f13fd7b1b9d739c3fc
| 1,973
|
py
|
Python
|
src/users/migrations/0002_auto_20190910_1341.py
|
cbsBiram/xarala__ssr
|
863e1362c786daa752b942b796f7a015211d2f1b
|
[
"FSFAP"
] | null | null | null |
src/users/migrations/0002_auto_20190910_1341.py
|
cbsBiram/xarala__ssr
|
863e1362c786daa752b942b796f7a015211d2f1b
|
[
"FSFAP"
] | null | null | null |
src/users/migrations/0002_auto_20190910_1341.py
|
cbsBiram/xarala__ssr
|
863e1362c786daa752b942b796f7a015211d2f1b
|
[
"FSFAP"
] | null | null | null |
# Generated by Django 2.2.5 on 2019-09-10 13:41
from django.db import migrations, models
import django.db.models.deletion
import xarala.utils
class Migration(migrations.Migration):
dependencies = [
("users", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="Social",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("facebook", models.CharField(blank=True, max_length=150, null=True)),
("twitter", models.CharField(blank=True, max_length=150, null=True)),
("instagram", models.CharField(blank=True, max_length=150, null=True)),
("github", models.CharField(blank=True, max_length=150, null=True)),
("website", models.CharField(blank=True, max_length=150, null=True)),
("linkedin", models.CharField(blank=True, max_length=150, null=True)),
(
"stackoverflow",
models.CharField(blank=True, max_length=150, null=True),
),
("whatsapp", models.CharField(blank=True, max_length=150, null=True)),
],
),
migrations.AddField(
model_name="customuser",
name="avatar",
field=models.ImageField(
blank=True, null=True, upload_to=xarala.utils.upload_image_path
),
),
migrations.AddField(
model_name="customuser",
name="scoial",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="users.Social",
),
),
]
| 34.017241
| 87
| 0.49924
|
ddc738c6ed27c814c11c63a6fb453a793040af60
| 947
|
py
|
Python
|
openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py
|
jonclothcat/OpenPype
|
d1208cbebc0a7f378de0062ccd653295c6399195
|
[
"MIT"
] | 1
|
2022-02-08T15:40:41.000Z
|
2022-02-08T15:40:41.000Z
|
openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py
|
zafrs/OpenPype
|
4b8e7e1ed002fc55b31307efdea70b0feaed474f
|
[
"MIT"
] | 2
|
2022-03-18T01:46:03.000Z
|
2022-03-18T01:46:16.000Z
|
openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py
|
zafrs/OpenPype
|
4b8e7e1ed002fc55b31307efdea70b0feaed474f
|
[
"MIT"
] | null | null | null |
import pyblish.api
from openpype.pipeline import PublishXmlValidationError
from openpype.hosts.tvpaint.api import lib
class RepairStartFrame(pyblish.api.Action):
"""Repair start frame."""
label = "Repair"
icon = "wrench"
on = "failed"
def process(self, context, plugin):
lib.execute_george("tv_startframe 0")
class ValidateStartFrame(pyblish.api.ContextPlugin):
"""Validate start frame being at frame 0."""
label = "Validate Start Frame"
order = pyblish.api.ValidatorOrder
hosts = ["tvpaint"]
actions = [RepairStartFrame]
optional = True
def process(self, context):
start_frame = lib.execute_george("tv_startframe")
if start_frame == 0:
return
raise PublishXmlValidationError(
self,
"Start frame has to be frame 0.",
formatting_data={
"current_start_frame": start_frame
}
)
| 24.921053
| 57
| 0.636748
|
69274cd571f1087df4cce2083e590a4b7483a4c3
| 3,280
|
py
|
Python
|
django_riak_engine/util/dist.py
|
oubiwann-unsupported/django-riak-engine
|
cd4adc6c1e7dfa3d4e94346624578a1c3990efd5
|
[
"BSD-2-Clause"
] | 2
|
2016-01-28T14:09:56.000Z
|
2017-01-19T16:18:13.000Z
|
django_riak_engine/util/dist.py
|
oubiwann-unsupported/django-riak-engine
|
cd4adc6c1e7dfa3d4e94346624578a1c3990efd5
|
[
"BSD-2-Clause"
] | null | null | null |
django_riak_engine/util/dist.py
|
oubiwann-unsupported/django-riak-engine
|
cd4adc6c1e7dfa3d4e94346624578a1c3990efd5
|
[
"BSD-2-Clause"
] | 2
|
2015-02-06T11:38:20.000Z
|
2015-03-19T19:43:58.000Z
|
import os
rest_error_help = """
ReST validation error
See the following:
http://docutils.sourceforge.net/docs/user/rst/cheatsheet.txt
http://docutils.sourceforge.net/docs/user/rst/quickstart.html
"""
legalReSTFiles = [
"README",
"TODO",
"DEPENDENCIES",
]
def setup(*args, **kwds):
"""
Compatibility wrapper.
"""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
return setup(*args, **kwds)
def find_packages(library_name):
"""
Compatibility wrapper.
Taken from storm setup.py.
"""
try:
from setuptools import find_packages
return find_packages()
except ImportError:
pass
packages = []
for directory, subdirectories, files in os.walk(library_name):
if "__init__.py" in files:
packages.append(directory.replace(os.sep, "."))
return packages
def has_docutils():
"""
Check to see if docutils is installed.
"""
try:
import docutils
return True
except ImportError:
return False
def _validate_ReST(text):
"""
Make sure that the given ReST text is valid.
Taken from Zope Corp's zc.twist setup.py.
"""
import docutils.utils
import docutils.parsers.rst
import StringIO
doc = docutils.utils.new_document("validator")
# our desired settings
doc.reporter.halt_level = 5
doc.reporter.report_level = 1
stream = doc.reporter.stream = StringIO.StringIO()
# docutils buglets (?)
doc.settings.tab_width = 2
doc.settings.pep_references = doc.settings.rfc_references = False
doc.settings.trim_footnote_reference_space = None
# and we're off...
parser = docutils.parsers.rst.Parser()
try:
parser.parse(text, doc)
except Exception, err:
import pdb;pdb.set_trace()
return stream.getvalue()
def validate_ReST(text):
"""
A wrapper that ensafens the validation for pythons that are not embiggened
with docutils.
"""
if has_docutils():
return _validate_ReST(text)
print " *** No docutils; can't validate ReST."
return ""
def cat_ReST(*args, **kwds):
"""
Concatenate the contents of one or more ReST files.
Taken from Zope Corp's zc.twist setup.py.
"""
# note: distutils explicitly disallows unicode for setup values :-/
# http://docs.python.org/dist/meta-data.html
tmp = []
for arg in args:
if arg in legalReSTFiles or arg.endswith(".rst"):
f = open(os.path.join(*arg.split("/")))
tmp.append(f.read())
f.close()
tmp.append("\n\n")
else:
print "Warning: '%s' not a legal ReST file." % arg
tmp.append(arg)
if len(tmp) == 1:
res = tmp[0]
else:
res = "".join(tmp)
out = kwds.get("out")
stop_on_errors = kwds.get("stop_on_errors")
if out is True:
filename = kwds.get("filename")
f = open(filename, "w")
f.write(res)
f.close()
report = validate_ReST(res)
if report:
print report
if stop_on_errors:
print rest_error_help
raise ValueError("ReST validation error")
return res
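# Usage sketch (file names are hypothetical; README is one of the legalReSTFiles
# declared above, and anything else must end in .rst or is treated as literal text):
#
#   long_description = cat_ReST("README", "docs/intro.rst",
#                               out=True, filename="combined.rst",
#                               stop_on_errors=True)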
| 24.477612
| 78
| 0.609756
|
25bd07ba588271be498ec8c4664355491be56338
| 51,455
|
py
|
Python
|
portality/lib/dataobj.py
|
gaybro8777/doaj
|
27d9d98ce4f496ae52acbaba6ee8e42c84cf1a58
|
[
"Apache-2.0"
] | null | null | null |
portality/lib/dataobj.py
|
gaybro8777/doaj
|
27d9d98ce4f496ae52acbaba6ee8e42c84cf1a58
|
[
"Apache-2.0"
] | null | null | null |
portality/lib/dataobj.py
|
gaybro8777/doaj
|
27d9d98ce4f496ae52acbaba6ee8e42c84cf1a58
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: UTF-8 -*-
from portality.lib import dates
from portality.datasets import get_country_code, get_currency_code
from copy import deepcopy
import locale, json, warnings
from urllib.parse import urlparse
from datetime import date, datetime
#########################################################
## Data coerce functions
def to_currency_code(val):
if val is None:
return None
nv = get_currency_code(val)
if nv is None:
raise ValueError("Unable to convert {x} to a valid currency code".format(x=val))
uc = to_unicode()
return uc(nv)
def to_country_code(val):
if val is None:
return None
nv = get_country_code(val, fail_if_not_found=True)
if nv is None:
raise ValueError("Unable to convert {x} to a valid country code".format(x=val))
uc = to_unicode()
return uc(nv)
def to_unicode():
def to_utf8_unicode(val):
if isinstance(val, str):
return val
        elif isinstance(val, bytes):
try:
return val.decode("utf8", "strict")
except UnicodeDecodeError:
raise ValueError("Could not decode string")
else:
return str(val)
return to_utf8_unicode
def to_unicode_upper(val):
ufn = to_unicode()
val = ufn(val)
return val.upper()
def to_int():
def intify(val):
# strip any characters that are outside the ascii range - they won't make up the int anyway
# and this will get rid of things like strange currency marks
if isinstance(val, str):
val = val.encode("ascii", errors="ignore")
# try the straight cast
try:
return int(val)
except ValueError:
pass
# could have commas in it, so try stripping them
try:
return int(val.replace(",", ""))
except ValueError:
pass
# try the locale-specific approach
try:
return locale.atoi(val)
except ValueError:
pass
raise ValueError("Could not convert string to int: {x}".format(x=val))
return intify
def to_float():
def floatify(val):
# strip any characters that are outside the ascii range - they won't make up the float anyway
# and this will get rid of things like strange currency marks
if isinstance(val, str):
val = val.encode("ascii", errors="ignore")
# try the straight cast
try:
return float(val)
except ValueError:
pass
# could have commas in it, so try stripping them
try:
return float(val.replace(",", ""))
except ValueError:
pass
# try the locale-specific approach
try:
return locale.atof(val)
except ValueError:
pass
raise ValueError("Could not convert string to float: {x}".format(x=val))
return floatify
def date_str(in_format=None, out_format=None):
def datify(val):
if val is None or val == "":
return None
if isinstance(val, date) or isinstance(val, datetime):
return dates.format(val, format=out_format)
else:
return dates.reformat(val, in_format=in_format, out_format=out_format)
return datify
def to_datestamp(in_format=None):
def stampify(val):
return dates.parse(val, format=in_format)
return stampify
def to_isolang(output_format=None):
"""
    :param output_format: format from input source to output. Must be one of:
* alpha3
* alt3
* alpha2
* name
* fr
Can be a list in order of preference, too
fixme: we could make these pycountry's keys, removing the need for so many transformations and intermediate steps
:return:
"""
# delayed import, since we may not always want to load the whole dataset for a dataobj
from portality.lib import isolang as dataset
# sort out the output format list
if output_format is None:
output_format = ["alpha3"]
if not isinstance(output_format, list):
output_format = [output_format]
def isolang(val):
if val is None:
return None
l = dataset.find(val)
if l is None:
raise ValueError("Unable to find iso code for language {x}".format(x=val))
for f in output_format:
v = l.get(f)
if v is None or v == "":
continue
return v
return isolang
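# Usage sketch for to_isolang (return values depend on the isolang dataset and are
# shown here as typical examples, not guarantees):
#
#   to_alpha3 = to_isolang()                           # default output_format is alpha3
#   to_alpha3("English")                               # -> "eng"
#   to_isolang(output_format=["name", "alpha3"])("en") # -> "English"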
def to_url(val):
if not isinstance(val, str):
raise ValueError("Argument passed to to_url was not a string, but type '{t}': '{val}'".format(t=type(val),val=val))
val = val.strip()
if val == '':
return val
# parse with urlparse
url = urlparse(val)
# now check the url has the minimum properties that we require
if url.scheme and url.scheme.startswith("http"):
uc = to_unicode()
return uc(val)
else:
raise ValueError("Could not convert string {val} to viable URL".format(val=val))
def to_bool(val):
"""Conservative boolean cast - don't cast lists and objects to True, just existing booleans and strings."""
if val is None:
return None
if val is True or val is False:
return val
if isinstance(val, str):
if val.lower() == 'true':
return True
elif val.lower() == 'false':
return False
raise ValueError("Could not convert string {val} to boolean. Expecting string to either say 'true' or 'false' (not case-sensitive).".format(val=val))
raise ValueError("Could not convert {val} to boolean. Expect either boolean or string.".format(val=val))
def string_canonicalise(canon, allow_fail=False):
normalised = {}
for a in canon:
normalised[a.strip().lower()] = a
def sn(val):
if val is None:
if allow_fail:
return None
raise ValueError("NoneType not permitted")
try:
norm = val.strip().lower()
except:
raise ValueError("Unable to treat value as a string")
uc = to_unicode()
if norm in normalised:
return uc(normalised[norm])
if allow_fail:
return uc(val)
raise ValueError("Unable to canonicalise string")
return sn
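# Usage sketch (illustrative values):
#
#   norm_license = string_canonicalise(["CC BY", "CC BY-NC"], allow_fail=True)
#   norm_license(" cc by ")   # -> "CC BY"
#   norm_license("GPL")       # -> "GPL" (allow_fail=True passes unknown values through)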
############################################################
############################################################
# The core data object which manages all the interactions
# with the underlying data member variable
class DataObjException(Exception):
def __init__(self, *args, **kwargs):
try:
self.message = args[0]
except IndexError:
self.message = ''
super(DataObjException, self).__init__(*args, **kwargs)
class DataSchemaException(DataObjException):
pass
class DataObj(object):
"""
Class which provides services to other classes which store their internal data
as a python data structure in the self.data field.
"""
SCHEMA = None
DEFAULT_COERCE = {
# NOTE - if you add something to the default coerce, add it to the default swagger
# translation dict below as well. Furthermore if you're adding
# custom stuff to the coerce, you will likely need to add an entry
# to the swagger translation table as well, in the same way you
# extend the coerce map.
"unicode": to_unicode(),
"unicode_upper" : to_unicode_upper,
"utcdatetime": date_str(),
"utcdatetimemicros" : date_str(out_format="%Y-%m-%dT%H:%M:%S.%fZ"),
"bigenddate" : date_str(out_format="%Y-%m-%d"),
"integer": to_int(),
"float": to_float(),
"isolang": to_isolang(),
"url": to_url,
"bool": to_bool,
"isolang_2letter": to_isolang(output_format="alpha2"),
"country_code": to_country_code,
"currency_code": to_currency_code,
"license": string_canonicalise(["CC BY", "CC BY-NC", "CC BY-NC-ND", "CC BY-NC-SA", "CC BY-ND", "CC BY-SA", "Not CC-like"], allow_fail=True),
"persistent_identifier_scheme": string_canonicalise(["None", "DOI", "Handles", "ARK"], allow_fail=True),
"format": string_canonicalise(["PDF", "HTML", "ePUB", "XML"], allow_fail=True),
"deposit_policy": string_canonicalise(["None", "Sherpa/Romeo", "Dulcinea", "OAKlist", "Héloïse", "Diadorim"], allow_fail=True),
}
def __init__(self, raw=None, struct=None, construct_raw=True, expose_data=False, properties=None, coerce_map=None, construct_silent_prune=False, construct_maintain_reference=False, *args, **kwargs):
# make a shortcut to the object.__getattribute__ function
og = object.__getattribute__
# if no subclass has set the coerce, then set it from default
try:
og(self, "_coerce_map")
except:
self._coerce_map = coerce_map if coerce_map is not None else deepcopy(self.DEFAULT_COERCE)
# if no subclass has set the struct, initialise it
try:
og(self, "_struct")
except:
self._struct = struct
# assign the data if not already assigned by subclass
# NOTE: data is not _data deliberately
try:
og(self, "data")
except:
self.data = {} if raw is None else raw
# properties to allow automatic object API construction
# of the form
#
# {"<public property name>" : ("<path.to.property>", "<data object wrapper>")
# e.g
# {"identifier" : ("bibjson.identifier", DataObj))}
try:
og(self, "_properties")
except:
self._properties = properties if properties is not None else {}
# if no subclass has set expose_data, set it
try:
og(self, "_expose_data")
except:
self._expose_data = expose_data
# if no subclass has set _construct_silent_prune, set it
try:
og(self, "_construct_silent_prune")
except:
self._construct_silent_prune = construct_silent_prune
# if no subclass has set _construct_maintain_reference, set it
try:
og(self, "_construct_maintain_reference")
except:
self._construct_maintain_reference = construct_maintain_reference
# restructure the object based on the struct if requried
if self._struct is not None and raw is not None and construct_raw:
self.data = construct(self.data, self._struct, self._coerce_map, silent_prune=construct_silent_prune, maintain_reference=construct_maintain_reference)
# run against the old validation routine
# (now deprecated)
self.validate()
# run the object's native validation routine
self.custom_validate()
# keep a reference to the current data record, in case something up the inheritance chain messes with it
# (I'm looking at you, UserDict).
remember_this = self.data
# finally, kick the request up
super(DataObj, self).__init__(*args, **kwargs)
self.data = remember_this
def __getattr__(self, name):
# workaround to prevent debugger from disconnecting at the deepcopy method
# https://stackoverflow.com/questions/32831050/pycharms-debugger-gives-up-when-hitting-copy-deepcopy
# if name.startwith("__"):
# raise AttributeError
if hasattr(self.__class__, name):
return object.__getattribute__(self, name)
props, data_attrs = self._list_dynamic_properties()
# if the name is not in the dynamic properties, raise an attribute error
if name not in props and name not in data_attrs:
raise AttributeError('{name} is not set'.format(name=name))
# otherwise, extract the path from the properties list or the internal data
if name in props:
path, wrapper = self._properties.get(name)
else:
path = name
wrapper = DataObj
# request the internal property directly (which will in-turn raise the AttributeError if necessary)
try:
return self._get_internal_property(path, wrapper)
except AttributeError:
# re-wrap the attribute error with the name, rather than the path
raise AttributeError('{name} is not set'.format(name=name))
def __setattr__(self, key, value):
# first set the attribute on any explicitly defined property
try:
if hasattr(self.__class__, key):
# att = object.__getattribute__(self, key)
return object.__setattr__(self, key, value)
except AttributeError:
pass
# this could be an internal attribute from the constructor, so we need to make
# a special case
if key in ["_coerce_map", "_struct", "data", "_properties", "_expose_data"]:
return object.__setattr__(self, key, value)
props, data_attrs = self._list_dynamic_properties()
# extract the path from the properties list or the internal data
path = None
wrapper = None
if key in props:
path, wrapper = self._properties.get(key)
elif key in data_attrs:
path = key
wrapper = DataObj
        # try to set the property on the internal object
if path is not None:
wasset = self._set_internal_property(path, value, wrapper)
if wasset:
return
# fall back to the default approach of allowing any attribute to be set on the object
return object.__setattr__(self, key, value)
def check_construct(self):
"""
Apply the construct to the internal data and throw errors if it is not validated
This could be used, for example, if external processes have violated the .data encapsulation, or
if internal processes which change .data need to be checked to make sure they haven't strayed outside
their remit
:return:
"""
if self._struct is not None and self.data is not None:
construct(self.data, self._struct, self._coerce_map, silent_prune=False, maintain_reference=False)
def validate(self):
"""
DEPRECATED - use 'check_construct' instead.
:return:
"""
warnings.warn("DEPRECATED - use 'check_construct' instead.", DeprecationWarning)
if self.SCHEMA is not None:
validate(self.data, self.SCHEMA)
return True
def custom_validate(self):
pass
def populate(self, fields_and_values):
for k, v in fields_and_values.items():
setattr(self, k, v)
def clone(self):
return self.__class__(deepcopy(self.data))
def json(self):
return json.dumps(self.data)
def get_struct(self):
return self._struct
def _get_internal_property(self, path, wrapper=None):
# pull the object from the structure, to find out what kind of retrieve it needs
# (if there is a struct)
type, substruct, instructions = None, None, None
if self._struct:
type, substruct, instructions = construct_lookup(path, self._struct)
if type is None:
# if there is no struct, or no object mapping was found, try to pull the path
# as a single node (may be a field, list or dict, we'll find out in a mo)
val = self._get_single(path)
# if this is a dict or a list and a wrapper is supplied, wrap it
if wrapper is not None:
if isinstance(val, dict):
return wrapper(val, expose_data=self._expose_data)
elif isinstance(val, list) and len(val) > 0:
if isinstance(val[0], dict): # just check the first one
return [wrapper(v, expose_data=self._expose_data) for v in val]
# otherwise, return the raw value if it is not None, or raise an AttributeError
if val is None:
raise AttributeError('{name} is not set'.format(name=path))
return val
if instructions is None:
instructions = {}
# if the struct contains a reference to the path, always return something, even if it is None - don't raise an AttributeError
kwargs = construct_kwargs(type, "get", instructions)
coerce_fn = self._coerce_map.get(instructions.get("coerce"))
if coerce_fn is not None:
kwargs["coerce"] = coerce_fn
if type == "field":
return self._get_single(path, **kwargs)
elif type == "object":
d = self._get_single(path, **kwargs)
if wrapper:
return wrapper(d, substruct, construct_raw=False, expose_data=self._expose_data)  # FIXME: this means all substructures are forced to use this class's expose_data policy, whatever it is
else:
return d
elif type == "list":
if instructions.get("contains") == "field":
return self._get_list(path, **kwargs)
elif instructions.get("contains") == "object":
l = self._get_list(path, **kwargs)
if wrapper:
return [wrapper(o, substruct, construct_raw=False, expose_data=self._expose_data) for o in l]  # FIXME: this means all substructures are forced to use this class's expose_data policy, whatever it is
else:
return l
# if for whatever reason we get here, raise the AttributeError
raise AttributeError('{name} is not set'.format(name=path))
def _set_internal_property(self, path, value, wrapper=None):
def _wrap_validate(val, wrap, substruct):
if wrap is None:
if isinstance(val, DataObj):
return val.data
else:
return val
else:
if isinstance(val, DataObj):
if isinstance(val, wrap):
return val.data
else:
raise AttributeError("Attempt to set {x} failed; is not of an allowed type.".format(x=path))
else:
try:
d = wrap(val, substruct)
return d.data
except DataStructureException as e:
raise AttributeError(str(e))
# pull the object from the structure, to find out what kind of retrieve it needs
# (if there is a struct)
type, substruct, instructions = None, None, None
if self._struct:
type, substruct, instructions = construct_lookup(path, self._struct)
# if no type is found, then this means that either the struct was undefined, or the
# path did not point to a valid point in the struct. In the case that the struct was
# defined, this means the property is trying to set something outside the struct, which
# isn't allowed. So, only set types which are None against objects which don't define
# the struct.
if type is None:
if self._struct is None:
if isinstance(value, list):
value = [_wrap_validate(v, wrapper, None) for v in value]
self._set_list(path, value)
else:
value = _wrap_validate(value, wrapper, None)
self._set_single(path, value)
return True
else:
return False
if instructions is None:
instructions = {}
kwargs = construct_kwargs(type, "set", instructions)
coerce_fn = self._coerce_map.get(instructions.get("coerce"))
if coerce_fn is not None:
kwargs["coerce"] = coerce_fn
if type == "field":
self._set_single(path, value, **kwargs)
return True
elif type == "object":
v = _wrap_validate(value, wrapper, substruct)
self._set_single(path, v, **kwargs)
return True
elif type == "list":
if instructions.get("contains") == "field":
self._set_list(path, value, **kwargs)
return True
elif instructions.get("contains") == "object":
if not isinstance(value, list):
value = [value]
vals = [_wrap_validate(v, wrapper, substruct) for v in value]
self._set_list(path, vals, **kwargs)
return True
return False
def _list_dynamic_properties(self):
# list the dynamic properties the object could have
props = []
try:
# props = og(self, 'properties').keys()
props = list(self._properties.keys())
except AttributeError:
pass
data_attrs = []
try:
if self._expose_data:
if self._struct:
data_attrs = construct_data_keys(self._struct)
else:
data_attrs = list(self.data.keys())
except AttributeError:
pass
return props, data_attrs
def _add_struct(self, struct):
# if the struct is not yet set, set it
try:
object.__getattribute__(self, "_struct")
self._struct = construct_merge(self._struct, struct)
except:
self._struct = struct
def _get_path(self, path, default):
parts = path.split(".")
context = self.data
for i in range(len(parts)):
p = parts[i]
d = {} if i < len(parts) - 1 else default
context = context.get(p, d)
return context
def _set_path(self, path, val):
parts = path.split(".")
context = self.data
for i in range(len(parts)):
p = parts[i]
if p not in context and i < len(parts) - 1:
context[p] = {}
context = context[p]
elif p in context and i < len(parts) - 1:
context = context[p]
else:
context[p] = val
def _delete_from_list(self, path, val=None, matchsub=None, prune=True, apply_struct_on_matchsub=True):
"""
Note that matchsub will be coerced with the struct if it exists, to ensure
that the match is done correctly
:param path:
:param val:
:param matchsub:
:param prune:
:return:
"""
l = self._get_list(path)
removes = []
i = 0
for entry in l:
if val is not None:
if entry == val:
removes.append(i)
elif matchsub is not None:
# attempt to coerce the sub
if apply_struct_on_matchsub:
try:
object.__getattribute__(self, "_struct")
type, struct, instructions = construct_lookup(path, self._struct)
if struct is not None:
matchsub = construct(matchsub, struct, self._coerce_map)
except:
pass
matches = 0
for k, v in matchsub.items():
if entry.get(k) == v:
matches += 1
if matches == len(list(matchsub.keys())):
removes.append(i)
i += 1
removes.sort(reverse=True)
for r in removes:
del l[r]
if len(l) == 0 and prune:
self._delete(path, prune)
def _delete(self, path, prune=True):
parts = path.split(".")
context = self.data
stack = []
for i in range(len(parts)):
p = parts[i]
if p in context:
if i < len(parts) - 1:
stack.append(context[p])
context = context[p]
else:
del context[p]
if prune and len(stack) > 0:
stack.pop() # the last element was just deleted
self._prune_stack(stack)
def _prune_stack(self, stack):
while len(stack) > 0:
context = stack.pop()
todelete = []
for k, v in context.items():
if isinstance(v, dict) and len(list(v.keys())) == 0:
todelete.append(k)
for d in todelete:
del context[d]
def _coerce(self, val, cast, accept_failure=False):
if cast is None:
return val
try:
return cast(val)
except (ValueError, TypeError):
if accept_failure:
return val
raise DataSchemaException("Cast with {x} failed on '{y}' of type {z}".format(x=cast, y=val, z=type(val)))
def _get_single(self, path, coerce=None, default=None, allow_coerce_failure=True):
# get the value at the point in the object
val = self._get_path(path, default)
if coerce is not None and val is not None:
# if you want to coerce and there is something to coerce do it
return self._coerce(val, coerce, accept_failure=allow_coerce_failure)
else:
# otherwise return the value
return val
def _get_list(self, path, coerce=None, by_reference=True, allow_coerce_failure=True):
# get the value at the point in the object
val = self._get_path(path, None)
# if there is no value and we want to do by reference, then create it, bind it and return it
if val is None and by_reference:
mylist = []
self._set_single(path, mylist)
return mylist
# otherwise, default is an empty list
elif val is None and not by_reference:
return []
# check that the val is actually a list
if not isinstance(val, list):
raise DataSchemaException("Expecting a list at {x} but found {y}".format(x=path, y=val))
# if there is a value, do we want to coerce each of them
if coerce is not None:
coerced = [self._coerce(v, coerce, accept_failure=allow_coerce_failure) for v in val]
if by_reference:
self._set_single(path, coerced)
return coerced
else:
if by_reference:
return val
else:
return deepcopy(val)
def _set_single(self, path, val, coerce=None, allow_coerce_failure=False, allowed_values=None, allowed_range=None,
allow_none=True, ignore_none=False):
if val is None and ignore_none:
return
if val is None and not allow_none:
raise DataSchemaException("NoneType is not allowed at {x}".format(x=path))
# first see if we need to coerce the value (and don't coerce None)
if coerce is not None and val is not None:
val = self._coerce(val, coerce, accept_failure=allow_coerce_failure)
if allowed_values is not None and val not in allowed_values:
raise DataSchemaException("Value {x} is not permitted at {y}".format(x=val, y=path))
if allowed_range is not None:
lower, upper = allowed_range
if (lower is not None and val < lower) or (upper is not None and val > upper):
raise DataSchemaException("Value {x} is outside the allowed range: {l} - {u}".format(x=val, l=lower, u=upper))
# now set it at the path point in the object
self._set_path(path, val)
def _set_list(self, path, val, coerce=None, allow_coerce_failure=False, allow_none=True, ignore_none=False):
# first ensure that the value is a list
if not isinstance(val, list):
val = [val]
# now carry out the None check
# for each supplied value, if it is none, and none is not allowed, raise an error if we do not
# plan to ignore the nones.
for v in val:
if v is None and not allow_none:
if not ignore_none:
raise DataSchemaException("NoneType is not allowed at {x}".format(x=path))
# now coerce each of the values, stripping out Nones if necessary
val = [self._coerce(v, coerce, accept_failure=allow_coerce_failure) for v in val if v is not None or not ignore_none]
# check that the cleaned array isn't empty, and if it is behave appropriately
if len(val) == 0:
# this is equivalent to a None, so we need to decide what to do
if ignore_none:
# if we are ignoring nones, just do nothing
return
elif not allow_none:
# if we are not ignoring nones, and not allowing them, raise an error
raise DataSchemaException("Empty array not permitted at {x}".format(x=path))
# now set it on the path
self._set_path(path, val)
def _add_to_list(self, path, val, coerce=None, allow_coerce_failure=False, allow_none=False, allowed_values=None, ignore_none=True, unique=False):
if val is None and ignore_none:
return
if val is None and not allow_none:
raise DataSchemaException("NoneType is not allowed in list at {x}".format(x=path))
if allowed_values is not None and val not in allowed_values:
raise DataSchemaException("Value {x} is not permitted at {y}".format(x=val, y=path))
# first coerce the value
if coerce is not None:
val = self._coerce(val, coerce, accept_failure=allow_coerce_failure)
current = self._get_list(path, by_reference=True)
# if we require the list to be unique, check for the value first
if unique:
if val in current:
return
# otherwise, append
current.append(val)
def _set_with_struct(self, path, val):
type, struct, instructions = construct_lookup(path, self._struct)
if type == "field":
kwargs = construct_kwargs(type, "set", instructions)
coerce_fn = self._coerce_map.get(instructions.get("coerce", "unicode"))
self._set_single(path, val, coerce=coerce_fn, **kwargs)
elif type == "list":
if not isinstance(val, list):
val = [val]
if struct is not None:
val = [construct(x, struct, self._coerce_map) for x in val]
kwargs = construct_kwargs(type, "set", instructions)
coerce_fn = self._coerce_map.get(instructions.get("coerce"))
self._set_list(path, val, coerce=coerce_fn, **kwargs)
elif type == "object":
if struct is not None:
val = construct(val, struct, self._coerce_map)
self._set_single(path, val)
def _add_to_list_with_struct(self, path, val):
type, struct, instructions = construct_lookup(path, self._struct)
if type != "list":
raise DataStructureException("Attempt to add to list {x} failed - it is not a list element".format(x=path))
if struct is not None:
val = construct(val, struct, self._coerce_map)
kwargs = construct_kwargs(type, "set", instructions)
self._add_to_list(path, val, **kwargs)
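# Illustrative sketch (not part of the original class): subclasses typically call
# _set_with_struct / _add_to_list_with_struct from their own accessors, letting the
# struct decide coercion and shape. For example (hypothetical subclass methods):
#   struct = {"fields": {"id": {"coerce": "unicode"}},
#             "lists": {"notes": {"contains": "object"}},
#             "structs": {"notes": {"fields": {"note": {"coerce": "unicode"}}}}}
#   def set_id(self, val):
#       self._set_with_struct("id", val)
#   def add_note(self, note):
#       self._add_to_list_with_struct("notes", {"note": note})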
def _utf8_unicode(self):
"""
DEPRECATED - use dataobj.to_unicode() instead
"""
return to_unicode()
def _int(self):
"""
DEPRECATED - use dataobj.to_int() instead
"""
return to_int()
def _float(self):
"""
DEPRECATED - use dataobj.to_float() instead
"""
return to_float()
def _date_str(self, in_format=None, out_format=None):
"""
DEPRECATED - use dataobj.date_str instead
"""
return date_str(in_format=in_format, out_format=out_format)
############################################################
## Primitive object schema validation
class ObjectSchemaValidationError(DataObjException):
pass
def validate(obj, schema):
"""
DEPRECATED - use 'construct' instead.
:param obj:
:param schema:
:return:
"""
warnings.warn("DEPRECATED - use 'construct' instead.", DeprecationWarning)
# all fields
allowed = schema.get("bools", []) + schema.get("fields", []) + schema.get("lists", []) + schema.get("objects", [])
for k, v in obj.items():
# is k allowed at all
if k not in allowed:
raise ObjectSchemaValidationError("object contains key " + k + " which is not permitted by schema")
# check the bools are bools
if k in schema.get("bools", []):
if type(v) != bool:
raise ObjectSchemaValidationError("object contains " + k + " = " + str(v) + " but expected boolean")
# check that the fields are plain old strings
if k in schema.get("fields", []):
if type(v) != str and type(v) != int and type(v) != float:
raise ObjectSchemaValidationError("object contains " + k + " = " + str(v) + " but expected string, unicode or a number")
# check that the lists are really lists
if k in schema.get("lists", []):
if type(v) != list:
raise ObjectSchemaValidationError("object contains " + k + " = " + str(v) + " but expected list")
# if it is a list, then for each member validate
entry_schema = schema.get("list_entries", {}).get(k)
if entry_schema is None:
# validate the entries as fields
for e in v:
if type(e) != str and type(e) != int and type(e) != float:
raise ObjectSchemaValidationError("list in object contains " + str(type(e)) + " but expected string, unicode or a number in " + k)
else:
# validate each entry against the schema
for e in v:
validate(e, entry_schema)
# check that the objects are objects
if k in schema.get("objects", []):
if type(v) != dict:
raise ObjectSchemaValidationError("object contains " + k + " = " + str(v) + " but expected object/dict")
# if it is an object, then validate
object_schema = schema.get("object_entries", {}).get(k)
if object_schema is None:
#raise ObjectSchemaValidationError("no object entry for object " + k)
pass # we are not imposing a schema on this object
else:
validate(v, object_schema)
############################################################
## Data structure coercion
class DataStructureException(DataObjException):
pass
class ConstructException(DataObjException):
pass
def construct_validate(struct, context=""):
"""
Is the provided struct of the correct form
{
"fields" : {
"field_name" : {"coerce" :"coerce_function", **kwargs}
},
"objects" : [
"field_name"
],
"lists" : {
"field_name" : {"contains" : "object|field", "coerce" : "field_coerce_function, **kwargs}
},
"required" : ["field_name"],
"structs" : {
"field_name" : {
<construct>
}
}
}
"""
# check that only the allowed keys are present
keys = struct.keys()
for k in keys:
if k not in ["fields", "objects", "lists", "required", "structs"]:
c = context if context != "" else "root"
raise ConstructException("Key '{x}' present in struct at '{y}', but is not permitted".format(x=k, y=c))
# now go through and make sure the fields are the right shape:
for field_name, instructions in struct.get("fields", {}).items():
if "coerce" not in instructions:
c = context if context != "" else "root"
raise ConstructException("Coerce function not listed in field '{x}' at '{y}'".format(x=field_name, y=c))
for k,v in instructions.items():
if not isinstance(v, list) and not isinstance(v, str):
c = context if context != "" else "root"
raise ConstructException("Argument '{a}' in field '{b}' at '{c}' is not a string or list".format(a=k, b=field_name, c=c))
# then make sure the objects are ok
for o in struct.get("objects", []):
if not isinstance(o, str):
c = context if context != "" else "root"
raise ConstructException("There is a non-string value in the object list at '{y}'".format(y=c))
# make sure the lists are correct
for field_name, instructions in struct.get("lists", {}).items():
contains = instructions.get("contains")
if contains is None:
c = context if context != "" else "root"
raise ConstructException("No 'contains' argument in list definition for field '{x}' at '{y}'".format(x=field_name, y=c))
if contains not in ["object", "field"]:
c = context if context != "" else "root"
raise ConstructException("'contains' argument in list '{x}' at '{y}' contains illegal value '{z}'".format(x=field_name, y=c, z=contains))
for k,v in instructions.items():
if not isinstance(v, list) and not isinstance(v, str):
c = context if context != "" else "root"
raise ConstructException("Argument '{a}' in list '{b}' at '{c}' is not a string or list".format(a=k, b=field_name, c=c))
# make sure the requireds are correct
for o in struct.get("required", []):
if not isinstance(o, str):
c = context if context != "" else "root"
raise ConstructException("There is a non-string value in the required list at '{y}'".format(y=c))
# now do the structs, which will involve some recursion
substructs = struct.get("structs", {})
# first check that there are no previously unknown keys in there
possibles = struct.get("objects", []) + list(struct.get("lists", {}).keys())
for s in substructs:
if s not in possibles:
c = context if context != "" else "root"
raise ConstructException("struct contains key '{a}' which is not listed in object or list definitions at '{x}'".format(a=s, x=c))
# now recurse into each struct
for k,v in substructs.items():
nc = context
if nc == "":
nc = k
else:
nc += "." + k
construct_validate(v, context=nc)
return True
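# Illustrative sketch (not part of the original module): a minimal struct that
# passes construct_validate. The field and coerce names here are assumptions for
# the example only; real code would use the coerce names registered in its coerce map.
def _example_construct_validate():
    struct = {
        "fields": {
            "id": {"coerce": "unicode"},
        },
        "objects": ["bibjson"],
        "lists": {
            "notes": {"contains": "object"},
        },
        "required": ["id"],
        "structs": {
            "bibjson": {
                "fields": {"title": {"coerce": "unicode"}},
            },
            "notes": {
                "fields": {"note": {"coerce": "unicode"}},
            },
        },
    }
    # raises ConstructException if the shape is wrong, otherwise returns True
    return construct_validate(struct)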
def construct(obj, struct, coerce, context="", silent_prune=False, maintain_reference=False):
"""
{
"fields" : {
"field_name" : {"coerce" :"coerce_function", **kwargs}
},
"objects" : [
"field_name"
],
"lists" : {
"field_name" : {"contains" : "object|field", "coerce" : "field_coerce_function, **kwargs}
},
"required" : ["field_name"],
"structs" : {
"field_name" : {
<construct>
}
}
}
:param obj:
:param struct:
:param coerce:
:return:
"""
if obj is None:
return None
# check that all the required fields are there
try:
keys = list(obj.keys())
except:
c = context if context != "" else "root"
raise DataStructureException("Expected an object at {c} but found something else instead".format(c=c))
for r in struct.get("required", []):
if r not in keys:
c = context if context != "" else "root"
raise DataStructureException("Field '{r}' is required but not present at '{c}'".format(r=r, c=c))
# check that there are no fields that are not allowed
# Note that since the construct mechanism copies fields explicitly, silent_prune literally just turns off this
# check
if not silent_prune:
allowed = list(struct.get("fields", {}).keys()) + struct.get("objects", []) + list(struct.get("lists", {}).keys())
for k in keys:
if k not in allowed:
c = context if context != "" else "root"
raise DataStructureException("Field '{k}' is not permitted at '{c}'".format(k=k, c=c))
# this is the new object we'll be creating from the old
constructed = DataObj()
# now check all the fields
for field_name, instructions in struct.get("fields", {}).items():
val = obj.get(field_name)
if val is None:
continue
coerce_fn = coerce.get(instructions.get("coerce", "unicode"))
if coerce_fn is None:
raise DataStructureException("No coersion function defined for type '{x}' at '{c}'".format(x=instructions.get("coerce", "unicode"), c=context + field_name))
kwargs = construct_kwargs("field", "set", instructions)
try:
constructed._set_single(field_name, val, coerce=coerce_fn, **kwargs)
except DataSchemaException as e:
raise DataStructureException("Schema exception at '{a}', {b}".format(a=context + field_name, b=str(e)))
# next check all the objects (which will involve a recursive call to this function)
for field_name in struct.get("objects", []):
val = obj.get(field_name)
if val is None:
continue
if type(val) != dict:
raise DataStructureException("Found '{x}' = '{y}' but expected object/dict".format(x=context + field_name, y=val))
instructions = struct.get("structs", {}).get(field_name)
if instructions is None:
# this is the lowest point at which we have instructions, so just accept the data structure as-is
# (taking a deep copy to destroy any references)
try:
constructed._set_single(field_name, deepcopy(val))
except DataSchemaException as e:
raise DataStructureException(str(e))
else:
# we need to recurse further down
beneath = construct(val, instructions, coerce=coerce, context=context + field_name + ".", silent_prune=silent_prune)
# what we get back is the correct sub-data structure, which we can then store
try:
constructed._set_single(field_name, beneath)
except DataSchemaException as e:
raise DataStructureException(str(e))
# now check all the lists
for field_name, instructions in struct.get("lists", {}).items():
vals = obj.get(field_name)
if vals is None:
continue
if not isinstance(vals, list):
raise DataStructureException("Expecting list at {x} but found something else".format(x=context + field_name))
# prep the keyword arguments for the setters
kwargs = construct_kwargs("list", "set", instructions)
contains = instructions.get("contains")
if contains == "field":
# coerce all the values in the list
coerce_fn = coerce.get(instructions.get("coerce", "unicode"))
if coerce_fn is None:
raise DataStructureException("No coersion function defined for type '{x}' at '{c}'".format(x=instructions.get("coerce", "unicode"), c=context + field_name))
for i in range(len(vals)):
val = vals[i]
try:
constructed._add_to_list(field_name, val, coerce=coerce_fn, **kwargs)
except DataSchemaException as e:
raise DataStructureException(str(e))
elif contains == "object":
# for each object in the list, send it for construction
for i in range(len(vals)):
val = vals[i]
if type(val) != dict:
raise DataStructureException("Found '{x}[{p}]' = '{y}' but expected object/dict".format(x=context + field_name, y=val, p=i))
subinst = struct.get("structs", {}).get(field_name)
if subinst is None:
try:
constructed._add_to_list(field_name, deepcopy(val))
except DataSchemaException as e:
raise DataStructureException(str(e))
else:
# we need to recurse further down
beneath = construct(val, subinst, coerce=coerce, context=context + field_name + "[" + str(i) + "].", silent_prune=silent_prune)
# what we get back is the correct sub-data structure, which we can then store
try:
constructed._add_to_list(field_name, beneath)
except DataSchemaException as e:
raise DataStructureException(str(e))
else:
raise DataStructureException("Cannot understand structure where list '{x}' elements contain '{y}'".format(x=context + field_name, y=contains))
if maintain_reference:
obj.clear()
obj.update(constructed.data)
return obj
else:
return constructed.data
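# Illustrative sketch (not part of the original module): running a raw dict through
# construct. The coerce map below is a stand-in for the example; the real application
# supplies its own mapping of coerce names to callables.
def _example_construct():
    coerce_map = {"unicode": str, "integer": int}   # assumed minimal coerce map
    struct = {
        "fields": {"title": {"coerce": "unicode"}, "year": {"coerce": "integer"}},
        "lists": {"keywords": {"contains": "field", "coerce": "unicode"}},
        "required": ["title"],
    }
    raw = {"title": "A title", "year": "2020", "keywords": ["a", "b"]}
    # returns a new, coerced dict (year becomes 2020); raises DataStructureException
    # if required fields are missing or unexpected keys are present
    return construct(raw, struct, coerce_map)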
def construct_merge(target, source):
merged = deepcopy(target)
for field, instructions in source.get("fields", {}).items():
if "fields" not in merged:
merged["fields"] = {}
if field not in merged["fields"]:
merged["fields"][field] = deepcopy(instructions)
for obj in source.get("objects", []):
if "objects" not in merged:
merged["objects"] = []
if obj not in merged["objects"]:
merged["objects"].append(obj)
for field, instructions in source.get("lists", {}).items():
if "lists" not in merged:
merged["lists"] = {}
if field not in merged["lists"]:
merged["lists"][field] = deepcopy(instructions)
for r in source.get("required", []):
if "required" not in merged:
merged["required"] = []
if r not in merged["required"]:
merged["required"].append(r)
for field, struct in source.get("structs", {}).items():
if "structs" not in merged:
merged["structs"] = {}
if field not in merged["structs"]:
merged["structs"][field] = deepcopy(struct)
else:
# recursively merge
merged["structs"][field] = construct_merge(merged["structs"][field], struct)
return merged
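# Illustrative sketch (not part of the original module): construct_merge copies any
# fields/objects/lists/required/structs from source that target does not already
# define, recursing into sub-structs that both sides share.
def _example_construct_merge():
    target = {"fields": {"id": {"coerce": "unicode"}}, "required": ["id"]}
    source = {"fields": {"id": {"coerce": "integer"},          # ignored: already in target
                         "created_date": {"coerce": "unicode"}},
              "required": ["created_date"]}
    merged = construct_merge(target, source)
    # merged == {"fields": {"id": {"coerce": "unicode"},
    #                       "created_date": {"coerce": "unicode"}},
    #            "required": ["id", "created_date"]}
    return merged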
def construct_lookup(path, struct):
bits = path.split(".")
# if there's more than one path element, we will need to recurse
if len(bits) > 1:
# it has to be an object, in order for the path to still have multiple
# segments
if bits[0] not in struct.get("objects", []):
return None, None, None
substruct = struct.get("structs", {}).get(bits[0])
return construct_lookup(".".join(bits[1:]), substruct)
elif len(bits) == 1:
# first check the fields
instructions = struct.get("fields", {}).get(bits[0])
if instructions is not None:
return "field", None, instructions
# then check the lists
instructions = struct.get("lists", {}).get(bits[0])
if instructions is not None:
structure = struct.get("structs", {}).get(bits[0])
return "list", structure, instructions
# then check the objects
if bits[0] in struct.get("objects", []):
structure = struct.get("structs", {}).get(bits[0])
return "object", structure, None
return None, None, None
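# Illustrative sketch (not part of the original module): construct_lookup walks a
# dotted path through the struct and reports the kind of element found there.
def _example_construct_lookup():
    struct = {
        "fields": {"id": {"coerce": "unicode"}},
        "objects": ["bibjson"],
        "structs": {"bibjson": {"fields": {"title": {"coerce": "unicode"}}}},
    }
    assert construct_lookup("id", struct) == ("field", None, {"coerce": "unicode"})
    assert construct_lookup("bibjson", struct)[0] == "object"
    assert construct_lookup("bibjson.title", struct) == ("field", None, {"coerce": "unicode"})
    assert construct_lookup("missing", struct) == (None, None, None)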
def construct_kwargs(type, dir, instructions):
# if there are no instructions there are no kwargs
if instructions is None:
return {}
# take a copy of the instructions that we can modify
kwargs = deepcopy(instructions)
# remove the known arguments for the field type
if type == "field":
if "coerce" in kwargs:
del kwargs["coerce"]
elif type == "list":
if "coerce" in kwargs:
del kwargs["coerce"]
if "contains" in kwargs:
del kwargs["contains"]
nk = {}
if dir == "set":
for k, v in kwargs.items():
# basically everything is a "set" argument unless explicitly stated to be a "get" argument
if not k.startswith("get__"):
if k.startswith("set__"): # if it starts with the set__ prefix, remove it
k = k[5:]
nk[k] = v
elif dir == "get":
for k, v in kwargs.items():
# only keep arguments that start with the "get__" prefix
if k.startswith("get__"):
nk[k[5:]] = v
return nk
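# Illustrative sketch (not part of the original module): instruction arguments
# prefixed with set__ or get__ are routed to the setter or getter only; everything
# else (bar the known keys removed above) goes to the setter.
def _example_construct_kwargs():
    instructions = {"coerce": "unicode", "allowed_values": ["a", "b"],
                    "set__ignore_none": True, "get__default": "a"}
    assert construct_kwargs("field", "set", instructions) == \
        {"allowed_values": ["a", "b"], "ignore_none": True}
    assert construct_kwargs("field", "get", instructions) == {"default": "a"}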
def construct_data_keys(struct):
return list(struct.get("fields", {})) + list(struct.get("objects", [])) + list(struct.get("lists", {}))
def merge_outside_construct(struct, target, source):
merged = deepcopy(target)
for source_key in source.keys():
# if the source_key is one of the struct's fields, ignore it
if source_key in list(struct.get("fields", {}).keys()):
continue
# if the source_key is one of the struct's lists, ignore it
if source_key in list(struct.get("lists", {}).keys()):
continue
# if the source_key is one of the struct's objects, we will need to go deeper
if source_key in struct.get("objects", []):
subsource = source[source_key]
subtarget = target.get(source_key, {})
substruct = struct.get("structs", {}).get(source_key, {})
merged[source_key] = merge_outside_construct(substruct, subtarget, subsource)
continue
# if we get to here, the field in the source is not represented at this level in the struct,
# so we should copy it over in full (unless the target already has a value here)
if source_key not in merged:
merged[source_key] = deepcopy(source[source_key])
return merged
############################################################
## Unit test support
def test_dataobj(obj, fields_and_values):
"""
Test a dataobj to make sure that the getters and setters you have specified
are working correctly.
Provide it with a data object and a dict of fields mapped to the values to set and the expected return values (if required):
{
"key" : ("set value", "get value")
}
If you provide only the set value, then the get value will be required to be the same as the set value in the test
{
"key" : "set value"
}
:param obj:
:param fields_and_values:
:return:
"""
for k, valtup in fields_and_values.items():
if not isinstance(valtup, tuple):
valtup = (valtup,)
set_val = valtup[0]
try:
setattr(obj, k, set_val)
except AttributeError:
assert False, "Unable to set attribute {x} with value {y}".format(x=k, y=set_val)
for k, valtup in fields_and_values.items():
if not isinstance(valtup, tuple):
valtup = (valtup,)
get_val = valtup[0]
if len(valtup) > 1:
get_val = valtup[1]
val = getattr(obj, k)
assert val == get_val, (k, val, get_val)
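# Illustrative sketch (not part of the original module): exercising test_dataobj
# against a bare DataObj built with a simple struct. The constructor arguments are
# inferred from how DataObj is used elsewhere in this module, and the "unicode"
# coerce name is assumed to be present in the default coerce map (as construct
# itself assumes).
def _example_test_dataobj():
    struct = {"fields": {"title": {"coerce": "unicode"}}}
    obj = DataObj({}, struct, expose_data=True)
    # each value may be a plain value (get must equal set) or a (set, get) tuple
    test_dataobj(obj, {"title": "A title"})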
| 37.31327
| 220
| 0.58041
|
2c73ba1c753489bb95d93e00459a8cb0098a070b
| 280
|
py
|
Python
|
adder.py
|
pochangl/qiskit-experiment
|
0a1704be6c1d430dd3dbd497e95cff1fbd31c50f
|
[
"Apache-2.0"
] | null | null | null |
adder.py
|
pochangl/qiskit-experiment
|
0a1704be6c1d430dd3dbd497e95cff1fbd31c50f
|
[
"Apache-2.0"
] | null | null | null |
adder.py
|
pochangl/qiskit-experiment
|
0a1704be6c1d430dd3dbd497e95cff1fbd31c50f
|
[
"Apache-2.0"
] | null | null | null |
import qiskit
from qiskit import QuantumRegister, QuantumCircuit
from operators import add
from utils import print_vector
q = qiskit.QuantumRegister(4)
a, b, s, c = q
circuit = qiskit.QuantumCircuit(q)
circuit.h([a, b])
add(circuit, a, b, s, c)
print_vector(circuit, width=4)
| 18.666667
| 50
| 0.753571
|
0488232a8df9eb6b04d1c4c233c7cfb0e936f7c4
| 144
|
py
|
Python
|
aoc2020/day_16/part_2.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
aoc2020/day_16/part_2.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
aoc2020/day_16/part_2.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
from .part_1 import Solution as Part1
class Solution(Part1):
expected = None
def solve(self) -> any:
return super().solve2()
| 16
| 37
| 0.645833
|
0ad3a3bd78a086e3a03c882f520586d616b260fd
| 2,519
|
py
|
Python
|
src/desafioAtados/settings.py
|
Akijunior/Atados
|
255c9c9137e48aa82fdea63f9d6d65a3720c3f92
|
[
"MIT"
] | null | null | null |
src/desafioAtados/settings.py
|
Akijunior/Atados
|
255c9c9137e48aa82fdea63f9d6d65a3720c3f92
|
[
"MIT"
] | null | null | null |
src/desafioAtados/settings.py
|
Akijunior/Atados
|
255c9c9137e48aa82fdea63f9d6d65a3720c3f92
|
[
"MIT"
] | null | null | null |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'mu1*lyhpzwg(x*i!29)vigzwf(si%c(-l^lxj)wljes=s)csvq'
DEBUG = True
ALLOWED_HOSTS = ['0.0.0.0', ]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'voluntario.apps.VoluntarioConfig',
'acao.apps.AcaoConfig',
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 5
}
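# Illustrative note (not part of the original settings): with TokenAuthentication
# enabled, API clients authenticate by sending the DRF token header, for example
#   curl -H "Authorization: Token <key>" http://localhost:8000/<endpoint>/
# and list endpoints are paginated five items per page via PAGE_SIZE above.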
CORS_ORIGIN_ALLOW_ALL = True
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'desafioAtados.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'desafioAtados.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Fortaleza'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
| 24.456311
| 91
| 0.674474
|
41d2f5e24b89eada838aee048abf84960b46c8dc
| 21,357
|
py
|
Python
|
modules/templates/RMS/layouts.py
|
waidyanatha/eden
|
a275ed7d10c2bf8839de86b7ac7c549186fc94b7
|
[
"MIT"
] | 1
|
2018-12-25T05:33:36.000Z
|
2018-12-25T05:33:36.000Z
|
modules/templates/RMS/layouts.py
|
waidyanatha/eden
|
a275ed7d10c2bf8839de86b7ac7c549186fc94b7
|
[
"MIT"
] | 1
|
2021-07-19T05:05:48.000Z
|
2021-07-19T05:05:48.000Z
|
modules/templates/RMS/layouts.py
|
armin11/eden
|
70834282bc1dee7d1bc00ea617c384755f3bf806
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from gluon import *
#from gluon.storage import Storage
from s3 import *
#from s3theme import NAV, SECTION
THEME = "RMS"
training_functions = ("certificate", "course", "course_certificate",
"facility", "training", "training_center",
"training_event", "trainee", "trainee_person",
)
# =============================================================================
class CIRCLE(DIV):
""" <circle> element """
tag = "circle"
# =============================================================================
class PATH(DIV):
""" <path> element """
tag = "path"
# =============================================================================
class SVG(DIV):
""" <svg> element """
tag = "svg"
# =============================================================================
class S3MainMenuLayout(S3NavigationItem):
""" Custom Main Menu Layout """
@staticmethod
def layout(item):
""" Custom Layout Method """
T = current.T
auth = current.auth
has_role = auth.s3_has_role
request = current.request
c = request.controller
f = request.function
# Inject JavaScript
s3 = current.response.s3
s3.scripts.append("/%s/static/themes/RMS/js/nav.js" % request.application)
# Use tooltip-f class to avoid clash with widgets.css
# Remove nub
s3.js_foundation = '''{tooltip:{tooltip_class:'.tooltip-f',tip_template:function(selector,content){var tooltipClass='';if(!$('div[data-selector="'+selector+'"]').hasClass('hd')){tooltipClass=' tooltip-m'};return '<span data-selector="'+selector+'" class="'+Foundation.libs.tooltip.settings.tooltip_class.substring(1)+tooltipClass+'">'+content+'</span>'}}}'''
settings = ""
len_roles = len(current.session.s3.roles)
if (len_roles <= 2) or \
(len_roles == 3 and has_role("RIT_MEMBER", include_admin=False)):
# No specific Roles
# Just show Profile on main menu
apps = ""
iframe = ""
side_menu_control = ""
module_logo = ""
else:
# Side-menu control
if current.menu.options is None:
# Don't show control as no side-menu
side_menu_control = ""
else:
# Show control
side_menu_control = DIV(A(SVG(PATH(_d = "M3 18h18v-2H3v2zm0-5h18v-2H3v2zm0-7v2h18V6H3z",
),
_fill = "#5f6368",
_height = "24px",
_width = "24px",
),
_role = "button",
),
_id = "menu-btn",
_class = "hd",
_title = T("Main menu"),
)
side_menu_control["_data-tooltip"] = ""
side_menu_control["_aria-haspopup"] = "true"
# Module Logo
if c == "hrm":
if f in training_functions:
image = "training.png"
module_name = T("Training")
module_href = URL(c="hrm", f="training_event")
elif "profile" in request.get_vars:
image = None
else:
image = "human_talent.png"
module_name = T("Human Talent")
module_href = URL(c="hrm", f="index")
elif c == "org":
image = "human_talent.png"
module_name = T("Human Talent")
module_href = URL(c="hrm", f="index")
elif c in ("inv", "proc", "supply", "req"):
image = "warehouses.png"
module_name = T("Warehouses")
if auth.s3_has_roles(("ORG_ADMIN",
"wh_operator",
"logs_manager",
)):
module_href = URL(c="inv", f="index")
else:
module_href = URL(c="req", f="req")
elif c == "project":
image = "projects.png"
module_name = T("Projects")
module_href = URL(c="project", f="project",
args = "summary",
)
elif c == "deploy":
image = "RIT.png"
module_name = T("RIT")
module_href = URL(c="deploy", f = "mission",
args = "summary",
vars = {"status__belongs": 2},
)
elif c == "member":
image = "partners.png"
module_name = T("Partners")
module_href = URL(c="member", f="membership")
else:
image = None
if image:
module_logo = DIV(A(IMG(_src = URL(c="static", f="themes",
args = [THEME,
"img",
image,
]),
_class = "hi",
_height = "36",
_width = "36",
),
_href = module_href,
_role = "button",
),
_class = "hdm",
_title = module_name,
)
module_logo["_data-tooltip"] = ""
module_logo["_aria-haspopup"] = "true"
else:
module_logo = ""
# Applications switcher
apps = DIV(A(SVG(PATH(_d = "M6,8c1.1,0 2,-0.9 2,-2s-0.9,-2 -2,-2 -2,0.9 -2,2 0.9,2 2,2zM12,20c1.1,0 2,-0.9 2,-2s-0.9,-2 -2,-2 -2,0.9 -2,2 0.9,2 2,2zM6,20c1.1,0 2,-0.9 2,-2s-0.9,-2 -2,-2 -2,0.9 -2,2 0.9,2 2,2zM6,14c1.1,0 2,-0.9 2,-2s-0.9,-2 -2,-2 -2,0.9 -2,2 0.9,2 2,2zM12,14c1.1,0 2,-0.9 2,-2s-0.9,-2 -2,-2 -2,0.9 -2,2 0.9,2 2,2zM16,6c0,1.1 0.9,2 2,2s2,-0.9 2,-2 -0.9,-2 -2,-2 -2,0.9 -2,2zM12,8c1.1,0 2,-0.9 2,-2s-0.9,-2 -2,-2 -2,0.9 -2,2 0.9,2 2,2zM18,14c1.1,0 2,-0.9 2,-2s-0.9,-2 -2,-2 -2,0.9 -2,2 0.9,2 2,2zM18,20c1.1,0 2,-0.9 2,-2s-0.9,-2 -2,-2 -2,0.9 -2,2 0.9,2 2,2z",
),
_fill = "#5f6368",
_height = "24px",
_width = "24px",
),
_href = "#",
_role = "button",
),
_class = "hd",
_id = "apps-btn",
_title = T("RMS modules"),
)
apps["_data-tooltip"] = ""
apps["_aria-haspopup"] = "true"
iframe = DIV(IFRAME(_role = "presentation",
_class = "hide",
_id = "apps-frame",
_frameborder = "0",
_scrolling = "no",
_src = URL(c="default", f="index",
args = "apps",
),
_style = "height: 100%; width: 100%;",
),
_class = "apps-frame",
)
# Settings
if has_role("ADMIN"):
settings = URL(c="admin", f="index")
if c == "admin":
settings_active = " active"
else:
settings_active = ""
elif has_role("ORG_ADMIN"):
settings = URL(c="admin", f="user")
if c == "admin":
settings_active = " active"
else:
settings_active = ""
elif auth.s3_has_roles(("hr_manager",
"ns_training_manager",
"training_coordinator",
)):
settings = URL(c="pr", f="forum")
if c == "pr" and \
f == "forum":
settings_active = " active"
else:
settings_active = ""
elif has_role("logs_manager"):
# WMS Module configuration
# ▪ Labelling
# ▪ Auto localisation
# ▪ Sharing authorisation
# ▪ Alerts
# ▪ Email for notification
settings = URL(c="req", f="approver")
if c == "req" and \
f == "approver":
settings_active = " active"
else:
settings_active = ""
if settings:
settings = DIV(A(SVG(PATH(_d = "M13.85 22.25h-3.7c-.74 0-1.36-.54-1.45-1.27l-.27-1.89c-.27-.14-.53-.29-.79-.46l-1.8.72c-.7.26-1.47-.03-1.81-.65L2.2 15.53c-.35-.66-.2-1.44.36-1.88l1.53-1.19c-.01-.15-.02-.3-.02-.46 0-.15.01-.31.02-.46l-1.52-1.19c-.59-.45-.74-1.26-.37-1.88l1.85-3.19c.34-.62 1.11-.9 1.79-.63l1.81.73c.26-.17.52-.32.78-.46l.27-1.91c.09-.7.71-1.25 1.44-1.25h3.7c.74 0 1.36.54 1.45 1.27l.27 1.89c.27.14.53.29.79.46l1.8-.72c.71-.26 1.48.03 1.82.65l1.84 3.18c.36.66.2 1.44-.36 1.88l-1.52 1.19c.01.15.02.3.02.46s-.01.31-.02.46l1.52 1.19c.56.45.72 1.23.37 1.86l-1.86 3.22c-.34.62-1.11.9-1.8.63l-1.8-.72c-.26.17-.52.32-.78.46l-.27 1.91c-.1.68-.72 1.22-1.46 1.22zm-3.23-2h2.76l.37-2.55.53-.22c.44-.18.88-.44 1.34-.78l.45-.34 2.38.96 1.38-2.4-2.03-1.58.07-.56c.03-.26.06-.51.06-.78s-.03-.53-.06-.78l-.07-.56 2.03-1.58-1.39-2.4-2.39.96-.45-.35c-.42-.32-.87-.58-1.33-.77l-.52-.22-.37-2.55h-2.76l-.37 2.55-.53.21c-.44.19-.88.44-1.34.79l-.45.33-2.38-.95-1.39 2.39 2.03 1.58-.07.56a7 7 0 0 0-.06.79c0 .26.02.53.06.78l.07.56-2.03 1.58 1.38 2.4 2.39-.96.45.35c.43.33.86.58 1.33.77l.53.22.38 2.55z",
),
CIRCLE(_cx = "12",
_cy = "12",
_r = "3.5",
),
_fill = "#5f6368",
_height = "24px",
_width = "24px",
),
_href = settings,
_role = "button",
),
_class = "hd%s" % settings_active,
_title = T("Settings"),
)
settings["_data-tooltip"] = ""
settings["_aria-haspopup"] = "true"
# Help Menu
if c == "default" and \
f == "help":
help_active = " active"
else:
help_active = ""
support = DIV(A(SVG(PATH(_fill = "none",
_d = "M0 0h24v24H0z",
),
PATH(_d = "M11 18h2v-2h-2v2zm1-16C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm0 18c-4.41 0-8-3.59-8-8s3.59-8 8-8 8 3.59 8 8-3.59 8-8 8zm0-14c-2.21 0-4 1.79-4 4h2c0-1.1.9-2 2-2s2 .9 2 2c0 2-3 1.75-3 5h2c0-2.25 3-2.5 3-5 0-2.21-1.79-4-4-4z",
),
_fill = "#5f6368",
_height = "24px",
_width = "24px",
),
_href = URL(c = "default",
f = "help",
),
_role = "button",
),
_class = "hd%s" % help_active,
_title = T("Support"),
)
support["_data-tooltip"] = ""
support["_aria-haspopup"] = "true"
# Logo
name = "IFRC"
logo = None
# Lookup Root Organisation name & Logo
root_org = auth.root_org()
if root_org:
db = current.db
s3db = current.s3db
language = current.session.s3.language
if language == current.deployment_settings.get_L10n_default_language():
l10n = None
else:
ltable = s3db.org_organisation_name
query = (ltable.organisation_id == root_org) & \
(ltable.language == language)
l10n = db(query).select(ltable.name_l10n,
ltable.acronym_l10n,
limitby = (0, 1),
cache = s3db.cache,
).first()
table = s3db.org_organisation
record = db(table.id == root_org).select(table.name,
#table.acronym,
table.logo,
limitby = (0, 1),
cache = s3db.cache,
).first()
if l10n:
#if l10n.acronym_l10n:
#name = _name = l10n.acronym_l10n
#else:
name = l10n.name_l10n
if record:
if not l10n:
#if record.acronym:
#name = _name = record.acronym
#else:
name = record.name
if record.logo:
size = (60, None)
image = s3db.pr_image_library_represent(record.logo, size=size)
url_small = URL(c="default", f="download", args=image)
alt = "%s logo" % name
logo = IMG(_src = url_small,
_alt = alt,
_class = "hi",
_width = 60,
)
if not logo:
# Default to generic IFRC
logo = IMG(_src = "/%s/static/themes/RMS/img/logo_small.png" %
request.application,
_alt = T("Red Cross/Red Crescent"),
_class = "hi",
_width = 60,
)
# User Profile
user_a = A(s3_avatar_represent(auth.user.id,
_class = "hip",
_height = 36,
_width = 36,
),
_id = "user-btn",
_role = "button",
)
user_menu = DIV(UL(LI(A(T("Profile"),
_href = URL(c="default", f="person"),
),
),
LI(A(T("Change Password"),
_href = URL(c="default", f="user",
args = "change_password",
),
),
),
LI(A(T("Logout"),
_href = URL(c="default", f="user",
args = "logout",
),
),
),
),
_id = "user-menu",
_class = "hide",
)
user_profile = DIV(user_a,
user_menu,
_class = "hdp",
_title = T("RMS Account"),
)
user_profile["_data-tooltip"] = ""
user_profile["_aria-haspopup"] = "true"
# Overall menu
divs = [DIV(side_menu_control,
module_logo,
_class = "large-2 medium-3 small-4 columns",
),
DIV(DIV(support,
settings,
apps,
DIV(logo,
_class = "hdl",
),
user_profile,
iframe,
_class = "fright",
),
_class = "large-4 medium-6 small-8 columns",
),
]
return TAG[""](*divs)
# =============================================================================
class S3AboutMenuLayout(S3NavigationItem):
@staticmethod
def layout(item):
if item.parent is None:
# The menu
items = item.render_components()
if items:
return UL(items, _class="sub-nav about-menu left")
else:
return "" # menu is empty
else:
# A menu item
if item.enabled and item.authorized:
return LI(A(item.label, _href=item.url()))
else:
return None
# -----------------------------------------------------------------------------
# Shortcut
MA = S3AboutMenuLayout
# =============================================================================
class S3OrgMenuLayout(S3NavigationItem):
"""
Layout for the organisation-specific menu
- used by the custom PDF Form for REQ
- replace with s3db.org_organisation_logo()?
"""
@staticmethod
def layout(item):
"""
@ToDo: Migrate to s3db.org_logo_represent
"""
name = "IFRC"
logo = None
# Lookup Root Organisation name & Logo
root_org = current.auth.root_org()
if root_org:
db = current.db
s3db = current.s3db
language = current.session.s3.language
if language == current.deployment_settings.get_L10n_default_language():
l10n = None
else:
ltable = s3db.org_organisation_name
query = (ltable.organisation_id == root_org) & \
(ltable.language == language)
l10n = db(query).select(ltable.name_l10n,
ltable.acronym_l10n,
limitby = (0, 1),
cache = s3db.cache,
).first()
table = s3db.org_organisation
record = db(table.id == root_org).select(table.name,
#table.acronym,
table.logo,
limitby = (0, 1),
cache = s3db.cache,
).first()
if l10n:
#if l10n.acronym_l10n:
#name = _name = l10n.acronym_l10n
#else:
name = l10n.name_l10n
if record:
if not l10n:
#if record.acronym:
#name = _name = record.acronym
#else:
name = record.name
if record.logo:
size = (60, None)
image = s3db.pr_image_library_represent(record.logo, size=size)
url_small = URL(c="default", f="download", args=image)
alt = "%s logo" % name
logo = IMG(_src = url_small,
_alt = alt,
_width = 60,
)
if not logo:
# Default to generic IFRC
logo = IMG(_src = "/%s/static/themes/RMS/img/logo_small.png" %
current.request.application,
_alt = current.T("Red Cross/Red Crescent"),
_width = 60,
)
# Note: render using current.menu.org.render()[0] + current.menu.org.render()[1]
return (name, logo)
# -----------------------------------------------------------------------------
# Shortcut
OM = S3OrgMenuLayout
# END =========================================================================
| 43.496945
| 1,111
| 0.360397
|
8ed9f2fbea9cf3c92b21542d040cf7d27752af6a
| 8,475
|
py
|
Python
|
int_ui/html_xltree.py
|
okriuchykhin/anfisa
|
cda08e649c5a313c7d52f9b4426558c7388a73b0
|
[
"Apache-2.0"
] | null | null | null |
int_ui/html_xltree.py
|
okriuchykhin/anfisa
|
cda08e649c5a313c7d52f9b4426558c7388a73b0
|
[
"Apache-2.0"
] | null | null | null |
int_ui/html_xltree.py
|
okriuchykhin/anfisa
|
cda08e649c5a313c7d52f9b4426558c7388a73b0
|
[
"Apache-2.0"
] | null | null | null |
from xml.sax.saxutils import escape
from .gen_html import startHtmlPage
from .html_xl import formNoteDiv, formCreateWsDiv, formSubViewDiv
#===============================================
def formXLTreePage(output, common_title, html_base, xl_ds, ws_url):
startHtmlPage(output,
common_title + "-XL " + xl_ds.getName() + "(d-tree)", html_base,
css_files = ["xltree.css", "py_pygments.css", "base.css"],
js_files = ["xltree.js", "fctrl.js",
"xl_ctrl.js", "base.js"])
print(' <body onload="setupXLTree(\'%s\', \'%s\', \'%s\');">' %
(xl_ds.getName(), common_title, ws_url), file = output)
_formXLPannel(output, xl_ds)
_formCurCondDiv(output)
_formVersionsDiv(output)
_formEditCodeDiv(output)
formNoteDiv(output)
formCreateWsDiv(output)
formSubViewDiv(output)
print(' </body>', file = output)
print('</html>', file = output)
#===============================================
def _formXLPannel(output, ds):
print('''
<div id="xl-ctrl">
<div id="xl-info">
<span id="control-wrap" title="Control Menu..." class="drop">
<span id="control-open" class="drop"
onclick="openControlMenu();">⁝</span>
<div id="control-menu" class="drop">
<div onclick="goHome();"
class="drop ctrl-menu">Home Directory</div>
<div onclick="goToPage(\'DOC\');" id="menu-doc"
class="drop ctrl-menu">Documentation</div>
<div onclick="goToPage(\'XL\');"
class="drop ctrl-menu">Filtering pannel</div>
<div onclick="openNote();"
class="drop ctrl-menu">Dataset Note...</div>
<div onclick="wsCreate();"
class="drop ctrl-menu">Create workspace...</div>
</div>
</span> 
XL dataset: <span id="xl-name"></span><br/>
<select id="std-code-select" onchange="pickStdCode();"
title="Pick tree code from repository">
<option value="">in work</option>''', file = output)
for std_name in ds.getCondEnv().getStdTreeCodeNames():
print(' <option value="%s">%s</option>' % (
escape(std_name), escape(std_name)), file = output)
print('''
</select>
<button id="code-edit-show" onclick='sCodeEditH.show();'>
Edit code
</button>
</div>
<div id="xl-tree-info">
Accepted: <span id="report-accepted"></span> 
Rejected: <span id="report-rejected"></span><br/>
<div id="tree-ctrl">
<button id="tree-undo" title="Undo" class="action"
onclick='treeUndo();'> ↶
</button>
<button id="tree-redo" title="Redo" class="action"
onclick='treeRedo();'> ↷
</button>
<span id="tree-current-version" title="tree version"
onclick="modVersions();"></span>
<button id="tree-version" class="action" title="Save version"
onclick='treeVersionSave();'> Save
</button>
</div>
</div>
<div id="xl-cur-info">
Variants in scope: <span id="list-report"></span><br/>
<button id="xl-sub-view"
onclick="sSubViewH.show()">View variants</button>
</div>
</div>
<div id="xl-main">
<div id="panel-tree">
<div id="decision-tree">
</div>
</div>
<div id="panel-stat">
<div id="stat-list">
</div>
</div>
</div>''', file = output)
#===============================================
def _formCurCondDiv(output):
print('''
<div id="cur-cond-back">
<div id="cur-cond-mod">
<div id="condition-change">
<div id="cond-title-wrap">
<span id="cond-title"></span>
<span class="close-it" onclick="modalOff();">×</span>
</div>
<div id="cond-message"></div>
<div id="cur-cond-numeric">
<span id="cond-min" class="num-set"></span>
<input id="cond-min-inp" class="num-inp"
type="text" onchange="sOpNumH.checkControls();"/>
<span id="cond-sign"></span>
<input id="cond-max-inp" class="num-inp"
type="text" onchange="sOpNumH.checkControls();"/>
<span id="cond-max" class="num-set"></span>
<span id="num-count" class="num-count"></span>
</div>
<div id="cur-cond-enum">
<div id="cur-cond-zyg-problem-group"></div>
<div id="wrap-cond-enum">
<div id="cur-cond-enum-list">
<div id="op-enum-list">
</div>
</div>
<div id="cur-cond-enum-ctrl">
<div id="cur-cond-enum-zeros">
Show zeros <input id="cur-enum-zeros" type="checkbox"
onchange="sOpEnumH.careEnumZeros();"/>
</div>
<div id="cur-cond-enum-mode">
<span id="cond-mode-and-span">
<input id="cond-mode-and" type="checkbox"
onchange="sOpEnumH.checkControls(1);"/> all
</span><br/>
<span id="cond-mode-not-span">
<input id="cond-mode-not" type="checkbox"
onchange="sOpEnumH.checkControls(2);"/> not
</span><br/>
</div>
</div>
</div>
</div>
<div id="cur-cond-loading">
<div class="loading">Loading data...</div>
</div>
<div id="cond-ctrl">
<button id="cond-button-set" onclick="fixMark();">
Set
</button>
<button onclick="modalOff();">
Cancel
</button>
</div>
</div>
</div>
</div>''', file = output)
#===============================================
def _formVersionsDiv(output):
print('''
<div id="versions-back" class="modal-back">
<div id="versions-mod">
<div id="versions-title">
Versions
<span class="close-it" onclick="modalOff();">×</span>
</div>
<div id="versions-main">
<div id="versions-list-wrap">
<div id="versions-tab"></div>
</div>
<div id="versions-cmp-wrap">
<div id="versions-cmp"></div>
</div>
</div>
<div id="versions-ctrl">
<button class="action" onclick="modalOff();">
Done
</button>
<button id="btn-version-select" class="action" title="Select version"
onclick='versionSelect();'> Select
</button>
<span id="versions-ctrl-sep"></span>
<button id="btn-version-delete" class="action" title="Delete version"
onclick='versionDelete();'> Delete
</button>
</div>
</div>
</div>
''', file = output)
#===============================================
def _formEditCodeDiv(output):
print('''
<div id="code-edit-back" class="modal-back">
<div id="code-edit-mod">
<div id="code-edit-top">
<span id="code-edit-title">Edit decision tree code</span>
<span class="close-it" onclick="sViewH.modalOff();">×</span>
</div>
<div id="code-edit-ctrl">
<button id="code-edit-drop" onclick="sCodeEditH.drop();">
Drop changes
</button>
<button onclick="sViewH.modalOff();">
Done
</button>
<button id="code-edit-save" onclick="sCodeEditH.save();">
Save
</button>
<span id="code-edit-error"
onclick="sCodeEditH.posError();"></span>
</div>
<div id="code-edit-main">
<textarea id="code-edit-content"
oninput="sCodeEditH.checkContent();"></textarea>
</div>
</div>
</div>
''', file = output)
| 38.69863
| 81
| 0.466549
|
a57d0f057ddb63a0107921f8d1ec9a68e4dc54a7
| 35,068
|
py
|
Python
|
fastybird_fb_bus_connector/registry/records.py
|
FastyBird/fb-bus-connector-plugin
|
71568874243578a37a01bd3f0cbb3306c331d11f
|
[
"Apache-2.0"
] | null | null | null |
fastybird_fb_bus_connector/registry/records.py
|
FastyBird/fb-bus-connector-plugin
|
71568874243578a37a01bd3f0cbb3306c331d11f
|
[
"Apache-2.0"
] | null | null | null |
fastybird_fb_bus_connector/registry/records.py
|
FastyBird/fb-bus-connector-plugin
|
71568874243578a37a01bd3f0cbb3306c331d11f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# Copyright 2021. FastyBird s.r.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
FastyBird BUS connector registry module records
"""
# pylint: disable=too-many-lines
# Python base dependencies
import time
import uuid
from abc import ABC
from datetime import datetime
from typing import List, Optional, Tuple, Union
# Library dependencies
from fastybird_devices_module.utils import normalize_value
from fastybird_metadata.devices_module import ConnectionState
from fastybird_metadata.types import ButtonPayload, DataType, SwitchPayload
# Library libs
from fastybird_fb_bus_connector.types import DeviceAttribute, Packet, RegisterType
class DeviceRecord: # pylint: disable=too-many-public-methods,too-many-instance-attributes
"""
Device record
@package FastyBird:FbBusConnector!
@module registry/records
@author Adam Kadlec <adam.kadlec@fastybird.com>
"""
__id: uuid.UUID
__serial_number: str
__enabled: bool = False
__last_writing_packet_timestamp: float = 0.0  # Timestamp when the last writing request was sent to the device
__last_reading_packet_timestamp: float = 0.0  # Timestamp when the last reading request was sent to the device
__last_misc_packet_timestamp: float = 0.0  # Timestamp when the last misc request was sent to the device
__attempts: int = 0
__sampling_time: float = 10.0
__lost_timestamp: float = 0.0
# -----------------------------------------------------------------------------
def __init__( # pylint: disable=too-many-arguments,too-many-locals
self,
device_id: uuid.UUID,
serial_number: str,
enabled: bool = False,
) -> None:
self.__id = device_id
self.__serial_number = serial_number
self.__enabled = enabled
# -----------------------------------------------------------------------------
@property
def id(self) -> uuid.UUID: # pylint: disable=invalid-name
"""Device unique database identifier"""
return self.__id
# -----------------------------------------------------------------------------
@property
def serial_number(self) -> str:
"""Device unique serial number"""
return self.__serial_number
# -----------------------------------------------------------------------------
@property
def enabled(self) -> bool:
"""Is device enabled?"""
return self.__enabled
# -----------------------------------------------------------------------------
@enabled.setter
def enabled(self, enabled: bool) -> None:
"""Enable or disable device"""
self.__enabled = enabled
# -----------------------------------------------------------------------------
@property
def last_reading_packet_timestamp(self) -> float:
"""Last reading packet sent time stamp"""
return self.__last_reading_packet_timestamp
# -----------------------------------------------------------------------------
@last_reading_packet_timestamp.setter
def last_reading_packet_timestamp(self, timestamp: float) -> None:
"""Last reading packet sent time stamp setter"""
self.__last_reading_packet_timestamp = timestamp
# -----------------------------------------------------------------------------
@property
def last_writing_packet_timestamp(self) -> float:
"""Last writing packet sent time stamp"""
return self.__last_writing_packet_timestamp
# -----------------------------------------------------------------------------
@last_writing_packet_timestamp.setter
def last_writing_packet_timestamp(self, timestamp: float) -> None:
"""Last writing packet sent time stamp setter"""
self.__last_writing_packet_timestamp = timestamp
# -----------------------------------------------------------------------------
@property
def last_misc_packet_timestamp(self) -> float:
"""Last misc packet sent time stamp"""
return self.__last_misc_packet_timestamp
# -----------------------------------------------------------------------------
@last_misc_packet_timestamp.setter
def last_misc_packet_timestamp(self, timestamp: float) -> None:
"""Last misc packet sent time stamp setter"""
self.__last_misc_packet_timestamp = timestamp
# -----------------------------------------------------------------------------
@property
def transmit_attempts(self) -> int:
"""Transmit packet attempts count"""
return self.__attempts
# -----------------------------------------------------------------------------
@transmit_attempts.setter
def transmit_attempts(self, attempts: int) -> None:
"""Transmit packet attempts count setter"""
self.__attempts = attempts
# -----------------------------------------------------------------------------
@property
def lost_timestamp(self) -> float:
"""Time stamp when communication with device was lost"""
return self.__lost_timestamp
# -----------------------------------------------------------------------------
@lost_timestamp.setter
def lost_timestamp(self, timestamp: float) -> None:
"""Set lost communication time stamp"""
self.__lost_timestamp = timestamp
# -----------------------------------------------------------------------------
@property
def is_lost(self) -> bool:
"""Is device in lost state?"""
return self.__lost_timestamp != 0
# -----------------------------------------------------------------------------
@property
def sampling_time(self) -> float:
"""Device registers reading sampling time"""
return self.__sampling_time
# -----------------------------------------------------------------------------
def __hash__(self) -> int:
return self.__id.__hash__()
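# Illustrative usage sketch (added for clarity; not part of the original module). The
# identifiers below are hypothetical and only show how the lost-state helpers interact:
#
#     import time, uuid
#     device = DeviceRecord(device_id=uuid.uuid4(), serial_number="SN-000001", enabled=True)
#     device.lost_timestamp = time.time()  # communication dropped -> is_lost becomes True
#     device.lost_timestamp = 0.0          # communication restored -> is_lost becomes False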
class RegisterRecord(ABC): # pylint: disable=too-many-instance-attributes
"""
Base register record
@package FastyBird:FbBusConnector!
@module registry/records
@author Adam Kadlec <adam.kadlec@fastybird.com>
"""
__device_id: uuid.UUID
__id: uuid.UUID
__address: int
__type: RegisterType
__data_type: DataType
__invalid: Union[int, float, str, None] = None
__settable: bool = False
__queryable: bool = False
_actual_value: Union[str, int, float, bool, datetime, ButtonPayload, SwitchPayload, None] = None
_expected_value: Union[str, int, float, bool, datetime, ButtonPayload, SwitchPayload, None] = None
_expected_pending: Optional[float] = None
_actual_value_valid: bool = False
# -----------------------------------------------------------------------------
def __init__( # pylint: disable=too-many-arguments
self,
device_id: uuid.UUID,
register_id: uuid.UUID,
register_address: int,
register_type: RegisterType,
register_data_type: DataType,
register_invalid: Union[int, float, str, None] = None,
register_settable: bool = False,
register_queryable: bool = False,
) -> None:
self.__device_id = device_id
self.__id = register_id
self.__address = register_address
self.__type = register_type
self.__data_type = register_data_type
self.__invalid = register_invalid
self.__settable = register_settable
self.__queryable = register_queryable
self._actual_value = None
self._expected_value = None
        self._expected_pending = None
# -----------------------------------------------------------------------------
@property
def device_id(self) -> uuid.UUID:
"""Device unique database identifier"""
return self.__device_id
# -----------------------------------------------------------------------------
@property
def id(self) -> uuid.UUID: # pylint: disable=invalid-name
"""Register unique database identifier"""
return self.__id
# -----------------------------------------------------------------------------
@property
def address(self) -> int:
"""Register address"""
return self.__address
# -----------------------------------------------------------------------------
@property
def type(self) -> RegisterType:
"""Register type"""
return self.__type
# -----------------------------------------------------------------------------
@property
def data_type(self) -> DataType:
"""Record data type"""
return self.__data_type
# -----------------------------------------------------------------------------
@property
def format(
self,
) -> Union[
Tuple[Optional[int], Optional[int]],
Tuple[Optional[float], Optional[float]],
List[Union[str, Tuple[str, Optional[str], Optional[str]]]],
None,
]:
"""Register value format"""
if self.data_type == DataType.SWITCH:
return [
SwitchPayload.ON.value,
SwitchPayload.OFF.value,
SwitchPayload.TOGGLE.value,
]
if self.data_type == DataType.BUTTON:
return [
ButtonPayload.PRESSED.value,
ButtonPayload.RELEASED.value,
ButtonPayload.CLICKED.value,
ButtonPayload.DOUBLE_CLICKED.value,
ButtonPayload.TRIPLE_CLICKED.value,
ButtonPayload.LONG_CLICKED.value,
ButtonPayload.EXTRA_LONG_CLICKED.value,
]
return None
# -----------------------------------------------------------------------------
@property
def invalid(self) -> Union[int, float, str, None]:
"""Invalid value representation"""
return self.__invalid
# -----------------------------------------------------------------------------
@property
def data_type_size(self) -> int:
"""Register data type bytes size"""
if self.data_type in (
DataType.UCHAR,
DataType.CHAR,
DataType.BUTTON,
DataType.SWITCH,
):
return 1
if self.data_type in (
DataType.USHORT,
DataType.SHORT,
):
return 2
if self.data_type in (
DataType.UINT,
DataType.INT,
DataType.FLOAT,
):
return 4
if self.data_type == DataType.BOOLEAN:
return 2
return 0
# -----------------------------------------------------------------------------
@property
def settable(self) -> bool:
"""Is register settable?"""
return self.__settable
# -----------------------------------------------------------------------------
@property
def queryable(self) -> bool:
"""Is register queryable?"""
return self.__queryable
# -----------------------------------------------------------------------------
@property
def actual_value(self) -> Union[str, int, float, bool, datetime, ButtonPayload, SwitchPayload, None]:
"""Register actual value"""
return normalize_value(
data_type=self.data_type,
value=self._actual_value,
value_format=self.format,
value_invalid=self.invalid,
)
# -----------------------------------------------------------------------------
@actual_value.setter
def actual_value(self, value: Union[str, int, float, bool, datetime, ButtonPayload, SwitchPayload, None]) -> None:
"""Register actual value setter"""
self._actual_value = value
if self.actual_value == self.expected_value and self.expected_value is not None:
self.expected_value = None
self.expected_pending = None
if self.expected_value is None:
self.expected_pending = None
# -----------------------------------------------------------------------------
@property
def expected_value(self) -> Union[str, int, float, bool, datetime, ButtonPayload, SwitchPayload, None]:
"""Register expected value"""
return normalize_value(
data_type=self.data_type,
value=self._expected_value,
value_format=self.format,
value_invalid=self.invalid,
)
# -----------------------------------------------------------------------------
@expected_value.setter
def expected_value(
self,
value: Union[str, int, float, bool, datetime, ButtonPayload, SwitchPayload, None],
) -> None:
"""Register expected value setter"""
self._expected_value = value
self.expected_pending = None
# -----------------------------------------------------------------------------
@property
def expected_pending(self) -> Optional[float]:
"""Register expected value pending status"""
return self._expected_pending
# -----------------------------------------------------------------------------
@expected_pending.setter
def expected_pending(self, timestamp: Optional[float]) -> None:
"""Register expected value transmit timestamp setter"""
self._expected_pending = timestamp
# -----------------------------------------------------------------------------
@property
def actual_value_valid(self) -> bool:
"""Register actual value reading status"""
return self._actual_value_valid
# -----------------------------------------------------------------------------
@actual_value_valid.setter
def actual_value_valid(self, state: bool) -> None:
"""Register actual value reading status setter"""
self._actual_value_valid = state
# -----------------------------------------------------------------------------
def __hash__(self) -> int:
return self.__id.__hash__()
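# Illustrative sketch (added for clarity; not part of the original module) of the
# expected/actual value round trip handled by the setters above, for a hypothetical
# register instance:
#
#     register.expected_value = 22.5            # value requested by the upper layer
#     register.expected_pending = time.time()   # marked once the write packet is sent
#     register.actual_value = 22.5              # device confirms the value, so both
#                                               # expected_value and expected_pending
#                                               # are cleared back to None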
class InputRegisterRecord(RegisterRecord):
"""
Input register record
@package FastyBird:FbBusConnector!
@module registry/records
@author Adam Kadlec <adam.kadlec@fastybird.com>
"""
__channel_id: Optional[uuid.UUID] = None
# -----------------------------------------------------------------------------
def __init__( # pylint: disable=too-many-arguments
self,
device_id: uuid.UUID,
register_id: uuid.UUID,
register_address: int,
register_data_type: DataType,
register_invalid: Union[int, float, str, None] = None,
channel_id: Optional[uuid.UUID] = None,
) -> None:
super().__init__(
device_id=device_id,
register_id=register_id,
register_address=register_address,
register_type=RegisterType.INPUT,
register_data_type=register_data_type,
register_invalid=register_invalid,
register_settable=False,
register_queryable=True,
)
self.__channel_id = channel_id
# -----------------------------------------------------------------------------
@property
def channel_id(self) -> Optional[uuid.UUID]:
"""Device channel unique database identifier"""
return self.__channel_id
class OutputRegisterRecord(RegisterRecord):
"""
Output register record
@package FastyBird:FbBusConnector!
@module registry/records
@author Adam Kadlec <adam.kadlec@fastybird.com>
"""
__channel_id: Optional[uuid.UUID] = None
# -----------------------------------------------------------------------------
def __init__( # pylint: disable=too-many-arguments
self,
device_id: uuid.UUID,
register_id: uuid.UUID,
register_address: int,
register_data_type: DataType,
register_invalid: Union[int, float, str, None] = None,
channel_id: Optional[uuid.UUID] = None,
) -> None:
super().__init__(
device_id=device_id,
register_id=register_id,
register_address=register_address,
register_type=RegisterType.OUTPUT,
register_data_type=register_data_type,
register_invalid=register_invalid,
register_settable=True,
register_queryable=True,
)
self.__channel_id = channel_id
# -----------------------------------------------------------------------------
@property
def channel_id(self) -> Optional[uuid.UUID]:
"""Device channel unique database identifier"""
return self.__channel_id
class AttributeRegisterRecord(RegisterRecord):
"""
Attribute register record
@package FastyBird:FbBusConnector!
@module registry/records
@author Adam Kadlec <adam.kadlec@fastybird.com>
"""
__name: Optional[str] = None
# -----------------------------------------------------------------------------
def __init__( # pylint: disable=too-many-arguments
self,
device_id: uuid.UUID,
register_id: uuid.UUID,
register_address: int,
register_data_type: DataType,
register_name: Optional[str],
register_invalid: Union[int, float, str, None] = None,
register_settable: bool = False,
register_queryable: bool = False,
) -> None:
super().__init__(
device_id=device_id,
register_id=register_id,
register_address=register_address,
register_type=RegisterType.ATTRIBUTE,
register_data_type=register_data_type,
register_invalid=register_invalid,
register_settable=register_settable,
register_queryable=register_queryable,
)
self.__name = register_name
# -----------------------------------------------------------------------------
@property
def name(self) -> Optional[str]:
"""Attribute register name"""
return self.__name
# -----------------------------------------------------------------------------
@property
def format(
self,
) -> Union[
Tuple[Optional[int], Optional[int]],
Tuple[Optional[float], Optional[float]],
List[Union[str, Tuple[str, Optional[str], Optional[str]]]],
None,
]:
"""Attribute register value format"""
if self.name == DeviceAttribute.STATE.value:
return [
ConnectionState.RUNNING.value,
ConnectionState.STOPPED.value,
ConnectionState.DISCONNECTED.value,
ConnectionState.LOST.value,
ConnectionState.ALERT.value,
ConnectionState.UNKNOWN.value,
]
return super().format
class DeviceAttributeRecord:
"""
Device attribute record
@package FastyBird:FbBusConnector!
@module registry/records
@author Adam Kadlec <adam.kadlec@fastybird.com>
"""
__device_id: uuid.UUID
__id: uuid.UUID
__identifier: str
__name: Optional[str]
__value: Optional[str]
# -----------------------------------------------------------------------------
def __init__( # pylint: disable=too-many-arguments
self,
device_id: uuid.UUID,
attribute_id: uuid.UUID,
attribute_identifier: str,
attribute_name: Optional[str],
attribute_value: Optional[str],
) -> None:
self.__device_id = device_id
self.__id = attribute_id
self.__identifier = attribute_identifier
self.__name = attribute_name
self.__value = attribute_value
# -----------------------------------------------------------------------------
@property
def device_id(self) -> uuid.UUID:
"""Attribute device unique identifier"""
return self.__device_id
# -----------------------------------------------------------------------------
@property
def id(self) -> uuid.UUID: # pylint: disable=invalid-name
"""Attribute unique database identifier"""
return self.__id
# -----------------------------------------------------------------------------
@property
def identifier(self) -> str:
"""Attribute unique identifier"""
return self.__identifier
# -----------------------------------------------------------------------------
@property
def name(self) -> Optional[str]:
"""Attribute name"""
return self.__name
# -----------------------------------------------------------------------------
@property
def value(self) -> Optional[str]:
"""Attribute value"""
return self.__value
# -----------------------------------------------------------------------------
def __eq__(self, other: object) -> bool:
if not isinstance(other, DeviceAttributeRecord):
return False
return self.device_id == other.device_id and self.id == other.id and self.identifier == other.identifier
# -----------------------------------------------------------------------------
def __hash__(self) -> int:
return self.__id.__hash__()
class DiscoveredDeviceRecord: # pylint: disable=too-many-instance-attributes
"""
Discovered device record
@package FastyBird:FbBusConnector!
@module registry/records
@author Adam Kadlec <adam.kadlec@fastybird.com>
"""
__address: int
__serial_number: str
__state: ConnectionState
__max_packet_length: int
__hardware_version: str
__hardware_model: str
__hardware_manufacturer: str
__firmware_version: str
__firmware_manufacturer: str
__input_registers_size: int
__output_registers_size: int
__attributes_registers_size: int
__waiting_for_packet: Optional[Packet] = None
__last_packet_sent_timestamp: float = 0.0 # Timestamp when request was sent to the device
__attempts: int = 0
# -----------------------------------------------------------------------------
def __init__( # pylint: disable=too-many-locals,too-many-arguments
self,
device_address: int,
device_max_packet_length: int,
device_serial_number: str,
device_state: ConnectionState,
device_hardware_version: str,
device_hardware_model: str,
device_hardware_manufacturer: str,
device_firmware_version: str,
device_firmware_manufacturer: str,
input_registers_size: int,
output_registers_size: int,
attributes_registers_size: int,
) -> None:
self.__address = device_address
self.__max_packet_length = device_max_packet_length
self.__serial_number = device_serial_number
self.__state = device_state
self.__hardware_version = device_hardware_version
self.__hardware_model = device_hardware_model
self.__hardware_manufacturer = device_hardware_manufacturer
self.__firmware_version = device_firmware_version
self.__firmware_manufacturer = device_firmware_manufacturer
self.__input_registers_size = input_registers_size
self.__output_registers_size = output_registers_size
self.__attributes_registers_size = attributes_registers_size
self.__waiting_for_packet = None
self.__last_packet_sent_timestamp = 0.0
self.__attempts = 0
# -----------------------------------------------------------------------------
@property
def address(self) -> int:
"""Device communication address"""
return self.__address
# -----------------------------------------------------------------------------
@address.setter
def address(self, address: int) -> None:
"""Set device communication address"""
self.__address = address
# -----------------------------------------------------------------------------
@property
def max_packet_length(self) -> int:
"""Maximum packet bytes length"""
return self.__max_packet_length
# -----------------------------------------------------------------------------
@property
def serial_number(self) -> str:
"""Serial number"""
return self.__serial_number
# -----------------------------------------------------------------------------
@property
def state(self) -> ConnectionState:
"""Actual state"""
return self.__state
# -----------------------------------------------------------------------------
@property
def hardware_version(self) -> str:
"""Hardware version number"""
return self.__hardware_version
# -----------------------------------------------------------------------------
@property
def hardware_model(self) -> str:
"""Hardware model"""
return self.__hardware_model
# -----------------------------------------------------------------------------
@property
def hardware_manufacturer(self) -> str:
"""Hardware manufacturer"""
return self.__hardware_manufacturer
# -----------------------------------------------------------------------------
@property
def firmware_version(self) -> str:
"""Firmware version number"""
return self.__firmware_version
# -----------------------------------------------------------------------------
@property
def firmware_manufacturer(self) -> str:
"""Firmware manufacturer"""
return self.__firmware_manufacturer
# -----------------------------------------------------------------------------
@property
def input_registers_size(self) -> int:
"""Input registers size"""
return self.__input_registers_size
# -----------------------------------------------------------------------------
@property
def output_registers_size(self) -> int:
"""Output registers size"""
return self.__output_registers_size
# -----------------------------------------------------------------------------
@property
def attributes_registers_size(self) -> int:
"""Attributes registers size"""
return self.__attributes_registers_size
# -----------------------------------------------------------------------------
@property
def last_packet_timestamp(self) -> float:
"""Last packet sent time stamp"""
return self.__last_packet_sent_timestamp
# -----------------------------------------------------------------------------
@last_packet_timestamp.setter
def last_packet_timestamp(self, last_packet_timestamp: float) -> None:
"""Last packet sent time stamp setter"""
self.__last_packet_sent_timestamp = last_packet_timestamp
# -----------------------------------------------------------------------------
@property
def waiting_for_packet(self) -> Optional[Packet]:
"""Packet identifier connector is waiting for"""
return self.__waiting_for_packet
# -----------------------------------------------------------------------------
@waiting_for_packet.setter
def waiting_for_packet(self, waiting_for_packet: Optional[Packet]) -> None:
"""Packet identifier connector is waiting for setter"""
self.__waiting_for_packet = waiting_for_packet
if waiting_for_packet is not None:
self.__last_packet_sent_timestamp = time.time()
self.__attempts = self.__attempts + 1
else:
self.__attempts = 0
# -----------------------------------------------------------------------------
@property
def transmit_attempts(self) -> int:
"""Transmit packet attempts count"""
return self.__attempts
# -----------------------------------------------------------------------------
def __eq__(self, other: object) -> bool:
if not isinstance(other, DiscoveredDeviceRecord):
return False
return self.serial_number == other.serial_number
# -----------------------------------------------------------------------------
def __hash__(self) -> int:
return hash(self.__serial_number)
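# Illustrative sketch (added for clarity; not part of the original module) of the retry
# bookkeeping driven by the waiting_for_packet setter above, for a hypothetical
# discovered device record:
#
#     discovered.waiting_for_packet = some_packet  # any Packet member -> timestamp set,
#                                                  # transmit_attempts incremented
#     discovered.waiting_for_packet = None         # reply handled -> transmit_attempts reset to 0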
class DiscoveredRegisterRecord(ABC):
"""
Pairing base register record
@package FastyBird:FbBusConnector!
@module registry/records
@author Adam Kadlec <adam.kadlec@fastybird.com>
"""
__device_address: int
__device_serial_number: str
__address: int
__type: RegisterType
__data_type: DataType
__settable: bool = False
__queryable: bool = False
# -----------------------------------------------------------------------------
def __init__( # pylint: disable=too-many-arguments
self,
device_address: int,
device_serial_number: str,
register_address: int,
register_type: RegisterType,
register_data_type: DataType,
register_settable: bool = False,
register_queryable: bool = False,
) -> None:
self.__device_address = device_address
self.__device_serial_number = device_serial_number
self.__address = register_address
self.__type = register_type
self.__data_type = register_data_type
self.__queryable = register_queryable
self.__settable = register_settable
# -----------------------------------------------------------------------------
@property
def device_serial_number(self) -> str:
"""Device serial number"""
return self.__device_serial_number
# -----------------------------------------------------------------------------
@property
def device_address(self) -> int:
"""Device communication address"""
return self.__device_address
# -----------------------------------------------------------------------------
@property
def address(self) -> int:
"""Register address"""
return self.__address
# -----------------------------------------------------------------------------
@property
def type(self) -> RegisterType:
"""Register type"""
return self.__type
# -----------------------------------------------------------------------------
@property
def data_type(self) -> DataType:
"""Register data type"""
return self.__data_type
# -----------------------------------------------------------------------------
@property
def settable(self) -> bool:
"""Is register settable?"""
return self.__settable
# -----------------------------------------------------------------------------
@property
def queryable(self) -> bool:
"""Is register queryable?"""
return self.__queryable
# -----------------------------------------------------------------------------
def __eq__(self, other: object) -> bool:
if not isinstance(other, DiscoveredRegisterRecord):
return False
return (
self.device_serial_number == other.device_serial_number
and self.device_address == other.device_address
and self.address == other.address
and self.type == other.type
)
# -----------------------------------------------------------------------------
def __hash__(self) -> int:
return hash((self.device_serial_number, self.device_address, self.address, self.type.value))
class DiscoveredInputRegisterRecord(DiscoveredRegisterRecord):
"""
Pairing input register record
@package FastyBird:FbBusConnector!
@module registry/records
@author Adam Kadlec <adam.kadlec@fastybird.com>
"""
def __init__(
self,
device_address: int,
device_serial_number: str,
register_address: int,
register_data_type: DataType,
):
super().__init__(
device_address=device_address,
device_serial_number=device_serial_number,
register_address=register_address,
register_type=RegisterType.INPUT,
register_data_type=register_data_type,
register_queryable=True,
register_settable=False,
)
class DiscoveredOutputRegisterRecord(DiscoveredRegisterRecord):
"""
Pairing output register record
@package FastyBird:FbBusConnector!
@module registry/records
@author Adam Kadlec <adam.kadlec@fastybird.com>
"""
def __init__(
self,
device_address: int,
device_serial_number: str,
register_address: int,
register_data_type: DataType,
):
super().__init__(
device_address=device_address,
device_serial_number=device_serial_number,
register_address=register_address,
register_type=RegisterType.OUTPUT,
register_data_type=register_data_type,
register_queryable=True,
register_settable=True,
)
class DiscoveredAttributeRegisterRecord(DiscoveredRegisterRecord):
"""
Pairing attribute register record
@package FastyBird:FbBusConnector!
@module registry/records
@author Adam Kadlec <adam.kadlec@fastybird.com>
"""
__name: Optional[str]
# -----------------------------------------------------------------------------
def __init__( # pylint: disable=too-many-branches,too-many-arguments
self,
device_address: int,
device_serial_number: str,
register_address: int,
register_data_type: DataType,
register_name: Optional[str],
register_settable: bool = False,
register_queryable: bool = False,
):
super().__init__(
device_address=device_address,
device_serial_number=device_serial_number,
register_address=register_address,
register_type=RegisterType.ATTRIBUTE,
register_data_type=register_data_type,
            register_queryable=register_queryable,
            register_settable=register_settable,
)
self.__name = register_name
# -----------------------------------------------------------------------------
@property
def name(self) -> Optional[str]:
"""Attribute register name"""
return self.__name
| 31.479354
| 118
| 0.508526
|
eb0feb47c61171f0fe9c1bba370d4d7059588395
| 3,430
|
py
|
Python
|
influxdb_client/domain/cell_links.py
|
Rajpratik71/influxdb-client-python
|
ae537018b638600552b3ac11f1b070c048719910
|
[
"MIT"
] | null | null | null |
influxdb_client/domain/cell_links.py
|
Rajpratik71/influxdb-client-python
|
ae537018b638600552b3ac11f1b070c048719910
|
[
"MIT"
] | null | null | null |
influxdb_client/domain/cell_links.py
|
Rajpratik71/influxdb-client-python
|
ae537018b638600552b3ac11f1b070c048719910
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Influx API Service.
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class CellLinks(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'_self': 'str',
'view': 'str'
}
attribute_map = {
'_self': 'self',
'view': 'view'
}
def __init__(self, _self=None, view=None): # noqa: E501,D401,D403
"""CellLinks - a model defined in OpenAPI.""" # noqa: E501
self.__self = None
self._view = None
self.discriminator = None
if _self is not None:
self._self = _self
if view is not None:
self.view = view
@property
def _self(self):
"""Get the _self of this CellLinks.
:return: The _self of this CellLinks.
:rtype: str
""" # noqa: E501
return self.__self
@_self.setter
def _self(self, _self):
"""Set the _self of this CellLinks.
:param _self: The _self of this CellLinks.
:type: str
""" # noqa: E501
self.__self = _self
@property
def view(self):
"""Get the view of this CellLinks.
:return: The view of this CellLinks.
:rtype: str
""" # noqa: E501
return self._view
@view.setter
def view(self, view):
"""Set the view of this CellLinks.
:param view: The view of this CellLinks.
:type: str
""" # noqa: E501
self._view = view
def to_dict(self):
"""Return the model properties as a dict."""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Return the string representation of the model."""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`."""
return self.to_str()
def __eq__(self, other):
"""Return true if both objects are equal."""
if not isinstance(other, CellLinks):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Return true if both objects are not equal."""
return not self == other
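# Minimal usage sketch (added for illustration; not produced by the OpenAPI generator).
# The path values below are hypothetical; note that to_dict() keys follow openapi_types
# ('_self'/'view'), while attribute_map holds the JSON names used by the API ('self'/'view').
if __name__ == "__main__":
    links = CellLinks(_self="/dashboards/1/cells/2", view="/dashboards/1/cells/2/view")
    print(links.to_dict())  # {'_self': '/dashboards/1/cells/2', 'view': '/dashboards/1/cells/2/view'}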
| 25.789474
| 120
| 0.541983
|
0c50ce929d5a6a3b9a3667598dc3490533c683b1
| 3,832
|
py
|
Python
|
how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/train_explainer.py
|
mx-iao/MachineLearningNotebooks
|
8e7bb9e00ebd6d17d937289bb04b87dd7257651e
|
[
"MIT"
] | 3
|
2020-09-10T15:02:56.000Z
|
2020-09-13T17:37:47.000Z
|
how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/train_explainer.py
|
vijetajo/MachineLearningNotebooks
|
7e2c1ca152e280dc544f3c9654e9906a7f17c89b
|
[
"MIT"
] | null | null | null |
how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/train_explainer.py
|
vijetajo/MachineLearningNotebooks
|
7e2c1ca152e280dc544f3c9654e9906a7f17c89b
|
[
"MIT"
] | 2
|
2020-07-10T05:13:17.000Z
|
2020-07-10T18:15:04.000Z
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license.
import os
from azureml.core.run import Run
from azureml.core.experiment import Experiment
from azureml.core.dataset import Dataset
from azureml.train.automl.runtime.automl_explain_utilities import AutoMLExplainerSetupClass, \
automl_setup_model_explanations, automl_check_model_if_explainable
from azureml.explain.model.mimic.models.lightgbm_model import LGBMExplainableModel
from azureml.explain.model.mimic_wrapper import MimicWrapper
from azureml.automl.core.shared.constants import MODEL_PATH
from azureml.explain.model.scoring.scoring_explainer import TreeScoringExplainer
import joblib
OUTPUT_DIR = './outputs/'
os.makedirs(OUTPUT_DIR, exist_ok=True)
# Get workspace from the run context
run = Run.get_context()
ws = run.experiment.workspace
# Get the AutoML run object from the experiment name and the workspace
experiment = Experiment(ws, '<<experiment_name>>')
automl_run = Run(experiment=experiment, run_id='<<run_id>>')
# Check if this AutoML model is explainable
if not automl_check_model_if_explainable(automl_run):
raise Exception("Model explanations is currently not supported for " + automl_run.get_properties().get(
'run_algorithm'))
# Download the best model from the artifact store
automl_run.download_file(name=MODEL_PATH, output_file_path='model.pkl')
# Load the AutoML model into memory
fitted_model = joblib.load('model.pkl')
# Get the train dataset from the workspace
train_dataset = Dataset.get_by_name(workspace=ws, name='<<train_dataset_name>>')
# Drop the labelled column to get the training set.
X_train = train_dataset.drop_columns(columns=['<<target_column_name>>'])
y_train = train_dataset.keep_columns(columns=['<<target_column_name>>'], validate=True)
# Get the test dataset from the workspace
test_dataset = Dataset.get_by_name(workspace=ws, name='<<test_dataset_name>>')
# Drop the labelled column to get the testing set.
X_test = test_dataset.drop_columns(columns=['<<target_column_name>>'])
# Set up the class for explaining the AutoML models
automl_explainer_setup_obj = automl_setup_model_explanations(fitted_model, '<<task>>',
X=X_train, X_test=X_test,
y=y_train)
# Initialize the Mimic Explainer
explainer = MimicWrapper(ws, automl_explainer_setup_obj.automl_estimator, LGBMExplainableModel,
init_dataset=automl_explainer_setup_obj.X_transform, run=automl_run,
features=automl_explainer_setup_obj.engineered_feature_names,
feature_maps=[automl_explainer_setup_obj.feature_map],
classes=automl_explainer_setup_obj.classes)
# Compute the engineered explanations
engineered_explanations = explainer.explain(['local', 'global'], tag='engineered explanations',
eval_dataset=automl_explainer_setup_obj.X_test_transform)
# Compute the raw explanations
raw_explanations = explainer.explain(['local', 'global'], get_raw=True, tag='raw explanations',
raw_feature_names=automl_explainer_setup_obj.raw_feature_names,
eval_dataset=automl_explainer_setup_obj.X_test_transform)
print("Engineered and raw explanations computed successfully")
# Initialize the ScoringExplainer
scoring_explainer = TreeScoringExplainer(explainer.explainer, feature_maps=[automl_explainer_setup_obj.feature_map])
# Pickle scoring explainer locally
with open('scoring_explainer.pkl', 'wb') as stream:
joblib.dump(scoring_explainer, stream)
# Upload the scoring explainer to the automl run
automl_run.upload_file('outputs/scoring_explainer.pkl', 'scoring_explainer.pkl')
| 47.308642
| 116
| 0.747129
|
09822fae946efeb45144b533a71ece6c46599302
| 376
|
py
|
Python
|
pal/filter/mpam.py
|
mars-research/pal
|
5977394cda8750ff5dcb89c2bf193ec1ef4cd137
|
[
"MIT"
] | 26
|
2020-01-06T23:53:17.000Z
|
2022-02-01T08:58:21.000Z
|
pal/filter/mpam.py
|
mars-research/pal
|
5977394cda8750ff5dcb89c2bf193ec1ef4cd137
|
[
"MIT"
] | 30
|
2019-11-13T00:55:22.000Z
|
2022-01-06T08:09:35.000Z
|
pal/filter/mpam.py
|
mars-research/pal
|
5977394cda8750ff5dcb89c2bf193ec1ef4cd137
|
[
"MIT"
] | 14
|
2019-11-15T16:56:22.000Z
|
2021-12-22T10:14:17.000Z
|
from pal.filter.abstract_filter import AbstractFilter
class MPAMRegisterFilter(AbstractFilter):
@property
def description(self):
return "memory partitioning and monitoring (MPAM) registers"
def do_filter(self, reg):
regname = reg.name.lower()
        if regname.startswith("mpam"):
return False
else:
return True
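# Illustrative sketch (added for clarity; not part of the original module), using a
# hypothetical register object that exposes a ``name`` attribute as pal passes to filters:
#
#     from types import SimpleNamespace
#     MPAMRegisterFilter().do_filter(SimpleNamespace(name="MPAM1_EL1"))   # -> False, dropped
#     MPAMRegisterFilter().do_filter(SimpleNamespace(name="SCTLR_EL1"))   # -> True, kept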
| 26.857143
| 68
| 0.656915
|
85682b22cc3b6d994ea99167f879bdefc233f84a
| 991
|
py
|
Python
|
acciones/accion_6.py
|
salaminalcuadrado/VideoClub
|
dcddbe545d9691c12c9180e375cd9573e375dafa
|
[
"MIT"
] | 3
|
2018-10-28T16:00:40.000Z
|
2018-10-29T01:52:57.000Z
|
acciones/accion_6.py
|
salaminalcuadrado/VideoClub
|
dcddbe545d9691c12c9180e375cd9573e375dafa
|
[
"MIT"
] | 13
|
2018-10-28T16:05:39.000Z
|
2018-10-28T20:03:15.000Z
|
acciones/accion_6.py
|
salaminalcuadrado/VideoClub
|
dcddbe545d9691c12c9180e375cd9573e375dafa
|
[
"MIT"
] | null | null | null |
# ACTION 6
import sys
sys.path.append('./control_libreria')
sys.path.append('./tools')
from clientes import subir_diccionario, encontrar_codigo_clientes
from generar_datos import generar_data
from agregar_modificar_clientes import modificar_client
def modificar_clientes():
codigo = input(" Ingrese el codigo para modificar un cliente ")
codigo = modificar_client(codigo)
datos = clientes()
return subir_diccionario(datos, codigo)
def clientes():
campos = [
{
"label": "nombre",
"type": "string"
},
{
"label": "fecha de alta (dd/mm/aaaa)",
"type": "string",
"key": "fecha_alta"
},
{
"label": "telefono",
"type": "string"
},
{
"label": "mail",
"type": "string"
},
{
"label": "direccion",
"type": "string"
}
]
return (generar_data(campos))
| 19.431373
| 67
| 0.533804
|
c011af4d97ac9949e8e73554a22ed0cf1cea34a0
| 1,701
|
py
|
Python
|
applications/ShallowWaterApplication/tests/shallow_water_test_factory.py
|
ma6yu/Kratos
|
02380412f8a833a2cdda6791e1c7f9c32e088530
|
[
"BSD-4-Clause"
] | null | null | null |
applications/ShallowWaterApplication/tests/shallow_water_test_factory.py
|
ma6yu/Kratos
|
02380412f8a833a2cdda6791e1c7f9c32e088530
|
[
"BSD-4-Clause"
] | null | null | null |
applications/ShallowWaterApplication/tests/shallow_water_test_factory.py
|
ma6yu/Kratos
|
02380412f8a833a2cdda6791e1c7f9c32e088530
|
[
"BSD-4-Clause"
] | null | null | null |
import KratosMultiphysics
import KratosMultiphysics.KratosUnittest as KratosUnittest
from KratosMultiphysics.ShallowWaterApplication.shallow_water_analysis import ShallowWaterAnalysis
class ShallowWaterTestFactory(KratosUnittest.TestCase):
def test_execution(self):
with KratosUnittest.WorkFolderScope(self.execution_directory, __file__):
with open(self.execution_file + "_parameters.json",'r') as parameter_file:
ProjectParameters = KratosMultiphysics.Parameters(parameter_file.read())
model = KratosMultiphysics.Model()
test = ShallowWaterAnalysis(model, ProjectParameters)
test.Run()
class TestLagrangianShallowWaterElement(ShallowWaterTestFactory):
execution_directory = "elements_tests"
execution_file = "lagrangian_swe"
class TestShallowWaterElement(ShallowWaterTestFactory):
execution_directory = "elements_tests"
execution_file = "swe"
class TestShallowWater2D3NElement(ShallowWaterTestFactory):
execution_directory = "elements_tests"
execution_file = "shallow_water_2d_3n"
class TestMonotonicShallowWater2D3NElement(ShallowWaterTestFactory):
execution_directory = "elements_tests"
execution_file = "monotonic_shallow_water_2d_3n"
class TestSetTopographyProcess(ShallowWaterTestFactory):
execution_directory = "processes_tests"
execution_file = "set_topography_process"
class TestNodesOutputProcess(ShallowWaterTestFactory):
execution_directory = "processes_tests"
execution_file = "nodes_output_process"
class TestVisualizationMeshProcess(ShallowWaterTestFactory):
execution_directory = "processes_tests"
execution_file = "visualization_mesh_process"
| 40.5
| 98
| 0.800705
|
c9f286369ea2534ec6bd9a825dfdda475db75df4
| 1,640
|
py
|
Python
|
ch07/boston_cv10_penalized.py
|
ishandutta2007/BuildingMachineLearningSystemsWithPython
|
6828d7c242d663ad85a7bebeb4a2ef578e2d8482
|
[
"MIT"
] | 1
|
2017-04-24T16:14:59.000Z
|
2017-04-24T16:14:59.000Z
|
ch07/boston_cv10_penalized.py
|
woodhaha/BuildingMachineLearningSystemsWithPython
|
bb7510741b5a745237186a85c6b648e5b8b37f26
|
[
"MIT"
] | null | null | null |
ch07/boston_cv10_penalized.py
|
woodhaha/BuildingMachineLearningSystemsWithPython
|
bb7510741b5a745237186a85c6b648e5b8b37f26
|
[
"MIT"
] | 1
|
2015-06-08T23:10:32.000Z
|
2015-06-08T23:10:32.000Z
|
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
# This script fits several forms of penalized regression
from __future__ import print_function
from sklearn.cross_validation import KFold
from sklearn.linear_model import ElasticNet, Lasso, Ridge
from sklearn.linear_model import ElasticNetCV, LassoCV, RidgeCV
import numpy as np
from sklearn.datasets import load_boston
boston = load_boston()
x = np.array([np.concatenate((v, [1])) for v in boston.data])
y = boston.target
for name, met in [
('elastic-net(.5)', ElasticNet(fit_intercept=True, alpha=0.5)),
('lasso(.5)', Lasso(fit_intercept=True, alpha=0.5)),
('ridge(.5)', Ridge(fit_intercept=True, alpha=0.5)),
]:
# Fit on the whole data:
met.fit(x, y)
# Predict on the whole data:
p = np.array([met.predict(xi) for xi in x])
e = p - y
# np.dot(e, e) == sum(ei**2 for ei in e) but faster
total_error = np.dot(e, e)
rmse_train = np.sqrt(total_error / len(p))
# Now, we use 10 fold cross-validation to estimate generalization error
kf = KFold(len(x), n_folds=10)
err = 0
for train, test in kf:
met.fit(x[train], y[train])
p = np.array([met.predict(xi) for xi in x[test]])
e = p - y[test]
err += np.dot(e, e)
rmse_10cv = np.sqrt(err / len(x))
print('Method: {}'.format(name))
print('RMSE on training: {}'.format(rmse_train))
print('RMSE on 10-fold CV: {}'.format(rmse_10cv))
print()
print()
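# For reference (added note, not in the original script): the manual RMSE computed above
# is equivalent to np.sqrt(mean_squared_error(y_true, y_pred)) from sklearn.metrics, e.g.
#
#     from sklearn.metrics import mean_squared_error
#     rmse_train = np.sqrt(mean_squared_error(y, p))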
| 32.156863
| 75
| 0.660976
|
871fb4492da6a4be3514893306a727be73a5ec1d
| 13,512
|
py
|
Python
|
sap2012/SAP_worksheet/ventilation_rates.py
|
building-energy/sap2012
|
4cb3a362be4662b0e96c56a3765771f0cba91422
|
[
"MIT"
] | 7
|
2021-04-17T21:55:37.000Z
|
2021-08-19T13:06:16.000Z
|
sap2012/SAP_worksheet/ventilation_rates.py
|
building-energy/sap2012
|
4cb3a362be4662b0e96c56a3765771f0cba91422
|
[
"MIT"
] | null | null | null |
sap2012/SAP_worksheet/ventilation_rates.py
|
building-energy/sap2012
|
4cb3a362be4662b0e96c56a3765771f0cba91422
|
[
"MIT"
] | 2
|
2021-03-21T16:14:50.000Z
|
2021-04-20T08:54:41.000Z
|
# -*- coding: utf-8 -*-
def ventilation_rates(
number_of_chimneys_main_heating,
number_of_chimneys_secondary_heating,
number_of_chimneys_other,
number_of_open_flues_main_heating,
number_of_open_flues_secondary_heating,
number_of_open_flues_other,
number_of_intermittant_fans_total,
number_of_passive_vents_total,
number_of_flueless_gas_fires_total,
dwelling_volume,
air_permeability_value_q50,
number_of_storeys_in_the_dwelling,
structural_infiltration,
suspended_wooden_ground_floor_infiltration,
no_draft_lobby_infiltration,
percentage_of_windows_and_doors_draught_proofed,
number_of_sides_on_which_dwelling_is_sheltered,
monthly_average_wind_speed,
applicable_case,
mechanical_ventilation_air_change_rate_through_system,
exhaust_air_heat_pump_using_Appendix_N,
mechanical_ventilation_throughput_factor,
efficiency_allowing_for_in_use_factor,
):
"""Calculates the ventilation rates, Section 2.
:param number_of_chimneys_main_heating:
:type number_of_chimneys_main_heating: int
:param number_of_chimneys_secondary_heating:
:type number_of_chimneys_secondary_heating: int
:param number_of_chimneys_other:
:type number_of_chimneys_other: int
:param number_of_open_flues_main_heating:
:type number_of_open_flues_main_heating: int
:param number_of_open_flues_secondary_heating:
:type number_of_open_flues_secondary_heating: int
:param number_of_open_flues_other:
:type number_of_open_flues_other: int
:param number_of_intermittant_fans_total:
:type number_of_intermittant_fans_total: int
:param number_of_passive_vents_total:
:type number_of_passive_vents_total: int
:param number_of_flueless_gas_fires_total:
:type number_of_flueless_gas_fires_total: int
:param dwelling_volume: See (5).
:type dwelling_volume: float
:param air_permeability_value_q50: See (17). Use None if not carried out.
:type air_permeability_value_q50: float or None
:param number_of_storeys_in_the_dwelling: See (9).
:type number_of_storeys_in_the_dwelling: int
:param structural_infiltration: See (11).
:type structural_infiltration: float
:param suspended_wooden_ground_floor_infiltration: See (12).
:type suspended_wooden_ground_floor_infiltration: float
:param no_draft_lobby_infiltration: See (13).
:type no_draft_lobby_infiltration: float
:param percentage_of_windows_and_doors_draught_proofed: See (14).
:type percentage_of_windows_and_doors_draught_proofed: float
:param number_of_sides_on_which_dwelling_is_sheltered: See (19).
:type number_of_sides_on_which_dwelling_is_sheltered: int
:param monthly_average_wind_speed: A list of the monthly wind speeds.
12 items, from Jan to Dec, see (22).
:type monthly_average_wind_speed: list (float)
:param applicable_case: One of the following options:
'balanced mechanical ventilation with heat recovery';
'balanced mechanical ventilation without heat recovery';
'whole house extract ventilation or positive input ventilation from outside';
or 'natural ventilation or whole house positive input ventilation from loft'.
:type applicable_case: str
:param mechanical_ventilation_air_change_rate_through_system: See (23a).
:type mechanical_ventilation_air_change_rate_through_system: float
:param exhaust_air_heat_pump_using_Appendix_N:
True if exhaust air heat pump using Appendix N, otherwise False.
:type exhaust_air_heat_pump_using_Appendix_N: bool
:param mechanical_ventilation_throughput_factor: F_mv, see Equation N4.
:type mechanical_ventilation_throughput_factor: float
:param efficiency_allowing_for_in_use_factor: In %, see (23c).
:type efficiency_allowing_for_in_use_factor: float
:returns: A dictionary with keys of (
number_of_chimneys_total,
number_of_chimneys_m3_per_hour,
number_of_open_flues_total,
number_of_open_flues_m3_per_hour,
number_of_intermittant_fans_m3_per_hour,
number_of_passive_vents_m3_per_hour,
number_of_flueless_gas_fires_m3_per_hour,
        infiltration_due_to_chimneys_flues_fans_PSVs,
additional_infiltration,
window_infiltration,
infiltration_rate,
infiltration_rate2,
shelter_factor,
infiltration_rate_incorporating_shelter_factor,
wind_factor,
adjusted_infiltration_rate_allowing_for_shelter_and_wind_speed,
exhaust_air_heat_pump_air_change_rate_through_system,
effective_air_change_rate
)
- **number_of_chimneys_total** (`int`) -
- **number_of_chimneys_m3_per_hour** (`float`) - See (6a).
- **number_of_open_flues_total** (`int`) -
- **number_of_open_flues_m3_per_hour** (`float`) - See (6b).
    - **infiltration_due_to_chimneys_flues_fans_PSVs** (`float`) - See (8).
- **additional_infiltration** (`float`) - See (10).
- **window_infiltration** (`float`) - See (15).
- **infiltration_rate** (`float`) - See (16).
- **infiltration_rate2** (`float`) - See (18).
- **shelter_factor** (`float`) - See (20).
- **infiltration_rate_incorporating_shelter_factor** (`float`) - See (21).
- **wind_factor** list (`float`) - See (22a).
- **adjusted_infiltration_rate_allowing_for_shelter_and_wind_speed**: list (`float`) - See (22b).
- **exhaust_air_heat_pump_air_change_rate_through_system** (`float`) - See (23b).
- **effective_air_change_rate** list (`float`) - See (25).
:rtype: dict
"""
# number_of_chimneys
number_of_chimneys_total=(number_of_chimneys_main_heating +
number_of_chimneys_secondary_heating +
number_of_chimneys_other)
number_of_chimneys_m3_per_hour=number_of_chimneys_total * 40.0
# number_of_open_flues
number_of_open_flues_total=(number_of_open_flues_main_heating +
number_of_open_flues_secondary_heating +
number_of_open_flues_other)
number_of_open_flues_m3_per_hour=number_of_open_flues_total * 20.0
# number_of_intermittant_fans
number_of_intermittant_fans_m3_per_hour=number_of_intermittant_fans_total * 10.0
# number_of_passive_vents
number_of_passive_vents_m3_per_hour=number_of_passive_vents_total * 10.0
# number_of_flueless_gas_fires
number_of_flueless_gas_fires_m3_per_hour=number_of_flueless_gas_fires_total * 40.0
    # infiltration_due_to_chimneys_flues_fans_PSVs
infiltration_due_to_chimneys_flues_fans_PSVs=((number_of_chimneys_m3_per_hour +
number_of_open_flues_m3_per_hour +
number_of_intermittant_fans_m3_per_hour +
number_of_passive_vents_m3_per_hour +
number_of_flueless_gas_fires_m3_per_hour) /
dwelling_volume)
if air_permeability_value_q50 is None: # changed from 'air_permeability_value_q50 == 0:' on 4-FEB-2021
additional_infiltration=(number_of_storeys_in_the_dwelling-1)*0.1
window_infiltration=0.25 - (0.2 * percentage_of_windows_and_doors_draught_proofed / 100.0)
infiltration_rate=(infiltration_due_to_chimneys_flues_fans_PSVs +
additional_infiltration +
structural_infiltration +
suspended_wooden_ground_floor_infiltration +
no_draft_lobby_infiltration +
window_infiltration
)
infiltration_rate2=infiltration_rate
else:
additional_infiltration=None
window_infiltration=None
infiltration_rate=None
infiltration_rate2=((air_permeability_value_q50 / 20) +
infiltration_due_to_chimneys_flues_fans_PSVs)
# shelter_factor
shelter_factor = 1 - (0.075 * number_of_sides_on_which_dwelling_is_sheltered)
# infiltration_rate_incorporating_shelter_factor
infiltration_rate_incorporating_shelter_factor = (infiltration_rate2 *
shelter_factor)
# wind_factor
wind_factor=[None]*12
for i in range(12):
wind_factor[i]=monthly_average_wind_speed[i] / 4.0
# adjusted_infiltration_rate_allowing_for_shelter_and_wind_speed
adjusted_infiltration_rate_allowing_for_shelter_and_wind_speed=[None]*12
for i in range(12):
adjusted_infiltration_rate_allowing_for_shelter_and_wind_speed[i] = (
infiltration_rate_incorporating_shelter_factor *
wind_factor[i]
)
# exhaust_air_heat_pump_air_change_rate_through_system
if applicable_case in ['balanced mechanical ventilation with heat recovery',
'balanced mechanical ventilation without heat recovery',
'whole house extract ventilation or positive input ventilation from outside']:
if exhaust_air_heat_pump_using_Appendix_N:
exhaust_air_heat_pump_air_change_rate_through_system = (
mechanical_ventilation_air_change_rate_through_system *
mechanical_ventilation_throughput_factor)
else:
exhaust_air_heat_pump_air_change_rate_through_system = \
mechanical_ventilation_air_change_rate_through_system
else:
exhaust_air_heat_pump_air_change_rate_through_system = None
# effective_air_change_rate
effective_air_change_rate=[None]*12
if applicable_case=='balanced mechanical ventilation with heat recovery':
for i in range(12):
effective_air_change_rate[i]=(
adjusted_infiltration_rate_allowing_for_shelter_and_wind_speed[i] +
exhaust_air_heat_pump_air_change_rate_through_system *
(1.0 - efficiency_allowing_for_in_use_factor / 100.0)
)
elif applicable_case=='balanced mechanical ventilation without heat recovery':
for i in range(12):
effective_air_change_rate[i]=(
adjusted_infiltration_rate_allowing_for_shelter_and_wind_speed[i] +
exhaust_air_heat_pump_air_change_rate_through_system)
elif applicable_case=='whole house extract ventilation or positive input ventilation from outside':
for i in range(12):
if (adjusted_infiltration_rate_allowing_for_shelter_and_wind_speed[i]
< 0.5 * exhaust_air_heat_pump_air_change_rate_through_system):
effective_air_change_rate[i]=exhaust_air_heat_pump_air_change_rate_through_system
else:
effective_air_change_rate[i]=(
adjusted_infiltration_rate_allowing_for_shelter_and_wind_speed[i] +
0.5 * exhaust_air_heat_pump_air_change_rate_through_system)
elif applicable_case=='natural ventilation or whole house positive input ventilation from loft':
for i in range(12):
if adjusted_infiltration_rate_allowing_for_shelter_and_wind_speed[i]>1:
effective_air_change_rate[i]=adjusted_infiltration_rate_allowing_for_shelter_and_wind_speed[i]
else:
effective_air_change_rate[i]=0.5 + (adjusted_infiltration_rate_allowing_for_shelter_and_wind_speed[i]**2 * 0.5)
return dict(
number_of_chimneys_total=number_of_chimneys_total,
number_of_chimneys_m3_per_hour=number_of_chimneys_m3_per_hour,
number_of_open_flues_total=number_of_open_flues_total,
number_of_open_flues_m3_per_hour=number_of_open_flues_m3_per_hour,
number_of_intermittant_fans_m3_per_hour=number_of_intermittant_fans_m3_per_hour,
number_of_passive_vents_m3_per_hour=number_of_passive_vents_m3_per_hour,
number_of_flueless_gas_fires_m3_per_hour=number_of_flueless_gas_fires_m3_per_hour,
infiltration_due_to_chimneys_flues_fans_PSVs=infiltration_due_to_chimneys_flues_fans_PSVs,
additional_infiltration=additional_infiltration,
window_infiltration=window_infiltration,
infiltration_rate=infiltration_rate,
infiltration_rate2=infiltration_rate2,
shelter_factor=shelter_factor,
infiltration_rate_incorporating_shelter_factor=infiltration_rate_incorporating_shelter_factor,
wind_factor=wind_factor,
adjusted_infiltration_rate_allowing_for_shelter_and_wind_speed=adjusted_infiltration_rate_allowing_for_shelter_and_wind_speed,
exhaust_air_heat_pump_air_change_rate_through_system=exhaust_air_heat_pump_air_change_rate_through_system,
effective_air_change_rate=effective_air_change_rate
)
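# Worked example (added for illustration; the numbers are hypothetical) of the shelter and
# wind adjustment formulas above:
#
#     number_of_sides_on_which_dwelling_is_sheltered = 2  ->  shelter_factor = 1 - 0.075 * 2 = 0.85
#     monthly_average_wind_speed[0] = 4.5 m/s             ->  wind_factor[0] = 4.5 / 4 = 1.125
#     with infiltration_rate2 = 0.8 ach:
#         infiltration_rate_incorporating_shelter_factor = 0.8 * 0.85 = 0.68
#         adjusted January infiltration rate = 0.68 * 1.125 = 0.765 ach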
| 44.156863
| 138
| 0.697084
|
fc94fb4cac9f98bd6caa36d5c35ffc2cff6f2f9d
| 2,031
|
py
|
Python
|
projects/vdk-plugins/vdk-control-api-auth/src/vdk/plugin/control_api_auth/auth_exception.py
|
vmware/versatile-data-kit
|
c4e10324a4f3203c58079cb18203880f68053f15
|
[
"Apache-2.0"
] | 100
|
2021-10-04T09:32:04.000Z
|
2022-03-30T11:23:53.000Z
|
projects/vdk-plugins/vdk-control-api-auth/src/vdk/plugin/control_api_auth/auth_exception.py
|
vmware/versatile-data-kit
|
c4e10324a4f3203c58079cb18203880f68053f15
|
[
"Apache-2.0"
] | 208
|
2021-10-04T16:56:40.000Z
|
2022-03-31T10:41:44.000Z
|
projects/vdk-plugins/vdk-control-api-auth/src/vdk/plugin/control_api_auth/auth_exception.py
|
vmware/versatile-data-kit
|
c4e10324a4f3203c58079cb18203880f68053f15
|
[
"Apache-2.0"
] | 14
|
2021-10-11T14:15:13.000Z
|
2022-03-11T13:39:17.000Z
|
# Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
class VDKAuthException(Exception):
"""
    The VDKAuthException is a custom exception type following the coding standard
for error handling: see the project contributing documentation
"""
def __init__(self, what, why, consequence, countermeasure):
banner = "¯\\_(ツ)_/¯"
self.message = (
f"{banner}\n"
f"\nwhat: {what}\n"
f"why: {why}\n"
f"consequences: {consequence}\n"
f"countermeasures: {countermeasure}\n"
)
super().__init__(self.message)
class VDKInvalidAuthParamError(VDKAuthException):
"""
The VDKInvalidAuthParamError is a custom exception type derived from
the base VDKAuthException type. It is raised when a parameter needed
for authentication is missing/not provided or otherwise invalid.
"""
def __init__(self, what, why, consequence, countermeasure):
super().__init__(
what=what, why=why, consequence=consequence, countermeasure=countermeasure
)
class VDKLoginFailedError(VDKAuthException):
"""
The VDKLoginFailedError is a custom exception type derived from the
base VDKAuthException type. It is raised when an error occurs while
going through the actual authentication flow, i.e., when something
happens while connecting to a third party endpoint.
"""
def __init__(self, what, why, consequence, countermeasure):
super().__init__(
what=what, why=why, consequence=consequence, countermeasure=countermeasure
)
class VDKAuthOSError(VDKAuthException):
"""
The VDKAuthOSError is a custom exception type derived from the base
VDKAuthException type. It is raised when an error in the underlying
Operating System arises.
"""
def __init__(self, what, why, consequence, countermeasure):
super().__init__(
what=what, why=why, consequence=consequence, countermeasure=countermeasure
)
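# Minimal usage sketch (added for illustration; not part of the plugin). The parameter
# values below are hypothetical; raising any of the exceptions above produces the
# banner-prefixed what/why/consequences/countermeasures message.
if __name__ == "__main__":
    try:
        raise VDKInvalidAuthParamError(
            what="Cannot log in.",
            why="A required authentication parameter was not provided.",
            consequence="Authentication will not be attempted.",
            countermeasure="Provide the missing parameter and retry.",
        )
    except VDKAuthException as error:
        print(error.message)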
| 33.295082
| 86
| 0.681438
|
913ca46f43a7aa0ee31b881a2c4152b6c5828c03
| 2,121
|
py
|
Python
|
pytests/bucket_collections/collection_ops_specs/volume_test_load_with_CRUD_on_collections_for_volume_test.py
|
bkumaran/TAF
|
27f39eb913fa89b55cdd88ee1c7ef0bb8c094407
|
[
"Apache-2.0"
] | 9
|
2019-02-19T05:55:00.000Z
|
2022-01-20T10:37:28.000Z
|
pytests/bucket_collections/collection_ops_specs/volume_test_load_with_CRUD_on_collections_for_volume_test.py
|
bkumaran/TAF
|
27f39eb913fa89b55cdd88ee1c7ef0bb8c094407
|
[
"Apache-2.0"
] | 2
|
2019-02-19T07:28:54.000Z
|
2019-06-18T11:22:29.000Z
|
pytests/bucket_collections/collection_ops_specs/volume_test_load_with_CRUD_on_collections_for_volume_test.py
|
bkumaran/TAF
|
27f39eb913fa89b55cdd88ee1c7ef0bb8c094407
|
[
"Apache-2.0"
] | 155
|
2018-11-13T14:57:07.000Z
|
2022-03-28T11:53:22.000Z
|
from collections_helper.collections_spec_constants import MetaCrudParams
spec = {
# Scope/Collection ops params
MetaCrudParams.COLLECTIONS_TO_FLUSH: 0,
MetaCrudParams.COLLECTIONS_TO_DROP: 200,
MetaCrudParams.SCOPES_TO_DROP: 0,
MetaCrudParams.SCOPES_TO_ADD_PER_BUCKET: 0,
MetaCrudParams.COLLECTIONS_TO_ADD_FOR_NEW_SCOPES: 0,
MetaCrudParams.COLLECTIONS_TO_ADD_PER_BUCKET: 0,
MetaCrudParams.COLLECTIONS_TO_RECREATE: 200,
MetaCrudParams.BUCKET_CONSIDERED_FOR_OPS: "all",
MetaCrudParams.SCOPES_CONSIDERED_FOR_OPS: "all",
MetaCrudParams.COLLECTIONS_CONSIDERED_FOR_OPS: "all",
# Doc loading params
"doc_crud": {
MetaCrudParams.DocCrud.NUM_ITEMS_FOR_NEW_COLLECTIONS: 100000,
MetaCrudParams.DocCrud.COMMON_DOC_KEY: "test_collections",
MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION: 22,
MetaCrudParams.DocCrud.READ_PERCENTAGE_PER_COLLECTION: 20,
MetaCrudParams.DocCrud.UPDATE_PERCENTAGE_PER_COLLECTION: 20,
MetaCrudParams.DocCrud.REPLACE_PERCENTAGE_PER_COLLECTION: 0,
MetaCrudParams.DocCrud.DELETE_PERCENTAGE_PER_COLLECTION: 20,
},
"subdoc_crud": {
MetaCrudParams.SubDocCrud.XATTR_TEST: False,
MetaCrudParams.SubDocCrud.INSERT_PER_COLLECTION: 0,
MetaCrudParams.SubDocCrud.UPSERT_PER_COLLECTION: 0,
MetaCrudParams.SubDocCrud.REMOVE_PER_COLLECTION: 0,
MetaCrudParams.SubDocCrud.LOOKUP_PER_COLLECTION: 0,
},
# Doc_loading task options
MetaCrudParams.DOC_TTL: 0,
MetaCrudParams.DURABILITY_LEVEL: "",
MetaCrudParams.SDK_TIMEOUT: 120, # Default is 60
MetaCrudParams.SDK_TIMEOUT_UNIT: "seconds",
MetaCrudParams.TARGET_VBUCKETS: "all",
MetaCrudParams.SKIP_READ_ON_ERROR: True,
MetaCrudParams.SUPPRESS_ERROR_TABLE: True,
# The below is to skip populating success dictionary for reads
MetaCrudParams.SKIP_READ_SUCCESS_RESULTS: True, # Default is False
MetaCrudParams.COLLECTIONS_CONSIDERED_FOR_CRUD: "all",
MetaCrudParams.SCOPES_CONSIDERED_FOR_CRUD: "all",
MetaCrudParams.BUCKETS_CONSIDERED_FOR_CRUD: "all"
}
| 38.563636
| 72
| 0.770863
|
328a8114621d0559b7c31cfab7f64e350c7cd7b3
| 7,414
|
py
|
Python
|
examples/wavenet/train.py
|
JiaXiao243/Parakeet
|
7c4267476ee9211236f5ea848131a3638fd0f555
|
[
"Apache-2.0"
] | 1
|
2020-06-22T12:12:43.000Z
|
2020-06-22T12:12:43.000Z
|
examples/wavenet/train.py
|
JiaXiao243/Parakeet
|
7c4267476ee9211236f5ea848131a3638fd0f555
|
[
"Apache-2.0"
] | null | null | null |
examples/wavenet/train.py
|
JiaXiao243/Parakeet
|
7c4267476ee9211236f5ea848131a3638fd0f555
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import os
import ruamel.yaml
import argparse
import tqdm
from tensorboardX import SummaryWriter
from paddle import fluid
fluid.require_version('1.8.0')
import paddle.fluid.dygraph as dg
from parakeet.data import SliceDataset, TransformDataset, CacheDataset, DataCargo, SequentialSampler, RandomSampler
from parakeet.models.wavenet import UpsampleNet, WaveNet, ConditionalWavenet
from parakeet.utils.layer_tools import summary
from parakeet.utils import io
from data import LJSpeechMetaData, Transform, DataCollector
from utils import make_output_tree, valid_model
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Train a WaveNet model with LJSpeech.")
parser.add_argument(
"--data", type=str, help="path of the LJspeech dataset")
parser.add_argument("--config", type=str, help="path of the config file")
parser.add_argument("--device", type=int, default=-1, help="device to use")
g = parser.add_mutually_exclusive_group()
g.add_argument("--checkpoint", type=str, help="checkpoint to resume from")
g.add_argument(
"--iteration",
type=int,
help="the iteration of the checkpoint to load from output directory")
parser.add_argument(
"output", type=str, default="experiment", help="path to save results")
args = parser.parse_args()
with open(args.config, 'rt') as f:
config = ruamel.yaml.safe_load(f)
if args.device == -1:
place = fluid.CPUPlace()
else:
place = fluid.CUDAPlace(args.device)
dg.enable_dygraph(place)
print("Command Line Args: ")
for k, v in vars(args).items():
print("{}: {}".format(k, v))
ljspeech_meta = LJSpeechMetaData(args.data)
data_config = config["data"]
sample_rate = data_config["sample_rate"]
n_fft = data_config["n_fft"]
win_length = data_config["win_length"]
hop_length = data_config["hop_length"]
n_mels = data_config["n_mels"]
train_clip_seconds = data_config["train_clip_seconds"]
transform = Transform(sample_rate, n_fft, win_length, hop_length, n_mels)
ljspeech = TransformDataset(ljspeech_meta, transform)
valid_size = data_config["valid_size"]
ljspeech_valid = CacheDataset(SliceDataset(ljspeech, 0, valid_size))
ljspeech_train = CacheDataset(
SliceDataset(ljspeech, valid_size, len(ljspeech)))
model_config = config["model"]
n_loop = model_config["n_loop"]
n_layer = model_config["n_layer"]
filter_size = model_config["filter_size"]
context_size = 1 + n_layer * sum([filter_size**i for i in range(n_loop)])
print("context size is {} samples".format(context_size))
train_batch_fn = DataCollector(context_size, sample_rate, hop_length,
train_clip_seconds)
valid_batch_fn = DataCollector(
context_size, sample_rate, hop_length, train_clip_seconds, valid=True)
batch_size = data_config["batch_size"]
train_cargo = DataCargo(
ljspeech_train,
train_batch_fn,
batch_size,
sampler=RandomSampler(ljspeech_train))
    # only batch_size=1 is supported for validation
valid_cargo = DataCargo(
ljspeech_valid,
valid_batch_fn,
batch_size=1,
sampler=SequentialSampler(ljspeech_valid))
make_output_tree(args.output)
if args.device == -1:
place = fluid.CPUPlace()
else:
place = fluid.CUDAPlace(args.device)
model_config = config["model"]
upsampling_factors = model_config["upsampling_factors"]
encoder = UpsampleNet(upsampling_factors)
n_loop = model_config["n_loop"]
n_layer = model_config["n_layer"]
residual_channels = model_config["residual_channels"]
output_dim = model_config["output_dim"]
loss_type = model_config["loss_type"]
log_scale_min = model_config["log_scale_min"]
decoder = WaveNet(n_loop, n_layer, residual_channels, output_dim, n_mels,
filter_size, loss_type, log_scale_min)
model = ConditionalWavenet(encoder, decoder)
summary(model)
train_config = config["train"]
learning_rate = train_config["learning_rate"]
anneal_rate = train_config["anneal_rate"]
anneal_interval = train_config["anneal_interval"]
lr_scheduler = dg.ExponentialDecay(
learning_rate, anneal_interval, anneal_rate, staircase=True)
    gradient_max_norm = train_config["gradient_max_norm"]
optim = fluid.optimizer.Adam(
lr_scheduler,
parameter_list=model.parameters(),
        grad_clip=fluid.clip.ClipByGlobalNorm(gradient_max_norm))
train_loader = fluid.io.DataLoader.from_generator(
capacity=10, return_list=True)
train_loader.set_batch_generator(train_cargo, place)
valid_loader = fluid.io.DataLoader.from_generator(
capacity=10, return_list=True)
valid_loader.set_batch_generator(valid_cargo, place)
max_iterations = train_config["max_iterations"]
checkpoint_interval = train_config["checkpoint_interval"]
snap_interval = train_config["snap_interval"]
eval_interval = train_config["eval_interval"]
checkpoint_dir = os.path.join(args.output, "checkpoints")
log_dir = os.path.join(args.output, "log")
writer = SummaryWriter(log_dir)
# load parameters and optimizer, and update iterations done so far
if args.checkpoint is not None:
iteration = io.load_parameters(
model, optim, checkpoint_path=args.checkpoint)
else:
iteration = io.load_parameters(
model,
optim,
checkpoint_dir=checkpoint_dir,
iteration=args.iteration)
global_step = iteration + 1
iterator = iter(tqdm.tqdm(train_loader))
while global_step <= max_iterations:
try:
batch = next(iterator)
except StopIteration as e:
iterator = iter(tqdm.tqdm(train_loader))
batch = next(iterator)
audio_clips, mel_specs, audio_starts = batch
model.train()
y_var = model(audio_clips, mel_specs, audio_starts)
loss_var = model.loss(y_var, audio_clips)
loss_var.backward()
loss_np = loss_var.numpy()
writer.add_scalar("loss", loss_np[0], global_step)
writer.add_scalar("learning_rate",
optim._learning_rate.step().numpy()[0], global_step)
optim.minimize(loss_var)
optim.clear_gradients()
print("global_step: {}\tloss: {:<8.6f}".format(global_step, loss_np[
0]))
if global_step % snap_interval == 0:
valid_model(model, valid_loader, writer, global_step, sample_rate)
if global_step % checkpoint_interval == 0:
io.save_parameters(checkpoint_dir, global_step, model, optim)
global_step += 1
| 36.70297
| 115
| 0.696115
|
fb5899401015d2da8126d0b1247e3f96b9e31004
| 216
|
py
|
Python
|
_16_LISTS_ADVANCED/_4_Even_Numbers.py
|
YordanPetrovDS/Python_Fundamentals
|
81163054cd3ac780697eaa43f099cc455f253a0c
|
[
"MIT"
] | null | null | null |
_16_LISTS_ADVANCED/_4_Even_Numbers.py
|
YordanPetrovDS/Python_Fundamentals
|
81163054cd3ac780697eaa43f099cc455f253a0c
|
[
"MIT"
] | null | null | null |
_16_LISTS_ADVANCED/_4_Even_Numbers.py
|
YordanPetrovDS/Python_Fundamentals
|
81163054cd3ac780697eaa43f099cc455f253a0c
|
[
"MIT"
] | null | null | null |
numbers = [int(n.strip()) for n in input().split(",")]
even_number_indexes = []
for index, number in enumerate(numbers):
if number % 2 == 0:
even_number_indexes.append(index)
print(even_number_indexes)
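# Worked example (illustrative, not part of the original submission): for the
# input "2, 3, 4, 5" the even values 2 and 4 sit at indices 0 and 2, so the
# script prints [0, 2].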
| 24
| 54
| 0.680556
|
cecccd18b25c0a2aead412b6872da0b4d614107e
| 263
|
py
|
Python
|
cms_lab_members/models.py
|
mfcovington/djangocms-lab-members
|
c2b0251d985255265b9ba9fbce41a772ea92174c
|
[
"BSD-3-Clause"
] | null | null | null |
cms_lab_members/models.py
|
mfcovington/djangocms-lab-members
|
c2b0251d985255265b9ba9fbce41a772ea92174c
|
[
"BSD-3-Clause"
] | null | null | null |
cms_lab_members/models.py
|
mfcovington/djangocms-lab-members
|
c2b0251d985255265b9ba9fbce41a772ea92174c
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db import models
from cms.models import CMSPlugin
class ScientistPluginModel(CMSPlugin):
scientist = models.ForeignKey('lab_members.Scientist',
related_name='plugins'
)
def __str__(self):
return self.scientist.full_name
| 23.909091
| 58
| 0.730038
|
f8e2d93b107c9356f735465e279370c79c24d2cf
| 2,970
|
py
|
Python
|
src/sentry/models/releasefile.py
|
learninto/sentry
|
4f9f564841498b3af49c1677d6b61f3e47b01923
|
[
"BSD-3-Clause"
] | 1
|
2019-10-17T17:46:16.000Z
|
2019-10-17T17:46:16.000Z
|
src/sentry/models/releasefile.py
|
learninto/sentry
|
4f9f564841498b3af49c1677d6b61f3e47b01923
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/models/releasefile.py
|
learninto/sentry
|
4f9f564841498b3af49c1677d6b61f3e47b01923
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from django.db import models
from six.moves.urllib.parse import urlsplit, urlunsplit
from sentry.db.models import BoundedPositiveIntegerField, FlexibleForeignKey, Model, sane_repr
from sentry.utils.hashlib import sha1_text
class ReleaseFile(Model):
r"""
A ReleaseFile is an association between a Release and a File.
The ident of the file should be sha1(name) or
    sha1(name + '\x00\x00' + dist.name) and must be unique per release.
"""
__core__ = False
organization = FlexibleForeignKey("sentry.Organization")
# DEPRECATED
project_id = BoundedPositiveIntegerField(null=True)
release = FlexibleForeignKey("sentry.Release")
file = FlexibleForeignKey("sentry.File")
ident = models.CharField(max_length=40)
name = models.TextField()
dist = FlexibleForeignKey("sentry.Distribution", null=True)
__repr__ = sane_repr("release", "ident")
class Meta:
unique_together = (("release", "ident"),)
index_together = (("release", "name"),)
app_label = "sentry"
db_table = "sentry_releasefile"
def save(self, *args, **kwargs):
if not self.ident and self.name:
dist = self.dist_id and self.dist.name or None
self.ident = type(self).get_ident(self.name, dist)
return super(ReleaseFile, self).save(*args, **kwargs)
def update(self, *args, **kwargs):
# If our name is changing, we must also change the ident
if "name" in kwargs and "ident" not in kwargs:
dist = kwargs.get("dist") or self.dist
kwargs["ident"] = self.ident = type(self).get_ident(
kwargs["name"], dist and dist.name or dist
)
return super(ReleaseFile, self).update(*args, **kwargs)
@classmethod
def get_ident(cls, name, dist=None):
if dist is not None:
return sha1_text(name + "\x00\x00" + dist).hexdigest()
return sha1_text(name).hexdigest()
@classmethod
def normalize(cls, url):
"""Transforms a full absolute url into 2 or 4 generalized options
* the original url as input
* (optional) original url without querystring
* the full url, but stripped of scheme and netloc
* (optional) full url without scheme and netloc or querystring
"""
# Always ignore the fragment
scheme, netloc, path, query, _ = urlsplit(url)
uri_without_fragment = (scheme, netloc, path, query, None)
uri_relative = (None, None, path, query, None)
uri_without_query = (scheme, netloc, path, None, None)
uri_relative_without_query = (None, None, path, None, None)
urls = [urlunsplit(uri_without_fragment)]
if query:
urls.append(urlunsplit(uri_without_query))
urls.append("~" + urlunsplit(uri_relative))
if query:
urls.append("~" + urlunsplit(uri_relative_without_query))
return urls
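    # Illustrative sketch of the expansion above (the sample URL is hypothetical,
    # not taken from this module):
    #   ReleaseFile.normalize("https://example.com/app.js?v=1#frag")
    #   -> ["https://example.com/app.js?v=1",
    #       "https://example.com/app.js",
    #       "~/app.js?v=1",
    #       "~/app.js"]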
| 37.125
| 94
| 0.650505
|
c1fcc56a1b2254079958287bd26061908fe2a39b
| 3,174
|
py
|
Python
|
source_optics/migrations/0017_auto_20190807_2024.py
|
mpdehaan/source_optics
|
ce0215ad63e47f0ab645c765129bac3c7236aff1
|
[
"Apache-2.0"
] | 1
|
2021-08-24T03:41:34.000Z
|
2021-08-24T03:41:34.000Z
|
source_optics/migrations/0017_auto_20190807_2024.py
|
mpdehaan/source_optics
|
ce0215ad63e47f0ab645c765129bac3c7236aff1
|
[
"Apache-2.0"
] | null | null | null |
source_optics/migrations/0017_auto_20190807_2024.py
|
mpdehaan/source_optics
|
ce0215ad63e47f0ab645c765129bac3c7236aff1
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2.2 on 2019-08-07 20:24
from django.db import migrations, models
import source_optics.models
class Migration(migrations.Migration):
dependencies = [
('source_optics', '0016_auto_20190727_0116'),
]
operations = [
migrations.AlterField(
model_name='author',
name='email',
field=models.CharField(db_index=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='commit',
name='sha',
field=models.CharField(db_index=True, max_length=255),
),
migrations.AlterField(
model_name='commit',
name='subject',
field=models.CharField(db_index=True, max_length=255),
),
migrations.AlterField(
model_name='credential',
name='api_endpoint',
field=models.CharField(blank=True, help_text='for github/gitlab imports off private instances', max_length=1024, null=True),
),
migrations.AlterField(
model_name='credential',
name='description',
field=models.TextField(blank=True, max_length=1024, null=True),
),
migrations.AlterField(
model_name='credential',
name='import_filter',
field=models.CharField(blank=True, help_text='if set, only import repos matching this fnmatch pattern', max_length=255, null=True),
),
migrations.AlterField(
model_name='credential',
name='name',
field=models.CharField(db_index=True, max_length=255),
),
migrations.AlterField(
model_name='credential',
name='password',
field=models.CharField(blank=True, help_text='for github/gitlab imports', max_length=255, null=True),
),
migrations.AlterField(
model_name='credential',
name='ssh_unlock_passphrase',
field=models.CharField(blank=True, help_text='for cloning private repos', max_length=255, null=True),
),
migrations.AlterField(
model_name='credential',
name='username',
field=models.CharField(blank=True, help_text='for github/gitlab username', max_length=64),
),
migrations.AlterField(
model_name='organization',
name='name',
field=models.CharField(db_index=True, max_length=255, unique=True),
),
migrations.AlterField(
model_name='repository',
name='name',
field=models.CharField(db_index=True, max_length=64, validators=[source_optics.models.repository.validate_repo_name]),
),
migrations.AlterField(
model_name='repository',
name='url',
field=models.CharField(db_index=True, help_text='use a git ssh url for private repos, else http/s are ok', max_length=255),
),
migrations.AlterField(
model_name='tag',
name='name',
field=models.CharField(blank=True, db_index=True, max_length=64, null=True),
),
]
| 36.906977
| 143
| 0.596408
|
b99e520107ae72ca6e225f2b6e598bb37f1550cc
| 1,270
|
py
|
Python
|
leetcode/Trees/230. Kth Smallest Element in a BST.py
|
danielfsousa/algorithms-solutions
|
038c0c0bf6d89ffb1ecea596e7d4bb9bd4154ff1
|
[
"MIT"
] | 1
|
2020-03-17T23:54:32.000Z
|
2020-03-17T23:54:32.000Z
|
leetcode/Trees/230. Kth Smallest Element in a BST.py
|
danielfsousa/algorithms-solutions
|
038c0c0bf6d89ffb1ecea596e7d4bb9bd4154ff1
|
[
"MIT"
] | null | null | null |
leetcode/Trees/230. Kth Smallest Element in a BST.py
|
danielfsousa/algorithms-solutions
|
038c0c0bf6d89ffb1ecea596e7d4bb9bd4154ff1
|
[
"MIT"
] | null | null | null |
# https://leetcode.com/problems/kth-smallest-element-in-a-bst/
# Definition for a binary tree node.
from typing import Optional
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def kthSmallest(self, root: Optional[TreeNode], k: int) -> int:
"""
Recursive DFS
Time complexity: O(n)
Space complexity: O(n)
"""
res = None
def dfs(node):
nonlocal k, res
if not node or k == 0:
return
dfs(node.left)
k -= 1
if k == 0:
res = node.val
return
dfs(node.right)
dfs(root)
return res
def kthSmallestIterative(self, root: Optional[TreeNode], k: int) -> int:
"""
Iterative DFS
Time complexity: O(n)
Space complexity: O(n)
"""
stack = []
cur = root
while cur or stack:
while cur:
stack.append(cur)
cur = cur.left
node = stack.pop()
k -= 1
if k == 0:
return node.val
cur = node.right
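# Minimal usage sketch for the two solutions above; the tree below is arbitrary
# example data, not part of the original LeetCode submission.
if __name__ == "__main__":
    #     3
    #    / \
    #   1   4
    #    \
    #     2
    root = TreeNode(3, TreeNode(1, None, TreeNode(2)), TreeNode(4))
    s = Solution()
    assert s.kthSmallest(root, 1) == 1
    assert s.kthSmallestIterative(root, 3) == 3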
| 20.483871
| 76
| 0.470866
|
9cc5da53bebb6e005d7e815d2080251fb221b12b
| 11,526
|
py
|
Python
|
JumpscaleLib/tools/teammgr/Teammgr.py
|
threefoldtech/jumpscale_lib9
|
03c1451133d777e5af106fcc6f75c1138bb997f2
|
[
"Apache-2.0"
] | null | null | null |
JumpscaleLib/tools/teammgr/Teammgr.py
|
threefoldtech/jumpscale_lib9
|
03c1451133d777e5af106fcc6f75c1138bb997f2
|
[
"Apache-2.0"
] | 220
|
2018-07-29T08:37:17.000Z
|
2019-08-05T15:01:27.000Z
|
JumpscaleLib/tools/teammgr/Teammgr.py
|
threefoldtech/jumpscale_lib9
|
03c1451133d777e5af106fcc6f75c1138bb997f2
|
[
"Apache-2.0"
] | 1
|
2018-08-20T09:16:08.000Z
|
2018-08-20T09:16:08.000Z
|
from jumpscale import j
TEMPLATE_PERSON_TOML = """
login =""
first_name = ""
last_name = ""
locations = []
companies = []
departments = []
languageCode = "en-us"
title = []
description_internal =""
description_public_friendly =""
description_public_formal =""
experience = ""
hobbies = ""
pub_ssh_key= ""
skype = ""
telegram = ""
itsyou_online = ""
reports_into = ""
mobile = []
email = []
github = ""
linkedin = ""
links = []
rank = 0
core = false
"""
JSBASE = j.application.jsbase_get_class()
class Todo(JSBASE):
def __init__(self, department, path, todo):
path = path.replace("//", "/")
self.department = department
self.path = path
self.todo = todo
JSBASE.__init__(self)
@property
def person(self):
return j.sal.fs.getBaseName(self.path)
def __repr__(self):
return "Todo %s:%s:%s " % (self.department.name, self.path, self.todo)
__str__ = __repr__
class Person(JSBASE):
def __init__(self, department, name, path):
self.department = department
self.path = path
self.name = name
self.todo = []
self.link = False
self.load()
JSBASE.__init__(self)
def load(self):
self.path_fix()
self.images_fix()
self.toml_fix()
# self.readme_fix()
def add_to_do(self, path, todo):
todo = todo.replace("_", "-")
td = Todo(self, path, todo)
self.todo.append(td)
def images_fix(self):
if self.link:
return
# make sure we have an unprocessed.jpg
images = j.sal.fs.listFilesInDir(self.path, filter="*.jpg")
unprocessed_images = [
item for item in images if j.sal.fs.getBaseName(item) == "unprocessed.jpg"]
if images and not unprocessed_images:
            # did not have an unprocessed one; need to copy it to the unprocessed name
image = images[0]
j.sal.fs.renameFile(image, "%s/unprocessed.jpg" %
(j.sal.fs.getDirName(image)))
elif not unprocessed_images:
self.add_to_do(
self.path, "did not find unprocessed picture, please add")
def readme_fix(self):
if self.link:
return
rpath = self.path.rstrip("/") + "/readme.md"
if j.sal.fs.exists(rpath):
C = j.sal.fs.readFile(rpath)
if len(C) > 100:
return
self.logger.debug("readmefix")
from IPython import embed
embed(colors='Linux')
C = """
# Homepage for $name

## What is my plan for next weeks/months?
- my focus ...
## My passion?
- private
- professional?
## What is my role and ambition in the company?
- ...
"""
gitdir = j.clients.git.findGitPath(self.path)
cl = j.clients.git.get(gitdir)
unc = "/".join(cl.unc.split("/")[:-1])
url = "https://%s/src/branch/master/team/%s/%s" % (unc, self.department.name, self.name)
rawurl = "https://%s/raw/branch/master/team/%s/%s" % (unc, self.department.name, self.name)
C = C.replace("$name", self.name)
C = C.replace("$url", url)
C = C.replace("$rawurl", rawurl)
C = j.data.text.strip(C)
dpath = j.sal.fs.getDirName(self.path).rstrip("/") + "/%s/readme.md" % self.name
j.sal.fs.writeFile(dpath, C)
def path_fix(self):
bn_org = j.sal.fs.getBaseName(self.path)
def process(path):
bn = j.sal.fs.getBaseName(path)
bn = bn.replace(" ", "_")
bn = bn.replace("-", "_")
bn = bn.lower()
bn = j.data.nltk.unidecode(bn)
newdest = j.sal.fs.getDirName(path).rstrip("/") + "/" + bn
newdest = newdest.replace("//", "/")
return newdest, bn
newdest, bn = process(self.path)
if bn != bn_org:
if j.sal.fs.isLink(self.path):
self.logger.debug("path_fix")
j.sal.fs.renameFile(self.path, newdest)
else:
newdest = j.sal.fs.getDirName(self.path).rstrip("/") + "/" + bn
self.logger.debug("rename dir from %s to %s" % (self.path, newdest))
j.sal.fs.renameDir(self.path, newdest)
self.path = newdest
self.name = bn
if j.sal.fs.isLink(self.path):
self.link = True
            # check where the path points to, rename if required
linkpath = j.sal.fs.readLink(self.path)
bn_org = j.sal.fs.getBaseName(linkpath)
newdest, bn = process(linkpath)
gitdir = j.clients.git.findGitPath(newdest)
cl = j.clients.git.get(gitdir)
unc = "/".join(cl.unc.split("/")[:-1])
depname = linkpath.strip("/").split("/")[-2]
url = "https://%s/src/branch/master/team/%s/%s" % (unc, depname, self.name)
C = """
# $name
- [link to $name data dir]($url)
"""
C = C.replace("$name", self.name)
C = C.replace("$url", url)
C = j.data.text.strip(C)
dpath = j.sal.fs.getDirName(self.path).rstrip("/") + "/%s.md" % bn
j.sal.fs.writeFile(dpath, C)
j.sal.fs.remove(self.path)
def toml_fix(self):
if self.link:
return
self.logger.debug("PROCESS FIX:%s" % self)
def process(newtoml, name):
toml_path = "%s/%s.toml" % (self.path, name)
if j.sal.fs.exists(toml_path):
try:
tomlupdate = j.data.serializer.toml.load(toml_path)
except Exception:
self.department.add_to_do(
self.path, "toml file is corrupt:%s" % toml_path)
return newtoml
newtoml, errors = j.data.serializer.toml.merge(newtoml, tomlupdate, keys_replace={
'name': 'first_name'}, add_non_exist=False, die=False, errors=[])
for error in errors:
self.department.add_to_do(
self.path, "could not find key:'%s', value to add was: '%s' in template" % (error[0], error[1]))
return newtoml
# just remove old stuff
j.sal.fs.remove("%s/fixed.yaml" % self.path)
j.sal.fs.remove("%s/fixed.toml" % self.path)
new_toml = j.data.serializer.toml.loads(
TEMPLATE_PERSON_TOML) # load the template
new_toml = process(new_toml, "fixed_donotchange")
new_toml = process(new_toml, "profile")
new_toml = process(new_toml, "person")
# add department name to the departments in the new toml file
if self.department.name not in new_toml["departments"]:
new_toml["departments"].append(self.department.name)
for item in ["login", "first_name", "last_name", "description_public_formal", "description_public_friendly",
"pub_ssh_key", "telegram", "reports_into", "locations", "departments", "title", "mobile", "email"]:
if not new_toml[item]:
self.department.add_to_do(
self.path, "empty value for:%s" % item)
# make lower case
for key in ["locations", "companies", "departments"]:
new_toml[key] = [toml_item.lower().strip()
for toml_item in new_toml[key]]
for key in ["login", "first_name", "last_name", "telegram", "skype"]:
new_toml[key] = new_toml[key].lower().strip()
t = j.data.serializer.toml.fancydumps(new_toml)
final_toml_path = "%s/person.toml" % self.path
j.sal.fs.writeFile(final_toml_path, t)
for item in ["fixed_donotchange", "profile", "fixed"]:
j.sal.fs.remove("%s/%s.toml" % (self.path, item))
def __repr__(self):
return "Person %s:%s:%s" % (self.department.name, self.name, self.path)
__str__ = __repr__
class Department(JSBASE):
def __init__(self, name, path):
JSBASE.__init__(self)
self.path = path
self.name = name
self.todo = []
self.persons = []
self.load()
def load(self):
for person_path in j.sal.fs.listDirsInDir(self.path, recursive=False):
person_name = j.sal.fs.getBaseName(person_path)
self.persons.append(Person(self, person_name, person_path))
def add_to_do(self, path, todo):
todo = todo.replace("_", "-")
td = Todo(self, path, todo)
self.todo.append(td)
@property
def todo_per_person(self):
todo2 = {}
for todo in self.todo:
if todo.person not in todo2:
todo2[todo.person] = []
todo2[todo.person].append(todo)
return todo2
@property
def todo_md(self):
if len(self.todo_per_person.items()) == 0:
return ""
md = "# Todo for department : %s\n\n" % (self.name)
for person, todos in self.todo_per_person.items():
md += "## %s\n\n" % person
for todo in todos:
md += "- %s\n" % todo.todo
md += "\n"
return md
def __repr__(self):
return "Department %s:%s" % (self.name, self.path)
__str__ = __repr__
class Teammgr(JSBASE):
def __init__(self):
self.__jslocation__ = "j.tools.team_manager"
JSBASE.__init__(self)
self.departments = {}
def _add_department(self, path, name):
if name not in self.departments:
self.departments[name] = Department(name, path)
return self.departments[name]
def do(self, path=""):
"""
        A path inside the team directory is expected; the directory itself
        or one of its parents must contain a /team subdirectory.
        If path=='' the current directory is used.
to call:
js_shell 'j.tools.team_manager.do()'
"""
if path == "":
path = j.sal.fs.getcwd()
self.path = path
path0 = self.path
found = ""
# look up to find the right dir
while path0 != "":
if j.sal.fs.exists("%s/team" % path0):
found = path0
break
path0 = j.sal.fs.getParent(path0).rstrip().rstrip("/").rstrip()
if not found:
raise RuntimeError(
"could not find /team in one of the parent dir's (or this dir):'%s'" % path)
self.path = "%s/team" % path0
for department_path in j.sal.fs.listDirsInDir(self.path, recursive=False):
department_name = j.sal.fs.getBaseName(department_path)
department_obj = self._add_department(
department_path, department_name)
self.errors_write(self.path)
def errors_write(self, team_path):
# write all the todo's
errorpath = "%s/todo" % team_path
j.sal.fs.removeDirTree(errorpath)
j.sal.fs.createDir(errorpath)
for key, department in self.departments.items():
path1 = "%s/%s.md" % (errorpath, department.name)
if department.todo_md != "":
j.sal.fs.writeFile(path1, department.todo_md)
def test(self):
path = j.clients.git.pullGitRepo(
"ssh://git@docs.grid.tf:10022/gig/data_team.git")
self.load(path=path + "/team")
# TODO:*2 use as final formal = yaml
| 31.320652
| 128
| 0.545115
|
e839aa6a9466a62f15884193cf2f564feb326e99
| 288
|
py
|
Python
|
python/baekjoon/step/15-greedy/allocation-of-conference-rooms.py
|
bum12ark/algorithm
|
b6e262b0c29a8b5fb551db5a177a40feebc411b4
|
[
"MIT"
] | 1
|
2022-03-06T03:49:31.000Z
|
2022-03-06T03:49:31.000Z
|
python/baekjoon/step/15-greedy/allocation-of-conference-rooms.py
|
bum12ark/algorithm
|
b6e262b0c29a8b5fb551db5a177a40feebc411b4
|
[
"MIT"
] | null | null | null |
python/baekjoon/step/15-greedy/allocation-of-conference-rooms.py
|
bum12ark/algorithm
|
b6e262b0c29a8b5fb551db5a177a40feebc411b4
|
[
"MIT"
] | null | null | null |
"""
Source: https://www.acmicpc.net/problem/1931
"""
size = int(input())
schedule = [tuple(map(int, input().split())) for _ in range(size)]
schedule.sort(key=lambda x: (x[1], x[0]))
result = end = 0
for s, e in schedule:
if s >= end:
result += 1
end = e
print(result)
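# Worked example of the greedy choice above: given the meetings
# (1,4) (3,5) (0,6) (5,7) (3,8) (5,9) (6,10) (8,11) (8,12) (2,13) (12,14),
# sorting by end time and keeping every meeting whose start is >= the last
# accepted end selects (1,4), (5,7), (8,11), (12,14), so the answer is 4.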
| 16.941176
| 66
| 0.576389
|
975d79d49d5321e4e75004abaf1604a8cb9fcbbc
| 2,395
|
py
|
Python
|
keystone/common/policies/base.py
|
rajivmucheli/keystone
|
d55099d4a17e3672d478aae8c367bcdf9af15fb9
|
[
"Apache-2.0"
] | null | null | null |
keystone/common/policies/base.py
|
rajivmucheli/keystone
|
d55099d4a17e3672d478aae8c367bcdf9af15fb9
|
[
"Apache-2.0"
] | 4
|
2020-02-10T12:02:37.000Z
|
2021-07-14T15:16:57.000Z
|
keystone/common/policies/base.py
|
rajivmucheli/keystone
|
d55099d4a17e3672d478aae8c367bcdf9af15fb9
|
[
"Apache-2.0"
] | 5
|
2019-06-06T15:11:37.000Z
|
2021-06-07T08:23:23.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
IDENTITY = 'identity:%s'
RULE_ADMIN_REQUIRED = 'rule:admin_required'
RULE_ADMIN_OR_OWNER = 'rule:admin_or_owner'
RULE_ADMIN_OR_CREDENTIAL_OWNER = (
'rule:admin_required or '
'(rule:owner and user_id:%(target.credential.user_id)s)')
RULE_ADMIN_OR_TARGET_DOMAIN = (
'rule:admin_required or '
'project_domain_id:%(target.domain.id)s')
RULE_ADMIN_OR_TARGET_PROJECT = (
'rule:admin_required or '
'project_id:%(target.project.id)s')
RULE_ADMIN_OR_TOKEN_SUBJECT = 'rule:admin_or_token_subject' # nosec
RULE_REVOKE_EVENT_OR_ADMIN = 'rule:revoke_event_or_admin'
RULE_SERVICE_ADMIN_OR_TOKEN_SUBJECT = (
'rule:service_admin_or_token_subject') # nosec
RULE_SERVICE_OR_ADMIN = 'rule:service_or_admin'
RULE_TRUST_OWNER = 'user_id:%(trust.trustor_user_id)s'
rules = [
policy.RuleDefault(
name='cloud_admin',
check_str='role:admin and is_admin_project:True'),
policy.RuleDefault(
name='admin_required',
check_str='role:admin or is_admin:1'),
policy.RuleDefault(
name='service_role',
check_str='role:service'),
policy.RuleDefault(
name='service_or_admin',
check_str='rule:admin_required or rule:service_role'),
policy.RuleDefault(
name='owner',
check_str='user_id:%(user_id)s'),
policy.RuleDefault(
name='admin_or_owner',
check_str='rule:admin_required or rule:owner'),
policy.RuleDefault(
name='token_subject',
check_str='user_id:%(target.token.user_id)s'),
policy.RuleDefault(
name='admin_or_token_subject',
check_str='rule:admin_required or rule:token_subject'),
policy.RuleDefault(
name='service_admin_or_token_subject',
check_str='rule:service_or_admin or rule:token_subject'),
]
def list_rules():
return rules
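# Minimal usage sketch (the enforcer and CONF objects are assumptions, not part
# of this module): other policy modules extend these base defaults, and an
# oslo.policy Enforcer can register them, e.g.
#   enforcer = policy.Enforcer(CONF)
#   enforcer.register_defaults(list_rules())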
| 35.220588
| 75
| 0.721921
|
098ff64cb759c182bc2d29c85e713f19d6e9a7b0
| 9,045
|
py
|
Python
|
paxos/receivers/rdtpReceiver.py
|
victordomene/ram-paxos
|
e47051b6af42ca952fedbc95e54346ecf3266a39
|
[
"MIT"
] | null | null | null |
paxos/receivers/rdtpReceiver.py
|
victordomene/ram-paxos
|
e47051b6af42ca952fedbc95e54346ecf3266a39
|
[
"MIT"
] | null | null | null |
paxos/receivers/rdtpReceiver.py
|
victordomene/ram-paxos
|
e47051b6af42ca952fedbc95e54346ecf3266a39
|
[
"MIT"
] | null | null | null |
"""
This module provides an implementation of the Receiver class using our own
RDTP Protocol, which we wrote for the Chat Assignment in CS262.
For the specific documentation of the arguments these methods take and
what they do at a high level, refer to receiver.py.
"""
import socket
import select
import thread
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from rdtp import rdtp
TIMEOUT_SECONDS = 10
RECEIVER_DEBUG = False
MAX_PENDING_CLIENTS = 50
class rdtpReceiver():
def __init__(self, proposer, acceptor, learner):
self.proposer = proposer
self.acceptor = acceptor
self.learner = learner
def serve(self, host, port):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setblocking(0)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind((host, port))
self.sockets = [self.socket]
print 'Started listening on {}:{}'.format(host, port)
thread.start_new_thread(self._serve_forever, ())
def usage_args(self, method, num, expected):
if RECEIVER_DEBUG:
print "RECEIVER_DEBUG: Received {} method with {} arguments; expected {}".format(method, num, expected)
def _serve_forever(self):
self.socket.listen(MAX_PENDING_CLIENTS)
while 1:
# This blocks until we are ready to read some socket
ready_to_read,_,_ = select.select(self.sockets,[],[],3)
for sock in ready_to_read:
# New client connection!
# we accept the connection and get a new socket
# for it
if sock == self.socket:
new_client_sock, client_addr = sock.accept()
self.sockets.append(new_client_sock)
print 'New client connection with address [%s:%s]' % client_addr
# Old client wrote us something. It must be
# a message!
else:
try:
status, args = rdtp.recv(sock)
except rdtp.ClientDead:
print "Client died"
self.sockets.remove(sock)
continue
# in this application, the only valid status is 0
if status != 0:
if RECEIVER_DEBUG:
print "RECEIVER_DEBUG: Received a request with invalid status {}".format(status)
return
                    # in this application, every request must carry at least one argument (the method name)
if len(args) < 1:
if RECEIVER_DEBUG:
print "RECEIVER_DEBUG: Received a request lacking arguments: {}".format(args)
return
# the first argument must be the method name
method = args[0]
if RECEIVER_DEBUG:
print "RECEIVER_DEBUG: Received a request with method {} and arguments {}".format(method, args[1:])
if method == 'send_prepare':
if len(args) != 4:
self.usage_args(method, len(args), 4)
return
p, n, proposer = int(args[1]), int(args[2]), args[3]
self.handle_prepare(p, n, proposer)
elif method == 'send_promise':
if len(args) != 7:
self.usage_args(method, len(args), 7)
return
had_previous, p, proposer, n, v, acceptor = 'True' == args[1], int(args[2]), str(args[3]), int(args[4]), int(args[5]), str(args[6])
self.handle_promise(had_previous, p, proposer, n, v, acceptor)
elif method == 'send_accept':
if len(args) != 5:
self.usage_args(method, len(args), 5)
return
p, n, v, proposer = int(args[1]), int(args[2]), int(args[3]), str(args[4])
self.handle_accept(p, n, v, proposer)
elif method == 'send_refuse':
if len(args) != 5:
self.usage_args(method, len(args), 5)
return
p, proposer, n, acceptor = int(args[1]), str(args[2]), int(args[3]), str(args[4])
self.handle_refuse(p, proposer, n, acceptor)
elif method == 'send_learn':
if len(args) != 6:
self.usage_args(method, len(args), 6)
return
p, proposer, n, v, acceptor = int(args[1]), str(args[2]), int(args[3]), int(args[4]), str(args[5])
self.handle_learn(p, proposer, n, v, acceptor)
# This should be sent by an outside client who's not part of paxos
elif method == 'print_ledger':
print 'Will print ledger'
self.handle_print_ledger()
elif method == 'print_differences':
print 'Will print differences'
self.handle_print_differences()
elif method == 'print_difference_mean':
print 'Will print difference mean'
self.learner.handle_print_difference_mean()
elif method == 'diff_file':
print 'Will print differences to file'
self.handle_diff_file()
# if none of the methods matched, we have an unknown request...
else:
if RECEIVER_DEBUG:
print "RECEIVER_DEBUG: Received an unknown request with method {}".format(method)
def handle_prepare(self, p, n, proposer):
if RECEIVER_DEBUG:
print "RECEIVER_DEBUG: PrepareRequest received: p = {}, n = {}, proposer = {}".format(p, n, proposer)
return self.acceptor.handle_prepare(p, n, proposer)
def handle_accept(self, p, n, v, proposer):
if RECEIVER_DEBUG:
print "RECEIVER_DEBUG: AcceptRequest received: p = {}, n = {}, v = {}, proposer = {}".format(p, n, v, proposer)
return self.acceptor.handle_accept(p, n, v, proposer)
def handle_promise(self, had_previous, p, proposer, n, v, acceptor):
# handle the case where we had no previous value set
if not had_previous:
p = None
v = None
proposer = None
if RECEIVER_DEBUG:
print "RECEIVER_DEBUG: PromiseRequest received: p = {}, proposer = {}, n = {}, v = {}, acceptor = {}".format(p, proposer, n, v, acceptor)
return self.proposer.handle_promise(p, proposer, n, v, acceptor)
def handle_refuse(self, p, proposer, n, acceptor):
if RECEIVER_DEBUG:
print "RECEIVER_DEBUG: RefuseRequest received: p = {}, proposer = {}, n = {}, acceptor = {}".format(p, proposer, n, acceptor)
return self.proposer.handle_refuse(p, proposer, n, acceptor)
def handle_learn(self, p, proposer, n, v, acceptor):
if RECEIVER_DEBUG:
print "RECEIVER_DEBUG: LearnRequest received: p = {}, proposer = {}, n = {}, v = {}, acceptor = {}".format(p, proposer, n, v, acceptor)
return self.learner.handle_learn(p, proposer, n, v, acceptor)
def handle_print_ledger(self):
"""
Handle a print_ledger request.
This is not necessary for a Paxos implementation, and therefore,
        it is not included in the receivers.py abstract class.
Does not return.
"""
if RECEIVER_DEBUG:
print "RECEIVER_DEBUG: Printing ledger"
return self.learner.handle_print_ledger()
def handle_print_differences(self):
"""
Handle a print_differences request.
This is not necessary for a Paxos implementation, and therefore,
        it is not included in the receivers.py abstract class.
Does not return.
"""
if RECEIVER_DEBUG:
print "RECEIVER_DEBUG: Printing differences"
return self.learner.handle_print_differences()
def handle_diff_file(self):
"""
Handle a diff_file request.
This is not necessary for a Paxos implementation, and therefore,
        it is not included in the receivers.py abstract class.
Does not return.
"""
if RECEIVER_DEBUG:
print "RECEIEVER_DEBUG: Writing Differences to file"
return self.learner.handle_diff_file()
def stop_server(self):
"""
Stops the currently running RDTP server.
Currently, it does nothing.
"""
pass
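# Minimal usage sketch (the proposer/acceptor/learner objects are assumptions,
# not defined in this module): the receiver multiplexes incoming RDTP messages
# onto the three Paxos roles passed to its constructor.
#   receiver = rdtpReceiver(my_proposer, my_acceptor, my_learner)
#   receiver.serve("0.0.0.0", 6000)  # spawns the select() loop in a new thread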
| 37.222222
| 155
| 0.539525
|
54237a67a6c7a45c96737ef9a8d145791ee42009
| 3,838
|
py
|
Python
|
src/MainAPP/migrations/0010_auto_20180521_1229.py
|
mizamae/HomeAutomation
|
8c462ee4c31c1fea6792cb19af66a4d2cf7bb2ca
|
[
"MIT"
] | null | null | null |
src/MainAPP/migrations/0010_auto_20180521_1229.py
|
mizamae/HomeAutomation
|
8c462ee4c31c1fea6792cb19af66a4d2cf7bb2ca
|
[
"MIT"
] | 9
|
2017-11-21T15:45:18.000Z
|
2022-02-11T03:37:54.000Z
|
src/MainAPP/migrations/0010_auto_20180521_1229.py
|
mizamae/HomeAutomation
|
8c462ee4c31c1fea6792cb19af66a4d2cf7bb2ca
|
[
"MIT"
] | 1
|
2020-07-22T02:24:17.000Z
|
2020-07-22T02:24:17.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-05-21 10:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('MainAPP', '0009_sitesettings'),
]
operations = [
migrations.AddField(
model_name='sitesettings',
name='VERSION_AUTO_DETECT',
field=models.BooleanField(default=True, help_text='Automatically checks the repository for new software', verbose_name='Autodetect new software releases'),
),
migrations.AddField(
model_name='sitesettings',
name='VERSION_AUTO_UPDATE',
field=models.BooleanField(default=False, help_text='Automatically updates to (and applies) the latest software', verbose_name='Apply automatically new software releases'),
),
migrations.AlterField(
model_name='sitesettings',
name='ETH_GATE',
field=models.GenericIPAddressField(default='192.168.0.1', help_text='This is the gateway IP of the LAN network that is providing the internet access.', protocol='IPv4', verbose_name='Gateway of the LAN network'),
),
migrations.AlterField(
model_name='sitesettings',
name='ETH_IP',
field=models.GenericIPAddressField(default='192.168.0.160', help_text='This is the IP for the LAN network that is providing the internet access.', protocol='IPv4', verbose_name='IP address for the LAN network'),
),
migrations.AlterField(
model_name='sitesettings',
name='ETH_MASK',
field=models.GenericIPAddressField(default='255.255.255.0', help_text='This is the mask for the LAN network that is providing the internet access.', protocol='IPv4', verbose_name='Mask for the LAN network'),
),
migrations.AlterField(
model_name='sitesettings',
name='FACILITY_NAME',
field=models.CharField(default='My house', max_length=100, verbose_name='Name of the installation'),
),
migrations.AlterField(
model_name='sitesettings',
name='SITE_DNS',
field=models.CharField(default='myDIY4dot0House.net', help_text='This is the DNS address that gives access to the application from the internet.', max_length=100, verbose_name='Name of the domain to access the application'),
),
migrations.AlterField(
model_name='sitesettings',
name='WIFI_GATE',
field=models.GenericIPAddressField(default='10.10.10.1', help_text='This is the gateway for the WiFi network generated to communicate with the slaves', protocol='IPv4', verbose_name='WIFI network gateway'),
),
migrations.AlterField(
model_name='sitesettings',
name='WIFI_IP',
field=models.GenericIPAddressField(default='10.10.10.1', help_text='This is the IP address for the WiFi network generated to communicate with the slaves', protocol='IPv4', verbose_name='IP address for the WIFI network'),
),
migrations.AlterField(
model_name='sitesettings',
name='WIFI_MASK',
field=models.GenericIPAddressField(default='255.255.255.0', help_text='This is the mask of the WiFi network generated to communicate with the slaves', protocol='IPv4', verbose_name='WIFI network mask'),
),
migrations.AlterField(
model_name='sitesettings',
name='WIFI_SSID',
field=models.CharField(default='DIY4dot0', help_text='This is the name of the WiFi network generated to communicate with the slaves', max_length=50, verbose_name='WIFI network identificator'),
),
]
| 54.056338
| 237
| 0.650599
|
0d6eb12dccd990c495fc6680ef394289b4f3ad4f
| 769
|
py
|
Python
|
phastload/mainsite/models.py
|
rubensfig/PhastLoad
|
8cb205bdc25ed3bfa577712a77a1313b085e52cd
|
[
"Apache-2.0"
] | null | null | null |
phastload/mainsite/models.py
|
rubensfig/PhastLoad
|
8cb205bdc25ed3bfa577712a77a1313b085e52cd
|
[
"Apache-2.0"
] | 1
|
2018-04-07T16:07:11.000Z
|
2018-04-07T16:07:11.000Z
|
phastload/mainsite/models.py
|
rubensfig/PhastLoad
|
8cb205bdc25ed3bfa577712a77a1313b085e52cd
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
# Create your models here.
class Beaches(models.Model):
BEACH_TYPES = (
('B', 'Beginner'),
('M', 'Medium'),
('A', 'Advanced'),
)
id = models.CharField(max_length=30, primary_key=True)
name = models.CharField(max_length=30)
latX = models.DecimalField(max_length=30,max_digits=5, decimal_places=2)
latY = models.DecimalField(max_length=30,max_digits=5, decimal_places=2)
cond = models.CharField(max_length=30, choices=BEACH_TYPES)
class Users(models.Model):
EXPERIENCE = (
('B', 'Beginner'),
('M', 'Medium'),
('A', 'Advanced'),
)
id = models.CharField(max_length=30, primary_key=True)
exp = models.CharField(max_length=30, choices=EXPERIENCE)
| 27.464286
| 76
| 0.642393
|
9e50ec722051c0cb5c9a6e5dcca22102b6006a68
| 1,206
|
py
|
Python
|
boom/models.py
|
boomletsgo/boom-sdk-python
|
e15af1d9158b84e9677037f2f784a5a9363564af
|
[
"Unlicense"
] | null | null | null |
boom/models.py
|
boomletsgo/boom-sdk-python
|
e15af1d9158b84e9677037f2f784a5a9363564af
|
[
"Unlicense"
] | null | null | null |
boom/models.py
|
boomletsgo/boom-sdk-python
|
e15af1d9158b84e9677037f2f784a5a9363564af
|
[
"Unlicense"
] | null | null | null |
from declaration import fields, models
class Code(models.DeclarativeBase):
scope = fields.StringField()
code = fields.StringField()
class Account(models.DeclarativeBase):
id = fields.UUIDField()
email = fields.StringField()
first_name = fields.StringField()
last_name = fields.StringField()
code = fields.NestedField(Code)
created_date = fields.DateTimeField()
updated_date = fields.DateTimeField()
class Platform(models.DeclarativeBase):
id = fields.UUIDField()
identifier = fields.StringField()
class Conversation(models.DeclarativeBase):
id = fields.UUIDField()
platform = fields.NestedField(Platform)
account = fields.NestedField(Account)
created_date = fields.DateTimeField()
updated_date = fields.DateTimeField()
class Message(models.DeclarativeBase):
conversation = fields.NestedField(Conversation)
platform = fields.NestedField(Platform)
sender = fields.StringField()
receiver = fields.StringField()
identifier = fields.StringField()
intent = fields.StringField()
content = fields.StringField()
raw = fields.StringField()
extra = fields.JSONField()
timestamp = fields.DateTimeField()
| 28.046512
| 51
| 0.72471
|
166c235172fb12c3284a0d6d6178a00396a3b7f7
| 17,244
|
py
|
Python
|
tests/platform_tests/test_cont_warm_reboot.py
|
emilmih/sonic-mgmt
|
e4e42ec8028bf51b39587e2b53e526d505fe7938
|
[
"Apache-2.0"
] | 2
|
2020-07-03T01:16:27.000Z
|
2020-10-09T05:38:07.000Z
|
tests/platform_tests/test_cont_warm_reboot.py
|
emilmih/sonic-mgmt
|
e4e42ec8028bf51b39587e2b53e526d505fe7938
|
[
"Apache-2.0"
] | 41
|
2020-03-30T03:52:22.000Z
|
2021-03-03T09:16:48.000Z
|
tests/platform_tests/test_cont_warm_reboot.py
|
emilmih/sonic-mgmt
|
e4e42ec8028bf51b39587e2b53e526d505fe7938
|
[
"Apache-2.0"
] | 1
|
2021-06-13T07:38:59.000Z
|
2021-06-13T07:38:59.000Z
|
import os
import shutil
import csv
import sys
import time
import json
import traceback
import pytest
import logging
from datetime import datetime
from tests.common.helpers.assertions import pytest_assert
from tests.common.reboot import get_reboot_cause
from tests.common.fixtures.advanced_reboot import AdvancedReboot
from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import]
from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import]
from tests.platform_tests.verify_dut_health import RebootHealthError,\
check_services, check_interfaces_and_transceivers, check_neighbors,\
verify_no_coredumps, handle_test_error, wait_until_uptime, get_test_report
pytestmark = [
pytest.mark.disable_loganalyzer,
pytest.mark.topology('t0')
]
MAX_WAIT_TIME_FOR_INTERFACES = 30
MAX_WAIT_TIME_FOR_REBOOT_CAUSE = 120
class ContinuousReboot:
def __init__(self, request, duthost, ptfhost, localhost, conn_graph_facts):
self.request = request
self.duthost = duthost
self.ptfhost = ptfhost
self.localhost = localhost
self.conn_graph_facts = conn_graph_facts
self.continuous_reboot_count = request.config.getoption("--continuous_reboot_count")
self.continuous_reboot_delay = request.config.getoption("--continuous_reboot_delay")
self.reboot_type = request.config.getoption("--reboot_type")
self.image_location = request.config.getoption("--image_location")
self.image_list = request.config.getoption("--image_list")
self.current_image = self.duthost.shell('sonic_installer list | grep Current | cut -f2 -d " "')['stdout']
self.test_report = dict()
if self.image_location is None:
logging.error("Invalid image location specified: {}".format(str(self.image_location)))
self.init_reporting()
def init_reporting(self):
self.reboot_count = None
self.current_image = None
self.is_new_image = None
self.test_duration = None
self.critical_services = None
self.interfaces = None
self.lag_interfaces = None
self.control_plane = None
self.data_plane = None
self.sub_test_result = True
self.test_failures = 0
self.warm_reboot_count = 0
self.warm_reboot_pass = 0
self.warm_reboot_fail = 0
self.fast_reboot_count = 0
self.fast_reboot_pass = 0
self.fast_reboot_fail = 0
self.pre_existing_cores = 0
def reboot_and_check(self, tbinfo):
"""
Perform the specified type of reboot and check platform status.
        @param tbinfo: testbed information, used for the post-reboot neighbor checks
"""
logging.info("Run %s reboot on DUT" % self.reboot_type)
self.run_reboot_testcase()
# Wait until uptime reaches allowed value
wait_until_uptime(self.duthost, self.continuous_reboot_delay)
# Perform additional post-reboot health-check
verify_no_coredumps(self.duthost, self.pre_existing_cores)
self.verify_image()
check_services(self.duthost)
self.check_reboot_type()
check_interfaces_and_transceivers(self.duthost, self.request)
check_neighbors(self.duthost, tbinfo)
logging.info("Finished reboot test and health checks..")
@handle_test_error
def run_reboot_testcase(self):
result = self.advancedReboot.runRebootTest()
if result is not True:
# Create a failure report
error = result.get("stderr")
raise RebootHealthError("Reboot test failed with error: {}".format(error))
@handle_test_error
def check_reboot_type(self):
"""
Perform a match of reboot-cause and reboot-trigger
"""
logging.info("Check reboot cause")
reboot_cause = get_reboot_cause(self.duthost)
if reboot_cause != self.reboot_type:
raise RebootHealthError("Reboot cause {} did not match the trigger {}".format(reboot_cause, self.reboot_type))
@handle_test_error
def verify_image(self):
self.current_image = self.duthost.shell('sonic_installer list | grep Current | cut -f2 -d " "')['stdout']
if self.is_new_image is True:
# After boot-up, verify that the required image is running on the DUT
if self.advancedReboot.binaryVersion != self.current_image:
raise RebootHealthError("Image installation failed.\
Expected: {}. Found: {}".format(self.advancedReboot.binaryVersion, self.current_image))
def check_test_params(self):
while True:
with open(self.input_file, "r") as f:
try:
install_info = json.load(f)
if str(install_info.get('STOP_TEST')).lower() == 'true':
logging.info("==================== Stop test instruction received.\
Terminating test early at {}/{} iteration ====================".format \
(self.reboot_count, self.continuous_reboot_count))
return False
if str(install_info.get('PAUSE_TEST')).lower() == 'true':
time.sleep(10)
continue
reboot_type = str(install_info.get('REBOOT_TYPE')).lower()
if reboot_type != 'warm' and reboot_type != 'fast':
logging.warn("Unsupported reboot type - {}. Proceeding with {}.".format(reboot_type, self.reboot_type))
else:
self.reboot_type = reboot_type
except ValueError:
logging.warn("Invalid json file, continuing the reboot test with old list of images")
break
logging.info("Copy latest PTF test files to PTF host '{0}'".format(self.ptfhost.hostname))
self.ptfhost.copy(src="ptftests", dest="/root")
return True
def handle_image_installation(self, count):
with open(self.input_file, "r") as f:
try:
install_info = json.load(f)
image_install_list = install_info.get('install_list').split(",")
# Use modulus operator to cycle through the image_install_list per reboot iteration
self.new_image = image_install_list[count % len(image_install_list)].strip()
image_path = install_info.get('location').strip() + "/" + self.new_image
file_exists = self.duthost.command("curl -o /dev/null --silent -Iw '%{{http_code}}' {}".format(image_path),\
module_ignore_errors=True)["stdout"]
if file_exists != '200':
logging.info("Remote image file {} does not exist. Curl returned: {}".format(image_path, file_exists))
logging.warn("Continuing the test with current image")
self.new_image = "current"
except ValueError:
logging.warn("Invalid json file, continuing the reboot test with old list of images")
if self.new_image == "current":
logging.info("Next image is set to current - skip image installation")
self.advancedReboot.newSonicImage = None
self.is_new_image = False
else:
self.advancedReboot.newSonicImage = image_path
self.advancedReboot.cleanupOldSonicImages = True
self.is_new_image = True
logging.info("Image to be installed on DUT - {}".format(image_path))
self.advancedReboot.imageInstall()
if self.advancedReboot.newImage:
# The image upgrade will delete all the preexisting cores
self.pre_existing_cores = 0
def test_set_up(self):
asic_type = self.duthost.facts["asic_type"]
if asic_type in ["mellanox"]:
issu_capability = self.duthost.command("show platform mlnx issu")["stdout"]
if "disabled" in issu_capability:
pytest.skip("ISSU is not supported on this DUT, skip this test case")
self.pre_existing_cores = self.duthost.shell('ls /var/core/ | wc -l')['stdout']
logging.info("Found {} preexisting core files inside /var/core/".format(self.pre_existing_cores))
input_data = {
'install_list': self.image_list, # this list can be modified at runtime to enable testing different images
'location': self.image_location,
'REBOOT_TYPE': "warm",
'PAUSE_TEST': False,
'STOP_TEST': False
}
self.log_dir = os.getcwd() + "continous_reboot"
dir_name = "continous_reboot_{}".format(datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
self.log_dir = os.path.join(os.getcwd(), dir_name)
os.makedirs(self.log_dir)
# test input file for dynamic interaction
self.input_file = os.path.join(self.log_dir, "continuous_reboot_input.json")
with open(self.input_file, "w") as image_file:
json.dump(input_data, image_file, indent=4)
# test output file for results
self.reports_file = os.path.join(self.log_dir, "continuous_reboot_report.csv")
with open(self.reports_file, "w") as report_file:
header = ["test_id", "image", "is_new_image", "up_time", "test_duration", "result"]
writer = csv.DictWriter(report_file, fieldnames=header)
writer.writeheader()
def create_test_report(self):
if self.sub_test_result is False:
self.test_failures = self.test_failures + 1
if self.reboot_type == "warm":
self.warm_reboot_count = self.warm_reboot_count + 1
if self.sub_test_result is False:
self.warm_reboot_fail = self.warm_reboot_fail + 1
else:
self.warm_reboot_pass = self.warm_reboot_pass + 1
elif self.reboot_type == "fast":
self.fast_reboot_count = self.fast_reboot_count + 1
if self.sub_test_result is False:
self.fast_reboot_fail = self.fast_reboot_fail + 1
else:
self.fast_reboot_pass = self.fast_reboot_pass + 1
test_report = {
"test_id": self.reboot_count,
"image": self.current_image,
"is_new_image": self.is_new_image,
"up_time": str(self.duthost.get_uptime().total_seconds()) + "s",
"test_duration": str((self.test_end_time - self.test_start_time).total_seconds()) + "s",
"result": self.sub_test_result
}
with open(self.reports_file, "a") as report_file:
header = ["test_id", "image", "is_new_image", "up_time", "test_duration", "result"]
writer = csv.DictWriter(report_file, fieldnames=header)
writer.writerow(test_report)
log_files = [
'/tmp/{0}-reboot.log'.format(self.reboot_type),
'/tmp/capture.pcap',
'/tmp/capture_filtered.pcap',
'/tmp/syslog',
'/tmp/sairedis.rec',
'/tmp/swss.rec']
if self.sub_test_result is True:
test_dir = os.path.join(self.log_dir, "pass", str(self.reboot_count))
else:
test_dir = os.path.join(self.log_dir, "fail", str(self.reboot_count))
os.makedirs(test_dir)
for file in log_files:
try:
file_exists = os.path.isfile(file)
if file_exists:
shutil.move(file, test_dir)
except Exception:
logging.error("Error copying file {}".format(str(file)))
report_file = os.path.join(test_dir, "continuous_reboot_report.json")
test_report["checks"] = self.test_report
with open(report_file, "w") as report_file:
json.dump(test_report, report_file, indent=4)
pytest_assert(self.test_failures == 0, "Continuous reboot test failed {}/{} times".\
format(self.test_failures, self.reboot_count))
def start_continuous_reboot(self, request, duthost, ptfhost, localhost, tbinfo, creds):
self.test_set_up()
# Start continuous warm/fast reboot on the DUT
for count in range(self.continuous_reboot_count):
self.reboot_count = count + 1
self.sub_test_result = True # set default result to be True, any failure will set this to False
self.test_start_time = datetime.now()
logging.info("\n==================== Start continuous reboot iteration: {}/{}. Type: {} ===================="\
.format(self.reboot_count, self.continuous_reboot_count, self.reboot_type))
reboot_type = self.reboot_type + "-reboot"
try:
self.advancedReboot = AdvancedReboot(request, duthost, ptfhost, localhost, tbinfo, creds,\
rebootType=reboot_type, moduleIgnoreErrors=True)
except Exception:
self.sub_test_result = False
self.test_failures = self.test_failures + 1
logging.error("AdvancedReboot initialization failed with {}".format(traceback.format_exc()))
logging.info("Waiting 300s for external fix or a signal to end the test...")
time.sleep(300)
if not self.check_test_params():
break
continue
self.handle_image_installation(count)
self.reboot_and_check(tbinfo)
self.test_report = get_test_report()
self.sub_test_result = all([check == True for check in list(self.test_report.values())])
self.advancedReboot.newSonicImage = None
self.test_end_time = datetime.now()
self.create_test_report()
logging.info("\n==================== End continuous reboot iteration: {}/{}. Result: {} ===================="\
.format(self.reboot_count, self.continuous_reboot_count, self.sub_test_result))
if not self.check_test_params():
break
def test_teardown(self):
logging.info("="*50)
logging.info("----- Total continuous reboots: {}. Pass: {}. Fail: {} ------".format(self.reboot_count,\
self.reboot_count - self.test_failures, self.test_failures))
logging.info("------ Total warm reboot tests: {}. Pass: {}. Fail: {} ------". \
format(self.warm_reboot_count, self.warm_reboot_pass, self.warm_reboot_fail))
logging.info("------ Total fast reboot tests: {}. Pass: {}. Fail: {} ------". \
format(self.fast_reboot_count, self.fast_reboot_pass, self.fast_reboot_fail))
logging.info("-"*50)
logging.info("Test results summary available at {}".format(self.log_dir + "/continuous_reboot_report.csv"))
logging.info("Passed tests logs stored at {}".format(self.log_dir + "/pass/"))
logging.info("Failed tests logs stored at {}".format(self.log_dir + "/fail/"))
logging.info("="*50)
pytest_assert(self.test_failures == 0, "Continuous reboot test failed {}/{} times".\
format(self.test_failures, self.reboot_count))
@pytest.mark.device_type('vs')
def test_continuous_reboot(request, duthosts, rand_one_dut_hostname, ptfhost, localhost, conn_graph_facts, tbinfo, creds):
"""
@summary: This test performs continuous reboot cycles on images that are provided as an input.
Supported parameters for this test can be modified at runtime:
Image Name, Image location - to run new iterations of the test on a new image
        Pause test - to pause the run (e.g. for debugging or applying fixes on the DUT to get it to a stable state).
        Stop test - for graceful termination of the test.
        Reboot type - to change the type to WARM or FAST at runtime.
    Additionally, the test incorporates running a script (advanced-reboot.py) on the PTF container.
    To introduce additional checks (or to modify or remove checks), this test can be PAUSED, and the PTF
    test script can be modified. Newer iterations of this test will then start executing the latest PTF script.
In between reboot cycles, the test verifies:
New image matches the image that the test has installed
Reboot cause - should match the trigger cause.
DUT is stable. Ping should work from T1 to servers and from servers to T1
Control and data plane should be healthy (as defined by advanced-reboot.py script)
Status of services - services syncd and swss should be active/running
Status of interfaces and LAGs - all interface and LAGs should comply with current topology
Status of transceivers - ports in lab_connection_graph should be present
Status of BGP neighbors - should be established
"""
duthost = duthosts[rand_one_dut_hostname]
continuous_reboot = ContinuousReboot(request, duthost, ptfhost, localhost, conn_graph_facts)
continuous_reboot.start_continuous_reboot(request, duthost, ptfhost, localhost, tbinfo, creds)
continuous_reboot.test_teardown()
| 48.711864
| 127
| 0.636105
|
c2e478fcd253c5aec6e0aec52524e3afbf1df75a
| 12,448
|
py
|
Python
|
test/python/test_optimizer.py
|
slin004/incubator-singa
|
09c8a2e65927d6405262bbb969e6dab96809df07
|
[
"Apache-2.0"
] | 1
|
2020-02-21T06:02:05.000Z
|
2020-02-21T06:02:05.000Z
|
test/python/test_optimizer.py
|
slin004/incubator-singa
|
09c8a2e65927d6405262bbb969e6dab96809df07
|
[
"Apache-2.0"
] | 8
|
2020-01-16T06:56:23.000Z
|
2020-01-18T03:46:04.000Z
|
test/python/test_optimizer.py
|
slin004/incubator-singa
|
09c8a2e65927d6405262bbb969e6dab96809df07
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# =============================================================================
from __future__ import division
from builtins import zip
from builtins import range
import unittest
import math
import numpy as np
import singa.tensor as tensor
import singa.optimizer as opt
from singa import singa_wrap
from cuda_helper import gpu_dev as cuda
def np_adam(plist, glist, mlist, vlist, lr, t, b1=0.9, b2=0.999):
for p, g, m, v in zip(plist, glist, mlist, vlist):
m *= b1
m += (1-b1) * g
v *= b2
v += (1-b2) * g * g
alpha = lr * math.sqrt(1. - math.pow(b2, t)) / (1. - math.pow(b1, t))
p -= alpha * m / (np.sqrt(v) + 1e-8)
def np_rmsprop(plist, glist, vlist, lr, t, rho=0.9):
for p, g, v in zip(plist, glist, vlist):
v *= rho
v += (1-rho) * g * g
p -= lr * g / (np.sqrt(v + 1e-8))
def np_momentum(plist, glist, vlist, lr, t, momentum=0.9):
for p, g, v in zip(plist, glist, vlist):
v *= momentum
v += lr * g
p -= v
def np_adagrad(plist, glist, vlist, lr, t):
for p, g, v in zip(plist, glist, vlist):
v += g * g
p -= lr * g / (np.sqrt(v + 1e-8))
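# The four np_* helpers above are NumPy reference implementations of one
# update step for Adam, RMSProp, SGD-with-momentum and AdaGrad; the tests
# below run them alongside the corresponding singa optimizers for a few
# steps and assert that the resulting parameters agree.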
class TestOptimizer(unittest.TestCase):
def setUp(self):
self.np_W = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32)
self.W = tensor.from_numpy(self.np_W)
self.np_g = np.array([0.1, 0.3, 0.1, 0.2], dtype=np.float32)
self.g = tensor.from_numpy(self.np_g)
def to_cuda(self):
self.W.to_device(cuda)
self.g.to_device(cuda)
def test_sgd(self):
lr = 0.1
sgd = opt.SGD(lr)
sgd.apply(0, self.g, self.W, 'w')
w = tensor.to_numpy(self.W)
for i in range(self.W.size()):
self.assertAlmostEqual(w[i], self.np_W[i] - lr * self.np_g[i])
def test_adam(self):
lr = 0.1
n, m = 4, 6
p1 = np.random.rand(n, m)
p2 = np.random.rand(n, m)
g1 = np.random.rand(n, m) * 0.01
g2 = np.random.rand(n, m) * 0.01
m1 = np.zeros((n, m))
m2 = np.zeros((n, m))
v1 = np.zeros((n, m))
v2 = np.zeros((n, m))
t1 = tensor.from_numpy(p1)
t2 = tensor.from_numpy(p2)
tg1 = tensor.from_numpy(g1)
tg2 = tensor.from_numpy(g2)
for t in range(1, 10):
np_adam([p1, p2], [g1, g2], [m1, m2], [v1, v2], lr, t)
adam = opt.Adam(lr=lr)
for t in range(1, 10):
adam.apply(0, tg1, t1, 'p1', t)
adam.apply(0, tg2, t2, 'p2', t)
t1 = tensor.to_numpy(t1)
t2 = tensor.to_numpy(t2)
for t, p in zip([t1, t2], [p1, p2]):
for i in range(n):
for j in range(m):
self.assertAlmostEqual(t[i, j], p[i, j], 6)
@unittest.skipIf(not singa_wrap.USE_CUDA, 'CUDA is not enabled')
def test_sgd_cuda(self):
lr = 0.1
sgd = opt.SGD(lr)
self.to_cuda()
sgd.apply(0, self.g, self.W, 'w')
self.W.to_host()
w = tensor.to_numpy(self.W)
for i in range(self.W.size()):
self.assertAlmostEqual(w[i], self.np_W[i] - lr * self.np_g[i])
def test_constraint(self):
threshold = 0.02
cons = opt.L2Constraint(threshold)
cons.apply(0, self.W, self.g)
g = tensor.to_numpy(self.g)
nrm = np.linalg.norm(self.np_g) / self.np_g.size
for i in range(g.size):
self.assertAlmostEqual(g[i], self.np_g[i] * threshold / nrm)
@unittest.skipIf(not singa_wrap.USE_CUDA, 'CUDA is not enabled')
def test_constraint_cuda(self):
threshold = 0.02
self.to_cuda()
cons = opt.L2Constraint(threshold)
cons.apply(0, self.W, self.g)
self.g.to_host()
g = tensor.to_numpy(self.g)
nrm = np.linalg.norm(self.np_g) / self.np_g.size
for i in range(g.size):
self.assertAlmostEqual(g[i], self.np_g[i] * threshold / nrm)
def test_regularizer(self):
coefficient = 0.0001
reg = opt.L2Regularizer(coefficient)
reg.apply(0, self.W, self.g)
g = tensor.to_numpy(self.g)
for i in range(g.size):
self.assertAlmostEqual(g[i],
self.np_g[i] + coefficient * self.np_W[i])
@unittest.skipIf(not singa_wrap.USE_CUDA, 'CUDA is not enabled')
def test_regularizer_cuda(self):
coefficient = 0.0001
reg = opt.L2Regularizer(coefficient)
self.to_cuda()
reg.apply(0, self.W, self.g)
self.g.to_host()
g = tensor.to_numpy(self.g)
for i in range(g.size):
self.assertAlmostEqual(g[i],
self.np_g[i] + coefficient * self.np_W[i])
@unittest.skipIf(not singa_wrap.USE_CUDA, 'CUDA is not enabled')
def test_adam_cuda(self):
lr = 0.1
n, m = 4, 6
p1 = np.random.rand(n, m)
p2 = np.random.rand(n, m)
g1 = np.random.rand(n, m) * 0.01
g2 = np.random.rand(n, m) * 0.01
m1 = np.zeros((n, m))
m2 = np.zeros((n, m))
v1 = np.zeros((n, m))
v2 = np.zeros((n, m))
t1 = tensor.from_numpy(p1)
t2 = tensor.from_numpy(p2)
tg1 = tensor.from_numpy(g1)
tg2 = tensor.from_numpy(g2)
for t in range(1, 10):
np_adam([p1, p2], [g1, g2], [m1, m2], [v1, v2], lr, t)
adam = opt.Adam(lr=lr)
self.to_cuda()
for t in range(1, 10):
adam.apply(0, tg1, t1, 'p1', t)
adam.apply(0, tg2, t2, 'p2', t)
t1 = tensor.to_numpy(t1)
t2 = tensor.to_numpy(t2)
for t, p in zip([t1, t2], [p1, p2]):
for i in range(n):
for j in range(m):
self.assertAlmostEqual(t[i, j], p[i, j], 6)
def test_rmsprop(self):
lr = 0.1
n, m = 2, 2
p1 = np.random.rand(n, m)
p2 = np.random.rand(n, m)
g1 = np.random.rand(n, m) * 0.01
g2 = np.random.rand(n, m) * 0.01
v1 = np.zeros((n, m))
v2 = np.zeros((n, m))
t1 = tensor.from_numpy(p1)
t2 = tensor.from_numpy(p2)
tg1 = tensor.from_numpy(g1)
tg2 = tensor.from_numpy(g2)
for t in range(1, 4):
np_rmsprop([p1, p2], [g1, g2], [v1, v2], lr, t)
rsmprop = opt.RMSProp(lr=lr)
for t in range(1, 4):
rsmprop.apply(0, tg1, t1, 'p1', t)
rsmprop.apply(0, tg2, t2, 'p2', t)
t1 = tensor.to_numpy(t1)
t2 = tensor.to_numpy(t2)
for t, p in zip([t1, t2], [p1, p2]):
for i in range(n):
for j in range(m):
self.assertAlmostEqual(t[i, j], p[i, j], 2)
@unittest.skipIf(not singa_wrap.USE_CUDA, 'CUDA is not enabled')
def test_rmsprop_cuda(self):
lr = 0.1
n, m = 2, 2
p1 = np.random.rand(n, m)
p2 = np.random.rand(n, m)
g1 = np.random.rand(n, m) * 0.01
g2 = np.random.rand(n, m) * 0.01
v1 = np.zeros((n, m))
v2 = np.zeros((n, m))
t1 = tensor.from_numpy(p1)
t2 = tensor.from_numpy(p2)
tg1 = tensor.from_numpy(g1)
tg2 = tensor.from_numpy(g2)
for t in range(1, 4):
np_rmsprop([p1, p2], [g1, g2], [v1, v2], lr, t)
rsmprop = opt.RMSProp(lr=lr)
self.to_cuda()
for t in range(1, 4):
rsmprop.apply(0, tg1, t1, 'p1', t)
rsmprop.apply(0, tg2, t2, 'p2', t)
t1 = tensor.to_numpy(t1)
t2 = tensor.to_numpy(t2)
for t, p in zip([t1, t2], [p1, p2]):
for i in range(n):
for j in range(m):
self.assertAlmostEqual(t[i, j], p[i, j], 2)
def test_momentum(self):
lr = 0.1
n, m = 2, 2
p1 = np.random.rand(n, m)
p2 = np.random.rand(n, m)
g1 = np.random.rand(n, m) * 0.01
g2 = np.random.rand(n, m) * 0.01
v1 = np.zeros((n, m))
v2 = np.zeros((n, m))
t1 = tensor.from_numpy(p1)
t2 = tensor.from_numpy(p2)
tg1 = tensor.from_numpy(g1)
tg2 = tensor.from_numpy(g2)
for t in range(1, 4):
np_momentum([p1, p2], [g1, g2], [v1, v2], lr, t)
momentum = opt.SGD(lr, momentum=0.9)
for t in range(1, 4):
momentum.apply(0, tg1, t1, 'p1', t)
momentum.apply(0, tg2, t2, 'p2', t)
t1 = tensor.to_numpy(t1)
t2 = tensor.to_numpy(t2)
for t, p in zip([t1, t2], [p1, p2]):
for i in range(n):
for j in range(m):
self.assertAlmostEqual(t[i, j], p[i, j], 2)
@unittest.skipIf(not singa_wrap.USE_CUDA, 'CUDA is not enabled')
def test_momentum_cuda(self):
lr = 0.1
n, m = 2, 2
p1 = np.random.rand(n, m)
p2 = np.random.rand(n, m)
g1 = np.random.rand(n, m) * 0.01
g2 = np.random.rand(n, m) * 0.01
v1 = np.zeros((n, m))
v2 = np.zeros((n, m))
t1 = tensor.from_numpy(p1)
t2 = tensor.from_numpy(p2)
tg1 = tensor.from_numpy(g1)
tg2 = tensor.from_numpy(g2)
for t in range(1, 4):
np_momentum([p1, p2], [g1, g2], [v1, v2], lr, t)
momentum = opt.SGD(lr, momentum=0.9)
self.to_cuda()
for t in range(1, 4):
momentum.apply(0, tg1, t1, 'p1', t)
momentum.apply(0, tg2, t2, 'p2', t)
t1 = tensor.to_numpy(t1)
t2 = tensor.to_numpy(t2)
for t, p in zip([t1, t2], [p1, p2]):
for i in range(n):
for j in range(m):
self.assertAlmostEqual(t[i, j], p[i, j], 2)
def test_adagrad(self):
lr = 0.1
n, m = 2, 2
p1 = np.random.rand(n, m)
p2 = np.random.rand(n, m)
g1 = np.random.rand(n, m) * 0.01
g2 = np.random.rand(n, m) * 0.01
v1 = np.zeros((n, m))
v2 = np.zeros((n, m))
t1 = tensor.from_numpy(p1)
t2 = tensor.from_numpy(p2)
tg1 = tensor.from_numpy(g1)
tg2 = tensor.from_numpy(g2)
for t in range(1, 4):
np_adagrad([p1, p2], [g1, g2], [v1, v2], lr, t)
adagrad = opt.AdaGrad(lr=lr)
for t in range(1, 4):
adagrad.apply(0, tg1, t1, 'p1', t)
adagrad.apply(0, tg2, t2, 'p2', t)
t1 = tensor.to_numpy(t1)
t2 = tensor.to_numpy(t2)
for t, p in zip([t1, t2], [p1, p2]):
for i in range(n):
for j in range(m):
self.assertAlmostEqual(t[i, j], p[i, j], 2)
@unittest.skipIf(not singa_wrap.USE_CUDA, 'CUDA is not enabled')
def test_adagrad_cuda(self):
lr = 0.1
n, m = 2, 2
p1 = np.random.rand(n, m)
p2 = np.random.rand(n, m)
g1 = np.random.rand(n, m) * 0.01
g2 = np.random.rand(n, m) * 0.01
v1 = np.zeros((n, m))
v2 = np.zeros((n, m))
t1 = tensor.from_numpy(p1)
t2 = tensor.from_numpy(p2)
tg1 = tensor.from_numpy(g1)
tg2 = tensor.from_numpy(g2)
for t in range(1, 4):
np_adagrad([p1, p2], [g1, g2], [v1, v2], lr, t)
adagrad = opt.AdaGrad(lr=lr)
self.to_cuda()
for t in range(1, 4):
adagrad.apply(0, tg1, t1, 'p1', t)
adagrad.apply(0, tg2, t2, 'p2', t)
t1 = tensor.to_numpy(t1)
t2 = tensor.to_numpy(t2)
for t, p in zip([t1, t2], [p1, p2]):
for i in range(n):
for j in range(m):
self.assertAlmostEqual(t[i, j], p[i, j], 2)
if __name__ == '__main__':
unittest.main()
| 32.671916
| 79
| 0.513978
|
0e60e0703979db3e572255852f6f489d24415dda
| 625
|
py
|
Python
|
a3c/config.py
|
LaneWei/doom-rl
|
26ac29caf3f434f4c8f173534b7627b349ad4615
|
[
"MIT"
] | 1
|
2018-09-23T07:18:12.000Z
|
2018-09-23T07:18:12.000Z
|
a3c/config.py
|
LaneWei/doom-rl
|
26ac29caf3f434f4c8f173534b7627b349ad4615
|
[
"MIT"
] | null | null | null |
a3c/config.py
|
LaneWei/doom-rl
|
26ac29caf3f434f4c8f173534b7627b349ad4615
|
[
"MIT"
] | null | null | null |
import itertools as it
class Config:
WORKER_THREADS = 8
LEARNING_RATE = 1e-4
DECAY_RATE = 0.997
GAMMA = 0.95
N_STEP_RETURN = 4
FRAME_REPEAT = 8
BATCH_SIZE = 64
QUEUE_LEN = 1000
IMAGE_HEIGHT = 128
IMAGE_WIDTH = 128
IMAGE_CHANNELS = 3
IMAGE_CROP_BOX = (0, 130, 640, 400)
IMAGE_GRAY_LEVEL = 8
# buttons: TURN_LEFT
# TURN_RIGHT
# MOVE_FORWARD
AVAILABLE_ACTION_BUTTONS = 3
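    # ACTION_SPACE below enumerates every button combination except pressing
    # TURN_LEFT and TURN_RIGHT at the same time, i.e. 2**3 - 2 = 6 actions.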
ACTION_SPACE = [list(a) for a in it.product([0, 1], repeat=AVAILABLE_ACTION_BUTTONS)
if a[0] != 1 or a[1] != 1]
N_ACTIONS = len(ACTION_SPACE)
| 22.321429
| 88
| 0.6096
|
ba49ba1b985f569f057b0b565ec3239f924cdff9
| 16,867
|
py
|
Python
|
pywikibot/login.py
|
anukaal/pywikibot
|
086e99d686ceebb40cb2e3dc7989e78ce6de3b85
|
[
"MIT"
] | null | null | null |
pywikibot/login.py
|
anukaal/pywikibot
|
086e99d686ceebb40cb2e3dc7989e78ce6de3b85
|
[
"MIT"
] | null | null | null |
pywikibot/login.py
|
anukaal/pywikibot
|
086e99d686ceebb40cb2e3dc7989e78ce6de3b85
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""Library to log the bot in to a wiki account."""
#
# (C) Pywikibot team, 2003-2021
#
# Distributed under the terms of the MIT license.
#
import codecs
import os
import webbrowser
from enum import IntEnum
from typing import Any, Optional
from warnings import warn
import pywikibot
import pywikibot.data.api
from pywikibot import __url__, config
from pywikibot.backports import Dict, Tuple
from pywikibot.comms import http
from pywikibot.exceptions import APIError, NoUsernameError
from pywikibot.tools import (
ModuleDeprecationWrapper,
deprecated_args,
file_mode_checker,
normalize_username,
remove_last_args,
)
try:
import mwoauth
except ImportError as e:
mwoauth = e
# TODO: replace these after T286867
OPT_SITE_TYPE = Any # Optional['pywikibot.site.BaseSite']
class _PasswordFileWarning(UserWarning):
"""The format of password file is incorrect."""
_logger = 'wiki.login'
# On some wikis you are only allowed to run a bot if there is a link to
# the bot's user page in a specific list.
# If bots are listed in a template, the template's name must be given as
# the second parameter, otherwise it must be None
botList = {
'wikipedia': {
'simple': ['Wikipedia:Bots', '/links']
},
}
class LoginStatus(IntEnum):
"""
Enum for Login statuses.
>>> LoginStatus.NOT_ATTEMPTED
LoginStatus(-3)
>>> LoginStatus.IN_PROGRESS.value
-2
>>> LoginStatus.NOT_LOGGED_IN.name
'NOT_LOGGED_IN'
>>> int(LoginStatus.AS_USER)
0
>>> LoginStatus(-3).name
'NOT_ATTEMPTED'
>>> LoginStatus(0).name
'AS_USER'
"""
NOT_ATTEMPTED = -3
IN_PROGRESS = -2
NOT_LOGGED_IN = -1
AS_USER = 0
def __repr__(self) -> str:
"""Return internal representation."""
return 'LoginStatus({})'.format(self)
class LoginManager:
"""Site login manager."""
@deprecated_args(username='user', verbose=True, sysop=True)
def __init__(self, password: Optional[str] = None,
site: OPT_SITE_TYPE = None,
user: Optional[str] = None) -> None:
"""
Initializer.
All parameters default to defaults in user-config.
:param site: Site object to log into
:param user: username to use.
If user is None, the username is loaded from config.usernames.
:param password: password to use
:raises pywikibot.exceptions.NoUsernameError: No username is configured
for the requested site.
"""
site = self.site = site or pywikibot.Site()
if not user:
config_names = config.usernames
code_to_usr = config_names[site.family.name] or config_names['*']
try:
user = code_to_usr.get(site.code) or code_to_usr['*']
except KeyError:
raise NoUsernameError(
'ERROR: '
'username for {site.family.name}:{site.code} is undefined.'
'\nIf you have a username for that site, '
'please add a line to user-config.py as follows:\n'
"usernames['{site.family.name}']['{site.code}'] = "
"'myUsername'"
.format(site=site))
self.password = password
self.login_name = self.username = user
if getattr(config, 'password_file', ''):
self.readPassword()
def check_user_exists(self) -> None:
"""
Check that the username exists on the site.
:see: https://www.mediawiki.org/wiki/API:Users
:raises pywikibot.exceptions.NoUsernameError: Username doesn't exist in
user list.
"""
# convert any Special:BotPassword usernames to main account equivalent
main_username = self.username
if '@' in self.username:
warn(
'When using BotPasswords it is recommended that you store '
'your login credentials in a password_file instead. See '
'{}/BotPasswords for instructions and more information.'
.format(__url__))
main_username = self.username.partition('@')[0]
try:
data = self.site.allusers(start=main_username, total=1)
user = next(iter(data))
except APIError as e:
if e.code == 'readapidenied':
pywikibot.warning("Could not check user '{}' exists on {}"
.format(main_username, self.site))
return
raise
if user['name'] != main_username:
# Report the same error as server error code NotExists
raise NoUsernameError("Username '{}' does not exist on {}"
.format(main_username, self.site))
def botAllowed(self) -> bool:
"""
Check whether the bot is listed on a specific page.
This allows bots to comply with the policy on the respective wiki.
"""
code, fam = self.site.code, self.site.family.name
if code in botList.get(fam, []):
botlist_pagetitle, bot_template_title = botList[fam][code]
botlist_page = pywikibot.Page(self.site, botlist_pagetitle)
if bot_template_title:
for template, params in botlist_page.templatesWithParams():
if (template.title() == bot_template_title
and params[0] == self.username):
return True
else:
for linked_page in botlist_page.linkedPages():
if linked_page.title(with_ns=False) == self.username:
return True
return False
# No bot policies on other sites
return True
def login_to_site(self) -> None:
"""Login to the site."""
# THIS IS OVERRIDDEN IN data/api.py
raise NotImplementedError
@remove_last_args(['data'])
def storecookiedata(self) -> None:
"""Store cookie data."""
http.cookie_jar.save(ignore_discard=True)
def readPassword(self) -> None:
"""
Read passwords from a file.
DO NOT FORGET TO REMOVE READ ACCESS FOR OTHER USERS!!!
Use chmod 600 password-file.
All lines below should be valid Python tuples in the form
(code, family, username, password),
(family, username, password) or
(username, password)
        to set a default password for a username. The last matching entry will
be used, so default usernames should occur above specific usernames.
For BotPasswords the password should be given as a BotPassword object.
The file must be either encoded in ASCII or UTF-8.
Example::
('my_username', 'my_default_password')
('wikipedia', 'my_wikipedia_user', 'my_wikipedia_pass')
('en', 'wikipedia', 'my_en_wikipedia_user', 'my_en_wikipedia_pass')
('my_username', BotPassword(
'my_BotPassword_suffix', 'my_BotPassword_password'))
"""
# Set path to password file relative to the user_config
# but fall back on absolute path for backwards compatibility
assert config.base_dir is not None and config.password_file is not None
password_file = os.path.join(config.base_dir, config.password_file)
if not os.path.isfile(password_file):
password_file = config.password_file
# We fix password file permission first.
file_mode_checker(password_file, mode=config.private_files_permission)
with codecs.open(password_file, encoding='utf-8') as f:
lines = f.readlines()
line_nr = len(lines) + 1
for line in reversed(lines):
line_nr -= 1
if not line.strip() or line.startswith('#'):
continue
try:
entry = eval(line)
except SyntaxError:
entry = None
if not isinstance(entry, tuple):
warn('Invalid tuple in line {}'.format(line_nr),
_PasswordFileWarning)
continue
if not 2 <= len(entry) <= 4:
warn('The length of tuple in line {} should be 2 to 4 ({} '
'given)'.format(line_nr, entry), _PasswordFileWarning)
continue
code, family, username, password = (
self.site.code, self.site.family.name)[:4 - len(entry)] + entry
if (normalize_username(username) == self.username
and family == self.site.family.name
and code == self.site.code):
if isinstance(password, str):
self.password = password
break
if isinstance(password, BotPassword):
self.password = password.password
self.login_name = password.login_name(self.username)
break
warn('Invalid password format', _PasswordFileWarning)
_api_error = {
'NotExists': 'does not exist',
'Illegal': 'is invalid',
'readapidenied': 'does not have read permissions',
'Failed': 'does not have read permissions',
'FAIL': 'does not have read permissions',
}
def login(self, retry: bool = False, autocreate: bool = False) -> bool:
"""
Attempt to log into the server.
:see: https://www.mediawiki.org/wiki/API:Login
:param retry: infinitely retry if the API returns an unknown error
:param autocreate: if true, allow auto-creation of the account
using unified login
:raises pywikibot.exceptions.NoUsernameError: Username is not
recognised by the site.
"""
if not self.password:
# First check that the username exists,
# to avoid asking for a password that will not work.
if not autocreate:
self.check_user_exists()
# As we don't want the password to appear on the screen, we set
# password = True
self.password = pywikibot.input(
'Password for user {name} on {site} (no characters will be '
'shown):'.format(name=self.login_name, site=self.site),
password=True)
pywikibot.output('Logging in to {site} as {name}'
.format(name=self.login_name, site=self.site))
try:
self.login_to_site()
except APIError as e:
error_code = e.code
pywikibot.error('Login failed ({}).'.format(error_code))
if error_code in self._api_error:
error_msg = 'Username "{}" {} on {}'.format(
self.login_name, self._api_error[error_code], self.site)
if error_code in ('Failed', 'FAIL'):
error_msg += '\n.{}'.format(e.info)
raise NoUsernameError(error_msg)
# TODO: investigate other unhandled API codes (bug T75539)
if retry:
self.password = None
return self.login(retry=False)
else:
self.storecookiedata()
pywikibot.log('Should be logged in now')
return True
return False
class BotPassword:
"""BotPassword object for storage in password file."""
def __init__(self, suffix: str, password: str) -> None:
"""
Initializer.
        A BotPassword works by using a separate password paired with a
        suffixed username of the form <username>@<suffix>.
:param suffix: Suffix of the login name
:param password: bot password
:raises _PasswordFileWarning: suffix improperly specified
"""
if '@' in suffix:
warn('The BotPassword entry should only include the suffix',
_PasswordFileWarning)
self.suffix = suffix
self.password = password
def login_name(self, username: str) -> str:
"""
Construct the login name from the username and suffix.
        :param username: username (without suffix)
"""
return '{}@{}'.format(username, self.suffix)
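# Minimal usage sketch (hypothetical values): a password-file entry such as
#     ('my_username', BotPassword('my_suffix', 'my_bot_password'))
# makes LoginManager use 'my_username@my_suffix' as the login name together
# with the given bot password.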
class OauthLoginManager(LoginManager):
"""Site login manager using OAuth."""
    # NOTE: Currently OauthLoginManager uses mwoauth directly to complete the
    # OAuth authentication process
@deprecated_args(sysop=True)
def __init__(self, password: Optional[str] = None,
site: OPT_SITE_TYPE = None,
user: Optional[str] = None) -> None:
"""
Initializer.
All parameters default to defaults in user-config.
:param site: Site object to log into
:param user: consumer key
:param password: consumer secret
:raises pywikibot.exceptions.NoUsernameError: No username is configured
for the requested site.
:raises ImportError: mwoauth isn't installed
"""
if isinstance(mwoauth, ImportError):
raise ImportError('mwoauth is not installed: {}.'.format(mwoauth))
assert password is not None and user is not None
super().__init__(password=None, site=site, user=None)
if self.password:
pywikibot.warn('Password exists in password file for {login.site}:'
'{login.username}. Password is unnecessary and '
'should be removed if OAuth enabled.'
.format(login=self))
self._consumer_token = (user, password)
self._access_token = None # type: Optional[Tuple[str, str]]
def login(self, retry: bool = False, force: bool = False) -> bool:
"""
Attempt to log into the server.
:see: https://www.mediawiki.org/wiki/API:Login
        :param retry: infinitely retry if an exception occurs during
            authentication.
:param force: force to re-authenticate
"""
if self.access_token is None or force:
pywikibot.output(
'Logging in to {site} via OAuth consumer {key}'
.format(key=self.consumer_token[0], site=self.site))
consumer_token = mwoauth.ConsumerToken(*self.consumer_token)
handshaker = mwoauth.Handshaker(
self.site.base_url(self.site.path()), consumer_token)
try:
redirect, request_token = handshaker.initiate()
pywikibot.stdout('Authenticate via web browser..')
webbrowser.open(redirect)
pywikibot.stdout('If your web browser does not open '
'automatically, please point it to: {}'
.format(redirect))
request_qs = pywikibot.input('Response query string: ')
access_token = handshaker.complete(request_token, request_qs)
self._access_token = (access_token.key, access_token.secret)
return True
except Exception as e:
pywikibot.error(e)
if retry:
return self.login(retry=True, force=force)
else:
return False
else:
pywikibot.output('Logged in to {site} via consumer {key}'
.format(key=self.consumer_token[0],
site=self.site))
return True
@property
def consumer_token(self) -> Tuple[str, str]:
"""
Return OAuth consumer key token and secret token.
:see: https://www.mediawiki.org/wiki/API:Tokens
"""
return self._consumer_token
@property
def access_token(self) -> Optional[Tuple[str, str]]:
"""
Return OAuth access key token and secret token.
:see: https://www.mediawiki.org/wiki/API:Tokens
"""
return self._access_token
@property
def identity(self) -> Optional[Dict[str, Any]]:
"""Get identifying information about a user via an authorized token."""
if self.access_token is None:
pywikibot.error('Access token not set')
return None
consumer_token = mwoauth.ConsumerToken(*self.consumer_token)
access_token = mwoauth.AccessToken(*self.access_token)
try:
identity = mwoauth.identify(self.site.base_url(self.site.path()),
consumer_token, access_token)
return identity
except Exception as e:
pywikibot.error(e)
return None
OAuthImpossible = ImportError
wrapper = ModuleDeprecationWrapper(__name__)
wrapper.add_deprecated_attr(
'OAuthImpossible',
replacement_name='ImportError',
since='20210423')
| 34.849174
| 79
| 0.586471
|
192bbcc4a5d655d1c1296fa3065b041056e12be1
| 1,053
|
py
|
Python
|
examples/html_timetable.py
|
python-webuntis/python-webuntis
|
51aa2df293f01dcdf2c278386d513dbfc1c2115c
|
[
"BSD-3-Clause"
] | 22
|
2018-02-01T15:59:12.000Z
|
2022-02-10T20:25:03.000Z
|
examples/html_timetable.py
|
AugustH/python-webuntis
|
e440a0364770e6992b34c05fb8ad705a2ec493ea
|
[
"BSD-3-Clause"
] | 25
|
2017-07-05T18:06:52.000Z
|
2022-03-28T19:27:05.000Z
|
examples/html_timetable.py
|
AugustH/python-webuntis
|
e440a0364770e6992b34c05fb8ad705a2ec493ea
|
[
"BSD-3-Clause"
] | 12
|
2017-08-19T17:48:47.000Z
|
2022-02-22T07:36:12.000Z
|
from credentials import s
import datetime
import logging
# ***DO NOT USE THIS EXAMPLE AS-IS***
# Properties that are printed here may contain arbitrary
# *unescaped* HTML. That is not expected, but you should not trust
# input from remote sources in general.
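# A minimal escaping sketch (hypothetical, not part of the original example):
# if you do render remote values into HTML, escape them first with the
# standard-library html module, e.g.
#     import html
#     print('<td>{}</td>'.format(html.escape(su.name)))
# html.escape replaces &, <, > (and, by default, quotes) with HTML-safe entities.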
logging.basicConfig(level=logging.DEBUG)
today = datetime.date.today()
monday = today - datetime.timedelta(days=today.weekday())
friday = monday + datetime.timedelta(days=4)
klasse = s.klassen().filter(name='1A')[0]
table = s.timetable(klasse=klasse, start=monday, end=friday).to_table()
print('<table border="1"><thead><th>Time</th>')
for weekday in ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']:
print('<th>' + str(weekday) + '</th>')
print('</thead><tbody>')
for time, row in table:
print('<tr>')
print('<td>{}</td>'.format(time.strftime('%H:%M')))
for date, cell in row:
print('<td>')
for period in cell:
print(', '.join(su.name for su in period.subjects))
print('</td>')
print('</tr>')
print('</tbody></table>')
| 27.710526
| 72
| 0.650522
|
8f30c3d5e5efe5f5dc700c98731bf3dfcce61906
| 75,458
|
py
|
Python
|
lrs/tests/AuthTests.py
|
Sembian/ADL_LRS
|
3535dad6371af3f9f5b67f7eabfd0f4a393e0d62
|
[
"Apache-2.0"
] | null | null | null |
lrs/tests/AuthTests.py
|
Sembian/ADL_LRS
|
3535dad6371af3f9f5b67f7eabfd0f4a393e0d62
|
[
"Apache-2.0"
] | null | null | null |
lrs/tests/AuthTests.py
|
Sembian/ADL_LRS
|
3535dad6371af3f9f5b67f7eabfd0f4a393e0d62
|
[
"Apache-2.0"
] | null | null | null |
from django.test import TestCase
from django.core.urlresolvers import reverse
from lrs import views, models
from django.conf import settings
import json
import base64
import uuid
from datetime import datetime, timedelta
from django.utils.timezone import utc
import urllib
from lrs.util import retrieve_statement
import hashlib
class AuthTests(TestCase):
# Want to test no auth, so have to disable both auths
@classmethod
def setUpClass(cls):
print "\n%s" % __name__
def setUp(self):
if not settings.ALLOW_EMPTY_HTTP_AUTH:
settings.ALLOW_EMPTY_HTTP_AUTH = True
if settings.OAUTH_ENABLED:
settings.OAUTH_ENABLED = False
self.auth = "Basic %s" % base64.b64encode("%s:%s" % ('',''))
self.guid1 = str(uuid.uuid1())
self.guid2 = str(uuid.uuid1())
self.guid3 = str(uuid.uuid1())
self.guid4 = str(uuid.uuid1())
self.guid5 = str(uuid.uuid1())
self.guid6 = str(uuid.uuid1())
self.guid7 = str(uuid.uuid1())
self.guid8 = str(uuid.uuid1())
self.guid9 = str(uuid.uuid1())
self.guid10 = str(uuid.uuid1())
self.cguid1 = str(uuid.uuid1())
self.cguid2 = str(uuid.uuid1())
self.cguid3 = str(uuid.uuid1())
self.cguid4 = str(uuid.uuid1())
self.cguid5 = str(uuid.uuid1())
self.cguid6 = str(uuid.uuid1())
self.cguid7 = str(uuid.uuid1())
self.cguid8 = str(uuid.uuid1())
stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/created",
"display": {"en-US":"created"}}, "object": {"id":"act:activity"},
"actor":{"objectType":"Agent","mbox":"mailto:s@s.com"}})
exist_stmt_response = self.client.post(reverse(views.statements), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(exist_stmt_response.status_code, 200)
self.exist_stmt_id = json.loads(exist_stmt_response.content)[0]
self.firstTime = str(datetime.utcnow().replace(tzinfo=utc).isoformat())
self.existStmt1 = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/created",
"display": {"en-US":"created"}},"actor":{"objectType":"Agent","mbox":"mailto:s@s.com"},
"object": {"objectType": "Activity", "id":"act:foogie",
"definition": {"name": {"en-US":"testname2", "en-GB": "altname"},
"description": {"en-US":"testdesc2", "en-GB": "altdesc"}, "type": "http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in","correctResponsesPattern": ["answer"],
"extensions": {"ext:key1": "value1", "ext:key2": "value2","ext:key3": "value3"}}},
"result": {"score":{"scaled":.85}, "completion": True, "success": True, "response": "kicked",
"duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:key1": "value1", "ext:key2":"value2"}},
"context":{"registration": self.cguid1, "contextActivities": {"other": {"id": "act:NewActivityID2"}},
"revision": "food", "platform":"bard","language": "en-US", "extensions":{"ext:ckey1": "cval1",
"ext:ckey2": "cval2"}}})
self.existStmt2 = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/created",
"display": {"en-US":"created"}},"actor":{"objectType":"Agent","mbox":"mailto:s@t.com"},
"object": {"objectType": "Activity", "id":"act:foogie",
"definition": {"name": {"en-US":"testname3", "en-GB": "altname"},
"description": {"en-US":"testdesc3","en-GB":"altdesc"}, "type": "http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in","correctResponsesPattern": ["answers"],
"extensions": {"ext:key11": "value11", "ext:key22": "value22","ext:key33": "value33"}}},
"result": {"score":{"scaled":.75}, "completion": True, "success": True, "response": "shouted",
"duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:dkey1": "dvalue1", "ext:dkey2":"dvalue2"}},
"context":{"registration": self.cguid2, "contextActivities": {"other": {"id": "act:NewActivityID22"}},
"revision": "food", "platform":"bard","language": "en-US", "extensions":{"ext:ckey11": "cval11",
"ext:ckey22": "cval22"}}})
self.existStmt3 = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/created",
"display": {"en-US":"created"}},"actor":{"objectType":"Agent","mbox":"mailto:s@s.com"},
"object": {"objectType": "Activity", "id":"act:act:foogals",
"definition": {"name": {"en-US":"testname3"},"description": {"en-US":"testdesc3"}, "type": "http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in","correctResponsesPattern": ["answers"],
"extensions": {"ext:key111": "value111", "ext:key222": "value222","ext:key333": "value333"}}},
"result": {"score":{"scaled":.79}, "completion": True, "success": True, "response": "shouted",
"duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:dkey1": "dvalue1", "ext:dkey2":"dvalue2"}},
"context":{"registration": self.cguid3, "contextActivities": {"other": {"id": "act:NewActivityID22"}},
"revision": "food", "platform":"bard","language": "en-US",
"instructor":{"objectType": "Agent", "name":"bob", "mbox":"mailto:bob@bob.com"},
"extensions":{"ext:ckey111": "cval111","ext:ckey222": "cval222"}}})
self.existStmt4 = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/created",
"display": {"en-US":"created"}},"actor":{"objectType":"Agent","mbox":"mailto:s@s.com"},
"object": {"objectType": "Activity", "id":"act:foogal",
"definition": {"name": {"en-US":"testname3"},"description": {"en-US":"testdesc3"}, "type": "http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in","correctResponsesPattern": ["answers"],
"extensions": {"ext:key111": "value111", "ext:key222": "value222","ext:key333": "value333"}}},
"result": {"score":{"scaled":.79}, "completion": True, "success": True, "response": "shouted",
"duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:dkey1": "dvalue1", "ext:dkey2":"dvalue2"}},
"context":{"registration": self.cguid4, "contextActivities": {"other": {"id": "act:NewActivityID22"}},
"revision": "food", "platform":"bard","language": "en-US","instructor":{"name":"bill", "mbox":"mailto:bill@bill.com"},
"extensions":{"ext:ckey111": "cval111","ext:ckey222": "cval222"}}})
self.existStmt5 = json.dumps({"object":{"objectType":"Agent","name":"jon","mbox":"mailto:jon@jon.com"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/created","display": {"en-US":"created"}},
"actor":{"objectType":"Agent","mbox":"mailto:s@s.com"}})
self.existStmt6 = json.dumps({"actor": {"objectType":"Agent","name":"max","mbox":"mailto:max@max.com"},
"object":{"id": "act:test_activity"},"verb":{"id": "http://adlnet.gov/expapi/verbs/created",
"display": {"en-US":"created"}}})
self.existStmt7 = json.dumps({"object": {"objectType":"Agent","name":"max","mbox":"mailto:max@max.com"},
"verb": {"id": "http://adlnet.gov/expapi/verbs/created","display": {"en-US":"created"}},
"actor":{"objectType":"Agent","mbox":"mailto:s@s.com"}})
self.existStmt8 = json.dumps({"object": {"objectType":"Agent","name":"john","mbox":"mailto:john@john.com"},
"verb": {"id": "http://adlnet.gov/expapi/verbs/missed","display": {"en-US":"missed"}},
"actor":{"objectType":"Agent","mbox":"mailto:s@s.com"}})
self.existStmt9 = json.dumps({"actor":{"objectType":"Agent","mbox":"mailto:sub@sub.com"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/missed"},"object":{"objectType":"SubStatement",
"actor":{"objectType":"Agent","mbox":"mailto:ss@ss.com"},"verb": {"id":"nested:verb/url/nested"},
"object": {"objectType":"Activity", "id":"act:testex.com"}, "result":{"completion": True, "success": True,
"response": "kicked"}, "context":{"registration": self.cguid6,
"contextActivities": {"other": {"id": "act:NewActivityID"}},"revision": "foo", "platform":"bar",
"language": "en-US", "extensions":{"ext:k1": "v1", "ext:k2": "v2"}}}})
self.existStmt10 = json.dumps({"actor":{"objectType":"Agent","mbox":"mailto:ref@ref.com"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/missed"},"object":{"objectType":"StatementRef",
"id":str(self.exist_stmt_id)}})
# Put statements
param = {"statementId":self.guid1}
path = "%s?%s" % (reverse(views.statements), urllib.urlencode(param))
stmt_payload = self.existStmt1
self.putresponse1 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(self.putresponse1.status_code, 204)
time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=2)).replace(tzinfo=utc).isoformat()))
stmt = models.Statement.objects.filter(statement_id=self.guid1).update(stored=time)
param = {"statementId":self.guid3}
path = "%s?%s" % (reverse(views.statements), urllib.urlencode(param))
stmt_payload = self.existStmt3
self.putresponse3 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(self.putresponse3.status_code, 204)
time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=3)).replace(tzinfo=utc).isoformat()))
stmt = models.Statement.objects.filter(statement_id=self.guid3).update(stored=time)
param = {"statementId":self.guid4}
path = "%s?%s" % (reverse(views.statements), urllib.urlencode(param))
stmt_payload = self.existStmt4
self.putresponse4 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(self.putresponse4.status_code, 204)
time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=4)).replace(tzinfo=utc).isoformat()))
stmt = models.Statement.objects.filter(statement_id=self.guid4).update(stored=time)
self.secondTime = str((datetime.utcnow()+timedelta(seconds=4)).replace(tzinfo=utc).isoformat())
param = {"statementId":self.guid2}
path = "%s?%s" % (reverse(views.statements), urllib.urlencode(param))
stmt_payload = self.existStmt2
self.putresponse2 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(self.putresponse2.status_code, 204)
time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=6)).replace(tzinfo=utc).isoformat()))
stmt = models.Statement.objects.filter(statement_id=self.guid2).update(stored=time)
param = {"statementId":self.guid5}
path = "%s?%s" % (reverse(views.statements), urllib.urlencode(param))
stmt_payload = self.existStmt5
self.putresponse5 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(self.putresponse5.status_code, 204)
time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=7)).replace(tzinfo=utc).isoformat()))
stmt = models.Statement.objects.filter(statement_id=self.guid5).update(stored=time)
param = {"statementId":self.guid6}
path = "%s?%s" % (reverse(views.statements), urllib.urlencode(param))
stmt_payload = self.existStmt6
self.putresponse6 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(self.putresponse6.status_code, 204)
time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=8)).replace(tzinfo=utc).isoformat()))
stmt = models.Statement.objects.filter(statement_id=self.guid6).update(stored=time)
param = {"statementId":self.guid7}
path = "%s?%s" % (reverse(views.statements), urllib.urlencode(param))
stmt_payload = self.existStmt7
self.putresponse7 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(self.putresponse7.status_code, 204)
time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=9)).replace(tzinfo=utc).isoformat()))
stmt = models.Statement.objects.filter(statement_id=self.guid7).update(stored=time)
param = {"statementId":self.guid8}
path = "%s?%s" % (reverse(views.statements), urllib.urlencode(param))
stmt_payload = self.existStmt8
self.putresponse8 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(self.putresponse8.status_code, 204)
time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=10)).replace(tzinfo=utc).isoformat()))
stmt = models.Statement.objects.filter(statement_id=self.guid8).update(stored=time)
param = {"statementId": self.guid9}
path = "%s?%s" % (reverse(views.statements), urllib.urlencode(param))
stmt_payload = self.existStmt9
self.putresponse9 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(self.putresponse9.status_code, 204)
time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=11)).replace(tzinfo=utc).isoformat()))
stmt = models.Statement.objects.filter(statement_id=self.guid9).update(stored=time)
param = {"statementId": self.guid10}
path = "%s?%s" % (reverse(views.statements), urllib.urlencode(param))
stmt_payload = self.existStmt10
self.putresponse10 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(self.putresponse10.status_code, 204)
time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=11)).replace(tzinfo=utc).isoformat()))
stmt = models.Statement.objects.filter(statement_id=self.guid10).update(stored=time)
def tearDown(self):
if settings.ALLOW_EMPTY_HTTP_AUTH:
settings.ALLOW_EMPTY_HTTP_AUTH = False
if not settings.OAUTH_ENABLED:
settings.OAUTH_ENABLED = True
def test_post_with_no_valid_params(self):
# Error will be thrown in statements class
resp = self.client.post(reverse(views.statements), {"feet":"yes","hands": {"id":"http://example.com/test_post"}},
Authorization=self.auth, content_type="application/json", X_Experience_API_Version="1.0.0")
self.assertEqual(resp.status_code, 400)
def test_post(self):
stmt = json.dumps({"actor":{"objectType": "Agent", "mbox":"mailto:t@t.com", "name":"bob"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:test_post"}})
response = self.client.post(reverse(views.statements), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(response.status_code, 200)
act = models.Activity.objects.get(activity_id="act:test_post")
self.assertEqual(act.activity_id, "act:test_post")
agent = models.Agent.objects.get(mbox="mailto:t@t.com")
self.assertEqual(agent.name, "bob")
def test_post_stmt_ref_no_existing_stmt(self):
stmt = json.dumps({"actor":{"objectType":"Agent","mbox":"mailto:ref@ref.com"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/missed"},"object":{"objectType":"StatementRef",
"id":"12345678-1234-5678-1234-567812345678"}})
response = self.client.post(reverse(views.statements), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(response.status_code, 404)
def test_post_with_actor(self):
stmt = json.dumps({"actor":{"mbox":"mailto:mr.t@example.com"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:i.pity.the.fool"}})
response = self.client.post(reverse(views.statements), stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(response.status_code, 200)
models.Agent.objects.get(mbox="mailto:mr.t@example.com")
def test_list_post(self):
stmts = json.dumps([{"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:test_list_post"}, "actor":{"objectType":"Agent", "mbox":"mailto:t@t.com"}},
{"verb":{"id": "http://adlnet.gov/expapi/verbs/failed","display": {"en-GB":"failed"}},
"object": {"id":"act:test_list_post1"}, "actor":{"objectType":"Agent", "mbox":"mailto:t@t.com"}}])
response = self.client.post(reverse(views.statements), stmts, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(response.status_code, 200)
activity1 = models.Activity.objects.get(activity_id="act:test_list_post")
activity2 = models.Activity.objects.get(activity_id="act:test_list_post1")
stmt1 = models.Statement.objects.get(object_activity=activity1)
stmt2 = models.Statement.objects.get(object_activity=activity2)
verb1 = models.Verb.objects.get(id=stmt1.verb.id)
verb2 = models.Verb.objects.get(id=stmt2.verb.id)
lang_map1 = verb1.display
lang_map2 = verb2.display
self.assertEqual(response.status_code, 200)
self.assertEqual(stmt1.verb.verb_id, "http://adlnet.gov/expapi/verbs/passed")
self.assertEqual(stmt2.verb.verb_id, "http://adlnet.gov/expapi/verbs/failed")
self.assertEqual(lang_map1.keys()[0], "en-US")
self.assertEqual(lang_map1.values()[0], "passed")
self.assertEqual(lang_map2.keys()[0], "en-GB")
self.assertEqual(lang_map2.values()[0], "failed")
def test_put(self):
guid = str(uuid.uuid1())
param = {"statementId":guid}
path = "%s?%s" % (reverse(views.statements), urllib.urlencode(param))
stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:test_put"},"actor":{"objectType":"Agent", "mbox":"mailto:t@t.com"}})
putResponse = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(putResponse.status_code, 204)
stmt = models.Statement.objects.get(statement_id=guid)
act = models.Activity.objects.get(activity_id="act:test_put")
self.assertEqual(act.activity_id, "act:test_put")
self.assertEqual(stmt.actor.mbox, "mailto:t@t.com")
self.assertEqual(stmt.verb.verb_id, "http://adlnet.gov/expapi/verbs/passed")
def test_put_with_substatement(self):
con_guid = str(uuid.uuid1())
st_guid = str(uuid.uuid1())
param = {"statementId": st_guid}
path = "%s?%s" % (reverse(views.statements), urllib.urlencode(param))
stmt = json.dumps({"actor":{"objectType":"Agent","mbox":"mailto:sass@sass.com"},
"verb": {"id":"verb:verb/url/tested"}, "object":{"objectType":"SubStatement",
"actor":{"objectType":"Agent","mbox":"mailto:ss@ss.com"},"verb": {"id":"verb:verb/url/nested"},
"object": {"objectType":"Activity", "id":"act:testex.com"}, "result":{"completion": True, "success": True,
"response": "kicked"}, "context":{"registration": con_guid,
"contextActivities": {"other": {"id": "act:NewActivityID"}},"revision": "foo", "platform":"bar",
"language": "en-US", "extensions":{"ext:k1": "v1", "ext:k2": "v2"}}}})
response = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(response.status_code, 204)
path = "%s?%s" % (reverse(views.statements), urllib.urlencode(param))
get_response = self.client.get(path, X_Experience_API_Version="1.0.0", Authorization=self.auth)
self.assertEqual(get_response.status_code, 200)
rsp = get_response.content
self.assertIn("objectType",rsp)
self.assertIn("SubStatement", rsp)
self.assertIn("actor",rsp)
self.assertIn("mailto:ss@ss.com",rsp)
self.assertIn("verb",rsp)
self.assertIn("verb:verb/url/nested", rsp)
self.assertIn("Activity", rsp)
self.assertIn("act:testex.com", rsp)
self.assertIn("result", rsp)
self.assertIn("completion",rsp)
self.assertIn("success", rsp)
self.assertIn("response", rsp)
self.assertIn("kicked", rsp)
self.assertIn("context", rsp)
self.assertIn(con_guid, rsp)
self.assertIn("contextActivities", rsp)
self.assertIn("other", rsp)
self.assertIn("revision", rsp)
self.assertIn("foo", rsp)
self.assertIn("platform", rsp)
self.assertIn("bar", rsp)
self.assertIn("language", rsp)
self.assertIn("en-US", rsp)
self.assertIn("extensions", rsp)
self.assertIn("ext:k1", rsp)
self.assertIn("v1", rsp)
self.assertIn("ext:k2", rsp)
self.assertIn("v2", rsp)
def test_no_content_put(self):
guid = str(uuid.uuid1())
param = {"statementId":guid}
path = "%s?%s" % (reverse(views.statements), urllib.urlencode(param))
stmt = json.dumps({})
putResponse = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(putResponse.status_code, 400)
def test_existing_stmtID_put_put(self):
guid = str(uuid.uuid1())
param = {"statementId":guid}
path = "%s?%s" % (reverse(views.statements), urllib.urlencode(param))
exist_stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:activity"},"actor":{"objectType":"Agent", "mbox":"mailto:t@t.com"}})
first_put = self.client.put(path, exist_stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(first_put.status_code, 204)
stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object":{"id":"act:test_existing_put"}, "actor":{"objectType":"Agent", "mbox":"mailto:t@t.com"}})
putResponse = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(putResponse.status_code, 409)
def test_existing_stmtID_put_post(self):
guid = str(uuid.uuid1())
exist_stmt = json.dumps({"id": guid, "verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:activity"},"actor":{"objectType":"Agent", "mbox":"mailto:t@t.com"}})
post = self.client.post(reverse(views.statements), exist_stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(post.status_code, 200)
param = {"statementId":guid}
path = "%s?%s" % (reverse(views.statements), urllib.urlencode(param))
stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object":{"id":"act:test_existing_put"}, "actor":{"objectType":"Agent", "mbox":"mailto:t@t.com"}})
putResponse = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(putResponse.status_code, 409)
def test_missing_stmtID_put(self):
stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:act:test_put"},"actor":{"objectType":"Agent", "mbox":"mailto:t@t.com"}})
response = self.client.put(reverse(views.statements), stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(response.status_code, 400)
self.assertIn(response.content, "Error -- statements - method = PUT, but no statementId parameter or ID given in statement")
def test_get(self):
param = {"statementId":self.guid1}
path = "%s?%s" % (reverse(views.statements), urllib.urlencode(param))
getResponse = self.client.get(path, X_Experience_API_Version="1.0.0", Authorization=self.auth)
self.assertEqual(getResponse.status_code, 200)
rsp = getResponse.content
self.assertIn(self.guid1, rsp)
def test_get_no_existing_ID(self):
param = {"statementId":"aaaaaa"}
path = "%s?%s" % (reverse(views.statements), urllib.urlencode(param))
getResponse = self.client.get(path, X_Experience_API_Version="1.0.0", Authorization=self.auth)
self.assertEqual(getResponse.status_code, 404)
def test_get_no_statementid(self):
getResponse = self.client.get(reverse(views.statements), X_Experience_API_Version="1.0.0", Authorization=self.auth)
self.assertEqual(getResponse.status_code, 200)
jsn = json.loads(getResponse.content)
self.assertEqual(len(jsn["statements"]), 11)
    # Several activities are PUT - contextActivities create 3 more
def test_number_of_activities(self):
acts = len(models.Activity.objects.all())
self.assertEqual(9, acts)
def test_update_activity_correct_auth(self):
stmt = json.dumps({"verb": {"id":"verb:verb/url/changed-act"},"actor":{"objectType":"Agent", "mbox":"mailto:l@l.com"},
"object": {"objectType": "Activity", "id":"act:foogie",
"definition": {"name": {"en-US":"testname3"},"description": {"en-US":"testdesc3"},
"type": "http://adlnet.gov/expapi/activities/cmi.interaction","interactionType": "fill-in","correctResponsesPattern": ["answer"],
"extensions": {"ext:key1": "value1", "ext:key2": "value2","ext:key3": "value3"}}},
"result": {"score":{"scaled":.85}, "completion": True, "success": True, "response": "kicked",
"duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:key1": "value1", "ext:key2":"value2"}},
"context":{"registration": self.cguid8, "contextActivities": {"other": {"id": "act:NewActivityID2"}},
"revision": "food", "platform":"bard","language": "en-US", "extensions":{"ext:ckey1": "cval1",
"ext:ckey2": "cval2"}}})
post_response = self.client.post(reverse(views.statements), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(post_response.status_code, 200)
act = models.Activity.objects.get(activity_id="act:foogie")
name_set = act.activity_definition_name
desc_set = act.activity_definition_description
self.assertEqual(name_set.keys()[1], "en-US")
self.assertEqual(name_set.values()[1], "testname3")
self.assertEqual(name_set.keys()[0], "en-GB")
self.assertEqual(name_set.values()[0], "altname")
self.assertEqual(desc_set.keys()[1], "en-US")
self.assertEqual(desc_set.values()[1], "testdesc3")
self.assertEqual(desc_set.keys()[0], "en-GB")
self.assertEqual(desc_set.values()[0], "altdesc")
def test_cors_post_put(self):
st_id = str(uuid.uuid1())
content = {"verb":{"id":"verb:verb/url"}, "actor":{"objectType":"Agent", "mbox": "mailto:r@r.com"},
"object": {"id":"act:test_cors_post_put"}}
bdy = "statementId=%s&content=%s&Content-Type=application/json&X-Experience-API-Version=1.0.0" % (st_id, content)
path = "%s?%s" % (reverse(views.statements), urllib.urlencode({"method":"PUT"}))
response = self.client.post(path, bdy, content_type="application/x-www-form-urlencoded", Authorization=self.auth)
self.assertEqual(response.status_code, 204)
act = models.Activity.objects.get(activity_id="act:test_cors_post_put")
self.assertEqual(act.activity_id, "act:test_cors_post_put")
def test_issue_put(self):
stmt_id = "33f60b35-e1b2-4ddc-9c6f-7b3f65244430"
stmt = json.dumps({"verb":{"id":"verb:verb/uri"},"object":{"id":"act:scorm.com/JsTetris_TCAPI","definition":{"type":"type:media",
"name":{"en-US":"Js Tetris - Tin Can Prototype"},"description":{"en-US":"A game of tetris."}}},
"context":{"contextActivities":{"grouping":{"id":"act:scorm.com/JsTetris_TCAPI"}},
"registration":"6b1091be-2833-4886-b4a6-59e5e0b3c3f4"},
"actor":{"mbox":"mailto:tom.creighton.ctr@adlnet.gov","name":"Tom Creighton"}})
path = "%s?%s" % (reverse(views.statements), urllib.urlencode({"statementId":stmt_id}))
put_stmt = self.client.put(path, stmt, content_type="application/json", X_Experience_API_Version="1.0.0",Authorization=self.auth)
self.assertEqual(put_stmt.status_code, 204)
def test_post_with_group(self):
ot = "Group"
name = "the group ST"
mbox = "mailto:the.groupST@example.com"
stmt = json.dumps({"actor":{"objectType":ot, "name":name, "mbox":mbox,"member":[{"name":"agentA","mbox":"mailto:agentA@example.com"},
{"name":"agentB","mbox":"mailto:agentB@example.com"}]},"verb":{"id": "http://verb/uri/created", "display":{"en-US":"created"}},
"object": {"id":"act:i.pity.the.fool"}})
response = self.client.post(reverse(views.statements), stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(response.status_code, 200)
g = models.Agent.objects.get(mbox="mailto:the.groupST@example.com")
self.assertEquals(g.name, name)
self.assertEquals(g.mbox, mbox)
mems = g.member.values_list("name", flat=True)
self.assertEquals(len(mems), 2)
self.assertIn("agentA", mems)
self.assertIn("agentB", mems)
def test_issue_put_no_version_header(self):
stmt_id = '33f60b35-e1b2-4ddc-9c6f-7b3f65244431'
stmt = json.dumps({"verb":"verb:completed","object":{"id":"act:scorm.com/JsTetris_TCAPI/level2",
"definition":{"type":"media","name":{"en-US":"Js Tetris Level2"},
"description":{"en-US":"Starting at 1, the higher the level, the harder the game."}}},
"result":{"extensions":{"ext:time":104,"ext:apm":229,"ext:lines":5},"score":{"raw":9911,"min":0}},
"context":{"contextActivities":{"grouping":{"id":"act:scorm.com/JsTetris_TCAPI"}},
"registration":"b7be7d9d-bfe2-4917-8ccd-41a0d18dd953"},
"actor":{"name":"tom creighton","mbox":"mailto:tom@example.com"}})
path = '%s?%s' % (reverse(views.statements), urllib.urlencode({"statementId":stmt_id}))
put_stmt = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth)
self.assertEqual(put_stmt.status_code, 400)
def test_issue_put_wrong_version_header(self):
stmt_id = '33f60b35-e1b2-4ddc-9c6f-7b3f65244432'
stmt = json.dumps({"verb":"verb:completed","object":{"id":"act:scorm.com/JsTetris_TCAPI/level2",
"definition":{"type":"media","name":{"en-US":"Js Tetris Level2"},
"description":{"en-US":"Starting at 1, the higher the level, the harder the game."}}},
"result":{"extensions":{"ext:time":104,"ext:apm":229,"ext:lines":5},"score":{"raw":9911,"min":0}},
"context":{"contextActivities":{"grouping":{"id":"act:scorm.com/JsTetris_TCAPI"}},
"registration":"b7be7d9d-bfe2-4917-8ccd-41a0d18dd953"},
"actor":{"name":"tom creighton","mbox":"mailto:tom@example.com"}})
path = '%s?%s' % (reverse(views.statements), urllib.urlencode({"statementId":stmt_id}))
put_stmt = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="0.90")
self.assertEqual(put_stmt.status_code, 400)
# Use this test to make sure stmts are being returned correctly with all data - doesn't check timestamp and stored fields
def test_all_fields_activity_as_object(self):
nested_st_id = str(uuid.uuid1())
nest_param = {"statementId":nested_st_id}
nest_path = "%s?%s" % (reverse(views.statements), urllib.urlencode(nest_param))
nested_stmt = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tincan@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{"id":"http://example.adlnet.gov/tincan/example/simplestatement"}})
put_sub_stmt = self.client.put(nest_path, nested_stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(put_sub_stmt.status_code, 204)
stmt_id = str(uuid.uuid1())
context_id= str(uuid.uuid1())
param = {"statementId":stmt_id}
path = "%s?%s" % (reverse(views.statements), urllib.urlencode(param))
stmt = json.dumps({"actor":{"objectType":"Agent","name": "Lou Wolford","account":{"homePage":"http://example.com", "name":"uniqueName"}},
"verb":{"id": "http://adlnet.gov/expapi/verbs/created","display": {"en-US":"created", "en-GB":"made"}},
"object": {"objectType": "Activity", "id":"http:adlnet.gov/my/Activity/URL",
"definition": {"name": {"en-US":"actName", "en-GB": "anotherActName"},
"description": {"en-US":"This is my activity description.", "en-GB": "This is another activity description."},
"type": "http://www.adlnet.gov/experienceapi/activity-types/http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "choice",
"correctResponsesPattern": ["golf", "tetris"],
"choices":[{"id": "golf", "description": {"en-US":"Golf Example", "en-GB": "GOLF"}},
{"id": "tetris","description":{"en-US": "Tetris Example", "en-GB": "TETRIS"}},
{"id":"facebook", "description":{"en-US":"Facebook App", "en-GB": "FACEBOOK"}},
{"id":"scrabble", "description": {"en-US": "Scrabble Example", "en-GB": "SCRABBLE"}}],
"extensions": {"ext:key1": "value1", "ext:key2": "value2","ext:key3": "value3"}}},
"result": {"score":{"scaled":.85, "raw": 85, "min":0, "max":100}, "completion": True, "success": True, "response": "Well done",
"duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:resultKey1": "resultValue1", "ext:resultKey2":"resultValue2"}},
"context":{"registration": context_id, "contextActivities": {"other": {"id": "http://example.adlnet.gov/tincan/example/test"},
"grouping":{"id":"http://groupingID"} },
"revision": "Spelling error in choices.", "platform":"Platform is web browser.","language": "en-US",
"statement":{"objectType":"StatementRef", "id":str(nested_st_id)},
"extensions":{"ext:contextKey1": "contextVal1","ext:contextKey2": "contextVal2"}},
"timestamp":self.firstTime})
put_stmt = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(put_stmt.status_code, 204)
get_response = self.client.get(path, X_Experience_API_Version="1.0.0", Authorization=self.auth)
the_returned = json.loads(get_response.content)
self.assertEqual(the_returned['id'], stmt_id)
self.assertEqual(the_returned['actor']['objectType'], 'Agent')
self.assertEqual(the_returned['actor']['name'], 'Lou Wolford')
self.assertEqual(the_returned['actor']['account']['name'], 'uniqueName')
self.assertEqual(the_returned['actor']['account']['homePage'], 'http://example.com')
self.assertEqual(the_returned['verb']['id'], 'http://adlnet.gov/expapi/verbs/created')
self.assertEqual(the_returned['verb']['display']['en-GB'], 'made')
self.assertEqual(the_returned['verb']['display']['en-US'], 'created')
self.assertEqual(the_returned['result']['completion'], True)
self.assertEqual(the_returned['result']['duration'], 'P3Y6M4DT12H30M5S')
self.assertEqual(the_returned['result']['extensions']['ext:resultKey1'], 'resultValue1')
self.assertEqual(the_returned['result']['extensions']['ext:resultKey2'], 'resultValue2')
self.assertEqual(the_returned['result']['response'], 'Well done')
self.assertEqual(the_returned['result']['score']['max'], 100)
self.assertEqual(the_returned['result']['score']['min'], 0)
self.assertEqual(the_returned['result']['score']['raw'], 85)
self.assertEqual(the_returned['result']['score']['scaled'], 0.85)
self.assertEqual(the_returned['result']['success'], True)
self.assertEqual(the_returned['context']['contextActivities']['other'][0]['id'], 'http://example.adlnet.gov/tincan/example/test')
self.assertEqual(the_returned['context']['extensions']['ext:contextKey1'], 'contextVal1')
self.assertEqual(the_returned['context']['extensions']['ext:contextKey2'], 'contextVal2')
self.assertEqual(the_returned['context']['language'], 'en-US')
self.assertEqual(the_returned['context']['platform'], 'Platform is web browser.')
self.assertEqual(the_returned['context']['registration'], context_id)
self.assertEqual(the_returned['context']['revision'], 'Spelling error in choices.')
self.assertEqual(the_returned['context']['statement']['id'], str(nested_st_id))
self.assertEqual(the_returned['context']['statement']['objectType'], 'StatementRef')
# Use this test to make sure stmts are being returned correctly with all data - doesn't check timestamp, stored fields
def test_all_fields_agent_as_object(self):
nested_st_id = str(uuid.uuid1())
nest_param = {"statementId":nested_st_id}
nest_path = "%s?%s" % (reverse(views.statements), urllib.urlencode(nest_param))
nested_stmt = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tincan@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{"id":"http://example.adlnet.gov/tincan/example/simplestatement"}})
put_sub_stmt = self.client.put(nest_path, nested_stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(put_sub_stmt.status_code, 204)
stmt_id = str(uuid.uuid1())
context_id= str(uuid.uuid1())
param = {"statementId":stmt_id}
path = "%s?%s" % (reverse(views.statements), urllib.urlencode(param))
msha = hashlib.sha1("tom@example.com").hexdigest()
stmt = json.dumps({"actor":{"objectType":"Agent","name": "Lou Wolford","account":{"homePage":"http://example.com", "name":"louUniqueName"}},
"verb":{"id": "http://adlnet.gov/expapi/verbs/helped","display": {"en-US":"helped", "en-GB":"assisted"}},
"object": {"objectType":"Agent","name": "Tom Creighton","mbox_sha1sum":msha},
"result": {"score":{"scaled":.85, "raw": 85, "min":0, "max":100}, "completion": True, "success": True, "response": "Well done",
"duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:resultKey1": "resultValue1", "ext:resultKey2":"resultValue2"}},
"context":{"registration": context_id, "contextActivities": {"other": {"id": "http://example.adlnet.gov/tincan/example/test"}},
"language": "en-US",
"statement":{"objectType":"StatementRef", "id":str(nested_st_id)},
"extensions":{"ext:contextKey1": "contextVal1","ext:contextKey2": "contextVal2"}},
"timestamp":self.firstTime})
put_stmt = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(put_stmt.status_code, 204)
get_response = self.client.get(path, X_Experience_API_Version="1.0.0", Authorization=self.auth)
the_returned = json.loads(get_response.content)
self.assertEqual(the_returned['id'], stmt_id)
self.assertEqual(the_returned['actor']['objectType'], 'Agent')
self.assertEqual(the_returned['actor']['name'], 'Lou Wolford')
self.assertEqual(the_returned['actor']['account']['name'], 'louUniqueName')
self.assertEqual(the_returned['actor']['account']['homePage'], 'http://example.com')
self.assertEqual(the_returned['verb']['id'], 'http://adlnet.gov/expapi/verbs/helped')
self.assertEqual(the_returned['verb']['display']['en-GB'], 'assisted')
self.assertEqual(the_returned['verb']['display']['en-US'], 'helped')
self.assertEqual(the_returned['result']['completion'], True)
self.assertEqual(the_returned['result']['duration'], 'P3Y6M4DT12H30M5S')
self.assertEqual(the_returned['result']['extensions']['ext:resultKey1'], 'resultValue1')
self.assertEqual(the_returned['result']['extensions']['ext:resultKey2'], 'resultValue2')
self.assertEqual(the_returned['result']['response'], 'Well done')
self.assertEqual(the_returned['result']['score']['max'], 100)
self.assertEqual(the_returned['result']['score']['min'], 0)
self.assertEqual(the_returned['result']['score']['raw'], 85)
self.assertEqual(the_returned['result']['score']['scaled'], 0.85)
self.assertEqual(the_returned['result']['success'], True)
self.assertEqual(the_returned['context']['contextActivities']['other'][0]['id'], 'http://example.adlnet.gov/tincan/example/test')
self.assertEqual(the_returned['context']['extensions']['ext:contextKey1'], 'contextVal1')
self.assertEqual(the_returned['context']['extensions']['ext:contextKey2'], 'contextVal2')
self.assertEqual(the_returned['context']['language'], 'en-US')
self.assertEqual(the_returned['context']['registration'], context_id)
self.assertEqual(the_returned['context']['statement']['id'], str(nested_st_id))
self.assertEqual(the_returned['context']['statement']['objectType'], 'StatementRef')
self.assertEqual(the_returned['object']['objectType'], 'Agent')
self.assertEqual(the_returned['object']['name'], 'Tom Creighton')
self.assertEqual(the_returned['object']['mbox_sha1sum'], 'edb97c2848fc47bdd2091028de8a3b1b24933752')
# Use this test to make sure stmts are being returned correctly with all data - doesn't check timestamps or stored fields
def test_all_fields_substatement_as_object(self):
nested_st_id = str(uuid.uuid1())
nest_param = {"statementId":nested_st_id}
nest_path = "%s?%s" % (reverse(views.statements), urllib.urlencode(nest_param))
nested_stmt = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tincannest@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed", "en-GB":"graded"}},
"object":{"id":"http://example.adlnet.gov/tincan/example/simplestatement"}})
put_sub_stmt = self.client.put(nest_path, nested_stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(put_sub_stmt.status_code, 204)
nested_sub_st_id = str(uuid.uuid1())
nest_sub_param = {"statementId":nested_sub_st_id}
nest_sub_path = "%s?%s" % (reverse(views.statements), urllib.urlencode(nest_sub_param))
nested_sub_stmt = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tincannestsub@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/verb","display": {"en-US":"verb", "en-GB":"altVerb"}},
"object":{"id":"http://example.adlnet.gov/tincan/example/simplenestedsubstatement"}})
put_nest_sub_stmt = self.client.put(nest_sub_path, nested_sub_stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(put_nest_sub_stmt.status_code, 204)
stmt_id = str(uuid.uuid1())
context_id= str(uuid.uuid1())
sub_context_id= str(uuid.uuid1())
param = {"statementId":stmt_id}
path = "%s?%s" % (reverse(views.statements), urllib.urlencode(param))
stmt = json.dumps({"actor":{"objectType":"Agent","name": "Lou Wolford","account":{"homePage":"http://example.com", "name":"louUniqueName"}},
"verb":{"id": "http://adlnet.gov/expapi/verbs/said","display": {"en-US":"said", "en-GB":"talked"}},
"object": {"objectType": "SubStatement", "actor":{"objectType":"Agent","name":"Tom Creighton","mbox": "mailto:tom@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed", "en-GB": "Graded"}},
"object":{"id":"http://example.adlnet.gov/tincan/example/simplestatement",
'definition': {'name': {'en-US':'SubStatement name'},
'description': {'en-US':'SubStatement description'},
'type': 'http://adlnet.gov/expapi/activities/cmi.interaction','interactionType': 'matching',
'correctResponsesPattern': ['lou.3,tom.2,andy.1'],'source':[{'id': 'lou',
'description': {'en-US':'Lou', 'it': 'Luigi'}},{'id': 'tom','description':{'en-US': 'Tom', 'it':'Tim'}},
{'id':'andy', 'description':{'en-US':'Andy'}}],'target':[{'id':'1',
'description':{'en-US': 'ADL LRS'}},{'id':'2','description':{'en-US': 'lrs'}},
{'id':'3', 'description':{'en-US': 'the adl lrs', 'en-CH': 'the lrs'}}]}},
"result": {"score":{"scaled":.50, "raw": 50, "min":1, "max":51}, "completion": True,
"success": True, "response": "Poorly done",
"duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:resultKey11": "resultValue11", "ext:resultKey22":"resultValue22"}},
"context":{"registration": sub_context_id,
"contextActivities": {"other": {"id": "http://example.adlnet.gov/tincan/example/test/nest"}},
"revision": "Spelling error in target.", "platform":"Ipad.","language": "en-US",
"statement":{"objectType":"StatementRef", "id":str(nested_sub_st_id)},
"extensions":{"ext:contextKey11": "contextVal11","ext:contextKey22": "contextVal22"}}},
"result": {"score":{"scaled":.85, "raw": 85, "min":0, "max":100}, "completion": True, "success": True, "response": "Well done",
"duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:resultKey1": "resultValue1", "ext:resultKey2":"resultValue2"}},
"context":{"registration": context_id, "contextActivities": {"other": {"id": "http://example.adlnet.gov/tincan/example/test"}},
"revision": "Spelling error in choices.", "platform":"Platform is web browser.","language": "en-US",
"statement":{"objectType":"StatementRef", "id":str(nested_st_id)},
"extensions":{"ext:contextKey1": "contextVal1","ext:contextKey2": "contextVal2"}},
"timestamp":self.firstTime})
put_stmt = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(put_stmt.status_code, 204)
get_response = self.client.get(path, X_Experience_API_Version="1.0.0", Authorization=self.auth)
the_returned = json.loads(get_response.content)
self.assertEqual(the_returned['id'], stmt_id)
self.assertEqual(the_returned['actor']['objectType'], 'Agent')
self.assertEqual(the_returned['actor']['name'], 'Lou Wolford')
self.assertEqual(the_returned['actor']['account']['name'], 'louUniqueName')
self.assertEqual(the_returned['actor']['account']['homePage'], 'http://example.com')
self.assertEqual(the_returned['verb']['id'], 'http://adlnet.gov/expapi/verbs/said')
self.assertEqual(the_returned['verb']['display']['en-GB'], 'talked')
self.assertEqual(the_returned['verb']['display']['en-US'], 'said')
self.assertEqual(the_returned['object']['actor']['objectType'], 'Agent')
self.assertEqual(the_returned['object']['actor']['name'], 'Tom Creighton')
self.assertEqual(the_returned['object']['actor']['mbox'], 'mailto:tom@adlnet.gov')
self.assertEqual(the_returned['object']['context']['registration'], sub_context_id)
self.assertEqual(the_returned['object']['context']['language'], 'en-US')
self.assertEqual(the_returned['object']['context']['platform'], 'Ipad.')
self.assertEqual(the_returned['object']['context']['revision'], 'Spelling error in target.')
self.assertEqual(the_returned['object']['context']['statement']['id'], str(nested_sub_st_id))
self.assertEqual(the_returned['object']['context']['statement']['objectType'], 'StatementRef')
self.assertEqual(the_returned['object']['context']['contextActivities']['other'][0]['id'], 'http://example.adlnet.gov/tincan/example/test/nest')
self.assertEqual(the_returned['object']['context']['extensions']['ext:contextKey11'], 'contextVal11')
self.assertEqual(the_returned['object']['context']['extensions']['ext:contextKey22'], 'contextVal22')
self.assertEqual(the_returned['object']['object']['id'], 'http://example.adlnet.gov/tincan/example/simplestatement')
self.assertEqual(the_returned['object']['object']['definition']['type'], 'http://adlnet.gov/expapi/activities/cmi.interaction')
self.assertEqual(the_returned['object']['object']['definition']['description']['en-US'], 'SubStatement description')
self.assertEqual(the_returned['object']['object']['definition']['interactionType'], 'matching')
self.assertEqual(the_returned['object']['object']['definition']['name']['en-US'], 'SubStatement name')
        # Arrays are compared via their JSON serialization below, since element order is not guaranteed
source_str = json.dumps(the_returned['object']['object']['definition']['source'])
self.assertIn('description', source_str)
self.assertIn('id', source_str)
self.assertIn('Lou', source_str)
self.assertIn('Luigi', source_str)
self.assertIn('lou', source_str)
self.assertIn('Tom', source_str)
self.assertIn('Tim', source_str)
self.assertIn('tom', source_str)
self.assertIn('Andy', source_str)
self.assertIn('andy', source_str)
target_str = json.dumps(the_returned['object']['object']['definition']['target'])
self.assertIn('description', target_str)
self.assertIn('id', target_str)
self.assertIn('ADL LRS', target_str)
self.assertIn('1', target_str)
self.assertIn('lrs', target_str)
self.assertIn('2', target_str)
self.assertIn('the lrs', target_str)
self.assertIn('the adl lrs', target_str)
self.assertIn('3', target_str)
self.assertEqual(the_returned['object']['objectType'], 'SubStatement')
self.assertEqual(the_returned['object']['result']['completion'], True)
self.assertEqual(the_returned['object']['result']['duration'], 'P3Y6M4DT12H30M5S')
self.assertEqual(the_returned['object']['result']['extensions']['ext:resultKey11'], 'resultValue11')
self.assertEqual(the_returned['object']['result']['extensions']['ext:resultKey22'], 'resultValue22')
self.assertEqual(the_returned['object']['result']['response'], 'Poorly done')
self.assertEqual(the_returned['object']['result']['score']['max'], 51)
self.assertEqual(the_returned['object']['result']['score']['min'], 1)
self.assertEqual(the_returned['object']['result']['score']['raw'], 50)
self.assertEqual(the_returned['object']['result']['score']['scaled'], 0.5)
self.assertEqual(the_returned['object']['result']['success'], True)
self.assertEqual(the_returned['object']['verb']['id'], 'http://adlnet.gov/expapi/verbs/assess')
self.assertEqual(the_returned['object']['verb']['display']['en-GB'], 'Graded')
self.assertEqual(the_returned['object']['verb']['display']['en-US'], 'assessed')
self.assertEqual(the_returned['result']['completion'], True)
self.assertEqual(the_returned['result']['duration'], 'P3Y6M4DT12H30M5S')
self.assertEqual(the_returned['result']['extensions']['ext:resultKey1'], 'resultValue1')
self.assertEqual(the_returned['result']['extensions']['ext:resultKey2'], 'resultValue2')
self.assertEqual(the_returned['result']['response'], 'Well done')
self.assertEqual(the_returned['result']['score']['max'], 100)
self.assertEqual(the_returned['result']['score']['min'], 0)
self.assertEqual(the_returned['result']['score']['raw'], 85)
self.assertEqual(the_returned['result']['score']['scaled'], 0.85)
self.assertEqual(the_returned['result']['success'], True)
self.assertEqual(the_returned['context']['contextActivities']['other'][0]['id'], 'http://example.adlnet.gov/tincan/example/test')
self.assertEqual(the_returned['context']['extensions']['ext:contextKey1'], 'contextVal1')
self.assertEqual(the_returned['context']['extensions']['ext:contextKey2'], 'contextVal2')
self.assertEqual(the_returned['context']['language'], 'en-US')
self.assertEqual(the_returned['context']['platform'], 'Platform is web browser.')
self.assertEqual(the_returned['context']['registration'], context_id)
self.assertEqual(the_returned['context']['revision'], 'Spelling error in choices.')
self.assertEqual(the_returned['context']['statement']['id'], nested_st_id)
self.assertEqual(the_returned['context']['statement']['objectType'], 'StatementRef')
    # Fourth stmt in list is missing an actor - should throw an error and roll back the first three statements
def test_post_list_rollback(self):
cguid1 = str(uuid.uuid1())
stmts = json.dumps([{"verb":{"id": "http://adlnet.gov/expapi/verbs/wrong-failed","display": {"en-US":"wrong-failed"}},"object": {"id":"act:test_wrong_list_post2"},
"actor":{"objectType":"Agent", "mbox":"mailto:wrong-t@t.com"},"result": {"score":{"scaled":.99}, "completion": True, "success": True, "response": "wrong",
"extensions":{"ext:resultwrongkey1": "value1", "ext:resultwrongkey2":"value2"}}},
{"verb":{"id": "http://adlnet.gov/expapi/verbs/wrong-kicked","display": {"en-US":"wrong-kicked"}},
"object": {"objectType": "Activity", "id":"act:test_wrong_list_post",
"definition": {"name": {"en-US":"wrongactName", "en-GB": "anotherActName"},
"description": {"en-US":"This is my activity description.", "en-GB": "This is another activity description."},
"type": "http://adlnet.gov/expapi/activities/http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "choice",
"correctResponsesPattern": ["wronggolf", "wrongtetris"],
"choices":[{"id": "wronggolf", "description": {"en-US":"Golf Example", "en-GB": "GOLF"}},
{"id": "wrongtetris","description":{"en-US": "Tetris Example", "en-GB": "TETRIS"}},
{"id":"wrongfacebook", "description":{"en-US":"Facebook App", "en-GB": "FACEBOOK"}},
{"id":"wrongscrabble", "description": {"en-US": "Scrabble Example", "en-GB": "SCRABBLE"}}],
"extensions": {"ext:wrongkey1": "wrongvalue1", "ext:wrongkey2": "wrongvalue2","ext:wrongkey3": "wrongvalue3"}}},
"actor":{"objectType":"Agent", "mbox":"mailto:wrong-t@t.com"}},
{"verb":{"id": "http://adlnet.gov/expapi/verbs/wrong-passed","display": {"en-US":"wrong-passed"}},"object": {"id":"act:test_wrong_list_post1"},
"actor":{"objectType":"Agent", "mbox":"mailto:wrong-t@t.com"},"context":{"registration": cguid1, "contextActivities": {"other": {"id": "act:wrongActivityID2"}},
"revision": "wrong", "platform":"wrong","language": "en-US", "extensions":{"ext:wrongkey1": "wrongval1",
"ext:wrongkey2": "wrongval2"}}},
{"verb":{"id": "http://adlnet.gov/expapi/verbs/wrong-kicked","display": {"en-US":"wrong-kicked"}},"object": {"id":"act:test_wrong_list_post2"}},
{"verb":{"id": "http://adlnet.gov/expapi/verbs/wrong-kicked","display": {"en-US":"wrong-kicked"}},"object": {"id":"act:test_wrong_list_post4"}, "actor":{"objectType":"Agent", "mbox":"wrong-t@t.com"}}])
response = self.client.post(reverse(views.statements), stmts, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(response.status_code, 400)
self.assertIn('actor is missing in Statement', response.content)
verbs = models.Verb.objects.filter(verb_id__contains='wrong')
activities = models.Activity.objects.filter(activity_id__contains='test_wrong_list_post')
statements = models.Statement.objects.all()
# 11 statements from setup
self.assertEqual(len(statements), 11)
self.assertEqual(len(verbs), 0)
self.assertEqual(len(activities), 0)
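    # Note on the rollback tests above and below: when any statement in a POSTed list
    # fails validation, the whole batch is rolled back, so only the 11 statements
    # created during test setup remain and none of the 'wrong' verbs, activities or
    # agents should have been persisted.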
def test_post_list_rollback_part_2(self):
stmts = json.dumps([{"object": {"objectType":"Agent","name":"john","mbox":"mailto:john@john.com"},
"verb": {"id": "http://adlnet.gov/expapi/verbs/wrong","display": {"wrong-en-US":"wrong"}},
"actor":{"objectType":"Agent","mbox":"mailto:s@s.com"}},
{"verb":{"id": "http://adlnet.gov/expapi/verbs/created"},
"object": {"objectType": "Activity", "id":"act:foogie",
"definition": {"name": {"en-US":"testname2", "en-GB": "altname"},
"description": {"en-US":"testdesc2", "en-GB": "altdesc"}, "type": "http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in","correctResponsesPattern": ["answer"]}},
"actor":{"objectType":"Agent", "mbox":"mailto:wrong-t@t.com"}},
{"verb":{"id": "http://adlnet.gov/expapi/verbs/wrong-kicked"},"object": {"id":"act:test_wrong_list_post2"}}])
response = self.client.post(reverse(views.statements), stmts, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(response.status_code, 400)
self.assertIn('actor is missing in Statement', response.content)
created_verbs = models.Verb.objects.filter(verb_id__contains='http://adlnet.gov/expapi/verbs/created')
wrong_verbs = models.Verb.objects.filter(verb_id__contains='http://adlnet.gov/expapi/verbs/wrong')
activities = models.Activity.objects.filter(activity_id='act:foogie')
statements = models.Statement.objects.all()
wrong_agent = models.Agent.objects.filter(mbox='mailto:wrong-t@t.com')
john_agent = models.Agent.objects.filter(mbox='mailto:john@john.com')
s_agent = models.Agent.objects.filter(mbox='mailto:s@s.com')
auth_agent = models.Agent.objects.filter(mbox='mailto:test1@tester.com')
self.assertEqual(len(created_verbs), 1)
        # The 'wrong' verbs from the first and last stmts should have been removed by the rollback
self.assertEqual(len(wrong_verbs), 0)
self.assertEqual(len(activities), 1)
self.assertEqual(len(statements), 11)
self.assertEqual(len(wrong_agent), 0)
self.assertEqual(len(john_agent), 1)
self.assertEqual(len(s_agent), 1)
self.assertEqual(len(auth_agent), 0)
def test_post_list_rollback_with_void(self):
stmts = json.dumps([{"actor":{"objectType":"Agent","mbox":"mailto:only-s@s.com"},
"object": {"objectType":"StatementRef","id":str(self.exist_stmt_id)},
"verb": {"id": "http://adlnet.gov/expapi/verbs/voided","display": {"en-US":"voided"}}},
{"verb":{"id": "http://adlnet.gov/expapi/verbs/wrong-kicked"},"object": {"id":"act:test_wrong_list_post2"}}])
response = self.client.post(reverse(views.statements), stmts, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(response.status_code, 400)
self.assertIn('actor is missing in Statement', response.content)
voided_st = models.Statement.objects.get(statement_id=str(self.exist_stmt_id))
voided_verb = models.Verb.objects.filter(verb_id__contains='voided')
only_actor = models.Agent.objects.filter(mbox="mailto:only-s@s.com")
statements = models.Statement.objects.all()
self.assertEqual(len(statements), 11)
self.assertEqual(voided_st.voided, False)
self.assertEqual(len(voided_verb), 0)
self.assertEqual(len(only_actor), 0)
def test_post_list_rollback_with_subs(self):
sub_context_id = str(uuid.uuid1())
stmts = json.dumps([{"actor":{"objectType":"Agent","mbox":"mailto:wrong-s@s.com"},
"verb": {"id": "http://adlnet.gov/expapi/verbs/wrong","display": {"wrong-en-US":"wrong"}},
"object": {"objectType":"Agent","name":"john","mbox":"mailto:john@john.com"}},
{"actor":{"objectType":"Agent","mbox":"mailto:s@s.com"},
"verb": {"id": "http://adlnet.gov/expapi/verbs/wrong-next","display": {"wrong-en-US":"wrong-next"}},
"object":{"objectType":"SubStatement",
"actor":{"objectType":"Agent","mbox":"mailto:wrong-ss@ss.com"},"verb": {"id":"http://adlnet.gov/expapi/verbs/wrong-sub"},
"object": {"objectType":"Activity", "id":"act:wrong-testex.com"}, "result":{"completion": True, "success": True,
"response": "sub-wrong-kicked"}, "context":{"registration": sub_context_id,
"contextActivities": {"other": {"id": "act:sub-wrong-ActivityID"}},"revision": "foo", "platform":"bar",
"language": "en-US", "extensions":{"ext:wrong-k1": "v1", "ext:wrong-k2": "v2"}}}},
{"verb":{"id": "http://adlnet.gov/expapi/verbs/wrong-kicked"},"object": {"id":"act:test_wrong_list_post2"}}])
response = self.client.post(reverse(views.statements), stmts, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(response.status_code, 400)
self.assertIn('actor is missing in Statement', response.content)
s_agent = models.Agent.objects.filter(mbox="mailto:wrong-s@s.com")
ss_agent = models.Agent.objects.filter(mbox="mailto:wrong-ss@ss.com")
john_agent = models.Agent.objects.filter(mbox="mailto:john@john.com")
subs = models.SubStatement.objects.all()
wrong_verb = models.Verb.objects.filter(verb_id__contains="wrong")
activities = models.Activity.objects.filter(activity_id__contains="wrong")
statements = models.Statement.objects.all()
self.assertEqual(len(statements), 11)
self.assertEqual(len(s_agent), 0)
self.assertEqual(len(ss_agent), 0)
self.assertEqual(len(john_agent), 1)
# Only 1 sub from setup
self.assertEqual(len(subs), 1)
self.assertEqual(len(wrong_verb), 0)
self.assertEqual(len(activities), 0)
def test_activity_definition_change(self):
username_1 = "tester1"
email_1 = "test1@tester.com"
password_1 = "test"
auth_1 = "Basic %s" % base64.b64encode("%s:%s" % (username_1, password_1))
form_1 = {"username":username_1, "email":email_1,"password":password_1,"password2":password_1}
response_1 = self.client.post(reverse(views.register),form_1, X_Experience_API_Version="1.0.0")
username_2 = "tester2"
email_2 = "test2@tester.com"
password_2 = "test2"
auth_2 = "Basic %s" % base64.b64encode("%s:%s" % (username_2, password_2))
form_2 = {"username":username_2, "email":email_2,"password":password_2,"password2":password_2}
response_2 = self.client.post(reverse(views.register),form_2, X_Experience_API_Version="1.0.0")
# Should have no definition
stmt_1 = json.dumps({"actor": {"objectType":"Agent","name":"max","mbox":"mailto:max@max.com"},
"object":{"id": "act:test_activity_change"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/created", "display": {"en-US":"created"}}})
response_1 = self.client.post(reverse(views.statements), stmt_1, content_type="application/json",
Authorization=auth_1, X_Experience_API_Version="1.0.0")
self.assertEqual(response_1.status_code, 200)
user1_agent = models.Agent.objects.get(mbox="mailto:test1@tester.com")
act = models.Activity.objects.get(activity_id="act:test_activity_change").object_return()
self.assertEqual(act["id"], "act:test_activity_change")
with self.assertRaises(KeyError):
act["definition"]
acts = models.Activity.objects.filter(activity_id="act:test_activity_change").count()
self.assertEqual(acts, 1)
# Creates local act for other user
stmt_2 = json.dumps({"actor": {"objectType":"Agent","name":"max","mbox":"mailto:max@max.com"},
"object":{"id": "act:test_activity_change", "definition":{"name":{"en-US": "fail_test"}}},
"verb":{"id": "http://adlnet.gov/expapi/verbs/created", "display": {"en-US":"created"}}})
response_2 = self.client.post(reverse(views.statements), stmt_2, content_type="application/json",
Authorization=auth_2, X_Experience_API_Version="1.0.0")
user2_agent = models.Agent.objects.get(mbox="mailto:test2@tester.com")
self.assertEqual(response_2.status_code, 200)
act = models.Activity.objects.get(activity_id="act:test_activity_change", authority=user2_agent).object_return()
self.assertEqual(act["id"], "act:test_activity_change")
self.assertIn('definition', act)
acts = models.Activity.objects.filter(activity_id="act:test_activity_change").count()
self.assertEqual(acts, 2)
# Should update local version of activity with definition for that user
response_3 = self.client.post(reverse(views.statements), stmt_1, content_type="application/json",
Authorization=auth_2, X_Experience_API_Version="1.0.0")
self.assertEqual(response_3.status_code, 200)
act = models.Activity.objects.get(activity_id="act:test_activity_change", authority=user2_agent).object_return()
self.assertEqual(act["id"], "act:test_activity_change")
self.assertIn('definition', act)
acts = models.Activity.objects.filter(activity_id="act:test_activity_change").count()
self.assertEqual(acts, 2)
# Should have new definition for canonical since user is owner
stmt_3 = json.dumps({"actor": {"objectType":"Agent","name":"max","mbox":"mailto:max@max.com"},
"object":{"id": "act:test_activity_change", "definition":{"name":{"en-US": "foo"}}},
"verb":{"id": "http://adlnet.gov/expapi/verbs/created", "display": {"en-US":"created"}}})
response_4 = self.client.post(reverse(views.statements), stmt_3, content_type="application/json",
Authorization=auth_1, X_Experience_API_Version="1.0.0")
self.assertEqual(response_4.status_code, 200)
act = models.Activity.objects.get(activity_id="act:test_activity_change", authority=user1_agent).object_return()
self.assertEqual(act["id"], "act:test_activity_change")
self.assertEqual(act["definition"], {"name":{"en-US": "foo"}})
# Should have updated local activity for that user with new definition
response_5 = self.client.post(reverse(views.statements), stmt_3, content_type="application/json",
Authorization=auth_2, X_Experience_API_Version="1.0.0")
self.assertEqual(response_5.status_code, 200)
act = models.Activity.objects.get(activity_id="act:test_activity_change", authority=user2_agent).object_return()
self.assertEqual(act["id"], "act:test_activity_change")
self.assertEqual(act["definition"], {"name":{"en-US": "foo"}})
acts = models.Activity.objects.filter(activity_id="act:test_activity_change").count()
self.assertEqual(acts, 2)
# Should update local version of that activity for that user
stmt_4 = json.dumps({"actor": {"objectType":"Agent","name":"max","mbox":"mailto:max@max.com"},
"object":{"id": "act:test_activity_change", "definition":{"name":{"en-US": "bar"}}},
"verb":{"id": "http://adlnet.gov/expapi/verbs/created", "display": {"en-US":"created"}}})
response_6 = self.client.post(reverse(views.statements), stmt_4, content_type="application/json",
Authorization=auth_2, X_Experience_API_Version="1.0.0")
self.assertEqual(response_6.status_code, 200)
act = models.Activity.objects.get(activity_id="act:test_activity_change", authority=user2_agent).object_return()
self.assertEqual(act["id"], "act:test_activity_change")
self.assertEqual(act["definition"], {"name":{"en-US": "bar"}})
acts = models.Activity.objects.filter(activity_id="act:test_activity_change").count()
self.assertEqual(acts, 2)
# Should have replaced name in def for local act of that user
stmt_5 = json.dumps({"actor": {"objectType":"Agent","name":"max","mbox":"mailto:max@max.com"},
"object":{"id": "act:test_activity_change", "definition":{"name":{"fr": "bar"}}},
"verb":{"id": "http://adlnet.gov/expapi/verbs/created", "display": {"en-US":"created"}}})
response_7 = self.client.post(reverse(views.statements), stmt_5, content_type="application/json",
Authorization=auth_2, X_Experience_API_Version="1.0.0")
self.assertEqual(response_7.status_code, 200)
act = models.Activity.objects.get(activity_id="act:test_activity_change", authority=user2_agent).object_return()
self.assertEqual(act["id"], "act:test_activity_change")
self.assertIn("fr", act['definition']['name'])
acts = models.Activity.objects.filter(activity_id="act:test_activity_change").count()
self.assertEqual(acts, 2)
# Can't remove definition if it already exists - should still be there
response_8 = self.client.post(reverse(views.statements), stmt_1, content_type="application/json",
Authorization=auth_2, X_Experience_API_Version="1.0.0")
self.assertEqual(response_8.status_code, 200)
act = models.Activity.objects.get(activity_id="act:test_activity_change", authority=user2_agent).object_return()
self.assertEqual(act["id"], "act:test_activity_change")
self.assertIn("definition", act.keys())
acts = models.Activity.objects.filter(activity_id="act:test_activity_change").count()
self.assertEqual(acts, 2)
# Check canonical of last stmt returned from query to make sure it contains the definition
param = {"agent":{"mbox":"mailto:max@max.com"}, "format":"canonical", "activity":"act:test_activity_change"}
path = "%s?%s" % (reverse(views.statements),urllib.urlencode(param))
r = self.client.get(path, X_Experience_API_Version="1.0", Authorization=auth_1)
self.assertEqual(r.status_code, 200)
first_stmt = json.loads(r.content)["statements"][0]
self.assertEqual(first_stmt["object"]["definition"], {"name":{"en-US": "foo"}})
def test_post_with_non_oauth_not_existing_group(self):
ot = "Group"
name = "the group ST"
mbox = "mailto:the.groupST@example.com"
stmt = json.dumps({"actor":{"name":"agentA","mbox":"mailto:agentA@example.com"},"verb":{"id": "http://verb/uri/joined", "display":{"en-US":"joined"}},
"object": {"id":"act:i.pity.the.fool"}, "authority": {"objectType":ot, "name":name, "mbox":mbox,"member":[{"name":"agentA","mbox":"mailto:agentA@example.com"},{"name":"agentB","mbox":"mailto:agentB@example.com"}]}})
response = self.client.post(reverse(views.statements), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(response.status_code, 400)
self.assertIn("Statements cannot have a non-Oauth group as the authority", response.content)
def test_post_with_non_oauth_existing_group(self):
ot = "Group"
name = "the group ST"
mbox = "mailto:the.groupST@example.com"
group = {"objectType":ot, "name":name, "mbox":mbox,"member":[{"name":"agentA","mbox":"mailto:agentA@example.com"},{"name":"agentB","mbox":"mailto:agentB@example.com"}]}
models.Agent.objects.retrieve_or_create(**group)
stmt = json.dumps({"actor":{"name":"agentA","mbox":"mailto:agentA@example.com"},"verb":{"id": "http://verb/uri/joined", "display":{"en-US":"joined"}},
"object": {"id":"act:i.pity.the.fool"}, "authority": {"objectType":ot, "name":name, "mbox":mbox,"member":[{"name":"agentA","mbox":"mailto:agentA@example.com"},{"name":"agentB","mbox":"mailto:agentB@example.com"}]}})
response = self.client.post(reverse(views.statements), stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content, "Statements cannot have a non-Oauth group as the authority")
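    # For reference, a minimal hedged sketch of the payload shape the statements in
    # these tests are built from -- at least an actor, a verb and an object are
    # required (IDs below are illustrative, taken from the test above):
    #
    #   minimal_stmt = json.dumps({
    #       "actor": {"objectType": "Agent", "mbox": "mailto:agentA@example.com"},
    #       "verb": {"id": "http://verb/uri/joined", "display": {"en-US": "joined"}},
    #       "object": {"id": "act:i.pity.the.fool"}})
    #   self.client.post(reverse(views.statements), minimal_stmt,
    #                    content_type="application/json",
    #                    Authorization=self.auth, X_Experience_API_Version="1.0.0")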
| 67.073778
| 240
| 0.637308
|
a0752110b27d26ad3656c38c0e1a67744f7c9668
| 5,394
|
py
|
Python
|
main.py
|
google/clicktrackers-panel
|
5de8f51602c3d587c29c2d7c599f0df589f4720d
|
[
"Apache-2.0"
] | 5
|
2017-12-26T05:54:54.000Z
|
2021-10-13T22:35:23.000Z
|
main.py
|
google/clicktrackers-panel
|
5de8f51602c3d587c29c2d7c599f0df589f4720d
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
google/clicktrackers-panel
|
5de8f51602c3d587c29c2d7c599f0df589f4720d
|
[
"Apache-2.0"
] | 5
|
2017-12-26T05:54:29.000Z
|
2021-10-13T22:35:14.000Z
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exposes a HTTP API from performing operations on click-tracking ads in DCM.
Supports hosting on GAE Standard.
"""
from datetime import datetime
from datetime import timedelta
import json
import logging
import os
from urlparse import urlparse
from googleapiclient import discovery
from googleapiclient.errors import HttpError
import jinja2
from oauth2client.contrib import appengine
import webapp2
from google.appengine.api import urlfetch
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
autoescape=True,
extensions=['jinja2.ext.autoescape'])
# Client should download the credentials and deploy them with the code
# We could use KMS or Datastore, but let's keep it simple for now
CLIENT_SECRETS = os.path.join(os.path.dirname(__file__), 'client_secrets.json')
API_NAME = 'dfareporting'
API_VERSION = 'v2.8'
API_SCOPES = ['https://www.googleapis.com/auth/dfatrafficking']
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
decorator = appengine.oauth2decorator_from_clientsecrets(
CLIENT_SECRETS, scope=API_SCOPES)
service = discovery.build(API_NAME, API_VERSION)
class MainHandler(webapp2.RequestHandler):
@decorator.oauth_required
def get(self):
credentials = decorator.get_credentials()
if credentials.access_token_expired:
credentials.refresh(decorator.http())
template = JINJA_ENVIRONMENT.get_template('templates/index.html')
self.response.write(template.render())
class ProfilesHandler(webapp2.RequestHandler):
@decorator.oauth_required
def get(self):
resp = service.userProfiles().list().execute(http=decorator.http())
self.response.write(json.dumps({'profiles': resp['items']}))
class PlacementsHandler(webapp2.RequestHandler):
"""Handles placement lookup (GET) and click-tracker creation (POST)."""
@decorator.oauth_required
def get(self, profile_id, placement_id):
try:
http = decorator.http()
resp = service.placements().get(
profileId=profile_id, id=placement_id).execute(http=http)
logging.debug(resp)
self.response.write(json.dumps({'placement': resp}))
except HttpError as err:
upstream_error = json.loads(err.content)['error']
logging.error(upstream_error)
resp = {
'code': upstream_error['code'],
'message': upstream_error['message']
}
self.response.set_status(resp['code'])
self.response.write(json.dumps(resp))
@decorator.oauth_required
def post(self, profile_id, placement_id):
data = json.loads(self.request.body)
http = decorator.http()
placement = service.placements().get(
profileId=profile_id, id=placement_id).execute(http=http)
one_year = datetime.now() + timedelta(days=365)
tracker_urls = {}
for t in data['trackers']:
url = urlparse(t['url'])
ad = service.ads().insert(
profileId=profile_id,
body={
'advertiserId':
placement['advertiserId'],
'campaignId':
placement['campaignId'],
'placementId':
placement_id,
'type':
'AD_SERVING_CLICK_TRACKER',
'clickThroughUrl': {
'customClickThroughUrl': url.geturl(),
'defaultLandingPage': False
},
'name':
t['name'],
'active':
True,
'dynamicClickTracker':
True,
'startTime': (datetime.now() + timedelta(seconds=3))
.strftime(TIME_FORMAT),
'endTime':
one_year.strftime(TIME_FORMAT),
'placementAssignments': [{
'placementId': placement_id,
'active': True
}]
}).execute(http=http)
tracker_urls[ad['id']] = {'name': t['name']}
# increase timeout for tag generation
urlfetch.set_default_fetch_deadline(30)
tags = service.placements().generatetags(
profileId=profile_id,
campaignId=placement['campaignId'],
placementIds=[placement_id],
tagFormats=['PLACEMENT_TAG_CLICK_COMMANDS']).execute(http=http)
for pt in tags['placementTags']:
if pt['placementId'] == placement_id:
for td in pt['tagDatas']:
if td['adId'] in tracker_urls:
tracker_urls[td['adId']]['clickUrl'] = td['clickTag']
resp = json.dumps({'uploaded': tracker_urls})
logging.debug(resp)
self.response.write(resp)
app = webapp2.WSGIApplication(
[
('/', MainHandler),
(r'/profiles', ProfilesHandler),
(r'/profiles/(\d+)/placements/(\d+)', PlacementsHandler),
(decorator.callback_path, decorator.callback_handler()),
],
debug=True)
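# A hedged sketch of the HTTP surface wired up above (paths and body/response shapes
# are taken from the handlers; the profile/placement IDs and URL are illustrative):
#
#   GET  /profiles
#       -> {"profiles": [...]}
#   GET  /profiles/1234/placements/5678
#       -> {"placement": {...}} on success, or {"code": ..., "message": ...} on error
#   POST /profiles/1234/placements/5678
#       body: {"trackers": [{"name": "Promo link", "url": "https://example.com/landing"}]}
#       -> {"uploaded": {"<adId>": {"name": "Promo link", "clickUrl": "..."}}}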
| 33.924528
| 79
| 0.654245
|
ba5ce629596db8390084e04638fc5175c0f5406d
| 971
|
py
|
Python
|
lib/taniumpy/object_types/saved_action_policy.py
|
netsec/pytan
|
29a3484d21cb90d8896275febd1c535e4f3cdc7e
|
[
"MIT"
] | null | null | null |
lib/taniumpy/object_types/saved_action_policy.py
|
netsec/pytan
|
29a3484d21cb90d8896275febd1c535e4f3cdc7e
|
[
"MIT"
] | 1
|
2021-12-08T08:29:26.000Z
|
2021-12-08T08:29:26.000Z
|
pytanlib/taniumpy/object_types/saved_action_policy.py
|
splunk-soar-connectors/tanium
|
e6f38fd014ea125e11a584ac9932ad4e7e855ac7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2015 Tanium Inc
#
# Generated from console.wsdl version 0.0.1
#
#
from .base import BaseType
class SavedActionPolicy(BaseType):
_soap_tag = 'policy'
def __init__(self):
BaseType.__init__(
self,
simple_properties={'saved_question_id': int,
'saved_question_group_id': int,
'row_filter_group_id': int,
'max_age': int,
'min_count': int},
complex_properties={'saved_question_group': Group,
'row_filter_group': Group},
list_properties={},
)
self.saved_question_id = None
self.saved_question_group_id = None
self.row_filter_group_id = None
self.max_age = None
self.min_count = None
self.saved_question_group = None
self.row_filter_group = None
# Group backs both complex properties above; it is imported after the class
# definition so this module still loads if the group module imports it in turn.
from group import Group
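# A hypothetical usage sketch (not part of the generated code); it only touches the
# simple properties declared in __init__ above, and the values are made up:
#
#   policy = SavedActionPolicy()
#   policy.saved_question_id = 123
#   policy.max_age = 600
#   policy.min_count = 1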
| 24.897436
| 62
| 0.568486
|
822ad9c580fdd3cec11b14b1468fe633d419f674
| 12,303
|
py
|
Python
|
datadog_checks_dev/datadog_checks/dev/tooling/commands/test.py
|
tdimnet/integrations-core
|
a78133a3b71a1b8377fa214d121a98647031ab06
|
[
"BSD-3-Clause"
] | 1
|
2021-12-15T22:45:14.000Z
|
2021-12-15T22:45:14.000Z
|
datadog_checks_dev/datadog_checks/dev/tooling/commands/test.py
|
tdimnet/integrations-core
|
a78133a3b71a1b8377fa214d121a98647031ab06
|
[
"BSD-3-Clause"
] | null | null | null |
datadog_checks_dev/datadog_checks/dev/tooling/commands/test.py
|
tdimnet/integrations-core
|
a78133a3b71a1b8377fa214d121a98647031ab06
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import sys
import click
from ..._env import DDTRACE_OPTIONS_LIST, E2E_PARENT_PYTHON, SKIP_ENVIRONMENT
from ...ci import get_ci_env_vars, running_on_ci
from ...fs import chdir, file_exists, remove_path
from ...subprocess import run_command
from ...utils import ON_WINDOWS, get_next
from ..constants import get_root
from ..dependencies import read_check_base_dependencies
from ..testing import construct_pytest_options, fix_coverage_report, get_tox_envs, pytest_coverage_sources
from ..utils import code_coverage_enabled, complete_testable_checks
from .console import CONTEXT_SETTINGS, abort, echo_debug, echo_info, echo_success, echo_waiting, echo_warning
def display_envs(check_envs):
for check, envs in check_envs:
echo_success(f'`{check}`:')
for e in envs:
echo_info(f' {e}')
@click.command(context_settings=CONTEXT_SETTINGS, short_help='Run tests')
@click.argument('checks', autocompletion=complete_testable_checks, nargs=-1)
@click.option('--format-style', '-fs', is_flag=True, help='Run only the code style formatter')
@click.option('--style', '-s', is_flag=True, help='Run only style checks')
@click.option('--bench', '-b', is_flag=True, help='Run only benchmarks')
@click.option('--latest', is_flag=True, help='Only verify support of new product versions')
@click.option('--e2e', is_flag=True, help='Run only end-to-end tests')
@click.option('--ddtrace', is_flag=True, help='Run tests using dd-trace-py')
@click.option('--cov', '-c', 'coverage', is_flag=True, help='Measure code coverage')
@click.option('--cov-missing', '-cm', is_flag=True, help='Show line numbers of statements that were not executed')
@click.option('--junit', '-j', 'junit', is_flag=True, help='Generate junit reports')
@click.option('--marker', '-m', help='Only run tests matching given marker expression')
@click.option('--filter', '-k', 'test_filter', help='Only run tests matching given substring expression')
@click.option('--pdb', 'enter_pdb', is_flag=True, help='Drop to PDB on first failure, then end test session')
@click.option('--debug', '-d', is_flag=True, help='Set the log level to debug')
@click.option('--verbose', '-v', count=True, help='Increase verbosity (can be used additively)')
@click.option('--list', '-l', 'list_envs', is_flag=True, help='List available test environments')
@click.option('--passenv', help='Additional environment variables to pass down')
@click.option('--changed', is_flag=True, help='Only test changed checks')
@click.option('--cov-keep', is_flag=True, help='Keep coverage reports')
@click.option('--skip-env', is_flag=True, help='Skip environment creation and assume it is already running')
@click.option('--pytest-args', '-pa', help='Additional arguments to pytest')
@click.option('--force-base-unpinned', is_flag=True, help='Force using datadog-checks-base as specified by check dep')
@click.option('--force-base-min', is_flag=True, help='Force using lowest viable release version of datadog-checks-base')
@click.option('--force-env-rebuild', is_flag=True, help='Force creating a new env')
@click.pass_context
def test(
ctx,
checks,
format_style,
style,
bench,
latest,
e2e,
ddtrace,
coverage,
junit,
cov_missing,
marker,
test_filter,
enter_pdb,
debug,
verbose,
list_envs,
passenv,
changed,
cov_keep,
skip_env,
pytest_args,
force_base_unpinned,
force_base_min,
force_env_rebuild,
):
"""Run tests for Agent-based checks.
If no checks are specified, this will only test checks that
were changed compared to the master branch.
You can also select specific comma-separated environments to test like so:
\b
`$ ddev test mysql:mysql57,maria10130`
"""
if list_envs:
check_envs = get_tox_envs(checks, every=True, sort=True, changed_only=changed)
display_envs(check_envs)
return
root = get_root()
testing_on_ci = running_on_ci()
color = ctx.obj['color']
# Implicitly track coverage
if cov_missing:
coverage = True
if e2e:
marker = 'e2e'
coverage_show_missing_lines = str(cov_missing or testing_on_ci)
test_env_vars = {
# Environment variables we need tox to pass down
'TOX_TESTENV_PASSENV': (
# Used in .coveragerc for whether or not to show missing line numbers for coverage
# or for generic tag checking
'DDEV_* '
# Necessary for compilation on Windows: PROGRAMDATA, PROGRAMFILES, PROGRAMFILES(X86)
'PROGRAM* '
# Necessary for getting the user on Windows https://docs.python.org/3/library/getpass.html#getpass.getuser
'USERNAME '
# Space-separated list of pytest options
'PYTEST_ADDOPTS '
# https://docs.docker.com/compose/reference/envvars/
'DOCKER_* COMPOSE_*'
),
'DDEV_COV_MISSING': coverage_show_missing_lines,
}
if skip_env:
test_env_vars[SKIP_ENVIRONMENT] = 'true'
test_env_vars['TOX_TESTENV_PASSENV'] += f' {SKIP_ENVIRONMENT}'
if passenv:
test_env_vars['TOX_TESTENV_PASSENV'] += f' {passenv}'
test_env_vars['TOX_TESTENV_PASSENV'] += f" {' '.join(get_ci_env_vars())}"
if color is not None:
test_env_vars['PY_COLORS'] = '1' if color else '0'
if e2e:
test_env_vars[E2E_PARENT_PYTHON] = sys.executable
test_env_vars['TOX_TESTENV_PASSENV'] += f' {E2E_PARENT_PYTHON}'
if ddtrace:
for env in DDTRACE_OPTIONS_LIST:
test_env_vars['TOX_TESTENV_PASSENV'] += f' {env}'
# Used for CI app product
test_env_vars['TOX_TESTENV_PASSENV'] += ' TF_BUILD BUILD* SYSTEM*'
test_env_vars['DD_SERVICE'] = os.getenv('DD_SERVICE', 'ddev-integrations')
test_env_vars['DD_ENV'] = os.getenv('DD_ENV', 'ddev-integrations')
test_env_vars['DDEV_TRACE_ENABLED'] = 'true'
test_env_vars['DD_PROFILING_ENABLED'] = 'true'
org_name = ctx.obj['org']
org = ctx.obj['orgs'].get(org_name, {})
api_key = org.get('api_key') or ctx.obj['dd_api_key'] or os.getenv('DD_API_KEY')
if api_key:
test_env_vars['DD_API_KEY'] = api_key
test_env_vars['TOX_TESTENV_PASSENV'] += ' DD_API_KEY'
check_envs = get_tox_envs(
checks, style=style, format_style=format_style, benchmark=bench, changed_only=changed, latest=latest
)
tests_ran = False
for check, envs in check_envs:
# Many checks don't have benchmark envs, etc.
if not envs:
echo_debug(f"No envs found for: `{check}`")
continue
ddtrace_check = ddtrace
if ddtrace and ON_WINDOWS and any('py2' in env for env in envs):
# The pytest flag --ddtrace is not available for windows-py2 env.
# Removing it so it does not fail.
echo_warning(
                'ddtrace flag is not available for windows-py2 environments; disabling the flag for this check.'
)
ddtrace_check = False
# This is for ensuring proper spacing between output of multiple checks' tests.
# Basically this avoids printing a new line before the first check's tests.
output_separator = '\n' if tests_ran else ''
# For performance reasons we're generating what to test on the fly and therefore
# need a way to tell if anything ran since we don't know anything upfront.
tests_ran = True
# Build pytest options
pytest_options = construct_pytest_options(
check=check,
verbose=verbose,
color=color,
enter_pdb=enter_pdb,
debug=debug,
bench=bench,
latest=latest,
coverage=coverage,
junit=junit,
marker=marker,
test_filter=test_filter,
pytest_args=pytest_args,
e2e=e2e,
ddtrace=ddtrace_check,
)
if coverage:
pytest_options = pytest_options.format(pytest_coverage_sources(check))
test_env_vars['PYTEST_ADDOPTS'] = pytest_options
if verbose:
echo_info(f"pytest options: `{test_env_vars['PYTEST_ADDOPTS']}`")
with chdir(os.path.join(root, check), env_vars=test_env_vars):
if format_style:
test_type_display = 'the code formatter'
elif style:
test_type_display = 'only style checks'
elif bench:
test_type_display = 'only benchmarks'
elif latest:
test_type_display = 'only tests for the latest version'
elif e2e:
test_type_display = 'only end-to-end tests'
else:
test_type_display = 'tests'
wait_text = f'{output_separator}Running {test_type_display} for `{check}`'
echo_waiting(wait_text)
echo_waiting('-' * len(wait_text))
command = [
'tox',
# so users won't get failures for our possibly strict CI requirements
'--skip-missing-interpreters',
# so coverage tracks the real locations instead of .tox virtual envs
'--develop',
# comma-separated list of environments
'-e {}'.format(','.join(envs)),
]
env = os.environ.copy()
base_or_dev = check.startswith('datadog_checks_')
if force_base_min and not base_or_dev:
check_base_dependencies, errors = read_check_base_dependencies(check)
if errors:
abort(f'\nError collecting base package dependencies: {errors}')
spec_set = list(check_base_dependencies['datadog-checks-base'].keys())[0]
spec = get_next(spec_set) if spec_set else None
if spec is None or spec.operator != '>=':
abort(f'\nFailed to determine minimum version of package `datadog_checks_base`: {spec}')
version = spec.version
env['TOX_FORCE_INSTALL'] = f"datadog_checks_base[deps]=={version}"
elif force_base_unpinned and not base_or_dev:
env['TOX_FORCE_UNPINNED'] = "datadog_checks_base"
elif (force_base_min or force_base_unpinned) and base_or_dev:
echo_info(f'Skipping forcing base dependency for check {check}')
if force_env_rebuild:
command.append('--recreate')
if verbose:
command.append('-' + 'v' * verbose)
command = ' '.join(command)
echo_debug(f'TOX COMMAND: {command}')
result = run_command(command, env=env)
if result.code:
abort('\nFailed!', code=result.code)
if coverage and file_exists('.coverage') and code_coverage_enabled(check):
if not cov_keep:
echo_info('\n---------- Coverage report ----------\n')
result = run_command('coverage report --rcfile=../.coveragerc')
if result.code:
abort('\nFailed!', code=result.code)
if testing_on_ci:
result = run_command('coverage xml -i --rcfile=../.coveragerc')
if result.code:
abort('\nFailed!', code=result.code)
fix_coverage_report(check, 'coverage.xml')
run_command(['codecov', '-X', 'gcov', '--root', root, '-F', check, '-f', 'coverage.xml'])
else:
if not cov_keep:
remove_path('.coverage')
remove_path('coverage.xml')
echo_success('\nPassed!')
# You can only test one environment at a time since the setup/tear down occurs elsewhere
if e2e:
break
if not tests_ran:
if format_style:
echo_warning('Code formatting is not enabled!')
echo_info('To enable it, set `dd_check_style = true` under the `[testenv]` section of `tox.ini`.')
else:
echo_info('Nothing to test!')
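# Hedged invocation sketches (flag names come from the options declared above; the
# check and environment names are illustrative):
#
#   ddev test --list postgres           # list available tox environments for a check
#   ddev test mysql:mysql57 --cov -cm   # one environment, with coverage and missing lines
#   ddev test --changed --style         # style checks only, restricted to changed checks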
| 40.074919
| 120
| 0.626514
|
509d0c8b54ebe0dbde5e3bb55694331e49e695de
| 468
|
py
|
Python
|
backend/todo/urls.py
|
jimbofreedman/naggingnelly-backend
|
a1e64089915ad80a2d3d8cd0252bc2bcd0e1f475
|
[
"MIT"
] | null | null | null |
backend/todo/urls.py
|
jimbofreedman/naggingnelly-backend
|
a1e64089915ad80a2d3d8cd0252bc2bcd0e1f475
|
[
"MIT"
] | null | null | null |
backend/todo/urls.py
|
jimbofreedman/naggingnelly-backend
|
a1e64089915ad80a2d3d8cd0252bc2bcd0e1f475
|
[
"MIT"
] | null | null | null |
from django.urls import path, include
# from django.urls import url, include
from rest_framework.routers import DefaultRouter
from .views import index, complete, cancel, fail
from .viewsets import ContextViewSet, TodoItemViewSet
app_name = "todo"
router = DefaultRouter()
router.register(r'contexts', ContextViewSet, base_name='contexts')
router.register(r'todoItems', TodoItemViewSet, base_name='todo_items')
urlpatterns = [
path("", include(router.urls)),
]
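# With DefaultRouter the include above should expose, roughly (a hedged sketch; exact
# detail routes depend on the viewsets, and paths are relative to wherever this
# urlconf is included):
#
#   /             -> API root listing the registered viewsets
#   /contexts/    and /contexts/<pk>/
#   /todoItems/   and /todoItems/<pk>/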
| 27.529412
| 70
| 0.779915
|
ef298f23e8a5d770e785f36a4a7a864b9b036eb8
| 4,157
|
py
|
Python
|
xknx/config/yaml_loader.py
|
spacegaier/xknx
|
2c2420670da88fea386d573f78a78c5a342186e9
|
[
"MIT"
] | null | null | null |
xknx/config/yaml_loader.py
|
spacegaier/xknx
|
2c2420670da88fea386d573f78a78c5a342186e9
|
[
"MIT"
] | null | null | null |
xknx/config/yaml_loader.py
|
spacegaier/xknx
|
2c2420670da88fea386d573f78a78c5a342186e9
|
[
"MIT"
] | null | null | null |
"""Custom YAML loader."""
from collections import OrderedDict
import fnmatch
import logging
import os
from typing import Dict, Iterator, List, TypeVar, Union
from xknx.config.objects import NodeListClass, NodeStrClass
from xknx.exceptions import XKNXException
import yaml
logger = logging.getLogger("xknx.log")
JSON_TYPE = Union[List, Dict, str] # pylint: disable=invalid-name
DICT_T = TypeVar("DICT_T", bound=Dict) # pylint: disable=invalid-name
class SafeLineLoader(yaml.SafeLoader):
"""Loader class that keeps track of line numbers."""
def compose_node(self, parent: yaml.nodes.Node, index: int) -> yaml.nodes.Node:
"""Annotate a node with the first line it was seen."""
last_line: int = self.line
node: yaml.nodes.Node = super().compose_node(parent, index)
node.__line__ = last_line + 1 # type: ignore
return node
def _add_reference(obj, loader: SafeLineLoader, node: yaml.nodes.Node): # type: ignore
"""Add file reference information to an object."""
if isinstance(obj, list):
obj = NodeListClass(obj)
if isinstance(obj, str):
obj = NodeStrClass(obj)
setattr(obj, "__config_file__", loader.name)
setattr(obj, "__line__", node.start_mark.line)
return obj
def load_yaml(fname: str) -> JSON_TYPE:
"""Load a YAML file."""
try:
with open(fname, encoding="utf-8") as conf_file:
# If configuration file is empty YAML returns None
# We convert that to an empty dict
return yaml.load(conf_file, Loader=SafeLineLoader) or OrderedDict()
except yaml.YAMLError as exc:
logger.error(str(exc))
raise XKNXException(exc) from exc
except UnicodeDecodeError as exc:
logger.error("Unable to read file %s: %s", fname, exc)
raise XKNXException(exc) from exc
def _include_yaml(loader: SafeLineLoader, node: yaml.nodes.Node) -> JSON_TYPE:
"""Load another YAML file and embeds it using the !include tag.
Example:
device_tracker: !include device_tracker.yaml
"""
fname = os.path.join(os.path.dirname(loader.name), node.value)
try:
return _add_reference(load_yaml(fname), loader, node)
except FileNotFoundError as exc:
raise XKNXException(f"{node.start_mark}: Unable to read file {fname}.") from exc
def _is_file_valid(name: str) -> bool:
"""Decide if a file is valid."""
return not name.startswith(".")
def _find_files(directory: str, pattern: str) -> Iterator[str]:
"""Recursively load files in a directory."""
for root, dirs, files in os.walk(directory, topdown=True):
dirs[:] = [d for d in dirs if _is_file_valid(d)]
for basename in sorted(files):
if _is_file_valid(basename) and fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
yield filename
def _construct_seq(loader: SafeLineLoader, node: yaml.nodes.Node) -> JSON_TYPE:
"""Add line number and file name to Load YAML sequence."""
(obj,) = loader.construct_yaml_seq(node)
return _add_reference(obj, loader, node)
def _include_dir_list_yaml(
loader: SafeLineLoader, node: yaml.nodes.Node
) -> List[JSON_TYPE]:
"""Load multiple files from directory as a list."""
loc = os.path.join(os.path.dirname(loader.name), node.value)
return [load_yaml(f) for f in _find_files(loc, "*.yaml")]
def _env_var_yaml(loader: SafeLineLoader, node: yaml.nodes.Node) -> str:
"""Load environment variables and embed it into the configuration YAML."""
args = node.value.split()
# Check for a default value
if len(args) > 1:
return os.getenv(args[0], " ".join(args[1:]))
if args[0] in os.environ:
return os.environ[args[0]]
logger.error("Environment variable %s not defined", node.value)
raise XKNXException(node.value)
SafeLineLoader.add_constructor("!include", _include_yaml)
SafeLineLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_SEQUENCE_TAG, _construct_seq
)
SafeLineLoader.add_constructor("!env_var", _env_var_yaml)
SafeLineLoader.add_constructor("!include_dir_list", _include_dir_list_yaml)
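A minimal usage sketch for the loader above, assuming a hypothetical xknx.yaml next to the script and a KNX_PASSWORD environment variable; neither file nor variable is part of the original module.
import os

os.environ.setdefault("KNX_PASSWORD", "secret")   # illustrative value only
# xknx.yaml might contain, for example:
#   password: !env_var KNX_PASSWORD fallback
#   devices: !include devices.yaml
config = load_yaml("xknx.yaml")                   # custom tags resolved by SafeLineLoader
print(config)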
| 35.529915
| 88
| 0.68968
|
6cfe0497176e6b1e8a0a79b5f9b23d5ebf1dac14
| 8,530
|
py
|
Python
|
scripts/monitoring/cron-openshift-pruner.py
|
propyless/openshift-tools
|
16776b4f343ea3b2018f7679cc3383e616020710
|
[
"Apache-2.0"
] | null | null | null |
scripts/monitoring/cron-openshift-pruner.py
|
propyless/openshift-tools
|
16776b4f343ea3b2018f7679cc3383e616020710
|
[
"Apache-2.0"
] | null | null | null |
scripts/monitoring/cron-openshift-pruner.py
|
propyless/openshift-tools
|
16776b4f343ea3b2018f7679cc3383e616020710
|
[
"Apache-2.0"
] | 2
|
2018-10-16T05:11:13.000Z
|
2018-11-07T01:46:29.000Z
|
#!/usr/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
'''
Prune images/builds/deployments
'''
#
# Copyright 2016 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Disabling invalid-name because pylint doesn't like the naming
# convention we have.
# pylint: disable=invalid-name
import argparse
import base64
import json
import os
import subprocess
SERVICE_ACCOUNT_GROUP = "openshift-infra"
SERVICE_ACCOUNT = "autopruner"
SERVICE_ACCOUNT_TEMPLATE = {"apiVersion": "v1",
"kind": "ServiceAccount",
"metadata": {"name": SERVICE_ACCOUNT}
}
class OpenShiftPrune(object):
''' Class to handle pruning of old objects '''
def __init__(self):
self.args = None
self.parse_args()
def parse_args(self):
'''Parse the arguments for this script'''
parser = argparse.ArgumentParser(description="OpenShift object pruner")
parser.add_argument('-d', '--debug', default=False,
action="store_true", help="debug mode")
parser.add_argument('--image-keep-younger-than', default='24h',
help='Ignore images younger than set time')
parser.add_argument('--image-keep-tag-revisions', default='5',
help='Number of image revisions to keep')
parser.add_argument('--build-keep-younger-than', default='1h',
help='Ignore builds younger than set time')
parser.add_argument('--build-keep-complete', default='2',
help='Number of builds to keep')
parser.add_argument('--build-keep-failed', default='1',
help='Number of failed builds to keep')
parser.add_argument('--deploy-keep-younger-than', default='1h',
help='Ignore deployments younger than set time')
parser.add_argument('--deploy-keep-complete', default='2',
                            help='Number of deployments to keep')
parser.add_argument('--deploy-keep-failed', default='1',
help='Number of failed deployments to keep')
parser.add_argument('--kube-config', default='/tmp/admin.kubeconfig',
help='Kubeconfig creds to use')
self.args = parser.parse_args()
def ensure_autopruner_exists(self):
''' create autopruning account/perms if it doesn't exist '''
# user exists?
cmd = ['oc', 'get', 'serviceaccount', SERVICE_ACCOUNT,
'-n', SERVICE_ACCOUNT_GROUP,
'--config', self.args.kube_config]
rc = subprocess.call(cmd)
if rc != 0:
# create service account
if self.args.debug:
print "Service account not found. Creating."
read, write = os.pipe()
sa_template = json.dumps(SERVICE_ACCOUNT_TEMPLATE)
os.write(write, sa_template)
os.close(write)
cmd = ['oc', 'create', '-n', SERVICE_ACCOUNT_GROUP,
'-f', '-',
'--config', self.args.kube_config]
try:
subprocess.check_call(cmd, stdin=read)
except subprocess.CalledProcessError:
print "Error creating service account"
raise
# check if autoprune user has pruning perms
username = "system:serviceaccount:{}:{}".format(SERVICE_ACCOUNT_GROUP,
SERVICE_ACCOUNT)
cmd = ['oc', 'get', 'clusterrolebindings', 'system:image-pruner',
'-o', 'json', '--config', self.args.kube_config]
rc = 0
try:
output = json.loads(subprocess.check_output(cmd))
except subprocess.CalledProcessError as e:
rc = e.returncode
if rc != 0 or username not in output['userNames']:
# grant image pruning
if self.args.debug:
                print "Granting image pruning perms"
cmd = ['oc', 'adm', 'policy', 'add-cluster-role-to-user',
'system:image-pruner', username,
'--config', self.args.kube_config]
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
print "Error granting image pruning perms"
raise
def get_autopruner_token(self):
''' fetch and return the token for the autopruning account '''
token = None
self.ensure_autopruner_exists()
# get token
cmd = ['oc', 'get', 'serviceaccounts', SERVICE_ACCOUNT,
'-n', SERVICE_ACCOUNT_GROUP, '-o', 'json',
'--config', self.args.kube_config]
output = json.loads(subprocess.check_output(cmd))
secretname = None
for secret in output['secrets']:
if secret['name'].startswith(SERVICE_ACCOUNT + '-token'):
secretname = secret['name']
if secretname is None:
raise Exception("No secret with token info found.")
cmd = ['oc', 'get', 'secrets', secretname, '-n', SERVICE_ACCOUNT_GROUP,
'-o', 'json',
'--config', self.args.kube_config]
output = json.loads(subprocess.check_output(cmd))
token = base64.standard_b64decode(output['data']['token'])
return token
def prune_images(self):
''' call oc adm to prune images '''
token = self.get_autopruner_token()
cmd = ['oc', 'adm', 'prune', 'images',
'--keep-younger-than', self.args.image_keep_younger_than,
'--keep-tag-revisions', self.args.image_keep_tag_revisions,
'--config', self.args.kube_config,
'--token', token,
'--confirm']
if self.args.debug:
cmd += ['--loglevel', '4']
output = subprocess.check_output(cmd)
if self.args.debug:
print "Prune images output:\n" + output
def prune_builds(self):
''' call oc adm to prune builds '''
cmd = ['oc', 'adm', 'prune', 'builds',
'--keep-complete', self.args.build_keep_complete,
'--keep-younger-than', self.args.build_keep_younger_than,
'--keep-failed', self.args.build_keep_failed,
'--config', self.args.kube_config,
'--confirm']
if self.args.debug:
cmd += ['--loglevel', '4']
output = subprocess.check_output(cmd)
if self.args.debug:
print "Prune build output:\n" + output
def prune_deployments(self):
''' call oc adm to prune deployments '''
cmd = ['oc', 'adm', 'prune', 'deployments',
'--keep-complete', self.args.deploy_keep_complete,
'--keep-younger-than', self.args.deploy_keep_younger_than,
'--keep-failed', self.args.deploy_keep_failed,
'--config', self.args.kube_config,
'--confirm']
if self.args.debug:
cmd += ['--loglevel', '4']
output = subprocess.check_output(cmd)
if self.args.debug:
print "Prune deployment output:\n" + output
def main(self):
''' Prune images/builds/deployments '''
rc = 0
try:
self.prune_deployments()
except subprocess.CalledProcessError as e:
print "Error pruning deployments"
rc = e.returncode
try:
self.prune_builds()
except subprocess.CalledProcessError as e:
print "Error pruning builds"
rc = e.returncode
try:
self.prune_images()
except subprocess.CalledProcessError as e:
print "Error pruning images"
rc = e.returncode
if rc != 0:
raise Exception("Error during pruning")
if __name__ == '__main__':
OSPruner = OpenShiftPrune()
OSPruner.main()
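For reference, a hedged sketch of driving the pruner programmatically rather than from cron; the kubeconfig path and retention window below are illustrative assumptions, not values from the script.
import sys

sys.argv = ['cron-openshift-pruner.py',
            '--kube-config', '/tmp/admin.kubeconfig',   # assumed path
            '--image-keep-younger-than', '48h',
            '--debug']
pruner = OpenShiftPrune()   # parse_args() reads the argv set above
pruner.main()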
| 37.086957
| 79
| 0.567761
|
3bb13c6532b01f9330ca345580d2d0b2a6718195
| 249
|
py
|
Python
|
src/main.py
|
nbilbo/faz_download_imgs
|
be02b7c68c2dbbaa6976b50fb6cbdb84ad5e0839
|
[
"MIT"
] | null | null | null |
src/main.py
|
nbilbo/faz_download_imgs
|
be02b7c68c2dbbaa6976b50fb6cbdb84ad5e0839
|
[
"MIT"
] | null | null | null |
src/main.py
|
nbilbo/faz_download_imgs
|
be02b7c68c2dbbaa6976b50fb6cbdb84ad5e0839
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from backend.backend import Backend
from view.interface import Interface
from control.control_interface import ControlInterface
if __name__ == '__main__':
controler = ControlInterface(Interface, Backend)
controler.start()
| 22.636364
| 54
| 0.787149
|
b8cfd2c3bf1b9bbb449f3f31cda4b6bf7396fbfd
| 3,391
|
py
|
Python
|
example_channels/example_channels/settings.py
|
tim-bad/django-example-channels
|
fd9c97cacc3b3888882725f214dfb5d1f21f06fe
|
[
"MIT"
] | 97
|
2017-03-01T00:29:54.000Z
|
2022-03-27T15:41:36.000Z
|
example_channels/example_channels/settings.py
|
pmutua/django-example-channels
|
fd9c97cacc3b3888882725f214dfb5d1f21f06fe
|
[
"MIT"
] | 2
|
2020-06-06T00:46:06.000Z
|
2021-06-10T22:35:57.000Z
|
example_channels/example_channels/settings.py
|
pmutua/django-example-channels
|
fd9c97cacc3b3888882725f214dfb5d1f21f06fe
|
[
"MIT"
] | 70
|
2017-02-28T18:24:05.000Z
|
2022-03-27T15:41:33.000Z
|
"""
Django settings for example_channels project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ie(7l3+psd-5k!(x!h84fn7i-vm$)!=^&3y4c-(*agj75!632k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'channels',
'example',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'example_channels.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'example_channels.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'asgi_redis.RedisChannelLayer',
'CONFIG': {
'hosts': [('localhost', 6379)],
},
'ROUTING': 'example_channels.routing.channel_routing',
}
}
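The CHANNEL_LAYERS block above points at 'example_channels.routing.channel_routing'; the sketch below shows what that routing module might look like under Channels 1.x. The consumer imports are assumptions, not part of this settings file.
# example_channels/routing.py (hypothetical sketch, Channels 1.x style)
from channels.routing import route
from example.consumers import ws_connect, ws_disconnect  # assumed consumer functions

channel_routing = [
    route('websocket.connect', ws_connect),
    route('websocket.disconnect', ws_disconnect),
]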
| 25.496241
| 91
| 0.684459
|
ecf05a875a98eac2a78e90e6834911309bc72cba
| 5,072
|
py
|
Python
|
tests/metarl/tf/policies/test_categorical_mlp_policy.py
|
icml2020submission6857/metarl
|
9b66cefa2b6bcb6a38096d629ce8853b47c7171d
|
[
"MIT"
] | 2
|
2020-03-15T14:35:15.000Z
|
2021-02-15T16:38:00.000Z
|
tests/metarl/tf/policies/test_categorical_mlp_policy.py
|
icml2020submission6857/metarl
|
9b66cefa2b6bcb6a38096d629ce8853b47c7171d
|
[
"MIT"
] | null | null | null |
tests/metarl/tf/policies/test_categorical_mlp_policy.py
|
icml2020submission6857/metarl
|
9b66cefa2b6bcb6a38096d629ce8853b47c7171d
|
[
"MIT"
] | 1
|
2020-02-24T03:04:23.000Z
|
2020-02-24T03:04:23.000Z
|
import pickle
from unittest import mock
import numpy as np
import pytest
import tensorflow as tf
from metarl.tf.envs import TfEnv
from metarl.tf.policies import CategoricalMLPPolicy
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyDiscreteEnv
from tests.fixtures.models import SimpleMLPModel
class TestCategoricalMLPPolicy(TfGraphTestCase):
@pytest.mark.parametrize('obs_dim, action_dim', [
((1, ), 1),
((2, ), 2),
((1, 1), 1),
((2, 2), 2),
])
@mock.patch('numpy.random.choice')
def test_get_action(self, mock_rand, obs_dim, action_dim):
mock_rand.return_value = 0
env = TfEnv(DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
with mock.patch(('metarl.tf.policies.'
'categorical_mlp_policy.MLPModel'),
new=SimpleMLPModel):
policy = CategoricalMLPPolicy(env_spec=env.spec)
env.reset()
obs, _, _, _ = env.step(1)
action, prob = policy.get_action(obs)
expected_prob = np.full(action_dim, 0.5)
assert env.action_space.contains(action)
assert action == 0
assert np.array_equal(prob['prob'], expected_prob)
actions, probs = policy.get_actions([obs, obs, obs])
for action, prob in zip(actions, probs['prob']):
assert env.action_space.contains(action)
assert action == 0
assert np.array_equal(prob, expected_prob)
@pytest.mark.parametrize('obs_dim, action_dim', [
((1, ), 1),
((2, ), 2),
((1, 1), 1),
((2, 2), 2),
])
def test_dist_info(self, obs_dim, action_dim):
env = TfEnv(DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
with mock.patch(('metarl.tf.policies.'
'categorical_mlp_policy.MLPModel'),
new=SimpleMLPModel):
policy = CategoricalMLPPolicy(env_spec=env.spec)
env.reset()
obs, _, _, _ = env.step(1)
expected_prob = np.full(action_dim, 0.5)
policy_probs = policy.dist_info([obs.flatten()])
assert np.array_equal(policy_probs['prob'][0], expected_prob)
@pytest.mark.parametrize('obs_dim, action_dim', [
((1, ), 1),
((2, ), 2),
((1, 1), 1),
((2, 2), 2),
])
def test_dist_info_sym(self, obs_dim, action_dim):
env = TfEnv(DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
with mock.patch(('metarl.tf.policies.'
'categorical_mlp_policy.MLPModel'),
new=SimpleMLPModel):
policy = CategoricalMLPPolicy(env_spec=env.spec)
env.reset()
obs, _, _, _ = env.step(1)
expected_prob = np.full(action_dim, 0.5)
obs_dim = env.spec.observation_space.flat_dim
state_input = tf.compat.v1.placeholder(tf.float32,
shape=(None, obs_dim))
dist1 = policy.dist_info_sym(state_input, name='policy2')
prob = self.sess.run(dist1['prob'],
feed_dict={state_input: [obs.flatten()]})
assert np.array_equal(prob[0], expected_prob)
@pytest.mark.parametrize('obs_dim, action_dim', [
((1, ), 1),
((2, ), 2),
((1, 1), 1),
((2, 2), 2),
])
def test_is_pickleable(self, obs_dim, action_dim):
env = TfEnv(DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
with mock.patch(('metarl.tf.policies.'
'categorical_mlp_policy.MLPModel'),
new=SimpleMLPModel):
policy = CategoricalMLPPolicy(env_spec=env.spec)
env.reset()
obs, _, _, _ = env.step(1)
with tf.compat.v1.variable_scope('CategoricalMLPPolicy/MLPModel',
reuse=True):
return_var = tf.compat.v1.get_variable('return_var')
# assign it to all one
return_var.load(tf.ones_like(return_var).eval())
output1 = self.sess.run(
policy.model.outputs,
feed_dict={policy.model.input: [obs.flatten()]})
p = pickle.dumps(policy)
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
policy_pickled = pickle.loads(p)
output2 = sess.run(
policy_pickled.model.outputs,
feed_dict={policy_pickled.model.input: [obs.flatten()]})
assert np.array_equal(output1, output2)
@pytest.mark.parametrize('obs_dim, action_dim', [
((1, ), 1),
((2, ), 2),
((1, 1), 1),
((2, 2), 2),
])
def test_get_regularizable_vars(self, obs_dim, action_dim):
env = TfEnv(DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
policy = CategoricalMLPPolicy(env_spec=env.spec)
reg_vars = policy.get_regularizable_vars()
assert len(reg_vars) == 2
for var in reg_vars:
assert ('bias' not in var.name) and ('output' not in var.name)
| 35.468531
| 77
| 0.582216
|
8a463c3bfe1ce761181a6f8522ee373a9dd27219
| 1,982
|
py
|
Python
|
azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/models/virtual_machine_extension_instance_view_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2022-03-30T22:39:15.000Z
|
2022-03-30T22:39:15.000Z
|
azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/models/virtual_machine_extension_instance_view_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/models/virtual_machine_extension_instance_view_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2017-01-20T18:25:46.000Z
|
2017-05-12T21:31:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineExtensionInstanceView(Model):
"""The instance view of a virtual machine extension.
:param name: The virtual machine extension name.
:type name: str
:param type: Specifies the type of the extension; an example is
"CustomScriptExtension".
:type type: str
:param type_handler_version: Specifies the version of the script handler.
:type type_handler_version: str
:param substatuses: The resource status information.
:type substatuses:
list[~azure.mgmt.compute.v2017_03_30.models.InstanceViewStatus]
:param statuses: The resource status information.
:type statuses:
list[~azure.mgmt.compute.v2017_03_30.models.InstanceViewStatus]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'type_handler_version': {'key': 'typeHandlerVersion', 'type': 'str'},
'substatuses': {'key': 'substatuses', 'type': '[InstanceViewStatus]'},
'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
}
def __init__(self, *, name: str=None, type: str=None, type_handler_version: str=None, substatuses=None, statuses=None, **kwargs) -> None:
super(VirtualMachineExtensionInstanceView, self).__init__(**kwargs)
self.name = name
self.type = type
self.type_handler_version = type_handler_version
self.substatuses = substatuses
self.statuses = statuses
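A small usage sketch for the model above; the field values are illustrative only.
view = VirtualMachineExtensionInstanceView(
    name='customScript',
    type='CustomScriptExtension',
    type_handler_version='1.10')
print(view.name, view.type_handler_version)   # -> customScript 1.10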
| 41.291667
| 141
| 0.639758
|
bea4b77c0f3552572ede91e6c9cb8978182f5971
| 2,740
|
py
|
Python
|
authors/apps/comments/serializers.py
|
rfpremier/ah-django
|
ff4f1ba34d074e68e49f7896848f81b729542e1f
|
[
"BSD-3-Clause"
] | null | null | null |
authors/apps/comments/serializers.py
|
rfpremier/ah-django
|
ff4f1ba34d074e68e49f7896848f81b729542e1f
|
[
"BSD-3-Clause"
] | 21
|
2019-03-25T18:38:02.000Z
|
2019-04-23T09:54:52.000Z
|
authors/apps/comments/serializers.py
|
rfpremier/ah-django
|
ff4f1ba34d074e68e49f7896848f81b729542e1f
|
[
"BSD-3-Clause"
] | 3
|
2019-07-15T08:41:58.000Z
|
2019-12-20T08:25:34.000Z
|
from rest_framework import serializers
from .models import Comments, Like
from .history import CommentHistory as history
class CommentsSerializer(serializers.ModelSerializer):
article = serializers.SerializerMethodField()
user = serializers.SerializerMethodField()
highlighted_text = serializers.CharField(
allow_null=True, allow_blank=True, min_length=4, required=False)
likesCount = serializers.SerializerMethodField()
class Meta:
model = Comments
fields = [
'id',
'created_at',
'updated_at',
'body',
'user',
'highlighted_text',
'article',
'likesCount',
]
read_only_fields = ["id"]
def create(self, validated_data):
return Comments.objects.create(**validated_data)
def get_user(self, obj):
return {"username": obj.user.username,
"bio": obj.user.bio,
"image": obj.user.image}
def get_article(self, obj):
return obj.article.slug
def val_highlighted_text(self, text, article):
if text is not None and text not in article.body:
msg_d = ["Highlighted text not part of Article ({})".format(
article.title)]
msg = {'highlighted_text': msg_d}
raise serializers.ValidationError(msg)
else:
return text
def get_likesCount(self, obj):
likes_queryset = Like.objects.likes().filter(comment=obj.id)
dislikes_queryset = Like.objects.dislikes().filter(comment=obj.id)
likesCount = likes_queryset.count()
dislikesCount = dislikes_queryset.count()
count = {'likes': likesCount,
'dislikes': dislikesCount,
'total': likesCount+dislikesCount}
return count
class CommentHistorySerializer(serializers.ModelSerializer):
comment_history = serializers.SerializerMethodField()
comment_id = serializers.SerializerMethodField()
class Meta:
model = Comments
fields = ('comment_id', 'comment_history')
def get_comment_history(self, obj):
return history().get_comment_updates(obj.comment_history)
def get_comment_id(self, obj):
return obj.id
class CommentsLikesSerializer(serializers.ModelSerializer):
"""
Like dislike comments serializer
"""
class Meta:
model = Like
fields = ('id', 'comment', 'user',
'like', 'created_at', 'updated_at')
read_only_fields = ('id', 'comment', 'user',
'created_at', 'updated_at')
def create(self, validated_data):
return Like.objects.create(**validated_data)
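A hedged sketch of using CommentsSerializer from a DRF view; request and article are assumed to exist in that context and are not defined here.
# Hypothetical view-level usage; payload values are illustrative.
serializer = CommentsSerializer(data={'body': 'Nice read!'})
serializer.is_valid(raise_exception=True)
comment = serializer.save(user=request.user, article=article)  # extra kwargs reach create()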
| 30.444444
| 74
| 0.617883
|
78cff1a7a3fcb300aac30ce8a8d1669493c95d38
| 7,801
|
py
|
Python
|
vedadet/misc/bbox/assigners/atss_assigner.py
|
jie311/vedadet
|
aaf3b3bc3c7944aba1cc28138165d403023a9152
|
[
"Apache-2.0"
] | 424
|
2020-10-19T03:56:49.000Z
|
2022-03-28T02:47:39.000Z
|
vedadet/misc/bbox/assigners/atss_assigner.py
|
jie311/vedadet
|
aaf3b3bc3c7944aba1cc28138165d403023a9152
|
[
"Apache-2.0"
] | 72
|
2020-11-27T17:10:00.000Z
|
2022-03-17T02:40:53.000Z
|
vedadet/misc/bbox/assigners/atss_assigner.py
|
jie311/vedadet
|
aaf3b3bc3c7944aba1cc28138165d403023a9152
|
[
"Apache-2.0"
] | 116
|
2020-11-03T02:31:17.000Z
|
2022-03-08T08:20:48.000Z
|
# adapted from https://github.com/open-mmlab/mmcv or
# https://github.com/open-mmlab/mmdetection
import torch
from vedacore.misc import registry
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
@registry.register_module('bbox_assigner')
class ATSSAssigner(BaseAssigner):
"""Assign a corresponding gt bbox or background to each bbox.
    Each proposal will be assigned `0` or a positive integer
indicating the ground truth index.
- 0: negative sample, no assigned gt
- positive integer: positive sample, index (1-based) of assigned gt
Args:
        topk (int): number of bboxes selected in each level
"""
def __init__(self,
topk,
iou_calculator=dict(type='BboxOverlaps2D'),
ignore_iof_thr=-1):
self.topk = topk
self.iou_calculator = build_iou_calculator(iou_calculator)
self.ignore_iof_thr = ignore_iof_thr
# https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py
def assign(self,
bboxes,
num_level_bboxes,
gt_bboxes,
gt_bboxes_ignore=None,
gt_labels=None):
"""Assign gt to bboxes.
The assignment is done in following steps
1. compute iou between all bbox (bbox of all pyramid levels) and gt
2. compute center distance between all bbox and gt
        3. on each pyramid level, for each gt, select k bboxes whose centers
           are closest to the gt center, so we select k*l bboxes in total as
           candidates for each gt
        4. get the corresponding iou for these candidates, and compute the
           mean and std; set mean + std as the iou threshold
        5. select those candidates whose iou is greater than or equal to
           the threshold as positive
6. limit the positive sample's center in gt
Args:
bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
num_level_bboxes (List): num of bboxes in each level
gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
labelled as `ignored`, e.g., crowd boxes in COCO.
gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
Returns:
:obj:`AssignResult`: The assign result.
"""
INF = 100000000
bboxes = bboxes[:, :4]
num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0)
# compute iou between all bbox and gt
overlaps = self.iou_calculator(bboxes, gt_bboxes)
# assign 0 by default
assigned_gt_inds = overlaps.new_full((num_bboxes, ),
0,
dtype=torch.long)
if num_gt == 0 or num_bboxes == 0:
# No ground truth or boxes, return empty assignment
max_overlaps = overlaps.new_zeros((num_bboxes, ))
if num_gt == 0:
# No truth, assign everything to background
assigned_gt_inds[:] = 0
if gt_labels is None:
assigned_labels = None
else:
assigned_labels = overlaps.new_full((num_bboxes, ),
-1,
dtype=torch.long)
return AssignResult(
num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
# compute center distance between all bbox and gt
gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0
gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0
gt_points = torch.stack((gt_cx, gt_cy), dim=1)
bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0
bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0
bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1)
distances = (bboxes_points[:, None, :] -
gt_points[None, :, :]).pow(2).sum(-1).sqrt()
if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):
ignore_overlaps = self.iou_calculator(
bboxes, gt_bboxes_ignore, mode='iof')
ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr
distances[ignore_idxs, :] = INF
assigned_gt_inds[ignore_idxs] = -1
# Selecting candidates based on the center distance
candidate_idxs = []
start_idx = 0
for level, bboxes_per_level in enumerate(num_level_bboxes):
# on each pyramid level, for each gt,
# select k bbox whose center are closest to the gt center
end_idx = start_idx + bboxes_per_level
distances_per_level = distances[start_idx:end_idx, :]
_, topk_idxs_per_level = distances_per_level.topk(
self.topk, dim=0, largest=False)
candidate_idxs.append(topk_idxs_per_level + start_idx)
start_idx = end_idx
candidate_idxs = torch.cat(candidate_idxs, dim=0)
        # get the corresponding iou for these candidates, and compute the
# mean and std, set mean + std as the iou threshold
candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)]
overlaps_mean_per_gt = candidate_overlaps.mean(0)
overlaps_std_per_gt = candidate_overlaps.std(0)
overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt
is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :]
# limit the positive sample's center in gt
for gt_idx in range(num_gt):
candidate_idxs[:, gt_idx] += gt_idx * num_bboxes
ep_bboxes_cx = bboxes_cx.view(1, -1).expand(
num_gt, num_bboxes).contiguous().view(-1)
ep_bboxes_cy = bboxes_cy.view(1, -1).expand(
num_gt, num_bboxes).contiguous().view(-1)
candidate_idxs = candidate_idxs.view(-1)
# calculate the left, top, right, bottom distance between positive
# bbox center and gt side
l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0]
t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1]
r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt)
b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt)
is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01
is_pos = is_pos & is_in_gts
# if an anchor box is assigned to multiple gts,
# the one with the highest IoU will be selected.
overlaps_inf = torch.full_like(overlaps,
-INF).t().contiguous().view(-1)
index = candidate_idxs.view(-1)[is_pos.view(-1)]
overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index]
overlaps_inf = overlaps_inf.view(num_gt, -1).t()
max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1)
assigned_gt_inds[
max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1
if gt_labels is not None:
assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
pos_inds = torch.nonzero(
assigned_gt_inds > 0, as_tuple=False).squeeze()
if pos_inds.numel() > 0:
assigned_labels[pos_inds] = gt_labels[
assigned_gt_inds[pos_inds] - 1]
else:
assigned_labels = None
return AssignResult(
num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
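A hedged usage sketch for the assigner above; the anchor layout, per-level split and ground-truth boxes are illustrative assumptions.
import torch

assigner = ATSSAssigner(topk=2)
xy = torch.rand(8, 2) * 50                      # top-left corners
wh = torch.rand(8, 2) * 40 + 1.0                # widths/heights
bboxes = torch.cat([xy, xy + wh], dim=1)        # (n, 4) anchors in xyxy format
num_level_bboxes = [5, 3]                       # anchors per pyramid level, sums to n
gt_bboxes = torch.tensor([[0., 0., 50., 50.], [40., 40., 90., 90.]])
gt_labels = torch.tensor([1, 3])
result = assigner.assign(bboxes, num_level_bboxes, gt_bboxes, gt_labels=gt_labels)
print(result.gt_inds, result.labels)            # per-anchor assigned gt index and label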
| 43.338889
| 87
| 0.605179
|
1b93ac322cae58b32073b2c5d44bf019c24ed739
| 3,115
|
py
|
Python
|
mymodel.py
|
tombuuz/Depthmap-Resource-Limited
|
8ca004b10e3c30b4271da9673fd21688818f88c1
|
[
"MIT"
] | 1
|
2021-05-27T16:39:33.000Z
|
2021-05-27T16:39:33.000Z
|
mymodel.py
|
tombuuz/Depthmap-Resource-Limited
|
8ca004b10e3c30b4271da9673fd21688818f88c1
|
[
"MIT"
] | null | null | null |
mymodel.py
|
tombuuz/Depthmap-Resource-Limited
|
8ca004b10e3c30b4271da9673fd21688818f88c1
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
#import nni.retiarii.nn.pytorch as nn
#from nni.retiarii import basic_unit
from ops import ConvBn, ConvDw, UpConv, PointWise, UpProj, MyBlock
#from nni.nas.pytorch import mutables
from nni.nas import pytorch as nas
#from nni.retiarii.nn.pytorch import LayerChoice
#import collections
class MobileNet(nn.Module):
def __init__(self):
super().__init__()
self.convbn = ConvBn(3, 32, 2)
#self.convbn2 = ConvBn(3, 32, 4)
#operators = nas.mutables.LayerChoice([ConvBn(3, 32, 2), ConvBn(3, 32, 2)], key='first_layer')
#self.block = MyBlock(operators)
#self.layer = LayerChoice(collections.OrderedDict([
# ("conv2b", ConvBn(3, 32, 2)), ("conv2b2", ConvBn(3, 32, 2))]))
self.convdw1 = ConvDw( 32, 64, 1)
self.convdw2 = ConvDw( 64, 128, 2)
self.convdw3 = ConvDw(128, 128, 1)
self.convdw4 = ConvDw(128, 256, 2)
#self.convdw5 = ConvDw(256, 256, 1)
operators = nas.mutables.LayerChoice([ConvDw(256, 256, 1), ConvDw(256, 256, 1)], key='first_layer')
self.convdw5 = MyBlock(operators)
self.convdw6 = ConvDw(256, 512, 2)
self.convdw7 = ConvDw(512, 512, 1)
self.convdw8 = ConvDw(512, 512, 1)
self.convdw9 = ConvDw(512, 512, 1)
self.convdw10 = ConvDw(512, 512, 1)
self.convdw11 = ConvDw(512, 512, 1)
self.convdw12 = ConvDw(512, 1024, 2)
self.convdw13 = ConvDw(1024, 1024, 1)
"""
self.model = nn.Sequential(
ConvBn( 3, 32, 2),
ConvDw( 32, 64, 1),
ConvDw( 64, 128, 2),
ConvDw(128, 128, 1),
ConvDw(128, 256, 2),
ConvDw(256, 256, 1),
ConvDw(256, 512, 2),
ConvDw(512, 512, 1),
ConvDw(512, 512, 1),
ConvDw(512, 512, 1),
ConvDw(512, 512, 1),
ConvDw(512, 512, 1),
ConvDw(512, 1024, 2),
ConvDw(1024, 1024, 1),
#nn.AvgPool2d(7),
)
"""
#self.fc = nn.Linear(1024, 1000)
self.upconv1 = UpProj(1024, 512)
self.upconv2 = UpProj(512, 256)
self.upconv3 = UpProj(256, 128)
self.upconv4 = UpProj(128, 64)
self.upconv5 = UpProj(64, 32)
self.convf = PointWise(32, 1)
def forward(self, x):
x = self.convbn(x)
#x = self.layer(x)
x = self.convdw1(x)
x = self.convdw2(x)
x = self.convdw3(x)
x = self.convdw4(x)
x = self.convdw5(x)
x = self.convdw6(x)
x = self.convdw7(x)
x = self.convdw8(x)
x = self.convdw9(x)
x = self.convdw10(x)
x = self.convdw11(x)
x = self.convdw12(x)
x = self.convdw13(x)
#x = self.fc1(x)
# x = self.model(x)
#x = x.view(-1, 1024)
#x = self.fc(x)
x = self.upconv1(x)
x = self.upconv2(x)
x = self.upconv3(x)
x = self.upconv4(x)
x = self.upconv5(x)
x = self.convf(x)
return x
| 30.841584
| 107
| 0.523274
|
042fd97ee015bb54c88faadcd5dcc829e104a304
| 1,435
|
py
|
Python
|
monk/system_unit_tests/pytorch/test_block_inception_e.py
|
take2rohit/monk_v1
|
9c567bf2c8b571021b120d879ba9edf7751b9f92
|
[
"Apache-2.0"
] | 542
|
2019-11-10T12:09:31.000Z
|
2022-03-28T11:39:07.000Z
|
monk/system_unit_tests/pytorch/test_block_inception_e.py
|
take2rohit/monk_v1
|
9c567bf2c8b571021b120d879ba9edf7751b9f92
|
[
"Apache-2.0"
] | 117
|
2019-11-12T09:39:24.000Z
|
2022-03-12T00:20:41.000Z
|
monk/system_unit_tests/pytorch/test_block_inception_e.py
|
take2rohit/monk_v1
|
9c567bf2c8b571021b120d879ba9edf7751b9f92
|
[
"Apache-2.0"
] | 246
|
2019-11-09T21:53:24.000Z
|
2022-03-29T00:57:07.000Z
|
import os
import sys
sys.path.append("../../../../monk_v1/");
sys.path.append("../../../monk/");
import psutil
from pytorch_prototype import prototype
from compare_prototype import compare
from common import print_start
from common import print_status
import torch
import numpy as np
from pytorch.losses.return_loss import load_loss
def test_block_inception_e(system_dict):
forward = True;
test = "test_block_inception_e";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
gtf = prototype(verbose=0);
gtf.Prototype("sample-project-1", "sample-experiment-1");
network = [];
network.append(gtf.inception_e_block(pool_type="avg"));
network.append(gtf.inception_e_block(pool_type="max"));
gtf.Compile_Network(network, data_shape=(1, 64, 64), use_gpu=False);
x = torch.randn(1, 1, 64, 64);
y = gtf.system_dict["local"]["model"](x);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
return system_dict
| 28.7
| 80
| 0.634146
|
0654fbad8f372ab91518574fab87fb6c7739aacb
| 1,837
|
py
|
Python
|
virtual_env/Lib/site-packages/playhouse/db_url.py
|
Zachary-Jackson/Flask-Learning-Journal
|
7934766299dc864374ba7a28cb3c76add4a4ddc4
|
[
"BSD-2-Clause"
] | 1
|
2018-02-26T08:41:08.000Z
|
2018-02-26T08:41:08.000Z
|
virtual_env/Lib/site-packages/playhouse/db_url.py
|
Zachary-Jackson/Flask-Learning-Journal
|
7934766299dc864374ba7a28cb3c76add4a4ddc4
|
[
"BSD-2-Clause"
] | null | null | null |
virtual_env/Lib/site-packages/playhouse/db_url.py
|
Zachary-Jackson/Flask-Learning-Journal
|
7934766299dc864374ba7a28cb3c76add4a4ddc4
|
[
"BSD-2-Clause"
] | null | null | null |
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from peewee import *
from playhouse.sqlite_ext import SqliteExtDatabase
try:
from playhouse.apsw_ext import APSWDatabase
except ImportError:
APSWDatabase = None
try:
from playhouse.berkeleydb import BerkeleyDatabase
except ImportError:
BerkeleyDatabase = None
try:
from playhouse.postgres_ext import PostgresqlExtDatabase
except ImportError:
PostgresqlExtDatabase = None
schemes = {
'apsw': APSWDatabase,
'berkeleydb': BerkeleyDatabase,
'mysql': MySQLDatabase,
'postgres': PostgresqlDatabase,
'postgresql': PostgresqlDatabase,
'postgresext': PostgresqlExtDatabase,
'postgresqlext': PostgresqlExtDatabase,
'sqlite': SqliteDatabase,
'sqliteext': SqliteExtDatabase,
}
def connect(url):
parsed = urlparse(url)
database_class = schemes.get(parsed.scheme)
if database_class is None:
        if parsed.scheme in schemes:
raise RuntimeError('Attempted to use "%s" but a required library '
'could not be imported.' % parsed.scheme)
else:
raise RuntimeError('Unrecognized or unsupported scheme: "%s".' %
parsed.scheme)
connect_kwargs = {'database': parsed.path[1:]}
if parsed.username:
connect_kwargs['user'] = parsed.username
if parsed.password:
connect_kwargs['password'] = parsed.password
if parsed.hostname:
connect_kwargs['host'] = parsed.hostname
if parsed.port:
connect_kwargs['port'] = parsed.port
# Adjust parameters for MySQL.
if database_class is MySQLDatabase and 'password' in connect_kwargs:
connect_kwargs['passwd'] = connect_kwargs.pop('password')
return database_class(**connect_kwargs)
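A short usage sketch for connect(); the URLs and credentials below are illustrative only.
sqlite_db = connect('sqlite:///app.db')                        # -> SqliteDatabase('app.db')
postgres_db = connect('postgres://user:secret@localhost:5432/app')
print(type(sqlite_db).__name__, type(postgres_db).__name__)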
| 30.616667
| 78
| 0.690256
|
5dd9faefc1c6cd218ce02dac1e6a7a7383caf7cb
| 7,007
|
py
|
Python
|
burger_war_dev/scripts/Radar.py
|
takaoh/burger_war_dev
|
f48918182327f07942df86c4ae04575c5f9ea6b2
|
[
"BSD-3-Clause"
] | 1
|
2021-02-05T09:11:30.000Z
|
2021-02-05T09:11:30.000Z
|
burger_war_dev/scripts/Radar.py
|
takaoh/burger_war_dev
|
f48918182327f07942df86c4ae04575c5f9ea6b2
|
[
"BSD-3-Clause"
] | 2
|
2021-02-28T05:44:59.000Z
|
2021-03-02T14:17:09.000Z
|
burger_war_dev/scripts/Radar.py
|
takaoh/burger_war_dev
|
f48918182327f07942df86c4ae04575c5f9ea6b2
|
[
"BSD-3-Clause"
] | 2
|
2021-03-01T08:00:41.000Z
|
2021-03-02T10:14:23.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This is the ALL SENSOR use node.
Mainly echoes sensor values in the terminal.
Please use it as a base for your own script.
by Takuya Yamaguchi @dashimaki360
'''
import rospy
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image
from sensor_msgs.msg import Imu
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import JointState
from nav_msgs.msg import Odometry
from std_msgs.msg import String
from cv_bridge import CvBridge, CvBridgeError
import cv2
import numpy as np
import math
class AllSensorBot(object):
def __init__(self,
use_lidar=False, use_camera=False, use_imu=False,
use_odom=False, use_joint_states=False):
# velocity publisher
self.vel_pub = rospy.Publisher('cmd_vel', Twist,queue_size=1)
# lidar scan subscriber
if use_lidar:
self.scan = LaserScan()
self.scanned = LaserScan()
self.RadarRatio = 50
self.lidar_sub = rospy.Subscriber('scan', LaserScan, self.lidarCallback)
        # camera subscriber
        # please uncomment if you use the camera
if use_camera:
# for convert image topic to opencv obj
self.img = None
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber('image_raw', Image, self.imageCallback)
# imu subscriber
if use_imu:
self.imu_sub = rospy.Subscriber('imu', Imu, self.imuCallback)
# odom subscriber
if use_odom:
self.odom_sub = rospy.Subscriber('odom', Odometry, self.odomCallback)
# joint_states subscriber
if use_joint_states:
self.odom_sub = rospy.Subscriber('joint_states', JointState, self.jointstateCallback)
def strategy(self):
'''
calc Twist and publish cmd_vel topic
'''
r = rospy.Rate(1)
while not rospy.is_shutdown():
# update twist
twist = Twist()
twist.linear.x = 0; twist.linear.y = 0; twist.linear.z = 0
twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = 0
if len(self.scan.ranges) != 0:
                self.Radar()
# publish twist topic
self.vel_pub.publish(twist)
r.sleep()
def Radar(self):
"""
Radar map from LIDAR
"""
print "Radar func"
if len(self.scanned.ranges) == 0:
self.scanned.ranges = self.scan.ranges[:]
npScanRanges = np.array(self.scan.ranges)
npScannedRanges = np.array(self.scanned.ranges)
npSubRanges = abs(npScanRanges - npScannedRanges)
for i in range(len(npSubRanges)):
if npSubRanges[i] < 0.15:
npSubRanges[i] = 0
else:
npSubRanges[i] = 1
npMaskedRanges = npScanRanges*npSubRanges
"""
if npSubRanges[i] != 0:
print "i=%d Range=%f" %(i,npSubRanges[i])
print npSubRanges
"""
"""
Create blank image with 701x701[pixel]
"""
height = int(self.scan.range_max * self.RadarRatio * 2 + 1)
width = int(self.scan.range_max * self.RadarRatio * 2 + 1)
radar = np.ones((height,width,3),np.uint8)*40
origin_x = int(self.scan.range_max * self.RadarRatio)
origin_y = int(self.scan.range_max * self.RadarRatio)
#radar.itemset((origin_x,origin_y,2),255)
#radar[origin_x,origin_y] = [255,255,255]
for n in range(0,width):
radar.itemset((origin_y,n,2),255)
radar.itemset((n,origin_x,2),255)
for i in range(len(npMaskedRanges)):
if npMaskedRanges[i] != 0:
if i <= 90:
ang = np.deg2rad(90 - i)
x = origin_x - int(self.RadarRatio * npMaskedRanges[i] * math.cos(ang))
y = origin_y - int(self.RadarRatio * npMaskedRanges[i] * math.sin(ang))
print "i:%d ang:%f x:%d y:%d range:%f" %(i, np.rad2deg(ang),x,y,npMaskedRanges[i])
elif i > 90 and i <= 180:
ang = np.deg2rad(i - 90)
x = origin_x - int(self.RadarRatio * npMaskedRanges[i] * math.cos(ang))
y = origin_y + int(self.RadarRatio * npMaskedRanges[i] * math.sin(ang))
print "i:%d ang:%f x:%d y:%d range:%f" %(i, np.rad2deg(ang),x,y,npMaskedRanges[i])
elif i > 180 and i <= 270:
ang = np.deg2rad(270 - i)
x = origin_x + int(self.RadarRatio * npMaskedRanges[i] * math.cos(ang))
y = origin_y + int(self.RadarRatio * npMaskedRanges[i] * math.sin(ang))
print "i:%d ang:%f x:%d y:%d range:%f" %(i, np.rad2deg(ang),x,y,npMaskedRanges[i])
elif i > 270 and i <= 359:
ang = np.deg2rad(i - 270)
x = origin_x + int(self.RadarRatio * npMaskedRanges[i] * math.cos(ang))
y = origin_y - int(self.RadarRatio * npMaskedRanges[i] * math.sin(ang))
print "i:%d ang:%f x:%d y:%d range:%f" %(i, np.rad2deg(ang),x,y,npMaskedRanges[i])
#print "ang:%f x:%d y:%d" %(np.rad2deg(ang),x,y)
radar.itemset((y,x,1),255)
cv2.imshow('Radar',radar)
cv2.waitKey(1)
self.scanned.ranges = self.scan.ranges[:]
return
# lidar scan topic call back sample
# update lidar scan state
def lidarCallback(self, data):
self.scan = data
#print self.scan.range_min
#rospy.loginfo(self.scan)
# camera image call back sample
    # convert image topic to an opencv object and show it
def imageCallback(self, data):
try:
self.img = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
rospy.logerr(e)
cv2.imshow("Image window", self.img)
cv2.waitKey(1)
# imu call back sample
# update imu state
def imuCallback(self, data):
self.imu = data
rospy.loginfo(self.imu)
# odom call back sample
# update odometry state
def odomCallback(self, data):
self.pose_x = data.pose.pose.position.x
self.pose_y = data.pose.pose.position.y
rospy.loginfo("odom pose_x: {}".format(self.pose_x))
rospy.loginfo("odom pose_y: {}".format(self.pose_y))
# jointstate call back sample
# update joint state
def jointstateCallback(self, data):
self.wheel_rot_r = data.position[0]
self.wheel_rot_l = data.position[1]
rospy.loginfo("joint_state R: {}".format(self.wheel_rot_r))
rospy.loginfo("joint_state L: {}".format(self.wheel_rot_l))
if __name__ == '__main__':
rospy.init_node('all_sensor_sample')
bot = AllSensorBot(use_lidar=True, use_camera=False, use_imu=False,
use_odom=False, use_joint_states=False)
bot.strategy()
| 36.494792
| 102
| 0.575139
|
68e7fed24e7fb28ecd01a8e80c97b227cf5f3d41
| 18,300
|
py
|
Python
|
sdks/python/apache_beam/runners/interactive/caching/streaming_cache_test.py
|
NarimanAB/beam
|
6cedbac5bb42304f4af88634edd276b0b78e4e4e
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 5
|
2019-07-27T11:54:33.000Z
|
2021-06-06T11:53:36.000Z
|
sdks/python/apache_beam/runners/interactive/caching/streaming_cache_test.py
|
NarimanAB/beam
|
6cedbac5bb42304f4af88634edd276b0b78e4e4e
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 12
|
2019-04-15T15:27:23.000Z
|
2019-07-01T18:13:10.000Z
|
sdks/python/apache_beam/runners/interactive/caching/streaming_cache_test.py
|
NarimanAB/beam
|
6cedbac5bb42304f4af88634edd276b0b78e4e4e
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2021-07-16T09:15:14.000Z
|
2021-07-16T09:15:14.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
import unittest
from apache_beam import coders
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.portability.api.beam_interactive_api_pb2 import TestStreamFileHeader
from apache_beam.portability.api.beam_interactive_api_pb2 import TestStreamFileRecord
from apache_beam.portability.api.beam_runner_api_pb2 import TestStreamPayload
from apache_beam.runners.interactive.cache_manager import SafeFastPrimitivesCoder
from apache_beam.runners.interactive.caching.cacheable import CacheKey
from apache_beam.runners.interactive.caching.streaming_cache import StreamingCache
from apache_beam.runners.interactive.testing.test_cache_manager import FileRecordsBuilder
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.test_stream import TestStream
from apache_beam.testing.util import *
from apache_beam.transforms.window import TimestampedValue
# Nose automatically detects tests if they match a regex. Here, it mistakes
# these protos for tests. For more info see the Nose docs at:
# https://nose.readthedocs.io/en/latest/writing_tests.html
TestStreamPayload.__test__ = False # type: ignore[attr-defined]
TestStreamFileHeader.__test__ = False # type: ignore[attr-defined]
TestStreamFileRecord.__test__ = False # type: ignore[attr-defined]
class StreamingCacheTest(unittest.TestCase):
def setUp(self):
pass
def test_exists(self):
cache = StreamingCache(cache_dir=None)
self.assertFalse(cache.exists('my_label'))
cache.write([TestStreamFileRecord()], 'my_label')
self.assertTrue(cache.exists('my_label'))
def test_empty(self):
CACHED_PCOLLECTION_KEY = repr(CacheKey('arbitrary_key', '', '', ''))
cache = StreamingCache(cache_dir=None)
self.assertFalse(cache.exists(CACHED_PCOLLECTION_KEY))
cache.write([], CACHED_PCOLLECTION_KEY)
reader, _ = cache.read(CACHED_PCOLLECTION_KEY)
# Assert that an empty reader returns an empty list.
self.assertFalse([e for e in reader])
def test_size(self):
cache = StreamingCache(cache_dir=None)
cache.write([TestStreamFileRecord()], 'my_label')
coder = cache.load_pcoder('my_label')
# Add one because of the new-line character that is also written.
size = len(coder.encode(TestStreamFileRecord().SerializeToString())) + 1
self.assertEqual(cache.size('my_label'), size)
def test_clear(self):
cache = StreamingCache(cache_dir=None)
self.assertFalse(cache.exists('my_label'))
cache.sink(['my_label'], is_capture=True)
cache.write([TestStreamFileRecord()], 'my_label')
self.assertTrue(cache.exists('my_label'))
self.assertEqual(cache.capture_keys, set(['my_label']))
self.assertTrue(cache.clear('my_label'))
self.assertFalse(cache.exists('my_label'))
self.assertFalse(cache.capture_keys)
def test_single_reader(self):
"""Tests that we expect to see all the correctly emitted TestStreamPayloads.
"""
CACHED_PCOLLECTION_KEY = repr(CacheKey('arbitrary_key', '', '', ''))
values = (FileRecordsBuilder(tag=CACHED_PCOLLECTION_KEY)
.add_element(element=0, event_time_secs=0)
.advance_processing_time(1)
.add_element(element=1, event_time_secs=1)
.advance_processing_time(1)
.add_element(element=2, event_time_secs=2)
.build()) # yapf: disable
cache = StreamingCache(cache_dir=None)
cache.write(values, CACHED_PCOLLECTION_KEY)
reader, _ = cache.read(CACHED_PCOLLECTION_KEY)
coder = coders.FastPrimitivesCoder()
events = list(reader)
# Units here are in microseconds.
expected = [
TestStreamPayload.Event(
element_event=TestStreamPayload.Event.AddElements(
elements=[
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode(0), timestamp=0)
],
tag=CACHED_PCOLLECTION_KEY)),
TestStreamPayload.Event(
processing_time_event=TestStreamPayload.Event.AdvanceProcessingTime(
advance_duration=1 * 10**6)),
TestStreamPayload.Event(
element_event=TestStreamPayload.Event.AddElements(
elements=[
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode(1), timestamp=1 * 10**6)
],
tag=CACHED_PCOLLECTION_KEY)),
TestStreamPayload.Event(
processing_time_event=TestStreamPayload.Event.AdvanceProcessingTime(
advance_duration=1 * 10**6)),
TestStreamPayload.Event(
element_event=TestStreamPayload.Event.AddElements(
elements=[
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode(2), timestamp=2 * 10**6)
],
tag=CACHED_PCOLLECTION_KEY)),
]
self.assertSequenceEqual(events, expected)
def test_multiple_readers(self):
"""Tests that the service advances the clock with multiple outputs.
"""
CACHED_LETTERS = repr(CacheKey('letters', '', '', ''))
CACHED_NUMBERS = repr(CacheKey('numbers', '', '', ''))
CACHED_LATE = repr(CacheKey('late', '', '', ''))
letters = (FileRecordsBuilder(CACHED_LETTERS)
.advance_processing_time(1)
.advance_watermark(watermark_secs=0)
.add_element(element='a', event_time_secs=0)
.advance_processing_time(10)
.advance_watermark(watermark_secs=10)
.add_element(element='b', event_time_secs=10)
.build()) # yapf: disable
numbers = (FileRecordsBuilder(CACHED_NUMBERS)
.advance_processing_time(2)
.add_element(element=1, event_time_secs=0)
.advance_processing_time(1)
.add_element(element=2, event_time_secs=0)
.advance_processing_time(1)
.add_element(element=2, event_time_secs=0)
.build()) # yapf: disable
late = (FileRecordsBuilder(CACHED_LATE)
.advance_processing_time(101)
.add_element(element='late', event_time_secs=0)
.build()) # yapf: disable
cache = StreamingCache(cache_dir=None)
cache.write(letters, CACHED_LETTERS)
cache.write(numbers, CACHED_NUMBERS)
cache.write(late, CACHED_LATE)
reader = cache.read_multiple([[CACHED_LETTERS], [CACHED_NUMBERS],
[CACHED_LATE]])
coder = coders.FastPrimitivesCoder()
events = list(reader)
# Units here are in microseconds.
expected = [
# Advances clock from 0 to 1
TestStreamPayload.Event(
processing_time_event=TestStreamPayload.Event.AdvanceProcessingTime(
advance_duration=1 * 10**6)),
TestStreamPayload.Event(
watermark_event=TestStreamPayload.Event.AdvanceWatermark(
new_watermark=0, tag=CACHED_LETTERS)),
TestStreamPayload.Event(
element_event=TestStreamPayload.Event.AddElements(
elements=[
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode('a'), timestamp=0)
],
tag=CACHED_LETTERS)),
# Advances clock from 1 to 2
TestStreamPayload.Event(
processing_time_event=TestStreamPayload.Event.AdvanceProcessingTime(
advance_duration=1 * 10**6)),
TestStreamPayload.Event(
element_event=TestStreamPayload.Event.AddElements(
elements=[
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode(1), timestamp=0)
],
tag=CACHED_NUMBERS)),
# Advances clock from 2 to 3
TestStreamPayload.Event(
processing_time_event=TestStreamPayload.Event.AdvanceProcessingTime(
advance_duration=1 * 10**6)),
TestStreamPayload.Event(
element_event=TestStreamPayload.Event.AddElements(
elements=[
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode(2), timestamp=0)
],
tag=CACHED_NUMBERS)),
# Advances clock from 3 to 4
TestStreamPayload.Event(
processing_time_event=TestStreamPayload.Event.AdvanceProcessingTime(
advance_duration=1 * 10**6)),
TestStreamPayload.Event(
element_event=TestStreamPayload.Event.AddElements(
elements=[
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode(2), timestamp=0)
],
tag=CACHED_NUMBERS)),
# Advances clock from 4 to 11
TestStreamPayload.Event(
processing_time_event=TestStreamPayload.Event.AdvanceProcessingTime(
advance_duration=7 * 10**6)),
TestStreamPayload.Event(
watermark_event=TestStreamPayload.Event.AdvanceWatermark(
new_watermark=10 * 10**6, tag=CACHED_LETTERS)),
TestStreamPayload.Event(
element_event=TestStreamPayload.Event.AddElements(
elements=[
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode('b'), timestamp=10 * 10**6)
],
tag=CACHED_LETTERS)),
# Advances clock from 11 to 101
TestStreamPayload.Event(
processing_time_event=TestStreamPayload.Event.AdvanceProcessingTime(
advance_duration=90 * 10**6)),
TestStreamPayload.Event(
element_event=TestStreamPayload.Event.AddElements(
elements=[
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode('late'), timestamp=0)
],
tag=CACHED_LATE)),
]
self.assertSequenceEqual(events, expected)
def test_read_and_write(self):
"""An integration test between the Sink and Source.
This ensures that the sink and source speak the same language in terms of
coders, protos, order, and units.
"""
CACHED_RECORDS = repr(CacheKey('records', '', '', ''))
# Units here are in seconds.
test_stream = (
TestStream(output_tags=(CACHED_RECORDS))
.advance_watermark_to(0, tag=CACHED_RECORDS)
.advance_processing_time(5)
.add_elements(['a', 'b', 'c'], tag=CACHED_RECORDS)
.advance_watermark_to(10, tag=CACHED_RECORDS)
.advance_processing_time(1)
.add_elements(
[
TimestampedValue('1', 15),
TimestampedValue('2', 15),
TimestampedValue('3', 15)
],
tag=CACHED_RECORDS)) # yapf: disable
coder = SafeFastPrimitivesCoder()
cache = StreamingCache(cache_dir=None, sample_resolution_sec=1.0)
# Assert that there are no capture keys at first.
self.assertEqual(cache.capture_keys, set())
options = StandardOptions(streaming=True)
with TestPipeline(options=options) as p:
records = (p | test_stream)[CACHED_RECORDS]
# pylint: disable=expression-not-assigned
records | cache.sink([CACHED_RECORDS], is_capture=True)
reader, _ = cache.read(CACHED_RECORDS)
actual_events = list(reader)
# Assert that the capture keys are forwarded correctly.
self.assertEqual(cache.capture_keys, set([CACHED_RECORDS]))
# Units here are in microseconds.
expected_events = [
TestStreamPayload.Event(
processing_time_event=TestStreamPayload.Event.AdvanceProcessingTime(
advance_duration=5 * 10**6)),
TestStreamPayload.Event(
watermark_event=TestStreamPayload.Event.AdvanceWatermark(
new_watermark=0, tag=CACHED_RECORDS)),
TestStreamPayload.Event(
element_event=TestStreamPayload.Event.AddElements(
elements=[
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode('a'), timestamp=0),
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode('b'), timestamp=0),
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode('c'), timestamp=0),
],
tag=CACHED_RECORDS)),
TestStreamPayload.Event(
processing_time_event=TestStreamPayload.Event.AdvanceProcessingTime(
advance_duration=1 * 10**6)),
TestStreamPayload.Event(
watermark_event=TestStreamPayload.Event.AdvanceWatermark(
new_watermark=10 * 10**6, tag=CACHED_RECORDS)),
TestStreamPayload.Event(
element_event=TestStreamPayload.Event.AddElements(
elements=[
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode('1'), timestamp=15 *
10**6),
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode('2'), timestamp=15 *
10**6),
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode('3'), timestamp=15 *
10**6),
],
tag=CACHED_RECORDS)),
]
self.assertEqual(actual_events, expected_events)
def test_read_and_write_multiple_outputs(self):
"""An integration test between the Sink and Source with multiple outputs.
    This tests the functionality that the StreamingCache reads from multiple
files and combines them into a single sorted output.
"""
LETTERS_TAG = repr(CacheKey('letters', '', '', ''))
NUMBERS_TAG = repr(CacheKey('numbers', '', '', ''))
# Units here are in seconds.
test_stream = (TestStream()
.advance_watermark_to(0, tag=LETTERS_TAG)
.advance_processing_time(5)
.add_elements(['a', 'b', 'c'], tag=LETTERS_TAG)
.advance_watermark_to(10, tag=NUMBERS_TAG)
.advance_processing_time(1)
.add_elements(
[
TimestampedValue('1', 15),
TimestampedValue('2', 15),
TimestampedValue('3', 15)
],
tag=NUMBERS_TAG)) # yapf: disable
cache = StreamingCache(cache_dir=None, sample_resolution_sec=1.0)
coder = SafeFastPrimitivesCoder()
options = StandardOptions(streaming=True)
with TestPipeline(options=options) as p:
# pylint: disable=expression-not-assigned
events = p | test_stream
events[LETTERS_TAG] | 'Letters sink' >> cache.sink([LETTERS_TAG])
events[NUMBERS_TAG] | 'Numbers sink' >> cache.sink([NUMBERS_TAG])
reader = cache.read_multiple([[LETTERS_TAG], [NUMBERS_TAG]])
actual_events = list(reader)
# Units here are in microseconds.
expected_events = [
TestStreamPayload.Event(
processing_time_event=TestStreamPayload.Event.AdvanceProcessingTime(
advance_duration=5 * 10**6)),
TestStreamPayload.Event(
watermark_event=TestStreamPayload.Event.AdvanceWatermark(
new_watermark=0, tag=LETTERS_TAG)),
TestStreamPayload.Event(
element_event=TestStreamPayload.Event.AddElements(
elements=[
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode('a'), timestamp=0),
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode('b'), timestamp=0),
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode('c'), timestamp=0),
],
tag=LETTERS_TAG)),
TestStreamPayload.Event(
processing_time_event=TestStreamPayload.Event.AdvanceProcessingTime(
advance_duration=1 * 10**6)),
TestStreamPayload.Event(
watermark_event=TestStreamPayload.Event.AdvanceWatermark(
new_watermark=10 * 10**6, tag=NUMBERS_TAG)),
TestStreamPayload.Event(
watermark_event=TestStreamPayload.Event.AdvanceWatermark(
new_watermark=0, tag=LETTERS_TAG)),
TestStreamPayload.Event(
element_event=TestStreamPayload.Event.AddElements(
elements=[
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode('1'), timestamp=15 *
10**6),
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode('2'), timestamp=15 *
10**6),
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode('3'), timestamp=15 *
10**6),
],
tag=NUMBERS_TAG)),
]
self.assertListEqual(actual_events, expected_events)
if __name__ == '__main__':
unittest.main()
| 42.263279
| 89
| 0.627705
|
ba6f91f1574a352e099f2c660174823fe1d30f30
| 5,499
|
py
|
Python
|
aiida/restapi/run_api.py
|
HaoZeke/aiida-core
|
1a4cada67fe36353326dcebfe888ebc01a6c5b7b
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
aiida/restapi/run_api.py
|
HaoZeke/aiida-core
|
1a4cada67fe36353326dcebfe888ebc01a6c5b7b
|
[
"MIT",
"BSD-3-Clause"
] | 2
|
2019-03-06T11:23:42.000Z
|
2020-03-09T09:34:07.000Z
|
aiida/restapi/run_api.py
|
lorisercole/aiida-core
|
84c2098318bf234641219e55795726f99dc25a16
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""
Defines the methods, with all required parameters, to configure and run the REST API locally.
"""
import importlib
import os
import warnings
from flask_cors import CORS
from aiida.common.warnings import AiidaDeprecationWarning
from .common.config import CLI_DEFAULTS, APP_CONFIG, API_CONFIG
from . import api as api_classes
__all__ = ('run_api', 'configure_api')
def run_api(flask_app=api_classes.App, flask_api=api_classes.AiidaApi, **kwargs):
"""
Takes a flask.Flask instance and runs it.
:param flask_app: Class inheriting from flask app class
:type flask_app: :py:class:`flask.Flask`
:param flask_api: flask_restful API class to be used to wrap the app
:type flask_api: :py:class:`flask_restful.Api`
:param hostname: hostname to run app on (only when using built-in server)
:param port: port to run app on (only when using built-in server)
:param config: directory containing the config.py file used to configure the RESTapi
    :param catch_internal_server: If True, catch and print all internal server errors
:param debug: enable debugging
:param wsgi_profile: use WSGI profiler middleware for finding bottlenecks in web application
:param hookup: If true, hook up application to built-in server, else just return it. This parameter
is deprecated as of AiiDA 1.2.1. If you don't intend to run the API (hookup=False) use `configure_api` instead.
:returns: tuple (app, api) if hookup==False or runs app if hookup==True
"""
hookup = kwargs.pop('hookup', None)
if hookup is None:
hookup = CLI_DEFAULTS['HOOKUP_APP']
else:
warnings.warn( # pylint: disable=no-member
'Using the `hookup` parameter is deprecated since `v1.2.1` and will stop working in `v2.0.0`. '
'To configure the app without running it, use `configure_api` instead.', AiidaDeprecationWarning
)
hostname = kwargs.pop('hostname', CLI_DEFAULTS['HOST_NAME'])
port = kwargs.pop('port', CLI_DEFAULTS['PORT'])
debug = kwargs.pop('debug', APP_CONFIG['DEBUG'])
api = configure_api(flask_app, flask_api, **kwargs)
if hookup:
# Run app through built-in werkzeug server
print(' * REST API running on http://{}:{}{}'.format(hostname, port, API_CONFIG['PREFIX']))
api.app.run(debug=debug, host=hostname, port=int(port), threaded=True)
else:
# Return the app & api without specifying port/host to be handled by an external server (e.g. apache).
# Some of the user-defined configuration of the app is ineffective (only affects built-in server).
return api.app, api
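# Hedged usage sketch (not part of the original module): starting the API on the
# built-in werkzeug server. The hostname and port below are illustrative values,
# not AiiDA defaults.
#
#     from aiida.restapi.run_api import run_api
#
#     run_api(hostname='127.0.0.1', port=5000, debug=True)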
def configure_api(flask_app=api_classes.App, flask_api=api_classes.AiidaApi, **kwargs):
"""
Configures a flask.Flask instance and returns it.
:param flask_app: Class inheriting from flask app class
:type flask_app: :py:class:`flask.Flask`
:param flask_api: flask_restful API class to be used to wrap the app
:type flask_api: :py:class:`flask_restful.Api`
:param config: directory containing the config.py file used to configure the RESTapi
    :param catch_internal_server: If True, catch and print all internal server errors
:param wsgi_profile: use WSGI profiler middleware for finding bottlenecks in web application
:returns: Flask RESTful API
:rtype: :py:class:`flask_restful.Api`
"""
# Unpack parameters
config = kwargs.pop('config', CLI_DEFAULTS['CONFIG_DIR'])
catch_internal_server = kwargs.pop('catch_internal_server', CLI_DEFAULTS['CATCH_INTERNAL_SERVER'])
wsgi_profile = kwargs.pop('wsgi_profile', CLI_DEFAULTS['WSGI_PROFILE'])
if kwargs:
raise ValueError('Unknown keyword arguments: {}'.format(kwargs))
# Import the configuration file
spec = importlib.util.spec_from_file_location(os.path.join(config, 'config'), os.path.join(config, 'config.py'))
config_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(config_module)
# Instantiate an app
app = flask_app(__name__, catch_internal_server=catch_internal_server)
# Apply default configuration
app.config.update(**config_module.APP_CONFIG)
# Allow cross-origin resource sharing
cors_prefix = r'{}/*'.format(config_module.API_CONFIG['PREFIX'])
CORS(app, resources={cors_prefix: {'origins': '*'}})
# Configure the serializer
if config_module.SERIALIZER_CONFIG:
from aiida.restapi.common.utils import CustomJSONEncoder
app.json_encoder = CustomJSONEncoder
# Set up WSGI profiler if requested
if wsgi_profile:
from werkzeug.middleware.profiler import ProfilerMiddleware
app.config['PROFILE'] = True
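        # Note (added): the integer restriction below is forwarded to pstats and
        # limits the printed profile to the top 30 entries per request.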
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
# Instantiate and return a Flask RESTful API by associating its app
return flask_api(app, **API_CONFIG)
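# Hedged usage sketch (not part of the original module): serving the API through an
# external WSGI server instead of the built-in one. The name `application` is what
# many WSGI servers look for by default; that choice is an assumption here.
#
#     from aiida.restapi.run_api import configure_api
#
#     api = configure_api()          # falls back to CLI_DEFAULTS['CONFIG_DIR']
#     application = api.app          # e.g. `gunicorn wsgi_module:application`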
| 44.346774
| 119
| 0.67776
|
6fb7b02c8bf75e835bea86d1756aa9ee496468f1
| 1,917
|
py
|
Python
|
scripts/pull-workbench-conf.py
|
UKHomeOffice/dq-packer-ops-win-bastion-nineteen
|
c94c1e6d9d301bd433570be1cfa603d748f364e4
|
[
"MIT"
] | null | null | null |
scripts/pull-workbench-conf.py
|
UKHomeOffice/dq-packer-ops-win-bastion-nineteen
|
c94c1e6d9d301bd433570be1cfa603d748f364e4
|
[
"MIT"
] | 3
|
2019-02-07T09:41:53.000Z
|
2020-05-13T13:28:07.000Z
|
scripts/pull-workbench-conf.py
|
UKHomeOffice/dq-packer-ops-win-bastion-nineteen
|
c94c1e6d9d301bd433570be1cfa603d748f364e4
|
[
"MIT"
] | 1
|
2021-04-11T09:09:16.000Z
|
2021-04-11T09:09:16.000Z
|
import boto3
import os
import logging
logging.basicConfig(
filename='sqlworkbench-config-pull-logs.txt',
format='%(asctime)s %(message)s',
datefmt='%d/%m/%Y %H:%M:%S',
level=logging.INFO
)
class S3Copier:
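    """Copy every object under an S3 prefix to a local directory.
    Note (added): ``input_location`` is expected in '<bucket-name>/<key-prefix>'
    form, as implied by the splitting done in __init__.
    """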
def __init__(self, input_location, output_location):
self.input_location = input_location
self.input_bucket = input_location.split('/')[0]
self.input_prefix = '/'.join(input_location.split('/')[1:])
self.output_location = output_location
self.s3_client = boto3.resource('s3')
def _create_dir_if_not_exists(self, path):
"""Create local directory if it doesn't exist"""
if not os.path.isdir(path):
logging.info('Directory {0} does not exist. Creating'.format(path))
os.makedirs(path)
def _get_file_names(self):
"""Return a list of files in the configured s3 input location"""
return self.s3_client.Bucket(self.input_bucket).objects.filter(Prefix=self.input_prefix)
def copy(self):
"""Copy all files from the configured s3 input location to local output location"""
try:
self._create_dir_if_not_exists(self.output_location)
for file_name in self._get_file_names():
f_name = file_name.key.split('/')[-1]
self.s3_client \
.Bucket(self.input_bucket) \
.download_file(file_name.key, '{0}/{1}' \
.format(self.output_location, f_name))
logging.info('Successfully pulled {0}'.format(f_name))
except Exception as e:
logging.info('Failed to pull files')
logging.info(e)
if __name__ == '__main__':
logging.info('Starting')
output_dir = 'C:/Users/Public/.sqlworkbench'
input_dir = os.environ.get('S3_OPS_CONFIG_BUCKET')
S3Copier(input_dir, output_dir).copy()
logging.info('Task complete')
| 38.34
| 96
| 0.631195
|