# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Base class for NCC Orchestration
"""
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.cloudstackAPI import *
from marvin.lib.base import Domain, Account
from marvin.lib.utils import validateList
from marvin.codes import PASS,FAILED
from marvin.cloudstackException import (InvalidParameterException,
GetDetailExceptionInfo)
from os import system
from subprocess import call
import requests, json, urllib
class NCC:
def __init__(self, nccip, nsip, csip, logger=None):
self.nccip = nccip
self.nsip = nsip
self.csip = csip
self.logger = logger
self.__lastError = ''
def registerCCP(self, apiclient):
"""
Register CCP with NCC
"""
auth_keys = self.getAdminKeys(apiclient)
url = "http://"+self.nccip+"/cs/cca/v1/cloudstacks"
cs_url = "http://"+self.csip+":8080/"
payload = {"cloudstack": {
"name": "Cloudstack",
"apikey": auth_keys[0],
"secretkey": auth_keys[1],
"driver_username": "admin",
"driver_password": "nsroot",
"cloudstack_uri": cs_url
}
}
cmd_response = self.sendCmdToNCC(url, payload)
if cmd_response == FAILED:
raise Exception("Error: %s" % self.__lastError)
def registerNS(self):
url = "http://"+self.nccip+"/nitro/v1/config/managed_device/"
payload = 'object={"params":{"action":"add_device"}, "managed_device":{"ip_address":"%s",\
"profile_name":"ns_nsroot_profile", "sync_operation":"false"}}' % self.nsip
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
cmd_response = self.sendCmdToNS(url, payload, header=headers)
if cmd_response == FAILED:
raise Exception("Error: %s" % self.__lastError)
def assignNStoCSZone(self):
cs_zone = self.getCSZoneFromNCC()
if cs_zone == FAILED:
raise Exception("Error: %s" % self.__lastError)
url = "http://"+self.nccip+"/nitro/v1/config/tag/"
payload = 'object={"tag": {"entity_type": "managed_device", "entity_id": "%s",\
"tag_key": "zone", "tag_value": "%s"}}' % (self.nsip, cs_zone)
header = {'Content-Type':'application/x-www-form-urlencoded'}
cmd_response = self.sendCmdToNS(url, payload, header=header)
if cmd_response == FAILED:
raise Exception("Error: %s" % self.__lastError)
def createServicePackages(self, name, platform_type, device_ip, isolation_policy="shared"):
tnt_group = self.createTenantGroup(name)
if tnt_group.status_code != 201:
raise Exception("Error: %s" % self.__lastError)
tnt_group_id = json.loads(tnt_group.content)["tenantgroups"][0]["id"]
dv_group = self.createDeviceGroup(name, platform_type)
if dv_group.status_code != 201:
raise Exception("Error: %s" % self.__lastError)
dv_group_id = json.loads(dv_group.content)["devicegroups"][0]["id"]
if isolation_policy.lower() == "shared":
srv_pkg = self.createServicePackageShared(name, tnt_group_id, dv_group_id, isolation_policy )
elif isolation_policy.lower() == "dedicated":
srv_pkg = self.createServicePackageDedicated(name, tnt_group_id, dv_group_id, isolation_policy )
else:
raise Exception("NS device must be either in shared or dedicated mode")
if srv_pkg.status_code != 201:
raise Exception("Error: %s" % self.__lastError)
        dev_add_res = self.addDevicetoServicePackage(dv_group_id, device_ip)
if dev_add_res == FAILED:
raise Exception ("Error: %s" % self.__lastError)
srv_pkg_id = json.loads(srv_pkg.content)["servicepackages"][0]["id"]
publish_srv_pkg_res = self.publishServicePackage(srv_pkg_id)
if publish_srv_pkg_res == FAILED:
raise Exception("Error: %s" % self.__lastError)
return (dv_group_id, tnt_group_id, srv_pkg_id)
def createTenantGroup(self, name):
url = "http://"+self.nccip+"/admin/v1/tenantgroups"
payload = {"tenantgroups": [{"name": name}]}
res = self.sendCmdToNCC(url, payload)
return res
def createDeviceGroup(self, name, platform_type, device_type="netscaler"):
url = "http://"+self.nccip+"/admin/v1/devicegroups"
payload = {"devicegroups":[{"name": name,
"device_type": device_type,
"platform_type": platform_type
}]
}
res = self.sendCmdToNCC(url, payload)
return res
def createServicePackageShared(self, name, tenant_group, device_group, allocation, device_type="netscaler"):
url = "http://"+self.nccip+"/admin/v1/servicepackages"
payload = {"servicepackages":[{"allocationgroups": [{"device_type": device_type,
"allocationpolicy":allocation,
"placement_scheme": "ROUNDROBIN",
"deviceaffinity": "onedevice",
"devicegroup":{"ref": device_group}
}],
"name": name,
"isdefault": "false",
"tenantgroup": {"ref": tenant_group}
}]
}
res = self.sendCmdToNCC(url, payload)
return res
def createServicePackageDedicated(self, name, tenant_group, device_group, allocation, device_type="netscaler"):
url = "http://"+self.nccip+"/admin/v1/servicepackages"
payload = {"servicepackages":[{"allocationgroups": [{"device_type": device_type,
"allocationpolicy":allocation,
#"placement_scheme": "roundrobin or leastentity",
"devicegroup":{"ref": device_group}
}],
"name": name,
"isdefault": "false",
"tenantgroup": {"ref": tenant_group}
}]
}
res = self.sendCmdToNCC(url, payload)
return res
def addDevicetoServicePackage(self, devicegroup_id, device_ip):
url = "http://"+self.nccip+"/admin/v1/devicegroups/"+devicegroup_id+"/devices"
payload = {"devices":[{"ref":device_ip }]}
res = self.sendCmdToNCC(url, payload, method="PUT")
return res
def removeDevicefromServicePackage(self, devicegroup_id):
url = "http://"+self.nccip+"/admin/v1/devicegroups/"+devicegroup_id+"/devices"
payload = {"devices":[]}
res = self.sendCmdToNCC(url, payload, method="PUT")
return res
def publishServicePackage(self, pkg_id):
url = "http://"+self.nccip+"/cs/cca/v1/servicepackages"
payload = {"servicepackages":[{"servicepackageid":pkg_id}]}
res = self.sendCmdToNCC(url, payload)
return res
def getCSZoneFromNCC(self):
url = "http://"+self.nccip+"/cs/cca/v1/zones"
res = self.sendCmdToNCC(url, method="GET")
if res != FAILED:
zoneid = json.loads(res.content)["zones"][0]
return zoneid
else:
return FAILED
def sendCmdToNCC(self, url, payload={}, method="POST", header={'content-type': 'application/json'}):
try:
# self.logger.debug("url :%s" % url)
# self.logger.debug("payload: %s" % payload)
if method == "POST":
#self.logger.debug("====Sending POST Request====")
return self.sendPostRequstToNCC(url, payload, header)
if method == "GET":
#self.logger.debug("====Sending GET Request====")
return self.sendGetRequestToNCC(url, payload, header)
if method == "PUT":
return self.sendPutRequestToNCC(url, payload, header)
if method == "DELETE":
self.logger.debug("Trying delete")
return self.sendDeleteRequestToNCC(url, header)
except Exception as e:
self.__lastError = e
# self.logger.exception("sendCmdToNCC: Exception:%s" %
# GetDetailExceptionInfo(e))
return FAILED
def sendGetRequestToNCC(self, url, payload, header):
try:
res = requests.get(url, json=payload, auth=("nsroot", "nsroot"), headers=header)
return res
except Exception as e:
self.__lastError = e
# self.logger.exception("sendGetRequestToNCC : Exception Occured: %s" %
# str(self.__lastError))
return FAILED
def sendPostRequstToNCC(self, url, payload, header):
try:
res = requests.post(url, json=payload, auth=("nsroot", "nsroot"), headers=header)
return res
except Exception as e:
self.__lastError = e
# self.logger.exception("sendPostRequstToNCC: Exception Occured: %s" %
# str(self.__lastError))
return FAILED
def sendPutRequestToNCC(self, url, payload, header):
try:
res = requests.put(url, json=payload, auth=("nsroot", "nsroot"), headers=header)
return res
except Exception as e:
self.__lastError = e
# self.logger.exception("sendPostRequstToNCC: Exception Occured: %s" %
# str(self.__lastError))
return FAILED
def sendDeleteRequestToNCC(self, url, header):
try:
            res = requests.delete(url, auth=("nsroot", "nsroot"), headers=header)
return res
except Exception as e:
self.__lastError = e
# self.logger.exception("sendPostRequstToNCC: Exception Occured: %s" %
# str(self.__lastError))
return FAILED
def sendCmdToNS(self, url, payload={}, method="POST", header={'content-type': 'application/json'}):
try:
# self.logger.debug("url :%s" % url)
# self.logger.debug("payload: %s" % payload)
if method == "POST":
#self.logger.debug("====Sending POST Request====")
return self.sendPostRequstToNS(url, payload, header)
if method == "GET":
#self.logger.debug("====Sending GET Request====")
return self.sendGetRequestToNS(url, payload, header)
except Exception as e:
self.__lastError = e
# self.logger.exception("sendCmdToNCC: Exception:%s" %
# GetDetailExceptionInfo(e))
return FAILED
def sendPostRequstToNS(self, url, payload, header):
try:
res = requests.post(url, data=payload, auth=("nsroot", "nsroot"), headers=header)
return res
except Exception as e:
self.__lastError = e
# self.logger.exception("sendPostRequstToNCC: Exception Occured: %s" %
# str(self.__lastError))
return FAILED
def sendGetRequestToNS(self, url, payload, header):
try:
res = requests.get(url, data=payload, auth=("nsroot", "nsroot"), headers=header)
return res
except Exception as e:
self.__lastError = e
# self.logger.exception("sendGetRequestToNCC : Exception Occured: %s" %
# str(self.__lastError))
return FAILED
def getAdminKeys(self, apiClient):
domains = Domain.list(apiClient, name="ROOT")
listuser = listUsers.listUsersCmd()
listuser.username = "admin"
listuser.domainid = domains[0].id
listuser.listall = True
listuserRes = apiClient.listUsers(listuser)
userId = listuserRes[0].id
apiKey = listuserRes[0].apikey
securityKey = listuserRes[0].secretkey
return [apiKey, securityKey]
def cleanup_ncc(self, device_gp_id, srv_pkg_uuid, srv_pkg_id, tnt_group_id):
self.removeDevicefromServicePackage(device_gp_id)
# Remove service package reference from Cloudplatform
url = "http://"+self.nccip+"/cs/cca/v1/servicepackages/"+srv_pkg_uuid
self.logger.debug("Sending DELETE SP uuid: %s " % url)
res = self.sendCmdToNCC(url, method="DELETE")
# Remove Service package from NCC
url = "http://"+self.nccip+"/admin/v1/servicepackages/"+srv_pkg_id
self.logger.debug("Sending DELETE SP : %s " % url)
res = self.sendCmdToNCC(url, method="DELETE")
# Remove Device group
url = "http://"+self.nccip+"/admin/v1/devicegroups/"+device_gp_id
self.logger.debug("Sending DELETE devicegroup: %s " % url)
res = self.sendCmdToNCC(url, method="DELETE")
# Remove Tenant group
url = "http://"+self.nccip+"/admin/v1/tenantgroups/"+tnt_group_id
self.logger.debug("Sending DELETE tenant group: %s " % url)
res = self.sendCmdToNCC(url, method="DELETE")
self.logger.debug("Result: %s" % res)
return res
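# A rough usage sketch for this helper (not executed here). The IP addresses,
# the Marvin ``apiclient``, and the "pkg1"/"NS" arguments are illustrative
# placeholders; only the method names and signatures come from the class above.
#
#   ncc = NCC("10.0.0.10", "10.0.0.20", "10.0.0.30", logger=logger)
#   ncc.registerCCP(apiclient)      # register CloudPlatform with NCC
#   ncc.registerNS()                # register the NetScaler device
#   ncc.assignNStoCSZone()          # tag the device with the CloudStack zone
#   dv_id, tnt_id, pkg_id = ncc.createServicePackages(
#       "pkg1", "NS", "10.0.0.20", isolation_policy="shared")
#   srv_pkg_uuid = ...              # uuid of the published package, obtained separately
#   ncc.cleanup_ncc(dv_id, srv_pkg_uuid, pkg_id, tnt_id)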
"""Import CLTK corpora.
TODO: ? Fix so ``import_corpora()`` can take relative path.
TODO: ? Add https://github.com/cltk/pos_latin
TODO: Consider renaming all "import" to "clone"
"""
import errno
import os
import shutil
import sys
from urllib.parse import urljoin
import yaml
from git import RemoteProgress, Repo
from cltk.core.cltk_logger import logger
from cltk.core.exceptions import CorpusImportError
from cltk.languages.utils import get_lang
from cltk.utils.utils import CLTK_DATA_DIR
__author__ = [
"Kyle P. Johnson <kyle@kyle-p-johnson.com>",
"Stephen Margheim <stephen.margheim@gmail.com>",
]
# TODO: Decide whether to drop repos w/o models
# langs_with_model_repo = ["grc", "lat", "ang", "gmh", "gml", "san", "non", "pli"]
LANGUAGE_CORPORA = {
"akk": [
{
"name": "cdli_corpus",
"origin": "https://github.com/cdli-gh/data.git",
"type": "atf",
}
],
"arb": [
{
"name": "arabic_text_perseus",
"origin": "https://github.com/cltk/arabic_text_perseus",
"type": "text",
},
{
"name": "quranic-corpus",
"origin": "https://github.com/cltk/arabic_text_quranic_corpus",
"type": "text",
},
{
"name": "quranic-corpus-morphology",
"origin": "https://github.com/cltk/arabic_morphology_quranic-corpus",
"type": "text",
},
],
"lzh": [
{
"type": "text",
"origin": "https://github.com/cltk/chinese_text_cbeta_01.git",
"name": "chinese_text_cbeta_01",
},
{
"type": "text",
"origin": "https://github.com/cltk/chinese_text_cbeta_02.git",
"name": "chinese_text_cbeta_02",
},
{
"type": "text",
"origin": "https://github.com/cltk/chinese_text_cbeta_indices.git",
"name": "chinese_text_cbeta_indices",
},
{
"type": "text",
"origin": "https://github.com/cltk/chinese_text_cbeta_txt.git",
"name": "chinese_text_cbeta_txt",
},
{
"type": "text",
"origin": "https://github.com/cltk/chinese_text_cbeta_taf_xml.git",
"name": "chinese_text_cbeta_taf_xml",
},
],
"cop": [
{
"type": "text",
"origin": "https://github.com/cltk/coptic_text_scriptorium.git",
"name": "coptic_text_scriptorium",
}
],
"grc": [
{
"name": "grc_software_tlgu",
"origin": "https://github.com/cltk/grc_software_tlgu.git",
"type": "software",
},
{
"name": "grc_text_perseus",
"origin": "https://github.com/cltk/grc_text_perseus.git",
"type": "text",
},
{"origin": None, "name": "phi7", "location": "local", "type": "text"},
{"name": "tlg", "origin": None, "location": "local", "type": "text"},
{
"name": "greek_proper_names_cltk",
"origin": "https://github.com/cltk/greek_proper_names_cltk.git",
"type": "lexicon",
},
{
"name": "grc_models_cltk",
"origin": "https://github.com/cltk/grc_models_cltk.git",
"type": "model",
},
{
"origin": "https://github.com/cltk/greek_treebank_perseus.git",
"name": "greek_treebank_perseus",
"type": "treebank",
},
{
"origin": "https://github.com/vgorman1/Greek-Dependency-Trees.git",
"name": "greek_treebank_gorman",
"type": "treebank",
},
{
"origin": "https://github.com/cltk/greek_lexica_perseus.git",
"name": "greek_lexica_perseus",
"type": "lexicon",
},
{
"origin": "https://github.com/cltk/greek_training_set_sentence_cltk.git",
"name": "greek_training_set_sentence_cltk",
"type": "training_set",
},
{
"name": "greek_word2vec_cltk",
"origin": "https://github.com/cltk/greek_word2vec_cltk.git",
"type": "model",
},
{
"name": "greek_text_lacus_curtius",
"origin": "https://github.com/cltk/greek_text_lacus_curtius.git",
"type": "text",
},
{
"name": "grc_text_first1kgreek",
"origin": "https://github.com/cltk/First1KGreek",
"type": "text",
},
{
"name": "grc_text_tesserae",
# modified plaintext with Tesserae-style citations
"origin": "https://github.com/cltk/grc_text_tesserae.git",
"type": "text",
},
],
"hbo": [
{
"name": "hebrew_text_sefaria",
"origin": "https://github.com/cltk/hebrew_text_sefaria.git",
"type": "text",
}
],
"lat": [
{
"type": "text",
"name": "lat_text_perseus",
"origin": "https://github.com/cltk/lat_text_perseus.git",
},
{
"name": "lat_treebank_perseus",
"origin": "https://github.com/cltk/lat_treebank_perseus.git",
"type": "treebank",
},
{
"name": "lat_text_latin_library",
"origin": "https://github.com/cltk/lat_text_latin_library.git",
"type": "text",
},
{"location": "local", "name": "phi5", "origin": None, "type": "text"},
{"origin": None, "name": "phi7", "location": "local", "type": "text"},
{
"name": "latin_proper_names_cltk",
"origin": "https://github.com/cltk/latin_proper_names_cltk.git",
"type": "lexicon",
},
{
"origin": "https://github.com/cltk/lat_models_cltk.git",
"name": "lat_models_cltk",
"type": "model",
},
{
"name": "latin_pos_lemmata_cltk",
"origin": "https://github.com/cltk/latin_pos_lemmata_cltk.git",
"type": "lemma",
},
{
"name": "latin_treebank_index_thomisticus",
"origin": "https://github.com/cltk/latin_treebank_index_thomisticus.git",
"type": "treebank",
},
{
"name": "latin_lexica_perseus",
"origin": "https://github.com/cltk/latin_lexica_perseus.git",
"type": "lexicon",
},
{
"name": "latin_training_set_sentence_cltk",
"origin": "https://github.com/cltk/latin_training_set_sentence_cltk.git",
"type": "training_set",
},
{
"origin": "https://github.com/cltk/latin_word2vec_cltk.git",
"name": "latin_word2vec_cltk",
"type": "model",
},
{
"type": "text",
"name": "latin_text_antique_digiliblt",
"origin": "https://github.com/cltk/latin_text_antique_digiliblt.git",
},
{
"type": "text",
"name": "latin_text_corpus_grammaticorum_latinorum",
"origin": "https://github.com/cltk/latin_text_corpus_grammaticorum_latinorum.git",
},
{
"type": "text",
"name": "latin_text_poeti_ditalia",
"origin": "https://github.com/cltk/latin_text_poeti_ditalia.git",
},
{
"name": "lat_text_tesserae",
# modified plaintext with Tesserae-style citations
"origin": "https://github.com/cltk/lat_text_tesserae.git",
"type": "text",
},
{
"type": "lexicon",
"name": "cltk_lat_lewis_elementary_lexicon",
"origin": "https://github.com/cltk/cltk_lat_lewis_elementary_lexicon.git",
},
],
"multilingual": [
{
"type": "treebank",
"origin": "https://github.com/cltk/multilingual_treebank_proiel.git",
"name": "multilingual_treebank_proiel",
},
{
"type": "treebank",
"origin": "https://github.com/cltk/iswoc-treebank.git",
"name": "multilingual_treebank_iswoc",
},
{
"type": "treebank",
"origin": "https://github.com/cltk/treebank-releases.git",
"name": "multilingual_treebank_torot",
},
],
"pli": [
{
"type": "text",
"origin": "https://github.com/cltk/pali_text_ptr_tipitaka.git",
"name": "pali_text_ptr_tipitaka",
},
{
"name": "pali_texts_gretil",
"type": "text",
"origin": "https://github.com/cltk/pali_texts_gretil",
},
],
"pan": [
{
"name": "punjabi_text_gurban",
"origin": "https://github.com/cltk/punjabi_text_gurban.git",
"type": "text",
}
],
"xct": [
{
"type": "pos",
"origin": "https://github.com/cltk/tibetan_pos_tdc.git",
"name": "tibetan_pos_tdc",
},
{
"type": "lexicon",
"origin": "https://github.com/cltk/tibetan_lexica_tdc.git",
"name": "tibetan_lexica_tdc",
},
],
"san": [
{
"name": "sanskrit_text_jnu",
"origin": "https://github.com/cltk/sanskrit_text_jnu.git",
"type": "text",
},
{
"name": "sanskrit_text_dcs",
"origin": "https://github.com/cltk/sanskrit_text_dcs.git",
"type": "text",
},
{
"name": "sanskrit_parallel_sacred_texts",
"origin": "https://github.com/cltk/sanskrit_parallel_sacred_texts.git",
"type": "parallel",
},
{
"name": "sanskrit_text_sacred_texts",
"origin": "https://github.com/cltk/sanskrit_text_sacred_texts.git",
"type": "text",
},
{
"name": "sanskrit_parallel_gitasupersite",
"origin": "https://github.com/cltk/sanskrit_parallel_gitasupersite.git",
"type": "parallel",
},
{
"name": "sanskrit_text_gitasupersite",
"origin": "https://github.com/cltk/sanskrit_text_gitasupersite.git",
"type": "text",
},
{
"name": "sanskrit_text_wikipedia",
"origin": "https://github.com/cltk/sanskrit_text_wikipedia.git",
"type": "text",
},
{
"name": "sanskrit_text_sanskrit_documents",
"origin": "https://github.com/cltk/sanskrit_text_sanskrit_documents.git",
"type": "text",
},
{
"name": "san_models_cltk",
"origin": "https://github.com/cltk/san_models_cltk.git",
"type": "model",
},
],
"ang": [
{
"name": "old_english_text_sacred_texts",
"origin": "https://github.com/cltk/old_english_text_sacred_texts.git",
"type": "html",
},
{
"origin": "https://github.com/cltk/ang_models_cltk.git",
"name": "ang_models_cltk",
"type": "model",
},
],
"ben": [
{
"name": "bengali_text_wikisource",
"origin": "https://github.com/cltk/bengali_text_wikisource.git",
"type": "text",
}
],
"chu": [
{
"name": "old_church_slavonic_ccmh",
"origin": "https://github.com/cltk/old_church_slavonic_ccmh.git",
"type": "text",
}
],
"pmh": [
{
"name": "prakrit_texts_gretil",
"type": "text",
"origin": "https://github.com/cltk/prakrit_texts_gretil.git",
}
],
"mal": [
{
"name": "malayalam_text_gretil",
"origin": "https://github.com/cltk/malayalam_text_gretil.git",
"type": "text",
}
],
"omr": [
{
"name": "marathi_text_wikisource",
"origin": "https://github.com/cltk/marathi_text_wikisource.git",
"type": "text",
}
],
"kaw": [
{
"name": "javanese_text_gretil",
"origin": "https://github.com/cltk/javanese_text_gretil.git",
"type": "text",
}
],
"non": [
{
"name": "old_norse_text_perseus",
"origin": "https://github.com/cltk/old_norse_text_perseus.git",
"type": "text",
},
{
"name": "non_models_cltk",
"origin": "https://github.com/cltk/non_models_cltk.git",
"type": "model",
},
{
"name": "old_norse_texts_heimskringla",
"origin": "https://github.com/cltk/old_norse_texts_heimskringla.git",
"type": "text",
},
{
"name": "old_norse_runic_transcriptions",
"origin": "https://github.com/cltk/old_norse_runes_corpus.git",
"type": "text",
},
{
"name": "cltk_non_zoega_dictionary",
"origin": "https://github.com/cltk/cltk_non_zoega_dictionary.git",
"type": "dictionary",
},
],
"tel": [
{
"name": "telugu_text_wikisource",
"origin": "https://github.com/cltk/telugu_text_wikisource.git",
"type": "text",
}
],
"hin": [
{
"type": "text",
"origin": "https://github.com/cltk/hindi_text_ltrc.git",
"name": "hindi_text_ltrc",
}
],
"fro": [
{
"name": "french_text_wikisource",
"origin": "https://github.com/cltk/french_text_wikisource.git",
"type": "text",
},
{
"name": "french_lexicon_cltk",
"origin": "https://github.com/cltk/french_lexicon_cltk.git",
"type": "text",
},
{
"name": "fro_models_cltk",
"origin": "https://github.com/cltk/fro_models_cltk.git",
"type": "text",
},
],
"guj": [
{
"name": "gujarati_text_wikisource",
"origin": "https://github.com/cltk/gujarati_text_wikisource.git",
"type": "text",
}
],
"gml": [
{
"name": "gml_models_cltk",
"origin": "https://github.com/cltk/gml_models_cltk.git",
"type": "model",
}
],
"gmh": [
{
"name": "gmh_models_cltk",
"origin": "https://github.com/cltk/gmh_models_cltk.git",
"type": "model",
}
],
}
class ProgressPrinter(RemoteProgress):
"""Class that implements progress reporting."""
def update(self, op_code, cur_count, max_count=None, message=""):
if message:
percentage = "%.0f" % (100 * cur_count / (max_count or 100.0))
sys.stdout.write("Downloaded %s%% %s \r" % (percentage, message))
class FetchCorpus:
"""Import CLTK corpora."""
def __init__(self, language: str, testing: bool = False):
"""Setup corpus importing.
`testing` is a hack to check a tmp .yaml file to look at
or local corpus. This keeps from overwriting local. A
better idea is probably to refuse to overwrite the .yaml.
"""
self.language = language.lower()
if self.language != "multilingual":
get_lang(iso_code=language)
assert isinstance(testing, bool), "``testing`` parameter must be boolean type"
self.testing = testing
self.user_defined_corpora = self._get_user_defined_corpora()
self.library_defined_corpora = self._get_library_defined_corpora()
self.all_corpora_for_lang = (
self.user_defined_corpora + self.library_defined_corpora
)
def __repr__(self):
"""Representation string for ipython
:rtype : str
"""
return "FetchCorpus for: {}".format(self.language)
def _get_user_defined_corpora(self):
"""Check CLTK_DATA_DIR + '/distributed_corpora.yaml' for any custom,
distributed corpora that the user wants to load locally.
"""
if self.testing:
distributed_corpora_fp = os.path.normpath(
CLTK_DATA_DIR + "/test_distributed_corpora.yaml"
)
else:
distributed_corpora_fp = os.path.normpath(
CLTK_DATA_DIR + "/distributed_corpora.yaml"
)
try:
with open(distributed_corpora_fp) as file_open:
corpora_dict = yaml.safe_load(file_open)
except FileNotFoundError:
logger.debug("``~/cltk_data/distributed_corpora.yaml`` file not found.")
return []
except yaml.parser.ParserError as parse_err:
logger.debug("Yaml parsing error: %s" % parse_err)
return []
user_defined_corpora = []
for corpus_name in corpora_dict:
about = corpora_dict[corpus_name]
if about["language"].lower() == self.language:
user_defined_corpus = dict()
user_defined_corpus["origin"] = about["origin"]
user_defined_corpus["type"] = about["type"]
user_defined_corpus["name"] = corpus_name
user_defined_corpus["user_defined"] = True
user_defined_corpora.append(user_defined_corpus)
return user_defined_corpora
def _get_library_defined_corpora(self):
"""Pull from ``LANGUAGE_CORPORA`` and return
corpora for given language.
"""
try:
return LANGUAGE_CORPORA[self.language]
except KeyError:
return list()
@property
def list_corpora(self):
"""Show corpora available for the CLTK to download."""
return [corpus_info["name"] for corpus_info in self.all_corpora_for_lang]
@staticmethod
def _copy_dir_recursive(src_rel, dst_rel):
"""Copy contents of one directory to another. `dst_rel` dir cannot
exist. Source: http://stackoverflow.com/a/1994840
TODO: Move this to file_operations.py module.
:type src_rel: str
:param src_rel: Directory to be copied.
:type dst_rel: str
:param dst_rel: Directory to be created with contents of ``src_rel``.
"""
src = os.path.expanduser(src_rel)
dst = os.path.expanduser(dst_rel)
try:
shutil.copytree(src, dst)
logger.info("Files copied from %s to %s", src, dst)
except OSError as exc:
if exc.errno == errno.ENOTDIR:
shutil.copy(src, dst)
logger.info("Files copied from %s to %s", src, dst)
else:
raise
def _get_corpus_properties(self, corpus_name: str):
"""Check whether a corpus is available for import.
:type corpus_name: str
:param corpus_name: Name of available corpus.
        :rtype : dict
        """
        try:
            corpora = self.all_corpora_for_lang
        except AttributeError as attr_error:
            msg = 'Corpus not available for language "%s": %s' % (
                self.language,
                attr_error,
            )
logger.error(msg)
raise CorpusImportError(msg)
for corpus_properties in corpora:
if corpus_properties["name"] == corpus_name:
return corpus_properties
msg = 'Corpus "%s" not available for the ' '"%s" language.' % (
corpus_name,
self.language,
)
logger.error(msg)
raise CorpusImportError(msg)
def _git_user_defined_corpus(
self, corpus_name, corpus_type, uri: str, branch="master"
):
"""Clone or update a git repo defined by user.
TODO: This code is very redundant with what's in import_corpus(),
could be refactored.
"""
type_dir_rel = os.path.join(CLTK_DATA_DIR, self.language, corpus_type)
type_dir = os.path.expanduser(type_dir_rel)
        repo_name = uri.split("/")[-1]  # eg, 'latin_corpus_newton_example.git'
        if repo_name.endswith(".git"):
            repo_name = repo_name[: -len(".git")]
target_dir = os.path.join(type_dir, repo_name)
target_file = os.path.join(type_dir, repo_name, "README.md")
# check if corpus already present
# if not, clone
if not os.path.isfile(target_file):
if not os.path.isdir(type_dir):
os.makedirs(type_dir)
try:
msg = "Cloning '{}' from '{}'".format(corpus_name, uri)
logger.info(msg)
Repo.clone_from(
uri, target_dir, branch=branch, depth=1, progress=ProgressPrinter()
)
except CorpusImportError as corpus_imp_err:
msg = "Git clone of '{}' failed: '{}'".format(uri, corpus_imp_err)
logger.error(msg)
# if corpus is present, pull latest
else:
try:
repo = Repo(target_dir)
assert not repo.bare # or: assert repo.exists()
git_origin = repo.remotes.origin
msg = "Pulling latest '{}' from '{}'.".format(corpus_name, uri)
logger.info(msg)
git_origin.pull()
except CorpusImportError as corpus_imp_err:
msg = "Git pull of '{}' failed: '{}'".format(uri, corpus_imp_err)
logger.error(msg)
def import_corpus(
self, corpus_name: str, local_path: str = None, branch: str = "master"
):
"""Download a remote or load local corpus into dir ``~/cltk_data``.
TODO: maybe add ``from git import RemoteProgress``
TODO: refactor this, it's getting kinda long
:param corpus_name: The name of an available corpus.
:param local_path: A filepath, required when importing local corpora.
:param branch: What Git branch to clone.
"""
matching_corpus_list = [
_dict for _dict in self.all_corpora_for_lang if _dict["name"] == corpus_name
]
if not matching_corpus_list:
raise CorpusImportError(
f"No corpus ``{corpus_name}`` for language ``{self.language}``."
)
if len(matching_corpus_list) > 1:
raise CorpusImportError(
f"Found more than one corpus with the name ``{corpus_name}``."
)
matching_corpus = matching_corpus_list[0]
if matching_corpus.get("user_defined"):
"""{'origin': 'https://github.com/kylepjohnson/latin_corpus_newton_example.git',
'type': 'text',
'name': 'example_distributed_latin_corpus',
'user_defined': True}
"""
self._git_user_defined_corpus(
matching_corpus["name"],
matching_corpus["type"],
matching_corpus["origin"],
)
return
elif matching_corpus.get("location") == "local":
# {'location': 'local', 'name': 'phi5', 'origin': None, 'type': 'text'}
msg = "Importing from local path: '{}'".format(local_path)
logger.info(msg)
if corpus_name not in ["phi5", "phi7", "tlg"]:
raise CorpusImportError(f"Unsupported local corpus ``{corpus_name}``.")
if corpus_name == "phi5":
# normalize path for checking dir
if local_path.endswith("/"):
local_path = local_path[:-1]
# check for right corpus dir
if os.path.split(local_path)[1] != "PHI5":
logger.info("Directory must be named 'PHI5'.")
if corpus_name == "phi7":
# normalize local_path for checking dir
if local_path.endswith("/"):
local_path = local_path[:-1]
# check for right corpus dir
if os.path.split(local_path)[1] != "PHI7":
logger.info("Directory must be named 'PHI7'.")
if corpus_name == "tlg":
# normalize path for checking dir
if local_path.endswith("/"):
local_path = local_path[:-1]
# check for right corpus dir
if os.path.split(local_path)[1] != "TLG_E":
logger.info("Directory must be named 'TLG_E'.")
# move the dir-checking commands into a function
data_dir = os.path.expanduser(CLTK_DATA_DIR)
originals_dir = os.path.join(data_dir, "originals")
# check for `originals` dir; if not present mkdir
if not os.path.isdir(originals_dir):
os.makedirs(originals_dir)
msg = "Wrote directory at '{}'.".format(originals_dir)
logger.info(msg)
tlg_originals_dir = os.path.join(data_dir, "originals", corpus_name)
# check for `originals/<corpus_name>`; if pres, delete
if os.path.isdir(tlg_originals_dir):
shutil.rmtree(tlg_originals_dir)
msg = "Removed directory at '{}'.".format(tlg_originals_dir)
logger.info(msg)
# copy_dir requires that target
if not os.path.isdir(tlg_originals_dir):
self._copy_dir_recursive(local_path, tlg_originals_dir)
else:
"""{'type': 'text',
'name': 'lat_text_perseus',
'origin': 'https://github.com/cltk/lat_text_perseus.git'},
"""
if (
not matching_corpus.get("type")
and not matching_corpus.get("name")
and not matching_corpus.get("origin")
):
                raise CorpusImportError(f"Malformed record for ``{corpus_name}``.")
git_uri = matching_corpus["origin"]
type_dir_rel = os.path.join(
CLTK_DATA_DIR, self.language, matching_corpus["type"]
)
type_dir = os.path.expanduser(type_dir_rel)
target_dir = os.path.join(type_dir, corpus_name)
target_file = os.path.join(type_dir, corpus_name, "README.md")
# check if corpus already present
# if not, clone
if not os.path.isfile(target_file):
if not os.path.isdir(type_dir):
os.makedirs(type_dir)
try:
msg = "Cloning '{}' from '{}'".format(corpus_name, git_uri)
logger.info(msg)
Repo.clone_from(
git_uri,
target_dir,
branch=branch,
depth=1,
progress=ProgressPrinter(),
)
except CorpusImportError as corpus_imp_err:
msg = "Git clone of '{}' failed: '{}'".format(
git_uri, corpus_imp_err
)
logger.error(msg)
# if corpus is present, pull latest
else:
try:
repo = Repo(target_dir)
assert not repo.bare # or: assert repo.exists()
git_origin = repo.remotes.origin
msg = "Pulling latest '{}' from '{}'.".format(corpus_name, git_uri)
logger.info(msg)
git_origin.pull()
except CorpusImportError as corpus_imp_err:
msg = "Git pull of '{}' failed: '{}'".format(
git_uri, corpus_imp_err
)
logger.error(msg)
'''author Xinwei Ding'''
# [START imports]
import os
import time
import urllib
import datetime
import calendar
from google.appengine.api import users
from google.appengine.api import mail
from google.appengine.ext import ndb
from google.appengine.ext import db
import jinja2
import webapp2
DEFAULT_ADMIN='test@example.com'
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
# [END imports]
DEFAULT_STAFFID = '00'
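# Rate tables and promotion thresholds used by the handlers below. The column
# meanings are inferred from how ``addtutorial`` reads them, so treat this as a
# best-effort description rather than a spec:
#   VIPRATE[level_letter] -> [hourly rate, fixed per-student rate] for VIP sessions.
#   HRATE row -> [level, Level-1 hourly rate, other-level hourly rate,
#                 per-student rate for students 1-10, for 11-20, for 21+,
#                 two columns unused in the code shown].
#   LEVEL / LEVELa -> teaching-hour goal a tutor must reach before ``lvlup``
#                     promotes them to the next level.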
VIPRATE={'A':[10,1],'B':[10,1],'C':[10,1],'D':[10,1],'H':[10,1],'p':[10,1]}
HRATE = [['A',10,10,2,3,4,10,2],
['B',10,10,2,3,4,10,2],
['C1',10,10,2,3,4,10,2],
['C2',10,10,2,3,4,10,2],
['C3',10,10,2,3,4,10,2],
['D1',10,10,2,3,4,10,2],
['D2',10,10,2,3,4,10,2],
['D3',10,10,2,3,4,10,2],
['D4',10,10,2,3,4,10,2],
['D5',10,10,2,3,4,10,2],
['D6',10,10,2,3,4,10,2],
['HOD',10,10,2,3,4,10,2],
['placeholder',10,10,2,3,4,10,2]]
UNIS=[]
LEVEL={'A':5,'B':61,'C':45,'D':121,'H':9999999}
LEVELa={'A':5,'B':61,'C1':45,'C2':45,'C3':45,'D1':121,'D2':121,'D3':121,'D4':121,'D5':121,'D6':121,'HOD':9999999}
def staffid_key(staffid=DEFAULT_STAFFID):
return ndb.Key('Guestbook', staffid)
def cid_key(cid=DEFAULT_STAFFID):
return ndb.Key('coursepage', cid)
def lvlup(lvl):
LVUP=['A','B','C1','C2','C3','D1','D2','D3','D4','D5','D6','HOD','placeholder']
return LVUP[LVUP.index(lvl)+1]
def dlevel(lvl):
dlevel=['A','B','C1','C2','C3','D1','D2','D3','D4','D5','D6']
return dlevel[dlevel.index(lvl)-1]
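# Datastore models. Status fields are stored as string flags ('1' = yes,
# '0' = no) for ``approved``, ``paid``, ``tactive``/``sactive`` and so on;
# ``realdur`` is the duration credited toward level progress (VIP sessions
# credit only a fraction of the booked duration).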
class Tutorial(ndb.Model):
date_t = ndb.StringProperty(indexed=True)
date = ndb.DateTimeProperty(auto_now_add=True)
numstudent = ndb.IntegerProperty(indexed=False)
duration = ndb.FloatProperty(indexed=False)
vip = ndb.StringProperty(indexed=True)
earning = ndb.FloatProperty(indexed=True)
paid = ndb.StringProperty(indexed=True)
approved = ndb.StringProperty(indexed=True)
coursename_t= ndb.StringProperty(indexed=False)
approving= ndb.StringProperty(indexed=True)
staffusername = ndb.StringProperty(indexed=True)
courselevel = ndb.StringProperty(indexed=True)
isbonus =ndb.StringProperty(indexed=True)
reason =ndb.StringProperty(indexed=True)
tactive=ndb.StringProperty(indexed=True)
userdelete=ndb.StringProperty(indexed=False)
realdur = ndb.FloatProperty(indexed=False)
class Staff(ndb.Model):
username = ndb.StringProperty(indexed=True)
name = ndb.StringProperty(indexed=False)
#password = ndb.StringProperty(indexed=False)
#mail = ndb.StringProperty(indexed=False)
role = ndb.StringProperty(indexed=False)
phone = ndb.StringProperty(indexed=False)
level = ndb.StringProperty(indexed=False)
basehour = ndb.FloatProperty(indexed=False)
teachhour = ndb.FloatProperty(indexed=False)
bsb = ndb.StringProperty(indexed=False)
address = ndb.StringProperty(indexed=False)
age =ndb.StringProperty(indexed=False)
#goal =ndb.FloatProperty(indexed=False)
#cur_perc=ndb.FloatProperty(indexed=False)
sactive=ndb.StringProperty(indexed=True)
subjects = ndb.JsonProperty(indexed=False)
#tutorial = ndb.StructuredProperty(Tutorial, repeated=True)
class Course(ndb.Model):
coursename = ndb.StringProperty(indexed=False)
coursecode=ndb.StringProperty(indexed=False)
university = ndb.StringProperty(indexed=False)
active=ndb.StringProperty(indexed=False)
def checkuser(user):
staff_query = Staff.query()
staffs = staff_query.fetch()
for staff in staffs:
if user ==staff.username:
print(staff.role)
return staff.role
return 0
# [START main_page]
class MainPage(webapp2.RequestHandler):
def get(self):
staff_query = Staff.query()
staffs = staff_query.fetch()
query2 = Tutorial.query().order(-Tutorial.date)
tutorials = query2.fetch()
user = users.get_current_user()
if user:
umail =user.email()
if checkuser(umail)=='Admin' or umail == DEFAULT_ADMIN:
print(' is a admin')
url = users.create_logout_url(self.request.uri)
template_values = {
'user': user,
'staffs': staffs,
'tutorials':tutorials,
'adminusername':umail,
}
self.redirect('/admin')
elif checkuser(umail)=='Tutor':
url = users.create_logout_url(self.request.uri)
print(' is a tutor')
query_params = {'tutorusername': umail}
self.redirect('/tutor?' + urllib.urlencode(query_params))
else:
print(' not in datastore',)
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
else:
url = users.create_login_url(self.request.uri)
url_linktext = 'Login'
template_values = {
'user': user,
'staffs': staffs,
'tutorials':tutorials,
'url': url,
'url_linktext': url_linktext,
}
template = JINJA_ENVIRONMENT.get_template('index.html')
self.response.write(template.render(template_values))
class admin(webapp2.RequestHandler):
def get(self):
cur_date=datetime.date.today()
sd = self.request.get('start_date')
ed = self.request.get('end_date')
if not sd:
sd=str(cur_date - datetime.timedelta(days=356))
else:
try:
sd=str(datetime.datetime.strptime(sd,"%d/%m/%Y"))
except ValueError:
sd=str('01/01/'+sd)
sd=str(datetime.datetime.strptime(sd,"%d/%m/%Y"))
if not ed:
ed=str(cur_date + datetime.timedelta(days=1))
else:
try:
ed=str(datetime.datetime.strptime(ed,"%d/%m/%Y"))
except ValueError:
ed=str('31/12/'+ed)
ed=str(datetime.datetime.strptime(ed,"%d/%m/%Y"))
staff_query = Staff.query()
staffs = staff_query.fetch()
query2 = Tutorial.query().order(-Tutorial.date)
tutorials = query2.fetch()
tut_refine=[]
for tutorial in tutorials:
if tutorial.tactive!='0' and tutorial.date_t>=sd and tutorial.date_t<=ed:
tut_refine.append(tutorial)
user = users.get_current_user()
umail =0;
if user:
umail =user.email()
if checkuser(umail)=='Admin' or umail == DEFAULT_ADMIN:
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
template_values = {
'user': user,
'staffs': staffs,
'tutorials':tutorials,
'sd':sd,
'ed':ed,
'tut_refine':tut_refine,
'adminusername':umail,
'url': url,
'url_linktext': url_linktext,
}
template = JINJA_ENVIRONMENT.get_template('admin.html')
self.response.write(template.render(template_values))
else:
self.redirect('/?')
def post(self):
start_date=self.request.get('start_date')
end_date = self.request.get('end_date')
query_params = {'start_date': start_date,'end_date':end_date}
self.redirect('/admin?' + urllib.urlencode(query_params))
class tutor(webapp2.RequestHandler):
def get(self):
cur_date=datetime.date.today()
sd = self.request.get('start_date')
ed = self.request.get('end_date')
if not sd:
sd=str(cur_date - datetime.timedelta(days=356))
else:
try:
sd=str(datetime.datetime.strptime(sd,"%d/%m/%Y"))
except ValueError:
sd=str('01/01/'+sd)
sd=str(datetime.datetime.strptime(sd,"%d/%m/%Y"))
if not ed:
ed=str(cur_date + datetime.timedelta(days=1))
else:
try:
ed=str(datetime.datetime.strptime(ed,"%d/%m/%Y"))
except ValueError:
ed=str('31/12/'+ed)
ed=str(datetime.datetime.strptime(ed,"%d/%m/%Y"))
user = users.get_current_user()
if user:
umail =user.email()
if checkuser(umail)=='Tutor':
staff_query = Staff.query(Staff.username==umail)
staffs = staff_query.fetch()
query2 = Tutorial.query().order(-Tutorial.date)
tutorials = query2.fetch()
date_tut=[]
for tutorial in tutorials:
if tutorial.date_t>=sd and tutorial.date_t<=ed:
date_tut.append(tutorial)
for staff in staffs:
if staff.sactive!='0':
goal = LEVELa[staff.level]
cur=staff.basehour
if cur==None:
cur=0
print(goal,cur)
remain = float(goal)-float(cur)
if cur==None or cur=='None':
cur_perc=0;
else:
cur_perc= float(cur)/float(goal)*100.0
unpaidamount=0;
for tut in tutorials:
if tut.paid != '1' and tut.staffusername==umail and tut.approved=='1' and tut.earning and tut.tactive=='1':
unpaidamount+=tut.earning
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
template_values = {
'user': user,
'staffs': staffs,
'tutorials':tutorials,
'unpaidamount':unpaidamount,
'umail':umail,
'sd':sd,
'date_tut':date_tut,
'ed':ed,
'tutorusername':umail,
'cur_perc':cur_perc,
'goal':goal,
'remain':remain,
'url': url,
'url_linktext': url_linktext,
}
template = JINJA_ENVIRONMENT.get_template('tutor-tutor.html')
self.response.write(template.render(template_values))
else:
self.redirect('/?')
class register(webapp2.RequestHandler):
def get(self):
staff_query = Staff.query()
staffs = staff_query.fetch()
user=0
if user:
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
else:
url = users.create_login_url(self.request.uri)
url_linktext = 'Login'
template_values = {
'user': user,
'staffs': staffs,
'url': url,
'url_linktext': url_linktext,
}
template = JINJA_ENVIRONMENT.get_template('register.html')
self.response.write(template.render(template_values))
class payrollpage(webapp2.RequestHandler):
def get(self):
try:
cur_date=datetime.date.today()
sd = self.request.get('start_date')
ed = self.request.get('end_date')
if not sd:
sd=str(cur_date - datetime.timedelta(days=31))
else:
sd=str(datetime.datetime.strptime(sd,"%d/%m/%Y"))
if not ed:
ed=str(cur_date + datetime.timedelta(days=1))
else:
ed=str(datetime.datetime.strptime(ed,"%d/%m/%Y"))
#datetime.datetime.strptime('02/02/2017',"%d/%m/%Y")
staff_query = Staff.query(Staff.sactive!='0')
staffs = staff_query.fetch()
query2 = Tutorial.query(Tutorial.tactive!='0')
tutorials = query2.fetch()
fort={}
for staff in staffs:
if staff.username:
fortsum=0
for tut in tutorials:
if staff.username == tut.staffusername and tut.earning and tut.paid !='1' and tut.approved =='1' and tut.date_t>=sd and tut.date_t<=ed:
fortsum+=tut.earning
fort[staff.username]=[fortsum,staff.name,staff.bsb,staff.username,staff.role]
user = users.get_current_user()
umail =0;
if user:
umail =user.email()
if checkuser(umail)=='Admin' or umail == DEFAULT_ADMIN:
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
template_values = {
'staffs': staffs,
'tutorials':tutorials,
'fort':fort.items(),
'url': url,
'sd':sd,
'adminusername':umail,
'ed':ed,
'url_linktext': url_linktext,}
template=JINJA_ENVIRONMENT.get_template('admin-payroll.html')
self.response.write(template.render(template_values))
else:
self.redirect('/?')
except ValueError:
self.redirect('/error')
def post(self):
start_date=self.request.get('start_date')
end_date = self.request.get('end_date')
query_params = {'start_date': start_date,'end_date':end_date}
self.redirect('/payrollpage?' + urllib.urlencode(query_params))
class approvepage(webapp2.RequestHandler):
def get(self):
staff_query = Staff.query()
staffs = staff_query.fetch()
query2 = Tutorial.query().order(-Tutorial.date)
tutorials = query2.fetch()
user = users.get_current_user()
umail =0;
if user:
umail =user.email()
if checkuser(umail)=='Admin' or umail == DEFAULT_ADMIN:
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
template_values = {
'user': user,
'staffs': staffs,
'tutorials':tutorials,
'adminusername':umail,
'url': url,
'url_linktext': url_linktext
}
template=JINJA_ENVIRONMENT.get_template('admin-approve.html')
self.response.write(template.render(template_values))
else:
self.redirect('/?')
# [START guestbook]
class Approve(webapp2.RequestHandler):
def post(self):
staffid = self.request.get('staffid',DEFAULT_STAFFID)
key=self.request.get('keyy')
record2 = ndb.Key(urlsafe=key).get()
record2.approved = '1'
dur=record2.duration
record2.put()
'''
key=staffid
record3 = ndb.Key(urlsafe=key).get()
goal = LEVELa[record3.level]
newhour=record3.basehour+ float(dur)
remain=goal-record3.basehour
if newhour>=goal and record3.level !='D6':
record3.level=lvlup(record3.level)
newlevel =record3.level
record3.basehour=newhour-goal
goal=LEVELa[record3.level]
remain=goal-record3.basehour
elif newhour<goal:
record3.basehour=newhour
remain=goal-record3.basehour
record3.put()
'''
query_params = {'staffid': staffid}
self.redirect('/showdetail?' + urllib.urlencode(query_params))
class dpprove(webapp2.RequestHandler):
def post(self):
staffid = self.request.get('staffid',DEFAULT_STAFFID)
key=self.request.get('keyy')
record2 = ndb.Key(urlsafe=key).get()
record2.tactive='0'
dur=record2.duration
record2.put()
key=staffid
staff = ndb.Key(urlsafe=key).get()
print('00000000000000000000',staff)
goal = LEVELa[staff.level]
if dur>staff.basehour and staff.level!='A':
remain=dur-staff.basehour
staff.level=dlevel(staff.level)
goal=LEVELa[staff.level]
staff.basehour=goal-remain
elif dur<staff.basehour:
staff.basehour=staff.basehour-dur
staff.put()
query_params = {'staffid': staffid}
self.redirect('/showdetail?' + urllib.urlencode(query_params))
class dstaff(webapp2.RequestHandler):
def post(self):
staffid = self.request.get('staffid',DEFAULT_STAFFID)
s=ndb.Key(urlsafe=staffid).get()
key=self.request.get('keyy')
record2 = ndb.Key(urlsafe=key).get()
record2.sactive = '0'
record2.put()
q1=Tutorial.query(Tutorial.staffusername==s.username)
tutorials=q1.fetch()
for tutorial in tutorials:
tutorial.userdelete='1'
tutorial.put()
self.redirect('/register?')
class astaff(webapp2.RequestHandler):
def post(self):
staffid = self.request.get('staffid',DEFAULT_STAFFID)
s=ndb.Key(urlsafe=staffid).get()
key=self.request.get('keyy')
record2 = ndb.Key(urlsafe=key).get()
record2.sactive = '1'
record2.put()
q1=Tutorial.query(Tutorial.staffusername==s.username)
tutorials=q1.fetch()
for tutorial in tutorials:
tutorial.userdelete='0'
tutorial.put()
self.redirect('/register?')
class dtutorial(webapp2.RequestHandler):
def post(self):
staffkey = self.request.get('staffid',DEFAULT_STAFFID)
key2=staffkey
key=self.request.get('keyy')
record2 = ndb.Key(urlsafe=key).get()
record2.tactive = '0'
dur=record2.realdur
username=record2.staffusername
record2.put()
if dur:
q1=Staff.query(Staff.username==username)
staffs=q1.fetch(1)
staff=staffs[0]
goal = LEVELa[staff.level]
if dur>staff.basehour and staff.level!='A':
remain=dur-staff.basehour
staff.level=dlevel(staff.level)
goal=LEVELa[staff.level]
staff.basehour=goal-remain
elif dur<staff.basehour:
staff.basehour=staff.basehour-dur
staff.put()
query_params = {'staffid': key2}
self.redirect('/showdetail?' + urllib.urlencode(query_params))
class atutorial(webapp2.RequestHandler):
def post(self):
staffkey = self.request.get('staffid',DEFAULT_STAFFID)
key2=staffkey
key=self.request.get('keyy')
record2 = ndb.Key(urlsafe=key).get()
record2.tactive = '1'
dur=record2.realdur
username=record2.staffusername
record2.put()
if dur:
q1=Staff.query(Staff.username==username)
staffs=q1.fetch(1)
staff=staffs[0]
goal = LEVELa[staff.level]
newhour=dur+staff.basehour
if newhour>=goal and staff.level!='D6':
remain=newhour-goal
staff.level=lvlup(staff.level)
goal=LEVELa[staff.level]
staff.basehour=remain
elif newhour<goal:
staff.basehour=newhour
staff.put()
query_params = {'staffid': key2}
self.redirect('/showdetail?' + urllib.urlencode(query_params))
class dtutorialap(webapp2.RequestHandler):
def post(self):
staffkey = self.request.get('staffid',DEFAULT_STAFFID)
key2=staffkey
key=self.request.get('keyy')
record2 = ndb.Key(urlsafe=key).get()
record2.tactive = '0'
dur=record2.duration
username=record2.staffusername
record2.put()
if dur:
q1=Staff.query(Staff.username==username)
staffs=q1.fetch(1)
staff=staffs[0]
goal = LEVELa[staff.level]
if dur>staff.basehour and staff.level!='A':
remain=dur-staff.basehour
staff.level=dlevel(staff.level)
goal=LEVELa[staff.level]
staff.basehour=goal-remain
elif dur<staff.basehour:
staff.basehour=staff.basehour-dur
staff.put()
query_params = {'staffid': key2}
self.redirect('/admin')
class atutorialap(webapp2.RequestHandler):
def post(self):
staffkey = self.request.get('staffid',DEFAULT_STAFFID)
key2=staffkey
key=self.request.get('keyy')
record2 = ndb.Key(urlsafe=key).get()
record2.tactive = '1'
dur=record2.duration
username=record2.staffusername
record2.put()
if dur:
q1=Staff.query(Staff.username==username)
staffs=q1.fetch(1)
staff=staffs[0]
goal = LEVELa[staff.level]
newhour=dur+staff.basehour
if newhour>=goal and staff.level!='D6':
remain=newhour-goal
staff.level=lvlup(staff.level)
goal=LEVELa[staff.level]
staff.basehour=remain
elif newhour<goal:
staff.basehour=newhour
staff.put()
query_params = {'staffid': key2}
self.redirect('/admin')
class approvep(webapp2.RequestHandler):
def post(self):
staffid = self.request.get('staffid',DEFAULT_STAFFID)
print('999999999999999999999999999999999999999999999',staffid)
key=self.request.get('keyy')
record2 = ndb.Key(urlsafe=key).get()
record2.approved = '1'
record2.put()
self.redirect('/approvepage')
class payall(webapp2.RequestHandler):
def post(self):
staffid = self.request.get('staffid',DEFAULT_STAFFID)
ed= self.request.get('ed')
sd= self.request.get('sd')
q1=Tutorial.query(Tutorial.staffusername==staffid)
q2 = q1.filter(Tutorial.tactive !='0')
q3 = q2.filter(Tutorial.approved =='1')
qs= q3.fetch()
for q in qs:
if q.date_t<=ed and q.date_t>=sd:
key=q.key.urlsafe()
record2 = ndb.Key(urlsafe=key).get()
record2.paid = '1'
record2.put()
self.redirect('/payrollpage')
class sendemail(webapp2.RequestHandler):
def post(self):
staffid = self.request.get('toid',DEFAULT_STAFFID)
q1=Staff.query(Staff.username==staffid)
staffs=q1.fetch()
for staff in staffs:
if staff.username==staffid:
name=staff.name
user = users.get_current_user()
sender_address =user.email()
message = mail.EmailMessage(
sender=sender_address,
subject="Your Wage has been sent")
message.to = "<"+staffid+">"
message.body = """Dear """+name+""":
We sent you a payment recently."""
message.send()
self.redirect('/payrollpage')
class updateinfo(webapp2.RequestHandler):
def post(self):
try:
staffid=self.request.get('staffid')
key=self.request.get('keyy')
record2 = ndb.Key(urlsafe=key).get()
new=self.request.get('mail')
if(new):
record2.mail = new
new=self.request.get('username')
if(new):
record2.username = new
new=self.request.get('name')
if(new):
record2.name = new
new=self.request.get('bsb')
if(new):
record2.bsb = new
new=self.request.get('address')
if(new):
record2.address = new
base=self.request.get('basehour')
if(base and base!='None' and base!= None):
record2.basehour = float(base)
elif base==None or base=='None':
record2.basehour=0.0
new=self.request.get('level')
if(new):
record2.level = new
new=self.request.get('phone')
if(new):
record2.phone = new
newgoal =20.0
            record2.put()
query_params = {'staffid': staffid}
self.redirect('/showdetail?' + urllib.urlencode(query_params))
except ValueError:
self.redirect('/error')
class Guestbook(webapp2.RequestHandler):
def post(self):
staffid = self.request.get('staffid',DEFAULT_STAFFID)
staff = Staff(parent=staffid_key(staffid))
staffq=Staff.query(Staff.username==self.request.get('username'))
staffs=staffq.fetch()
for staff in staffs:
if str(staff.username)==str(self.request.get('username')):
print(str(staff.username),str(self.request.get('username')),str(staff.username)==str(self.request.get('username')))
self.redirect('/erroruserdup')# + urllib.urlencode(query_params))
return
staff.username=self.request.get('username')
staff.bsb=self.request.get('bsb')
staff.name=self.request.get('name')
staff.basehour=float(self.request.get('basehour'))
staff.role=self.request.get('role')
staff.phone=self.request.get('phone')
staff.age=self.request.get('age')
staff.address=self.request.get('address')
staff.level=self.request.get('level')
staff.sactive='1'
staff.put()
#query_params = {'staffid': staffid}
self.redirect('/register')# + urllib.urlencode(query_params))
class addtutorial(webapp2.RequestHandler):
def post(self):
try:
key=self.request.get('staffid')
key2=key
record = ndb.Key(urlsafe=key).get()
if float(self.request.get('duration')):
if self.request.get('vip')=='1':
hr=float(self.request.get('duration'))/3.0
elif self.request.get('vip')=='0':
hr=float(self.request.get('duration'))
elif self.request.get('vip')=='2':
hr=float(self.request.get('duration'))/2.0
goal = LEVEL[record.level[0]]
                if record.basehour == None or record.basehour == 'None':
                    record.basehour = 0
newhour=record.basehour+ float(hr)
remain=goal-record.basehour
if newhour>=goal and record.level !='D6':
record.level=lvlup(record.level)
newlevel =record.level
record.basehour=newhour-goal
goal=LEVEL[record.level[0]]
remain=goal-record.basehour
elif newhour<goal:
record.basehour=newhour
remain=goal-record.basehour
record.put()
staffid=record.username
tutorial = Tutorial()
tutorial.numstudent = int(self.request.get('numstudent'))
tutorial.duration = float(self.request.get('duration'))
tutorial.date_t = str(datetime.datetime.strptime(self.request.get('date_t'),"%d/%m/%Y"))
tutorial.coursename_t = self.request.get('coursename_t')
tutorial.vip=self.request.get('vip')
tutorial.approved = self.request.get('approved')
tutorial.staffusername=staffid
tutorial.tactive='1'
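            # Earnings model as implemented below: pay = (hourly rate +
            # per-student components) * duration. If the session crosses the
            # tutor's current hour goal, the first ``remain`` hours are paid at
            # the old level's rate and the rest at the next level's rate. VIP
            # sessions ('vip' == '1' or '2') credit only 1/3 or 1/2 of the
            # booked duration toward level progress.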
if (tutorial.vip =='1'):
realdur= float(self.request.get('duration'))/3.0
hourrate = VIPRATE[self.request.get('level')[0]][0]
hourrate2 = VIPRATE[lvlup(self.request.get('level'))[0]][0]
fixed = VIPRATE[self.request.get('level')[0]][1]
if tutorial.duration>remain:
dur1=float(remain)
dur2=float(tutorial.duration)-float(dur1)
tutorial.earning = float((hourrate+tutorial.numstudent*fixed)*dur1)+float((hourrate2+tutorial.numstudent*fixed)*dur2)
else:
print(hourrate,tutorial.numstudent,fixed,tutorial.duration)
tutorial.earning = float((hourrate+tutorial.numstudent*fixed)*tutorial.duration)
elif tutorial.vip =='0':
realdur= float(self.request.get('duration'))
tutorial.courselevel = self.request.get('courselevel')
if (tutorial.courselevel=='Level 1'):
level_c = 1
else:
level_c= 2
stu_num = tutorial.numstudent
stu_num1 = 0
stu_num2 = 0
stu_num3 = 0
if (stu_num > 0 and stu_num <= 10):
stu_num1 = stu_num
elif (stu_num > 10 and stu_num <= 20):
stu_num1 = 10
stu_num2 = stu_num - 10
elif (stu_num > 20):
stu_num1 = 10
stu_num2 = 10
stu_num3 = stu_num - 20
stu_basic = [stu_num1,stu_num2,stu_num3]
for item in HRATE:
if (item[0]==self.request.get('level')):
hourate=item
                        hourate_2 = HRATE[HRATE.index(item) + 1]
break
if tutorial.duration>remain:
dur1=float(remain)
dur2=float(tutorial.duration)-float(dur1)
                    tutorial.earning = (
                        (hourate[level_c] + stu_basic[0]*hourate[3]
                         + stu_basic[1]*hourate[4] + stu_basic[2]*hourate[5]) * dur1
                        + (hourate_2[level_c] + stu_basic[0]*hourate_2[3]
                           + stu_basic[1]*hourate_2[4] + stu_basic[2]*hourate_2[5]) * dur2
                    )
else:
                    tutorial.earning = (
                        hourate[level_c] + stu_basic[0]*hourate[3]
                        + stu_basic[1]*hourate[4] + stu_basic[2]*hourate[5]
                    ) * tutorial.duration
tutorial.realdur=realdur
tutorial.put()
query_params = {'staffid': key2}
self.redirect('/showdetail?' + urllib.urlencode(query_params))
except ValueError:
self.redirect('/error')
class addbonus(webapp2.RequestHandler):
def post(self):
try:
key=self.request.get('staffid')
key2=key
record = ndb.Key(urlsafe=key).get()
staffid=record.username
tutorial = Tutorial()
tutorial.staffusername=staffid
if len(self.request.get('bonus'))<1:
query_params = {'staffid': key2}
self.redirect('/showdetail?' + urllib.urlencode(query_params))
else:
tutorial.earning=float(self.request.get('bonus'))
tutorial.isbonus='1'
tutorial.tactive='1'
tutorial.reason=self.request.get('reason')
tutorial.coursename_t=self.request.get('coursename_t')
tutorial.approved='1'
tutorial.paid='0'
tutorial.date_t=str(datetime.date.today())
tutorial.put()
query_params = {'staffid': key2}
self.redirect('/showdetail?' + urllib.urlencode(query_params))
except ValueError:
self.redirect('/error')
class error(webapp2.RequestHandler):
def get(self):
template_values={}
template=JINJA_ENVIRONMENT.get_template('error.html')
self.response.write(template.render(template_values))
class erroruserdup(webapp2.RequestHandler):
def get(self):
template_values={}
template=JINJA_ENVIRONMENT.get_template('erroruserdup.html')
self.response.write(template.render(template_values))
class showdetail(webapp2.RequestHandler):
def get(self):
cur_date=datetime.date.today()
sd = self.request.get('start_date')
ed = self.request.get('end_date')
query3 = Staff.query()
allstaffs = query3.fetch()
if not sd:
sd=str(cur_date - datetime.timedelta(days=356))
else:
try:
sd=str(datetime.datetime.strptime(sd,"%d/%m/%Y"))
except ValueError:
sd=str('01/01/'+sd)
sd=str(datetime.datetime.strptime(sd,"%d/%m/%Y"))
if not ed:
ed=str(cur_date + datetime.timedelta(days=1))
else:
try:
ed=str(datetime.datetime.strptime(ed,"%d/%m/%Y"))
except ValueError:
ed=str('31/12/'+ed)
ed=str(datetime.datetime.strptime(ed,"%d/%m/%Y"))
key=self.request.get('staffid')
safekeyy=key
record = ndb.Key(urlsafe=key).get()
staffid=record.username
tutorid = staffid
query1 = Staff.query(Staff.username==staffid)
query0=query1.filter(Staff.sactive !='0')
staffs = query1.fetch()
query2 = Tutorial.query(Tutorial.staffusername==staffid).order(-Tutorial.date)
tutorials = query2.fetch()
query4 = Course.query()
courses = query4.fetch()
date_tut=[]
for tutorial in tutorials:
if tutorial.date_t>=sd and tutorial.date_t<=ed:
date_tut.append(tutorial)
subjects={}
for staff in staffs:
if staff.sactive!='0':
goal = LEVEL[staff.level[0]]
cur=staff.basehour
if cur==None:
cur=0
remain = float(goal)-float(cur)
if cur==None or cur=='None':
cur_perc=0;
else:
cur_perc= float(cur)/float(goal)*100.0
subjects=staff.subjects
unpaidamount=0;
for tut in tutorials:
if tut.paid != '1' and tut.approved=='1' and tut.earning>0 and tut.tactive=='1':
unpaidamount+=tut.earning
user = users.get_current_user()
umail =0;
if user:
umail =user.email()
if checkuser(umail)=='Admin' or umail == DEFAULT_ADMIN:
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
template_values = {
'user': user,
'staffs': staffs,
'allstaffs': allstaffs,
'tutorid': tutorid,
'tutorials':tutorials,
'adminusername':umail,
'safekeyy':safekeyy,
'unpaidamount':unpaidamount,
'url': url,
'subjects':subjects,
'url_linktext': url_linktext,
'cur_perc':cur_perc,
'goal':goal,
'remain':remain,
'sd':sd,
'ed':ed,
'date_tut':date_tut,
'courses':courses,
}
template=JINJA_ENVIRONMENT.get_template('admin-tutor.html')
self.response.write(template.render(template_values))
else:
self.redirect('/')
else:
self.redirect('/')
class detail(webapp2.RequestHandler):
def post(self):
try:
staffid = self.request.get('staffid',DEFAULT_STAFFID)
staff = Staff(parent=staffid_key(staffid))
key=self.request.get('keyy')
record2 = ndb.Key(urlsafe=key).get()
tutorid=record2.username
query_params = {'staffid': staffid}
self.redirect('/showdetail?' + urllib.urlencode(query_params))
except ValueError:
self.redirect('/error')
class detailrefine(webapp2.RequestHandler):
def post(self):
try:
staffid = self.request.get('staffid')
start_date=self.request.get('start_date')
end_date = self.request.get('end_date')
query_params = {'staffid': staffid,'start_date': start_date,'end_date':end_date}
self.redirect('/showdetail?' + urllib.urlencode(query_params))
except ValueError:
self.redirect('/error')
class tutdate(webapp2.RequestHandler):
def post(self):
try:
staffid = self.request.get('staffid')
start_date=self.request.get('start_date')
end_date = self.request.get('end_date')
query_params = {'staffid': staffid,'start_date': start_date,'end_date':end_date}
self.redirect('/tutor?' + urllib.urlencode(query_params))
except ValueError:
self.redirect('/error')
class coursepage(webapp2.RequestHandler):
def get(self):
staff_query = Staff.query()
staffs = staff_query.fetch()
query2 = Tutorial.query().order(-Tutorial.date)
tutorials = query2.fetch()
q1=Course.query()
courses=q1.fetch()
user = users.get_current_user()
umail =0;
if user:
umail =user.email()
if checkuser(umail)=='Admin' or umail == DEFAULT_ADMIN:
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
template_values = {
'user': user,
'courses':courses,
'staffs':staffs,
'tutorials':tutorials,
'adminid':umail,
'url': url,
'url_linktext': url_linktext,
}
template=JINJA_ENVIRONMENT.get_template('course.html')
self.response.write(template.render(template_values))
else:
self.redirect('/')
else:
self.redirect('/')
def post(self):
try:
record2 = Course()
coursename = self.request.get('coursename')
university = self.request.get('university')
coursecode=self.request.get('coursecode')
active=self.request.get('active')
record2.coursename=coursename
record2.coursecode=coursecode
record2.university=university
record2.active=active
record2.put()
self.redirect('/coursepage')
except ValueError:
self.redirect('/error')
class dcourse(webapp2.RequestHandler):
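    # Marks a course inactive.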
def post(self):
cid = self.request.get('cid',DEFAULT_STAFFID)
key2=cid
key=self.request.get('keyy')
record2 = ndb.Key(urlsafe=key).get()
record2.active = '0'
record2.put()
self.redirect('/coursepage')
class acourse(webapp2.RequestHandler):
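    # Re-activates a course.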
def post(self):
cid = self.request.get('cid',DEFAULT_STAFFID)
key2=cid
key=self.request.get('keyy')
record2 = ndb.Key(urlsafe=key).get()
record2.active = '1'
record2.put()
self.redirect('/coursepage')
class ecourse(webapp2.RequestHandler):
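    # Updates a course's details and marks it active.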
def post(self):
cid = self.request.get('cid',DEFAULT_STAFFID)
key2=cid
key=self.request.get('keyy')
record2 = ndb.Key(urlsafe=key).get()
record2.university = self.request.get('university')
record2.coursename = self.request.get('coursename')
record2.coursecode = self.request.get('coursecode')
record2.active='1'
record2.put()
self.redirect('/coursepage')
class calendar(webapp2.RequestHandler):
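    # Month-at-a-glance view for a tutor: per-day totals of earned and approved amounts, with month/year selectors.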
def get(self):
import calendar
key=self.request.get('id')
idsafekey=self.request.get('id')
record2 = ndb.Key(urlsafe=key).get()
tutorid=record2.username
month_str=calendar.month_name[int(datetime.datetime.now().strftime("%m"))]
date_str=int(datetime.datetime.now().strftime("%d"))
dd=str(date_str)
now = datetime.datetime.now()
weekday=datetime.datetime.now().strftime("%A")
smonth=self.request.get('smonth')
if not smonth:
month = int(now.strftime("%m"))
smonth=str(month)
if len(smonth)<2:
smonth='0'+smonth
else:
month_str=calendar.month_name[int(smonth)]
syear=self.request.get('syear')
pagemon=syear
if not syear:
syear = str(now.year)
pagemon=now.year
iyear=int(syear)
cal = calendar.month(iyear, int(smonth))
c=cal.split('\n')
iscur=0
if int(now.month)==int(smonth):
iscur=1
lists=[]
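        # calendar.month() returns a fixed-width text grid; each day occupies a 3-character column, so the grid is rebuilt by slicing.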
for line in c:
i=0
sublist=[]
while i<21:
dayy=str(line[i:i+3]).strip(' ')
if len(dayy)<2:
dayyy='0'+dayy
else:
dayyy=dayy
curd=syear+'-'+smonth+'-'+dayyy
curd2=curd+' 00:00:00'
q1=Tutorial.query(Tutorial.tactive!='0')
q2=q1.filter(Tutorial.staffusername==tutorid)
q3=q2.filter(Tutorial.date_t == curd2)
thatdaytut=q3.fetch()
daysum=0
asum=0
for tut in thatdaytut:
daysum+=tut.earning
if tut.approved=='1':
asum+=tut.earning
sublist.append([dayy,curd,daysum,asum])
i+=3
lists.append(sublist)
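        # The first two rows of calendar.month() are the "Month Year" header and the weekday abbreviations, so drop them.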
lists=lists[2::]
msum=0
for li in lists:
for l in li:
msum+=l[2]
masum=0
for li in lists:
for l in li:
masum+=l[3]
DEFAULTYEAR=[]
i=0
start_year=now.year
while i<5:
if start_year==now.year:
DEFAULTYEAR.append([start_year-1,start_year,'0'])
else:
DEFAULTYEAR.append([start_year-1,start_year,start_year+1])
start_year-=1
i+=1
monthlist=[]
m=1
while m<=12:
monthlist.append([m,calendar.month_name[m]])
m+=1
user = users.get_current_user()
umail =0;
if user:
umail =user.email()
if checkuser(umail)=='Admin' or checkuser(umail)=='Tutor' or umail == DEFAULT_ADMIN:
staffq=Staff.query(Staff.username==tutorid)
staffs=staffq.fetch()
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
template_values = {
'user': user,
'dd':dd,
'idsafekey':idsafekey,
'month_str':month_str,
'curmonth':now.month,
'curyear':now.year,
'years':DEFAULTYEAR,
'date_str':date_str,
'tutorid':tutorid,
'url': url,
'msum': msum,
'masum': masum,
'lists': lists,
'url_linktext': url_linktext,
'staffs':staffs,
'iscur':iscur,
'monthlist':monthlist,
'pagemon':pagemon,
}
template=JINJA_ENVIRONMENT.get_template('calendar.html')
self.response.write(template.render(template_values))
else:
self.redirect('/')
else:
self.redirect('/')
def post(self):
id=self.request.get('tutorid')
syear=self.request.get('syear')
smonth=self.request.get('smonth')
query_params = {'id': id,'syear':syear,'smonth':smonth}
self.redirect('/calendar?' + urllib.urlencode(query_params))
class chart(webapp2.RequestHandler):
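    # Aggregates a tutor's total and approved earnings per calendar quarter; bonus entries are bucketed by their creation date.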
def get(self):
import calendar
key=self.request.get('id')
idsafekey=key
record2 = ndb.Key(urlsafe=key).get()
tutorid=record2.username
syear=self.request.get('syear')
if not syear:
now = datetime.datetime.now()
syear=str(now.year)
q1=Tutorial.query(Tutorial.tactive!='0')
q2=q1.filter(Tutorial.staffusername==tutorid)
tuts=q2.fetch()
q1=[0,0]
q2=[0,0]
q3=[0,0]
q4=[0,0]
for tut in tuts:
datte=tut.date_t[5:7]
byear=str(tut.date)[0:4]
bdate=None
if tut.isbonus=='1' and byear==str(syear):
bdate=str(tut.date)[5:7]
if tut.date_t[0:4]==syear:
if datte=='01' or datte=='02' or datte=='03'or bdate=='01' or bdate=='02' or bdate=='03':
if tut.approved=='1':
q1[1]+=tut.earning
q1[0]+=tut.earning
if datte=='04' or datte=='05' or datte=='06'or bdate=='04' or bdate=='05' or bdate=='06':
if tut.approved=='1':
q2[1]+=tut.earning
q2[0]+=tut.earning
if datte=='07' or datte=='08' or datte=='09'or bdate=='07' or bdate=='08' or bdate=='09':
if tut.approved=='1':
q3[1]+=tut.earning
q3[0]+=tut.earning
if datte=='10' or datte=='11' or datte=='12' or bdate=='10' or bdate=='11' or bdate=='12':
if tut.approved=='1':
q4[1]+=tut.earning
q4[0]+=tut.earning
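        # Scaled copies of each quarter's totals are appended as well; the fixed divisor below is presumably display scaling for the chart.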
factor=13.2
q1.append(q1[0]/factor)
q1.append(q1[1]/factor)
q2.append(q2[0]/factor)
q2.append(q2[1]/factor)
q3.append(q3[0]/factor)
q3.append(q3[1]/factor)
q4.append(q4[0]/factor)
q4.append(q4[1]/factor)
user = users.get_current_user()
umail =0;
if user:
umail =user.email()
if checkuser(umail)=='Admin' or checkuser(umail)=='Tutor' or umail == DEFAULT_ADMIN:
staffq=Staff.query(Staff.username==tutorid)
staffs=staffq.fetch()
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
template_values = {
'user': user,
'q1':q1,
'q2':q2,
'q3':q3,
'q4':q4,
'staffs':staffs,
'syear':syear,
'idsafekey':idsafekey,
'tutorid':tutorid,
'url': url,
'url_linktext': url_linktext,
}
template=JINJA_ENVIRONMENT.get_template('chart.html')
self.response.write(template.render(template_values))
else:
self.redirect('/')
else:
self.redirect('/')
def post(self):
id=self.request.get('tutorid')
syear=self.request.get('syear')
query_params = {'id': id,'syear':syear}
self.redirect('/chart?' + urllib.urlencode(query_params))
class stats(webapp2.RequestHandler):
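    # Per-day VIP / basic / bonus breakdown of a tutor's approved earnings over the requested range, plus overall gross totals.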
def get(self):
import collections
import calendar
key=self.request.get('id')
idsafekey=key
record2 = ndb.Key(urlsafe=key).get()
tutorid=record2.username
sd=self.request.get('sd')
ed=self.request.get('ed')
cur_date=datetime.date.today()
if not sd:
sd=str(cur_date - datetime.timedelta(days=31))
else:
try:
sd=str(datetime.datetime.strptime(sd,"%d/%m/%Y"))
except ValueError:
sd=str('01/01/'+sd)
sd=str(datetime.datetime.strptime(sd,"%d/%m/%Y"))
if not ed:
ed=str(cur_date + datetime.timedelta(days=1))
else:
try:
ed=str(datetime.datetime.strptime(ed,"%d/%m/%Y"))
except ValueError:
ed=str('31/12/'+ed)
ed=str(datetime.datetime.strptime(ed,"%d/%m/%Y"))
q1=Tutorial.query(Tutorial.tactive!='0')
q2=q1.filter(Tutorial.staffusername==tutorid)
tutorials=q2.fetch()
sumall=[['Net Gross',0,0],
['VIP Gross',0,0],
['Basic Gross',0,0],
['Bonus Gross',0,0,[]]]
sdate=datetime.datetime.strptime(sd[8:10]+'/'+sd[5:7]+'/'+sd[0:4], '%d/%m/%Y').date()
edate=datetime.datetime.strptime(ed[8:10]+'/'+ed[5:7]+'/'+ed[0:4], '%d/%m/%Y').date()
dictt = collections.defaultdict(list)
dt=[]
idate=sdate
while idate<=edate:
dt.append([str(idate)[0:10],0,0,0,idate.year,idate.month-1, idate.day])
idate=idate+ datetime.timedelta(days=1)
for t in tutorials:
if t.date_t>=sd and t.date_t<=ed and t.earning>0 and t.tactive=='1' and t.approved=='1':
sumall[0][1]+=t.earning
newt=str(t.date_t[0:10])
if not dictt[newt]:
dictt[newt]=[0,0,0]
if t.vip=='1':
sumall[1][1]+=t.earning
sumall[1][2]+=float(t.duration)
dictt[newt][0]+=t.earning
elif t.vip=='0':
sumall[2][1]+=t.earning
sumall[2][2]+=float(t.duration)
dictt[newt][1]+=t.earning
elif t.coursename_t== 'Bonus':
sumall[3][1]+=t.earning
dictt[newt][2]+=t.earning
newdt=[]
for item in dt:
key=item[0]
if dictt[key]:
a=dictt[key][0]
b=dictt[key][1]
c=dictt[key][2]
item[1]=a
item[2]=b
item[3]=c
newdt.append(item)
else:
newdt.append(item)
newdt=sorted(newdt,key=lambda x: (x[0]))
user = users.get_current_user()
umail =0;
if user:
umail =user.email()
if checkuser(umail)=='Admin' or checkuser(umail)=='Tutor' or umail == DEFAULT_ADMIN:
staffq=Staff.query(Staff.username==tutorid)
staffs=staffq.fetch()
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
template_values = {
'user': user,
'staffs':staffs,
'sd':sd,
'ed':ed,
'idsafekey':idsafekey,
'tutorid':tutorid,
'url': url,
'bvb':newdt,
'sumall': sumall,
'url_linktext': url_linktext,
}
template=JINJA_ENVIRONMENT.get_template('stats.html')
self.response.write(template.render(template_values))
else:
self.redirect('/')
else:
self.redirect('/')
def post(self):
id=self.request.get('tutorid')
sd=self.request.get('sd')
ed=self.request.get('ed')
query_params = {'id': id,'sd':sd,'ed':ed}
self.redirect('/stats?' + urllib.urlencode(query_params))
class tedit(webapp2.RequestHandler):
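    # Edits a tutorial record and recomputes the tutor's level, base hours and the tutorial's earnings from the rate tables.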
def post(self):
key = self.request.get('tutorid')
staff=ndb.Key(urlsafe=key).get()
keyy2=key
key=self.request.get('keyy')
tutorial = ndb.Key(urlsafe=key).get()
olddur=tutorial.duration
r=self.request.get('date_t')
if r:
tutorial.date_t=r
r=self.request.get('coursename_t')
if r:
tutorial.coursename_t=r
r=self.request.get('numstudent')
if r:
tutorial.numstudent=int(r)
r=self.request.get('duration')
if r:
tutorial.duration=float(r)
newdur=float(r)
r=self.request.get('courselevel')
if r:
tutorial.courselevel=r
tutorial.vip=self.request.get('vip')
if tutorial.coursename_t=='Bonus' and self.request.get('earning') and tutorial.vip!='1' and tutorial.vip!='0':
tutorial.earning=float(self.request.get('earning'))
else:
ddur=newdur-olddur
goal = LEVEL[staff.level[0]]
newhour=staff.basehour+ ddur # -1hr
remain=goal-staff.basehour #8 hr
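            # Promote, keep or demote the level depending on whether the adjusted hours cross the current goal.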
if newhour>=goal and staff.level !='D6':
staff.level=lvlup(staff.level)
newlevel =staff.level
staff.basehour=newhour-goal
goal=LEVEL[staff.level[0]]
remain=goal-staff.basehour
elif newhour<goal and newhour>=0:
staff.basehour=newhour
remain=goal-staff.basehour
elif newhour<0:
staff.level=dlevel(staff.level)
goal=LEVEL[staff.level[0]]
staff.basehour=goal+ddur
remain=goal-staff.basehour
if (tutorial.vip =='1'):
hourrate = VIPRATE[staff.level[0]][0]
hourrate2 = VIPRATE[lvlup(staff.level)[0]][0]
fixed = VIPRATE[staff.level[0]][1]
if tutorial.duration>remain:
dur1=float(remain)
dur2=float(tutorial.duration)-float(dur1)
tutorial.earning = float((hourrate+tutorial.numstudent*fixed)*dur1)+float((hourrate2+tutorial.numstudent*fixed)*dur2)
else:
tutorial.earning = float((hourrate+tutorial.numstudent*fixed)*tutorial.duration)
else:
if (tutorial.courselevel=='Level 1'):
level_c = 1
else:
level_c= 2
stu_num = tutorial.numstudent
stu_num1 = 0
stu_num2 = 0
stu_num3 = 0
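            # Split the head-count into the first 10, the next 10 and the remainder; each tier has its own per-student rate.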
if (stu_num > 0 and stu_num <= 10):
stu_num1 = stu_num
elif (stu_num > 10 and stu_num <= 20):
stu_num1 = 10
stu_num2 = stu_num - 10
elif (stu_num > 20):
stu_num1 = 10
stu_num2 = 10
stu_num3 = stu_num - 20
stu_basic = [stu_num1,stu_num2,stu_num3]
for item in HRATE:
if (item[0]==staff.level):
hourate=item
hourrate_2=HRATE[HRATE.index(item)+1]
break
if tutorial.duration>remain:
dur1=float(remain)
dur2=float(tutorial.duration)-float(dur1)
tutorial.earning=(hourate[level_c]+stu_basic[0]*hourate[3]+stu_basic[1]*hourate[4]+stu_basic[2]*hourate[5])*dur1+(hourate_2[level_c]+stu_basic[0]*hourate_2[3]+stu_basic[1]*hourate_2[4]+stu_basic[2]*hourate_2[5])*dur2
else:
tutorial.earning=(hourate[level_c]+stu_basic[0]*hourate[3]+stu_basic[1]*hourate[4]+stu_basic[2]*hourate[5])*tutorial.duration
staff.put()
tutorial.put()
if self.request.get('src')=='dash':
self.redirect('/admin')
else:
self.redirect('/showdetail?staffid='+keyy2)
class addsbj(webapp2.RequestHandler):
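    # Adds or updates a subject rating on the staff record.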
def post(self):
try:
key=self.request.get('staffid')
key2=key
staff = ndb.Key(urlsafe=key).get()
sbjname=str(self.request.get('sbjname'))
rating=int(self.request.get('rating'))
if staff.subjects:
staff.subjects[sbjname]=rating
else:
staff.subjects={}
staff.subjects[sbjname]=rating
staff.put()
query_params = {'staffid': key2}
self.redirect('/showdetail?' + urllib.urlencode(query_params))
except ValueError:
self.redirect('/error')
class delsbj(webapp2.RequestHandler):
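    # Removes a subject rating from the staff record.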
def post(self):
try:
key=self.request.get('staffid')
key2=key
staff = ndb.Key(urlsafe=key).get()
newsbj=staff.subjects
sbjname=str(self.request.get('sbj'))
newsbj.pop(sbjname, None)
staff.subjects=newsbj
staff.put()
query_params = {'staffid': key2}
self.redirect('/showdetail?' + urllib.urlencode(query_params))
except ValueError:
self.redirect('/error')
class coursedetail(webapp2.RequestHandler):
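    # Lists staff whose subject ratings match the requested course name (case- and whitespace-insensitive).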
def get(self):
user = users.get_current_user()
umail =0;
if user:
umail =user.email()
if checkuser(umail)=='Admin' or checkuser(umail)=='Tutor' or umail == DEFAULT_ADMIN:
coursename=self.request.get('coursename_t')
q1= Staff.query(Staff.sactive!='0')
staffs=q1.fetch()
stafflist=[]
for staff in staffs:
if staff.subjects:
tdict=staff.subjects
for subject, rating in tdict.items():
if coursename.lower().replace(' ','')==subject.lower().replace(' ',''):
stafflist.append([staff.name,rating,staff.key.urlsafe(),staff.username])
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
template_values = {
'user': user,
'staffs':staffs,
'stafflist':stafflist,
'coursename':coursename,
'adminusername':umail,
'url': url,
'url_linktext': url_linktext,
}
template=JINJA_ENVIRONMENT.get_template('coursedetail.html')
self.response.write(template.render(template_values))
else:
self.redirect('/')
else:
self.redirect('/')
# [START app]
app = webapp2.WSGIApplication([
('/', MainPage),
('/sign', Guestbook),
('/approve', Approve),
('/updateinfo', updateinfo),
('/register', register),
('/approvepage', approvepage),
('/addtutorial', addtutorial),
('/payrollpage', payrollpage),
('/showdetail', showdetail),
('/detail', detail),
('/addbonus', addbonus),
('/payall', payall),
('/admin', admin),
('/tutor',tutor),
('/dstaff',dstaff),
('/astaff',astaff),
('/atutorial',atutorial),
('/dtutorial',dtutorial),
('/atutorial-adminpage',atutorialap),
('/dtutorial-adminpage',dtutorialap),
('/approvep',approvep),
('/detailrefine',detailrefine),
('/sendemail',sendemail),
('/error',error),
('/coursepage',coursepage),
('/acourse',acourse),
('/dcourse',dcourse),
('/ecourse',ecourse),
('/tutdate',tutdate),
('/dpprove',dpprove),
('/calendar',calendar),
('/chart',chart),
('/stats',stats),
('/tedit',tedit),
('/erroruserdup',erroruserdup),
('/addsbj',addsbj),
('/delsbj',delsbj),
('/coursedetail',coursedetail)
], debug=True)
# [END app]
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adam."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import combinations
from tensorflow.python.keras import optimizer_v1
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
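# NumPy reference implementation of a single Adam step; the tests below compare the optimizer's variables against these values.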
def adam_update_numpy(param,
g_t,
t,
m,
v,
lr=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-7):
lr_t = lr * np.sqrt(1 - beta2**(t + 1)) / (1 - beta1**(t + 1))
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
param_t = param - lr_t * m_t / (np.sqrt(v_t) + epsilon)
return param_t, m_t, v_t
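# AMSGrad variant: additionally tracks the elementwise maximum of the second-moment estimate.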
def adam_update_numpy_amsgrad(param,
g_t,
t,
m,
v,
vhat,
lr=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-7):
lr_t = lr * np.sqrt(1 - beta2**(t + 1)) / (1 - beta1**(t + 1))
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
vhat_t = np.maximum(vhat, v_t)
param_t = param - lr_t * m_t / (np.sqrt(vhat_t) + epsilon)
return param_t, m_t, v_t, vhat_t
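# Sparse AMSGrad reference: only the rows selected by `indices` receive an update.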
def adam_sparse_update_numpy_amsgrad(param,
indices,
g_t,
t,
m,
v,
vhat,
lr=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-7):
m_t, v_t, vhat_t, param_t = (np.copy(m), np.copy(v), np.copy(vhat),
np.copy(param))
lr_t = lr * np.sqrt(1 - beta2**(t + 1)) / (1 - beta1**(t + 1))
m_t_slice = beta1 * m[indices] + (1 - beta1) * g_t
v_t_slice = beta2 * v[indices] + (1 - beta2) * g_t * g_t
m_t[indices] = m_t_slice
v_t[indices] = v_t_slice
v_hat_t = np.maximum(vhat_t, v_t)
v_hat_t_slice = v_hat_t[indices]
param_t_slice = param[indices] - (
lr_t * (m_t_slice / (np.sqrt(v_hat_t_slice) + epsilon)))
param_t[indices] = param_t_slice
return param_t, m_t, v_t, vhat_t
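# Reconstructs beta_1^t and beta_2^t from the optimizer's iteration counter and hyperparameters.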
def get_beta_accumulators(opt, dtype):
local_step = math_ops.cast(opt.iterations + 1, dtype)
beta_1_t = math_ops.cast(opt._get_hyper("beta_1"), dtype)
beta_1_power = math_ops.pow(beta_1_t, local_step)
beta_2_t = math_ops.cast(opt._get_hyper("beta_2"), dtype)
beta_2_power = math_ops.pow(beta_2_t, local_step)
return (beta_1_power, beta_2_power)
class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
def testSparse(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with ops.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.0, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.0, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0_np_indices = np.array([0, 2], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np[grads0_np_indices]),
constant_op.constant(grads0_np_indices), constant_op.constant([3]))
grads1_np_indices = np.array([0, 2], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np[grads1_np_indices]),
constant_op.constant(grads1_np_indices), constant_op.constant([3]))
opt = adam.Adam()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 3.0, 4.0], self.evaluate(var1))
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of Adam
for t in range(3):
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSparseDevicePlacement(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for index_dtype in [dtypes.int32, dtypes.int64]:
with ops.Graph().as_default(), self.cached_session(
force_gpu=test.is_gpu_available()):
# If a GPU is available, tests that all optimizer ops can be placed on
# it (i.e. they have GPU kernels).
var = variables.Variable([[1.0], [2.0]])
indices = constant_op.constant([0, 1], dtype=index_dtype)
g_sum = lambda: math_ops.reduce_sum(array_ops.gather(var, indices)) # pylint: disable=cell-var-from-loop
optimizer = adam.Adam(3.0)
minimize_op = optimizer.minimize(g_sum, var_list=[var])
self.evaluate(variables.global_variables_initializer())
minimize_op.run()
def testSparseRepeatedIndices(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with ops.Graph().as_default(), self.cached_session():
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update = adam.Adam().apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adam.Adam().apply_gradients(
[(grad_aggregated, aggregated_update_var)])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(aggregated_update_var,
self.evaluate(repeated_index_update_var))
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var,
self.evaluate(repeated_index_update_var))
def doTestBasic(self, use_callable_params=False):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np, name="var0_%d" % i)
var1 = variables.Variable(var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = lambda: 0.001
beta1 = lambda: 0.9
beta2 = lambda: 0.999
epsilon = lambda: 1e-8
if not use_callable_params:
learning_rate = learning_rate()
beta1 = beta1()
beta2 = beta2()
epsilon = epsilon()
opt = adam.Adam(learning_rate=learning_rate)
if not context.executing_eagerly():
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of Adam
for t in range(3):
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
if not context.executing_eagerly():
self.evaluate(update)
else:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testResourceBasic(self):
self.doTestBasic()
@combinations.generate(combinations.combine(mode=["eager"]))
def testBasicCallableParams(self):
self.doTestBasic(use_callable_params=True)
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testBasicWithAmsgrad(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, v0hat, m1, v1, v1hat = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np, name="var0_%d" % i)
var1 = variables.Variable(var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.Adam(amsgrad=True)
if not context.executing_eagerly():
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of Adam
for t in range(3):
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
if not context.executing_eagerly():
self.evaluate(update)
else:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, m0, v0, v0hat = adam_update_numpy_amsgrad(
var0_np, grads0_np, t, m0, v0, v0hat)
var1_np, m1, v1, v1hat = adam_update_numpy_amsgrad(
var1_np, grads1_np, t, m1, v1, v1hat)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testSparseWithAmsgrad(self):
# dtypes.half does not work on gpu + eager.
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
m0 = np.array([[0.0], [0.0]])
v0 = np.array([[0.0], [0.0]])
v0hat = np.array([[0.0], [0.0]])
indices_np = np.array([1])
indices = constant_op.constant(indices_np, dtype=dtypes.int32)
var0_np = np.array([[1.0], [2.0]], dtype=dtype.as_numpy_dtype)
repeated_index_update_var = variables.Variable(var0_np, dtype=dtype)
aggregated_update_var = variables.Variable(var0_np, dtype=dtype)
grads0_np = np.array([[0.2]], dtype=dtype.as_numpy_dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant([0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]), constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(grads0_np, indices,
constant_op.constant([2, 1]))
opt_repeated = adam.Adam(amsgrad=True)
opt_aggregated = adam.Adam(amsgrad=True)
if not context.executing_eagerly():
repeated_update = opt_repeated.apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = opt_aggregated.apply_gradients(
[(grad_aggregated, aggregated_update_var)])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(
self.evaluate(aggregated_update_var),
self.evaluate(repeated_index_update_var))
for t in range(3):
if not context.executing_eagerly():
self.evaluate(repeated_update)
self.evaluate(aggregated_update)
else:
opt_repeated.apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
opt_aggregated.apply_gradients(
[(grad_aggregated, aggregated_update_var)])
var0_np, m0, v0, v0hat = adam_sparse_update_numpy_amsgrad(
var0_np, indices_np, grads0_np, t, m0, v0, v0hat)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(aggregated_update_var))
self.assertAllCloseAccordingToType(
self.evaluate(aggregated_update_var),
self.evaluate(repeated_index_update_var))
def testBasicWithLearningRateDecay(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with ops.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np, name="var0_%d" % i)
var1 = variables.Variable(var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 0.001
beta_1 = 0.9
beta_2 = 0.999
epsilon = 1e-7
decay = 0.5
opt = adam.Adam(
learning_rate=learning_rate,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
decay=decay)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of Adam
for t in range(3):
self.evaluate(update)
lr_np = learning_rate / (1 + decay * t)
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0, lr=lr_np)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1, lr=lr_np)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testBasicWithLearningRateInverseTimeDecay(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with ops.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np, name="var0_%d" % i)
var1 = variables.Variable(var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 0.001
decay = 0.5
lr_schedule = learning_rate_schedule.InverseTimeDecay(
learning_rate, decay_steps=1.0, decay_rate=decay)
beta_1 = 0.9
beta_2 = 0.999
epsilon = 1e-7
opt = adam.Adam(
learning_rate=lr_schedule,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of Adam
for t in range(3):
self.evaluate(update)
lr_np = learning_rate / (1 + decay * t)
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0, lr=lr_np)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1, lr=lr_np)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testTensorLearningRate(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with ops.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.Adam(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of Adam
for t in range(3):
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSharing(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with ops.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.Adam()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of intertwined Adam1 and Adam2.
for t in range(3):
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
if t % 2 == 0:
update1.run()
else:
update2.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@combinations.generate(combinations.combine(mode=["eager"]))
def testSlotsUniqueEager(self):
v1 = variables.Variable(1.)
v2 = variables.Variable(1.)
opt = adam.Adam(1.)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
# There should be iteration, and two unique slot variables for v1 and v2.
self.assertLen(set(v.ref() for v in opt.variables()), 5)
self.assertEqual(
self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))
def testSetWeightsFromV1AdamWithoutMinimize(self):
keras_v1_adam = optimizer_v1.Adam()
keras_v2_adam = adam.Adam()
keras_v2_adam.set_weights(keras_v1_adam.get_weights())
keras_v1_iteration = keras_v1_adam.iterations
keras_v2_iteration = keras_v2_adam.iterations
self.evaluate(variables.global_variables_initializer())
self.assertEqual(
self.evaluate(keras_v1_iteration), self.evaluate(keras_v2_iteration))
def testConstructAdamWithLR(self):
opt = adam.Adam(lr=1.0)
opt_2 = adam.Adam(learning_rate=0.1, lr=1.0)
opt_3 = adam.Adam(learning_rate=0.1)
self.assertIsInstance(opt.lr, variables.Variable)
self.assertIsInstance(opt_2.lr, variables.Variable)
self.assertIsInstance(opt_3.lr, variables.Variable)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(opt.lr), (1.0))
self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
def testSparse(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with ops.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.0, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.0, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0_np_indices = np.array([0, 2], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np[grads0_np_indices]),
constant_op.constant(grads0_np_indices), constant_op.constant([3]))
grads1_np_indices = np.array([0, 2], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np[grads1_np_indices]),
constant_op.constant(grads1_np_indices), constant_op.constant([3]))
opt = adam.NonFusedAdam()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 3.0, 4.0], self.evaluate(var1))
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of NonFusedAdam
for t in range(3):
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSparseDevicePlacement(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for index_dtype in [dtypes.int32, dtypes.int64]:
with ops.Graph().as_default(), self.cached_session(
force_gpu=test.is_gpu_available()):
# If a GPU is available, tests that all optimizer ops can be placed on
# it (i.e. they have GPU kernels).
var = variables.Variable([[1.0], [2.0]])
indices = constant_op.constant([0, 1], dtype=index_dtype)
g_sum = lambda: math_ops.reduce_sum(array_ops.gather(var, indices)) # pylint: disable=cell-var-from-loop
optimizer = adam.NonFusedAdam(3.0)
minimize_op = optimizer.minimize(g_sum, var_list=[var])
self.evaluate(variables.global_variables_initializer())
minimize_op.run()
def testSparseRepeatedIndices(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with ops.Graph().as_default(), self.cached_session():
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update = adam.NonFusedAdam().apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adam.NonFusedAdam().apply_gradients(
[(grad_aggregated, aggregated_update_var)])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(aggregated_update_var,
self.evaluate(repeated_index_update_var))
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var,
self.evaluate(repeated_index_update_var))
def doTestBasic(self, use_callable_params=False):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np, name="var0_%d" % i)
var1 = variables.Variable(var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = lambda: 0.001
beta1 = lambda: 0.9
beta2 = lambda: 0.999
epsilon = lambda: 1e-8
if not use_callable_params:
learning_rate = learning_rate()
beta1 = beta1()
beta2 = beta2()
epsilon = epsilon()
opt = adam.NonFusedAdam(learning_rate=learning_rate)
if not context.executing_eagerly():
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of NonFusedAdam
for t in range(3):
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
if not context.executing_eagerly():
self.evaluate(update)
else:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0), rtol=1e-4, atol=1e-4)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1), rtol=1e-4, atol=1e-4)
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testResourceBasic(self):
self.doTestBasic()
@combinations.generate(combinations.combine(mode=["eager"]))
def testBasicCallableParams(self):
self.doTestBasic(use_callable_params=True)
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testBasicWithAmsgrad(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, v0hat, m1, v1, v1hat = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np, name="var0_%d" % i)
var1 = variables.Variable(var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.NonFusedAdam(amsgrad=True)
if not context.executing_eagerly():
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of NonFusedAdam
for t in range(3):
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
if not context.executing_eagerly():
self.evaluate(update)
else:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, m0, v0, v0hat = adam_update_numpy_amsgrad(
var0_np, grads0_np, t, m0, v0, v0hat)
var1_np, m1, v1, v1hat = adam_update_numpy_amsgrad(
var1_np, grads1_np, t, m1, v1, v1hat)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0), rtol=1e-4, atol=1e-4)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1), rtol=1e-4, atol=1e-4)
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testSparseWithAmsgrad(self):
# dtypes.half does not work on gpu + eager.
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
m0 = np.array([[0.0], [0.0]])
v0 = np.array([[0.0], [0.0]])
v0hat = np.array([[0.0], [0.0]])
indices_np = np.array([1])
indices = constant_op.constant(indices_np, dtype=dtypes.int32)
var0_np = np.array([[1.0], [2.0]], dtype=dtype.as_numpy_dtype)
repeated_index_update_var = variables.Variable(var0_np, dtype=dtype)
aggregated_update_var = variables.Variable(var0_np, dtype=dtype)
grads0_np = np.array([[0.2]], dtype=dtype.as_numpy_dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant([0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]), constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(grads0_np, indices,
constant_op.constant([2, 1]))
opt_repeated = adam.NonFusedAdam(amsgrad=True)
opt_aggregated = adam.NonFusedAdam(amsgrad=True)
if not context.executing_eagerly():
repeated_update = opt_repeated.apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = opt_aggregated.apply_gradients(
[(grad_aggregated, aggregated_update_var)])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(
self.evaluate(aggregated_update_var),
self.evaluate(repeated_index_update_var))
for t in range(3):
if not context.executing_eagerly():
self.evaluate(repeated_update)
self.evaluate(aggregated_update)
else:
opt_repeated.apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
opt_aggregated.apply_gradients(
[(grad_aggregated, aggregated_update_var)])
var0_np, m0, v0, v0hat = adam_sparse_update_numpy_amsgrad(
var0_np, indices_np, grads0_np, t, m0, v0, v0hat)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(aggregated_update_var))
self.assertAllCloseAccordingToType(
self.evaluate(aggregated_update_var),
self.evaluate(repeated_index_update_var))
def testBasicWithLearningRateDecay(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with ops.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np, name="var0_%d" % i)
var1 = variables.Variable(var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 0.001
beta_1 = 0.9
beta_2 = 0.999
epsilon = 1e-7
decay = 0.5
opt = adam.NonFusedAdam(
learning_rate=learning_rate,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
decay=decay)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of NonFusedAdam
for t in range(3):
self.evaluate(update)
lr_np = learning_rate / (1 + decay * t)
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0, lr=lr_np)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1, lr=lr_np)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testBasicWithLearningRateInverseTimeDecay(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with ops.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np, name="var0_%d" % i)
var1 = variables.Variable(var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 0.001
decay = 0.5
lr_schedule = learning_rate_schedule.InverseTimeDecay(
learning_rate, decay_steps=1.0, decay_rate=decay)
beta_1 = 0.9
beta_2 = 0.999
epsilon = 1e-7
opt = adam.NonFusedAdam(
learning_rate=lr_schedule,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of NonFusedAdam
for t in range(3):
self.evaluate(update)
lr_np = learning_rate / (1 + decay * t)
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0, lr=lr_np)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1, lr=lr_np)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testTensorLearningRate(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with ops.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.NonFusedAdam(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of NonFusedAdam
for t in range(3):
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSharing(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with ops.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.NonFusedAdam()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of intertwined NonFusedAdam1 and NonFusedAdam2.
for t in range(3):
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
if t % 2 == 0:
update1.run()
else:
update2.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
if __name__ == "__main__":
test.main()
|
|
import uuid
from django.conf.urls import url as conf_url
from django.core.exceptions import ImproperlyConfigured
from django.test import SimpleTestCase
from django.test.utils import override_settings
from django.urls import NoReverseMatch, Resolver404, path, resolve, reverse
from django.utils.deprecation import RemovedInDjango40Warning
from .converters import DynamicConverter
from .views import empty_view
included_kwargs = {'base': b'hello', 'value': b'world'}
converter_test_data = (
# ('url', ('url_name', 'app_name', {kwargs})),
# aGVsbG8= is 'hello' encoded in base64.
('/base64/aGVsbG8=/', ('base64', '', {'value': b'hello'})),
('/base64/aGVsbG8=/subpatterns/d29ybGQ=/', ('subpattern-base64', '', included_kwargs)),
('/base64/aGVsbG8=/namespaced/d29ybGQ=/', ('subpattern-base64', 'namespaced-base64', included_kwargs)),
)
@override_settings(ROOT_URLCONF='urlpatterns.path_urls')
class SimplifiedURLTests(SimpleTestCase):
def test_path_lookup_without_parameters(self):
match = resolve('/articles/2003/')
self.assertEqual(match.url_name, 'articles-2003')
self.assertEqual(match.args, ())
self.assertEqual(match.kwargs, {})
self.assertEqual(match.route, 'articles/2003/')
def test_path_lookup_with_typed_parameters(self):
match = resolve('/articles/2015/')
self.assertEqual(match.url_name, 'articles-year')
self.assertEqual(match.args, ())
self.assertEqual(match.kwargs, {'year': 2015})
self.assertEqual(match.route, 'articles/<int:year>/')
def test_path_lookup_with_multiple_parameters(self):
match = resolve('/articles/2015/04/12/')
self.assertEqual(match.url_name, 'articles-year-month-day')
self.assertEqual(match.args, ())
self.assertEqual(match.kwargs, {'year': 2015, 'month': 4, 'day': 12})
self.assertEqual(match.route, 'articles/<int:year>/<int:month>/<int:day>/')
def test_two_variable_at_start_of_path_pattern(self):
match = resolve('/en/foo/')
self.assertEqual(match.url_name, 'lang-and-path')
self.assertEqual(match.kwargs, {'lang': 'en', 'url': 'foo'})
self.assertEqual(match.route, '<lang>/<path:url>/')
def test_re_path(self):
match = resolve('/regex/1/')
self.assertEqual(match.url_name, 'regex')
self.assertEqual(match.kwargs, {'pk': '1'})
self.assertEqual(match.route, '^regex/(?P<pk>[0-9]+)/$')
def test_re_path_with_optional_parameter(self):
for url, kwargs in (
('/regex_optional/1/2/', {'arg1': '1', 'arg2': '2'}),
('/regex_optional/1/', {'arg1': '1'}),
):
with self.subTest(url=url):
match = resolve(url)
self.assertEqual(match.url_name, 'regex_optional')
self.assertEqual(match.kwargs, kwargs)
self.assertEqual(
match.route,
r'^regex_optional/(?P<arg1>\d+)/(?:(?P<arg2>\d+)/)?',
)
def test_re_path_with_missing_optional_parameter(self):
match = resolve('/regex_only_optional/')
self.assertEqual(match.url_name, 'regex_only_optional')
self.assertEqual(match.kwargs, {})
self.assertEqual(match.args, ())
self.assertEqual(
match.route,
r'^regex_only_optional/(?:(?P<arg1>\d+)/)?',
)
def test_path_lookup_with_inclusion(self):
match = resolve('/included_urls/extra/something/')
self.assertEqual(match.url_name, 'inner-extra')
self.assertEqual(match.route, 'included_urls/extra/<extra>/')
def test_path_lookup_with_empty_string_inclusion(self):
match = resolve('/more/99/')
self.assertEqual(match.url_name, 'inner-more')
self.assertEqual(match.route, r'^more/(?P<extra>\w+)/$')
def test_path_lookup_with_double_inclusion(self):
match = resolve('/included_urls/more/some_value/')
self.assertEqual(match.url_name, 'inner-more')
self.assertEqual(match.route, r'included_urls/more/(?P<extra>\w+)/$')
def test_path_reverse_without_parameter(self):
url = reverse('articles-2003')
self.assertEqual(url, '/articles/2003/')
def test_path_reverse_with_parameter(self):
url = reverse('articles-year-month-day', kwargs={'year': 2015, 'month': 4, 'day': 12})
self.assertEqual(url, '/articles/2015/4/12/')
@override_settings(ROOT_URLCONF='urlpatterns.path_base64_urls')
def test_converter_resolve(self):
for url, (url_name, app_name, kwargs) in converter_test_data:
with self.subTest(url=url):
match = resolve(url)
self.assertEqual(match.url_name, url_name)
self.assertEqual(match.app_name, app_name)
self.assertEqual(match.kwargs, kwargs)
@override_settings(ROOT_URLCONF='urlpatterns.path_base64_urls')
def test_converter_reverse(self):
for expected, (url_name, app_name, kwargs) in converter_test_data:
if app_name:
url_name = '%s:%s' % (app_name, url_name)
with self.subTest(url=url_name):
url = reverse(url_name, kwargs=kwargs)
self.assertEqual(url, expected)
@override_settings(ROOT_URLCONF='urlpatterns.path_base64_urls')
def test_converter_reverse_with_second_layer_instance_namespace(self):
kwargs = included_kwargs.copy()
kwargs['last_value'] = b'world'
url = reverse('instance-ns-base64:subsubpattern-base64', kwargs=kwargs)
self.assertEqual(url, '/base64/aGVsbG8=/subpatterns/d29ybGQ=/d29ybGQ=/')
def test_path_inclusion_is_matchable(self):
match = resolve('/included_urls/extra/something/')
self.assertEqual(match.url_name, 'inner-extra')
self.assertEqual(match.kwargs, {'extra': 'something'})
def test_path_inclusion_is_reversible(self):
url = reverse('inner-extra', kwargs={'extra': 'something'})
self.assertEqual(url, '/included_urls/extra/something/')
def test_invalid_converter(self):
msg = "URL route 'foo/<nonexistent:var>/' uses invalid converter 'nonexistent'."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
path('foo/<nonexistent:var>/', empty_view)
def test_space_in_route(self):
msg = "URL route 'space/<int: num>' cannot contain whitespace."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
path('space/<int: num>', empty_view)
@override_settings(ROOT_URLCONF='urlpatterns.converter_urls')
class ConverterTests(SimpleTestCase):
def test_matching_urls(self):
def no_converter(x):
return x
test_data = (
('int', {'0', '1', '01', 1234567890}, int),
('str', {'abcxyz'}, no_converter),
('path', {'allows.ANY*characters'}, no_converter),
('slug', {'abcxyz-ABCXYZ_01234567890'}, no_converter),
('uuid', {'39da9369-838e-4750-91a5-f7805cd82839'}, uuid.UUID),
)
for url_name, url_suffixes, converter in test_data:
for url_suffix in url_suffixes:
url = '/%s/%s/' % (url_name, url_suffix)
with self.subTest(url=url):
match = resolve(url)
self.assertEqual(match.url_name, url_name)
self.assertEqual(match.kwargs, {url_name: converter(url_suffix)})
# reverse() works with string parameters.
string_kwargs = {url_name: url_suffix}
self.assertEqual(reverse(url_name, kwargs=string_kwargs), url)
# reverse() also works with native types (int, UUID, etc.).
if converter is not no_converter:
# The converted value might be different for int (a
# leading zero is lost in the conversion).
converted_value = match.kwargs[url_name]
converted_url = '/%s/%s/' % (url_name, converted_value)
self.assertEqual(reverse(url_name, kwargs={url_name: converted_value}), converted_url)
def test_nonmatching_urls(self):
test_data = (
('int', {'-1', 'letters'}),
('str', {'', '/'}),
('path', {''}),
('slug', {'', 'stars*notallowed'}),
('uuid', {
'',
'9da9369-838e-4750-91a5-f7805cd82839',
'39da9369-838-4750-91a5-f7805cd82839',
'39da9369-838e-475-91a5-f7805cd82839',
'39da9369-838e-4750-91a-f7805cd82839',
'39da9369-838e-4750-91a5-f7805cd8283',
}),
)
for url_name, url_suffixes in test_data:
for url_suffix in url_suffixes:
url = '/%s/%s/' % (url_name, url_suffix)
with self.subTest(url=url), self.assertRaises(Resolver404):
resolve(url)
@override_settings(ROOT_URLCONF='urlpatterns.path_same_name_urls')
class SameNameTests(SimpleTestCase):
def test_matching_urls_same_name(self):
@DynamicConverter.register_to_url
def requires_tiny_int(value):
if value > 5:
raise ValueError
return value
tests = [
('number_of_args', [
([], {}, '0/'),
([1], {}, '1/1/'),
]),
('kwargs_names', [
([], {'a': 1}, 'a/1/'),
([], {'b': 1}, 'b/1/'),
]),
('converter', [
(['a/b'], {}, 'path/a/b/'),
(['a b'], {}, 'str/a%20b/'),
(['a-b'], {}, 'slug/a-b/'),
(['2'], {}, 'int/2/'),
(
['39da9369-838e-4750-91a5-f7805cd82839'],
{},
'uuid/39da9369-838e-4750-91a5-f7805cd82839/'
),
]),
('regex', [
(['ABC'], {}, 'uppercase/ABC/'),
(['abc'], {}, 'lowercase/abc/'),
]),
('converter_to_url', [
([6], {}, 'int/6/'),
([1], {}, 'tiny_int/1/'),
]),
]
for url_name, cases in tests:
for args, kwargs, url_suffix in cases:
expected_url = '/%s/%s' % (url_name, url_suffix)
with self.subTest(url=expected_url):
self.assertEqual(
reverse(url_name, args=args, kwargs=kwargs),
expected_url,
)
class ParameterRestrictionTests(SimpleTestCase):
def test_integer_parameter_name_causes_exception(self):
msg = (
"URL route 'hello/<int:1>/' uses parameter name '1' which isn't "
"a valid Python identifier."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
path(r'hello/<int:1>/', lambda r: None)
def test_non_identifier_parameter_name_causes_exception(self):
msg = (
"URL route 'b/<int:book.id>/' uses parameter name 'book.id' which "
"isn't a valid Python identifier."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
path(r'b/<int:book.id>/', lambda r: None)
def test_allows_non_ascii_but_valid_identifiers(self):
# \u0394 is "GREEK CAPITAL LETTER DELTA", a valid identifier.
p = path('hello/<str:\u0394>/', lambda r: None)
match = p.resolve('hello/1/')
self.assertEqual(match.kwargs, {'\u0394': '1'})
@override_settings(ROOT_URLCONF='urlpatterns.path_dynamic_urls')
class ConversionExceptionTests(SimpleTestCase):
"""How are errors in Converter.to_python() and to_url() handled?"""
def test_resolve_value_error_means_no_match(self):
@DynamicConverter.register_to_python
def raises_value_error(value):
raise ValueError()
with self.assertRaises(Resolver404):
resolve('/dynamic/abc/')
def test_resolve_type_error_propagates(self):
@DynamicConverter.register_to_python
def raises_type_error(value):
raise TypeError('This type error propagates.')
with self.assertRaisesMessage(TypeError, 'This type error propagates.'):
resolve('/dynamic/abc/')
def test_reverse_value_error_means_no_match(self):
@DynamicConverter.register_to_url
def raises_value_error(value):
raise ValueError
with self.assertRaises(NoReverseMatch):
reverse('dynamic', kwargs={'value': object()})
def test_reverse_type_error_propagates(self):
@DynamicConverter.register_to_url
def raises_type_error(value):
raise TypeError('This type error propagates.')
with self.assertRaisesMessage(TypeError, 'This type error propagates.'):
reverse('dynamic', kwargs={'value': object()})
class DeprecationTests(SimpleTestCase):
def test_url_warning(self):
msg = (
'django.conf.urls.url() is deprecated in favor of '
'django.urls.re_path().'
)
with self.assertRaisesMessage(RemovedInDjango40Warning, msg):
conf_url(r'^regex/(?P<pk>[0-9]+)/$', empty_view, name='regex')
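# Migration sketch for the deprecation above (illustrative only): a pattern
# previously declared with django.conf.urls.url() (imported here as conf_url),
#
#     conf_url(r'^regex/(?P<pk>[0-9]+)/$', empty_view, name='regex')
#
# is rewritten one-for-one as
#
#     re_path(r'^regex/(?P<pk>[0-9]+)/$', empty_view, name='regex')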
# (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import logging
import mock
# 3p
from docker import Client
from nose.plugins.attrib import attr
# project
from checks import AgentCheck
from tests.checks.common import AgentCheckTest
from tests.checks.common import load_check
from utils.dockerutil import DockerUtil
log = logging.getLogger('tests')
CONTAINERS_TO_RUN = [
"nginx:latest",
"redis:latest",
]
DEFAULT_CUSTOM_TAGS = ["env:test", "docker:test"]
MOCK_CONFIG = {
"init_config": {},
"instances": [{
"url": "unix://var/run/w00t.sock",
"collect_disk_stats": True,
"tags": DEFAULT_CUSTOM_TAGS
}]
}
POD_NAME_LABEL = "io.kubernetes.pod.name"
def reset_docker_settings():
"""Populate docker settings with default, dummy settings"""
DockerUtil().set_docker_settings({}, {})
DockerUtil()._client = Client(**DockerUtil().settings)
@attr(requires='docker_daemon')
class TestCheckDockerDaemonDown(AgentCheckTest):
"""Tests for docker_daemon integration when docker is down."""
CHECK_NAME = 'docker_daemon'
@mock.patch('docker.client.Client._retrieve_server_version',
side_effect=Exception("Connection timeout"))
def test_docker_down(self, *args):
DockerUtil().set_docker_settings({}, {})
DockerUtil().last_init_retry = None
DockerUtil().left_init_retries = 10
DockerUtil()._client = None
self.run_check(MOCK_CONFIG, force_reload=True)
self.assertServiceCheck("docker.service_up", status=AgentCheck.CRITICAL, tags=DEFAULT_CUSTOM_TAGS, count=1)
@attr(requires='docker_daemon')
class TestCheckDockerDaemonNoSetUp(AgentCheckTest):
"""Tests for docker_daemon integration that don't need the setUp."""
CHECK_NAME = 'docker_daemon'
def test_event_attributes_tag(self):
config = {
"init_config": {},
"instances": [{
"url": "unix://var/run/docker.sock",
"event_attributes_as_tags": ["exitCode", "name"],
},
],
}
DockerUtil().set_docker_settings(config['init_config'], config['instances'][0])
DockerUtil().last_init_retry = None
DockerUtil().left_init_retries = 10
DockerUtil()._client = None
container_fail = DockerUtil().client.create_container(
"nginx:latest", detach=True, name='event-tags-test', entrypoint='/bin/false')
log.debug('start nginx:latest with entrypoint /bin/false')
DockerUtil().client.start(container_fail)
log.debug('container exited with %s' % DockerUtil().client.wait(container_fail, 1))
# Wait 1 second after exit so the event will be picked up
from time import sleep
sleep(1)
self.run_check(config, force_reload=True)
DockerUtil().client.remove_container(container_fail)
# Previous tests might have left unprocessed events, to be ignored
filtered_events = []
for event in self.events:
if 'container_name:event-tags-test' in event.get('tags', []):
filtered_events.append(event)
self.assertEqual(len(filtered_events), 1)
self.assertIn("exitCode:1", filtered_events[0]["tags"])
self.assertNotIn("name:test-exit-fail", filtered_events[0]["tags"])
@attr(requires='docker_daemon')
class TestCheckDockerDaemon(AgentCheckTest):
"""Basic Test for docker_daemon integration."""
CHECK_NAME = 'docker_daemon'
# Mock tests #
def mock_normal_get_info(self):
return {
'DriverStatus': [
['Data Space Used', '1 GB'],
['Data Space Available', '9 GB'],
['Data Space Total', '10 GB'],
['Metadata Space Used', '1 MB'],
['Metadata Space Available', '9 MB'],
['Metadata Space Total', '10 MB'],
]
}
def mock_get_info_no_used(self):
return {
'DriverStatus': [
['Data Space Available', '9 GB'],
['Data Space Total', '10 GB'],
['Metadata Space Available', '9 MB'],
['Metadata Space Total', '10 MB'],
]
}
def mock_get_info_no_data(self):
return {
'DriverStatus': [
['Metadata Space Available', '9 MB'],
['Metadata Space Total', '10 MB'],
['Metadata Space Used', '1 MB'],
]
}
def mock_get_info_invalid_values(self):
return {
'DriverStatus': [
['Metadata Space Available', '9 MB'],
['Metadata Space Total', '10 MB'],
['Metadata Space Used', '11 MB'],
]
}
def mock_get_info_all_zeros(self):
return {
'DriverStatus': [
['Data Space Available', '0 MB'],
['Data Space Total', '0 GB'],
['Data Space Used', '0 KB'],
]
}
def mock_get_info_no_spaces(self):
return {
'DriverStatus': [
['Data Space Used', '1GB'],
['Data Space Available', '9GB'],
['Data Space Total', '10GB'],
['Metadata Space Used', '1MB'],
['Metadata Space Available', '9MB'],
['Metadata Space Total', '10MB'],
]
}
@mock.patch('docker.Client.info')
def test_main_service_check(self, mock_info):
mock_info.return_value = self.mock_normal_get_info()
self.run_check(MOCK_CONFIG, force_reload=True)
self.assertServiceCheck("docker.service_up", status=AgentCheck.OK, tags=DEFAULT_CUSTOM_TAGS, count=1)
@mock.patch('docker.Client.info')
def test_devicemapper_disk_metrics(self, mock_info):
mock_info.return_value = self.mock_normal_get_info()
self.run_check(MOCK_CONFIG, force_reload=True)
self.assertMetric('docker.data.free', value=9e9, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.data.used', value=1e9, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.data.total', value=10e9, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.data.percent', value=10.0, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.metadata.free', value=9e6, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.metadata.used', value=1e6, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.metadata.total', value=10e6, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.metadata.percent', value=10.0, tags=DEFAULT_CUSTOM_TAGS)
@mock.patch('docker.Client.info')
def test_devicemapper_no_used_info(self, mock_info):
"""Disk metrics collection should still work and `percent` can be calculated"""
mock_info.return_value = self.mock_get_info_no_used()
self.run_check(MOCK_CONFIG, force_reload=True)
self.assertMetric('docker.data.free', value=9e9, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.data.total', value=10e9, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.data.percent', value=10.0, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.metadata.free', value=9e6, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.metadata.total', value=10e6, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.metadata.percent', value=10.0, tags=DEFAULT_CUSTOM_TAGS)
@mock.patch('docker.Client.info')
def test_devicemapper_no_data_info(self, mock_info):
"""Disk metrics collection should still partially work for metadata"""
mock_info.return_value = self.mock_get_info_no_data()
self.run_check(MOCK_CONFIG, force_reload=True)
self.assertMetric('docker.metadata.free', value=9e6, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.metadata.total', value=10e6, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.metadata.percent', value=10.0, tags=DEFAULT_CUSTOM_TAGS)
@mock.patch('docker.Client.info')
def test_devicemapper_invalid_values(self, mock_info):
"""Invalid values are detected in _calc_percent_disk_stats and 'percent' use 'free'+'used' instead of 'total' """
mock_info.return_value = self.mock_get_info_invalid_values()
self.run_check(MOCK_CONFIG, force_reload=True)
self.assertMetric('docker.metadata.free', value=9e6, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.metadata.used', value=11e6, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.metadata.total', value=10e6, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.metadata.percent', value=55, tags=DEFAULT_CUSTOM_TAGS)
@mock.patch('docker.Client.info')
def test_devicemapper_all_zeros(self, mock_info):
"""Percentage should not be calculated, other metrics should be collected correctly"""
mock_info.return_value = self.mock_get_info_all_zeros()
self.run_check(MOCK_CONFIG, force_reload=True)
metric_names = [metric[0] for metric in self.metrics]
self.assertMetric('docker.data.free', value=0, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.data.used', value=0, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.data.total', value=0, tags=DEFAULT_CUSTOM_TAGS)
self.assertNotIn('docker.data.percent', metric_names)
@mock.patch('docker.Client.info')
def test_devicemapper_no_spaces(self, mock_info):
mock_info.return_value = self.mock_get_info_no_spaces()
self.run_check(MOCK_CONFIG, force_reload=True)
self.assertMetric('docker.data.free', value=9e9, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.data.used', value=1e9, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.data.total', value=10e9, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.data.percent', value=10.0, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.metadata.free', value=9e6, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.metadata.used', value=1e6, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.metadata.total', value=10e6, tags=DEFAULT_CUSTOM_TAGS)
self.assertMetric('docker.metadata.percent', value=10.0, tags=DEFAULT_CUSTOM_TAGS)
# integration tests #
def setUp(self):
self.docker_client = DockerUtil().client
self.second_network = self.docker_client.create_network("second", driver="bridge")['Id']
for c in CONTAINERS_TO_RUN:
images = [i["RepoTags"][0] for i in self.docker_client.images(c.split(":")[0]) if i["RepoTags"] and i["RepoTags"][0].startswith(c)]
if len(images) == 0:
for line in self.docker_client.pull(c, stream=True):
print line
self.containers = []
for c in CONTAINERS_TO_RUN:
name = "test-new-{0}".format(c.replace(":", "-"))
host_config = None
labels = None
if c == "nginx:latest":
host_config = {"Memory": 137438953472}
labels = {"label1": "nginx", "foo": "bar"}
cont = self.docker_client.create_container(
c, detach=True, name=name, host_config=host_config, labels=labels)
self.containers.append(cont)
if c == "nginx:latest":
self.docker_client.connect_container_to_network(cont['Id'], self.second_network)
for c in self.containers:
log.info("Starting container: {0}".format(c))
self.docker_client.start(c)
def tearDown(self):
for c in self.containers:
log.info("Stopping container: {0}".format(c))
self.docker_client.remove_container(c, force=True)
self.docker_client.remove_network(self.second_network)
def test_basic_config_single(self):
expected_metrics = [
('docker.containers.running', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.containers.running', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.containers.stopped', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.containers.stopped', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.containers.running.total', None),
('docker.containers.stopped.total', None),
('docker.image.size', ['image_name:nginx', 'image_tag:latest']),
('docker.image.size', ['image_name:redis', 'image_tag:latest']),
('docker.image.virtual_size', ['image_name:nginx', 'image_tag:latest']),
('docker.image.virtual_size', ['image_name:redis', 'image_tag:latest']),
('docker.images.available', None),
('docker.images.intermediate', None),
('docker.mem.cache', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.mem.cache', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.mem.rss', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.mem.rss', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest'])
]
config = {
"init_config": {},
"instances": [{
"url": "unix://var/run/docker.sock",
"collect_image_size": True,
"collect_images_stats": True
},
],
}
DockerUtil().set_docker_settings(config['init_config'], config['instances'][0])
self.run_check(config, force_reload=True)
for mname, tags in expected_metrics:
self.assertMetric(mname, tags=tags, count=1, at_least=1)
def test_basic_config_twice(self):
expected_metrics = [
('docker.containers.running', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.containers.running', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.containers.stopped', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.containers.stopped', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.containers.running.total', None),
('docker.containers.stopped.total', None),
('docker.images.available', None),
('docker.images.intermediate', None),
('docker.cpu.system', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.cpu.system', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.cpu.user', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.cpu.user', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.io.read_bytes', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.io.read_bytes', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.io.write_bytes', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.io.write_bytes', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.mem.cache', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.mem.cache', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.mem.rss', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.mem.rss', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.net.bytes_rcvd', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest', 'docker_network:bridge']),
('docker.net.bytes_rcvd', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'docker_network:bridge']),
('docker.net.bytes_sent', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest', 'docker_network:bridge']),
('docker.net.bytes_sent', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'docker_network:bridge'])
]
custom_tags = ["extra_tag", "env:testing"]
config = {
"init_config": {},
"instances": [{
"url": "unix://var/run/docker.sock",
"tags": custom_tags,
"collect_image_size": True,
"collect_images_stats": True,
},
],
}
DockerUtil().set_docker_settings(config['init_config'], config['instances'][0])
self.run_check_twice(config, force_reload=True)
for mname, tags in expected_metrics:
expected_tags = list(custom_tags)
if tags is not None:
expected_tags += tags
self.assertMetric(mname, tags=expected_tags, count=1, at_least=1)
def test_exclude_filter(self):
expected_metrics = [
('docker.containers.running', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.containers.running', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.containers.stopped', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.containers.stopped', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.containers.running.total', None),
('docker.containers.stopped.total', None),
('docker.cpu.system', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.cpu.user', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.image.size', ['image_name:redis', 'image_tag:latest']),
('docker.image.size', ['image_name:nginx', 'image_tag:latest']),
('docker.image.virtual_size', ['image_name:redis', 'image_tag:latest']),
('docker.image.virtual_size', ['image_name:nginx', 'image_tag:latest']),
('docker.images.available', None),
('docker.images.intermediate', None),
('docker.io.read_bytes', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.io.write_bytes', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.mem.cache', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.mem.rss', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.net.bytes_rcvd', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest', 'docker_network:bridge']),
('docker.net.bytes_sent', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest', 'docker_network:bridge'])
]
config = {
"init_config": {},
"instances": [{
"url": "unix://var/run/docker.sock",
"exclude": ["docker_image:nginx"],
"collect_images_stats": True,
"collect_image_size": True,
},
],
}
DockerUtil._drop()
DockerUtil(init_config=config['init_config'], instance=config['instances'][0])
self.run_check_twice(config, force_reload=True)
for mname, tags in expected_metrics:
self.assertMetric(mname, tags=tags, count=1, at_least=1)
perf_metrics = [
"docker.cpu.system",
"docker.cpu.user",
"docker.io.read_bytes",
"docker.io.write_bytes",
"docker.mem.cache",
"docker.mem.rss",
"docker.net.bytes_rcvd",
"docker.net.bytes_sent"
]
nginx_tags = ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest',
'image_name:nginx', 'image_tag:latest']
for mname in perf_metrics:
self.assertMetric(mname, tags=nginx_tags, count=0)
def test_include_filter(self):
expected_metrics = [
('docker.containers.running', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.containers.running', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.containers.stopped', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.containers.stopped', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.containers.running.total', None),
('docker.containers.stopped.total', None),
('docker.cpu.system', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.cpu.user', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.image.size', ['image_name:redis', 'image_tag:latest']),
('docker.image.size', ['image_name:nginx', 'image_tag:latest']),
('docker.image.virtual_size', ['image_name:nginx', 'image_tag:latest']),
('docker.image.virtual_size', ['image_name:redis', 'image_tag:latest']),
('docker.images.available', None),
('docker.images.intermediate', None),
('docker.io.read_bytes', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.io.write_bytes', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.mem.cache', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.mem.rss', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.net.bytes_rcvd', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest', 'docker_network:bridge']),
('docker.net.bytes_sent', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest', 'docker_network:bridge'])
]
config = {
"init_config": {},
"instances": [{
"url": "unix://var/run/docker.sock",
"include": ["image_name:redis"],
"exclude": [".*"],
"collect_images_stats": True,
"collect_image_size": True,
},
],
}
DockerUtil._drop()
DockerUtil(init_config=config['init_config'], instance=config['instances'][0])
self.run_check_twice(config, force_reload=True)
for mname, tags in expected_metrics:
self.assertMetric(mname, tags=tags, count=1, at_least=1)
perf_metrics = [
"docker.cpu.system",
"docker.cpu.user",
"docker.io.read_bytes",
"docker.io.write_bytes",
"docker.mem.cache",
"docker.mem.rss",
"docker.net.bytes_rcvd",
"docker.net.bytes_sent"
]
nginx_tags = ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']
for mname in perf_metrics:
self.assertMetric(mname, tags=nginx_tags, count=0)
def test_tags_options(self):
expected_metrics = [
('docker.containers.running', ["container_command:nginx -g 'daemon off;'"]),
('docker.containers.running', ['container_command:docker-entrypoint.sh redis-server']),
('docker.containers.stopped', ["container_command:nginx -g 'daemon off;'"]),
('docker.containers.stopped', ['container_command:docker-entrypoint.sh redis-server']),
('docker.containers.running.total', None),
('docker.containers.stopped.total', None),
('docker.cpu.system', ["container_command:nginx -g 'daemon off;'"]),
('docker.cpu.system', ['container_command:docker-entrypoint.sh redis-server']),
('docker.cpu.user', ['container_command:docker-entrypoint.sh redis-server']),
('docker.cpu.user', ["container_command:nginx -g 'daemon off;'"]),
('docker.image.size', ['image_name:redis', 'image_tag:latest']),
('docker.image.size', ['image_name:nginx', 'image_tag:latest']),
('docker.image.virtual_size', ['image_name:nginx', 'image_tag:latest']),
('docker.image.virtual_size', ['image_name:redis', 'image_tag:latest']),
('docker.images.available', None),
('docker.images.intermediate', None),
('docker.io.read_bytes', ["container_command:nginx -g 'daemon off;'"]),
('docker.io.read_bytes', ['container_command:docker-entrypoint.sh redis-server']),
('docker.io.write_bytes', ['container_command:docker-entrypoint.sh redis-server']),
('docker.io.write_bytes', ["container_command:nginx -g 'daemon off;'"]),
('docker.mem.cache', ["container_command:nginx -g 'daemon off;'"]),
('docker.mem.cache', ['container_command:docker-entrypoint.sh redis-server']),
('docker.mem.rss', ['container_command:docker-entrypoint.sh redis-server']),
('docker.mem.rss', ["container_command:nginx -g 'daemon off;'"]),
('docker.net.bytes_rcvd', ['container_command:docker-entrypoint.sh redis-server', 'docker_network:bridge']),
('docker.net.bytes_rcvd', ["container_command:nginx -g 'daemon off;'", 'docker_network:bridge']),
('docker.net.bytes_sent', ["container_command:nginx -g 'daemon off;'", 'docker_network:bridge']),
('docker.net.bytes_sent', ['container_command:docker-entrypoint.sh redis-server', 'docker_network:bridge'])
]
config = {
"init_config": {},
"instances": [{
"url": "unix://var/run/docker.sock",
"performance_tags": ["container_command"],
"container_tags": ["container_command"],
"collect_images_stats": True,
"collect_image_size": True,
},
],
}
DockerUtil._drop()
DockerUtil(init_config=config['init_config'], instance=config['instances'][0])
self.run_check_twice(config, force_reload=True)
for mname, tags in expected_metrics:
self.assertMetric(mname, tags=tags, count=1, at_least=1)
def test_set_docker_settings(self):
"""Test a client settings update"""
self.assertEqual(DockerUtil().settings["version"], "auto")
cur_loc = __file__
init_config = {
"api_version": "foobar",
"timeout": "42",
"tls_client_cert": cur_loc,
"tls_client_key": cur_loc,
"tls_cacert": cur_loc,
"tls": True
}
instance = {
"url": "https://foo.bar:42",
}
DockerUtil().set_docker_settings(init_config, instance)
DockerUtil()._client = Client(**DockerUtil().settings)
self.assertEqual(DockerUtil().client.verify, cur_loc)
self.assertEqual(DockerUtil().client.cert, (cur_loc, cur_loc))
reset_docker_settings()
def test_labels_collection(self):
expected_metrics = [
('docker.containers.running', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'label1:nginx']),
('docker.containers.running', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.containers.stopped', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.containers.stopped', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'label1:nginx']),
('docker.containers.running.total', None),
('docker.containers.stopped.total', None),
('docker.image.size', ['image_name:redis', 'image_tag:latest']),
('docker.image.size', ['image_name:nginx', 'image_tag:latest']),
('docker.image.virtual_size', ['image_name:redis', 'image_tag:latest']),
('docker.image.virtual_size', ['image_name:nginx', 'image_tag:latest']),
('docker.images.available', None),
('docker.images.intermediate', None),
('docker.mem.cache', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'label1:nginx']),
('docker.mem.cache', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.mem.rss', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'label1:nginx']),
('docker.mem.rss', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.mem.limit', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'label1:nginx']),
('docker.mem.in_use', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'label1:nginx']),
]
config = {
"init_config": {},
"instances": [{
"url": "unix://var/run/docker.sock",
"collect_labels_as_tags": ["label1"],
"collect_image_size": True,
"collect_images_stats": True,
"collect_container_count": True,
"collect_dead_container_count": True,
"collect_exited_container_count": True,
"collect_volume_count": True,
"collect_dangling_volume_count": True,
},
],
}
DockerUtil._drop()
DockerUtil(init_config=config['init_config'], instance=config['instances'][0])
self.run_check(config, force_reload=True)
for mname, tags in expected_metrics:
self.assertMetric(mname, tags=tags, count=1, at_least=1)
def test_collect_labels_as_tags(self):
expected_metrics = [
('docker.containers.stopped.total', None),
('docker.containers.running.total', None),
('docker.containers.running', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.containers.running', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'label1:nginx']),
('docker.mem.rss', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'label1:nginx']),
('docker.containers.stopped', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.containers.stopped', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'label1:nginx']),
('docker.mem.rss', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.mem.limit', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'label1:nginx']),
('docker.mem.cache', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'label1:nginx']),
('docker.mem.cache', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.mem.in_use', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'label1:nginx']),
]
config = {
"init_config": {},
"instances": [{
"url": "unix://var/run/docker.sock",
},
],
}
DockerUtil._drop()
DockerUtil(init_config=config['init_config'], instance=config['instances'][0])
self.agentConfig = {
'docker_labels_as_tags': 'label1'
}
self.check = load_check('docker_daemon', config, self.agentConfig)
self.check.check(config)
self.metrics = self.check.get_metrics()
for mname, tags in expected_metrics:
self.assertMetric(mname, tags=tags, count=1, at_least=1)
def test_histogram(self):
metric_suffix = ["count", "avg", "median", "max", "95percentile"]
expected_metrics = [
('docker.containers.running', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.containers.running', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.containers.stopped', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.containers.stopped', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.containers.running.total', None),
('docker.containers.stopped.total', None),
('docker.image.size', ['image_name:redis', 'image_tag:latest']),
('docker.image.size', ['image_name:nginx', 'image_tag:latest']),
('docker.image.virtual_size', ['image_name:nginx', 'image_tag:latest']),
('docker.image.virtual_size', ['image_name:redis', 'image_tag:latest']),
('docker.images.available', None),
('docker.images.intermediate', None),
]
histo_metrics = [
('docker.mem.cache', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.mem.cache', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.mem.rss', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.mem.rss', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.mem.limit', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.mem.in_use', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
]
config = {
"init_config": {},
"instances": [{
"url": "unix://var/run/docker.sock",
"collect_image_size": True,
"collect_images_stats": True,
"use_histogram": True,
},
],
}
DockerUtil._drop()
DockerUtil(init_config=config['init_config'], instance=config['instances'][0])
self.run_check(config, force_reload=True)
for mname, tags in expected_metrics:
self.assertMetric(mname, tags=tags, count=1, at_least=1)
for mname, tags in histo_metrics:
for suffix in metric_suffix:
self.assertMetric(mname + "." + suffix, tags=tags, at_least=1)
def test_events(self):
config = {
"init_config": {},
"instances": [{
"url": "unix://var/run/docker.sock",
"collect_images_stats": True,
},
],
}
DockerUtil().set_docker_settings(config['init_config'], config['instances'][0])
self.run_check(config, force_reload=True)
self.assertEqual(len(self.events), 2)
def test_healthcheck(self):
config = {
"init_config": {},
"instances": [{
"url": "unix://var/run/docker.sock",
"health_service_check_whitelist": ["docker_image:nginx", "docker_image:redis"],
},
],
}
DockerUtil().set_docker_settings(config['init_config'], config['instances'][0])
DockerUtil().filtering_enabled = False
self.run_check(config, force_reload=True)
self.assertServiceCheck('docker.container_health', count=2)
config = {
"init_config": {},
"instances": [{
"url": "unix://var/run/docker.sock",
"health_service_check_whitelist": [],
},
],
}
DockerUtil._drop()
DockerUtil(init_config=config['init_config'], instance=config['instances'][0])
self.run_check(config, force_reload=True)
self.assertServiceCheck('docker.container_health', count=0)
def test_container_size(self):
expected_metrics = [
('docker.containers.running', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.containers.running', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.containers.stopped', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.containers.stopped', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.containers.running.total', None),
('docker.containers.stopped.total', None),
('docker.image.size', ['image_name:redis', 'image_tag:latest']),
('docker.image.size', ['image_name:nginx', 'image_tag:latest']),
('docker.image.virtual_size', ['image_name:redis', 'image_tag:latest']),
('docker.image.virtual_size', ['image_name:nginx', 'image_tag:latest']),
('docker.images.available', None),
('docker.images.intermediate', None),
('docker.mem.cache', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.mem.cache', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.mem.rss', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.mem.rss', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
('docker.mem.limit', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
('docker.mem.in_use', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
# Container size metrics
("docker.container.size_rootfs", ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
("docker.container.size_rootfs", ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
("docker.container.size_rw", ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
]
config = {
"init_config": {},
"instances": [{
"url": "unix://var/run/docker.sock",
"collect_container_size": True,
"collect_image_size": True,
"collect_images_stats": True,
},
],
}
DockerUtil().set_docker_settings(config['init_config'], config['instances'][0])
self.run_check(config, force_reload=True)
for mname, tags in expected_metrics:
self.assertMetric(mname, tags=tags, count=1, at_least=1)
def test_image_tags_extraction(self):
entities = [
# ({'Image': image_name}, [expected_image_name, expected_image_tag])
({'Image': 'nginx:latest'}, [['nginx'], ['latest']]),
({'Image': 'localhost/nginx:latest'}, [['localhost/nginx'], ['latest']]),
({'Image': 'localhost:5000/nginx:latest'}, [['localhost:5000/nginx'], ['latest']]),
({'RepoTags': ['redis:latest']}, [['redis'], ['latest']]),
({'RepoTags': ['localhost/redis:latest']}, [['localhost/redis'], ['latest']]),
({'RepoTags': ['localhost:5000/redis:latest']}, [['localhost:5000/redis'], ['latest']]),
({'RepoTags': ['localhost:5000/redis:latest', 'localhost:5000/redis:v1.1']}, [['localhost:5000/redis'], ['latest', 'v1.1']]),
({'RepoTags': [], 'RepoDigests': [u'datadog/docker-dd-agent@sha256:47a59c2ea4f6d9555884aacc608b303f18bde113b1a3a6743844bfc364d73b44']},
[['datadog/docker-dd-agent'], None]),
]
for entity in entities:
self.assertEqual(sorted(DockerUtil().image_tag_extractor(entity[0], 0)), sorted(entity[1][0]))
tags = DockerUtil().image_tag_extractor(entity[0], 1)
if isinstance(entity[1][1], list):
self.assertEqual(sorted(tags), sorted(entity[1][1]))
else:
self.assertEqual(tags, entity[1][1])
def test_container_name_extraction(self):
containers = [
({'Id': 'deadbeef'}, ['deadbeef']),
({'Names': ['/redis'], 'Id': 'deadbeef'}, ['redis']),
({'Names': ['/mongo', '/redis/mongo'], 'Id': 'deadbeef'}, ['mongo']),
({'Names': ['/redis/mongo', '/mongo'], 'Id': 'deadbeef'}, ['mongo']),
]
for co in containers:
self.assertEqual(DockerUtil.container_name_extractor(co[0]), co[1])
def test_collect_exit_code(self):
config = {
"init_config": {},
"instances": [{
"url": "unix://var/run/docker.sock",
"collect_exit_codes": True
}]
}
DockerUtil().set_docker_settings(config['init_config'], config['instances'][0])
expected_service_checks = [
(AgentCheck.OK, ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'container_name:test-exit-ok']),
(AgentCheck.CRITICAL, ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'container_name:test-exit-fail']),
]
container_ok = self.docker_client.create_container(
"nginx:latest", detach=True, name='test-exit-ok', entrypoint='/bin/true')
log.debug('start nginx:latest with entrypoint /bin/true')
container_fail = self.docker_client.create_container(
"nginx:latest", detach=True, name='test-exit-fail', entrypoint='/bin/false')
log.debug('start nginx:latest with entrypoint /bin/false')
self.docker_client.start(container_ok)
self.docker_client.start(container_fail)
log.debug('container exited with %s' % self.docker_client.wait(container_ok, 1))
log.debug('container exited with %s' % self.docker_client.wait(container_fail, 1))
# After the container exits, we need to wait a second so the event isn't too recent
# when the check runs, otherwise the event is not picked up
from time import sleep
sleep(1)
self.run_check(config)
self.docker_client.remove_container(container_ok)
self.docker_client.remove_container(container_fail)
for status, tags in expected_service_checks:
self.assertServiceCheck('docker.exit', status=status, tags=tags, count=1)
def test_network_tagging(self):
expected_metrics = [
('docker.net.bytes_rcvd',
['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx',
'image_tag:latest', 'docker_network:bridge']),
('docker.net.bytes_rcvd',
['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx',
'image_tag:latest', 'docker_network:second']),
('docker.net.bytes_sent',
['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx',
'image_tag:latest', 'docker_network:bridge']),
('docker.net.bytes_sent',
['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx',
'image_tag:latest', 'docker_network:second'])
]
custom_tags = ["extra_tag", "env:testing"]
config = {
"init_config": {},
"instances": [{
"url": "unix://var/run/docker.sock",
"tags": custom_tags,
"collect_image_size": True,
"collect_images_stats": True,
},
],
}
DockerUtil().set_docker_settings(config['init_config'], config['instances'][0])
self.run_check_twice(config, force_reload=True)
for mname, tags in expected_metrics:
expected_tags = list(custom_tags)
if tags is not None:
expected_tags += tags
self.assertMetric(mname, tags=expected_tags, count=1, at_least=1)
def mock_parse_cgroup_file(self, stat_file):
with open(stat_file, 'r') as fp:
if 'blkio' in stat_file:
return {}
elif 'cpuacct.usage' in stat_file:
return dict({'usage': str(int(fp.read())/10000000)})
# mocked part
elif 'cpu' in stat_file:
self.run += 1
return {'user': 1000 * self.run, 'system': 1000 * self.run}
elif 'memory.soft_limit_in_bytes' in stat_file:
value = int(fp.read())
if value < 2 ** 60:
return dict({'softlimit': value})
else:
return dict(map(lambda x: x.split(' ', 1), fp.read().splitlines()))
def test_filter_capped_metrics(self):
config = {
"init_config": {},
"instances": [{
"url": "unix://var/run/docker.sock",
"capped_metrics": {
"docker.cpu.user": 100,
"docker.cpu.system": 100,
}
}]
}
self.run = 1
self.run_check_twice(config, mocks={'_parse_cgroup_file': self.mock_parse_cgroup_file})
# last 2 points should be dropped so the rate should be 0
self.assertMetric('docker.cpu.user', value=0.0)
self.assertMetric('docker.cpu.system', value=0.0)
def test_filter_event_type(self):
""" Testing event type filtering"""
event_list = [
{"status":"create","id":"aa717771661fb29ed0ca74274178dbc7114dee3d4adfde7760828ee3f6b52001","from":"redis","Type":"container","Action":"create","Actor":{"ID":"aa717771661fb29ed0ca74274178dbc7114dee3d4adfde7760828ee3f6b52001","Attributes":{"image":"redis","name":"brave_rosalind"}},"scope":"local","time":1505221851,"timeNano":1505221851874332240},
{"status":"pause","id":"aa717771661fb29ed0ca74274178dbc7114dee3d4adfde7760828ee3f6b52001","from":"redis","Type":"container","Action":"pause","Actor":{"ID":"aa717771661fb29ed0ca74274178dbc7114dee3d4adfde7760828ee3f6b52001","Attributes":{"image":"redis","name":"brave_rosalind"}},"scope":"local","time":1505221892,"timeNano":1505221892885900077},
{"status":"top","id":"aa717771661fb29ed0ca74274178dbc7114dee3d4adfde7760828ee3f6b52001","from":"redis","Type":"container","Action":"top","Actor":{"ID":"aa717771661fb29ed0ca74274178dbc7114dee3d4adfde7760828ee3f6b52001","Attributes":{"image":"redis","name":"brave_rosalind"}},"scope":"local","time":1505221910,"timeNano":1505221910331861955},
]
dict_mock = {"redis":event_list}
# Testing with the default config
self.run_check(MOCK_CONFIG, force_reload=True)
result = self.check._format_events(dict_mock, {})
self.assertEqual(1, len(result))
self.assertIn('create', result[0]['msg_text'])
self.assertIn('pause', result[0]['msg_text'])
self.assertNotIn('top', result[0]['msg_text'])
# Testing with a custom config
mock_config_top = {
"init_config": {},
"instances": [{
"url": "unix://var/run/w00t.sock",
"filtered_event_types": ["pause"]
}]
}
self.run_check(mock_config_top, force_reload=True)
resulttop = self.check._format_events(dict_mock, {})
self.assertEqual(1, len(resulttop))
self.assertIn('create', resulttop[0]['msg_text'])
self.assertNotIn('pause', resulttop[0]['msg_text'])
self.assertIn('top', resulttop[0]['msg_text'])
# Copyright (c) 2012-2014 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2013 Amin Farmahini-Farahani
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
# Ani Udipi
from m5.params import *
from AbstractMemory import *
# Enum for memory scheduling algorithms, currently First-Come
# First-Served and a First-Row Hit then First-Come First-Served
class MemSched(Enum): vals = ['fcfs', 'frfcfs']
# Enum for the address mapping. With Ch, Ra, Ba, Ro and Co denoting
# channel, rank, bank, row and column, respectively, and going from
# MSB to LSB. Available are RoRaBaChCo and RoRaBaCoCh, that are
# suitable for an open-page policy, optimising for sequential accesses
# hitting in the open row. For a closed-page policy, RoCoRaBaCh
# maximises parallelism.
class AddrMap(Enum): vals = ['RoRaBaChCo', 'RoRaBaCoCh', 'RoCoRaBaCh']
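# A standalone sketch (illustration only, not used by gem5; the field widths
# below are assumed rather than read from the controller) of how the
# open-page friendly RoRaBaCoCh ordering slices a byte address, LSB to MSB:
# channel, column burst, bank, rank, row. Keeping the row bits at the MSB end
# means sequential addresses keep hitting the same open row.
def _example_decode_RoRaBaCoCh(addr, burst_bytes=64, channels=1,
                               cols_per_row=1024, banks=8, ranks=2):
    burst = addr // burst_bytes
    channel, burst = burst % channels, burst // channels
    col, burst = burst % cols_per_row, burst // cols_per_row
    bank, burst = burst % banks, burst // banks
    rank, row = burst % ranks, burst // ranks
    return (row, rank, bank, col, channel)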
# Enum for the page policy, either open, open_adaptive, close, or
# close_adaptive.
class PageManage(Enum): vals = ['open', 'open_adaptive', 'close',
'close_adaptive']
# DRAMCtrl is a single-channel single-ported DRAM controller model
# that aims to model the most important system-level performance
# effects of a DRAM without getting into too much detail of the DRAM
# itself.
class DRAMCtrl(AbstractMemory):
type = 'DRAMCtrl'
cxx_header = "mem/dram_ctrl.hh"
# single-ported on the system interface side, instantiate with a
# bus in front of the controller for multiple ports
port = SlavePort("Slave port")
# the basic configuration of the controller architecture, note
# that each entry corresponds to a burst for the specific DRAM
# configuration (e.g. x32 with burst length 8 is 32 bytes) and not
# the cacheline size or request/packet size
write_buffer_size = Param.Unsigned(64, "Number of write queue entries")
read_buffer_size = Param.Unsigned(32, "Number of read queue entries")
# threshold in percent for when to forcefully trigger writes and
# start emptying the write buffer
write_high_thresh_perc = Param.Percent(85, "Threshold to force writes")
# threshold in percentage for when to start writes if the read
# queue is empty
write_low_thresh_perc = Param.Percent(50, "Threshold to start writes")
# minimum write bursts to schedule before switching back to reads
min_writes_per_switch = Param.Unsigned(16, "Minimum write bursts before "
"switching to reads")
# scheduler, address map and page policy
mem_sched_policy = Param.MemSched('frfcfs', "Memory scheduling policy")
addr_mapping = Param.AddrMap('RoRaBaCoCh', "Address mapping policy")
page_policy = Param.PageManage('open_adaptive', "Page management policy")
# enforce a limit on the number of accesses per row
max_accesses_per_row = Param.Unsigned(16, "Max accesses per row before "
"closing");
# size of DRAM Chip in Bytes
device_size = Param.MemorySize("Size of DRAM chip")
# pipeline latency of the controller and PHY, split into a
# frontend part and a backend part, with reads and writes serviced
# by the queues only seeing the frontend contribution, and reads
# serviced by the memory seeing the sum of the two
static_frontend_latency = Param.Latency("10ns", "Static frontend latency")
static_backend_latency = Param.Latency("10ns", "Static backend latency")
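# For example, with the 10ns defaults above, a request serviced straight
# from the controller queues (e.g. a read forwarded from the write queue)
# sees only the 10ns frontend latency, whereas a read that goes out to the
# devices sees frontend + backend (20ns) on top of the DRAM timing
# constraints defined below.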
# the physical organisation of the DRAM
device_bus_width = Param.Unsigned("data bus width in bits for each DRAM "\
"device/chip")
burst_length = Param.Unsigned("Burst length (BL) in beats")
device_rowbuffer_size = Param.MemorySize("Page (row buffer) size per "\
"device/chip")
devices_per_rank = Param.Unsigned("Number of devices/chips per rank")
ranks_per_channel = Param.Unsigned("Number of ranks per channel")
# default to 0 bank groups per rank, indicating bank group architecture
# is not used
# update per memory class when bank group architecture is supported
bank_groups_per_rank = Param.Unsigned(0, "Number of bank groups per rank")
banks_per_rank = Param.Unsigned("Number of banks per rank")
# only used for the address mapping as the controller by
# construction is a single channel and multiple controllers have
# to be instantiated for a multi-channel configuration
channels = Param.Unsigned(1, "Number of channels")
# For power modelling we need to know if the DRAM has a DLL or not
dll = Param.Bool(True, "DRAM has DLL or not")
# DRAMPower provides in addition to the core power, the possibility to
# include RD/WR termination and IO power. This calculation assumes some
# default values. The integration of DRAMPower with gem5 does not include
# IO and RD/WR termination power by default. This might be added as an
# additional feature in the future.
# timing behaviour and constraints - all in nanoseconds
# the base clock period of the DRAM
tCK = Param.Latency("Clock period")
# the amount of time in nanoseconds from issuing an activate command
# to the data being available in the row buffer for a read/write
tRCD = Param.Latency("RAS to CAS delay")
# the time from issuing a read/write command to seeing the actual data
tCL = Param.Latency("CAS latency")
# minimum time between a precharge and subsequent activate
tRP = Param.Latency("Row precharge time")
# minimum time between an activate and a precharge to the same row
tRAS = Param.Latency("ACT to PRE delay")
# minimum time between a write data transfer and a precharge
tWR = Param.Latency("Write recovery time")
# minimum time between a read and precharge command
tRTP = Param.Latency("Read to precharge")
# time to complete a burst transfer, typically the burst length
# divided by two due to the DDR bus, but by making it a parameter
# it is easier to also evaluate SDR memories like WideIO.
# This parameter has to account for burst length.
# Read/Write requests with data size larger than one full burst are broken
# down into multiple requests in the controller
# tBURST is equivalent to the CAS-to-CAS delay (tCCD)
# With bank group architectures, tBURST represents the CAS-to-CAS
# delay for bursts to different bank groups (tCCD_S)
tBURST = Param.Latency("Burst duration (typically burst length / 2 cycles for DDR)")
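# Worked example (typical DDR3-1600 figures, for illustration only): with
# tCK = 1.25ns and a burst length of 8, tBURST = (8 / 2) * 1.25ns = 5ns,
# i.e. four bus clock cycles on the double-data-rate interface.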
# CAS-to-CAS delay for bursts to the same bank group
# only utilized with bank group architectures; set to 0 for default case
# tBURST is equivalent to tCCD_S; no explicit parameter required
# for CAS-to-CAS delay for bursts to different bank groups
tCCD_L = Param.Latency("0ns", "Same bank group CAS to CAS delay")
# time taken to complete one refresh cycle (N rows in all banks)
tRFC = Param.Latency("Refresh cycle time")
# refresh command interval, how often a "ref" command needs
# to be sent. It is 7.8 us for a 64ms refresh requirement
tREFI = Param.Latency("Refresh command interval")
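# For example, with the common 8192 refresh commands per retention period
# (DDR3/DDR4), tREFI = 64ms / 8192 = 7.8125us, which rounds to the 7.8us
# quoted above.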
# write-to-read, same rank turnaround penalty
tWTR = Param.Latency("Write to read, same rank switching time")
# read-to-write, same rank turnaround penalty
tRTW = Param.Latency("Read to write, same rank switching time")
# rank-to-rank bus delay penalty
# this does not correlate to a memory timing parameter and encompasses:
# 1) RD-to-RD, 2) WR-to-WR, 3) RD-to-WR, and 4) WR-to-RD
# different rank bus delay
tCS = Param.Latency("Rank to rank switching time")
# minimum row activate to row activate delay time
tRRD = Param.Latency("ACT to ACT delay")
# only utilized with bank group architectures; set to 0 for default case
tRRD_L = Param.Latency("0ns", "Same bank group ACT to ACT delay")
# time window in which a maximum number of activates are allowed
# to take place, set to 0 to disable
tXAW = Param.Latency("X activation window")
activation_limit = Param.Unsigned("Max number of activates in window")
# time to exit power-down mode
# Exit power-down to next valid command delay
tXP = Param.Latency("0ns", "Power-up Delay")
# Exit Powerdown to commands requiring a locked DLL
tXPDLL = Param.Latency("0ns", "Power-up Delay with locked DLL")
# time to exit self-refresh mode
tXS = Param.Latency("0ns", "Self-refresh exit latency")
# time to exit self-refresh mode with locked DLL
tXSDLL = Param.Latency("0ns", "Self-refresh exit latency DLL")
# Currently rolled into other params
######################################################################
# tRC - assumed to be tRAS + tRP
# Power Behaviour and Constraints
# DRAMs like LPDDR and WideIO have 2 external voltage domains. These are
# defined as VDD and VDD2. Each current is defined for each voltage domain
# separately. For example, current IDD0 is active-precharge current for
# voltage domain VDD and current IDD02 is active-precharge current for
# voltage domain VDD2.
# By default all currents are set to 0mA. Users who are only interested in
# the performance of DRAMs can leave them at 0.
# Operating 1 Bank Active-Precharge current
IDD0 = Param.Current("0mA", "Active precharge current")
# Operating 1 Bank Active-Precharge current multiple voltage Range
IDD02 = Param.Current("0mA", "Active precharge current VDD2")
# Precharge Power-down Current: Slow exit
IDD2P0 = Param.Current("0mA", "Precharge Powerdown slow")
# Precharge Power-down Current: Slow exit multiple voltage Range
IDD2P02 = Param.Current("0mA", "Precharge Powerdown slow VDD2")
# Precharge Power-down Current: Fast exit
IDD2P1 = Param.Current("0mA", "Precharge Powerdown fast")
# Precharge Power-down Current: Fast exit multiple voltage Range
IDD2P12 = Param.Current("0mA", "Precharge Powerdown fast VDD2")
# Precharge Standby current
IDD2N = Param.Current("0mA", "Precharge Standby current")
# Precharge Standby current multiple voltage range
IDD2N2 = Param.Current("0mA", "Precharge Standby current VDD2")
# Active Power-down current: slow exit
IDD3P0 = Param.Current("0mA", "Active Powerdown slow")
# Active Power-down current: slow exit multiple voltage range
IDD3P02 = Param.Current("0mA", "Active Powerdown slow VDD2")
# Active Power-down current : fast exit
IDD3P1 = Param.Current("0mA", "Active Powerdown fast")
# Active Power-down current : fast exit multiple voltage range
IDD3P12 = Param.Current("0mA", "Active Powerdown fast VDD2")
# Active Standby current
IDD3N = Param.Current("0mA", "Active Standby current")
# Active Standby current multiple voltage range
IDD3N2 = Param.Current("0mA", "Active Standby current VDD2")
# Burst Read Operating Current
IDD4R = Param.Current("0mA", "READ current")
# Burst Read Operating Current multiple voltage range
IDD4R2 = Param.Current("0mA", "READ current VDD2")
# Burst Write Operating Current
IDD4W = Param.Current("0mA", "WRITE current")
# Burst Write Operating Current multiple voltage range
IDD4W2 = Param.Current("0mA", "WRITE current VDD2")
# Refresh Current
IDD5 = Param.Current("0mA", "Refresh current")
# Refresh Current multiple voltage range
IDD52 = Param.Current("0mA", "Refresh current VDD2")
# Self-Refresh Current
IDD6 = Param.Current("0mA", "Self-refresh Current")
# Self-Refresh Current multiple voltage range
IDD62 = Param.Current("0mA", "Self-refresh Current VDD2")
# Main voltage range of the DRAM
VDD = Param.Voltage("0V", "Main Voltage Range")
# Second voltage range defined by some DRAMs
VDD2 = Param.Voltage("0V", "2nd Voltage Range")
# A single DDR3-1600 x64 channel (one command and address bus), with
# timings based on a DDR3-1600 4 Gbit datasheet (Micron MT41J512M8) in
# an 8x8 configuration.
class DDR3_1600_x64(DRAMCtrl):
# size of device in bytes
device_size = '512MB'
# 8x8 configuration, 8 devices each with an 8-bit interface
device_bus_width = 8
# DDR3 is a BL8 device
burst_length = 8
# Each device has a page (row buffer) size of 1 Kbyte (1K columns x8)
device_rowbuffer_size = '1kB'
# 8x8 configuration, so 8 devices
devices_per_rank = 8
# Use two ranks
ranks_per_channel = 2
# DDR3 has 8 banks in all configurations
banks_per_rank = 8
# 800 MHz
tCK = '1.25ns'
# 8 beats across an x64 interface translates to 4 clocks @ 800 MHz
tBURST = '5ns'
# DDR3-1600 11-11-11
tRCD = '13.75ns'
tCL = '13.75ns'
tRP = '13.75ns'
tRAS = '35ns'
tRRD = '6ns'
tXAW = '30ns'
activation_limit = 4
tRFC = '260ns'
tWR = '15ns'
# Greater of 4 CK or 7.5 ns
tWTR = '7.5ns'
# Greater of 4 CK or 7.5 ns
tRTP = '7.5ns'
# Default same rank rd-to-wr bus turnaround to 2 CK, @800 MHz = 2.5 ns
tRTW = '2.5ns'
# Default different rank bus delay to 2 CK, @800 MHz = 2.5 ns
tCS = '2.5ns'
# <=85C, half for >85C
tREFI = '7.8us'
# Current values from datasheet
IDD0 = '75mA'
IDD2N = '50mA'
IDD3N = '57mA'
IDD4W = '165mA'
IDD4R = '187mA'
IDD5 = '220mA'
VDD = '1.5V'
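# Illustrative usage sketch (assumes a typical gem5 configuration script;
# `system` and `membus` are hypothetical names and the exact port wiring
# depends on the gem5 version in use):
#   system.mem_ctrl = DDR3_1600_x64(range=AddrRange('2GB'))
#   system.mem_ctrl.port = system.membus.master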
# A single DDR3-2133 x64 channel refining a selected subset of the
# options for the DDR-1600 configuration, based on the same DDR3-1600
# 4 Gbit datasheet (Micron MT41J512M8). Most parameters are kept
# consistent across the two configurations.
class DDR3_2133_x64(DDR3_1600_x64):
# 1066 MHz
tCK = '0.938ns'
# 8 beats across an x64 interface translates to 4 clocks @ 1066 MHz
tBURST = '3.752ns'
# DDR3-2133 14-14-14
tRCD = '13.09ns'
tCL = '13.09ns'
tRP = '13.09ns'
tRAS = '33ns'
tRRD = '5ns'
tXAW = '25ns'
# Current values from datasheet
IDD0 = '70mA'
IDD2N = '37mA'
IDD3N = '44mA'
IDD4W = '157mA'
IDD4R = '191mA'
IDD5 = '250mA'
VDD = '1.5V'
# A single DDR4-2400 x64 channel (one command and address bus), with
# timings based on a DDR4-2400 4 Gbit datasheet (Micron MT40A512M8)
# in an 8x8 configuration.
class DDR4_2400_x64(DRAMCtrl):
# size of device
device_size = '512MB'
# 8x8 configuration, 8 devices each with an 8-bit interface
device_bus_width = 8
# DDR4 is a BL8 device
burst_length = 8
# Each device has a page (row buffer) size of 1 Kbyte (1K columns x8)
device_rowbuffer_size = '1kB'
# 8x8 configuration, so 8 devices
devices_per_rank = 8
# Match our DDR3 configurations which is dual rank
ranks_per_channel = 2
# DDR4 has 2 (x16) or 4 (x4 and x8) bank groups
# Set to 4 for x4, x8 case
bank_groups_per_rank = 4
# DDR4 has 16 banks (4 bank groups) in all
# configurations. Currently we do not capture the additional
# constraints incurred by the bank groups
banks_per_rank = 16
# 1200 MHz
tCK = '0.833ns'
# 8 beats across an x64 interface translates to 4 clocks @ 1200 MHz
# tBURST is equivalent to the CAS-to-CAS delay (tCCD)
# With bank group architectures, tBURST represents the CAS-to-CAS
# delay for bursts to different bank groups (tCCD_S)
tBURST = '3.333ns'
# @2400 data rate, tCCD_L is 6 CK
# CAS-to-CAS delay for bursts to the same bank group
# tBURST is equivalent to tCCD_S; no explicit parameter required
# for CAS-to-CAS delay for bursts to different bank groups
tCCD_L = '5ns'
# DDR4-2400 17-17-17
tRCD = '14.16ns'
tCL = '14.16ns'
tRP = '14.16ns'
tRAS = '32ns'
# RRD_S (different bank group) for 1K page is MAX(4 CK, 3.3ns)
tRRD = '3.3ns'
# RRD_L (same bank group) for 1K page is MAX(4 CK, 4.9ns)
tRRD_L = '4.9ns'
tXAW = '21ns'
activation_limit = 4
tRFC = '350ns'
tWR = '15ns'
# Here using the average of WTR_S and WTR_L
tWTR = '5ns'
# Greater of 4 CK or 7.5 ns
tRTP = '7.5ns'
# Default same rank rd-to-wr bus turnaround to 2 CK, @1200 MHz = 1.666 ns
tRTW = '1.666ns'
# Default different rank bus delay to 2 CK, @1200 MHz = 1.666 ns
tCS = '1.666ns'
# <=85C, half for >85C
tREFI = '7.8us'
# Current values from datasheet
IDD0 = '64mA'
IDD02 = '4mA'
IDD2N = '50mA'
IDD3N = '67mA'
IDD3N2 = '3mA'
IDD4W = '180mA'
IDD4R = '160mA'
IDD5 = '192mA'
VDD = '1.2V'
VDD2 = '2.5V'
# A single LPDDR2-S4 x32 interface (one command/address bus), with
# default timings based on a LPDDR2-1066 4 Gbit part (Micron MT42L128M32D1)
# in a 1x32 configuration.
class LPDDR2_S4_1066_x32(DRAMCtrl):
# No DLL in LPDDR2
dll = False
# size of device
device_size = '512MB'
# 1x32 configuration, 1 device with a 32-bit interface
device_bus_width = 32
# LPDDR2_S4 is a BL4 and BL8 device
burst_length = 8
# Each device has a page (row buffer) size of 1KB
# (this depends on the memory density)
device_rowbuffer_size = '1kB'
# 1x32 configuration, so 1 device
devices_per_rank = 1
# Use a single rank
ranks_per_channel = 1
# LPDDR2-S4 has 8 banks in all configurations
banks_per_rank = 8
# 533 MHz
tCK = '1.876ns'
# Fixed at 15 ns
tRCD = '15ns'
# 8 CK read latency, 4 CK write latency @ 533 MHz, 1.876 ns cycle time
tCL = '15ns'
# Pre-charge one bank 15 ns (all banks 18 ns)
tRP = '15ns'
tRAS = '42ns'
tWR = '15ns'
tRTP = '7.5ns'
# 8 beats across an x32 DDR interface translates to 4 clocks @ 533 MHz.
# Note this is a BL8 DDR device.
# Requests larger than 32 bytes are broken down into multiple requests
# in the controller
tBURST = '7.5ns'
# LPDDR2-S4, 4 Gbit
tRFC = '130ns'
tREFI = '3.9us'
# Irrespective of speed grade, tWTR is 7.5 ns
tWTR = '7.5ns'
# Default same rank rd-to-wr bus turnaround to 2 CK, @533 MHz = 3.75 ns
tRTW = '3.75ns'
# Default different rank bus delay to 2 CK, @533 MHz = 3.75 ns
tCS = '3.75ns'
# Activate to activate irrespective of density and speed grade
tRRD = '10.0ns'
# Irrespective of density, tFAW is 50 ns
tXAW = '50ns'
activation_limit = 4
# Current values from datasheet
IDD0 = '15mA'
IDD02 = '70mA'
IDD2N = '2mA'
IDD2N2 = '30mA'
IDD3N = '2.5mA'
IDD3N2 = '30mA'
IDD4W = '10mA'
IDD4W2 = '190mA'
IDD4R = '3mA'
IDD4R2 = '220mA'
IDD5 = '40mA'
IDD52 = '150mA'
VDD = '1.8V'
VDD2 = '1.2V'
# A single WideIO x128 interface (one command and address bus), with
# default timings based on an estimated WIO-200 8 Gbit part.
class WideIO_200_x128(DRAMCtrl):
# No DLL for WideIO
dll = False
# size of device
device_size = '1024MB'
# 1x128 configuration, 1 device with a 128-bit interface
device_bus_width = 128
# This is a BL4 device
burst_length = 4
# Each device has a page (row buffer) size of 4KB
# (this depends on the memory density)
device_rowbuffer_size = '4kB'
# 1x128 configuration, so 1 device
devices_per_rank = 1
# Use one rank for a one-high die stack
ranks_per_channel = 1
# WideIO has 4 banks in all configurations
banks_per_rank = 4
# 200 MHz
tCK = '5ns'
# WIO-200
tRCD = '18ns'
tCL = '18ns'
tRP = '18ns'
tRAS = '42ns'
tWR = '15ns'
# Read to precharge is same as the burst
tRTP = '20ns'
# 4 beats across an x128 SDR interface translates to 4 clocks @ 200 MHz.
# Note this is a BL4 SDR device.
tBURST = '20ns'
# WIO 8 Gb
tRFC = '210ns'
# WIO 8 Gb, <=85C, half for >85C
tREFI = '3.9us'
# Greater of 2 CK or 15 ns, 2 CK @ 200 MHz = 10 ns
tWTR = '15ns'
# Default same rank rd-to-wr bus turnaround to 2 CK, @200 MHz = 10 ns
tRTW = '10ns'
# Default different rank bus delay to 2 CK, @200 MHz = 10 ns
tCS = '10ns'
# Activate to activate irrespective of density and speed grade
tRRD = '10.0ns'
# Two instead of four activation window
tXAW = '50ns'
activation_limit = 2
# The WideIO specification does not provide current information
# A single LPDDR3 x32 interface (one command/address bus), with
# default timings based on a LPDDR3-1600 4 Gbit part (Micron
# EDF8132A1MC) in a 1x32 configuration.
class LPDDR3_1600_x32(DRAMCtrl):
# No DLL for LPDDR3
dll = False
# size of device
device_size = '512MB'
# 1x32 configuration, 1 device with a 32-bit interface
device_bus_width = 32
# LPDDR3 is a BL8 device
burst_length = 8
# Each device has a page (row buffer) size of 4KB
device_rowbuffer_size = '4kB'
# 1x32 configuration, so 1 device
devices_per_rank = 1
# Technically the datasheet is a dual-rank package, but for
# comparison with the LPDDR2 config we stick to a single rank
ranks_per_channel = 1
# LPDDR3 has 8 banks in all configurations
banks_per_rank = 8
# 800 MHz
tCK = '1.25ns'
tRCD = '18ns'
# 12 CK read latency, 6 CK write latency @ 800 MHz, 1.25 ns cycle time
tCL = '15ns'
tRAS = '42ns'
tWR = '15ns'
# Greater of 4 CK or 7.5 ns, 4 CK @ 800 MHz = 5 ns
tRTP = '7.5ns'
# Pre-charge one bank 18 ns (all banks 21 ns)
tRP = '18ns'
# 8 beats across a x32 DDR interface translates to 4 clocks @ 800 MHz.
# Note this is a BL8 DDR device.
# Requests larger than 32 bytes are broken down into multiple requests
# in the controller
tBURST = '5ns'
# LPDDR3, 4 Gb
tRFC = '130ns'
tREFI = '3.9us'
# Irrespective of speed grade, tWTR is 7.5 ns
tWTR = '7.5ns'
# Default same rank rd-to-wr bus turnaround to 2 CK, @800 MHz = 2.5 ns
tRTW = '2.5ns'
# Default different rank bus delay to 2 CK, @800 MHz = 2.5 ns
tCS = '2.5ns'
# Activate to activate irrespective of density and speed grade
tRRD = '10.0ns'
# Irrespective of size, tFAW is 50 ns
tXAW = '50ns'
activation_limit = 4
# Current values from datasheet
IDD0 = '8mA'
IDD02 = '60mA'
IDD2N = '0.8mA'
IDD2N2 = '26mA'
IDD3N = '2mA'
IDD3N2 = '34mA'
IDD4W = '2mA'
IDD4W2 = '190mA'
IDD4R = '2mA'
IDD4R2 = '230mA'
IDD5 = '28mA'
IDD52 = '150mA'
VDD = '1.8V'
VDD2 = '1.2V'
# A single GDDR5 x64 interface, with
# default timings based on a GDDR5-4000 1 Gbit part (SK Hynix
# H5GQ1H24AFR) in a 2x32 configuration.
class GDDR5_4000_x64(DRAMCtrl):
# size of device
device_size = '128MB'
# 2x32 configuration, 1 device with a 32-bit interface
device_bus_width = 32
# GDDR5 is a BL8 device
burst_length = 8
# Each device has a page (row buffer) size of 2Kbits (256Bytes)
device_rowbuffer_size = '256B'
# 2x32 configuration, so 2 devices
devices_per_rank = 2
# assume single rank
ranks_per_channel = 1
# GDDR5 has 4 bank groups
bank_groups_per_rank = 4
# GDDR5 has 16 banks with 4 bank groups
banks_per_rank = 16
# 1000 MHz
tCK = '1ns'
# 8 beats across an x64 interface translates to 2 clocks @ 1000 MHz
# Data bus runs @2000 MHz => DDR (data transfers at 4000 MT/s)
# 8 beats at 4000 MHz = 2 beats at 1000 MHz
# tBURST is equivalent to the CAS-to-CAS delay (tCCD)
# With bank group architectures, tBURST represents the CAS-to-CAS
# delay for bursts to different bank groups (tCCD_S)
tBURST = '2ns'
# @1000MHz data rate, tCCD_L is 3 CK
# CAS-to-CAS delay for bursts to the same bank group
# tBURST is equivalent to tCCD_S; no explicit parameter required
# for CAS-to-CAS delay for bursts to different bank groups
tCCD_L = '3ns'
tRCD = '12ns'
# tCL is not directly found in the datasheet and is assumed to be equal to tRCD
tCL = '12ns'
tRP = '12ns'
tRAS = '28ns'
# RRD_S (different bank group)
# RRD_S is 5.5 ns in datasheet.
# rounded to the next multiple of tCK
tRRD = '6ns'
# RRD_L (same bank group)
# RRD_L is 5.5 ns in datasheet.
# rounded to the next multiple of tCK
tRRD_L = '6ns'
tXAW = '23ns'
# tXAW < 4 x tRRD.
# Therefore, activation limit is set to 0
activation_limit = 0
tRFC = '65ns'
tWR = '12ns'
# Here using the average of WTR_S and WTR_L
tWTR = '5ns'
# Read-to-Precharge 2 CK
tRTP = '2ns'
# Assume 2 cycles
tRTW = '2ns'
# Default different rank bus delay to 2 CK, @1000 MHz = 2 ns
tCS = '2ns'
tREFI = '3.9us'
|
|
# pylint: disable=missing-function-docstring,line-too-long
"""Test for diff_cover.violationsreporters.java_violations_reporter"""
from io import BytesIO
from textwrap import dedent
import pytest
from diff_cover.command_runner import CommandError
from diff_cover.violationsreporters import base
from diff_cover.violationsreporters.base import QualityReporter
from diff_cover.violationsreporters.java_violations_reporter import (
CheckstyleXmlDriver,
FindbugsXmlDriver,
PmdXmlDriver,
Violation,
checkstyle_driver,
)
@pytest.fixture(autouse=True)
def patch_so_all_files_exist(mocker):
mock = mocker.patch.object(base.os.path, "exists")
mock.return_value = True
@pytest.fixture
def process_patcher(mocker):
def _inner(return_value, status_code=0):
mocked_process = mocker.Mock()
mocked_process.returncode = status_code
mocked_process.communicate.return_value = return_value
mocked_subprocess = mocker.patch("diff_cover.command_runner.subprocess")
mocked_subprocess.Popen.return_value = mocked_process
return mocked_process
return _inner
class TestCheckstyleQualityReporterTest:
def test_no_such_file(self):
"""Expect that we get no results."""
quality = QualityReporter(checkstyle_driver)
result = quality.violations("")
assert result == []
def test_no_java_file(self):
"""Expect that we get no results because no Python files."""
quality = QualityReporter(checkstyle_driver)
file_paths = ["file1.coffee", "subdir/file2.js"]
for path in file_paths:
result = quality.violations(path)
assert result == []
def test_quality(self, process_patcher):
"""Integration test."""
# Patch the output of `checkstyle`
process_patcher(
(
dedent(
"""
[WARN] ../new_file.java:1:1: Line contains a tab character.
[WARN] ../new_file.java:13: 'if' construct must use '{}'s.
"""
)
.strip()
.encode("ascii"),
"",
)
)
expected_violations = [
Violation(1, "Line contains a tab character."),
Violation(13, "'if' construct must use '{}'s."),
]
# Parse the report
quality = QualityReporter(checkstyle_driver)
# Expect that the name is set
assert quality.name() == "checkstyle"
# Measured_lines is undefined for a
# quality reporter since all lines are measured
assert not quality.measured_lines("../new_file.java")
# Expect that we get violations for ../new_file.java only
# We're not guaranteed that the violations are returned
# in any particular order.
actual_violations = quality.violations("../new_file.java")
assert len(actual_violations) == len(expected_violations)
for expected in expected_violations:
assert expected in actual_violations
class TestCheckstyleXmlQualityReporterTest:
@pytest.fixture(autouse=True)
def setup(self, mocker):
# Paths generated by git_path are always the given argument
_git_path_mock = mocker.patch(
"diff_cover.violationsreporters.java_violations_reporter.GitPathTool"
)
_git_path_mock.relative_path = lambda path: path
_git_path_mock.absolute_path = lambda path: path
def test_no_such_file(self):
quality = QualityReporter(CheckstyleXmlDriver())
# Expect that we get no results
result = quality.violations("")
assert result == []
def test_no_java_file(self):
quality = QualityReporter(CheckstyleXmlDriver())
file_paths = ["file1.coffee", "subdir/file2.js"]
# Expect that we get no results because no Java files
for path in file_paths:
result = quality.violations(path)
assert result == []
def test_quality(self, process_patcher):
# Patch the output of `checkstyle`
process_patcher(
(
dedent(
"""
<?xml version="1.0" encoding="UTF-8"?>
<checkstyle version="8.0">
<file name="file1.java">
<error line="1" severity="error" message="Missing docstring"/>
<error line="2" severity="error" message="Unused variable 'd'"/>
<error line="2" severity="warning" message="TODO: Not the real way we'll store usages!"/>
<error line="579" severity="error" message="Unable to import 'rooted_paths'"/>
<error line="113" severity="error" message="Unused argument 'cls'"/>
<error line="150" severity="error" message="error while code parsing ([Errno 2] No such file or directory)"/>
<error line="149" severity="error" message="Comma not followed by a space"/>
</file>
<file name="path/to/file2.java">
<error line="100" severity="error" message="Access to a protected member"/>
</file>
</checkstyle>
"""
)
.strip()
.encode("ascii"),
"",
)
)
expected_violations = [
Violation(1, "error: Missing docstring"),
Violation(2, "error: Unused variable 'd'"),
Violation(2, "warning: TODO: Not the real way we'll store usages!"),
Violation(579, "error: Unable to import 'rooted_paths'"),
Violation(
150,
"error: error while code parsing ([Errno 2] No such file or directory)",
),
Violation(149, "error: Comma not followed by a space"),
Violation(113, "error: Unused argument 'cls'"),
]
# Parse the report
quality = QualityReporter(CheckstyleXmlDriver())
# Expect that the name is set
assert quality.name() == "checkstyle"
# Measured_lines is undefined for a
# quality reporter since all lines are measured
assert not quality.measured_lines("file1.java")
# Expect that we get violations for file1.java only
# We're not guaranteed that the violations are returned
# in any particular order.
actual_violations = quality.violations("file1.java")
assert len(actual_violations) == len(expected_violations)
for expected in expected_violations:
assert expected in actual_violations
def test_quality_error(self, mocker, process_patcher):
# Patch the output stderr/stdout and returncode of `checkstyle`
process_patcher(
(
dedent(
"""
<?xml version="1.0" encoding="UTF-8"?>
<checkstyle version="8.0">
<file name="file1.java">
<error line="1" severity="error" message="Missing docstring"/>
</file>
</checkstyle>
"""
),
b"oops",
),
status_code=1,
)
# Parse the report
code = mocker.patch(
"diff_cover.violationsreporters.java_violations_reporter.run_command_for_code"
)
code.return_value = 0
quality = QualityReporter(CheckstyleXmlDriver())
with pytest.raises(CommandError):
quality.violations("file1.java")
def test_quality_pregenerated_report(self):
# When the user provides us with a pre-generated checkstyle report
# then use that instead of calling checkstyle directly.
checkstyle_reports = [
BytesIO(
dedent(
"""
<?xml version="1.0" encoding="UTF-8"?>
<checkstyle version="8.0">
<file name="path/to/file.java">
<error line="1" severity="error" message="Missing docstring"/>
<error line="57" severity="warning" message="TODO the name of this method is a little bit confusing"/>
</file>
<file name="another/file.java">
<error line="41" severity="error" message="Specify string format arguments as logging function parameters"/>
<error line="175" severity="error" message="Operator not preceded by a space"/>
<error line="259" severity="error" message="Invalid name '' for type variable (should match [a-z_][a-z0-9_]{2,30}$)"/>
</file>
</checkstyle>
"""
)
.strip()
.encode("utf-8")
),
BytesIO(
dedent(
"""
<?xml version="1.0" encoding="UTF-8"?>
<checkstyle version="8.0">
<file name="path/to/file.java">
<error line="183" severity="error" message="Invalid name '' for type argument (should match [a-z_][a-z0-9_]{2,30}$)"/>
</file>
<file name="another/file.java">
<error line="183" severity="error" message="Missing docstring"/>
</file>
</checkstyle>
"""
)
.strip()
.encode("utf-8")
),
]
# Generate the violation report
quality = QualityReporter(CheckstyleXmlDriver(), reports=checkstyle_reports)
# Expect that we get the right violations
expected_violations = [
Violation(1, "error: Missing docstring"),
Violation(
57, "warning: TODO the name of this method is a little bit confusing"
),
Violation(
183,
"error: Invalid name '' for type argument (should match [a-z_][a-z0-9_]{2,30}$)",
),
]
# We're not guaranteed that the violations are returned
# in any particular order.
actual_violations = quality.violations("path/to/file.java")
assert len(actual_violations) == len(expected_violations)
for expected in expected_violations:
assert expected in actual_violations
class TestFindbugsQualityReporterTest:
@pytest.fixture(autouse=True)
def setup(self, mocker):
# Paths generated by git_path are always the given argument
_git_path_mock = mocker.patch(
"diff_cover.violationsreporters.java_violations_reporter.GitPathTool"
)
_git_path_mock.relative_path = lambda path: path
_git_path_mock.absolute_path = lambda path: path
def test_no_such_file(self):
quality = QualityReporter(FindbugsXmlDriver())
# Expect that we get no results
result = quality.violations("")
assert result == []
def test_no_java_file(self):
quality = QualityReporter(FindbugsXmlDriver())
file_paths = ["file1.coffee", "subdir/file2.js"]
# Expect that we get no results because no Java files
for path in file_paths:
result = quality.violations(path)
assert result == []
def test_quality_pregenerated_report(self):
# When the user provides us with a pre-generated findbugs report
# then use that instead of calling findbugs directly.
findbugs_reports = [
BytesIO(
dedent(
"""
<?xml version="1.0" encoding="UTF-8"?>
<BugCollection sequence="0" release="" analysisTimestamp="1512755361404" version="3.0.1" timestamp="1512755226000">
<BugInstance instanceOccurrenceNum="0" instanceHash="1967bf8c4d25c6b964f30356014aa9fb" rank="20" abbrev="Dm" category="I18N" priority="3" type="DM_CONVERT_CASE" instanceOccurrenceMax="0">
<ShortMessage>Consider using Locale parameterized version of invoked method</ShortMessage>
<LongMessage>Use of non-localized String.toUpperCase() or String.toLowerCase() in org.opensource.sample.file$1.isMultipart(HttpServletRequest)</LongMessage>
<Class classname="org.opensource.sample.file$1" primary="true">
<SourceLine classname="org.opensource.sample.file$1" start="94" end="103" sourcepath="path/to/file.java" sourcefile="file.java">
<Message>At file.java:[lines 94-103]</Message>
</SourceLine>
<Message>In class org.opensource.sample.file$1</Message>
</Class>
<Method isStatic="false" classname="org.opensource.sample.file$1" signature="(Ljavax/servlet/http/HttpServletRequest;)Z" name="isMultipart" primary="true">
<SourceLine endBytecode="181" classname="org.opensource.sample.file$1" start="97" end="103" sourcepath="file1.java" sourcefile="file1.java" startBytecode="0" />
<Message>In method org.opensource.sample.file$1.isMultipart(HttpServletRequest)</Message>
</Method>
<SourceLine endBytecode="6" classname="org.opensource.sample.file$1" start="97" end="97" sourcepath="path/to/file.java" sourcefile="file.java" startBytecode="6" primary="true">
<Message>At file.java:[line 97]</Message>
</SourceLine>
<SourceLine role="SOURCE_LINE_ANOTHER_INSTANCE" endBytecode="55" classname="org.opensource.sample.file$1" start="103" end="104" sourcepath="another/file.java" sourcefile="file.java" startBytecode="55">
<Message>Another occurrence at file.java:[line 103, 104]</Message>
</SourceLine>
</BugInstance>
</BugCollection>
"""
)
.strip()
.encode("utf-8")
),
BytesIO(
dedent(
"""
<?xml version="1.0" encoding="UTF-8"?>
<BugCollection sequence="0" release="" analysisTimestamp="1512755361404" version="3.0.1" timestamp="1512755226000">
<BugInstance instanceOccurrenceNum="0" instanceHash="1967bf8c4d25c6b964f30356014aa9fb" rank="20" abbrev="Dm" category="I18N" priority="3" type="DM_CONVERT_CASE" instanceOccurrenceMax="0">
<ShortMessage>Consider using Locale parameterized version of invoked method</ShortMessage>
<LongMessage>Use of non-localized String.toUpperCase() or String.toLowerCase() in org.opensource.sample.file$1.isMultipart(HttpServletRequest)</LongMessage>
<Class classname="org.opensource.sample.file$1" primary="true">
<SourceLine classname="org.opensource.sample.file$1" start="94" end="103" sourcepath="path/to/file.java" sourcefile="file.java">
<Message>At file.java:[lines 94-103]</Message>
</SourceLine>
<Message>In class org.opensource.sample.file$1</Message>
</Class>
<Method isStatic="false" classname="org.opensource.sample.file$1" signature="(Ljavax/servlet/http/HttpServletRequest;)Z" name="isMultipart" primary="true">
<SourceLine endBytecode="181" classname="org.opensource.sample.file$1" start="97" end="103" sourcepath="file1.java" sourcefile="file1.java" startBytecode="0" />
<Message>In method org.opensource.sample.file$1.isMultipart(HttpServletRequest)</Message>
</Method>
<SourceLine endBytecode="6" classname="org.opensource.sample.file$1" start="183" end="183" sourcepath="path/to/file.java" sourcefile="file.java" startBytecode="6" primary="true">
<Message>At file.java:[line 97]</Message>
</SourceLine>
<SourceLine role="SOURCE_LINE_ANOTHER_INSTANCE" endBytecode="55" classname="org.opensource.sample.file$1" start="183" end="183" sourcepath="another/file.java" sourcefile="file.java" startBytecode="55">
<Message>Another occurrence at file.java:[line 183]</Message>
</SourceLine>
</BugInstance>
</BugCollection>
"""
)
.strip()
.encode("utf-8")
),
# this is a violation which is not bounded to a specific line. We'll skip those
BytesIO(
dedent(
"""
<?xml version="1.0" encoding="UTF-8"?>
<BugCollection sequence="0" release="" analysisTimestamp="1512755361404" version="3.0.1" timestamp="1512755226000">
<BugInstance instanceOccurrenceNum="0" instanceHash="2820338ec68e2e75a81848c95d31167f" rank="19" abbrev="Se" category="BAD_PRACTICE" priority="3" type="SE_BAD_FIELD" instanceOccurrenceMax="0">
<ShortMessage>Non-transient non-serializable instance field in serializable class</ShortMessage>
<LongMessage>Class org.opensource.sample.file defines non-transient non-serializable instance field</LongMessage>
<SourceLine synthetic="true" classname="org.opensource.sample.file" sourcepath="path/to/file.java" sourcefile="file.java">
<Message>In file.java</Message>
</SourceLine>
</BugInstance>
</BugCollection>
"""
)
.strip()
.encode("utf-8")
),
]
# Generate the violation report
quality = QualityReporter(FindbugsXmlDriver(), reports=findbugs_reports)
# Expect that we get the right violations
expected_violations = [
Violation(
97,
"I18N: Consider using Locale parameterized version of invoked method",
),
Violation(
183,
"I18N: Consider using Locale parameterized version of invoked method",
),
]
# We're not guaranteed that the violations are returned
# in any particular order.
actual_violations = quality.violations("path/to/file.java")
assert len(actual_violations) == len(expected_violations)
for expected in expected_violations:
assert expected in actual_violations
class TestPmdXmlQualityReporterTest:
@pytest.fixture(autouse=True)
def setup(self, mocker):
# Paths generated by git_path are always the given argument
_git_path_mock = mocker.patch(
"diff_cover.violationsreporters.java_violations_reporter.GitPathTool"
)
_git_path_mock.relative_path = lambda path: path
_git_path_mock.absolute_path = lambda path: path
def test_no_such_file(self):
quality = QualityReporter(PmdXmlDriver())
# Expect that we get no results
result = quality.violations("")
assert result == []
def test_no_java_file(self):
quality = QualityReporter(PmdXmlDriver())
file_paths = ["file1.coffee", "subdir/file2.js"]
# Expect that we get no results because no Java files
for path in file_paths:
result = quality.violations(path)
assert result == []
def test_quality_pregenerated_report(self):
# When the user provides us with a pre-generated pmd report
# then use that instead of calling pmd directly.
pmd_reports = [
BytesIO(
dedent(
"""
<?xml version="1.0" encoding="UTF-8"?>
<pmd version="5.6.1" timestamp="2019-06-24T15:47:13.429">
<file name="path/to/file.java">
<violation beginline="21" endline="118" begincolumn="8" endcolumn="1" rule="ClassMustHaveAuthorRule" ruleset="AlibabaJavaComments" package="com.huifu.devops.application.component" class="LingeringInputFilter" priority="3">
must have @author comment
</violation>
</file>
<file name="path/to/file.java">
<violation beginline="10" endline="10" begincolumn="29" endcolumn="63" rule="AbstractMethodOrInterfaceMethodMustUseJavadocRule" ruleset="AlibabaJavaComments" package="com.huifu.devops.application.component" class="PipelineExecutionStepVoConverter" method="convert" priority="3">
interface method must include javadoc comment
</violation>
</file>
</pmd>
"""
)
.strip()
.encode("utf-8")
)
]
pmd_xml_driver = PmdXmlDriver()
# Generate the violation report
quality = QualityReporter(pmd_xml_driver, reports=pmd_reports)
# Expect that pmd is not installed
assert not pmd_xml_driver.installed()
# Expect that we get the right violations
expected_violations = [
Violation(21, "ClassMustHaveAuthorRule: must have @author comment"),
Violation(
10,
"AbstractMethodOrInterfaceMethodMustUseJavadocRule: interface method must include javadoc comment",
),
]
# We're not guaranteed that the violations are returned
# in any particular order.
actual_violations = quality.violations("path/to/file.java")
assert len(actual_violations) == len(expected_violations)
for expected in expected_violations:
assert expected in actual_violations
|
|
from fuzzysearch.common import group_matches, Match, get_best_match_in_group, \
count_differences_with_maximum, consolidate_overlapping_matches
from fuzzysearch.substitutions_only import \
has_near_match_substitutions as hnm_subs, \
find_near_matches_substitutions as fnm_subs, \
find_near_matches_substitutions_lp as fnm_subs_lp, \
has_near_match_substitutions_lp as hnm_subs_lp, \
find_near_matches_substitutions_ngrams as fnm_subs_ngrams, \
has_near_match_substitutions_ngrams as hnm_subs_ngrams
from tests.compat import b, u, unittest
from tests.utils import skip_if_arguments_arent_byteslike
class TestSubstitionsOnlyBase(object):
def search(self, subsequence, sequence, max_subs):
raise NotImplementedError
def expectedOutcomes(self, search_result, expected_outcomes, *args, **kwargs):
raise NotImplementedError
def test_empty_sequence(self):
self.expectedOutcomes(self.search(b('PATTERN'), b(''), max_subs=0), [])
def test_empty_subsequence_exception(self):
with self.assertRaises(ValueError):
self.search(b(''), b('TEXT'), max_subs=0)
def test_match_identical_sequence(self):
self.expectedOutcomes(
self.search(b('PATTERN'), b('PATTERN'), max_subs=0),
[Match(start=0, end=len('PATTERN'), dist=0, matched=b('PATTERN'))],
)
def test_substring(self):
substring = b('PATTERN')
text = b('aaaaaaaaaaPATTERNaaaaaaaaa')
expected_match = Match(start=10, end=17, dist=0, matched=b('PATTERN'))
self.expectedOutcomes(
self.search(substring, text, max_subs=0),
[expected_match],
)
self.expectedOutcomes(
self.search(substring, text, max_subs=1),
[expected_match],
)
self.expectedOutcomes(
self.search(substring, text, max_subs=2),
[expected_match],
)
def test_double_first_item(self):
self.expectedOutcomes(
self.search(b('def'), b('abcddefg'), max_subs=1),
[Match(start=4, end=7, dist=0, matched=b('def'))],
)
self.expectedOutcomes(
self.search(b('def'), b('abcddefg'), max_subs=2),
[Match(start=3, end=6, dist=2, matched=b('dde')),
Match(start=4, end=7, dist=0, matched=b('def'))],
)
def test_two_identical(self):
self.expectedOutcomes(
self.search(b('abc'), b('abcabc'), max_subs=1),
[Match(start=0, end=3, dist=0, matched=b('abc')),
Match(start=3, end=6, dist=0, matched=b('abc'))],
)
self.expectedOutcomes(
self.search(b('abc'), b('abcXabc'), max_subs=1),
[Match(start=0, end=3, dist=0, matched=b('abc')),
Match(start=4, end=7, dist=0, matched=b('abc'))],
)
def test_one_changed_in_middle(self):
substring = b('abcdefg')
pattern = b('abcXefg')
expected_match = Match(start=0, end=7, dist=1, matched=pattern)
self.expectedOutcomes(
self.search(substring, pattern, max_subs=0),
[],
)
self.expectedOutcomes(
self.search(substring, pattern, max_subs=1),
[expected_match],
)
self.expectedOutcomes(
self.search(substring, pattern, max_subs=2),
[expected_match],
)
def test_one_missing_in_middle(self):
substring = b('PATTERN')
text = b('aaaaaaaaaaPATERNaaaaaaaaa')
for max_subs in [0, 1, 2]:
self.expectedOutcomes(
self.search(substring, text, max_subs=max_subs),
[],
)
def test_one_changed_in_middle2(self):
substring = b('PATTERN')
text = b('aaaaaaaaaaPATtERNaaaaaaaaa')
expected_match = Match(start=10, end=17, dist=1, matched=b('PATtERN'))
self.expectedOutcomes(
self.search(substring, text, max_subs=0),
[],
)
self.expectedOutcomes(
self.search(substring, text, max_subs=1),
[expected_match],
)
self.expectedOutcomes(
self.search(substring, text, max_subs=2),
[expected_match],
)
def test_one_extra_in_middle(self):
substring = b('PATTERN')
text = b('aaaaaaaaaaPATTXERNaaaaaaaaa')
for max_subs in [0, 1, 2]:
self.expectedOutcomes(
self.search(substring, text, max_subs=max_subs),
[],
)
def test_all_different(self):
substring = b('AAAA')
text = b('ZZZZ')
for max_subs in [0, 1, 2, 3]:
self.expectedOutcomes(
self.search(substring, text, max_subs=max_subs),
[],
)
for max_subs in [4, 5]:
self.expectedOutcomes(
self.search(substring, text, max_subs=max_subs),
[Match(start=0, end=4, dist=4, matched=b('ZZZZ'))],
)
def test_dna_search(self):
# see: http://stackoverflow.com/questions/19725127/
text = b(''.join('''\
GACTAGCACTGTAGGGATAACAATTTCACACAGGTGGACAATTACATTGAAAATCACAGATTGGT
CACACACACATTGGACATACATAGAAACACACACACATACATTAGATACGAACATAGAAACACAC
ATTAGACGCGTACATAGACACAAACACATTGACAGGCAGTTCAGATGATGACGCCCGACTGATAC
TCGCGTAGTCGTGGGAGGCAAGGCACACAGGGGATAGG
'''.split()))
pattern = b('TGCACTGTAGGGATAACAAT')
self.expectedOutcomes(
self.search(pattern, text, max_subs=2),
[Match(start=4, end=24, dist=1, matched=text[4:24])],
)
def test_protein_search1(self):
# see:
# * BioPython archives from March 14th, 2014
# http://lists.open-bio.org/pipermail/biopython/2014-March/009030.html
# * https://github.com/taleinat/fuzzysearch/issues/3
text = b(''.join('''\
XXXXXXXXXXXXXXXXXXXGGGTTVTTSSAAAAAAAAAAAAAGGGTTLTTSSAAAAAAAAAAAA
AAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBGGGTTLTTSS
'''.split()))
pattern = b("GGGTTLTTSS")
self.expectedOutcomes(
self.search(pattern, text, max_subs=0),
[Match(start=42, end=52, dist=0, matched=text[42:52]),
Match(start=99, end=109, dist=0, matched=text[99:109])],
)
self.expectedOutcomes(
self.search(pattern, text, max_subs=1),
[Match(start=19, end=29, dist=1, matched=text[19:29]),
Match(start=42, end=52, dist=0, matched=text[42:52]),
Match(start=99, end=109, dist=0, matched=text[99:109])],
)
self.expectedOutcomes(
self.search(pattern, text, max_subs=2),
[Match(start=19, end=29, dist=1, matched=text[19:29]),
Match(start=42, end=52, dist=0, matched=text[42:52]),
Match(start=99, end=109, dist=0, matched=text[99:109])],
)
def test_protein_search2(self):
# see:
# * BioPython archives from March 14th, 2014
# http://lists.open-bio.org/pipermail/biopython/2014-March/009030.html
# * https://github.com/taleinat/fuzzysearch/issues/3
text = b(''.join('''\
XXXXXXXXXXXXXXXXXXXGGGTTVTTSSAAAAAAAAAAAAAGGGTTVTTSSAAAAAAAAAAA
AAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBGGGTTLTTSS
'''.split()))
pattern = b("GGGTTLTTSS")
self.expectedOutcomes(
self.search(pattern, text, max_subs=0),
[Match(start=99, end=109, dist=0, matched=text[99:109])],
)
self.expectedOutcomes(
self.search(pattern, text, max_subs=1),
[Match(start=19, end=29, dist=1, matched=text[19:29]),
Match(start=42, end=52, dist=1, matched=text[42:52]),
Match(start=99, end=109, dist=0, matched=text[99:109])],
)
self.expectedOutcomes(
self.search(pattern, text, max_subs=2),
[Match(start=19, end=29, dist=1, matched=text[19:29]),
Match(start=42, end=52, dist=1, matched=text[42:52]),
Match(start=99, end=109, dist=0, matched=text[99:109])],
)
def test_missing_at_beginning(self):
self.expectedOutcomes(
self.search(b("ATTEST"), b("TESTOSTERONE"), max_subs=2),
[],
)
def test_unicode_substring(self):
pattern = u('\u03A3\u0393')
text = u('\u03A0\u03A3\u0393\u0394')
self.expectedOutcomes(
self.search(pattern, text, max_subs=0),
[Match(1, 3, 0, matched=text[1:3])]
)
def test_max_substitutions_gte_subseq_len(self):
for max_subs in [1, 2, 5]:
self.expectedOutcomes(
self.search(b('b'), b('abc'), max_subs),
[Match(0, 1, 1, b('a')),
Match(1, 2, 0, b('b')),
Match(2, 3, 1, b('c'))]
)
for extra_subs in [0, 1, 7]:
self.expectedOutcomes(
self.search(b('PATTERN'), b('PATTERN'), len('PATTERN') + extra_subs),
[Match(0, len('PATTERN'), 0, b('PATTERN'))]
)
class TestFindNearMatchesSubstitions(TestSubstitionsOnlyBase,
unittest.TestCase):
def search(self, subsequence, sequence, max_subs):
return fnm_subs(subsequence, sequence, max_subs)
def expectedOutcomes(self, search_results, expected_outcomes, *args, **kwargs):
best_from_grouped_results = [
get_best_match_in_group(group)
for group in group_matches(search_results)
]
best_from_grouped_expected_outcomes = [
get_best_match_in_group(group)
for group in group_matches(expected_outcomes)
]
return self.assertEqual(best_from_grouped_results,
best_from_grouped_expected_outcomes,
*args, **kwargs)
class TestFindNearMatchesSubstitionsLinearProgramming(TestSubstitionsOnlyBase,
unittest.TestCase):
def search(self, subsequence, sequence, max_subs):
return list(fnm_subs_lp(subsequence, sequence, max_subs))
def expectedOutcomes(self, search_results, expected_outcomes, *args, **kwargs):
return self.assertEqual(search_results, expected_outcomes, *args, **kwargs)
class TestFindNearMatchesSubstitionsNgrams(TestSubstitionsOnlyBase,
unittest.TestCase):
def search(self, subsequence, sequence, max_subs):
if max_subs >= len(subsequence):
self.skipTest("avoiding calling fnm_subs_ngrams() " +
"with max_subs >= len(subsequence)")
return fnm_subs_ngrams(subsequence, sequence, max_subs)
def expectedOutcomes(self, search_results, expected_outcomes, *args, **kwargs):
return self.assertEqual(
consolidate_overlapping_matches(search_results),
consolidate_overlapping_matches(expected_outcomes),
*args, **kwargs)
class TestHasNearMatchSubstitionsOnlyBase(TestSubstitionsOnlyBase):
def search(self, subsequence, sequence, max_subs):
raise NotImplementedError
def expectedOutcomes(self, search_results, expected_outcomes, *args, **kwargs):
return self.assertEqual(bool(search_results),
bool(expected_outcomes),
*args, **kwargs)
class TestHasNearMatchSubstitionsOnly(TestHasNearMatchSubstitionsOnlyBase,
unittest.TestCase):
def search(self, subsequence, sequence, max_subs):
return hnm_subs(subsequence, sequence, max_subs)
class TestHasNearMatchSubstitionsOnlyNgrams(TestHasNearMatchSubstitionsOnlyBase,
unittest.TestCase):
def search(self, subsequence, sequence, max_subs):
if max_subs >= len(subsequence):
self.skipTest("avoiding calling hnm_subs_ngrams() " +
"with max_subs >= len(subsequence)")
return hnm_subs_ngrams(subsequence, sequence, max_subs)
class TestHasNearMatchSubstitionsOnlyLp(TestHasNearMatchSubstitionsOnlyBase,
unittest.TestCase):
def search(self, subsequence, sequence, max_subs):
return hnm_subs_lp(subsequence, sequence, max_subs)
try:
from fuzzysearch._substitutions_only import \
substitutions_only_has_near_matches_lp_byteslike as \
hnm_subs_lp_byteslike, \
substitutions_only_find_near_matches_lp_byteslike as \
fnm_subs_lp_byteslike, \
substitutions_only_has_near_matches_ngrams_byteslike as \
hnm_subs_ngrams_byteslike, \
substitutions_only_find_near_matches_ngrams_byteslike as \
fnm_subs_ngrams_byteslike
except ImportError:
pass
else:
class TestHasNearMatchesSubstitionsLpByteslike(
TestHasNearMatchSubstitionsOnlyBase,
unittest.TestCase
):
@skip_if_arguments_arent_byteslike
def search(self, subsequence, sequence, max_subs):
return hnm_subs_lp_byteslike(subsequence, sequence,
max_subs)
class TestHasNearMatchesSubstitionsNgramsByteslike(
TestHasNearMatchSubstitionsOnlyBase,
unittest.TestCase
):
@skip_if_arguments_arent_byteslike
def search(self, subsequence, sequence, max_subs):
if max_subs >= len(subsequence):
self.skipTest("avoiding calling hnm_subs_ngrams_byteslike() " +
"with max_subs >= len(subsequence)")
return hnm_subs_ngrams_byteslike(subsequence, sequence,
max_subs)
class TestFindNearMatchesSubstitionsLpByteslike(
TestSubstitionsOnlyBase,
unittest.TestCase
):
@skip_if_arguments_arent_byteslike
def search(self, subsequence, sequence, max_subs):
results = fnm_subs_lp_byteslike(subsequence, sequence,
max_subs)
matches = [
Match(
index,
index + len(subsequence),
count_differences_with_maximum(
sequence[index:index+len(subsequence)],
subsequence,
max_subs + 1,
),
matched=sequence[index:index+len(subsequence)]
)
for index in results
]
return matches
def expectedOutcomes(self, search_results, expected_outcomes,
*args, **kwargs):
return self.assertEqual(search_results, expected_outcomes,
*args, **kwargs)
class TestFindNearMatchesSubstitionsNgramsByteslike(
TestSubstitionsOnlyBase,
unittest.TestCase
):
@skip_if_arguments_arent_byteslike
def search(self, subsequence, sequence, max_subs):
results = fnm_subs_ngrams_byteslike(subsequence, sequence,
max_subs)
matches = [
Match(
index,
index + len(subsequence),
count_differences_with_maximum(
sequence[index:index+len(subsequence)],
subsequence,
max_subs + 1,
),
matched=sequence[index:index+len(subsequence)]
)
for index in results
]
return [
get_best_match_in_group(group)
for group in group_matches(matches)
]
def expectedOutcomes(self, search_results, expected_outcomes, *args, **kwargs):
return self.assertEqual(
consolidate_overlapping_matches(search_results),
consolidate_overlapping_matches(expected_outcomes),
*args, **kwargs)
|
|
# -*- coding: utf-8 -*-
#
#
# This source file is part of the FabSim software toolkit, which is distributed under the BSD 3-Clause license.
# Please refer to LICENSE for detailed information regarding the licensing.
#
# fab.py contains general-purpose FabSim routines.
from templates import *
from machines import *
from fabric.contrib.project import *
from xml.etree import ElementTree
import time
import re
import numpy as np
import yaml
import tempfile
from pprint import PrettyPrinter
pp=PrettyPrinter()
def add_local_paths(module_name):
# This variable encodes the default location for templates.
env.local_templates_path.insert(0, "$localroot/deploy/%s/templates" % (module_name))
# This variable encodes the default location for blackbox scripts.
env.local_blackbox_path.insert(0, "$localroot/deploy/%s/blackbox" % (module_name))
# This variable encodes the default location for Python scripts.
env.local_python_path.insert(0, "$localroot/deploy/%s/python" % (module_name))
@task
def print_local_environment():
print env
@task
def stat():
"""Check the remote message queue status"""
#TODO: Respect varying remote machine queue systems.
if not env.get('stat_postfix'):
return run(template("$stat -u $username"))
return run(template("$stat -u $username $stat_postfix"))
@task
def monitor():
"""Report on the queue status, ctrl-C to interrupt"""
while True:
execute(stat)
time.sleep(120)
def check_complete():
"""Return true if the user has no queued jobs"""
return stat()==""
@task
def wait_complete():
"""Wait until all jobs currently qsubbed are complete, then return"""
time.sleep(120)
while not check_complete():
time.sleep(120)
def with_template_job():
"""
Determine a generated job name from environment parameters, and then define additional environment parameters based on it.
"""
name=template(env.job_name_template)
if env.get('label'):
name='_'.join((env['label'],name))
with_job(name)
def with_job(name):
"""Augment the fabric environment with information regarding a particular job name.
Definitions created:
job_results: the remote location where job results should be stored
job_results_local: the local location where job results should be stored
"""
env.name=name
env.job_results=env.pather.join(env.results_path,name)
env.job_results_local=os.path.join(env.local_results,name)
env.job_results_contents=env.pather.join(env.job_results,'*')
env.job_results_contents_local=os.path.join(env.job_results_local,'*')
def with_template_config():
"""
Determine the name of a used or generated config from environment parameters, and then define additional environment parameters based on it.
"""
with_config(template(env.config_name_template))
def with_config(name):
"""Internal: augment the fabric environment with information regarding a particular configuration name.
Definitions created:
job_config_path: the remote location where the config files for the job should be stored
job_config_path_local: the local location where the config files for the job may be found
"""
env.config=name
env.job_config_path=env.pather.join(env.config_path,name)
env.job_config_path_local=os.path.join(env.local_configs,name)
env.job_config_contents=env.pather.join(env.job_config_path,'*')
env.job_config_contents_local=os.path.join(env.job_config_path_local,'*')
env.job_name_template_sh=template("%s.sh" % env.job_name_template) # name of the job sh submission script.
def with_profile(name):
"""Internal: augment the fabric environment with information regarding a particular profile name.
Definitions created:
job_profile_path: the remote location where the profile should be stored
job_profile_path_local: the local location where the profile files may be found
"""
env.profile=name
env.job_profile_path=env.pather.join(env.profiles_path,name)
env.job_profile_path_local=os.path.join(env.local_profiles,name)
env.job_profile_contents=env.pather.join(env.job_profile_path,'*')
env.job_profile_contents_local=os.path.join(env.job_profile_path_local,'*')
@task
def fetch_configs(config=''):
"""
Fetch config files from the remote, via rsync.
Specify a config directory, such as 'cylinder' to copy just one config.
Config files are stored as, e.g. cylinder/config.dat and cylinder/config.xml
Local path to use is specified in machines_user.json, and should normally point to a mount on entropy,
i.e. /store4/blood/username/config_files
This method is not intended for normal use, but is useful when the local machine cannot have an entropy mount,
so that files can be copied to a local machine from entropy, and then transferred to the compute machine,
via 'fab entropy fetch_configs; fab legion put_configs'
"""
with_config(config)
if env.manual_gsissh:
local(template("globus-url-copy -cd -r -sync gsiftp://$remote/$job_config_path/ file://$job_config_path_local/"))
else:
local(template("rsync -pthrvz $username@$remote:$job_config_path/ $job_config_path_local"))
@task
def put_configs(config=''):
"""
Transfer config files to the remote.
For use in launching jobs, via rsync.
Specify a config directory, such as 'cylinder' to copy just one configuration.
Config files are stored as, e.g. cylinder/config.dat and cylinder/config.xml
Local path to find config directories is specified in machines_user.json, and should normally point to a mount on entropy,
i.e. /store4/blood/username/config_files
If you can't mount entropy, 'fetch_configs' can be useful, via 'fab entropy fetch_configs; fab legion put_configs'
RECENT ADDITION: Added get_setup_fabric_dirs_string() so that the Fabric Directories are now created automatically whenever
a config file is uploaded.
"""
with_config(config)
run(template("%s; mkdir -p $job_config_path" % (get_setup_fabric_dirs_string())))
if env.manual_gsissh:
local(template("globus-url-copy -p 10 -cd -r -sync file://$job_config_path_local/ gsiftp://$remote/$job_config_path/"))
else:
rsync_project(local_dir=env.job_config_path_local+'/',remote_dir=env.job_config_path)
@task
def put_results(name=''):
"""
Transfer result files to a remote.
Local path to find result directories is specified in machines_user.json.
This method is not intended for normal use, but is useful when the local machine cannot have an entropy mount,
so that results from a local machine can be sent to entropy, via 'fab legion fetch_results; fab entropy put_results'
"""
with_job(name)
run(template("mkdir -p $job_results"))
if env.manual_gsissh:
local(template("globus-url-copy -p 10 -cd -r -sync file://$job_results_local/ gsiftp://$remote/$job_results/"))
else:
rsync_project(local_dir=env.job_results_local+'/',remote_dir=env.job_results)
@task
def fetch_results(name='',regex='',debug=False):
"""
Fetch results of remote jobs to local result store.
Specify a job name to transfer just one job.
Local path to store results is specified in machines_user.json, and should normally point to a mount on entropy,
i.e. /store4/blood/username/results.
If you can't mount entropy, 'put results' can be useful, via 'fab legion fetch_results; fab entropy put_results'
"""
if debug:
pp.pprint(env)
with_job(name)
if env.manual_gsissh:
local(template("globus-url-copy -cd -r -sync gsiftp://$remote/$job_results/%s file://$job_results_local/" % regex))
else:
local(template("rsync -pthrvz $username@$remote:$job_results/%s $job_results_local" % regex))
@task
def clear_results(name=''):
"""Completely wipe all result files from the remote."""
with_job(name)
run(template('rm -rf $job_results_contents'))
@task
def fetch_profiles(name=''):
"""
Fetch results of remote jobs to local result store.
Specify a job name to transfer just one job.
Local path to store results is specified in machines_user.json, and should normally point to a mount on entropy,
i.e. /store4/blood/username/results.
If you can't mount entropy, 'put results' can be useful, via 'fab legion fetch_results; fab entropy put_results'
"""
with_profile(name)
if env.manual_gsissh:
local(template("globus-url-copy -cd -r -sync gsiftp://$remote/$job_profile_path/ file://$job_profile_path_local/"))
else:
local(template("rsync -pthrvz $username@$remote:$job_profile_path/ $job_profile_path_local"))
@task
def put_profiles(name=''):
"""
Transfer result files to a remote.
Local path to find result directories is specified in machines_user.json.
This method is not intended for normal use, but is useful when the local machine cannot have an entropy mount,
so that results from a local machine can be sent to entropy, via 'fab legion fetch_results; fab entropy put_results'
"""
with_profile(name)
run(template("mkdir -p $job_profile_path"))
if env.manual_gsissh:
local(template("globus-url-copy -p 10 -cd -r -sync file://$job_profile_path_local/ gsiftp://$remote/$job_profile_path/"))
else:
rsync_project(local_dir=env.job_profile_path_local+'/',remote_dir=env.job_profile_path)
def get_setup_fabric_dirs_string():
"""
Returns the commands required to set up the fabric directories. This is not in the env, because modifying this
is likely to break FabSim in most cases.
This is stored in an individual function so that the string can be appended to existing remote commands,
reducing the overhead of issuing extra ssh calls.
"""
return 'mkdir -p $config_path; mkdir -p $results_path; mkdir -p $scripts_path'
@task
def setup_fabric_dirs(name=''):
"""
Creates the necessary fab dirs remotely.
"""
run(template(get_setup_fabric_dirs_string()))
def update_environment(*dicts):
for adict in dicts:
env.update(adict)
def calc_nodes():
# If we're not reserving whole nodes and fewer cores are requested than one node provides,
# cap the cores used per node at the requested core count (keep N <= n).
env.coresusedpernode=env.corespernode
if int(env.coresusedpernode)>int(env.cores):
env.coresusedpernode=env.cores
env.nodes=int(env.cores)/int(env.coresusedpernode)
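# Worked example: with cores=24 and corespernode=12, coresusedpernode stays 12
# and nodes = 24/12 = 2; with cores=4 and corespernode=12, coresusedpernode is
# capped at 4 and nodes = 1.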
def job(*option_dictionaries):
"""Internal low level job launcher.
Parameters for the job are determined from the prepared fabric environment
Execute a generic job on the remote machine. Use lammps, regress, or test instead."""
update_environment(*option_dictionaries)
with_template_job()
# If the replicas parameter is defined, then we are dealing with an ensemble job. We will calculate the
# cores per replica by dividing up the total core count.
if 'replicas' in option_dictionaries[0].keys():
env.cores_per_replica = int(env.cores) / int(env.replicas)
# Use this to request more cores than we use, to measure performance without sharing impact
if env.get('cores_reserved')=='WholeNode' and env.get('corespernode'):
env.cores_reserved=(1+(int(env.cores)-1)/int(env.corespernode))*int(env.corespernode)
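# Worked example: with cores=30 and corespernode=12, cores_reserved becomes
# (1 + (30-1)/12) * 12 = 3 * 12 = 36, i.e. three whole nodes are reserved.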
# If cores_reserved is not specified, temporarily default it to the number of cores requested.
# The setting is temporary so that a later job with a different core count gets its own default.
with settings(cores_reserved=env.get('cores_reserved') or env.cores):
calc_nodes()
if env.node_type:
env.node_type_restriction=template(env.node_type_restriction_template)
if 'replica_index' in option_dictionaries[0].keys():
print "replica_index found."
env.name = env.name + "_" + str(env.replica_index)
if 'lambda_index' in option_dictionaries[0].keys():
print "lambda_index found."
env.name = env.name + "_" + str(env.lambda_index)
env['job_name']=env.name[0:env.max_job_name_chars]
with settings(cores=1):
calc_nodes()
env.run_command_one_proc=template(env.run_command)
calc_nodes()
if env.get('nodes_new'):
env.nodes = env.nodes_new
env.run_command=template(env.run_command)
if env.get('run_ensemble_command') and env.get('cores_per_replica'):
env.run_ensemble_command=template(env.run_ensemble_command)
if env.get('run_ensemble_command_ties') and env.get('cores_per_replica_per_lambda'):
env.run_ensemble_command_ties=template(env.run_ensemble_command_ties)
env.job_script=script_templates(env.batch_header,env.script)
env.dest_name=env.pather.join(env.scripts_path,env.pather.basename(env.job_script))
put(env.job_script,env.dest_name)
if 'remote_path' in option_dictionaries[1].keys():
print "remote_path found."
env.job_results = env.remote_path
run(template("mkdir -p $job_results && cp $dest_name $job_results && chmod u+x $dest_name")) #bundled 3 ssh sessions into one to improve performance.
with tempfile.NamedTemporaryFile() as tempf:
tempf.write(yaml.dump(dict(env)))
tempf.flush() #Flush the file before we copy it.
put(tempf.name,env.pather.join(env.job_results,'env.yml'))
# Allow option to submit all preparations, but not actually submit the job
if not env.get("noexec",False):
with cd(env.job_results):
if env.module_load_at_connect:
with prefix(env.run_prefix):
run(template("$job_dispatch $dest_name"))
else:
run(template("$job_dispatch $dest_name"))
def input_to_range(arg,default):
ttype=type(default)
gen_regexp="\[([\d\.]+):([\d\.]+):([\d\.]+)\]" #regexp for a array generator like [1.2:3:0.2]
if not arg:
return [default]
match=re.match(gen_regexp,str(arg))
if match:
vals=list(map(ttype,match.groups()))
if ttype==int:
return range(*vals)
else:
return np.arange(*vals)
return [ttype(arg)]
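# Illustrative examples: input_to_range("[1:5:1]", 1) matches the generator
# pattern and returns range(1, 5, 1) == [1, 2, 3, 4]; input_to_range("", 1)
# returns [1]; input_to_range("2.5", 1.0) returns [2.5].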
@task
def get_running_location(job=None):
"""
Returns the node name where a given job is running.
"""
if job:
with_job(job)
env.running_node=run(template("cat $job_results/env_details.asc"))
def manual(cmd):
# From the fabric wiki, bypass fabric internal ssh control
commands=env.command_prefixes[:]
if env.get('cwd'):
commands.append("cd %s"%env.cwd)
commands.append(cmd)
manual_command=" && ".join(commands)
pre_cmd = "ssh -Y -p %(port)s %(user)s@%(host)s " % env
local(pre_cmd + "'"+manual_command+"'", capture=False)
def manual_gsissh(cmd):
# #From the fabric wiki, bypass fabric internal ssh control
commands=env.command_prefixes[:]
if env.get('cwd'):
commands.append("cd %s"%env.cwd)
commands.append(cmd)
manual_command=" && ".join(commands)
pre_cmd = "gsissh -t -p %(port)s %(host)s " % env
local(pre_cmd + "'"+manual_command+"'", capture=False)
def run(cmd):
if env.manual_gsissh:
return manual_gsissh(cmd)
elif env.manual_ssh:
return manual(cmd)
else:
return fabric.api.run(cmd)
def put(src,dest):
if env.manual_gsissh:
if os.path.isdir(src):
if src[-1] != '/':
env.manual_src=src+'/'
env.manual_dest=dest+'/'
else:
env.manual_src=src
env.manual_dest=dest
local(template("globus-url-copy -sync -r -cd -p 10 file://$manual_src gsiftp://$host/$manual_dest"))
elif env.manual_ssh:
env.manual_src=src
env.manual_dest=dest
local(template("scp $manual_src $user@$host:$manual_dest"))
else:
fabric.api.put(src,dest)
@task
def blackbox(script='ibi.sh', args=''):
""" black-box script execution. """
for p in env.local_blackbox_path:
script_file_path = os.path.join(p, script)
if os.path.exists(os.path.dirname(script_file_path)):
local("%s %s" % (script_file_path, args))
return
print "FabSim Error: could not find blackbox() script file. FabSim looked for it in the following directories: ", env.local_blackbox_path
@task
def probe(label="undefined"):
""" Scans a remote site for the presence of certain software. """
return run("module avail 2>&1 | grep %s" % label)
@task
def archive(prefix, archive_location):
""" Cleans results directories of core dumps and moves results to archive locations. """
if len(prefix)<1:
print "error: no prefix defined."
sys.exit()
print "LOCAL %s %s %s*" % (env.local_results, prefix, archive_location)
local("rm -f %s/*/core" % (env.local_results))
local("mv -i %s/%s* %s/" % (env.local_results, prefix, archive_location))
parent_path = os.sep.join(env.results_path.split(os.sep)[:-1])
print "REMOTE MOVE: mv %s/%s %s/Backup" % (env.results_path, prefix, parent_path)
run("mkdir -p %s/Backup" % (parent_path))
run("mv -i %s/%s* %s/Backup/" % (env.results_path, prefix, parent_path))
@task
def print_config(args=''):
""" Prints local environment """
for x in env:
print x,':',env[x]
|
|
""".. http:get:: /family
List the families.
A successful response returns a JSON array of :ref:`model-family` objects.
:param id: The user id.
:query filter: Filter the array of families against a property on the :ref:`model-family` object.
:reqheader Authorization: Basic auth with (id, access_token)
:resheader Content-Type: application/json
:status 200: OK
:status 400: The JSON request body could not be parsed
:status 401: Unauthorized
:status 422: See the response codes for more information
.. http:get:: /family/:id
Get a family.
A successful response returns a :ref:`model-family` JSON object.
:param id: The family id.
:reqheader Authorization: Basic auth with (id, access_token)
:resheader Content-Type: application/json
:status 200: OK
:status 400: The JSON request body could not be parsed
:status 401: Unauthorized
:status 422: See the response codes for more information
.. http:post:: /family
Create a family.
A successful response returns a :ref:`model-family` JSON object.
:param id: The user id.
:reqheader Authorization: Basic auth with (id, access_token)
:resheader Content-Type: application/json
:status 201: OK
:status 400: The JSON request body could not be parsed
:status 401: Unauthorized
:status 422: See the response codes for more information
.. http:patch:: /family/:id
Update a family.
A successful response returns a :ref:`model-family` JSON object.
:param id: The family id.
:reqheader Authorization: Basic auth with (id, access_token)
:resheader Content-Type: application/json
:status 200: OK
:status 400: The JSON request body could not be parsed
:status 401: Unauthorized
:status 422: See the response codes for more information
.. http:delete:: /family/:id
Delete a family.
:param id: The family id.
:reqheader Authorization: Basic auth with (id, access_token)
:resheader Content-Type: application/json
:status 204: OK
:status 400: The JSON request body could not be parsed
:status 401: Unauthorized
:status 422: See the response codes for more information
"""
import bottle
from bottle import request, response
import sqlalchemy as sa
from bauble import app, API_ROOT
from bauble.middleware import basic_auth, filter_param, build_counts
from bauble.model import Family, get_relation
family_column_names = [col.name for col in sa.inspect(Family).columns]
family_mutable = [col for col in family_column_names
if col not in ['id'] and not col.startswith('_')]
def resolve_family(next):
def _wrapped(*args, **kwargs):
request.family = request.session.query(Family).get(request.args['family_id'])
if not request.family:
bottle.abort(404, "Family not found")
return next(*args, **kwargs)
return _wrapped
def build_embedded(embed, family):
if embed == 'synonyms':
data = family.synonyms
else:
data = get_relation(Family, family.id, embed, session=request.session)
return (embed, [obj.json() for obj in data])
@app.get(API_ROOT + "/family")
@basic_auth
@filter_param(Family, family_column_names)
def index_family():
# TODO: we're not doing any sanitization or validation...see preggy or validate.py
families = request.filter if request.filter else request.session.query(Family)
return [family.json() for family in families]
@app.get(API_ROOT + "/family/<family_id:int>")
@basic_auth
@resolve_family
def get_family(family_id):
json_data = request.family.json()
if 'embed' in request.params:
embed_list = request.params.embed if isinstance(request.params.embed, list) \
else [request.params.embed]
embedded = map(lambda embed: build_embedded(embed, request.family), embed_list)
json_data.update(embedded)
return json_data
@app.route(API_ROOT + "/family/<family_id:int>", method='PATCH')
@basic_auth
@resolve_family
def patch_family(family_id):
if not request.json:
bottle.abort(400, 'The request doesn\'t contain a request body')
# TODO: restrict the columns to only those that are patchable, which might be different
# than the columns that are postable
# create a copy of the request data with only the columns that are mutable
data = {col: request.json[col] for col in request.json.keys()
if col in family_mutable}
for key, value in data.items():
setattr(request.family, key, value)
request.session.commit()
return request.family.json()
@app.post(API_ROOT + "/family")
@basic_auth
def post_family():
if not request.json:
bottle.abort(400, 'The request doesn\'t contain a request body')
# create a copy of the request data with only the mutable columns
data = {col: request.json[col] for col in request.json.keys()
if col in family_mutable}
family = Family(**data)
request.session.add(family)
request.session.commit()
response.status = 201
return family.json()
@app.delete(API_ROOT + "/family/<family_id:int>")
@basic_auth
@resolve_family
def delete_family(family_id):
request.session.delete(request.family)
request.session.commit()
response.status = 204
@app.get(API_ROOT + "/family/<family_id:int>/synonyms")
@basic_auth
@resolve_family
def list_synonyms(family_id):
return request.family.synonyms
# @app.get(API_ROOT + "/family/<family_id:int>/synonyms/<synonym_id:int>")
# @basic_auth
# @resolve_family
# def get_synonym(family_id, synonym_id):
# return request.family.synonyms
@app.post(API_ROOT + "/family/<family_id:int>/synonyms")
@basic_auth
@resolve_family
def add_synonym(family_id):
synonym_json = request.json
if 'id' not in synonym_json:
bottle.abort(400, "No id in request body")
syn_family = request.session.query(Family).get(synonym_json['id'])
request.family.synonyms.append(syn_family)
request.session.commit()
response.status = 201
@app.delete(API_ROOT + "/family/<family_id:int>/synonyms/<synonym_id:int>")
@basic_auth
@resolve_family
def remove_synonym(family_id, synonym_id):
# synonym_id is the id of the family not the FamilySynonym object
syn_family = request.session.query(Family).get(synonym_id)
request.family.synonyms.remove(syn_family)
request.session.commit()
response.status = 204
@app.get(API_ROOT + "/family/<family_id:int>/count")
@basic_auth
@resolve_family
@build_counts(Family, 'family_id')
def count(family_id):
return request.counts
|
|
# Copyright 2021 The QHBM Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools for inference on energy functions represented by a BitstringEnergy."""
import abc
import functools
import itertools
from typing import Union
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
from qhbmlib.models import energy
from qhbmlib import utils
def preface_inference(f):
"""Wraps given function with things to run before every inference call.
Args:
f: The method of `EnergyInference` to wrap.
Returns:
wrapper: The wrapped function.
"""
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
self._preface_inference() # pylint: disable=protected-access
return f(self, *args, **kwargs)
return wrapper
class EnergyInferenceBase(tf.keras.layers.Layer, abc.ABC):
r"""Defines the interface for inference on BitstringEnergy objects.
Let $E$ be the energy function defined by a given `BitstringEnergy`, and let
$X$ be the set of bitstrings in the domain of $E$. Associated with $E$ is
a probability distribution
$$p(x) = \frac{e^{-E(x)}}{\sum_{y\in X} e^{-E(y)}},$$
which we call the Energy Based Model (EBM) associated with $E$. Inference
in this class means estimating quantities of interest relative to the EBM.
"""
def __init__(self,
input_energy: energy.BitstringEnergy,
initial_seed: Union[None, tf.Tensor] = None,
name: Union[None, str] = None):
"""Initializes an EnergyInferenceBase.
Args:
input_energy: The parameterized energy function which defines this
distribution via the equations of an energy based model. This class
assumes that all parameters of `energy` are `tf.Variable`s and that
they are all returned by `energy.variables`.
initial_seed: PRNG seed; see tfp.random.sanitize_seed for details. This
seed will be used in the `sample` method. If None, the seed is updated
after every inference call. Otherwise, the seed is fixed.
name: Optional name for the model.
"""
super().__init__(name=name)
self._energy = input_energy
self._energy.build([None, self._energy.num_bits])
self._tracked_variables = input_energy.variables
if len(self._tracked_variables) == 0:
self._checkpoint = False
else:
self._tracked_variables_checkpoint = [
tf.Variable(v.read_value(), trainable=False)
for v in self._tracked_variables
]
self._checkpoint = True
if initial_seed is None:
self._update_seed = tf.Variable(True, trainable=False)
else:
self._update_seed = tf.Variable(False, trainable=False)
self._seed = tf.Variable(
tfp.random.sanitize_seed(initial_seed), trainable=False)
self._first_inference = tf.Variable(True, trainable=False)
@property
def energy(self):
"""The energy function which sets the probabilities for this EBM."""
return self._energy
@property
def seed(self):
"""Current TFP compatible seed controlling sampling behavior.
PRNG seed; see tfp.random.sanitize_seed for details. This seed will be used
in the `sample` method. If None, the seed is updated after every inference
call. Otherwise, the seed is fixed.
"""
return self._seed
@seed.setter
def seed(self, initial_seed: Union[None, tf.Tensor]):
"""Sets a new value of the random seed.
Args:
initial_seed: see `self.seed` for details.
"""
if initial_seed is None:
self._update_seed.assign(True)
else:
self._update_seed.assign(False)
self._seed.assign(tfp.random.sanitize_seed(initial_seed))
@property
def variables_updated(self):
"""Returns True if tracked variables do not have the checkpointed values."""
if self._checkpoint:
variables_not_equal_list = tf.nest.map_structure(
lambda v, vc: tf.math.reduce_any(tf.math.not_equal(v, vc)),
self._tracked_variables, self._tracked_variables_checkpoint)
return tf.math.reduce_any(tf.stack(variables_not_equal_list))
else:
return False
def _checkpoint_variables(self):
"""Checkpoints the currently tracked variables."""
if self._checkpoint:
tf.nest.map_structure(lambda v, vc: vc.assign(v), self._tracked_variables,
self._tracked_variables_checkpoint)
def _preface_inference(self):
"""Things all energy inference methods do before proceeding.
Called by `preface_inference` before the wrapped inference method.
Currently includes:
- run `self._ready_inference` if this is first call of a wrapped function
- change the seed if not set by the user during initialization
- run `self._ready_inference` if tracked energy parameters changed
Note: subclasses should take care to call the superclass method.
"""
if self._first_inference:
self._checkpoint_variables()
self._ready_inference()
self._first_inference.assign(False)
if self._update_seed:
new_seed, _ = tfp.random.split_seed(self.seed)
self._seed.assign(new_seed)
if self.variables_updated:
self._checkpoint_variables()
self._ready_inference()
@abc.abstractmethod
def _ready_inference(self):
"""Performs computations common to all inference methods.
Contains inference code that must be run first if the variables of
`self.energy` have been updated since the last time inference was performed.
"""
@preface_inference
def call(self, inputs, *args, **kwargs):
"""Calls this layer on the given inputs."""
return self._call(inputs, *args, **kwargs)
@preface_inference
def entropy(self):
"""Returns an estimate of the entropy."""
return self._entropy()
@preface_inference
def expectation(self, function):
"""Returns an estimate of the expectation value of the given function.
Args:
function: Mapping from a 2D tensor of bitstrings to a possibly nested
structure. The structure must have atomic elements all of which are
float tensors with the same batch size as the input bitstrings.
"""
return self._expectation(function)
@preface_inference
def log_partition(self):
"""Returns an estimate of the log partition function."""
return self._log_partition()
@preface_inference
def sample(self, num_samples: int):
"""Returns samples from the EBM corresponding to `self.energy`.
Args:
num_samples: Number of samples to draw from the EBM.
"""
return self._sample(num_samples)
@abc.abstractmethod
def _call(self, inputs, *args, **kwargs):
"""Default implementation wrapped by `self.call`."""
raise NotImplementedError()
@abc.abstractmethod
def _entropy(self):
"""Default implementation wrapped by `self.entropy`."""
raise NotImplementedError()
@abc.abstractmethod
def _expectation(self, function):
"""Default implementation wrapped by `self.expectation`."""
raise NotImplementedError()
@abc.abstractmethod
def _log_partition(self):
"""Default implementation wrapped by `self.log_partition`."""
raise NotImplementedError()
@abc.abstractmethod
def _sample(self, num_samples: int):
"""Default implementation wrapped by `self.sample`."""
raise NotImplementedError()
class EnergyInference(EnergyInferenceBase):
"""Provides some default method implementations."""
def __init__(self,
input_energy: energy.BitstringEnergy,
num_expectation_samples: int,
initial_seed: Union[None, tf.Tensor] = None,
name: Union[None, str] = None):
"""Initializes an EnergyInference.
Args:
input_energy: The parameterized energy function which defines this
distribution via the equations of an energy based model. This class
assumes that all parameters of `energy` are `tf.Variable`s and that
they are all returned by `energy.variables`.
num_expectation_samples: Number of samples to draw and use for estimating
the expectation value.
initial_seed: PRNG seed; see tfp.random.sanitize_seed for details. This
seed will be used in the `sample` method. If None, the seed is updated
after every inference call. Otherwise, the seed is fixed.
name: Optional name for the model.
"""
super().__init__(input_energy, initial_seed, name)
self.num_expectation_samples = num_expectation_samples
def _expectation(self, function):
"""Default implementation wrapped by `self.expectation`.
Estimates an expectation value using sample averaging.
"""
@tf.custom_gradient
def _inner_expectation():
"""Enables derivatives."""
samples = tf.stop_gradient(self.sample(self.num_expectation_samples))
bitstrings, _, counts = utils.unique_bitstrings_with_counts(samples)
# TODO(#157): try to parameterize the persistence.
with tf.GradientTape() as values_tape:
# Adds variables in `self.energy` to `variables` argument of `grad_fn`.
values_tape.watch(self.energy.trainable_variables)
values = function(bitstrings)
average_of_values = tf.nest.map_structure(
lambda x: utils.weighted_average(counts, x), values)
def grad_fn(*upstream, variables):
"""See equation A5 in the QHBM paper appendix for details.
# TODO(#119): confirm equation number.
"""
function_grads = values_tape.gradient(
average_of_values,
variables,
output_gradients=upstream,
unconnected_gradients=tf.UnconnectedGradients.ZERO)
flat_upstream = tf.nest.flatten(upstream)
flat_values = tf.nest.flatten(values)
combined_flat = tf.nest.map_structure(lambda x, y: x * y, flat_upstream,
flat_values)
combined_flat_sum = tf.nest.map_structure(
lambda x: tf.map_fn(tf.reduce_sum, x), combined_flat)
combined_sum = tf.reduce_sum(tf.stack(combined_flat_sum), 0)
average_of_combined_sum = utils.weighted_average(counts, combined_sum)
# Compute grad E terms.
with tf.GradientTape() as tape:
energies = self.energy(bitstrings)
energies_grads = tape.jacobian(
energies,
variables,
unconnected_gradients=tf.UnconnectedGradients.ZERO)
average_of_energies_grads = tf.nest.map_structure(
lambda x: utils.weighted_average(counts, x), energies_grads)
product_of_averages = tf.nest.map_structure(
lambda x: x * average_of_combined_sum, average_of_energies_grads)
products = tf.nest.map_structure(
lambda x: tf.einsum("i...,i->i...", x, combined_sum),
energies_grads)
average_of_products = tf.nest.map_structure(
lambda x: utils.weighted_average(counts, x), products)
# Note: upstream gradient is already a coefficient in poa, aop, and fg.
return tuple(), [
poa - aop + fg for poa, aop, fg in zip(
product_of_averages, average_of_products, function_grads)
]
return average_of_values, grad_fn
return _inner_expectation()
def _log_partition(self):
"""Default implementation wrapped by `self.log_partition`."""
@tf.custom_gradient
def _inner_log_partition():
"""Wraps forward pass computaton."""
result = self._log_partition_forward_pass()
# Adds variables in `self.energy` to `variables` argument of `grad_fn`.
_ = [tf.identity(x) for x in self.energy.trainable_variables]
grad_fn = self._log_partition_grad_generator()
return result, grad_fn
return _inner_log_partition()
@abc.abstractmethod
def _log_partition_forward_pass(self):
"""Returns approximation to the log partition function."""
raise NotImplementedError()
def _log_partition_grad_generator(self):
"""Returns default estimator for the log partition function derivative."""
def grad_fn(upstream, variables):
"""See equation C2 in the appendix. TODO(#119)"""
def energy_grad(bitstrings):
"""Calculates the derivative with respect to the current variables."""
with tf.GradientTape() as tape:
energies = self.energy(bitstrings)
jac = tape.jacobian(
energies,
variables,
unconnected_gradients=tf.UnconnectedGradients.ZERO)
return jac
energy_grad_expectation_list = self.expectation(energy_grad)
return tuple(), [
upstream * (-1.0 * ege) for ege in energy_grad_expectation_list
]
return grad_fn
class AnalyticEnergyInference(EnergyInference):
"""Uses an explicit categorical distribution to implement parent functions."""
def __init__(self,
input_energy: energy.BitstringEnergy,
num_expectation_samples: int,
initial_seed: Union[None, tf.Tensor] = None,
name: Union[None, str] = None):
"""Initializes an AnalyticEnergyInference.
Internally, this class saves all possible bitstrings as a tensor, whose
energies are calculated relative to an input energy function for sampling
and other inference tasks.
Args:
input_energy: The parameterized energy function which defines this
distribution via the equations of an energy based model. This class
assumes that all parameters of `energy` are `tf.Variable`s and that
they are all returned by `energy.variables`.
num_expectation_samples: Number of samples to draw and use for estimating
the expectation value.
initial_seed: PRNG seed; see tfp.random.sanitize_seed for details. This
seed will be used in the `sample` method. If None, the seed is updated
after every inference call. Otherwise, the seed is fixed.
name: Optional name for the model.
"""
super().__init__(input_energy, num_expectation_samples, initial_seed, name)
self._all_bitstrings = tf.constant(
list(itertools.product([0, 1], repeat=input_energy.num_bits)),
dtype=tf.int8)
self._logits_variable = tf.Variable(
-input_energy(self.all_bitstrings), trainable=False)
self._distribution = tfd.Categorical(logits=self._logits_variable)
@property
def all_bitstrings(self):
"""Returns every bitstring."""
return self._all_bitstrings
@property
def all_energies(self):
"""Returns the energy of every bitstring."""
return self.energy(self.all_bitstrings)
@property
def distribution(self):
"""Categorical distribution set during `self._ready_inference`."""
return self._distribution
def _ready_inference(self):
"""See base class docstring."""
self._logits_variable.assign(-self.all_energies)
def _call(self, inputs, *args, **kwargs):
"""See base class docstring."""
if inputs is None:
return self.distribution
else:
return self.sample(inputs)
def _entropy(self):
"""See base class docstring."""
return self.distribution.entropy()
def _log_partition_forward_pass(self):
"""See base class docstring."""
# TODO(#115)
return tf.reduce_logsumexp(self.distribution.logits_parameter())
def _sample(self, num_samples: int):
"""See base class docstring."""
return tf.gather(
self.all_bitstrings,
self.distribution.sample(num_samples, seed=self.seed),
axis=0)
class BernoulliEnergyInference(EnergyInference):
"""Manages inference for a Bernoulli defined by spin energies."""
def __init__(self,
input_energy: energy.BernoulliEnergy,
num_expectation_samples: int,
initial_seed: Union[None, tf.Tensor] = None,
name: Union[None, str] = None):
"""Initializes a BernoulliEnergyInference.
Args:
input_energy: The parameterized energy function which defines this
distribution via the equations of an energy based model. This class
assumes that all parameters of `energy` are `tf.Variable`s and that
they are all returned by `energy.variables`.
num_expectation_samples: Number of samples to draw and use for estimating
the expectation value.
initial_seed: PRNG seed; see tfp.random.sanitize_seed for details. This
seed will be used in the `sample` method. If None, the seed is updated
after every inference call. Otherwise, the seed is fixed.
name: Optional name for the model.
"""
super().__init__(input_energy, num_expectation_samples, initial_seed, name)
self._logits_variable = tf.Variable(input_energy.logits, trainable=False)
self._distribution = tfd.Bernoulli(
logits=self._logits_variable, dtype=tf.int8)
@property
def distribution(self):
"""Bernoulli distribution set during `self._ready_inference`."""
return self._distribution
def _ready_inference(self):
"""See base class docstring."""
self._logits_variable.assign(self.energy.logits)
def _call(self, inputs, *args, **kwargs):
"""See base class docstring."""
if inputs is None:
return self.distribution
else:
return self.sample(inputs)
def _entropy(self):
"""Returns the exact entropy.
The total entropy of a set of spins is the sum of each individual spin's
entropies.
"""
return tf.reduce_sum(self.distribution.entropy())
def _log_partition_forward_pass(self):
r"""Returns the exact log partition function.
For a single spin of energy $\theta$, the partition function is
$$Z_\theta = \exp(\theta) + \exp(-\theta).$$
Since each spin is independent, the total log partition function is
the sum of the individual spin log partition functions.
"""
thetas = 0.5 * self.energy.logits
single_log_partitions = tf.math.log(
tf.math.exp(thetas) + tf.math.exp(-thetas))
return tf.math.reduce_sum(single_log_partitions)
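# Sanity check of the formula above (illustrative): if every logit is 0, then
# every theta is 0 and each spin contributes log(e^0 + e^-0) = log(2), so the
# total log partition function is num_bits * log(2), matching the uniform
# distribution over bitstrings.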
def _sample(self, num_samples: int):
"""See base class docstring"""
return self.distribution.sample(num_samples, seed=self.seed)
|
|
# Windows Azure Linux Agent
#
# Copyright 2016 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Handle packages and modules to enable RDMA for IB networking
"""
import os
import re
import time
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.fileutil as fileutil
import azurelinuxagent.common.utils.shellutil as shellutil
from azurelinuxagent.common.utils.textutil import parse_doc, find, getattrib
dapl_config_paths = [
'/etc/dat.conf',
'/etc/rdma/dat.conf',
'/usr/local/etc/dat.conf'
]
def setup_rdma_device(nd_version, shared_conf):
logger.verbose("Parsing SharedConfig XML contents for RDMA details")
xml_doc = parse_doc(shared_conf.xml_text)
if xml_doc is None:
logger.error("Could not parse SharedConfig XML document")
return
instance_elem = find(xml_doc, "Instance")
if not instance_elem:
logger.error("Could not find <Instance> in SharedConfig document")
return
rdma_ipv4_addr = getattrib(instance_elem, "rdmaIPv4Address")
if not rdma_ipv4_addr:
logger.error(
"Could not find rdmaIPv4Address attribute on Instance element of SharedConfig.xml document")
return
rdma_mac_addr = getattrib(instance_elem, "rdmaMacAddress")
if not rdma_mac_addr:
logger.error(
"Could not find rdmaMacAddress attribute on Instance element of SharedConfig.xml document")
return
# add colons to the MAC address (e.g. 00155D33FF1D ->
# 00:15:5D:33:FF:1D)
rdma_mac_addr = ':'.join([rdma_mac_addr[i:i + 2]
for i in range(0, len(rdma_mac_addr), 2)])
logger.info("Found RDMA details. IPv4={0} MAC={1}".format(
rdma_ipv4_addr, rdma_mac_addr))
# Set up the RDMA device with collected information
RDMADeviceHandler(rdma_ipv4_addr, rdma_mac_addr, nd_version).start()
logger.info("RDMA: device is set up")
return
class RDMAHandler(object):
driver_module_name = 'hv_network_direct'
nd_version = None
def get_rdma_version(self): # pylint: disable=R1710
"""Retrieve the firmware version information from the system.
This depends on information provided by the Linux kernel."""
if self.nd_version:
return self.nd_version
kvp_key_size = 512
kvp_value_size = 2048
driver_info_source = '/var/lib/hyperv/.kvp_pool_0'
base_kernel_err_msg = 'Kernel does not provide the necessary '
base_kernel_err_msg += 'information or the kvp daemon is not running.'
if not os.path.isfile(driver_info_source):
error_msg = 'RDMA: Source file "%s" does not exist. '
error_msg += base_kernel_err_msg
logger.error(error_msg % driver_info_source)
return
with open(driver_info_source, "rb") as pool_file:
while True:
key = pool_file.read(kvp_key_size)
value = pool_file.read(kvp_value_size)
if key and value:
key_0 = key.partition(b"\x00")[0]
if key_0:
key_0 = key_0.decode()
value_0 = value.partition(b"\x00")[0]
if value_0:
value_0 = value_0.decode()
if key_0 == "NdDriverVersion":
self.nd_version = value_0
return self.nd_version
else:
break
error_msg = 'RDMA: NdDriverVersion not found in "%s"'
logger.error(error_msg % driver_info_source)
return
@staticmethod
def is_kvp_daemon_running():
"""Look for kvp daemon names in ps -ef output and return True/False
"""
# for centos, the hypervkvpd and the hv_kvp_daemon both are ok.
# for suse, it uses hv_kvp_daemon
kvp_daemon_names = ['hypervkvpd', 'hv_kvp_daemon']
exitcode, ps_out = shellutil.run_get_output("ps -ef")
if exitcode != 0:
raise Exception('RDMA: ps -ef failed: %s' % ps_out)
for n in kvp_daemon_names:
if n in ps_out:
logger.info('RDMA: kvp daemon (%s) is running' % n)
return True
else:
logger.verbose('RDMA: kvp daemon (%s) is not running' % n)
return False
def load_driver_module(self):
"""Load the kernel driver, this depends on the proper driver
to be installed with the install_driver() method"""
logger.info("RDMA: probing module '%s'" % self.driver_module_name)
result = shellutil.run('modprobe --first-time %s' % self.driver_module_name)
if result != 0:
error_msg = 'Could not load "%s" kernel module. '
error_msg += 'Run "modprobe --first-time %s" as root for more details'
logger.error(
error_msg % (self.driver_module_name, self.driver_module_name)
)
return False
logger.info('RDMA: Loaded the kernel driver successfully.')
return True
def install_driver_if_needed(self):
if self.nd_version:
if conf.enable_check_rdma_driver():
self.install_driver()
else:
logger.info('RDMA: check RDMA driver is disabled, skip installing driver')
else:
logger.info('RDMA: skip installing driver when nd_version is not present')
def install_driver(self):
"""Install the driver. This is distribution specific and must
be overwritten in the child implementation."""
logger.error('RDMAHandler.install_driver not implemented')
def is_driver_loaded(self):
"""Check if the network module is loaded in kernel space"""
cmd = 'lsmod | grep ^%s' % self.driver_module_name
status, loaded_modules = shellutil.run_get_output(cmd) # pylint: disable=W0612
logger.info('RDMA: Checking if the module is loaded.')
if loaded_modules:
logger.info('RDMA: module loaded.')
return True
logger.info('RDMA: module not loaded.')
return False
def reboot_system(self):
"""Reboot the system. This is required as the kernel module for
the rdma driver cannot be unloaded with rmmod"""
logger.info('RDMA: Rebooting system.')
ret = shellutil.run('shutdown -r now')
if ret != 0:
logger.error('RDMA: Failed to reboot the system')
class RDMADeviceHandler(object):
"""
Responsible for writing RDMA IP and MAC address to the /dev/hvnd_rdma
interface.
"""
rdma_dev = '/dev/hvnd_rdma'
sriov_dir = '/sys/class/infiniband'
device_check_timeout_sec = 120
device_check_interval_sec = 1
ipoib_check_timeout_sec = 60
ipoib_check_interval_sec = 1
ipv4_addr = None
mac_addr = None
nd_version = None
def __init__(self, ipv4_addr, mac_addr, nd_version):
self.ipv4_addr = ipv4_addr
self.mac_addr = mac_addr
self.nd_version = nd_version
def start(self):
logger.info("RDMA: starting device processing.")
self.process()
logger.info("RDMA: completed device processing.")
def process(self):
try:
if not self.nd_version:
logger.info("RDMA: provisioning SRIOV RDMA device.")
self.provision_sriov_rdma()
else:
logger.info("RDMA: provisioning Network Direct RDMA device.")
self.provision_network_direct_rdma()
except Exception as e:
logger.error("RDMA: device processing failed: {0}".format(e))
def provision_network_direct_rdma(self):
RDMADeviceHandler.update_dat_conf(dapl_config_paths, self.ipv4_addr)
if not conf.enable_check_rdma_driver():
logger.info("RDMA: skip checking RDMA driver version")
RDMADeviceHandler.update_network_interface(self.mac_addr, self.ipv4_addr)
return
skip_rdma_device = False
module_name = "hv_network_direct"
retcode, out = shellutil.run_get_output("modprobe -R %s" % module_name, chk_err=False)
if retcode == 0:
module_name = out.strip()
else:
logger.info("RDMA: failed to resolve module name. Use original name")
retcode, out = shellutil.run_get_output("modprobe %s" % module_name)
if retcode != 0:
logger.error("RDMA: failed to load module %s" % module_name)
return
retcode, out = shellutil.run_get_output("modinfo %s" % module_name)
if retcode == 0:
version = re.search("version:\s+(\d+)\.(\d+)\.(\d+)\D", out, re.IGNORECASE) # pylint: disable=W1401
if version:
v1 = int(version.groups(0)[0])
v2 = int(version.groups(0)[1])
if v1 > 4 or v1 == 4 and v2 > 0:
logger.info("Skip setting /dev/hvnd_rdma on 4.1 or later")
skip_rdma_device = True
else:
logger.info("RDMA: hv_network_direct driver version not present, assuming 4.0.x or older.")
else:
logger.warn("RDMA: failed to get module info on hv_network_direct.")
if not skip_rdma_device:
RDMADeviceHandler.wait_rdma_device(
self.rdma_dev, self.device_check_timeout_sec, self.device_check_interval_sec)
RDMADeviceHandler.write_rdma_config_to_device(
self.rdma_dev, self.ipv4_addr, self.mac_addr)
RDMADeviceHandler.update_network_interface(self.mac_addr, self.ipv4_addr)
def provision_sriov_rdma(self):
(key, value) = self.read_ipoib_data()
if key:
# provision multiple IP over IB addresses
logger.info("RDMA: provisioning multiple IP over IB addresses")
self.provision_sriov_multiple_ib(value)
elif self.ipv4_addr:
logger.info("RDMA: provisioning single IP over IB address")
# provision a single IP over IB address
RDMADeviceHandler.wait_any_rdma_device(self.sriov_dir,
self.device_check_timeout_sec, self.device_check_interval_sec)
RDMADeviceHandler.update_iboip_interface(self.ipv4_addr,
self.ipoib_check_timeout_sec, self.ipoib_check_interval_sec)
else:
logger.info("RDMA: missing IP address")
def read_ipoib_data(self) :
# read from KVP pool 0 to figure out the IP over IB addresses
kvp_key_size = 512
kvp_value_size = 2048
driver_info_source = '/var/lib/hyperv/.kvp_pool_0'
if not os.path.isfile(driver_info_source):
logger.error("RDMA: can't read KVP pool 0")
return (None, None)
key_0 = None
value_0 = None
with open(driver_info_source, "rb") as pool_file:
while True:
key = pool_file.read(kvp_key_size)
value = pool_file.read(kvp_value_size)
if key and value:
key_0 = key.partition(b"\x00")[0]
if key_0 :
key_0 = key_0.decode()
if key_0 == "IPoIB_Data":
value_0 = value.partition(b"\x00")[0]
if value_0 :
value_0 = value_0.decode()
break
else:
break
if key_0 == "IPoIB_Data":
return (key_0, value_0)
return (None, None)
def provision_sriov_multiple_ib(self, value) :
mac_ip_array = []
values = value.split("|")
num_ips = len(values) - 1
# values[0] tells how many IPs. Format - NUMPAIRS:<number>
match = re.match(r"NUMPAIRS:(\d+)", values[0])
if match:
num = int(match.groups(0)[0])
if num != num_ips:
logger.error("RDMA: multiple IPs reported num={0} actual number of IPs={1}".format(num, num_ips))
return
else:
logger.error("RDMA: failed to find number of IP addresses in {0}".format(values[0]))
return
for i in range(1, num_ips+1):
# each MAC/IP entry is of format <MAC>:<IP>
match = re.match(r"([^:]+):(\d+\.\d+\.\d+\.\d+)", values[i])
if match:
mac_addr = match.groups(0)[0]
ipv4_addr = match.groups(0)[1]
mac_ip_array.append((mac_addr, ipv4_addr))
else:
logger.error("RDMA: failed to find MAC/IP address in {0}".format(values[i]))
return
# try to assign all MAC/IP addresses to IB interfaces
# retry for up to 60 times, with 1 seconds delay between each
retry = 60
while retry > 0:
count = self.update_iboip_interfaces(mac_ip_array)
if count == len(mac_ip_array):
return
time.sleep(1)
retry -= 1
logger.error("RDMA: failed to set all IP over IB addresses")
# Assign addresses to all IP over IB interfaces specified in mac_ip_array
# Return the number of IP addresses successfully assigned
def update_iboip_interfaces(self, mac_ip_array):
net_dir = "/sys/class/net"
nics = os.listdir(net_dir)
count = 0
for nic in nics:
# look for IBoIP interface of format ibXXX
if not re.match(r"ib\w+", nic):
continue
mac_addr = None
with open(os.path.join(net_dir, nic, "address")) as address_file:
mac_addr = address_file.read()
if not mac_addr:
logger.error("RDMA: can't read address for device {0}".format(nic))
continue
mac_addr = mac_addr.upper()
match = re.match(r".+(\w\w):(\w\w):(\w\w):\w\w:\w\w:(\w\w):(\w\w):(\w\w)\n", mac_addr)
if not match:
logger.error("RDMA: failed to parse address for device {0} address {1}".format(nic, mac_addr))
continue
# format an MAC address without :
mac_addr = ""
mac_addr = mac_addr.join(match.groups(0))
for mac_ip in mac_ip_array:
if mac_ip[0] == mac_addr:
ret = 0
try:
# bring up the interface and set its IP address
ip_command = ["ip", "link", "set", nic, "up"]
shellutil.run_command(ip_command)
ip_command = ["ip", "addr", "add", "{0}/16".format(mac_ip[1]), "dev", nic]
shellutil.run_command(ip_command)
except shellutil.CommandError as error:
ret = error.returncode
if ret == 0:
logger.info("RDMA: set address {0} to device {1}".format(mac_ip[1], nic))
if ret and ret != 2:
# return value 2 means the address is already set
logger.error("RDMA: failed to set IP address {0} on device {1}".format(mac_ip[1], nic))
else:
count += 1
break
return count
@staticmethod
def update_iboip_interface(ipv4_addr, timeout_sec, check_interval_sec):
logger.info("Wait for ib0 become available")
total_retries = timeout_sec / check_interval_sec
n = 0
found_ib0 = None
while not found_ib0 and n < total_retries:
ret, output = shellutil.run_get_output("ifconfig -a")
if ret != 0:
raise Exception("Failed to list network interfaces")
found_ib0 = re.search("ib0", output, re.IGNORECASE)
if found_ib0:
break
time.sleep(check_interval_sec)
n += 1
if not found_ib0:
raise Exception("ib0 is not available")
netmask = 16
logger.info("RDMA: configuring IPv4 addr and netmask on ipoib interface")
addr = '{0}/{1}'.format(ipv4_addr, netmask)
if shellutil.run("ifconfig ib0 {0}".format(addr)) != 0:
raise Exception("Could set addr to {0} on ib0".format(addr))
logger.info("RDMA: ipoib address and netmask configured on interface")
@staticmethod
def update_dat_conf(paths, ipv4_addr):
"""
Looks at paths for dat.conf file and updates the ip address for the
infiniband interface.
"""
logger.info("Updating DAPL configuration file")
for f in paths:
logger.info("RDMA: trying {0}".format(f))
if not os.path.isfile(f):
logger.info(
"RDMA: DAPL config not found at {0}".format(f))
continue
logger.info("RDMA: DAPL config is at: {0}".format(f))
cfg = fileutil.read_file(f)
new_cfg = RDMADeviceHandler.replace_dat_conf_contents(
cfg, ipv4_addr)
fileutil.write_file(f, new_cfg)
logger.info("RDMA: DAPL configuration is updated")
return
raise Exception("RDMA: DAPL configuration file not found at predefined paths")
@staticmethod
def replace_dat_conf_contents(cfg, ipv4_addr):
old = "ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 \"\S+ 0\"" # pylint: disable=W1401
new = "ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 \"{0} 0\"".format(
ipv4_addr)
return re.sub(old, new, cfg)
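# Illustrative effect of the substitution above (addresses are made up):
#   before: ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 "192.168.0.5 0"
#   after:  ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 "10.0.0.4 0"
# where ipv4_addr was "10.0.0.4".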
@staticmethod
def write_rdma_config_to_device(path, ipv4_addr, mac_addr):
data = RDMADeviceHandler.generate_rdma_config(ipv4_addr, mac_addr)
logger.info(
"RDMA: Updating device with configuration: {0}".format(data))
with open(path, "w") as f:
logger.info("RDMA: Device opened for writing")
f.write(data)
logger.info("RDMA: Updated device with IPv4/MAC addr successfully")
@staticmethod
def generate_rdma_config(ipv4_addr, mac_addr):
return 'rdmaMacAddress="{0}" rdmaIPv4Address="{1}"'.format(mac_addr, ipv4_addr)
@staticmethod
def wait_rdma_device(path, timeout_sec, check_interval_sec):
logger.info("RDMA: waiting for device={0} timeout={1}s".format(path, timeout_sec))
total_retries = timeout_sec / check_interval_sec
n = 0
while n < total_retries:
if os.path.exists(path):
logger.info("RDMA: device ready")
return
logger.verbose(
"RDMA: device not ready, sleep {0}s".format(check_interval_sec))
time.sleep(check_interval_sec)
n += 1
logger.error("RDMA device wait timed out")
raise Exception("The device did not show up in {0} seconds ({1} retries)".format(
timeout_sec, total_retries))
@staticmethod
def wait_any_rdma_device(directory, timeout_sec, check_interval_sec):
logger.info(
"RDMA: waiting for any Infiniband device at directory={0} timeout={1}s".format(
directory, timeout_sec))
total_retries = timeout_sec / check_interval_sec
n = 0
while n < total_retries:
r = os.listdir(directory)
if r:
logger.info("RDMA: device found in {0}".format(directory))
return
logger.verbose(
"RDMA: device not ready, sleep {0}s".format(check_interval_sec))
time.sleep(check_interval_sec)
n += 1
logger.error("RDMA device wait timed out")
raise Exception("The device did not show up in {0} seconds ({1} retries)".format(
timeout_sec, total_retries))
@staticmethod
def update_network_interface(mac_addr, ipv4_addr):
netmask = 16
logger.info("RDMA: will update the network interface with IPv4/MAC")
if_name = RDMADeviceHandler.get_interface_by_mac(mac_addr)
logger.info("RDMA: network interface found: {0}", if_name)
logger.info("RDMA: bringing network interface up")
if shellutil.run("ifconfig {0} up".format(if_name)) != 0:
raise Exception("Could not bring up RMDA interface: {0}".format(if_name))
logger.info("RDMA: configuring IPv4 addr and netmask on interface")
addr = '{0}/{1}'.format(ipv4_addr, netmask)
if shellutil.run("ifconfig {0} {1}".format(if_name, addr)) != 0:
raise Exception("Could set addr to {1} on {0}".format(if_name, addr))
logger.info("RDMA: network address and netmask configured on interface")
@staticmethod
def get_interface_by_mac(mac):
ret, output = shellutil.run_get_output("ifconfig -a")
if ret != 0:
raise Exception("Failed to list network interfaces")
output = output.replace('\n', '')
match = re.search(r"(eth\d).*(HWaddr|ether) {0}".format(mac),
output, re.IGNORECASE)
if match is None:
raise Exception("Failed to get ifname with mac: {0}".format(mac))
output = match.group(0)
eths = re.findall(r"eth\d", output)
if eths is None or len(eths) == 0:
raise Exception("ifname with mac: {0} not found".format(mac))
return eths[-1]
|
|
# -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import msgpack
import gevent.pool
import gevent.queue
import gevent.event
import gevent.local
import gevent.coros
import gevent_zmq as zmq
from .context import Context
class Sender(object):
def __init__(self, socket):
self._socket = socket
self._send_queue = gevent.queue.Queue(maxsize=0)
self._send_task = gevent.spawn(self._sender)
def __del__(self):
self.close()
def close(self):
if self._send_task:
self._send_task.kill()
def _sender(self):
running = True
for parts in self._send_queue:
for i in xrange(len(parts) - 1):
try:
self._socket.send(parts[i], flags=zmq.SNDMORE)
except gevent.GreenletExit:
if i == 0:
return
running = False
self._socket.send(parts[i], flags=zmq.SNDMORE)
self._socket.send(parts[-1])
if not running:
return
def __call__(self, parts):
self._send_queue.put(parts)
class Receiver(object):
def __init__(self, socket):
self._socket = socket
self._recv_queue = gevent.queue.Queue(maxsize=0)
self._recv_task = gevent.spawn(self._recver)
def __del__(self):
self.close()
def close(self):
if self._recv_task:
self._recv_task.kill()
def _recver(self):
running = True
while True:
parts = []
while True:
try:
part = self._socket.recv()
except gevent.GreenletExit:
running = False
if len(parts) == 0:
return
part = self._socket.recv()
parts.append(part)
if not self._socket.getsockopt(zmq.RCVMORE):
break
if not running:
break
self._recv_queue.put(parts)
def __call__(self):
return self._recv_queue.get()
class Event(object):
__slots__ = [ '_name', '_args', '_header' ]
def __init__(self, name, args, context, header=None):
self._name = name
self._args = args
if header is None:
self._header = {
'message_id': context.new_msgid(),
'v': 3
}
else:
self._header = header
@property
def header(self):
return self._header
@property
def name(self):
return self._name
@name.setter
def name(self, v):
self._name = v
@property
def args(self):
return self._args
def pack(self):
return msgpack.Packer().pack((self._header, self._name, self._args))
@staticmethod
def unpack(blob):
unpacker = msgpack.Unpacker()
unpacker.feed(blob)
unpacked_msg = unpacker.unpack()
try:
(header, name, args) = unpacked_msg
except Exception as e:
raise Exception('invalid msg format "{0}": {1}'.format(
unpacked_msg, e))
# Backward compatibility
if not isinstance(header, dict):
header = {}
return Event(name, args, None, header)
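# A minimal round-trip sketch (names and header values are illustrative):
#
#   evt = Event('ping', ('hello',), None, header={'message_id': 'abc', 'v': 3})
#   blob = evt.pack()
#   same = Event.unpack(blob)
#   # same.name == 'ping', same.header == {'message_id': 'abc', 'v': 3};
#   # msgpack returns the args as a list: ['hello'].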
def __str__(self, ignore_args=False):
if ignore_args:
args = '[...]'
else:
args = self._args
try:
args = '<<{0}>>'.format(str(self.unpack(self._args)))
except:
pass
return '{0} {1} {2}'.format(self._name, self._header,
args)
class Events(object):
def __init__(self, zmq_socket_type, context=None):
self._zmq_socket_type = zmq_socket_type
self._context = context or Context.get_instance()
self._socket = zmq.Socket(self._context, zmq_socket_type)
self._send = self._socket.send_multipart
self._recv = self._socket.recv_multipart
if zmq_socket_type in (zmq.PUSH, zmq.PUB, zmq.DEALER, zmq.ROUTER):
self._send = Sender(self._socket)
if zmq_socket_type in (zmq.PULL, zmq.SUB, zmq.DEALER, zmq.ROUTER):
self._recv = Receiver(self._socket)
@property
def recv_is_available(self):
return self._zmq_socket_type in (zmq.PULL, zmq.SUB, zmq.DEALER, zmq.ROUTER)
def __del__(self):
try:
if not self._socket.closed:
self.close()
except AttributeError:
pass
def close(self):
try:
self._send.close()
except AttributeError:
pass
try:
self._recv.close()
except AttributeError:
pass
self._socket.close()
def _resolve_endpoint(self, endpoint, resolve=True):
if resolve:
endpoint = self._context.hook_resolve_endpoint(endpoint)
if isinstance(endpoint, (tuple, list)):
r = []
for sub_endpoint in endpoint:
r.extend(self._resolve_endpoint(sub_endpoint, resolve))
return r
return [endpoint]
def connect(self, endpoint, resolve=True):
r = []
for endpoint_ in self._resolve_endpoint(endpoint, resolve):
r.append(self._socket.connect(endpoint_))
return r
def bind(self, endpoint, resolve=True):
r = []
for endpoint_ in self._resolve_endpoint(endpoint, resolve):
r.append(self._socket.bind(endpoint_))
return r
def create_event(self, name, args, xheader={}):
event = Event(name, args, context=self._context)
for k, v in xheader.items():
if k == 'zmqid':
continue
event.header[k] = v
return event
def emit_event(self, event, identity=None):
if identity is not None:
parts = list(identity)
parts.extend(['', event.pack()])
elif self._zmq_socket_type in (zmq.DEALER, zmq.ROUTER):
parts = ('', event.pack())
else:
parts = (event.pack(),)
self._send(parts)
def emit(self, name, args, xheader={}):
event = self.create_event(name, args, xheader)
identity = xheader.get('zmqid', None)
return self.emit_event(event, identity)
def recv(self):
parts = self._recv()
if len(parts) == 1:
identity = None
blob = parts[0]
else:
identity = parts[0:-2]
blob = parts[-1]
event = Event.unpack(blob)
if identity is not None:
event.header['zmqid'] = identity
return event
def setsockopt(self, *args):
return self._socket.setsockopt(*args)
@property
def context(self):
return self._context
class WrappedEvents(object):
def __init__(self, channel):
self._channel = channel
def close(self):
pass
@property
def recv_is_available(self):
return self._channel.recv_is_available
def create_event(self, name, args, xheader={}):
event = Event(name, args, self._channel.context)
event.header.update(xheader)
return event
def emit_event(self, event, identity=None):
event_payload = (event.header, event.name, event.args)
wrapper_event = self._channel.create_event('w', event_payload)
self._channel.emit_event(wrapper_event)
def emit(self, name, args, xheader={}):
wrapper_event = self.create_event(name, args, xheader)
self.emit_event(wrapper_event)
def recv(self, timeout=None):
wrapper_event = self._channel.recv()
(header, name, args) = wrapper_event.args
return Event(name, args, None, header)
@property
def context(self):
return self._channel.context
|
|
from __future__ import absolute_import
import mock
import django_dynamic_fixture as fixture
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
from readthedocs.projects.forms import ProjectRelationshipForm
from readthedocs.projects.models import Project, ProjectRelationship
from readthedocs.rtd_tests.utils import create_user
class SubprojectFormTests(TestCase):
def test_empty_child(self):
user = fixture.get(User)
project = fixture.get(Project, slug='mainproject')
form = ProjectRelationshipForm(
{},
project=project,
user=user
)
form.full_clean()
self.assertEqual(len(form.errors['child']), 1)
self.assertRegexpMatches(
form.errors['child'][0],
r'This field is required.'
)
def test_nonexistent_child(self):
user = fixture.get(User)
project = fixture.get(Project, slug='mainproject')
self.assertFalse(Project.objects.filter(pk=9999).exists())
form = ProjectRelationshipForm(
{'child': 9999},
project=project,
user=user
)
form.full_clean()
self.assertEqual(len(form.errors['child']), 1)
self.assertRegexpMatches(
form.errors['child'][0],
r'Select a valid choice.'
)
def test_adding_subproject_fails_when_user_is_not_admin(self):
user = fixture.get(User)
project = fixture.get(Project, slug='mainproject')
project.users.add(user)
subproject = fixture.get(Project, slug='subproject')
self.assertQuerysetEqual(
Project.objects.for_admin_user(user),
[project],
transform=lambda n: n,
ordered=False,
)
form = ProjectRelationshipForm(
{'child': subproject.pk},
project=project,
user=user
)
form.full_clean()
self.assertEqual(len(form.errors['child']), 1)
self.assertRegexpMatches(
form.errors['child'][0],
r'Select a valid choice.'
)
def test_adding_subproject_passes_when_user_is_admin(self):
user = fixture.get(User)
project = fixture.get(Project, slug='mainproject')
project.users.add(user)
subproject = fixture.get(Project, slug='subproject')
subproject.users.add(user)
self.assertQuerysetEqual(
Project.objects.for_admin_user(user),
[project, subproject],
transform=lambda n: n,
ordered=False,
)
form = ProjectRelationshipForm(
{'child': subproject.pk},
project=project,
user=user
)
form.full_clean()
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(
[r.child for r in project.subprojects.all()],
[subproject]
)
def test_subproject_form_cant_create_sub_sub_project(self):
user = fixture.get(User)
project = fixture.get(Project, users=[user])
subproject = fixture.get(Project, users=[user])
subsubproject = fixture.get(Project, users=[user])
relation = fixture.get(
ProjectRelationship, parent=project, child=subproject
)
self.assertQuerysetEqual(
Project.objects.for_admin_user(user),
[project, subproject, subsubproject],
transform=lambda n: n,
ordered=False,
)
form = ProjectRelationshipForm(
{'child': subsubproject.pk},
project=subproject,
user=user
)
# The subsubproject is valid here, as far as the child check is
# concerned, but the parent check should fail.
self.assertEqual(
[proj_id for (proj_id, __) in form.fields['child'].choices],
['', subsubproject.pk],
)
form.full_clean()
self.assertEqual(len(form.errors['parent']), 1)
self.assertRegexpMatches(
form.errors['parent'][0],
r'Subproject nesting is not supported'
)
def test_excludes_existing_subprojects(self):
user = fixture.get(User)
project = fixture.get(Project, users=[user])
subproject = fixture.get(Project, users=[user])
relation = fixture.get(
ProjectRelationship, parent=project, child=subproject
)
self.assertQuerysetEqual(
Project.objects.for_admin_user(user),
[project, subproject],
transform=lambda n: n,
ordered=False,
)
form = ProjectRelationshipForm(
{'child': subproject.pk},
project=project,
user=user
)
self.assertEqual(
[proj_id for (proj_id, __) in form.fields['child'].choices],
[''],
)
def test_exclude_self_project_as_subproject(self):
user = fixture.get(User)
project = fixture.get(Project, users=[user])
form = ProjectRelationshipForm(
{'child': project.pk},
project=project,
user=user
)
self.assertFalse(form.is_valid())
self.assertNotIn(
project.id,
[proj_id for (proj_id, __) in form.fields['child'].choices]
)
@override_settings(PUBLIC_DOMAIN='readthedocs.org')
class ResolverBase(TestCase):
def setUp(self):
with mock.patch('readthedocs.projects.models.broadcast'):
self.owner = create_user(username='owner', password='test')
self.tester = create_user(username='tester', password='test')
self.pip = fixture.get(Project, slug='pip', users=[self.owner], main_language_project=None)
self.subproject = fixture.get(Project, slug='sub', language='ja',
users=[ self.owner],
main_language_project=None)
self.translation = fixture.get(Project, slug='trans', language='ja',
users=[ self.owner],
main_language_project=None)
self.pip.add_subproject(self.subproject)
self.pip.translations.add(self.translation)
relation = self.pip.subprojects.first()
relation.alias = 'sub_alias'
relation.save()
fixture.get(Project, slug='sub_alias', language='ya')
@override_settings(
PRODUCTION_DOMAIN='readthedocs.org',
USE_SUBDOMAIN=False,
)
def test_resolver_subproject_alias(self):
resp = self.client.get('/docs/pip/projects/sub_alias/')
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp._headers['location'][1],
'http://readthedocs.org/docs/pip/projects/sub_alias/ja/latest/'
)
@override_settings(USE_SUBDOMAIN=True)
def test_resolver_subproject_subdomain_alias(self):
resp = self.client.get('/projects/sub_alias/', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp._headers['location'][1],
'http://pip.readthedocs.org/projects/sub_alias/ja/latest/'
)
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.vision_v1p2beta1.services.image_annotator import (
ImageAnnotatorAsyncClient,
)
from google.cloud.vision_v1p2beta1.services.image_annotator import ImageAnnotatorClient
from google.cloud.vision_v1p2beta1.services.image_annotator import transports
from google.cloud.vision_v1p2beta1.types import image_annotator
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.type import latlng_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert ImageAnnotatorClient._get_default_mtls_endpoint(None) is None
assert (
ImageAnnotatorClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
ImageAnnotatorClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
ImageAnnotatorClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
ImageAnnotatorClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
ImageAnnotatorClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
)
@pytest.mark.parametrize(
"client_class", [ImageAnnotatorClient, ImageAnnotatorAsyncClient,]
)
def test_image_annotator_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "vision.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.ImageAnnotatorGrpcTransport, "grpc"),
(transports.ImageAnnotatorGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_image_annotator_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class", [ImageAnnotatorClient, ImageAnnotatorAsyncClient,]
)
def test_image_annotator_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "vision.googleapis.com:443"
def test_image_annotator_client_get_transport_class():
transport = ImageAnnotatorClient.get_transport_class()
available_transports = [
transports.ImageAnnotatorGrpcTransport,
]
assert transport in available_transports
transport = ImageAnnotatorClient.get_transport_class("grpc")
assert transport == transports.ImageAnnotatorGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(ImageAnnotatorClient, transports.ImageAnnotatorGrpcTransport, "grpc"),
(
ImageAnnotatorAsyncClient,
transports.ImageAnnotatorGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
ImageAnnotatorClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ImageAnnotatorClient),
)
@mock.patch.object(
ImageAnnotatorAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ImageAnnotatorAsyncClient),
)
def test_image_annotator_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(ImageAnnotatorClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(ImageAnnotatorClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
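# The options-plumbing checks above mirror normal construction. A minimal usage
# sketch (not part of the generated tests) of overriding the endpoint and quota
# project; the quota project id below is a placeholder:
def _example_client_with_options():
    options = client_options.ClientOptions(
        api_endpoint="vision.googleapis.com", quota_project_id="example-project"
    )
    return ImageAnnotatorClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options
    )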
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(ImageAnnotatorClient, transports.ImageAnnotatorGrpcTransport, "grpc", "true"),
(
ImageAnnotatorAsyncClient,
transports.ImageAnnotatorGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(ImageAnnotatorClient, transports.ImageAnnotatorGrpcTransport, "grpc", "false"),
(
ImageAnnotatorAsyncClient,
transports.ImageAnnotatorGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
ImageAnnotatorClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ImageAnnotatorClient),
)
@mock.patch.object(
ImageAnnotatorAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ImageAnnotatorAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_image_annotator_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class", [ImageAnnotatorClient, ImageAnnotatorAsyncClient]
)
@mock.patch.object(
ImageAnnotatorClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ImageAnnotatorClient),
)
@mock.patch.object(
ImageAnnotatorAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ImageAnnotatorAsyncClient),
)
def test_image_annotator_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
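# A minimal sketch of resolving the endpoint outside the mocked environment,
# assuming GOOGLE_API_USE_CLIENT_CERTIFICATE / GOOGLE_API_USE_MTLS_ENDPOINT are
# set by the caller as exercised above:
def _example_resolve_mtls_endpoint():
    options = client_options.ClientOptions(
        client_cert_source=client_cert_source_callback
    )
    api_endpoint, cert_source = ImageAnnotatorClient.get_mtls_endpoint_and_cert_source(
        options
    )
    # With GOOGLE_API_USE_CLIENT_CERTIFICATE="true" this yields the mTLS endpoint
    # and the provided cert source; with "false" the cert source is dropped.
    return api_endpoint, cert_source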
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(ImageAnnotatorClient, transports.ImageAnnotatorGrpcTransport, "grpc"),
(
ImageAnnotatorAsyncClient,
transports.ImageAnnotatorGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_image_annotator_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
ImageAnnotatorClient,
transports.ImageAnnotatorGrpcTransport,
"grpc",
grpc_helpers,
),
(
ImageAnnotatorAsyncClient,
transports.ImageAnnotatorGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_image_annotator_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_image_annotator_client_client_options_from_dict():
with mock.patch(
"google.cloud.vision_v1p2beta1.services.image_annotator.transports.ImageAnnotatorGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = ImageAnnotatorClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
ImageAnnotatorClient,
transports.ImageAnnotatorGrpcTransport,
"grpc",
grpc_helpers,
),
(
ImageAnnotatorAsyncClient,
transports.ImageAnnotatorGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_image_annotator_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"vision.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision",
),
scopes=None,
default_host="vision.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"request_type", [image_annotator.BatchAnnotateImagesRequest, dict,]
)
def test_batch_annotate_images(request_type, transport: str = "grpc"):
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_annotate_images), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = image_annotator.BatchAnnotateImagesResponse()
response = client.batch_annotate_images(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == image_annotator.BatchAnnotateImagesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, image_annotator.BatchAnnotateImagesResponse)
def test_batch_annotate_images_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_annotate_images), "__call__"
) as call:
client.batch_annotate_images()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == image_annotator.BatchAnnotateImagesRequest()
@pytest.mark.asyncio
async def test_batch_annotate_images_async(
transport: str = "grpc_asyncio",
request_type=image_annotator.BatchAnnotateImagesRequest,
):
client = ImageAnnotatorAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_annotate_images), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
image_annotator.BatchAnnotateImagesResponse()
)
response = await client.batch_annotate_images(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == image_annotator.BatchAnnotateImagesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, image_annotator.BatchAnnotateImagesResponse)
@pytest.mark.asyncio
async def test_batch_annotate_images_async_from_dict():
await test_batch_annotate_images_async(request_type=dict)
def test_batch_annotate_images_flattened():
client = ImageAnnotatorClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_annotate_images), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = image_annotator.BatchAnnotateImagesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.batch_annotate_images(
requests=[
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].requests
mock_val = [
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
]
assert arg == mock_val
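# The flattened call above runs against a mocked stub. A minimal, non-mocked
# usage sketch; the feature choice is an illustrative placeholder and real
# credentials are assumed:
def _example_batch_annotate_images(client, image_bytes):
    request_item = image_annotator.AnnotateImageRequest(
        image=image_annotator.Image(content=image_bytes),
        features=[
            image_annotator.Feature(
                type_=image_annotator.Feature.Type.LABEL_DETECTION
            )
        ],
    )
    # Returns a BatchAnnotateImagesResponse with one entry per request.
    return client.batch_annotate_images(requests=[request_item])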
def test_batch_annotate_images_flattened_error():
client = ImageAnnotatorClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.batch_annotate_images(
image_annotator.BatchAnnotateImagesRequest(),
requests=[
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
],
)
@pytest.mark.asyncio
async def test_batch_annotate_images_flattened_async():
client = ImageAnnotatorAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_annotate_images), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = image_annotator.BatchAnnotateImagesResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
image_annotator.BatchAnnotateImagesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.batch_annotate_images(
requests=[
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].requests
mock_val = [
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
]
assert arg == mock_val
@pytest.mark.asyncio
async def test_batch_annotate_images_flattened_error_async():
client = ImageAnnotatorAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.batch_annotate_images(
image_annotator.BatchAnnotateImagesRequest(),
requests=[
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
],
)
@pytest.mark.parametrize(
"request_type", [image_annotator.AsyncBatchAnnotateFilesRequest, dict,]
)
def test_async_batch_annotate_files(request_type, transport: str = "grpc"):
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.async_batch_annotate_files), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.async_batch_annotate_files(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == image_annotator.AsyncBatchAnnotateFilesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_async_batch_annotate_files_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.async_batch_annotate_files), "__call__"
) as call:
client.async_batch_annotate_files()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == image_annotator.AsyncBatchAnnotateFilesRequest()
@pytest.mark.asyncio
async def test_async_batch_annotate_files_async(
transport: str = "grpc_asyncio",
request_type=image_annotator.AsyncBatchAnnotateFilesRequest,
):
client = ImageAnnotatorAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.async_batch_annotate_files), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.async_batch_annotate_files(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == image_annotator.AsyncBatchAnnotateFilesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_async_batch_annotate_files_async_from_dict():
await test_async_batch_annotate_files_async(request_type=dict)
def test_async_batch_annotate_files_flattened():
client = ImageAnnotatorClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.async_batch_annotate_files), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.async_batch_annotate_files(
requests=[
image_annotator.AsyncAnnotateFileRequest(
input_config=image_annotator.InputConfig(
gcs_source=image_annotator.GcsSource(uri="uri_value")
)
)
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].requests
mock_val = [
image_annotator.AsyncAnnotateFileRequest(
input_config=image_annotator.InputConfig(
gcs_source=image_annotator.GcsSource(uri="uri_value")
)
)
]
assert arg == mock_val
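# async_batch_annotate_files returns a long-running operation (surfaced as a
# Future in the tests above). A minimal usage sketch against a real client; the
# GCS URIs, mime type, feature, and timeout are illustrative placeholders:
def _example_async_batch_annotate_files(client):
    file_request = image_annotator.AsyncAnnotateFileRequest(
        input_config=image_annotator.InputConfig(
            gcs_source=image_annotator.GcsSource(uri="gs://bucket/input.pdf"),
            mime_type="application/pdf",
        ),
        features=[
            image_annotator.Feature(
                type_=image_annotator.Feature.Type.DOCUMENT_TEXT_DETECTION
            )
        ],
        output_config=image_annotator.OutputConfig(
            gcs_destination=image_annotator.GcsDestination(uri="gs://bucket/output/")
        ),
    )
    operation = client.async_batch_annotate_files(requests=[file_request])
    # Block until the operation completes; the timeout is an arbitrary choice.
    return operation.result(timeout=300)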
def test_async_batch_annotate_files_flattened_error():
client = ImageAnnotatorClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.async_batch_annotate_files(
image_annotator.AsyncBatchAnnotateFilesRequest(),
requests=[
image_annotator.AsyncAnnotateFileRequest(
input_config=image_annotator.InputConfig(
gcs_source=image_annotator.GcsSource(uri="uri_value")
)
)
],
)
@pytest.mark.asyncio
async def test_async_batch_annotate_files_flattened_async():
client = ImageAnnotatorAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.async_batch_annotate_files), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.async_batch_annotate_files(
requests=[
image_annotator.AsyncAnnotateFileRequest(
input_config=image_annotator.InputConfig(
gcs_source=image_annotator.GcsSource(uri="uri_value")
)
)
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].requests
mock_val = [
image_annotator.AsyncAnnotateFileRequest(
input_config=image_annotator.InputConfig(
gcs_source=image_annotator.GcsSource(uri="uri_value")
)
)
]
assert arg == mock_val
@pytest.mark.asyncio
async def test_async_batch_annotate_files_flattened_error_async():
client = ImageAnnotatorAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.async_batch_annotate_files(
image_annotator.AsyncBatchAnnotateFilesRequest(),
requests=[
image_annotator.AsyncAnnotateFileRequest(
input_config=image_annotator.InputConfig(
gcs_source=image_annotator.GcsSource(uri="uri_value")
)
)
],
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.ImageAnnotatorGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.ImageAnnotatorGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ImageAnnotatorClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.ImageAnnotatorGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = ImageAnnotatorClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = ImageAnnotatorClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.ImageAnnotatorGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ImageAnnotatorClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.ImageAnnotatorGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = ImageAnnotatorClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.ImageAnnotatorGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.ImageAnnotatorGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.ImageAnnotatorGrpcTransport,
transports.ImageAnnotatorGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = ImageAnnotatorClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.ImageAnnotatorGrpcTransport,)
def test_image_annotator_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.ImageAnnotatorTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_image_annotator_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.vision_v1p2beta1.services.image_annotator.transports.ImageAnnotatorTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.ImageAnnotatorTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"batch_annotate_images",
"async_batch_annotate_files",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
def test_image_annotator_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.vision_v1p2beta1.services.image_annotator.transports.ImageAnnotatorTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ImageAnnotatorTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision",
),
quota_project_id="octopus",
)
def test_image_annotator_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.vision_v1p2beta1.services.image_annotator.transports.ImageAnnotatorTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ImageAnnotatorTransport()
adc.assert_called_once()
def test_image_annotator_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
ImageAnnotatorClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.ImageAnnotatorGrpcTransport,
transports.ImageAnnotatorGrpcAsyncIOTransport,
],
)
def test_image_annotator_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.ImageAnnotatorGrpcTransport, grpc_helpers),
(transports.ImageAnnotatorGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_image_annotator_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"vision.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision",
),
scopes=["1", "2"],
default_host="vision.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.ImageAnnotatorGrpcTransport,
transports.ImageAnnotatorGrpcAsyncIOTransport,
],
)
def test_image_annotator_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
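# The precedence shown above: an explicit ssl_channel_credentials wins, otherwise
# client_cert_source_for_mtls is used to build one. A minimal sketch of supplying
# channel credentials directly (certificate/key bytes are placeholders):
def _example_transport_with_ssl_credentials(cred, cert_bytes, key_bytes):
    ssl_creds = grpc.ssl_channel_credentials(
        certificate_chain=cert_bytes, private_key=key_bytes
    )
    return transports.ImageAnnotatorGrpcTransport(
        host="vision.googleapis.com",
        credentials=cred,
        ssl_channel_credentials=ssl_creds,
    )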
def test_image_annotator_host_no_port():
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="vision.googleapis.com"
),
)
assert client.transport._host == "vision.googleapis.com:443"
def test_image_annotator_host_with_port():
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="vision.googleapis.com:8000"
),
)
assert client.transport._host == "vision.googleapis.com:8000"
def test_image_annotator_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.ImageAnnotatorGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_image_annotator_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.ImageAnnotatorGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.ImageAnnotatorGrpcTransport,
transports.ImageAnnotatorGrpcAsyncIOTransport,
],
)
def test_image_annotator_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.ImageAnnotatorGrpcTransport,
transports.ImageAnnotatorGrpcAsyncIOTransport,
],
)
def test_image_annotator_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_image_annotator_grpc_lro_client():
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_image_annotator_grpc_lro_async_client():
client = ImageAnnotatorAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = ImageAnnotatorClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = ImageAnnotatorClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = ImageAnnotatorClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = ImageAnnotatorClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = ImageAnnotatorClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = ImageAnnotatorClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = ImageAnnotatorClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = ImageAnnotatorClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = ImageAnnotatorClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = ImageAnnotatorClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = ImageAnnotatorClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = ImageAnnotatorClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = ImageAnnotatorClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = ImageAnnotatorClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = ImageAnnotatorClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.ImageAnnotatorTransport, "_prep_wrapped_messages"
) as prep:
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.ImageAnnotatorTransport, "_prep_wrapped_messages"
) as prep:
transport_class = ImageAnnotatorClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = ImageAnnotatorAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
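# The loop above checks the context-manager protocol end to end. A minimal usage
# sketch; anonymous credentials are stand-ins for real ones:
def _example_client_as_context_manager():
    with ImageAnnotatorClient(
        credentials=ga_credentials.AnonymousCredentials()
    ) as client:
        # The transport is closed automatically when the block exits.
        return client.transport._host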
@pytest.mark.parametrize(
"client_class,transport_class",
[
(ImageAnnotatorClient, transports.ImageAnnotatorGrpcTransport),
(ImageAnnotatorAsyncClient, transports.ImageAnnotatorGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loss functions to be used by LayerCollection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.distributions.python.ops import onehot_categorical
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bernoulli
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import normal
@six.add_metaclass(abc.ABCMeta)
class LossFunction(object):
"""Abstract base class for loss functions.
Note that unlike typical loss functions used in neural networks these are
summed and not averaged across cases in the batch, since this is what the
users of this class (FisherEstimator and MatrixVectorProductComputer) will
be expecting. The implication of this is that you may want to
normalize things like Fisher-vector products by the batch size when you
use this class. It depends on the use case.
"""
@abc.abstractproperty
def targets(self):
"""The targets being predicted by the model.
Returns:
None or Tensor of appropriate shape for calling self._evaluate() on.
"""
pass
@abc.abstractproperty
def inputs(self):
"""The inputs to the loss function (excluding the targets)."""
pass
@property
def input_minibatches(self):
"""A `list` of inputs to the loss function, separated by minibatch.
Typically there will be one minibatch per tower in a multi-tower setup.
Returns a list consisting of `self.inputs` by default; `LossFunction`s
supporting registering multiple minibatches should override this method.
Returns:
A `list` of `Tensor`s representing the registered minibatches of inputs.
"""
return [self.inputs]
@property
def num_registered_minibatches(self):
"""Number of minibatches registered for this LossFunction.
Typically equal to the number of towers in a multi-tower setup.
Returns:
An `int` representing the number of registered minibatches.
"""
return len(self.input_minibatches)
def evaluate(self):
"""Evaluate the loss function on the targets."""
if self.targets is not None:
# We treat the targets as "constant". It's only the inputs that get
# "back-propped" through.
return self._evaluate(array_ops.stop_gradient(self.targets))
else:
raise Exception("Cannot evaluate losses with unspecified targets.")
@abc.abstractmethod
def _evaluate(self, targets):
"""Evaluates the negative log probability of the targets.
Args:
targets: Tensor that distribution can calculate log_prob() of.
Returns:
negative log probability of each target, summed across all targets.
"""
pass
@abc.abstractmethod
def multiply_hessian(self, vector):
"""Right-multiply a vector by the Hessian.
Here the 'Hessian' is the Hessian matrix (i.e. matrix of 2nd-derivatives)
of the loss function with respect to its inputs.
Args:
vector: The vector to multiply. Must be the same shape(s) as the
'inputs' property.
Returns:
The vector right-multiplied by the Hessian. Will be of the same shape(s)
as the 'inputs' property.
"""
pass
@abc.abstractmethod
def multiply_hessian_factor(self, vector):
"""Right-multiply a vector by a factor B of the Hessian.
Here the 'Hessian' is the Hessian matrix (i.e. matrix of 2nd-derivatives)
of the loss function with respect to its inputs. Typically this will be
block-diagonal across different cases in the batch, since the loss function
is typically summed across cases.
Note that B can be any matrix satisfying B * B^T = H where H is the Hessian,
but will agree with the one used in the other methods of this class.
Args:
vector: The vector to multiply. Must be of the shape given by the
'hessian_factor_inner_shape' property.
Returns:
The vector right-multiplied by B. Will be of the same shape(s) as the
'inputs' property.
"""
pass
@abc.abstractmethod
def multiply_hessian_factor_transpose(self, vector):
"""Right-multiply a vector by the transpose of a factor B of the Hessian.
Here the 'Hessian' is the Hessian matrix (i.e. matrix of 2nd-derivatives)
of the loss function with respect to its inputs. Typically this will be
block-diagonal across different cases in the batch, since the loss function
is typically summed across cases.
Note that B can be any matrix satisfying B * B^T = H where H is the Hessian,
but will agree with the one used in the other methods of this class.
Args:
vector: The vector to multiply. Must be the same shape(s) as the
'inputs' property.
Returns:
The vector right-multiplied by B^T. Will be of the shape given by the
'hessian_factor_inner_shape' property.
"""
pass
@abc.abstractmethod
def multiply_hessian_factor_replicated_one_hot(self, index):
"""Right-multiply a replicated-one-hot vector by a factor B of the Hessian.
Here the 'Hessian' is the Hessian matrix (i.e. matrix of 2nd-derivatives)
of the loss function with respect to its inputs. Typically this will be
block-diagonal across different cases in the batch, since the loss function
is typically summed across cases.
A 'replicated-one-hot' vector means a tensor which, for each slice along the
batch dimension (assumed to be dimension 0), is 1.0 in the entry
corresponding to the given index and 0 elsewhere.
Note that B can be any matrix satisfying B * B^T = H where H is the Hessian,
but will agree with the one used in the other methods of this class.
Args:
index: A tuple representing the index of the entry in each slice that
is 1.0. Note that len(index) must be equal to the number of elements
of the 'hessian_factor_inner_shape' tensor minus one.
Returns:
The vector right-multiplied by B^T. Will be of the same shape(s) as the
'inputs' property.
"""
pass
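# Illustration of the 'replicated-one-hot' vector described above: for a factor
# whose inner shape is [batch_size, k], index = (j,) corresponds to a tensor that
# is 1.0 at position j of every row. A hedged sketch, not used by this module:
#   one_hot_vec = array_ops.one_hot(
#       array_ops.fill([batch_size], index[0]), depth=k)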
@abc.abstractproperty
def hessian_factor_inner_shape(self):
"""The shape of the tensor returned by multiply_hessian_factor."""
pass
@abc.abstractproperty
def hessian_factor_inner_static_shape(self):
"""Static version of hessian_factor_inner_shape."""
pass
@six.add_metaclass(abc.ABCMeta)
class NegativeLogProbLoss(LossFunction):
"""Abstract base class for loss functions that are negative log probs."""
def __init__(self, seed=None):
self._default_seed = seed
super(NegativeLogProbLoss, self).__init__()
@property
def inputs(self):
return self.params
@abc.abstractproperty
def params(self):
"""Parameters to the underlying distribution."""
pass
@abc.abstractmethod
def multiply_fisher(self, vector):
"""Right-multiply a vector by the Fisher.
Args:
vector: The vector to multiply. Must be the same shape(s) as the
'inputs' property.
Returns:
The vector right-multiplied by the Fisher. Will be of the same shape(s)
as the 'inputs' property.
"""
pass
@abc.abstractmethod
def multiply_fisher_factor(self, vector):
"""Right-multiply a vector by a factor B of the Fisher.
Here the 'Fisher' is the Fisher information matrix (i.e. expected outer-
product of gradients) with respect to the parameters of the underlying
probability distribution (whose log-prob defines the loss). Typically this
will be block-diagonal across different cases in the batch, since the
distribution is usually (but not always) conditionally iid across different
cases.
Note that B can be any matrix satisfying B * B^T = F where F is the Fisher,
but will agree with the one used in the other methods of this class.
Args:
vector: The vector to multiply. Must be of the shape given by the
'fisher_factor_inner_shape' property.
Returns:
The vector right-multiplied by B. Will be of the same shape(s) as the
'inputs' property.
"""
pass
@abc.abstractmethod
def multiply_fisher_factor_transpose(self, vector):
"""Right-multiply a vector by the transpose of a factor B of the Fisher.
Here the 'Fisher' is the Fisher information matrix (i.e. expected outer-
product of gradients) with respect to the parameters of the underlying
probability distribution (whose log-prob defines the loss). Typically this
will be block-diagonal across different cases in the batch, since the
distribution is usually (but not always) conditionally iid across different
cases.
Note that B can be any matrix satisfying B * B^T = F where F is the Fisher,
but will agree with the one used in the other methods of this class.
Args:
vector: The vector to multiply. Must be the same shape(s) as the
'inputs' property.
Returns:
The vector right-multiplied by B^T. Will be of the shape given by the
'fisher_factor_inner_shape' property.
"""
pass
@abc.abstractmethod
def multiply_fisher_factor_replicated_one_hot(self, index):
"""Right-multiply a replicated-one-hot vector by a factor B of the Fisher.
Here the 'Fisher' is the Fisher information matrix (i.e. expected outer-
product of gradients) with respect to the parameters of the underlying
probability distribution (whose log-prob defines the loss). Typically this
will be block-diagonal across different cases in the batch, since the
distribution is usually (but not always) conditionally iid across different
cases.
A 'replicated-one-hot' vector means a tensor which, for each slice along the
batch dimension (assumed to be dimension 0), is 1.0 in the entry
corresponding to the given index and 0 elsewhere.
Note that B can be any matrix satisfying B * B^T = F where F is the Fisher,
but will agree with the one used in the other methods of this class.
Args:
index: A tuple representing the index of the entry in each slice that
is 1.0. Note that len(index) must be equal to the number of elements
of the 'fisher_factor_inner_shape' tensor minus one.
Returns:
The vector right-multiplied by B. Will be of the same shape(s) as the
'inputs' property.
"""
pass
@abc.abstractproperty
def fisher_factor_inner_shape(self):
"""The shape of the tensor returned by multiply_fisher_factor."""
pass
@abc.abstractproperty
def fisher_factor_inner_static_shape(self):
"""Static version of fisher_factor_inner_shape."""
pass
@abc.abstractmethod
def sample(self, seed):
"""Sample 'targets' from the underlying distribution."""
pass
def evaluate_on_sample(self, seed=None):
"""Evaluates the log probability on a random sample.
Args:
seed: int or None. Random seed for this draw from the distribution.
Returns:
Log probability of sampled targets, summed across examples.
"""
if seed is None:
seed = self._default_seed
# We treat the targets as "constant". It's only the inputs that get
# "back-propped" through.
return self._evaluate(array_ops.stop_gradient(self.sample(seed)))
# TODO(jamesmartens): should this just inherit from object to avoid "diamond"
# inheritance, or is there a better way?
class NaturalParamsNegativeLogProbLoss(NegativeLogProbLoss):
"""Base class for neg log prob losses whose inputs are 'natural' parameters.
Note that the Hessian and Fisher for natural parameters of exponential-
family models are the same, hence the purpose of this class.
See here: https://arxiv.org/abs/1412.1193
'Natural parameters' are defined for exponential-family models. See for
example: https://en.wikipedia.org/wiki/Exponential_family
"""
def multiply_hessian(self, vector):
return self.multiply_fisher(vector)
def multiply_hessian_factor(self, vector):
return self.multiply_fisher_factor(vector)
def multiply_hessian_factor_transpose(self, vector):
return self.multiply_fisher_factor_transpose(vector)
def multiply_hessian_factor_replicated_one_hot(self, index):
return self.multiply_fisher_factor_replicated_one_hot(index)
@property
def hessian_factor_inner_shape(self):
return self.fisher_factor_inner_shape
@property
def hessian_factor_inner_static_shape(self):
return self.fisher_factor_inner_static_shape
class DistributionNegativeLogProbLoss(NegativeLogProbLoss):
"""Base class for neg log prob losses that use the TF Distribution classes."""
def __init__(self, seed=None):
super(DistributionNegativeLogProbLoss, self).__init__(seed=seed)
@abc.abstractproperty
def dist(self):
"""The underlying tf.distributions.Distribution."""
pass
def _evaluate(self, targets):
return -math_ops.reduce_sum(self.dist.log_prob(targets))
def sample(self, seed):
return self.dist.sample(seed=seed)
class NormalMeanNegativeLogProbLoss(DistributionNegativeLogProbLoss,
NaturalParamsNegativeLogProbLoss):
"""Neg log prob loss for a normal distribution parameterized by a mean vector.
Note that the covariance is treated as a constant 'var' times the identity.
Also note that the Fisher for such a normal distribution with respect to the mean
parameter is given by:
F = (1/var) * I
See for example https://www.ii.pwr.edu.pl/~tomczak/PDF/[JMT]Fisher_inf.pdf.
"""
def __init__(self, mean, var=0.5, targets=None, seed=None):
self._mean = mean
self._var = var
self._targets = targets
super(NormalMeanNegativeLogProbLoss, self).__init__(seed=seed)
@property
def targets(self):
return self._targets
@property
def dist(self):
return normal.Normal(loc=self._mean, scale=math_ops.sqrt(self._var))
@property
def params(self):
return self._mean
def multiply_fisher(self, vector):
return (1. / self._var) * vector
def multiply_fisher_factor(self, vector):
return self._var**-0.5 * vector
def multiply_fisher_factor_transpose(self, vector):
return self.multiply_fisher_factor(vector) # it's symmetric in this case
def multiply_fisher_factor_replicated_one_hot(self, index):
assert len(index) == 1, "Length of index was {}".format(len(index))
ones_slice = array_ops.expand_dims(
array_ops.ones(array_ops.shape(self._mean)[:1], dtype=self._mean.dtype),
axis=-1)
output_slice = self._var**-0.5 * ones_slice
return insert_slice_in_zeros(output_slice, 1, int(self._mean.shape[1]),
index[0])
@property
def fisher_factor_inner_shape(self):
return array_ops.shape(self._mean)
@property
def fisher_factor_inner_static_shape(self):
return self._mean.shape
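# Illustrative sketch, not part of the original module: for
# NormalMeanNegativeLogProbLoss the Fisher is F = (1/var) * I, so its
# (symmetric) factor is B = var**-0.5 * I and applying B twice should match
# applying F once. The check below mirrors multiply_fisher and
# multiply_fisher_factor with plain NumPy instead of TensorFlow ops.
def _normal_mean_fisher_factor_sketch(var=0.5):
  import numpy as np
  vector = np.random.randn(3, 4)
  fisher_vector = (1. / var) * vector              # F v
  factor_twice = var**-0.5 * (var**-0.5 * vector)  # B (B v)
  assert np.allclose(fisher_vector, factor_twice)
  return factor_twice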
class NormalMeanVarianceNegativeLogProbLoss(DistributionNegativeLogProbLoss):
"""Negative log prob loss for a normal distribution with mean and variance.
This class parameterizes a multivariate normal distribution with n independent
dimensions. Unlike `NormalMeanNegativeLogProbLoss`, this class does not
assume the variance is held constant. The Fisher Information for n = 1
is given by,
F = [[1 / variance, 0],
[ 0, 0.5 / variance^2]]
where the parameters of the distribution are concatenated into a single
vector as [mean, variance]. For n > 1, the mean parameter vector is
concatenated with the variance parameter vector.
See https://www.ii.pwr.edu.pl/~tomczak/PDF/[JMT]Fisher_inf.pdf for derivation.
"""
def __init__(self, mean, variance, targets=None, seed=None):
assert len(mean.shape) == 2, "Expect 2D mean tensor."
assert len(variance.shape) == 2, "Expect 2D variance tensor."
self._mean = mean
self._variance = variance
self._scale = math_ops.sqrt(variance)
self._targets = targets
super(NormalMeanVarianceNegativeLogProbLoss, self).__init__(seed=seed)
@property
def targets(self):
return self._targets
@property
def dist(self):
return normal.Normal(loc=self._mean, scale=self._scale)
@property
def params(self):
return self._mean, self._variance
def _concat(self, mean, variance):
return array_ops.concat([mean, variance], axis=-1)
def _split(self, params):
return array_ops.split(params, 2, axis=-1)
@property
def _fisher_mean(self):
return 1. / self._variance
@property
def _fisher_mean_factor(self):
return 1. / self._scale
@property
def _fisher_var(self):
return 1. / (2 * math_ops.square(self._variance))
@property
def _fisher_var_factor(self):
return 1. / (math_ops.sqrt(2.) * self._variance)
def multiply_fisher(self, vecs):
mean_vec, var_vec = vecs
return (self._fisher_mean * mean_vec, self._fisher_var * var_vec)
def multiply_fisher_factor(self, vecs):
mean_vec, var_vec = self._split(vecs)
return (self._fisher_mean_factor * mean_vec,
self._fisher_var_factor * var_vec)
def multiply_fisher_factor_transpose(self, vecs):
mean_vec, var_vec = vecs
return self._concat(self._fisher_mean_factor * mean_vec,
self._fisher_var_factor * var_vec)
def multiply_fisher_factor_replicated_one_hot(self, index):
assert len(index) == 1, "Length of index was {}".format(len(index))
index = index[0]
if index < int(self._mean.shape[-1]):
# Index corresponds to mean parameter.
mean_slice = self._fisher_mean_factor[:, index]
mean_slice = array_ops.expand_dims(mean_slice, axis=-1)
mean_output = insert_slice_in_zeros(mean_slice, 1, int(
self._mean.shape[1]), index)
var_output = array_ops.zeros_like(mean_output)
else:
index -= int(self._mean.shape[-1])
# Index corresponds to variance parameter.
var_slice = self._fisher_var_factor[:, index]
var_slice = array_ops.expand_dims(var_slice, axis=-1)
var_output = insert_slice_in_zeros(var_slice, 1,
int(self._variance.shape[1]), index)
mean_output = array_ops.zeros_like(var_output)
return mean_output, var_output
@property
def fisher_factor_inner_shape(self):
return array_ops.concat(
[
array_ops.shape(self._mean)[:-1],
2 * array_ops.shape(self._mean)[-1:]
],
axis=0)
@property
def fisher_factor_inner_static_shape(self):
shape = self._mean.shape.as_list()
return tensor_shape.TensorShape(shape[:-1] + [2 * shape[-1]])
def multiply_hessian(self, vector):
raise NotImplementedError()
def multiply_hessian_factor(self, vector):
raise NotImplementedError()
def multiply_hessian_factor_transpose(self, vector):
raise NotImplementedError()
def multiply_hessian_factor_replicated_one_hot(self, index):
raise NotImplementedError()
@property
def hessian_factor_inner_shape(self):
raise NotImplementedError()
@property
def hessian_factor_inner_static_shape(self):
raise NotImplementedError()
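# Illustrative sketch, not part of the original module: the per-dimension
# Fisher of NormalMeanVarianceNegativeLogProbLoss is diagonal,
# F = diag(1/var, 0.5/var**2), and the factor entries used above are
# 1/sqrt(var) for the mean and 1/(sqrt(2)*var) for the variance. Squaring
# each factor entry should recover the corresponding Fisher entry.
def _normal_mean_variance_fisher_factor_sketch():
  import numpy as np
  var = np.array([0.3, 1.0, 2.5])
  mean_factor = 1. / np.sqrt(var)
  var_factor = 1. / (np.sqrt(2.) * var)
  assert np.allclose(mean_factor**2, 1. / var)
  assert np.allclose(var_factor**2, 0.5 / var**2)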
class CategoricalLogitsNegativeLogProbLoss(DistributionNegativeLogProbLoss,
NaturalParamsNegativeLogProbLoss):
"""Neg log prob loss for a categorical distribution parameterized by logits.
Note that the Fisher (for a single case) of a categorical distribution, with
respect to the natural parameters (i.e. the logits), is given by:
F = diag(p) - p*p^T
where p = softmax(logits). F can be factorized as F = B * B^T where
B = diag(q) - p*q^T
where q is the entry-wise square root of p. This is easy to verify using the
fact that q^T*q = 1.
"""
def __init__(self, logits, targets=None, seed=None):
"""Instantiates a CategoricalLogitsNegativeLogProbLoss.
Args:
logits: Tensor of shape [batch_size, output_size]. Parameters for
underlying distribution.
targets: None or Tensor of shape [output_size]. Each element contains an
index in [0, output_size).
seed: int or None. Default random seed when sampling.
"""
self._logits_components = []
self._targets_components = []
self.register_additional_minibatch(logits, targets=targets)
super(CategoricalLogitsNegativeLogProbLoss, self).__init__(seed=seed)
def register_additional_minibatch(self, logits, targets=None):
"""Register an additiona minibatch's worth of parameters.
Args:
logits: Tensor of shape [batch_size, output_size]. Parameters for
underlying distribution.
targets: None or Tensor of shape [batch_size, output_size]. Each row must
be a one-hot vector.
"""
self._logits_components.append(logits)
self._targets_components.append(targets)
@property
def _logits(self):
return array_ops.concat(self._logits_components, axis=0)
@property
def input_minibatches(self):
return self._logits_components
@property
def targets(self):
if all(target is None for target in self._targets_components):
return None
return array_ops.concat(self._targets_components, axis=0)
@property
def dist(self):
return categorical.Categorical(logits=self._logits)
@property
def _probs(self):
return self.dist.probs
@property
def _sqrt_probs(self):
return math_ops.sqrt(self._probs)
@property
def params(self):
return self._logits
def multiply_fisher(self, vector):
probs = self._probs
return vector * probs - probs * math_ops.reduce_sum(vector * probs, axis=1, keep_dims=True)
def multiply_fisher_factor(self, vector):
probs = self._probs
sqrt_probs = self._sqrt_probs
return sqrt_probs * vector - probs * math_ops.reduce_sum(
sqrt_probs * vector, axis=1, keep_dims=True)
def multiply_fisher_factor_transpose(self, vector):
probs = self._probs
sqrt_probs = self._sqrt_probs
return sqrt_probs * vector - sqrt_probs * math_ops.reduce_sum(
probs * vector, axis=1, keep_dims=True)
def multiply_fisher_factor_replicated_one_hot(self, index):
assert len(index) == 1, "Length of index was {}".format(len(index))
probs = self._probs
sqrt_probs = self._sqrt_probs
sqrt_probs_slice = array_ops.expand_dims(sqrt_probs[:, index[0]], -1)
padded_slice = insert_slice_in_zeros(sqrt_probs_slice, 1,
int(sqrt_probs.shape[1]), index[0])
return padded_slice - probs * sqrt_probs_slice
@property
def fisher_factor_inner_shape(self):
return array_ops.shape(self._logits)
@property
def fisher_factor_inner_static_shape(self):
return self._logits.shape
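# Illustrative sketch, not part of the original module: a NumPy check of the
# factorization claimed in the CategoricalLogitsNegativeLogProbLoss
# docstring, F = diag(p) - p p^T = B B^T with B = diag(q) - p q^T and
# q = sqrt(p), for a single softmax vector p.
def _categorical_fisher_factor_sketch():
  import numpy as np
  logits = np.random.randn(5)
  p = np.exp(logits) / np.sum(np.exp(logits))
  q = np.sqrt(p)
  fisher = np.diag(p) - np.outer(p, p)
  factor = np.diag(q) - np.outer(p, q)
  assert np.allclose(np.dot(factor, factor.T), fisher)
  return fisher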
class MultiBernoulliNegativeLogProbLoss(DistributionNegativeLogProbLoss,
NaturalParamsNegativeLogProbLoss):
"""Neg log prob loss for multiple Bernoulli distributions param'd by logits.
Represents N independent Bernoulli distributions where N = len(logits). Its
Fisher Information matrix is given by,
F = diag(p * (1-p))
p = sigmoid(logits)
As F is diagonal with positive entries, its factor B is,
B = diag(sqrt(p * (1-p)))
"""
def __init__(self, logits, targets=None, seed=None):
self._logits = logits
self._targets = targets
super(MultiBernoulliNegativeLogProbLoss, self).__init__(seed=seed)
@property
def targets(self):
return self._targets
@property
def dist(self):
return bernoulli.Bernoulli(logits=self._logits)
@property
def _probs(self):
return self.dist.probs
@property
def params(self):
return self._logits
def multiply_fisher(self, vector):
return self._probs * (1 - self._probs) * vector
def multiply_fisher_factor(self, vector):
return math_ops.sqrt(self._probs * (1 - self._probs)) * vector
def multiply_fisher_factor_transpose(self, vector):
return self.multiply_fisher_factor(vector) # it's symmetric in this case
def multiply_fisher_factor_replicated_one_hot(self, index):
assert len(index) == 1, "Length of index was {}".format(len(index))
probs_slice = array_ops.expand_dims(self._probs[:, index[0]], -1)
output_slice = math_ops.sqrt(probs_slice * (1 - probs_slice))
return insert_slice_in_zeros(output_slice, 1, int(self._logits.shape[1]),
index[0])
@property
def fisher_factor_inner_shape(self):
return array_ops.shape(self._logits)
@property
def fisher_factor_inner_static_shape(self):
return self._logits.shape
def insert_slice_in_zeros(slice_to_insert, dim, dim_size, position):
"""Inserts slice into a larger tensor of zeros.
Forms a new tensor which is the same shape as slice_to_insert, except that
the dimension given by 'dim' is expanded to the size given by 'dim_size'.
'position' determines the position (index) at which to insert the slice within
that dimension.
Assumes slice_to_insert.shape[dim] = 1.
Args:
slice_to_insert: The slice to insert.
dim: The dimension to expand with zeros.
dim_size: The new size of the 'dim' dimension.
position: The position of 'slice_to_insert' in the new tensor.
Returns:
The new tensor.
Raises:
ValueError: If the slice's shape at the given dim is not 1.
"""
slice_shape = slice_to_insert.shape
if slice_shape[dim] != 1:
raise ValueError("Expected slice_to_insert.shape to have {} dim of 1, but "
"was {}".format(dim, slice_to_insert.shape[dim]))
before = [0] * int(len(slice_shape))
after = before[:]
before[dim] = position
after[dim] = dim_size - position - 1
return array_ops.pad(slice_to_insert, list(zip(before, after)))
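# Illustrative sketch, not part of the original module: the same padding
# trick as insert_slice_in_zeros, written with NumPy so it can be checked
# without building a TensorFlow graph. A [2, 1] slice inserted at position 1
# of a dimension of size 4 yields a [2, 4] array that is zero everywhere
# except column 1.
def _insert_slice_in_zeros_numpy_sketch():
  import numpy as np
  slice_to_insert = np.array([[7.], [8.]])  # shape [2, 1]
  dim, dim_size, position = 1, 4, 1
  before = [0] * slice_to_insert.ndim
  after = list(before)
  before[dim] = position
  after[dim] = dim_size - position - 1
  padded = np.pad(slice_to_insert, list(zip(before, after)), mode='constant')
  assert padded.shape == (2, 4)
  assert np.allclose(padded[:, position], [7., 8.])
  return padded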
class OnehotCategoricalLogitsNegativeLogProbLoss(
CategoricalLogitsNegativeLogProbLoss):
"""Neg log prob loss for a categorical distribution with onehot targets.
Identical to CategoricalLogitsNegativeLogProbLoss except that the underlying
distribution is OneHotCategorical as opposed to Categorical.
"""
@property
def dist(self):
return onehot_categorical.OneHotCategorical(logits=self._logits)
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from trove.common import cfg
from trove.guestagent.datastore.experimental.postgresql import pgutil
from trove.guestagent.datastore.experimental.postgresql.service.access import (
PgSqlAccess)
from trove.openstack.common import log as logging
from trove.common.i18n import _
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
IGNORE_USERS_LIST = CONF.get(CONF.datastore_manager).ignore_users
class PgSqlUsers(PgSqlAccess):
"""Mixin implementing the user CRUD API.
This mixin has a dependency on the PgSqlAccess mixin.
"""
def create_user(self, context, users):
"""Create users and grant privileges for the specified databases.
The users parameter is a list of dictionaries in the following form:
{"_name": "", "_password": "", "_databases": [{"_name": ""}, ...]}
"""
for user in users:
LOG.debug(
"{guest_id}: Creating user {name} with password {password}."
.format(
guest_id=CONF.guest_id,
name=user['_name'],
password=user['_password'],
)
)
LOG.info(
_("{guest_id}: Creating user {name} with password {password}.")
.format(
guest_id=CONF.guest_id,
name=user['_name'],
password="<SANITIZED>",
)
)
pgutil.psql(
pgutil.UserQuery.create(
name=user['_name'],
password=user['_password'],
),
timeout=30,
)
self.grant_access(
context,
user['_name'],
None,
[d['_name'] for d in user['_databases']],
)
def list_users(
self,
context,
limit=None,
marker=None,
include_marker=False,
):
"""List all users on the instance along with their access permissions.
Return value is a list of dictionaries in the following form:
[{"_name": "", "_password": None, "_host": None,
"_databases": [{"_name": ""}, ...]}, ...]
"""
results = pgutil.query(
pgutil.UserQuery.list(ignore=IGNORE_USERS_LIST),
timeout=30,
)
# Convert results into dictionaries.
results = (
{
'_name': r[0].strip(),
'_password': None,
'_host': None,
'_databases': self.list_access(context, r[0], None),
}
for r in results
)
# Force __iter__ of generator until marker found.
if marker is not None:
try:
item = results.next()
while item['_name'] != marker:
item = results.next()
except StopIteration:
pass
remainder = None
if limit is not None:
remainder = results
results = itertools.islice(results, limit)
results = tuple(results)
next_marker = None
if remainder is not None:
try:
next_marker = remainder.next()
except StopIteration:
pass
return results, next_marker
def delete_user(self, context, user):
"""Delete the specified user.
The user parameter is a dictionary in the following form:
{"_name": ""}
"""
LOG.info(
_("{guest_id}: Dropping user {name}.").format(
guest_id=CONF.guest_id,
name=user['_name'],
)
)
pgutil.psql(
pgutil.UserQuery.drop(name=user['_name']),
timeout=30,
)
def get_user(self, context, username, hostname):
"""Return a single user matching the criteria.
The username and hostname parameter are strings.
The return value is a dictionary in the following form:
{"_name": "", "_host": None, "_password": None,
"_databases": [{"_name": ""}, ...]}
Where "_databases" is a list of databases the user has access to.
"""
results = pgutil.query(
pgutil.UserQuery.get(name=username),
timeout=30,
)
results = tuple(results)
if len(results) < 1:
return None
return {
"_name": results[0][0],
"_host": None,
"_password": None,
"_databases": self.list_access(context, username, None),
}
def change_passwords(self, context, users):
"""Change the passwords of one or more existing users.
The users parameter is a list of dictionaries in the following form:
{"name": "", "password": ""}
"""
for user in users:
LOG.debug(
"{guest_id}: Changing password for {user} to {password}."
.format(
guest_id=CONF.guest_id,
user=user['name'],
password=user['password'],
)
)
LOG.info(
_("{guest_id}: Changing password for {user} to {password}.")
.format(
guest_id=CONF.guest_id,
user=user['name'],
password="<SANITIZED>",
)
)
pgutil.psql(
pgutil.UserQuery.update_password(
user=user['name'],
password=user['password'],
),
timeout=30,
)
def update_attributes(self, context, username, hostname, user_attrs):
"""Change the attributes of one existing user.
The username and hostname parameters are strings.
The user_attrs parameter is a dictionary in the following form:
{"password": "", "name": ""}
Each key/value pair in user_attrs is optional.
"""
if user_attrs.get('password') is not None:
self.change_passwords(
context,
(
{
"name": username,
"password": user_attrs['password'],
},
),
)
if user_attrs.get('name') is not None:
access = self.list_access(context, username, None)
LOG.info(
_("{guest_id}: Changing username for {old} to {new}.").format(
guest_id=CONF.guest_id,
old=username,
new=user_attrs['name'],
)
)
pgutil.psql(
pgutil.UserQuery.update_name(
old=username,
new=user_attrs['name'],
),
timeout=30,
)
# Regrant all previous access after the name change.
LOG.info(
_("{guest_id}: Regranting permissions from {old} to {new}.")
.format(
guest_id=CONF.guest_id,
old=username,
new=user_attrs['name'],
)
)
self.grant_access(
context,
username=user_attrs['name'],
hostname=None,
databases=(db['_name'] for db in access)
)
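# Illustrative sketch, not part of the original module: the marker/limit
# pagination used by list_users above, reduced to plain Python values. The
# generator is advanced past the marker, up to `limit` items are taken as
# the page, and the first leftover item (if any) becomes the next marker.
def _paginate_sketch(names, limit=None, marker=None):
    results = (name for name in names)
    if marker is not None:
        for item in results:
            if item == marker:
                break
    remainder = None
    if limit is not None:
        remainder = results
        results = itertools.islice(results, limit)
    page = tuple(results)
    next_marker = next(remainder, None) if remainder is not None else None
    return page, next_marker
# For example, _paginate_sketch(['a', 'b', 'c', 'd'], limit=2, marker='a')
# returns (('b', 'c'), 'd').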
|
|
# This file was generated automatically by generate_protocols.py
from nintendo.nex import notification, rmc, common, streams
import logging
logger = logging.getLogger(__name__)
class ConnectionData(common.Structure):
def __init__(self):
super().__init__()
self.station = None
self.connection_id = None
def check_required(self, settings, version):
for field in ['station', 'connection_id']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream, version):
self.station = stream.stationurl()
self.connection_id = stream.u32()
def save(self, stream, version):
self.check_required(stream.settings, version)
stream.stationurl(self.station)
stream.u32(self.connection_id)
class NintendoLoginData(common.Structure):
def __init__(self):
super().__init__()
self.token = None
def check_required(self, settings, version):
for field in ['token']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream, version):
self.token = stream.string()
def save(self, stream, version):
self.check_required(stream.settings, version)
stream.string(self.token)
class SecureConnectionProtocol:
METHOD_REGISTER = 1
METHOD_REQUEST_CONNECTION_DATA = 2
METHOD_REQUEST_URLS = 3
METHOD_REGISTER_EX = 4
METHOD_TEST_CONNECTIVITY = 5
METHOD_REPLACE_URL = 6
METHOD_SEND_REPORT = 7
PROTOCOL_ID = 0xB
class SecureConnectionClient(SecureConnectionProtocol):
def __init__(self, client):
self.settings = client.settings
self.client = client
async def register(self, urls):
logger.info("SecureConnectionClient.register()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.list(urls, stream.stationurl)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_REGISTER, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
obj = rmc.RMCResponse()
obj.result = stream.result()
obj.connection_id = stream.u32()
obj.public_station = stream.stationurl()
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("SecureConnectionClient.register -> done")
return obj
async def request_connection_data(self, cid, pid):
logger.info("SecureConnectionClient.request_connection_data()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.u32(cid)
stream.pid(pid)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_REQUEST_CONNECTION_DATA, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
obj = rmc.RMCResponse()
obj.result = stream.bool()
obj.connection_data = stream.list(ConnectionData)
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("SecureConnectionClient.request_connection_data -> done")
return obj
async def request_urls(self, cid, pid):
logger.info("SecureConnectionClient.request_urls()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.u32(cid)
stream.pid(pid)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_REQUEST_URLS, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
obj = rmc.RMCResponse()
obj.result = stream.bool()
obj.urls = stream.list(stream.stationurl)
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("SecureConnectionClient.request_urls -> done")
return obj
async def register_ex(self, urls, login_data):
logger.info("SecureConnectionClient.register_ex()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.list(urls, stream.stationurl)
stream.anydata(login_data)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_REGISTER_EX, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
obj = rmc.RMCResponse()
obj.result = stream.result()
obj.connection_id = stream.u32()
obj.public_station = stream.stationurl()
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("SecureConnectionClient.register_ex -> done")
return obj
async def test_connectivity(self):
logger.info("SecureConnectionClient.test_connectivity()")
#--- request ---
stream = streams.StreamOut(self.settings)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_TEST_CONNECTIVITY, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("SecureConnectionClient.test_connectivity -> done")
async def replace_url(self, url, new):
logger.info("SecureConnectionClient.replace_url()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.stationurl(url)
stream.stationurl(new)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_REPLACE_URL, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("SecureConnectionClient.replace_url -> done")
async def send_report(self, report_id, data):
logger.info("SecureConnectionClient.send_report()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.u32(report_id)
stream.qbuffer(data)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_SEND_REPORT, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("SecureConnectionClient.send_report -> done")
class SecureConnectionServer(SecureConnectionProtocol):
def __init__(self):
self.methods = {
self.METHOD_REGISTER: self.handle_register,
self.METHOD_REQUEST_CONNECTION_DATA: self.handle_request_connection_data,
self.METHOD_REQUEST_URLS: self.handle_request_urls,
self.METHOD_REGISTER_EX: self.handle_register_ex,
self.METHOD_TEST_CONNECTIVITY: self.handle_test_connectivity,
self.METHOD_REPLACE_URL: self.handle_replace_url,
self.METHOD_SEND_REPORT: self.handle_send_report,
}
async def logout(self, client):
pass
async def handle(self, client, method_id, input, output):
if method_id in self.methods:
await self.methods[method_id](client, input, output)
else:
logger.warning("Unknown method called on SecureConnectionServer: %i", method_id)
raise common.RMCError("Core::NotImplemented")
async def handle_register(self, client, input, output):
logger.info("SecureConnectionServer.register()")
#--- request ---
urls = input.list(input.stationurl)
response = await self.register(client, urls)
#--- response ---
if not isinstance(response, rmc.RMCResponse):
raise RuntimeError("Expected RMCResponse, got %s" %response.__class__.__name__)
for field in ['result', 'connection_id', 'public_station']:
if not hasattr(response, field):
raise RuntimeError("Missing field in RMCResponse: %s" %field)
output.result(response.result)
output.u32(response.connection_id)
output.stationurl(response.public_station)
async def handle_request_connection_data(self, client, input, output):
logger.info("SecureConnectionServer.request_connection_data()")
#--- request ---
cid = input.u32()
pid = input.pid()
response = await self.request_connection_data(client, cid, pid)
#--- response ---
if not isinstance(response, rmc.RMCResponse):
raise RuntimeError("Expected RMCResponse, got %s" %response.__class__.__name__)
for field in ['result', 'connection_data']:
if not hasattr(response, field):
raise RuntimeError("Missing field in RMCResponse: %s" %field)
output.bool(response.result)
output.list(response.connection_data, output.add)
async def handle_request_urls(self, client, input, output):
logger.info("SecureConnectionServer.request_urls()")
#--- request ---
cid = input.u32()
pid = input.pid()
response = await self.request_urls(client, cid, pid)
#--- response ---
if not isinstance(response, rmc.RMCResponse):
raise RuntimeError("Expected RMCResponse, got %s" %response.__class__.__name__)
for field in ['result', 'urls']:
if not hasattr(response, field):
raise RuntimeError("Missing field in RMCResponse: %s" %field)
output.bool(response.result)
output.list(response.urls, output.stationurl)
async def handle_register_ex(self, client, input, output):
logger.info("SecureConnectionServer.register_ex()")
#--- request ---
urls = input.list(input.stationurl)
login_data = input.anydata()
response = await self.register_ex(client, urls, login_data)
#--- response ---
if not isinstance(response, rmc.RMCResponse):
raise RuntimeError("Expected RMCResponse, got %s" %response.__class__.__name__)
for field in ['result', 'connection_id', 'public_station']:
if not hasattr(response, field):
raise RuntimeError("Missing field in RMCResponse: %s" %field)
output.result(response.result)
output.u32(response.connection_id)
output.stationurl(response.public_station)
async def handle_test_connectivity(self, client, input, output):
logger.info("SecureConnectionServer.test_connectivity()")
#--- request ---
await self.test_connectivity(client)
async def handle_replace_url(self, client, input, output):
logger.info("SecureConnectionServer.replace_url()")
#--- request ---
url = input.stationurl()
new = input.stationurl()
await self.replace_url(client, url, new)
async def handle_send_report(self, client, input, output):
logger.info("SecureConnectionServer.send_report()")
#--- request ---
report_id = input.u32()
data = input.qbuffer()
await self.send_report(client, report_id, data)
async def register(self, *args):
logger.warning("SecureConnectionServer.register not implemented")
raise common.RMCError("Core::NotImplemented")
async def request_connection_data(self, *args):
logger.warning("SecureConnectionServer.request_connection_data not implemented")
raise common.RMCError("Core::NotImplemented")
async def request_urls(self, *args):
logger.warning("SecureConnectionServer.request_urls not implemented")
raise common.RMCError("Core::NotImplemented")
async def register_ex(self, *args):
logger.warning("SecureConnectionServer.register_ex not implemented")
raise common.RMCError("Core::NotImplemented")
async def test_connectivity(self, *args):
logger.warning("SecureConnectionServer.test_connectivity not implemented")
raise common.RMCError("Core::NotImplemented")
async def replace_url(self, *args):
logger.warning("SecureConnectionServer.replace_url not implemented")
raise common.RMCError("Core::NotImplemented")
async def send_report(self, *args):
logger.warning("SecureConnectionServer.send_report not implemented")
raise common.RMCError("Core::NotImplemented")
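# Illustrative sketch, not part of the generated module: a concrete server is
# expected to subclass SecureConnectionServer and override the coroutines
# that raise Core::NotImplemented above. handle_register, for instance,
# expects an rmc.RMCResponse carrying 'result', 'connection_id' and
# 'public_station' fields. The success-result constructor and the choice of
# public station below are assumptions for illustration only.
class ExampleSecureConnectionServer(SecureConnectionServer):
    async def register(self, client, urls):
        logger.info("Registering %i station urls", len(urls))
        response = rmc.RMCResponse()
        # Assumed helper; adjust to whatever your nintendo.nex version provides.
        response.result = common.Result.success()
        response.connection_id = 1
        response.public_station = urls[0] if urls else None  # placeholder
        return response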
|
|
"""
Module containing MPG Ranch nocturnal flight call (NFC) detector.
The detector looks for NFCs in a single audio input channel by scoring
a sequence of input records, producing a clip when the score rises above
a threshold. The input records typically overlap. For each input record,
the detector computes a spectrogram and applies a convolutional neural
network to the spectrogram to obtain a score.
The `TseepDetector` and `ThrushDetector` classes of this module are
configured to detect tseep and thrush NFCs, respectively.
The detectors of this module use the classifiers of the
`vesper.mpg_ranch.nfc_coarse_classifier_4_1` package for
distinguishing audio segments that contain NFCs from segments that
do not.
When run on 17 nights of recordings made in Ithaca, NY from 2021-04-03
through 2021-04-19 the detectors of this module produced the same
clips as those produced by the corresponding detectors of the
`vesper.mpg_ranch.nfc_detector_1_0` module. The detectors of this
module were run with TensorFlow 2.5.0rc1 and the detectors of the
other module with TensorFlow 1.15.5. Each of the thrush detectors
produced 12094 clips and each of the tseep detectors produced 5476
clips. The clips produced by corresponding detectors had exactly the
same start indices and lengths, and the scores (on a scale of 0 to 100)
of the clips of each matching pair differed by less than .001.
"""
import logging
# import time
import numpy as np
import tensorflow as tf
from vesper.util.detection_score_file_writer import DetectionScoreFileWriter
from vesper.util.sample_buffer import SampleBuffer
from vesper.util.settings import Settings
import vesper.mpg_ranch.nfc_coarse_classifier_4_1.classifier_utils \
as classifier_utils
import vesper.mpg_ranch.nfc_coarse_classifier_4_1.dataset_utils \
as dataset_utils
import vesper.signal.resampling_utils as resampling_utils
import vesper.util.open_mp_utils as open_mp_utils
import vesper.util.signal_utils as signal_utils
# TODO: Consider specifying threshold on a scale from 0 to 100 rather
# than on a scale from 0 to 1, since that's how scores are presented
# in the UI.
_TSEEP_SETTINGS = Settings(
clip_type='Tseep',
input_chunk_size=3600,
hop_size=50,
threshold=.41,
initial_clip_padding=.1,
clip_duration=.4
)
_THRUSH_SETTINGS = Settings(
clip_type='Thrush',
input_chunk_size=3600,
hop_size=50,
threshold=.70,
initial_clip_padding=.2,
clip_duration=.6
)
_DETECTOR_SAMPLE_RATE = 24000
# Constants controlling detection score output. The output is written to
# a stereo audio file with detector audio input samples in one channel
# and detection scores in the other. It is useful for detector debugging,
# but should be disabled in production.
_SCORE_OUTPUT_ENABLED = False
_SCORE_FILE_PATH_FORMAT = '/Users/harold/Desktop/{} Detector Scores.wav'
_SCORE_OUTPUT_START_OFFSET = 3600 # seconds
_SCORE_OUTPUT_DURATION = 1000 # seconds
_SCORE_SCALE_FACTOR = 10000
class _Detector:
"""
MPG Ranch NFC detector.
An instance of this class operates on a single audio channel. It has a
`detect` method that takes a NumPy array of samples. The method can be
called repeatedly with consecutive sample arrays. The `complete_detection`
method should be called after the final call to the `detect` method.
During detection, each time the detector detects a clip it notifies
a listener by invoking the listener's `process_clip` method. The
`process_clip` method must accept two arguments, the start index and
length of the detected clip.
See the `_TSEEP_SETTINGS` and `_THRUSH_SETTINGS` objects above for
settings that make a `_Detector` detect higher-frequency and
lower-frequency NFCs, respectively, using the MPG Ranch tseep and
thrush coarse classifiers. The `TseepDetector` and `ThrushDetector`
classes of this module subclass the `_Detector` class with fixed
settings, namely `_TSEEP_SETTINGS` and `_THRUSH_SETTINGS`, respectively.
"""
def __init__(
self, settings, input_sample_rate, listener,
extra_thresholds=None):
open_mp_utils.work_around_multiple_copies_issue()
# Suppress TensorFlow INFO and DEBUG log messages.
logging.getLogger('tensorflow').setLevel(logging.WARN)
self._settings = settings
self._input_sample_rate = input_sample_rate
self._listener = listener
s2f = signal_utils.seconds_to_frames
s = self._settings
fs = self._input_sample_rate
self._input_buffer = None
self._input_chunk_size = s2f(s.input_chunk_size, fs)
self._thresholds = self._get_thresholds(extra_thresholds)
self._clip_start_offset = -s2f(s.initial_clip_padding, fs)
self._clip_length = s2f(s.clip_duration, fs)
self._input_chunk_start_index = 0
self._classifier_settings = self._load_classifier_settings()
self._model = self._load_model()
s = self._classifier_settings
if s.waveform_sample_rate != _DETECTOR_SAMPLE_RATE:
raise ValueError((
'Classifier neural network sample rate is {} Hz rather '
'than the expected {} Hz.').format(
s.waveform_sample_rate, _DETECTOR_SAMPLE_RATE))
fs = s.waveform_sample_rate
self._classifier_sample_rate = fs
self._classifier_waveform_length = s2f(s.waveform_duration, fs)
fraction = self._settings.hop_size / 100
self._hop_size = s2f(fraction * s.waveform_duration, fs)
if _SCORE_OUTPUT_ENABLED:
file_path = _SCORE_FILE_PATH_FORMAT.format(settings.clip_type)
self._score_file_writer = DetectionScoreFileWriter(
file_path, self._input_sample_rate, _SCORE_SCALE_FACTOR,
self._hop_size, _SCORE_OUTPUT_START_OFFSET,
_SCORE_OUTPUT_DURATION)
# settings = self._classifier_settings.__dict__
# names = sorted(settings.keys())
# for name in names:
# print('{}: {}'.format(name, settings[name]))
@property
def settings(self):
return self._settings
@property
def input_sample_rate(self):
return self._input_sample_rate
@property
def listener(self):
return self._listener
def _get_thresholds(self, extra_thresholds):
thresholds = set([self._settings.threshold])
if extra_thresholds is not None:
thresholds |= set(extra_thresholds)
return sorted(thresholds)
def _load_classifier_settings(self):
s = self._settings
path = classifier_utils.get_settings_file_path(s.clip_type)
logging.info('Loading classifier settings from "{}"...'.format(path))
return Settings.create_from_yaml_file(path)
def _load_model(self):
s = self._settings
path = classifier_utils.get_keras_model_file_path(s.clip_type)
logging.info(f'Loading classifier model from "{path}"...')
return tf.keras.models.load_model(path)
def detect(self, samples):
if self._input_buffer is None:
self._input_buffer = SampleBuffer(samples.dtype)
self._input_buffer.write(samples)
self._process_input_chunks()
def _process_input_chunks(self, process_all_samples=False):
# Process as many chunks of input samples of size
# `self._input_chunk_size` as possible.
while len(self._input_buffer) >= self._input_chunk_size:
chunk = self._input_buffer.read(self._input_chunk_size)
self._process_input_chunk(chunk)
# If indicated, process any remaining input samples as one chunk.
# The size of the chunk will differ from `self._input_chunk_size`.
if process_all_samples and len(self._input_buffer) != 0:
chunk = self._input_buffer.read()
self._process_input_chunk(chunk)
def _process_input_chunk(self, samples):
input_length = len(samples)
if self._classifier_sample_rate != self._input_sample_rate:
# need to resample input
# When the input sample rate is 22050 Hz or 44100 Hz,
# we resample as though it were 22000 Hz or 44000 Hz,
# respectively, resulting in an actual resampled rate of
# about 24055 Hz rather than 24000 Hz. This allows us to
# resample much faster, and has little or no effect on the
# clips [NEED TO SHOW THIS] output by the detector, since
# the change to the resampled rate is small (only about a
# quarter of a percent), and the detector is fairly
# insensitive to small changes in the frequency and duration
# of NFCs. We account for such sample rate substitutions
# when computing the start index in the input signal of a
# detected clip in the `_notify_listener_of_clips` method,
# below.
#
# The lack of rigor inherent in this trick will always make
# the processing of 22050 Hz and 44100 Hz input a little
# questionable. In the future, I hope to obviate the trick by
# implementing faster but proper resampling of 22050 Hz and
# 44100 Hz input.
if self._input_sample_rate == 22050:
self._purported_input_sample_rate = 22000
elif self._input_sample_rate == 44100:
self._purported_input_sample_rate = 44000
else:
self._purported_input_sample_rate = self._input_sample_rate
# start_time = time.time()
samples = resampling_utils.resample_to_24000_hz(
samples, self._purported_input_sample_rate)
# processing_time = time.time() - start_time
# input_duration = input_length / self._input_sample_rate
# rate = input_duration / processing_time
# print((
# 'Resampled {:.1f} seconds of input in {:.1f} seconds, '
# 'or {:.1f} times faster than real time.').format(
# input_duration, processing_time, rate))
else:
# don't need to resample input
self._purported_input_sample_rate = self._input_sample_rate
self._waveforms = _get_analysis_records(
samples, self._classifier_waveform_length, self._hop_size)
# print('Scoring chunk waveforms...')
# start_time = time.time()
s = self._classifier_settings
dataset = \
dataset_utils.create_spectrogram_dataset_from_waveforms_array(
self._waveforms, dataset_utils.DATASET_MODE_INFERENCE, s,
batch_size=64, feature_name=s.model_input_name)
scores = self._model.predict(dataset).flatten()
# elapsed_time = time.time() - start_time
# num_waveforms = self._waveforms.shape[0]
# rate = num_waveforms / elapsed_time
# print((
# 'Scored {} waveforms in {:.1f} seconds, a rate of {:.1f} '
# 'waveforms per second.').format(
# num_waveforms, elapsed_time, rate))
if _SCORE_OUTPUT_ENABLED:
self._score_file_writer.write(samples, scores)
for threshold in self._thresholds:
peak_indices = signal_utils.find_peaks(scores, threshold)
peak_scores = scores[peak_indices]
self._notify_listener_of_clips(
peak_indices, peak_scores, input_length, threshold)
self._input_chunk_start_index += input_length
def _notify_listener_of_clips(
self, peak_indices, peak_scores, input_length, threshold):
# print('Clips:')
start_offset = self._input_chunk_start_index + self._clip_start_offset
peak_indices *= self._hop_size
for i, score in zip(peak_indices, peak_scores):
# Convert classification index to input index, accounting for
# any difference between classification sample rate and input
# rate.
f = self._input_sample_rate / self._purported_input_sample_rate
classification_sample_rate = f * self._classifier_sample_rate
t = signal_utils.get_duration(i, classification_sample_rate)
i = signal_utils.seconds_to_frames(t, self._input_sample_rate)
clip_start_index = i + start_offset
clip_end_index = clip_start_index + self._clip_length
chunk_end_index = self._input_chunk_start_index + input_length
if clip_start_index < 0:
logging.warning(
'Rejected clip that started before beginning of '
'recording.')
elif clip_end_index > chunk_end_index:
# clip might extend past end of recording, since it extends
# past the end of this chunk (we do not know whether or
# not the current chunk is the last)
logging.warning(
'Rejected clip that ended after end of recording chunk.')
else:
# all clip samples are in the recording interval extending
# from the beginning of the recording to the end of the
# current chunk
# print(
# ' {} {}'.format(clip_start_index, self._clip_length))
annotations = {'Detector Score': 100 * score}
self._listener.process_clip(
clip_start_index, self._clip_length, threshold,
annotations)
def complete_detection(self):
"""
Completes detection after the `detect` method has been called
for all input.
"""
self._process_input_chunks(process_all_samples=True)
self._listener.complete_processing()
if _SCORE_OUTPUT_ENABLED:
self._score_file_writer.close()
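# Illustrative sketch, not part of the original module: the arithmetic behind
# the "purported" sample rate substitution in `_process_input_chunk` and the
# index conversion in `_notify_listener_of_clips`. Treating 22050 Hz input as
# 22000 Hz makes the effective resampled rate 22050 / 22000 * 24000, or about
# 24054.5 Hz, roughly a quarter of a percent above the nominal 24000 Hz
# detector rate.
def _purported_rate_sketch():
    input_rate = 22050
    purported_rate = 22000
    f = input_rate / purported_rate                    # ~1.00227
    classification_rate = f * _DETECTOR_SAMPLE_RATE    # ~24054.5 Hz
    relative_error = classification_rate / _DETECTOR_SAMPLE_RATE - 1
    assert abs(relative_error) < 0.0025                # about 0.23 percent
    return classification_rate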
# TODO: The following two functions were copied from
# vesper.util.time_frequency_analysis_utils. They should probably both
# be public, and in a more general-purpose module.
def _get_analysis_records(samples, record_size, hop_size):
"""
Creates a sequence of hopped sample records from the specified samples.
This method uses a NumPy array stride trick to create the desired
sequence as a view of the input samples that can be created at very
little cost. The caveat is that the view should only be read from,
and never written to, since when the hop size is less than the
record size the view's records overlap in memory.
The trick is from the `_fft_helper` function of the
`scipy.signal.spectral` module of SciPy.
"""
# Get result shape.
num_samples = samples.shape[-1]
num_vectors = _get_num_analysis_records(num_samples, record_size, hop_size)
shape = samples.shape[:-1] + (num_vectors, record_size)
# Get result strides.
stride = samples.strides[-1]
strides = samples.strides[:-1] + (hop_size * stride, stride)
return np.lib.stride_tricks.as_strided(samples, shape, strides)
def _get_num_analysis_records(num_samples, record_size, hop_size):
if record_size <= 0:
raise ValueError('Record size must be positive.')
elif hop_size <= 0:
raise ValueError('Hop size must be positive.')
elif hop_size > record_size:
raise ValueError('Hop size must not exceed record size.')
if num_samples < record_size:
# not enough samples for any records
return 0
else:
# have enough samples for at least one record
overlap = record_size - hop_size
return (num_samples - overlap) // hop_size
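# Illustrative sketch, not part of the original module: what
# _get_analysis_records produces for a small input. With a record size of 4
# and a hop size of 2, ten samples yield four overlapping records, and the
# records are strided views into the original array rather than copies.
def _analysis_records_sketch():
    samples = np.arange(10)
    records = _get_analysis_records(samples, 4, 2)
    assert records.shape == (4, 4)
    assert np.array_equal(records[0], [0, 1, 2, 3])
    assert np.array_equal(records[1], [2, 3, 4, 5])
    return records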
class TseepDetector(_Detector):
extension_name = 'MPG Ranch Tseep Detector 1.1'
def __init__(self, sample_rate, listener, extra_thresholds=None):
super().__init__(
_TSEEP_SETTINGS, sample_rate, listener, extra_thresholds)
def _tseep_settings(threshold, hop_size=50):
return Settings(
_TSEEP_SETTINGS,
threshold=threshold / 100,
hop_size=hop_size)
def _thrush_settings(threshold, hop_size=50):
return Settings(
_THRUSH_SETTINGS,
threshold=threshold / 100,
hop_size=hop_size)
class TseepDetector90(_Detector):
extension_name = 'MPG Ranch Tseep Detector 1.1 90'
def __init__(self, sample_rate, listener, extra_thresholds=None):
settings = _tseep_settings(90)
super().__init__(settings, sample_rate, listener, extra_thresholds)
class TseepDetector80(_Detector):
extension_name = 'MPG Ranch Tseep Detector 1.1 80'
def __init__(self, sample_rate, listener, extra_thresholds=None):
settings = _tseep_settings(80)
super().__init__(settings, sample_rate, listener, extra_thresholds)
class TseepDetector70(_Detector):
extension_name = 'MPG Ranch Tseep Detector 1.1 70'
def __init__(self, sample_rate, listener, extra_thresholds=None):
settings = _tseep_settings(70)
super().__init__(settings, sample_rate, listener, extra_thresholds)
class TseepDetector60(_Detector):
extension_name = 'MPG Ranch Tseep Detector 1.1 60'
def __init__(self, sample_rate, listener, extra_thresholds=None):
settings = _tseep_settings(60)
super().__init__(settings, sample_rate, listener, extra_thresholds)
class TseepDetector60_25(_Detector):
extension_name = 'MPG Ranch Tseep Detector 1.1 60 25'
def __init__(self, sample_rate, listener, extra_thresholds=None):
settings = _tseep_settings(60, 25)
super().__init__(settings, sample_rate, listener, extra_thresholds)
class TseepDetector60_12(_Detector):
extension_name = 'MPG Ranch Tseep Detector 1.1 60 12.5'
def __init__(self, sample_rate, listener, extra_thresholds=None):
settings = _tseep_settings(60, 12.5)
super().__init__(settings, sample_rate, listener, extra_thresholds)
class TseepDetector50(_Detector):
extension_name = 'MPG Ranch Tseep Detector 1.1 50'
def __init__(self, sample_rate, listener, extra_thresholds=None):
settings = _tseep_settings(50)
super().__init__(settings, sample_rate, listener, extra_thresholds)
class TseepDetector40(_Detector):
extension_name = 'MPG Ranch Tseep Detector 1.1 40'
def __init__(self, sample_rate, listener, extra_thresholds=None):
settings = _tseep_settings(40)
super().__init__(settings, sample_rate, listener, extra_thresholds)
class TseepDetector30(_Detector):
extension_name = 'MPG Ranch Tseep Detector 1.1 30'
def __init__(self, sample_rate, listener, extra_thresholds=None):
settings = _tseep_settings(30)
super().__init__(settings, sample_rate, listener, extra_thresholds)
class TseepDetector20(_Detector):
extension_name = 'MPG Ranch Tseep Detector 1.1 20'
def __init__(self, sample_rate, listener, extra_thresholds=None):
settings = _tseep_settings(20)
super().__init__(settings, sample_rate, listener, extra_thresholds)
class ThrushDetector(_Detector):
extension_name = 'MPG Ranch Thrush Detector 1.1'
def __init__(self, sample_rate, listener, extra_thresholds=None):
super().__init__(
_THRUSH_SETTINGS, sample_rate, listener, extra_thresholds)
class ThrushDetector90(_Detector):
extension_name = 'MPG Ranch Thrush Detector 1.1 90'
def __init__(self, sample_rate, listener, extra_thresholds=None):
settings = _thrush_settings(90)
super().__init__(settings, sample_rate, listener, extra_thresholds)
class ThrushDetector80(_Detector):
extension_name = 'MPG Ranch Thrush Detector 1.1 80'
def __init__(self, sample_rate, listener, extra_thresholds=None):
settings = _thrush_settings(80)
super().__init__(settings, sample_rate, listener, extra_thresholds)
class ThrushDetector70(_Detector):
extension_name = 'MPG Ranch Thrush Detector 1.1 70'
def __init__(self, sample_rate, listener, extra_thresholds=None):
settings = _thrush_settings(70)
super().__init__(settings, sample_rate, listener, extra_thresholds)
class ThrushDetector70_25(_Detector):
extension_name = 'MPG Ranch Thrush Detector 1.1 70 25'
def __init__(self, sample_rate, listener, extra_thresholds=None):
settings = _thrush_settings(70, 25)
super().__init__(settings, sample_rate, listener, extra_thresholds)
class ThrushDetector70_12(_Detector):
extension_name = 'MPG Ranch Thrush Detector 1.1 70 12.5'
def __init__(self, sample_rate, listener, extra_thresholds=None):
settings = _thrush_settings(70, 12.5)
super().__init__(settings, sample_rate, listener, extra_thresholds)
class ThrushDetector60(_Detector):
extension_name = 'MPG Ranch Thrush Detector 1.1 60'
def __init__(self, sample_rate, listener, extra_thresholds=None):
settings = _thrush_settings(60)
super().__init__(settings, sample_rate, listener, extra_thresholds)
class ThrushDetector50(_Detector):
extension_name = 'MPG Ranch Thrush Detector 1.1 50'
def __init__(self, sample_rate, listener, extra_thresholds=None):
settings = _thrush_settings(50)
super().__init__(settings, sample_rate, listener, extra_thresholds)
class ThrushDetector40(_Detector):
extension_name = 'MPG Ranch Thrush Detector 1.1 40'
def __init__(self, sample_rate, listener, extra_thresholds=None):
settings = _thrush_settings(40)
super().__init__(settings, sample_rate, listener, extra_thresholds)
class ThrushDetector30(_Detector):
extension_name = 'MPG Ranch Thrush Detector 1.1 30'
def __init__(self, sample_rate, listener, extra_thresholds=None):
settings = _thrush_settings(30)
super().__init__(settings, sample_rate, listener, extra_thresholds)
class ThrushDetector20(_Detector):
extension_name = 'MPG Ranch Thrush Detector 1.1 20'
def __init__(self, sample_rate, listener, extra_thresholds=None):
settings = _thrush_settings(20)
super().__init__(settings, sample_rate, listener, extra_thresholds)
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import pytest
import matplotlib.pyplot as plt
import numpy as np
from cgpm.regressions.linreg import LinearRegression
from cgpm.utils import config as cu
from cgpm.utils import general as gu
from cgpm.utils import test as tu
CCTYPES, DISTARGS = cu.parse_distargs([
'normal',
'categorical(k=4)',
'lognormal',
'poisson',
'bernoulli',
'exponential',
'geometric',
'vonmises'])
D, Zv, Zc = tu.gen_data_table(
50, [1], [[.33, .33, .34]], CCTYPES, DISTARGS, [.8]*len(CCTYPES),
rng=gu.gen_rng(0))
CCTYPES = CCTYPES[1:]
CCARGS = DISTARGS[1:]
OUTPUTS = [0]
INPUTS = range(1, len(CCTYPES)+1)
CCARGS[CCTYPES.index('bernoulli')] = {'k':2}
D = D.T
def test_incorporate():
linreg = LinearRegression(
OUTPUTS, INPUTS,
distargs={'inputs':{'stattypes': CCTYPES, 'statargs': CCARGS}},
rng=gu.gen_rng(0))
# Incorporate first 20 rows.
for rowid, row in enumerate(D[:20]):
observation = {0: row[0]}
inputs = {i:row[i] for i in linreg.inputs}
linreg.incorporate(rowid, observation, inputs)
# Unincorporating row 20 should raise.
with pytest.raises(ValueError):
linreg.unincorporate(20)
# Unincorporate all rows.
for rowid in xrange(20):
linreg.unincorporate(rowid)
# Unincorporating row 0 should raise.
with pytest.raises(ValueError):
linreg.unincorporate(0)
# Incorporating with wrong covariate dimensions should raise.
with pytest.raises(ValueError):
observation = {0: D[0,0]}
inputs = {i:v for (i, v) in enumerate(D[0])}
linreg.incorporate(0, observation, inputs)
# Incorporating with missing observation should raise.
with pytest.raises(ValueError):
observation = {0: None}
inputs = {i:v for (i, v) in enumerate(D[0])}
linreg.incorporate(0, observation, inputs)
# Incorporating with wrong observation should raise.
with pytest.raises(ValueError):
observation = {1: 2}
inputs = {i:v for (i, v) in enumerate(D[0])}
linreg.incorporate(0, observation, inputs)
# Incorporate some more rows.
for rowid, row in enumerate(D[:10]):
observation = {0: row[0]}
inputs = {i:row[i] for i in linreg.inputs}
linreg.incorporate(rowid, observation, inputs)
def test_logpdf_score():
linreg = LinearRegression(
OUTPUTS, INPUTS,
distargs={'inputs':{'stattypes': CCTYPES, 'statargs': CCARGS}},
rng=gu.gen_rng(0))
for rowid, row in enumerate(D[:10]):
observation = {0: row[0]}
inputs = {i:row[i] for i in linreg.inputs}
linreg.incorporate(rowid, observation, inputs)
linreg.transition_hypers(N=10)
assert linreg.logpdf_score() < 0
def test_logpdf_predictive():
linreg = LinearRegression(
OUTPUTS, INPUTS,
distargs={'inputs':{'stattypes': CCTYPES, 'statargs': CCARGS}},
rng=gu.gen_rng(0))
Dx0 = D[D[:,1]==0]
Dx1 = D[D[:,1]==1]
Dx2 = D[D[:,1]==2]
Dx3 = D[D[:,1]==3]
for i, row in enumerate(Dx0[1:]):
linreg.incorporate(i, {0: row[0]}, {i: row[i] for i in linreg.inputs})
linreg.transition_hypers(N=10)
# Ensure can compute predictive for seen class 0.
linreg.logpdf(None, {0: Dx0[0,0]},
inputs={i: Dx0[0,i] for i in linreg.inputs})
# Ensure can compute predictive for unseen class 1.
linreg.logpdf(None, {0: Dx1[0,0]},
inputs={i: Dx1[0,i] for i in linreg.inputs})
# Ensure can compute predictive for unseen class 2.
linreg.logpdf(None, {0: Dx2[0,0]},
inputs={i: Dx2[0,i] for i in linreg.inputs})
# Ensure can compute predictive for unseen class 3.
linreg.logpdf(None, {0: Dx3[0,0]},
inputs={i: Dx3[0,i] for i in linreg.inputs})
# Ensure can compute predictive for nan.
with pytest.raises(ValueError):
linreg.logpdf(None, {0: np.nan},
inputs={i: Dx0[0,i] for i in linreg.inputs})
# Ensure can compute predictive for missing targets.
with pytest.raises(ValueError):
linreg.logpdf(None, {7: 10},
inputs={i: Dx0[0,i] for i in linreg.inputs})
def test_simulate():
linreg = LinearRegression(
OUTPUTS, INPUTS,
distargs={'inputs':{'stattypes': CCTYPES, 'statargs': CCARGS}},
rng=gu.gen_rng(0))
for rowid, row in enumerate(D[:25]):
linreg.incorporate(rowid, {0:row[0]}, {i:row[i] for i in linreg.inputs})
linreg.transition_hypers(N=10)
# Use a deserialized version for simulating.
metadata = linreg.to_metadata()
builder = getattr(
importlib.import_module(metadata['factory'][0]),
metadata['factory'][1])
linreg = builder.from_metadata(metadata, rng=gu.gen_rng(1))
_, ax = plt.subplots()
xpred, xtrue = [], []
for row in D[25:]:
xtrue.append(row[0])
inputs = {i: row[i] for i in linreg.inputs}
samples = [linreg.simulate(None, [0], None, inputs)[0]
for _i in xrange(100)]
xpred.append(samples)
xpred = np.asarray(xpred)
xmeans = np.mean(xpred, axis=1)
xlow = np.percentile(xpred, 25, axis=1)
xhigh = np.percentile(xpred, 75, axis=1)
ax.plot(range(len(xtrue)), xmeans, color='g')
ax.fill_between(range(len(xtrue)), xlow, xhigh, color='g', alpha='.3')
ax.scatter(range(len(xtrue)), xtrue, color='r')
# plt.close('all')
def test_missing_inputs():
outputs = [0]
inputs = [2, 4, 6]
distargs = {
'inputs': {
'stattypes': ['normal', 'categorical', 'categorical'],
'statargs': [None, {'k': 4}, {'k': 1}]
},
}
linreg = LinearRegression(
outputs=outputs,
inputs=inputs,
distargs=distargs,
rng=gu.gen_rng(1))
    # Incorporate invalid category 4:100. The first term is the bias, the
    # second term is {2:1}, the next four terms are the dummy code of {4:100},
    # and the last term is the code for {6:0}.
rowid = 0
with pytest.raises(ValueError):
linreg.incorporate(rowid, {0:1}, {2:1, 4:100, 6:0})
    # Incorporate invalid category 6:1. The first term is the bias, the second
    # term is {2:5}, the next four terms are the dummy code of {4:3}, and the
    # last term is the code for the invalid category {6:1}. Since linreg has a
    # wildcard category for composition with the CRP mixture, this should be
    # handled without error (the wildcard codes to 0).
rowid = 1
linreg.incorporate(rowid, {0:2}, {2:5, 4:3, 6:1})
assert linreg.data.Y[rowid] == [1, 5, 0, 0, 0, 1, 0]
    # Incorporating with input 6 missing should raise, since no dummy code can
    # be formed for {6: missing}.
rowid = 2
with pytest.raises(ValueError):
linreg.incorporate(rowid, {0:5}, {2:6, 4:0})
    # Incorporating with numerical input 2 missing should also raise.
rowid = 3
with pytest.raises(ValueError):
linreg.incorporate(rowid, {0:4}, {4:1, 6:0})
linreg.transition_hypers(N=10)
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_service import service
from oslo_utils import timeutils
import requests
import six
from six.moves.urllib import parse as urlparse
from heat.common import crypt
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.db import api as db_api
from heat.engine import api
from heat.engine import scheduler
from heat.objects import resource as resource_objects
from heat.objects import software_config as software_config_object
from heat.objects import software_deployment as software_deployment_object
from heat.rpc import api as rpc_api
LOG = logging.getLogger(__name__)
class SoftwareConfigService(service.Service):
def show_software_config(self, cnxt, config_id):
sc = software_config_object.SoftwareConfig.get_by_id(cnxt, config_id)
return api.format_software_config(sc)
def list_software_configs(self, cnxt, limit=None, marker=None,
tenant_safe=True):
scs = software_config_object.SoftwareConfig.get_all(
cnxt,
limit=limit,
marker=marker,
tenant_safe=tenant_safe)
result = [api.format_software_config(sc, detail=False) for sc in scs]
return result
def create_software_config(self, cnxt, group, name, config,
inputs, outputs, options):
sc = software_config_object.SoftwareConfig.create(cnxt, {
'group': group,
'name': name,
'config': {
'inputs': inputs,
'outputs': outputs,
'options': options,
'config': config
},
'tenant': cnxt.tenant_id})
return api.format_software_config(sc)
def delete_software_config(self, cnxt, config_id):
software_config_object.SoftwareConfig.delete(cnxt, config_id)
def list_software_deployments(self, cnxt, server_id):
all_sd = software_deployment_object.SoftwareDeployment.get_all(
cnxt, server_id)
result = [api.format_software_deployment(sd) for sd in all_sd]
return result
def metadata_software_deployments(self, cnxt, server_id):
if not server_id:
raise ValueError(_('server_id must be specified'))
all_sd = software_deployment_object.SoftwareDeployment.get_all(
cnxt, server_id)
# filter out the sds with None config
flt_sd = six.moves.filterfalse(lambda sd: sd.config is None,
all_sd)
# sort the configs by config name, to give the list of metadata a
# deterministic and controllable order.
flt_sd_s = sorted(flt_sd, key=lambda sd: sd.config.name)
result = [api.format_software_config(sd.config) for sd in flt_sd_s]
return result
@resource_objects.retry_on_conflict
def _push_metadata_software_deployments(
self, cnxt, server_id, stack_user_project_id):
rs = db_api.resource_get_by_physical_resource_id(cnxt, server_id)
if not rs:
return
deployments = self.metadata_software_deployments(cnxt, server_id)
md = rs.rsrc_metadata or {}
md['deployments'] = deployments
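        # Optimistic locking: resource_update succeeds only if atomic_key has
        # not changed; otherwise ConcurrentTransaction is raised below and the
        # retry_on_conflict decorator re-invokes this method.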
rows_updated = db_api.resource_update(
cnxt, rs.id, {'rsrc_metadata': md}, rs.atomic_key)
if not rows_updated:
action = _('deployments of server %s') % server_id
raise exception.ConcurrentTransaction(action=action)
metadata_put_url = None
metadata_queue_id = None
for rd in rs.data:
if rd.key == 'metadata_put_url':
metadata_put_url = rd.value
if rd.key == 'metadata_queue_id':
metadata_queue_id = rd.value
if metadata_put_url:
json_md = jsonutils.dumps(md)
requests.put(metadata_put_url, json_md)
if metadata_queue_id:
project = stack_user_project_id
token = self._get_user_token(cnxt, rs, project)
zaqar_plugin = cnxt.clients.client_plugin('zaqar')
zaqar = zaqar_plugin.create_for_tenant(project, token)
queue = zaqar.queue(metadata_queue_id)
queue.post({'body': md, 'ttl': zaqar_plugin.DEFAULT_TTL})
def _refresh_swift_software_deployment(self, cnxt, sd, deploy_signal_id):
container, object_name = urlparse.urlparse(
deploy_signal_id).path.split('/')[-2:]
swift_plugin = cnxt.clients.client_plugin('swift')
swift = swift_plugin.client()
try:
headers = swift.head_object(container, object_name)
except Exception as ex:
# ignore not-found, in case swift is not consistent yet
if swift_plugin.is_not_found(ex):
LOG.info(_LI('Signal object not found: %(c)s %(o)s'), {
'c': container, 'o': object_name})
return sd
raise
lm = headers.get('last-modified')
last_modified = swift_plugin.parse_last_modified(lm)
prev_last_modified = sd.updated_at
if prev_last_modified:
# assume stored as utc, convert to offset-naive datetime
prev_last_modified = prev_last_modified.replace(tzinfo=None)
if prev_last_modified and (last_modified <= prev_last_modified):
return sd
try:
(headers, obj) = swift.get_object(container, object_name)
except Exception as ex:
# ignore not-found, in case swift is not consistent yet
if swift_plugin.is_not_found(ex):
LOG.info(_LI(
'Signal object not found: %(c)s %(o)s'), {
'c': container, 'o': object_name})
return sd
raise
if obj:
self.signal_software_deployment(
cnxt, sd.id, jsonutils.loads(obj),
last_modified.isoformat())
return software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, sd.id)
def _get_user_token(self, cnxt, rs, project):
user = password = None
for rd in rs.data:
if rd.key == 'password':
password = crypt.decrypt(rd.decrypt_method, rd.value)
if rd.key == 'user_id':
user = rd.value
keystone = cnxt.clients.client('keystone')
return keystone.stack_domain_user_token(
user_id=user, project_id=project, password=password)
def _refresh_zaqar_software_deployment(self, cnxt, sd, deploy_queue_id):
rs = db_api.resource_get_by_physical_resource_id(cnxt, sd.server_id)
project = sd.stack_user_project_id
token = self._get_user_token(cnxt, rs, project)
zaqar_plugin = cnxt.clients.client_plugin('zaqar')
zaqar = zaqar_plugin.create_for_tenant(project, token)
queue = zaqar.queue(deploy_queue_id)
messages = list(queue.pop())
if messages:
self.signal_software_deployment(
cnxt, sd.id, messages[0].body, None)
return software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, sd.id)
def check_software_deployment(self, cnxt, deployment_id, timeout):
def _check():
while True:
sd = self._show_software_deployment(cnxt, deployment_id)
if sd.status != rpc_api.SOFTWARE_DEPLOYMENT_IN_PROGRESS:
return
yield
scheduler.TaskRunner(_check)(timeout=timeout)
sd = software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, deployment_id)
return api.format_software_deployment(sd)
def _show_software_deployment(self, cnxt, deployment_id):
sd = software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, deployment_id)
if sd.status == rpc_api.SOFTWARE_DEPLOYMENT_IN_PROGRESS:
c = sd.config.config
input_values = dict((i['name'], i['value']) for i in c['inputs'])
transport = input_values.get('deploy_signal_transport')
if transport == 'TEMP_URL_SIGNAL':
sd = self._refresh_swift_software_deployment(
cnxt, sd, input_values.get('deploy_signal_id'))
elif transport == 'ZAQAR_SIGNAL':
sd = self._refresh_zaqar_software_deployment(
cnxt, sd, input_values.get('deploy_queue_id'))
return sd
def show_software_deployment(self, cnxt, deployment_id):
sd = self._show_software_deployment(cnxt, deployment_id)
return api.format_software_deployment(sd)
def create_software_deployment(self, cnxt, server_id, config_id,
input_values, action, status,
status_reason, stack_user_project_id,
deployment_id=None):
if deployment_id is None:
deployment_id = str(uuid.uuid4())
sd = software_deployment_object.SoftwareDeployment.create(cnxt, {
'id': deployment_id,
'config_id': config_id,
'server_id': server_id,
'input_values': input_values,
'tenant': cnxt.tenant_id,
'stack_user_project_id': stack_user_project_id,
'action': action,
'status': status,
'status_reason': status_reason})
self._push_metadata_software_deployments(
cnxt, server_id, stack_user_project_id)
return api.format_software_deployment(sd)
def signal_software_deployment(self, cnxt, deployment_id, details,
updated_at):
if not deployment_id:
raise ValueError(_('deployment_id must be specified'))
sd = software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, deployment_id)
status = sd.status
if not status == rpc_api.SOFTWARE_DEPLOYMENT_IN_PROGRESS:
# output values are only expected when in an IN_PROGRESS state
return
details = details or {}
output_status_code = rpc_api.SOFTWARE_DEPLOYMENT_OUTPUT_STATUS_CODE
ov = sd.output_values or {}
status = None
status_reasons = {}
status_code = details.get(output_status_code)
if status_code and str(status_code) != '0':
status = rpc_api.SOFTWARE_DEPLOYMENT_FAILED
status_reasons[output_status_code] = _(
'Deployment exited with non-zero status code: %s'
) % details.get(output_status_code)
event_reason = 'deployment %s failed (%s)' % (deployment_id,
status_code)
else:
event_reason = 'deployment %s succeeded' % deployment_id
for output in sd.config.config['outputs'] or []:
out_key = output['name']
if out_key in details:
ov[out_key] = details[out_key]
if output.get('error_output', False):
status = rpc_api.SOFTWARE_DEPLOYMENT_FAILED
status_reasons[out_key] = details[out_key]
event_reason = 'deployment %s failed' % deployment_id
for out_key in rpc_api.SOFTWARE_DEPLOYMENT_OUTPUTS:
ov[out_key] = details.get(out_key)
if status == rpc_api.SOFTWARE_DEPLOYMENT_FAILED:
# build a status reason out of all of the values of outputs
# flagged as error_output
status_reasons = [' : '.join((k, six.text_type(status_reasons[k])))
for k in status_reasons]
status_reason = ', '.join(status_reasons)
else:
status = rpc_api.SOFTWARE_DEPLOYMENT_COMPLETE
status_reason = _('Outputs received')
self.update_software_deployment(
cnxt, deployment_id=deployment_id,
output_values=ov, status=status, status_reason=status_reason,
config_id=None, input_values=None, action=None,
updated_at=updated_at)
# Return a string describing the outcome of handling the signal data
return event_reason
def update_software_deployment(self, cnxt, deployment_id, config_id,
input_values, output_values, action,
status, status_reason, updated_at):
update_data = {}
if config_id:
update_data['config_id'] = config_id
if input_values:
update_data['input_values'] = input_values
if output_values:
update_data['output_values'] = output_values
if action:
update_data['action'] = action
if status:
update_data['status'] = status
if status_reason:
update_data['status_reason'] = status_reason
if updated_at:
update_data['updated_at'] = timeutils.normalize_time(
timeutils.parse_isotime(updated_at))
else:
update_data['updated_at'] = timeutils.utcnow()
sd = software_deployment_object.SoftwareDeployment.update_by_id(
cnxt, deployment_id, update_data)
# only push metadata if this update resulted in the config_id
# changing, since metadata is just a list of configs
if config_id:
self._push_metadata_software_deployments(
cnxt, sd.server_id, sd.stack_user_project_id)
return api.format_software_deployment(sd)
def delete_software_deployment(self, cnxt, deployment_id):
sd = software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, deployment_id)
software_deployment_object.SoftwareDeployment.delete(
cnxt, deployment_id)
self._push_metadata_software_deployments(
cnxt, sd.server_id, sd.stack_user_project_id)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for unit-testing Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import threading
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import layers
from tensorflow.python.keras import models
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.optimizer_v2 import adadelta as adadelta_v2
from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.python.keras.optimizer_v2 import adam as adam_v2
from tensorflow.python.keras.optimizer_v2 import adamax as adamax_v2
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.keras.optimizer_v2 import nadam as nadam_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_v2
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def get_test_data(train_samples,
test_samples,
input_shape,
num_classes,
random_seed=None):
"""Generates test data to train a model on.
Arguments:
train_samples: Integer, how many training samples to generate.
test_samples: Integer, how many test samples to generate.
input_shape: Tuple of integers, shape of the inputs.
num_classes: Integer, number of classes for the data and targets.
random_seed: Integer, random seed used by numpy to generate data.
Returns:
A tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
if random_seed is not None:
np.random.seed(random_seed)
num_sample = train_samples + test_samples
templates = 2 * num_classes * np.random.random((num_classes,) + input_shape)
y = np.random.randint(0, num_classes, size=(num_sample,))
x = np.zeros((num_sample,) + input_shape, dtype=np.float32)
for i in range(num_sample):
x[i] = templates[y[i]] + np.random.normal(loc=0, scale=1., size=input_shape)
return ((x[:train_samples], y[:train_samples]),
(x[train_samples:], y[train_samples:]))
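# Illustrative usage sketch (not part of the original module): generate a small
# synthetic classification dataset with `get_test_data`; the sample counts,
# input shape and seed below are arbitrary example values.
def _example_get_test_data():
  (x_train, y_train), (x_test, y_test) = get_test_data(
      train_samples=20,
      test_samples=10,
      input_shape=(4,),
      num_classes=3,
      random_seed=1337)
  return x_train.shape, y_train.shape, x_test.shape, y_test.shape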
@test_util.disable_cudnn_autotune
def layer_test(layer_cls, kwargs=None, input_shape=None, input_dtype=None,
input_data=None, expected_output=None,
expected_output_dtype=None, expected_output_shape=None,
validate_training=True, adapt_data=None):
"""Test routine for a layer with a single input and single output.
Arguments:
layer_cls: Layer class object.
kwargs: Optional dictionary of keyword arguments for instantiating the
layer.
input_shape: Input shape tuple.
input_dtype: Data type of the input data.
input_data: Numpy array of input data.
expected_output: Numpy array of the expected output.
expected_output_dtype: Data type expected for the output.
expected_output_shape: Shape tuple for the expected shape of the output.
validate_training: Whether to attempt to validate training on this layer.
This might be set to False for non-differentiable layers that output
string or integer values.
adapt_data: Optional data for an 'adapt' call. If None, adapt() will not
be tested for this layer. This is only relevant for PreprocessingLayers.
Returns:
The output data (Numpy array) returned by the layer, for additional
checks to be done by the calling code.
Raises:
ValueError: if `input_shape is None`.
"""
if input_data is None:
if input_shape is None:
raise ValueError('input_shape is None')
if not input_dtype:
input_dtype = 'float32'
input_data_shape = list(input_shape)
for i, e in enumerate(input_data_shape):
if e is None:
input_data_shape[i] = np.random.randint(1, 4)
input_data = 10 * np.random.random(input_data_shape)
if input_dtype[:5] == 'float':
input_data -= 0.5
input_data = input_data.astype(input_dtype)
elif input_shape is None:
input_shape = input_data.shape
if input_dtype is None:
input_dtype = input_data.dtype
if expected_output_dtype is None:
expected_output_dtype = input_dtype
# instantiation
kwargs = kwargs or {}
layer = layer_cls(**kwargs)
# Test adapt, if data was passed.
if adapt_data is not None:
layer.adapt(adapt_data)
  # test get_weights, set_weights at layer level
weights = layer.get_weights()
layer.set_weights(weights)
  # test instantiation from weights
if 'weights' in tf_inspect.getargspec(layer_cls.__init__):
kwargs['weights'] = weights
layer = layer_cls(**kwargs)
# test in functional API
x = layers.Input(shape=input_shape[1:], dtype=input_dtype)
y = layer(x)
if backend.dtype(y) != expected_output_dtype:
raise AssertionError('When testing layer %s, for input %s, found output '
'dtype=%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__, x, backend.dtype(y),
expected_output_dtype, kwargs))
def assert_shapes_equal(expected, actual):
"""Asserts that the output shape from the layer matches the actual shape."""
if len(expected) != len(actual):
raise AssertionError(
'When testing layer %s, for input %s, found output_shape='
'%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__, x, actual, expected, kwargs))
for expected_dim, actual_dim in zip(expected, actual):
if isinstance(expected_dim, tensor_shape.Dimension):
expected_dim = expected_dim.value
if isinstance(actual_dim, tensor_shape.Dimension):
actual_dim = actual_dim.value
if expected_dim is not None and expected_dim != actual_dim:
raise AssertionError(
'When testing layer %s, for input %s, found output_shape='
'%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__, x, actual, expected, kwargs))
if expected_output_shape is not None:
assert_shapes_equal(tensor_shape.TensorShape(expected_output_shape),
y.shape)
# check shape inference
model = models.Model(x, y)
computed_output_shape = tuple(
layer.compute_output_shape(
tensor_shape.TensorShape(input_shape)).as_list())
computed_output_signature = layer.compute_output_signature(
tensor_spec.TensorSpec(shape=input_shape, dtype=input_dtype))
actual_output = model.predict(input_data)
actual_output_shape = actual_output.shape
assert_shapes_equal(computed_output_shape, actual_output_shape)
assert_shapes_equal(computed_output_signature.shape, actual_output_shape)
if computed_output_signature.dtype != actual_output.dtype:
raise AssertionError(
'When testing layer %s, for input %s, found output_dtype='
'%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__, x, actual_output.dtype,
computed_output_signature.dtype, kwargs))
if expected_output is not None:
np.testing.assert_allclose(actual_output, expected_output,
rtol=1e-3, atol=1e-6)
# test serialization, weight setting at model level
model_config = model.get_config()
recovered_model = models.Model.from_config(model_config)
if model.weights:
weights = model.get_weights()
recovered_model.set_weights(weights)
output = recovered_model.predict(input_data)
np.testing.assert_allclose(output, actual_output, rtol=1e-3, atol=1e-6)
# test training mode (e.g. useful for dropout tests)
  # Rebuild the model to avoid the graph being reused between predict() and
  # train_on_batch(). See b/120160788 for more details. This should be
  # mitigated after 2.0.
if validate_training:
model = models.Model(x, layer(x))
if _thread_local_data.run_eagerly is not None:
model.compile(
'rmsprop',
'mse',
weighted_metrics=['acc'],
run_eagerly=should_run_eagerly())
else:
model.compile('rmsprop', 'mse', weighted_metrics=['acc'])
model.train_on_batch(input_data, actual_output)
# test as first layer in Sequential API
layer_config = layer.get_config()
layer_config['batch_input_shape'] = input_shape
layer = layer.__class__.from_config(layer_config)
# Test adapt, if data was passed.
if adapt_data is not None:
layer.adapt(adapt_data)
model = models.Sequential()
model.add(layers.Input(shape=input_shape[1:], dtype=input_dtype))
model.add(layer)
actual_output = model.predict(input_data)
actual_output_shape = actual_output.shape
for expected_dim, actual_dim in zip(computed_output_shape,
actual_output_shape):
if expected_dim is not None:
if expected_dim != actual_dim:
raise AssertionError(
'When testing layer %s **after deserialization**, '
'for input %s, found output_shape='
'%s but expected to find inferred shape %s.\nFull kwargs: %s' %
(layer_cls.__name__,
x,
actual_output_shape,
computed_output_shape,
kwargs))
if expected_output is not None:
np.testing.assert_allclose(actual_output, expected_output,
rtol=1e-3, atol=1e-6)
# test serialization, weight setting at model level
model_config = model.get_config()
recovered_model = models.Sequential.from_config(model_config)
if model.weights:
weights = model.get_weights()
recovered_model.set_weights(weights)
output = recovered_model.predict(input_data)
np.testing.assert_allclose(output, actual_output, rtol=1e-3, atol=1e-6)
# for further checks in the caller function
return actual_output
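# Illustrative usage sketch (not part of the original module): a typical call
# to `layer_test` from a layer's unit test, here exercising a core Dense
# layer; the kwargs and input shape are example values only.
def _example_layer_test_dense():
  return layer_test(
      layers.Dense,
      kwargs={'units': 3},
      input_shape=(2, 4))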
_thread_local_data = threading.local()
_thread_local_data.model_type = None
_thread_local_data.run_eagerly = None
_thread_local_data.saved_model_format = None
@tf_contextlib.contextmanager
def model_type_scope(value):
"""Provides a scope within which the model type to test is equal to `value`.
The model type gets restored to its original value upon exiting the scope.
Arguments:
value: model type value
Yields:
The provided value.
"""
previous_value = _thread_local_data.model_type
try:
_thread_local_data.model_type = value
yield value
finally:
# Restore model type to initial value.
_thread_local_data.model_type = previous_value
@tf_contextlib.contextmanager
def run_eagerly_scope(value):
"""Provides a scope within which we compile models to run eagerly or not.
The boolean gets restored to its original value upon exiting the scope.
Arguments:
value: Bool specifying if we should run models eagerly in the active test.
Should be True or False.
Yields:
The provided value.
"""
previous_value = _thread_local_data.run_eagerly
try:
_thread_local_data.run_eagerly = value
yield value
finally:
    # Restore run_eagerly to initial value.
_thread_local_data.run_eagerly = previous_value
def should_run_eagerly():
"""Returns whether the models we are testing should be run eagerly."""
if _thread_local_data.run_eagerly is None:
raise ValueError('Cannot call `should_run_eagerly()` outside of a '
'`run_eagerly_scope()` or `run_all_keras_modes` '
'decorator.')
return _thread_local_data.run_eagerly and context.executing_eagerly()
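# Illustrative usage sketch (not part of the original module): compile a model
# inside `run_eagerly_scope` so that `should_run_eagerly()` is well defined.
def _example_compile_with_run_eagerly(model):
  with run_eagerly_scope(True):
    model.compile('rmsprop', 'mse', run_eagerly=should_run_eagerly())
  return model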
@tf_contextlib.contextmanager
def saved_model_format_scope(value):
"""Provides a scope within which the savde model format to test is `value`.
The saved model format gets restored to its original value upon exiting the
scope.
Arguments:
value: saved model format value
Yields:
The provided value.
"""
previous_value = _thread_local_data.saved_model_format
try:
_thread_local_data.saved_model_format = value
yield value
finally:
# Restore saved model format to initial value.
_thread_local_data.saved_model_format = previous_value
def get_save_format():
if _thread_local_data.saved_model_format is None:
raise ValueError(
'Cannot call `get_save_format()` outside of a '
'`saved_model_format_scope()` or `run_with_all_saved_model_formats` '
'decorator.')
return _thread_local_data.saved_model_format
def get_model_type():
"""Gets the model type that should be tested."""
if _thread_local_data.model_type is None:
raise ValueError('Cannot call `get_model_type()` outside of a '
'`model_type_scope()` or `run_with_all_model_types` '
'decorator.')
return _thread_local_data.model_type
def get_small_sequential_mlp(num_hidden, num_classes, input_dim=None):
model = models.Sequential()
if input_dim:
model.add(layers.Dense(num_hidden, activation='relu', input_dim=input_dim))
else:
model.add(layers.Dense(num_hidden, activation='relu'))
activation = 'sigmoid' if num_classes == 1 else 'softmax'
model.add(layers.Dense(num_classes, activation=activation))
return model
def get_small_functional_mlp(num_hidden, num_classes, input_dim):
inputs = layers.Input(shape=(input_dim,))
outputs = layers.Dense(num_hidden, activation='relu')(inputs)
activation = 'sigmoid' if num_classes == 1 else 'softmax'
outputs = layers.Dense(num_classes, activation=activation)(outputs)
return models.Model(inputs, outputs)
class SmallSubclassMLP(models.Model):
"""A subclass model based small MLP."""
def __init__(self, num_hidden, num_classes, use_bn=False, use_dp=False):
super(SmallSubclassMLP, self).__init__(name='test_model')
self.use_bn = use_bn
self.use_dp = use_dp
self.layer_a = layers.Dense(num_hidden, activation='relu')
activation = 'sigmoid' if num_classes == 1 else 'softmax'
self.layer_b = layers.Dense(num_classes, activation=activation)
if self.use_dp:
self.dp = layers.Dropout(0.5)
if self.use_bn:
self.bn = layers.BatchNormalization(axis=-1)
def call(self, inputs, **kwargs):
x = self.layer_a(inputs)
if self.use_dp:
x = self.dp(x)
if self.use_bn:
x = self.bn(x)
return self.layer_b(x)
class _SmallSubclassMLPCustomBuild(models.Model):
"""A subclass model small MLP that uses a custom build method."""
def __init__(self, num_hidden, num_classes):
super(_SmallSubclassMLPCustomBuild, self).__init__()
self.layer_a = None
self.layer_b = None
self.num_hidden = num_hidden
self.num_classes = num_classes
def build(self, input_shape):
self.layer_a = layers.Dense(self.num_hidden, activation='relu')
activation = 'sigmoid' if self.num_classes == 1 else 'softmax'
self.layer_b = layers.Dense(self.num_classes, activation=activation)
def call(self, inputs, **kwargs):
x = self.layer_a(inputs)
return self.layer_b(x)
def get_small_subclass_mlp(num_hidden, num_classes):
return SmallSubclassMLP(num_hidden, num_classes)
def get_small_subclass_mlp_with_custom_build(num_hidden, num_classes):
return _SmallSubclassMLPCustomBuild(num_hidden, num_classes)
def get_small_mlp(num_hidden, num_classes, input_dim):
"""Get a small mlp of the model type specified by `get_model_type`."""
model_type = get_model_type()
if model_type == 'subclass':
return get_small_subclass_mlp(num_hidden, num_classes)
if model_type == 'subclass_custom_build':
return get_small_subclass_mlp_with_custom_build(num_hidden, num_classes)
if model_type == 'sequential':
return get_small_sequential_mlp(num_hidden, num_classes, input_dim)
if model_type == 'functional':
return get_small_functional_mlp(num_hidden, num_classes, input_dim)
raise ValueError('Unknown model type {}'.format(model_type))
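# Illustrative usage sketch (not part of the original module): `get_small_mlp`
# reads the active model type, so it must be called inside `model_type_scope`
# (or a test decorated with `run_with_all_model_types`).
def _example_get_small_mlp():
  with model_type_scope('functional'):
    return get_small_mlp(num_hidden=8, num_classes=2, input_dim=4)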
class _SubclassModel(models.Model):
"""A Keras subclass model."""
def __init__(self, model_layers, *args, **kwargs):
"""Instantiate a model.
Args:
model_layers: a list of layers to be added to the model.
*args: Model's args
**kwargs: Model's keyword args, at most one of input_tensor -> the input
tensor required for ragged/sparse input.
"""
inputs = kwargs.pop('input_tensor', None)
super(_SubclassModel, self).__init__(*args, **kwargs)
# Note that clone and build doesn't support lists of layers in subclassed
# models. Adding each layer directly here.
for i, layer in enumerate(model_layers):
setattr(self, self._layer_name_for_i(i), layer)
self.num_layers = len(model_layers)
if inputs is not None:
self._set_inputs(inputs)
def _layer_name_for_i(self, i):
return 'layer{}'.format(i)
def call(self, inputs, **kwargs):
x = inputs
for i in range(self.num_layers):
layer = getattr(self, self._layer_name_for_i(i))
x = layer(x)
return x
class _SubclassModelCustomBuild(models.Model):
"""A Keras subclass model that uses a custom build method."""
def __init__(self, layer_generating_func, *args, **kwargs):
super(_SubclassModelCustomBuild, self).__init__(*args, **kwargs)
self.all_layers = None
self._layer_generating_func = layer_generating_func
def build(self, input_shape):
model_layers = []
for layer in self._layer_generating_func():
model_layers.append(layer)
self.all_layers = model_layers
def call(self, inputs, **kwargs):
x = inputs
for layer in self.all_layers:
x = layer(x)
return x
def get_model_from_layers(model_layers,
input_shape=None,
input_dtype=None,
name=None,
input_ragged=None,
input_sparse=None):
"""Builds a model from a sequence of layers.
Args:
model_layers: The layers used to build the network.
input_shape: Shape tuple of the input or 'TensorShape' instance.
input_dtype: Datatype of the input.
name: Name for the model.
input_ragged: Boolean, whether the input data is a ragged tensor.
input_sparse: Boolean, whether the input data is a sparse tensor.
Returns:
A Keras model.
"""
model_type = get_model_type()
if model_type == 'subclass':
inputs = None
if input_ragged or input_sparse:
inputs = layers.Input(
shape=input_shape,
dtype=input_dtype,
ragged=input_ragged,
sparse=input_sparse)
return _SubclassModel(model_layers, name=name, input_tensor=inputs)
if model_type == 'subclass_custom_build':
layer_generating_func = lambda: model_layers
return _SubclassModelCustomBuild(layer_generating_func, name=name)
if model_type == 'sequential':
model = models.Sequential(name=name)
if input_shape:
model.add(
layers.InputLayer(
input_shape=input_shape,
dtype=input_dtype,
ragged=input_ragged,
sparse=input_sparse))
for layer in model_layers:
model.add(layer)
return model
if model_type == 'functional':
if not input_shape:
raise ValueError('Cannot create a functional model from layers with no '
'input shape.')
inputs = layers.Input(
shape=input_shape,
dtype=input_dtype,
ragged=input_ragged,
sparse=input_sparse)
outputs = inputs
for layer in model_layers:
outputs = layer(outputs)
return models.Model(inputs, outputs, name=name)
raise ValueError('Unknown model type {}'.format(model_type))
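# Illustrative usage sketch (not part of the original module): build the same
# stack of layers as whichever model type is active in the enclosing scope;
# the layer sizes and input shape are example values only.
def _example_get_model_from_layers():
  with model_type_scope('sequential'):
    return get_model_from_layers(
        [layers.Dense(8, activation='relu'), layers.Dense(1)],
        input_shape=(4,))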
class Bias(layers.Layer):
def build(self, input_shape):
self.bias = self.add_variable('bias', (1,), initializer='zeros')
def call(self, inputs):
return inputs + self.bias
class _MultiIOSubclassModel(models.Model):
"""Multi IO Keras subclass model."""
def __init__(self, branch_a, branch_b, shared_input_branch=None,
shared_output_branch=None, name=None):
super(_MultiIOSubclassModel, self).__init__(name=name)
self._shared_input_branch = shared_input_branch
self._branch_a = branch_a
self._branch_b = branch_b
self._shared_output_branch = shared_output_branch
def call(self, inputs, **kwargs):
if self._shared_input_branch:
for layer in self._shared_input_branch:
inputs = layer(inputs)
a = inputs
b = inputs
elif isinstance(inputs, dict):
a = inputs['input_1']
b = inputs['input_2']
else:
a, b = inputs
for layer in self._branch_a:
a = layer(a)
for layer in self._branch_b:
b = layer(b)
outs = [a, b]
if self._shared_output_branch:
for layer in self._shared_output_branch:
outs = layer(outs)
return outs
class _MultiIOSubclassModelCustomBuild(models.Model):
"""Multi IO Keras subclass model that uses a custom build method."""
def __init__(self, branch_a_func, branch_b_func,
shared_input_branch_func=None,
shared_output_branch_func=None):
super(_MultiIOSubclassModelCustomBuild, self).__init__()
self._shared_input_branch_func = shared_input_branch_func
self._branch_a_func = branch_a_func
self._branch_b_func = branch_b_func
self._shared_output_branch_func = shared_output_branch_func
self._shared_input_branch = None
self._branch_a = None
self._branch_b = None
self._shared_output_branch = None
def build(self, input_shape):
if self._shared_input_branch_func():
self._shared_input_branch = self._shared_input_branch_func()
self._branch_a = self._branch_a_func()
self._branch_b = self._branch_b_func()
if self._shared_output_branch_func():
self._shared_output_branch = self._shared_output_branch_func()
def call(self, inputs, **kwargs):
if self._shared_input_branch:
for layer in self._shared_input_branch:
inputs = layer(inputs)
a = inputs
b = inputs
else:
a, b = inputs
for layer in self._branch_a:
a = layer(a)
for layer in self._branch_b:
b = layer(b)
outs = a, b
if self._shared_output_branch:
for layer in self._shared_output_branch:
outs = layer(outs)
return outs
def get_multi_io_model(
branch_a,
branch_b,
shared_input_branch=None,
shared_output_branch=None):
"""Builds a multi-io model that contains two branches.
The produced model will be of the type specified by `get_model_type`.
To build a two-input, two-output model:
Specify a list of layers for branch a and branch b, but do not specify any
shared input branch or shared output branch. The resulting model will apply
each branch to a different input, to produce two outputs.
The first value in branch_a must be the Keras 'Input' layer for branch a,
and the first value in branch_b must be the Keras 'Input' layer for
branch b.
example usage:
```
branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()]
branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()]
model = get_multi_io_model(branch_a, branch_b)
```
To build a two-input, one-output model:
Specify a list of layers for branch a and branch b, and specify a
shared output branch. The resulting model will apply
each branch to a different input. It will then apply the shared output
branch to a tuple containing the intermediate outputs of each branch,
to produce a single output. The first layer in the shared_output_branch
must be able to merge a tuple of two tensors.
The first value in branch_a must be the Keras 'Input' layer for branch a,
and the first value in branch_b must be the Keras 'Input' layer for
branch b.
example usage:
```
input_branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()]
input_branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()]
shared_output_branch = [Concatenate(), Dense(), Dense()]
model = get_multi_io_model(input_branch_a, input_branch_b,
shared_output_branch=shared_output_branch)
```
To build a one-input, two-output model:
Specify a list of layers for branch a and branch b, and specify a
shared input branch. The resulting model will take one input, and apply
the shared input branch to it. It will then respectively apply each branch
to that intermediate result in parallel, to produce two outputs.
The first value in the shared_input_branch must be the Keras 'Input' layer
for the whole model. Branch a and branch b should not contain any Input
layers.
example usage:
```
shared_input_branch = [Input(shape=(2,), name='in'), Dense(), Dense()]
output_branch_a = [Dense(), Dense()]
output_branch_b = [Dense(), Dense()]
  model = get_multi_io_model(output_branch_a, output_branch_b,
shared_input_branch=shared_input_branch)
```
Args:
branch_a: A sequence of layers for branch a of the model.
branch_b: A sequence of layers for branch b of the model.
shared_input_branch: An optional sequence of layers to apply to a single
input, before applying both branches to that intermediate result. If set,
the model will take only one input instead of two. Defaults to None.
shared_output_branch: An optional sequence of layers to merge the
intermediate results produced by branch a and branch b. If set,
the model will produce only one output instead of two. Defaults to None.
Returns:
A multi-io model of the type specified by `get_model_type`, specified
by the different branches.
"""
# Extract the functional inputs from the layer lists
if shared_input_branch:
inputs = shared_input_branch[0]
shared_input_branch = shared_input_branch[1:]
else:
inputs = branch_a[0], branch_b[0]
branch_a = branch_a[1:]
branch_b = branch_b[1:]
model_type = get_model_type()
if model_type == 'subclass':
return _MultiIOSubclassModel(branch_a, branch_b, shared_input_branch,
shared_output_branch)
if model_type == 'subclass_custom_build':
return _MultiIOSubclassModelCustomBuild((lambda: branch_a),
(lambda: branch_b),
(lambda: shared_input_branch),
(lambda: shared_output_branch))
if model_type == 'sequential':
raise ValueError('Cannot use `get_multi_io_model` to construct '
'sequential models')
if model_type == 'functional':
if shared_input_branch:
a_and_b = inputs
for layer in shared_input_branch:
a_and_b = layer(a_and_b)
a = a_and_b
b = a_and_b
else:
a, b = inputs
for layer in branch_a:
a = layer(a)
for layer in branch_b:
b = layer(b)
outputs = a, b
if shared_output_branch:
for layer in shared_output_branch:
outputs = layer(outputs)
return models.Model(inputs, outputs)
raise ValueError('Unknown model type {}'.format(model_type))
_V2_OPTIMIZER_MAP = {
'adadelta': adadelta_v2.Adadelta,
'adagrad': adagrad_v2.Adagrad,
'adam': adam_v2.Adam,
'adamax': adamax_v2.Adamax,
'nadam': nadam_v2.Nadam,
'rmsprop': rmsprop_v2.RMSprop,
'sgd': gradient_descent_v2.SGD
}
def get_v2_optimizer(name, **kwargs):
"""Get the v2 optimizer requested.
  This is only necessary until v2 optimizers are the default, as we are
  testing in Eager, and Eager + v1 optimizers fail tests. When we are in v2,
  the strings alone should be sufficient, and this mapping can theoretically
  be removed.
Args:
name: string name of Keras v2 optimizer.
**kwargs: any kwargs to pass to the optimizer constructor.
Returns:
Initialized Keras v2 optimizer.
Raises:
ValueError: if an unknown name was passed.
"""
try:
return _V2_OPTIMIZER_MAP[name](**kwargs)
except KeyError:
raise ValueError(
'Could not find requested v2 optimizer: {}\nValid choices: {}'.format(
name, list(_V2_OPTIMIZER_MAP.keys())))
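# Illustrative usage sketch (not part of the original module): resolve a v2
# optimizer by its string name, forwarding constructor kwargs.
def _example_get_v2_optimizer():
  return get_v2_optimizer('adam', learning_rate=1e-3)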
def get_expected_metric_variable_names(var_names, name_suffix=''):
"""Returns expected metric variable names given names and prefix/suffix."""
if tf2.enabled() or context.executing_eagerly():
  # In V1 eager mode and in V2, variable names are not made unique.
return [n + ':0' for n in var_names]
# In V1 graph mode variable names are made unique using a suffix.
return [n + name_suffix + ':0' for n in var_names]
def enable_v2_dtype_behavior(fn):
"""Decorator for enabling the layer V2 dtype behavior on a test."""
return _set_v2_dtype_behavior(fn, True)
def disable_v2_dtype_behavior(fn):
"""Decorator for disabling the layer V2 dtype behavior on a test."""
return _set_v2_dtype_behavior(fn, False)
def _set_v2_dtype_behavior(fn, enabled):
"""Returns version of 'fn' that runs with v2 dtype behavior on or off."""
@functools.wraps(fn)
def wrapper(*args, **kwargs):
v2_dtype_behavior = base_layer_utils.V2_DTYPE_BEHAVIOR
base_layer_utils.V2_DTYPE_BEHAVIOR = enabled
try:
return fn(*args, **kwargs)
finally:
base_layer_utils.V2_DTYPE_BEHAVIOR = v2_dtype_behavior
return tf_decorator.make_decorator(fn, wrapper)
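# Illustrative usage sketch (not part of the original module): the dtype
# behavior decorators above are applied directly to test methods, e.g.
#
#   @enable_v2_dtype_behavior
#   def test_layer_uses_v2_dtype_inference(self):
#     ...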
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ODE solvers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.integrate.python.ops import odes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class OdeIntTest(test.TestCase):
def setUp(self):
super(OdeIntTest, self).setUp()
# simple defaults (solution is a sin-wave)
matrix = constant_op.constant([[0, 1], [-1, 0]], dtype=dtypes.float64)
self.func = lambda y, t: math_ops.matmul(matrix, y)
self.y0 = np.array([[1.0], [0.0]])
def test_odeint_exp(self):
# Test odeint by an exponential function:
# dy / dt = y, y(0) = 1.0.
# Its analytical solution is y = exp(t).
func = lambda y, t: y
y0 = constant_op.constant(1.0, dtype=dtypes.float64)
t = np.linspace(0.0, 1.0, 11)
y_solved = odes.odeint(func, y0, t)
self.assertIn('odeint', y_solved.name)
self.assertEqual(y_solved.get_shape(), tensor_shape.TensorShape([11]))
with self.test_session() as sess:
y_solved = sess.run(y_solved)
y_true = np.exp(t)
self.assertAllClose(y_true, y_solved)
def test_odeint_complex(self):
# Test a complex, linear ODE:
# dy / dt = k * y, y(0) = 1.0.
# Its analytical solution is y = exp(k * t).
k = 1j - 0.1
func = lambda y, t: k * y
t = np.linspace(0.0, 1.0, 11)
y_solved = odes.odeint(func, 1.0 + 0.0j, t)
with self.test_session() as sess:
y_solved = sess.run(y_solved)
y_true = np.exp(k * t)
self.assertAllClose(y_true, y_solved)
def test_odeint_riccati(self):
    # The Riccati equation is:
# dy / dt = (y - t) ** 2 + 1.0, y(0) = 0.5.
# Its analytical solution is y = 1.0 / (2.0 - t) + t.
func = lambda t, y: (y - t)**2 + 1.0
t = np.linspace(0.0, 1.0, 11)
y_solved = odes.odeint(func, np.float64(0.5), t)
with self.test_session() as sess:
y_solved = sess.run(y_solved)
y_true = 1.0 / (2.0 - t) + t
self.assertAllClose(y_true, y_solved)
def test_odeint_2d_linear(self):
# Solve the 2D linear differential equation:
# dy1 / dt = 3.0 * y1 + 4.0 * y2,
# dy2 / dt = -4.0 * y1 + 3.0 * y2,
# y1(0) = 0.0,
# y2(0) = 1.0.
# Its analytical solution is
# y1 = sin(4.0 * t) * exp(3.0 * t),
# y2 = cos(4.0 * t) * exp(3.0 * t).
matrix = constant_op.constant(
[[3.0, 4.0], [-4.0, 3.0]], dtype=dtypes.float64)
func = lambda y, t: math_ops.matmul(matrix, y)
y0 = constant_op.constant([[0.0], [1.0]], dtype=dtypes.float64)
t = np.linspace(0.0, 1.0, 11)
y_solved = odes.odeint(func, y0, t)
with self.test_session() as sess:
y_solved = sess.run(y_solved)
y_true = np.zeros((len(t), 2, 1))
y_true[:, 0, 0] = np.sin(4.0 * t) * np.exp(3.0 * t)
y_true[:, 1, 0] = np.cos(4.0 * t) * np.exp(3.0 * t)
self.assertAllClose(y_true, y_solved, atol=1e-5)
def test_odeint_higher_rank(self):
func = lambda y, t: y
y0 = constant_op.constant(1.0, dtype=dtypes.float64)
t = np.linspace(0.0, 1.0, 11)
for shape in [(), (1,), (1, 1)]:
expected_shape = (len(t),) + shape
y_solved = odes.odeint(func, array_ops.reshape(y0, shape), t)
self.assertEqual(y_solved.get_shape(),
tensor_shape.TensorShape(expected_shape))
with self.test_session() as sess:
y_solved = sess.run(y_solved)
      self.assertEqual(y_solved.shape, expected_shape)
def test_odeint_all_dtypes(self):
func = lambda y, t: y
t = np.linspace(0.0, 1.0, 11)
for y0_dtype in [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
]:
for t_dtype in [dtypes.float32, dtypes.float64]:
y0 = math_ops.cast(1.0, y0_dtype)
y_solved = odes.odeint(func, y0, math_ops.cast(t, t_dtype))
with self.test_session() as sess:
y_solved = sess.run(y_solved)
expected = np.asarray(np.exp(t))
self.assertAllClose(y_solved, expected, rtol=1e-5)
self.assertEqual(dtypes.as_dtype(y_solved.dtype), y0_dtype)
def test_odeint_required_dtypes(self):
with self.assertRaisesRegexp(TypeError, '`y0` must have a floating point'):
odes.odeint(self.func, math_ops.cast(self.y0, dtypes.int32), [0, 1])
with self.assertRaisesRegexp(TypeError, '`t` must have a floating point'):
odes.odeint(self.func, self.y0, math_ops.cast([0, 1], dtypes.int32))
def test_odeint_runtime_errors(self):
with self.assertRaisesRegexp(ValueError, 'cannot supply `options` without'):
odes.odeint(self.func, self.y0, [0, 1], options={'first_step': 1.0})
y = odes.odeint(
self.func,
self.y0, [0, 1],
method='dopri5',
options={'max_num_steps': 0})
with self.test_session() as sess:
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
'max_num_steps'):
sess.run(y)
y = odes.odeint(self.func, self.y0, [1, 0])
with self.test_session() as sess:
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
'monotonic increasing'):
sess.run(y)
def test_odeint_different_times(self):
# integrate steps should be independent of interpolation times
times0 = np.linspace(0, 10, num=11, dtype=float)
times1 = np.linspace(0, 10, num=101, dtype=float)
with self.test_session() as sess:
y_solved_0, info_0 = sess.run(
odes.odeint(
self.func, self.y0, times0, full_output=True))
y_solved_1, info_1 = sess.run(
odes.odeint(
self.func, self.y0, times1, full_output=True))
self.assertAllClose(y_solved_0, y_solved_1[::10])
self.assertEqual(info_0['num_func_evals'], info_1['num_func_evals'])
self.assertAllEqual(info_0['integrate_points'], info_1['integrate_points'])
self.assertAllEqual(info_0['error_ratio'], info_1['error_ratio'])
def test_odeint_5th_order_accuracy(self):
t = [0, 20]
kwargs = dict(
full_output=True, method='dopri5', options=dict(max_num_steps=2000))
with self.test_session() as sess:
_, info_0 = sess.run(
odes.odeint(
self.func, self.y0, t, rtol=0, atol=1e-6, **kwargs))
_, info_1 = sess.run(
odes.odeint(
self.func, self.y0, t, rtol=0, atol=1e-9, **kwargs))
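    # dopri5 is 5th-order accurate, so tightening atol by a factor of 1000
    # should grow the number of integration points by roughly 1000**(1/5).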
self.assertAllClose(
info_0['integrate_points'].size * 1000**0.2,
float(info_1['integrate_points'].size),
rtol=0.01)
class StepSizeTest(test.TestCase):
def test_error_ratio_one(self):
new_step = odes._optimal_step_size(
last_step=constant_op.constant(1.0),
error_ratio=constant_op.constant(1.0))
with self.test_session() as sess:
new_step = sess.run(new_step)
self.assertAllClose(new_step, 0.9)
def test_ifactor(self):
new_step = odes._optimal_step_size(
last_step=constant_op.constant(1.0),
error_ratio=constant_op.constant(0.0))
with self.test_session() as sess:
new_step = sess.run(new_step)
self.assertAllClose(new_step, 10.0)
def test_dfactor(self):
new_step = odes._optimal_step_size(
last_step=constant_op.constant(1.0),
error_ratio=constant_op.constant(1e6))
with self.test_session() as sess:
new_step = sess.run(new_step)
self.assertAllClose(new_step, 0.2)
class InterpolationTest(test.TestCase):
def test_5th_order_polynomial(self):
# this should be an exact fit
f = lambda x: x**4 + x**3 - 2 * x**2 + 4 * x + 5
f_prime = lambda x: 4 * x**3 + 3 * x**2 - 4 * x + 4
coeffs = odes._interp_fit(
f(0.0), f(10.0), f(5.0), f_prime(0.0), f_prime(10.0), 10.0)
times = np.linspace(0, 10, dtype=np.float32)
y_fit = array_ops.stack(
[odes._interp_evaluate(coeffs, 0.0, 10.0, t) for t in times])
y_expected = f(times)
with self.test_session() as sess:
y_actual = sess.run(y_fit)
self.assertAllClose(y_expected, y_actual)
# attempt interpolation outside bounds
y_invalid = odes._interp_evaluate(coeffs, 0.0, 10.0, 100.0)
with self.test_session() as sess:
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(y_invalid)
if __name__ == '__main__':
test.main()
|
|
import decimal
import numpy as np
from numpy import iinfo
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, to_numeric
import pandas._testing as tm
@pytest.fixture(params=[None, "ignore", "raise", "coerce"])
def errors(request):
return request.param
@pytest.fixture(params=[True, False])
def signed(request):
return request.param
@pytest.fixture(params=[lambda x: x, str], ids=["identity", "str"])
def transform(request):
return request.param
@pytest.fixture(params=[47393996303418497800, 100000000000000000000])
def large_val(request):
return request.param
@pytest.fixture(params=[True, False])
def multiple_elts(request):
return request.param
@pytest.fixture(
params=[
(lambda x: Index(x, name="idx"), tm.assert_index_equal),
(lambda x: Series(x, name="ser"), tm.assert_series_equal),
(lambda x: np.array(Index(x).values), tm.assert_numpy_array_equal),
]
)
def transform_assert_equal(request):
return request.param
@pytest.mark.parametrize(
"input_kwargs,result_kwargs",
[
(dict(), dict(dtype=np.int64)),
(dict(errors="coerce", downcast="integer"), dict(dtype=np.int8)),
],
)
def test_empty(input_kwargs, result_kwargs):
# see gh-16302
ser = Series([], dtype=object)
result = to_numeric(ser, **input_kwargs)
expected = Series([], **result_kwargs)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("last_val", ["7", 7])
def test_series(last_val):
ser = Series(["1", "-3.14", last_val])
result = to_numeric(ser)
expected = Series([1, -3.14, 7])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[1, 3, 4, 5],
[1.0, 3.0, 4.0, 5.0],
# Bool is regarded as numeric.
[True, False, True, True],
],
)
def test_series_numeric(data):
ser = Series(data, index=list("ABCD"), name="EFG")
result = to_numeric(ser)
tm.assert_series_equal(result, ser)
@pytest.mark.parametrize(
"data,msg",
[
([1, -3.14, "apple"], 'Unable to parse string "apple" at position 2'),
(
["orange", 1, -3.14, "apple"],
'Unable to parse string "orange" at position 0',
),
],
)
def test_error(data, msg):
ser = Series(data)
with pytest.raises(ValueError, match=msg):
to_numeric(ser, errors="raise")
@pytest.mark.parametrize(
"errors,exp_data", [("ignore", [1, -3.14, "apple"]), ("coerce", [1, -3.14, np.nan])]
)
def test_ignore_error(errors, exp_data):
ser = Series([1, -3.14, "apple"])
result = to_numeric(ser, errors=errors)
expected = Series(exp_data)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"errors,exp",
[
("raise", 'Unable to parse string "apple" at position 2'),
("ignore", [True, False, "apple"]),
# Coerces to float.
("coerce", [1.0, 0.0, np.nan]),
],
)
def test_bool_handling(errors, exp):
ser = Series([True, False, "apple"])
if isinstance(exp, str):
with pytest.raises(ValueError, match=exp):
to_numeric(ser, errors=errors)
else:
result = to_numeric(ser, errors=errors)
expected = Series(exp)
tm.assert_series_equal(result, expected)
def test_list():
ser = ["1", "-3.14", "7"]
res = to_numeric(ser)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize(
"data,arr_kwargs",
[
([1, 3, 4, 5], dict(dtype=np.int64)),
([1.0, 3.0, 4.0, 5.0], dict()),
# Boolean is regarded as numeric.
([True, False, True, True], dict()),
],
)
def test_list_numeric(data, arr_kwargs):
result = to_numeric(data)
expected = np.array(data, **arr_kwargs)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("kwargs", [dict(dtype="O"), dict()])
def test_numeric(kwargs):
data = [1, -3.14, 7]
ser = Series(data, **kwargs)
result = to_numeric(ser)
expected = Series(data)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"columns",
[
# One column.
"a",
# Multiple columns.
["a", "b"],
],
)
def test_numeric_df_columns(columns):
# see gh-14827
df = DataFrame(
dict(
a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), "0.1"],
b=[1.0, 2.0, 3.0, 4.0],
)
)
expected = DataFrame(dict(a=[1.2, 3.14, np.inf, 0.1], b=[1.0, 2.0, 3.0, 4.0]))
df_copy = df.copy()
df_copy[columns] = df_copy[columns].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
@pytest.mark.parametrize(
"data,exp_data",
[
(
[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1],
[[3.14, 1.0], 1.6, 0.1],
),
([np.array([decimal.Decimal(3.14), 1.0]), 0.1], [[3.14, 1.0], 0.1]),
],
)
def test_numeric_embedded_arr_likes(data, exp_data):
# Test to_numeric with embedded lists and arrays
df = DataFrame(dict(a=data))
df["a"] = df["a"].apply(to_numeric)
expected = DataFrame(dict(a=exp_data))
tm.assert_frame_equal(df, expected)
def test_all_nan():
ser = Series(["a", "b", "c"])
result = to_numeric(ser, errors="coerce")
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_type_check(errors):
# see gh-11776
df = DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]})
kwargs = dict(errors=errors) if errors is not None else dict()
    with pytest.raises(TypeError, match="1-d array"):
        to_numeric(df, **kwargs)
@pytest.mark.parametrize("val", [1, 1.1, 20001])
def test_scalar(val, signed, transform):
val = -val if signed else val
assert to_numeric(transform(val)) == float(val)
def test_really_large_scalar(large_val, signed, transform, errors):
# see gh-24910
kwargs = dict(errors=errors) if errors is not None else dict()
val = -large_val if signed else large_val
val = transform(val)
val_is_string = isinstance(val, str)
if val_is_string and errors in (None, "raise"):
msg = "Integer out of range. at position 0"
with pytest.raises(ValueError, match=msg):
to_numeric(val, **kwargs)
else:
expected = float(val) if (errors == "coerce" and val_is_string) else val
tm.assert_almost_equal(to_numeric(val, **kwargs), expected)
def test_really_large_in_arr(large_val, signed, transform, multiple_elts, errors):
# see gh-24910
kwargs = dict(errors=errors) if errors is not None else dict()
val = -large_val if signed else large_val
val = transform(val)
extra_elt = "string"
arr = [val] + multiple_elts * [extra_elt]
val_is_string = isinstance(val, str)
coercing = errors == "coerce"
if errors in (None, "raise") and (val_is_string or multiple_elts):
if val_is_string:
msg = "Integer out of range. at position 0"
else:
msg = 'Unable to parse string "string" at position 1'
with pytest.raises(ValueError, match=msg):
to_numeric(arr, **kwargs)
else:
result = to_numeric(arr, **kwargs)
exp_val = float(val) if (coercing and val_is_string) else val
expected = [exp_val]
if multiple_elts:
if coercing:
expected.append(np.nan)
exp_dtype = float
else:
expected.append(extra_elt)
exp_dtype = object
else:
exp_dtype = float if isinstance(exp_val, (int, float)) else object
tm.assert_almost_equal(result, np.array(expected, dtype=exp_dtype))
def test_really_large_in_arr_consistent(large_val, signed, multiple_elts, errors):
# see gh-24910
#
    # Even if we discover that we have to hold floats, that does not mean we
    # should be lenient on subsequent elements that fail to parse as integers.
kwargs = dict(errors=errors) if errors is not None else dict()
arr = [str(-large_val if signed else large_val)]
if multiple_elts:
arr.insert(0, large_val)
if errors in (None, "raise"):
index = int(multiple_elts)
msg = f"Integer out of range. at position {index}"
with pytest.raises(ValueError, match=msg):
to_numeric(arr, **kwargs)
else:
result = to_numeric(arr, **kwargs)
if errors == "coerce":
expected = [float(i) for i in arr]
exp_dtype = float
else:
expected = arr
exp_dtype = object
tm.assert_almost_equal(result, np.array(expected, dtype=exp_dtype))
@pytest.mark.parametrize(
"errors,checker",
[
("raise", 'Unable to parse string "fail" at position 0'),
("ignore", lambda x: x == "fail"),
("coerce", lambda x: np.isnan(x)),
],
)
def test_scalar_fail(errors, checker):
scalar = "fail"
if isinstance(checker, str):
with pytest.raises(ValueError, match=checker):
to_numeric(scalar, errors=errors)
else:
assert checker(to_numeric(scalar, errors=errors))
@pytest.mark.parametrize("data", [[1, 2, 3], [1.0, np.nan, 3, np.nan]])
def test_numeric_dtypes(data, transform_assert_equal):
transform, assert_equal = transform_assert_equal
data = transform(data)
result = to_numeric(data)
assert_equal(result, data)
@pytest.mark.parametrize(
"data,exp",
[
(["1", "2", "3"], np.array([1, 2, 3], dtype="int64")),
(["1.5", "2.7", "3.4"], np.array([1.5, 2.7, 3.4])),
],
)
def test_str(data, exp, transform_assert_equal):
transform, assert_equal = transform_assert_equal
result = to_numeric(transform(data))
expected = transform(exp)
assert_equal(result, expected)
def test_datetime_like(tz_naive_fixture, transform_assert_equal):
transform, assert_equal = transform_assert_equal
idx = pd.date_range("20130101", periods=3, tz=tz_naive_fixture)
result = to_numeric(transform(idx))
expected = transform(idx.asi8)
assert_equal(result, expected)
def test_timedelta(transform_assert_equal):
transform, assert_equal = transform_assert_equal
idx = pd.timedelta_range("1 days", periods=3, freq="D")
result = to_numeric(transform(idx))
expected = transform(idx.asi8)
assert_equal(result, expected)
def test_period(transform_assert_equal):
transform, assert_equal = transform_assert_equal
idx = pd.period_range("2011-01", periods=3, freq="M", name="")
inp = transform(idx)
if isinstance(inp, Index):
result = to_numeric(inp)
expected = transform(idx.asi8)
assert_equal(result, expected)
else:
# TODO: PeriodDtype, so support it in to_numeric.
pytest.skip("Missing PeriodDtype support in to_numeric")
@pytest.mark.parametrize(
"errors,expected",
[
("raise", "Invalid object type at position 0"),
("ignore", Series([[10.0, 2], 1.0, "apple"])),
("coerce", Series([np.nan, 1.0, np.nan])),
],
)
def test_non_hashable(errors, expected):
# see gh-13324
ser = Series([[10.0, 2], 1.0, "apple"])
if isinstance(expected, str):
with pytest.raises(TypeError, match=expected):
to_numeric(ser, errors=errors)
else:
result = to_numeric(ser, errors=errors)
tm.assert_series_equal(result, expected)
def test_downcast_invalid_cast():
# see gh-13352
data = ["1", 2, 3]
invalid_downcast = "unsigned-integer"
msg = "invalid downcasting method provided"
with pytest.raises(ValueError, match=msg):
to_numeric(data, downcast=invalid_downcast)
def test_errors_invalid_value():
# see gh-26466
data = ["1", 2, 3]
invalid_error_value = "invalid"
msg = "invalid error value specified"
with pytest.raises(ValueError, match=msg):
to_numeric(data, errors=invalid_error_value)
@pytest.mark.parametrize(
"data",
[
["1", 2, 3],
[1, 2, 3],
np.array(["1970-01-02", "1970-01-03", "1970-01-04"], dtype="datetime64[D]"),
],
)
@pytest.mark.parametrize(
"kwargs,exp_dtype",
[
# Basic function tests.
(dict(), np.int64),
(dict(downcast=None), np.int64),
# Support below np.float32 is rare and far between.
(dict(downcast="float"), np.dtype(np.float32).char),
# Basic dtype support.
(dict(downcast="unsigned"), np.dtype(np.typecodes["UnsignedInteger"][0])),
],
)
def test_downcast_basic(data, kwargs, exp_dtype):
# see gh-13352
result = to_numeric(data, **kwargs)
expected = np.array([1, 2, 3], dtype=exp_dtype)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("signed_downcast", ["integer", "signed"])
@pytest.mark.parametrize(
"data",
[
["1", 2, 3],
[1, 2, 3],
np.array(["1970-01-02", "1970-01-03", "1970-01-04"], dtype="datetime64[D]"),
],
)
def test_signed_downcast(data, signed_downcast):
# see gh-13352
smallest_int_dtype = np.dtype(np.typecodes["Integer"][0])
expected = np.array([1, 2, 3], dtype=smallest_int_dtype)
res = to_numeric(data, downcast=signed_downcast)
tm.assert_numpy_array_equal(res, expected)
def test_ignore_downcast_invalid_data():
# If we can't successfully cast the given
# data to a numeric dtype, do not bother
# with the downcast parameter.
data = ["foo", 2, 3]
expected = np.array(data, dtype=object)
res = to_numeric(data, errors="ignore", downcast="unsigned")
tm.assert_numpy_array_equal(res, expected)
def test_ignore_downcast_neg_to_unsigned():
# Cannot cast to an unsigned integer
# because we have a negative number.
data = ["-1", 2, 3]
expected = np.array([-1, 2, 3], dtype=np.int64)
res = to_numeric(data, downcast="unsigned")
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("downcast", ["integer", "signed", "unsigned"])
@pytest.mark.parametrize(
"data,expected",
[
(["1.1", 2, 3], np.array([1.1, 2, 3], dtype=np.float64)),
(
[10000.0, 20000, 3000, 40000.36, 50000, 50000.00],
np.array(
[10000.0, 20000, 3000, 40000.36, 50000, 50000.00], dtype=np.float64
),
),
],
)
def test_ignore_downcast_cannot_convert_float(data, expected, downcast):
# Cannot cast to an integer (signed or unsigned)
# because we have a float number.
res = to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize(
"downcast,expected_dtype",
[("integer", np.int16), ("signed", np.int16), ("unsigned", np.uint16)],
)
def test_downcast_not8bit(downcast, expected_dtype):
# the smallest integer dtype need not be np.(u)int8
data = ["256", 257, 258]
expected = np.array([256, 257, 258], dtype=expected_dtype)
res = to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize(
"dtype,downcast,min_max",
[
("int8", "integer", [iinfo(np.int8).min, iinfo(np.int8).max]),
("int16", "integer", [iinfo(np.int16).min, iinfo(np.int16).max]),
("int32", "integer", [iinfo(np.int32).min, iinfo(np.int32).max]),
("int64", "integer", [iinfo(np.int64).min, iinfo(np.int64).max]),
("uint8", "unsigned", [iinfo(np.uint8).min, iinfo(np.uint8).max]),
("uint16", "unsigned", [iinfo(np.uint16).min, iinfo(np.uint16).max]),
("uint32", "unsigned", [iinfo(np.uint32).min, iinfo(np.uint32).max]),
("uint64", "unsigned", [iinfo(np.uint64).min, iinfo(np.uint64).max]),
("int16", "integer", [iinfo(np.int8).min, iinfo(np.int8).max + 1]),
("int32", "integer", [iinfo(np.int16).min, iinfo(np.int16).max + 1]),
("int64", "integer", [iinfo(np.int32).min, iinfo(np.int32).max + 1]),
("int16", "integer", [iinfo(np.int8).min - 1, iinfo(np.int16).max]),
("int32", "integer", [iinfo(np.int16).min - 1, iinfo(np.int32).max]),
("int64", "integer", [iinfo(np.int32).min - 1, iinfo(np.int64).max]),
("uint16", "unsigned", [iinfo(np.uint8).min, iinfo(np.uint8).max + 1]),
("uint32", "unsigned", [iinfo(np.uint16).min, iinfo(np.uint16).max + 1]),
("uint64", "unsigned", [iinfo(np.uint32).min, iinfo(np.uint32).max + 1]),
],
)
def test_downcast_limits(dtype, downcast, min_max):
# see gh-14404: test the limits of each downcast.
series = to_numeric(Series(min_max), downcast=downcast)
assert series.dtype == dtype
@pytest.mark.parametrize(
"ser,expected",
[
(
pd.Series([0, 9223372036854775808]),
pd.Series([0, 9223372036854775808], dtype=np.uint64),
)
],
)
def test_downcast_uint64(ser, expected):
# see gh-14422:
    # BUG: to_numeric doesn't work with uint64 numbers
result = pd.to_numeric(ser, downcast="unsigned")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data,exp_data",
[
(
[200, 300, "", "NaN", 30000000000000000000],
[200, 300, np.nan, np.nan, 30000000000000000000],
),
(
["12345678901234567890", "1234567890", "ITEM"],
[12345678901234567890, 1234567890, np.nan],
),
],
)
def test_coerce_uint64_conflict(data, exp_data):
# see gh-17007 and gh-17125
#
# Still returns float despite the uint64-nan conflict,
# which would normally force the casting to object.
result = to_numeric(Series(data), errors="coerce")
expected = Series(exp_data, dtype=float)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"errors,exp",
[
("ignore", Series(["12345678901234567890", "1234567890", "ITEM"])),
("raise", "Unable to parse string"),
],
)
def test_non_coerce_uint64_conflict(errors, exp):
# see gh-17007 and gh-17125
#
# For completeness.
ser = Series(["12345678901234567890", "1234567890", "ITEM"])
if isinstance(exp, str):
with pytest.raises(ValueError, match=exp):
to_numeric(ser, errors=errors)
else:
result = to_numeric(ser, errors=errors)
tm.assert_series_equal(result, ser)
@pytest.mark.parametrize("dc1", ["integer", "float", "unsigned"])
@pytest.mark.parametrize("dc2", ["integer", "float", "unsigned"])
def test_downcast_empty(dc1, dc2):
# GH32493
tm.assert_numpy_array_equal(
pd.to_numeric([], downcast=dc1),
pd.to_numeric([], downcast=dc2),
check_dtype=False,
)
def test_failure_to_convert_uint64_string_to_NaN():
# GH 32394
result = to_numeric("uint64", errors="coerce")
assert np.isnan(result)
ser = Series([32, 64, np.nan])
result = to_numeric(pd.Series(["32", "64", "uint64"]), errors="coerce")
tm.assert_series_equal(result, ser)
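# Not a test: a minimal usage sketch of the downcast behaviour exercised above,
# assuming the imports at the top of this module (Series, to_numeric).
def _demo_downcast_behaviour():
    as_signed = to_numeric(Series([1, 2, 3]), downcast="integer")  # int8
    as_unsigned = to_numeric(Series([1, 2, 3]), downcast="unsigned")  # uint8
    as_float = to_numeric(Series([1, 2, 3]), downcast="float")  # float32
    return as_signed.dtype, as_unsigned.dtype, as_float.dtype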
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Timing benchmark for AlexNet inference.
To run, use:
bazel run -c opt --config=cuda \
third_party/tensorflow/models/image/alexnet:alexnet_benchmark
Across 100 steps on batch size = 128.
Forward pass:
Run on Tesla K40c: 145 +/- 1.5 ms / batch
Run on Titan X: 70 +/- 0.1 ms / batch
Forward-backward pass:
Run on Tesla K40c: 480 +/- 48 ms / batch
Run on Titan X: 244 +/- 30 ms / batch
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import math
import time
import tensorflow.python.platform
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Batch size.""")
tf.app.flags.DEFINE_integer('num_batches', 100,
"""Number of batches to run.""")
def print_activations(t):
print(t.op.name, ' ', t.get_shape().as_list())
def inference(images):
"""Build the AlexNet model.
Args:
images: Images Tensor
Returns:
pool5: the last Tensor in the convolutional component of AlexNet.
parameters: a list of Tensors corresponding to the weights and biases of the
AlexNet model.
"""
parameters = []
# conv1
with tf.name_scope('conv1') as scope:
kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 64], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(images, kernel, [1, 4, 4, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
trainable=True, name='biases')
bias = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(bias, name=scope)
print_activations(conv1)
parameters += [kernel, biases]
# lrn1
# TODO(shlens, jiayq): Add a GPU version of local response normalization.
# pool1
pool1 = tf.nn.max_pool(conv1,
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding='VALID',
name='pool1')
print_activations(pool1)
# conv2
with tf.name_scope('conv2') as scope:
kernel = tf.Variable(tf.truncated_normal([5, 5, 64, 192], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32),
trainable=True, name='biases')
bias = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(bias, name=scope)
parameters += [kernel, biases]
print_activations(conv2)
# pool2
pool2 = tf.nn.max_pool(conv2,
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding='VALID',
name='pool2')
print_activations(pool2)
# conv3
with tf.name_scope('conv3') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 192, 384],
dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32),
trainable=True, name='biases')
bias = tf.nn.bias_add(conv, biases)
conv3 = tf.nn.relu(bias, name=scope)
parameters += [kernel, biases]
print_activations(conv3)
# conv4
with tf.name_scope('conv4') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 384, 256],
dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
trainable=True, name='biases')
bias = tf.nn.bias_add(conv, biases)
conv4 = tf.nn.relu(bias, name=scope)
parameters += [kernel, biases]
print_activations(conv4)
# conv5
with tf.name_scope('conv5') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256],
dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
trainable=True, name='biases')
bias = tf.nn.bias_add(conv, biases)
conv5 = tf.nn.relu(bias, name=scope)
parameters += [kernel, biases]
print_activations(conv5)
# pool5
pool5 = tf.nn.max_pool(conv5,
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding='VALID',
name='pool5')
print_activations(pool5)
return pool5, parameters
def time_tensorflow_run(session, target, info_string):
"""Run the computation to obtain the target tensor and print timing stats.
Args:
session: the TensorFlow session to run the computation under.
    target: the target Tensor that is passed to the session's run() function.
info_string: a string summarizing this run, to be printed with the stats.
Returns:
None
"""
num_steps_burn_in = 10
total_duration = 0.0
total_duration_squared = 0.0
for i in xrange(FLAGS.num_batches + num_steps_burn_in):
start_time = time.time()
_ = session.run(target)
duration = time.time() - start_time
if i > num_steps_burn_in:
if not i % 10:
print ('%s: step %d, duration = %.3f' %
(datetime.now(), i - num_steps_burn_in, duration))
total_duration += duration
total_duration_squared += duration * duration
mn = total_duration / FLAGS.num_batches
vr = total_duration_squared / FLAGS.num_batches - mn * mn
sd = math.sqrt(vr)
print ('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
(datetime.now(), info_string, FLAGS.num_batches, mn, sd))
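# Not part of the original benchmark: a minimal sketch of the statistics printed
# above, showing that the standard deviation comes from the running sums via
# var = E[x^2] - (E[x])^2. The durations below are illustrative values only.
def _demo_timing_stats():
  durations = [0.145, 0.150, 0.148, 0.152]  # per-batch times in seconds
  n = len(durations)
  mean = sum(durations) / n
  variance = sum(d * d for d in durations) / n - mean * mean
  return mean, math.sqrt(variance)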
def run_benchmark():
"""Run the benchmark on AlexNet."""
with tf.Graph().as_default():
# Generate some dummy images.
image_size = 224
    # Note that our padding definition is slightly different from cuda-convnet.
# In order to force the model to start with the same activations sizes,
# we add 3 to the image_size and employ VALID padding above.
images = tf.Variable(tf.random_normal([FLAGS.batch_size,
image_size,
image_size, 3],
dtype=tf.float32,
stddev=1e-1))
# Build a Graph that computes the logits predictions from the
# inference model.
pool5, parameters = inference(images)
# Build an initialization operation.
init = tf.initialize_all_variables()
# Start running operations on the Graph.
config = tf.ConfigProto()
config.gpu_options.allocator_type = 'BFC'
sess = tf.Session(config=config)
sess.run(init)
# Run the forward benchmark.
time_tensorflow_run(sess, pool5, "Forward")
# Add a simple objective so we can calculate the backward pass.
objective = tf.nn.l2_loss(pool5)
# Compute the gradient with respect to all the parameters.
grad = tf.gradients(objective, parameters)
# Run the backward benchmark.
time_tensorflow_run(sess, grad, "Forward-backward")
def main(_):
run_benchmark()
if __name__ == '__main__':
tf.app.run()
|
|
# Copyright 2016 Sean Dague
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import subprocess
import time
import paho.mqtt.client as paho
from arwn import temperature
from arwn import handlers
from arwn.vendor.RFXtrx import lowlevel as ll
from arwn.vendor.RFXtrx.pyserial import PySerialTransport
logger = logging.getLogger(__name__)
IS_NONE = 0
IS_TEMP = 1 << 0
IS_BARO = 1 << 1
IS_WIND = 1 << 2
IS_RAIN = 1 << 3
IS_HUMID = 1 << 4
IS_MOIST = 1 << 5
# List of known sensor models from rtl_433, please feel free to patch
# and add any that you have here.
TH_SENSORS = ("THGR810", "THGR122N", "BHTR968")
MOIST_SENSORS = ("Springfield Temperature & Moisture",)  # trailing comma: one-element tuple
WIND_SENSORS = ("WGR800",)
RAIN_SENSORS = ("PCR800",)
BARO_SENSORS = ("BHTR968",)
MAX_TEMP = 150
MIN_TEMP = -40
class SensorPacket(object):
"""Convert RFXtrx packet to native packet for ARWN"""
def _set_type(self, packet):
logger.debug("Type: %d", self.stype)
if self.stype != IS_NONE:
return
if isinstance(packet, dict):
model = packet.get("model", "")
if model.startswith("Oregon-"):
model = model[7:]
if model in TH_SENSORS:
self.stype |= IS_TEMP
self.stype |= IS_HUMID
if model in BARO_SENSORS:
self.stype |= IS_BARO
if model in RAIN_SENSORS:
self.stype |= IS_RAIN
if model in WIND_SENSORS:
self.stype |= IS_WIND
if model in MOIST_SENSORS:
self.stype |= IS_TEMP
self.stype |= IS_MOIST
# if this is an RFXCOM packet
if isinstance(packet, ll.TempHumid):
self.stype |= IS_TEMP
if isinstance(packet, ll.TempHumidBaro):
self.stype |= IS_TEMP
if isinstance(packet, ll.RainGauge):
self.stype |= IS_RAIN
if isinstance(packet, ll.Wind):
self.stype |= IS_WIND
if self.stype == IS_NONE:
logger.warning("Unknown sensor type: %s", packet)
@property
def is_temp(self):
return self.stype & IS_TEMP
@property
def is_baro(self):
return self.stype & IS_BARO
@property
def is_rain(self):
return self.stype & IS_RAIN
@property
def is_wind(self):
return self.stype & IS_WIND
@property
def is_moist(self):
return self.stype & IS_MOIST
def __init__(self, stype=IS_NONE, bat=0, sensor_id=0, **kwargs):
self.stype = stype
        self.bat = bat
self.sensor_id = sensor_id
self.data = {}
self.data.update(kwargs)
def from_json(self, data):
logger.debug("Packet json; %s", data)
self._set_type(data)
self.bat = data.get("battery", "NA")
if "id" in data:
self.sensor_id = "%2.2x:%2.2x" % (data['id'],
data.get('channel', 0))
elif "sid" in data:
self.sensor_id = "%2.2x:%2.2x" % (data['sid'],
data.get('channel', 0))
if self.stype & IS_TEMP:
temp = temperature.Temperature(
"%sC" % data['temperature_C']).as_F()
self.data['temp'] = round(temp.to_F(), 1)
self.data['units'] = 'F'
# note, we always assume HUMID sensors are temp sensors
if self.stype & IS_HUMID:
self.data['dewpoint'] = round(temp.dewpoint(data['humidity']), 1)
self.data['humid'] = round(data['humidity'], 1)
if self.stype & IS_MOIST:
self.data['moisture'] = data['moisture']
if self.stype & IS_BARO:
self.data['pressure'] = data['pressure_hPa']
if self.stype & IS_RAIN:
# rtl_433 already converts to non metric here
self.data['total'] = round(data['rain_in'], 2)
self.data['rate'] = round(data['rain_rate_in_h'], 2)
self.data['units'] = 'in'
if self.stype & IS_WIND:
mps2mph = 2.23694
speed = round(float(data['average']) * mps2mph, 1)
gust = round(float(data['gust']) * mps2mph, 1)
self.data['direction'] = data['direction']
self.data['speed'] = speed
self.data['gust'] = gust
self.data['units'] = 'mph'
def from_packet(self, packet):
self._set_type(packet)
self.bat = getattr(packet, 'battery', -1)
self.sensor_id = packet.id_string
if self.stype & IS_TEMP:
temp = temperature.Temperature("%sC" % packet.temp).as_F()
self.data['temp'] = round(temp.to_F(), 1)
self.data['dewpoint'] = round(temp.dewpoint(packet.humidity), 1)
self.data['humid'] = round(packet.humidity, 1)
self.data['units'] = 'F'
if self.stype & IS_BARO:
self.data['pressure'] = packet.baro
if self.stype & IS_RAIN:
self.data['total'] = round(packet.raintotal / 25.4, 2)
self.data['rate'] = round(packet.rainrate / 25.4, 2)
self.data['units'] = 'in'
if self.stype & IS_WIND:
mps2mph = 2.23694
speed = round(float(packet.average_speed) * mps2mph, 1)
gust = round(float(packet.gust) * mps2mph, 1)
self.data['direction'] = packet.direction
self.data['speed'] = speed
self.data['gust'] = gust
self.data['units'] = 'mph'
def as_json(self, **kwargs):
data = dict(bat=self.bat, sensor_id=self.sensor_id)
data.update(self.data)
data.update(kwargs)
return data
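# Not used by arwn itself: a minimal sketch showing how the IS_* bitmask flags
# defined above combine, which is what _set_type and the is_* properties rely on.
def _demo_sensor_flags():
    stype = IS_TEMP | IS_HUMID
    return bool(stype & IS_TEMP), bool(stype & IS_HUMID), bool(stype & IS_BARO)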
class MQTT(object):
def __init__(self, server, config, port=1883):
client = paho.Client()
handlers.setup()
self.server = server
self.port = port
self.config = config
self.root = config["mqtt"].get("root", "arwn")
self.status_topic = "%s/status" % self.root
def on_connect(client, userdata, flags, rc):
status = {'status': 'alive', 'timestamp': int(time.time())}
client.subscribe("%s/#" % self.root)
client.publish(
self.status_topic, json.dumps(status), qos=2, retain=True)
client.will_set(self.status_topic,
json.dumps(status_dead), retain=True)
def on_message(client, userdata, msg):
payload = json.loads(msg.payload)
handlers.run(self, msg.topic, payload)
return True
status_dead = {'status': 'dead'}
client.will_set(self.status_topic,
json.dumps(status_dead), qos=2, retain=True)
client.on_connect = on_connect
client.on_message = on_message
client.connect(self.server, self.port)
client.loop_start()
self.client = client
def reconnect(self):
self.client.disconnect()
self.client.connect(self.server, self.port)
def send(self, topic, payload, retain=False):
topic = "%s/%s" % (self.root, topic)
logger.debug("Sending %s => %s", topic, payload)
self.client.publish(topic, json.dumps(payload), retain=retain)
class RFXCOMCollector(object):
def __init__(self, device):
self.transport = PySerialTransport(device)
self.transport.reset()
self.unparsable = 0
def __iter__(self):
return self
def next(self):
try:
event = self.transport.receive_blocking()
self.unparsable = 0
except Exception:
logger.exception("Got an unparsable byte")
self.unparsable += 1
if self.unparsable > 10:
raise
return None
logger.debug(event)
# general case, temp, rain, wind
packet = SensorPacket()
packet.from_packet(event.device.pkt)
return packet
class RTL433Collector(object):
def __init__(self, devices=None):
cmd = ["rtl_433", "-F", "json"]
logger.error(devices)
logger.error(type(devices))
if type(devices) is list:
for d in devices:
cmd.append("-R")
cmd.append("%s" % d)
logger.info("starting cmd: %s" % cmd)
self.rtl = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
def __iter__(self):
return self
def next(self):
line = self.rtl.stdout.readline()
data = json.loads(line)
self.log_data(data)
packet = SensorPacket()
packet.from_json(data)
return packet
def log_data(self, data):
fields = [
("model", "(%(model)s)"),
("id", "%(id)d:%(channel)d"),
("sid", "%(sid)d:%(channel)d"),
("temperature_C", "%(temperature_C)sC"),
("temperature", "%(temperature)sF"),
("humidity", "%(humidity)s%%"),
("moisture", "Moist:%(moisture)s"),
("pressure_hPa", "%(pressure_hPa)shPa"),
("direction", u"%(direction)s" + u"\u00b0"),
("gust", "Gust:%(gust)s"),
("average", "Speed:%(average)s"),
("rain_total", "Total:%(rain_total)s"),
("rain_rate", "Rate:%(rain_rate)s"),
("battery", "bat:%(battery)s")
]
subset = []
for item in fields:
name = item[0]
fmt = item[1]
if name in data:
subset.append(fmt)
fmtstr = " ".join(subset)
data["channel"] = data.get("channel", 0)
try:
logger.debug(fmtstr, data)
except Exception:
logger.error(data)
logger.error(subset)
pass
class Dispatcher(object):
def __init__(self, config):
self._get_collector(config)
self.names = config["names"]
server = config['mqtt']['server']
self.mqtt = MQTT(server, config)
self.config = config
logger.debug("Config => %s", self.config)
def _get_collector(self, config):
col = config.get("collector")
if col:
ctype = col.get("type")
if ctype == "rtl433":
# devices to limit to
devices = col.get("devices", None)
self.collector = RTL433Collector(devices)
elif ctype == "rfxcom":
device = col["device"]
self.collector = RFXCOMCollector(device)
else:
# fall back for existing configs
device = config["device"]
self.collector = RFXCOMCollector(device)
def loopforever(self):
for packet in self.collector:
if packet is None:
continue
now = int(time.time())
logger.debug("%s", packet)
            # barometer sensors also report temperature, so they get published
            # twice (to the barometer topic here and the temperature topic below)
if packet.is_baro:
self.mqtt.send("barometer", packet.as_json(
units="mbar",
timestamp=now))
if packet.is_moist:
                # Moisture packet readings are a bit flaky, so apply some basic
                # boundary checks before publishing them.
if packet.data['moisture'] > 10 or packet.data['temp'] > 150:
logger.warn(
"Packet moisture data makes no sense: %s => %s" %
(packet, packet.as_json()))
continue
name = self.names.get(packet.sensor_id)
if name:
topic = "moisture/%s" % name
self.mqtt.send(topic, packet.as_json(
units=".",
timestamp=now))
if packet.is_temp:
if packet.data['temp'] > MAX_TEMP or packet.data['temp'] < MIN_TEMP:
logger.warn(
"Packet temp data makes no sense: %s => %s" %
(packet, packet.as_json()))
continue
name = self.names.get(packet.sensor_id)
if name:
topic = "temperature/%s" % name
else:
topic = "unknown/%s" % packet.sensor_id
self.mqtt.send(topic, packet.as_json(timestamp=now))
if packet.is_wind:
self.mqtt.send("wind", packet.as_json(timestamp=now))
if packet.is_rain:
self.mqtt.send("rain", packet.as_json(timestamp=now))
|
|
__author__ = 'waziz'
import re
from os.path import isfile, basename, splitext
import logging
from chisel.smt import Yield, Derivation, Tree, SVector
import math
from glob import glob
from collections import defaultdict, deque
from wmap import WMap
import gzip
from io import TextIOWrapper
def smart_ropen(path):
"""Open file in reading mode directly or through gzip depending on extension."""
if path.endswith('.gz'):
#return TextIOWrapper(gzip.open(path, 'rb')) # python3
return gzip.open(path, 'rb')
else:
return open(path, 'r')
def smart_wopen(path):
"""Open file in writing mode directly or through gzip depending on extension."""
if path.endswith('.gz'):
#return TextIOWrapper(gzip.open(path, 'wb')) # python3
return gzip.open(path, 'wb')
else:
return open(path, 'w')
# TODO: generalise format to be able to read lines with repeated keys
def read_config(path):
"""
Read a config file made of key=value entries
:param path: path to file
:return: dictionary containing the key-value pairs in the config file
"""
config = {}
with smart_ropen(path) as f:
for line in f:
if line.startswith('#'):
continue
k, v = line.strip().split('=')
config[k] = v
return config
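# Not used by chisel itself: a minimal, self-contained sketch of the key=value
# format read_config expects (the file name and keys are illustrative only).
def _demo_read_config():
    import os
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.cfg', delete=False) as f:
        f.write('# decoder settings\n')
        f.write('nbest=100\n')
        f.write('weights=weights.txt\n')
        path = f.name
    try:
        return read_config(path)  # {'nbest': '100', 'weights': 'weights.txt'}
    finally:
        os.unlink(path)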
# TODO: generalise format to be robust to little differences (e.g. FName=FWeight)
def read_weights(path, scaling=1.0):
"""
Read a file made of `FName FWeight` entries
:param path:
:param scaling:
:return:
"""
weights = {}
with smart_ropen(path) as f:
for line in f:
if line.startswith('#'):
continue
k, v = line.strip().split()
weights[k] = float(v) * scaling
return weights
class SegmentMetaData(object):
"""
A simple container for input segments
"""
def __init__(self, sid, src, grammar, refs=[]):
self.sid_ = sid
self.grammar_ = grammar
self.src_ = src
self.refs_ = tuple(refs)
@property
def id(self):
return self.sid_
@property
def src(self):
return self.src_
@property
def refs(self):
return self.refs_
@property
def grammar(self):
return self.grammar_
def __str__(self):
return 'grammar=%s\tsrc=%s' % (self.grammar_, self.src_)
def to_sgm(self, dump_refs=True, lc_input=False, lc_ref=False):
if dump_refs and self.refs_:
return '<seg grammar="{1}" id="{0}">{2}</seg> ||| {3}'.format(self.sid_,
self.grammar_,
self.src_ if not lc_input else str(self.src_).lower(),
' ||| '.join(str(ref) if not lc_ref else str(ref).lower() for ref in self.refs_))
else:
return '<seg grammar="{1}" id="{0}">{2}</seg>'.format(self.sid_,
self.grammar_,
self.src_ if not lc_input else str(self.src_).lower())
@staticmethod
def parse(line, mode, sid=None, grammar_dir=None):
# parse line
if mode == 'plain':
args = parse_plain(line)
        elif mode == 'cdec':
args = parse_cdec_sgml(line)
else:
raise Exception('unknown input format: %s' % mode)
# overrides sentence id
if sid is not None:
args['sid'] = sid
# overrides grammar
if grammar_dir is not None:
grammar_path = '{0}/grammar.{1}'.format(grammar_dir, args['sid'])
if not isfile(grammar_path):
grammar_path += '.gz'
args['grammar'] = grammar_path
# sanity checks
if not isfile(args['grammar']):
raise Exception('Grammar file not found: %s' % args['grammar'])
# construct segment
return SegmentMetaData(**args)
def parse_cdec_sgml(sgml_str):
"""parses an sgml-formatted line as cdec would
    returns a dict with grammar, sid, src and refs"""
parts = sgml_str.split(' ||| ')
if not parts:
        raise Exception('Missing fields: %s' % sgml_str)
pattern = re.compile('<seg grammar="([^"]+)" id="([0-9]+)">(.+)<\/seg>')
match = pattern.match(parts[0])
if match is None:
raise Exception('Bad sgml: %s' % parts[0])
groups = match.groups()
return {'grammar': groups[0],
'sid': int(groups[1]),
'src': Yield(groups[2]),
'refs': [Yield(ref.strip()) for ref in parts[1:]]}
def parse_plain(plain_str):
fields = plain_str.split(' ||| ')
if len(fields) == 0:
raise Exception('Missing fields: %s' % plain_str)
args = {'src': Yield(fields[0])}
if len(fields) > 1:
        args['refs'] = [Yield(ref.strip()) for ref in fields[1:]]
return args
def list_numbered_files(basedir, sort=True, reverse=False):
paths = glob('{0}/[0-9]*'.format(basedir))
ids = [int(splitext(basename(path))[0]) for path in paths]
if not sort:
return zip(ids, paths)
else:
return sorted(zip(ids, paths), key=lambda pair: pair[0], reverse=reverse)
def next_block(fi):
"""Yields the next block of non-empty lines from an input stream (stripped lines are returned in a list)"""
block = []
for line in fi:
line = line.strip()
if not line and len(block):
yield block
block = []
else:
block.append(line)
if len(block):
yield block
def read_block(fi):
"""Read and returns one block of non-empty lines from an input stream (stripped lines are returned in a list)"""
block = []
for line in fi:
line = line.strip()
if not line and len(block):
break
block.append(line)
return block
def read_sampled_derivations(iterable):
"""
    Read a file structured as follows
[proxy]
feature=value
...
[target]
feature=value
...
[samples]
# count projection vector
...
"""
reserved = set(['[proxy]', '[target]', '[samples]'])
def read_weights(Q):
wmap = {}
while Q:
line = Q.popleft()
if not line or line.startswith('#'): # discarded lines
continue
if line in reserved: # stop criterion
Q.appendleft(line)
break
try: # parse a pair
k, v = line.split('=')
wmap[k] = float(v)
except:
                raise ValueError('Incorrectly formatted key-value pair: %s' % line)
return wmap
def read_samples(Q):
samples = []
while Q:
line = Q.popleft()
if not line or line.startswith('#'): # discarded lines
continue
if line in reserved: # stop criterion
Q.appendleft(line)
break
try: # parse a row
count, derivation, fmap = line.split('\t')
d = Derivation(tree=Tree(derivationstr=derivation),
vector=SVector(fmap),
count=long(count))
samples.append(d)
except:
raise ValueError('Incorrectly formatted sample: %s' % line)
return samples
Q = deque(line.strip() for line in iterable)
qmap = {}
pmap = {}
samples = []
while Q:
line = Q.popleft()
if not line or line.startswith('#'):
continue
if line == '[proxy]':
qmap = read_weights(Q)
elif line == '[target]':
pmap = read_weights(Q)
elif line == '[samples]':
samples = read_samples(Q)
return samples, WMap(qmap.iteritems()), WMap(pmap.iteritems())
def sampled_derivations_from_file(input_file):
return read_sampled_derivations(smart_ropen(input_file))
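# Not used by chisel itself: a hedged sketch of the block layout documented in
# read_sampled_derivations. The feature names are illustrative only, and the
# [samples] rows (omitted here) are tab-separated 'count<TAB>derivation<TAB>fmap'
# lines whose exact string formats are defined by chisel.smt.
def _demo_derivation_file_layout():
    lines = ['[proxy]',
             'LanguageModel=0.5',
             'WordPenalty=-1.0',
             '[target]',
             'LanguageModel=0.7',
             'WordPenalty=-0.5',
             '[samples]']
    return read_sampled_derivations(lines)  # ([], proxy WMap, target WMap)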
|
|
from .ast_node import AstNode
from .errors import AstBuilderException
class AstBuilder:
def __init__(self):
self.reset()
def reset(self):
self.stack = [AstNode('None')]
self.comments = []
def start_rule(self, rule_type):
self.stack.append(AstNode(rule_type))
def end_rule(self, rule_type):
node = self.stack.pop()
self.current_node().add(node.rule_type, self.transform_node(node))
def build(self, token):
if token.matched_type == 'Comment':
self.comments.append({
'type': 'Comment',
'location': self.get_location(token),
'text': token.matched_text
})
else:
self.current_node().add(token.matched_type, token)
def get_result(self):
return self.current_node().get_single('Feature')
def current_node(self):
return self.stack[-1]
def get_location(self, token, column=None):
# TODO: translated from JS... is it right?
return (token.location if (not column or column == 0) else
{'line': token.location['line'], 'column': column})
def get_tags(self, node):
tags = []
tags_node = node.get_single('Tags')
if not tags_node:
return tags
for token in tags_node.get_tokens('TagLine'):
tags += [{'type': 'Tag',
'location': self.get_location(token, tag_item['column']),
'name': tag_item['text']} for tag_item in token.matched_items]
return tags
def get_table_rows(self, node):
rows = [{'type': 'TableRow',
'location': self.get_location(token),
'cells': self.get_cells(token)} for token in node.get_tokens('TableRow')]
self.ensure_cell_count(rows)
return rows
def ensure_cell_count(self, rows):
if not rows:
return
cell_count = len(rows[0]['cells'])
for row in rows:
if len(row['cells']) != cell_count:
raise AstBuilderException("inconsistent cell count within the table",
row['location'])
def get_cells(self, table_row_token):
return [{'type': 'TableCell',
'location': self.get_location(table_row_token, cell_item['column']),
'value': cell_item['text']} for cell_item in table_row_token.matched_items]
def get_description(self, node):
return node.get_single('Description')
def get_steps(self, node):
return node.get_items('Step')
def transform_node(self, node):
if node.rule_type == 'Step':
step_line = node.get_token('StepLine')
step_argument = None
if node.get_single('DataTable'):
step_argument = node.get_single('DataTable')
elif node.get_single('DocString'):
step_argument = node.get_single('DocString')
return self.reject_nones({
'type': node.rule_type,
'location': self.get_location(step_line),
'keyword': step_line.matched_keyword,
'text': step_line.matched_text,
'argument': step_argument
})
elif node.rule_type == 'DocString':
separator_token = node.get_tokens('DocStringSeparator')[0]
content_type = separator_token.matched_text
line_tokens = node.get_tokens('Other')
content = '\n'.join([t.matched_text for t in line_tokens])
return self.reject_nones({
'type': node.rule_type,
'location': self.get_location(separator_token),
'contentType': content_type,
'content': content
})
elif node.rule_type == 'DataTable':
rows = self.get_table_rows(node)
return self.reject_nones({
'type': node.rule_type,
'location': rows[0]['location'],
'rows': rows,
})
elif node.rule_type == 'Background':
background_line = node.get_token('BackgroundLine')
description = self.get_description(node)
steps = self.get_steps(node)
return self.reject_nones({
'type': node.rule_type,
'location': self.get_location(background_line),
'keyword': background_line.matched_keyword,
'name': background_line.matched_text,
'description': description,
'steps': steps
})
elif node.rule_type == 'Scenario_Definition':
tags = self.get_tags(node)
scenario_node = node.get_single('Scenario')
if scenario_node:
scenario_line = scenario_node.get_token('ScenarioLine')
description = self.get_description(scenario_node)
steps = self.get_steps(scenario_node)
return self.reject_nones({
'type': scenario_node.rule_type,
'tags': tags,
'location': self.get_location(scenario_line),
'keyword': scenario_line.matched_keyword,
'name': scenario_line.matched_text,
'description': description,
'steps': steps
})
else:
scenario_outline_node = node.get_single('ScenarioOutline')
if not scenario_outline_node:
raise RuntimeError('Internal grammar error')
scenario_outline_line = scenario_outline_node.get_token('ScenarioOutlineLine')
description = self.get_description(scenario_outline_node)
steps = self.get_steps(scenario_outline_node)
examples = scenario_outline_node.get_items('Examples_Definition')
return self.reject_nones({
'type': scenario_outline_node.rule_type,
'tags': tags,
'location': self.get_location(scenario_outline_line),
'keyword': scenario_outline_line.matched_keyword,
'name': scenario_outline_line.matched_text,
'description': description,
'steps': steps,
'examples': examples
})
elif node.rule_type == 'Examples_Definition':
tags = self.get_tags(node)
examples_node = node.get_single('Examples')
examples_line = examples_node.get_token('ExamplesLine')
description = self.get_description(examples_node)
rows = self.get_table_rows(examples_node)
return self.reject_nones({
'type': examples_node.rule_type,
'tags': tags,
'location': self.get_location(examples_line),
'keyword': examples_line.matched_keyword,
'name': examples_line.matched_text,
'description': description,
'tableHeader': rows[0],
'tableBody': rows[1:]
})
elif node.rule_type == 'Description':
line_tokens = node.get_tokens('Other')
# Trim trailing empty lines
last_non_empty = next(i for i, j in reversed(list(enumerate(line_tokens)))
if j.matched_text)
description = '\n'.join([token.matched_text for token in
line_tokens[:last_non_empty + 1]])
return description
elif node.rule_type == 'Feature':
header = node.get_single('Feature_Header')
if not header:
return
tags = self.get_tags(header)
feature_line = header.get_token('FeatureLine')
if not feature_line:
return
background = node.get_single('Background')
scenario_definitions = node.get_items('Scenario_Definition')
description = self.get_description(header)
language = feature_line.matched_gherkin_dialect
return self.reject_nones({
'type': node.rule_type,
'tags': tags,
'location': self.get_location(feature_line),
'language': language,
'keyword': feature_line.matched_keyword,
'name': feature_line.matched_text,
'description': description,
'background': background,
'scenarioDefinitions': scenario_definitions,
'comments': self.comments
})
else:
return node
def reject_nones(self, values):
return {k: v for k, v in values.items() if v is not None} # only None should be rejected
|
|
################################################################################
# Home page.
################################################################################
# Import Statements.
import sys
from PyQt4 import QtGui, QtCore
from core.repository import MovieRepository
from core.repository import Wait
from core.utils import StringUtils
from core.domainobjects import MovieModel
import core.Constants as Constants
from ui.home.MovieTable import MovieTable
from ui.home.AddMovieRequest import AddMovieRequest
import ui.DefaultComponents
################################################################################
# Global Declarations
mdb = MovieRepository()
################################################################################
class MovieManager(QtGui.QMainWindow):
def __init__(self, parent=None):
super(MovieManager, self).__init__(parent)
self.createWidget()
self.statusBar().showMessage("Manage Movies Collection", 100000)
self.setWindowTitle("Chiya(Paridhi) also Called Puiyan | Ki Dukan")
        self.setGeometry(200, 500, 500, 500)
self.setWindowIcon(QtGui.QIcon("icons/windowicon.png"))
def createWidget(self):
#central view
self.centerContainer = CentralWidget(self)
#add toolbar
MovieManagerToolBar().create(self)
def refreshTableView(self, movies=[]):
self.centerContainer.refresh(movies)
def addNewRequestClicked(self):
self.centerContainer.addNewRequestClicked()
def showSearchWidget(self):
self.centerContainer.showSearchWidget()
def updateView(self, movieModel):
self.centerContainer.updateView(movieModel)
def show_view(self):
self.show()
################################################################################
class CentralWidget(QtGui.QWidget):
def __init__(self, mainWindow):
super(CentralWidget, self).__init__()
self.create(mainWindow)
def create(self, mainWindow):
self.movieTable = MovieTable(mainWindow)
#add data to movie table
movies = mdb.loadAllMovies()
self.refresh(movies)
vbox = QtGui.QVBoxLayout()
self.searchWidget = SearchWidget(mainWindow)
vbox.addWidget(self.searchWidget)
vbox.addWidget(self.movieTable)
hbox = QtGui.QHBoxLayout()
#widget to add and update movies
self.addMovieRequest = AddMovieRequest(mainWindow)
hbox.addWidget(self.addMovieRequest)
self.addMovieRequest.hide()
vbox.addLayout(hbox)
self.setLayout(vbox)
mainWindow.setCentralWidget(self)
def addNewRequestClicked(self):
self.addMovieRequest.showAddView()
def updateView(self, movieModel):
self.addMovieRequest.showUpdateView(movieModel)
def refresh(self, movies = []):
self.movieTable.updateData(movies)
def showSearchWidget(self):
self.searchWidget.showView()
def searchForMovie(self):
        print "search clicked"
################################################################################
class MovieManagerToolBar:
def create(self, mainWindow):
toolbar = mainWindow.addToolBar("Home Tools")
toolbar.setStyleSheet("border:1;"
"color : black;")
# Add New Request
newRequest = QtGui.QAction(QtGui.QIcon("icons/addmovie.png"),
"Add New Request", toolbar)
newRequest.setShortcut("Ctrl+N")
newRequest.setStatusTip("Add new Movie")
mainWindow.connect(newRequest, QtCore.SIGNAL('triggered()'),
mainWindow.addNewRequestClicked)
toolbar.addAction(newRequest)
#search box
search = QtGui.QAction(QtGui.QIcon("icons/search.png"),
"Search by Name", toolbar)
search.setShortcut("Ctrl+F")
search.setStatusTip("Search for Movie")
mainWindow.connect(search, QtCore.SIGNAL("triggered()"),
mainWindow.showSearchWidget)
toolbar.addAction(search)
# Add Separator
toolbar.addSeparator()
#exit action
exit = QtGui.QAction(QtGui.QIcon("icons/exit.png"), "Exit", toolbar)
exit.setShortcut("Ctrl+Q")
exit.setStatusTip("Exit application")
mainWindow.connect(exit, QtCore.SIGNAL("triggered()"),
QtCore.SLOT("close()"))
toolbar.addAction(exit)
#prepare menubar
menubar = mainWindow.menuBar()
fileMenu = menubar.addMenu("&Manager")
fileMenu.addAction(newRequest)
fileMenu.addSeparator()
fileMenu.addAction(exit)
searchMenu = menubar.addMenu("&Search")
searchMenu.addAction(search)
################################################################################
class SearchWidget(QtGui.QFrame):
def __init__(self, mainWindow):
super(SearchWidget, self).__init__()
self.mainWindow = mainWindow
self.createWidget()
self.setStyleSheet(Constants.WIDGET_STYLE_WITH_BORDER)
def createWidget(self):
hbox = QtGui.QHBoxLayout(self)
hbox.addStretch(1)
#create box
self.searchBox = QtGui.QLineEdit(self)
self.connect(self.searchBox,
QtCore.SIGNAL("cursorPositionChanged (int,int)"),
self.searchButtonClicked)
self.searchBox.setSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
label = ui.DefaultComponents.SystemLabel(self, "Search By Movie Name")
searchButton = ui.DefaultComponents.SystemButton(self, " Search ")
searchButton.connect(searchButton,
QtCore.SIGNAL("clicked()"),
self.searchButtonClicked)
cancelButton = ui.DefaultComponents.SystemButton(self, " Cancel ")
cancelButton.connect(cancelButton,
QtCore.SIGNAL("clicked()"),
self.cancelButtonClicked)
hbox.addWidget(label)
hbox.addWidget(self.searchBox, 2)
hbox.addWidget(searchButton, 1)
hbox.addWidget(cancelButton, 1)
self.setLayout(hbox)
self.hide()
def showView(self):
self.show()
self.searchBox.clear()
def searchButtonClicked(self):
nameToSearch = self.searchBox.text()
movies = mdb.findMatchingMovies(str(nameToSearch))
self.mainWindow.refreshTableView(movies)
def cancelButtonClicked(self):
self.hide()
movies = mdb.loadAllMovies()
self.mainWindow.refreshTableView(movies)
def updateCurrentIndexFor(self, itemText):
index = self.findText(itemText)
self.setCurrentIndex(index)
################################################################################
|
|
"""Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <conradlee@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Martino Sorbaro <martino.sorbaro@ed.ac.uk>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For large
    datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
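# Not part of the public API: a minimal usage sketch of estimate_bandwidth on a
# small synthetic blob; quantile controls how many neighbour distances are averaged.
def _demo_estimate_bandwidth():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 5])
    return estimate_bandwidth(X, quantile=0.3, n_samples=50, random_state=0)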
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()['radius']
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
return tuple(my_mean), len(points_within)
completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
n_jobs=1):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
        Points used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
        operation terminates (for that seed point), if it has not converged yet.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
.. versionadded:: 0.17
Parallel Execution using *n_jobs*.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None,\
got %f" % bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=n_jobs)(
delayed(_mean_shift_single_seed)
(seed, X, nbrs, max_iter) for seed in seeds)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i] is not None:
center_intensity_dict[all_res[i][0]] = all_res[i][1]
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy \
or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f,"
" using data points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
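# Not part of the public API: a minimal sketch of the binning performed by
# get_bin_seeds -- points are snapped to a grid of spacing bin_size and only
# bins holding at least min_bin_freq points survive as seeds.
def _demo_get_bin_seeds():
    X = np.array([[1.0, 1.0], [1.1, 0.9], [5.0, 5.0]])
    return get_bin_seeds(X, bin_size=1.0, min_bin_freq=2)  # ~ array([[1., 1.]])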
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
        Bandwidth used in the flat kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will tend
    towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, n_jobs=1):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all, n_jobs=self.n_jobs)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
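# Not part of the estimator: a minimal usage sketch of the MeanShift class above
# on synthetic data (bandwidth and sample values are illustrative only).
def _demo_mean_shift_estimator():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(30, 2), rng.randn(30, 2) + 10])
    ms = MeanShift(bandwidth=2.0, bin_seeding=True)
    ms.fit(X)
    return ms.cluster_centers_, ms.predict([[0.0, 0.0], [10.0, 10.0]])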
|
|
import acm
import ael
import datetime
from datetime import date
import re
import time
import HTI_Util
import HTI_ExcelReport2
import csv
import os
import win32com.client
import HTI_FeedTrade_EDD_Util
ttSaveToFile = "Check this to save the report instead of showing it. Much faster than excel."
ttCSV = "Check this to export the report in CSV format"
ttFileName = "File name and path of the report. YYYYMMDD in the file name will be replaced by the valuation date."
ttInclExpiredIns = "Check this to show expired instrument positions in the report."
col_ptyid = 0
col_alias = 1
col_fullname = 2
col_fullname2 = 3
col_lei = 4
col_red_code = 5
col_attention = 6
col_hostid = 7
col_address = 8
col_address2 = 9
col_zipcode = 10
col_city = 11
col_country = 12
col_contact = 13
col_telephone = 14
col_fax = 15
col_telex = 16
col_email = 17
col_bic = 18
col_bis_status = 19
col_business_status_chlnbr = 20
col_legal_form_chlnbr = 21
col_document_type_chlnbr = 22
col_document_date = 23
col_calcagent = 24
col_consolidate_chlnbr = 25
col_type = 26
col_issuer = 27
col_correspondent_bank = 28
col_isda_member = 29
col_netting = 30
col_not_trading = 31
col_time_zone = 32
col_relation_chlnbr = 33
col_guarantor_ptynbr = 34
col_group_limit = 35
col_rating1_chlnbr = 36
col_rating2_chlnbr = 37
col_rating3_chlnbr = 38
col_rating_agency_chlnbr = 39
col_risk_country_chlnbr = 40
col_rating_chlnbr = 41
col_rating_date = 42
col_cls = 43
col_bankruptcy = 44
col_failure_to_pay = 45
col_obl_acceleration = 46
col_obl_default = 47
col_repudiation = 48
col_restructuring = 49
col_price_access_control = 50
col_issuer_accounts = 51
col_free1_chlnbr = 52
col_free2_chlnbr = 53
col_free3_chlnbr = 54
col_free4_chlnbr = 55
col_free5_chlnbr = 56
col_free1 = 57
col_free2 = 58
col_free3 = 59
col_free4 = 60
col_free5 = 61
col_AE_ACES_GRP_CDE = 62
col_BBG_COMPANY = 63
col_BBG_ISSUER = 64
col_BBG_PARTY_CODE = 65
col_BBG_PARTY_CODE1 = 66
col_Client_Id = 67
col_Commission_Rate = 68
col_MSS_ACC_CODE = 69
col_MSS_SI_TRADE_TYPE = 70
col_Pty_All_Time_LTV = 71
col_Pty_Trigger_LTV = 72
col_Pty_Terminate_LTV = 73
col_VCON_PARTY_CODE = 74
# added by Louis Wong on 20170315
col_Indpendent_Amt_Rate = 75
col_Rounding_Amount = 76
col_Rounding_Amount_Ccy = 77
col_SBL_Code = 78
col_Threshold = 79
col_Threshold_Ccy = 80
col_Minimum_Transfer_Amount = 81
col_Minimum_Transfer_Amount_Ccy = 82
def disable_variables(variables, enable = 0):
for i in variables:
for j in ael_variables:
if i == j[0]:
j[9] = enable
def cb(index, fieldValues):
global ael_variables
if ael_variables[index][0] == 'saveToFile':
disable_variables(('party_fileName',), fieldValues[index])
return fieldValues
def getAllBUParties():
parties = []
acmServer = acm.FACMServer()
strUserGroup = acmServer.User().UserGroup().Name()
if strUserGroup == 'ED':
strUserGroup = 'EDD'
if strUserGroup in ('Sales', 'Traders', 'Trading Admin'):
strUserGroup = 'FICC'
needfilterBU = HTI_Util.isFilterBU(strUserGroup)
for pty in ael.Party.select("type = 'Client'"):
if pty.hostid == strUserGroup or needfilterBU == False:
parties.append(pty.display_id())
for pty in ael.Party.select("type = 'Broker'"):
if pty.hostid == strUserGroup or needfilterBU == False:
parties.append(pty.display_id())
for pty in ael.Party.select("type = 'Counterparty'"):
if pty.hostid == strUserGroup or needfilterBU == False:
parties.append(pty.display_id())
parties.sort()
return parties
ael_variables = [['counterparties', 'Counterparties', 'string', getAllBUParties(), '', 0, 1, 'Counterparties', None, 1], \
['portfolio', 'Portfolio', 'string', HTI_Util.getAllPortfolios(), None, 0, 1, 'Portfolio', None, 1], \
['title', 'Report title', 'string', None, 'Prime Brokerage Party Definition Report', 1, 0, 'Report Title', None, 1], \
['saveToFile', 'Save to File', 'int', [1, 0], 0, 0, 0, ttSaveToFile, cb, 1], \
['saveToCSV', 'CSV format', 'int', [1, 0], 0, 0, 0, ttCSV, None, 1], \
['party_fileName', 'Party File name', 'string', None, 'c:\\temp\\Party_YYYYMMDD.csv', 0, 0, ttFileName, None, 1]]
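# Hedged note on the ael_variables rows above (inferred from this script rather
# than vendor documentation): element 7 holds the tooltip text, element 8 an
# optional callback, and element 9 the enabled flag, which is what
# disable_variables() toggles when the 'saveToFile' checkbox changes via cb().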
def ael_main(dict):
acmServer = acm.FACMServer()
strUserGroup = acmServer.User().UserGroup().Name()
if strUserGroup == 'ED':
strUserGroup = 'EDD'
if strUserGroup in ('Sales', 'Traders', 'Trading Admin'):
strUserGroup = 'FICC'
needfilterBU = HTI_Util.isFilterBU(strUserGroup)
gen_Party_Report(dict, strUserGroup, needfilterBU)
print 'Finished'
def get_filename(filename, reportDate):
filename = filename.replace('.xlsx', '')
filename = filename.replace('.xls', '')
filename = filename.replace('.csv', '')
filename = filename.replace("YYYY", reportDate.to_string('%Y'))
filename = filename.replace("MM", reportDate.to_string('%m'))
filename = filename.replace("DD", reportDate.to_string('%d'))
filename = filename + '.xlsx'
return filename
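# Illustrative example (assumed date value, not executed here): for a report
# date of 2017-03-15, get_filename('c:\\temp\\Party_YYYYMMDD.csv', report_date)
# strips the .csv/.xls/.xlsx extension, substitutes the YYYY/MM/DD tokens and
# returns 'c:\\temp\\Party_20170315.xlsx'.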
def gen_Party_Report(dict, strUserGroup, needfilterBU):
title = dict['title']
saveToFile = dict['saveToFile']
fileName = dict['party_fileName']
saveToCSV = dict['saveToCSV']
counterparties = dict['counterparties']
pty_list = ''
for pty in counterparties:
if pty_list == '':
pty_list = "'" + pty + "'"
else:
pty_list = pty_list + ",'" + pty + "'"
# Portfolios
portfolios = dict['portfolio']
portfolioList2 = []
pf_list = ''
portfolioList2.extend(portfolios)
for port in portfolioList2:
prfid = port
pfarr = []
pPf = ael.Portfolio[prfid]
HTI_FeedTrade_EDD_Util.getChildPortfolio(pPf, pfarr)
if len(pfarr) > 0:
for pf in pfarr:
if len(pf_list) != 0:
pf_list = pf_list + ','
pf_list = pf_list + "'" + pf + "'"
else:
if len(pf_list) != 0:
pf_list = pf_list + ','
pf_list = pf_list + "'" + prfid + "'"
rptContent = []
strSql = """
select distinct(t.counterparty_ptynbr) 'counterparty_ptynbr'
into temp_trade_party
from trade t, portfolio pf, instrument i
where t.status not in ('Void', 'Simulated')
and t.prfnbr = pf.prfnbr
and t.insaddr = i.insaddr and i.instype = 'TotalReturnSwap'
@portfolio_list
select p.ptyid, p.ptyid2 'alias', p.fullname, p.fullname2, p.legal_entity_id 'LEI', p.red_code 'RED', p.attention, p.hostid,
p.address, p.address2, p.zipcode, p.city, p.country, p.contact1 'contact', p.telephone, p.fax, p.telex, p.email, p.swift 'BIC',
p.bis_status, p.business_status_chlnbr, p.legal_form_chlnbr, p.document_type_chlnbr, p.document_date, p.calcagent,
p.consolidate_chlnbr, p.type, p.issuer, p.correspondent_bank, p.isda_member, p.netting, p.not_trading, p.time_zone,
p.relation_chlnbr, p.guarantor_ptynbr, p.group_limit, p.rating1_chlnbr, p.rating2_chlnbr, p.rating3_chlnbr, p.rating_agency_chlnbr,
p.risk_country_chlnbr, p.rating_chlnbr, p.rating_date, p.cls 'CLS Bank Type',
p.bankruptcy, p.failure_to_pay, p.obl_acceleration, p.obl_default, p.repudiation, p.restructuring,
p.price_access_control, p.issuer_accounts,
p.free1_chlnbr, p.free2_chlnbr, p.free3_chlnbr, p.free4_chlnbr, p.free5_chlnbr,
p.free1, p.free2, p.free3, p.free4, p.free5,
add_info(p, 'AE_ACES_GRP_CDE') 'AE_ACES_GRP_CDE',
add_info(p, 'BBG_COMPANY') 'BBG_COMPANY',
add_info(p, 'BBG_ISSUER') 'BBG_ISSUER',
add_info(p, 'BBG_PARTY_CODE') 'BBG_PARTY_CODE',
add_info(p, 'BBG_PARTY_CODE1') 'BBG_PARTY_CODE1',
add_info(p, 'Client Id') 'Client Id',
add_info(p, 'Commission Rate (%)') 'Commission Rate (%)',
add_info(p, 'MSS_ACC_CODE') 'MSS_ACC_CODE',
add_info(p, 'MSS_SI_TRADE_TYPE') 'MSS_SI_TRADE_TYPE',
add_info(p, 'Pty All Time LTV') 'Pty All Time LTV',
add_info(p, 'Pty Trigger LTV') 'Pty Trigger LTV',
add_info(p, 'Pty Terminate LTV') 'Pty Terminate LTV',
add_info(p, 'VCON_PARTY_CODE') 'VCON_PARTY_CODE',
add_info(p, 'Independent Amt (%)') 'Independent Amt (%)',
add_info(p, 'Rounding Amount') 'Rounding_Amount',
add_info(p, 'Rounding Amount Ccy') 'Rounding_Amount_Ccy',
add_info(p, 'SBL Code') 'SBL_Code',
add_info(p, 'Threshold') 'Threshold',
add_info(p, 'Threshold Ccy') 'Threshold_Ccy',
add_info(p, 'Min Transfer Amount') 'Minimum_Transfer_Amount',
add_info(p, 'Min Tfr Amt Ccy') 'Min_Tfr_Amt_Ccy'
from party p, temp_trade_party t
@party_list
and (p.hostid = '@strUserGroup' or (@needfilterBU))
and p.ptynbr = t.counterparty_ptynbr
"""
if pty_list == '':
strSql = strSql.replace('@party_list', 'where 1=1')
else:
strSql = strSql.replace('@party_list', 'where p.ptyid in (' + pty_list + ')')
if pf_list == '':
strSql = strSql.replace('@portfolio_list', ' ')
else:
strSql = strSql.replace('@portfolio_list', 'and pf.prfid in (' + pf_list + ')')
strSql = strSql.replace('@strUserGroup', strUserGroup)
if needfilterBU == True:
strSql = strSql.replace('@needfilterBU', '1=2')
else:
strSql = strSql.replace('@needfilterBU', '1=1')
print strSql
rs = ael.asql(strSql)
trdnbr = None
columns, buf = rs
cnt = 0
rptContent = []
for table in buf:
for row in table:
business_status_chlnbr = row[col_business_status_chlnbr]
legal_form_chlnbr = row[col_legal_form_chlnbr]
document_type_chlnbr = row[col_document_type_chlnbr]
consolidate_chlnbr = row[col_consolidate_chlnbr]
relation_chlnbr = row[col_relation_chlnbr]
rating1_chlnbr = row[col_rating1_chlnbr]
rating2_chlnbr = row[col_rating2_chlnbr]
rating3_chlnbr = row[col_rating3_chlnbr]
rating_agency_chlnbr = row[col_rating_agency_chlnbr]
risk_country_chlnbr = row[col_risk_country_chlnbr]
rating_chlnbr = row[col_rating_chlnbr]
free1_chlnbr = row[col_free1_chlnbr]
free2_chlnbr = row[col_free2_chlnbr]
free3_chlnbr = row[col_free3_chlnbr]
free4_chlnbr = row[col_free4_chlnbr]
free5_chlnbr = row[col_free5_chlnbr]
chl_business_status = acm.FChoiceList[business_status_chlnbr]
if chl_business_status != None:
entry_business_status = chl_business_status.Name()
else:
entry_business_status = ''
chl_legal_form = acm.FChoiceList[legal_form_chlnbr]
if chl_legal_form != None:
entry_legal_form = chl_legal_form.Name()
else:
entry_legal_form = ''
chl_document_type = acm.FChoiceList[document_type_chlnbr]
if chl_document_type != None:
entry_document_type = chl_document_type.Name()
else:
entry_document_type = ''
chl_consolidate = acm.FChoiceList[consolidate_chlnbr]
if chl_consolidate != None:
entry_consolidate = chl_consolidate.Name()
else:
entry_consolidate = ''
chl_relation = acm.FChoiceList[relation_chlnbr]
if chl_relation != None:
entry_relation = chl_relation.Name()
else:
entry_relation = ''
chl_rating1 = acm.FChoiceList[rating1_chlnbr]
if chl_rating1 != None:
entry_rating1 = chl_rating1.Name()
else:
entry_rating1 = ''
chl_rating2 = acm.FChoiceList[rating2_chlnbr]
if chl_rating2 != None:
entry_rating2 = chl_rating2.Name()
else:
entry_rating2 = ''
chl_rating3 = acm.FChoiceList[rating3_chlnbr]
if chl_rating3 != None:
entry_rating3 = chl_rating3.Name()
else:
entry_rating3 = ''
chl_rating_agency = acm.FChoiceList[rating_agency_chlnbr]
if chl_rating_agency != None:
entry_rating_agency = chl_rating_agency.Name()
else:
entry_rating_agency = ''
chl_risk_country = acm.FChoiceList[risk_country_chlnbr]
if chl_risk_country != None:
entry_risk_country = chl_risk_country.Name()
else:
entry_risk_country = ''
chl_rating = acm.FChoiceList[rating_chlnbr]
if chl_rating != None:
entry_rating = chl_rating.Name()
else:
entry_rating = ''
chl_free1 = acm.FChoiceList[free1_chlnbr]
if chl_free1 != None:
entry_free1 = chl_free1.Name()
else:
entry_free1 = ''
chl_free2 = acm.FChoiceList[free2_chlnbr]
if chl_free2 != None:
entry_free2 = chl_free2.Name()
else:
entry_free2 = ''
chl_free3 = acm.FChoiceList[free3_chlnbr]
if chl_free3 != None:
entry_free3 = chl_free3.Name()
else:
entry_free3 = ''
chl_free4 = acm.FChoiceList[free4_chlnbr]
if chl_free4 != None:
entry_free4 = chl_free4.Name()
else:
entry_free4 = ''
chl_free5 = acm.FChoiceList[free5_chlnbr]
if chl_free5 != None:
entry_free5 = chl_free5.Name()
else:
entry_free5 = ''
if row[col_guarantor_ptynbr] == 0:
guarantor = ''
else:
acm_guarantor = acm.FParty[row[col_guarantor_ptynbr]]
if acm_guarantor == None:
guarantor = ''
else:
guarantor = acm_guarantor.Name()
r_row = [
row[col_ptyid],
row[col_alias],
row[col_fullname],
row[col_fullname2],
row[col_lei],
row[col_red_code],
row[col_attention],
row[col_hostid],
row[col_address],
row[col_address2],
row[col_zipcode],
row[col_city],
row[col_country],
row[col_contact],
row[col_telephone],
row[col_fax],
row[col_telex],
row[col_email],
row[col_bic],
row[col_bis_status],
entry_business_status,
entry_legal_form,
entry_document_type,
row[col_document_date],
row[col_calcagent],
entry_consolidate,
row[col_type],
to_yesno(row[col_issuer]),
to_yesno(row[col_correspondent_bank]),
to_yesno(row[col_isda_member]),
to_yesno(row[col_netting]),
to_yesno(row[col_not_trading]),
row[col_time_zone],
entry_relation,
guarantor,
to_yesno(row[col_group_limit]),
entry_rating1,
entry_rating2,
entry_rating3,
entry_rating_agency,
entry_risk_country,
entry_rating,
row[col_rating_date],
row[col_cls],
to_yesno(row[col_bankruptcy]),
to_yesno(row[col_failure_to_pay]),
to_yesno(row[col_obl_acceleration]),
to_yesno(row[col_obl_default]),
to_yesno(row[col_repudiation]),
to_yesno(row[col_restructuring]),
to_yesno(row[col_price_access_control]),
to_yesno(row[col_issuer_accounts]),
entry_free1,
entry_free2,
entry_free3,
entry_free4,
entry_free5,
row[col_free1],
row[col_free2],
row[col_free3],
row[col_free4],
row[col_free5],
row[col_AE_ACES_GRP_CDE],
row[col_BBG_COMPANY],
row[col_BBG_ISSUER],
row[col_BBG_PARTY_CODE],
row[col_BBG_PARTY_CODE1],
row[col_Client_Id],
row[col_Commission_Rate],
row[col_MSS_ACC_CODE],
row[col_MSS_SI_TRADE_TYPE],
row[col_Pty_All_Time_LTV],
row[col_Pty_Trigger_LTV],
row[col_Pty_Terminate_LTV],
row[col_VCON_PARTY_CODE],
row[col_Indpendent_Amt_Rate],
row[col_Rounding_Amount],
row[col_Rounding_Amount_Ccy],
row[col_SBL_Code],
row[col_Threshold],
row[col_Threshold_Ccy],
row[col_Minimum_Transfer_Amount],
row[col_Minimum_Transfer_Amount_Ccy]
]
rptContent.append(r_row)
header = ['Name', 'Alias', 'Full Name', 'Additional Full Name', 'LEI', 'RED', 'Attention', 'Host ID', \
'Address', 'Additional Address', 'zip Code', 'City', 'Country', 'Contact', 'Telephone', 'Fax', 'Telex', 'Email', 'BIC', \
'Bis_status', 'Business Status', 'Legal Form', 'Document Type', 'Document Date', 'Calc Agent', \
'Consolidate', 'Type', 'Issuer', 'Correspondent Bank', 'ISDA Member', 'Cash Flow Netting', 'Not Trading', 'Time Zone', \
'Relation', 'Guarantor', 'Group Limit', 'Rating1', 'Rating2', 'Rating3', 'Rating Agency', \
'Country of Risk', 'User Rating', 'Rating Date', 'CLS Bank Type', \
'Bankruptcy', 'Failure To Pay', 'Obl Acceleration', 'Obl Default', 'Repudiation', 'Restructuring', \
'Price Access Control', 'Issuer Accounts', \
'Free Choice List 1', 'Free Choice List 2', 'Free Choice List 3', 'Free Choice List 4', 'Free Choice List 5', \
'Free Text 1', 'Free Text 2', 'Free Text 3', 'Free Text 4', 'Free Text 5', \
'AE_ACES_GRP_CDE', \
'BBG_COMPANY', \
'BBG_ISSUER', \
'BBG_PARTY_CODE', \
'BBG_PARTY_CODE1', \
'Client Id', \
'Commission Rate (%)', \
'MSS_ACC_CODE', \
'MSS_SI_TRADE_TYPE', \
'Pty All Time LTV', \
'Pty Trigger LTV', \
'Pty Terminate LTV', \
'VCON_PARTY_CODE', \
'Independent Amount (%)', \
'Rounding Amount', \
'Rounding Amount Ccy', \
'SBL Code', \
'Threshold', \
'Threshold Ccy', \
'Min Transfer Amount', \
'Min Tfr Amt Ccy']
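# Build the Excel report object whenever the output will be shown on screen or
# saved as .xlsx; the CSV branch below writes the header and rptContent rows
# directly with csv.writer instead.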
if not saveToFile or not saveToCSV:
font = HTI_ExcelReport2.Font()
font.bold = True
reportData = HTI_ExcelReport2.ReportData()
reportData.newSheet = True
reportData.headerText = header
report = PartyReportLayout(title)
reportData.rows = rptContent
report.addReportData(reportData, {'SUM': [], 'COL_TEXT': [], 'CUSTOM_TEXT': {'COL': [], 'TEXT': []}})
if not saveToFile:
report.show()
else:
if saveToCSV:
reportDate = ael.date_today()
fileName = fileName.replace('.xlsx', '')
fileName = fileName.replace('.xls', '')
fileName = fileName.replace('.csv', '')
fileName = fileName.replace("YYYY", reportDate.to_string('%Y'))
fileName = fileName.replace("MM", reportDate.to_string('%m'))
fileName = fileName.replace("DD", reportDate.to_string('%d'))
fileName = fileName + '.csv'
if os.path.exists(fileName):
os.remove(fileName)
csvData = []
csvData.append(header)
csvData = csvData + rptContent
try:
try:
outPutFile = open(fileName,'wb')
csvWriter = csv.writer(outPutFile, delimiter=',', quotechar='"')
for row in csvData:
csvWriter.writerow(row)
outPutFile.flush()
finally:
outPutFile.close()
except Exception, msg:
print str(msg)
raise Exception(msg)
else:
# save Excel Format
reportDate = ael.date_today()
fileName = fileName.replace('.xlsx', '')
fileName = fileName.replace('.xls', '')
fileName = fileName.replace('.csv', '')
fileName = fileName.replace("YYYY", reportDate.to_string('%Y'))
fileName = fileName.replace("MM", reportDate.to_string('%m'))
fileName = fileName.replace("DD", reportDate.to_string('%d'))
fileName = fileName + '.xlsx'
if os.path.exists(fileName):
os.remove(fileName)
report.saveNoQuit(fileName)
def to_account(accnbr):
acm_acc = acm.FAccount[accnbr]
if acm_acc != None:
return acm_acc.Name()
else:
return ''
def to_instrument(insaddr):
acm_ins = acm.FInstrument[insaddr]
if acm_ins != None:
return acm_ins.Name()
else:
return ''
def to_party(ptynbr):
acm_pty = acm.FParty[ptynbr]
if acm_pty != None:
return acm_pty.Name()
else:
return ''
def to_choice(choice_seqnbr):
chl = acm.FChoiceList[choice_seqnbr]
if chl != None:
entry = chl.Name()
else:
entry = ''
return entry
def to_yesno(result_bool):
if result_bool == True:
return 'Yes'
else:
return 'No'
def to_partyAliasType(alias_type):
acm_party_alias_type = acm.FPartyAliasType[alias_type]
if acm_party_alias_type != None:
return acm_party_alias_type.Name()
else:
return ''
class PartyReportLayout(HTI_ExcelReport2.CommonLayoutReport):
title = ''
def __init__(self, title):
self.title = title
HTI_ExcelReport2.CommonLayoutReport.__init__(self)
def reportHeader(self, currentRow, reportIndex, excelApp):
excelApp.Cells(currentRow[self.ROW], 1).Value = self.title
excelApp.Cells(currentRow[self.ROW], 1).Font.Bold = True
currentRow[self.ROW] = currentRow[self.ROW] + 1
HTI_ExcelReport2.CommonLayoutReport.reportHeader(self, currentRow, reportIndex, excelApp)
def reportEnd(self, excelApp):
HTI_ExcelReport2.CommonLayoutReport.reportEnd(self, excelApp)
excelApp.Cells(1, 1).Select()
|
|
from copy import deepcopy
import inspect
import pydoc
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.util._test_decorators import (
async_mark,
skip_if_no,
)
import pandas as pd
from pandas import (
DataFrame,
Series,
date_range,
timedelta_range,
)
import pandas._testing as tm
class TestDataFrameMisc:
def test_getitem_pop_assign_name(self, float_frame):
s = float_frame["A"]
assert s.name == "A"
s = float_frame.pop("A")
assert s.name == "A"
s = float_frame.loc[:, "B"]
assert s.name == "B"
s2 = s.loc[:]
assert s2.name == "B"
def test_get_axis(self, float_frame):
f = float_frame
assert f._get_axis_number(0) == 0
assert f._get_axis_number(1) == 1
assert f._get_axis_number("index") == 0
assert f._get_axis_number("rows") == 0
assert f._get_axis_number("columns") == 1
assert f._get_axis_name(0) == "index"
assert f._get_axis_name(1) == "columns"
assert f._get_axis_name("index") == "index"
assert f._get_axis_name("rows") == "index"
assert f._get_axis_name("columns") == "columns"
assert f._get_axis(0) is f.index
assert f._get_axis(1) is f.columns
with pytest.raises(ValueError, match="No axis named"):
f._get_axis_number(2)
with pytest.raises(ValueError, match="No axis.*foo"):
f._get_axis_name("foo")
with pytest.raises(ValueError, match="No axis.*None"):
f._get_axis_name(None)
with pytest.raises(ValueError, match="No axis named"):
f._get_axis_number(None)
def test_column_contains_raises(self, float_frame):
with pytest.raises(TypeError, match="unhashable type: 'Index'"):
float_frame.columns in float_frame
def test_tab_completion(self):
# DataFrame whose columns are identifiers shall have them in __dir__.
df = DataFrame([list("abcd"), list("efgh")], columns=list("ABCD"))
for key in list("ABCD"):
assert key in dir(df)
assert isinstance(df.__getitem__("A"), Series)
# DataFrame whose first-level columns are identifiers shall have
# them in __dir__.
df = DataFrame(
[list("abcd"), list("efgh")],
columns=pd.MultiIndex.from_tuples(list(zip("ABCD", "EFGH"))),
)
for key in list("ABCD"):
assert key in dir(df)
for key in list("EFGH"):
assert key not in dir(df)
assert isinstance(df.__getitem__("A"), DataFrame)
def test_not_hashable(self):
empty_frame = DataFrame()
df = DataFrame([1])
msg = "'DataFrame' objects are mutable, thus they cannot be hashed"
with pytest.raises(TypeError, match=msg):
hash(df)
with pytest.raises(TypeError, match=msg):
hash(empty_frame)
def test_column_name_contains_unicode_surrogate(self):
# GH 25509
colname = "\ud83d"
df = DataFrame({colname: []})
# this should not crash
assert colname not in dir(df)
assert df.columns[0] == colname
def test_new_empty_index(self):
df1 = DataFrame(np.random.randn(0, 3))
df2 = DataFrame(np.random.randn(0, 3))
df1.index.name = "foo"
assert df2.index.name is None
def test_get_agg_axis(self, float_frame):
cols = float_frame._get_agg_axis(0)
assert cols is float_frame.columns
idx = float_frame._get_agg_axis(1)
assert idx is float_frame.index
msg = r"Axis must be 0 or 1 \(got 2\)"
with pytest.raises(ValueError, match=msg):
float_frame._get_agg_axis(2)
def test_empty(self, float_frame, float_string_frame):
empty_frame = DataFrame()
assert empty_frame.empty
assert not float_frame.empty
assert not float_string_frame.empty
# corner case
df = DataFrame({"A": [1.0, 2.0, 3.0], "B": ["a", "b", "c"]}, index=np.arange(3))
del df["A"]
assert not df.empty
def test_len(self, float_frame):
assert len(float_frame) == len(float_frame.index)
# single block corner case
arr = float_frame[["A", "B"]].values
expected = float_frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(arr, expected)
def test_axis_aliases(self, float_frame):
f = float_frame
# reg name
expected = f.sum(axis=0)
result = f.sum(axis="index")
tm.assert_series_equal(result, expected)
expected = f.sum(axis=1)
result = f.sum(axis="columns")
tm.assert_series_equal(result, expected)
def test_class_axis(self):
# GH 18147
# no exception and no empty docstring
assert pydoc.getdoc(DataFrame.index)
assert pydoc.getdoc(DataFrame.columns)
def test_series_put_names(self, float_string_frame):
series = float_string_frame._series
for k, v in series.items():
assert v.name == k
def test_empty_nonzero(self):
df = DataFrame([1, 2, 3])
assert not df.empty
df = DataFrame(index=[1], columns=[1])
assert not df.empty
df = DataFrame(index=["a", "b"], columns=["c", "d"]).dropna()
assert df.empty
assert df.T.empty
empty_frames = [
DataFrame(),
DataFrame(index=[1]),
DataFrame(columns=[1]),
DataFrame({1: []}),
]
for df in empty_frames:
assert df.empty
assert df.T.empty
def test_with_datetimelikes(self):
df = DataFrame(
{
"A": date_range("20130101", periods=10),
"B": timedelta_range("1 day", periods=10),
}
)
t = df.T
result = t.dtypes.value_counts()
expected = Series({np.dtype("object"): 10})
tm.assert_series_equal(result, expected)
def test_deepcopy(self, float_frame):
cp = deepcopy(float_frame)
series = cp["A"]
series[:] = 10
for idx, value in series.items():
assert float_frame["A"][idx] != value
def test_inplace_return_self(self):
# GH 1893
data = DataFrame(
{"a": ["foo", "bar", "baz", "qux"], "b": [0, 0, 1, 1], "c": [1, 2, 3, 4]}
)
def _check_f(base, f):
result = f(base)
assert result is None
# -----DataFrame-----
# set_index
f = lambda x: x.set_index("a", inplace=True)
_check_f(data.copy(), f)
# reset_index
f = lambda x: x.reset_index(inplace=True)
_check_f(data.set_index("a"), f)
# drop_duplicates
f = lambda x: x.drop_duplicates(inplace=True)
_check_f(data.copy(), f)
# sort
f = lambda x: x.sort_values("b", inplace=True)
_check_f(data.copy(), f)
# sort_index
f = lambda x: x.sort_index(inplace=True)
_check_f(data.copy(), f)
# fillna
f = lambda x: x.fillna(0, inplace=True)
_check_f(data.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
_check_f(data.copy(), f)
# rename
f = lambda x: x.rename({1: "foo"}, inplace=True)
_check_f(data.copy(), f)
# -----Series-----
d = data.copy()["c"]
# reset_index
f = lambda x: x.reset_index(inplace=True, drop=True)
_check_f(data.set_index("a")["c"], f)
# fillna
f = lambda x: x.fillna(0, inplace=True)
_check_f(d.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
_check_f(d.copy(), f)
# rename
f = lambda x: x.rename({1: "foo"}, inplace=True)
_check_f(d.copy(), f)
@async_mark()
@td.check_file_leaks
async def test_tab_complete_warning(self, ip, frame_or_series):
# GH 16409
pytest.importorskip("IPython", minversion="6.0.0")
from IPython.core.completer import provisionalcompleter
if frame_or_series is DataFrame:
code = "from pandas import DataFrame; obj = DataFrame()"
else:
code = "from pandas import Series; obj = Series(dtype=object)"
await ip.run_code(code)
# GH 31324 newer jedi version raises Deprecation warning;
# appears resolved 2021-02-02
with tm.assert_produces_warning(None):
with provisionalcompleter("ignore"):
list(ip.Completer.completions("obj.", 1))
def test_attrs(self):
df = DataFrame({"A": [2, 3]})
assert df.attrs == {}
df.attrs["version"] = 1
result = df.rename(columns=str)
assert result.attrs == {"version": 1}
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) setitem (no copy)
@pytest.mark.parametrize("allows_duplicate_labels", [True, False, None])
def test_set_flags(self, allows_duplicate_labels, frame_or_series):
obj = DataFrame({"A": [1, 2]})
key = (0, 0)
if frame_or_series is Series:
obj = obj["A"]
key = 0
result = obj.set_flags(allows_duplicate_labels=allows_duplicate_labels)
if allows_duplicate_labels is None:
# We don't update when it's not provided
assert result.flags.allows_duplicate_labels is True
else:
assert result.flags.allows_duplicate_labels is allows_duplicate_labels
# We made a copy
assert obj is not result
# We didn't mutate obj
assert obj.flags.allows_duplicate_labels is True
# But we didn't copy data
result.iloc[key] = 0
assert obj.iloc[key] == 0
# Now we do copy.
result = obj.set_flags(
copy=True, allows_duplicate_labels=allows_duplicate_labels
)
result.iloc[key] = 10
assert obj.iloc[key] == 0
def test_constructor_expanddim(self):
# GH#33628 accessing _constructor_expanddim should not raise NotImplementedError
# GH38782 pandas has no container higher than DataFrame (two-dim), so
# DataFrame._constructor_expanddim doesn't make sense, so it was removed.
df = DataFrame()
msg = "'DataFrame' object has no attribute '_constructor_expanddim'"
with pytest.raises(AttributeError, match=msg):
df._constructor_expanddim(np.arange(27).reshape(3, 3, 3))
@skip_if_no("jinja2")
def test_inspect_getmembers(self):
# GH38740
df = DataFrame()
with tm.assert_produces_warning(None):
inspect.getmembers(df)
|
|
"""Setup script for Bokeh."""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENCE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import os, platform, re, shutil, site, subprocess, sys, time
from os.path import abspath, dirname, exists, isdir, join, realpath, relpath
try:
import colorama
def bright(text): return "%s%s%s" % (colorama.Style.BRIGHT, text, colorama.Style.RESET_ALL)
def dim(text): return "%s%s%s" % (colorama.Style.DIM, text, colorama.Style.RESET_ALL)
def white(text): return "%s%s%s" % (colorama.Fore.WHITE, text, colorama.Style.RESET_ALL)
def blue(text): return "%s%s%s" % (colorama.Fore.BLUE, text, colorama.Style.RESET_ALL)
def red(text): return "%s%s%s" % (colorama.Fore.RED, text, colorama.Style.RESET_ALL)
def green(text): return "%s%s%s" % (colorama.Fore.GREEN, text, colorama.Style.RESET_ALL)
def yellow(text): return "%s%s%s" % (colorama.Fore.YELLOW, text, colorama.Style.RESET_ALL)
except ImportError:
def bright(text): return text
def dim(text): return text
def white(text) : return text
def blue(text) : return text
def red(text) : return text
def green(text) : return text
def yellow(text) : return text
if 'nightly' in sys.argv:
from setuptools import setup
sys.argv.remove('nightly')
with open('__conda_version__.txt', 'r') as f:
version = f.read().rstrip()
vers_file = os.path.join('bokeh', '__conda_version__.py')
with open(vers_file, 'w') as f:
f.write("conda_version=" + "'" + version + "'")
else:
from distutils.core import setup
from distutils import dir_util
# Our own imports
import versioneer
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------
ROOT = dirname(realpath(__file__))
BOKEHJSROOT = join(ROOT, 'bokehjs')
BOKEHJSBUILD = join(BOKEHJSROOT, 'build')
CSS = join(BOKEHJSBUILD, 'css')
JS = join(BOKEHJSBUILD, 'js')
SERVER = join(ROOT, 'bokeh/server')
if sys.version_info[0] < 3:
input = raw_input
# -----------------------------------------------------------------------------
# Local utilities
# -----------------------------------------------------------------------------
versioneer.versionfile_source = 'bokeh/_version.py'
versioneer.versionfile_build = 'bokeh/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'Bokeh-' # dirname like 'myproject-1.2.0'
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
package_data = []
def package_path(path, filters=()):
if not os.path.exists(path):
raise RuntimeError("packaging non-existent path: %s" % path)
elif os.path.isfile(path):
package_data.append(relpath(path, 'bokeh'))
else:
for path, dirs, files in os.walk(path):
path = relpath(path, 'bokeh')
for f in files:
if not filters or f.endswith(filters):
package_data.append(join(path, f))
# You can't install Bokeh in a virtualenv because of the lack of getsitepackages()
# This is an open bug: https://github.com/pypa/virtualenv/issues/355
# And this is an intended PR to fix it: https://github.com/pypa/virtualenv/pull/508
# Workaround to fix our issue: https://github.com/bokeh/bokeh/issues/378
def getsitepackages():
"""Returns a list containing all global site-packages directories
(and possibly site-python)."""
_is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32
_is_pypy = hasattr(sys, 'pypy_version_info')
_is_jython = sys.platform[:4] == 'java'
prefixes = [sys.prefix, sys.exec_prefix]
sitepackages = []
seen = set()
for prefix in prefixes:
if not prefix or prefix in seen:
continue
seen.add(prefix)
if sys.platform in ('os2emx', 'riscos') or _is_jython:
sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
elif _is_pypy:
sitedirs = [os.path.join(prefix, 'site-packages')]
elif sys.platform == 'darwin' and prefix == sys.prefix:
if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python
sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
os.path.join(prefix, "Extras", "lib", "python")]
else: # any other Python distros on OSX work this way
sitedirs = [os.path.join(prefix, "lib",
"python" + sys.version[:3], "site-packages")]
elif os.sep == '/':
sitedirs = [os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python"),
]
lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
if (os.path.exists(lib64_dir) and
os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
if _is_64bit:
sitedirs.insert(0, lib64_dir)
else:
sitedirs.append(lib64_dir)
try:
# sys.getobjects only available in --with-pydebug build
sys.getobjects
sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
except AttributeError:
pass
# Debian-specific dist-packages directories:
sitedirs.append(os.path.join(prefix, "local/lib",
"python" + sys.version[:3],
"dist-packages"))
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[:3],
"dist-packages"))
if sys.version_info[0] >= 3:
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[0],
"dist-packages"))
sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
else:
sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
for sitedir in sitedirs:
sitepackages.append(os.path.abspath(sitedir))
sitepackages = [p for p in sitepackages if os.path.isdir(p)]
return sitepackages
def check_remove_bokeh_install(site_packages):
bokeh_path = join(site_packages, "bokeh")
if not (exists(bokeh_path) and isdir(bokeh_path)):
return
prompt = "Found existing bokeh install: %s\nRemove it? [y|N] " % bokeh_path
val = input(prompt)
if val == "y":
print("Removing old bokeh install...", end=" ")
try:
shutil.rmtree(bokeh_path)
print("Done")
except (IOError, OSError):
print("Unable to remove old bokeh at %s, exiting" % bokeh_path)
sys.exit(-1)
else:
print("Not removing old bokeh install")
sys.exit(1)
def remove_bokeh_pth(path_file):
if exists(path_file):
try:
os.remove(path_file)
except (IOError, OSError):
print("Unable to remove old path file at %s, exiting" % path_file)
sys.exit(-1)
return True
return False
BUILD_EXEC_FAIL_MSG = bright(red("Failed.")) + """
ERROR: subprocess.Popen(%r) failed to execute:
%s
Have you run `npm install` from the bokehjs subdirectory?
For more information, see the Dev Guide:
http://bokeh.pydata.org/en/latest/docs/dev_guide.html
"""
BUILD_FAIL_MSG = bright(red("Failed.")) + """
ERROR: 'gulp build' returned error message:
%s
"""
BUILD_SIZE_FAIL_MSG = """
ERROR: could not determine sizes:
%s
"""
BUILD_SUCCESS_MSG = bright(green("Success!")) + """
Build output:
%s"""
def build_js():
print("Building BokehJS... ", end="")
sys.stdout.flush()
os.chdir('bokehjs')
if sys.platform != "win32":
cmd = [join('node_modules', '.bin', 'gulp'), 'build']
else:
cmd = [join('node_modules', '.bin', 'gulp.cmd'), 'build']
t0 = time.time()
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
print(BUILD_EXEC_FAIL_MSG % (cmd, e))
sys.exit(1)
finally:
os.chdir('..')
result = proc.wait()
t1 = time.time()
if result != 0:
indented_msg = ""
msg = proc.stderr.read().decode('ascii', errors='ignore')
msg = "\n".join([" " + x for x in msg.split("\n")])
print(BUILD_FAIL_MSG % red(msg))
sys.exit(1)
indented_msg = ""
msg = proc.stdout.read().decode('ascii', errors='ignore')
pat = re.compile(r"(\[.*\]) (.*)", re.DOTALL)
for line in msg.strip().split("\n"):
stamp, txt = pat.match(line).groups()
indented_msg += " " + dim(green(stamp)) + " " + dim(txt) + "\n"
msg = "\n".join([" " + x for x in msg.split("\n")])
print(BUILD_SUCCESS_MSG % indented_msg)
print("Build time: %s" % bright(yellow("%0.1f seconds" % (t1-t0))))
print()
print("Build artifact sizes:")
try:
blddir = join("bokehjs", "build")
bkjs_size = os.stat(join(blddir, "js", "bokeh.js")).st_size / 2**10
bkjs_min_size = os.stat(join(blddir, "js", "bokeh.min.js")).st_size / 2**10
bkcss_size = os.stat(join(blddir, "css", "bokeh.css")).st_size / 2**10
bkcss_min_size = os.stat(join(blddir, "css", "bokeh.min.css")).st_size / 2**10
print(" - bokeh.js : %6.1f KB" % bkjs_size)
print(" - bokeh.css : %6.1f KB" % bkcss_size)
print(" - bokeh.min.js : %6.1f KB" % bkjs_min_size)
print(" - bokeh.min.css : %6.1f KB" % bkcss_min_size)
except Exception as e:
print(BUILD_SIZE_FAIL_MSG % e)
def install_js():
target_jsdir = join(SERVER, 'static', 'js')
target_cssdir = join(SERVER, 'static', 'css')
STATIC_ASSETS = [
join(JS, 'bokeh.js'),
join(JS, 'bokeh.min.js'),
join(CSS, 'bokeh.css'),
join(CSS, 'bokeh.min.css'),
]
if not all([exists(a) for a in STATIC_ASSETS]):
print("""
ERROR: Cannot install BokehJS: files missing in `./bokehjs/build`.
Please build BokehJS by running setup.py with the `--build_js` option.
Dev Guide: http://bokeh.pydata.org/docs/dev_guide.html#bokehjs.
""")
sys.exit(1)
if exists(target_jsdir):
shutil.rmtree(target_jsdir)
shutil.copytree(JS, target_jsdir)
if exists(target_cssdir):
shutil.rmtree(target_cssdir)
shutil.copytree(CSS, target_cssdir)
def clean():
print("Removing prior-built items...", end=" ")
build_dir = 'build/lib/bokeh'
if os.path.exists(build_dir):
dir_util.remove_tree(build_dir)
for root, dirs, files in os.walk('.'):
for item in files:
if item.endswith('.pyc'):
os.remove(os.path.join(root, item))
print("Done")
def get_user_jsargs():
print("""
Bokeh includes a JavaScript library (BokehJS) that has its own
build process. How would you like to handle BokehJS:
1) build and install fresh BokehJS
2) install last built BokehJS from bokeh/bokehjs/build
""")
mapping = {"1": True, "2": False}
value = input("Choice? ")
while value not in mapping:
print("Input '%s' not understood. Valid choices: 1, 2\n" % value)
value = input("Choice? ")
return mapping[value]
def parse_jsargs():
options = ('install', 'develop', 'sdist', 'egg_info', 'build')
installing = any(arg in sys.argv for arg in options)
if '--build_js' in sys.argv:
if not installing:
print("Error: Option '--build_js' only valid with 'install', 'develop', 'sdist', or 'build', exiting.")
sys.exit(1)
jsbuild = True
sys.argv.remove('--build_js')
elif '--install_js' in sys.argv:
# Note that --install_js can be used by itself (without sdist/install/develop)
jsbuild = False
sys.argv.remove('--install_js')
else:
if installing:
jsbuild = get_user_jsargs()
else:
jsbuild = False
return jsbuild
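# Hedged usage examples (assumed command lines, consistent with the option
# handling in parse_jsargs and the checks further below):
#   python setup.py install --build_js    # build BokehJS under bokehjs/, then install
#   python setup.py develop --install_js  # reuse the previously built BokehJS
#   python setup.py --install_js          # developer shortcut: only copy built JS/CSS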
# -----------------------------------------------------------------------------
# Main script
# -----------------------------------------------------------------------------
# Aliases for build_js and install_js
for i in range(len(sys.argv)):
if sys.argv[i] == '--build-js':
sys.argv[i] = '--build_js'
if sys.argv[i] == '--install-js':
sys.argv[i] = '--install_js'
# Set up this checkout or source archive with the right BokehJS files.
if sys.version_info[:2] < (2, 6):
raise RuntimeError("Bokeh requires python >= 2.6")
# Lightweight command to only install js and nothing more - developer mode
if len(sys.argv) == 2 and sys.argv[-1] == '--install_js':
install_js()
sys.exit(0)
# check for 'sdist' and make sure we always do a BokehJS build when packaging
if "sdist" in sys.argv:
if "--install_js" in sys.argv:
print("Removing '--install_js' incompatible with 'sdist'")
sys.argv.remove('--install_js')
if "--build_js" not in sys.argv:
print("Adding '--build_js' required for 'sdist'")
sys.argv.append('--build_js')
# check for package install, set jsinstall to False to skip prompt
jsinstall = True
if not exists(join(ROOT, 'MANIFEST.in')):
if "--build_js" in sys.argv or "--install_js" in sys.argv:
print("BokehJS source code is not shipped in sdist packages; "
"building/installing from the bokehjs source directory is disabled. "
"To build or develop BokehJS yourself, you must clone the full "
"Bokeh repository from https://github.com/bokeh/bokeh")
if "--build_js" in sys.argv:
sys.argv.remove('--build_js')
if "--install_js" in sys.argv:
sys.argv.remove('--install_js')
jsbuild = False
jsinstall = False
else:
jsbuild = parse_jsargs()
if jsbuild:
build_js()
if jsinstall:
install_js()
sampledata_suffixes = ('.csv', '.conf', '.gz', '.json', '.png', '.ics')
package_path(join(SERVER, 'static'))
package_path(join(SERVER, '_templates'))
package_path(join(ROOT, 'bokeh', '_templates'))
package_path(join(ROOT, 'bokeh', 'sampledata'), sampledata_suffixes)
package_path(join(ROOT, 'bokeh', 'server', 'redis.conf'))
scripts = ['bokeh-server', 'websocket_worker.py']
if '--user' in sys.argv:
site_packages = site.USER_SITE
else:
site_packages = getsitepackages()[0]
path_file = join(site_packages, "bokeh.pth")
path = abspath(dirname(__file__))
print()
if 'develop' in sys.argv:
check_remove_bokeh_install(site_packages)
with open(path_file, "w+") as f:
f.write(path)
print("Installing Bokeh for development:")
print(" - writing path '%s' to %s" % (path, path_file))
if jsinstall:
print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if jsbuild else bright(yellow("PREVIOUSLY"))))
else:
print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % yellow("PACKAGED"))
sys.exit()
elif 'clean' in sys.argv:
clean()
elif 'install' in sys.argv:
pth_removed = remove_bokeh_pth(path_file)
print("Installing Bokeh:")
if pth_removed:
print(" - removed path file at %s" % path_file)
if jsinstall:
print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if jsbuild else bright(yellow("PREVIOUSLY"))))
else:
print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % bright(yellow("PACKAGED")))
elif '--help' in sys.argv:
if jsinstall:
print("Bokeh-specific options available with 'install' or 'develop':")
print()
print(" --build_js build and install a fresh BokehJS")
print(" --install_js install only last previously built BokehJS")
else:
print("Bokeh is using PACKAGED BokehJS, located in 'bokeh.server.static'")
print()
print()
REQUIRES = [
'Flask>=0.10.1',
'Jinja2>=2.7',
'MarkupSafe>=0.18',
'Werkzeug>=0.9.1',
'greenlet>=0.4.1',
'itsdangerous>=0.21',
'python-dateutil>=2.1',
'requests>=1.2.3',
'six>=1.5.2',
'pygments>=1.6',
'pystache>=0.5.3',
'markdown>=2.3.1',
'PyYAML>=3.10',
'pyzmq>=14.3.1',
'tornado>=4.0.1',
# cli
# 'click>=3.3',
# tests
# 'pytest'
# 'mock>=1.0.1',
'colorama>=0.2.7'
]
if sys.version_info[:2] == (2, 6):
REQUIRES.append('argparse>=1.1')
# if sys.platform != "win32":
# REQUIRES.append('redis>=2.7.6')
if platform.python_implementation() != "PyPy":
# You need to install PyPy's fork of NumPy to make it work:
# pip install git+https://bitbucket.org/pypy/numpy.git
# Also, pandas does not yet work with PyPy.
REQUIRES.extend([
'numpy>=1.7.1',
'pandas>=0.11.0'
])
_version = versioneer.get_version()
_cmdclass = versioneer.get_cmdclass()
setup(
name='bokeh',
version=_version,
cmdclass=_cmdclass,
packages=[
'bokeh',
'bokeh.models',
'bokeh.models.tests',
'bokeh.models.widgets',
'bokeh.charts',
'bokeh.charts.builder',
'bokeh.charts.builder.tests',
'bokeh.charts.tests',
'bokeh.compat',
'bokeh.compat.mplexporter',
'bokeh.compat.mplexporter.renderers',
'bokeh.crossfilter',
'bokeh.sampledata',
'bokeh.server',
'bokeh.server.models',
'bokeh.server.storage',
'bokeh.server.tests',
'bokeh.server.utils',
'bokeh.server.views',
'bokeh.server.websocket',
'bokeh.server.zmq',
'bokeh.sphinxext',
'bokeh.tests',
'bokeh.transforms',
'bokeh.util',
'bokeh.util.tests',
'bokeh.validation',
],
package_data={'bokeh': package_data},
author='Continuum Analytics',
author_email='info@continuum.io',
url='http://github.com/bokeh/bokeh',
description='Statistical and novel interactive HTML plots for Python',
license='New BSD',
scripts=scripts,
zip_safe=False,
install_requires=REQUIRES
)
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from contextlib import contextmanager
import os
import shutil
import StringIO
import unittest
import zipfile
from zincutils.zinc_analysis import ZincAnalysis
from zincutils.zinc_analysis_element import ZincAnalysisElement
from zincutils.zinc_analysis_parser import ZincAnalysisParser
from zincutils.contextutil import Timer, environment_as, temporary_dir
# Setting this environment variable tells the test to generate new test data (see below).
_TEST_DATA_SOURCE_ENV_VAR = 'ZINC_ANALYSIS_TEST_DATA_SOURCE'
@contextmanager
def _temp_test_dir(zipfile_name):
"""Yields a test directory containing the files in the specified zipfile."""
zipfile_path = os.path.join(os.path.dirname(__file__), 'testdata', zipfile_name)
with temporary_dir() as tmpdir:
zf = zipfile.ZipFile(zipfile_path, 'r')
zf.extractall(tmpdir)
yield tmpdir
class ZincAnalysisTestBase(unittest.TestCase):
def setUp(self):
self.maxDiff = None
self.total_time = 0
def _time(self, work, msg):
with Timer() as timer:
ret = work()
elapsed = timer.elapsed
print('%s in %f seconds.' % (msg, elapsed))
self.total_time += elapsed
return ret
class ZincAnalysisTestSimple(ZincAnalysisTestBase):
# Test a simple example that is non-trivial, but still small enough to verify manually.
def test_simple(self):
with environment_as(ZINCUTILS_SORTED_ANALYSIS='1'):
def get_test_analysis_path(name):
return os.path.join(os.path.dirname(__file__), 'testdata', 'simple', name)
def get_analysis_text(name):
with open(get_test_analysis_path(name), 'r') as fp:
return fp.read()
def parse_analyis(name):
return ZincAnalysisParser().parse_from_path(get_test_analysis_path(name))
def analysis_to_string(analysis):
buf = StringIO.StringIO()
analysis.write(buf)
return buf.getvalue()
full_analysis = parse_analyis('simple.analysis')
analysis_splits = full_analysis.split([
[b'/src/pants/examples/src/scala/org/pantsbuild/example/hello/welcome/Welcome.scala'],
[b'/src/pants/examples/src/scala/org/pantsbuild/example/hello/exe/Exe.scala'],
])
self.assertEquals(len(analysis_splits), 2)
def compare_split(i):
expected_filename = 'simple_split{0}.analysis'.format(i)
# First compare as objects. This verifies that __eq__ works, but is weaker than the
# text comparison because in some cases there can be small text differences that don't
# affect logical equivalence.
expected_analyis = parse_analyis(expected_filename)
self.assertTrue(expected_analyis.is_equal_to(analysis_splits[i]))
# Then compare as text. In this simple case we expect them to be byte-for-byte equal.
expected = get_analysis_text(expected_filename)
actual = analysis_to_string(analysis_splits[i])
self.assertMultiLineEqual(expected, actual)
compare_split(0)
compare_split(1)
# Now merge and check that we get what we started with.
merged_analysis = ZincAnalysis.merge(analysis_splits)
# Check that they compare as objects.
self.assertTrue(full_analysis.is_equal_to(merged_analysis))
# Check that they compare as text.
expected = get_analysis_text('simple.analysis')
actual = analysis_to_string(merged_analysis)
self.assertMultiLineEqual(expected, actual)
# Now check rebasing.
orig = iter(get_analysis_text('simple.analysis').splitlines(True))
expected_rebased = get_analysis_text('simple.rebased.analysis')
buf = StringIO.StringIO()
ZincAnalysisParser().rebase(orig, buf, b'/src/pants', b'$PANTS_HOME')
rebased = buf.getvalue()
self.assertMultiLineEqual(expected_rebased, rebased)
# And rebasing+filtering.
orig = iter(get_analysis_text('simple.analysis').splitlines(True))
expected_filtered_rebased = get_analysis_text('simple.rebased.filtered.analysis')
buf = StringIO.StringIO()
ZincAnalysisParser().rebase(orig, buf, b'/src/pants', b'$PANTS_HOME',
b'/Library/Java/JavaVirtualMachines/jdk1.8.0_40.jdk')
filtered_rebased = buf.getvalue()
self.assertMultiLineEqual(expected_filtered_rebased, filtered_rebased)
class ZincAnalysisTestComplex(ZincAnalysisTestBase):
# Test on complex analysis files.
def test_complex(self):
with environment_as(ZINCUTILS_SORTED_ANALYSIS='1'):
if os.environ.get(_TEST_DATA_SOURCE_ENV_VAR):
print('\n>>>>>>>>> {} set: skipping test, generating canonical test data instead.'.format(
_TEST_DATA_SOURCE_ENV_VAR))
self._generate_testworthy_splits()
return
parser = ZincAnalysisParser()
with _temp_test_dir('complex.zip') as testdir:
# Parse analysis files.
analysis_files = [os.path.join(testdir, f)
for f in os.listdir(testdir)
if f.endswith(b'.analysis') and not f.endswith(b'.merged.analysis')]
num_analyses = len(analysis_files)
def parse(f):
return parser.parse_from_path(f)
analyses = self._time(lambda: [parse(f) for f in analysis_files],
'Parsed %d files' % num_analyses)
# Write them back out individually.
writeout_dir = os.path.join(testdir, b'write')
os.mkdir(writeout_dir)
def write(file_name, analysis):
outpath = os.path.join(writeout_dir, file_name)
analysis.write_to_path(outpath)
def _write_all():
for analysis_file, analysis in zip(analysis_files, analyses):
write(os.path.basename(analysis_file), analysis)
self._time(_write_all, 'Wrote %d files' % num_analyses)
# Merge them.
merged_analysis = self._time(lambda: ZincAnalysis.merge(analyses),
'Merged %d files' % num_analyses)
# Write merged analysis to file.
merged_analysis_path = os.path.join(writeout_dir, b'merged.analysis')
self._time(lambda: merged_analysis.write_to_path(merged_analysis_path),
'Wrote merged analysis to %s' % merged_analysis_path)
# Read merged analysis from file.
merged_analysis2 = self._time(lambda: parser.parse_from_path(merged_analysis_path),
'Read merged analysis from %s' % merged_analysis_path)
# Read the expected merged analysis from file.
expected_merged_analysis_path = os.path.join(testdir, b'all.merged.analysis')
expected_merged_analysis = self._time(
lambda: parser.parse_from_path(expected_merged_analysis_path),
'Read expected merged analysis from %s' % expected_merged_analysis_path)
# Compare the merge result with the re-read one.
diffs = merged_analysis.diff(merged_analysis2)
self.assertTrue(merged_analysis.is_equal_to(merged_analysis2), ''.join(
[unicode(diff) for diff in diffs]))
# Compare the merge result with the expected.
diffs = expected_merged_analysis.diff(merged_analysis2)
self.assertTrue(expected_merged_analysis.is_equal_to(merged_analysis2), ''.join(
[unicode(diff) for diff in diffs]))
# Split the merged analysis back to individual analyses.
sources_per_analysis = [a.stamps.sources.keys() for a in analyses]
split_analyses = self._time(lambda: merged_analysis2.split(
sources_per_analysis, catchall=True),
'Split back into %d analyses' % num_analyses)
self.assertEquals(num_analyses + 1, len(split_analyses)) # +1 for the catchall.
catchall_analysis = split_analyses[-1]
# We expect an empty catchall.
self.assertEquals(0, len(catchall_analysis.stamps.sources))
# Diff the original analyses and the split ones.
# Write the split to the tmpdir, for ease of debugging on failure.
splits_dir = os.path.join(testdir, b'splits')
os.mkdir(splits_dir)
for analysis_file, analysis, split_analysis in zip(analysis_files, analyses, split_analyses):
outfile_path = os.path.join(splits_dir, os.path.basename(analysis_file))
split_analysis.write_to_path(outfile_path)
diffs = analysis.diff(split_analysis)
# Note that it's not true in general that merging splits and then splitting them back out
# should yield the exact same analysis. Some small differences can happen. For example:
# splitA may have an external src->class dependency on a class from a source file in
# splitB; when merging, that becomes a src->src dependency, and when splitting back out
# that src dependency becomes a dependency on a representative class, which may not be
# the original class splitA depended on.
#
# This comparison works here only because we've taken care to prepare test data for which
# it should hold. See _generate_testworthy_splits below for how to do so.
self.assertTrue(analysis.is_equal_to(split_analysis),
''.join([unicode(diff) for diff in diffs]))
print('Total time: %f seconds' % self.total_time)
def _generate_testworthy_splits(self):
"""Take some non-canonical analysis files and generate test data from them.
The resulting files will be "canonical". That is, merging and re-splitting them will yield
the same files. Therefore the resulting files can be used as test data (after eyeballing them
to ensure no pathologies).
An easy way to generate input for this function is to run a scala compile on some targets using
--strategy=isolated. Then .pants.d/compile/jvm/scala/isolated-analysis/ will contain a bunch
of per-target analysis files.
Those files can be anonymized (see anonymize_analysis.py), ideally with some non-ASCII words
thrown in (as explained there), and then you can point this function to those anonymized
files by setting ZINC_ANALYSIS_TEST_DATA_SOURCE=<dir> in the environment and running this test.
Note: Yes, it's slightly problematic that we're using the very code we're testing to generate
the test inputs. Hence the need to spot-check for obvious pathologies.
"""
original_splits_dir = os.environ.get(_TEST_DATA_SOURCE_ENV_VAR)
canonical_dir = os.path.join(original_splits_dir, 'canonical')
if os.path.exists(canonical_dir):
shutil.rmtree(canonical_dir, True)
os.mkdir(canonical_dir)
original_split_filenames = [f.decode('utf-8') for f in os.listdir(original_splits_dir)]
original_splits_files = [os.path.join(original_splits_dir, f)
for f in original_split_filenames if f.endswith('.analysis')]
parser = ZincAnalysisParser()
original_split_analyses = [parser.parse_from_path(f) for f in original_splits_files]
merged_analysis = ZincAnalysis.merge(original_split_analyses)
merged_analysis.write_to_path(os.path.join(canonical_dir, 'all.merged.analysis'))
# Split the merged analysis back to individual analyses.
sources_per_analysis = [a.stamps.sources.keys() for a in original_split_analyses]
split_analyses = merged_analysis.split(sources_per_analysis, os.path.dirname(__file__))
for original_split_file, split_analysis in zip(original_splits_files, split_analyses):
outpath = os.path.join(canonical_dir, os.path.basename(original_split_file))
split_analysis.write_to_path(outpath)
print('Wrote canonical analysis data to {}'.format(canonical_dir))
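# Hedged usage sketch for regenerating test data (assumed invocation; the
# docstring of _generate_testworthy_splits describes the intent):
#   ZINC_ANALYSIS_TEST_DATA_SOURCE=/path/to/anonymized/analyses \
#       python -m pytest -k test_complex <this test module>
# With the variable set, test_complex skips its assertions and instead writes
# canonical per-target splits plus all.merged.analysis into .../analyses/canonical.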
class ZincAnalysisTestLarge(ZincAnalysisTestBase):
# Test on a couple of large files, primarily for benchmarking.
# Note that we don't set ZINCUTILS_SORTED_ANALYSIS='1', as we want to benchmark production
# performance (without unnecessary sorting).
def test_large(self):
parser = ZincAnalysisParser()
with _temp_test_dir('large.zip') as testdir:
print('Operating in test dir: {}'.format(testdir))
# Parse analysis files.
analysis_file_names = [b'downstream.analysis', b'upstream.analysis']
analysis_files = [os.path.join(testdir, f) for f in analysis_file_names]
def msg(prefix):
return '{0} [{1}]'.format(prefix, ', '.join(analysis_file_names))
analyses = self._time(lambda: [parser.parse_from_path(f) for f in analysis_files],
msg('Parsed'))
# Write them back out individually.
writeout_dir = os.path.join(testdir, b'write')
os.mkdir(writeout_dir)
def write(file_name, analysis):
outpath = os.path.join(writeout_dir, file_name)
analysis.write_to_path(outpath)
def _write_all():
for analysis_file, analysis in zip(analysis_files, analyses):
write(os.path.basename(analysis_file), analysis)
self._time(_write_all, msg('Wrote'))
# Merge them.
merged_analysis = self._time(lambda: ZincAnalysis.merge(analyses), msg('Merged'))
# Write merged analysis to file.
merged_analysis_path = os.path.join(testdir, b'merged.analysis')
self._time(lambda: merged_analysis.write_to_path(merged_analysis_path), msg('Wrote merge of'))
# Split the merged analysis.
sources_per_analysis = [a.stamps.sources.keys() for a in analyses]
self._time(lambda: merged_analysis.split(sources_per_analysis, catchall=True), msg('Split'))
# Rebase the merged analysis.
rebased_analysis_path = os.path.join(testdir, b'rebased.merged.analysis')
self._time(lambda: ZincAnalysisParser().rebase_from_path(merged_analysis_path, rebased_analysis_path,
b'/Users/kermit/src/acme.web', b'$PANTS_HOME'), msg('Rebase'))
print('Total time: %f seconds' % self.total_time)
class ZincAnalysisTestSorting(ZincAnalysisTestBase):
class FakeElement(ZincAnalysisElement):
headers = ('foo', )
def test_sort(self):
unsorted_arg = { '{}'.format(n): ['f1', 'f2', 'f0'] for n in range(9, -1, -1) }
expected = ('foo:\n30 items\n' +
''.join('{n} -> f0\n{n} -> f1\n{n} -> f2\n'.format(n=n) for n in range(0, 10)))
def do_test(elem):
# The values of a single key should be sorted in memory.
for n in range(0, 9):
self.assertEquals(['f0', 'f1', 'f2'], elem.args[0]['{}'.format(n)])
# And the keys themselves (and their values) should be sorted when writing.
buf = StringIO.StringIO()
elem.write(buf)
output = buf.getvalue()
self.assertMultiLineEqual(expected, output)
always_sorted_elem = self.FakeElement([unsorted_arg], always_sort=True)
do_test(always_sorted_elem)
# Unsorted elements must still sort if the environment var is set.
with environment_as(ZINCUTILS_SORTED_ANALYSIS='1'):
unsorted_elem = self.FakeElement([unsorted_arg])
do_test(unsorted_elem)
|
|
import tempfile
import os.path
from stat import S_IXUSR
from os import makedirs, stat, symlink, chmod, environ
from shutil import rmtree
from galaxy.tools.deps import DependencyManager, INDETERMINATE_DEPENDENCY
from galaxy.tools.deps.resolvers.galaxy_packages import GalaxyPackageDependency
from galaxy.tools.deps.resolvers.modules import ModuleDependencyResolver, ModuleDependency
from galaxy.util.bunch import Bunch
from contextlib import contextmanager
from subprocess import Popen, PIPE
def test_tool_dependencies():
# Setup directories
with __test_base_path() as base_path:
for name, version, sub in [ ( "dep1", "1.0", "env.sh" ), ( "dep1", "2.0", "bin" ), ( "dep2", "1.0", None ) ]:
if sub == "bin":
p = os.path.join( base_path, name, version, "bin" )
else:
p = os.path.join( base_path, name, version )
try:
makedirs( p )
except:
pass
if sub == "env.sh":
__touch( os.path.join( p, "env.sh" ) )
dm = DependencyManager( default_base_path=base_path )
dependency = dm.find_dep( "dep1", "1.0" )
assert dependency.script == os.path.join( base_path, 'dep1', '1.0', 'env.sh' )
assert dependency.path == os.path.join( base_path, 'dep1', '1.0' )
assert dependency.version == "1.0"
dependency = dm.find_dep( "dep1", "2.0" )
assert dependency.script == None
assert dependency.path == os.path.join( base_path, 'dep1', '2.0' )
assert dependency.version == "2.0"
## Test default versions
symlink( os.path.join( base_path, 'dep1', '2.0'), os.path.join( base_path, 'dep1', 'default' ) )
dependency = dm.find_dep( "dep1", None )
assert dependency.version == "2.0"
## Test that default resolution falls back on the default package dependency
## when using the default resolver.
dependency = dm.find_dep( "dep1", "2.1" )
assert dependency.version == "2.0" # 2.0 is defined as default_version
TEST_REPO_USER = "devteam"
TEST_REPO_NAME = "bwa"
TEST_REPO_CHANGESET = "12abcd41223da"
TEST_VERSION = "0.5.9"
def test_toolshed_set_environment_requirements():
with __test_base_path() as base_path:
test_repo = __build_test_repo('set_environment')
dm = DependencyManager( default_base_path=base_path )
env_settings_dir = os.path.join(base_path, "environment_settings", TEST_REPO_NAME, TEST_REPO_USER, TEST_REPO_NAME, TEST_REPO_CHANGESET)
os.makedirs(env_settings_dir)
dependency = dm.find_dep( TEST_REPO_NAME, version=None, type='set_environment', installed_tool_dependencies=[test_repo] )
assert dependency.version is None
assert dependency.script == os.path.join(env_settings_dir, "env.sh")
def test_toolshed_package_requirements():
with __test_base_path() as base_path:
test_repo = __build_test_repo('package', version=TEST_VERSION)
dm = DependencyManager( default_base_path=base_path )
package_dir = __build_ts_test_package(base_path)
dependency = dm.find_dep( TEST_REPO_NAME, version=TEST_VERSION, type='package', installed_tool_dependencies=[test_repo] )
assert dependency.version == TEST_VERSION
assert dependency.script == os.path.join(package_dir, "env.sh")
def test_toolshed_tools_fallback_on_manual_dependencies():
with __test_base_path() as base_path:
dm = DependencyManager( default_base_path=base_path )
test_repo = __build_test_repo('package', version=TEST_VERSION)
env_path = __setup_galaxy_package_dep(base_path, "dep1", "1.0")
dependency = dm.find_dep( "dep1", version="1.0", type='package', installed_tool_dependencies=[test_repo] )
assert dependency.version == "1.0"
assert dependency.script == env_path
def test_toolshed_greater_precendence():
with __test_base_path() as base_path:
dm = DependencyManager( default_base_path=base_path )
test_repo = __build_test_repo('package', version=TEST_VERSION)
ts_package_dir = __build_ts_test_package(base_path)
gx_env_path = __setup_galaxy_package_dep(base_path, TEST_REPO_NAME, TEST_VERSION)
ts_env_path = os.path.join(ts_package_dir, "env.sh")
dependency = dm.find_dep( TEST_REPO_NAME, version=TEST_VERSION, type='package', installed_tool_dependencies=[test_repo] )
assert dependency.script != gx_env_path # Not the galaxy path, it should be the tool shed path used.
assert dependency.script == ts_env_path
def __build_ts_test_package(base_path, script_contents=''):
package_dir = os.path.join(base_path, TEST_REPO_NAME, TEST_VERSION, TEST_REPO_USER, TEST_REPO_NAME, TEST_REPO_CHANGESET)
__touch(os.path.join(package_dir, 'env.sh'), script_contents)
return package_dir
def test_module_dependency_resolver():
with __test_base_path() as temp_directory:
module_script = os.path.join(temp_directory, "modulecmd")
__write_script(module_script, '''#!/bin/sh
cat %s/example_output 1>&2;
''' % temp_directory)
with open(os.path.join(temp_directory, "example_output"), "w") as f:
# Subset of module avail from MSI cluster.
f.write('''
-------------------------- /soft/modules/modulefiles ---------------------------
JAGS/3.2.0-gcc45
JAGS/3.3.0-gcc4.7.2
ProbABEL/0.1-3
ProbABEL/0.1-9e
R/2.12.2
R/2.13.1
R/2.14.1
R/2.15.0
R/2.15.1
R/3.0.1(default)
abokia-blast/2.0.2-130524/ompi_intel
abokia-blast/2.0.2-130630/ompi_intel
--------------------------- /soft/intel/modulefiles ----------------------------
advisor/2013/update1 intel/11.1.075 mkl/10.2.1.017
advisor/2013/update2 intel/11.1.080 mkl/10.2.5.035
advisor/2013/update3 intel/12.0 mkl/10.2.7.041
''')
resolver = ModuleDependencyResolver(None, modulecmd=module_script)
module = resolver.resolve( name="R", version=None, type="package" )
assert module.module_name == "R"
assert module.module_version is None
module = resolver.resolve( name="R", version="3.0.1", type="package" )
assert module.module_name == "R"
assert module.module_version == "3.0.1"
module = resolver.resolve( name="R", version="3.0.4", type="package" )
assert module == INDETERMINATE_DEPENDENCY
def test_module_dependency():
with __test_base_path() as temp_directory:
## Create mock modulecmd script that just exports a variable
## the way modulecmd sh load would, but also validate correct
## module name and version are coming through.
mock_modulecmd = os.path.join(temp_directory, 'modulecmd')
__write_script(mock_modulecmd, '''#!/bin/sh
if [ $3 != "foomodule/1.0" ];
then
exit 1
fi
echo 'FOO="bar"'
''')
resolver = Bunch(modulecmd=mock_modulecmd)
dependency = ModuleDependency(resolver, "foomodule", "1.0")
__assert_foo_exported( dependency.shell_commands( Bunch( type="package" ) ) )
def __write_script(path, contents):
with open(path, 'w') as f:
f.write(contents)
st = stat(path)
chmod(path, st.st_mode | S_IXUSR)
def test_galaxy_dependency_object_script():
with __test_base_path() as base_path:
## Create env.sh file that just exports variable FOO and verify it
## shell_commands export it correctly.
env_path = __setup_galaxy_package_dep(base_path, TEST_REPO_NAME, TEST_VERSION, "export FOO=\"bar\"")
dependency = GalaxyPackageDependency(env_path, os.path.dirname(env_path), TEST_VERSION)
__assert_foo_exported( dependency.shell_commands( Bunch( type="package" ) ) )
def test_shell_commands_built():
## Test that dependency manager builds valid shell commands for a list of
## requirements.
with __test_base_path() as base_path:
dm = DependencyManager( default_base_path=base_path )
__setup_galaxy_package_dep( base_path, TEST_REPO_NAME, TEST_VERSION, contents="export FOO=\"bar\"" )
mock_requirements = [ Bunch(type="package", version=TEST_VERSION, name=TEST_REPO_NAME ) ]
commands = dm.dependency_shell_commands( mock_requirements )
__assert_foo_exported( commands )
def __assert_foo_exported( commands ):
command = ["bash", "-c", "%s; echo \"$FOO\"" % "".join(commands)]
process = Popen(command, stdout=PIPE)
output = process.communicate()[0].strip()
assert output == 'bar', "Command %s exports FOO as %s, not bar" % (command, output)
def __setup_galaxy_package_dep(base_path, name, version, contents=""):
dep_directory = os.path.join( base_path, name, version )
env_path = os.path.join( dep_directory, "env.sh" )
__touch( env_path, contents )
return env_path
def __touch( fname, data=None ):
dirname = os.path.dirname( fname )
if not os.path.exists( dirname ):
makedirs( dirname )
f = open( fname, 'w' )
try:
if data:
f.write( data )
finally:
f.close()
def __build_test_repo(type, version=None):
return Bunch(
owner=TEST_REPO_USER,
name=TEST_REPO_NAME,
type=type,
version=version,
tool_shed_repository=Bunch(
owner=TEST_REPO_USER,
name=TEST_REPO_NAME,
installed_changeset_revision=TEST_REPO_CHANGESET
)
)
@contextmanager
def __test_base_path():
base_path = tempfile.mkdtemp()
try:
yield base_path
finally:
rmtree(base_path)
def test_parse():
with __parse_resolvers('''<dependency_resolvers>
<tool_shed_packages />
<galaxy_packages />
</dependency_resolvers>
''') as dependency_resolvers:
assert 'ToolShed' in dependency_resolvers[0].__class__.__name__
assert 'Galaxy' in dependency_resolvers[1].__class__.__name__
with __parse_resolvers('''<dependency_resolvers>
<galaxy_packages />
<tool_shed_packages />
</dependency_resolvers>
''') as dependency_resolvers:
assert 'Galaxy' in dependency_resolvers[0].__class__.__name__
assert 'ToolShed' in dependency_resolvers[1].__class__.__name__
with __parse_resolvers('''<dependency_resolvers>
<galaxy_packages />
<tool_shed_packages />
<galaxy_packages versionless="true" />
</dependency_resolvers>
''') as dependency_resolvers:
assert not dependency_resolvers[0].versionless
assert dependency_resolvers[2].versionless
with __parse_resolvers('''<dependency_resolvers>
<galaxy_packages />
<tool_shed_packages />
<galaxy_packages base_path="/opt/galaxy/legacy/"/>
</dependency_resolvers>
''') as dependency_resolvers:
# Unspecified base_paths are both default_base_paths
assert dependency_resolvers[0].base_path == dependency_resolvers[1].base_path
# Can specify custom base path...
assert dependency_resolvers[2].base_path == "/opt/galaxy/legacy"
# ... that is different from the default.
assert dependency_resolvers[0].base_path != dependency_resolvers[2].base_path
def test_uses_tool_shed_dependencies():
with __dependency_manager('''<dependency_resolvers>
<galaxy_packages />
</dependency_resolvers>
''') as dm:
assert not dm.uses_tool_shed_dependencies()
with __dependency_manager('''<dependency_resolvers>
<tool_shed_packages />
</dependency_resolvers>
''') as dm:
assert dm.uses_tool_shed_dependencies()
def test_config_module_defaults():
with __parse_resolvers('''<dependency_resolvers>
<modules prefetch="false" />
</dependency_resolvers>
''') as dependency_resolvers:
module_resolver = dependency_resolvers[0]
assert module_resolver.module_checker.__class__.__name__ == "AvailModuleChecker"
def test_config_modulepath():
# Test that the modulepath attribute is read and split into directories.
with __parse_resolvers('''<dependency_resolvers>
<modules find_by="directory" modulepath="/opt/modules/modulefiles:/usr/local/modules/modulefiles" />
</dependency_resolvers>
''') as dependency_resolvers:
assert dependency_resolvers[0].module_checker.directories == ["/opt/modules/modulefiles", "/usr/local/modules/modulefiles"]
def test_config_MODULEPATH():
# Test reads and splits MODULEPATH if modulepath is not specified.
with __environ({"MODULEPATH": "/opt/modules/modulefiles:/usr/local/modules/modulefiles"}):
with __parse_resolvers('''<dependency_resolvers>
<modules find_by="directory" />
</dependency_resolvers>
''') as dependency_resolvers:
assert dependency_resolvers[0].module_checker.directories == ["/opt/modules/modulefiles", "/usr/local/modules/modulefiles"]
def test_config_MODULESHOME():
# Test fallback to reading MODULESHOME if neither the modulepath attribute
# nor MODULEPATH is specified.
with __environ({"MODULESHOME": "/opt/modules"}, remove="MODULEPATH"):
with __parse_resolvers('''<dependency_resolvers>
<modules find_by="directory" />
</dependency_resolvers>
''') as dependency_resolvers:
assert dependency_resolvers[0].module_checker.directories == ["/opt/modules/modulefiles"]
def test_config_module_directory_searcher():
with __parse_resolvers('''<dependency_resolvers>
<modules find_by="directory" modulepath="/opt/Modules/modulefiles" />
</dependency_resolvers>
''') as dependency_resolvers:
module_resolver = dependency_resolvers[0]
assert module_resolver.module_checker.directories == ["/opt/Modules/modulefiles"]
@contextmanager
def __environ(values, remove=[]):
"""
Modify the environment for a test, adding/updating values in dict `values` and
removing any environment variables mentioned in list `remove`.
"""
new_keys = set(values.keys()) - set(environ.keys())
old_environ = environ.copy()
try:
environ.update(values)
for to_remove in remove:
try:
del environ[to_remove]
except KeyError:
pass
yield
finally:
environ.update(old_environ)
for key in new_keys:
del environ[key]
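# A minimal usage sketch of the __environ helper above (values here are
# illustrative only; the tests in this module use it with MODULEPATH/MODULESHOME):
#
#   with __environ({"MODULEPATH": "/opt/modules/modulefiles"}, remove=["MODULESHOME"]):
#       ...  # code under test sees the modified environment
#   # on exit, os.environ is restored to its previous state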
@contextmanager
def __parse_resolvers(xml_content):
with __dependency_manager(xml_content) as dm:
yield dm.dependency_resolvers
@contextmanager
def __dependency_manager(xml_content):
with __test_base_path() as base_path:
f = tempfile.NamedTemporaryFile()
f.write(xml_content)
f.flush()
dm = DependencyManager( default_base_path=base_path, conf_file=f.name )
yield dm
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._restore_points_operations import build_create_request_initial, build_delete_request_initial, build_get_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RestorePointsOperations:
"""RestorePointsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_initial(
self,
resource_group_name: str,
restore_point_collection_name: str,
restore_point_name: str,
parameters: "_models.RestorePoint",
**kwargs: Any
) -> "_models.RestorePoint":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RestorePoint"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'RestorePoint')
request = build_create_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
restore_point_collection_name=restore_point_collection_name,
restore_point_name=restore_point_name,
content_type=content_type,
json=_json,
template_url=self._create_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RestorePoint', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{restorePointName}'} # type: ignore
@distributed_trace_async
async def begin_create(
self,
resource_group_name: str,
restore_point_collection_name: str,
restore_point_name: str,
parameters: "_models.RestorePoint",
**kwargs: Any
) -> AsyncLROPoller["_models.RestorePoint"]:
"""The operation to create the restore point. Updating properties of an existing restore point is
not allowed.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param restore_point_collection_name: The name of the restore point collection.
:type restore_point_collection_name: str
:param restore_point_name: The name of the restore point.
:type restore_point_name: str
:param parameters: Parameters supplied to the Create restore point operation.
:type parameters: ~azure.mgmt.compute.v2021_03_01.models.RestorePoint
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RestorePoint or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_03_01.models.RestorePoint]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RestorePoint"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
restore_point_collection_name=restore_point_collection_name,
restore_point_name=restore_point_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('RestorePoint', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{restorePointName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
restore_point_collection_name: str,
restore_point_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
restore_point_collection_name=restore_point_collection_name,
restore_point_name=restore_point_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{restorePointName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
restore_point_collection_name: str,
restore_point_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""The operation to delete the restore point.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param restore_point_collection_name: The name of the Restore Point Collection.
:type restore_point_collection_name: str
:param restore_point_name: The name of the restore point.
:type restore_point_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
restore_point_collection_name=restore_point_collection_name,
restore_point_name=restore_point_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{restorePointName}'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
restore_point_collection_name: str,
restore_point_name: str,
**kwargs: Any
) -> "_models.RestorePoint":
"""The operation to get the restore point.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param restore_point_collection_name: The name of the restore point collection.
:type restore_point_collection_name: str
:param restore_point_name: The name of the restore point.
:type restore_point_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RestorePoint, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_03_01.models.RestorePoint
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RestorePoint"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
restore_point_collection_name=restore_point_collection_name,
restore_point_name=restore_point_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('RestorePoint', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{restorePointName}'} # type: ignore
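# A minimal usage sketch (not part of the generated code): it assumes the versioned
# aio ComputeManagementClient exposes this operation group as `restore_points` and
# that the caller supplies a credential and subscription id.
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.compute.v2021_03_01.aio import ComputeManagementClient
#
#   async def fetch_restore_point():
#       async with ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#           return await client.restore_points.get(
#               resource_group_name="my-rg",
#               restore_point_collection_name="my-rpc",
#               restore_point_name="rp1",
#           )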
|
|
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
import tempfile
import shutil
import os
import datetime
import json
import time
import sys
import argparse
import hmac
import urllib
import fnmatch
import socket
import urllib.request
import subprocess
from http.client import HTTPConnection
from http.client import HTTPSConnection
"""
This tool builds a release from a given elasticsearch branch.
In order to execute it go in the top level directory and run:
$ python3 dev_tools/build_release.py --branch 0.90 --publish --remote origin
By default this script runs in 'dry' mode which essentially simulates a release. If the
'--publish' option is set the actual release is done. The script takes over almost all
steps necessary for a release; from a high-level point of view it does the following things:
- run prerequisite checks, i.e. check for Java 1.7 being present or S3 credentials being available as env variables
- detect the version to release from the specified branch (--branch) or the current branch
- creates a release branch & updates pom.xml and Version.java to point to a release version rather than a snapshot
- builds the artifacts and runs smoke-tests on the build zip & tar.gz files
- commits the new version and merges the release branch into the source branch
- creates a tag and pushes the commit to the specified origin (--remote)
- publishes the releases to Sonatype and S3
Once it's done it will print all the remaining steps.
Prerequisites:
- Python 3k for script execution
- Boto for S3 Upload ($ apt-get install python-boto)
- RPM for RPM building ($ apt-get install rpm)
- S3 keys exported via ENV Variables (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
"""
env = os.environ
PLUGINS = [('license', 'elasticsearch/license/latest'),
('marvel', 'elasticsearch/marvel/latest'),
('bigdesk', 'lukas-vlcek/bigdesk'),
('paramedic', 'karmi/elasticsearch-paramedic'),
('segmentspy', 'polyfractal/elasticsearch-segmentspy'),
('inquisitor', 'polyfractal/elasticsearch-inquisitor'),
('head', 'mobz/elasticsearch-head')]
LOG = env.get('ES_RELEASE_LOG', '/tmp/elasticsearch_release.log')
def log(msg):
log_plain('\n%s' % msg)
def log_plain(msg):
f = open(LOG, mode='ab')
f.write(msg.encode('utf-8'))
f.close()
def run(command, quiet=False):
log('%s: RUN: %s\n' % (datetime.datetime.now(), command))
if os.system('%s >> %s 2>&1' % (command, LOG)):
msg = ' FAILED: %s [see log %s]' % (command, LOG)
if not quiet:
print(msg)
raise RuntimeError(msg)
try:
JAVA_HOME = env['JAVA_HOME']
except KeyError:
raise RuntimeError("""
Please set JAVA_HOME in the env before running release tool
On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.7*'`""")
try:
JAVA_HOME = env['JAVA7_HOME']
except KeyError:
pass #no JAVA7_HOME - we rely on JAVA_HOME
try:
# make sure mvn3 is used if mvn3 is available
# some systems use maven 2 as default
subprocess.check_output('mvn3 --version', shell=True, stderr=subprocess.STDOUT)
MVN = 'mvn3'
except subprocess.CalledProcessError:
MVN = 'mvn'
def java_exe():
path = JAVA_HOME
return 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (path, path, path)
def verify_java_version(version):
s = os.popen('%s; java -version 2>&1' % java_exe()).read()
if ' version "%s.' % version not in s:
raise RuntimeError('got wrong version for java %s:\n%s' % (version, s))
# Verifies the java version. We guarantee that we run with Java 1.7
# If 1.7 is not available fail the build!
def verify_mvn_java_version(version, mvn):
s = os.popen('%s; %s --version 2>&1' % (java_exe(), mvn)).read()
if 'Java version: %s' % version not in s:
raise RuntimeError('got wrong java version for %s %s:\n%s' % (mvn, version, s))
# Returns the hash of the current git HEAD revision
def get_head_hash():
return os.popen('git rev-parse --verify HEAD 2>&1').read().strip()
# Returns the hash of the given tag revision
def get_tag_hash(tag):
return os.popen('git show-ref --tags %s --hash 2>&1' % (tag)).read().strip()
# Returns the name of the current branch
def get_current_branch():
return os.popen('git rev-parse --abbrev-ref HEAD 2>&1').read().strip()
verify_java_version('1.7') # we require Java 1.7 to build
verify_mvn_java_version('1.7', MVN)
# Utility that returns the name of the release branch for a given version
def release_branch(version):
return 'release_branch_%s' % version
# runs git fetch on the given remote
def fetch(remote):
run('git fetch %s' % remote)
# Creates a new release branch from the given source branch
# and rebases the source branch from the remote before creating
# the release branch. Note: This fails if the source branch
# doesn't exist on the provided remote.
def create_release_branch(remote, src_branch, release):
run('git checkout %s' % src_branch)
run('git pull --rebase %s %s' % (remote, src_branch))
run('git checkout -b %s' % (release_branch(release)))
# Reads the given file and applies the
# callback to it. If the callback changed
# a line the given file is replaced with
# the modified input.
def process_file(file_path, line_callback):
fh, abs_path = tempfile.mkstemp()
modified = False
with open(abs_path,'w', encoding='utf-8') as new_file:
with open(file_path, encoding='utf-8') as old_file:
for line in old_file:
new_line = line_callback(line)
modified = modified or (new_line != line)
new_file.write(new_line)
os.close(fh)
if modified:
#Remove original file
os.remove(file_path)
#Move new file
shutil.move(abs_path, file_path)
return True
else:
# nothing to do - just remove the tmp file
os.remove(abs_path)
return False
# Walks the given directory path (defaults to 'docs')
# and replaces all 'coming[$version]' tags with
# 'added[$version]'. This method only accesses asciidoc files.
def update_reference_docs(release_version, path='docs'):
pattern = 'coming[%s' % (release_version)
replacement = 'added[%s' % (release_version)
pending_files = []
def callback(line):
return line.replace(pattern, replacement)
for root, _, file_names in os.walk(path):
for file_name in fnmatch.filter(file_names, '*.asciidoc'):
full_path = os.path.join(root, file_name)
if process_file(full_path, callback):
pending_files.append(os.path.join(root, file_name))
return pending_files
# Moves the pom.xml file from a snapshot to a release
def remove_maven_snapshot(pom, release):
pattern = '<version>%s-SNAPSHOT</version>' % (release)
replacement = '<version>%s</version>' % (release)
def callback(line):
return line.replace(pattern, replacement)
process_file(pom, callback)
# Moves the Version.java file from a snapshot to a release
def remove_version_snapshot(version_file, release):
# 1.0.0.Beta1 -> 1_0_0_Beta1
release = release.replace('.', '_')
pattern = 'new Version(V_%s_ID, true' % (release)
replacement = 'new Version(V_%s_ID, false' % (release)
def callback(line):
return line.replace(pattern, replacement)
process_file(version_file, callback)
# Stages the given files for the next git commit
def add_pending_files(*files):
for file in files:
run('git add %s' % (file))
# Executes a git commit with 'release [version]' as the commit message
def commit_release(release):
run('git commit -m "release [%s]"' % release)
def commit_feature_flags(release):
run('git commit -m "Update Documentation Feature Flags [%s]"' % release)
def tag_release(release):
run('git tag -a v%s -m "Tag release version %s"' % (release, release))
def run_mvn(*cmd):
for c in cmd:
run('%s; %s %s' % (java_exe(), MVN, c))
def build_release(run_tests=False, dry_run=True, cpus=1, bwc_version=None):
target = 'deploy'
if dry_run:
target = 'package'
if run_tests:
run_mvn('clean',
'test -Dtests.jvms=%s -Des.node.mode=local' % (cpus),
'test -Dtests.jvms=%s -Des.node.mode=network' % (cpus))
if bwc_version:
print('Running backwards compatibility tests against version [%s]' % (bwc_version))
run_mvn('clean', 'test -Dtests.filter=@backwards -Dtests.bwc.version=%s -Dtests.bwc=true -Dtests.jvms=1' % bwc_version)
run_mvn('clean test-compile -Dforbidden.test.signatures="org.apache.lucene.util.LuceneTestCase\$AwaitsFix @ Please fix all bugs before release"')
run_mvn('clean %s -DskipTests' % (target))
success = False
try:
run_mvn('-DskipTests rpm:rpm')
success = True
finally:
if not success:
print("""
RPM building failed. Make sure the "rpm" tools are installed.
Use one of the following commands to install them:
$ brew install rpm # on OSX
$ apt-get install rpm # on Ubuntu et al.
""")
# Uses the github API to fetch open tickets for the given release version
# if it finds any tickets open for that version it will throw an exception
def ensure_no_open_tickets(version):
version = "v%s" % version
conn = HTTPSConnection('api.github.com')
try:
log('Checking for open tickets on Github for version %s' % version)
log('Check if node is available')
conn.request('GET', '/repos/elasticsearch/elasticsearch/issues?state=open&labels=%s' % version, headers= {'User-Agent' : 'Elasticsearch version checker'})
res = conn.getresponse()
if res.status == 200:
issues = json.loads(res.read().decode("utf-8"))
if issues:
urls = []
for issue in issues:
urls.append(issue['html_url'])
raise RuntimeError('Found open issues for release version %s:\n%s' % (version, '\n'.join(urls)))
else:
log("No open issues found for version %s" % version)
else:
raise RuntimeError('Failed to fetch issue list from Github for release version %s' % version)
except socket.error as e:
log("Failed to fetch issue list from Github for release version %s' % version - Exception: [%s]" % (version, e))
#that is ok it might not be there yet
finally:
conn.close()
def wait_for_node_startup(host='127.0.0.1', port=9200, timeout=15):
for _ in range(timeout):
conn = HTTPConnection(host, port, timeout)
try:
log('Waiting until node becomes available for 1 second')
time.sleep(1)
log('Check if node is available')
conn.request('GET', '')
res = conn.getresponse()
if res.status == 200:
return True
except socket.error as e:
log("Failed while waiting for node - Exception: [%s]" % e)
#that is ok it might not be there yet
finally:
conn.close()
return False
# Ensures we are using a true Lucene release, not a snapshot build:
def verify_lucene_version():
s = open('pom.xml', encoding='utf-8').read()
if 'download.elasticsearch.org/lucenesnapshots' in s:
raise RuntimeError('pom.xml contains download.elasticsearch.org/lucenesnapshots repository: remove that before releasing')
m = re.search(r'<lucene.version>(.*?)</lucene.version>', s)
if m is None:
raise RuntimeError('unable to locate lucene.version in pom.xml')
lucene_version = m.group(1)
m = re.search(r'<lucene.maven.version>(.*?)</lucene.maven.version>', s)
if m is None:
raise RuntimeError('unable to locate lucene.maven.version in pom.xml')
lucene_maven_version = m.group(1)
if lucene_version != lucene_maven_version:
raise RuntimeError('pom.xml is still using a snapshot release of lucene (%s): cutover to a real lucene release before releasing' % lucene_maven_version)
# Checks the pom.xml for the release version.
# This method fails if the pom file has no SNAPSHOT version set, i.e.
# if the version is already a release version we fail.
# Returns the next version string, e.g. 0.90.7
def find_release_version(src_branch):
run('git checkout %s' % src_branch)
with open('pom.xml', encoding='utf-8') as file:
for line in file:
match = re.search(r'<version>(.+)-SNAPSHOT</version>', line)
if match:
return match.group(1)
raise RuntimeError('Could not find release version in branch %s' % src_branch)
def artifact_names(release, path = ''):
return [os.path.join(path, 'elasticsearch-%s.%s' % (release, t)) for t in ['deb', 'tar.gz', 'zip']]
def get_artifacts(release):
common_artifacts = artifact_names(release, 'target/releases/')
for f in common_artifacts:
if not os.path.isfile(f):
raise RuntimeError('Could not find required artifact at %s' % f)
rpm = os.path.join('target/rpm/elasticsearch/RPMS/noarch/', 'elasticsearch-%s-1.noarch.rpm' % release)
if os.path.isfile(rpm):
log('RPM [%s] contains: ' % rpm)
run('rpm -pqli %s' % rpm)
# this is an oddness of RPM: it attaches '-1', so we have to rename the file
renamed_rpm = os.path.join('target/rpm/elasticsearch/RPMS/noarch/', 'elasticsearch-%s.noarch.rpm' % release)
shutil.move(rpm, renamed_rpm)
common_artifacts.append(renamed_rpm)
else:
raise RuntimeError('Could not find required artifact at %s' % rpm)
return common_artifacts
# Checks the jar files in each package
# Barfs if any of the package jar files differ
def check_artifacts_for_same_jars(artifacts):
jars = []
for file in artifacts:
if file.endswith('.zip'):
jars.append(subprocess.check_output("unzip -l %s | grep '\.jar$' | awk -F '/' '{ print $NF }' | sort" % file, shell=True))
if file.endswith('.tar.gz'):
jars.append(subprocess.check_output("tar tzvf %s | grep '\.jar$' | awk -F '/' '{ print $NF }' | sort" % file, shell=True))
if file.endswith('.rpm'):
jars.append(subprocess.check_output("rpm -pqli %s | grep '\.jar$' | awk -F '/' '{ print $NF }' | sort" % file, shell=True))
if file.endswith('.deb'):
jars.append(subprocess.check_output("dpkg -c %s | grep '\.jar$' | awk -F '/' '{ print $NF }' | sort" % file, shell=True))
if len(set(jars)) != 1:
raise RuntimeError('JAR contents of packages are not the same, please check the package contents. Use [unzip -l], [tar tzvf], [dpkg -c], [rpm -pqli] to inspect')
# Generates sha1 checksums for all files
# and returns the checksum files as well
# as the given files in a list
def generate_checksums(files):
res = []
for release_file in files:
directory = os.path.dirname(release_file)
file = os.path.basename(release_file)
checksum_file = '%s.sha1.txt' % file
if os.system('cd %s; shasum %s > %s' % (directory, file, checksum_file)):
raise RuntimeError('Failed to generate checksum for file %s' % release_file)
res = res + [os.path.join(directory, checksum_file), release_file]
return res
def download_and_verify(release, files, plugins=None, base_url='https://download.elasticsearch.org/elasticsearch/elasticsearch'):
print('Downloading and verifying release %s from %s' % (release, base_url))
tmp_dir = tempfile.mkdtemp()
try:
downloaded_files = []
for file in files:
name = os.path.basename(file)
url = '%s/%s' % (base_url, name)
abs_file_path = os.path.join(tmp_dir, name)
print(' Downloading %s' % (url))
downloaded_files.append(abs_file_path)
urllib.request.urlretrieve(url, abs_file_path)
url = ''.join([url, '.sha1.txt'])
checksum_file = os.path.join(tmp_dir, ''.join([abs_file_path, '.sha1.txt']))
urllib.request.urlretrieve(url, checksum_file)
print(' Verifying checksum %s' % (checksum_file))
run('cd %s && sha1sum -c %s' % (tmp_dir, os.path.basename(checksum_file)))
smoke_test_release(release, downloaded_files, get_tag_hash('v%s' % release), plugins)
print(' SUCCESS')
finally:
shutil.rmtree(tmp_dir)
def smoke_test_release(release, files, expected_hash, plugins):
for release_file in files:
if not os.path.isfile(release_file):
raise RuntimeError('Smoketest failed missing file %s' % (release_file))
tmp_dir = tempfile.mkdtemp()
if release_file.endswith('tar.gz'):
run('tar -xzf %s -C %s' % (release_file, tmp_dir))
elif release_file.endswith('zip'):
run('unzip %s -d %s' % (release_file, tmp_dir))
else:
log('Skip SmokeTest for [%s]' % release_file)
continue # nothing to do here
es_run_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'bin/elasticsearch')
print(' Smoke testing package [%s]' % release_file)
es_plugin_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release),'bin/plugin')
plugin_names = {}
for name, plugin in plugins:
print(' Install plugin [%s] from [%s]' % (name, plugin))
run('%s; %s %s %s' % (java_exe(), es_plugin_path, '-install', plugin))
plugin_names[name] = True
if release.startswith("0.90."):
background = '' # 0.90.x starts in background automatically
else:
background = '-d'
print(' Starting elasticsearch daemon from [%s]' % os.path.join(tmp_dir, 'elasticsearch-%s' % release))
run('%s; %s -Des.node.name=smoke_tester -Des.cluster.name=prepare_release -Des.discovery.zen.ping.multicast.enabled=false -Des.node.bench=true -Des.script.disable_dynamic=false %s'
% (java_exe(), es_run_path, background))
conn = HTTPConnection('127.0.0.1', 9200, 20)
wait_for_node_startup()
try:
try:
conn.request('GET', '')
res = conn.getresponse()
if res.status == 200:
version = json.loads(res.read().decode("utf-8"))['version']
if release != version['number']:
raise RuntimeError('Expected version [%s] but was [%s]' % (release, version['number']))
if version['build_snapshot']:
raise RuntimeError('Expected non snapshot version')
if version['build_hash'].strip() != expected_hash:
raise RuntimeError('HEAD hash does not match expected [%s] but got [%s]' % (expected_hash, version['build_hash']))
print(' Running REST Spec tests against package [%s]' % release_file)
run_mvn('test -Dtests.cluster=%s -Dtests.class=*.*RestTests' % ("127.0.0.1:9300"))
print(' Verify if plugins are listed in _nodes')
conn.request('GET', '/_nodes?plugin=true&pretty=true')
res = conn.getresponse()
if res.status == 200:
nodes = json.loads(res.read().decode("utf-8"))['nodes']
for _, node in nodes.items():
node_plugins = node['plugins']
for node_plugin in node_plugins:
if not plugin_names.get(node_plugin['name'], False):
raise RuntimeError('Unexpected plugin %s' % node_plugin['name'])
del plugin_names[node_plugin['name']]
if plugin_names:
raise RuntimeError('Plugins not loaded %s' % list(plugin_names.keys()))
else:
raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
else:
raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
finally:
conn.request('POST', '/_cluster/nodes/_local/_shutdown')
time.sleep(1) # give the node some time to shut down
shutdown_res = conn.getresponse()
if shutdown_res.status != 200:
raise RuntimeError('Expected HTTP 200 but got %s on node shutdown' % shutdown_res.status)
finally:
conn.close()
shutil.rmtree(tmp_dir)
def merge_tag_push(remote, src_branch, release_version, dry_run):
run('git checkout %s' % src_branch)
run('git merge %s' % release_branch(release_version))
run('git tag v%s' % release_version)
if not dry_run:
run('git push %s %s' % (remote, src_branch)) # push the commit
run('git push %s v%s' % (remote, release_version)) # push the tag
else:
print(' dryrun [True] -- skipping push to remote %s' % remote)
def publish_artifacts(artifacts, base='elasticsearch/elasticsearch', dry_run=True):
location = os.path.dirname(os.path.realpath(__file__))
for artifact in artifacts:
if dry_run:
print('Skip Uploading %s to Amazon S3' % artifact)
else:
print('Uploading %s to Amazon S3' % artifact)
# requires boto to be installed but it is not available on python3k yet so we use a dedicated tool
run('python %s/upload-s3.py --file %s ' % (location, os.path.abspath(artifact)))
def print_sonatype_notice():
settings = os.path.join(os.path.expanduser('~'), '.m2/settings.xml')
if os.path.isfile(settings):
with open(settings, encoding='utf-8') as settings_file:
for line in settings_file:
if line.strip() == '<id>sonatype-nexus-snapshots</id>':
# bail out - we found the indicator, no need to print the warning
return
print("""
NOTE: No sonatype settings detected, make sure you have configured
your sonatype credentials in '~/.m2/settings.xml':
<settings>
...
<servers>
<server>
<id>sonatype-nexus-snapshots</id>
<username>your-jira-id</username>
<password>your-jira-pwd</password>
</server>
<server>
<id>sonatype-nexus-staging</id>
<username>your-jira-id</username>
<password>your-jira-pwd</password>
</server>
</servers>
...
</settings>
""")
def check_s3_credentials():
if not env.get('AWS_ACCESS_KEY_ID', None) or not env.get('AWS_SECRET_ACCESS_KEY', None):
raise RuntimeError('Could not find "AWS_ACCESS_KEY_ID" / "AWS_SECRET_ACCESS_KEY" in the env variables; please export them in order to upload to S3')
VERSION_FILE = 'src/main/java/org/elasticsearch/Version.java'
POM_FILE = 'pom.xml'
# we print a notice if we cannot find the relevant info in the ~/.m2/settings.xml
print_sonatype_notice()
# finds the highest available bwc version to test against
def find_bwc_version(release_version, bwc_dir='backwards'):
log(' Lookup bwc version in directory [%s]' % bwc_dir)
bwc_version = None
if os.path.exists(bwc_dir) and os.path.isdir(bwc_dir):
max_version = [int(x) for x in release_version.split('.')]
for dir in os.listdir(bwc_dir):
if os.path.isdir(os.path.join(bwc_dir, dir)) and dir.startswith('elasticsearch-'):
version = [int(x) for x in dir[len('elasticsearch-'):].split('.')]
if version < max_version: # bwc tests only against smaller versions
if (not bwc_version) or version > [int(x) for x in bwc_version.split('.')]:
bwc_version = dir[len('elasticsearch-'):]
log(' Using bwc version [%s]' % bwc_version)
else:
log(' bwc directory [%s] does not exist or is not a directory - skipping' % bwc_dir)
return bwc_version
def ensure_checkout_is_clean(branchName):
# Make sure no local mods:
s = subprocess.check_output('git diff --shortstat', shell=True)
if len(s) > 0:
raise RuntimeError('git diff --shortstat is non-empty: got:\n%s' % s)
# Make sure no untracked files:
s = subprocess.check_output('git status', shell=True).decode('utf-8', errors='replace')
if 'Untracked files:' in s:
raise RuntimeError('git status shows untracked files: got:\n%s' % s)
# Make sure we are on the right branch (NOTE: a bit weak, since we default to current branch):
if 'On branch %s' % branchName not in s:
raise RuntimeError('git status does not show branch %s: got:\n%s' % (branchName, s))
# Make sure we have all changes from origin:
if 'is behind' in s:
raise RuntimeError('git status shows not all changes pulled from origin; try running "git pull origin %s": got:\n%s' % (branchName, s))
# Make sure we have no local unpushed changes (this is supposed to be a clean area):
if 'is ahead' in s:
raise RuntimeError('git status shows local commits; try running "git fetch origin", "git checkout %s", "git reset --hard origin/%s": got:\n%s' % (branchName, branchName, s))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Builds and publishes an Elasticsearch Release')
parser.add_argument('--branch', '-b', metavar='RELEASE_BRANCH', default=get_current_branch(),
help='The branch to release from. Defaults to the current branch.')
parser.add_argument('--cpus', '-c', metavar='1', default=1,
help='The number of cpus to use for running the test. Default is [1]')
parser.add_argument('--skiptests', '-t', dest='tests', action='store_false',
help='Skips tests before release. Tests are run by default.')
parser.set_defaults(tests=True)
parser.add_argument('--remote', '-r', metavar='origin', default='origin',
help='The remote to push the release commit and tag to. Default is [origin]')
parser.add_argument('--publish', '-d', dest='dryrun', action='store_false',
help='Publishes the release. Disabled by default.')
parser.add_argument('--smoke', '-s', dest='smoke', default='',
help='Smoke tests the given release')
parser.add_argument('--bwc', '-w', dest='bwc', metavar='backwards', default='backwards',
help='Backwards compatibility version path to use to run compatibility tests against')
parser.set_defaults(dryrun=True)
parser.set_defaults(smoke=None)
args = parser.parse_args()
bwc_path = args.bwc
src_branch = args.branch
remote = args.remote
run_tests = args.tests
dry_run = args.dryrun
cpus = args.cpus
build = not args.smoke
smoke_test_version = args.smoke
if os.path.exists(LOG):
raise RuntimeError('please remove old release log %s first' % LOG)
if not dry_run:
check_s3_credentials()
print('WARNING: dryrun is set to "false" - this will push and publish the release')
input('Press Enter to continue...')
print(''.join(['-' for _ in range(80)]))
print('Preparing Release from branch [%s] running tests: [%s] dryrun: [%s]' % (src_branch, run_tests, dry_run))
print(' JAVA_HOME is [%s]' % JAVA_HOME)
print(' Running with maven command: [%s] ' % (MVN))
if build:
ensure_checkout_is_clean(src_branch)
verify_lucene_version()
release_version = find_release_version(src_branch)
ensure_no_open_tickets(release_version)
if not dry_run:
smoke_test_version = release_version
head_hash = get_head_hash()
run_mvn('clean') # clean the env!
print(' Release version: [%s]' % release_version)
create_release_branch(remote, src_branch, release_version)
print(' Created release branch [%s]' % (release_branch(release_version)))
success = False
try:
pending_files = [POM_FILE, VERSION_FILE]
remove_maven_snapshot(POM_FILE, release_version)
remove_version_snapshot(VERSION_FILE, release_version)
print(' Done removing snapshot version')
add_pending_files(*pending_files) # expects var args use * to expand
commit_release(release_version)
pending_files = update_reference_docs(release_version)
version_head_hash = None
# split commits for docs and version to enable easy cherry-picking
if pending_files:
add_pending_files(*pending_files) # expects var args use * to expand
commit_feature_flags(release_version)
version_head_hash = get_head_hash()
print(' Committed release version [%s]' % release_version)
print(''.join(['-' for _ in range(80)]))
print('Building Release candidate')
input('Press Enter to continue...')
if not dry_run:
print(' Running maven builds now and publish to Sonatype - run-tests [%s]' % run_tests)
else:
print(' Running maven builds now run-tests [%s]' % run_tests)
build_release(run_tests=run_tests, dry_run=dry_run, cpus=cpus, bwc_version=find_bwc_version(release_version, bwc_path))
artifacts = get_artifacts(release_version)
print('Checking if all artifacts contain the same jars')
check_artifacts_for_same_jars(artifacts)
artifacts_and_checksum = generate_checksums(artifacts)
smoke_test_release(release_version, artifacts, get_head_hash(), PLUGINS)
print(''.join(['-' for _ in range(80)]))
print('Finish Release -- dry_run: %s' % dry_run)
input('Press Enter to continue...')
print(' merge release branch, tag and push to %s %s -- dry_run: %s' % (remote, src_branch, dry_run))
merge_tag_push(remote, src_branch, release_version, dry_run)
print(' publish artifacts to S3 -- dry_run: %s' % dry_run)
publish_artifacts(artifacts_and_checksum, dry_run=dry_run)
cherry_pick_command = '.'
if version_head_hash:
cherry_pick_command = ' and cherry-pick the documentation changes: \'git cherry-pick %s\' to the development branch' % (version_head_hash)
pending_msg = """
Release successful. Pending steps:
* create a new vX.Y.Z label on github for the next release, with label color #dddddd (https://github.com/elasticsearch/elasticsearch/labels)
* publish the maven artifacts on Sonatype: https://oss.sonatype.org/index.html
- here is a guide: http://central.sonatype.org/pages/releasing-the-deployment.html
* check if the release is there https://oss.sonatype.org/content/repositories/releases/org/elasticsearch/elasticsearch/%(version)s
* announce the release on the website / blog post
* tweet about the release
* announce the release in the google group/mailinglist
* Move the current branch back to a snapshot version for the next point release%(cherry_pick)s
"""
print(pending_msg % { 'version' : release_version, 'cherry_pick' : cherry_pick_command} )
success = True
finally:
if not success:
run('git reset --hard HEAD')
run('git checkout %s' % src_branch)
elif dry_run:
run('git reset --hard %s' % head_hash)
run('git tag -d v%s' % release_version)
# we delete this one anyways
run('git branch -D %s' % (release_branch(release_version)))
else:
print("Skipping build - smoketest only against version %s" % smoke_test_version)
run_mvn('clean') # clean the env!
if smoke_test_version:
fetch(remote)
download_and_verify(smoke_test_version, artifact_names(smoke_test_version), plugins=PLUGINS)
|
|
import numpy as np
import pytest
import pandas as pd
from pandas import MultiIndex, Series
import pandas.util.testing as tm
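# The `idx` fixture used throughout these tests is provided by the package-level
# conftest.py (not shown here). A rough sketch of an equivalent fixture, inferred
# from the expectations below:
#
#   @pytest.fixture
#   def idx():
#       return MultiIndex.from_tuples(
#           [("foo", "one"), ("foo", "two"), ("bar", "one"),
#            ("baz", "two"), ("qux", "one"), ("qux", "two")],
#           names=["first", "second"],
#       )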
@pytest.mark.parametrize("case", [0.5, "xxx"])
@pytest.mark.parametrize("sort", [None, False])
@pytest.mark.parametrize(
"method", ["intersection", "union", "difference", "symmetric_difference"]
)
def test_set_ops_error_cases(idx, case, sort, method):
# non-iterable input
msg = "Input must be Index or array-like"
with pytest.raises(TypeError, match=msg):
getattr(idx, method)(case, sort=sort)
@pytest.mark.parametrize("sort", [None, False])
def test_intersection_base(idx, sort):
first = idx[:5]
second = idx[:3]
intersect = first.intersection(second, sort=sort)
if sort is None:
tm.assert_index_equal(intersect, second.sort_values())
assert tm.equalContents(intersect, second)
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.intersection(case, sort=sort)
if sort is None:
tm.assert_index_equal(result, second.sort_values())
assert tm.equalContents(result, second)
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.intersection([1, 2, 3], sort=sort)
@pytest.mark.parametrize("sort", [None, False])
def test_union_base(idx, sort):
first = idx[3:]
second = idx[:5]
everything = idx
union = first.union(second, sort=sort)
if sort is None:
tm.assert_index_equal(union, everything.sort_values())
assert tm.equalContents(union, everything)
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.union(case, sort=sort)
if sort is None:
tm.assert_index_equal(result, everything.sort_values())
assert tm.equalContents(result, everything)
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.union([1, 2, 3], sort=sort)
@pytest.mark.parametrize("sort", [None, False])
def test_difference_base(idx, sort):
second = idx[4:]
answer = idx[:4]
result = idx.difference(second, sort=sort)
if sort is None:
answer = answer.sort_values()
assert result.equals(answer)
tm.assert_index_equal(result, answer)
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = idx.difference(case, sort=sort)
tm.assert_index_equal(result, answer)
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
idx.difference([1, 2, 3], sort=sort)
@pytest.mark.parametrize("sort", [None, False])
def test_symmetric_difference(idx, sort):
first = idx[1:]
second = idx[:-1]
answer = idx[[-1, 0]]
result = first.symmetric_difference(second, sort=sort)
if sort is None:
answer = answer.sort_values()
tm.assert_index_equal(result, answer)
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.symmetric_difference(case, sort=sort)
tm.assert_index_equal(result, answer)
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.symmetric_difference([1, 2, 3], sort=sort)
def test_empty(idx):
# GH 15270
assert not idx.empty
assert idx[:0].empty
@pytest.mark.parametrize("sort", [None, False])
def test_difference(idx, sort):
first = idx
result = first.difference(idx[-3:], sort=sort)
vals = idx[:-3].values
if sort is None:
vals = sorted(vals)
expected = MultiIndex.from_tuples(vals, sortorder=0, names=idx.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == idx.names
tm.assert_index_equal(result, expected)
# empty difference: reflexive
result = idx.difference(idx, sort=sort)
expected = idx[:0]
assert result.equals(expected)
assert result.names == idx.names
# empty difference: superset
result = idx[-3:].difference(idx, sort=sort)
expected = idx[:0]
assert result.equals(expected)
assert result.names == idx.names
# empty difference: degenerate
result = idx[:0].difference(idx, sort=sort)
expected = idx[:0]
assert result.equals(expected)
assert result.names == idx.names
# names not the same
chunklet = idx[-3:]
chunklet.names = ["foo", "baz"]
result = first.difference(chunklet, sort=sort)
assert result.names == (None, None)
# empty, but non-equal
result = idx.difference(idx.sortlevel(1)[0], sort=sort)
assert len(result) == 0
# difference with a non-MultiIndex (array of tuples) does not raise
result = first.difference(first.values, sort=sort)
assert result.equals(first[:0])
# name from empty array
result = first.difference([], sort=sort)
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([("foo", "one")], sort=sort)
expected = pd.MultiIndex.from_tuples(
[("bar", "one"), ("baz", "two"), ("foo", "two"), ("qux", "one"), ("qux", "two")]
)
expected.names = first.names
assert first.names == result.names
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.difference([1, 2, 3, 4, 5], sort=sort)
def test_difference_sort_special():
# GH-24959
idx = pd.MultiIndex.from_product([[1, 0], ["a", "b"]])
# sort=None, the default
result = idx.difference([])
tm.assert_index_equal(result, idx)
@pytest.mark.xfail(reason="Not implemented.")
def test_difference_sort_special_true():
# TODO decide on True behaviour
idx = pd.MultiIndex.from_product([[1, 0], ["a", "b"]])
result = idx.difference([], sort=True)
expected = pd.MultiIndex.from_product([[0, 1], ["a", "b"]])
tm.assert_index_equal(result, expected)
def test_difference_sort_incomparable():
# GH-24959
idx = pd.MultiIndex.from_product([[1, pd.Timestamp("2000"), 2], ["a", "b"]])
other = pd.MultiIndex.from_product([[3, pd.Timestamp("2000"), 4], ["c", "d"]])
# sort=None, the default
# MultiIndex.difference deviates here from other difference
# implementations in not catching the TypeError
with pytest.raises(TypeError):
result = idx.difference(other)
# sort=False
result = idx.difference(other, sort=False)
tm.assert_index_equal(result, idx)
@pytest.mark.xfail(reason="Not implemented.")
def test_difference_sort_incomparable_true():
# TODO decide on True behaviour
# # sort=True, raises
idx = pd.MultiIndex.from_product([[1, pd.Timestamp("2000"), 2], ["a", "b"]])
other = pd.MultiIndex.from_product([[3, pd.Timestamp("2000"), 4], ["c", "d"]])
with pytest.raises(TypeError):
idx.difference(other, sort=True)
@pytest.mark.parametrize("sort", [None, False])
def test_union(idx, sort):
piece1 = idx[:5][::-1]
piece2 = idx[3:]
the_union = piece1.union(piece2, sort=sort)
if sort is None:
tm.assert_index_equal(the_union, idx.sort_values())
assert tm.equalContents(the_union, idx)
# corner case, pass self or empty thing:
the_union = idx.union(idx, sort=sort)
assert the_union is idx
the_union = idx.union(idx[:0], sort=sort)
assert the_union is idx
# won't work in python 3
# tuples = _index.values
# result = _index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(idx)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = _index.union(other)
# assert result.equals(result2)
@pytest.mark.parametrize("sort", [None, False])
def test_intersection(idx, sort):
piece1 = idx[:5][::-1]
piece2 = idx[3:]
the_int = piece1.intersection(piece2, sort=sort)
if sort is None:
tm.assert_index_equal(the_int, idx[3:5])
assert tm.equalContents(the_int, idx[3:5])
# corner case, pass self
the_int = idx.intersection(idx, sort=sort)
assert the_int is idx
# empty intersection: disjoint
empty = idx[:2].intersection(idx[2:], sort=sort)
expected = idx[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = _index.values
# result = _index & tuples
# assert result.equals(tuples)
def test_intersect_equal_sort():
# GH-24959
idx = pd.MultiIndex.from_product([[1, 0], ["a", "b"]])
tm.assert_index_equal(idx.intersection(idx, sort=False), idx)
tm.assert_index_equal(idx.intersection(idx, sort=None), idx)
@pytest.mark.xfail(reason="Not implemented.")
def test_intersect_equal_sort_true():
# TODO decide on True behaviour
idx = pd.MultiIndex.from_product([[1, 0], ["a", "b"]])
sorted_ = pd.MultiIndex.from_product([[0, 1], ["a", "b"]])
tm.assert_index_equal(idx.intersection(idx, sort=True), sorted_)
@pytest.mark.parametrize("slice_", [slice(None), slice(0)])
def test_union_sort_other_empty(slice_):
# https://github.com/pandas-dev/pandas/issues/24959
idx = pd.MultiIndex.from_product([[1, 0], ["a", "b"]])
# default, sort=None
other = idx[slice_]
tm.assert_index_equal(idx.union(other), idx)
# MultiIndex does not special case empty.union(idx)
# tm.assert_index_equal(other.union(idx), idx)
# sort=False
tm.assert_index_equal(idx.union(other, sort=False), idx)
@pytest.mark.xfail(reason="Not implemented.")
def test_union_sort_other_empty_sort(slice_):
# TODO decide on True behaviour
# # sort=True
idx = pd.MultiIndex.from_product([[1, 0], ["a", "b"]])
other = idx[:0]
result = idx.union(other, sort=True)
expected = pd.MultiIndex.from_product([[0, 1], ["a", "b"]])
tm.assert_index_equal(result, expected)
def test_union_sort_other_incomparable():
# https://github.com/pandas-dev/pandas/issues/24959
idx = pd.MultiIndex.from_product([[1, pd.Timestamp("2000")], ["a", "b"]])
# default, sort=None
result = idx.union(idx[:1])
tm.assert_index_equal(result, idx)
# sort=False
result = idx.union(idx[:1], sort=False)
tm.assert_index_equal(result, idx)
@pytest.mark.xfail(reason="Not implemented.")
def test_union_sort_other_incomparable_sort():
# TODO decide on True behaviour
# # sort=True
idx = pd.MultiIndex.from_product([[1, pd.Timestamp("2000")], ["a", "b"]])
with pytest.raises(TypeError, match="Cannot compare"):
idx.union(idx[:1], sort=True)
@pytest.mark.parametrize(
"method", ["union", "intersection", "difference", "symmetric_difference"]
)
def test_setops_disallow_true(method):
idx1 = pd.MultiIndex.from_product([["a", "b"], [1, 2]])
idx2 = pd.MultiIndex.from_product([["b", "c"], [1, 2]])
with pytest.raises(ValueError, match="The 'sort' keyword only takes"):
getattr(idx1, method)(idx2, sort=True)
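# Hedged illustration, not part of the original test suite: the `sort` keyword
# semantics exercised above can be reproduced with plain pandas objects. Only
# the public pandas API is assumed; `mi` and `other` are illustrative names.
def _demo_multiindex_setops_sort():
    import pandas as pd
    mi = pd.MultiIndex.from_product([[1, 0], ["a", "b"]])
    other = pd.MultiIndex.from_tuples([(0, "a"), (2, "c")])
    # sort=None (the default) returns a lexicographically sorted result when
    # the labels are comparable.
    assert list(mi.union(other, sort=None)) == [
        (0, "a"),
        (0, "b"),
        (1, "a"),
        (1, "b"),
        (2, "c"),
    ]
    # sort=False only guarantees the set of elements, not their order.
    assert set(mi.union(other, sort=False)) == set(mi.union(other, sort=None))
    # intersection behaves analogously.
    assert list(mi.intersection(other, sort=None)) == [(0, "a")]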
|
|
""" class to use as a general purpose GUI input widget
and a few convenience functions for validating data inputs
"""
"""
Copyright (c) <2014>, <Neil Thomas>, <NeilT-UK>, <dc_fm@hotmail.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
"""
# works with Python 3.4
# should work with Python 2.7
try:
# Python 3 spelling
import tkinter as tki
import tkinter.messagebox as tkm
import tkinter.filedialog as tkf
except ImportError:
# Python 2 spelling
import Tkinter as tki
import tkMessageBox as tkm
import tkFileDialog as tkf
import json
class EntryLine(tki.Frame):
""" a combination of label, entry and help button, for validated gui entry of data"""
def __init__(self, parent, text='no label', data='', conv=None, update=None, width=15, default=None):
""" text optional used to label the entry
data optional used to initialise the Entry box
uses empty string if none supplied
conv optional conversion function, which also validates the entry
note that int, float and str can be used
return string unchanged (str) if omitted or None
conv must return object if entry is valid
its __doc__ string is available as help
if the docstring starts with [x], x is put on the help button
(TODO have to work on tooltips sometime!)
raise ValueError if the entry is invalid (int, float and str do this)
The err from ValueError is saved for the help button
so more info can be given about failure to validate
update optional the name of the function to call when entries are updated
needed for the execute when all valid functionality
width optional defaults to 15, maybe should make this adaptive sometime
default optional defaults to None
                        returned if the text string produces an invalid result;
                        allows .get() to be called asynchronously and still return
                        valid results to the calling program
"""
tki.Frame.__init__(self, master=parent) # tki refuses to work with super!
self.update = update
self.conv = conv
self.text = text
self.default = default
# init the properties
self.err = ''
self.value = None
self.valid = False
# do the label
self.label = tki.Label(self, text=text, width=width)
self.label.grid(row=0, column=0)
# do the conversion function
        cdoc = conv.__doc__ if conv else None  # easier to type below
        if conv:
# we have a conversion function specified
# is it one of the builtins?
if conv == int:
help_face = 'i'
self.conv_help = 'builtin int() function'
elif conv == float:
help_face = 'f'
self.conv_help = 'builtin float() function'
elif conv == str:
help_face = 'str'
self.conv_help = 'builtin str() function'
else:
# none of those, so does it have a docstring?
if cdoc:
# yes, does it start with a help_face?
face_end = cdoc.find(']')
if (cdoc[0] == '[') and (face_end != -1) and (face_end < 6):
help_face = cdoc[1:face_end]
else:
help_face = '?'
# is the help prompt truncated in the docstring?
if '[end_help]' in cdoc:
self.conv_help = cdoc[:cdoc.find('[end_help]')]
else:
self.conv_help = cdoc
                    # this could be done by relying on .find() returning -1 if not found
                    # and slicing [:find()] regardless,
                    # but that's bordering on the tricky; this is clearer
else:
self.conv_help = 'no documentation\navailable for\nthis conversion'
help_face = '?'
else:
self.conv = str
help_face = 'str'
self.conv_help = 'returned as string'
# do the entry
self.var = tki.StringVar()
self.entry = tki.Entry(self, textvariable=self.var, width=width)
self.entry.grid(row=0, column=1)
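        # the 'w' trace re-validates (and recolours) the entry via _changed on
        # every edit; <Return> additionally notifies the parent via _returned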
self.var.trace('w', self._changed)
self.entry.bind('<Return>', self._returned)
# do the help button
self.help_but = tki.Button(self,
text=help_face,
command=self._show_help,
width=5,
takefocus=0) # don't take part in tab-focus
# took a while to figure this out
self.help_but.grid(row=0, column=2)
# initialise it, which triggers the trace, _changed and validation
self.var.set(str(data))
def _returned(self, *args):
self.update(enter=True)
def _show_help(self):
tkm.showinfo('conversion information', '{}\n{}'.format(self.conv_help, self.err))
def _changed(self, *args):
ent_val = self.var.get()
self.val_string = ent_val
try:
self.value = self.conv(ent_val)
self.entry.config(bg='white')
self.err = ''
self.valid = True
except ValueError as err:
try:
self.value = self.conv(self.default) # we convert the default value
except (TypeError, ValueError):
self.value = self.default # which if it can't be converted (None) is returned intact
self.entry.config(bg='orange')
self.err = err
self.valid = False
self.update()
def put(self, value):
""" allows the client to change the displayed value"""
self.var.set(value)
class GUI_inputs(tki.LabelFrame):
""" A GUI data input convenience class, with tab-able fields and verified data"""
def __init__(self, parent, text="Neil's Input Widget", execute=None,
exec_label='execute',
loadsave=None,
**kwargs):
""" initialise with text for the LabelFrame
set execute to the name of a function to be called on execute
execute button greyed out until all entries are valid
set loadsave=True to put up load/save buttons
"""
tki.LabelFrame.__init__(self, master=parent, text=text)
self.kwargs = kwargs
# we have a dict of entries
self.entries = {} # the data entry widgets
self.row = 0
        # if there's an execute callback supplied, put up a button for it, on the last row
self.execute_func = execute
self.exec_label = exec_label
if execute:
# an execute button
self.execute_but = tki.Button(self, text=self.exec_label,
command=self.execute_func,
state=tki.DISABLED)
self.execute_but.grid(row=99, column=1) #MAXROWS anyone?
# a tick box for the enter binding
self.exec_ent_var = tki.IntVar()
self.exec_check = tki.Checkbutton(self, text='exec on enter', variable=self.exec_ent_var)
self.exec_check.grid(row=99, column=0)
        # if we want load/save functionality (pass loadsave=True to enable the file dialogs)
if loadsave:
self.load_but = tki.Button(self, text='load', command=self._load_func)
self.load_but.grid(row=100, column=0)
self.save_but = tki.Button(self, text='save', command=self._save_func)
self.save_but.grid(row=100, column=1)
def add(self, key, disp_name='', data='', conv=None, default=None):
""" add a new data entry line to the input widget
key required key for the entry, must be unique on this widget
the returned dict uses this as the key
use a string, but other objects do work as well
disp_name optional labels the data entry, uses the key if omitted
data optional initial string data for the entry box
conv optional A function, that takes a string, returns an object
or raises ValueError if it can't understand the string
int and float do this
default optional When GUI_inputs is the client, execute is always greyed
if there are any invalid entries. However as a server,
.get() might be called when entries are invalid. Default
provides a default response for invalid entries, to avoid
the calling program having to try: all return values
"""
if key in self.entries:
raise ValueError('duplicate key name >>>{}<<<'.format(key))
if not disp_name:
disp_name = str(key)
mle = EntryLine(self, disp_name, data, conv, self.update, default=default, **self.kwargs)
mle.grid(row=self.row, column=0, columnspan=2)
self.row += 1
self.entries[key] = mle
def _load_func(self):
""" read a json encoded list of tuples
update the GUI with the entries
warn if there are too many or too few
        If a tuple contains only two items, they are assumed to be key and value"""
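        # For illustration (this matches what _save_func writes): a saved file holds
        # something like [["key 1", "display name", "3.2"], ["key 2", "key 2", "42"]]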
load_name = tkf.askopenfilename(filetypes = [('JSON files', '.json'), ('all files', '.*')])
if load_name:
with open(load_name, 'rt') as load_file:
updates = json.load(load_file)
            # make a dict with what to update, and what to update it with
            # maybe there should be more error checking here,
            # but printing out what gets used and what isn't gives the user a chance to notice problems
try:
src_keys = [x[0] for x in updates]
data = [x[-1] for x in updates]
except IndexError:
print("can't understand the load data file, are all fields present?")
return
src_dict = dict(zip(src_keys, data))
dst_keys = self.entries.keys()
# using sets to do this is a bit more elegant and explicit than
# looping and testing over both sets of keys
can_update = set(src_keys) & set(dst_keys)
not_updated = set(dst_keys).difference(src_keys)
not_used = set(src_keys).difference(dst_keys)
for key in can_update:
self.set_data(key, src_dict[key])
print('"{}" was updated to "{}"'.format(key, src_dict[key]))
for key in not_updated:
print('Warning - "{}" found on GUI but not in load file, not updated'.format(key))
for key in not_used:
print('Warning - "{}" found in load file but not on GUI, not used'.format(key))
def _save_func(self):
""" put a list of (key, display_name, value_string) tuples
into a json encoded file, or into the shell window if no file is specified
'key' is effectively the variable name used in the code
'display_name' is for documentation on save, ignored on load
'value_string' is whatever is present, whether valid or not"""
# retrieve and format the data
keys = self.entries.keys()
names = [x.text for x in self.entries.values()]
data = [x.val_string for x in self.entries.values()]
save_stuff = list(zip(keys, names, data)) # zip returns an iterator in python3!
save_name = tkf.asksaveasfilename(filetypes = [('JSON files', '.json'), ('all files', '.*')])
if save_name:
if not ('.' in save_name):
save_name += '.json'
with open(save_name, 'wt') as save_file:
json.dump(save_stuff, save_file)
else:
print(save_stuff)
def update(self, enter=False):
""" called when something has changed, or enter has been hit
        this is a clumsy interface and not especially well thought out;
        in fact it confused me when I returned to the code,
        but now I think I know what's going on"""
        # only need to worry about this when there's an execute button to handle
if self.execute_func:
# get the valid properties of each entry
valids = [x.valid for x in self.entries.values()]
if all(valids):
self.execute_but.config(state=tki.NORMAL)
if enter and (self.exec_ent_var.get() == 1):
self.execute_func()
else:
self.execute_but.config(state=tki.DISABLED)
def get(self):
""" return a dict of the converted results"""
output = dict(zip(self.entries.keys(), [x.value for x in self.entries.values()]))
return output
def set_data(self, key, data):
""" set the entry field of 'key' to data"""
self.entries[key].put(data)
# conversion functions, for example, and to be used by the application
def float_pair(x):
"""[f,f] Two floats seperated by a comma
[end_help]
example non-trivial conversion function
not all of docstring intended to be displayed as help
    throw ValueError from two locations: one from the field-count check, one from float()
return a list of the values
"""
fields = x.split(',')
if len(fields) != 2:
        raise ValueError('need two fields separated by one comma')
output = []
for field in fields: # float() will ValueError if it's wrong
output.append(float(field))
return output
def list_of_floats(x):
"""[lof] list of floats
One float, or several floats separated by commas"""
# try to eliminate the simplest problem
if ',' in x:
if x.rstrip()[-1] == ',':
raise ValueError('no final value')
out = []
fields = x.split(',') # this will always work without error
for field in fields:
out.append(float(field)) # and float() will ValueError if it
return out # doesn't understand the string
if __name__ == '__main__':
def execute_func():
# get data in converted form from the widgets
print('executing with')
print( full.get(), basic.get())
# show updating data on the forms
try:
execute_func.count += 1
except Exception:
execute_func.count = 0
basic.set_data('key 1', '{}'.format(execute_func.count))
# define additional conversion functions in the calling application
def int16(x):
"""[16] a base16 (hexadecimal) integer"""
return int(x, base=16)
def cryptic_conv(x):
# there happens to be no docstring for this conversion function
return int(x)
root = tki.Tk()
basic = GUI_inputs(root)
basic.pack()
basic.add('key 1')
basic.add('key 2')
full = GUI_inputs(root, 'full fat', execute=execute_func, loadsave=True, width=20)
full.pack()
full.add('examp 1')
full.add('we1', conv=str, data=3)
full.add('an int', conv=int, default=7)
full.add('we2', 'disp4we2', data=999)
full.add('pair', 'f_pair', '3,4', float_pair, default='7,8' )
full.add('adr', 'hex address', '0xC0DE', int16)
full.add('float_list', 'float list', '3, 4, 5', list_of_floats, )
full.add('cryp', 'no doc string', 6, cryptic_conv)
get_but = tki.Button(root, text='force get', command=execute_func)
get_but.pack()
root.mainloop()
|
|
# -*- coding: utf-8 -*-
from time import time
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
# Default configuration
contourParams = dict(
zdir='z',
alpha=0.5,
zorder=1,
antialiased=True,
cmap=cm.PuRd_r
)
surfaceParams = dict(
rstride=1,
cstride=1,
linewidth=0.1,
edgecolors='k',
alpha=0.5,
antialiased=True,
cmap=cm.PuRd_r
)
class BaseProblem:
def __init__(self, dim=None, bounds=None, default_bounds=(-1, 1), name=None):
if bounds is None:
bounds = [default_bounds]
if dim is not None:
bounds = [default_bounds] * dim
self.dimensions = len(bounds)
self.bounds = bounds
self.name = name or self.__class__.__name__
def __call__(self, *args, **kwargs):
return self.evaluate(*args, **kwargs)
def evaluate(self, x):
raise NotImplementedError('Implement the evaluation function')
def plot2d(self, points=100, figure=None, figsize=(12, 8), contour=True, contour_levels=20,
imshow_kwds=None, contour_kwds=None):
if imshow_kwds is None:
imshow_kwds = dict(cmap=cm.PuRd_r)
if contour_kwds is None:
contour_kwds = dict(cmap=cm.PuRd_r)
xbounds, ybounds = self.bounds[0], self.bounds[1]
x = np.linspace(min(xbounds), max(xbounds), points)
        y = np.linspace(min(ybounds), max(ybounds), points)
X, Y = np.meshgrid(x, y)
Z = self(np.asarray([X, Y]))
if figure is None:
fig = plt.figure(figsize=figsize)
else:
fig = figure
ax = fig.gca()
if contour:
ax.contourf(X, Y, Z, contour_levels, **contour_kwds)
else:
im = ax.imshow(Z, **imshow_kwds)
if figure is None:
plt.show()
return fig, ax
def plot3d(self, points=100, contour_levels=20, ax3d=None, figsize=(12, 8),
view_init=None, surface_kwds=None, contour_kwds=None):
from mpl_toolkits.mplot3d import Axes3D
contour_settings = dict(contourParams)
surface_settings = dict(surfaceParams)
if contour_kwds is not None:
contour_settings.update(contour_kwds)
if surface_kwds is not None:
surface_settings.update(surface_kwds)
xbounds, ybounds = self.bounds[0], self.bounds[1]
x = np.linspace(min(xbounds), max(xbounds), points)
y = np.linspace(min(ybounds), max(ybounds), points)
X, Y = np.meshgrid(x, y)
Z = self(np.asarray([X, Y]))
if ax3d is None:
fig = plt.figure(figsize=figsize)
ax = Axes3D(fig)
if view_init is not None:
ax.view_init(*view_init)
else:
ax = ax3d
# Make the background transparent
ax.patch.set_alpha(0.0)
# Make each axis pane transparent as well
ax.w_xaxis.set_pane_color((0.0, 0.0, 0.0, 0.0))
ax.w_yaxis.set_pane_color((0.0, 0.0, 0.0, 0.0))
ax.w_zaxis.set_pane_color((0.0, 0.0, 0.0, 0.0))
surf = ax.plot_surface(X, Y, Z, **surface_settings)
contour_settings['offset'] = np.min(Z)
cont = ax.contourf(X, Y, Z, contour_levels, **contour_settings)
if ax3d is None:
plt.show()
return ax
def __repr__(self):
return '{} {}D'.format(self.name, self.dimensions)
class Slowdown(BaseProblem):
def __init__(self, problem, us=1000):
super().__init__(bounds=problem.bounds, name='{} (~{} us)'.format(problem.name, us))
self.problem = problem
self.us = us
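    # Busy-waits, repeatedly re-evaluating the wrapped problem, until roughly
    # `us` microseconds have elapsed (presumably to emulate a slow objective).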
def evaluate(self, x):
start = time() * 1e6
result = 0
while time() * 1e6 - start < self.us:
result = self.problem.evaluate(x)
return result
class Ackley(BaseProblem):
def __init__(self, dim=2, bounds=None, default_bounds=(-5, 5), a=20, b=0.2, c=2 * np.pi):
super().__init__(dim, bounds, default_bounds)
self.a = a
self.b = b
self.c = c
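    # Implements f(x) = -a*exp(-b*sqrt(sum(x_i^2)/n)) - exp(sum(cos(c*x_i))/n) + a + e;
    # the global minimum is f(0, ..., 0) = 0 for the default parameters.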
def evaluate(self, x):
n = len(x)
s1 = sum(np.power(x, 2))
s2 = sum(np.cos(self.c * x))
return -self.a * np.exp(-self.b * np.sqrt(s1 / n)) - np.exp(s2 / n) + self.a + np.exp(1)
class Rastrigin(BaseProblem):
def __init__(self, dim=2, bounds=None, default_bounds=(-5.12, 5.12), a=10):
super().__init__(dim, bounds, default_bounds)
self.a = a
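    # Implements f(x) = a*d + sum(x_i^2 - a*cos(2*pi*x_i)); global minimum f(0, ..., 0) = 0.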
def evaluate(self, x):
d = len(x)
s = np.power(x, 2) - self.a * np.cos(2 * np.pi * x)
return self.a * d + sum(s)
class Rosenbrock(BaseProblem):
def __init__(self, dim=2, bounds=None, default_bounds=(-10, 10), z_shift=0):
super().__init__(dim, bounds, default_bounds)
self.z_shift = z_shift
def evaluate(self, x):
return sum(100.0 * (x[1:] - x[:-1] ** 2.0) ** 2.0 + (1 - x[:-1]) ** 2.0) + self.z_shift
class CrossInTray(BaseProblem):
def __init__(self, bounds=None, default_bounds=(-10, 10)):
super().__init__(2, bounds, default_bounds)
def evaluate(self, x):
x1, x2 = x[0], x[1]
return -0.0001 * (np.abs(
np.sin(x1) * np.sin(x2) * np.exp(np.abs(100 - np.sqrt(x1 ** 2 + x2 ** 2) / np.pi))) + 1) ** 0.1
class EggHolder(BaseProblem):
def __init__(self, bounds=None, default_bounds=(-512, 512)):
super().__init__(2, bounds, default_bounds)
def evaluate(self, x):
x1, x2 = x[0], x[1]
return -(x2 + 47) * np.sin(np.sqrt(np.abs(x2 + x1 / 2 + 47))) - x1 * np.sin(np.sqrt(np.abs(x1 - (x2 + 47))))
class HolderTable(BaseProblem):
def __init__(self, bounds=None, default_bounds=(-10, 10)):
super().__init__(2, bounds, default_bounds)
def evaluate(self, x):
x1, x2 = x[0], x[1]
return -np.abs(np.sin(x1) * np.cos(x2) * np.exp(np.abs(1 - np.sqrt(x1 ** 2 + x2 ** 2) / np.pi)))
class Easom(BaseProblem):
def __init__(self, bounds=None, default_bounds=(-100, 100)):
super().__init__(2, bounds, default_bounds)
def evaluate(self, x):
x1, x2 = x[0], x[1]
return -np.cos(x1) * np.cos(x2) * np.exp(-(x1 - np.pi) ** 2 - (x2 - np.pi) ** 2)
class StyblinskiTang(BaseProblem):
def __init__(self, dim=2, bounds=None, default_bounds=(-5, 5)):
super().__init__(dim, bounds, default_bounds)
def evaluate(self, x):
return sum(x ** 4 - 16 * x ** 2 + 5 * x) / 2
class Michalewicz(BaseProblem):
def __init__(self, dim=2, bounds=None, default_bounds=(0, np.pi), m=10):
super().__init__(dim, bounds, default_bounds)
self.m = m
def evaluate(self, x):
c = 0
for i in range(0, len(x)):
c += np.sin(x[i]) * np.sin(( (i+1) * x[i]**2)/np.pi) ** (2*self.m)
return -c
class Schwefel(BaseProblem):
def __init__(self, dim=2, bounds=None, default_bounds=(-500, 500)):
super().__init__(dim, bounds, default_bounds)
def evaluate(self, x):
d = len(x)
return 418.9829*d - sum(x*np.sin(np.sqrt(np.abs(x))))
class Levy(BaseProblem):
def __init__(self, dim=2, bounds=None, default_bounds=(-10, 10)):
super().__init__(dim, bounds, default_bounds)
def evaluate(self, x):
w = 1 + (x - 1) / 4
wp = w[:-1]
wd = w[-1]
a = np.sin(np.pi * w[0]) ** 2
b = sum((wp - 1) ** 2 * (1 + 10 * np.sin(np.pi * wp + 1) ** 2))
c = (wd - 1) ** 2 * (1 + np.sin(2 * np.pi * wd) ** 2)
return a + b + c
class DixonPrice(BaseProblem):
def __init__(self, dim=2, bounds=None, default_bounds=(-10, 10)):
super().__init__(dim, bounds, default_bounds)
def evaluate(self, x):
c = 0
for i in range(1, len(x)):
c += i * (2 * x[i] ** 2 - x[i-1]) ** 2
return (x[0] - 1) ** 2 + c
class Griewank(BaseProblem):
def __init__(self, dim=2, bounds=None, default_bounds=(-600, 600)):
super().__init__(dim, bounds, default_bounds)
def evaluate(self, x):
a = sum(x ** 2 / 4000)
b = 1
for i in range(len(x)):
b *= np.cos(x[i] / np.sqrt(i+1))
return a - b + 1
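# Hedged usage sketch, not part of the original module: how these benchmark
# problems are typically exercised. Only numpy and the classes defined above
# are assumed; the plotting call is left commented out to keep this headless.
if __name__ == '__main__':
    problem = Ackley(dim=2)
    # The Ackley global minimum is at the origin with value 0.
    assert abs(problem(np.zeros(problem.dimensions))) < 1e-12
    # A random evaluation inside the declared bounds.
    lows = np.array([min(b) for b in problem.bounds])
    highs = np.array([max(b) for b in problem.bounds])
    x = np.random.uniform(lows, highs)
    print(problem, '->', problem(x))
    # problem.plot3d()  # uncomment for an interactive surface plot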
|
|
#!/usr/bin/env python
"""Test classes for ACL-related testing."""
from typing import Optional
from grr_response_core.lib import rdfvalue
from grr_response_server import data_store
from grr_response_server.gui import api_call_context
from grr_response_server.gui.api_plugins import user as api_user
from grr_response_server.rdfvalues import objects as rdf_objects
def CreateUser(username):
"""Creates a user."""
data_store.REL_DB.WriteGRRUser(username)
def CreateAdminUser(username):
data_store.REL_DB.WriteGRRUser(
username, user_type=rdf_objects.GRRUser.UserType.USER_TYPE_ADMIN)
def BuildClientApprovalRequest(
client_id: Optional[str] = None,
requestor_username: Optional[str] = None,
reason: Optional[str] = None) -> rdf_objects.ApprovalRequest:
return rdf_objects.ApprovalRequest(
approval_type=rdf_objects.ApprovalRequest.ApprovalType
.APPROVAL_TYPE_CLIENT,
subject_id=client_id or "C.1234",
requestor_username=requestor_username or "testuser",
reason=reason or "foo/test1234",
expiration_time=rdfvalue.RDFDatetime.Now() +
rdfvalue.Duration.From(1, rdfvalue.DAYS))
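# Note: AclTestMixin assumes the host test class provides `self.test_username`
# (used as the default requestor) and a configured relational data store, since
# the approval handlers below write through data_store.REL_DB.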
class AclTestMixin(object):
"""Mixing providing ACL-related helper methods."""
def CreateUser(self, username):
return CreateUser(username)
def CreateAdminUser(self, username):
"""Creates a user and makes it an admin."""
return CreateAdminUser(username)
def RequestClientApproval(self,
client_id,
reason="Running tests",
requestor=None,
email_cc_address=None,
approver=u"approver"):
"""Create an approval request to be sent to approver."""
if not requestor:
requestor = self.test_username
self.CreateUser(requestor)
self.CreateUser(approver)
args = api_user.ApiCreateClientApprovalArgs(
client_id=client_id,
approval=api_user.ApiClientApproval(
reason=reason,
notified_users=[approver],
email_cc_addresses=([email_cc_address]
if email_cc_address else [])))
handler = api_user.ApiCreateClientApprovalHandler()
result = handler.Handle(
args, context=api_call_context.ApiCallContext(username=requestor))
return result.id
def GrantClientApproval(self,
client_id,
requestor=None,
approval_id=None,
approver=u"approver",
admin=True):
"""Grant an approval from approver to delegate.
Args:
client_id: Client id.
requestor: username string of the user receiving approval.
approval_id: id of the approval to grant.
approver: username string of the user granting approval.
admin: If True, make approver an admin user.
Raises:
ValueError: if approval_id is empty.
"""
if not approval_id:
raise ValueError("approval_id can't be empty.")
if not requestor:
requestor = self.test_username
self.CreateUser(requestor)
if admin:
self.CreateAdminUser(approver)
else:
self.CreateUser(approver)
args = api_user.ApiGrantClientApprovalArgs(
client_id=client_id, username=requestor, approval_id=approval_id)
handler = api_user.ApiGrantClientApprovalHandler()
handler.Handle(
args, context=api_call_context.ApiCallContext(username=approver))
def RequestAndGrantClientApproval(self,
client_id,
requestor=None,
reason="Running tests",
approver=u"approver",
admin=True):
"""Request and grant client approval for a given client."""
approval_id = self.RequestClientApproval(
client_id, requestor=requestor, approver=approver, reason=reason)
self.GrantClientApproval(
client_id,
requestor=requestor,
approval_id=approval_id,
approver=approver,
admin=admin)
return approval_id
def ListClientApprovals(self, requestor=None):
requestor = requestor or self.test_username
handler = api_user.ApiListClientApprovalsHandler()
return handler.Handle(
api_user.ApiListClientApprovalsArgs(),
context=api_call_context.ApiCallContext(username=requestor)).items
def RequestHuntApproval(self,
hunt_id,
requestor=None,
reason="Running tests",
email_cc_address=None,
approver=u"approver"):
"""Request hunt approval for a given hunt."""
if not requestor:
requestor = self.test_username
self.CreateUser(requestor)
self.CreateUser(approver)
args = api_user.ApiCreateHuntApprovalArgs(
hunt_id=hunt_id,
approval=api_user.ApiHuntApproval(
reason=reason,
notified_users=[approver],
email_cc_addresses=([email_cc_address]
if email_cc_address else [])))
handler = api_user.ApiCreateHuntApprovalHandler()
result = handler.Handle(
args, context=api_call_context.ApiCallContext(username=requestor))
return result.id
def GrantHuntApproval(self,
hunt_id,
requestor=None,
approval_id=None,
approver=u"approver",
admin=True):
"""Grants an approval for a given hunt."""
if not approval_id:
raise ValueError("approval_id can't be empty.")
if not requestor:
requestor = self.test_username
self.CreateUser(requestor)
if admin:
self.CreateAdminUser(approver)
else:
self.CreateUser(approver)
args = api_user.ApiGrantHuntApprovalArgs(
hunt_id=hunt_id, username=requestor, approval_id=approval_id)
handler = api_user.ApiGrantHuntApprovalHandler()
handler.Handle(
args, context=api_call_context.ApiCallContext(username=approver))
def RequestAndGrantHuntApproval(self,
hunt_id,
requestor=None,
reason="test",
email_cc_address=None,
approver=u"approver",
admin=True):
"""Requests and grants hunt approval for a given hunt."""
approval_id = self.RequestHuntApproval(
hunt_id,
requestor=requestor,
reason=reason,
email_cc_address=email_cc_address,
approver=approver)
self.GrantHuntApproval(
hunt_id,
requestor=requestor,
approval_id=approval_id,
approver=approver,
admin=admin)
return approval_id
def ListHuntApprovals(self, requestor=None):
requestor = requestor or self.test_username
handler = api_user.ApiListHuntApprovalsHandler()
return handler.Handle(
api_user.ApiListHuntApprovalsArgs(),
context=api_call_context.ApiCallContext(username=requestor)).items
def RequestCronJobApproval(self,
cron_job_id,
requestor=None,
reason="Running tests",
email_cc_address=None,
approver=u"approver"):
"""Request cron job approval for a given cron job."""
if not requestor:
requestor = self.test_username
self.CreateUser(requestor)
self.CreateUser(approver)
args = api_user.ApiCreateCronJobApprovalArgs(
cron_job_id=cron_job_id,
approval=api_user.ApiCronJobApproval(
reason=reason,
notified_users=[approver],
email_cc_addresses=([email_cc_address]
if email_cc_address else [])))
handler = api_user.ApiCreateCronJobApprovalHandler()
result = handler.Handle(
args, context=api_call_context.ApiCallContext(username=requestor))
return result.id
def GrantCronJobApproval(self,
cron_job_id,
requestor=None,
approval_id=None,
approver="approver",
admin=True):
"""Grants an approval for a given cron job."""
if not requestor:
requestor = self.test_username
if not approval_id:
raise ValueError("approval_id can't be empty.")
self.CreateUser(requestor)
if admin:
self.CreateAdminUser(approver)
else:
self.CreateUser(approver)
args = api_user.ApiGrantCronJobApprovalArgs(
cron_job_id=cron_job_id, username=requestor, approval_id=approval_id)
handler = api_user.ApiGrantCronJobApprovalHandler()
handler.Handle(
args, context=api_call_context.ApiCallContext(username=approver))
def RequestAndGrantCronJobApproval(self,
cron_job_id,
requestor=None,
reason="test",
email_cc_address=None,
approver=u"approver",
admin=True):
"""Requests and grants an approval for a given cron job."""
approval_id = self.RequestCronJobApproval(
cron_job_id,
requestor=requestor,
reason=reason,
email_cc_address=email_cc_address,
approver=approver)
self.GrantCronJobApproval(
cron_job_id,
requestor=requestor,
approval_id=approval_id,
approver=approver,
admin=admin)
return approval_id
def ListCronJobApprovals(self, requestor=None):
requestor = requestor or self.test_username
handler = api_user.ApiListCronJobApprovalsHandler()
return handler.Handle(
api_user.ApiListCronJobApprovalsArgs(),
context=api_call_context.ApiCallContext(username=requestor)).items
|
|
"""Carrot compatibility interface.
See http://packages.python.org/pypi/carrot for documentation.
"""
from __future__ import absolute_import, unicode_literals
from itertools import count
from . import messaging
from .entity import Exchange, Queue
from .five import items
__all__ = ['Publisher', 'Consumer']
# XXX compat attribute
entry_to_queue = Queue.from_dict
def _iterconsume(connection, consumer, no_ack=False, limit=None):
consumer.consume(no_ack=no_ack)
    for iteration in count(0):  # for infinity
        if limit and iteration >= limit:
            return  # end the generator cleanly (PEP 479: don't raise StopIteration)
yield connection.drain_events()
class Publisher(messaging.Producer):
"""Carrot compatible producer."""
exchange = ''
exchange_type = 'direct'
routing_key = ''
durable = True
auto_delete = False
_closed = False
def __init__(self, connection, exchange=None, routing_key=None,
exchange_type=None, durable=None, auto_delete=None,
channel=None, **kwargs):
if channel:
connection = channel
self.exchange = exchange or self.exchange
self.exchange_type = exchange_type or self.exchange_type
self.routing_key = routing_key or self.routing_key
if auto_delete is not None:
self.auto_delete = auto_delete
if durable is not None:
self.durable = durable
if not isinstance(self.exchange, Exchange):
self.exchange = Exchange(name=self.exchange,
type=self.exchange_type,
routing_key=self.routing_key,
auto_delete=self.auto_delete,
durable=self.durable)
super(Publisher, self).__init__(connection, self.exchange, **kwargs)
def send(self, *args, **kwargs):
return self.publish(*args, **kwargs)
def close(self):
super(Publisher, self).close()
self._closed = True
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
@property
def backend(self):
return self.channel
class Consumer(messaging.Consumer):
"""Carrot compatible consumer."""
queue = ''
exchange = ''
routing_key = ''
exchange_type = 'direct'
durable = True
exclusive = False
auto_delete = False
_closed = False
def __init__(self, connection, queue=None, exchange=None,
routing_key=None, exchange_type=None, durable=None,
exclusive=None, auto_delete=None, **kwargs):
self.backend = connection.channel()
if durable is not None:
self.durable = durable
if exclusive is not None:
self.exclusive = exclusive
if auto_delete is not None:
self.auto_delete = auto_delete
self.queue = queue or self.queue
self.exchange = exchange or self.exchange
self.exchange_type = exchange_type or self.exchange_type
self.routing_key = routing_key or self.routing_key
exchange = Exchange(self.exchange,
type=self.exchange_type,
routing_key=self.routing_key,
auto_delete=self.auto_delete,
durable=self.durable)
queue = Queue(self.queue,
exchange=exchange,
routing_key=self.routing_key,
durable=self.durable,
exclusive=self.exclusive,
auto_delete=self.auto_delete)
super(Consumer, self).__init__(self.backend, queue, **kwargs)
def revive(self, channel):
self.backend = channel
super(Consumer, self).revive(channel)
def close(self):
self.cancel()
self.backend.close()
self._closed = True
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def __iter__(self):
return self.iterqueue(infinite=True)
def fetch(self, no_ack=None, enable_callbacks=False):
if no_ack is None:
no_ack = self.no_ack
message = self.queues[0].get(no_ack)
if message:
if enable_callbacks:
self.receive(message.payload, message)
return message
def process_next(self):
raise NotImplementedError('Use fetch(enable_callbacks=True)')
def discard_all(self, filterfunc=None):
if filterfunc is not None:
raise NotImplementedError(
'discard_all does not implement filters')
return self.purge()
def iterconsume(self, limit=None, no_ack=None):
return _iterconsume(self.connection, self, no_ack, limit)
def wait(self, limit=None):
it = self.iterconsume(limit)
return list(it)
def iterqueue(self, limit=None, infinite=False):
for items_since_start in count(): # for infinity
item = self.fetch()
            if (not infinite and item is None) or \
                    (limit and items_since_start >= limit):
                return  # end the generator cleanly (PEP 479: don't raise StopIteration)
yield item
class ConsumerSet(messaging.Consumer):
def __init__(self, connection, from_dict=None, consumers=None,
channel=None, **kwargs):
if channel:
self._provided_channel = True
self.backend = channel
else:
self._provided_channel = False
self.backend = connection.channel()
queues = []
if consumers:
for consumer in consumers:
queues.extend(consumer.queues)
if from_dict:
for queue_name, queue_options in items(from_dict):
queues.append(Queue.from_dict(queue_name, **queue_options))
super(ConsumerSet, self).__init__(self.backend, queues, **kwargs)
def iterconsume(self, limit=None, no_ack=False):
return _iterconsume(self.connection, self, no_ack, limit)
def discard_all(self):
return self.purge()
def add_consumer_from_dict(self, queue, **options):
return self.add_queue(Queue.from_dict(queue, **options))
def add_consumer(self, consumer):
for queue in consumer.queues:
self.add_queue(queue)
def revive(self, channel):
self.backend = channel
super(ConsumerSet, self).revive(channel)
def close(self):
self.cancel()
if not self._provided_channel:
self.channel.close()
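# Hedged usage sketch, not part of the original module: the carrot-style
# classes above wrap kombu's Producer/Consumer. This assumes only kombu's
# public Connection API and the in-memory transport; all names ('compat_x',
# 'compat_q', 'compat_rk') are illustrative.
def _demo_carrot_compat():
    from kombu import Connection
    with Connection('memory://') as connection:
        consumer = Consumer(connection, queue='compat_q',
                            exchange='compat_x', routing_key='compat_rk')
        consumer.declare()  # declare/bind the queue before publishing
        publisher = Publisher(connection, exchange='compat_x',
                              routing_key='compat_rk')
        publisher.send({'hello': 'world'})
        message = consumer.fetch(no_ack=True)
        print(message.payload if message else 'no message')
        publisher.close()
        consumer.close()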
|
|
"""
Module of library functions, to perform core scaling operations on reflection
tables and experiment lists. Some functions, such as create_scaling_model and
merging statistics calculations are called from the main dials.scale script,
whereas others are provided as library functions for calling from custom
scripts. The functions defined here should ideally only require reflection
tables and ExperimentList objects (and sometimes phil_scope objects if
necessary), and return common dials objects such as reflection tables and
ExperimentLists.
"""
from __future__ import annotations
import logging
from copy import deepcopy
from unittest.mock import Mock
import numpy as np
import pkg_resources
import iotbx.merging_statistics
from cctbx import crystal, miller, uctbx
from dxtbx.model import Experiment
from dxtbx.util import ersatz_uuid4
from iotbx import cif, mtz
from libtbx import Auto, phil
from dials.algorithms.scaling.Ih_table import IhTable
from dials.algorithms.scaling.model.model import KBScalingModel, PhysicalScalingModel
from dials.algorithms.scaling.scaling_utilities import (
DialsMergingStatisticsError,
calculate_prescaling_correction,
)
from dials.array_family import flex
from dials.util import Sorry
from dials.util.options import ArgumentParser
logger = logging.getLogger("dials")
def set_image_ranges_in_scaling_models(experiments):
"""Set the batch range in scaling models if not already set."""
for exp in experiments:
if exp.scan:
valid_image_ranges = exp.scan.get_valid_image_ranges(exp.identifier)
if "valid_image_range" not in exp.scaling_model.configdict:
# only set if not currently set i.e. set initial
exp.scaling_model.set_valid_image_range(exp.scan.get_image_range())
if exp.scaling_model.configdict["valid_image_range"] != [
valid_image_ranges[0][0],
valid_image_ranges[-1][1],
]:
# first and last values in whole list of tuples
exp.scaling_model.limit_image_range(
(valid_image_ranges[0][0], valid_image_ranges[-1][1])
)
return experiments
def choose_initial_scaling_intensities(reflection_table, intensity_choice="profile"):
"""Choose which intensities to initially use for scaling. The LP, QE and
partiality corrections are also applied. Two new columns are
added to the reflection table 'intensity' and 'variance', which have
all corrections applied except an inverse scale factor."""
if intensity_choice == "profile":
intensity_choice = "prf" # rename to allow string matching with refl table
if "intensity.prf.value" not in reflection_table:
intensity_choice = "sum"
elif intensity_choice == "prf":
if (
reflection_table.get_flags(reflection_table.flags.integrated_prf).count(
True
)
== 0
):
logger.warning(
"No profile fitted reflections in this dataset, using summation intensities"
)
intensity_choice = "sum"
reflection_table = calculate_prescaling_correction(reflection_table)
conv = reflection_table["prescaling_correction"]
# if prf/sum, use those. If combine, use prf else sum for each refl.
if intensity_choice == "prf":
reflection_table["intensity"] = reflection_table["intensity.prf.value"] * conv
reflection_table["variance"] = (
reflection_table["intensity.prf.variance"] * conv * conv
)
else:
# first fill in summation intensities.
if "partiality" in reflection_table:
inverse_partiality = flex.double(reflection_table.size(), 1.0)
nonzero_partiality_sel = reflection_table["partiality"] > 0.0
good_refl = reflection_table.select(reflection_table["partiality"] > 0.0)
inverse_partiality.set_selected(
nonzero_partiality_sel.iselection(), 1.0 / good_refl["partiality"]
)
reflection_table["intensity"] = (
reflection_table["intensity.sum.value"] * conv * inverse_partiality
)
reflection_table["variance"] = reflection_table[
"intensity.sum.variance"
] * flex.pow2(conv * inverse_partiality)
if "partiality.inv.variance" in reflection_table:
reflection_table["variance"] += (
reflection_table["intensity.sum.value"]
* conv
* reflection_table["partiality.inv.variance"]
)
else:
reflection_table["intensity"] = (
reflection_table["intensity.sum.value"] * conv
)
reflection_table["variance"] = (
reflection_table["intensity.sum.variance"] * conv * conv
)
if intensity_choice == "combine":
# now overwrite prf if we have it.
sel = reflection_table.get_flags(reflection_table.flags.integrated_prf)
isel = sel.iselection()
Iprf = (reflection_table["intensity.prf.value"] * conv).select(sel)
Vprf = (reflection_table["intensity.prf.variance"] * conv * conv).select(
sel
)
reflection_table["intensity"].set_selected(isel, Iprf)
reflection_table["variance"].set_selected(isel, Vprf)
variance_mask = reflection_table["variance"] <= 0.0
reflection_table.set_flags(
variance_mask, reflection_table.flags.excluded_for_scaling
)
return reflection_table
def scale_against_target(
reflection_table,
experiment,
target_reflection_table,
target_experiment,
params=None,
model="KB",
):
"""Determine scale factors for a single dataset, by scaling against a target
reflection table. Requires a single reflection table for the reflections to
scale and the target dataset, and an ExperimentList for both datasets. The
params option can also be specified, if None then the default scaling
configuration is used. The scaling model can be specified individually.
Returns the reflection table, with added columns 'inverse_scale_factor' and
'inverse_scale_factor_variance'."""
if not params:
phil_scope = phil.parse(
"""
include scope dials.algorithms.scaling.scaling_options.phil_scope
include scope dials.algorithms.scaling.model.model.model_phil_scope
include scope dials.algorithms.scaling.scaling_refiner.scaling_refinery_phil_scope
""",
process_includes=True,
)
parser = ArgumentParser(phil=phil_scope, check_format=False)
params, _ = parser.parse_args(args=[], quick_parse=True)
params.model = model
from dials.algorithms.scaling.scaler_factory import TargetScalerFactory
reflections = [reflection_table, target_reflection_table]
experiment.append(target_experiment[0])
experiments = create_scaling_model(params, experiment, reflections)
experiments[-1].scaling_model.set_scaling_model_as_scaled()
scaler = TargetScalerFactory.create(params, experiments, reflections)
scaler.perform_scaling()
scaler.expand_scales_to_all_reflections(calc_cov=True)
return scaler.unscaled_scalers[0].reflection_table
def scale_single_dataset(reflection_table, experiment, params=None, model="physical"):
"""Determine scale factors for a single dataset. Requires a reflection table
and an ExperimentList with a single experiment. A custom params option can be
specified, if not the default scaling params option will be used, with default
configuration options. The model can be individually specified.
Returns the reflection table, with added columns 'inverse_scale_factor' and
'inverse_scale_factor_variance'."""
if not params:
phil_scope = phil.parse(
"""
include scope dials.algorithms.scaling.model.model.model_phil_scope
include scope dials.algorithms.scaling.scaling_options.phil_scope
include scope dials.algorithms.scaling.scaling_refiner.scaling_refinery_phil_scope
""",
process_includes=True,
)
parser = ArgumentParser(phil=phil_scope, check_format=False)
params, _ = parser.parse_args(args=[], quick_parse=True)
params.model = model
from dials.algorithms.scaling.scaler_factory import SingleScalerFactory
experiments = create_scaling_model(params, experiment, [reflection_table])
scaler = SingleScalerFactory.create(params, experiments[0], reflection_table)
from dials.algorithms.scaling.algorithm import scaling_algorithm
scaler = scaling_algorithm(scaler)
return scaler.reflection_table
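# Hedged usage sketch: from a custom script the helpers above might be called
# roughly as follows, assuming `refls`/`expts` (and the target equivalents)
# were loaded with the usual dials/dxtbx utilities; the names are illustrative:
#   scaled = scale_single_dataset(refls, expts, model="physical")
#   scaled = scale_against_target(refls, expts, target_refls, target_expts)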
def create_scaling_model(params, experiments, reflections):
"""Loop through the experiments, creating the scaling models."""
autos = [None, Auto, "auto", "Auto"]
use_auto_model = params.model in autos
# Determine non-auto model to use outside the loop over datasets.
if not use_auto_model:
model_class = None
for entry_point in pkg_resources.iter_entry_points("dxtbx.scaling_model_ext"):
if entry_point.name == params.model:
model_class = entry_point.load()
break
if not model_class:
raise ValueError(f"Unable to create scaling model of type {params.model}")
for expt, refl in zip(experiments, reflections):
if not expt.scaling_model or params.overwrite_existing_models:
# need to make a new model
if use_auto_model:
if not expt.scan:
model = KBScalingModel
else: # set model as physical unless scan < 1.0 degree
osc_range = expt.scan.get_oscillation_range()
abs_osc_range = abs(osc_range[1] - osc_range[0])
if abs_osc_range < 1.0:
model = KBScalingModel
else:
model = PhysicalScalingModel
else:
model = model_class
expt.scaling_model = model.from_data(params, expt, refl)
else:
# allow for updating of an existing model.
expt.scaling_model.update(params)
return experiments
def create_Ih_table(
experiments, reflections, selections=None, n_blocks=1, anomalous=False
):
"""Create an Ih table from a list of experiments and reflections. Optionally,
a selection list can also be given, to select data from each reflection table.
Allow an unequal number of experiments and reflections, as only need to
extract one space group value (can optionally check all same if many)."""
if selections:
assert len(selections) == len(
reflections
), """Must have an equal number of
reflection tables and selections in the input lists."""
space_group_0 = experiments[0].crystal.get_space_group()
for experiment in experiments:
assert (
experiment.crystal.get_space_group() == space_group_0
), """The space
groups of all experiments must be equal."""
input_tables = []
indices_lists = []
for i, reflection in enumerate(reflections):
if "inverse_scale_factor" not in reflection:
reflection["inverse_scale_factor"] = flex.double(reflection.size(), 1.0)
if selections:
input_tables.append(reflection.select(selections[i]))
indices_lists.append(selections[i].iselection())
else:
input_tables.append(reflection)
indices_lists = None
Ih_table = IhTable(
input_tables,
space_group_0,
indices_lists,
nblocks=n_blocks,
anomalous=anomalous,
)
return Ih_table
def scaled_data_as_miller_array(
reflection_table_list,
experiments,
best_unit_cell=None,
anomalous_flag=False,
wavelength=None,
):
"""Get a scaled miller array from an experiment and reflection table."""
if len(reflection_table_list) > 1:
joint_table = flex.reflection_table()
for reflection_table in reflection_table_list:
# better to just create many miller arrays and join them?
refl_for_joint_table = flex.reflection_table()
for col in [
"miller_index",
"intensity.scale.value",
"inverse_scale_factor",
"intensity.scale.variance",
]:
refl_for_joint_table[col] = reflection_table[col]
good_refl_sel = ~reflection_table.get_flags(
reflection_table.flags.bad_for_scaling, all=False
)
refl_for_joint_table = refl_for_joint_table.select(good_refl_sel)
joint_table.extend(refl_for_joint_table)
else:
reflection_table = reflection_table_list[0]
good_refl_sel = ~reflection_table.get_flags(
reflection_table.flags.bad_for_scaling, all=False
)
joint_table = reflection_table.select(good_refl_sel)
# Filter out negative scale factors to avoid merging statistics errors.
# These are not removed from the output data, as it is likely one would
# want to do further analysis e.g. delta cc1/2 and rescaling, to exclude
# certain data and get better scale factors for all reflections.
pos_scales = joint_table["inverse_scale_factor"] > 0
if pos_scales.count(False) > 0:
logger.info(
"""There are %s reflections with non-positive scale factors which
will not be used for calculating merging statistics""",
pos_scales.count(False),
)
joint_table = joint_table.select(pos_scales)
if best_unit_cell is None:
best_unit_cell = determine_best_unit_cell(experiments)
miller_set = miller.set(
crystal_symmetry=crystal.symmetry(
unit_cell=best_unit_cell,
space_group=experiments[0].crystal.get_space_group(),
assert_is_compatible_unit_cell=False,
),
indices=joint_table["miller_index"],
anomalous_flag=anomalous_flag,
)
i_obs = miller.array(
miller_set,
data=joint_table["intensity.scale.value"] / joint_table["inverse_scale_factor"],
)
i_obs.set_observation_type_xray_intensity()
i_obs.set_sigmas(
flex.sqrt(joint_table["intensity.scale.variance"])
/ joint_table["inverse_scale_factor"]
)
if not wavelength:
wavelength = np.mean([expt.beam.get_wavelength() for expt in experiments])
i_obs.set_info(
miller.array_info(
source="DIALS",
source_type="reflection_tables",
wavelength=wavelength,
)
)
return i_obs
def determine_best_unit_cell(experiments):
"""Set the median unit cell as the best cell, for consistent d-values across
experiments."""
uc_params = [flex.double() for i in range(6)]
for exp in experiments:
unit_cell = (
exp.crystal.get_recalculated_unit_cell() or exp.crystal.get_unit_cell()
)
for i, p in enumerate(unit_cell.parameters()):
uc_params[i].append(p)
best_unit_cell = uctbx.unit_cell(parameters=[flex.median(p) for p in uc_params])
if len(experiments) > 1:
logger.info("Using median unit cell across experiments : %s", best_unit_cell)
return best_unit_cell
def merging_stats_from_scaled_array(
scaled_miller_array, n_bins=20, use_internal_variance=False, anomalous=True
):
"""Calculate the normal and anomalous merging statistics."""
if scaled_miller_array.is_unique_set_under_symmetry():
raise DialsMergingStatisticsError(
"Dataset contains no equivalent reflections, merging statistics "
"cannot be calculated."
)
try:
result = iotbx.merging_statistics.dataset_statistics(
i_obs=scaled_miller_array,
n_bins=n_bins,
anomalous=False,
sigma_filtering=None,
eliminate_sys_absent=False,
use_internal_variance=use_internal_variance,
cc_one_half_significance_level=0.01,
)
except (RuntimeError, Sorry) as e:
raise DialsMergingStatisticsError(
f"Error encountered during merging statistics calculation:\n{e}"
)
anom_result = None
if anomalous:
intensities_anom = scaled_miller_array.as_anomalous_array()
intensities_anom = intensities_anom.map_to_asu().customized_copy(
info=scaled_miller_array.info()
)
if intensities_anom.is_unique_set_under_symmetry():
logger.warning(
"Anomalous dataset contains no equivalent reflections, anomalous "
"merging statistics cannot be calculated."
)
else:
try:
anom_result = iotbx.merging_statistics.dataset_statistics(
i_obs=intensities_anom,
n_bins=n_bins,
anomalous=True,
sigma_filtering=None,
cc_one_half_significance_level=0.01,
eliminate_sys_absent=False,
use_internal_variance=use_internal_variance,
)
except (RuntimeError, Sorry) as e:
logger.warning(
"Error encountered during anomalous merging statistics "
"calculation:\n%s",
e,
exc_info=True,
)
return result, anom_result
def intensity_array_from_cif_file(cif_file):
"""Return an intensity miller array from a cif file."""
structures = cif.reader(file_path=cif_file).build_crystal_structures()
try:
model = structures["1"]
except KeyError:
raise KeyError("Unable to extract structure from cif file")
ic = (
model.structure_factors(anomalous_flag=True, d_min=0.4, algorithm="direct")
.f_calc()
.as_intensity_array()
)
return ic
def create_datastructures_for_target_mtz(experiments, mtz_file):
"""Read a merged mtz file and extract miller indices, intensities and
variances."""
m = mtz.object(mtz_file)
ind = m.extract_miller_indices()
cols = m.columns()
col_dict = {c.label(): c for c in cols}
r_t = flex.reflection_table()
if "I" in col_dict: # nice and simple
r_t["miller_index"] = ind
r_t["intensity"] = col_dict["I"].extract_values().as_double()
r_t["variance"] = flex.pow2(col_dict["SIGI"].extract_values().as_double())
elif "IMEAN" in col_dict: # nice and simple
r_t["miller_index"] = ind
r_t["intensity"] = col_dict["IMEAN"].extract_values().as_double()
r_t["variance"] = flex.pow2(col_dict["SIGIMEAN"].extract_values().as_double())
elif "I(+)" in col_dict: # need to combine I+ and I- together into target Ih
if col_dict["I(+)"].n_valid_values() == 0: # use I(-)
r_t["miller_index"] = ind
r_t["intensity"] = col_dict["I(-)"].extract_values().as_double()
r_t["variance"] = flex.pow2(
col_dict["SIGI(-)"].extract_values().as_double()
)
elif col_dict["I(-)"].n_valid_values() == 0: # use I(+)
r_t["miller_index"] = ind
r_t["intensity"] = col_dict["I(+)"].extract_values().as_double()
r_t["variance"] = flex.pow2(
col_dict["SIGI(+)"].extract_values().as_double()
)
else: # Combine both - add together then use Ih table to calculate I and sigma
r_tplus = flex.reflection_table()
r_tminus = flex.reflection_table()
r_tplus["miller_index"] = ind
r_tplus["intensity"] = col_dict["I(+)"].extract_values().as_double()
r_tplus["variance"] = flex.pow2(
col_dict["SIGI(+)"].extract_values().as_double()
)
r_tminus["miller_index"] = ind
r_tminus["intensity"] = col_dict["I(-)"].extract_values().as_double()
r_tminus["variance"] = flex.pow2(
col_dict["SIGI(-)"].extract_values().as_double()
)
r_tplus.extend(r_tminus)
r_tplus.set_flags(
flex.bool(r_tplus.size(), False), r_tplus.flags.bad_for_scaling
)
r_tplus = r_tplus.select(r_tplus["variance"] != 0.0)
Ih_table = create_Ih_table(
[experiments[0]], [r_tplus], anomalous=True
).blocked_data_list[0]
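            # The Ih table weights are (by default) inverse variances, w_i = 1/sigma_i^2;
            # summing them per symmetry group gives Var(Ih) = 1 / sum(w_i) for the
            # weighted-mean intensity, hence the reciprocal taken below.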
r_t["intensity"] = Ih_table.Ih_values
inv_var = Ih_table.sum_in_groups(Ih_table.weights, output="per_refl")
r_t["variance"] = 1.0 / inv_var
r_t["miller_index"] = Ih_table.miller_index
else:
raise KeyError("Unable to find intensities (tried I, IMEAN, I(+)/I(-))")
logger.info(f"Extracted {r_t.size()} intensities from target mtz")
r_t = r_t.select(r_t["variance"] > 0.0)
if r_t.size() == 0:
raise ValueError("No reflections with positive sigma remain after filtering")
r_t["d"] = (
miller.set(
crystal_symmetry=crystal.symmetry(
space_group=m.space_group(), unit_cell=m.crystals()[0].unit_cell()
),
indices=r_t["miller_index"],
)
.d_spacings()
.data()
)
r_t.set_flags(flex.bool(r_t.size(), True), r_t.flags.integrated)
exp = Experiment()
exp.crystal = deepcopy(experiments[0].crystal)
exp.identifier = ersatz_uuid4()
r_t.experiment_identifiers()[len(experiments)] = exp.identifier
r_t["id"] = flex.int(r_t.size(), len(experiments))
# create a new KB scaling model for the target and set as scaled to fix scale
# for targeted scaling.
params = Mock()
params.KB.decay_correction.return_value = False
exp.scaling_model = KBScalingModel.from_data(params, [], [])
exp.scaling_model.set_scaling_model_as_scaled() # Set as scaled to fix scale.
return exp, r_t
def create_datastructures_for_structural_model(reflections, experiments, cif_file):
"""Read a cif file, calculate intensities and scale them to the average
intensity of the reflections. Return an experiment and reflection table to
be used for the structural model in scaling."""
    # Read the model from the cif file, compute F_calc and convert to intensities (|F|^2).
ic = intensity_array_from_cif_file(cif_file)
exp = deepcopy(experiments[0])
params = Mock()
params.decay_correction.return_value = False
exp.scaling_model = KBScalingModel.from_data(params, [], [])
exp.scaling_model.set_scaling_model_as_scaled() # Set as scaled to fix scale.
# Now put the calculated I's on roughly a common scale with the data.
miller_indices = flex.miller_index([])
intensities = flex.double([])
for refl in reflections:
miller_indices.extend(refl["miller_index"])
intensities.extend(refl["intensity.prf.value"])
miller_set = miller.set(
crystal_symmetry=crystal.symmetry(
space_group=experiments[0].crystal.get_space_group()
),
indices=miller_indices,
anomalous_flag=True,
)
idata = miller.array(miller_set, data=intensities)
match = idata.match_indices(ic)
pairs = match.pairs()
icalc = flex.double()
iobs = flex.double()
miller_idx = flex.miller_index()
for p in pairs:
# Note : will create miller_idx duplicates in i_calc - problem?
iobs.append(idata.data()[p[0]])
icalc.append(ic.data()[p[1]])
miller_idx.append(ic.indices()[p[1]])
icalc *= flex.sum(iobs) / flex.sum(icalc)
rt = flex.reflection_table()
rt["intensity"] = icalc
rt["miller_index"] = miller_idx
exp.identifier = ersatz_uuid4()
rt.experiment_identifiers()[len(experiments)] = exp.identifier
rt["id"] = flex.int(rt.size(), len(experiments))
return exp, rt
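# Illustrative usage sketch (not part of the original code; the "target.mtz" path and the
# append-based wiring are assumptions): both helpers return an (experiment, reflection_table)
# pair that the caller can add to the data being scaled, e.g.
#     exp, table = create_datastructures_for_target_mtz(experiments, "target.mtz")
#     experiments.append(exp)
#     reflections.append(table)
# The same pattern applies to create_datastructures_for_structural_model.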
|
|
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 2: Nature-Inspired Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2014 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
from alife_milestone1 import *
class PlantGrowth:
    # Transformations to move from a cell to each of the 8 neighboring cells.
    # These are the column offsets.
    col_transform = [0, 0, -1, 1, -1, 1, 1, -1]
    # Transformations to move from a cell to each of the 8 neighboring cells.
    # These are the row offsets.
    row_transform = [-1, 1, 0, 0, -1, 1, -1, 1]
def __init__(self):
# Used to hold the new cells that have grown.
        self.new_composition = [[False for j in range(PlantUniverse.UNIVERSE_WIDTH)]
                                for i in range(PlantUniverse.UNIVERSE_HEIGHT)]
def calc_distance(self, v1, v1_start, v2, v2_start, l):
        total = 0
        for i in range(0, l):
            d = v1[v1_start + i] - v2[v2_start + i]
            total = total + (d * d)
        return math.sqrt(total)
def get_growth_potential(self, universe, row, col, genome):
"""
Calculate the growth potential for a candidate cell. Evaluates the distance between the candidate cell's info
vector and the two growth vectors in the genome. The minimum of these two vectors will be returned if
it is below a specified minimum threshold.
@param universe The universe to evaluate.
@param row The row to evaluate.
@param col The column to evaluate.
@param genome The genome.
@return The minimum distance.
"""
cellVec = universe.get_cell_info_vector(row, col)
d1 = self.calc_distance(cellVec, 0, genome, PlantUniverse.CELL_VECTOR_LENGTH * 2,
PlantUniverse.CELL_VECTOR_LENGTH)
d2 = self.calc_distance(cellVec, 0, genome, PlantUniverse.CELL_VECTOR_LENGTH * 3,
PlantUniverse.CELL_VECTOR_LENGTH)
result = min(d1, d2)
if result > PlantUniverse.MIN_GROWTH_DIST:
result = -1
return result
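    # Note on genome layout (inferred from the offsets used in this class): the genome is
    # assumed to pack four vectors of CELL_VECTOR_LENGTH values each, namely two composition
    # vectors (offsets 0 and CELL_VECTOR_LENGTH, compared in run_growth) followed by two
    # growth vectors (offsets 2*CELL_VECTOR_LENGTH and 3*CELL_VECTOR_LENGTH, compared here).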
def evaluate_neighbors(self, universe, row, col, genome, allow_root, allow_surface):
"""
Evaluate neighbors to see where to grow into.
@param universe The universe.
@param row The row.
@param col The column.
@param genome The genome.
@param allowRoot Are roots allowed?
* @param allowSurface Is surface growth allowed.
"""
growth_target_row = row
growth_target_col = col
growth_target_score = float("inf")
for i in range(0, len(PlantGrowth.col_transform)):
eval_col = col + PlantGrowth.col_transform[i]
eval_row = row + PlantGrowth.row_transform[i]
if not allow_root and eval_row >= PlantUniverse.GROUND_LINE:
continue
if not allow_surface and eval_row < PlantUniverse.GROUND_LINE:
continue
if universe.is_valid(eval_row, eval_col):
p = self.get_growth_potential(universe, eval_row, eval_col, genome)
if p > 0:
if p < growth_target_score:
growth_target_score = p
growth_target_row = eval_row
growth_target_col = eval_col
        # Grow a new cell if a suitable growth target was found.
        if growth_target_row != row or growth_target_col != col:
self.new_composition[growth_target_row][growth_target_col] = True
def run_growth(self, universe, genome):
"""
Run a growth cycle for the universe.
@param universe The universe.
@param genome The genome.
"""
        # Does this plant have any leafy (surface) material to support growth?
if universe.surface_count == 0:
return
        # The ratio of root nourishment to leafy material. A higher number indicates
        # more root nourishment relative to leaves.
root_ratio = universe.root_count / universe.surface_count
allow_root = root_ratio < 0.5
allow_surface = root_ratio > 0.5
        # Clear the new-composition grid before evaluating growth.
for row in range(0, PlantUniverse.UNIVERSE_HEIGHT):
for col in range(0, PlantUniverse.UNIVERSE_WIDTH):
self.new_composition[row][col] = False
for row in range(0, PlantUniverse.UNIVERSE_HEIGHT):
for col in range(0, PlantUniverse.UNIVERSE_WIDTH):
cell = universe.grid[row][col]
# see if we want to change the composition
if row < PlantUniverse.GROUND_LINE:
cell_vec = universe.get_cell_info_vector(row, col)
d1 = self.calc_distance(cell_vec, 0, genome, 0, PlantUniverse.CELL_VECTOR_LENGTH)
d2 = self.calc_distance(cell_vec, 0, genome, PlantUniverse.CELL_VECTOR_LENGTH,
PlantUniverse.CELL_VECTOR_LENGTH)
if d1 < d2:
cell.leafyness = cell.leafyness * PlantUniverse.STEM_TRANSITION
# Evaluate growth into each neighbor cell
if universe.can_grow(row, col):
self.evaluate_neighbors(universe, row, col, genome, allow_root, allow_surface)
# Copy the new composition back to the universe
for row in range(0, PlantUniverse.UNIVERSE_HEIGHT):
for col in range(0, PlantUniverse.UNIVERSE_WIDTH):
cell = universe.grid[row][col]
if self.new_composition[row][col]:
if row >= PlantUniverse.GROUND_LINE:
# Roots are always 100% stem for transfer.
cell.leafyness = 0
else:
cell.leafyness = 1.0
cell.energy = 1.0
cell.nourishment = 1.0
class PlantPhysics:
def distribute_energy(self, universe):
"""
Distribute the sunlight energy in the universe.
@param universe The universe.
"""
# Distribute sun energy downward
        sunlight = [1.0] * PlantUniverse.UNIVERSE_WIDTH
for row in range(0, PlantUniverse.UNIVERSE_HEIGHT):
for col in range(0, PlantUniverse.UNIVERSE_WIDTH):
# no sun underground
if row >= PlantUniverse.GROUND_LINE:
# blocked
decay = 0
else:
# no decay until otherwise calculated
decay = 1
cell = universe.grid[row][col]
cell.calculated_sunlight = sunlight[col]
# Collect resources for live cells
if cell.is_alive():
# Live cells cause the sunlight to decay (shade)
decay *= PlantUniverse.DECAY * cell.leafyness
# Set the energy based on sunlight level and composition of the live cell
my_energy = cell.calculated_sunlight * cell.leafyness
trans_energy = universe.calculate_transfer_energy(row, col) * (1.0 - cell.leafyness)
e = max(my_energy, trans_energy)
e = max(PlantUniverse.MIN_LIVING_ENERGY, e)
cell.energy = e
sunlight[col] = sunlight[col] * decay
def distribute_nourishment(self, universe):
"""
Distribute nourishment in the universe.
@param universe The universe.
"""
root_count = 0
surface_count = 0
        # Distribute water upward from the water table.
water_table = [1.0] * PlantUniverse.UNIVERSE_WIDTH
for row in range(PlantUniverse.UNIVERSE_HEIGHT - 1, -1, -1):
for col in range(0, PlantUniverse.UNIVERSE_WIDTH):
# no water above ground
if row < PlantUniverse.GROUND_LINE:
# blocked
decay = 0
else:
# no decay until otherwise calculated
decay = 1
cell = universe.grid[row][col]
cell.calculated_water = water_table[col]
# Collect resources for live cells
if cell.is_alive():
# Live cells cause the water to decay (roots collect)
decay *= PlantUniverse.DECAY
                    # Set the nourishment based on water level and composition of the live cell
my_water = cell.calculated_water * cell.leafyness
trans_water = universe.calculate_transfer_nourishment(row, col) * (1.0 - cell.leafyness)
n = max(my_water, trans_water)
n = max(PlantUniverse.MIN_LIVING_ENERGY, n)
cell.nourishment = n
# update the root and surface counts
if row >= PlantUniverse.GROUND_LINE:
root_count += cell.nourishment
else:
surface_count += cell.leafyness
water_table[col] = water_table[col] * decay
universe.root_count = root_count
universe.surface_count = surface_count
def run_physics(self, universe):
self.distribute_energy(universe)
self.distribute_nourishment(universe)
class PlantBoxMilestone2:
SAMPLE_PLANT = [
0.08414097456375995, 0.11845586131703176, 0.1868971940834313, 0.4346911204161327,
0.024190631402031804, 0.5773526701833149, 0.8997253827355136, 0.9267311086327318,
0.04639229538493471, 0.8190692654645835, 0.06531672676605614, 0.026431639742068264,
0.31497914852215286, 1.0276526539348398, 0.03303133293309127, 0.35946010922382937]
def __init__(self):
# Setup the seed.
self.universe = PlantUniverse()
self.universe.reset()
self.physics = PlantPhysics()
self.growth = PlantGrowth()
self.cycle = 0
# Init TK
self.root = Tk()
        # The display used to render and animate the plant universe.
self.display = DisplayPlant(self.root, self.universe)
self.display.update()
self.update_clock()
self.root.mainloop()
def update_clock(self):
self.physics.run_physics(self.universe)
self.growth.run_growth(self.universe, PlantBoxMilestone2.SAMPLE_PLANT)
self.display.update()
self.cycle = self.cycle + 1
if self.cycle < PlantUniverse.EVALUATION_CYCLES:
self.root.after(100, self.update_clock)
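# Minimal entry point sketch (assumed, not part of the original file): constructing
# PlantBoxMilestone2 starts the Tk animation loop using the SAMPLE_PLANT genome.
if __name__ == "__main__":
    PlantBoxMilestone2()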
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides ShapeDiffFinder, which finds differences in OTF/TTF glyph shapes.
ShapeDiffFinder takes in two paths to font binaries. It then provides methods
that compare these fonts, storing results in a report dictionary. These methods
are `find_area_diffs`, which compares glyph areas, `find_rendered_diffs`, which
compares harfbuzz output using PIL, and `find_shape_diffs`, which takes the
difference of shapes and calculates the area.
Some caveats: glyph areas can be the same even if the shapes are wildly
different (though they're useful for shapes that should be identical except
for some offset). Image comparison is usually either slow (hi-res) or inaccurate
(lo-res). Still, these are usually useful for raising red flags and catching
large errors.
"""
from __future__ import division
import os
from PIL import Image
import re
import StringIO
import subprocess
import tempfile
import booleanOperations
from defcon import Glyph
from fontTools.pens.basePen import BasePen
from fontTools.ttLib import TTFont
from ufoLib.pointPen import PointToSegmentPen
from nototools.glyph_area_pen import GlyphAreaPen
from nototools import hb_input
GDEF_UNDEF = 0
GDEF_MARK = 3
GDEF_LABELS = ['no class', 'base', 'ligature', 'mark', 'component']
class ShapeDiffFinder:
"""Provides methods to report diffs in glyph shapes between OT Fonts."""
def __init__(
self, file_a, file_b, stats, ratio_diffs=False, diff_threshold=0):
self.path_a = file_a
self.font_a = TTFont(self.path_a)
self.glyph_set_a = self.font_a.getGlyphSet()
self.gdef_a = {}
if 'GDEF' in self.font_a:
self.gdef_a = self.font_a['GDEF'].table.GlyphClassDef.classDefs
self.path_b = file_b
self.font_b = TTFont(self.path_b)
self.glyph_set_b = self.font_b.getGlyphSet()
self.gdef_b = {}
if 'GDEF' in self.font_b:
self.gdef_b = self.font_b['GDEF'].table.GlyphClassDef.classDefs
for stat_type in (
'compared', 'untested', 'unmatched', 'unicode_mismatch',
'gdef_mark_mismatch', 'zero_width_mismatch', 'input_mismatch'):
if stat_type not in stats:
stats[stat_type] = []
self.stats = stats
self.ratio_diffs = ratio_diffs
self.diff_threshold = diff_threshold
self.basepath = os.path.basename(file_a)
def find_area_diffs(self):
"""Report differences in glyph areas."""
self.build_names()
pen_a = GlyphAreaPen(self.glyph_set_a)
pen_b = GlyphAreaPen(self.glyph_set_b)
mismatched = {}
for name in self.names:
self.glyph_set_a[name].draw(pen_a)
area_a = pen_a.pop()
self.glyph_set_b[name].draw(pen_b)
area_b = pen_b.pop()
if area_a != area_b:
mismatched[name] = (area_a, area_b)
stats = self.stats['compared']
calc = self._calc_ratio if self.ratio_diffs else self._calc_diff
for name, areas in mismatched.items():
stats.append((calc(areas), name, self.basepath, areas[0], areas[1]))
def find_rendered_diffs(self, font_size=128, render_path=None):
"""Find diffs of glyphs as rendered by harfbuzz."""
hb_input_generator_a = hb_input.HbInputGenerator(self.font_a)
hb_input_generator_b = hb_input.HbInputGenerator(self.font_b)
if render_path:
font_name, _ = os.path.splitext(self.basepath)
render_path = os.path.join(render_path, font_name)
if not os.path.exists(render_path):
os.makedirs(render_path)
self.build_names()
diffs = []
for name in self.names:
class_a = self.gdef_a.get(name, GDEF_UNDEF)
class_b = self.gdef_b.get(name, GDEF_UNDEF)
if GDEF_MARK in (class_a, class_b) and class_a != class_b:
self.stats['gdef_mark_mismatch'].append((
self.basepath, name, GDEF_LABELS[class_a],
GDEF_LABELS[class_b]))
continue
width_a = self.glyph_set_a[name].width
width_b = self.glyph_set_b[name].width
zwidth_a = width_a == 0
zwidth_b = width_b == 0
if zwidth_a != zwidth_b:
self.stats['zero_width_mismatch'].append((
self.basepath, name, width_a, width_b))
continue
hb_args_a = hb_input_generator_a.input_from_name(name, pad=zwidth_a)
hb_args_b = hb_input_generator_b.input_from_name(name, pad=zwidth_b)
if hb_args_a != hb_args_b:
self.stats['input_mismatch'].append((
self.basepath, name, hb_args_a, hb_args_b))
continue
# ignore unreachable characters
if not hb_args_a:
self.stats['untested'].append((self.basepath, name))
continue
features, text = hb_args_a
# ignore null character
if unichr(0) in text:
continue
img_file_a = StringIO.StringIO(subprocess.check_output([
'hb-view', '--font-size=%d' % font_size,
'--features=%s' % ','.join(features), self.path_a, text]))
img_file_b = StringIO.StringIO(subprocess.check_output([
'hb-view', '--font-size=%d' % font_size,
'--features=%s' % ','.join(features), self.path_b, text]))
img_a = Image.open(img_file_a)
img_b = Image.open(img_file_b)
width_a, height_a = img_a.size
width_b, height_b = img_b.size
data_a = img_a.getdata()
data_b = img_b.getdata()
img_file_a.close()
img_file_b.close()
width, height = max(width_a, width_b), max(height_a, height_b)
offset_ax = (width - width_a) // 2
offset_ay = (height - height_a) // 2
offset_bx = (width - width_b) // 2
offset_by = (height - height_b) // 2
diff = 0
for y in range(height):
for x in range(width):
ax, ay = x - offset_ax, y - offset_ay
bx, by = x - offset_bx, y - offset_by
if (ax < 0 or bx < 0 or ax >= width_a or bx >= width_b or
ay < 0 or by < 0 or ay >= height_a or by >= height_b):
diff += 1
else:
diff += abs(data_a[ax + ay * width_a] -
data_b[bx + by * width_b]) / 255
if self.ratio_diffs:
diff /= (width * height)
if render_path and diff > self.diff_threshold:
img_cmp = Image.new('RGB', (width, height))
data_cmp = list(img_cmp.getdata())
self._project(data_a, width_a, height_a,
data_cmp, width, height, 1)
self._project(data_b, width_b, height_b,
data_cmp, width, height, 0)
for y in range(height):
for x in range(width):
i = x + y * width
r, g, b = data_cmp[i]
assert b == 0
data_cmp[i] = r, g, min(r, g)
img_cmp.putdata(data_cmp)
img_cmp.save(self._rendered_png(render_path, name))
diffs.append((name, diff))
mismatched = {}
for name, diff in diffs:
if diff > self.diff_threshold:
mismatched[name] = diff
stats = self.stats['compared']
for name, diff in mismatched.items():
stats.append((diff, name, self.basepath))
def _project(
self, src_data, src_width, src_height,
dst_data, width, height, channel):
"""Project a single-channel image onto a channel of an RGB image."""
offset_x = (width - src_width) // 2
offset_y = (height - src_height) // 2
for y in range(src_height):
for x in range(src_width):
src_i = x + y * src_width
dst_i = x + offset_x + (y + offset_y) * width
pixel = list(dst_data[dst_i])
pixel[channel] = src_data[src_i]
dst_data[dst_i] = tuple(pixel)
def find_shape_diffs(self):
"""Report differences in glyph shapes, using BooleanOperations."""
self.build_names()
area_pen = GlyphAreaPen(None)
pen = PointToSegmentPen(area_pen)
mismatched = {}
for name in self.names:
glyph_a = Glyph()
glyph_b = Glyph()
self.glyph_set_a[name].draw(
Qu2CuPen(glyph_a.getPen(), self.glyph_set_a))
self.glyph_set_b[name].draw(
Qu2CuPen(glyph_b.getPen(), self.glyph_set_b))
booleanOperations.xor(list(glyph_a), list(glyph_b), pen)
area = abs(area_pen.pop())
if area:
                mismatched[name] = area
stats = self.stats['compared']
for name, area in mismatched.items():
stats.append((area, name, self.basepath))
def find_area_shape_diff_products(self):
"""Report product of differences in glyph areas and glyph shapes."""
self.find_area_diffs()
old_compared = self.stats['compared']
self.stats['compared'] = []
self.find_shape_diffs()
new_compared = {n: d for d, n, _ in self.stats['compared']}
for i, (diff, name, font, area_a, area_b) in enumerate(old_compared):
if font != self.basepath:
continue
new_diff = diff * new_compared.get(name, 0)
old_compared[i] = new_diff, name, font, area_a, area_b
self.stats['compared'] = old_compared
def build_names(self):
"""Build a list of glyph names shared between the fonts."""
if hasattr(self, 'names'):
return
stats = self.stats['unmatched']
names_a = set(self.font_a.getGlyphOrder())
names_b = set(self.font_b.getGlyphOrder())
if names_a != names_b:
stats.append((self.basepath, names_a - names_b, names_b - names_a))
self.names = names_a & names_b
stats = self.stats['unicode_mismatch']
reverse_cmap_a = hb_input.build_reverse_cmap(self.font_a)
reverse_cmap_b = hb_input.build_reverse_cmap(self.font_b)
mismatched = {}
for name in self.names:
unival_a = reverse_cmap_a.get(name)
unival_b = reverse_cmap_b.get(name)
if unival_a != unival_b:
mismatched[name] = (unival_a, unival_b)
if mismatched:
stats.append((self.basepath, mismatched.items()))
self.names -= set(mismatched.keys())
@staticmethod
def dump(stats, whitelist, out_lines, include_vals, multiple_fonts):
"""Return the results of run diffs.
Args:
stats: List of tuples with diff data which is sorted and printed.
whitelist: Names of glyphs to exclude from report.
out_lines: Number of diff lines to print.
include_vals: Include the values that have been diffed in report.
            multiple_fonts: Designates whether stats have been accumulated from
                multiple fonts; if so, font names are printed as well.
"""
report = []
compared = sorted(
s for s in stats['compared'] if s[1] not in whitelist)
compared.reverse()
fmt = '%s %s'
if include_vals:
fmt += ' (%s vs %s)'
if multiple_fonts:
fmt = '%s ' + fmt
report.append('%d differences in glyph shape' % len(compared))
for line in compared[:out_lines]:
# print <font> <glyph> <vals>; stats are sorted in reverse priority
line = tuple(reversed(line[:3])) + tuple(line[3:])
# ignore font name if just one pair of fonts was compared
if not multiple_fonts:
line = line[1:]
report.append(fmt % line)
report.append('')
for font, set_a, set_b in stats['unmatched']:
report.append("Glyph coverage doesn't match in %s" % font)
report.append(' in A but not B: %s' % sorted(set_a))
report.append(' in B but not A: %s' % sorted(set_b))
report.append('')
for font, mismatches in stats['unicode_mismatch']:
report.append("Glyph unicode values don't match in %s" % font)
for name, univals in sorted(mismatches):
univals = [(('0x%04X' % v) if v else str(v)) for v in univals]
report.append(' %s: %s in A, %s in B' %
(name, univals[0], univals[1]))
report.append('')
ShapeDiffFinder._add_simple_report(
report, stats['gdef_mark_mismatch'],
'%s: Mark class mismatch for %s (%s vs %s)')
ShapeDiffFinder._add_simple_report(
report, stats['zero_width_mismatch'],
'%s: Zero-width mismatch for %s (%d vs %d)')
ShapeDiffFinder._add_simple_report(
report, stats['input_mismatch'],
'%s: Harfbuzz input mismatch for %s (%s vs %s)')
ShapeDiffFinder._add_simple_report(
report, stats['untested'],
'%s: %s not tested (unreachable?)')
return '\n'.join(report)
@staticmethod
def _add_simple_report(report, stats, fmt):
for stat in sorted(stats):
report.append(fmt % stat)
if stats:
report.append('')
def _calc_diff(self, vals):
"""Calculate an area difference."""
a, b = vals
return abs(a - b)
def _calc_ratio(self, vals):
"""Calculate an area ratio."""
a, b = vals
if not (a or b):
return 0
if abs(a) > abs(b):
a, b = b, a
return 1 - a / b
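    # Example: for areas (80, 100) the ratio diff is 1 - 80/100 = 0.2; identical
    # areas give 0, and a zero/zero pair is treated as no difference.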
def _rendered_png(self, render_path, glyph_name):
glyph_filename = re.sub(r'([A-Z_])', r'\1_', glyph_name) + '.png'
return os.path.join(render_path, glyph_filename)
class Qu2CuPen(BasePen):
def __init__(self, pen, glyphSet):
BasePen.__init__(self, glyphSet)
self.pen = pen
def _moveTo(self, pt):
self.pen.moveTo(pt)
def _lineTo(self, pt):
self.pen.lineTo(pt)
def _curveToOne(self, pt1, pt2, pt3):
self.pen.curveTo(pt1, pt2, pt3)
def _closePath(self):
self.pen.closePath()
def _endPath(self):
self.pen.endPath()
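# Minimal usage sketch (illustrative only; the font paths and report options below
# are hypothetical, not part of the original module):
if __name__ == '__main__':
    stats = {}
    finder = ShapeDiffFinder('a.ttf', 'b.ttf', stats, ratio_diffs=True)
    finder.find_area_diffs()
    print(ShapeDiffFinder.dump(stats, whitelist=set(), out_lines=20,
                               include_vals=True, multiple_fonts=False))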
|
|
from decimal import Decimal
from django.conf import settings as django_settings
# TODO: ensure all values can be overridden from django_settings
PAYPAL_MODE = getattr(django_settings,
'PAYPAL_MODE',
'DEV')
PAYPAL_USER = getattr(django_settings,
'PAYPAL_USER',
False)
PAYPAL_PASSWORD = getattr(django_settings,
'PAYPAL_PASSWORD',
False)
PAYPAL_SIGNATURE = getattr(django_settings,
'PAYPAL_SIGNATURE',
False)
PAYPAL_OUTCOME = getattr(django_settings,
'PAYPAL_OUTCOME',
'PASS')
STRIPE = getattr(django_settings,
'STRIPE',
False)
STRIPE_MODE = getattr(django_settings,
'STRIPE_MODE',
'DEV')
STRIPE_BASE_URL = getattr(django_settings,
'STRIPE_BASE_URL',
'https://api.stripe.com/v1')
STRIPE_OUTCOME = getattr(django_settings,
'STRIPE_OUTCOME',
'PASS')
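# Each of the gateway settings above can be overridden from the project's Django
# settings module. Illustrative (hypothetical) settings.py overrides:
#     STRIPE = True
#     STRIPE_MODE = 'LIVE'
#     PAYPAL_MODE = 'LIVE'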
"""The url string for the basket"""
CCCHECKOUT_BASKET_URL = getattr(django_settings,
'CCCHECKOUT_BASKET_URL',
'ccbasket:basket')
"""The url string to redirect to after a successful checkout"""
CCCHECKOUT_SUCCESS_URL = getattr(django_settings,
'CCCHECKOUT_SUCCESS_URL',
'cccheckout:complete')
"""The adapter to use to take the basket and format it into the format expected
for the basket"""
CCCHECKOUT_ADAPTER = getattr(django_settings,
'CCCHECKOUT_ADAPTER',
'cccheckout.adapters.ccbasket_adapter')
"""Allow guest checkout"""
CCCHECKOUT_ALLOW_GUEST = getattr(django_settings,
'CCCHECKOUT_ALLOW_GUEST',
True)
"""Globaly disable vouchers and discount codes"""
"""Allow The Purchase of Vouchers"""
CCCHECKOUT_ALLOW_VOUCHERS = getattr(django_settings,
'CCCHECKOUT_ALLOW_VOUCHERS',
True)
"""Display vouchers in the admin"""
CCCHECKOUT_SHOW_VOUCHERS_IN_ADMIN = getattr(django_settings,
'CCCHECKOUT_SHOW_VOUCHERS_IN_ADMIN',
True)
"""Allow discount codes"""
CCCHECKOUT_ALLOW_DISCOUNT_CODES = getattr(django_settings,
'CCCHECKOUT_ALLOW_DISCOUNT_CODES',
True)
"""Show discount codes in the admin"""
CCCHECKOUT_SHOW_DISCOUNT_CODES_IN_ADMIN = getattr(django_settings,
'CCCHECKOUT_SHOW_DISCOUNT_CODES_IN_ADMIN',
True)
"""Discount classes"""
CCCHECKOUT_DISCOUNT_MODELS = getattr(django_settings,
'CCCHECKOUT_DISCOUNT_MODELS',
('cccheckout.models.DiscountCode',))
# The form to use for customers
CCCHECKOUT_CUSTOMER_FORM = getattr(django_settings,
'CCCHECKOUT_CUSTOMER_FORM',
'cccheckout.forms.CustomerForm')
# POSTAGE_MODELS
CCCHECKOUT_POSTAGE_MODELS = getattr(django_settings,
'CCCHECKOUT_POSTAGE_MODELS',
('cccheckout.models.Line',))
"""
If the below is other than than 0 then tax will be added at the checkout stage
to all items unless the model class has an getattr(model_cls/model_instance,
`NO_TAX`) == True attribute
"""
CCCHECKOUT_TAX = getattr(django_settings,
'CCCHECKOUT_TAX',
Decimal('0.00'))
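# Illustrative (hypothetical) opt-out: a model whose items should never be taxed
# can expose the attribute described above, e.g.
#     class GiftVoucher(models.Model):
#         NO_TAX = True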
"""The payment providers to use for the checkout.
A payment provider should be a form that accepts the checkout object.
"""
CCCHECKOUT_PAYMENT_FORMS = getattr(django_settings,
'CCCHECKOUT_PAYMENT_FORMS',
('cccheckout.payments.stripe.forms.StripeForm',
'cccheckout.payments.paypalexpresscheckout.forms.PaypalExpressCheckoutForm',))
"""Countries"""
COUNTRIES = (
('GB', 'United Kingdom'),
('AF', 'Afghanistan'),
('AX', 'Aland Islands'),
('AL', 'Albania'),
('DZ', 'Algeria'),
('AS', 'American Samoa'),
('AD', 'Andorra'),
('AO', 'Angola'),
('AI', 'Anguilla'),
('AQ', 'Antarctica'),
('AG', 'Antigua and Barbuda'),
('AR', 'Argentina'),
('AM', 'Armenia'),
('AW', 'Aruba'),
('AU', 'Australia'),
('AT', 'Austria'),
('AZ', 'Azerbaijan'),
('BS', 'Bahamas'),
('BH', 'Bahrain'),
('BD', 'Bangladesh'),
('BB', 'Barbados'),
('BY', 'Belarus'),
('BE', 'Belgium'),
('BZ', 'Belize'),
('BJ', 'Benin'),
('BM', 'Bermuda'),
('BT', 'Bhutan'),
('BO', 'Bolivia'),
('BA', 'Bosnia and Herzegovina'),
('BW', 'Botswana'),
('BV', 'Bouvet Island'),
('BR', 'Brazil'),
('IO', 'British Indian Ocean Territory'),
('BN', 'Brunei Darussalam'),
('BG', 'Bulgaria'),
('BF', 'Burkina Faso'),
('BI', 'Burundi'),
('KH', 'Cambodia'),
('CM', 'Cameroon'),
('CA', 'Canada'),
('CV', 'Cape Verde'),
('KY', 'Cayman Islands'),
('CF', 'Central African Republic'),
('TD', 'Chad'),
('CL', 'Chile'),
('CN', 'China'),
('CX', 'Christmas Island'),
('CC', 'Cocos (Keeling) Islands'),
('CO', 'Colombia'),
('KM', 'Comoros'),
('CG', 'Congo'),
('CD', 'Congo, The Democratic Republic of the'),
('CK', 'Cook Islands'),
('CR', 'Costa Rica'),
('CI', 'Cote d\'Ivoire'),
('HR', 'Croatia'),
('CU', 'Cuba'),
('CY', 'Cyprus'),
('CZ', 'Czech Republic'),
('DK', 'Denmark'),
('DJ', 'Djibouti'),
('DM', 'Dominica'),
('DO', 'Dominican Republic'),
('EC', 'Ecuador'),
('EG', 'Egypt'),
('SV', 'El Salvador'),
('GQ', 'Equatorial Guinea'),
('ER', 'Eritrea'),
('EE', 'Estonia'),
('ET', 'Ethiopia'),
('FK', 'Falkland Islands (Malvinas)'),
('FO', 'Faroe Islands'),
('FJ', 'Fiji'),
('FI', 'Finland'),
('FR', 'France'),
('GF', 'French Guiana'),
('PF', 'French Polynesia'),
('TF', 'French Southern Territories'),
('GA', 'Gabon'),
('GM', 'Gambia'),
('GE', 'Georgia'),
('DE', 'Germany'),
('GH', 'Ghana'),
('GI', 'Gibraltar'),
('GR', 'Greece'),
('GL', 'Greenland'),
('GD', 'Grenada'),
('GP', 'Guadeloupe'),
('GU', 'Guam'),
('GT', 'Guatemala'),
('GG', 'Guernsey'),
('GN', 'Guinea'),
('GW', 'Guinea-Bissau'),
('GY', 'Guyana'),
('HT', 'Haiti'),
('HM', 'Heard Island and McDonald Islands'),
('VA', 'Holy See (Vatican City State)'),
('HN', 'Honduras'),
('HK', 'Hong Kong'),
('HU', 'Hungary'),
('IS', 'Iceland'),
('IN', 'India'),
('ID', 'Indonesia'),
('IR', 'Iran, Islamic Republic of'),
('IQ', 'Iraq'),
('IE', 'Ireland'),
('IM', 'Isle of Man'),
('IL', 'Israel'),
('IT', 'Italy'),
('JM', 'Jamaica'),
('JP', 'Japan'),
('JE', 'Jersey'),
('JO', 'Jordan'),
('KZ', 'Kazakhstan'),
('KE', 'Kenya'),
('KI', 'Kiribati'),
('KP', 'Korea, Democratic People\'s Republic of'),
('KR', 'Korea, Republic of'),
('KW', 'Kuwait'),
('KG', 'Kyrgyzstan'),
('LA', 'Lao People\'s Democratic Republic'),
('LV', 'Latvia'),
('LB', 'Lebanon'),
('LS', 'Lesotho'),
('LR', 'Liberia'),
('LY', 'Libyan Arab Jamahiriya'),
('LI', 'Liechtenstein'),
('LT', 'Lithuania'),
('LU', 'Luxembourg'),
('MO', 'Macao'),
('MK', 'Macedonia, The Former Yugoslav Republic of'),
('MG', 'Madagascar'),
('MW', 'Malawi'),
('MY', 'Malaysia'),
('MV', 'Maldives'),
('ML', 'Mali'),
('MT', 'Malta'),
('MH', 'Marshall Islands'),
('MQ', 'Martinique'),
('MR', 'Mauritania'),
('MU', 'Mauritius'),
('YT', 'Mayotte'),
('MX', 'Mexico'),
('FM', 'Micronesia, Federated States of'),
('MD', 'Moldova'),
('MC', 'Monaco'),
('MN', 'Mongolia'),
('ME', 'Montenegro'),
('MS', 'Montserrat'),
('MA', 'Morocco'),
('MZ', 'Mozambique'),
('MM', 'Myanmar'),
('NA', 'Namibia'),
('NR', 'Nauru'),
('NP', 'Nepal'),
('NL', 'Netherlands'),
('AN', 'Netherlands Antilles'),
('NC', 'New Caledonia'),
('NZ', 'New Zealand'),
('NI', 'Nicaragua'),
('NE', 'Niger'),
('NG', 'Nigeria'),
('NU', 'Niue'),
('NF', 'Norfolk Island'),
('MP', 'Northern Mariana Islands'),
('NO', 'Norway'),
('OM', 'Oman'),
('PK', 'Pakistan'),
('PW', 'Palau'),
('PS', 'Palestinian Territory, Occupied'),
('PA', 'Panama'),
('PG', 'Papua New Guinea'),
('PY', 'Paraguay'),
('PE', 'Peru'),
('PH', 'Philippines'),
('PN', 'Pitcairn'),
('PL', 'Poland'),
('PT', 'Portugal'),
('PR', 'Puerto Rico'),
('QA', 'Qatar'),
('RE', 'Reunion'),
('RO', 'Romania'),
('RU', 'Russian Federation'),
('RW', 'Rwanda'),
('BL', 'Saint Barthelemy'),
('SH', 'Saint Helena'),
('KN', 'Saint Kitts and Nevis'),
('LC', 'Saint Lucia'),
('MF', 'Saint Martin'),
('PM', 'Saint Pierre and Miquelon'),
('VC', 'Saint Vincent and the Grenadines'),
('WS', 'Samoa'),
('SM', 'San Marino'),
('ST', 'Sao Tome and Principe'),
('SA', 'Saudi Arabia'),
('SN', 'Senegal'),
('RS', 'Serbia'),
('SC', 'Seychelles'),
('SL', 'Sierra Leone'),
('SG', 'Singapore'),
('SK', 'Slovakia'),
('SI', 'Slovenia'),
('SB', 'Solomon Islands'),
('SO', 'Somalia'),
('ZA', 'South Africa'),
('GS', 'South Georgia and the South Sandwich Islands'),
('ES', 'Spain'),
('LK', 'Sri Lanka'),
('SD', 'Sudan'),
('SR', 'Suriname'),
('SJ', 'Svalbard and Jan Mayen'),
('SZ', 'Swaziland'),
('SE', 'Sweden'),
('CH', 'Switzerland'),
('SY', 'Syrian Arab Republic'),
('TW', 'Taiwan, Province of China'),
('TJ', 'Tajikistan'),
('TZ', 'Tanzania, United Republic of'),
('TH', 'Thailand'),
('TL', 'Timor-Leste'),
('TG', 'Togo'),
('TK', 'Tokelau'),
('TO', 'Tonga'),
('TT', 'Trinidad and Tobago'),
('TN', 'Tunisia'),
('TR', 'Turkey'),
('TM', 'Turkmenistan'),
('TC', 'Turks and Caicos Islands'),
('TV', 'Tuvalu'),
('UG', 'Uganda'),
('UA', 'Ukraine'),
('AE', 'United Arab Emirates'),
('US', 'United States'),
('UM', 'United States Minor Outlying Islands'),
('UY', 'Uruguay'),
('UZ', 'Uzbekistan'),
('VU', 'Vanuatu'),
('VE', 'Venezuela'),
('VN', 'Viet Nam'),
('VG', 'Virgin Islands, British'),
('VI', 'Virgin Islands, U.S.'),
('WF', 'Wallis and Futuna'),
('EH', 'Western Sahara'),
('YE', 'Yemen'),
('ZM', 'Zambia'),
('ZW', 'Zimbabwe'),
)
CCCHECKOUT_COUNTRIES = getattr(django_settings,
'CCCHECKOUT_COUNTRIES',
COUNTRIES)
|
|
#
# Copyright (c) 2016 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.nec import volume_common
from cinder.volume.drivers.nec import volume_helper
xml_out = '''
<REQUEST>
<CMD_REQUEST cmd_name="/opt/iSMCliGateway/impl/query/iSMquery"
arg="-cinder -xml -all "
version="Version 9.4.001">
<CHAPTER name="Disk Array">
<OBJECT name="Disk Array">
<SECTION name="Disk Array Detail Information">
<UNIT name="Product ID">M310</UNIT>
</SECTION>
</OBJECT>
</CHAPTER>
<CHAPTER name="Logical Disk">
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">0000</UNIT>
<UNIT name="OS Type">LX</UNIT>
<UNIT name="LD Name">287RbQoP7VdwR1WsPC2fZT</UNIT>
<UNIT name="LD Capacity">1073741824</UNIT>
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">MV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">0001</UNIT>
<UNIT name="OS Type"> </UNIT>
<UNIT name="LD Name">backup_SDV0001</UNIT>
<UNIT name="LD Capacity">5368709120</UNIT>
<UNIT name="Pool No.(h)">0001</UNIT>
<UNIT name="Purpose">(invalid attribute)</UNIT>
<UNIT name="RPL Attribute">IV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">0003</UNIT>
<UNIT name="OS Type">LX</UNIT>
<UNIT name="LD Name">31HxzqBiAFTUxxOlcVn3EA</UNIT>
<UNIT name="LD Capacity">1073741824</UNIT>
<UNIT name="Pool No.(h)">0001</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">RV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">0004</UNIT>
<UNIT name="OS Type">LX</UNIT>
<UNIT name="LD Name">287RbQoP7VdwR1WsPC2fZT_back</UNIT>
<UNIT name="LD Capacity">1073741824</UNIT>
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">RV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">0005</UNIT>
<UNIT name="OS Type">LX</UNIT>
<UNIT name="LD Name">20000009910200140005</UNIT>
<UNIT name="LD Capacity">10737418240</UNIT>
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">RV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">0006</UNIT>
<UNIT name="OS Type">LX</UNIT>
<UNIT name="LD Name">287RbQoP7VdwR1WsPC2fZT_l</UNIT>
<UNIT name="LD Capacity">10737418240</UNIT>
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">IV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">0007</UNIT>
<UNIT name="OS Type"> </UNIT>
<UNIT name="LD Name">20000009910200140007</UNIT>
<UNIT name="LD Capacity">10737418240</UNIT>
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">IV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">0008</UNIT>
<UNIT name="OS Type"> </UNIT>
<UNIT name="LD Name">20000009910200140008</UNIT>
<UNIT name="LD Capacity">10737418240</UNIT>
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">IV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">0009</UNIT>
<UNIT name="OS Type"> </UNIT>
<UNIT name="LD Name">20000009910200140009</UNIT>
<UNIT name="LD Capacity">10737418240</UNIT>
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">IV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">000a</UNIT>
<UNIT name="OS Type"> </UNIT>
<UNIT name="LD Name">2000000991020012000A</UNIT>
<UNIT name="LD Capacity">6442450944</UNIT>
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">IV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">000b</UNIT>
<UNIT name="OS Type"> </UNIT>
<UNIT name="LD Name">2000000991020012000B</UNIT>
<UNIT name="LD Capacity">6442450944</UNIT>
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">IV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">000c</UNIT>
<UNIT name="OS Type"> </UNIT>
<UNIT name="LD Name">2000000991020012000C</UNIT>
<UNIT name="LD Capacity">6442450944</UNIT>
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">IV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">000d</UNIT>
<UNIT name="OS Type">LX</UNIT>
<UNIT name="LD Name">yEUHrXa5AHMjOZZLb93eP</UNIT>
<UNIT name="LD Capacity">6442450944</UNIT>
<UNIT name="Pool No.(h)">0001</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">IV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">000e</UNIT>
<UNIT name="OS Type">LX</UNIT>
<UNIT name="LD Name">4T7JpyqI3UuPlKeT9D3VQF</UNIT>
<UNIT name="LD Capacity">6442450944</UNIT>
<UNIT name="Pool No.(h)">0001</UNIT>
<UNIT name="Purpose">(invalid attribute)</UNIT>
<UNIT name="RPL Attribute">SV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">0fff</UNIT>
<UNIT name="OS Type"> </UNIT>
<UNIT name="LD Name">Pool0000_SYV0FFF</UNIT>
<UNIT name="LD Capacity">8589934592</UNIT>
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Purpose">(invalid attribute)</UNIT>
<UNIT name="RPL Attribute">---</UNIT>
</SECTION>
</OBJECT>
</CHAPTER>
<CHAPTER name="Pool">
<OBJECT name="Pool">
<SECTION name="Pool Detail Information">
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Pool Capacity">281320357888</UNIT>
<UNIT name="Used Pool Capacity">84020297728</UNIT>
<UNIT name="Free Pool Capacity">197300060160</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Pool">
<SECTION name="Pool Detail Information">
<UNIT name="Pool No.(h)">0001</UNIT>
<UNIT name="Pool Capacity">89657442304</UNIT>
<UNIT name="Used Pool Capacity">6710886400</UNIT>
<UNIT name="Free Pool Capacity">82946555904</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Pool">
<SECTION name="Pool Detail Information">
<UNIT name="Pool No.(h)">0002</UNIT>
<UNIT name="Pool Capacity">1950988894208</UNIT>
<UNIT name="Used Pool Capacity">18446744073441116160</UNIT>
<UNIT name="Free Pool Capacity">1951257329664</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Pool">
<SECTION name="Pool Detail Information">
<UNIT name="Pool No.(h)">0003</UNIT>
<UNIT name="Pool Capacity">1950988894208</UNIT>
<UNIT name="Used Pool Capacity">18446744073441116160</UNIT>
<UNIT name="Free Pool Capacity">1951257329664</UNIT>
</SECTION>
</OBJECT>
</CHAPTER>
<CHAPTER name="Controller">
<OBJECT name="Host Port">
<SECTION name="Host Director/Host Port Information">
<UNIT name="Port No.(h)">00-00</UNIT>
<UNIT name="WWPN">2100000991020012</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Host Port">
<SECTION name="Host Director/Host Port Information">
<UNIT name="Port No.(h)">00-01</UNIT>
<UNIT name="WWPN">2200000991020012</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Host Port">
<SECTION name="Host Director/Host Port Information">
<UNIT name="Port No.(h)">00-02</UNIT>
<UNIT name="IP Address">192.168.1.90</UNIT>
<UNIT name="Link Status">Link Down</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Host Port">
<SECTION name="Host Director/Host Port Information">
<UNIT name="Port No.(h)">00-03</UNIT>
<UNIT name="IP Address">192.168.1.91</UNIT>
<UNIT name="Link Status">Link Down</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Host Port">
<SECTION name="Host Director/Host Port Information">
<UNIT name="Port No.(h)">01-00</UNIT>
<UNIT name="WWPN">2900000991020012</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Host Port">
<SECTION name="Host Director/Host Port Information">
<UNIT name="Port No.(h)">01-01</UNIT>
<UNIT name="WWPN">2A00000991020012</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Host Port">
<SECTION name="Host Director/Host Port Information">
<UNIT name="Port No.(h)">01-02</UNIT>
<UNIT name="IP Address">192.168.2.92</UNIT>
<UNIT name="Link Status">Link Down</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Host Port">
<SECTION name="Host Director/Host Port Information">
<UNIT name="Port No.(h)">01-03</UNIT>
<UNIT name="IP Address">192.168.2.93</UNIT>
<UNIT name="Link Status">Link Up</UNIT>
</SECTION>
</OBJECT>
</CHAPTER>
<CHAPTER name="Access Control">
<OBJECT name="LD Set(FC)">
<SECTION name="LD Set(FC) Information">
<UNIT name="Platform">LX</UNIT>
<UNIT name="LD Set Name">OpenStack1</UNIT>
</SECTION>
<SECTION name="Path List">
<UNIT name="Path">1000-0090-FAA0-786B</UNIT>
</SECTION>
<SECTION name="Path List">
<UNIT name="Path">1000-0090-FAA0-786A</UNIT>
</SECTION>
<SECTION name="LUN/LD List">
<UNIT name="LUN(h)">0000</UNIT>
<UNIT name="LDN(h)">0005</UNIT>
</SECTION>
<SECTION name="LUN/LD List">
<UNIT name="LUN(h)">0001</UNIT>
<UNIT name="LDN(h)">0006</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="LD Set(iSCSI)">
<SECTION name="LD Set(iSCSI) Information">
<UNIT name="Platform">LX</UNIT>
<UNIT name="LD Set Name">OpenStack0</UNIT>
<UNIT name="Target Mode">Multi-Target</UNIT>
</SECTION>
<SECTION name="Portal">
<UNIT name="Portal">192.168.1.90:3260</UNIT>
</SECTION>
<SECTION name="Portal">
<UNIT name="Portal">192.168.1.91:3260</UNIT>
</SECTION>
<SECTION name="Portal">
<UNIT name="Portal">192.168.2.92:3260</UNIT>
</SECTION>
<SECTION name="Portal">
<UNIT name="Portal">192.168.2.93:3260</UNIT>
</SECTION>
<SECTION name="Initiator List">
<UNIT name="Initiator List">iqn.1994-05.com.redhat:d1d8e8f23255</UNIT>
</SECTION>
<SECTION name="Target Information For Multi-Target Mode">
<UNIT name="Target Name">iqn.2001-03.target0000</UNIT>
<UNIT name="LUN(h)">0000</UNIT>
<UNIT name="LDN(h)">0000</UNIT>
</SECTION>
</OBJECT>
</CHAPTER>
<RETURN_MSG>Command Completed Successfully!!</RETURN_MSG>
<RETURN_CODE>0</RETURN_CODE>
</CMD_REQUEST>
</REQUEST>
'''
def patch_view_all(self, conf_ismview_path=None, delete_ismview=True,
cmd_lock=True):
return xml_out
def patch_execute(self, command, expected_status=[0], raise_exec=True):
return "success", 0, 0
class DummyVolume(object):
def __init__(self):
super(DummyVolume, self).__init__()
self.id = ''
self.size = 0
self.status = ''
self.migration_status = ''
self.volume_id = ''
self.volume_type_id = ''
self.attach_status = ''
self.provider_location = ''
@ddt.ddt
class VolumeIDConvertTest(volume_helper.MStorageDSVDriver, test.TestCase):
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def setUp(self):
super(VolumeIDConvertTest, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self.vol = DummyVolume()
self.xml = self._cli.view_all()
(self.pools,
self.lds,
self.ldsets,
self.used_ldns,
self.hostports,
self.max_ld_count) = self.configs(self.xml)
@ddt.data(("AAAAAAAA", "LX:37mA82"), ("BBBBBBBB", "LX:3R9ZwR"))
@ddt.unpack
def test_volumeid_should_change_62scale(self, volid, ldname):
self.vol.id = volid
actual = self._convert_id2name(self.vol)
self.assertEqual(ldname, actual,
"ID:%(volid)s should be change to %(ldname)s" %
{'volid': volid, 'ldname': ldname})
@ddt.data(("AAAAAAAA", "LX:37mA82"), ("BBBBBBBB", "LX:3R9ZwR"))
@ddt.unpack
def test_snap_volumeid_should_change_62scale_andpostfix(self,
volid,
ldname):
self.vol.id = volid
actual = self._convert_id2snapname(self.vol)
self.assertEqual(ldname, actual,
"ID:%(volid)s should be change to %(ldname)s" %
{'volid': volid, 'ldname': ldname})
@ddt.data(("AAAAAAAA", "LX:37mA82_m"), ("BBBBBBBB", "LX:3R9ZwR_m"))
@ddt.unpack
def test_ddrsnap_volumeid_should_change_62scale_and_m(self,
volid,
ldname):
self.vol.id = volid
actual = self._convert_id2migratename(self.vol)
self.assertEqual(ldname, actual,
"ID:%(volid)s should be change to %(ldname)s" %
{'volid': volid, 'ldname': ldname})
class NominatePoolLDTest(volume_helper.MStorageDSVDriver, test.TestCase):
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def setUp(self):
super(NominatePoolLDTest, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self.vol = DummyVolume()
self.xml = self._cli.view_all()
self._properties['cli_fip'] = '10.0.0.1'
self._properties['pool_pools'] = {0, 1}
self._properties['pool_backup_pools'] = {2, 3}
(self.pools,
self.lds,
self.ldsets,
self.used_ldns,
self.hostports,
self.max_ld_count) = self.configs(self.xml)
self._numofld_per_pool = 1024
def test_getxml(self):
self.assertIsNotNone(self.xml, "iSMview xml should not be None")
def test_selectldn_for_normalvolume(self):
ldn = self._select_ldnumber(self.used_ldns, self.max_ld_count)
        self.assertEqual(2, ldn, "selected ldn should be 2")
def test_selectpool_for_normalvolume(self):
self.vol.size = 10
pool = self._select_leastused_poolnumber(self.vol,
self.pools,
self.xml)
self.assertEqual(1, pool, "selected pool should be 1")
# config:pool_pools=[1]
self.vol.size = 999999999999
with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
'No available pools found.'):
pool = self._select_leastused_poolnumber(self.vol,
self.pools,
self.xml)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_selectpool_for_migratevolume(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.vol.size = 10
dummyhost = {}
dummyhost['capabilities'] = self._update_volume_status()
pool = self._select_migrate_poolnumber(self.vol,
self.pools,
self.xml,
dummyhost)
self.assertEqual(1, pool, "selected pool should be 1")
self.vol.id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d"
self.vol.size = 10
pool = self._select_migrate_poolnumber(self.vol,
self.pools,
self.xml,
dummyhost)
        self.assertEqual(-1, pool, "selected pool is the same pool (return -1)")
self.vol.size = 999999999999
with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
'No available pools found.'):
pool = self._select_migrate_poolnumber(self.vol,
self.pools,
self.xml,
dummyhost)
def test_selectpool_for_snapvolume(self):
self.vol.size = 10
savePool1 = self.pools[1]['free']
self.pools[1]['free'] = 0
pool = self._select_dsv_poolnumber(self.vol, self.pools)
self.assertEqual(2, pool, "selected pool should be 2")
# config:pool_backup_pools=[2]
self.pools[1]['free'] = savePool1
        if len(self.pools[0]['ld_list']) == 1024:
savePool2 = self.pools[2]['free']
savePool3 = self.pools[3]['free']
self.pools[2]['free'] = 0
self.pools[3]['free'] = 0
with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
'No available pools found.'):
pool = self._select_dsv_poolnumber(self.vol, self.pools)
self.pools[2]['free'] = savePool2
self.pools[3]['free'] = savePool3
self.vol.size = 999999999999
pool = self._select_dsv_poolnumber(self.vol, self.pools)
self.assertEqual(2, pool, "selected pool should be 2")
# config:pool_backup_pools=[2]
def test_selectpool_for_ddrvolume(self):
self.vol.size = 10
pool = self._select_ddr_poolnumber(self.vol,
self.pools,
self.xml,
10)
self.assertEqual(2, pool, "selected pool should be 2")
# config:pool_backup_pools=[2]
savePool2 = self.pools[2]['free']
savePool3 = self.pools[3]['free']
self.pools[2]['free'] = 0
self.pools[3]['free'] = 0
with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
'No available pools found.'):
pool = self._select_ddr_poolnumber(self.vol,
self.pools,
self.xml,
10)
self.pools[2]['free'] = savePool2
self.pools[3]['free'] = savePool3
self.vol.size = 999999999999
with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
'No available pools found.'):
pool = self._select_ddr_poolnumber(self.vol,
self.pools,
self.xml,
999999999999)
def test_selectpool_for_volddrvolume(self):
self.vol.size = 10
pool = self._select_volddr_poolnumber(self.vol,
self.pools,
self.xml,
10)
self.assertEqual(1, pool, "selected pool should be 1")
# config:pool_backup_pools=[2]
savePool0 = self.pools[0]['free']
savePool1 = self.pools[1]['free']
self.pools[0]['free'] = 0
self.pools[1]['free'] = 0
with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
'No available pools found.'):
pool = self._select_volddr_poolnumber(self.vol,
self.pools,
self.xml,
10)
self.pools[0]['free'] = savePool0
self.pools[1]['free'] = savePool1
self.vol.size = 999999999999
with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
'No available pools found.'):
pool = self._select_volddr_poolnumber(self.vol,
self.pools,
self.xml,
999999999999)
class GetInformationTest(volume_helper.MStorageDSVDriver, test.TestCase):
def setUp(self):
super(GetInformationTest, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def test_get_ldset(self):
self.xml = self._cli.view_all()
(self.pools,
self.lds,
self.ldsets,
self.used_ldns,
self.hostports,
self.max_ld_count) = self.configs(self.xml)
self._properties['ldset_name'] = ''
ldset = self.get_ldset(self.ldsets)
self.assertIsNone(ldset)
self._properties['ldset_name'] = 'LX:OpenStack1'
ldset = self.get_ldset(self.ldsets)
self.assertEqual('LX:OpenStack1', ldset['ldsetname'])
self._properties['ldset_name'] = 'LX:OpenStackX'
with self.assertRaisesRegexp(exception.NotFound,
'Logical Disk Set'
' `LX:OpenStackX`'
' could not be found.'):
self.get_ldset(self.ldsets)
class VolumeCreateTest(volume_helper.MStorageDSVDriver, test.TestCase):
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def setUp(self):
super(VolumeCreateTest, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self.vol = DummyVolume()
self.xml = self._cli.view_all()
(self.pools,
self.lds,
self.ldsets,
self.used_ldns,
self.hostports,
self.max_ld_count) = self.configs(self.xml)
def test_validate_migrate_volume(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.vol.size = 10
self.vol.status = 'available'
self._validate_migrate_volume(self.vol, self.xml)
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.vol.size = 10
self.vol.status = 'creating'
with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
'Specified Logical Disk'
' LX:287RbQoP7VdwR1WsPC2fZT'
' is not available.'):
self._validate_migrate_volume(self.vol, self.xml)
self.vol.id = "AAAAAAAA"
self.vol.size = 10
self.vol.status = 'available'
with self.assertRaisesRegexp(exception.NotFound,
'Logical Disk `LX:37mA82`'
' could not be found.'):
self._validate_migrate_volume(self.vol, self.xml)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_extend_volume(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b" # MV
self.vol.size = 1
self.vol.status = 'available'
self.extend_volume(self.vol, 10)
self.vol.id = "00046058-d38e-7f60-67b7-59ed65e54225" # RV
self.vol.size = 1
self.vol.status = 'available'
with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
'RPL Attribute Error. '
'RPL Attribute = RV.'):
self.extend_volume(self.vol, 10)
class BindLDTest(volume_helper.MStorageDSVDriver, test.TestCase):
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def setUp(self):
super(BindLDTest, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self.vol = DummyVolume()
self.src = DummyVolume()
self.xml = self._cli.view_all()
(self.pools,
self.lds,
self.ldsets,
self.used_ldns,
self.hostports,
self.max_ld_count) = self.configs(self.xml)
mock_bindld = mock.Mock()
self._bind_ld = mock_bindld
self._bind_ld.return_value = 0, 0, 0
def test_bindld_CreateVolume(self):
self.vol.id = "AAAAAAAA"
self.vol.size = 1
self.vol.migration_status = "success"
self.vol.volume_type_id = None
self.create_volume(self.vol)
self._bind_ld.assert_called_once_with(
self.vol, self.vol.size, None,
self._convert_id2name,
self._select_leastused_poolnumber)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_bindld_CreateCloneVolume(self):
self.vol.id = "AAAAAAAA"
self.vol.size = 1
self.vol.migration_status = "success"
self.src.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.src.size = 1
self.vol.volume_type_id = None
mock_query_DSV = mock.Mock()
self._cli.query_BV_SV_status = mock_query_DSV
self._cli.query_BV_SV_status.return_value = 'snap/active'
mock_query_DDR = mock.Mock()
self._cli.query_MV_RV_name = mock_query_DDR
self._cli.query_MV_RV_name.return_value = 'separated'
mock_backup = mock.Mock()
self._cli.backup_restore = mock_backup
self.create_cloned_volume(self.vol, self.src)
self._bind_ld.assert_called_once_with(
self.vol, self.vol.size, None,
self._convert_id2name,
self._select_leastused_poolnumber)
class BindLDTest_Snap(volume_helper.MStorageDSVDriver, test.TestCase):
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def setUp(self):
super(BindLDTest_Snap, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self.vol = DummyVolume()
self.snap = DummyVolume()
self.xml = self._cli.view_all()
(self.pools,
self.lds,
self.ldsets,
self.used_ldns,
self.hostports,
self.max_ld_count) = self.configs(self.xml)
mock_bindld = mock.Mock()
self._bind_ld = mock_bindld
self._bind_ld.return_value = 0, 0, 0
mock_bindsnap = mock.Mock()
self._create_snapshot = mock_bindsnap
def test_bindld_CreateSnapshot(self):
self.snap.id = "AAAAAAAA"
self.snap.volume_id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d"
self.snap.size = 10
self.create_snapshot(self.snap)
self._create_snapshot.assert_called_once_with(
self.snap, self._properties['diskarray_name'])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_bindld_CreateFromSnapshot(self):
self.vol.id = "AAAAAAAA"
self.vol.size = 1
self.vol.migration_status = "success"
self.vol.volume_type_id = None
self.snap.id = "63410c76-2f12-4473-873d-74a63dfcd3e2"
self.snap.volume_id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d"
mock_query = mock.Mock()
self._cli.query_BV_SV_status = mock_query
self._cli.query_BV_SV_status.return_value = 'snap/active'
mock_backup = mock.Mock()
self._cli.backup_restore = mock_backup
self.create_volume_from_snapshot(self.vol, self.snap)
self._bind_ld.assert_called_once_with(
self.vol, 1, None,
self._convert_id2name,
self._select_volddr_poolnumber, 1)
class ExportTest(volume_helper.MStorageDSVDriver, test.TestCase):
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def setUp(self):
super(ExportTest, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self.vol = DummyVolume()
self.xml = self._cli.view_all()
(self.pools,
self.lds,
self.ldsets,
self.used_ldns,
self.hostports,
self.max_ld_count) = self.configs(self.xml)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_iscsi_portal(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.vol.size = 10
self.vol.status = None
self.vol.migration_status = None
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"}
self.iscsi_do_export(None, self.vol, connector)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_fc_do_export(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.vol.size = 10
self.vol.status = None
self.vol.migration_status = None
connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]}
self.fc_do_export(None, self.vol, connector)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_remove_export(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.vol.size = 10
self.vol.status = 'uploading'
self.vol.attach_status = 'attached'
self.vol.migration_status = None
self.vol.volume_type_id = None
context = mock.Mock()
ret = self.remove_export(context, self.vol)
self.assertIsNone(ret)
self.vol.attach_status = None
self.vol.status = 'downloading'
with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
r'Failed to unregister Logical Disk from'
r' Logical Disk Set \(iSM31064\)'):
mock_del = mock.Mock()
self._cli.delldsetld = mock_del
self._cli.delldsetld.return_value = False, 'iSM31064'
self.remove_export(context, self.vol)
def test_iscsi_initialize_connection(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
loc = "127.0.0.1:3260:1 iqn.2010-10.org.openstack:volume-00000001 88"
self.vol.provider_location = loc
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255",
'multipath': True}
info = self._iscsi_initialize_connection(self.vol, connector)
self.assertEqual('iscsi', info['driver_volume_type'])
self.assertEqual('iqn.2010-10.org.openstack:volume-00000001',
info['data']['target_iqn'])
self.assertEqual('127.0.0.1:3260', info['data']['target_portal'])
self.assertEqual(88, info['data']['target_lun'])
self.assertEqual('iqn.2010-10.org.openstack:volume-00000001',
info['data']['target_iqns'][0])
self.assertEqual('127.0.0.1:3260', info['data']['target_portals'][0])
self.assertEqual(88, info['data']['target_luns'][0])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_fc_initialize_connection(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.vol.migration_status = None
connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]}
info = self._fc_initialize_connection(self.vol, connector)
self.assertEqual('fibre_channel', info['driver_volume_type'])
self.assertEqual('2100000991020012', info['data']['target_wwn'][0])
self.assertEqual('2200000991020012', info['data']['target_wwn'][1])
self.assertEqual('2900000991020012', info['data']['target_wwn'][2])
self.assertEqual('2A00000991020012', info['data']['target_wwn'][3])
self.assertEqual(
'2100000991020012',
info['data']['initiator_target_map']['10000090FAA0786A'][0])
self.assertEqual(
'2100000991020012',
info['data']['initiator_target_map']['10000090FAA0786B'][0])
self.assertEqual(
'2200000991020012',
info['data']['initiator_target_map']['10000090FAA0786A'][1])
self.assertEqual(
'2200000991020012',
info['data']['initiator_target_map']['10000090FAA0786B'][1])
self.assertEqual(
'2900000991020012',
info['data']['initiator_target_map']['10000090FAA0786A'][2])
self.assertEqual(
'2900000991020012',
info['data']['initiator_target_map']['10000090FAA0786B'][2])
self.assertEqual(
'2A00000991020012',
info['data']['initiator_target_map']['10000090FAA0786A'][3])
self.assertEqual(
'2A00000991020012',
info['data']['initiator_target_map']['10000090FAA0786B'][3])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_fc_terminate_connection(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]}
info = self._fc_terminate_connection(self.vol, connector)
self.assertEqual('fibre_channel', info['driver_volume_type'])
self.assertEqual('2100000991020012', info['data']['target_wwn'][0])
self.assertEqual('2200000991020012', info['data']['target_wwn'][1])
self.assertEqual('2900000991020012', info['data']['target_wwn'][2])
self.assertEqual('2A00000991020012', info['data']['target_wwn'][3])
self.assertEqual(
'2100000991020012',
info['data']['initiator_target_map']['10000090FAA0786A'][0])
self.assertEqual(
'2100000991020012',
info['data']['initiator_target_map']['10000090FAA0786B'][0])
self.assertEqual(
'2200000991020012',
info['data']['initiator_target_map']['10000090FAA0786A'][1])
self.assertEqual(
'2200000991020012',
info['data']['initiator_target_map']['10000090FAA0786B'][1])
self.assertEqual(
'2900000991020012',
info['data']['initiator_target_map']['10000090FAA0786A'][2])
self.assertEqual(
'2900000991020012',
info['data']['initiator_target_map']['10000090FAA0786B'][2])
self.assertEqual(
'2A00000991020012',
info['data']['initiator_target_map']['10000090FAA0786A'][3])
self.assertEqual(
'2A00000991020012',
info['data']['initiator_target_map']['10000090FAA0786B'][3])
info = self._fc_terminate_connection(self.vol, None)
self.assertEqual('fibre_channel', info['driver_volume_type'])
self.assertEqual({}, info['data'])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_iscsi_portal_with_controller_node_name(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.vol.status = 'downloading'
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"}
self._properties['ldset_controller_node_name'] = 'LX:OpenStack1'
self._properties['portal_number'] = 2
location = self.iscsi_do_export(None, self.vol, connector)
self.assertEqual('192.168.1.90:3260;192.168.1.91:3260;'
'192.168.2.92:3260;192.168.2.93:3260'
',1 iqn.2001-03.target0000 0',
location['provider_location'])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_fc_do_export_with_controller_node_name(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.vol.status = 'downloading'
connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]}
self._properties['ldset_controller_node_name'] = 'LX:OpenStack0'
location = self.fc_do_export(None, self.vol, connector)
self.assertIsNone(location)
class DeleteDSVVolume_test(volume_helper.MStorageDSVDriver,
test.TestCase):
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def setUp(self):
super(DeleteDSVVolume_test, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self.vol = DummyVolume()
self.xml = self._cli.view_all()
(self.pools,
self.lds,
self.ldsets,
self.used_ldns,
self.hostports,
self.max_ld_count) = self.configs(self.xml)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_delete_snapshot(self):
self.vol.id = "63410c76-2f12-4473-873d-74a63dfcd3e2"
self.vol.volume_id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d"
mock_query = mock.Mock()
self._cli.query_BV_SV_status = mock_query
self._cli.query_BV_SV_status.return_value = 'snap/active'
ret = self.delete_snapshot(self.vol)
self.assertIsNone(ret)
class NonDisruptiveBackup_test(volume_helper.MStorageDSVDriver,
test.TestCase):
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def setUp(self):
super(NonDisruptiveBackup_test, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self.vol = DummyVolume()
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
        self.vol.volume_id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d"
        self.vol.size = 10
        self.vol.status = None
        self.vol.migration_status = None
self.xml = self._cli.view_all()
(self.pools,
self.lds,
self.ldsets,
self.used_ldns,
self.hostports,
self.max_ld_count) = self.configs(self.xml)
def test_validate_ld_exist(self):
ldname = self._validate_ld_exist(
self.lds, self.vol.id, self._properties['ld_name_format'])
self.assertEqual('LX:287RbQoP7VdwR1WsPC2fZT', ldname)
self.vol.id = "00000000-0000-0000-0000-6b6d96553b4b"
with self.assertRaisesRegexp(exception.NotFound,
'Logical Disk `LX:XXXXXXXX`'
' could not be found.'):
self._validate_ld_exist(
self.lds, self.vol.id, self._properties['ld_name_format'])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', new=mock.Mock())
def test_validate_iscsildset_exist(self):
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"}
ldset = self._validate_iscsildset_exist(self.ldsets, connector)
self.assertEqual('LX:OpenStack0', ldset['ldsetname'])
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f232XX"}
mock_data = {'ldsetname': 'LX:redhatd1d8e8f23',
'protocol': 'iSCSI',
'portal_list': ['1.1.1.1:3260', '2.2.2.2:3260'],
'lds': {},
'initiator_list':
['iqn.1994-05.com.redhat:d1d8e8f232XX']}
mock_ldset = {}
mock_ldset['LX:redhatd1d8e8f23'] = mock_data
mock_configs = mock.Mock()
self.configs = mock_configs
self.configs.return_value = None, None, mock_ldset, None, None, None
ldset = self._validate_iscsildset_exist(self.ldsets, connector)
self.assertEqual('LX:redhatd1d8e8f23', ldset['ldsetname'])
self.assertEqual('iqn.1994-05.com.redhat:d1d8e8f232XX',
ldset['initiator_list'][0])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', new=mock.Mock())
def test_validate_fcldset_exist(self):
connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]}
ldset = self._validate_fcldset_exist(self.ldsets, connector)
self.assertEqual('LX:OpenStack1', ldset['ldsetname'])
connector = {'wwpns': ["10000090FAA0786X", "10000090FAA0786Y"]}
mock_data = {'ldsetname': 'LX:10000090FAA0786X',
'lds': {},
'protocol': 'FC',
'wwpn': ["1000-0090-FAA0-786X", "1000-0090-FAA0-786Y"],
'port': []}
mock_ldset = {}
mock_ldset['LX:10000090FAA0786X'] = mock_data
mock_configs = mock.Mock()
self.configs = mock_configs
self.configs.return_value = None, None, mock_ldset, None, None, None
ldset = self._validate_fcldset_exist(self.ldsets, connector)
self.assertEqual('LX:10000090FAA0786X', ldset['ldsetname'])
self.assertEqual('1000-0090-FAA0-786X', ldset['wwpn'][0])
self.assertEqual('1000-0090-FAA0-786Y', ldset['wwpn'][1])
def test_enumerate_iscsi_portals(self):
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"}
ldset = self._validate_iscsildset_exist(self.ldsets, connector)
self.assertEqual('LX:OpenStack0', ldset['ldsetname'])
self._properties['portal_number'] = 2
portal = self._enumerate_iscsi_portals(self.hostports, ldset)
self.assertEqual('192.168.1.90:3260', portal[0])
self.assertEqual('192.168.1.91:3260', portal[1])
self.assertEqual('192.168.2.92:3260', portal[2])
self.assertEqual('192.168.2.93:3260', portal[3])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
def test_initialize_connection_snapshot(self):
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"}
loc = "127.0.0.1:3260:1 iqn.2010-10.org.openstack:volume-00000001 88"
self.vol.provider_location = loc
ret = self.iscsi_initialize_connection_snapshot(self.vol, connector)
self.assertIsNotNone(ret)
self.assertEqual('iscsi', ret['driver_volume_type'])
connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]}
ret = self.fc_initialize_connection_snapshot(self.vol, connector)
self.assertIsNotNone(ret)
self.assertEqual('fibre_channel', ret['driver_volume_type'])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
def test_terminate_connection_snapshot(self):
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"}
self.iscsi_terminate_connection_snapshot(self.vol, connector)
connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]}
ret = self.fc_terminate_connection_snapshot(self.vol, connector)
self.assertEqual('fibre_channel', ret['driver_volume_type'])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
def test_remove_export_snapshot(self):
self.remove_export_snapshot(None, self.vol)
def test_backup_use_temp_snapshot(self):
ret = self.backup_use_temp_snapshot()
self.assertTrue(ret)
class VolumeStats_test(volume_helper.MStorageDSVDriver, test.TestCase):
def setUp(self):
super(VolumeStats_test, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self._properties['cli_fip'] = '10.0.0.1'
self._properties['pool_pools'] = {0, 1}
self._properties['pool_backup_pools'] = {2, 3}
def test_update_volume_status(self):
self.mock_object(volume_common.MStorageVolumeCommon, 'parse_xml',
side_effect=Exception)
stats = self._update_volume_status()
self.assertEqual('dummy', stats.get('volume_backend_name'))
self.assertEqual('NEC', stats.get('vendor_name'))
self.assertEqual(self.VERSION, stats.get('driver_version'))
self.assertEqual('10.0.0.1', stats.get('location_info').split(':')[0])
self.assertEqual('0,1', stats.get('location_info').split(':')[1])
class Migrate_test(volume_helper.MStorageDSVDriver, test.TestCase):
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
def setUp(self):
super(Migrate_test, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self.newvol = DummyVolume()
self.newvol.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.sourcevol = DummyVolume()
self.sourcevol.id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d"
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
def test_update_migrate_volume(self):
update_data = self.update_migrated_volume(None, self.sourcevol,
self.newvol, 'available')
self.assertIsNone(update_data['_name_id'])
self.assertIsNone(update_data['provider_location'])
class ManageUnmanage_test(volume_helper.MStorageDSVDriver, test.TestCase):
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
def setUp(self):
super(ManageUnmanage_test, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self._properties['pool_pools'] = {0}
self._properties['pool_backup_pools'] = {1}
def test_is_manageable_volume(self):
ld_ok_iv = {'pool_num': 0, 'RPL Attribute': 'IV', 'Purpose': '---'}
ld_ok_bv = {'pool_num': 0, 'RPL Attribute': 'BV', 'Purpose': 'INV'}
ld_ng_pool = {'pool_num': 1, 'RPL Attribute': 'IV', 'Purpose': '---'}
ld_ng_rpl1 = {'pool_num': 0, 'RPL Attribute': 'MV', 'Purpose': 'INV'}
ld_ng_rpl2 = {'pool_num': 0, 'RPL Attribute': 'RV', 'Purpose': 'INV'}
ld_ng_rpl3 = {'pool_num': 0, 'RPL Attribute': 'SV', 'Purpose': 'INV'}
ld_ng_purp = {'pool_num': 0, 'RPL Attribute': 'IV', 'Purpose': 'INV'}
self.assertTrue(self._is_manageable_volume(ld_ok_iv))
self.assertTrue(self._is_manageable_volume(ld_ok_bv))
self.assertFalse(self._is_manageable_volume(ld_ng_pool))
self.assertFalse(self._is_manageable_volume(ld_ng_rpl1))
self.assertFalse(self._is_manageable_volume(ld_ng_rpl2))
self.assertFalse(self._is_manageable_volume(ld_ng_rpl3))
self.assertFalse(self._is_manageable_volume(ld_ng_purp))
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def test_get_manageable_volumes(self):
current_volumes = []
volumes = self.get_manageable_volumes(current_volumes, None,
100, 0, ['reference'], ['dec'])
self.assertEqual('LX:287RbQoP7VdwR1WsPC2fZT',
volumes[2]['reference']['source-name'])
current_volumes = []
volumes = self.get_manageable_volumes(current_volumes, None,
100, 0, ['reference'], ['asc'])
self.assertEqual(' :2000000991020012000A',
volumes[0]['reference']['source-name'])
self.assertEqual(10, len(volumes))
volume = {'id': '46045673-41e7-44a7-9333-02f07feab04b'}
current_volumes = []
current_volumes.append(volume)
volumes = self.get_manageable_volumes(current_volumes, None,
100, 0, ['reference'], ['dec'])
self.assertFalse(volumes[2]['safe_to_manage'])
self.assertFalse(volumes[3]['safe_to_manage'])
self.assertTrue(volumes[4]['safe_to_manage'])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def test_manage_existing(self):
mock_rename = mock.Mock()
self._cli.changeldname = mock_rename
self.newvol = DummyVolume()
self.newvol.id = "46045673-41e7-44a7-9333-02f07feab04b"
current_volumes = []
volumes = self.get_manageable_volumes(current_volumes, None,
100, 0, ['reference'], ['dec'])
self.manage_existing(self.newvol, volumes[4]['reference'])
self._cli.changeldname.assert_called_once_with(
None,
'LX:287RbQoP7VdwR1WsPC2fZT',
' :20000009910200140009')
with self.assertRaisesRegex(exception.ManageExistingInvalidReference,
'Specified resource is already in-use.'):
self.manage_existing(self.newvol, volumes[3]['reference'])
volume = {'source-name': 'LX:yEUHrXa5AHMjOZZLb93eP'}
with self.assertRaisesRegex(exception.ManageExistingVolumeTypeMismatch,
'Volume type is unmatched.'):
self.manage_existing(self.newvol, volume)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def test_manage_existing_get_size(self):
self.newvol = DummyVolume()
self.newvol.id = "46045673-41e7-44a7-9333-02f07feab04b"
current_volumes = []
volumes = self.get_manageable_volumes(current_volumes, None,
100, 0, ['reference'], ['dec'])
size_in_gb = self.manage_existing_get_size(self.newvol,
volumes[3]['reference'])
self.assertEqual(10, size_in_gb)
class ManageUnmanage_Snap_test(volume_helper.MStorageDSVDriver, test.TestCase):
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
def setUp(self):
super(ManageUnmanage_Snap_test, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self._properties['pool_pools'] = {0}
self._properties['pool_backup_pools'] = {1}
def test_is_manageable_snapshot(self):
ld_ok_sv1 = {'pool_num': 1, 'RPL Attribute': 'SV', 'Purpose': 'INV'}
ld_ok_sv2 = {'pool_num': 1, 'RPL Attribute': 'SV', 'Purpose': '---'}
ld_ng_pool = {'pool_num': 0, 'RPL Attribute': 'SV', 'Purpose': 'INV'}
ld_ng_rpl1 = {'pool_num': 1, 'RPL Attribute': 'MV', 'Purpose': 'INV'}
ld_ng_rpl2 = {'pool_num': 1, 'RPL Attribute': 'RV', 'Purpose': 'INV'}
ld_ng_rpl3 = {'pool_num': 1, 'RPL Attribute': 'IV', 'Purpose': '---'}
ld_ng_rpl4 = {'pool_num': 1, 'RPL Attribute': 'BV', 'Purpose': 'INV'}
self.assertTrue(self._is_manageable_snapshot(ld_ok_sv1))
self.assertTrue(self._is_manageable_snapshot(ld_ok_sv2))
self.assertFalse(self._is_manageable_snapshot(ld_ng_pool))
self.assertFalse(self._is_manageable_snapshot(ld_ng_rpl1))
self.assertFalse(self._is_manageable_snapshot(ld_ng_rpl2))
self.assertFalse(self._is_manageable_snapshot(ld_ng_rpl3))
self.assertFalse(self._is_manageable_snapshot(ld_ng_rpl4))
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def test_get_manageable_snapshots(self):
mock_getbvname = mock.Mock()
self._cli.get_bvname = mock_getbvname
self._cli.get_bvname.return_value = "yEUHrXa5AHMjOZZLb93eP"
current_snapshots = []
volumes = self.get_manageable_snapshots(current_snapshots, None,
100, 0, ['reference'], ['asc'])
self.assertEqual('LX:4T7JpyqI3UuPlKeT9D3VQF',
volumes[0]['reference']['source-name'])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def test_manage_existing_snapshot(self):
mock_rename = mock.Mock()
self._cli.changeldname = mock_rename
self.newsnap = DummyVolume()
self.newsnap.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.newsnap.volume_id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d"
mock_getbvname = mock.Mock()
self._cli.get_bvname = mock_getbvname
self._cli.get_bvname.return_value = "yEUHrXa5AHMjOZZLb93eP"
current_snapshots = []
snaps = self.get_manageable_snapshots(current_snapshots, None,
100, 0, ['reference'], ['asc'])
self.manage_existing_snapshot(self.newsnap, snaps[0]['reference'])
self._cli.changeldname.assert_called_once_with(
None,
'LX:287RbQoP7VdwR1WsPC2fZT',
'LX:4T7JpyqI3UuPlKeT9D3VQF')
self.newsnap.volume_id = "AAAAAAAA"
with self.assertRaisesRegex(exception.ManageExistingInvalidReference,
'Snapshot source is unmatch.'):
self.manage_existing_snapshot(self.newsnap, snaps[0]['reference'])
self._cli.get_bvname.return_value = "2000000991020012000C"
self.newsnap.volume_id = "00046058-d38e-7f60-67b7-59ed6422520c"
snap = {'source-name': ' :2000000991020012000B'}
with self.assertRaisesRegex(exception.ManageExistingVolumeTypeMismatch,
'Volume type is unmatched.'):
self.manage_existing_snapshot(self.newsnap, snap)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def test_manage_existing_snapshot_get_size(self):
self.newsnap = DummyVolume()
self.newsnap.id = "46045673-41e7-44a7-9333-02f07feab04b"
mock_getbvname = mock.Mock()
self._cli.get_bvname = mock_getbvname
self._cli.get_bvname.return_value = "yEUHrXa5AHMjOZZLb93eP"
current_snapshots = []
snaps = self.get_manageable_snapshots(current_snapshots, None,
100, 0, ['reference'], ['asc'])
size_in_gb = self.manage_existing_snapshot_get_size(
self.newsnap,
snaps[0]['reference'])
self.assertEqual(6, size_in_gb)
|
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Builds an IdlDefinitions object from an AST (produced by blink_idl_parser)."""
import os
from idl_definitions import IdlDefinitions, IdlInterface, IdlException, IdlOperation, IdlCallbackFunction, IdlArgument, IdlAttribute, IdlConstant, IdlEnum, IdlUnionType
SPECIAL_KEYWORD_LIST = ['GETTER', 'SETTER', 'DELETER']
STANDARD_TYPEDEFS = {
# http://www.w3.org/TR/WebIDL/#common-DOMTimeStamp
'DOMTimeStamp': 'unsigned long long',
}
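# Illustrative note (editorial addition, not from the original source):
# STANDARD_TYPEDEFS seeds the typedef map that file_node_to_idl_definitions
# extends while walking the AST. For example, parsing an IDL file containing
#     typedef unsigned long GLenum;
# would add the entry 'GLenum': 'unsigned long' alongside the DOMTimeStamp
# entry above, and later type lookups resolve through this map.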
def build_idl_definitions_from_ast(node):
if node is None:
return None
node_class = node.GetClass()
if node_class != 'File':
raise ValueError('Unrecognized node class: %s' % node_class)
return file_node_to_idl_definitions(node)
def file_node_to_idl_definitions(node):
callback_functions = {}
enumerations = {}
exceptions = {}
interfaces = {}
typedefs = STANDARD_TYPEDEFS
# FIXME: only needed for Perl, remove later
file_name = os.path.abspath(node.GetName())
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Interface':
interface = interface_node_to_idl_interface(child)
interfaces[interface.name] = interface
elif child_class == 'Exception':
exception = exception_node_to_idl_exception(child)
exceptions[exception.name] = exception
elif child_class == 'Typedef':
type_name = child.GetName()
typedefs[type_name] = typedef_node_to_type(child)
elif child_class == 'Enum':
enumeration = enum_node_to_idl_enum(child)
enumerations[enumeration.name] = enumeration
elif child_class == 'Callback':
callback_function = callback_node_to_idl_callback_function(child)
callback_functions[callback_function.name] = callback_function
elif child_class == 'Implements':
# Implements is handled at the interface merging step
pass
else:
raise ValueError('Unrecognized node class: %s' % child_class)
return IdlDefinitions(callback_functions=callback_functions, enumerations=enumerations, exceptions=exceptions, file_name=file_name, interfaces=interfaces, typedefs=typedefs)
# Constructors for Interface definitions and interface members
def interface_node_to_idl_interface(node):
attributes = []
constants = []
constructors = None
custom_constructors = None
extended_attributes = None
operations = []
is_callback = node.GetProperty('CALLBACK') or False
# FIXME: uppercase 'Partial' => 'PARTIAL' in base IDL parser
is_partial = node.GetProperty('Partial') or False
name = node.GetName()
parent = None
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Attribute':
attributes.append(attribute_node_to_idl_attribute(child))
elif child_class == 'Const':
constants.append(constant_node_to_idl_constant(child))
elif child_class == 'ExtAttributes':
extended_attributes = ext_attributes_node_to_extended_attributes(child)
constructors, custom_constructors = extended_attributes_to_constructors(extended_attributes)
clear_constructor_attributes(extended_attributes)
elif child_class == 'Operation':
operations.append(operation_node_to_idl_operation(child))
elif child_class == 'Inherit':
parent = child.GetName()
else:
raise ValueError('Unrecognized node class: %s' % child_class)
return IdlInterface(name=name, attributes=attributes, constants=constants, constructors=constructors, custom_constructors=custom_constructors, extended_attributes=extended_attributes, operations=operations, is_callback=is_callback, is_partial=is_partial, parent=parent)
def attribute_node_to_idl_attribute(node):
idl_type = None
extended_attributes = {}
is_nullable = False
is_read_only = node.GetProperty('READONLY') or False
is_static = node.GetProperty('STATIC') or False
name = node.GetName()
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Type':
idl_type = type_node_to_type(child)
is_nullable = child.GetProperty('NULLABLE') or False
elif child_class == 'ExtAttributes':
extended_attributes = ext_attributes_node_to_extended_attributes(child)
else:
raise ValueError('Unrecognized node class: %s' % child_class)
return IdlAttribute(idl_type=idl_type, extended_attributes=extended_attributes, is_nullable=is_nullable, is_read_only=is_read_only, is_static=is_static, name=name)
def constant_node_to_idl_constant(node):
name = node.GetName()
children = node.GetChildren()
num_children = len(children)
if num_children < 2 or num_children > 3:
raise ValueError('Expected 2 or 3 children, got %s' % num_children)
type_node = children[0]
# ConstType is more limited than Type, so subtree is smaller and we don't
# use the full type_node_to_type function.
idl_type = type_node_inner_to_type(type_node)
value_node = children[1]
value_node_class = value_node.GetClass()
if value_node_class != 'Value':
raise ValueError('Expected Value node, got %s' % value_node_class)
value = value_node.GetName()
extended_attributes = None
if num_children == 3:
ext_attributes_node = children[2]
extended_attributes = ext_attributes_node_to_extended_attributes(ext_attributes_node)
return IdlConstant(idl_type=idl_type, extended_attributes=extended_attributes, name=name, value=value)
def operation_node_to_idl_operation(node):
name = node.GetName()
# FIXME: AST should use None internally
if name == '_unnamed_':
name = None
is_static = node.GetProperty('STATIC') or False
specials = []
property_dictionary = node.GetProperties()
for special_keyword in SPECIAL_KEYWORD_LIST:
if special_keyword in property_dictionary:
specials.append(special_keyword.lower())
extended_attributes = None
arguments = []
return_type = None
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Arguments':
arguments = arguments_node_to_arguments(child)
elif child_class == 'Type':
return_type = type_node_to_type(child)
elif child_class == 'ExtAttributes':
extended_attributes = ext_attributes_node_to_extended_attributes(child)
else:
raise ValueError('Unrecognized node class: %s' % child_class)
return IdlOperation(name=name, idl_type=return_type, extended_attributes=extended_attributes, is_static=is_static, arguments=arguments, specials=specials)
def arguments_node_to_arguments(node):
# [Constructor] and [CustomConstructor] without arguments (the bare form)
# have None instead of an arguments node, but have the same meaning as using
# an empty argument list, [Constructor()], so special-case this.
# http://www.w3.org/TR/WebIDL/#Constructor
if node is None:
return []
arguments = []
argument_node_list = node.GetChildren()
for argument_node in argument_node_list:
arguments.append(argument_node_to_idl_argument(argument_node))
return arguments
def argument_node_to_idl_argument(node):
name = node.GetName()
idl_type = None
extended_attributes = {}
# FIXME: Boolean values are inconsistent due to Perl compatibility.
# Make all default to False once Perl removed.
is_nullable = False
is_optional = node.GetProperty('OPTIONAL')
is_variadic = None
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Type':
idl_type = type_node_to_type(child)
# FIXME: Doesn't handle nullable arrays (Foo[]?), and arrays of
# nullable (Foo?[]) are treated as nullable arrays. No actual use.
is_nullable = child.GetProperty('NULLABLE')
elif child_class == 'ExtAttributes':
extended_attributes = ext_attributes_node_to_extended_attributes(child)
elif child_class == 'Argument':
child_name = child.GetName()
if child_name != '...':
raise ValueError('Unrecognized Argument node; expected "...", got "%s"' % child_name)
is_variadic = child.GetProperty('ELLIPSIS') or False
else:
raise ValueError('Unrecognized node class: %s' % child_class)
return IdlArgument(name=name, idl_type=idl_type, extended_attributes=extended_attributes, is_nullable=is_nullable, is_optional=is_optional, is_variadic=is_variadic)
# Constructors for non-interface definitions
def callback_node_to_idl_callback_function(node):
name = node.GetName()
children = node.GetChildren()
num_children = len(children)
if num_children != 2:
raise ValueError('Expected 2 children, got %s' % num_children)
type_node = children[0]
idl_type = type_node_to_type(type_node)
arguments_node = children[1]
arguments_node_class = arguments_node.GetClass()
if arguments_node_class != 'Arguments':
        raise ValueError('Expected Arguments node, got %s' % arguments_node_class)
arguments = arguments_node_to_arguments(arguments_node)
return IdlCallbackFunction(name=name, idl_type=idl_type, arguments=arguments)
def enum_node_to_idl_enum(node):
name = node.GetName()
values = []
for child in node.GetChildren():
values.append(child.GetName())
return IdlEnum(name=name, values=values)
def exception_operation_node_to_idl_operation(node):
# Needed to handle one case in DOMException.idl:
# // Override in a Mozilla compatible format
# [NotEnumerable] DOMString toString();
# FIXME: can we remove this? replace with a stringifier?
extended_attributes = {}
name = node.GetName()
children = node.GetChildren()
if len(children) < 1 or len(children) > 2:
raise ValueError('ExceptionOperation node with %s children, expected 1 or 2' % len(children))
type_node = children[0]
return_type = type_node_to_type(type_node)
if len(children) > 1:
ext_attributes_node = children[1]
extended_attributes = ext_attributes_node_to_extended_attributes(ext_attributes_node)
return IdlOperation(name=name, idl_type=return_type, extended_attributes=extended_attributes)
def exception_node_to_idl_exception(node):
# Exceptions are similar to Interfaces, but simpler
attributes = []
constants = []
extended_attributes = None
operations = []
name = node.GetName()
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Attribute':
attribute = attribute_node_to_idl_attribute(child)
attributes.append(attribute)
elif child_class == 'Const':
constants.append(constant_node_to_idl_constant(child))
elif child_class == 'ExtAttributes':
extended_attributes = ext_attributes_node_to_extended_attributes(child)
elif child_class == 'ExceptionOperation':
operations.append(exception_operation_node_to_idl_operation(child))
else:
raise ValueError('Unrecognized node class: %s' % child_class)
return IdlException(name=name, attributes=attributes, constants=constants, extended_attributes=extended_attributes, operations=operations)
def typedef_node_to_type(node):
children = node.GetChildren()
if len(children) != 1:
raise ValueError('Typedef node with %s children, expected 1' % len(children))
child = children[0]
child_class = child.GetClass()
if child_class != 'Type':
raise ValueError('Unrecognized node class: %s' % child_class)
return type_node_to_type(child)
# Extended attributes
def ext_attributes_node_to_extended_attributes(node):
"""
Returns:
Dictionary of {ExtAttributeName: ExtAttributeValue}.
Value is usually a string, with three exceptions:
        Constructors: value is a list of Arguments nodes, corresponding to
          the possible signatures of the constructor.
        CustomConstructors: value is a list of Arguments nodes, corresponding to
          the possible signatures of the custom constructor.
NamedConstructor: value is a Call node, corresponding to the single
signature of the named constructor.
"""
# Primarily just make a dictionary from the children.
# The only complexity is handling various types of constructors:
# Constructors and Custom Constructors can have duplicate entries due to
# overloading, and thus are stored in temporary lists.
# However, Named Constructors cannot be overloaded, and thus do not have
# a list.
# FIXME: move Constructor logic into separate function, instead of modifying
# extended attributes in-place.
constructors = []
custom_constructors = []
extended_attributes = {}
attribute_list = node.GetChildren()
for attribute in attribute_list:
name = attribute.GetName()
children = attribute.GetChildren()
if children:
if len(children) > 1:
raise ValueError('ExtAttributes node with %s children, expected at most 1' % len(children))
child = children[0]
child_class = child.GetClass()
else:
child = None
child_class = None
if name == 'Constructor':
if child_class and child_class != 'Arguments':
raise ValueError('Constructor only supports Arguments as child, but has child of class: %s' % child_class)
constructors.append(child)
elif name == 'CustomConstructor':
if child_class and child_class != 'Arguments':
raise ValueError('[CustomConstructor] only supports Arguments as child, but has child of class: %s' % child_class)
custom_constructors.append(child)
elif name == 'NamedConstructor':
if child_class and child_class != 'Call':
raise ValueError('[NamedConstructor] only supports Call as child, but has child of class: %s' % child_class)
extended_attributes[name] = child
elif name == 'SetReference':
if not child:
raise ValueError('[SetReference] requires a child, but has none.')
if child_class != 'Arguments':
raise ValueError('[SetReference] only supports Arguments as child, but has child of class: %s' % child_class)
extended_attributes[name] = arguments_node_to_arguments(child)
elif children:
raise ValueError('ExtAttributes node with unexpected children: %s' % name)
else:
value = attribute.GetProperty('VALUE')
extended_attributes[name] = value
# Store constructors and custom constructors in special list attributes,
# which are deleted later. Note plural in key.
if constructors:
extended_attributes['Constructors'] = constructors
if custom_constructors:
extended_attributes['CustomConstructors'] = custom_constructors
return extended_attributes
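# Illustrative sketch (editorial addition, not part of the original parser):
# for an interface declared with
#     [Constructor, Constructor(DOMString name), NamedConstructor=Audio(DOMString src)]
# the dictionary returned above would look roughly like
#     {'Constructors': [None, <Arguments node>],   # bare form stored as None
#      'NamedConstructor': <Call node>}
# i.e. overloaded constructors accumulate in the plural 'Constructors' list,
# while the named constructor keeps its single Call node until
# extended_attributes_to_constructors() below replaces it with its name.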
def extended_attributes_to_constructors(extended_attributes):
"""Returns constructors and custom_constructors (lists of IdlOperations).
Auxiliary function for interface_node_to_idl_interface.
"""
constructors = []
custom_constructors = []
if 'Constructors' in extended_attributes:
constructor_list = extended_attributes['Constructors']
        # If not overloaded, use index 0; otherwise index overloads starting from 1
overloaded_index = 0 if len(constructor_list) == 1 else 1
for arguments_node in constructor_list:
name = 'Constructor'
arguments = arguments_node_to_arguments(arguments_node)
constructor = IdlOperation(name=name, extended_attributes=extended_attributes, overloaded_index=overloaded_index, arguments=arguments)
constructors.append(constructor)
overloaded_index += 1
if 'CustomConstructors' in extended_attributes:
custom_constructor_list = extended_attributes['CustomConstructors']
        # If not overloaded, use index 0; otherwise index overloads starting from 1
overloaded_index = 0 if len(custom_constructor_list) == 1 else 1
for arguments_node in custom_constructor_list:
name = 'CustomConstructor'
arguments = arguments_node_to_arguments(arguments_node)
custom_constructor = IdlOperation(name=name, extended_attributes=extended_attributes, overloaded_index=overloaded_index, arguments=arguments)
custom_constructors.append(custom_constructor)
overloaded_index += 1
if 'NamedConstructor' in extended_attributes:
name = 'NamedConstructor'
call_node = extended_attributes['NamedConstructor']
extended_attributes['NamedConstructor'] = call_node.GetName()
overloaded_index = None # named constructors are not overloaded
children = call_node.GetChildren()
if len(children) != 1:
raise ValueError('NamedConstructor node expects 1 child, got %s.' % len(children))
arguments_node = children[0]
arguments = arguments_node_to_arguments(arguments_node)
named_constructor = IdlOperation(name=name, extended_attributes=extended_attributes, overloaded_index=overloaded_index, arguments=arguments)
# FIXME: should return named_constructor separately; appended for Perl
constructors.append(named_constructor)
return constructors, custom_constructors
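# Editorial example (hedged): given the two-entry 'Constructors' list from the
# sketch above, this function emits two IdlOperation objects named
# 'Constructor' with overloaded_index 1 and 2 (a single, non-overloaded
# constructor would instead get overloaded_index 0), plus one more operation
# named 'NamedConstructor' appended to the same list for the Perl pipeline.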
def clear_constructor_attributes(extended_attributes):
# Deletes Constructor*s* (plural), sets Constructor (singular)
if 'Constructors' in extended_attributes:
del extended_attributes['Constructors']
extended_attributes['Constructor'] = None
if 'CustomConstructors' in extended_attributes:
del extended_attributes['CustomConstructors']
extended_attributes['CustomConstructor'] = None
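# Editorial note: after clear_constructor_attributes() runs, the example
# dictionary sketched above would carry 'Constructor': None instead of the
# temporary 'Constructors' list, so downstream consumers only ever see the
# singular keys.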
# Types
def type_node_to_type(node):
children = node.GetChildren()
if len(children) < 1 or len(children) > 2:
raise ValueError('Type node expects 1 or 2 children (type + optional array []), got %s (multi-dimensional arrays are not supported).' % len(children))
type_node_child = children[0]
idl_type = type_node_inner_to_type(type_node_child)
if len(children) == 2:
array_node = children[1]
array_node_class = array_node.GetClass()
if array_node_class != 'Array':
raise ValueError('Expected Array node as TypeSuffix, got %s node.' % array_node_class)
idl_type += '[]'
return idl_type
def type_node_inner_to_type(node):
node_class = node.GetClass()
# Note Type*r*ef, not Typedef, meaning the type is an identifier, thus
# either a typedef shorthand (but not a Typedef declaration itself) or an
# interface type. We do not distinguish these, and just use the type name.
if node_class in ['PrimitiveType', 'Typeref']:
return node.GetName()
elif node_class == 'Any':
return 'any'
elif node_class == 'Sequence':
return sequence_node_to_type(node)
elif node_class == 'UnionType':
return union_type_node_to_idl_union_type(node)
raise ValueError('Unrecognized node class: %s' % node_class)
def sequence_node_to_type(node):
children = node.GetChildren()
if len(children) != 1:
raise ValueError('Sequence node expects exactly 1 child, got %s' % len(children))
sequence_child = children[0]
sequence_child_class = sequence_child.GetClass()
if sequence_child_class != 'Type':
raise ValueError('Unrecognized node class: %s' % sequence_child_class)
sequence_type = type_node_to_type(sequence_child)
return 'sequence<%s>' % sequence_type
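# Illustrative examples (editorial, not in the original file): for an attribute
# declared as 'attribute float[] values;' type_node_to_type() above returns the
# string 'float[]' (PrimitiveType plus Array suffix), and for
# 'sequence<DOMString> names' it returns 'sequence<DOMString>' via
# sequence_node_to_type(); identifiers such as 'Node' pass through the
# 'Typeref' branch unchanged.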
def union_type_node_to_idl_union_type(node):
union_member_types = []
for member_type_node in node.GetChildren():
member_type = type_node_to_type(member_type_node)
union_member_types.append(member_type)
return IdlUnionType(union_member_types=union_member_types)
|
|
# Copyright 2013 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Tests for the backup service base driver. """
import uuid
import mock
from cinder.backup import driver
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common import jsonutils
from cinder import test
from cinder.tests.backup import fake_service
_backup_db_fields = ['id', 'user_id', 'project_id',
'volume_id', 'host', 'availability_zone',
'display_name', 'display_description',
'container', 'status', 'fail_reason',
'service_metadata', 'service', 'size',
'object_count']
class BackupBaseDriverTestCase(test.TestCase):
def _create_volume_db_entry(self, id, size):
vol = {'id': id, 'size': size, 'status': 'available'}
return db.volume_create(self.ctxt, vol)['id']
def _create_backup_db_entry(self, backupid, volid, size):
backup = {'id': backupid, 'size': size, 'volume_id': volid}
return db.backup_create(self.ctxt, backup)['id']
def setUp(self):
super(BackupBaseDriverTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.volume_id = str(uuid.uuid4())
self.backup_id = str(uuid.uuid4())
self._create_backup_db_entry(self.backup_id, self.volume_id, 1)
self._create_volume_db_entry(self.volume_id, 1)
self.backup = db.backup_get(self.ctxt, self.backup_id)
self.driver = fake_service.FakeBackupService(self.ctxt)
def test_get_metadata(self):
json_metadata = self.driver.get_metadata(self.volume_id)
metadata = jsonutils.loads(json_metadata)
self.assertEqual(metadata['version'], 1)
def test_put_metadata(self):
metadata = {'version': 1}
self.driver.put_metadata(self.volume_id, jsonutils.dumps(metadata))
def test_get_put_metadata(self):
json_metadata = self.driver.get_metadata(self.volume_id)
self.driver.put_metadata(self.volume_id, json_metadata)
def test_export_record(self):
export_string = self.driver.export_record(self.backup)
export_dict = jsonutils.loads(export_string.decode("base64"))
# Make sure we don't lose data when converting to string
for key in _backup_db_fields:
self.assertTrue(key in export_dict)
self.assertEqual(self.backup[key], export_dict[key])
def test_import_record(self):
export_string = self.driver.export_record(self.backup)
imported_backup = self.driver.import_record(export_string)
# Make sure we don't lose data when converting from string
for key in _backup_db_fields:
self.assertTrue(key in imported_backup)
self.assertEqual(imported_backup[key], self.backup[key])
class BackupMetadataAPITestCase(test.TestCase):
def _create_volume_db_entry(self, id, size, display_name,
display_description):
vol = {'id': id, 'size': size, 'status': 'available',
'display_name': display_name,
'display_description': display_description}
return db.volume_create(self.ctxt, vol)['id']
def setUp(self):
super(BackupMetadataAPITestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.volume_id = str(uuid.uuid4())
self.volume_display_name = 'vol-1'
self.volume_display_description = 'test vol'
self._create_volume_db_entry(self.volume_id, 1,
self.volume_display_name,
self.volume_display_description)
self.bak_meta_api = driver.BackupMetadataAPI(self.ctxt)
def _add_metadata(self, vol_meta=False, vol_glance_meta=False):
if vol_meta:
# Add some VolumeMetadata
db.volume_metadata_update(self.ctxt, self.volume_id,
{'fee': 'fi'}, False)
db.volume_metadata_update(self.ctxt, self.volume_id,
{'fo': 'fum'}, False)
if vol_glance_meta:
# Add some GlanceMetadata
db.volume_glance_metadata_create(self.ctxt, self.volume_id,
'disk_format', 'bare')
db.volume_glance_metadata_create(self.ctxt, self.volume_id,
'container_type', 'ovf')
def test_get(self):
# Volume won't have anything other than base by default
meta = self.bak_meta_api.get(self.volume_id)
s1 = set(jsonutils.loads(meta).keys())
s2 = ['version', self.bak_meta_api.TYPE_TAG_VOL_BASE_META]
self.assertEqual(s1.symmetric_difference(s2), set())
self._add_metadata(vol_glance_meta=True)
meta = self.bak_meta_api.get(self.volume_id)
s1 = set(jsonutils.loads(meta).keys())
s2 = ['version', self.bak_meta_api.TYPE_TAG_VOL_BASE_META,
self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META]
self.assertEqual(s1.symmetric_difference(s2), set())
self._add_metadata(vol_meta=True)
meta = self.bak_meta_api.get(self.volume_id)
s1 = set(jsonutils.loads(meta).keys())
s2 = ['version', self.bak_meta_api.TYPE_TAG_VOL_BASE_META,
self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META,
self.bak_meta_api.TYPE_TAG_VOL_META]
self.assertEqual(s1.symmetric_difference(s2), set())
def test_put(self):
meta = self.bak_meta_api.get(self.volume_id)
self.bak_meta_api.put(self.volume_id, meta)
self._add_metadata(vol_glance_meta=True)
meta = self.bak_meta_api.get(self.volume_id)
self.bak_meta_api.put(self.volume_id, meta)
self._add_metadata(vol_meta=True)
meta = self.bak_meta_api.get(self.volume_id)
self.bak_meta_api.put(self.volume_id, meta)
def test_put_invalid_version(self):
container = jsonutils.dumps({'version': 2})
self.assertRaises(exception.BackupMetadataUnsupportedVersion,
self.bak_meta_api.put, self.volume_id, container)
def test_v1_restore_factory(self):
fact = self.bak_meta_api._v1_restore_factory()
keys = [self.bak_meta_api.TYPE_TAG_VOL_META,
self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META]
self.assertEqual(set(keys).symmetric_difference(set(fact.keys())),
set([]))
meta_container = {self.bak_meta_api.TYPE_TAG_VOL_BASE_META:
{'display_name': 'vol-2',
'display_description': 'description'},
self.bak_meta_api.TYPE_TAG_VOL_META: {},
self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META: {}}
for f in fact:
func = fact[f][0]
fields = fact[f][1]
func(meta_container[f], self.volume_id, fields)
vol = db.volume_get(self.ctxt, self.volume_id)
self.assertEqual(self.volume_display_name, vol['display_name'])
self.assertEqual(self.volume_display_description,
vol['display_description'])
def test_restore_vol_glance_meta(self):
fields = {}
container = {}
self.bak_meta_api._save_vol_glance_meta(container, self.volume_id)
self.bak_meta_api._restore_vol_glance_meta(container, self.volume_id,
fields)
self._add_metadata(vol_glance_meta=True)
self.bak_meta_api._save_vol_glance_meta(container, self.volume_id)
self.bak_meta_api._restore_vol_glance_meta(container, self.volume_id,
fields)
def test_restore_vol_meta(self):
fields = {}
container = {}
self.bak_meta_api._save_vol_meta(container, self.volume_id)
self.bak_meta_api._restore_vol_meta(container, self.volume_id, fields)
self._add_metadata(vol_meta=True)
self.bak_meta_api._save_vol_meta(container, self.volume_id)
self.bak_meta_api._restore_vol_meta(container, self.volume_id, fields)
def test_filter(self):
metadata = {'a': 1, 'b': 2, 'c': 3}
self.assertEqual(metadata, self.bak_meta_api._filter(metadata, []))
self.assertEqual({'b': 2}, self.bak_meta_api._filter(metadata, ['b']))
self.assertEqual({}, self.bak_meta_api._filter(metadata, ['d']))
self.assertEqual({'a': 1, 'b': 2},
self.bak_meta_api._filter(metadata, ['a', 'b']))
def test_save_vol_glance_meta(self):
container = {}
self.bak_meta_api._save_vol_glance_meta(container, self.volume_id)
def test_save_vol_meta(self):
container = {}
self.bak_meta_api._save_vol_meta(container, self.volume_id)
def test_save_vol_base_meta(self):
container = {}
self.bak_meta_api._save_vol_base_meta(container, self.volume_id)
def test_is_serializable(self):
data = {'foo': 'bar'}
if self.bak_meta_api._is_serializable(data):
jsonutils.dumps(data)
def test_is_not_serializable(self):
data = {'foo': 'bar'}
with mock.patch.object(jsonutils, 'dumps') as mock_dumps:
mock_dumps.side_effect = TypeError
self.assertFalse(self.bak_meta_api._is_serializable(data))
mock_dumps.assert_called_once()
|
|
# Copyright 2011 Justin Santa Barbara
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a fake image service."""
import copy
import datetime
import uuid
from oslo.config import cfg
from nova.compute import arch
from nova import exception
import nova.image.glance
from nova.openstack.common import log as logging
CONF = cfg.CONF
CONF.import_opt('null_kernel', 'nova.compute.api')
LOG = logging.getLogger(__name__)
class _FakeImageService(object):
"""Mock (fake) image service for unit testing."""
def __init__(self):
self.images = {}
# NOTE(justinsb): The OpenStack API can't upload an image?
        # So, make sure we've got one.
timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'raw',
'disk_format': 'raw',
'size': '25165824',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel,
'architecture': arch.X86_64}}
image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'size': '58145823',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel}}
image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': None,
'disk_format': None,
'size': '83594576',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel}}
image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'size': '84035174',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel}}
image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'size': '26360814',
'properties': {'kernel_id':
'155d900f-4e14-4e4c-a73d-069cbf4541e6',
'ramdisk_id': None}}
image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
'name': 'fakeimage6',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'size': '49163826',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel,
'architecture': arch.X86_64,
'auto_disk_config': 'False'}}
image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
'name': 'fakeimage7',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'size': '74185822',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel,
'architecture': arch.X86_64,
'auto_disk_config': 'True'}}
self.create(None, image1)
self.create(None, image2)
self.create(None, image3)
self.create(None, image4)
self.create(None, image5)
self.create(None, image6)
self.create(None, image7)
self._imagedata = {}
super(_FakeImageService, self).__init__()
# TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
def detail(self, context, **kwargs):
"""Return list of detailed image information."""
return copy.deepcopy(self.images.values())
def download(self, context, image_id, dst_path=None, data=None):
self.show(context, image_id)
if data:
data.write(self._imagedata.get(image_id, ''))
elif dst_path:
with open(dst_path, 'wb') as data:
data.write(self._imagedata.get(image_id, ''))
def show(self, context, image_id, include_locations=False,
show_deleted=True):
"""Get data about specified image.
Returns a dict containing image data for the given opaque image id.
"""
image = self.images.get(str(image_id))
if image:
return copy.deepcopy(image)
LOG.warn('Unable to find image id %s. Have images: %s',
image_id, self.images)
raise exception.ImageNotFound(image_id=image_id)
def create(self, context, metadata, data=None):
"""Store the image data and return the new image id.
        :raises: CouldNotUploadImage if the image already exists.
"""
image_id = str(metadata.get('id', uuid.uuid4()))
metadata['id'] = image_id
if image_id in self.images:
raise exception.CouldNotUploadImage(image_id=image_id)
self.images[image_id] = copy.deepcopy(metadata)
if data:
self._imagedata[image_id] = data.read()
return self.images[image_id]
def update(self, context, image_id, metadata, data=None,
purge_props=False):
"""Replace the contents of the given image with the new data.
:raises: ImageNotFound if the image does not exist.
"""
if not self.images.get(image_id):
raise exception.ImageNotFound(image_id=image_id)
if purge_props:
self.images[image_id] = copy.deepcopy(metadata)
else:
image = self.images[image_id]
try:
image['properties'].update(metadata.pop('properties'))
except KeyError:
pass
image.update(metadata)
return self.images[image_id]
def delete(self, context, image_id):
"""Delete the given image.
:raises: ImageNotFound if the image does not exist.
"""
removed = self.images.pop(image_id, None)
if not removed:
raise exception.ImageNotFound(image_id=image_id)
def get_location(self, context, image_id):
if image_id in self.images:
return 'fake_location'
return None
_fakeImageService = _FakeImageService()
def FakeImageService():
return _fakeImageService
def FakeImageService_reset():
global _fakeImageService
_fakeImageService = _FakeImageService()
def get_valid_image_id():
return _fakeImageService.images.keys()[0]
def stub_out_image_service(stubs):
image_service = FakeImageService()
stubs.Set(nova.image.glance, 'get_remote_image_service',
lambda x, y: (image_service, y))
stubs.Set(nova.image.glance, 'get_default_image_service',
lambda: image_service)
return image_service
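# Usage sketch (editorial; the module path and the stubs object are assumptions
# based on typical Nova test setup rather than anything stated in this file):
#
#     from nova.tests.image import fake as fake_image
#
#     image_service = fake_image.stub_out_image_service(self.stubs)
#     image_id = fake_image.get_valid_image_id()
#     ...
#     fake_image.FakeImageService_reset()   # restore the pristine image set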
|
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import contextlib
import errno
import itertools
import os
import subprocess
import sys
import time
import unittest
from grpc.framework.face import exceptions
from grpc.framework.foundation import future
# Identifiers of entities we expect to find in the generated module.
SERVICER_IDENTIFIER = 'EarlyAdopterTestServiceServicer'
SERVER_IDENTIFIER = 'EarlyAdopterTestServiceServer'
STUB_IDENTIFIER = 'EarlyAdopterTestServiceStub'
SERVER_FACTORY_IDENTIFIER = 'early_adopter_create_TestService_server'
STUB_FACTORY_IDENTIFIER = 'early_adopter_create_TestService_stub'
# Timeouts and delays.
SHORT_TIMEOUT = 0.1
NORMAL_TIMEOUT = 1
LONG_TIMEOUT = 2
DOES_NOT_MATTER_DELAY = 0
NO_DELAY = 0
LONG_DELAY = 1
# Assigned in __main__.
_build_mode = None
_port = None
class _ServicerMethods(object):
def __init__(self, test_pb2, delay):
self._paused = False
self._failed = False
self.test_pb2 = test_pb2
self.delay = delay
@contextlib.contextmanager
def pause(self): # pylint: disable=invalid-name
self._paused = True
yield
self._paused = False
@contextlib.contextmanager
def fail(self): # pylint: disable=invalid-name
self._failed = True
yield
self._failed = False
def _control(self): # pylint: disable=invalid-name
if self._failed:
raise ValueError()
time.sleep(self.delay)
while self._paused:
time.sleep(0)
def UnaryCall(self, request, context):
response = self.test_pb2.SimpleResponse()
response.payload.payload_type = self.test_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * request.response_size
self._control()
return response
def StreamingOutputCall(self, request, context):
for parameter in request.response_parameters:
response = self.test_pb2.StreamingOutputCallResponse()
response.payload.payload_type = self.test_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def StreamingInputCall(self, request_iter, context):
response = self.test_pb2.StreamingInputCallResponse()
aggregated_payload_size = 0
for request in request_iter:
aggregated_payload_size += len(request.payload.payload_compressable)
response.aggregated_payload_size = aggregated_payload_size
self._control()
return response
def FullDuplexCall(self, request_iter, context):
for request in request_iter:
for parameter in request.response_parameters:
response = self.test_pb2.StreamingOutputCallResponse()
response.payload.payload_type = self.test_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def HalfDuplexCall(self, request_iter, context):
responses = []
for request in request_iter:
for parameter in request.response_parameters:
response = self.test_pb2.StreamingOutputCallResponse()
response.payload.payload_type = self.test_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
responses.append(response)
for response in responses:
yield response
def _CreateService(test_pb2, delay):
  """Provides a servicer backend, a stub, and a server.
  The servicer is merely the implementation of the generated servicer class
  handed to the face layer of the Python RPC implementation; the two are
  otherwise detached.
  A non-zero delay adds a pause to each servicer call, representative of
  communication latency.
  Args:
    test_pb2: the test_pb2 module generated by this test
    delay: delay in seconds per response from the servicer
  Returns:
    A three-tuple (servicer_methods, stub, server), where servicer_methods is
    the back-end of the service bound to the stub and server hosts it.
  """
servicer_methods = _ServicerMethods(test_pb2, delay)
class Servicer(getattr(test_pb2, SERVICER_IDENTIFIER)):
def UnaryCall(self, request, context):
return servicer_methods.UnaryCall(request, context)
def StreamingOutputCall(self, request, context):
return servicer_methods.StreamingOutputCall(request, context)
def StreamingInputCall(self, request_iter, context):
return servicer_methods.StreamingInputCall(request_iter, context)
def FullDuplexCall(self, request_iter, context):
return servicer_methods.FullDuplexCall(request_iter, context)
def HalfDuplexCall(self, request_iter, context):
return servicer_methods.HalfDuplexCall(request_iter, context)
servicer = Servicer()
server = getattr(test_pb2, SERVER_FACTORY_IDENTIFIER)(servicer, _port,
None, None)
stub = getattr(test_pb2, STUB_FACTORY_IDENTIFIER)('localhost', _port)
return servicer_methods, stub, server
def StreamingInputRequest(test_pb2):
for _ in range(3):
request = test_pb2.StreamingInputCallRequest()
request.payload.payload_type = test_pb2.COMPRESSABLE
request.payload.payload_compressable = 'a'
yield request
def StreamingOutputRequest(test_pb2):
request = test_pb2.StreamingOutputCallRequest()
sizes = [1, 2, 3]
request.response_parameters.add(size=sizes[0], interval_us=0)
request.response_parameters.add(size=sizes[1], interval_us=0)
request.response_parameters.add(size=sizes[2], interval_us=0)
return request
def FullDuplexRequest(test_pb2):
request = test_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
request = test_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
class PythonPluginTest(unittest.TestCase):
"""Test case for the gRPC Python protoc-plugin.
  While reading these tests, remember that the futures API
  (`stub.method.async()`) only returns futures for the *non-streaming*
  responses; otherwise it behaves like its blocking counterpart.
"""
def setUp(self):
protoc_command = '../../bins/%s/protobuf/protoc' % _build_mode
protoc_plugin_filename = '../../bins/%s/grpc_python_plugin' % _build_mode
test_proto_filename = './test.proto'
if not os.path.isfile(protoc_command):
      # Assume that if we haven't built protoc, it's available on the system.
protoc_command = 'protoc'
# Ensure that the output directory exists.
outdir = '../../gens/test/compiler/python'
try:
os.makedirs(outdir)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
# Invoke protoc with the plugin.
cmd = [
protoc_command,
'--plugin=protoc-gen-python-grpc=%s' % protoc_plugin_filename,
'-I %s' % os.path.dirname(test_proto_filename),
'--python_out=%s' % outdir,
'--python-grpc_out=%s' % outdir,
os.path.basename(test_proto_filename),
]
subprocess.call(' '.join(cmd), shell=True)
sys.path.append(outdir)
  # TODO(atash): Figure out which of these tests is hanging flakily with small
  # probability.
def testImportAttributes(self):
# check that we can access the generated module and its members.
import test_pb2 # pylint: disable=g-import-not-at-top
self.assertIsNotNone(getattr(test_pb2, SERVICER_IDENTIFIER, None))
self.assertIsNotNone(getattr(test_pb2, SERVER_IDENTIFIER, None))
self.assertIsNotNone(getattr(test_pb2, STUB_IDENTIFIER, None))
self.assertIsNotNone(getattr(test_pb2, SERVER_FACTORY_IDENTIFIER, None))
self.assertIsNotNone(getattr(test_pb2, STUB_FACTORY_IDENTIFIER, None))
def testUpDown(self):
import test_pb2
servicer, stub, server = _CreateService(test_pb2, DOES_NOT_MATTER_DELAY)
request = test_pb2.SimpleRequest(response_size=13)
with server, stub:
pass
def testUnaryCall(self):
import test_pb2 # pylint: disable=g-import-not-at-top
servicer, stub, server = _CreateService(test_pb2, NO_DELAY)
request = test_pb2.SimpleRequest(response_size=13)
with server, stub:
response = stub.UnaryCall(request, NORMAL_TIMEOUT)
expected_response = servicer.UnaryCall(request, None)
self.assertEqual(expected_response, response)
def testUnaryCallAsync(self):
import test_pb2 # pylint: disable=g-import-not-at-top
servicer, stub, server = _CreateService(test_pb2, LONG_DELAY)
request = test_pb2.SimpleRequest(response_size=13)
with server, stub:
start_time = time.clock()
response_future = stub.UnaryCall.async(request, LONG_TIMEOUT)
# Check that we didn't block on the asynchronous call.
self.assertGreater(LONG_DELAY, time.clock() - start_time)
response = response_future.result()
expected_response = servicer.UnaryCall(request, None)
self.assertEqual(expected_response, response)
def testUnaryCallAsyncExpired(self):
import test_pb2 # pylint: disable=g-import-not-at-top
# set the timeout super low...
servicer, stub, server = _CreateService(test_pb2,
delay=DOES_NOT_MATTER_DELAY)
request = test_pb2.SimpleRequest(response_size=13)
with server, stub:
with servicer.pause():
response_future = stub.UnaryCall.async(request, SHORT_TIMEOUT)
with self.assertRaises(exceptions.ExpirationError):
response_future.result()
def testUnaryCallAsyncCancelled(self):
import test_pb2 # pylint: disable=g-import-not-at-top
servicer, stub, server = _CreateService(test_pb2, DOES_NOT_MATTER_DELAY)
request = test_pb2.SimpleRequest(response_size=13)
with server, stub:
with servicer.pause():
response_future = stub.UnaryCall.async(request, 1)
response_future.cancel()
self.assertTrue(response_future.cancelled())
def testUnaryCallAsyncFailed(self):
import test_pb2 # pylint: disable=g-import-not-at-top
servicer, stub, server = _CreateService(test_pb2, DOES_NOT_MATTER_DELAY)
request = test_pb2.SimpleRequest(response_size=13)
with server, stub:
with servicer.fail():
response_future = stub.UnaryCall.async(request, NORMAL_TIMEOUT)
self.assertIsNotNone(response_future.exception())
def testStreamingOutputCall(self):
import test_pb2 # pylint: disable=g-import-not-at-top
servicer, stub, server = _CreateService(test_pb2, NO_DELAY)
request = StreamingOutputRequest(test_pb2)
with server, stub:
responses = stub.StreamingOutputCall(request, NORMAL_TIMEOUT)
expected_responses = servicer.StreamingOutputCall(request, None)
for check in itertools.izip_longest(expected_responses, responses):
expected_response, response = check
self.assertEqual(expected_response, response)
def testStreamingOutputCallExpired(self):
import test_pb2 # pylint: disable=g-import-not-at-top
servicer, stub, server = _CreateService(test_pb2, DOES_NOT_MATTER_DELAY)
request = StreamingOutputRequest(test_pb2)
with server, stub:
with servicer.pause():
responses = stub.StreamingOutputCall(request, SHORT_TIMEOUT)
with self.assertRaises(exceptions.ExpirationError):
list(responses)
def testStreamingOutputCallCancelled(self):
import test_pb2 # pylint: disable=g-import-not-at-top
unused_servicer, stub, server = _CreateService(test_pb2,
DOES_NOT_MATTER_DELAY)
request = StreamingOutputRequest(test_pb2)
with server, stub:
responses = stub.StreamingOutputCall(request, SHORT_TIMEOUT)
next(responses)
responses.cancel()
with self.assertRaises(future.CancelledError):
next(responses)
@unittest.skip('TODO(atash,nathaniel): figure out why this times out '
'instead of raising the proper error.')
def testStreamingOutputCallFailed(self):
import test_pb2 # pylint: disable=g-import-not-at-top
servicer, stub, server = _CreateService(test_pb2, DOES_NOT_MATTER_DELAY)
request = StreamingOutputRequest(test_pb2)
with server, stub:
with servicer.fail():
responses = stub.StreamingOutputCall(request, 1)
self.assertIsNotNone(responses)
with self.assertRaises(exceptions.ServicerError):
next(responses)
def testStreamingInputCall(self):
import test_pb2 # pylint: disable=g-import-not-at-top
servicer, stub, server = _CreateService(test_pb2, NO_DELAY)
with server, stub:
response = stub.StreamingInputCall(StreamingInputRequest(test_pb2),
NORMAL_TIMEOUT)
expected_response = servicer.StreamingInputCall(
StreamingInputRequest(test_pb2), None)
self.assertEqual(expected_response, response)
def testStreamingInputCallAsync(self):
import test_pb2 # pylint: disable=g-import-not-at-top
servicer, stub, server = _CreateService(
test_pb2, LONG_DELAY)
with server, stub:
start_time = time.clock()
response_future = stub.StreamingInputCall.async(
StreamingInputRequest(test_pb2), LONG_TIMEOUT)
self.assertGreater(LONG_DELAY, time.clock() - start_time)
response = response_future.result()
expected_response = servicer.StreamingInputCall(
StreamingInputRequest(test_pb2), None)
self.assertEqual(expected_response, response)
def testStreamingInputCallAsyncExpired(self):
import test_pb2 # pylint: disable=g-import-not-at-top
# set the timeout super low...
servicer, stub, server = _CreateService(test_pb2, DOES_NOT_MATTER_DELAY)
with server, stub:
with servicer.pause():
response_future = stub.StreamingInputCall.async(
StreamingInputRequest(test_pb2), SHORT_TIMEOUT)
with self.assertRaises(exceptions.ExpirationError):
response_future.result()
self.assertIsInstance(
response_future.exception(), exceptions.ExpirationError)
def testStreamingInputCallAsyncCancelled(self):
import test_pb2 # pylint: disable=g-import-not-at-top
servicer, stub, server = _CreateService(test_pb2, DOES_NOT_MATTER_DELAY)
with server, stub:
with servicer.pause():
response_future = stub.StreamingInputCall.async(
StreamingInputRequest(test_pb2), NORMAL_TIMEOUT)
response_future.cancel()
self.assertTrue(response_future.cancelled())
with self.assertRaises(future.CancelledError):
response_future.result()
def testStreamingInputCallAsyncFailed(self):
import test_pb2 # pylint: disable=g-import-not-at-top
servicer, stub, server = _CreateService(test_pb2, DOES_NOT_MATTER_DELAY)
with server, stub:
with servicer.fail():
response_future = stub.StreamingInputCall.async(
StreamingInputRequest(test_pb2), SHORT_TIMEOUT)
self.assertIsNotNone(response_future.exception())
def testFullDuplexCall(self):
import test_pb2 # pylint: disable=g-import-not-at-top
servicer, stub, server = _CreateService(test_pb2, NO_DELAY)
with server, stub:
responses = stub.FullDuplexCall(FullDuplexRequest(test_pb2),
NORMAL_TIMEOUT)
expected_responses = servicer.FullDuplexCall(FullDuplexRequest(test_pb2),
None)
for check in itertools.izip_longest(expected_responses, responses):
expected_response, response = check
self.assertEqual(expected_response, response)
def testFullDuplexCallExpired(self):
import test_pb2 # pylint: disable=g-import-not-at-top
servicer, stub, server = _CreateService(test_pb2, DOES_NOT_MATTER_DELAY)
request = FullDuplexRequest(test_pb2)
with server, stub:
with servicer.pause():
responses = stub.FullDuplexCall(request, SHORT_TIMEOUT)
with self.assertRaises(exceptions.ExpirationError):
list(responses)
def testFullDuplexCallCancelled(self):
import test_pb2 # pylint: disable=g-import-not-at-top
unused_servicer, stub, server = _CreateService(test_pb2, NO_DELAY)
with server, stub:
request = FullDuplexRequest(test_pb2)
responses = stub.FullDuplexCall(request, NORMAL_TIMEOUT)
next(responses)
responses.cancel()
with self.assertRaises(future.CancelledError):
next(responses)
@unittest.skip('TODO(atash,nathaniel): figure out why this hangs forever '
'and fix.')
def testFullDuplexCallFailed(self):
import test_pb2 # pylint: disable=g-import-not-at-top
servicer, stub, server = _CreateService(test_pb2, DOES_NOT_MATTER_DELAY)
request = FullDuplexRequest(test_pb2)
with server, stub:
with servicer.fail():
responses = stub.FullDuplexCall(request, NORMAL_TIMEOUT)
self.assertIsNotNone(responses)
with self.assertRaises(exceptions.ServicerError):
next(responses)
def testHalfDuplexCall(self):
import test_pb2 # pylint: disable=g-import-not-at-top
servicer, stub, server = _CreateService(test_pb2, NO_DELAY)
def HalfDuplexRequest():
request = test_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
request = test_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
with server, stub:
responses = stub.HalfDuplexCall(HalfDuplexRequest(), NORMAL_TIMEOUT)
expected_responses = servicer.HalfDuplexCall(HalfDuplexRequest(), None)
for check in itertools.izip_longest(expected_responses, responses):
expected_response, response = check
self.assertEqual(expected_response, response)
def testHalfDuplexCallWedged(self):
import test_pb2 # pylint: disable=g-import-not-at-top
_, stub, server = _CreateService(test_pb2, NO_DELAY)
wait_flag = [False]
@contextlib.contextmanager
def wait(): # pylint: disable=invalid-name
# Where's Python 3's 'nonlocal' statement when you need it?
wait_flag[0] = True
yield
wait_flag[0] = False
def HalfDuplexRequest():
request = test_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
while wait_flag[0]:
time.sleep(0.1)
with server, stub:
with wait():
responses = stub.HalfDuplexCall(HalfDuplexRequest(), NORMAL_TIMEOUT)
# half-duplex waits for the client to send all info
with self.assertRaises(exceptions.ExpirationError):
next(responses)
if __name__ == '__main__':
os.chdir(os.path.dirname(sys.argv[0]))
parser = argparse.ArgumentParser(
description='Run Python compiler plugin test.')
parser.add_argument(
'--build_mode', dest='build_mode', type=str, default='dbg',
help='The build mode of the targets to test, e.g. "dbg", "opt", "asan", '
'etc.')
parser.add_argument('--port', dest='port', type=int, default=0)
args, remainder = parser.parse_known_args()
_build_mode = args.build_mode
_port = args.port
sys.argv[1:] = remainder
unittest.main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Metrics API classes for internal use only.
Users should use apache_beam.metrics.metric package instead.
For internal use only. No backwards compatibility guarantees.
"""
# pytype: skip-file
# mypy: disallow-untyped-defs
import datetime
import logging
import threading
import time
from typing import TYPE_CHECKING
from typing import Dict
from typing import Optional
from typing import Type
from typing import Union
from apache_beam.internal.metrics.cells import HistogramCellFactory
from apache_beam.metrics import monitoring_infos
from apache_beam.metrics.execution import MetricUpdater
from apache_beam.metrics.metric import Metrics as UserMetrics
from apache_beam.metrics.metricbase import Histogram
from apache_beam.metrics.metricbase import MetricName
if TYPE_CHECKING:
from apache_beam.metrics.cells import MetricCell
from apache_beam.metrics.cells import MetricCellFactory
from apache_beam.utils.histogram import BucketType
# Protect against environments where bigquery library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apitools.base.py.exceptions import HttpError
except ImportError:
pass
__all__ = ['Metrics']
_LOGGER = logging.getLogger(__name__)
class Metrics(object):
@staticmethod
def counter(urn, labels=None, process_wide=False):
# type: (str, Optional[Dict[str, str]], bool) -> UserMetrics.DelegatingCounter
"""Obtains or creates a Counter metric.
Args:
urn: URN to populate on a MonitoringInfo, when sending to RunnerHarness.
labels: Labels to populate on a MonitoringInfo
process_wide: Whether or not the metric is specific to the current bundle
or should be calculated for the entire process.
Returns:
A Counter object.
"""
return UserMetrics.DelegatingCounter(
MetricName(namespace=None, name=None, urn=urn, labels=labels),
process_wide=process_wide)
@staticmethod
def histogram(namespace, name, bucket_type, logger=None):
# type: (Union[Type, str], str, BucketType, Optional[MetricLogger]) -> Metrics.DelegatingHistogram
"""Obtains or creates a Histogram metric.
Args:
namespace: A class or string that gives the namespace to a metric
name: A string that gives a unique name to a metric
bucket_type: A type of bucket used in a histogram. A subclass of
apache_beam.utils.histogram.BucketType
logger: MetricLogger for logging locally aggregated metric
Returns:
A Histogram object.
"""
namespace = UserMetrics.get_namespace(namespace)
return Metrics.DelegatingHistogram(
MetricName(namespace, name), bucket_type, logger)
class DelegatingHistogram(Histogram):
"""Metrics Histogram that Delegates functionality to MetricsEnvironment."""
def __init__(self, metric_name, bucket_type, logger):
# type: (MetricName, BucketType, Optional[MetricLogger]) -> None
super(Metrics.DelegatingHistogram, self).__init__(metric_name)
self.metric_name = metric_name
self.cell_type = HistogramCellFactory(bucket_type)
self.logger = logger
self.updater = MetricUpdater(self.cell_type, self.metric_name)
def update(self, value):
# type: (object) -> None
self.updater(value)
if self.logger:
self.logger.update(self.cell_type, self.metric_name, value)
class MetricLogger(object):
"""Simple object to locally aggregate and log metrics.
This class is experimental. No backwards-compatibility guarantees.
"""
def __init__(self):
# type: () -> None
self._metric = dict() # type: Dict[MetricName, MetricCell]
self._lock = threading.Lock()
self._last_logging_millis = int(time.time() * 1000)
self.minimum_logging_frequency_msec = 180000
def update(self, cell_type, metric_name, value):
# type: (Union[Type[MetricCell], MetricCellFactory], MetricName, object) -> None
cell = self._get_metric_cell(cell_type, metric_name)
cell.update(value)
def _get_metric_cell(self, cell_type, metric_name):
# type: (Union[Type[MetricCell], MetricCellFactory], MetricName) -> MetricCell
with self._lock:
if metric_name not in self._metric:
self._metric[metric_name] = cell_type()
return self._metric[metric_name]
def log_metrics(self, reset_after_logging=False):
# type: (bool) -> None
if self._lock.acquire(False):
try:
current_millis = int(time.time() * 1000)
if ((current_millis - self._last_logging_millis) >
self.minimum_logging_frequency_msec):
logging_metric_info = [
'[Locally aggregated metrics since %s]' %
datetime.datetime.fromtimestamp(
self._last_logging_millis / 1000.0)
]
for name, cell in self._metric.items():
logging_metric_info.append('%s: %s' % (name, cell.get_cumulative()))
_LOGGER.info('\n'.join(logging_metric_info))
if reset_after_logging:
self._metric = dict()
self._last_logging_millis = current_millis
finally:
self._lock.release()
class ServiceCallMetric(object):
"""Metric class which records Service API call metrics.
This class will capture a request count metric for the specified
request_count_urn and base_labels.
When call() is invoked the status must be provided, which will
be converted to a canonical GCP status code, if possible.
TODO(ajamato): Add Request latency metric.
"""
def __init__(self, request_count_urn, base_labels=None):
# type: (str, Optional[Dict[str, str]]) -> None
self.base_labels = base_labels if base_labels else {}
self.request_count_urn = request_count_urn
def call(self, status):
# type: (Union[int, str, HttpError]) -> None
"""Record the status of the call into appropriate metrics."""
canonical_status = self.convert_to_canonical_status_string(status)
additional_labels = {monitoring_infos.STATUS_LABEL: canonical_status}
labels = dict(
list(self.base_labels.items()) + list(additional_labels.items()))
request_counter = Metrics.counter(
urn=self.request_count_urn, labels=labels, process_wide=True)
request_counter.inc()
  def convert_to_canonical_status_string(self, status):
    # type: (Union[int, str, HttpError]) -> str
    """Converts a status to a canonical GCP status code string."""
http_status_code = None
if isinstance(status, int):
http_status_code = status
elif isinstance(status, str):
return status.lower()
elif isinstance(status, HttpError):
http_status_code = int(status.status_code)
http_to_canonical_gcp_status = {
200: 'ok',
400: 'out_of_range',
401: 'unauthenticated',
403: 'permission_denied',
404: 'not_found',
409: 'already_exists',
429: 'resource_exhausted',
499: 'cancelled',
500: 'internal',
501: 'not_implemented',
503: 'unavailable',
504: 'deadline_exceeded'
}
if (http_status_code is not None and
http_status_code in http_to_canonical_gcp_status):
return http_to_canonical_gcp_status[http_status_code]
return str(http_status_code)
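# Illustrative sketch (not part of the original module): how a caller might
# record the outcome of a service call. The URN and labels below are
# hypothetical placeholders rather than real monitoring_infos constants.
def _example_service_call_metric():
  # type: () -> None
  metric = ServiceCallMetric(
      request_count_urn='example:request_count',  # hypothetical URN
      base_labels={'service': 'ExampleService'})
  # An integer HTTP status maps to a canonical GCP status string ...
  assert metric.convert_to_canonical_status_string(404) == 'not_found'
  # ... and call() bumps a process-wide counter labelled with that status.
  metric.call(200)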
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Various HBase helpers
"""
import copy
import datetime
import json
import bson.json_util
from happybase.hbase import ttypes
import six
from ceilometer.i18n import _
from ceilometer.openstack.common import log
from ceilometer import utils
LOG = log.getLogger(__name__)
EVENT_TRAIT_TYPES = {'none': 0, 'string': 1, 'integer': 2, 'float': 3,
'datetime': 4}
OP_SIGN = {'eq': '=', 'lt': '<', 'le': '<=', 'ne': '!=', 'gt': '>', 'ge': '>='}
# We need this additional dictionary because we have reverted timestamp in
# row-keys for stored metrics
OP_SIGN_REV = {'eq': '=', 'lt': '>', 'le': '>=', 'ne': '!=', 'gt': '<',
'ge': '<='}
def _QualifierFilter(op, qualifier):
return "QualifierFilter (%s, 'binaryprefix:m_%s')" % (op, qualifier)
def timestamp(dt, reverse=True):
    """Timestamp is the count of microseconds since the start of the epoch.
    If reverse=True the timestamp is reversed. This technique is used in HBase
    rowkey design when time-period queries are required: because rows are
    sorted lexicographically, reversing the timestamp controls whether the
    'oldest' entries or the newest ones (reversed timestamp case) end up at
    the top of the table.
    :param dt: datetime which is translated to a timestamp
    :param reverse: a boolean parameter for a reversed or straight count of
        the timestamp in microseconds
    :return: count or reversed count of microseconds since the start of epoch
    """
epoch = datetime.datetime(1970, 1, 1)
td = dt - epoch
ts = td.microseconds + td.seconds * 1000000 + td.days * 86400000000
return 0x7fffffffffffffff - ts if reverse else ts
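# Illustrative sketch (not part of the original module): reversing the
# timestamp makes newer datetimes map to smaller values, so the newest rows
# sort first when rowkeys are compared lexicographically.
def _example_reversed_timestamp():
    earlier = datetime.datetime(2015, 1, 1)
    later = datetime.datetime(2015, 1, 2)
    assert timestamp(earlier, reverse=False) < timestamp(later, reverse=False)
    assert timestamp(earlier, reverse=True) > timestamp(later, reverse=True)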
def make_events_query_from_filter(event_filter):
"""Return start and stop row for filtering and a query.
Query is based on the selected parameter.
:param event_filter: storage.EventFilter object.
"""
start = "%s" % (timestamp(event_filter.start_timestamp, reverse=False)
if event_filter.start_timestamp else "")
stop = "%s" % (timestamp(event_filter.end_timestamp, reverse=False)
if event_filter.end_timestamp else "")
kwargs = {'event_type': event_filter.event_type,
'event_id': event_filter.message_id}
res_q = make_query(**kwargs)
if event_filter.traits_filter:
for trait_filter in event_filter.traits_filter:
q_trait = make_query(trait_query=True, **trait_filter)
if q_trait:
if res_q:
res_q += " AND " + q_trait
else:
res_q = q_trait
return res_q, start, stop
def make_timestamp_query(func, start=None, start_op=None, end=None,
end_op=None, bounds_only=False, **kwargs):
"""Return a filter start and stop row for filtering and a query.
Query is based on the fact that CF-name is 'rts'.
:param start: Optional start timestamp
:param start_op: Optional start timestamp operator, like gt, ge
:param end: Optional end timestamp
:param end_op: Optional end timestamp operator, like lt, le
    :param bounds_only: if True then the query will not be returned
    :param func: a function that provides the row format
    :param kwargs: kwargs for func
"""
# We don't need to dump here because get_start_end_rts returns strings
rts_start, rts_end = get_start_end_rts(start, end)
start_row, end_row = func(rts_start, rts_end, **kwargs)
if bounds_only:
return start_row, end_row
q = []
start_op = start_op or 'ge'
end_op = end_op or 'lt'
if rts_start:
q.append("SingleColumnValueFilter ('f', 'rts', %s, 'binary:%s')" %
(OP_SIGN_REV[start_op], rts_start))
if rts_end:
q.append("SingleColumnValueFilter ('f', 'rts', %s, 'binary:%s')" %
(OP_SIGN_REV[end_op], rts_end))
res_q = None
if len(q):
res_q = " AND ".join(q)
return start_row, end_row, res_q
def get_start_end_rts(start, end):
rts_start = str(timestamp(start)) if start else ""
rts_end = str(timestamp(end)) if end else ""
return rts_start, rts_end
def make_query(metaquery=None, trait_query=None, **kwargs):
"""Return a filter query string based on the selected parameters.
:param metaquery: optional metaquery dict
:param trait_query: optional boolean, for trait_query from kwargs
:param kwargs: key-value pairs to filter on. Key should be a real
column name in db
"""
q = []
res_q = None
# Query for traits differs from others. It is constructed with
# SingleColumnValueFilter with the possibility to choose comparison
# operator
if trait_query:
trait_name = kwargs.pop('key')
op = kwargs.pop('op', 'eq')
for k, v in kwargs.items():
if v is not None:
res_q = ("SingleColumnValueFilter "
"('f', '%s', %s, 'binary:%s', true, true)" %
(prepare_key(trait_name, EVENT_TRAIT_TYPES[k]),
OP_SIGN[op], dump(v)))
return res_q
# Note: we use extended constructor for SingleColumnValueFilter here.
# It is explicitly specified that entry should not be returned if CF is not
# found in table.
for key, value in sorted(kwargs.items()):
if value is not None:
if key == 'source':
q.append("SingleColumnValueFilter "
"('f', 's_%s', =, 'binary:%s', true, true)" %
(value, dump('1')))
elif key == 'trait_type':
q.append("ColumnPrefixFilter('%s')" % value)
elif key == 'event_id':
q.append("RowFilter ( = , 'regexstring:\d*:%s')" % value)
else:
q.append("SingleColumnValueFilter "
"('f', '%s', =, 'binary:%s', true, true)" %
(quote(key), dump(value)))
res_q = None
if len(q):
res_q = " AND ".join(q)
if metaquery:
meta_q = []
for k, v in metaquery.items():
meta_q.append(
"SingleColumnValueFilter ('f', '%s', =, 'binary:%s', "
"true, true)"
% ('r_' + k, dump(v)))
meta_q = " AND ".join(meta_q)
# join query and metaquery
if res_q is not None:
res_q += " AND " + meta_q
else:
res_q = meta_q # metaquery only
return res_q
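# Illustrative sketch (not part of the original module): the filter string
# produced for a couple of plain column constraints; the column values here
# are hypothetical.
def _example_make_query():
    q = make_query(user_id='u-1', source='src-1')
    # The result is two SingleColumnValueFilter clauses joined with " AND ":
    # one on the source column 's_src-1' (source uses the s_ prefix) and one
    # on 'user_id' with the JSON-dumped value.
    return q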
def get_meter_columns(metaquery=None, need_timestamp=False, **kwargs):
"""Return a list of required columns in meter table to be scanned.
    The scan's 'columns' option should be used to determine which columns we
    are interested in. But if we want to use 'filter' and 'columns' together,
    we have to include the columns we are filtering by in the columns list.
    For example, if we make a scan with the filter
    "SingleColumnValueFilter ('f', 's_test-1', =, 'binary:\"1\"')"
    and columns ['f:rts'], the output will always be empty, because only 'rts'
    is returned and the filter is applied to that data, so 's_test-1' cannot
    be found.
    To make this request correct it should be fixed as follows:
    filter = "SingleColumnValueFilter ('f', 's_test-1', =, 'binary:\"1\"')",
    columns = ['f:rts', 'f:s_test-1']
:param metaquery: optional metaquery dict
:param need_timestamp: flag, which defines the need for timestamp columns
:param kwargs: key-value pairs to filter on. Key should be a real
column name in db
"""
columns = ['f:message', 'f:recorded_at']
columns.extend("f:%s" % k for k, v in kwargs.items()
if v is not None)
if metaquery:
columns.extend("f:r_%s" % k for k, v in metaquery.items()
if v is not None)
source = kwargs.get('source')
if source:
columns.append("f:s_%s" % source)
if need_timestamp:
columns.extend(['f:rts', 'f:timestamp'])
return columns
def make_sample_query_from_filter(sample_filter, require_meter=True):
"""Return a query dictionary based on the settings in the filter.
:param sample_filter: SampleFilter instance
:param require_meter: If true and the filter does not have a meter,
raise an error.
"""
meter = sample_filter.meter
if not meter and require_meter:
raise RuntimeError('Missing required meter specifier')
start_row, end_row, ts_query = make_timestamp_query(
make_general_rowkey_scan,
start=sample_filter.start_timestamp,
start_op=sample_filter.start_timestamp_op,
end=sample_filter.end_timestamp,
end_op=sample_filter.end_timestamp_op,
some_id=meter)
kwargs = dict(user_id=sample_filter.user,
project_id=sample_filter.project,
counter_name=meter,
resource_id=sample_filter.resource,
source=sample_filter.source,
message_id=sample_filter.message_id)
q = make_query(metaquery=sample_filter.metaquery, **kwargs)
if q:
res_q = q + " AND " + ts_query if ts_query else q
else:
res_q = ts_query if ts_query else None
need_timestamp = (sample_filter.start_timestamp or
sample_filter.end_timestamp) is not None
columns = get_meter_columns(metaquery=sample_filter.metaquery,
need_timestamp=need_timestamp, **kwargs)
return res_q, start_row, end_row, columns
def make_meter_query_for_resource(start_timestamp, start_timestamp_op,
end_timestamp, end_timestamp_op, source,
query=None):
"""This method is used when Resource table should be filtered by meters.
In this method we are looking into all qualifiers with m_ prefix.
:param start_timestamp: meter's timestamp start range.
:param start_timestamp_op: meter's start time operator, like ge, gt.
:param end_timestamp: meter's timestamp end range.
:param end_timestamp_op: meter's end time operator, like lt, le.
:param source: source filter.
:param query: a query string to concatenate with.
"""
start_rts, end_rts = get_start_end_rts(start_timestamp, end_timestamp)
mq = []
start_op = start_timestamp_op or 'ge'
end_op = end_timestamp_op or 'lt'
if start_rts:
filter_value = (start_rts + ':' + quote(source) if source
else start_rts)
mq.append(_QualifierFilter(OP_SIGN_REV[start_op], filter_value))
if end_rts:
filter_value = (end_rts + ':' + quote(source) if source
else end_rts)
mq.append(_QualifierFilter(OP_SIGN_REV[end_op], filter_value))
if mq:
meter_q = " AND ".join(mq)
        # If there is filtering on a time range we need to require that
        # qualifiers start with m_. Otherwise, with e.g.
        # QualifierFilter (>=, 'binaryprefix:m_9222030811134775808'),
        # the qualifier 's_test' satisfies the filter and will be returned.
meter_q = _QualifierFilter("=", '') + " AND " + meter_q
query = meter_q if not query else query + " AND " + meter_q
return query
def make_general_rowkey_scan(rts_start=None, rts_end=None, some_id=None):
    """Build a rowkey scan range for a filter on some_id.
    Without start and end, start_row = some_id while end_row = some_id + MAX_BYTE.
    """
if some_id is None:
return None, None
if not rts_start:
        # NOTE(idegtiarov): Here we cannot use a chr greater than 122 because
        # chr >= 123 will be quoted and the character will be turned into a
        # sequence starting with '%' (chr(37)), which is lexicographically
        # less than the chr of a digit.
rts_start = chr(122)
end_row = prepare_key(some_id, rts_start)
start_row = prepare_key(some_id, rts_end)
return start_row, end_row
def prepare_key(*args):
"""Prepares names for rows and columns with correct separator.
    :param args: strings or numbers that we want to construct the key from
:return: key with quoted args that are separated with character ":"
"""
key_quote = []
for key in args:
if isinstance(key, six.integer_types):
key = str(key)
key_quote.append(quote(key))
return ":".join(key_quote)
def timestamp_from_record_tuple(record):
"""Extract timestamp from HBase tuple record."""
return record[0]['timestamp']
def resource_id_from_record_tuple(record):
"""Extract resource_id from HBase tuple record."""
return record[0]['resource_id']
def deserialize_entry(entry, get_raw_meta=True):
    """Return a tuple of flatten_result, sources, meters and metadata.
    flatten_result is a dict of simple structures such as 'resource_id': 1;
    sources/meters are the lists of sources and meters correspondingly;
    metadata is the metadata dict. This dict may be returned flattened if
    get_raw_meta is False.
:param entry: entry from HBase, without row name and timestamp
:param get_raw_meta: If true then raw metadata will be returned,
if False metadata will be constructed from
'f:r_metadata.' fields
"""
flatten_result = {}
sources = []
meters = []
metadata_flattened = {}
for k, v in entry.items():
if k.startswith('f:s_'):
sources.append(decode_unicode(k[4:]))
elif k.startswith('f:r_metadata.'):
qualifier = decode_unicode(k[len('f:r_metadata.'):])
metadata_flattened[qualifier] = load(v)
elif k.startswith("f:m_"):
meter = ([unquote(i) for i in k[4:].split(':')], load(v))
meters.append(meter)
else:
if ':' in k[2:]:
key = tuple([unquote(i) for i in k[2:].split(':')])
else:
key = unquote(k[2:])
flatten_result[key] = load(v)
if get_raw_meta:
metadata = flatten_result.get('resource_metadata', {})
else:
metadata = metadata_flattened
return flatten_result, sources, meters, metadata
def serialize_entry(data=None, **kwargs):
"""Return a dict that is ready to be stored to HBase
:param data: dict to be serialized
:param kwargs: additional args
"""
data = data or {}
entry_dict = copy.copy(data)
entry_dict.update(**kwargs)
result = {}
for k, v in entry_dict.items():
if k == 'source':
# user, project and resource tables may contain several sources.
# Besides, resource table may contain several meters.
# To make insertion safe we need to store all meters and sources in
# a separate cell. For this purpose s_ and m_ prefixes are
# introduced.
qualifier = encode_unicode('f:s_%s' % v)
result[qualifier] = dump('1')
elif k == 'meter':
for meter, ts in v.items():
qualifier = encode_unicode('f:m_%s' % meter)
result[qualifier] = dump(ts)
elif k == 'resource_metadata':
            # Keep raw metadata as well as a flattened copy to provide
            # compatibility with API v2. It will be flattened differently at
            # the API level, but we also need the flattened form for quick
            # filtering.
flattened_meta = dump_metadata(v)
for key, m in flattened_meta.items():
metadata_qualifier = encode_unicode('f:r_metadata.' + key)
result[metadata_qualifier] = dump(m)
result['f:resource_metadata'] = dump(v)
else:
result['f:' + quote(k, ':')] = dump(v)
return result
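# Illustrative sketch (not part of the original module): a serialize /
# deserialize round trip for a minimal entry; the field values are
# hypothetical.
def _example_serialize_roundtrip():
    stored = serialize_entry({'resource_id': 'r-1'}, source='openstack')
    # 'source' becomes a dedicated 'f:s_openstack' cell, while plain keys
    # are JSON-dumped under 'f:<key>'.
    flat, sources, meters, metadata = deserialize_entry(stored)
    assert flat['resource_id'] == 'r-1'
    assert sources == ['openstack']
    assert meters == [] and metadata == {}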
def dump_metadata(meta):
resource_metadata = {}
for key, v in utils.dict_to_keyval(meta):
resource_metadata[key] = v
return resource_metadata
def dump(data):
return json.dumps(data, default=bson.json_util.default)
def load(data):
return json.loads(data, object_hook=object_hook)
def encode_unicode(data):
return data.encode('utf-8') if isinstance(data, six.text_type) else data
def decode_unicode(data):
return data.decode('utf-8') if isinstance(data, six.string_types) else data
# We don't want tzinfo in the decoded JSON. This object_hook overrides
# json_util.object_hook for $date.
def object_hook(dct):
if "$date" in dct:
dt = bson.json_util.object_hook(dct)
return dt.replace(tzinfo=None)
return bson.json_util.object_hook(dct)
def create_tables(conn, tables, column_families):
for table in tables:
try:
conn.create_table(table, column_families)
except ttypes.AlreadyExists:
if conn.table_prefix:
table = ("%(table_prefix)s"
"%(separator)s"
"%(table_name)s" %
dict(table_prefix=conn.table_prefix,
separator=conn.table_prefix_separator,
table_name=table))
LOG.warn(_("Cannot create table %(table_name)s "
"it already exists. Ignoring error")
% {'table_name': table})
def quote(s, *args):
    """Return a quoted string even if it is a unicode one.
    :param s: string that should be quoted
    :param args: any symbols that should stay unquoted
"""
s_en = s.encode('utf8')
return six.moves.urllib.parse.quote(s_en, *args)
def unquote(s):
"""Return unquoted and decoded string.
:param s: string that should be unquoted
"""
s_de = six.moves.urllib.parse.unquote(s)
return s_de.decode('utf8')
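# Illustrative sketch (not part of the original module): quote() percent-
# encodes unicode and the ':' separator so values are safe inside rowkeys,
# and unquote() reverses it.
def _example_quote_roundtrip():
    original = u'r\xe9source:1'
    quoted = quote(original)
    assert ':' not in quoted  # ':' is percent-encoded by default
    assert unquote(quoted) == original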
|
|
#!/usr/bin/env python3
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import itertools
import json
import os.path
import pprint
import re
import sys
if sys.version_info.major == 2:
from itertools import izip_longest as zip_longest
else:
from itertools import zip_longest
from json_parse import OrderedDict
# This file is a peer to json_schema.py. Each of these files understands a
# certain format describing APIs (either JSON or IDL), reads files written
# in that format into memory, and emits them as a Python array of objects
# corresponding to those APIs, where the objects are formatted in a way that
# the JSON schema compiler understands. compiler.py drives both idl_schema.py
# and json_schema.py.
# idl_parser expects to be able to import certain files in its directory,
# so let's set things up the way it wants.
_idl_generators_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.pardir, os.pardir, 'ppapi', 'generators')
if _idl_generators_path in sys.path:
import idl_parser
else:
sys.path.insert(0, _idl_generators_path)
try:
import idl_parser
finally:
sys.path.pop(0)
def ProcessComment(comment):
'''
Convert a comment into a parent comment and a list of parameter comments.
Function comments are of the form:
Function documentation. May contain HTML and multiple lines.
|arg1_name|: Description of arg1. Use <var>argument</var> to refer
to other arguments.
|arg2_name|: Description of arg2...
Newlines are removed, and leading and trailing whitespace is stripped.
Args:
comment: The string from a Comment node.
Returns: A tuple that looks like:
(
"The processed comment, minus all |parameter| mentions and jsexterns.",
"Any block wrapped in <jsexterns></jsexterns>.",
{
'parameter_name_1': "The comment that followed |parameter_name_1|:",
...
}
)
'''
jsexterns = None
match = re.search('<jsexterns>(.*)</jsexterns>', comment, re.DOTALL)
if match:
jsexterns = match.group(1).strip()
comment = comment[:match.start()] + comment[match.end():]
def add_paragraphs(content):
paragraphs = content.split('\n\n')
if len(paragraphs) < 2:
return content
return '<p>' + '</p><p>'.join(p.strip() for p in paragraphs) + '</p>'
# Find all the parameter comments of the form '|name|: comment'.
parameter_starts = list(re.finditer(r' *\|([^|]*)\| *: *', comment))
  # Get the parent comment (everything before the first parameter comment).
first_parameter_location = (parameter_starts[0].start()
if parameter_starts else len(comment))
parent_comment = (add_paragraphs(comment[:first_parameter_location].strip())
.replace('\n', ''))
params = OrderedDict()
for (cur_param, next_param) in zip_longest(parameter_starts,
parameter_starts[1:]):
param_name = cur_param.group(1)
# A parameter's comment goes from the end of its introduction to the
# beginning of the next parameter's introduction.
param_comment_start = cur_param.end()
param_comment_end = next_param.start() if next_param else len(comment)
params[param_name] = (
add_paragraphs(comment[param_comment_start:param_comment_end].strip())
.replace('\n', ''))
return (parent_comment, jsexterns, params)
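# Illustrative sketch (not part of the original module): how a typical IDL
# comment is split into a parent description and per-parameter comments. The
# comment text below is hypothetical.
def _ExampleProcessComment():
  parent, jsexterns, params = ProcessComment(
      'Opens a widget.\n\n|widgetId|: The id of the widget to open.')
  # parent is 'Opens a widget.', jsexterns is None because there is no
  # <jsexterns> block, and params maps 'widgetId' to its description.
  return parent, jsexterns, params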
class Callspec(object):
'''
Given a Callspec node representing an IDL function declaration, converts into
a tuple:
(name, list of function parameters, return type, async return)
'''
def __init__(self, callspec_node, comment):
self.node = callspec_node
self.comment = comment
def process(self, callbacks):
parameters = []
return_type = None
returns_async = None
if self.node.GetProperty('TYPEREF') not in ('void', None):
return_type = Typeref(self.node.GetProperty('TYPEREF'),
self.node.parent,
{'name': self.node.GetName()}).process(callbacks)
# The IDL parser doesn't allow specifying return types as optional.
# Instead we infer any object return values to be optional.
# TODO(asargent): fix the IDL parser to support optional return types.
if return_type.get('type') == 'object' or '$ref' in return_type:
return_type['optional'] = True
for node in self.node.GetChildren():
parameter = Param(node).process(callbacks)
if parameter['name'] in self.comment:
parameter['description'] = self.comment[parameter['name']]
parameters.append(parameter)
# For promise supporting functions, pull off the callback from the final
# parameter and put it into the separate returns async field.
if self.node.GetProperty('supportsPromises'):
assert len(parameters) > 0, (
'Callspec "%s" is marked as supportsPromises '
'but has no existing callback defined.' % self.node.GetName())
returns_async = parameters.pop()
assert returns_async.get('type') == 'function', (
'Callspec "%s" is marked as supportsPromises '
'but the final parameter is not a function.' % self.node.GetName())
# The returns_async field is inherently a function, so doesn't need type
# specified on it.
returns_async.pop('type')
return (self.node.GetName(), parameters, return_type, returns_async)
class Param(object):
'''
Given a Param node representing a function parameter, converts into a Python
dictionary that the JSON schema compiler expects to see.
'''
def __init__(self, param_node):
self.node = param_node
def process(self, callbacks):
return Typeref(self.node.GetProperty('TYPEREF'),
self.node,
{'name': self.node.GetName()}).process(callbacks)
class Dictionary(object):
'''
Given an IDL Dictionary node, converts into a Python dictionary that the JSON
schema compiler expects to see.
'''
def __init__(self, dictionary_node):
self.node = dictionary_node
def process(self, callbacks):
properties = OrderedDict()
for node in self.node.GetChildren():
if node.cls == 'Member':
k, v = Member(node).process(callbacks)
properties[k] = v
result = {'id': self.node.GetName(),
'properties': properties,
'type': 'object'}
if self.node.GetProperty('nodoc'):
result['nodoc'] = True
elif self.node.GetProperty('inline_doc'):
result['inline_doc'] = True
elif self.node.GetProperty('noinline_doc'):
result['noinline_doc'] = True
return result
class Member(object):
'''
Given an IDL dictionary or interface member, converts into a name/value pair
where the value is a Python dictionary that the JSON schema compiler expects
to see.
'''
def __init__(self, member_node):
self.node = member_node
def process(self, callbacks, functions_are_properties=False):
properties = OrderedDict()
name = self.node.GetName()
if self.node.GetProperty('deprecated'):
properties['deprecated'] = self.node.GetProperty('deprecated')
for property_name in ['allowAmbiguousOptionalArguments',
'nodoc', 'nocompile', 'nodart',
'serializableFunction']:
if self.node.GetProperty(property_name):
properties[property_name] = True
if self.node.GetProperty('OPTIONAL'):
properties['optional'] = True
if self.node.GetProperty('platforms'):
properties['platforms'] = list(self.node.GetProperty('platforms'))
for option_name, sanitizer in [
('maxListeners', int),
('supportsFilters', lambda s: s == 'true'),
('supportsListeners', lambda s: s == 'true'),
('supportsRules', lambda s: s == 'true')]:
if self.node.GetProperty(option_name):
if 'options' not in properties:
properties['options'] = {}
properties['options'][option_name] = sanitizer(self.node.GetProperty(
option_name))
type_override = None
parameter_comments = OrderedDict()
for node in self.node.GetChildren():
if node.cls == 'Comment':
(parent_comment, jsexterns, parameter_comments) = ProcessComment(
node.GetName())
properties['description'] = parent_comment
properties['jsexterns'] = jsexterns
elif node.cls == 'Callspec':
name, parameters, return_type, returns_async = (
Callspec(node, parameter_comments).process(callbacks))
if functions_are_properties:
# If functions are treated as properties (which will happen if the
# interface is named Properties) then this isn't a function, it's a
# property which is encoded as a function with no arguments. The
# property type is the return type. This is an egregious hack in lieu
# of the IDL parser supporting 'const'.
assert parameters == [], (
'Property "%s" must be no-argument functions '
'with a non-void return type' % name)
assert return_type is not None, (
'Property "%s" must be no-argument functions '
'with a non-void return type' % name)
assert 'type' in return_type, (
'Property return type "%s" from "%s" must specify a '
'fundamental IDL type.' % (pprint.pformat(return_type), name))
type_override = return_type['type']
else:
type_override = 'function'
properties['parameters'] = parameters
if return_type is not None:
properties['returns'] = return_type
if returns_async is not None:
assert return_type is None, (
'Function "%s" cannot support promises and also have a '
'return value.' % name)
properties['returns_async'] = returns_async
properties['name'] = name
if type_override is not None:
properties['type'] = type_override
else:
properties = Typeref(self.node.GetProperty('TYPEREF'),
self.node, properties).process(callbacks)
value = self.node.GetProperty('value')
if value is not None:
# IDL always returns values as strings, so cast to their real type.
properties['value'] = self.cast_from_json_type(properties['type'], value)
enum_values = self.node.GetProperty('legalValues')
if enum_values:
# IDL always returns enum values as strings, so cast to their real type.
properties['enum'] = [self.cast_from_json_type(properties['type'], enum)
for enum in enum_values]
return name, properties
def cast_from_json_type(self, json_type, string_value):
'''Casts from string |string_value| to a real Python type based on a JSON
Schema type |json_type|. For example, a string value of '42' and a JSON
Schema type 'integer' will cast to int('42') ==> 42.
'''
if json_type == 'integer':
return int(string_value)
if json_type == 'number':
return float(string_value)
# Add more as necessary.
assert json_type == 'string', (
'No rule exists to cast JSON Schema type "%s" to its equivalent '
'Python type for value "%s". You must add a new rule here.' %
(json_type, string_value))
return string_value
class Typeref(object):
'''
Given a TYPEREF property representing the type of dictionary member or
function parameter, converts into a Python dictionary that the JSON schema
compiler expects to see.
'''
def __init__(self, typeref, parent, additional_properties):
self.typeref = typeref
self.parent = parent
self.additional_properties = additional_properties
def process(self, callbacks):
properties = self.additional_properties
result = properties
if self.parent.GetPropertyLocal('OPTIONAL'):
properties['optional'] = True
# The IDL parser denotes array types by adding a child 'Array' node onto
# the Param node in the Callspec.
for sibling in self.parent.GetChildren():
if sibling.cls == 'Array' and sibling.GetName() == self.parent.GetName():
properties['type'] = 'array'
properties['items'] = OrderedDict()
properties = properties['items']
break
if self.typeref == 'DOMString':
properties['type'] = 'string'
elif self.typeref == 'boolean':
properties['type'] = 'boolean'
elif self.typeref == 'double':
properties['type'] = 'number'
elif self.typeref == 'long':
properties['type'] = 'integer'
elif self.typeref == 'any':
properties['type'] = 'any'
elif self.typeref == 'object':
properties['type'] = 'object'
if 'additionalProperties' not in properties:
properties['additionalProperties'] = OrderedDict()
properties['additionalProperties']['type'] = 'any'
instance_of = self.parent.GetProperty('instanceOf')
if instance_of:
properties['isInstanceOf'] = instance_of
elif self.typeref == 'ArrayBuffer':
properties['type'] = 'binary'
properties['isInstanceOf'] = 'ArrayBuffer'
elif self.typeref == 'ArrayBufferView':
properties['type'] = 'binary'
# We force the APIs to specify instanceOf since ArrayBufferView isn't an
# instantiable type, therefore we don't specify isInstanceOf here.
elif self.typeref == 'FileEntry':
properties['type'] = 'object'
properties['isInstanceOf'] = 'FileEntry'
if 'additionalProperties' not in properties:
properties['additionalProperties'] = OrderedDict()
properties['additionalProperties']['type'] = 'any'
elif self.parent.GetPropertyLocal('Union'):
properties['choices'] = [Typeref(node.GetProperty('TYPEREF'),
node,
OrderedDict()).process(callbacks)
for node in self.parent.GetChildren()
if node.cls == 'Option']
elif self.typeref is None:
properties['type'] = 'function'
else:
if self.typeref in callbacks:
# Do not override name and description if they are already specified.
name = properties.get('name', None)
description = properties.get('description', None)
properties.update(callbacks[self.typeref])
if description is not None:
properties['description'] = description
if name is not None:
properties['name'] = name
else:
properties['$ref'] = self.typeref
return result
class Enum(object):
'''
Given an IDL Enum node, converts into a Python dictionary that the JSON
schema compiler expects to see.
'''
def __init__(self, enum_node):
self.node = enum_node
self.description = ''
def process(self):
enum = []
for node in self.node.GetChildren():
if node.cls == 'EnumItem':
enum_value = {'name': node.GetName()}
if node.GetProperty('nodoc'):
enum_value['nodoc'] = True
for child in node.GetChildren():
if child.cls == 'Comment':
enum_value['description'] = ProcessComment(child.GetName())[0]
else:
raise ValueError('Did not process %s %s' % (child.cls, child))
enum.append(enum_value)
elif node.cls == 'Comment':
self.description = ProcessComment(node.GetName())[0]
else:
sys.exit('Did not process %s %s' % (node.cls, node))
result = {'id' : self.node.GetName(),
'description': self.description,
'type': 'string',
'enum': enum}
for property_name in ['cpp_enum_prefix_override', 'inline_doc',
'noinline_doc', 'nodoc']:
if self.node.GetProperty(property_name):
result[property_name] = self.node.GetProperty(property_name)
if self.node.GetProperty('deprecated'):
result['deprecated'] = self.node.GetProperty('deprecated')
return result
class Namespace(object):
'''
Given an IDLNode representing an IDL namespace, converts into a Python
dictionary that the JSON schema compiler expects to see.
'''
def __init__(self,
namespace_node,
description,
nodoc=False,
internal=False,
platforms=None,
compiler_options=None,
deprecated=None,
documentation_options=None):
self.namespace = namespace_node
self.nodoc = nodoc
self.internal = internal
self.platforms = platforms
self.compiler_options = compiler_options
self.events = []
self.functions = []
self.properties = OrderedDict()
self.manifest_keys = None
self.types = []
self.callbacks = OrderedDict()
self.description = description
self.deprecated = deprecated
self.documentation_options = documentation_options
def process(self):
for node in self.namespace.GetChildren():
if node.cls == 'Dictionary' and node.GetName() == 'ManifestKeys':
self.manifest_keys = Dictionary(node).process(
self.callbacks)['properties']
elif node.cls == 'Dictionary':
self.types.append(Dictionary(node).process(self.callbacks))
elif node.cls == 'Callback':
k, v = Member(node).process(self.callbacks)
self.callbacks[k] = v
elif node.cls == 'Interface' and node.GetName() == 'Functions':
self.functions = self.process_interface(node)
elif node.cls == 'Interface' and node.GetName() == 'Events':
self.events = self.process_interface(node)
elif node.cls == 'Interface' and node.GetName() == 'Properties':
properties_as_list = self.process_interface(
node, functions_are_properties=True)
for prop in properties_as_list:
          # Properties are given as key-value pairs, but IDL parses them
          # as a list. Convert back to key-value pairs.
prop_name = prop.pop('name')
assert not prop_name in self.properties, (
'Property "%s" cannot be specified more than once.' % prop_name)
self.properties[prop_name] = prop
elif node.cls == 'Enum':
self.types.append(Enum(node).process())
else:
sys.exit('Did not process %s %s' % (node.cls, node))
compiler_options = self.compiler_options or {}
documentation_options = self.documentation_options or {}
return {
'namespace': self.namespace.GetName(),
'description': self.description,
'nodoc': self.nodoc,
'types': self.types,
'functions': self.functions,
'properties': self.properties,
'manifest_keys': self.manifest_keys,
'internal': self.internal,
'events': self.events,
'platforms': self.platforms,
'compiler_options': compiler_options,
'deprecated': self.deprecated,
'documentation_options': documentation_options
}
def process_interface(self, node, functions_are_properties=False):
members = []
for member in node.GetChildren():
if member.cls == 'Member':
_, properties = Member(member).process(
self.callbacks,
functions_are_properties=functions_are_properties)
members.append(properties)
return members
class IDLSchema(object):
'''
Given a list of IDLNodes and IDLAttributes, converts into a Python list
of api_defs that the JSON schema compiler expects to see.
'''
def __init__(self, idl):
self.idl = idl
def process(self):
namespaces = []
nodoc = False
internal = False
description = None
platforms = None
compiler_options = {}
deprecated = None
documentation_options = {}
for node in self.idl:
if node.cls == 'Namespace':
if not description:
# TODO(kalman): Go back to throwing an error here.
print('%s must have a namespace-level comment. This will '
'appear on the API summary page.' % node.GetName())
description = ''
namespace = Namespace(node, description, nodoc, internal,
platforms=platforms,
compiler_options=compiler_options or None,
deprecated=deprecated,
documentation_options=documentation_options)
namespaces.append(namespace.process())
nodoc = False
internal = False
platforms = None
compiler_options = None
elif node.cls == 'Copyright':
continue
elif node.cls == 'Comment':
description = node.GetName()
elif node.cls == 'ExtAttribute':
if node.name == 'nodoc':
nodoc = bool(node.value)
elif node.name == 'internal':
internal = bool(node.value)
elif node.name == 'platforms':
platforms = list(node.value)
elif node.name == 'implemented_in':
compiler_options['implemented_in'] = node.value
elif node.name == 'generate_error_messages':
compiler_options['generate_error_messages'] = True
elif node.name == 'deprecated':
deprecated = str(node.value)
elif node.name == 'documentation_title':
documentation_options['title'] = node.value
elif node.name == 'documentation_namespace':
documentation_options['namespace'] = node.value
elif node.name == 'documented_in':
documentation_options['documented_in'] = node.value
else:
continue
else:
sys.exit('Did not process %s %s' % (node.cls, node))
return namespaces
def Load(filename):
'''
Given the filename of an IDL file, parses it and returns an equivalent
Python dictionary in a format that the JSON schema compiler expects to see.
'''
with open(filename, 'rb') as handle:
contents = handle.read().decode('utf-8')
return Process(contents, filename)
def Process(contents, filename):
'''
Processes the contents of a file and returns an equivalent Python dictionary
in a format that the JSON schema compiler expects to see. (Separate from
Load primarily for testing purposes.)
'''
idl = idl_parser.IDLParser().ParseData(contents, filename)
idl_schema = IDLSchema(idl)
return idl_schema.process()
def Main():
'''
  Dump a JSON serialization of the parse result for the IDL files whose names
  were passed on the command line.
'''
if len(sys.argv) > 1:
for filename in sys.argv[1:]:
schema = Load(filename)
print(json.dumps(schema, indent=2))
else:
contents = sys.stdin.read()
idl = idl_parser.IDLParser().ParseData(contents, '<stdin>')
schema = IDLSchema(idl).process()
print(json.dumps(schema, indent=2))
if __name__ == '__main__':
Main()
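# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of driving Process() directly on an in-memory IDL
# snippet instead of a file. It relies on idl_parser being importable, as it
# already is at the top of this module; the namespace and dictionary names
# below are invented purely for illustration.
def _example_process_snippet():
  sample_idl = (
      '// Copyright comment.\n'
      '// A namespace-level comment for the sample API.\n'
      'namespace sampleApi {\n'
      '  dictionary Thing {\n'
      '    DOMString name;\n'
      '  };\n'
      '};\n'
  )
  # Returns a list containing one namespace dict in json_schema_compiler form.
  return Process(sample_idl, '<sample>')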
|
|
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Mocks for testing.
"""
import Queue
import threading
from mod_pywebsocket import common
from mod_pywebsocket.stream import StreamHixie75
class _MockConnBase(object):
"""Base class of mocks for mod_python.apache.mp_conn.
This enables tests to check what is written to a (mock) mp_conn.
"""
def __init__(self):
self._write_data = []
self.remote_addr = 'fake_address'
def write(self, data):
"""Override mod_python.apache.mp_conn.write."""
self._write_data.append(data)
def written_data(self):
"""Get bytes written to this mock."""
return ''.join(self._write_data)
class MockConn(_MockConnBase):
"""Mock for mod_python.apache.mp_conn.
This enables tests to specify what should be read from a (mock) mp_conn as
well as to check what is written to it.
"""
def __init__(self, read_data):
"""Constructs an instance.
Args:
read_data: bytes that should be returned when read* methods are
called.
"""
_MockConnBase.__init__(self)
self._read_data = read_data
self._read_pos = 0
def readline(self):
"""Override mod_python.apache.mp_conn.readline."""
if self._read_pos >= len(self._read_data):
return ''
end_index = self._read_data.find('\n', self._read_pos) + 1
if not end_index:
end_index = len(self._read_data)
return self._read_up_to(end_index)
def read(self, length):
"""Override mod_python.apache.mp_conn.read."""
if self._read_pos >= len(self._read_data):
return ''
end_index = min(len(self._read_data), self._read_pos + length)
return self._read_up_to(end_index)
def _read_up_to(self, end_index):
line = self._read_data[self._read_pos:end_index]
self._read_pos = end_index
return line
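# --- Illustrative usage sketch (not part of the original module) ---
# A small, hedged example of how a test might drive MockConn: the bytes handed
# to the constructor are served back by readline()/read(), and anything a
# handler write()s can be inspected afterwards via written_data().
def _example_mock_conn():
    conn = MockConn('GET / HTTP/1.1\r\nHost: example.com\r\n\r\npayload')
    request_line = conn.readline()   # -> 'GET / HTTP/1.1\r\n'
    host_header = conn.readline()    # -> 'Host: example.com\r\n'
    conn.write('HTTP/1.1 101 Switching Protocols\r\n')
    return request_line, host_header, conn.written_data()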
class MockBlockingConn(_MockConnBase):
"""Blocking mock for mod_python.apache.mp_conn.
This enables tests to specify what should be read from a (mock) mp_conn as
well as to check what is written to it.
    Callers of read* methods will block if no bytes are available.
"""
def __init__(self):
_MockConnBase.__init__(self)
self._queue = Queue.Queue()
def readline(self):
"""Override mod_python.apache.mp_conn.readline."""
line = ''
while True:
c = self._queue.get()
line += c
if c == '\n':
return line
def read(self, length):
"""Override mod_python.apache.mp_conn.read."""
data = ''
for unused in range(length):
data += self._queue.get()
return data
def put_bytes(self, bytes):
"""Put bytes to be read from this mock.
Args:
bytes: bytes to be read.
"""
for byte in bytes:
self._queue.put(byte)
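# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of MockBlockingConn: read*() blocks until another thread
# feeds bytes in via put_bytes(), which is how tests exercise code that reads
# from a connection concurrently.
def _example_mock_blocking_conn():
    conn = MockBlockingConn()
    feeder = threading.Thread(target=lambda: conn.put_bytes('hello\n'))
    feeder.start()
    line = conn.readline()  # blocks until the feeder thread supplies '\n'
    feeder.join()
    return line             # -> 'hello\n'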
class MockTable(dict):
"""Mock table.
This mimics mod_python mp_table. Note that only the methods used by
tests are overridden.
"""
def __init__(self, copy_from={}):
if isinstance(copy_from, dict):
copy_from = copy_from.items()
for key, value in copy_from:
self.__setitem__(key, value)
def __getitem__(self, key):
return super(MockTable, self).__getitem__(key.lower())
def __setitem__(self, key, value):
super(MockTable, self).__setitem__(key.lower(), value)
def get(self, key, def_value=None):
return super(MockTable, self).get(key.lower(), def_value)
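# --- Illustrative usage sketch (not part of the original module) ---
# MockTable lower-cases keys on both set and get, mirroring mp_table's
# case-insensitive header handling. A small, hedged example:
def _example_mock_table():
    headers = MockTable({'Content-Type': 'text/plain'})
    assert headers['content-type'] == 'text/plain'
    assert headers.get('CONTENT-TYPE') == 'text/plain'
    return headers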
class MockRequest(object):
"""Mock request.
This mimics mod_python request.
"""
def __init__(self, uri=None, headers_in={}, connection=None, method='GET',
protocol='HTTP/1.1', is_https=False):
"""Construct an instance.
Arguments:
uri: URI of the request.
headers_in: Request headers.
connection: Connection used for the request.
method: request method.
is_https: Whether this request is over SSL.
See the document of mod_python Request for details.
"""
self.uri = uri
self.connection = connection
self.method = method
self.protocol = protocol
self.headers_in = MockTable(headers_in)
# self.is_https_ needs to be accessible from tests. To avoid name
# conflict with self.is_https(), it is named as such.
self.is_https_ = is_https
self.ws_stream = StreamHixie75(self, True)
self.ws_close_code = None
self.ws_close_reason = None
self.ws_version = common.VERSION_HYBI00
self.ws_deflate = False
self.drain_received_data_called = False
def is_https(self):
"""Return whether this request is over SSL."""
return self.is_https_
def _drain_received_data(self):
self.drain_received_data_called = True
class MockDispatcher(object):
"""Mock for dispatch.Dispatcher."""
def __init__(self):
self.do_extra_handshake_called = False
def do_extra_handshake(self, conn_context):
self.do_extra_handshake_called = True
def transfer_data(self, conn_context):
pass
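# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of wiring MockConn and MockRequest together the way a
# handshake test might; the URI and header values below are invented.
def _example_mock_request():
    conn = MockConn('')
    request = MockRequest(uri='/echo',
                          headers_in={'Upgrade': 'WebSocket',
                                      'Connection': 'Upgrade'},
                          connection=conn,
                          is_https=False)
    assert request.headers_in['upgrade'] == 'WebSocket'
    assert not request.is_https()
    return request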
# vi:sts=4 sw=4 et
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Event.notify_on_registration'
db.add_column(u'events_event', 'notify_on_registration',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='notify_event_on_registration', null=True, to=orm['mailcenter.EmailSpecification']),
keep_default=False)
# Adding field 'Event.notify_on_registration_update'
db.add_column(u'events_event', 'notify_on_registration_update',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='notify_event_on_registration_update', null=True, to=orm['mailcenter.EmailSpecification']),
keep_default=False)
# Adding field 'Event.notify_on_payment'
db.add_column(u'events_event', 'notify_on_payment',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='notify_event_on_payment_registration', null=True, to=orm['mailcenter.EmailSpecification']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Event.notify_on_registration'
db.delete_column(u'events_event', 'notify_on_registration_id')
# Deleting field 'Event.notify_on_registration_update'
db.delete_column(u'events_event', 'notify_on_registration_update_id')
# Deleting field 'Event.notify_on_payment'
db.delete_column(u'events_event', 'notify_on_payment_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'events.attend': {
'Meta': {'unique_together': "(('event', 'user'),)", 'object_name': 'Attend'},
'change_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'changed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['events.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_new': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'paid': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '6', 'decimal_places': '2'}),
'price': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '6', 'decimal_places': '2'}),
'registration_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'waiting'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'events.attendeecomment': {
'Meta': {'object_name': 'AttendeeComment'},
'attendee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comment_set'", 'to': "orm['events.Attend']"}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'comment': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'events.attendstatechange': {
'Meta': {'object_name': 'AttendStateChange'},
'attendee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'state_history'", 'to': "orm['events.Attend']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'events.event': {
'Meta': {'object_name': 'Event'},
'custom_change_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'custom_signup_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'custom_status_page': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'enddate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['events.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'location_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'maximum_attendees': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'move_to_accepted_policy': ('django.db.models.fields.CharField', [], {'default': "'always'", 'max_length': '32'}),
'notify_on_payment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'notify_event_on_payment_registration'", 'null': 'True', 'to': u"orm['mailcenter.EmailSpecification']"}),
'notify_on_registration': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'notify_event_on_registration'", 'null': 'True', 'to': u"orm['mailcenter.EmailSpecification']"}),
'notify_on_registration_update': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'notify_event_on_registration_update'", 'null': 'True', 'to': u"orm['mailcenter.EmailSpecification']"}),
'registration_open': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_custom_change_message': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_custom_signup_message': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_custom_status_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'startdate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'tagline': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'events.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'events.option': {
'Meta': {'ordering': "('order',)", 'object_name': 'Option'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['events.OptionGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_scope_edit_manage_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'in_scope_edit_manage_attended': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'in_scope_edit_manage_waiting': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'in_scope_edit_registration': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'in_scope_view_manage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'in_scope_view_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'in_scope_view_registration': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'in_scope_view_system_invoice': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'in_scope_view_user_invoice': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'price': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '6', 'decimal_places': '2'}),
'selected_by_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'boolean'", 'max_length': '32'})
},
'events.optiongroup': {
'Meta': {'ordering': "('-is_special', 'order')", 'object_name': 'OptionGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['events.Event']"}),
'gatekeeper': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['events.Option']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_special': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'maximum_selected': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'minimum_selected': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'package_price': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '6', 'decimal_places': '2'})
},
'events.payment': {
'Meta': {'object_name': 'Payment'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '2'}),
'attendee': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['events.Attend']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'signee': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'signee_payment_set'", 'null': 'True', 'to': u"orm['auth.User']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'events.selection': {
'Meta': {'unique_together': "(('attendee', 'option'),)", 'object_name': 'Selection'},
'attendee': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['events.Attend']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['events.Option']"}),
'suboption': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['events.SubOption']", 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'events.suboption': {
'Meta': {'object_name': 'SubOption'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['events.Option']"}),
'price': ('django.db.models.fields.DecimalField', [], {'default': 'None', 'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'})
},
u'mailcenter.emailspecification': {
'Meta': {'object_name': 'EmailSpecification'},
'body': ('django.db.models.fields.TextField', [], {}),
'body_format': ('django.db.models.fields.CharField', [], {'default': "'markdown'", 'max_length': '32'}),
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'template_context': ('django.db.models.fields.CharField', [], {'default': "'user'", 'max_length': '32'})
}
}
complete_apps = ['events']
|
|
# Copyright (c) 2020 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
import json
import django.db.models
from chroma_core.services import log_register
from chroma_core.lib.util import invoke_rust_agent, invoke_rust_local_action, RustAgentCancellation
from iml_common.lib.agent_rpc import agent_result
job_log = log_register("job")
def is_string(obj):
try:
return isinstance(obj, basestring) # python 2
except NameError:
return isinstance(obj, str) # python 3
class Dependable(object):
def all(self):
if hasattr(self, "objects"):
for o in self.objects:
for i in o.all():
yield i
else:
yield self
def debug_list(self):
if hasattr(self, "objects"):
result = []
for o in self.objects:
result.append((o.__class__.__name__, o.debug_list()))
return result
else:
return [self.stateful_object, self.acceptable_states]
def satisfied(self):
"""Return True or False for whether this and all child
dependencies are satisfied (i.e. their required state
is set on their object)"""
        raise NotImplementedError
class DependOn(Dependable):
def __init__(
self, stateful_object, preferred_state, acceptable_states=None, unacceptable_states=None, fix_state=None
):
"""preferred_state: what we will try to put the dependency into if
it is not already in one of acceptable_states.
        fix_state: what we will try to put the depender into if its
        dependency can no longer be satisfied."""
assert isinstance(stateful_object, django.db.models.Model)
        assert (unacceptable_states is None) or (acceptable_states is None)
if unacceptable_states:
acceptable_states = [state for state in stateful_object.states if state not in unacceptable_states]
if not acceptable_states:
self.acceptable_states = [preferred_state]
else:
if not preferred_state in acceptable_states:
self.acceptable_states = acceptable_states + [preferred_state]
else:
self.acceptable_states = acceptable_states
# Preferred state is a piece of metadata which tells callers how to
# get our stateful_object into an acceptable state -- i.e. "If X is not
# in one of Y then put it into Z" where X is stateful_object, Y is
# acceptable_states, Z is preferred_state.
self.preferred_state = preferred_state
# fix_state is a piece of metadata which tells callers how to eliminate
# this dependency, i.e. "I depend on X in Y but I wouldn't if I was in
# state Z" where X is stateful_object, Y is acceptable_states, Z is
# fix_state.
self.fix_state = fix_state
self.stateful_object = stateful_object
def __str__(self):
return "%s %s %s %s" % (self.stateful_object, self.preferred_state, self.acceptable_states, self.fix_state)
def get_stateful_object(self):
return self.stateful_object.__class__._base_manager.get(pk=self.stateful_object.pk)
    def satisfied(self):
        try:
            depended_object = self.get_stateful_object()
        except Exception:
            # Retry the lookup once if the first fetch fails.
            depended_object = self.stateful_object.__class__._base_manager.get(pk=self.stateful_object.pk)
        satisfied = depended_object.state in self.acceptable_states
if not satisfied:
job_log.warning(
"DependOn not satisfied: %s in state %s, not one of %s (preferred %s)"
% (depended_object, depended_object.state, self.acceptable_states, self.preferred_state)
)
return satisfied
class MultiDependable(Dependable):
def __init__(self, *args):
        try:
            from collections.abc import Iterable  # python 3
        except ImportError:
            from collections import Iterable  # python 2
if len(args) == 1 and isinstance(args[0], Iterable):
self.objects = args[0]
else:
self.objects = args
class DependAll(MultiDependable):
"""Stores a list of Dependables, all of which must be in the
desired state for this dependency to be true"""
def satisfied(self):
for o in self.objects:
if not o.satisfied():
return False
return True
class DependAny(MultiDependable):
"""Stores a list of Dependables, one or more of which must be in the
desired state for this dependency to be true"""
def satisfied(self):
if len(self.objects) == 0:
return True
for o in self.objects:
if o.satisfied():
return True
return False
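# --- Illustrative composition sketch (not part of the original module) ---
# A hedged example of how DependAll/DependAny nest. The stub below only fakes
# the satisfied() contract; real dependencies are DependOn instances wrapping
# Django model objects.
class _StubDependable(Dependable):
    def __init__(self, is_satisfied):
        self._is_satisfied = is_satisfied
    def satisfied(self):
        return self._is_satisfied
def _example_dependency_composition():
    deps = DependAll(
        _StubDependable(True),
        DependAny(_StubDependable(False), _StubDependable(True)),
    )
    # True: every first-level dependency holds, and at least one of the
    # nested DependAny members holds.
    return deps.satisfied()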
class Step(object):
def __init__(self, job, args, log_callback, console_callback, cancel_event):
self.args = args
self.job_id = job.id
self._log_callback = log_callback
self._console_callback = console_callback
        # Whether this step is the final one in the job
self.final = False
self._cancel_event = cancel_event
@classmethod
def describe(cls, kwargs):
return "%s: %s" % (cls.__name__, kwargs)
def mark_final(self):
self.final = True
    # Indicates whether the step is idempotent (for example, mounting
    # a target). Step subclasses which are always idempotent should set the
    # idempotent class attribute; subclasses which may be idempotent should
    # override this attribute.
idempotent = False
# If true, this step may use the database (limits concurrency to number of
# database connections)
database = False
def run(self, kwargs):
raise NotImplementedError
def log(self, message):
job_log.info("Job %s %s: %s" % (self.job_id, self.__class__.__name__, message))
self._log_callback("%s\n" % message)
def _log_subprocesses(self, subprocesses):
for subprocess in subprocesses:
self._console_callback(
"%s: %s\n%s\n%s\n"
% (" ".join(subprocess["args"]), subprocess["rc"], subprocess["stdout"], subprocess["stderr"])
)
def invoke_agent(self, host, command, args={}):
"""
Wrapper around AgentRpc.call which provides logging
"""
fqdn = host if is_string(host) else host.fqdn
from chroma_core.services.job_scheduler.agent_rpc import AgentRpc, AgentException
job_log.info("invoke_agent on agent {} {} {}".format(fqdn, command, args))
try:
result, action_state = AgentRpc.call(fqdn, command, args, self._cancel_event)
self._log_subprocesses(action_state.subprocesses)
return result
except AgentException as e:
self._log_subprocesses(e.subprocesses)
raise
def invoke_rust_local_action(self, command, args={}):
"""
Talks to the iml-action-runner service
"""
return invoke_rust_local_action(command, args, self._cancel_event)
def invoke_rust_local_action_expect_result(self, command, args={}):
from chroma_core.services.job_scheduler.agent_rpc import LocalActionException
try:
result = self.invoke_rust_local_action(command, args)
except RustAgentCancellation as e:
raise LocalActionException(command, args, "Cancelled: {}".format(e))
except Exception as e:
raise LocalActionException(command, args, "Unexpected error: {}".format(e))
try:
result = json.loads(result)
except ValueError as e:
raise LocalActionException(
command,
args,
"Error parsing json: {}; result: {}; command: {}; args: {}".format(e, result, command, args),
)
if "Err" in result:
self.log(json.dumps(result["Err"], indent=2))
raise LocalActionException(command, args, result["Err"])
return result["Ok"]
def invoke_rust_agent(self, host, command, args={}):
"""
Talks to the iml-action-runner service
"""
from chroma_core.services.job_scheduler.agent_rpc import AgentException
try:
return invoke_rust_agent(host, command, args, self._cancel_event)
except RustAgentCancellation as e:
raise AgentException(host, command, args, "Cancelled: {}; command: {}; args: {}".format(e, command, args))
except Exception as e:
raise AgentException(
host, command, args, "Unexpected error: {}; command: {}; args: {}".format(e, command, args)
)
def invoke_rust_agent_expect_result(self, host, command, args={}):
from chroma_core.services.job_scheduler.agent_rpc import AgentException
result = self.invoke_rust_agent(host, command, args)
try:
result = json.loads(result)
except ValueError as e:
raise AgentException(
host,
command,
args,
"Error parsing json: {}; result: {}; command: {}; args: {}".format(e, result, command, args),
)
if "Err" in result:
self.log(json.dumps(result["Err"], indent=2))
raise AgentException(host, command, args, result["Err"])
return result["Ok"]
def invoke_agent_expect_result(self, host, command, args={}):
from chroma_core.services.job_scheduler.agent_rpc import AgentException
fqdn = host if is_string(host) else host.fqdn
result = self.invoke_agent(fqdn, command, args)
        # This case is to deal with upgrades; once every installation is using the new protocol we should no longer allow this.
# Once everything is 3.0 or later we will also have version information in the wrapper header.
if (result is None) or ((type(result) == dict) and ("error" not in result) and ("result" not in result)):
job_log.info("Invalid result %s fixed up on called to %s with args %s" % (result, command, args))
# Prior to 3.0 update_packages returned {'update_packages': data} so fix this up. This code is here so that all
# of the legacy fixups are in one place and can easily be removed.
if command == "install_packages" and "scan_packages" in result:
result = agent_result(result["scan_packages"])
else:
result = agent_result(result)
if type(result) != dict:
raise AgentException(
fqdn, command, args, "Expected a dictionary but got a %s when calling %s" % (type(result), command)
)
if ("error" not in result) and ("result" not in result):
raise AgentException(
fqdn,
command,
args,
"Expected a dictionary with 'error' or 'result' in keys but got %s when calling %s" % (result, command),
)
if "error" in result:
self.log(result["error"])
raise AgentException(fqdn, command, args, result["error"])
return result["result"]
class IdempotentStep(Step):
idempotent = True
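# --- Illustrative subclass sketch (not part of the original module) ---
# A hedged example of what a concrete step tends to look like: run() pulls its
# inputs from kwargs and delegates to invoke_agent_expect_result(), which
# raises AgentException on error results. The "host"/"uuid" kwargs and the
# "start_target" command below are invented for illustration only.
class ExampleStartTargetStep(IdempotentStep):
    def run(self, kwargs):
        host = kwargs["host"]
        self.log("Starting target on %s" % host)
        return self.invoke_agent_expect_result(host, "start_target", {"uuid": kwargs.get("uuid")})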
|
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-21 16:18
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Action',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('action', models.CharField(max_length=255, unique=True)),
],
),
migrations.CreateModel(
name='Bearing',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False, unique=True)),
('name', models.CharField(max_length=255, unique=True)),
],
),
migrations.CreateModel(
name='Condition',
fields=[
('image', models.ImageField(blank=True, upload_to='condition/')),
('name', models.CharField(max_length=255)),
('xws', models.CharField(max_length=255, unique=True)),
('text', models.CharField(max_length=512)),
('unique', models.BooleanField(default=False)),
('id', models.IntegerField(primary_key=True, serialize=False, unique=True, validators=[django.core.validators.MinValueValidator(0)])),
],
),
migrations.CreateModel(
name='Dial',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('speed', models.IntegerField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(5)])),
('maneuvers_energy', models.IntegerField(blank=True, default=0, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('bearing', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='xwingdata.Bearing')),
],
),
migrations.CreateModel(
name='Difficulty',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False, unique=True)),
('name', models.CharField(max_length=255, unique=True)),
],
),
migrations.CreateModel(
name='Faction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('faction', models.CharField(max_length=255, unique=True)),
('image', models.ImageField(blank=True, upload_to='factions/')),
],
),
migrations.CreateModel(
name='GrantsSlot',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)])),
],
),
migrations.CreateModel(
name='Pilot',
fields=[
('name', models.CharField(max_length=255)),
('id', models.IntegerField(primary_key=True, serialize=False, unique=True, validators=[django.core.validators.MinValueValidator(0)])),
('unique', models.BooleanField(default=False)),
('skill', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(9)])),
('skill_special_ruling', models.BooleanField(default=False)),
('points', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
('points_special_ruling', models.BooleanField(default=False)),
('text', models.CharField(blank=True, max_length=255)),
('image', models.ImageField(blank=True, upload_to='pilots/')),
('xws', models.CharField(max_length=255)),
('attack_override', models.IntegerField(blank=True, default=0, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('agility_override', models.IntegerField(blank=True, default=0, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('hull_override', models.IntegerField(blank=True, default=0, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('shields_override', models.IntegerField(blank=True, default=0, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('range', models.CharField(blank=True, max_length=255, validators=[django.core.validators.RegexValidator(regex='^[1-5](-[1-5])?$')])),
('conditions', models.ManyToManyField(blank=True, to='xwingdata.Condition')),
('faction', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='xwingdata.Faction')),
],
),
migrations.CreateModel(
name='PilotSlot',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)])),
('pilot', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='xwingdata.Pilot')),
],
),
migrations.CreateModel(
name='PrimaryFaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('primary_faction', models.CharField(max_length=255, unique=True)),
],
),
migrations.CreateModel(
name='ReferenceCard',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False, unique=True, validators=[django.core.validators.MinValueValidator(0)])),
('title', models.CharField(max_length=255)),
('subtitle', models.CharField(max_length=255)),
('image', models.ImageField(blank=True, upload_to='reference-cards/')),
('text', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Ship',
fields=[
('name', models.CharField(max_length=255, unique=True)),
('attack', models.IntegerField(blank=True, default=0, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('agility', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
('hull', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
('shields', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
('xws', models.CharField(max_length=255, unique=True)),
('id', models.IntegerField(primary_key=True, serialize=False, unique=True)),
('energy', models.IntegerField(blank=True, default=1, null=True, validators=[django.core.validators.MinValueValidator(1)])),
('epic_points', models.DecimalField(blank=True, decimal_places=1, default=0, max_digits=2, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('actions', models.ManyToManyField(blank=True, to='xwingdata.Action')),
('faction', models.ManyToManyField(to='xwingdata.Faction')),
('maneuvers', models.ManyToManyField(through='xwingdata.Dial', to='xwingdata.Bearing')),
],
),
migrations.CreateModel(
name='Size',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('size', models.CharField(max_length=255, unique=True)),
],
),
migrations.CreateModel(
name='Slot',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slot', models.CharField(max_length=255, unique=True)),
],
),
migrations.CreateModel(
name='Source',
fields=[
('name', models.CharField(max_length=255, unique=True)),
('id', models.IntegerField(primary_key=True, serialize=False, unique=True, validators=[django.core.validators.MinValueValidator(0)])),
('image', models.ImageField(blank=True, upload_to='sources/')),
('thumb', models.ImageField(blank=True, upload_to='sources/')),
('wave', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('special_wave_name', models.CharField(blank=True, max_length=255, null=True)),
('released', models.BooleanField(default=False)),
('sku', models.CharField(max_length=100, unique=True, validators=[django.core.validators.RegexValidator('^SWX[0-9]+$')])),
('announcement_date', models.DateField()),
('release_date', models.DateField(blank=True, null=True)),
('conditions', models.ManyToManyField(blank=True, to='xwingdata.Condition')),
],
),
migrations.CreateModel(
name='SourcePilot',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)])),
('pilot', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='xwingdata.Pilot')),
('source', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='xwingdata.Source')),
],
),
migrations.CreateModel(
name='SourceShip',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)])),
('ship', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='xwingdata.Ship')),
('source', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='xwingdata.Source')),
],
),
migrations.CreateModel(
name='SourceUpgrade',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)])),
('source', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='xwingdata.Source')),
],
),
migrations.CreateModel(
name='Upgrade',
fields=[
('name', models.CharField(max_length=30)),
('id', models.IntegerField(primary_key=True, serialize=False, unique=True, validators=[django.core.validators.MinValueValidator(0)])),
('slot_cost', models.IntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)])),
('points', models.IntegerField()),
('attack', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(1)])),
('range', models.CharField(blank=True, max_length=255, validators=[django.core.validators.RegexValidator(regex='^[1-5](-[1-5])?$')])),
('text', models.CharField(max_length=512)),
('image', models.ImageField(blank=True, upload_to='upgrade/')),
('xws', models.CharField(max_length=255)),
('unique', models.BooleanField(default=False)),
('effect', models.CharField(max_length=255)),
('limited', models.BooleanField(default=False)),
('energy', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('grants_attack', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(1)])),
('grants_agility', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(1)])),
('grants_hull', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(1)])),
('grants_shields', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(1)])),
('grants_skill', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(1)])),
('conditions', models.ManyToManyField(blank=True, to='xwingdata.Condition')),
('faction', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='xwingdata.Faction')),
('grants_action', models.ManyToManyField(to='xwingdata.Action')),
('grants_slot', models.ManyToManyField(related_name='grants', through='xwingdata.GrantsSlot', to='xwingdata.Slot')),
('ship', models.ManyToManyField(blank=True, to='xwingdata.Ship')),
('size', models.ManyToManyField(to='xwingdata.Size')),
('slot', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='xwingdata.Slot')),
],
),
migrations.AddField(
model_name='sourceupgrade',
name='upgrade',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='xwingdata.Upgrade'),
),
migrations.AddField(
model_name='source',
name='pilots',
field=models.ManyToManyField(through='xwingdata.SourcePilot', to='xwingdata.Pilot'),
),
migrations.AddField(
model_name='source',
name='reference_cards',
field=models.ManyToManyField(blank=True, to='xwingdata.ReferenceCard'),
),
migrations.AddField(
model_name='source',
name='ships',
field=models.ManyToManyField(through='xwingdata.SourceShip', to='xwingdata.Ship'),
),
migrations.AddField(
model_name='source',
name='upgrades',
field=models.ManyToManyField(through='xwingdata.SourceUpgrade', to='xwingdata.Upgrade'),
),
migrations.AddField(
model_name='ship',
name='size',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='xwingdata.Size'),
),
migrations.AddField(
model_name='pilotslot',
name='slot',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='xwingdata.Slot'),
),
migrations.AddField(
model_name='pilot',
name='ship',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='xwingdata.Ship'),
),
migrations.AddField(
model_name='pilot',
name='slots',
field=models.ManyToManyField(through='xwingdata.PilotSlot', to='xwingdata.Slot'),
),
migrations.AddField(
model_name='grantsslot',
name='slot',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='xwingdata.Slot'),
),
migrations.AddField(
model_name='grantsslot',
name='upgrade',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='xwingdata.Upgrade'),
),
migrations.AddField(
model_name='faction',
name='primary_faction',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='xwingdata.PrimaryFaction'),
),
migrations.AddField(
model_name='dial',
name='difficulty',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='xwingdata.Difficulty'),
),
migrations.AddField(
model_name='dial',
name='ship',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='xwingdata.Ship'),
),
migrations.AlterUniqueTogether(
name='upgrade',
unique_together=set([('name', 'faction')]),
),
migrations.AlterUniqueTogether(
name='sourceupgrade',
unique_together=set([('source', 'upgrade')]),
),
migrations.AlterUniqueTogether(
name='sourceship',
unique_together=set([('source', 'ship')]),
),
migrations.AlterUniqueTogether(
name='sourcepilot',
unique_together=set([('source', 'pilot')]),
),
migrations.AlterUniqueTogether(
name='pilotslot',
unique_together=set([('pilot', 'slot')]),
),
migrations.AlterUniqueTogether(
name='pilot',
unique_together=set([('ship', 'faction', 'xws')]),
),
migrations.AlterUniqueTogether(
name='grantsslot',
unique_together=set([('upgrade', 'slot')]),
),
migrations.AlterUniqueTogether(
name='dial',
unique_together=set([('ship', 'speed', 'bearing')]),
),
]
|
|
"""
sentry.quotas.redis
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import functools
import six
from time import time
from sentry.exceptions import InvalidConfiguration
from sentry.quotas.base import NotRateLimited, Quota, RateLimited
from sentry.utils.redis import get_cluster_from_options, load_script
is_rate_limited = load_script('quotas/is_rate_limited.lua')
class BasicRedisQuota(object):
__slots__ = ['key', 'limit', 'window', 'reason_code', 'enforce']
def __init__(self, key, limit=0, window=60, reason_code=None, enforce=True):
self.key = key
# maximum number of events in the given window, 0 indicates "no limit"
self.limit = limit
# time in seconds that this quota reflects
self.window = window
# a machine readable string
self.reason_code = reason_code
# should this quota be hard-enforced (or just tracked)
self.enforce = enforce
class RedisQuota(Quota):
    #: The ``grace`` period accommodates clock drift in the TTL calculation,
    #: since the clock on the Redis instance used to store quota metrics may
    #: not be in sync with the computer running this code.
grace = 60
def __init__(self, **options):
self.cluster, options = get_cluster_from_options('SENTRY_QUOTA_OPTIONS', options)
super(RedisQuota, self).__init__(**options)
self.namespace = 'quota'
def validate(self):
try:
with self.cluster.all() as client:
client.ping()
except Exception as e:
raise InvalidConfiguration(six.text_type(e))
def __get_redis_key(self, key, timestamp, interval, shift):
return '{}:{}:{}'.format(
self.namespace,
key,
int((timestamp - shift) // interval),
)
def get_quotas_with_limits(self, project, key=None):
return [
quota for quota in self.get_quotas(project, key=key)
            # quota is a BasicRedisQuota (key, limit, window, ...)
if quota.limit > 0 # a zero limit means "no limit", not "reject all"
]
def get_quotas(self, project, key=None):
if key:
key.project = project
pquota = self.get_project_quota(project)
oquota = self.get_organization_quota(project.organization)
results = [
BasicRedisQuota(
key='p:{}'.format(project.id),
limit=pquota[0],
window=pquota[1],
reason_code='project_quota',
),
BasicRedisQuota(
key='o:{}'.format(project.organization_id),
limit=oquota[0],
window=oquota[1],
reason_code='org_quota',
),
]
if key:
kquota = self.get_key_quota(key)
results.append(
BasicRedisQuota(
key='k:{}'.format(key.id),
limit=kquota[0],
window=kquota[1],
reason_code='key_quota',
)
)
return results
def get_usage(self, organization_id, quotas, timestamp=None):
if timestamp is None:
timestamp = time()
def get_usage_for_quota(client, quota):
if quota.limit == 0:
return (None, None)
key = self.__get_redis_key(
quota.key, timestamp, quota.window, organization_id % quota.window
)
refund_key = self.get_refunded_quota_key(key)
return (client.get(key), client.get(refund_key))
def get_value_for_result(result, refund_result):
if result is None:
return None
return int(result.value or 0) - int(refund_result.value or 0)
with self.cluster.fanout() as client:
results = map(
functools.partial(
get_usage_for_quota,
client.target_key(
six.text_type(organization_id),
),
),
quotas,
)
return [
get_value_for_result(*r) for r in results
]
def get_refunded_quota_key(self, key):
return 'r:{}'.format(key)
def refund(self, project, key=None, timestamp=None):
if timestamp is None:
timestamp = time()
quotas = self.get_quotas_with_limits(project, key=key)
if not quotas:
return
client = self.cluster.get_local_client_for_key(six.text_type(project.organization_id))
pipe = client.pipeline()
for quota in quotas:
shift = project.organization_id % quota.window
            # Somewhat arbitrary, but the refund key should not expire until
            # we are sure the window is over.
expiry = self.get_next_period_start(quota.window, shift, timestamp) + self.grace
return_key = self.get_refunded_quota_key(
self.__get_redis_key(quota.key, timestamp, quota.window, shift),
)
pipe.incr(return_key, 1)
pipe.expireat(return_key, int(expiry))
pipe.execute()
def get_next_period_start(self, interval, shift, timestamp):
"""Return the timestamp when the next rate limit period begins for an interval."""
return (((timestamp - shift) // interval) + 1) * interval + shift
def is_rate_limited(self, project, key=None, timestamp=None):
if timestamp is None:
timestamp = time()
quotas = self.get_quotas_with_limits(project, key=key)
# If there are no quotas to actually check, skip the trip to the database.
if not quotas:
return NotRateLimited()
keys = []
args = []
for quota in quotas:
shift = project.organization_id % quota.window
key = self.__get_redis_key(quota.key, timestamp, quota.window, shift)
return_key = self.get_refunded_quota_key(key)
keys.extend((key, return_key))
expiry = self.get_next_period_start(quota.window, shift, timestamp) + self.grace
args.extend((quota.limit, int(expiry)))
client = self.cluster.get_local_client_for_key(six.text_type(project.organization_id))
rejections = is_rate_limited(client, keys, args)
if any(rejections):
enforce = False
worst_case = (0, None)
for quota, rejected in zip(quotas, rejections):
if not rejected:
continue
if quota.enforce:
enforce = True
shift = project.organization_id % quota.window
delay = self.get_next_period_start(quota.window, shift, timestamp) - timestamp
if delay > worst_case[0]:
worst_case = (delay, quota.reason_code)
if enforce:
return RateLimited(
retry_after=worst_case[0],
reason_code=worst_case[1],
)
return NotRateLimited()
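# --- Illustrative arithmetic sketch (not part of the original module) ---
# A hedged, concrete walk-through of the windowing math used above: with a
# 60-second window and an organization-dependent shift of 17 seconds, a
# timestamp of 1000 falls into bucket (1000 - 17) // 60 == 16, and the next
# window begins at (16 + 1) * 60 + 17 == 1037.
def _example_window_arithmetic(timestamp=1000, interval=60, shift=17):
    bucket = int((timestamp - shift) // interval)  # bucket index used in the redis key
    next_start = (bucket + 1) * interval + shift   # matches get_next_period_start()
    return bucket, next_start                      # -> (16, 1037)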
|
|
#===========================================================================
#
# Copyright (c) 2014, California Institute of Technology.
# U.S. Government Sponsorship under NASA Contract NAS7-03001 is
# acknowledged. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#===========================================================================
"Unit test for the StyleManager class."
__version__ = "$Revision: #1 $"
#===========================================================================
# Required imports. Do not modify these.
import unittest
#===========================================================================
# Place all imports after here.
#
import mplStyle as S
import os
import shutil
#
# Place all imports before here.
#===========================================================================
#===========================================================================
class MyStyle( S.types.Style ):
"""A Sub-Classed Style."""
#-----------------------------------------------------------------------
def __init__( self, name, kw = {}, parent = None, custom = None ):
self.figure = S.types.Data(
text = S.types.Data(
size = None,
color = None,
),
bgColor = None,
fgColor = None,
)
self.axes = S.types.Data(
text = S.types.Data(
size = None,
color = None,
),
bgColor = None,
fgColor = None,
)
      # The Style CTOR will attempt to apply the keyword argument values,
      # so we need to call this last.
S.types.Style.__init__( self, name, kw, parent, custom )
#-----------------------------------------------------------------------
def __str__( self ):
s = "%s\n" % self.name
s += " figure = %s\n" % self.figure
s += " axes = %s\n" % self.axes
return s
#-----------------------------------------------------------------------
def copy( self, newName ):
style = MyStyle( newName, {}, self.parent, self.custom )
style.figure = self.figure.copy( deep = True )
style.axes = self.axes.copy( deep = True )
return style
#-----------------------------------------------------------------------
def update( self, style ):
super( MyStyle, self ).update( style )
if style.figure.text.size is not None:
self.figure.text.size = style.figure.text.size
if style.figure.text.color is not None:
self.figure.text.color = style.figure.text.color
if style.figure.bgColor is not None:
self.figure.bgColor = style.figure.bgColor
if style.figure.fgColor is not None:
self.figure.fgColor = style.figure.fgColor
if style.axes.text.size is not None:
self.axes.text.size = style.axes.text.size
if style.axes.text.color is not None:
self.axes.text.color = style.axes.text.color
if style.axes.bgColor is not None:
self.axes.bgColor = style.axes.bgColor
if style.axes.fgColor is not None:
self.axes.fgColor = style.axes.fgColor
#-----------------------------------------------------------------------
def _applyStyle( self, obj, filter, postProcess ):
process, recurse = filter( obj )
if not process:
return
if self.figure.text.size is not None:
obj.figureTextSize = self.figure.text.size
if self.figure.text.color is not None:
obj.figureTextColor = self.figure.text.color
if self.figure.bgColor is not None:
obj.figureBgColor = self.figure.bgColor
if self.figure.fgColor is not None:
obj.figureFgColor = self.figure.fgColor
if self.axes.text.size is not None:
obj.axesTextSize = self.axes.text.size
if self.axes.text.color is not None:
obj.axesTextColor = self.axes.text.color
if self.axes.bgColor is not None:
obj.axesBgColor = self.axes.bgColor
if self.axes.fgColor is not None:
obj.axesFgColor = self.axes.fgColor
postProcess( obj )
#===========================================================================
class MyStyleManager( S.types.StyleManager ):
"""A Sub-Classed Style."""
#-----------------------------------------------------------------------
def __init__( self ):
S.types.StyleManager.__init__( self, MyStyle, 'testyle', 'TEST' )
#-----------------------------------------------------------------------
def _loadFromFile( self, fname ):
name = fname.namebase
style = MyStyle( name )
with open( fname, 'r' ) as fin:
s = fin.readline()
style.figure.__dict__ = eval( s )
d = style.figure.text
         style.figure.text = S.types.Data()
style.figure.text.__dict__ = d
s = fin.readline()
style.axes.__dict__ = eval( s )
d = style.axes.text
         style.axes.text = S.types.Data()
style.axes.text.__dict__ = d
return style
#-----------------------------------------------------------------------
def _saveToFile( self, style, fname ):
with open( fname, 'w' ) as fout:
fout.write( "%s\n" % style.figure )
fout.write( "%s\n" % style.axes )
#-----------------------------------------------------------------------
def _deleteStyleFile( self, fname ):
fname.remove()
#===========================================================================
class TestStyleManager( unittest.TestCase ):
"""Test the StyleManager class."""
#-----------------------------------------------------------------------
def setUp( self ):
"""This method is called before any tests are run."""
# Save the existing STYLEPATH (if there is one)
self.outputDir = "output"
self.stylepath = os.environ.get( "STYLEPATH", None )
os.environ[ "STYLEPATH" ] = self.outputDir
if not os.path.exists( self.outputDir ):
os.mkdir( self.outputDir )
#-----------------------------------------------------------------------
def tearDown( self ):
"""This method is called after all tests are run."""
# You may place finalization code here.
if self.stylepath is not None:
os.environ[ "STYLEPATH" ] = self.stylepath
if os.path.exists( self.outputDir ):
shutil.rmtree( self.outputDir )
#=======================================================================
# Add tests methods below.
# Any method whose name begins with 'test' will be run by the framework.
#-----------------------------------------------------------------------
def checkObj( self, name, obj, style ):
"""Check if the object has the style."""
attrs = {
'TextSize' : 'text.size',
'TextColor' : 'text.color',
'BgColor' : 'bgColor',
'FgColor' : 'fgColor'
}
style = style.resolve(None)
for key in attrs:
desired = style.getValue( 'figure.%s' % attrs[key] )
actual = getattr( obj, 'figure%s' % key )
msg = "%s: figure %s not properly set." % (name, key)
self.assertEqual( desired, actual, msg = msg )
desired = style.getValue( 'axes.%s' % attrs[key] )
actual = getattr( obj, 'axes%s' % key )
msg = "%s: axes %s not properly set." % (name, key)
self.assertEqual( desired, actual, msg = msg )
#-----------------------------------------------------------------------
def checkDataEq( self, name, desired, actual ):
"""Check if two Data are equall"""
if isinstance( desired, S.types.Data ) and \
isinstance( actual, S.types.Data ):
self.assertEqual( desired.keys(), actual.keys(),
msg = "Data keys are not the same is test '%s'." % (name,) )
for key in desired:
self.checkDataEq( '%s:%s' % (name, key), desired[key], actual[key] )
else:
self.assertEqual( desired, actual,
msg = "Data value wrong in test '%s'" % (name,) )
#-----------------------------------------------------------------------
def checkStyleEq( self, name, desired, actual ):
"""Check if two Styles are equall"""
self.checkDataEq( '%s:figure' % name, desired.figure, actual.figure )
self.checkDataEq( '%s:axes' % name, desired.axes, actual.axes )
#-----------------------------------------------------------------------
def testBasic( self ):
"""A basic test of StyleManager."""
mgr = MyStyleManager()
style1 = mgr.create( 'Style #1' )
style1.figure.text.size = 12
style1.axes.text.size = 8
style2 = mgr.create( 'Style #2',
{ 'figure.bgColor' : 'grey',
'axes.bgColor' : 'white' } )
style2.figure.bgColor = 'grey'
style2.axes.bgColor = 'white'
style3 = mgr.create( 'Style #3', parent = style1 )
style3.figure.text.size = 24
# Resolved 3 with 2
style4 = MyStyle( 'Style #4' )
style4.figure.text.size = 24
style4.axes.text.size = 8
style4.figure.bgColor = 'grey'
style4.axes.bgColor = 'white'
# Resolved 3 with 2 and updated 3
style5 = mgr.create( 'Style #5' )
mgr[ 'Style #5' ].figure.text.size = 16
mgr[ 'Style #5' ].axes.text.size = 8
mgr[ 'Style #5' ].figure.bgColor = 'grey'
mgr[ 'Style #5' ].axes.bgColor = 'white'
style6 = mgr.create( 'Style #6', parent='Style #5' )
style6.figure.text.color = 'orange'
style6.axes.text.color = 'purple'
# Copy tests
newStyle = mgr.copy( style3, 'NewStyle1' )
self.checkStyleEq( 'Copy by style', style3, newStyle )
newStyle = mgr.copy( 'Style #3', 'NewStyle2' )
self.checkStyleEq( 'Copy by name', style3, newStyle )
self.assertRaises( Exception, mgr.copy, 'badName', 'blah',
msg = "Failed to raise when copying a non-existant style." )
# Test Resolve
resolvedStyle = mgr.resolve( "Resolved Style #1", 'Style #2' )
self.checkStyleEq( 'Resolve by name', style2, resolvedStyle )
mgr.erase( resolvedStyle )
resolvedStyle = mgr.resolve( "Resolved Style #2",
['Style #2', style3, 'blah'],
ignoreNotFound = True )
self.checkStyleEq( 'Resolve by list', style4, resolvedStyle )
mgr.erase( resolvedStyle )
self.assertRaises( Exception, mgr.resolve, None, 'does not exist',
msg = "Resolve failed to throw on invalid style." )
# Apply testing
def customFunc( obj ):
obj.figureBgColor = 'grey'
obj.axesBgColor = 'white'
style3.custom = customFunc
obj = S.types.Data(
figureTextSize = None,
figureTextColor = None,
figureBgColor = None,
figureFgColor = None,
axesTextSize = None,
axesTextColor = None,
axesBgColor = None,
axesFgColor = None,
)
mgr.apply( obj, style4 )
self.checkObj( 'Apply by style', obj, style4 )
self.assertEqual( True, mgr.exists(style4),
msg = "Failed to auto add an applied style." )
obj = S.types.Data(
figureTextSize = None,
figureTextColor = None,
figureBgColor = None,
figureFgColor = None,
axesTextSize = None,
axesTextColor = None,
axesBgColor = None,
axesFgColor = None,
)
self.assertRaises( Exception, mgr.apply, obj, 'invalidName',
msg = "Failed to raise on applying a bad style." )
self.assertEqual( [], mgr.getElementStyles( obj ),
msg = "Element styles should be []." )
mgr.apply( obj, 'Style #3' )
self.checkObj( 'Apply by name', obj, style4 )
style3.figure.text.size = 16
mgr.reapply()
self.checkObj( 'Re-Apply', obj, style5 )
mgr.set( obj, 'figure.text.size', 24 )
self.checkObj( 'Set by name', obj, style4 )
mgr.set( obj, { 'figure.text.size' : 16 } )
self.checkObj( 'Set by dict', obj, style5 )
self.assertRaises( Exception, mgr.set, obj, { 'foo' : 1 }, 1,
msg = "Failed to throw with invalid 'set' parameters." )
# Check the get/set of the element tag
obj = S.types.Data(
figureTextSize = None,
figureTextColor = None,
figureBgColor = None,
figureFgColor = None,
axesTextSize = None,
axesTextColor = None,
axesBgColor = None,
axesFgColor = None,
)
mgr.apply( obj, style5 )
self.assertEqual( [], mgr.getTags( obj ),
msg = "Element name should be None" )
mgr.untag( obj, 'testTag' )
self.assertEqual( False, mgr.hasTag( obj, 'testTag' ),
msg = "Incorrect result for hasTag (when nothing set)." )
mgr.tag( obj, 'testTag' )
self.assertEqual( ['testTag'], mgr.getTags( obj ),
msg = "Set/get tag failed for obj" )
self.assertEqual( True, mgr.hasTag( obj, 'testTag' ),
msg = "Incorrect result for hasTag (when it has it)" )
mgr.untag( obj, 'testTag' )
self.assertEqual( False, mgr.hasTag( obj, 'testTag' ),
msg = "Incorrect result for hasTag (when it doesn't have it)" )
mgr.tag( obj, 'testTag' )
tagStyle = MyStyle( 'TagStyle' )
tagStyle.figure.text.color = 'orange'
tagStyle.axes.text.color = 'purple'
mgr.apply( obj, tagStyle, tag = "OtherTag" )
self.checkObj( 'Set by incorrect tag', obj, style5 )
mgr.apply( obj, tagStyle, tag = "testTag" )
self.checkObj( 'Set by correct tag', obj, style6 )
# Check apply by tag only
obj = S.types.Data(
figureTextSize = None,
figureTextColor = None,
figureBgColor = None,
figureFgColor = None,
axesTextSize = None,
axesTextColor = None,
axesBgColor = None,
axesFgColor = None,
)
mgr.tag( obj, 'applyTestTag' )
mgr.apply( None, style6, tag = 'applyTestTag' )
self.checkObj( 'Apply by tag', obj, style6 )
# Verify that the new style will be added when resolve is called
tmpStyle1 = MyStyle( 'TmpStyle #1' )
mgr.resolve( None, tmpStyle1 )
# Verify that the new style will be added when create is called
tmpStyle2 = MyStyle( 'TmpStyle #2' )
mgr.create( None, parent = tmpStyle2 )
# check loading / clearing styles
expected = [ 'NewStyle1', 'NewStyle2', 'Style #1', 'Style #2',
'Style #3', 'Style #4', 'Style #5', 'Style #6',
'TagStyle', 'TmpStyle #1', 'TmpStyle #2' ]
self.assertEqual( expected, mgr.getAll(),
msg = "Invalid list of loaded styles." )
tmpfile = open( os.path.join( self.outputDir, "warning-output.log" ),
'w' )
mgr.apply( obj, [ style1, 'Style #3' ] )
mgr.erase( style3 )
mgr.reapply()
tmpfile.close()
mgr.clear()
expected = []
self.assertEqual( expected, mgr.getAll(),
msg = "mgr shows styles loaded, when should be empty." )
tmpStyle = MyStyle( "Temp Style" )
mgr.add( tmpStyle )
self.assertRaises( Exception, mgr.add, tmpStyle,
msg = "Failed to throw on multiple adds." )
mgr.erase( tmpStyle )
result = mgr.find( tmpStyle.name )
self.assertEqual( None, result,
msg = "Did not remove 'tmpStyle' from the manager." )
msg = "Failed to throw on multiple removes."
self.assertRaises( Exception, mgr.erase, tmpStyle, msg = msg )
msg = "Failed to throw with invalid parent"
self.assertRaises( Exception, mgr.create, 'Bad Parent', parent = "Bogus", msg = msg )
#-----------------------------------------------------------------------
def testPersistence( self ):
"""Test reading and writing functionality."""
mgr = MyStyleManager()
style1 = mgr.create( 'Style_#1' )
style1.figure.text.size = 12
style1.axes.text.size = 8
mgr.save( outdir = self.outputDir )
self.assertRaises( Exception, mgr.save, outdir=self.outputDir, overwrite=False,
msg = "Failed to raise when writing to an existing file." )
mgr2 = MyStyleManager()
mgr2.load()
self.checkStyleEq( "Default Load", style1, mgr2[ style1.name ] )
mgr3 = MyStyleManager()
mgr3.path = [ self.outputDir ]
mgr3.load()
self.checkStyleEq( "Load by path", style1, mgr3[ style1.name ] )
mgr4 = MyStyleManager()
mgr4.path = [ '$STYLEPATH' ]
mgr4.load()
self.checkStyleEq( "Load by path STYLEPATH", style1, mgr4[ style1.name ] )
mgr5 = MyStyleManager()
mgr5.load( self.outputDir )
self.checkStyleEq( "Load by passed path", style1, mgr5[ style1.name ] )
os.environ.pop( "STYLEPATH" )
mgr6 = MyStyleManager()
mgr6.load()
self.assertEqual( None, mgr6.find( style1.name ),
msg = "There should be not style loaded." )
        p = path( os.path.join( self.outputDir, 'Style_#1.testyle' ) )
self.assertEqual( True, p.exists(),
msg = "Manager failed to write the style file." )
mgr.erase( style1, delete = True )
self.assertEqual( False, p.exists(),
msg = "Manager failed to remove the style file." )
#-----------------------------------------------------------------------
def testErrors( self ):
"""Test error conditions."""
mgr = MyStyleManager()
style = MyStyle( "Dummy" )
self.assertRaises( Exception, S.types.StyleManager._loadFromFile,
mgr, "Dummy", "Bogus",
msg = "Failed to throw on call to '_loadFromFile'." )
self.assertRaises( Exception, S.types.StyleManager._saveToFile, mgr,
style, "Bogus",
msg = "Failed to throw on call to '_saveToFile'." )
self.assertRaises( Exception, S.types.StyleManager._deleteStyleFile,
mgr, "Bogus",
msg = "Failed to throw on call to '_deleteStyleFile'." )
#-----------------------------------------------------------------------
|
|
import sqlparse
from flask import jsonify, request, url_for
from flask_login import login_required
from flask_restful import abort
from sqlalchemy.orm.exc import StaleDataError
from funcy import rpartial
from redash import models
from redash.authentication.org_resolving import current_org
from redash.handlers.base import (BaseResource, filter_by_tags, get_object_or_404,
org_scoped_rule, paginate, routes, order_results as _order_results)
from redash.handlers.query_results import run_query
from redash.permissions import (can_modify, not_view_only, require_access,
require_admin_or_owner,
require_object_modify_permission,
require_permission, view_only)
from redash.utils import collect_parameters_from_request
from redash.serializers import QuerySerializer
# Ordering map for relationships
order_map = {
'name': 'lowercase_name',
'-name': '-lowercase_name',
'created_at': 'created_at',
'-created_at': '-created_at',
'schedule': 'schedule',
'-schedule': '-schedule',
'runtime': 'query_results-runtime',
'-runtime': '-query_results-runtime',
'executed_at': 'query_results-retrieved_at',
'-executed_at': '-query_results-retrieved_at',
'created_by': 'users-name',
'-created_by': '-users-name',
}
order_results = rpartial(_order_results, '-created_at', order_map)
@routes.route(org_scoped_rule('/api/queries/format'), methods=['POST'])
@login_required
def format_sql_query(org_slug=None):
"""
Formats an SQL query using the Python ``sqlparse`` formatter.
:<json string query: The SQL text to format
:>json string query: Formatted SQL text
"""
arguments = request.get_json(force=True)
query = arguments.get("query", "")
return jsonify({'query': sqlparse.format(query, reindent=True, keyword_case='upper')})
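# Illustrative sketch (not part of the API surface): the endpoint above simply
# wraps sqlparse.format, so the transformation it applies is equivalent to
#
#     sqlparse.format("select id, name from users where id = 1",
#                     reindent=True, keyword_case='upper')
#     # -> "SELECT id,\n       name\nFROM users\nWHERE id = 1"
#
# The exact whitespace in the output depends on the installed sqlparse version.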
class QuerySearchResource(BaseResource):
@require_permission('view_query')
def get(self):
"""
Search query text, names, and descriptions.
:qparam string q: Search term
        :qparam number include_drafts: Whether to include drafts in the results
Responds with a list of :ref:`query <query-response-label>` objects.
"""
term = request.args.get('q', '')
if not term:
return []
include_drafts = request.args.get('include_drafts') is not None
self.record_event({
'action': 'search',
'object_type': 'query',
'term': term,
})
# this redirects to the new query list API that is aware of search
new_location = url_for(
'queries',
q=term,
org_slug=current_org.slug,
drafts='true' if include_drafts else 'false',
)
return {}, 301, {'Location': new_location}
class QueryRecentResource(BaseResource):
@require_permission('view_query')
def get(self):
"""
Retrieve up to 10 queries recently modified by the user.
Responds with a list of :ref:`query <query-response-label>` objects.
"""
results = models.Query.by_user(self.current_user).order_by(models.Query.updated_at.desc()).limit(10)
return QuerySerializer(results, with_last_modified_by=False, with_user=False).serialize()
class QueryListResource(BaseResource):
@require_permission('create_query')
def post(self):
"""
Create a new query.
:<json number data_source_id: The ID of the data source this query will run on
:<json string query: Query text
:<json string name:
:<json string description:
:<json string schedule: Schedule interval, in seconds, for repeated execution of this query
:<json object options: Query options
.. _query-response-label:
:>json number id: Query ID
:>json number latest_query_data_id: ID for latest output data from this query
:>json string name:
:>json string description:
:>json string query: Query text
:>json string query_hash: Hash of query text
:>json string schedule: Schedule interval, in seconds, for repeated execution of this query
:>json string api_key: Key for public access to this query's results.
:>json boolean is_archived: Whether this query is displayed in indexes and search results or not.
:>json boolean is_draft: Whether this query is a draft or not
:>json string updated_at: Time of last modification, in ISO format
:>json string created_at: Time of creation, in ISO format
:>json number data_source_id: ID of the data source this query will run on
:>json object options: Query options
:>json number version: Revision version (for update conflict avoidance)
:>json number user_id: ID of query creator
:>json number last_modified_by_id: ID of user who last modified this query
:>json string retrieved_at: Time when query results were last retrieved, in ISO format (may be null)
:>json number runtime: Runtime of last query execution, in seconds (may be null)
"""
query_def = request.get_json(force=True)
data_source = models.DataSource.get_by_id_and_org(query_def.pop('data_source_id'), self.current_org)
require_access(data_source.groups, self.current_user, not_view_only)
for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'last_modified_by']:
query_def.pop(field, None)
query_def['query_text'] = query_def.pop('query')
query_def['user'] = self.current_user
query_def['data_source'] = data_source
query_def['org'] = self.current_org
query_def['is_draft'] = True
query = models.Query.create(**query_def)
models.db.session.add(query)
models.db.session.commit()
self.record_event({
'action': 'create',
'object_id': query.id,
'object_type': 'query'
})
return QuerySerializer(query).serialize()
@require_permission('view_query')
def get(self):
"""
Retrieve a list of queries.
:qparam number page_size: Number of queries to return per page
:qparam number page: Page number to retrieve
        :qparam string order: Name of column to order by
        :qparam string q: Full text search term
Responds with an array of :ref:`query <query-response-label>` objects.
"""
# See if we want to do full-text search or just regular queries
search_term = request.args.get('q', '')
if search_term:
results = models.Query.search(
search_term,
self.current_user.group_ids,
self.current_user.id,
include_drafts=True,
)
else:
results = models.Query.all_queries(
self.current_user.group_ids,
self.current_user.id,
drafts=True,
)
results = filter_by_tags(results, models.Query.tags)
# order results according to passed order parameter
ordered_results = order_results(results)
page = request.args.get('page', 1, type=int)
page_size = request.args.get('page_size', 25, type=int)
response = paginate(
ordered_results,
page=page,
page_size=page_size,
serializer=QuerySerializer,
with_stats=True,
with_last_modified_by=False
)
if search_term:
self.record_event({
'action': 'search',
'object_type': 'query',
'term': search_term,
})
else:
self.record_event({
'action': 'list',
'object_type': 'query',
})
return response
class MyQueriesResource(BaseResource):
@require_permission('view_query')
def get(self):
"""
Retrieve a list of queries created by the current user.
:qparam number page_size: Number of queries to return per page
:qparam number page: Page number to retrieve
        :qparam string order: Name of column to order by
        :qparam string q: Full text search term
Responds with an array of :ref:`query <query-response-label>` objects.
"""
search_term = request.args.get('q', '')
if search_term:
results = models.Query.search_by_user(search_term, self.current_user)
else:
results = models.Query.by_user(self.current_user)
results = filter_by_tags(results, models.Query.tags)
# order results according to passed order parameter
ordered_results = order_results(results)
page = request.args.get('page', 1, type=int)
page_size = request.args.get('page_size', 25, type=int)
return paginate(
ordered_results,
page,
page_size,
QuerySerializer,
with_stats=True,
with_last_modified_by=False,
)
class QueryResource(BaseResource):
@require_permission('edit_query')
def post(self, query_id):
"""
Modify a query.
:param query_id: ID of query to update
:<json number data_source_id: The ID of the data source this query will run on
:<json string query: Query text
:<json string name:
:<json string description:
:<json string schedule: Schedule interval, in seconds, for repeated execution of this query
:<json object options: Query options
Responds with the updated :ref:`query <query-response-label>` object.
"""
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
query_def = request.get_json(force=True)
require_object_modify_permission(query, self.current_user)
for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'user', 'last_modified_by', 'org']:
query_def.pop(field, None)
if 'query' in query_def:
query_def['query_text'] = query_def.pop('query')
query_def['last_modified_by'] = self.current_user
query_def['changed_by'] = self.current_user
# SQLAlchemy handles the case where a concurrent transaction beats us
# to the update. But we still have to make sure that we're not starting
# out behind.
if 'version' in query_def and query_def['version'] != query.version:
abort(409)
try:
self.update_model(query, query_def)
models.db.session.commit()
except StaleDataError:
abort(409)
return QuerySerializer(query, with_visualizations=True).serialize()
@require_permission('view_query')
def get(self, query_id):
"""
Retrieve a query.
:param query_id: ID of query to fetch
Responds with the :ref:`query <query-response-label>` contents.
"""
q = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
require_access(q.groups, self.current_user, view_only)
result = QuerySerializer(q, with_visualizations=True).serialize()
result['can_edit'] = can_modify(q, self.current_user)
self.record_event({
'action': 'view',
'object_id': query_id,
'object_type': 'query',
})
return result
# TODO: move to resource of its own? (POST /queries/{id}/archive)
def delete(self, query_id):
"""
Archives a query.
:param query_id: ID of query to archive
"""
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
require_admin_or_owner(query.user_id)
query.archive(self.current_user)
models.db.session.commit()
class QueryForkResource(BaseResource):
@require_permission('edit_query')
def post(self, query_id):
"""
Creates a new query, copying the query text from an existing one.
:param query_id: ID of query to fork
Responds with created :ref:`query <query-response-label>` object.
"""
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
require_access(query.data_source.groups, self.current_user, not_view_only)
forked_query = query.fork(self.current_user)
models.db.session.commit()
self.record_event({
'action': 'fork',
'object_id': query_id,
'object_type': 'query',
})
return QuerySerializer(forked_query, with_visualizations=True).serialize()
class QueryRefreshResource(BaseResource):
def post(self, query_id):
"""
Execute a query, updating the query object with the results.
:param query_id: ID of query to execute
Responds with query task details.
"""
# TODO: this should actually check for permissions, but because currently you can only
# get here either with a user API key or a query one, we can just check whether it's
# an api key (meaning this is a query API key, which only grants read access).
if self.current_user.is_api_user():
abort(403, message="Please use a user API key.")
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
require_access(query.groups, self.current_user, not_view_only)
parameter_values = collect_parameters_from_request(request.args)
return run_query(query.data_source, parameter_values, query.query_text, query.id)
class QueryTagsResource(BaseResource):
def get(self):
"""
Returns all query tags including those for drafts.
"""
tags = models.Query.all_tags(self.current_user, include_drafts=True)
return {
'tags': [
{
'name': name,
'count': count,
}
for name, count in tags
]
}
|
|
#!/usr/bin/env python
"""
MCNPX Model for Cylindrical RPM8
"""
import sys
sys.path.append('../MCNPTools/')
sys.path.append('../')
from MCNPMaterial import Materials
import subprocess
import math
import mctal
import numpy as np
import itertools
import os
class CylinderRPM(object):
# Material Dictionaries
cellForStr = '{:5d} {:d} -{:4.3f} {:d} -{:d} u={:d}\n'
surfForStr = '{:5d} cz {:5.3f}\n'
tranForStr = '*tr{:d} {:4.3f} {:4.3f} 0.000\n'
geoParam={'RPM8Size':12.7,'DetectorThickness':0.01,'DetectorSpacing':0.8,
'CylinderLightGuideRadius':0.5,'CylinderRadius':2.5}
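    # The geometry parameters above are lengths in cm (MCNP's native length
    # unit); e.g. DetectorThickness = 0.01 cm corresponds to a 100 um film.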
def __init__(self,inp='INP.mcnp'):
""" Wrapped Cylinder MCNPX Model of RPM8
Keywords:
inp -- desired name of the input deck
"""
# Material dictionary for the moderator, light guide, and detector
self.material = {'Moderator':None,'Detector':None,'LightGuide':None}
self.material['Detector'] = {'name':'Detector','mt': 3, 'rho': 1.1,'matString':None} # detector
self.material['LightGuide'] = {'name': 'PMMA','mt':10, 'rho':0.93} # PMMA
        self.material['Moderator'] = {'name':'HDPE','mt':456, 'rho': 0.93} # HDPE
        # Cell and Surface Initial Numbering
self.CellStartNum = 600
self.SurfaceStartNum = 600
self.ZeroSurfaceNum = 500
self.UniverseNum = 200
self.surfGeo = None
self.inp = inp
self.name = 'OUT_'+self.inp.strip('.mcnp')+'.'
self.setMaterial(0.1,'PS')
def __str__(self):
s = '\tMCNPX Model of Wrapped Cylinder\n'
s += '\t Cell Number Starts: {0:d}\n'.format(self.CellStartNum)
s += '\t Surface Number Starts: {0:d}\n'.format(self.SurfaceStartNum)
return s
def getInteractionRate(self):
""" Returns the interaction rate """
m = mctal.MCTAL(self.name+'.m')
t = m.tallies[4]
        # Returning the total
return t.data[-1],t.errors[-1]
def setMaterial(self,massFraction,polymer):
"""
Sets the detector material
"""
M = Materials()
num = self.material['Detector']['mt']
if polymer == 'PS':
self.material['Detector']['matString'] = M.GetPSLiF(massFraction,num)
elif polymer == 'PEN':
self.material['Detector']['matString'] = M.GetPENLiF(massFraction,num)
else:
raise ValueError('Polymer {} is not in the material database'.format(polymer))
def createSurfaceGeo(self):
"""
Creates a dictionary of surface positions and cylinders
"""
self.surfGeo = dict()
r = self.geoParam['CylinderLightGuideRadius']
self.surfGeo[r] = 'LightGuide'
#self.material = {'Moderator':None,'Detector':None,'LightGuide':None}
while(r + self.geoParam['DetectorThickness'] < self.geoParam['CylinderRadius']):
r += self.geoParam['DetectorThickness']
self.surfGeo[r] = 'Detector'
r += self.geoParam['DetectorSpacing']
if (r < self.geoParam['CylinderRadius']):
self.surfGeo[r] = 'LightGuide'
return self.surfGeo
def calculateDetectorArea(self):
"""
Calculates the area used in a detector
"""
area = 0.0
r = self.geoParam['CylinderLightGuideRadius']
while(r + self.geoParam['DetectorThickness'] < self.geoParam['CylinderRadius']):
area -= math.pow(r,2)
r += self.geoParam['DetectorThickness']
area += math.pow(r,2)
r += self.geoParam['DetectorSpacing']
return math.pi*area
def createDetectorCylinder(self,uNum=1):
"""
Creates a detector cylinder
        Returns a tuple of (s, c, detectorCells, cellsCreated):
            s - the surface string
            c - the cell string
            detectorCells - a list of the cell numbers corresponding to the detector cells
            cellsCreated - the number of cells created
"""
cellsCreated = 0
sNum = self.SurfaceStartNum
cNum = self.CellStartNum
detectorCells = list()
s = '{:5d} rcc 0 0 0 0 0 217.7 {}\n'.format(self.SurfaceStartNum,self.geoParam['CylinderRadius'])
c = ''
keyList = sorted(self.surfGeo.keys(), key = lambda x: float(x))
for key in keyList:
sPrev = sNum
sNum += 1
cNum += 1
s += self.surfForStr.format(sNum,key)
m = self.material[self.surfGeo[key]]
if cNum == self.CellStartNum+1:
c+= '{:5d} {:d} -{:4.3f} -{:d} u={:d}\n'.format(cNum,m['mt'],m['rho'],sNum,uNum)
else:
c += self.cellForStr.format(cNum,m['mt'],m['rho'],sPrev,sNum,uNum)
# List of cells for the detector
            if self.surfGeo[key] == 'Detector':
detectorCells.append(cNum)
cellsCreated += 1
# Last cell up to universe boundary
m = self.material['Moderator']
c += '{:5d} {:d} -{:4.3f} {:d} u={:d}\n'.format(cNum+1,m['mt'],m['rho'],sNum,uNum)
cellsCreated += 1
return s,c,detectorCells,cellsCreated
def runModel(self):
"""
        Runs the model by submitting it to Torque / Maui
"""
qsub= subprocess.check_output('which qsub',shell=True).strip()
cmd = '#!/bin/bash\n'
cmd += '#PBS -N {0}\n#PBS -V\n#PBS -q gen1\n#PBS -l nodes=1:ppn=1\n'
cmd += 'cd $PBS_O_WORKDIR\nmpirun mcnpx inp={1} name={2}\n'
job = cmd.format('Job_RPMCylinder',self.inp,self.name)
with open('qsub','w') as o:
o.write(job)
subprocess.call(qsub+' qsub',shell=True)
subprocess.call('rm qsub',shell=True)
def createInputDeck(self,cylinderPositions,inp=None,name=None):
""" createInputDeck
Creates an input deck of the given geometry
"""
self.inp = inp
self.name = name
if not inp:
self.inp = 'INP_Cylinder.mcnp'
if not name:
self.name = 'OUT_Cylinder.'
oFile = self.inp
# Problem Constants
cellString = 'c ------------------------- Source ----------------------------------------\n'
cellString += '70 5 -15.1 -70 $ 252Cf source \n'
cellString += '71 406 -11.34 -71 70 $ Lead around source\n'
cellString += '72 456 -0.93 -72 71 $ Poly around source\n'
surfString = 'c ########################### Surface Cards ##############################\n'
surfString += 'c ------------------- Encasing Bounds (Size of RPM8) ---------------------\n'
surfString += '500 rpp 0 12.7 -15.25 15.25 0 217.7 \n'
# Add in other cells here
numCells = 4 # 3 Source, 1 RPM8 Encasing
##################################################################
# Add in Detector Cells and Surfaces #
##################################################################
universeNum = 1
(s,c,detectorCells,cellsCreated) = self.createDetectorCylinder(universeNum)
surfString += s
cellString += 'c ------------------- Detector Cylinder Universe ------------------------\n'
cellString += c
transNum = 1
uCellNum = self.UniverseNum
transString = ''
cellString += 'c ----------------------- Detector Universe ----------------------------\n'
for pos in cylinderPositions:
transString += self.tranForStr.format(transNum,pos[0],pos[1])
cellString += '{:5d} 0 -{:d} trcl={:d} fill={:d}\n'.format(uCellNum,self.SurfaceStartNum,transNum,universeNum)
transNum +=1
uCellNum +=1
# Adding the PMMA Moderator Block
m = self.material['Moderator']
cellString += 'c ------------------------- HDPE Moderator -----------------------------\n'
cellString += '{:5d} {:d} -{:4.3f} -{:d} '.format(500,m['mt'],m['rho'],self.ZeroSurfaceNum)
cellString += ''.join('#{:d} '.format(i) for i in range(self.UniverseNum,uCellNum))
cellString += '\n'
# Getting total number of cells
numCells += cellsCreated + uCellNum-self.UniverseNum +1
##################################################################
# Write the Tallies #
##################################################################
univCells = range(self.UniverseNum,uCellNum)
tallyString = 'c ------------------------- Tallies Yo! -----------------------------------\n'
tallies = {'F54:n':{'cells':detectorCells,'comments':'FC54 6Li Reaction Rates\n',
'options':' T\nSD54 1 {0:d}R\nFM54 -1 3 105'}}
for t in tallies:
# Getting a list of cells
tallyString += tallies[t]['comments']
tallyString += str(t)+' '
j = 0
for u in univCells:
cell = list('('+str(c)+'<'+str(u)+') ' for c in tallies[t]['cells'])
cell = [cell[i:i+6] for i in range(0,len(cell),6)]
if j > 0:
tallyString += ' '+''.join(''.join(i)+'\n' for i in cell)
else:
tallyString += ' '.join(''.join(i)+'\n' for i in cell)
j +=1
tallyString = tallyString.rstrip()
tallyString += tallies[t]['options'].format(len(univCells)*len(tallies[t]['cells']))
tallyString+='\n'
# Finish up the problem data
cellString += 'c ---------------------- Detector Encasing ------------------------------\n'
cellString += '700 488 -7.92 701 -700 $ SS-316 Encasing \n'
cellString += 'c -------------------------- Outside World -------------------------------\n'
cellString += '1000 204 -0.001225 -1000 700 #70 #71 #72 $ Atmosphere \n'
cellString += '1001 0 1000 \n'
surfString += 'c ------------------------ Encasing Material -----------------------------\n'
surfString += '700 rpp -0.3175 13.018 -15.5675 15.5675 -0.3175 218.018 \n'
surfString += '701 rpp 0.0 12.7 -15.25 15.25 0.0 217.7 \n'
surfString += 'c -------------- Source --------------------------------------------------\n'
surfString += '70 s -200 0 108.85 2.510E-04 $ Source \n'
surfString += '71 s -200 0 108.85 5.0025E-01 $ 0.5 cm lead surrounding source \n'
surfString += '72 s -200 0 108.85 3.00025 $ 2.5 cm poly surrounding source \n'
surfString += 'c -------------- Outside World -------------------------------------------\n'
surfString += '1000 so 250 \n'
matString = 'c -------------------------- Material Cards -----------------------------\n'
matString += self.material['Detector']['matString']
matString += self.getMaterialString()
with open(oFile,'w') as o:
o.write('MCNPX Simulation of RPM8 Cylinder\n')
o.write(cellString)
o.write('\n')
o.write(surfString)
o.write('\n')
o.write(self.getRunString().format(numCells))
o.write(self.getSrcString())
o.write(tallyString)
o.write(matString)
o.write(transString)
o.write('\n')
def getRunString(self):
runString ='c ------------------------------ Run Info ---------------------------------\n'
runString +='nps 1E6 \n'
runString +='IMP:N 1 {0:d}R 0 $ Particle Importances within cells \n'
runString +='c -------------- Output --------------------------------------------------\n'
runString +='PRDMP j j 1 $ Write a MCTAL File \n'
runString +='PRINT 40 \n'
runString +='c ------------------------------ Physics ---------------------------------\n'
runString +='MODE N \n'
runString +='PHYS:N 100 4j -1 2 \n'
runString +='CUT:N 2j 0 0 \n'
return runString
def getSrcString(self):
"""
Returns the MCNPX formated source string
"""
        srcString = 'c -------------------------- Source Definition ----------------------------\n'
srcString += 'c 1 nanogram Cf-252 source = 1E-9 grams = 6.623E-11 cc \n'
srcString += 'sdef pos=-200 0 108.85 cel=70 par=SF rad=d1 \n'
srcString += 'si1 0 2.510E-04 \n'
srcString += 'sp1 -21 1 \n'
return srcString
def getMaterialString(self):
"""
        Returns the MCNPX material string
"""
matString = 'm10 1001.70c -0.080538 $Lucite (PMMA / Plexiglass) rho = 1.19 g/cc\n'
matString += ' 6012.70c -0.599848 8016.70c -0.319614 \n'
matString += 'm204 7014.70c -0.755636 $air (US S. Atm at sea level) rho = 0.001225 \n'
matString += ' 8016.70c -0.231475 18036.70c -3.9e-005 18038.70c -8e-006\n'
matString += ' 18040.70c -0.012842 \n'
matString += 'm5 98252.66c 1 $ Cf-252, rho =15.1 g/cc wiki \n'
matString += 'm406 82204.70c -0.013781 $Lead, \n'
matString += ' 82206.70c -0.239557 82207.70c -0.220743 82208.70c -0.525919\n'
matString += 'm456 1001.70c -0.143716 $Polyethylene - rho = 0.93 g/cc \n'
matString += ' 6000.70c -0.856284 \n'
matString += 'm488 14028.70c -0.009187 $Steel, Stainless 316 rho = 7.92 \n'
matString += ' 14029.70c -0.000482 14030.70c -0.000331 24050.70c -0.007095\n'
matString += ' 24052.70c -0.142291 24053.70c -0.016443 24054.70c -0.004171\n'
matString += ' 25055.70c -0.02 26054.70c -0.037326 26056.70c -0.601748\n'
matString += ' 26057.70c -0.014024 26058.70c -0.001903 28058.70c -0.080873\n'
matString += ' 28060.70c -0.031984 28061.70c -0.001408 28062.70c -0.004546\n'
matString += ' 28064.70c -0.001189 42092.70c -0.003554 42094.70c -0.002264\n'
matString += ' 42095.70c -0.003937 42096.70c -0.004169 42097.70c -0.002412\n'
matString += ' 42098.70c -0.006157 42100.70c -0.002507 \n'
matString += 'mt3 poly.01t \n'
matString += 'mt456 poly.01t \n'
matString += 'mt10 poly.01t \n'
return matString
def run(loading,polymers):
"""
Runs a matrix of loading and polymers
"""
cylinderPositions = ((4.23,10.16),(4.23,-10.16))
cylinderPositions = ((4.23,7.625),(4.23,0),(4.23,-7.625))
cylinderPositions = ((4.23,9.15),(4.23,3.05),(4.23,-3.05),(4.23,-9.15))
cylinderPositions = ((4.23,10.16),(4.23,5.08),(4.23,0.0),(4.23,-5.08),(4.23,-10.16))
for l in loading:
for p in polymers:
RunCylinder(l,p,cylinderPositions)
def RunCylinder(l,p,cylinderPositions):
"""
Runs an mcnpx model of the cylinder of loading l, polymer p, with
cylinder positions cylinderPositions.
Keywords:
l - loading of the films
p - polymer
        cylinderPositions - the cylinder positions
"""
# Creating input and output deck names
posString = ''
for pos in cylinderPositions:
posString += '{:2.1f}-'.format(pos[0])
posString = posString.rstrip('-')
inp='Cyl_{}LiF_{}_{}.mcnp'.format(int(l*100),p,posString)
name='OUTCyl_{}LiF_{}_{}.'.format(int(l*100),p,posString)
print inp
# Creating and running the model
m = CylinderRPM()
m.createSurfaceGeo()
m.setMaterial(l,p)
m.createDetectorCylinder()
m.createInputDeck(cylinderPositions,inp,name)
m.runModel()
def CreatePositions(yPos,numXPertubations):
"""
    Creates and returns an array of positions, using a fixed array of y
    positions and numXPertubations equally spaced x values.
    Keywords:
        yPos - the y positions (i.e. the spacing of the cylinders). The
            number of elements in this array corresponds to the number of
            cylinders that are simulated.
        numXPertubations - the number of perturbations in x. The returned
            positions are spaced linearly in x from 2.54 to 10 cm.
"""
pos = list()
xVals = np.linspace(2.54,10,numXPertubations)
xPos = [i for i in itertools.product(xVals,repeat=len(yPos))]
for x in xPos:
pos.append(zip(x,yPos))
return pos
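# Illustrative usage (hypothetical values): with three fixed y positions and 4
# x perturbations, CreatePositions returns 4**3 = 64 candidate layouts, each a
# list of (x, y) pairs:
#     positions = CreatePositions((7.625, 0, -7.625), 4)
#     # len(positions) == 64
#     # positions[0] == [(2.54, 7.625), (2.54, 0), (2.54, -7.625)]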
def PositionOptimization(loading,polymers,positions):
"""
Runs a matrix of loading, polymers and positions
"""
for l in loading:
for p in polymers:
for pos in positions:
RunCylinder(l,p,pos)
def createInputPlotDecks():
positions = list()
positions.append(((4.23,10.16),(4.23,-10.16)))
positions.append(((4.23,7.625),(4.23,0),(4.23,-7.625)))
#positions.append(((4.23,9.15),(4.23,3.05),(4.23,-3.05),(4.23,-9.15)))
for pos in positions:
m = CylinderRPM()
m.createSurfaceGeo()
m.createDetectorCylinder()
inp='Cylinder_{}.mcnp'.format(len(pos))
name='OUTCylinder_{}.'.format(len(pos))
m.createInputDeck(pos,inp,name)
def computeMassLi(polymer,loading,density=1.1):
"""
Computes the mass of Li for a given polymer and loading
"""
M = Materials()
m = CylinderRPM()
area = m.calculateDetectorArea()
massLi = area*217.0*M.GetLiMassFraction(loading,polymer)*density
return massLi
def extractRunInfo(filename):
"""
Extracts the loading and polymer from the file name
"""
tokens = filename.split('_')
loading = tokens[1].strip('LiF')
polymer = tokens[2].strip('.m')
return (float(loading)/100, polymer)
###########################################################################
# #
#                          Summarizes / Analysis                          #
# #
###########################################################################
def GetInteractionRate(f,tallyNum=54,src=2.3E3):
"""
Returns the interaction rate of the mctal file
"""
m = mctal.MCTAL(f)
t = m.tallies[tallyNum]
return (t.data[-1]*src,t.errors[-1]*t.data[-1]*src)
import glob
def summerize():
files = glob.glob('OUTCylinder*.m')
s = 'Polymer, loading, mass Li, count rate, error, count rate per mass\n'
for f in files:
runParam = extractRunInfo(f)
massLi = computeMassLi(runParam[1],runParam[0])
countRate = GetInteractionRate(f)
s += '{}, {:5.2f} , {:5.3f} , {:5.3f} , {:4.2f} , {:5.3f}\n'.format(runParam[1].ljust(7),runParam[0],massLi,countRate[0],countRate[1],countRate[0]/massLi)
print s
def OptimizationSummary(path):
"""
    Summarizes the Optimization Output
"""
# Getting the files
if not os.path.isdir(path):
raise IOError('Path {} is not found'.format(path))
files = glob.glob(path+'/*.m')
if not files:
print 'No files matched the pattern'
return
# Parsing the files
data = dict()
for f in files:
name = os.path.splitext(os.path.split(f)[1])[0]
data[name] = GetInteractionRate(f)
# Max value
sortedKeys = sorted(data, key=data.get,reverse=True)
#sortedKeys = sorted(data.items(), key=lambda x : float(x[1][0]),reverse=True)
for key in sortedKeys[0:9]:
print '{} -> {:5.2f} +/- {:5.2f}'.format(key,data[key][0],data[key][1])
for key in sortedKeys[-6:-1]:
print '{} -> {:5.2f} +/- {:5.2f}'.format(key,data[key][0],data[key][1])
def cleanup(path):
files = glob.glob(path+'/OUTCyl_*.m')
for f in files:
head,tail = os.path.split(f)
numCylinders = tail.count('-')+1
if numCylinders == 3:
newdir = 'ThreeCylPosOpt'
elif numCylinders == 4:
newdir = 'FourCylPosOpt'
elif numCylinders == 5:
newdir = 'FiveCylPosOpt'
os.rename(f,os.path.join(newdir,tail))
###########################################################################
# #
# MAIN #
# #
###########################################################################
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-r','--run',action="store_true",
        default=False,help='Runs the cylinders for multiple polymers and percent loadings')
parser.add_argument('-p','--plot',action="store_true",
default=False,help='Creates input decks for plotting')
parser.add_argument('-c','--clean',action="store_true",
default=False,help='Cleans up the files')
parser.add_argument('-a','--analysis',action="store_true",default=False,help="Analyze the results")
    parser.add_argument('path', nargs='?', default='CylPosOpt',help='Specify the output directory to summarize')
    parser.add_argument('-o','--optimize',action='store',type=int,default=-1,help='Run a number of optimizations on the positions. If 0 is entered a summary is performed on the directory provided with path')
    parser.add_argument('loading',metavar='loading',type=float,nargs='*',action="store",default=(0.1,0.2,0.3),help='Percent loading of LiF')
args = parser.parse_args()
if args.run:
run(args.loading,('PS','PEN'))
if args.plot:
createInputPlotDecks()
if args.optimize > 0:
yPos = (7.625,0,-7.625)
yPos = (9.15,3.05,-3.05,-9.15)
#yPos = (10.16,5.08,0.0,-5.08,-10.16)
pos = CreatePositions(yPos,args.optimize)
loading = (0.3,)
polymers = ('PS',)
PositionOptimization(loading,polymers,pos)
if args.optimize == 0:
OptimizationSummary(args.path)
if args.analysis:
summerize()
if args.clean:
cleanup(os.getcwd())
|
|
# Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a pair of recurrant models for the Stack Overflow next word prediction task.
Modified version of
tff.simulation.baselines.stackoverflow.create_word_prediction_task and dependent
functions which allows for different sized recurrant models
"""
import functools
import tensorflow as tf
import tensorflow_federated as tff
class TransposableEmbedding(tf.keras.layers.Layer):
"""A Keras layer implementing a transposed projection output layer."""
def __init__(self, embedding_layer: tf.keras.layers.Embedding):
super().__init__()
self.embeddings = embedding_layer.embeddings
# Placing `tf.matmul` under the `call` method is important for backpropagating
# the gradients of `self.embeddings` in graph mode.
def call(self, inputs):
return tf.matmul(inputs, self.embeddings, transpose_b=True)
def create_recurrent_model(vocab_size: int,
embedding_size: int = 96,
num_lstm_layers: int = 1,
lstm_size: int = 670,
shared_embedding: bool = False) -> tf.keras.Model:
"""Constructs a recurrent model with an initial embeding layer.
The resulting model embeds sequences of integer tokens (whose values vary
between `0` and `vocab_size-1`) into an `embedding_size`-dimensional space.
It then applies `num_lstm_layers` LSTM layers, each of size `lstm_size`.
Each LSTM is followed by a dense layer mapping the output to `embedding_size`
units. The model then has a final dense layer mapping to `vocab_size` logits
units. Note that this model does not compute any kind of softmax on the final
logits. This should instead be done in the loss function for the purposes of
backpropagation.
Args:
vocab_size: Vocabulary size to use in the initial embedding layer.
embedding_size: The size of the embedding layer.
num_lstm_layers: The number of LSTM layers in the model.
lstm_size: The size of each LSTM layer.
shared_embedding: If set to `True`, the final layer of the model is a dense
layer given by the transposition of the embedding layer. If `False`, the
final dense layer is instead learned separately.
Returns:
An uncompiled `tf.keras.Model`.
"""
if vocab_size < 1:
raise ValueError('vocab_size must be a positive integer.')
if embedding_size < 1:
raise ValueError('embedding_size must be a positive integer.')
if num_lstm_layers < 1:
raise ValueError('num_lstm_layers must be a positive integer.')
if lstm_size < 1:
raise ValueError('lstm_size must be a positive integer.')
inputs = tf.keras.layers.Input(shape=(None,))
input_embedding = tf.keras.layers.Embedding(
input_dim=vocab_size, output_dim=embedding_size, mask_zero=True)
embedded = input_embedding(inputs)
projected = embedded
for _ in range(num_lstm_layers):
layer = tf.keras.layers.LSTM(lstm_size, return_sequences=True)
processed = layer(projected)
projected = tf.keras.layers.Dense(embedding_size)(processed)
if shared_embedding:
transposed_embedding = TransposableEmbedding(input_embedding)
logits = transposed_embedding(projected)
else:
logits = tf.keras.layers.Dense(vocab_size, activation=None)(projected)
return tf.keras.Model(inputs=inputs, outputs=logits)
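# Illustrative usage (sizes taken from the defaults used elsewhere in this
# module):
#     model = create_recurrent_model(vocab_size=10004, embedding_size=72,
#                                    num_lstm_layers=1, lstm_size=503)
# The returned model is uncompiled; losses and metrics are attached when it is
# wrapped with tff.learning.from_keras_model below.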
def make_big_and_small_stackoverflow_model_fn(my_task,
vocab_size=10000,
num_out_of_vocab_buckets=1,
big_embedding_size=96,
big_lstm_size=670,
small_embedding_size=72,
small_lstm_size=503):
"""Generates two model functions for a given task.
This code is a modified version of
tff.simulation.baselines.stackoverflow.create_word_prediction_task
Args:
my_task: a tff.simulation.baselines.BaselineTask object
vocab_size: an integer specifying the vocab size
num_out_of_vocab_buckets: an integer specifying the number of out of vocab
buckets
big_embedding_size: an integer specifying the size of the embedding layer of
the big model
big_lstm_size: an integer specifying the size of the lstm layer of the big
model
small_embedding_size: an integer specifying the size of the embedding layer
of the small model
small_lstm_size: an integer specifying the size of the lstm layer of the
small model
Returns:
Two model_fn functions
"""
extended_vocab_size = vocab_size + 3 + num_out_of_vocab_buckets
def big_stackoverflownwp_rnn_model_fn():
return tff.learning.from_keras_model(
keras_model=create_recurrent_model(
vocab_size=extended_vocab_size,
embedding_size=big_embedding_size,
lstm_size=big_lstm_size),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
input_spec=my_task.datasets.element_type_structure,
)
  # the standard size corresponding to the stackoverflow baseline task
# has embedding_size=96, lstm_size=670
def small_stackoverflownwp_rnn_model_fn():
return tff.learning.from_keras_model(
keras_model=create_recurrent_model(
vocab_size=extended_vocab_size,
embedding_size=small_embedding_size,
lstm_size=small_lstm_size),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
input_spec=my_task.datasets.element_type_structure,
)
return big_stackoverflownwp_rnn_model_fn, small_stackoverflownwp_rnn_model_fn
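# Illustrative usage sketch (assumes `my_task` was built beforehand, e.g. with
# tff.simulation.baselines.stackoverflow.create_word_prediction_task):
#     big_fn, small_fn = make_big_and_small_stackoverflow_model_fn(my_task)
#     big_model = big_fn()      # wraps the 96/670 recurrent network
#     small_model = small_fn()  # wraps the 72/503 recurrent network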
def create_conv_dropout_model(conv1_filters=32,
conv2_filters=64,
dense_size=128,
only_digits: bool = True) -> tf.keras.Model:
"""Create a convolutional network with dropout.
When `only_digits=True`, the summary of returned model is
```
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
reshape (Reshape) (None, 28, 28, 1) 0
_________________________________________________________________
conv2d (Conv2D) (None, 26, 26, 32) 320
_________________________________________________________________
conv2d_1 (Conv2D) (None, 24, 24, 64) 18496
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 12, 12, 64) 0
_________________________________________________________________
dropout (Dropout) (None, 12, 12, 64) 0
_________________________________________________________________
flatten (Flatten) (None, 9216) 0
_________________________________________________________________
dense (Dense) (None, 128) 1179776
_________________________________________________________________
dropout_1 (Dropout) (None, 128) 0
_________________________________________________________________
dense_1 (Dense) (None, 10) 1290
=================================================================
Total params: 1,199,882
Trainable params: 1,199,882
Non-trainable params: 0
```
For `only_digits=False`, the last dense layer is slightly larger.
Args:
conv1_filters: The number of convolutional filters in the 1st convolutional
layer
conv2_filters: The number of convolutional filters in the 2nd convolutional
layer
dense_size: The number of neurons in the last dense layer
only_digits: If `True`, uses a final layer with 10 outputs, for use with the
digits only EMNIST dataset. If `False`, uses 62 outputs for the larger
dataset.
Returns:
An uncompiled `tf.keras.Model`.
"""
data_format = 'channels_last'
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(
conv1_filters,
kernel_size=(3, 3),
activation='relu',
data_format=data_format,
input_shape=(28, 28, 1)),
tf.keras.layers.Conv2D(
conv2_filters,
kernel_size=(3, 3),
activation='relu',
data_format=data_format),
tf.keras.layers.MaxPool2D(pool_size=(2, 2), data_format=data_format),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(dense_size, activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(
10 if only_digits else 62, activation=tf.nn.softmax),
])
return model
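# Illustrative usage (hypothetical sizes): create_conv_dropout_model(only_digits=False)
# builds the 32/64/128 dropout CNN with 62 output classes; passing smaller
# filter/dense arguments yields the reduced variants used by the helper
# functions further below.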
def create_conv_dropout_model_mfactor(
conv1_filters=32,
conv2_filters=64,
dense_size=128,
mfactor1=1.0,
mfactor2=1.0,
mfactor_dense=1.0,
only_digits: bool = True) -> tf.keras.Model:
"""Create a convolutional network with dropout.
When `only_digits=True`, the summary of returned model is
```
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
reshape (Reshape) (None, 28, 28, 1) 0
_________________________________________________________________
conv2d (Conv2D) (None, 26, 26, 32) 320
_________________________________________________________________
conv2d_1 (Conv2D) (None, 24, 24, 64) 18496
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 12, 12, 64) 0
_________________________________________________________________
dropout (Dropout) (None, 12, 12, 64) 0
_________________________________________________________________
flatten (Flatten) (None, 9216) 0
_________________________________________________________________
dense (Dense) (None, 128) 1179776
_________________________________________________________________
dropout_1 (Dropout) (None, 128) 0
_________________________________________________________________
dense_1 (Dense) (None, 10) 1290
=================================================================
Total params: 1,199,882
Trainable params: 1,199,882
Non-trainable params: 0
```
For `only_digits=False`, the last dense layer is slightly larger.
Args:
conv1_filters: The number of convolutional filters in the 1st convolutional
layer
conv2_filters: The number of convolutional filters in the 2nd convolutional
layer
dense_size: The number of neurons in the last dense layer
mfactor1: The multiplicative scaling applied after the first convolutional
layer
mfactor2: The multiplicative scaling applied after the second convolutional
layer
mfactor_dense: The multiplicative scaling applied after the dense layer
only_digits: If `True`, uses a final layer with 10 outputs, for use with the
digits only EMNIST dataset. If `False`, uses 62 outputs for the larger
dataset.
Returns:
An uncompiled `tf.keras.Model`.
"""
data_format = 'channels_last'
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(
conv1_filters,
kernel_size=(3, 3),
activation='relu',
data_format=data_format,
input_shape=(28, 28, 1)),
tf.keras.layers.Lambda(lambda x: mfactor1 * x),
tf.keras.layers.Conv2D(
conv2_filters,
kernel_size=(3, 3),
activation='relu',
data_format=data_format),
tf.keras.layers.Lambda(lambda x: mfactor2 * x),
tf.keras.layers.MaxPool2D(pool_size=(2, 2), data_format=data_format),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(dense_size, activation='relu'),
tf.keras.layers.Lambda(lambda x: mfactor_dense * x),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(
10 if only_digits else 62, activation=tf.nn.softmax),
])
return model
def create_original_fedavg_cnn_model(
conv1_filters=32,
conv2_filters=64,
dense_size=512,
only_digits: bool = True) -> tf.keras.Model:
"""Create a convolutional network without dropout.
This recreates the CNN model used in the original FedAvg paper,
https://arxiv.org/abs/1602.05629. The number of parameters when
`only_digits=True` is (1,663,370), which matches what is reported in the
paper. When `only_digits=True`, the summary of returned model is
```
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
reshape (Reshape) (None, 28, 28, 1) 0
_________________________________________________________________
conv2d (Conv2D) (None, 28, 28, 32) 832
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 14, 14, 32) 0
_________________________________________________________________
conv2d_1 (Conv2D) (None, 14, 14, 64) 51264
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 7, 7, 64) 0
_________________________________________________________________
flatten (Flatten) (None, 3136) 0
_________________________________________________________________
dense (Dense) (None, 512) 1606144
_________________________________________________________________
dense_1 (Dense) (None, 10) 5130
=================================================================
Total params: 1,663,370
Trainable params: 1,663,370
Non-trainable params: 0
```
For `only_digits=False`, the last dense layer is slightly larger.
Args:
conv1_filters: The number of convolutional filters in the 1st convolutional
layer
conv2_filters: The number of convolutional filters in the 2nd convolutional
layer
dense_size: The number of neurons in the last dense layer
only_digits: If `True`, uses a final layer with 10 outputs, for use with the
digits only EMNIST dataset. If `False`, uses 62 outputs for the larger
dataset.
Returns:
An uncompiled `tf.keras.Model`.
"""
data_format = 'channels_last'
max_pool = functools.partial(
tf.keras.layers.MaxPooling2D,
pool_size=(2, 2),
padding='same',
data_format=data_format)
conv2d = functools.partial(
tf.keras.layers.Conv2D,
kernel_size=5,
padding='same',
data_format=data_format,
activation=tf.nn.relu)
model = tf.keras.models.Sequential([
conv2d(filters=conv1_filters, input_shape=(28, 28, 1)),
max_pool(),
conv2d(filters=conv2_filters),
max_pool(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(dense_size, activation=tf.nn.relu),
tf.keras.layers.Dense(
10 if only_digits else 62, activation=tf.nn.softmax),
])
return model
def make_big_and_small_emnist_cnn_model_fn(my_task,
big_conv1_filters=32,
big_conv2_filters=64,
big_dense_size=512,
small_conv1_filters=24,
small_conv2_filters=48,
small_dense_size=384):
"""Generates two model functions for a given task.
Args:
my_task: a tff.simulation.baselines.BaselineTask object
big_conv1_filters: The number of convolutional filters in the 1st
convolutional layer of the big model
big_conv2_filters: The number of convolutional filters in the 2nd
convolutional layer of the big model
big_dense_size: The number of neurons in the last dense layer of the big
model
small_conv1_filters: The number of convolutional filters in the 1st
convolutional layer of the small model
small_conv2_filters: The number of convolutional filters in the 2nd
convolutional layer of the small model
small_dense_size: The number of neurons in the last dense layer of the small
model
Returns:
Two model_fn functions
"""
def big_model_fn():
return tff.learning.from_keras_model(
keras_model=create_original_fedavg_cnn_model(
only_digits=False,
conv1_filters=big_conv1_filters,
conv2_filters=big_conv2_filters,
dense_size=big_dense_size),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
input_spec=my_task.datasets.element_type_structure,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
def small_model_fn():
return tff.learning.from_keras_model(
keras_model=create_original_fedavg_cnn_model(
only_digits=False,
conv1_filters=small_conv1_filters,
conv2_filters=small_conv2_filters,
dense_size=small_dense_size),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
input_spec=my_task.datasets.element_type_structure,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
return big_model_fn, small_model_fn
def make_big_and_small_emnist_cnn_dropout_model_fn(my_task,
big_conv1_filters=32,
big_conv2_filters=64,
big_dense_size=128,
small_conv1_filters=24,
small_conv2_filters=48,
small_dense_size=96):
"""Generates two model functions for a given task.
Args:
my_task: a tff.simulation.baselines.BaselineTask object
big_conv1_filters: The number of convolutional filters in the 1st
convolutional layer of the big model
big_conv2_filters: The number of convolutional filters in the 2nd
convolutional layer of the big model
big_dense_size: The number of neurons in the last dense layer of the big
model
small_conv1_filters: The number of convolutional filters in the 1st
convolutional layer of the small model
small_conv2_filters: The number of convolutional filters in the 2nd
convolutional layer of the small model
small_dense_size: The number of neurons in the last dense layer of the small
model
Returns:
Two model_fn functions.
"""
def big_model_fn():
return tff.learning.from_keras_model(
keras_model=create_conv_dropout_model(
only_digits=False,
conv1_filters=big_conv1_filters,
conv2_filters=big_conv2_filters,
dense_size=big_dense_size),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
input_spec=my_task.datasets.element_type_structure,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
def small_model_fn():
return tff.learning.from_keras_model(
keras_model=create_conv_dropout_model(
only_digits=False,
conv1_filters=small_conv1_filters,
conv2_filters=small_conv2_filters,
dense_size=small_dense_size),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
input_spec=my_task.datasets.element_type_structure,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
return big_model_fn, small_model_fn
def make_big_and_small_emnist_cnn_dropout_mfactor_model_fn(
my_task,
big_conv1_filters=32,
big_conv2_filters=64,
big_dense_size=128,
small_conv1_filters=24,
small_conv2_filters=48,
small_dense_size=96):
"""Generates two model functions for a given task.
Args:
my_task: a tff.simulation.baselines.BaselineTask object
big_conv1_filters: The number of convolutional filters in the 1st
convolutional layer of the big model
big_conv2_filters: The number of convolutional filters in the 2nd
convolutional layer of the big model
big_dense_size: The number of neurons in the last dense layer of the big
model
small_conv1_filters: The number of convolutional filters in the 1st
convolutional layer of the small model
small_conv2_filters: The number of convolutional filters in the 2nd
convolutional layer of the small model
small_dense_size: The number of neurons in the last dense layer of the small
model
Returns:
Two model_fn functions.
"""
def big_model_fn():
return tff.learning.from_keras_model(
keras_model=create_conv_dropout_model_mfactor(
only_digits=False,
conv1_filters=big_conv1_filters,
conv2_filters=big_conv2_filters,
dense_size=big_dense_size,
mfactor1=1.0,
mfactor2=1.0,
mfactor_dense=1.0),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
input_spec=my_task.datasets.element_type_structure,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
def small_model_fn():
return tff.learning.from_keras_model(
keras_model=create_conv_dropout_model_mfactor(
only_digits=False,
conv1_filters=small_conv1_filters,
conv2_filters=small_conv2_filters,
dense_size=small_dense_size,
mfactor1=tf.cast(
big_conv1_filters / small_conv1_filters, tf.float32
), # cast this as a float since these could be integers
mfactor2=tf.cast(big_conv2_filters / small_conv2_filters,
tf.float32),
mfactor_dense=tf.cast(big_dense_size / small_dense_size,
tf.float32)),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
input_spec=my_task.datasets.element_type_structure,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
return big_model_fn, small_model_fn
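# Illustrative sketch, not part of the original module: how the multiplier
# factors passed to create_conv_dropout_model_mfactor above appear to be
# derived. Each small-model layer is scaled by the ratio of the big layer's
# width to the small layer's width; plain Python floats are used here instead
# of tf.cast purely for demonstration.
def _example_mfactor_ratios(big=(32, 64, 128), small=(24, 48, 96)):
    """Return per-layer multiplier factors, e.g. (1.33..., 1.33..., 1.33...)."""
    return tuple(b / s for b, s in zip(big, small))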
|
|
# Natural Language Toolkit: Utility functions
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function
import locale
import re
import types
import textwrap
import pydoc
import bisect
import os
from itertools import islice, chain, combinations
from pprint import pprint
from collections import defaultdict, deque
from sys import version_info
from nltk.internals import slice_bounds, raise_unorderable_types
from nltk.collections import *
from nltk.compat import (class_types, text_type, string_types, total_ordering,
python_2_unicode_compatible, getproxies,
ProxyHandler, build_opener, install_opener,
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler, ProxyDigestAuthHandler)
######################################################################
# Short usage message
######################################################################
def usage(obj, selfname='self'):
import inspect
str(obj) # In case it's lazy, this will load it.
if not isinstance(obj, class_types):
obj = obj.__class__
print('%s supports the following operations:' % obj.__name__)
for (name, method) in sorted(pydoc.allmethods(obj).items()):
if name.startswith('_'): continue
if getattr(method, '__deprecated__', False): continue
args, varargs, varkw, defaults = inspect.getargspec(method)
if (args and args[0]=='self' and
(defaults is None or len(args)>len(defaults))):
args = args[1:]
name = '%s.%s' % (selfname, name)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults)
print(textwrap.fill('%s%s' % (name, argspec),
initial_indent=' - ',
subsequent_indent=' '*(len(name)+5)))
##########################################################################
# IDLE
##########################################################################
def in_idle():
"""
Return True if this function is run within idle. Tkinter
programs that are run in idle should never call ``Tk.mainloop``; so
this function should be used to gate all calls to ``Tk.mainloop``.
:warning: This function works by checking ``sys.stdin``. If the
user has modified ``sys.stdin``, then it may return incorrect
results.
:rtype: bool
"""
import sys
return sys.stdin.__class__.__name__ in ('PyShell', 'RPCProxy')
##########################################################################
# PRETTY PRINTING
##########################################################################
def pr(data, start=0, end=None):
"""
Pretty print a sequence of data items
:param data: the data stream to print
:type data: sequence or iter
:param start: the start position
:type start: int
:param end: the end position
:type end: int
"""
pprint(list(islice(data, start, end)))
def print_string(s, width=70):
"""
Pretty print a string, breaking lines on whitespace
:param s: the string to print, consisting of words and spaces
:type s: str
:param width: the display width
:type width: int
"""
print('\n'.join(textwrap.wrap(s, width=width)))
def tokenwrap(tokens, separator=" ", width=70):
"""
Pretty print a list of text tokens, breaking lines on whitespace
:param tokens: the tokens to print
:type tokens: list
:param separator: the string to use to separate tokens
:type separator: str
:param width: the display width (default=70)
:type width: int
"""
return '\n'.join(textwrap.wrap(separator.join(tokens), width=width))
##########################################################################
# Python version
##########################################################################
def py25():
return version_info[0] == 2 and version_info[1] == 5
def py26():
return version_info[0] == 2 and version_info[1] == 6
def py27():
return version_info[0] == 2 and version_info[1] == 7
##########################################################################
# Indexing
##########################################################################
class Index(defaultdict):
def __init__(self, pairs):
defaultdict.__init__(self, list)
for key, value in pairs:
self[key].append(value)
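# Illustrative usage sketch (not part of the original module): Index groups
# values by key, preserving insertion order within each key.
def _index_demo():
    idx = Index([('a', 1), ('b', 2), ('a', 3)])
    assert idx['a'] == [1, 3] and idx['b'] == [2]
    return idx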
######################################################################
## Regexp display (thanks to David Mertz)
######################################################################
def re_show(regexp, string, left="{", right="}"):
"""
    Print a string with markers surrounding the matched substrings.
    Search ``string`` for substrings matching ``regexp`` and wrap the matches
    with braces. This is convenient for learning about regular expressions.
:param regexp: The regular expression.
:type regexp: str
:param string: The string being matched.
:type string: str
:param left: The left delimiter (printed before the matched substring)
:type left: str
:param right: The right delimiter (printed after the matched substring)
:type right: str
    :rtype: None
"""
print(re.compile(regexp, re.M).sub(left + r"\g<0>" + right, string.rstrip()))
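# Illustrative only (not part of the original module): re_show prints the
# string with every match wrapped in the chosen delimiters.
def _re_show_demo():
    re_show(r'\d+', 'abc 123 def 45')  # prints: abc {123} def {45}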
##########################################################################
# READ FROM FILE OR STRING
##########################################################################
# recipe from David Mertz
def filestring(f):
if hasattr(f, 'read'):
return f.read()
elif isinstance(f, string_types):
with open(f, 'r') as infile:
return infile.read()
else:
raise ValueError("Must be called with a filename or file-like object")
##########################################################################
# Breadth-First Search
##########################################################################
def breadth_first(tree, children=iter, maxdepth=-1):
"""Traverse the nodes of a tree in breadth-first order.
(No need to check for cycles.)
The first argument should be the tree root;
children should be a function taking as argument a tree node
and returning an iterator of the node's children.
"""
queue = deque([(tree, 0)])
while queue:
node, depth = queue.popleft()
yield node
if depth != maxdepth:
try:
queue.extend((c, depth + 1) for c in children(node))
except TypeError:
pass
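# Illustrative sketch (not part of the original module): traversing a small
# nested-tuple tree, where each node is a (label, children) pair.
def _breadth_first_demo():
    tree = ('a', [('b', []), ('c', [('d', [])])])
    labels = [label for (label, _) in breadth_first(tree, children=lambda n: n[1])]
    return labels  # ['a', 'b', 'c', 'd']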
##########################################################################
# Guess Character Encoding
##########################################################################
# adapted from io.py in the docutils extension module (http://docutils.sourceforge.net)
# http://www.pyzine.com/Issue008/Section_Articles/article_Encodings.html
def guess_encoding(data):
"""
    Given a byte string, attempt to decode it.
    Tries the standard 'UTF-8' and 'latin-1' encodings,
    plus several gathered from locale information.
The calling program *must* first call::
locale.setlocale(locale.LC_ALL, '')
If successful it returns ``(decoded_unicode, successful_encoding)``.
If unsuccessful it raises a ``UnicodeError``.
"""
successful_encoding = None
# we make 'utf-8' the first encoding
encodings = ['utf-8']
#
# next we add anything we can learn from the locale
try:
encodings.append(locale.nl_langinfo(locale.CODESET))
except AttributeError:
pass
try:
encodings.append(locale.getlocale()[1])
except (AttributeError, IndexError):
pass
try:
encodings.append(locale.getdefaultlocale()[1])
except (AttributeError, IndexError):
pass
#
# we try 'latin-1' last
encodings.append('latin-1')
for enc in encodings:
# some of the locale calls
# may have returned None
if not enc:
continue
try:
decoded = text_type(data, enc)
successful_encoding = enc
except (UnicodeError, LookupError):
pass
else:
break
if not successful_encoding:
raise UnicodeError(
'Unable to decode input data. Tried the following encodings: %s.'
% ', '.join([repr(enc) for enc in encodings if enc]))
else:
return (decoded, successful_encoding)
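# Hedged usage sketch (not part of the original module). The locale call is
# required by guess_encoding and may behave differently across platforms.
def _guess_encoding_demo():
    locale.setlocale(locale.LC_ALL, '')
    decoded, enc = guess_encoding(b'caf\xc3\xa9')  # UTF-8 bytes for "café"
    return decoded, enc  # typically (u'café', 'utf-8')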
##########################################################################
# Remove repeated elements from a list deterministically
##########################################################################
def unique_list(xs):
seen = set()
    # `not seen.add(x)` avoids a separate if-statement: seen.add(x) always
    # returns None, so the test is always True and merely records x as seen.
return [x for x in xs if x not in seen and not seen.add(x)]
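# Illustrative only (not part of the original module): order-preserving
# de-duplication.
def _unique_list_demo():
    return unique_list([3, 1, 3, 2, 1])  # [3, 1, 2]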
##########################################################################
# Invert a dictionary
##########################################################################
def invert_dict(d):
inverted_dict = defaultdict(list)
for key in d:
if hasattr(d[key], '__iter__'):
for term in d[key]:
inverted_dict[term].append(key)
else:
inverted_dict[d[key]] = key
return inverted_dict
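# Illustrative only (not part of the original module): each value is mapped
# back to the keys it appeared under.
def _invert_dict_demo():
    return invert_dict({'a': [1, 2], 'b': [2]})  # defaultdict: {1: ['a'], 2: ['a', 'b']}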
##########################################################################
# Utilities for directed graphs: transitive closure, and inversion
# The graph is represented as a dictionary of sets
##########################################################################
def transitive_closure(graph, reflexive=False):
"""
Calculate the transitive closure of a directed graph,
optionally the reflexive transitive closure.
The algorithm is a slight modification of the "Marking Algorithm" of
Ioannidis & Ramakrishnan (1998) "Efficient Transitive Closure Algorithms".
:param graph: the initial graph, represented as a dictionary of sets
:type graph: dict(set)
:param reflexive: if set, also make the closure reflexive
:type reflexive: bool
:rtype: dict(set)
"""
if reflexive:
base_set = lambda k: set([k])
else:
base_set = lambda k: set()
# The graph U_i in the article:
agenda_graph = dict((k, graph[k].copy()) for k in graph)
# The graph M_i in the article:
closure_graph = dict((k, base_set(k)) for k in graph)
for i in graph:
agenda = agenda_graph[i]
closure = closure_graph[i]
while agenda:
j = agenda.pop()
closure.add(j)
closure |= closure_graph.setdefault(j, base_set(j))
agenda |= agenda_graph.get(j, base_set(j))
agenda -= closure
return closure_graph
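# Worked example (not part of the original module): the closure of a small
# chain graph, with and without the reflexive option.
def _transitive_closure_demo():
    graph = {1: {2}, 2: {3}, 3: set()}
    assert transitive_closure(graph) == {1: {2, 3}, 2: {3}, 3: set()}
    assert transitive_closure(graph, reflexive=True) == \
        {1: {1, 2, 3}, 2: {2, 3}, 3: {3}}
    return True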
def invert_graph(graph):
"""
Inverts a directed graph.
:param graph: the graph, represented as a dictionary of sets
:type graph: dict(set)
:return: the inverted graph
:rtype: dict(set)
"""
inverted = {}
for key in graph:
for value in graph[key]:
inverted.setdefault(value, set()).add(key)
return inverted
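# Illustrative only (not part of the original module): inverting the edges of
# a small graph.
def _invert_graph_demo():
    assert invert_graph({1: {2}, 2: {3}}) == {2: {1}, 3: {2}}
    return True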
##########################################################################
# HTML Cleaning
##########################################################################
def clean_html(html):
raise NotImplementedError ("To remove HTML markup, use BeautifulSoup's get_text() function")
def clean_url(url):
raise NotImplementedError ("To remove HTML markup, use BeautifulSoup's get_text() function")
##########################################################################
# FLATTEN LISTS
##########################################################################
def flatten(*args):
"""
Flatten a list.
>>> from nltk.util import flatten
>>> flatten(1, 2, ['b', 'a' , ['c', 'd']], 3)
[1, 2, 'b', 'a', 'c', 'd', 3]
:param args: items and lists to be combined into a single list
:rtype: list
"""
x = []
for l in args:
if not isinstance(l, (list, tuple)): l = [l]
for item in l:
if isinstance(item, (list, tuple)):
x.extend(flatten(item))
else:
x.append(item)
return x
##########################################################################
# Ngram iteration
##########################################################################
def pad_sequence(sequence, n, pad_left=False, pad_right=False,
left_pad_symbol=None, right_pad_symbol=None):
"""
Returns a padded sequence of items before ngram extraction.
>>> list(pad_sequence([1,2,3,4,5], 2, pad_left=True, pad_right=True, left_pad_symbol='<s>', right_pad_symbol='</s>'))
['<s>', 1, 2, 3, 4, 5, '</s>']
>>> list(pad_sequence([1,2,3,4,5], 2, pad_left=True, left_pad_symbol='<s>'))
['<s>', 1, 2, 3, 4, 5]
>>> list(pad_sequence([1,2,3,4,5], 2, pad_right=True, right_pad_symbol='</s>'))
[1, 2, 3, 4, 5, '</s>']
:param sequence: the source data to be padded
:type sequence: sequence or iter
:param n: the degree of the ngrams
:type n: int
:param pad_left: whether the ngrams should be left-padded
:type pad_left: bool
:param pad_right: whether the ngrams should be right-padded
:type pad_right: bool
:param left_pad_symbol: the symbol to use for left padding (default is None)
:type left_pad_symbol: any
:param right_pad_symbol: the symbol to use for right padding (default is None)
:type right_pad_symbol: any
:rtype: sequence or iter
"""
sequence = iter(sequence)
if pad_left:
sequence = chain((left_pad_symbol,) * (n-1), sequence)
if pad_right:
sequence = chain(sequence, (right_pad_symbol,) * (n-1))
return sequence
# add a flag to pad the sequence so we get peripheral ngrams?
def ngrams(sequence, n, pad_left=False, pad_right=False,
left_pad_symbol=None, right_pad_symbol=None):
"""
Return the ngrams generated from a sequence of items, as an iterator.
For example:
>>> from nltk.util import ngrams
>>> list(ngrams([1,2,3,4,5], 3))
[(1, 2, 3), (2, 3, 4), (3, 4, 5)]
Wrap with list for a list version of this function. Set pad_left
or pad_right to true in order to get additional ngrams:
>>> list(ngrams([1,2,3,4,5], 2, pad_right=True))
[(1, 2), (2, 3), (3, 4), (4, 5), (5, None)]
>>> list(ngrams([1,2,3,4,5], 2, pad_right=True, right_pad_symbol='</s>'))
[(1, 2), (2, 3), (3, 4), (4, 5), (5, '</s>')]
>>> list(ngrams([1,2,3,4,5], 2, pad_left=True, left_pad_symbol='<s>'))
[('<s>', 1), (1, 2), (2, 3), (3, 4), (4, 5)]
>>> list(ngrams([1,2,3,4,5], 2, pad_left=True, pad_right=True, left_pad_symbol='<s>', right_pad_symbol='</s>'))
[('<s>', 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, '</s>')]
:param sequence: the source data to be converted into ngrams
:type sequence: sequence or iter
:param n: the degree of the ngrams
:type n: int
:param pad_left: whether the ngrams should be left-padded
:type pad_left: bool
:param pad_right: whether the ngrams should be right-padded
:type pad_right: bool
:param left_pad_symbol: the symbol to use for left padding (default is None)
:type left_pad_symbol: any
:param right_pad_symbol: the symbol to use for right padding (default is None)
:type right_pad_symbol: any
:rtype: sequence or iter
"""
sequence = pad_sequence(sequence, n, pad_left, pad_right,
left_pad_symbol, right_pad_symbol)
history = []
while n > 1:
history.append(next(sequence))
n -= 1
for item in sequence:
history.append(item)
yield tuple(history)
del history[0]
def bigrams(sequence, **kwargs):
"""
Return the bigrams generated from a sequence of items, as an iterator.
For example:
>>> from nltk.util import bigrams
>>> list(bigrams([1,2,3,4,5]))
[(1, 2), (2, 3), (3, 4), (4, 5)]
    Wrap with list for a list version of this function.
:param sequence: the source data to be converted into bigrams
:type sequence: sequence or iter
:rtype: iter(tuple)
"""
for item in ngrams(sequence, 2, **kwargs):
yield item
def trigrams(sequence, **kwargs):
"""
Return the trigrams generated from a sequence of items, as an iterator.
For example:
>>> from nltk.util import trigrams
>>> list(trigrams([1,2,3,4,5]))
[(1, 2, 3), (2, 3, 4), (3, 4, 5)]
    Wrap with list for a list version of this function.
:param sequence: the source data to be converted into trigrams
:type sequence: sequence or iter
:rtype: iter(tuple)
"""
for item in ngrams(sequence, 3, **kwargs):
yield item
def everygrams(sequence, min_len=1, max_len=-1, **kwargs):
"""
Returns all possible ngrams generated from a sequence of items, as an iterator.
>>> sent = 'a b c'.split()
>>> list(everygrams(sent))
[('a',), ('b',), ('c',), ('a', 'b'), ('b', 'c'), ('a', 'b', 'c')]
>>> list(everygrams(sent, max_len=2))
[('a',), ('b',), ('c',), ('a', 'b'), ('b', 'c')]
    :param sequence: the source data to be converted into ngrams
:type sequence: sequence or iter
:param min_len: minimum length of the ngrams, aka. n-gram order/degree of ngram
:type min_len: int
:param max_len: maximum length of the ngrams (set to length of sequence by default)
:type max_len: int
:rtype: iter(tuple)
"""
if max_len == -1:
max_len = len(sequence)
for n in range(min_len, max_len+1):
for ng in ngrams(sequence, n, **kwargs):
yield ng
def skipgrams(sequence, n, k, **kwargs):
"""
Returns all possible skipgrams generated from a sequence of items, as an iterator.
    Skipgrams are ngrams that allow tokens to be skipped.
Refer to http://homepages.inf.ed.ac.uk/ballison/pdf/lrec_skipgrams.pdf
>>> sent = "Insurgents killed in ongoing fighting".split()
>>> list(skipgrams(sent, 2, 2))
[('Insurgents', 'killed'), ('Insurgents', 'in'), ('Insurgents', 'ongoing'), ('killed', 'in'), ('killed', 'ongoing'), ('killed', 'fighting'), ('in', 'ongoing'), ('in', 'fighting'), ('ongoing', 'fighting')]
>>> list(skipgrams(sent, 3, 2))
[('Insurgents', 'killed', 'in'), ('Insurgents', 'killed', 'ongoing'), ('Insurgents', 'killed', 'fighting'), ('Insurgents', 'in', 'ongoing'), ('Insurgents', 'in', 'fighting'), ('Insurgents', 'ongoing', 'fighting'), ('killed', 'in', 'ongoing'), ('killed', 'in', 'fighting'), ('killed', 'ongoing', 'fighting'), ('in', 'ongoing', 'fighting')]
    :param sequence: the source data to be converted into skipgrams
:type sequence: sequence or iter
:param n: the degree of the ngrams
:type n: int
:param k: the skip distance
:type k: int
:rtype: iter(tuple)
"""
# Pads the sequence as desired by **kwargs.
if 'pad_left' in kwargs or 'pad_right' in kwargs:
sequence = pad_sequence(sequence, n, **kwargs)
# Note when iterating through the ngrams, the pad_right here is not
# the **kwargs padding, it's for the algorithm to detect the SENTINEL
# object on the right pad to stop inner loop.
SENTINEL = object()
for ngram in ngrams(sequence, n + k, pad_right=True, right_pad_symbol=SENTINEL):
head = ngram[:1]
tail = ngram[1:]
for skip_tail in combinations(tail, n - 1):
if skip_tail[-1] is SENTINEL:
continue
yield head + skip_tail
######################################################################
# Binary Search in a File
######################################################################
# inherited from pywordnet, by Oliver Steele
def binary_search_file(file, key, cache={}, cacheDepth=-1):
"""
Return the line from the file with first word key.
Searches through a sorted file using the binary search algorithm.
:type file: file
:param file: the file to be searched through.
:type key: str
:param key: the identifier we are searching for.
"""
key = key + ' '
keylen = len(key)
start = 0
currentDepth = 0
if hasattr(file, 'name'):
end = os.stat(file.name).st_size - 1
else:
file.seek(0, 2)
end = file.tell() - 1
file.seek(0)
while start < end:
lastState = start, end
middle = (start + end) // 2
if cache.get(middle):
offset, line = cache[middle]
else:
line = ""
while True:
file.seek(max(0, middle - 1))
if middle > 0:
file.readline()
offset = file.tell()
line = file.readline()
if line != "": break
# at EOF; try to find start of the last line
middle = (start + middle)//2
if middle == end -1:
return None
if currentDepth < cacheDepth:
cache[middle] = (offset, line)
if offset > end:
assert end != middle - 1, "infinite loop"
end = middle - 1
elif line[:keylen] == key:
return line
elif line > key:
assert end != middle - 1, "infinite loop"
end = middle - 1
elif line < key:
start = offset + len(line) - 1
currentDepth += 1
thisState = start, end
if lastState == thisState:
# Detects the condition where we're searching past the end
# of the file, which is otherwise difficult to detect
return None
return None
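# Hedged usage sketch (not part of the original module): binary_search_file
# expects a text file whose lines are sorted by their first whitespace-delimited
# word; the temporary file below exists only for the demonstration.
def _binary_search_file_demo():
    import tempfile
    with tempfile.NamedTemporaryFile('w+', suffix='.txt', delete=False) as fp:
        fp.write('apple 1\nbanana 2\ncherry 3\n')
        path = fp.name
    try:
        with open(path) as fp:
            return binary_search_file(fp, 'banana')  # 'banana 2\n'
    finally:
        os.remove(path)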
######################################################################
# Proxy configuration
######################################################################
def set_proxy(proxy, user=None, password=''):
"""
Set the HTTP proxy for Python to download through.
If ``proxy`` is None then tries to set proxy from environment or system
settings.
:param proxy: The HTTP proxy server to use. For example:
'http://proxy.example.com:3128/'
:param user: The username to authenticate with. Use None to disable
authentication.
:param password: The password to authenticate with.
"""
from nltk import compat
if proxy is None:
# Try and find the system proxy settings
try:
proxy = getproxies()['http']
except KeyError:
raise ValueError('Could not detect default proxy settings')
# Set up the proxy handler
proxy_handler = ProxyHandler({'http': proxy})
opener = build_opener(proxy_handler)
if user is not None:
# Set up basic proxy authentication if provided
password_manager = HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(realm=None, uri=proxy, user=user,
passwd=password)
opener.add_handler(ProxyBasicAuthHandler(password_manager))
opener.add_handler(ProxyDigestAuthHandler(password_manager))
    # Override the existing URL opener
install_opener(opener)
######################################################################
# ElementTree pretty printing from http://www.effbot.org/zone/element-lib.htm
######################################################################
def elementtree_indent(elem, level=0):
"""
Recursive function to indent an ElementTree._ElementInterface
used for pretty printing. Run indent on elem and then output
in the normal way.
:param elem: element to be indented. will be modified.
:type elem: ElementTree._ElementInterface
:param level: level of indentation for this element
:type level: nonnegative integer
:rtype: ElementTree._ElementInterface
:return: Contents of elem indented to reflect its structure
"""
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
for elem in elem:
elementtree_indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
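# Illustrative sketch (not part of the original module): indenting a small
# tree in place before serialising it.
def _elementtree_indent_demo():
    from xml.etree import ElementTree
    root = ElementTree.fromstring('<doc><a>1</a><b>2</b></doc>')
    elementtree_indent(root)
    return ElementTree.tostring(root)  # children now appear on indented lines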
######################################################################
# Mathematical approximations
######################################################################
def choose(n, k):
"""
This function is a fast way to calculate binomial coefficients, commonly
known as nCk, i.e. the number of combinations of n things taken k at a time.
(https://en.wikipedia.org/wiki/Binomial_coefficient).
    This is equivalent to *scipy.special.comb()* with exact (long integer)
    computation, but this implementation is faster; see https://github.com/nltk/nltk/issues/1181
>>> choose(4, 2)
6
>>> choose(6, 2)
15
:param n: The number of things.
:type n: int
    :param k: The number of things taken each time.
    :type k: int
"""
if 0 <= k <= n:
ntok, ktok = 1, 1
for t in range(1, min(k, n - k) + 1):
ntok *= n
ktok *= t
n -= 1
return ntok // ktok
else:
return 0
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUVNFInterfacesFetcher
from .fetchers import NUVNFMetadatasFetcher
from .fetchers import NUVNFThresholdPoliciesFetcher
from .fetchers import NUJobsFetcher
from bambou import NURESTObject
class NUVNF(NURESTObject):
""" Represents a VNF in the VSD
Notes:
Instantiation of a VNF on a specified Network Services Gateway that has the resources to manage a VNF.
"""
__rest_name__ = "vnf"
__resource_name__ = "vnfs"
## Constants
CONST_LAST_USER_ACTION_NONE = "NONE"
CONST_STATUS_SHUTDOWN = "SHUTDOWN"
CONST_LAST_USER_ACTION_START = "START"
CONST_LAST_USER_ACTION_REDEPLOY = "REDEPLOY"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_LAST_USER_ACTION_RESTART = "RESTART"
CONST_TASK_STATE_STOPPING = "STOPPING"
CONST_ALLOWED_ACTIONS_START = "START"
CONST_STATUS_SHUTOFF = "SHUTOFF"
CONST_STATUS_IDLE = "IDLE"
CONST_TASK_STATE_NONE = "NONE"
CONST_STATUS_INIT = "INIT"
CONST_ALLOWED_ACTIONS_DEPLOY = "DEPLOY"
CONST_TASK_STATE_DEPLOYING = "DEPLOYING"
CONST_TYPE_THREAT_PREVENTION = "THREAT_PREVENTION"
CONST_TYPE_WAN_OPT = "WAN_OPT"
CONST_ALLOWED_ACTIONS_RESTART = "RESTART"
CONST_ALLOWED_ACTIONS_UNDEPLOY = "UNDEPLOY"
CONST_STATUS_LAST = "LAST"
CONST_STATUS_CRASHED = "CRASHED"
CONST_STATUS_RUNNING = "RUNNING"
CONST_STATUS_BLOCKED = "BLOCKED"
CONST_STATUS_PAUSED = "PAUSED"
CONST_TASK_STATE_STARTING = "STARTING"
CONST_STATUS_DYING = "DYING"
CONST_ALLOWED_ACTIONS_REDEPLOY = "REDEPLOY"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_LAST_USER_ACTION_UNDEPLOY = "UNDEPLOY"
CONST_TYPE_FIREWALL = "FIREWALL"
CONST_ALLOWED_ACTIONS_STOP = "STOP"
CONST_LAST_USER_ACTION_DEPLOY = "DEPLOY"
CONST_STATUS_PMSUSPENDED = "PMSUSPENDED"
CONST_LAST_USER_ACTION_STOP = "STOP"
CONST_TASK_STATE_UNDEPLOYING = "UNDEPLOYING"
def __init__(self, **kwargs):
""" Initializes a VNF instance
Notes:
                You can specify all parameters while calling this method.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> vnf = NUVNF(id=u'xxxx-xxx-xxx-xxx', name=u'VNF')
>>> vnf = NUVNF(data=my_dict)
"""
super(NUVNF, self).__init__()
# Read/Write Attributes
self._vnf_descriptor_id = None
self._vnf_descriptor_name = None
self._cpu_count = None
self._nsg_name = None
self._nsg_system_id = None
self._ns_gateway_id = None
self._name = None
self._task_state = None
self._last_known_error = None
self._last_updated_by = None
self._last_updated_date = None
self._last_user_action = None
self._memory_mb = None
self._vendor = None
self._description = None
self._allowed_actions = None
self._embedded_metadata = None
self._enterprise_id = None
self._entity_scope = None
self._creation_date = None
self._is_attached_to_descriptor = None
self._associated_vnf_metadata_id = None
self._associated_vnf_threshold_policy_id = None
self._status = None
self._storage_gb = None
self._owner = None
self._external_id = None
self._type = None
self.expose_attribute(local_name="vnf_descriptor_id", remote_name="VNFDescriptorID", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="vnf_descriptor_name", remote_name="VNFDescriptorName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="cpu_count", remote_name="CPUCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="nsg_name", remote_name="NSGName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="nsg_system_id", remote_name="NSGSystemID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="ns_gateway_id", remote_name="NSGatewayID", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="task_state", remote_name="taskState", attribute_type=str, is_required=False, is_unique=False, choices=[u'DEPLOYING', u'NONE', u'STARTING', u'STOPPING', u'UNDEPLOYING'])
self.expose_attribute(local_name="last_known_error", remote_name="lastKnownError", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_user_action", remote_name="lastUserAction", attribute_type=str, is_required=False, is_unique=False, choices=[u'DEPLOY', u'NONE', u'REDEPLOY', u'RESTART', u'START', u'STOP', u'UNDEPLOY'])
self.expose_attribute(local_name="memory_mb", remote_name="memoryMB", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="vendor", remote_name="vendor", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="allowed_actions", remote_name="allowedActions", attribute_type=list, is_required=False, is_unique=False, choices=[u'DEPLOY', u'REDEPLOY', u'RESTART', u'START', u'STOP', u'UNDEPLOY'])
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="enterprise_id", remote_name="enterpriseID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="is_attached_to_descriptor", remote_name="isAttachedToDescriptor", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_vnf_metadata_id", remote_name="associatedVNFMetadataID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_vnf_threshold_policy_id", remote_name="associatedVNFThresholdPolicyID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="status", remote_name="status", attribute_type=str, is_required=False, is_unique=False, choices=[u'BLOCKED', u'CRASHED', u'DYING', u'IDLE', u'INIT', u'LAST', u'PAUSED', u'PMSUSPENDED', u'RUNNING', u'SHUTDOWN', u'SHUTOFF'])
self.expose_attribute(local_name="storage_gb", remote_name="storageGB", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
self.expose_attribute(local_name="type", remote_name="type", attribute_type=str, is_required=False, is_unique=False, choices=[u'FIREWALL', u'THREAT_PREVENTION', u'WAN_OPT'])
# Fetchers
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.vnf_interfaces = NUVNFInterfacesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.vnf_metadatas = NUVNFMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.vnf_threshold_policies = NUVNFThresholdPoliciesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.jobs = NUJobsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def vnf_descriptor_id(self):
""" Get vnf_descriptor_id value.
Notes:
                The ID of the VNF Descriptor from which the VNF is to be created. This is required on creation and can be omitted on modification of the VNF instance.
This attribute is named `VNFDescriptorID` in VSD API.
"""
return self._vnf_descriptor_id
@vnf_descriptor_id.setter
def vnf_descriptor_id(self, value):
""" Set vnf_descriptor_id value.
Notes:
                The ID of the VNF Descriptor from which the VNF is to be created. This is required on creation and can be omitted on modification of the VNF instance.
This attribute is named `VNFDescriptorID` in VSD API.
"""
self._vnf_descriptor_id = value
@property
def vnf_descriptor_name(self):
""" Get vnf_descriptor_name value.
Notes:
The Name of VNF Descriptor from which this VNF instance is created.
This attribute is named `VNFDescriptorName` in VSD API.
"""
return self._vnf_descriptor_name
@vnf_descriptor_name.setter
def vnf_descriptor_name(self, value):
""" Set vnf_descriptor_name value.
Notes:
The Name of VNF Descriptor from which this VNF instance is created.
This attribute is named `VNFDescriptorName` in VSD API.
"""
self._vnf_descriptor_name = value
@property
def cpu_count(self):
""" Get cpu_count value.
Notes:
Number of CPUs to be allocated for this VNF instance
This attribute is named `CPUCount` in VSD API.
"""
return self._cpu_count
@cpu_count.setter
def cpu_count(self, value):
""" Set cpu_count value.
Notes:
Number of CPUs to be allocated for this VNF instance
This attribute is named `CPUCount` in VSD API.
"""
self._cpu_count = value
@property
def nsg_name(self):
""" Get nsg_name value.
Notes:
The NSG name where VNF is deployed
This attribute is named `NSGName` in VSD API.
"""
return self._nsg_name
@nsg_name.setter
def nsg_name(self, value):
""" Set nsg_name value.
Notes:
The NSG name where VNF is deployed
This attribute is named `NSGName` in VSD API.
"""
self._nsg_name = value
@property
def nsg_system_id(self):
""" Get nsg_system_id value.
Notes:
The NSG system id where VNF is deployed
This attribute is named `NSGSystemID` in VSD API.
"""
return self._nsg_system_id
@nsg_system_id.setter
def nsg_system_id(self, value):
""" Set nsg_system_id value.
Notes:
The NSG system id where VNF is deployed
This attribute is named `NSGSystemID` in VSD API.
"""
self._nsg_system_id = value
@property
def ns_gateway_id(self):
""" Get ns_gateway_id value.
Notes:
The NSG instance id where VNF is deployed
This attribute is named `NSGatewayID` in VSD API.
"""
return self._ns_gateway_id
@ns_gateway_id.setter
def ns_gateway_id(self, value):
""" Set ns_gateway_id value.
Notes:
The NSG instance id where VNF is deployed
This attribute is named `NSGatewayID` in VSD API.
"""
self._ns_gateway_id = value
@property
def name(self):
""" Get name value.
Notes:
Name of the VNF
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
Name of the VNF
"""
self._name = value
@property
def task_state(self):
""" Get task_state value.
Notes:
Current state of operation/task
This attribute is named `taskState` in VSD API.
"""
return self._task_state
@task_state.setter
def task_state(self, value):
""" Set task_state value.
Notes:
Current state of operation/task
This attribute is named `taskState` in VSD API.
"""
self._task_state = value
@property
def last_known_error(self):
""" Get last_known_error value.
Notes:
Last error reported
This attribute is named `lastKnownError` in VSD API.
"""
return self._last_known_error
@last_known_error.setter
def last_known_error(self, value):
""" Set last_known_error value.
Notes:
Last error reported
This attribute is named `lastKnownError` in VSD API.
"""
self._last_known_error = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def last_updated_date(self):
""" Get last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
""" Set last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
self._last_updated_date = value
@property
def last_user_action(self):
""" Get last_user_action value.
Notes:
                Last action performed by the user
This attribute is named `lastUserAction` in VSD API.
"""
return self._last_user_action
@last_user_action.setter
def last_user_action(self, value):
""" Set last_user_action value.
Notes:
                Last action performed by the user
This attribute is named `lastUserAction` in VSD API.
"""
self._last_user_action = value
@property
def memory_mb(self):
""" Get memory_mb value.
Notes:
Memory (in MB) to be allocated for this VNF instance.
This attribute is named `memoryMB` in VSD API.
"""
return self._memory_mb
@memory_mb.setter
def memory_mb(self, value):
""" Set memory_mb value.
Notes:
Memory (in MB) to be allocated for this VNF instance.
This attribute is named `memoryMB` in VSD API.
"""
self._memory_mb = value
@property
def vendor(self):
""" Get vendor value.
Notes:
The vendor for VNF
"""
return self._vendor
@vendor.setter
def vendor(self, value):
""" Set vendor value.
Notes:
The vendor for VNF
"""
self._vendor = value
@property
def description(self):
""" Get description value.
Notes:
Description of the VNF Instance
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
Description of the VNF Instance
"""
self._description = value
@property
def allowed_actions(self):
""" Get allowed_actions value.
Notes:
                Actions allowed to be performed on the VNF, based on its current status and taskState
This attribute is named `allowedActions` in VSD API.
"""
return self._allowed_actions
@allowed_actions.setter
def allowed_actions(self, value):
""" Set allowed_actions value.
Notes:
                Actions allowed to be performed on the VNF, based on its current status and taskState
This attribute is named `allowedActions` in VSD API.
"""
self._allowed_actions = value
@property
def embedded_metadata(self):
""" Get embedded_metadata value.
Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a limited number of Metadata objects is returned, based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
""" Set embedded_metadata value.
Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a limited number of Metadata objects is returned, based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
self._embedded_metadata = value
@property
def enterprise_id(self):
""" Get enterprise_id value.
Notes:
ID of the enterprise that this VNF belongs to
This attribute is named `enterpriseID` in VSD API.
"""
return self._enterprise_id
@enterprise_id.setter
def enterprise_id(self, value):
""" Set enterprise_id value.
Notes:
ID of the enterprise that this VNF belongs to
This attribute is named `enterpriseID` in VSD API.
"""
self._enterprise_id = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def creation_date(self):
""" Get creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
return self._creation_date
@creation_date.setter
def creation_date(self, value):
""" Set creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
self._creation_date = value
@property
def is_attached_to_descriptor(self):
""" Get is_attached_to_descriptor value.
Notes:
                This specifies whether the VNF instance is still attached to its VNF descriptor or has been decoupled from it
This attribute is named `isAttachedToDescriptor` in VSD API.
"""
return self._is_attached_to_descriptor
@is_attached_to_descriptor.setter
def is_attached_to_descriptor(self, value):
""" Set is_attached_to_descriptor value.
Notes:
                This specifies whether the VNF instance is still attached to its VNF descriptor or has been decoupled from it
This attribute is named `isAttachedToDescriptor` in VSD API.
"""
self._is_attached_to_descriptor = value
@property
def associated_vnf_metadata_id(self):
""" Get associated_vnf_metadata_id value.
Notes:
VNF metadata associated to VNF instance.
This attribute is named `associatedVNFMetadataID` in VSD API.
"""
return self._associated_vnf_metadata_id
@associated_vnf_metadata_id.setter
def associated_vnf_metadata_id(self, value):
""" Set associated_vnf_metadata_id value.
Notes:
VNF metadata associated to VNF instance.
This attribute is named `associatedVNFMetadataID` in VSD API.
"""
self._associated_vnf_metadata_id = value
@property
def associated_vnf_threshold_policy_id(self):
""" Get associated_vnf_threshold_policy_id value.
Notes:
VNF threshold policy associated to VNF instance
This attribute is named `associatedVNFThresholdPolicyID` in VSD API.
"""
return self._associated_vnf_threshold_policy_id
@associated_vnf_threshold_policy_id.setter
def associated_vnf_threshold_policy_id(self, value):
""" Set associated_vnf_threshold_policy_id value.
Notes:
VNF threshold policy associated to VNF instance
This attribute is named `associatedVNFThresholdPolicyID` in VSD API.
"""
self._associated_vnf_threshold_policy_id = value
@property
def status(self):
""" Get status value.
Notes:
State/Status of the VNF
"""
return self._status
@status.setter
def status(self, value):
""" Set status value.
Notes:
State/Status of the VNF
"""
self._status = value
@property
def storage_gb(self):
""" Get storage_gb value.
Notes:
Disk storage (in GB) to be allocated for deployed VNF instance
This attribute is named `storageGB` in VSD API.
"""
return self._storage_gb
@storage_gb.setter
def storage_gb(self, value):
""" Set storage_gb value.
Notes:
Disk storage (in GB) to be allocated for deployed VNF instance
This attribute is named `storageGB` in VSD API.
"""
self._storage_gb = value
@property
def owner(self):
""" Get owner value.
Notes:
Identifies the user that has created this object.
"""
return self._owner
@owner.setter
def owner(self, value):
""" Set owner value.
Notes:
Identifies the user that has created this object.
"""
self._owner = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
@property
def type(self):
""" Get type value.
Notes:
Type of virtual network function
"""
return self._type
@type.setter
def type(self, value):
""" Set type value.
Notes:
Type of virtual network function
"""
self._type = value
|
|
from collections import namedtuple
from posixpath import basename
from random import randint, shuffle, uniform
from time import time
from urllib.parse import urlsplit
from discord import User
from bot import HahaNoUR
from core.argument_parser import parse_arguments
from core.image_generator import create_image, get_one_img, \
idol_img_path
RATES = {
"regular": {"N": 0.95, "R": 0.05, "SR": 0.00, "SSR": 0.00, "UR": 0.00},
"honour": {"N": 0.00, "R": 0.80, "SR": 0.15, "SSR": 0.04, "UR": 0.01},
"coupon": {"N": 0.00, "R": 0.00, "SR": 0.80, "SSR": 0.00, "UR": 0.20},
"support": {"N": 0.00, "R": 0.60, "SR": 0.30, "SSR": 0.00, "UR": 0.10},
"alpaca": {"N": 0.00, "R": 0.85, "SR": 0.15, "SSR": 0.00, "UR": 0.00}
}
class ScoutImage(namedtuple('ScoutImage', ('bytes', 'name'))):
__slots__ = ()
class ScoutHandler:
"""
Provides scouting functionality for bot.
"""
__slots__ = ('results', '_bot', '_user', '_box', '_count',
'_guaranteed_sr', '_args')
def __init__(self, bot: HahaNoUR, user: User,
box: str = "honour", count: int = 1,
guaranteed_sr: bool = False, args: tuple = ()):
"""
Constructor for a Scout.
        :param bot: The bot instance.
        :param user: User requesting the scout.
        :param box: Box to scout in (regular, honour, coupon, support, alpaca).
:param count: Number of cards in scout.
:param guaranteed_sr: Whether the scout will roll at least one SR.
:param args: Scout command arguments
"""
self.results = []
self._bot = bot
self._user = user
self._box = box
self._count = count
self._guaranteed_sr = guaranteed_sr
self._args = parse_arguments(self._bot, args, True)
async def do_scout(self):
if self._count > 1:
return await self._handle_multiple_scout()
else:
return await self._handle_solo_scout()
async def _handle_multiple_scout(self):
"""
Handles a scout with multiple cards
        :return: ScoutImage with the image bytes and file name, or None on failure
"""
cards = await self._scout_cards()
if len(cards) != self._count:
self.results = []
return None
fname = f'{int(time())}{randint(0, 100)}.png'
_bytes = await create_image(self._bot.session_manager, cards, 2)
return ScoutImage(_bytes, fname)
async def _handle_solo_scout(self):
"""
Handles a solo scout
        :return: ScoutImage with the image bytes and file name, or None on failure
"""
card = await self._scout_cards()
# Send error message if no card was returned
if not card:
self.results = []
return None
card = card[0]
if card["card_image"] is None:
url = "https:" + card["card_idolized_image"]
else:
url = "https:" + card["card_image"]
fname = basename(urlsplit(url).path)
image_path = idol_img_path.joinpath(fname)
bytes_ = await get_one_img(
url, image_path, self._bot.session_manager)
return ScoutImage(bytes_, fname)
async def _scout_cards(self) -> list:
"""
Scouts a specified number of cards
:return: cards scouted
"""
rarities = []
if self._guaranteed_sr:
for r in range(self._count - 1):
rarities.append(self._roll_rarity())
if rarities.count("R") + rarities.count("N") == self._count - 1:
rarities.append(self._roll_rarity(True))
else:
rarities.append(self._roll_rarity())
# Case where a normal character is selected
elif (self._box == "regular") \
and len(self._args["name"]) > 0:
for r in range(self._count):
rarities.append("N")
else:
for r in range(self._count):
rarities.append(self._roll_rarity())
results = []
for rarity in RATES[self._box].keys():
if rarities.count(rarity) > 0:
scout = await self._scout_request(
rarities.count(rarity), rarity
)
results += _get_adjusted_scout(
scout, rarities.count(rarity)
)
self.results = results
shuffle(results)
return results
async def _scout_request(self, count: int, rarity: str) -> dict:
"""
Scouts a specified number of cards of a given rarity
:param rarity: Rarity of all cards in scout
:return: Cards scouted
"""
if count == 0:
return []
params = {
'rarity': rarity,
'is_promo': False,
'is_special': (self._box == 'support')
}
for arg_type, arg_values in self._args.items():
if not arg_values:
continue
val = arg_values
            # Comma separated strings need to use $in.
if len(arg_values) > 0:
val = {'$in': arg_values}
if arg_type == "main_unit":
params['idol.main_unit'] = val
elif arg_type == "sub_unit":
params['idol.sub_unit'] = val
elif arg_type == "name":
params['idol.name'] = val
elif arg_type == "year":
params['idol.year'] = val
elif arg_type == "attribute":
params['attribute'] = val
# Get and return response
return await self._bot.db.cards.get_random_cards(params, count)
def _roll_rarity(self, guaranteed_sr: bool = False) -> str:
"""
Generates a random rarity based on the defined scouting rates
:param guaranteed_sr: Whether roll should be an SR
        :return: rarity represented as a string ('UR', 'SSR', 'SR', 'R', 'N')
"""
roll = uniform(0, 1)
required_roll = RATES[self._box]['UR']
if roll < required_roll:
return 'UR'
required_roll = RATES[self._box]['SSR'] + RATES[self._box]['UR']
if roll < required_roll:
return 'SSR'
required_roll = RATES[self._box]['SR'] + RATES[self._box]['SSR']
required_roll += RATES[self._box]['UR']
if roll < required_roll:
return 'SR'
required_roll = RATES[self._box]['R'] + RATES[self._box]['SR']
required_roll += RATES[self._box]['SSR'] + RATES[self._box]['UR']
if roll < required_roll:
if guaranteed_sr:
return 'SR'
else:
return 'R'
else:
return 'N'
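# Illustrative sketch, not part of the original module: the cumulative
# thresholds used by _roll_rarity for the "honour" box. A roll below 0.01
# yields UR, below 0.05 SSR, below 0.20 SR, and anything else falls through
# to R (the honour box has no N rate). Values are approximate because of
# floating point addition.
def _honour_thresholds_demo():
    rates = RATES['honour']
    ur = rates['UR']
    ssr = ur + rates['SSR']
    sr = ssr + rates['SR']
    r = sr + rates['R']
    return ur, ssr, sr, r  # approximately (0.01, 0.05, 0.20, 1.00)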
def _get_adjusted_scout(scout: list, required_count: int) -> list:
"""
Adjusts a pull of a single rarity by checking if a card should flip to
a similar one and by duplicating random cards in the scout if there were
not enough scouted.
:param scout: List representing the scout.
All these cards will have the same rarity.
:param required_count: The number of cards that need to be scouted.
:return: Adjusted list of cards scouted
"""
# Add missing cards to scout by duplicating random cards already present
current_count = len(scout)
# Something bad happened, return an empty list
if current_count == 0:
return []
pool_size = current_count
while current_count < required_count:
scout.append(
scout[randint(0, pool_size - 1)]
)
current_count += 1
# Traverse scout and roll for flips
for card_index in range(len(scout) - 1):
        # for each card there is a (1 / total cards) chance that it is
        # replaced by a duplicate of the following card
roll = uniform(0, 1)
if roll < 1 / len(scout):
scout[card_index] = scout[card_index + 1]
return scout
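# Illustrative only (not part of the original module): a short pull is padded
# with duplicates until it reaches the required count.
def _get_adjusted_scout_demo():
    adjusted = _get_adjusted_scout([{'id': 1}, {'id': 2}], 4)
    return len(adjusted)  # 4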
|
|
from abc import abstractmethod, ABCMeta
from ._compat import Sequence, Hashable
from numbers import Integral
import operator
import six
from pyrsistent._transformations import transform
def _bitcount(val):
return bin(val).count("1")
BRANCH_FACTOR = 32
BIT_MASK = BRANCH_FACTOR - 1
SHIFT = _bitcount(BIT_MASK)
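# Illustrative note, not part of the original module: with BRANCH_FACTOR = 32
# these constants come out as BIT_MASK = 31 and SHIFT = 5, so an index is
# consumed five bits at a time when walking the trie.
def _trie_path_demo(i=1000):
    # For a trie whose _shift is 10, index 1000 decomposes as [0, 31, 8]:
    # the root child, the next-level child, and finally the leaf slot.
    return [(i >> level) & BIT_MASK for level in (2 * SHIFT, SHIFT, 0)]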
def compare_pvector(v, other, operator):
return operator(v.tolist(), other.tolist() if isinstance(other, PVector) else other)
def _index_or_slice(index, stop):
if stop is None:
return index
return slice(index, stop)
class PythonPVector(object):
"""
Support structure for PVector that implements structural sharing for vectors using a trie.
"""
__slots__ = ('_count', '_shift', '_root', '_tail', '_tail_offset', '__weakref__')
def __new__(cls, count, shift, root, tail):
self = super(PythonPVector, cls).__new__(cls)
self._count = count
self._shift = shift
self._root = root
self._tail = tail
# Derived attribute stored for performance
self._tail_offset = self._count - len(self._tail)
return self
def __len__(self):
return self._count
def __getitem__(self, index):
if isinstance(index, slice):
# There are more conditions than the below where it would be OK to
# return ourselves, implement those...
if index.start is None and index.stop is None and index.step is None:
return self
# This is a bit nasty realizing the whole structure as a list before
# slicing it but it is the fastest way I've found to date, and it's easy :-)
return _EMPTY_PVECTOR.extend(self.tolist()[index])
if index < 0:
index += self._count
return PythonPVector._node_for(self, index)[index & BIT_MASK]
def __add__(self, other):
return self.extend(other)
def __repr__(self):
return 'pvector({0})'.format(str(self.tolist()))
def __str__(self):
return self.__repr__()
def __iter__(self):
        # This is kind of lazy and will produce some memory overhead, but it is
        # by far the fastest method of those tried since it uses the speed of
        # the built-in python list directly.
return iter(self.tolist())
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
return self is other or (hasattr(other, '__len__') and self._count == len(other)) and compare_pvector(self, other, operator.eq)
def __gt__(self, other):
return compare_pvector(self, other, operator.gt)
def __lt__(self, other):
return compare_pvector(self, other, operator.lt)
def __ge__(self, other):
return compare_pvector(self, other, operator.ge)
def __le__(self, other):
return compare_pvector(self, other, operator.le)
def __mul__(self, times):
if times <= 0 or self is _EMPTY_PVECTOR:
return _EMPTY_PVECTOR
if times == 1:
return self
return _EMPTY_PVECTOR.extend(times * self.tolist())
__rmul__ = __mul__
def _fill_list(self, node, shift, the_list):
if shift:
shift -= SHIFT
for n in node:
self._fill_list(n, shift, the_list)
else:
the_list.extend(node)
def tolist(self):
"""
The fastest way to convert the vector into a python list.
"""
the_list = []
self._fill_list(self._root, self._shift, the_list)
the_list.extend(self._tail)
return the_list
def _totuple(self):
"""
Returns the content as a python tuple.
"""
return tuple(self.tolist())
def __hash__(self):
# Taking the easy way out again...
return hash(self._totuple())
def transform(self, *transformations):
return transform(self, transformations)
def __reduce__(self):
# Pickling support
return pvector, (self.tolist(),)
def mset(self, *args):
if len(args) % 2:
raise TypeError("mset expected an even number of arguments")
evolver = self.evolver()
for i in range(0, len(args), 2):
evolver[args[i]] = args[i+1]
return evolver.persistent()
class Evolver(object):
__slots__ = ('_count', '_shift', '_root', '_tail', '_tail_offset', '_dirty_nodes',
'_extra_tail', '_cached_leafs', '_orig_pvector')
def __init__(self, v):
self._reset(v)
def __getitem__(self, index):
if not isinstance(index, Integral):
raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
if index < 0:
index += self._count + len(self._extra_tail)
if self._count <= index < self._count + len(self._extra_tail):
return self._extra_tail[index - self._count]
return PythonPVector._node_for(self, index)[index & BIT_MASK]
def _reset(self, v):
self._count = v._count
self._shift = v._shift
self._root = v._root
self._tail = v._tail
self._tail_offset = v._tail_offset
self._dirty_nodes = {}
self._cached_leafs = {}
self._extra_tail = []
self._orig_pvector = v
def append(self, element):
self._extra_tail.append(element)
return self
def extend(self, iterable):
self._extra_tail.extend(iterable)
return self
def set(self, index, val):
self[index] = val
return self
def __setitem__(self, index, val):
if not isinstance(index, Integral):
raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
if index < 0:
index += self._count + len(self._extra_tail)
if 0 <= index < self._count:
node = self._cached_leafs.get(index >> SHIFT)
if node:
node[index & BIT_MASK] = val
elif index >= self._tail_offset:
if id(self._tail) not in self._dirty_nodes:
self._tail = list(self._tail)
self._dirty_nodes[id(self._tail)] = True
self._cached_leafs[index >> SHIFT] = self._tail
self._tail[index & BIT_MASK] = val
else:
self._root = self._do_set(self._shift, self._root, index, val)
elif self._count <= index < self._count + len(self._extra_tail):
self._extra_tail[index - self._count] = val
elif index == self._count + len(self._extra_tail):
self._extra_tail.append(val)
else:
raise IndexError("Index out of range: %s" % (index,))
def _do_set(self, level, node, i, val):
if id(node) in self._dirty_nodes:
ret = node
else:
ret = list(node)
self._dirty_nodes[id(ret)] = True
if level == 0:
ret[i & BIT_MASK] = val
self._cached_leafs[i >> SHIFT] = ret
else:
sub_index = (i >> level) & BIT_MASK # >>>
ret[sub_index] = self._do_set(level - SHIFT, node[sub_index], i, val)
return ret
def delete(self, index):
del self[index]
return self
def __delitem__(self, key):
if self._orig_pvector:
# All structural sharing bets are off, base evolver on _extra_tail only
l = PythonPVector(self._count, self._shift, self._root, self._tail).tolist()
l.extend(self._extra_tail)
self._reset(_EMPTY_PVECTOR)
self._extra_tail = l
del self._extra_tail[key]
def persistent(self):
result = self._orig_pvector
if self.is_dirty():
result = PythonPVector(self._count, self._shift, self._root, self._tail).extend(self._extra_tail)
self._reset(result)
return result
def __len__(self):
return self._count + len(self._extra_tail)
def is_dirty(self):
return bool(self._dirty_nodes or self._extra_tail)
def evolver(self):
return PythonPVector.Evolver(self)
def set(self, i, val):
# This method could be implemented by a call to mset() but doing so would cause
# a ~5 X performance penalty on PyPy (considered the primary platform for this implementation
# of PVector) so we're keeping this implementation for now.
if not isinstance(i, Integral):
raise TypeError("'%s' object cannot be interpreted as an index" % type(i).__name__)
if i < 0:
i += self._count
if 0 <= i < self._count:
if i >= self._tail_offset:
new_tail = list(self._tail)
new_tail[i & BIT_MASK] = val
return PythonPVector(self._count, self._shift, self._root, new_tail)
return PythonPVector(self._count, self._shift, self._do_set(self._shift, self._root, i, val), self._tail)
if i == self._count:
return self.append(val)
raise IndexError("Index out of range: %s" % (i,))
def _do_set(self, level, node, i, val):
ret = list(node)
if level == 0:
ret[i & BIT_MASK] = val
else:
sub_index = (i >> level) & BIT_MASK # >>>
ret[sub_index] = self._do_set(level - SHIFT, node[sub_index], i, val)
return ret
@staticmethod
def _node_for(pvector_like, i):
if 0 <= i < pvector_like._count:
if i >= pvector_like._tail_offset:
return pvector_like._tail
node = pvector_like._root
for level in range(pvector_like._shift, 0, -SHIFT):
node = node[(i >> level) & BIT_MASK] # >>>
return node
raise IndexError("Index out of range: %s" % (i,))
def _create_new_root(self):
new_shift = self._shift
# Overflow root?
if (self._count >> SHIFT) > (1 << self._shift): # >>>
new_root = [self._root, self._new_path(self._shift, self._tail)]
new_shift += SHIFT
else:
new_root = self._push_tail(self._shift, self._root, self._tail)
return new_root, new_shift
def append(self, val):
if len(self._tail) < BRANCH_FACTOR:
new_tail = list(self._tail)
new_tail.append(val)
return PythonPVector(self._count + 1, self._shift, self._root, new_tail)
# Full tail, push into tree
new_root, new_shift = self._create_new_root()
return PythonPVector(self._count + 1, new_shift, new_root, [val])
def _new_path(self, level, node):
if level == 0:
return node
return [self._new_path(level - SHIFT, node)]
def _mutating_insert_tail(self):
self._root, self._shift = self._create_new_root()
self._tail = []
def _mutating_fill_tail(self, offset, sequence):
max_delta_len = BRANCH_FACTOR - len(self._tail)
delta = sequence[offset:offset + max_delta_len]
self._tail.extend(delta)
delta_len = len(delta)
self._count += delta_len
return offset + delta_len
def _mutating_extend(self, sequence):
offset = 0
sequence_len = len(sequence)
while offset < sequence_len:
offset = self._mutating_fill_tail(offset, sequence)
if len(self._tail) == BRANCH_FACTOR:
self._mutating_insert_tail()
self._tail_offset = self._count - len(self._tail)
def extend(self, obj):
# Mutates the new vector directly for efficiency but that's only an
# implementation detail, once it is returned it should be considered immutable
l = obj.tolist() if isinstance(obj, PythonPVector) else list(obj)
if l:
new_vector = self.append(l[0])
new_vector._mutating_extend(l[1:])
return new_vector
return self
def _push_tail(self, level, parent, tail_node):
"""
if parent is leaf, insert node,
else does it map to an existing child? ->
node_to_insert = push node one more level
else alloc new path
return node_to_insert placed in copy of parent
"""
ret = list(parent)
if level == SHIFT:
ret.append(tail_node)
return ret
sub_index = ((self._count - 1) >> level) & BIT_MASK # >>>
if len(parent) > sub_index:
ret[sub_index] = self._push_tail(level - SHIFT, parent[sub_index], tail_node)
return ret
ret.append(self._new_path(level - SHIFT, tail_node))
return ret
def index(self, value, *args, **kwargs):
return self.tolist().index(value, *args, **kwargs)
def count(self, value):
return self.tolist().count(value)
def delete(self, index, stop=None):
l = self.tolist()
del l[_index_or_slice(index, stop)]
return _EMPTY_PVECTOR.extend(l)
def remove(self, value):
l = self.tolist()
l.remove(value)
return _EMPTY_PVECTOR.extend(l)
@six.add_metaclass(ABCMeta)
class PVector(object):
"""
Persistent vector implementation. Meant as a replacement for the cases where you would normally
use a Python list.
Do not instantiate directly, instead use the factory functions :py:func:`v` and :py:func:`pvector` to
create an instance.
Heavily influenced by the persistent vector available in Clojure. Initially this was more or
less just a port of the Java code for the Clojure vector. It has since been modified and to
some extent optimized for usage in Python.
    The vector is organized as a trie; any mutating method returns a new vector that contains the changes. No
    updates are done to the original vector. Structural sharing between vectors is applied where possible to save
    space and to avoid making complete copies.
    This structure corresponds most closely to the built-in list type and is intended as a replacement. Where the
    semantics are the same (more or less) the same method names have been used, but in some cases that is not
    possible, for example for assignments.
The PVector implements the Sequence protocol and is Hashable.
Inserts are amortized O(1). Random access is log32(n) where n is the size of the vector.
The following are examples of some common operations on persistent vectors:
>>> p = v(1, 2, 3)
>>> p2 = p.append(4)
>>> p3 = p2.extend([5, 6, 7])
>>> p
pvector([1, 2, 3])
>>> p2
pvector([1, 2, 3, 4])
>>> p3
pvector([1, 2, 3, 4, 5, 6, 7])
>>> p3[5]
6
>>> p.set(1, 99)
pvector([1, 99, 3])
>>>
"""
@abstractmethod
def __len__(self):
"""
>>> len(v(1, 2, 3))
3
"""
@abstractmethod
def __getitem__(self, index):
"""
Get value at index. Full slicing support.
>>> v1 = v(5, 6, 7, 8)
>>> v1[2]
7
>>> v1[1:3]
pvector([6, 7])
"""
@abstractmethod
def __add__(self, other):
"""
>>> v1 = v(1, 2)
>>> v2 = v(3, 4)
>>> v1 + v2
pvector([1, 2, 3, 4])
"""
@abstractmethod
def __mul__(self, times):
"""
>>> v1 = v(1, 2)
>>> 3 * v1
pvector([1, 2, 1, 2, 1, 2])
"""
@abstractmethod
def __hash__(self):
"""
>>> v1 = v(1, 2, 3)
>>> v2 = v(1, 2, 3)
>>> hash(v1) == hash(v2)
True
"""
@abstractmethod
def evolver(self):
"""
        Create a new evolver for this pvector. The evolver acts as a mutable view of the vector
        with "transaction like" semantics. No part of the underlying vector is updated; it is still
        fully immutable. Furthermore, multiple evolvers created from the same pvector do not
        interfere with each other.
        You may want to use an evolver instead of working directly with the pvector in the
        following cases:
        * Multiple updates are done to the same vector and the intermediate results are of no
        interest. In this case an evolver may be more efficient and easier to work with.
        * You need to pass a vector into a legacy function, or a function that you have no control
        over, which performs in-place mutations of lists. In this case pass an evolver instance
        instead and then create a new pvector from the evolver once the function returns.
        The following example illustrates a typical workflow when working with evolvers. It also
        displays most of the API (which is kept small by design; you should not be tempted to
        use evolvers in excess ;-)).
Create the evolver and perform various mutating updates to it:
>>> v1 = v(1, 2, 3, 4, 5)
>>> e = v1.evolver()
>>> e[1] = 22
>>> _ = e.append(6)
>>> _ = e.extend([7, 8, 9])
>>> e[8] += 1
>>> len(e)
9
The underlying pvector remains the same:
>>> v1
pvector([1, 2, 3, 4, 5])
The changes are kept in the evolver. An updated pvector can be created using the
persistent() function on the evolver.
>>> v2 = e.persistent()
>>> v2
pvector([1, 22, 3, 4, 5, 6, 7, 8, 10])
The new pvector will share data with the original pvector in the same way that would have
been done if only using operations on the pvector.
"""
@abstractmethod
def mset(self, *args):
"""
Return a new vector with elements in specified positions replaced by values (multi set).
Elements on even positions in the argument list are interpreted as indexes while
elements on odd positions are considered values.
>>> v1 = v(1, 2, 3)
>>> v1.mset(0, 11, 2, 33)
pvector([11, 2, 33])
"""
@abstractmethod
def set(self, i, val):
"""
Return a new vector with element at position i replaced with val. The original vector remains unchanged.
Setting a value one step beyond the end of the vector is equal to appending. Setting beyond that will
result in an IndexError.
>>> v1 = v(1, 2, 3)
>>> v1.set(1, 4)
pvector([1, 4, 3])
>>> v1.set(3, 4)
pvector([1, 2, 3, 4])
>>> v1.set(-1, 4)
pvector([1, 2, 4])
"""
@abstractmethod
def append(self, val):
"""
Return a new vector with val appended.
>>> v1 = v(1, 2)
>>> v1.append(3)
pvector([1, 2, 3])
"""
@abstractmethod
def extend(self, obj):
"""
Return a new vector with all values in obj appended to it. Obj may be another
PVector or any other Iterable.
>>> v1 = v(1, 2, 3)
>>> v1.extend([4, 5])
pvector([1, 2, 3, 4, 5])
"""
@abstractmethod
def index(self, value, *args, **kwargs):
"""
Return first index of value. Additional indexes may be supplied to limit the search to a
sub range of the vector.
>>> v1 = v(1, 2, 3, 4, 3)
>>> v1.index(3)
2
>>> v1.index(3, 3, 5)
4
"""
@abstractmethod
def count(self, value):
"""
Return the number of times that value appears in the vector.
>>> v1 = v(1, 4, 3, 4)
>>> v1.count(4)
2
"""
@abstractmethod
def transform(self, *transformations):
"""
Transform arbitrarily complex combinations of PVectors and PMaps. A transformation
consists of two parts. One match expression that specifies which elements to transform
and one transformation function that performs the actual transformation.
>>> from pyrsistent import freeze, ny
>>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'},
... {'author': 'Steve', 'content': 'A slightly longer article'}],
... 'weather': {'temperature': '11C', 'wind': '5m/s'}})
>>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c)
>>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c)
>>> very_short_news.articles[0].content
'A short article'
>>> very_short_news.articles[1].content
'A slightly long...'
When nothing has been transformed the original data structure is kept
>>> short_news is news_paper
True
>>> very_short_news is news_paper
False
>>> very_short_news.articles[0] is news_paper.articles[0]
True
"""
@abstractmethod
def delete(self, index, stop=None):
"""
Delete a portion of the vector by index or range.
>>> v1 = v(1, 2, 3, 4, 5)
>>> v1.delete(1)
pvector([1, 3, 4, 5])
>>> v1.delete(1, 3)
pvector([1, 4, 5])
"""
@abstractmethod
def remove(self, value):
"""
Remove the first occurrence of a value from the vector.
>>> v1 = v(1, 2, 3, 2, 1)
>>> v2 = v1.remove(1)
>>> v2
pvector([2, 3, 2, 1])
>>> v2.remove(1)
pvector([2, 3, 2])
"""
_EMPTY_PVECTOR = PythonPVector(0, SHIFT, [], [])
PVector.register(PythonPVector)
Sequence.register(PVector)
Hashable.register(PVector)
def python_pvector(iterable=()):
"""
Create a new persistent vector containing the elements in iterable.
>>> v1 = pvector([1, 2, 3])
>>> v1
pvector([1, 2, 3])
"""
return _EMPTY_PVECTOR.extend(iterable)
try:
# Use the C extension as underlying trie implementation if it is available
import os
if os.environ.get('PYRSISTENT_NO_C_EXTENSION'):
pvector = python_pvector
else:
from pvectorc import pvector
PVector.register(type(pvector()))
except ImportError:
pvector = python_pvector
def v(*elements):
"""
Create a new persistent vector containing all parameters to this function.
>>> v1 = v(1, 2, 3)
>>> v1
pvector([1, 2, 3])
"""
return pvector(elements)
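
# A minimal usage sketch (illustrative only, not part of the library): it shows that "mutating"
# calls return new vectors, that the original is left untouched, and how an evolver batches
# updates. The names _pvector_usage_demo, demo_vector and demo_evolver are placeholders.
def _pvector_usage_demo():  # pragma: no cover
    demo_vector = v(1, 2, 3)
    updated = demo_vector.set(1, 99)             # returns a new vector
    assert demo_vector == pvector([1, 2, 3])     # the original vector is unchanged
    demo_evolver = updated.evolver()             # mutable, transaction-like view
    demo_evolver.extend([4, 5])
    demo_evolver[0] = 0
    return demo_evolver.persistent()             # pvector([0, 99, 3, 4, 5])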
|
|
from __future__ import absolute_import
import abc
import pytest
from django.core.exceptions import ValidationError
from django.utils import timezone
from nose.tools import assert_equal, assert_in, assert_raises
import mock
from framework.auth import Auth
from tests.base import DbTestCase
from osf_tests.factories import UserFactory, CommentFactory, ProjectFactory, PreprintFactory, RegistrationFactory, AuthUserFactory
from osf.models import NotableEmailDomain, SpamStatus
from website import settings, mails
@pytest.mark.django_db
@mock.patch('framework.auth.views.mails.send_mail')
def test_throttled_autoban(mock_mail):
settings.SPAM_THROTTLE_AUTOBAN = True
user = AuthUserFactory()
projects = []
for _ in range(7):
proj = ProjectFactory(creator=user)
proj.flag_spam()
proj.save()
projects.append(proj)
mock_mail.assert_called_with(osf_support_email=settings.OSF_SUPPORT_EMAIL,
can_change_preferences=False,
to_addr=user.username,
user=user,
mail=mails.SPAM_USER_BANNED)
user.reload()
assert user.is_disabled
for project in projects:
assert not project.is_public
@pytest.mark.enable_implicit_clean
class TestReportAbuse(DbTestCase):
def setUp(self):
super(TestReportAbuse, self).setUp()
self.comment = CommentFactory()
self.auth = Auth(user=self.comment.user)
def test_report_abuse(self):
user = UserFactory()
time = timezone.now()
self.comment.report_abuse(
user, date=time, category='spam', text='ads', save=True)
assert_equal(self.comment.spam_status, SpamStatus.FLAGGED)
equivalent = dict(
date=time,
category='spam',
text='ads',
retracted=False
)
assert_in(user._id, self.comment.reports)
assert_equal(self.comment.reports[user._id], equivalent)
def test_report_abuse_own_comment(self):
with assert_raises(ValueError):
self.comment.report_abuse(
self.auth.user,
category='spam', text='ads',
save=True
)
assert_equal(self.comment.spam_status, SpamStatus.UNKNOWN)
def test_retract_report(self):
user = UserFactory()
time = timezone.now()
self.comment.report_abuse(
user, date=time, category='spam', text='ads', save=True
)
assert_equal(self.comment.spam_status, SpamStatus.FLAGGED)
self.comment.retract_report(user, save=True)
assert_equal(self.comment.spam_status, SpamStatus.UNKNOWN)
equivalent = {
'date': time,
'category': 'spam',
'text': 'ads',
'retracted': True
}
assert_in(user._id, self.comment.reports)
assert_equal(self.comment.reports[user._id], equivalent)
def test_retract_report_not_reporter(self):
reporter = UserFactory()
non_reporter = UserFactory()
self.comment.report_abuse(
reporter, category='spam', text='ads', save=True
)
with assert_raises(ValueError):
self.comment.retract_report(non_reporter, save=True)
assert_equal(self.comment.spam_status, SpamStatus.FLAGGED)
def test_retract_one_report_of_many(self):
user_1 = UserFactory()
user_2 = UserFactory()
time = timezone.now()
self.comment.report_abuse(
user_1, date=time, category='spam', text='ads', save=True
)
assert_equal(self.comment.spam_status, SpamStatus.FLAGGED)
self.comment.report_abuse(
user_2, date=time, category='spam', text='all', save=True
)
self.comment.retract_report(user_1, save=True)
equivalent = {
'date': time,
'category': 'spam',
'text': 'ads',
'retracted': True
}
assert_in(user_1._id, self.comment.reports)
assert_equal(self.comment.reports[user_1._id], equivalent)
assert_equal(self.comment.spam_status, SpamStatus.FLAGGED)
def test_cannot_remove_flag_not_retracted(self):
user = UserFactory()
self.comment.report_abuse(
user, category='spam', text='ads', save=True
)
self.comment.remove_flag(save=True)
assert_equal(self.comment.spam_status, SpamStatus.FLAGGED)
def test_remove_flag(self):
self.comment.flag_spam()
self.comment.save()
assert_equal(self.comment.spam_status, SpamStatus.FLAGGED)
self.comment.remove_flag(save=True)
assert_equal(self.comment.spam_status, SpamStatus.UNKNOWN)
def test_validate_reports_bad_key(self):
self.comment.reports[None] = {'category': 'spam', 'text': 'ads'}
with assert_raises(ValidationError):
self.comment.save()
def test_validate_reports_bad_type(self):
self.comment.reports[self.auth.user._id] = 'not a dict'
with assert_raises(ValidationError):
self.comment.save()
def test_validate_reports_bad_value(self):
self.comment.reports[self.auth.user._id] = {'foo': 'bar'}
with assert_raises(ValidationError):
self.comment.save()
@pytest.mark.django_db
class TestSpamState:
@pytest.fixture(params=[
CommentFactory,
ProjectFactory,
PreprintFactory,
RegistrationFactory,
UserFactory,
])
def spammable_thing(self, request):
spammable_factory = request.param
return spammable_factory()
def test_flag_spam(self, spammable_thing):
assert not spammable_thing.is_spammy
assert not spammable_thing.is_spam
spammable_thing.flag_spam()
spammable_thing.save()
assert spammable_thing.is_spammy
assert not spammable_thing.is_spam
def test_confirm_ham(self, spammable_thing):
spammable_thing.confirm_ham(save=True)
assert spammable_thing.is_ham
def test_confirm_spam(self, spammable_thing):
spammable_thing.confirm_spam(save=True)
assert spammable_thing.is_spam
@pytest.mark.parametrize('assume_ham', (True, False))
@pytest.mark.parametrize('spam_status, expected_props', (
(SpamStatus.UNKNOWN, {
'is_spam': False,
'is_spammy': False,
'is_ham': False,
'is_hammy': None, # set in the test body based on assume_ham
}),
(SpamStatus.FLAGGED, {
'is_spam': False,
'is_spammy': True,
'is_ham': False,
'is_hammy': False,
}),
(SpamStatus.SPAM, {
'is_spam': True,
'is_spammy': True,
'is_ham': False,
'is_hammy': False,
}),
(SpamStatus.HAM, {
'is_spam': False,
'is_spammy': False,
'is_ham': True,
'is_hammy': True,
}),
))
def test_spam_status_properties(self, spammable_thing, assume_ham, spam_status, expected_props):
if spam_status == SpamStatus.UNKNOWN:
expected_props['is_hammy'] = assume_ham
with mock.patch.object(type(spammable_thing), 'is_assumed_ham', new_callable=mock.PropertyMock) as mock_assumed_ham:
mock_assumed_ham.return_value = assume_ham
spammable_thing.spam_status = spam_status
assert spammable_thing.is_spam == expected_props['is_spam']
assert spammable_thing.is_spammy == expected_props['is_spammy']
assert spammable_thing.is_ham == expected_props['is_ham']
assert spammable_thing.is_hammy == expected_props['is_hammy']
@pytest.mark.django_db
class TestSpamCheckEmailDomain:
@mock.patch('osf.models.spam.SpamMixin.do_check_spam', return_value=False)
@mock.patch.object(settings, 'SPAM_CHECK_ENABLED', True)
@mock.patch.object(settings, 'SPAM_CHECK_PUBLIC_ONLY', False)
def test_trusted_domain(self, mock_do_check_spam):
user = UserFactory()
project = ProjectFactory()
# spam check should normally call do_check_spam
assert not mock_do_check_spam.called
is_spam = project.check_spam(user, saved_fields={'title'}, request_headers={})
assert not is_spam
assert mock_do_check_spam.called
# but what if we trust the user's email domain?
user_email_address = user.emails.values_list('address', flat=True).first()
user_email_domain = user_email_address.rpartition('@')[2].lower()
NotableEmailDomain.objects.create(
domain=user_email_domain,
note=NotableEmailDomain.Note.ASSUME_HAM_UNTIL_REPORTED,
)
# should not call do_check_spam this time
mock_do_check_spam.reset_mock()
assert not mock_do_check_spam.called
is_spam = project.check_spam(user, saved_fields={'title'}, request_headers={})
assert not is_spam
assert not mock_do_check_spam.called
|
|
#!/usr/bin/env python
from __future__ import division
import logging
import requests
import datetime
import ConfigParser
import argparse
import os
import yara
import re
import sys
import json
import time
try:
import pygal
from pygal.style import Style
pygal_available = True
except ImportError:
    print "Warning: could not import pygal. Yaraqa is not going to generate plots."
    pygal_available = False
class YaraQA():
@property
def logger(self):
name = 'yaraqa'
return logging.getLogger(name)
def __init__(self, family, config_file='yaraqa.conf', method='ALL', malware=True, goodware=True, verbose=False, nolog=False, show=False, plot=False, targeted=False, timeout=15):
'''
This method constructs a yaraqa object.
'''
# Default behaviour: ./yaraqa.py [family] --static --memory --malware --goodware
self.GOODWARE_DIR = ''
self.MALWARE_DIR = ''
self.YARA_STATIC_DIR = ''
self.YARA_MEMORY_DIR = ''
self.API_PATH = ''
self.API_PORT = ''
self.API_HOST = ''
self.API_IP = ''
self.HIGH_THRESHOLD = ''
self.MEDIUM_THRESHOLD = ''
self.PLOT_LABELS = []
self.PLOT_STATIC_RATIOS = []
self.PLOT_MEMORY_RATIOS = []
self.PLOT_TOTAL_MATCH = []
self.nolog = nolog
if not family:
self.die("--family must be set")
self.family = family.lower()
self.method = method
self.malware = malware
self.goodware = goodware
self.verbose = verbose
if timeout < 0:
self.die("Timeout cannot be less than zero")
self.timeout = timeout
self.targeted = targeted
self.plot = plot
self.LOGGING = True
if self.nolog:
self.LOGGING = False
self.init_logging()
if self.method:
self.method = method.upper()
if self.method != 'STATIC' and self.method != 'MEMORY' and self.method != 'ALL':
self.die("Method is not valid. Valid methods: MEMORY, STATIC, ALL.")
self.config_file = config_file
self.parse_config()
self.show = show
if self.show:
self.show_available()
self.DIRECTORIES = []
if self.malware:
self.DIRECTORIES.append(self.MALWARE_DIR)
if self.goodware:
self.DIRECTORIES.append(self.GOODWARE_DIR)
if not self.goodware and not self.malware:
self.DIRECTORIES.append(self.MALWARE_DIR)
self.DIRECTORIES.append(self.GOODWARE_DIR)
def die(self, m):
'''
This method logs a critical message and exits yaraqa.py
'''
self.logger.critical(m)
sys.exit()
def init_yara_rules(self):
'''
This method tries to find and compile the yara rules specified by 'family' before the q&a test starts.
'''
if (self.method == 'STATIC' or self.method == 'ALL'):
if not os.path.isfile(self.YARA_STATIC_DIR+self.family+'.yara'):
yara_path = self.YARA_STATIC_DIR+self.family+'.yara'
self.die("Can't found static yaras for this family! {0}".format(str(yara_path)))
yara_path = self.YARA_STATIC_DIR+self.family+'.yara'
rule_static = yara.compile(filepath=yara_path)
if not rule_static:
self.die("Couldn't compile the .yara! {0}".format(str(yara_path)))
if (self.method == 'MEMORY' or self.method == 'ALL'):
if not os.path.isfile(self.YARA_MEMORY_DIR+self.family+'.yara'):
yara_path = self.YARA_MEMORY_DIR+self.family+'.yara'
self.die("Can't found memory yaras for this family! {0}".format(str(yara_path)))
yara_path = self.YARA_MEMORY_DIR+self.family+'.yara'
rule_memory = yara.compile(filepath=yara_path)
if not rule_memory:
self.die("Couldn't compile the .yara! {0}".format(str(yara_path)))
if (self.method == 'STATIC' or self.method == 'ALL'):
return rule_static
else:
return rule_memory
def init_logging(self):
'''
        This method sets up the logging configuration.
'''
self.logger.setLevel(logging.DEBUG)
if not self.logger.handlers:
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.CRITICAL)
if self.LOGGING:
if not os.path.exists('reports'):
os.makedirs('reports')
daytime = datetime.datetime.now().strftime("%d%m%Y_%H_%M_%S")
logname = "reports/report_{}_{}.log".format(self.family,daytime)
fileHandler = logging.FileHandler(logname)
fileHandler.setLevel(logging.DEBUG)
self.logger.addHandler(fileHandler)
if self.verbose:
consoleHandler.setLevel(logging.DEBUG)
self.logger.addHandler(consoleHandler)
def request_api(self):
'''
This method makes a simple request to the API to see whether it's working or not.
'''
try:
r = requests.get('http://{0}:{1}/cuckoo/status'.format(str(self.API_IP), str(self.API_PORT)))
except requests.exceptions.RequestException as err:
self.die('ERROR: could not communicate with Cuckoo API, check yaraqa.conf: {0}'.format(str(err)))
except Exception as err:
self.die('ERROR: could not communicate with Cuckoo API, check yaraqa.conf: {0}'.format(str(err)))
def parse_config(self):
'''
        This method parses the yaraqa.conf file in order to set up the configuration.
'''
if os.path.isfile(self.config_file):
if not os.access(self.config_file, os.R_OK):
self.die("Cannot read {0} configuration file".format(str(self.config_file)))
else:
self.die("Cannot find {0} configuration file".format(str(self.config_file)))
try:
config_parser = ConfigParser.ConfigParser()
configFilePath = self.config_file
config_parser.read(configFilePath)
self.GOODWARE_DIR = config_parser.get('SAMPLES_DIR', 'goodware_path').replace('"', '')
self.MALWARE_DIR = config_parser.get('SAMPLES_DIR', 'malware_path').replace('"', '')
self.YARA_STATIC_DIR = config_parser.get('YARA_DIR', 'yara_static_path').replace('"', '')
self.YARA_MEMORY_DIR = config_parser.get('YARA_DIR', 'yara_memory_path').replace('"', '')
self.API_PATH = config_parser.get('CUCKOO_API', 'api_path').replace('"', '')
self.API_HOST = config_parser.get('CUCKOO_API', 'api_host').replace('"', '')
self.API_PORT = config_parser.get('CUCKOO_API', 'api_port').replace('"', '')
self.API_IP = config_parser.get('CUCKOO_API', 'api_ip').replace('"', '')
self.HIGH_THRESHOLD = config_parser.get('THRESHOLD_LEVELS', 'high_threshold').replace('"', '')
self.MEDIUM_THRESHOLD = config_parser.get('THRESHOLD_LEVELS', 'medium_threshold').replace('"', '')
except ConfigParser.ParsingError as err:
self.die('Could not parse config: {0}'.format(str(err)))
except Exception as err:
self.die('Could not parse config: {0}'.format(str(err)))
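    # For reference, a minimal yaraqa.conf sketch matching the sections and options read above
    # might look like the following. All paths and values here are illustrative assumptions,
    # not shipped defaults:
    #
    #   [SAMPLES_DIR]
    #   goodware_path = "/data/samples/goodware/"
    #   malware_path = "/data/samples/malware/"
    #
    #   [YARA_DIR]
    #   yara_static_path = "/data/yara/static/"
    #   yara_memory_path = "/data/yara/memory/"
    #
    #   [CUCKOO_API]
    #   api_path = "/cuckoo/status"
    #   api_host = "localhost"
    #   api_port = "8090"
    #   api_ip = "127.0.0.1"
    #
    #   [THRESHOLD_LEVELS]
    #   high_threshold = "90"
    #   medium_threshold = "70"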
def create_cuckoo_task(self, current_file):
'''
This method creates a task at cuckoo by sending a multipart file.
'''
try:
request_url = ('http://{0}:{1}/tasks/create/file'.format(str(self.API_IP), str(self.API_PORT)))
with open(current_file, "rb") as sample:
multipart_file = {"file": ("temp_file_name", sample)}
request = requests.post(request_url, files=multipart_file, timeout=self.timeout)
if request.status_code != 200:
self.die("An error ocurred: {} status code".format(request.status_code))
json_decoder = json.JSONDecoder()
task_id = json_decoder.decode(request.text)["task_id"]
return task_id
except requests.exceptions.RequestException as err:
self.die(err)
except Exception as err:
self.die(err)
def view_cuckoo_report(self, task_id, tsleep=5):
'''
        This method retrieves the resulting Cuckoo task report.
'''
try:
r = requests.get('http://{0}:{1}/tasks/report/{2}'.format(str(self.API_IP), str(self.API_PORT), str(task_id)))
while r.status_code != 200:
time.sleep(tsleep)
r = requests.get('http://{0}:{1}/tasks/report/{2}'.format(str(self.API_IP), str(self.API_PORT), str(task_id)))
report = json.loads(r.text)
return report
except requests.exceptions.RequestException as err:
self.die(err)
except Exception as err:
self.die(err)
def show_available(self):
'''
        This method shows all available .yara files in both the static and memory yara directories.
'''
TOTAL_FILES = 0
print '\033[0;32m[STATIC YARAS]\033[0m\n'
for root, dirs, files in os.walk(self.YARA_STATIC_DIR):
for file in files:
TOTAL_FILES = TOTAL_FILES + 1
print "{}".format(file)
print '\n--->Total Static Yaras: {0}\n'.format(str(TOTAL_FILES))
TOTAL_FILES = 0
print '\033[0;32m[MEMORY YARAS]\033[0m\n'
for root, dirs, files in os.walk(self.YARA_MEMORY_DIR):
for file in files:
TOTAL_FILES = TOTAL_FILES + 1
print "{}".format(file)
print '\n--->Total Memory Yaras: {0}\n'.format(str(TOTAL_FILES))
self.die("")
def print_results(self, method, directory, expected_matches, family_matches, misses, false_positives, total_matches):
'''
This method prints the analysis results
'''
self.logger.debug(" Expected matches: {0}".format(str(expected_matches)))
self.logger.debug(" Family matches: {0}".format(str(family_matches)))
self.logger.debug(" Misses: {0}".format(str(misses)))
self.logger.debug(" False positives: {0}".format(str(false_positives)))
self.logger.debug(" Total matches: {0}".format(str(total_matches)))
if directory == self.MALWARE_DIR:
if expected_matches != 0:
ratio = (family_matches/expected_matches)*100
ratio = "{:.2f}".format(ratio)
if method == 'STATIC':
self.PLOT_STATIC_RATIOS.append(float(ratio))
if method == 'MEMORY':
self.PLOT_MEMORY_RATIOS.append(float(ratio))
self.print_threshold(" Ratio: ", ratio)
def print_threshold(self, message, ratio):
'''
This method prints with colors depending on the ratio
'''
GREEN_COLOR = '\033[1;32m'
YELLOW_COLOR = '\033[1;33m'
RED_COLOR = '\033[1;31m'
BOLD_COLOR = '\033[1;37m'
END_TAG_COLOR = '\033[0m'
if float(ratio) >= float(self.HIGH_THRESHOLD):
self.logger.debug("{0}{1}{2}{3}{4}%{5}\n".format(str(BOLD_COLOR), str(message), str(END_TAG_COLOR), \
str(GREEN_COLOR), str(ratio), str(END_TAG_COLOR)))
elif float(ratio) >= float(self.MEDIUM_THRESHOLD):
self.logger.debug("{0}{1}{2}{3}{4}%{5}\n".format(str(BOLD_COLOR), str(message), str(END_TAG_COLOR), \
str(YELLOW_COLOR), str(ratio), str(END_TAG_COLOR)))
else:
self.logger.debug("{0}{1}{2}{3}{4}%{5}\n".format(str(BOLD_COLOR), str(message), str(END_TAG_COLOR), \
str(RED_COLOR), str(ratio), str(END_TAG_COLOR)))
def render_plot(self):
'''
        This method renders an .svg plot showing the yara rules' matching accuracy.
'''
if not self.PLOT_STATIC_RATIOS:
self.PLOT_STATIC_RATIOS.append(0)
if not self.PLOT_MEMORY_RATIOS:
self.PLOT_MEMORY_RATIOS.append(0)
if not self.PLOT_TOTAL_MATCH:
self.PLOT_TOTAL_MATCH.append(0)
PLOT_COLOR_PINK = '#990033'
PLOT_COLOR_GREEN = '#66CC33'
PLOT_COLOR_BLUE = '#006699'
custom_style = Style(colors=(PLOT_COLOR_PINK, PLOT_COLOR_GREEN, PLOT_COLOR_BLUE))
bar_chart = pygal.Bar(style=custom_style)
bar_chart.title = 'Yara Q&A Test'
bar_chart.title_font_size = 18
bar_chart.label_font_size = 8
bar_chart.x_labels = self.PLOT_LABELS
bar_chart.x_label_rotation = 20
bar_chart.y_title = '% Matched'
bar_chart.add('STATIC', self.PLOT_STATIC_RATIOS)
bar_chart.add('MEMORY', self.PLOT_MEMORY_RATIOS)
bar_chart.add('TOTAL', self.PLOT_TOTAL_MATCH)
bar_chart.x_labels_major = []
for i in range(len(self.PLOT_TOTAL_MATCH)):
if self.PLOT_TOTAL_MATCH[i] == 100:
bar_chart.x_labels_major.append(bar_chart.x_labels[i])
timestamp = datetime.datetime.now().strftime("%d%m%Y_%H_%M_%S")
chartname = 'report_'+timestamp+'.svg'
bar_chart.render_to_file(chartname)
def match_yara_rules(self):
'''
        This method matches the yara rules against the malware and/or goodware repositories.
'''
rules = self.init_yara_rules()
self.PLOT_LABELS.append(format(str(self.family)))
for path in self.DIRECTORIES:
EXPECTED_MATCHES = 0
TOTAL_STATIC_MATCHES = 0
TOTAL_MEMORY_MATCHES = 0
STATIC_FAMILY_MATCHES = 0
MEMORY_FAMILY_MATCHES = 0
STATIC_FALSE_POSITIVES = 0
MEMORY_FALSE_POSITIVES = 0
STATIC_MISS = 0
MEMORY_MISS = 0
TOTAL_FILES = 0
TOTAL_MATCHES = 0
self.logger.debug('Matching against {0}'.format(str(path)))
self.logger.debug('========================================\n')
for root, dirs, files in os.walk(path):
for file in files:
current_file = os.path.join(root, file)
file_matched = False
if self.targeted:
if self.family not in current_file:
continue
TOTAL_FILES = TOTAL_FILES + 1
if self.family in current_file:
EXPECTED_MATCHES = EXPECTED_MATCHES + 1
self.logger.debug('\nTARGET: {0}'.format(str(current_file)))
if (self.method == 'STATIC'):
matches = rules.match(current_file)
elif (self.method == 'MEMORY'):
task_id = self.create_cuckoo_task(current_file)
else:
matches = rules.match(current_file)
task_id = self.create_cuckoo_task(current_file)
# MATCH STATIC
if (self.method == 'STATIC' or self.method == 'ALL'):
if matches:
TOTAL_STATIC_MATCHES = TOTAL_STATIC_MATCHES + 1
if self.family in current_file:
if not file_matched:
TOTAL_MATCHES = TOTAL_MATCHES + 1
file_matched = True
STATIC_FAMILY_MATCHES = STATIC_FAMILY_MATCHES + 1
self.logger.debug('-> STATIC YARA MATCH {0} \033[0;32m[OK]\033[0m'.format(str(matches)))
else:
STATIC_FALSE_POSITIVES = STATIC_FALSE_POSITIVES + 1
self.logger.debug('FALSE POSITIVE: ' + current_file)
self.logger.debug('-> STATIC YARA MATCH {0} \033[0;31m[FALSE POSITIVE]\033[0m'.format(str(matches)))
else:
if self.family in current_file:
STATIC_MISS = STATIC_MISS + 1
self.logger.debug('-> STATIC YARA \033[0;31m[MISS]\033[0m')
# MATCH MEMORY
if (self.method == 'MEMORY' or self.method == 'ALL'):
report = self.view_cuckoo_report(task_id)
matched = False
rxp = re.compile(self.family, re.IGNORECASE)
if 'memory' in report:
if 'yarascan' in report['memory']:
if 'data' in report['memory']['yarascan']:
matched = any(rxp.search(yar_n['rule']) for yar_n in report['memory']['yarascan']['data'])
else:
if self.family in current_file:
self.logger.debug("Warning: No 'data' key found in 'yarascan' section. file = {0}".format(str(current_file)))
else:
if self.family in current_file:
self.logger.debug("Warning: No 'yarascan' key found in 'memory' section. file = {0}".format(str(current_file)))
else:
if self.family in current_file:
self.logger.debug("Warning: No 'memory' key found in report data. file = {0}".format(str(current_file)))
if matched:
TOTAL_MEMORY_MATCHES = TOTAL_MEMORY_MATCHES + 1
if self.family in current_file:
if not file_matched:
TOTAL_MATCHES = TOTAL_MATCHES + 1
file_matched = True
MEMORY_FAMILY_MATCHES = MEMORY_FAMILY_MATCHES + 1
self.logger.debug('-> MEMORY YARA MATCH \033[0;32m[OK]\033[0m')
else:
MEMORY_FALSE_POSITIVES = MEMORY_FALSE_POSITIVES + 1
self.logger.debug('FALSE POSITIVE: {0}'.format(str(current_file)))
self.logger.debug('-> MEMORY YARA MATCH \033[0;31m[FALSE POSITIVE]\033[0m')
else:
if self.family in current_file:
MEMORY_MISS = MEMORY_MISS + 1
self.logger.debug('-> MEMORY YARA \033[0;31m[MISS]\033[0m')
if path == self.MALWARE_DIR:
self.logger.debug('\n\t_MALWARE REPO_')
elif path == self.GOODWARE_DIR:
self.logger.debug('\n\t_GOODWARE REPO_')
if (self.method == 'STATIC' or self.method == 'ALL'):
self.logger.debug('\n STATIC YARA Q&A OVERVIEW:')
self.logger.debug(' =========================')
self.print_results('STATIC', path, EXPECTED_MATCHES, STATIC_FAMILY_MATCHES, STATIC_MISS, STATIC_FALSE_POSITIVES, TOTAL_STATIC_MATCHES)
if (self.method == 'MEMORY' or self.method == 'ALL'):
self.logger.debug('\n MEMORY YARA Q&A OVERVIEW:')
self.logger.debug(' =========================')
self.print_results('MEMORY', path, EXPECTED_MATCHES, MEMORY_FAMILY_MATCHES, MEMORY_MISS, MEMORY_FALSE_POSITIVES, TOTAL_MEMORY_MATCHES)
if path == self.MALWARE_DIR:
if EXPECTED_MATCHES != 0:
TOTAL_MATCHES = (TOTAL_MATCHES/EXPECTED_MATCHES)*100
TOTAL_MATCHES = "{:.2f}".format(TOTAL_MATCHES)
self.PLOT_TOTAL_MATCH.append(float(TOTAL_MATCHES))
self.print_threshold(" Total Accuracy: ", TOTAL_MATCHES)
self.logger.debug(" Total files analyzed: {0}\n\n".format(str(TOTAL_FILES)))
if self.plot:
if pygal_available:
self.render_plot()
DATA_PLOT = [self.PLOT_LABELS, self.PLOT_STATIC_RATIOS, self.PLOT_MEMORY_RATIOS, self.PLOT_TOTAL_MATCH]
return DATA_PLOT
def parse_arguments():
'''
    This function parses the arguments received by yaraqa.
'''
try:
args = P.parse_args()
    except IOError as e:
        P.error(str(e))
if args.memory and not args.static:
method = 'MEMORY'
if args.static and not args.memory:
method = 'STATIC'
if args.all or (not args.all and not args.static and not args.memory) or (args.static and args.memory):
method = 'ALL'
malware_dir = True
goodware_dir = True
if not args.malware and args.goodware:
malware_dir = False
if not args.goodware and args.malware:
goodware_dir = False
DATA = {
'family': args.family,
'method': method,
'malware_dir': malware_dir,
'goodware_dir': goodware_dir,
'verbose': args.verbose,
'nolog': args.nolog,
'show': args.show,
'plot': args.plot,
'timeout': args.timeout,
'targeted': args.targeted
}
return DATA
if __name__ == '__main__':
P = argparse.ArgumentParser(description=' == Yara Quality Assurance Test ==')
    P.add_argument('--family' , default=None, help='Choose a malware family to Q&A', type=str, required=True)
P.add_argument('--verbose' , action='store_true', help='Be Verbose! =)')
P.add_argument('--static' , action='store_true', help='Yara static matching only')
P.add_argument('--memory' , action='store_true', help='Yara memory matching only')
P.add_argument('--all' , action='store_true', help='Yara static and memory matching')
P.add_argument('--show' , action='store_true', help='Show available yaras and exit')
P.add_argument('--malware' , action='store_true', help='Match against malware repo.')
P.add_argument('--goodware' , action='store_true', help='Match against goodware repo.')
P.add_argument('--nolog' , action='store_true', help='Do not store results in a .log file')
P.add_argument('--plot' , action='store_true', help='Plot matching statistics')
P.add_argument('--timeout' , default=15, help='Timeout for cuckoo memory analysis', type=int)
P.add_argument('--targeted' , action='store_true', help='Scan only on targeted files')
DATA = parse_arguments()
QA = YaraQA(DATA['family'], 'yaraqa.conf', DATA['method'], DATA['malware_dir'], DATA['goodware_dir'], DATA['verbose'], DATA['nolog'], \
DATA['show'], DATA['plot'], DATA['targeted'], DATA['timeout'])
QA.match_yara_rules()
QA.die('Q&A Finished\n')
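# Illustrative invocations (the family name "zeus" is a placeholder; it must match a .yara file
# present in the configured static/memory yara directories):
#
#   ./yaraqa.py --family zeus --static --malware --verbose
#   ./yaraqa.py --family zeus --all --malware --goodware --plot --timeout 30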
|
|
# Authors: Alex Li <7Alex7Li@gmail.com>
# Siyuan Ma <Siyuan.ma9@gmail.com>
import numpy as np
from scipy.linalg import eigh, LinAlgError
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.metrics.pairwise import pairwise_kernels, euclidean_distances
from sklearn.utils import check_random_state
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import check_is_fitted, check_X_y
class BaseEigenPro(BaseEstimator):
"""
Base class for EigenPro iteration.
"""
def __init__(
self,
batch_size="auto",
n_epoch=2,
n_components=1000,
subsample_size="auto",
kernel="rbf",
gamma="scale",
degree=3,
coef0=1,
kernel_params=None,
random_state=None,
):
self.batch_size = batch_size
self.n_epoch = n_epoch
self.n_components = n_components
self.subsample_size = subsample_size
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
self.random_state = random_state
def _kernel(self, X, Y):
"""Calculate the kernel matrix
Parameters
        ----------
X : {float, array}, shape = [n_samples, n_features]
Input data.
Y : {float, array}, shape = [n_centers, n_targets]
Kernel centers.
Returns
-------
K : {float, array}, shape = [n_samples, n_centers]
Kernel matrix.
"""
if (
self.kernel != "rbf"
and self.kernel != "laplace"
and self.kernel != "cauchy"
):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {
"gamma": self.gamma_,
"degree": self.degree,
"coef0": self.coef0,
}
return pairwise_kernels(
X, Y, metric=self.kernel, filter_params=True, **params
)
distance = euclidean_distances(X, Y, squared=True)
bandwidth = np.float32(1.0 / np.sqrt(2.0 * self.gamma_))
if self.kernel == "rbf":
distance = -self.gamma_ * distance
K = np.exp(distance)
elif self.kernel == "laplace":
d = np.maximum(distance, 0)
K = np.exp(-np.sqrt(d) / bandwidth)
else: # self.kernel == "cauchy":
K = 1 / (1 + 2.0 * self.gamma_ * distance)
return K
def _nystrom_svd(self, X, n_components):
"""Compute the top eigensystem of a kernel
operator using Nystrom method
Parameters
----------
X : {float, array}, shape = [n_subsamples, n_features]
Subsample feature matrix.
n_components : int
Number of top eigencomponents to be restored.
Returns
-------
E : {float, array}, shape = [k]
Top eigenvalues.
Lambda : {float, array}, shape = [n_subsamples, k]
Top eigenvectors of a subsample kernel matrix (which can be
directly used to approximate the eigenfunctions of the kernel
operator).
"""
m, _ = X.shape
K = self._kernel(X, X)
W = K / m
try:
E, Lambda = eigh(W, eigvals=(m - n_components, m - 1))
except LinAlgError:
# Use float64 when eigh fails due to precision
W = np.float64(W)
E, Lambda = eigh(W, eigvals=(m - n_components, m - 1))
E, Lambda = np.float32(E), np.float32(Lambda)
# Flip so eigenvalues are in descending order.
E = np.maximum(np.float32(1e-7), np.flipud(E))
Lambda = np.fliplr(Lambda)[:, :n_components] / np.sqrt(
m, dtype="float32"
)
return E, Lambda
def _setup(self, feat, max_components, mG, alpha):
"""Compute preconditioner and scale factors for EigenPro iteration
Parameters
----------
feat : {float, array}, shape = [n_samples, n_features]
Feature matrix (normally from training data).
max_components : int
Maximum number of components to be used in EigenPro iteration.
mG : int
Maximum batch size to fit in memory.
alpha : float
Exponential factor (< 1) for eigenvalue ratio.
Returns
-------
max_S : float
Normalized largest eigenvalue.
max_kxx : float
Maximum of k(x,x) where k is the EigenPro kernel.
E : {float, array}, shape = [k]
Preconditioner for EigenPro
Lambda : {float, array}, shape = [n_subsamples, k]
Top eigenvectors of a subsample kernel matrix
"""
alpha = np.float32(alpha)
        # Estimate eigenvalues (E) and eigenvectors (Lambda) of the kernel matrix
# corresponding to the feature matrix.
E, Lambda = self._nystrom_svd(feat, max_components)
n_subsamples = feat.shape[0]
# Calculate the number of components to be used such that the
# corresponding batch size is bounded by the subsample size and the
# memory size.
max_bs = min(max(n_subsamples / 5, mG), n_subsamples)
n_components = np.sum(np.power(1 / E, alpha) < max_bs) - 1
if n_components < 2:
n_components = min(E.shape[0] - 1, 2)
Lambda = Lambda[:, :n_components]
scale = np.power(E[0] / E[n_components], alpha)
# Compute part of the preconditioner for step 2 of gradient descent in
# the eigenpro model
D = (1 - np.power(E[n_components] / E[:n_components], alpha)) / E[
:n_components
]
max_S = E[0].astype(np.float32)
kxx = 1 - np.sum(Lambda ** 2, axis=1) * n_subsamples
return max_S / scale, np.max(kxx), D, Lambda
def _initialize_params(self, X, Y, random_state):
"""
Validate parameters passed to the model, choose parameters
that have not been passed in, and run setup for EigenPro iteration.
Parameters
----------
X : {float, array}, shape = [n_samples, n_features]
Training data.
Y : {float, array}, shape = [n_samples, n_targets]
Training targets.
random_state : RandomState instance
The random state to use for random number generation
Returns
-------
Y : {float, array}, shape = [n_samples, n_targets]
Training targets. If Y was originally of shape
[n_samples], it is now [n_samples, 1].
E : {float, array}, shape = [k]
Preconditioner for EigenPro
Lambda : {float, array}, shape = [n_subsamples, k]
Top eigenvectors of a subsample kernel matrix
eta : float
The learning rate
pinx : {int, array}, shape = [sample_size]
The rows of X used to calculate E and Lambda
"""
n, d = X.shape
n_label = 1 if len(Y.shape) == 1 else Y.shape[1]
self.centers_ = X
# Calculate the subsample size to be used.
if self.subsample_size == "auto":
if n < 100000:
sample_size = 4000
else:
sample_size = 12000
else:
sample_size = self.subsample_size
sample_size = min(n, sample_size)
n_components = min(sample_size - 1, self.n_components)
n_components = max(1, n_components)
# Approximate amount of memory that we want to use
mem_bytes = 0.1 * 1024 ** 3
# Memory used with a certain sample size
mem_usages = (d + n_label + 2 * np.arange(sample_size)) * n * 4
mG = np.int32(np.sum(mem_usages < mem_bytes))
# Calculate largest eigenvalue and max{k(x,x)} using subsamples.
pinx = random_state.choice(n, sample_size, replace=False).astype(
"int32"
)
if self.gamma == "scale":
self.gamma_ = np.float32(1.0 / (X.var() * d))
else:
self.gamma_ = self.gamma
max_S, beta, E, Lambda = self._setup(
X[pinx], n_components, mG, alpha=0.95
)
# Calculate best batch size.
if self.batch_size == "auto":
bs = min(np.int32(beta / max_S), mG) + 1
else:
bs = self.batch_size
self.bs_ = min(bs, n)
# Calculate best step size.
if self.bs_ < beta / max_S + 1:
eta = self.bs_ / beta
elif self.bs_ < n:
eta = 2.0 * self.bs_ / (beta + (self.bs_ - 1) * max_S)
else:
eta = 0.95 * 2 / max_S
        # Remember the shape of Y for predict() and ensure its shape is 2-D.
self.was_1D_ = False
if len(Y.shape) == 1:
Y = np.reshape(Y, (Y.shape[0], 1))
self.was_1D_ = True
return Y, E, Lambda, np.float32(eta), pinx
def validate_parameters(self):
"""
Validate the parameters of the model to ensure that no unreasonable
values were passed in.
"""
if self.n_epoch <= 0:
raise ValueError(
"n_epoch should be positive, was " + str(self.n_epoch)
)
if self.n_components < 0:
raise ValueError(
"n_components should be non-negative, was "
+ str(self.n_components)
)
if self.subsample_size != "auto" and self.subsample_size < 0:
raise ValueError(
"subsample_size should be non-negative, was "
+ str(self.subsample_size)
)
if self.batch_size != "auto" and self.batch_size <= 0:
raise ValueError(
"batch_size should be positive, was " + str(self.batch_size)
)
if self.gamma != "scale" and self.gamma <= 0:
raise ValueError(
"gamma should be positive, was " + str(self.gamma)
)
def _raw_fit(self, X, Y):
"""Train eigenpro regression model
Parameters
----------
X : {float, array}, shape = [n_samples, n_features]
Training data.
Y : {float, array}, shape = [n_samples, n_targets]
Training targets.
Returns
-------
self : returns an instance of self.
"""
X, Y = check_X_y(
X,
Y,
dtype=np.float32,
multi_output=True,
ensure_min_samples=3,
y_numeric=True,
)
Y = Y.astype(np.float32)
random_state = check_random_state(self.random_state)
self.validate_parameters()
"""Parameter Initialization"""
Y, D, V, eta, pinx = self._initialize_params(X, Y, random_state)
"""Training loop"""
n = self.centers_.shape[0]
self.coef_ = np.zeros((n, Y.shape[1]), dtype=np.float32)
step = np.float32(eta / self.bs_)
for _ in range(0, self.n_epoch):
epoch_inds = random_state.choice(
n, n // self.bs_ * self.bs_, replace=False
).astype("int32")
for batch_inds in np.array_split(epoch_inds, n // self.bs_):
batch_x = self.centers_[batch_inds]
kfeat = self._kernel(batch_x, self.centers_)
batch_y = Y[batch_inds]
# Update 1: Sampled Coordinate Block.
gradient = np.dot(kfeat, self.coef_) - batch_y
self.coef_[batch_inds] -= step * gradient
# Update 2: Fixed Coordinate Block
delta = np.dot(
V * D, np.dot(V.T, np.dot(kfeat[:, pinx].T, gradient))
)
self.coef_[pinx] += step * delta
return self
def _raw_predict(self, X):
"""Predict using the kernel regression model
Parameters
----------
X : {float, array}, shape = [n_samples, n_features]
Samples.
Returns
-------
Y : {float, array}, shape = [n_samples, n_targets]
Predicted targets.
"""
check_is_fitted(
self, ["bs_", "centers_", "coef_", "was_1D_", "gamma_"]
)
X = np.asarray(X, dtype=np.float64)
if len(X.shape) == 1:
raise ValueError(
"Reshape your data. X should be a matrix of shape"
" (n_samples, n_features)."
)
n = X.shape[0]
Ys = []
for batch_inds in np.array_split(range(n), max(1, n // self.bs_)):
batch_x = X[batch_inds]
kfeat = self._kernel(batch_x, self.centers_)
pred = np.dot(kfeat, self.coef_)
Ys.append(pred)
Y = np.vstack(Ys)
if self.was_1D_:
Y = np.reshape(Y, Y.shape[0])
return Y
def _get_tags(self):
tags = super()._get_tags()
tags["multioutput"] = True
return tags
class EigenProRegressor(RegressorMixin, BaseEigenPro):
"""Regression using EigenPro iteration.
Train least squared kernel regression model with mini-batch EigenPro
iteration.
Parameters
----------
batch_size : int, default = 'auto'
Mini-batch size for gradient descent.
n_epoch : int, default = 2
The number of passes over the training data.
n_components : int, default = 1000
        The maximum number of eigendirections used in modifying the kernel
        operator. Convergence rate speedup over normal gradient descent is
        approximately the largest eigenvalue over the n_components-th
        eigenvalue; however, it may take time to compute eigenvalues for
        large n_components.
subsample_size : int, default = 'auto'
The number of subsamples used for estimating the largest
n_component eigenvalues and eigenvectors. When it is set to 'auto',
it will be 4000 if there are less than 100,000 samples
(for training), and otherwise 12000.
kernel : string or callable, default = "rbf"
Kernel mapping used internally. Strings can be anything supported
by scikit-learn, however, there is special support for the
rbf, laplace, and cauchy kernels. If a callable is given, it should
accept two arguments and return a floating point number.
gamma : float, default='scale'
Kernel coefficient. If 'scale', gamma = 1/(n_features*X.var()).
Interpretation of the default value is left to the kernel;
see the documentation for sklearn.metrics.pairwise.
For kernels that use bandwidth, bandwidth = 1/sqrt(2*gamma).
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any
Additional parameters (keyword arguments) for kernel function
passed as callable object.
random_state : int, RandomState instance or None, (default=None)
The seed of the pseudo random number generator to use when
shuffling the data. If int, random_state is the seed used by the
random number generator; If RandomState instance, random_state is
the random number generator; If None, the random number generator
is the RandomState instance used by `np.random`.
References
----------
* Siyuan Ma, Mikhail Belkin
"Diving into the shallows: a computational perspective on
large-scale machine learning", NIPS 2017.
Examples
--------
>>> from sklearn_extra.kernel_methods import EigenProRegressor
>>> import numpy as np
>>> n_samples, n_features, n_targets = 4000, 20, 3
>>> rng = np.random.RandomState(1)
>>> x_train = rng.randn(n_samples, n_features)
>>> y_train = rng.randn(n_samples, n_targets)
>>> rgs = EigenProRegressor(n_epoch=3, gamma=.5, subsample_size=50)
>>> rgs.fit(x_train, y_train)
EigenProRegressor(gamma=0.5, n_epoch=3, subsample_size=50)
>>> y_pred = rgs.predict(x_train)
>>> loss = np.mean(np.square(y_train - y_pred))
"""
def __init__(
self,
batch_size="auto",
n_epoch=2,
n_components=1000,
subsample_size="auto",
kernel="rbf",
gamma="scale",
degree=3,
coef0=1,
kernel_params=None,
random_state=None,
):
super().__init__(
batch_size=batch_size,
n_epoch=n_epoch,
n_components=n_components,
subsample_size=subsample_size,
kernel=kernel,
gamma=gamma,
degree=degree,
coef0=coef0,
kernel_params=kernel_params,
random_state=random_state,
)
def fit(self, X, Y):
return self._raw_fit(X, Y)
def predict(self, X):
return self._raw_predict(X)
class EigenProClassifier(ClassifierMixin, BaseEigenPro):
"""Classification using EigenPro iteration.
Train least squared kernel classification model with mini-batch EigenPro
iteration.
Parameters
----------
batch_size : int, default = 'auto'
Mini-batch size for gradient descent.
n_epoch : int, default = 2
The number of passes over the training data.
n_components : int, default = 1000
        The maximum number of eigendirections used in modifying the
        kernel operator. Convergence rate speedup over normal gradient
        descent is approximately the largest eigenvalue over the
        n_components-th eigenvalue; however, it may take time to compute
        eigenvalues for large n_components.
subsample_size : int, default = 'auto'
The size of subsamples used for estimating the largest
n_component eigenvalues and eigenvectors. When it is set to
'auto', it will be 4000 if there are less than 100,000 samples
(for training), and otherwise 12000.
kernel : string or callable, default = "rbf"
Kernel mapping used internally. Strings can be anything supported
by scikit-learn, however, there is special support for the
rbf, laplace, and cauchy kernels. If a callable is given, it should
accept two arguments and return a floating point number.
gamma : float, default='scale'
Kernel coefficient. If 'scale', gamma = 1/(n_features*X.var()).
Interpretation of the default value is left to the kernel;
see the documentation for sklearn.metrics.pairwise.
For kernels that use bandwidth, bandwidth = 1/sqrt(2*gamma).
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels. Ignored by
other kernels.
kernel_params : mapping of string to any
Additional parameters (keyword arguments) for kernel function
passed as callable object.
random_state : int, RandomState instance or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data. If int, random_state is the seed used by
the random number generator; If RandomState instance,
random_state is the random number generator;
If None, the random number generator is the RandomState
instance used by `np.random`.
References
----------
* Siyuan Ma, Mikhail Belkin
"Diving into the shallows: a computational perspective on
large-scale machine learning", NIPS 2017.
Examples
--------
>>> from sklearn_extra.kernel_methods import EigenProClassifier
>>> import numpy as np
>>> n_samples, n_features, n_targets = 4000, 20, 3
>>> rng = np.random.RandomState(1)
>>> x_train = rng.randn(n_samples, n_features)
>>> y_train = rng.randint(n_targets, size=n_samples)
>>> rgs = EigenProClassifier(n_epoch=3, gamma=.01, subsample_size=50)
>>> rgs.fit(x_train, y_train)
EigenProClassifier(gamma=0.01, n_epoch=3, subsample_size=50)
>>> y_pred = rgs.predict(x_train)
>>> loss = np.mean(y_train != y_pred)
"""
def __init__(
self,
batch_size="auto",
n_epoch=2,
n_components=1000,
subsample_size="auto",
kernel="rbf",
gamma=0.02,
degree=3,
coef0=1,
kernel_params=None,
random_state=None,
):
super().__init__(
batch_size=batch_size,
n_epoch=n_epoch,
n_components=n_components,
subsample_size=subsample_size,
kernel=kernel,
gamma=gamma,
degree=degree,
coef0=coef0,
kernel_params=kernel_params,
random_state=random_state,
)
def fit(self, X, Y):
"""Train eigenpro classification model
Parameters
----------
X : {float, array}, shape = [n_samples, n_raw_feature]
The raw input feature matrix.
Y : {float, array}, shape =[n_samples]
The labels corresponding to the features of X.
Returns
-------
self : returns an instance of self.
"""
X, Y = check_X_y(
X,
Y,
dtype=np.float32,
force_all_finite=True,
multi_output=False,
ensure_min_samples=3,
)
check_classification_targets(Y)
self.classes_ = np.unique(Y)
loc = {}
for ind, label in enumerate(self.classes_):
loc[label] = ind
class_matrix = np.zeros((Y.shape[0], self.classes_.shape[0]))
for ind, label in enumerate(Y):
class_matrix[ind, loc[label]] = 1
self._raw_fit(X, class_matrix)
return self
def predict(self, X):
"""Predict using the kernel classification model
Parameters
----------
X : {float, array}, shape = [n_samples, n_features]
Samples.
Returns
-------
y : {float, array}, shape = [n_samples]
Predicted labels.
"""
Y = self._raw_predict(X)
return self.classes_[np.argmax(Y, axis=1)]
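
# A minimal fitting sketch (illustrative only, not part of the library): it uses the
# data-dependent bandwidth gamma="scale" supported by the base class instead of the
# classifier's fixed default of 0.02. All names and sizes below are placeholders.
def _eigenpro_classifier_demo():  # pragma: no cover
    rng = np.random.RandomState(0)
    x_demo = rng.randn(500, 10).astype(np.float32)
    y_demo = rng.randint(3, size=500)
    clf = EigenProClassifier(n_epoch=2, gamma="scale", subsample_size=100)
    clf.fit(x_demo, y_demo)
    return clf.predict(x_demo)  # predicted labels are drawn from clf.classes_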
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import json
from pyspark.java_gateway import local_connect_and_auth
from pyspark.serializers import read_int, write_int, write_with_length, UTF8Deserializer
class TaskContext(object):
"""
Contextual information about a task which can be read or mutated during
execution. To access the TaskContext for a running task, use:
:meth:`TaskContext.get`.
"""
_taskContext = None
_attemptNumber = None
_partitionId = None
_stageId = None
_taskAttemptId = None
_localProperties = None
_resources = None
def __new__(cls):
"""Even if users construct TaskContext instead of using get, give them the singleton."""
taskContext = cls._taskContext
if taskContext is not None:
return taskContext
cls._taskContext = taskContext = object.__new__(cls)
return taskContext
@classmethod
def _getOrCreate(cls):
"""Internal function to get or create global TaskContext."""
if cls._taskContext is None:
cls._taskContext = TaskContext()
return cls._taskContext
@classmethod
def _setTaskContext(cls, taskContext):
cls._taskContext = taskContext
@classmethod
def get(cls):
"""
Return the currently active TaskContext. This can be called inside of
user functions to access contextual information about running tasks.
.. note:: Must be called on the worker, not the driver. Returns None if not initialized.
"""
return cls._taskContext
def stageId(self):
"""The ID of the stage that this task belong to."""
return self._stageId
def partitionId(self):
"""
The ID of the RDD partition that is computed by this task.
"""
return self._partitionId
def attemptNumber(self):
""""
How many times this task has been attempted. The first task attempt will be assigned
attemptNumber = 0, and subsequent attempts will have increasing attempt numbers.
"""
return self._attemptNumber
def taskAttemptId(self):
"""
An ID that is unique to this task attempt (within the same SparkContext, no two task
attempts will share the same attempt ID). This is roughly equivalent to Hadoop's
TaskAttemptID.
"""
return self._taskAttemptId
def getLocalProperty(self, key):
"""
Get a local property set upstream in the driver, or None if it is missing.
"""
return self._localProperties.get(key, None)
def resources(self):
"""
Resources allocated to the task. The key is the resource name and the value is information
about the resource.
"""
return self._resources
BARRIER_FUNCTION = 1
ALL_GATHER_FUNCTION = 2
def _load_from_socket(port, auth_secret, function, all_gather_message=None):
"""
    Load data from a given socket. This is a blocking method and only returns once the
    socket connection has been closed.
"""
(sockfile, sock) = local_connect_and_auth(port, auth_secret)
# The call may block forever, so no timeout
sock.settimeout(None)
if function == BARRIER_FUNCTION:
# Make a barrier() function call.
write_int(function, sockfile)
elif function == ALL_GATHER_FUNCTION:
        # Make an all_gather() function call.
write_int(function, sockfile)
write_with_length(all_gather_message.encode("utf-8"), sockfile)
else:
raise ValueError("Unrecognized function type")
sockfile.flush()
# Collect result.
    num_results = read_int(sockfile)
    res = []
    for _ in range(num_results):
        res.append(UTF8Deserializer().loads(sockfile))
# Release resources.
sockfile.close()
sock.close()
return res
class BarrierTaskContext(TaskContext):
"""
.. note:: Experimental
A :class:`TaskContext` with extra contextual info and tooling for tasks in a barrier stage.
Use :func:`BarrierTaskContext.get` to obtain the barrier context for a running barrier task.
.. versionadded:: 2.4.0
"""
_port = None
_secret = None
@classmethod
def _getOrCreate(cls):
"""
        Internal function to get or create global BarrierTaskContext. We need to make sure
        BarrierTaskContext is returned from here because it is needed in the Python worker
        reuse scenario; see SPARK-25921 for more details.
"""
if not isinstance(cls._taskContext, BarrierTaskContext):
cls._taskContext = object.__new__(cls)
return cls._taskContext
@classmethod
def get(cls):
"""
.. note:: Experimental
Return the currently active :class:`BarrierTaskContext`.
This can be called inside of user functions to access contextual information about
running tasks.
.. note:: Must be called on the worker, not the driver. Returns None if not initialized.
        An Exception will be raised if it is not in a barrier stage.
"""
if not isinstance(cls._taskContext, BarrierTaskContext):
raise Exception('It is not in a barrier stage')
return cls._taskContext
@classmethod
def _initialize(cls, port, secret):
"""
Initialize BarrierTaskContext, other methods within BarrierTaskContext can only be called
after BarrierTaskContext is initialized.
"""
cls._port = port
cls._secret = secret
def barrier(self):
"""
.. note:: Experimental
Sets a global barrier and waits until all tasks in this stage hit this barrier.
Similar to `MPI_Barrier` function in MPI, this function blocks until all tasks
in the same stage have reached this routine.
        .. warning:: In a barrier stage, each task must have the same number of `barrier()`
calls, in all possible code branches.
Otherwise, you may get the job hanging or a SparkException after timeout.
.. versionadded:: 2.4.0
"""
if self._port is None or self._secret is None:
raise Exception("Not supported to call barrier() before initialize " +
"BarrierTaskContext.")
else:
_load_from_socket(self._port, self._secret, BARRIER_FUNCTION)
def allGather(self, message=""):
"""
.. note:: Experimental
This function blocks until all tasks in the same stage have reached this routine.
Each task passes in a message and returns with a list of all the messages passed in
by each of those tasks.
        .. warning:: In a barrier stage, each task must have the same number of `allGather()`
calls, in all possible code branches.
Otherwise, you may get the job hanging or a SparkException after timeout.
.. versionadded:: 3.0.0
"""
if not isinstance(message, str):
raise ValueError("Argument `message` must be of type `str`")
elif self._port is None or self._secret is None:
raise Exception("Not supported to call barrier() before initialize " +
"BarrierTaskContext.")
else:
return _load_from_socket(self._port, self._secret, ALL_GATHER_FUNCTION, message)
def getTaskInfos(self):
"""
.. note:: Experimental
Returns :class:`BarrierTaskInfo` for all tasks in this barrier stage,
ordered by partition ID.
.. versionadded:: 2.4.0
"""
if self._port is None or self._secret is None:
raise Exception("Not supported to call getTaskInfos() before initialize " +
"BarrierTaskContext.")
else:
addresses = self._localProperties.get("addresses", "")
return [BarrierTaskInfo(h.strip()) for h in addresses.split(",")]
class BarrierTaskInfo(object):
"""
.. note:: Experimental
Carries all task infos of a barrier task.
:var address: The IPv4 address (host:port) of the executor that the barrier task is running on
.. versionadded:: 2.4.0
"""
def __init__(self, address):
self.address = address
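# --- Usage sketch (added for illustration) -----------------------------------
# Assumes a live SparkContext ``sc`` and Spark >= 3.0 (for allGather). Shows how
# BarrierTaskContext is obtained inside a barrier stage created with
# RDD.barrier().mapPartitions(); the function is never called on import.
def _barrier_stage_example(sc):
    def stage_fn(iterator):
        ctx = BarrierTaskContext.get()
        ctx.barrier()                                      # wait for all tasks in the stage
        peers = [info.address for info in ctx.getTaskInfos()]
        messages = ctx.allGather(str(ctx.partitionId()))   # exchange small strings
        yield (ctx.partitionId(), peers, messages)
    return sc.parallelize(range(4), 4).barrier().mapPartitions(stage_fn).collect()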
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2009 Edgewall Software
# Copyright (C) 2004 Oliver Rutherfurd
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin
# Oliver Rutherfurd (initial implementation)
# Nuutti Kotivuori (role support)
#
# Trac support for reStructured Text, including a custom 'trac' directive
#
# 'trac' directive code by Oliver Rutherfurd, overhauled by cboos.
#
# Inserts `reference` nodes for TracLinks into the document tree.
__docformat__ = 'reStructuredText'
from distutils.version import StrictVersion
try:
from docutils import nodes
from docutils.core import publish_parts
from docutils.parsers import rst
from docutils.readers import standalone
from docutils import __version__
has_docutils = True
except ImportError:
has_docutils = False
from genshi.core import escape
from trac.core import *
from trac.env import ISystemInfoProvider
from trac.mimeview.api import IHTMLPreviewRenderer, content_to_unicode
from trac.util.html import Element, Fragment, Markup, find_element
from trac.util.translation import _
from trac.wiki.api import WikiSystem
from trac.wiki.formatter import WikiProcessor, Formatter, extract_link
if has_docutils and StrictVersion(__version__) < StrictVersion('0.6'):
# Monkey-patch "raw" role handler in docutils to add a missing check
# See docutils bug #2845002 on SourceForge
def raw_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
if not inliner.document.settings.raw_enabled:
msg = inliner.reporter.warning('raw (and derived) roles disabled')
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
return _raw_role(role, rawtext, text, lineno, inliner, options,
content)
from docutils.parsers.rst import roles
raw_role.options = roles.raw_role.options
_raw_role = roles.raw_role
roles.raw_role = raw_role
roles.register_canonical_role('raw', raw_role)
if has_docutils:
# Register "trac" role handler and directive
def trac_get_reference(env, context, rawtext, target, text):
fulltext = target + ' ' + text if text else target
link = extract_link(env, context, fulltext)
uri = None
missing = False
if isinstance(link, (Element, Fragment)):
linktext = Markup(link).striptags()
# the following is a bit hackish, but it takes into account:
            # - a possible trailing '?' for missing wiki pages
            # - spaces possibly introduced due to the split_page_names option
if linktext.rstrip('?').replace(' ', '') != target:
text = linktext
elt = find_element(link, 'href', 'missing')
if elt is not None:
uri = elt.attrib.get('href', '')
missing = 'missing' in elt.attrib.get('class', '').split()
else:
uri = context.href.wiki(target)
missing = not WikiSystem(env).has_page(target)
if uri or missing:
reference = nodes.reference(rawtext, text or target)
reference['refuri'] = uri
if missing:
reference['classes'].append('missing')
return reference
def trac_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
"""Inserts a `reference` node into the document for a given
`TracLink`_, based on the content of the arguments.
Usage::
.. trac:: target [text]
``target`` may be any `TracLink`_, provided it doesn't
embed a space character (e.g. wiki:"..." notation won't work).
``[text]`` is optional. If not given, ``target`` is
used as the reference text.
.. _TracLink: http://trac.edgewall.org/wiki/TracLinks
"""
if hasattr(state.inliner, 'trac'):
env, context = state.inliner.trac
link = arguments[0]
if len(arguments) == 2:
text = arguments[1]
else:
text = None
reference = trac_get_reference(env, context, block_text, link, text)
if reference:
if isinstance(state, rst.states.SubstitutionDef):
return [reference]
p = nodes.paragraph()
p += reference
return [p]
# didn't find a match (invalid TracLink)
msg = _("%(link)s is not a valid TracLink", link=arguments[0])
            # this is a user-facing message, hence localized
else:
msg = "No trac context active while rendering"
            # this is more of an internal error, so it is not translated.
# report a warning
warning = state_machine.reporter.warning(
msg, nodes.literal_block(block_text, block_text), line=lineno)
return [warning]
def trac_role(name, rawtext, text, lineno, inliner, options={},
content=[]):
if hasattr(inliner, 'trac'):
env, context = inliner.trac
args = text.split(" ", 1)
link = args[0]
if len(args) == 2:
text = args[1]
else:
text = None
reference = trac_get_reference(env, context, rawtext, link, text)
if reference:
return [reference], []
msg = _("%(link)s is not a valid TracLink", link=rawtext)
else:
msg = "No trac context active while rendering"
return nodes.warning(None, nodes.literal_block(text, msg)), []
# 1 required arg, 1 optional arg, spaces allowed in last arg
trac_directive.arguments = (1, 1, 1)
trac_directive.options = None
trac_directive.content = None
rst.directives.register_directive('trac', trac_directive)
rst.roles.register_canonical_role('trac', trac_role)
# Register "code-block" role handler and directive
# (code derived from the leo plugin rst2)
def code_formatter(env, context, language, text):
processor = WikiProcessor(Formatter(env, context), language)
html = processor.process(text)
raw = nodes.raw('', html, format='html')
return raw
def code_block_role(name, rawtext, text, lineno, inliner, options={},
content=[]):
if not hasattr(inliner, 'trac'):
return [], []
env, context = inliner.trac
language = options.get('language')
if not language:
args = text.split(':', 1)
language = args[0]
if len(args) == 2:
text = args[1]
else:
text = ''
return [code_formatter(env, context, language, text)], []
def code_block_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
"""
Create a code-block directive for docutils.
Usage: .. code-block:: language
If the language can be syntax highlighted it will be.
"""
if not hasattr(state.inliner, 'trac'):
return []
env, context = state.inliner.trac
language = arguments[0]
text = '\n'.join(content)
return [code_formatter(env, context, language, text)]
# These are documented
# at http://docutils.sourceforge.net/spec/howto/rst-directives.html.
code_block_directive.arguments = (
1, # Number of required arguments.
0, # Number of optional arguments.
0) # True if final argument may contain whitespace.
# A mapping from option name to conversion function.
code_block_role.options = code_block_directive.options = {
'language' :
rst.directives.unchanged # Return the text argument, unchanged
}
code_block_directive.content = 1 # True if content is allowed.
# Register the directive with docutils.
rst.directives.register_directive('code-block', code_block_directive)
rst.roles.register_local_role('code-block', code_block_role)
class ReStructuredTextRenderer(Component):
"""HTML renderer for plain text in reStructuredText format."""
implements(ISystemInfoProvider, IHTMLPreviewRenderer)
can_render = False
def __init__(self):
if has_docutils:
if StrictVersion(__version__) < StrictVersion('0.3.9'):
self.log.warning('Docutils version >= %s required, '
'%s found' % ('0.3.9', __version__))
else:
self.can_render = True
# ISystemInfoProvider methods
def get_system_info(self):
if has_docutils:
yield 'Docutils', __version__
# IHTMLPreviewRenderer methods
def get_quality_ratio(self, mimetype):
if self.can_render and mimetype in ('text/x-rst',
'text/prs.fallenstein.rst'):
return 8
return 0
def render(self, context, mimetype, content, filename=None, rev=None):
# Minimize visual impact of errors
from docutils.writers import html4css1
class TracHTMLTranslator(html4css1.HTMLTranslator):
"""Specialized translator with unobtrusive error reporting"""
def visit_system_message(self, node):
paragraph = node.children.pop(0)
message = escape(paragraph.astext()) if paragraph else ''
backrefs = node['backrefs']
if backrefs:
span = ('<span class="system-message">%s</span>' %
(''.join('<a href="#%s" title="%s">?</a>' %
(backref, message)
for backref in backrefs)))
else:
span = ('<span class="system-message" title="%s">?</span>' %
message)
self.body.append(span)
def depart_system_message(self, node):
pass
writer = html4css1.Writer()
writer.translator_class = TracHTMLTranslator
inliner = rst.states.Inliner()
inliner.trac = (self.env, context)
parser = rst.Parser(inliner=inliner)
content = content_to_unicode(self.env, content, mimetype)
# The default Reader is explicitly passed as a workaround for #11248
parts = publish_parts(content, writer=writer, parser=parser,
reader=standalone.Reader(parser),
settings_overrides={'halt_level': 6,
'file_insertion_enabled': 0,
'raw_enabled': 0,
'warning_stream': False})
return parts['html_body']
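# --- Example input (added for reference) --------------------------------------
# reStructuredText that exercises the 'trac' and 'code-block' roles/directives
# registered above. Actually rendering it requires a Trac environment and a
# rendering context, so the text is only kept here as documentation.
_EXAMPLE_RST_INPUT = """
See :trac:`wiki:WikiStart the start page` and :trac:`#42` for details.

.. trac:: report:1 Active tickets

.. code-block:: python

   print("rendered through the Trac wiki processor")
"""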
|
|
"""
Test the numpy pickler as a replacement of the standard pickler.
"""
from tempfile import mkdtemp
import copy
import shutil
import os
import random
import nose
from .common import np, with_numpy
# numpy_pickle is not a drop-in replacement of pickle, as it takes
# filenames instead of open files as arguments.
from .. import numpy_pickle
###############################################################################
# Define a list of standard types.
# Borrowed from dill, initial author: Michael McKerns:
# http://dev.danse.us/trac/pathos/browser/dill/dill_test2.py
typelist = []
# testing types
_none = None
typelist.append(_none)
_type = type
typelist.append(_type)
_bool = bool(1)
typelist.append(_bool)
_int = int(1)
typelist.append(_int)
try:
_long = long(1)
typelist.append(_long)
except NameError:
# long is not defined in python 3
pass
_float = float(1)
typelist.append(_float)
_complex = complex(1)
typelist.append(_complex)
_string = str(1)
typelist.append(_string)
try:
_unicode = unicode(1)
typelist.append(_unicode)
except NameError:
# unicode is not defined in python 3
pass
_tuple = ()
typelist.append(_tuple)
_list = []
typelist.append(_list)
_dict = {}
typelist.append(_dict)
try:
_file = file
typelist.append(_file)
except NameError:
    pass # file does not exist in Python 3
try:
_buffer = buffer
typelist.append(_buffer)
except NameError:
    # buffer does not exist in Python 3
pass
_builtin = len
typelist.append(_builtin)
def _function(x):
yield x
class _class:
def _method(self):
pass
class _newclass(object):
def _method(self):
pass
typelist.append(_function)
typelist.append(_class)
typelist.append(_newclass) # <type 'type'>
_instance = _class()
typelist.append(_instance)
_object = _newclass()
typelist.append(_object) # <type 'class'>
###############################################################################
# Test fixtures
env = dict()
def setup_module():
""" Test setup.
"""
env['dir'] = mkdtemp()
env['filename'] = os.path.join(env['dir'], 'test.pkl')
print(80 * '_')
print('setup numpy_pickle')
print(80 * '_')
def teardown_module():
""" Test teardown.
"""
shutil.rmtree(env['dir'])
#del env['dir']
#del env['filename']
print(80 * '_')
print('teardown numpy_pickle')
print(80 * '_')
###############################################################################
# Tests
def test_standard_types():
# Test pickling and saving with standard types.
filename = env['filename']
for compress in [0, 1]:
for member in typelist:
# Change the file name to avoid side effects between tests
this_filename = filename + str(random.randint(0, 1000))
numpy_pickle.dump(member, this_filename, compress=compress)
_member = numpy_pickle.load(this_filename)
# We compare the pickled instance to the reloaded one only if it
# can be compared to a copied one
if member == copy.deepcopy(member):
yield nose.tools.assert_equal, member, _member
def test_value_error():
# Test inverting the input arguments to dump
nose.tools.assert_raises(ValueError, numpy_pickle.dump, 'foo',
dict())
@with_numpy
def test_numpy_persistence():
filename = env['filename']
rnd = np.random.RandomState(0)
a = rnd.random_sample((10, 2))
for compress, cache_size in ((0, 0), (1, 0), (1, 10)):
# We use 'a.T' to have a non C-contiguous array.
for index, obj in enumerate(((a,), (a.T,), (a, a), [a, a, a])):
# Change the file name to avoid side effects between tests
this_filename = filename + str(random.randint(0, 1000))
filenames = numpy_pickle.dump(obj, this_filename,
compress=compress,
cache_size=cache_size)
# Check that one file was created per array
if not compress:
nose.tools.assert_equal(len(filenames), len(obj) + 1)
# Check that these files do exist
for file in filenames:
nose.tools.assert_true(
os.path.exists(os.path.join(env['dir'], file)))
# Unpickle the object
obj_ = numpy_pickle.load(this_filename)
# Check that the items are indeed arrays
for item in obj_:
nose.tools.assert_true(isinstance(item, np.ndarray))
# And finally, check that all the values are equal.
nose.tools.assert_true(np.all(np.array(obj) ==
np.array(obj_)))
# Now test with array subclasses
for obj in (
np.matrix(np.zeros(10)),
np.core.multiarray._reconstruct(np.memmap, (), np.float)
):
this_filename = filename + str(random.randint(0, 1000))
filenames = numpy_pickle.dump(obj, this_filename,
compress=compress,
cache_size=cache_size)
obj_ = numpy_pickle.load(this_filename)
if (type(obj) is not np.memmap
and hasattr(obj, '__array_prepare__')):
# We don't reconstruct memmaps
nose.tools.assert_true(isinstance(obj_, type(obj)))
# Finally smoke test the warning in case of compress + mmap_mode
this_filename = filename + str(random.randint(0, 1000))
numpy_pickle.dump(a, this_filename, compress=1)
numpy_pickle.load(this_filename, mmap_mode='r')
@with_numpy
def test_memmap_persistence():
rnd = np.random.RandomState(0)
a = rnd.random_sample(10)
filename = env['filename'] + str(random.randint(0, 1000))
numpy_pickle.dump(a, filename)
b = numpy_pickle.load(filename, mmap_mode='r')
if np.__version__ >= '1.3':
nose.tools.assert_true(isinstance(b, np.memmap))
@with_numpy
def test_masked_array_persistence():
    # The special-case pickler fails because saving masked_array is
    # not implemented, but it just delegates to the standard pickler.
rnd = np.random.RandomState(0)
a = rnd.random_sample(10)
a = np.ma.masked_greater(a, 0.5)
filename = env['filename'] + str(random.randint(0, 1000))
numpy_pickle.dump(a, filename)
b = numpy_pickle.load(filename, mmap_mode='r')
nose.tools.assert_true(isinstance(b, np.ma.masked_array))
def test_z_file():
# Test saving and loading data with Zfiles
filename = env['filename'] + str(random.randint(0, 1000))
data = numpy_pickle.asbytes('Foo, \n Bar, baz, \n\nfoobar')
numpy_pickle.write_zfile(open(filename, 'wb'), data)
data_read = numpy_pickle.read_zfile(open(filename, 'rb'))
nose.tools.assert_equal(data, data_read)
################################################################################
# Test dumping array subclasses
if np is not None:
class SubArray(np.ndarray):
def __reduce__(self):
return (_load_sub_array, (np.asarray(self), ))
def _load_sub_array(arr):
d = SubArray(arr.shape)
d[:] = arr
return d
@with_numpy
def test_numpy_subclass():
filename = env['filename']
a = SubArray((10,))
numpy_pickle.dump(a, filename)
c = numpy_pickle.load(filename)
nose.tools.assert_true(isinstance(c, SubArray))
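# --- Usage sketch (added for illustration) -----------------------------------
# Stand-alone round trip through the ``numpy_pickle`` API exercised by the tests
# above. Assumes numpy is available (i.e. ``np`` is not None); it is not part of
# the test suite and is never called here.
def _roundtrip_example():
    import os.path
    from tempfile import mkdtemp
    target = os.path.join(mkdtemp(), 'demo.pkl')
    payload = {'weights': np.arange(10.0), 'label': 'demo'}
    numpy_pickle.dump(payload, target)    # arrays are written to side files
    restored = numpy_pickle.load(target)  # pass mmap_mode='r' to memory-map arrays
    assert np.all(restored['weights'] == payload['weights'])
    return restored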
|
|
"""
----------------------------------------
Copyright 2015 Felix Woodhead
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
----------------------------------------
A Utility to read and write Configuration Files.
"""
class Config():
#{
def __init__(self, fileName):
#{
self.fileName = (str(fileName) + ".conf")
try: # Does file exist
#{
self.FILE = open(self.fileName, "r")
#}
except(IOError) as error: # If no existing file, make one
#{
print("Got error: " + str(error))
self.FILE = open(self.fileName, "w")
self.FILE.close()
self.FILE = open(self.fileName, "r")
#}
self.values = {}
def __getLengthOfFile(self):
#{
lengthOfFile = len(self.FILE.readlines())
self.__resetFile()
return lengthOfFile
#}
def __resetFile(self): # Used to go back to first line and get changes
#{
self.FILE = open(self.fileName, "r")
#}
def __getFileContents(self):
#{
fileContents = []
lengthOfFile = self.__getLengthOfFile()
currLine = ""
pos = 0
while(pos <= lengthOfFile):
#{
currLine = self.FILE.readline()
if(currLine != ""):
#{
fileContents.append(currLine)
#}
pos += 1
#}
self.__resetFile()
return "".join(fileContents).split() # removes newlines
#}
    def __getBundledFileContents(self): # Gets file contents as a list of [name, amount] pairs
#{
names = self.__getValueNames() # Used for getting names
amounts = self.__getFieldValues() # Used for getting amounts
        bundledFileContents = [] # Bundled file contents
        tempBundle = [None, None] # used to 'bundle' each [name, amount] pair
if(len(names) != len(amounts)):
#{
raise ValueError("Length of names, does not match length of amounts")
#}
for i in range(len(amounts)):
#{
tempBundle[0] = names[i]
tempBundle[1] = amounts[i]
bundledFileContents.append(tempBundle)
tempBundle = [None, None] # Set to default to enable re-use
#}
return bundledFileContents
#}
def __getValueNames(self):
#{
fileContents = self.__getFileContentsNoNewLines()
lengthOfFile = self.__getLengthOfFile()
names = [] # All of values
currLine = ""
currName = "" # Semi- complete value being parsed
currChar = "" # Char being parsed
lineNumber = 0
while(lineNumber <= lengthOfFile):
#{
currLine = fileContents[lineNumber]
if(len(currLine) > 0): # Ignore newlines
#{
if("".join(currLine)[0] == "#"): # Ignore comments
#{
currName = "" # Set to default
lineNumber += 1
continue
#}
#}
for i in range(len(currLine)):
#{
currChar = currLine[i]
if(currChar == ":"):
#{
break
#}
else:
#{
currName += currChar
#}
#}
names.append(currName)
currName = "" # Set to default
lineNumber += 1
#}
if("" in names):
#{
while("" in names):
#{
names.remove("")
#}
#}
return names
#}
def __getFileContentsNoNewLines(self):
#{
fileContents = []
lengthOfFile = self.__getLengthOfFile()
currLine = ""
currChar = ""
lineNumber = 0
while(lineNumber <= lengthOfFile):
#{
currLine = " ".join(self.FILE.readline().split()) # Remove '\n'
fileContents.append(currLine)
lineNumber += 1
#}
self.__resetFile()
return fileContents
#}
def __getFieldValues(self):
#{
fileContents = self.__getFileContentsNoNewLines()
lengthOfFile = self.__getLengthOfFile()
amounts = []
currLine = ""
currChar = ""
currAmount = "" # Semi- complete amount being parsed
lineNumber = 0
while(lineNumber <= lengthOfFile):
#{
currLine = fileContents[lineNumber]
for i in range(len(currLine)):
#{
currChar = currLine[i]
if(currChar == ":"):
#{
currAmount = currLine[(i + 1) : len(currLine)] # Everything AFTER the colon
amounts.append(currAmount)
break
#}
#}
lineNumber += 1
#}
return amounts
#}
    def __getValueLineNumbers(self): # Returns zero-based ('machine') line numbers for each value
#{
lineNumbers = {}
lengthOfFile = self.__getLengthOfFile()
currLineNumber = 0
currLine = ""
currValue = ""
currChar = ""
pos = 0 # Position in current line being parsed
while(currLineNumber < lengthOfFile):
#{
currLine = "".join(self.FILE.readline().split())
while((currChar != ":") and (pos < len(currLine))):
#{
currChar = currLine[pos]
pos += 1
#}
currValue = currLine[0 : (pos - 1)] # '-1' as UP TO colon
lineNumbers[currValue] = currValue
            lineNumbers[currValue] = currLineNumber - 1 # '-1' pairs with the '+1' applied by callers
currChar = ""
pos = 0 # Reset
currLineNumber += 1
#}
self.__resetFile()
return lineNumbers
#}
    def getValues(self): # TODO
#{
valueNames = self.__getValueNames()
valueAmounts = self.__getFieldValues()
currValue = ""
currAmount = ""
if(len(valueNames) != len(valueAmounts)):
#{
print("LengthMismatchError: The length of valueNames and valueAmounts"
" does not match.")
return # Break
#}
for i in range(len(valueNames)):
#{
currValue = valueNames[i]
currAmount = valueAmounts[i]
if(currAmount[0] == " "): # Removes space at start of string
#{
currAmount = " ".join(currAmount.split())
#}
self.values[currValue] = currValue # Add new key
self.values[currValue] = currAmount
#}
return self.values
#}
def addField(self, newValueName, newValueAmount):
#{
values = self.__getValueNames()
self.FILE = open(self.fileName, "a") # open after values
valueName = newValueName
valueAmount = newValueAmount
if(":" in valueName): # remove : to stop mix-up
#{
valueName = valueName.replace(":", "")
#}
if(" " in valueName): # remove ' ' to stop mix-up
#{
valueName = valueName.replace(" ", "")
#}
if(valueName not in values): # prevent doubles
#{
self.FILE.write(str(valueName) + ": " + str(valueAmount) + "\n") # E.G. xValue: 12
#}
else:
#{
print("The valueName: '" + str(valueName) + "' given already existes") # TRY?
#}
self.__resetFile()
#}
def addComment(self, comment):
#{
self.FILE = open(self.fileName, "a") # open after values
self.FILE.write("# " + str(comment) + "\n") # E.G. xValue: 12
self.__resetFile()
#}
def editFieldValue(self, value, newValueAmount):
#{
fileContents = self.__getBundledFileContents()
valueLine = self.__getValueLineNumbers()[value]
self.FILE = open(self.fileName, "w")
fileContents[valueLine + 1][1] = newValueAmount # +1 to skip machine code
currName = ""
currValue = ""
for i in range (len(fileContents)):
#{
currName = fileContents[i][0]
pos = 1
while(pos < len(fileContents[i])): # Start from 1 to avoid first element
#{
currValue += "" + str(fileContents[i][pos]) # Add spaces and concatente
pos += 1
#}
currValue = " ".join(currValue.split()) # Remove 'ghost' space at start of string
self.FILE.write(str(currName) + ": " + str(currValue) + "\n")
currValue = ""
#}
self.__resetFile()
#}
def editFieldName(self, value, newValueName):
#{
fileContents = self.__getBundledFileContents()
valueLine = self.__getValueLineNumbers()[value]
self.FILE = open(self.fileName, "w")
fileContents[valueLine + 1][0] = newValueName # +1 to skip machine code
currName = ""
currValue = ""
for i in range (len(fileContents)):
#{
currName = fileContents[i][0]
currValue = fileContents[i][1]
currValue = " ".join(currValue.split()) # Remove 'ghost' space at start of string
if(":" not in currName):
#{
self.FILE.write(str(currName) + ": " + str(currValue) + "\n")
#}
else:
#{
self.FILE.write(str(currName) + " " + str(currValue) + "\n")
#}
#}
self.__resetFile()
#}
def removeField(self, valueToRemove):
#{
fileContents = self.__getBundledFileContents()
valueLine = self.__getValueLineNumbers()[valueToRemove]
self.FILE = open(self.fileName, "w")
fileContents[valueLine + 1][0] = "" # +1 to skip machine code
fileContents[valueLine + 1][1] = "" # +1 to skip machine code
currName = ""
currValue = ""
for i in range (len(fileContents)):
#{
currName = fileContents[i][0]
currValue = fileContents[i][1]
if((":" not in currName) and (currName != "")):
#{
self.FILE.write(str(currName) + ": " + str(currValue) + "\n")
#}
else:
#{
if(currName != ""):
#{
self.FILE.write(str(currName) + "" + str(currValue) + "\n")
#}
#}
#}
self.__resetFile()
#}
#}
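# --- Usage sketch (added for illustration) -----------------------------------
# Creates (or opens) "settings.conf", writes two fields plus a comment and reads
# everything back as a dict of strings. File, field names and values are only
# examples; the function is not called anywhere in this module.
def _config_example():
    config = Config("settings")          # opens or creates "settings.conf"
    config.addComment("window geometry")
    config.addField("width", 800)        # stored as the line "width: 800"
    config.addField("height", 600)
    return config.getValues()            # e.g. {'width': '800', 'height': '600'}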
|
|
# -*- coding: utf-8 -*-
"""
Utilities for generating histograms from pandas DataFrames.
.. deprecated:: 0.18.0
With the deprecation of :meth:`~verta.tracking.entities._deployable_entity._DeployableEntity.log_training_data`,
this module is effectively defunct and remains solely for reference.
"""
from ..external import six
def calculate_histograms(df):
"""
Calculates histograms for the columns of `df`.
Parameters
----------
df : pandas.DataFrame
Data to be binned.
Returns
-------
histograms : dict
"""
histograms = {'total_count': len(df.index), 'features': {}}
for colname in df:
histogram = calculate_single_histogram(df[colname])
histograms['features'][str(colname)] = histogram # TODO: directly store non-str column names
return histograms
def calculate_single_histogram(data):
"""
Calculates a histogram for `data`.
Parameters
----------
data : pandas.Series
Data to be binned.
Returns
-------
histogram : dict
"""
try: # binary
return calculate_binary_histogram(data)
except HistogramError:
pass
try: # discrete
return calculate_discrete_histogram(data)
except HistogramError:
pass
# continuous
return calculate_float_histogram(data)
def calculate_binary_histogram(data):
"""
Calculates a histogram for binary `data`.
Parameters
----------
data : pandas.Series
Binary data to be binned.
Returns
-------
histogram : dict
Raises
------
HistogramError
If a binary histogram cannot be calculated from `data`.
"""
values = data.values.tolist()
zeros = 0
ones = 0
for value in values:
if isinstance(value, bool):
if value == False:
zeros += 1
continue
elif value == True:
ones += 1
continue
if isinstance(value, six.string_types):
# handle bool-like strings
if value.lower() == "false":
zeros += 1
continue
elif value.lower() == "true":
ones += 1
continue
# handle num-like strings (falls through to numeric case)
try:
value = float(value)
except ValueError:
pass
if isinstance(value, (six.integer_types, float)):
if value == 0:
zeros += 1
continue
elif value == 1:
ones += 1
continue
# unsupported value
raise HistogramError("invalid binary value {}".format(value))
return {
'histogram': {
'binary': {
'count': [zeros, ones],
},
},
'type': "binary",
}
def calculate_discrete_histogram(data):
"""
Calculates a histogram for discrete `data`.
Parameters
----------
data : pandas.Series of int
Discrete data to be binned.
Returns
-------
histogram : dict
Raises
------
HistogramError
If a discrete histogram cannot be calculated from `data`.
"""
value_counts = data.value_counts().sort_index()
values = value_counts.index.tolist()
counts = value_counts.values.tolist()
# reject non-numbers
try:
values = list(map(float, values))
except ValueError:
raise HistogramError(
"values must be castable to numbers"
)
# reject non-integral floats
if not all(value.is_integer() for value in values):
raise HistogramError(
"values must be integers"
)
values = list(map(int, values))
# heuristic: reject if too many values
if len(values) > 10:
raise HistogramError(
"got {} possible discrete values but heuristic says the maximum is 10".format(len(values))
)
# heuristic: reject if counts don't seem high enough
if value_counts.mean() < 10: # `value_counts` instead of `counts` for mean() method
raise HistogramError(
"heuristic says that each discrete value should average at least 10 appearances"
)
return {
'histogram': {
'discrete': {
'bucket_values': values,
'count': counts,
},
},
'type': "discrete",
}
def calculate_float_histogram(data, num_bins=10):
"""
Calculates a histogram for continuous `data`.
Parameters
----------
data : pandas.Series of float
Continuous data to be binned.
num_bins : int, default 10
Number of bins to use.
Returns
-------
histogram : dict
"""
values = data.values.tolist()
# reject non-numbers
try:
values = list(map(float, values))
except ValueError:
raise TypeError(
"unable to generate histogram from non-numeric column {}".format(data.name)
)
# calculate bin boundaries
start, stop = min(values), max(values)
space = (stop - start)/num_bins
bin_boundaries = [start + space*i for i in range(num_bins)]
# ensure last bin covers max value
bin_boundaries.append(stop)
# fit `data` into bins
reference_counts = []
bin_windows = list(zip(bin_boundaries[:-1], bin_boundaries[1:]))
for l, r in bin_windows[:-1]: # handle last bin shortly
count = len([value for value in values if l <= value < r])
reference_counts.append(count)
# ensure last bin includes max value
count = len([value for value in values if bin_boundaries[-2] <= value])
reference_counts.append(count)
return {
'histogram': {
'float': {
'bucket_limits': bin_boundaries,
'count': reference_counts,
},
},
'type': "float",
}
class HistogramError(TypeError): # TODO: move to exceptions submodule
pass
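# --- Worked example (added for illustration) ----------------------------------
# Assumes pandas is installed. The three columns are chosen so that each one
# takes a different code path above: binary, discrete and float. The function is
# illustrative only and is not called anywhere in this module.
def _histogram_example():
    import pandas as pd
    df = pd.DataFrame({
        'flag': [0, 1] * 20,                    # -> "binary" histogram
        'rating': [1, 2, 3, 4] * 10,            # -> "discrete" histogram (4 values, 10 each)
        'score': [i / 7.0 for i in range(40)],  # -> "float" histogram with 10 bins
    })
    return calculate_histograms(df)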
|
|
# This file is part of the MapProxy project.
# Copyright (C) 2011-2013 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import time
import sqlite3
import threading
from cStringIO import StringIO
from mapproxy.image import ImageSource
from mapproxy.cache.base import TileCacheBase, FileBasedLocking, tile_buffer, CacheBackendError
from mapproxy.util.fs import ensure_directory
from mapproxy.util.lock import FileLock
import logging
log = logging.getLogger(__name__)
def sqlite_datetime_to_timestamp(datetime):
if datetime is None:
return None
d = time.strptime(datetime, "%Y-%m-%d %H:%M:%S")
return time.mktime(d)
class MBTilesCache(TileCacheBase, FileBasedLocking):
supports_timestamp = False
def __init__(self, mbtile_file, lock_dir=None, with_timestamps=False):
if lock_dir:
self.lock_dir = lock_dir
else:
self.lock_dir = mbtile_file + '.locks'
self.lock_timeout = 60
self.cache_dir = mbtile_file # for lock_id generation by FileBasedLocking
self.mbtile_file = mbtile_file
self.supports_timestamp = with_timestamps
self.ensure_mbtile()
self._db_conn_cache = threading.local()
@property
def db(self):
if not getattr(self._db_conn_cache, 'db', None):
self.ensure_mbtile()
self._db_conn_cache.db = sqlite3.connect(self.mbtile_file)
return self._db_conn_cache.db
def cleanup(self):
"""
        Close all open connections and remove them from the cache.
"""
if getattr(self._db_conn_cache, 'db', None):
self._db_conn_cache.db.close()
self._db_conn_cache.db = None
def ensure_mbtile(self):
if not os.path.exists(self.mbtile_file):
with FileLock(os.path.join(self.lock_dir, 'init.lck'),
timeout=self.lock_timeout,
remove_on_unlock=True):
if not os.path.exists(self.mbtile_file):
ensure_directory(self.mbtile_file)
self._initialize_mbtile()
def _initialize_mbtile(self):
log.info('initializing MBTile file %s', self.mbtile_file)
db = sqlite3.connect(self.mbtile_file)
stmt = """
CREATE TABLE tiles (
zoom_level integer,
tile_column integer,
tile_row integer,
tile_data blob
"""
if self.supports_timestamp:
stmt += """
, last_modified datetime DEFAULT (datetime('now','localtime'))
"""
stmt += """
);
"""
db.execute(stmt)
db.execute("""
CREATE TABLE metadata (name text, value text);
""")
db.execute("""
CREATE UNIQUE INDEX idx_tile on tiles
(zoom_level, tile_column, tile_row);
""")
db.commit()
db.close()
def update_metadata(self, name='', description='', version=1, overlay=True, format='png'):
db = sqlite3.connect(self.mbtile_file)
db.execute("""
CREATE TABLE IF NOT EXISTS metadata (name text, value text);
""")
db.execute("""DELETE FROM metadata;""")
if overlay:
layer_type = 'overlay'
else:
layer_type = 'baselayer'
db.executemany("""
INSERT INTO metadata (name, value) VALUES (?,?)
""",
(
('name', name),
('description', description),
('version', version),
('type', layer_type),
('format', format),
)
)
db.commit()
db.close()
def is_cached(self, tile):
if tile.coord is None:
return True
if tile.source:
return True
return self.load_tile(tile)
def store_tile(self, tile):
if tile.stored:
return True
with tile_buffer(tile) as buf:
content = buffer(buf.read())
x, y, level = tile.coord
cursor = self.db.cursor()
try:
if self.supports_timestamp:
stmt = "INSERT OR REPLACE INTO tiles (zoom_level, tile_column, tile_row, tile_data, last_modified) VALUES (?,?,?,?, datetime(?, 'unixepoch', 'localtime'))"
cursor.execute(stmt, (level, x, y, content, time.time()))
else:
stmt = "INSERT OR REPLACE INTO tiles (zoom_level, tile_column, tile_row, tile_data) VALUES (?,?,?,?)"
cursor.execute(stmt, (level, x, y, content))
self.db.commit()
except sqlite3.OperationalError, ex:
log.warn('unable to store tile: %s', ex)
return False
return True
def load_tile(self, tile, with_metadata=False):
if tile.source or tile.coord is None:
return True
cur = self.db.cursor()
if self.supports_timestamp:
cur.execute('''SELECT tile_data, last_modified
FROM tiles
WHERE tile_column = ? AND
tile_row = ? AND
zoom_level = ?''', tile.coord)
else:
cur.execute('''SELECT tile_data FROM tiles
WHERE tile_column = ? AND
tile_row = ? AND
zoom_level = ?''', tile.coord)
content = cur.fetchone()
if content:
tile.source = ImageSource(StringIO(content[0]))
if self.supports_timestamp:
tile.timestamp = sqlite_datetime_to_timestamp(content[1])
return True
else:
return False
def load_tiles(self, tiles, with_metadata=False):
#associate the right tiles with the cursor
tile_dict = {}
coords = []
for tile in tiles:
if tile.source or tile.coord is None:
continue
x, y, level = tile.coord
coords.append(x)
coords.append(y)
coords.append(level)
tile_dict[(x, y)] = tile
if not tile_dict:
# all tiles loaded or coords are None
return True
if len(coords) > 1000:
# SQLite is limited to 1000 args
raise CacheBackendError('cannot query SQLite for more than 333 tiles')
if self.supports_timestamp:
stmt = "SELECT tile_column, tile_row, tile_data, last_modified FROM tiles WHERE "
else:
stmt = "SELECT tile_column, tile_row, tile_data FROM tiles WHERE "
stmt += ' OR '.join(['(tile_column = ? AND tile_row = ? AND zoom_level = ?)'] * (len(coords)//3))
cursor = self.db.cursor()
cursor.execute(stmt, coords)
loaded_tiles = 0
for row in cursor:
loaded_tiles += 1
tile = tile_dict[(row[0], row[1])]
data = row[2]
tile.size = len(data)
tile.source = ImageSource(StringIO(data))
if self.supports_timestamp:
tile.timestamp = sqlite_datetime_to_timestamp(row[3])
cursor.close()
return loaded_tiles == len(tile_dict)
def remove_tile(self, tile):
cursor = self.db.cursor()
cursor.execute(
"DELETE FROM tiles WHERE (tile_column = ? AND tile_row = ? AND zoom_level = ?)",
tile.coord)
self.db.commit()
if cursor.rowcount:
return True
return False
def remove_level_tiles_before(self, level, timestamp):
if timestamp == 0:
cursor = self.db.cursor()
cursor.execute(
"DELETE FROM tiles WHERE (zoom_level = ?)",
(level, ))
self.db.commit()
if cursor.rowcount:
return True
return False
if self.supports_timestamp:
cursor = self.db.cursor()
cursor.execute(
"DELETE FROM tiles WHERE (zoom_level = ? AND last_modified < datetime(?, 'unixepoch', 'localtime'))",
(level, timestamp))
self.db.commit()
if cursor.rowcount:
return True
return False
def load_tile_metadata(self, tile):
if not self.supports_timestamp:
            # The MBTiles specification does not include timestamps,
            # so mark the tile timestamp as unknown (-1).
tile.timestamp = -1
else:
self.load_tile(tile)
class MBTilesLevelCache(TileCacheBase, FileBasedLocking):
supports_timestamp = True
def __init__(self, mbtiles_dir, lock_dir=None):
if lock_dir:
self.lock_dir = lock_dir
else:
self.lock_dir = mbtiles_dir + '.locks'
self.lock_timeout = 60
self.cache_dir = mbtiles_dir
self._mbtiles = {}
self._mbtiles_lock = threading.Lock()
def _get_level(self, level):
if level in self._mbtiles:
return self._mbtiles[level]
with self._mbtiles_lock:
if level not in self._mbtiles:
mbtile_filename = os.path.join(self.cache_dir, '%s.mbtile' % level)
self._mbtiles[level] = MBTilesCache(
mbtile_filename,
lock_dir=self.lock_dir,
with_timestamps=True,
)
return self._mbtiles[level]
def is_cached(self, tile):
if tile.coord is None:
return True
if tile.source:
return True
return self._get_level(tile.coord[2]).is_cached(tile)
def store_tile(self, tile):
if tile.stored:
return True
return self._get_level(tile.coord[2]).store_tile(tile)
def load_tile(self, tile, with_metadata=False):
if tile.source or tile.coord is None:
return True
return self._get_level(tile.coord[2]).load_tile(tile, with_metadata=with_metadata)
def load_tiles(self, tiles, with_metadata=False):
level = None
for tile in tiles:
if tile.source or tile.coord is None:
continue
level = tile.coord[2]
break
if not level:
return True
return self._get_level(level).load_tiles(tiles, with_metadata=with_metadata)
def remove_tile(self, tile):
if tile.coord is None:
return True
return self._get_level(tile.coord[2]).remove_tile(tile)
def load_tile_metadata(self, tile):
self.load_tile(tile)
def remove_level_tiles_before(self, level, timestamp):
level_cache = self._get_level(level)
if timestamp == 0:
level_cache.cleanup()
os.unlink(level_cache.mbtile_file)
return True
else:
return level_cache.remove_level_tiles_before(level, timestamp)
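# --- Usage sketch (added for illustration) -----------------------------------
# Assumes a mapproxy ``Tile`` whose ``coord`` is an (x, y, zoom_level) tuple and
# whose image data is already attached as ``tile.source``; the paths are only
# examples. The function is never called in this module.
def _mbtiles_example(tile):
    cache = MBTilesCache('/tmp/demo.mbtiles', with_timestamps=True)
    cache.update_metadata(name='demo', description='demo layer', format='png')
    cache.store_tile(tile)   # INSERT OR REPLACE into the tiles table
    cache.load_tile(tile)    # refills tile.source and tile.timestamp
    cache.cleanup()          # close this thread's sqlite connection

    # Alternative layout: one .mbtile file per zoom level.
    level_cache = MBTilesLevelCache('/tmp/demo_levels')
    return level_cache.is_cached(tile)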
|
|
from collections import namedtuple
from decimal import Decimal as D
from . import availability, prices
# A container for policies
PurchaseInfo = namedtuple(
'PurchaseInfo', ['price', 'availability', 'stockrecord'])
class Selector(object):
"""
Responsible for returning the appropriate strategy class for a given
user/session.
This can be called in three ways:
#) Passing a request and user. This is for determining
prices/availability for a normal user browsing the site.
#) Passing just the user. This is for offline processes that don't
have a request instance but do know which user to determine prices for.
#) Passing nothing. This is for offline processes that don't
correspond to a specific user. Eg, determining a price to store in
a search index.
"""
def strategy(self, request=None, user=None, **kwargs):
"""
        Return an instantiated strategy instance
"""
# Default to the backwards-compatible strategy of picking the first
# stockrecord but charging zero tax.
return Default(request)
class Base(object):
"""
The base strategy class
Given a product, strategies are responsible for returning a
``PurchaseInfo`` instance which contains:
- The appropriate stockrecord for this customer
- A pricing policy instance
- An availability policy instance
"""
def __init__(self, request=None):
self.request = request
self.user = None
if request and request.user.is_authenticated():
self.user = request.user
def fetch_for_product(self, product, stockrecord=None):
"""
Given a product, return a ``PurchaseInfo`` instance.
The ``PurchaseInfo`` class is a named tuple with attributes:
- ``price``: a pricing policy object.
- ``availability``: an availability policy object.
- ``stockrecord``: the stockrecord that is being used
        If a stockrecord is passed, the appropriate ``PurchaseInfo``
        instance for that product and stockrecord is returned.
"""
raise NotImplementedError(
"A strategy class must define a fetch_for_product method "
"for returning the availability and pricing "
"information."
)
def fetch_for_group(self, product):
"""
        Given a group product, fetch a ``PurchaseInfo`` instance
"""
raise NotImplementedError(
"A strategy class must define a fetch_for_group method "
"for returning the availability and pricing "
"information."
)
def fetch_for_line(self, line, stockrecord=None):
"""
Given a basket line instance, fetch a ``PurchaseInfo`` instance.
This method is provided to allow purchase info to be determined using a
basket line's attributes. For instance, "bundle" products often use
basket line attributes to store SKUs of contained products. For such
products, we need to look at the availability of each contained product
to determine overall availability.
"""
# Default to ignoring any basket line options as we don't know what to
# do with them within Oscar - that's up to your project to implement.
return self.fetch_for_product(line.product)
class Structured(Base):
"""
A strategy class which provides separate, overridable methods for
determining the 3 things that a ``PurchaseInfo`` instance requires:
#) A stockrecord
#) A pricing policy
#) An availability policy
"""
def fetch_for_product(self, product, stockrecord=None):
"""
Return the appropriate ``PurchaseInfo`` instance.
This method is not intended to be overridden.
"""
if stockrecord is None:
stockrecord = self.select_stockrecord(product)
return PurchaseInfo(
price=self.pricing_policy(product, stockrecord),
availability=self.availability_policy(product, stockrecord),
stockrecord=stockrecord)
def fetch_for_group(self, product):
# Select variants and associated stockrecords
variant_stock = self.select_variant_stockrecords(product)
return PurchaseInfo(
price=self.group_pricing_policy(product, variant_stock),
availability=self.group_availability_policy(
product, variant_stock),
stockrecord=None)
def select_stockrecord(self, product):
"""
Select the appropriate stockrecord
"""
raise NotImplementedError(
"A structured strategy class must define a "
"'select_stockrecord' method")
def select_variant_stockrecords(self, product):
"""
Select appropriate stock record for all variants of a product
"""
records = []
for variant in product.variants.all():
records.append((variant, self.select_stockrecord(variant)))
return records
def pricing_policy(self, product, stockrecord):
"""
Return the appropriate pricing policy
"""
raise NotImplementedError(
"A structured strategy class must define a "
"'pricing_policy' method")
def availability_policy(self, product, stockrecord):
"""
Return the appropriate availability policy
"""
raise NotImplementedError(
"A structured strategy class must define a "
"'availability_policy' method")
# Mixins - these can be used to construct the appropriate strategy class
class UseFirstStockRecord(object):
"""
Stockrecord selection mixin for use with the ``Structured`` base strategy.
This mixin picks the first (normally only) stockrecord to fulfil a product.
This is backwards compatible with Oscar<0.6 where only one stockrecord per
product was permitted.
"""
def select_stockrecord(self, product):
try:
return product.stockrecords.all()[0]
except IndexError:
return None
class StockRequired(object):
"""
Availability policy mixin for use with the ``Structured`` base strategy.
This mixin ensures that a product can only be bought if it has stock
available (if stock is being tracked).
"""
def availability_policy(self, product, stockrecord):
if not stockrecord:
return availability.Unavailable()
if not product.get_product_class().track_stock:
return availability.Available()
else:
return availability.StockRequired(
stockrecord.net_stock_level)
def group_availability_policy(self, product, variant_stock):
# A parent product is available if one of its variants is
for variant, stockrecord in variant_stock:
policy = self.availability_policy(product, stockrecord)
if policy.is_available_to_buy:
return availability.Available()
return availability.Unavailable()
class NoTax(object):
"""
Pricing policy mixin for use with the ``Structured`` base strategy.
This mixin specifies zero tax and uses the ``price_excl_tax`` from the
stockrecord.
"""
def pricing_policy(self, product, stockrecord):
if not stockrecord:
return prices.Unavailable()
# Check stockrecord has the appropriate data
if not stockrecord.price_excl_tax:
return prices.Unavailable()
return prices.FixedPrice(
currency=stockrecord.price_currency,
excl_tax=stockrecord.price_excl_tax,
tax=D('0.00'))
def group_pricing_policy(self, product, variant_stock):
stockrecords = [x[1] for x in variant_stock if x[1] is not None]
if not stockrecords:
return prices.Unavailable()
# We take price from first record
stockrecord = stockrecords[0]
return prices.FixedPrice(
currency=stockrecord.price_currency,
excl_tax=stockrecord.price_excl_tax,
tax=D('0.00'))
class FixedRateTax(object):
"""
Pricing policy mixin for use with the ``Structured`` base strategy. This
mixin applies a fixed rate tax to the base price from the product's
stockrecord. The price_incl_tax is quantized to two decimal places.
    Rounding behaviour is Decimal's default (ROUND_HALF_EVEN).
"""
rate = D('0') # Subclass and specify the correct rate
exponent = D('0.01') # Default to two decimal places
def pricing_policy(self, product, stockrecord):
if not stockrecord:
return prices.Unavailable()
tax = (stockrecord.price_excl_tax * self.rate).quantize(self.exponent)
return prices.TaxInclusiveFixedPrice(
currency=stockrecord.price_currency,
excl_tax=stockrecord.price_excl_tax,
tax=tax)
class DeferredTax(object):
"""
Pricing policy mixin for use with the ``Structured`` base strategy.
This mixin does not specify the product tax and is suitable to territories
where tax isn't known until late in the checkout process.
"""
def pricing_policy(self, product, stockrecord):
if not stockrecord:
return prices.Unavailable()
return prices.FixedPrice(
currency=stockrecord.price_currency,
excl_tax=stockrecord.price_excl_tax)
# Example strategy composed of above mixins. For real projects, it's likely
# you'll want to use a different pricing mixin as you'll probably want to
# charge tax!
class Default(UseFirstStockRecord, StockRequired, NoTax, Structured):
"""
Default stock/price strategy that uses the first found stockrecord for a
product, ensures that stock is available (unless the product class
indicates that we don't need to track stock) and charges zero tax.
"""
class UK(UseFirstStockRecord, StockRequired, FixedRateTax, Structured):
"""
Sample strategy for the UK that:
- uses the first stockrecord for each product (effectively assuming
there is only one).
- requires that a product has stock available to be bought
- applies a fixed rate of tax on all products
This is just a sample strategy used for internal development. It is not
recommended to be used in production, especially as the tax rate is
hard-coded.
"""
# Use UK VAT rate (as of December 2013)
rate = D('0.20')
class US(UseFirstStockRecord, StockRequired, DeferredTax, Structured):
"""
Sample strategy for the US.
- uses the first stockrecord for each product (effectively assuming
there is only one).
- requires that a product has stock available to be bought
- doesn't apply a tax to product prices (normally this will be done
after the shipping address is entered).
This is just a sample one used for internal development. It is not
recommended to be used in production.
"""
|
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import json
import re
import subprocess
import sys
from pycoin import encoding
from pycoin.ecdsa import is_public_pair_valid, generator_secp256k1, public_pair_for_x, secp256k1
from pycoin.serialize import b2h, h2b
from pycoin.key import Key
from pycoin.key.BIP32Node import BIP32Node
from pycoin.networks import full_network_name_for_netcode, network_name_for_netcode, NETWORK_NAMES
SEC_RE = re.compile(r"^(0[23][0-9a-fA-F]{64})|(04[0-9a-fA-F]{128})$")
HASH160_RE = re.compile(r"^([0-9a-fA-F]{40})$")
def gpg_entropy():
try:
output = subprocess.Popen(
["gpg", "--gen-random", "2", "64"], stdout=subprocess.PIPE).communicate()[0]
return output
except OSError:
sys.stderr.write("warning: can't open gpg, can't use as entropy source\n")
return b''
def get_entropy():
entropy = bytearray()
try:
entropy.extend(gpg_entropy())
except Exception:
print("warning: can't use gpg as entropy source", file=sys.stdout)
try:
entropy.extend(open("/dev/random", "rb").read(64))
except Exception:
print("warning: can't use /dev/random as entropy source", file=sys.stdout)
entropy = bytes(entropy)
if len(entropy) < 64:
raise OSError("can't find sources of entropy")
return entropy
def parse_as_number(s):
try:
return int(s)
except ValueError:
pass
try:
return int(s, 16)
except ValueError:
pass
def parse_as_secret_exponent(s):
v = parse_as_number(s)
if v and v < secp256k1._r:
return v
def parse_as_public_pair(s):
for c in ",/":
if c in s:
s0, s1 = s.split(c, 1)
v0 = parse_as_number(s0)
if v0:
if s1 in ("even", "odd"):
return public_pair_for_x(generator_secp256k1, v0, is_even=(s1 == 'even'))
v1 = parse_as_number(s1)
if v1:
if not is_public_pair_valid(generator_secp256k1, (v0, v1)):
sys.stderr.write("invalid (x, y) pair\n")
sys.exit(1)
return (v0, v1)
def create_output(item, key, subkey_path=None):
output_dict = {}
output_order = []
def add_output(json_key, value=None, human_readable_key=None):
if human_readable_key is None:
human_readable_key = json_key.replace("_", " ")
if value:
output_dict[json_key.strip().lower()] = value
output_order.append((json_key.lower(), human_readable_key))
network_name = network_name_for_netcode(key._netcode)
full_network_name = full_network_name_for_netcode(key._netcode)
add_output("input", item)
add_output("network", full_network_name)
add_output("netcode", key._netcode)
if hasattr(key, "wallet_key"):
if subkey_path:
add_output("subkey_path", subkey_path)
add_output("wallet_key", key.wallet_key(as_private=key.is_private()))
if key.is_private():
add_output("public_version", key.wallet_key(as_private=False))
child_number = key.child_index()
if child_number >= 0x80000000:
wc = child_number - 0x80000000
child_index = "%dH (%d)" % (wc, child_number)
else:
child_index = "%d" % child_number
add_output("tree_depth", "%d" % key.tree_depth())
add_output("fingerprint", b2h(key.fingerprint()))
add_output("parent_fingerprint", b2h(key.parent_fingerprint()), "parent f'print")
add_output("child_index", child_index)
add_output("chain_code", b2h(key.chain_code()))
add_output("private_key", "yes" if key.is_private() else "no")
secret_exponent = key.secret_exponent()
if secret_exponent:
add_output("secret_exponent", '%d' % secret_exponent)
add_output("secret_exponent_hex", '%x' % secret_exponent, " hex")
add_output("wif", key.wif(use_uncompressed=False))
add_output("wif_uncompressed", key.wif(use_uncompressed=True), " uncompressed")
public_pair = key.public_pair()
if public_pair:
add_output("public_pair_x", '%d' % public_pair[0])
add_output("public_pair_y", '%d' % public_pair[1])
add_output("public_pair_x_hex", '%x' % public_pair[0], " x as hex")
add_output("public_pair_y_hex", '%x' % public_pair[1], " y as hex")
add_output("y_parity", "odd" if (public_pair[1] & 1) else "even")
add_output("key_pair_as_sec", b2h(key.sec(use_uncompressed=False)))
add_output("key_pair_as_sec_uncompressed", b2h(key.sec(use_uncompressed=True)), " uncompressed")
hash160_c = key.hash160(use_uncompressed=False)
if hash160_c:
add_output("hash160", b2h(hash160_c))
hash160_u = key.hash160(use_uncompressed=True)
if hash160_u:
add_output("hash160_uncompressed", b2h(hash160_u), " uncompressed")
if hash160_c:
address = key.address(use_uncompressed=False)
add_output("address", address, "%s address" % network_name)
output_dict["%s_address" % key._netcode] = address
if hash160_u:
address = key.address(use_uncompressed=True)
add_output("address_uncompressed", address, "%s address uncompressed" % network_name)
output_dict["%s_address_uncompressed" % key._netcode] = address
return output_dict, output_order
def dump_output(output_dict, output_order):
print('')
max_length = max(len(v[1]) for v in output_order)
for key, hr_key in output_order:
space_padding = ' ' * (1 + max_length - len(hr_key))
val = output_dict.get(key)
if val is None:
print(hr_key)
else:
if len(val) > 80:
val = "%s\\\n%s%s" % (val[:66], ' ' * (5 + max_length), val[66:])
print("%s%s: %s" % (hr_key, space_padding, val))
def main():
networks = "MTLD"
parser = argparse.ArgumentParser(
description='Crypto coin utility ku ("key utility") to show'
' information about Bitcoin or other cryptocoin data structures.',
        epilog='Known network codes:\n ' \
+ ', '.join(['%s (%s)'%(i, full_network_name_for_netcode(i)) for i in NETWORK_NAMES])
)
parser.add_argument('-w', "--wallet", help='show just Bitcoin wallet key', action='store_true')
parser.add_argument('-W', "--wif", help='show just Bitcoin WIF', action='store_true')
parser.add_argument('-a', "--address", help='show just Bitcoin address', action='store_true')
parser.add_argument(
'-u', "--uncompressed", help='show output in uncompressed form',
action='store_true')
parser.add_argument(
'-P', "--public", help='only show public version of wallet keys',
action='store_true')
parser.add_argument('-j', "--json", help='output as JSON', action='store_true')
parser.add_argument('-s', "--subkey", help='subkey path (example: 0H/2/15-20)')
parser.add_argument('-n', "--network", help='specify network (default: BTC = Bitcoin)',
default='BTC', choices=NETWORK_NAMES)
parser.add_argument("--override-network", help='override detected network type',
default=None, choices=NETWORK_NAMES)
parser.add_argument(
'item', nargs="+", help='a BIP0032 wallet key string;'
' a WIF;'
' a bitcoin address;'
        ' an SEC (i.e. 66 hex characters starting with 02 or 03, or 130 hex characters starting with 04);'
' the literal string "create" to create a new wallet key using strong entropy sources;'
' P:wallet passphrase (NOT RECOMMENDED);'
' H:wallet passphrase in hex (NOT RECOMMENDED);'
' secret_exponent (in decimal or hex);'
' x,y where x,y form a public pair (y is a number or one of the strings "even" or "odd");'
' hash160 (as 40 hex characters)')
args = parser.parse_args()
if args.override_network:
        # force the network arg to match the override; the decoded data below is overridden as well.
args.network = args.override_network
PREFIX_TRANSFORMS = (
("P:", lambda s:
BIP32Node.from_master_secret(s.encode("utf8"), netcode=args.network)),
("H:", lambda s:
BIP32Node.from_master_secret(h2b(s), netcode=args.network)),
("create", lambda s:
BIP32Node.from_master_secret(get_entropy(), netcode=args.network)),
)
for item in args.item:
key = None
for k, f in PREFIX_TRANSFORMS:
if item.startswith(k):
try:
key = f(item[len(k):])
break
except Exception:
pass
else:
try:
key = Key.from_text(item)
except encoding.EncodingError:
pass
if key is None:
secret_exponent = parse_as_secret_exponent(item)
if secret_exponent:
key = Key(secret_exponent=secret_exponent, netcode=args.network)
if SEC_RE.match(item):
key = Key.from_sec(h2b(item))
if key is None:
public_pair = parse_as_public_pair(item)
if public_pair:
key = Key(public_pair=public_pair, netcode=args.network)
if HASH160_RE.match(item):
key = Key(hash160=h2b(item), netcode=args.network)
if key is None:
print("can't parse %s" % item, file=sys.stderr)
continue
if args.override_network:
            # Override the network value, so we can take the same xpubkey and view
            # what the values would be on other network types.
# XXX public interface for this is needed...
key._netcode = args.override_network
for key in key.subkeys(args.subkey or ""):
if args.public:
key = key.public_copy()
output_dict, output_order = create_output(item, key)
if args.json:
print(json.dumps(output_dict, indent=3, sort_keys=True))
elif args.wallet:
print(output_dict["wallet_key"])
elif args.wif:
print(output_dict["wif_uncompressed" if args.uncompressed else "wif"])
elif args.address:
print(output_dict["address" + ("_uncompressed" if args.uncompressed else "")])
else:
dump_output(output_dict, output_order)
if __name__ == '__main__':
main()
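# Illustrative invocations (a sketch, assuming this module is installed as the
# ``ku`` command; all flags are the ones defined in main() above):
#
#   ku create                       # new BIP0032 wallet key from strong entropy
#   ku -w -s 0H/2/15 <wallet key>   # show just the wallet key for subkey path 0H/2/15
#   ku -a <WIF or SEC>              # show just the address for the given item
#   ku -j <item>                    # dump the full output dictionary as JSON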
|
|
# coding: utf-8
from devito import Function, TimeFunction, warning
from devito.tools import memoized_meth
from examples.seismic.tti.operators import ForwardOperator, AdjointOperator
from examples.seismic.tti.operators import JacobianOperator, JacobianAdjOperator
from examples.seismic.tti.operators import particle_velocity_fields
from examples.checkpointing.checkpoint import DevitoCheckpoint, CheckpointOperator
from pyrevolve import Revolver
class AnisotropicWaveSolver(object):
"""
Solver object that provides operators for seismic inversion problems
and encapsulates the time and space discretization for a given problem
setup.
Parameters
----------
model : Model
Object containing the physical parameters.
geometry : AcquisitionGeometry
Geometry object that contains the source (SparseTimeFunction) and
receivers (SparseTimeFunction) and their position.
space_order : int, optional
Order of the spatial stencil discretisation. Defaults to 4.
Notes
-----
space_order must be even and it is recommended to be a multiple of 4
"""
def __init__(self, model, geometry, space_order=4, kernel='centered',
**kwargs):
self.model = model
self.model._initialize_bcs(bcs="damp")
self.geometry = geometry
self.kernel = kernel
if space_order % 2 != 0:
raise ValueError("space_order must be even but got %s"
% space_order)
if space_order % 4 != 0:
warning("It is recommended for space_order to be a multiple of 4" +
"but got %s" % space_order)
self.space_order = space_order
# Cache compiler options
self._kwargs = kwargs
@property
def dt(self):
return self.model.critical_dt
@memoized_meth
def op_fwd(self, save=False):
"""Cached operator for forward runs with buffered wavefield"""
return ForwardOperator(self.model, save=save, geometry=self.geometry,
space_order=self.space_order, kernel=self.kernel,
**self._kwargs)
@memoized_meth
def op_adj(self):
"""Cached operator for adjoint runs"""
return AdjointOperator(self.model, save=None, geometry=self.geometry,
space_order=self.space_order, kernel=self.kernel,
**self._kwargs)
@memoized_meth
def op_jac(self):
"""Cached operator for born runs"""
return JacobianOperator(self.model, save=None, geometry=self.geometry,
space_order=self.space_order, **self._kwargs)
@memoized_meth
def op_jacadj(self, save=True):
"""Cached operator for gradient runs"""
return JacobianAdjOperator(self.model, save=save, geometry=self.geometry,
space_order=self.space_order, **self._kwargs)
def forward(self, src=None, rec=None, u=None, v=None, model=None,
save=False, **kwargs):
"""
Forward modelling function that creates the necessary
data objects for running a forward modelling operator.
Parameters
----------
geometry : AcquisitionGeometry
Geometry object that contains the source (SparseTimeFunction) and
receivers (SparseTimeFunction) and their position.
u : TimeFunction, optional
The computed wavefield first component.
v : TimeFunction, optional
The computed wavefield second component.
model : Model, optional
Object containing the physical parameters.
vp : Function or float, optional
The time-constant velocity.
epsilon : Function or float, optional
The time-constant first Thomsen parameter.
delta : Function or float, optional
The time-constant second Thomsen parameter.
theta : Function or float, optional
The time-constant Dip angle (radians).
phi : Function or float, optional
The time-constant Azimuth angle (radians).
save : bool, optional
Whether or not to save the entire (unrolled) wavefield.
kernel : str, optional
Type of discretization, centered or shifted.
Returns
-------
Receiver, wavefield and performance summary.
"""
if self.kernel == 'staggered':
time_order = 1
dims = self.model.space_dimensions
stagg_u = (-dims[-1])
stagg_v = (-dims[0], -dims[1]) if self.model.grid.dim == 3 else (-dims[0])
else:
time_order = 2
stagg_u = stagg_v = None
# Source term is read-only, so re-use the default
src = src or self.geometry.src
# Create a new receiver object to store the result
rec = rec or self.geometry.rec
# Create the forward wavefield if not provided
if u is None:
u = TimeFunction(name='u', grid=self.model.grid, staggered=stagg_u,
save=self.geometry.nt if save else None,
time_order=time_order,
space_order=self.space_order)
# Create the forward wavefield if not provided
if v is None:
v = TimeFunction(name='v', grid=self.model.grid, staggered=stagg_v,
save=self.geometry.nt if save else None,
time_order=time_order,
space_order=self.space_order)
if self.kernel == 'staggered':
vx, vz, vy = particle_velocity_fields(self.model, self.space_order)
kwargs["vx"] = vx
kwargs["vz"] = vz
if vy is not None:
kwargs["vy"] = vy
model = model or self.model
# Pick vp and Thomsen parameters from model unless explicitly provided
kwargs.update(model.physical_params(**kwargs))
if self.model.dim < 3:
kwargs.pop('phi', None)
# Execute operator and return wavefield and receiver data
summary = self.op_fwd(save).apply(src=src, rec=rec, u=u, v=v,
dt=kwargs.pop('dt', self.dt), **kwargs)
return rec, u, v, summary
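    # A minimal usage sketch (illustrative, not part of the original module;
    # ``model`` and ``geometry`` are assumed to be existing Model and
    # AcquisitionGeometry instances):
    #
    #     solver = AnisotropicWaveSolver(model, geometry, space_order=8)
    #     rec, u, v, summary = solver.forward(save=False)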
def adjoint(self, rec, srca=None, p=None, r=None, model=None,
save=None, **kwargs):
"""
Adjoint modelling function that creates the necessary
data objects for running an adjoint modelling operator.
Parameters
----------
geometry : AcquisitionGeometry
Geometry object that contains the source (SparseTimeFunction) and
receivers (SparseTimeFunction) and their position.
p : TimeFunction, optional
The computed wavefield first component.
r : TimeFunction, optional
The computed wavefield second component.
model : Model, optional
Object containing the physical parameters.
vp : Function or float, optional
The time-constant velocity.
epsilon : Function or float, optional
The time-constant first Thomsen parameter.
delta : Function or float, optional
The time-constant second Thomsen parameter.
theta : Function or float, optional
The time-constant Dip angle (radians).
phi : Function or float, optional
The time-constant Azimuth angle (radians).
Returns
-------
Adjoint source, wavefield and performance summary.
"""
if self.kernel == 'staggered':
time_order = 1
dims = self.model.space_dimensions
stagg_p = (-dims[-1])
stagg_r = (-dims[0], -dims[1]) if self.model.grid.dim == 3 else (-dims[0])
else:
time_order = 2
stagg_p = stagg_r = None
# Source term is read-only, so re-use the default
srca = srca or self.geometry.new_src(name='srca', src_type=None)
# Create the wavefield if not provided
if p is None:
p = TimeFunction(name='p', grid=self.model.grid, staggered=stagg_p,
time_order=time_order,
space_order=self.space_order)
# Create the wavefield if not provided
if r is None:
r = TimeFunction(name='r', grid=self.model.grid, staggered=stagg_r,
time_order=time_order,
space_order=self.space_order)
if self.kernel == 'staggered':
vx, vz, vy = particle_velocity_fields(self.model, self.space_order)
kwargs["vx"] = vx
kwargs["vz"] = vz
if vy is not None:
kwargs["vy"] = vy
model = model or self.model
# Pick vp and Thomsen parameters from model unless explicitly provided
kwargs.update(model.physical_params(**kwargs))
if self.model.dim < 3:
kwargs.pop('phi', None)
# Execute operator and return wavefield and receiver data
summary = self.op_adj().apply(srca=srca, rec=rec, p=p, r=r,
dt=kwargs.pop('dt', self.dt),
time_m=0 if time_order == 1 else None,
**kwargs)
return srca, p, r, summary
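    # Illustrative follow-on to the forward sketch above: the adjoint run takes
    # receiver data and returns the adjoint source plus both wavefield components.
    #
    #     srca, p, r, summary = solver.adjoint(rec)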
def jacobian(self, dm, src=None, rec=None, u0=None, v0=None, du=None, dv=None,
model=None, save=None, kernel='centered', **kwargs):
"""
Linearized Born modelling function that creates the necessary
data objects for running an adjoint modelling operator.
Parameters
----------
src : SparseTimeFunction or array_like, optional
Time series data for the injected source term.
rec : SparseTimeFunction or array_like, optional
The interpolated receiver data.
        u0 : TimeFunction, optional
            The computed background wavefield first component.
        v0 : TimeFunction, optional
            The computed background wavefield second component.
du : TimeFunction, optional
The computed perturbed wavefield first component.
dv : TimeFunction, optional
The computed perturbed wavefield second component.
model : Model, optional
Object containing the physical parameters.
vp : Function or float, optional
The time-constant velocity.
epsilon : Function or float, optional
The time-constant first Thomsen parameter.
delta : Function or float, optional
The time-constant second Thomsen parameter.
theta : Function or float, optional
The time-constant Dip angle (radians).
phi : Function or float, optional
The time-constant Azimuth angle (radians).
"""
if kernel != 'centered':
raise ValueError('Only centered kernel is supported for the jacobian')
dt = kwargs.pop('dt', self.dt)
# Source term is read-only, so re-use the default
src = src or self.geometry.src
# Create a new receiver object to store the result
rec = rec or self.geometry.rec
# Create the forward wavefields u, v du and dv if not provided
u0 = u0 or TimeFunction(name='u0', grid=self.model.grid,
time_order=2, space_order=self.space_order)
v0 = v0 or TimeFunction(name='v0', grid=self.model.grid,
time_order=2, space_order=self.space_order)
du = du or TimeFunction(name='du', grid=self.model.grid,
time_order=2, space_order=self.space_order)
dv = dv or TimeFunction(name='dv', grid=self.model.grid,
time_order=2, space_order=self.space_order)
model = model or self.model
# Pick vp and Thomsen parameters from model unless explicitly provided
kwargs.update(model.physical_params(**kwargs))
if self.model.dim < 3:
kwargs.pop('phi', None)
# Execute operator and return wavefield and receiver data
summary = self.op_jac().apply(dm=dm, u0=u0, v0=v0, du=du, dv=dv, src=src,
rec=rec, dt=dt, **kwargs)
return rec, u0, v0, du, dv, summary
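    # Illustrative sketch (``dm`` is assumed to be an existing model
    # perturbation, e.g. a Function defined on ``model.grid``):
    #
    #     rec_lin, u0, v0, du, dv, summary = solver.jacobian(dm)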
def jacobian_adjoint(self, rec, u0, v0, du=None, dv=None, dm=None, model=None,
checkpointing=False, kernel='centered', **kwargs):
"""
Gradient modelling function for computing the adjoint of the
        linearized Born modelling function, i.e. the action of the
        Jacobian adjoint on the input data.
Parameters
----------
rec : SparseTimeFunction
Receiver data.
        u0 : TimeFunction
            The computed background wavefield first component.
        v0 : TimeFunction
            The computed background wavefield second component.
        du : TimeFunction, optional
            The computed perturbed wavefield first component.
        dv : TimeFunction, optional
            The computed perturbed wavefield second component.
dm : Function, optional
Stores the gradient field.
model : Model, optional
Object containing the physical parameters.
vp : Function or float, optional
The time-constant velocity.
epsilon : Function or float, optional
The time-constant first Thomsen parameter.
delta : Function or float, optional
The time-constant second Thomsen parameter.
theta : Function or float, optional
The time-constant Dip angle (radians).
phi : Function or float, optional
The time-constant Azimuth angle (radians).
Returns
-------
Gradient field and performance summary.
"""
if kernel != 'centered':
raise ValueError('Only centered kernel is supported for the jacobian_adj')
dt = kwargs.pop('dt', self.dt)
# Gradient symbol
dm = dm or Function(name='dm', grid=self.model.grid)
# Create the perturbation wavefields if not provided
du = du or TimeFunction(name='du', grid=self.model.grid,
time_order=2, space_order=self.space_order)
dv = dv or TimeFunction(name='dv', grid=self.model.grid,
time_order=2, space_order=self.space_order)
model = model or self.model
# Pick vp and Thomsen parameters from model unless explicitly provided
kwargs.update(model.physical_params(**kwargs))
if self.model.dim < 3:
kwargs.pop('phi', None)
if checkpointing:
u0 = TimeFunction(name='u0', grid=self.model.grid,
time_order=2, space_order=self.space_order)
v0 = TimeFunction(name='v0', grid=self.model.grid,
time_order=2, space_order=self.space_order)
cp = DevitoCheckpoint([u0, v0])
n_checkpoints = None
wrap_fw = CheckpointOperator(self.op_fwd(save=False), src=self.geometry.src,
u=u0, v=v0, dt=dt, **kwargs)
wrap_rev = CheckpointOperator(self.op_jacadj(save=False), u0=u0, v0=v0,
du=du, dv=dv, rec=rec, dm=dm, dt=dt, **kwargs)
# Run forward
wrp = Revolver(cp, wrap_fw, wrap_rev, n_checkpoints, rec.data.shape[0]-2)
wrp.apply_forward()
summary = wrp.apply_reverse()
else:
summary = self.op_jacadj().apply(rec=rec, dm=dm, u0=u0, v0=v0, du=du, dv=dv,
dt=dt, **kwargs)
return dm, summary
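    # Illustrative gradient sketch (``rec``, ``u0`` and ``v0`` are assumed to
    # come from a prior ``forward(save=True)`` run of the same solver):
    #
    #     rec, u0, v0, _ = solver.forward(save=True)
    #     grad, summary = solver.jacobian_adjoint(rec, u0, v0)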
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for PySpark; additional tests are implemented as doctests in
individual modules.
"""
from fileinput import input
from glob import glob
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
import zipfile
from pyspark.context import SparkContext
from pyspark.files import SparkFiles
from pyspark.serializers import read_int
SPARK_HOME = os.environ["SPARK_HOME"]
class PySparkTestCase(unittest.TestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
        self.sc = SparkContext('local[4]', class_name, batchSize=2)
def tearDown(self):
self.sc.stop()
sys.path = self._old_sys_path
# To avoid Akka rebinding to the same port, since it doesn't unbind
# immediately on shutdown
self.sc._jvm.System.clearProperty("spark.driver.port")
class TestCheckpoint(PySparkTestCase):
def setUp(self):
PySparkTestCase.setUp(self)
self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.checkpointDir.name)
self.sc.setCheckpointDir(self.checkpointDir.name)
def tearDown(self):
PySparkTestCase.tearDown(self)
shutil.rmtree(self.checkpointDir.name)
def test_basic_checkpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
self.assertEqual("file:" + self.checkpointDir.name,
os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))
def test_checkpoint_and_restore(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: [x])
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
flatMappedRDD.count() # forces a checkpoint to be computed
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
flatMappedRDD._jrdd_deserializer)
self.assertEquals([1, 2, 3, 4], recovered.collect())
class TestAddFile(PySparkTestCase):
def test_add_py_file(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this job fails due to `userlibrary` not being on the Python path:
def func(x):
from userlibrary import UserClass
return UserClass().hello()
self.assertRaises(Exception,
self.sc.parallelize(range(2)).map(func).first)
# Add the file, so the job should now succeed:
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
res = self.sc.parallelize(range(2)).map(func).first()
self.assertEqual("Hello World!", res)
def test_add_file_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
self.sc.addFile(path)
download_path = SparkFiles.get("hello.txt")
self.assertNotEqual(path, download_path)
with open(download_path) as test_file:
self.assertEquals("Hello World!\n", test_file.readline())
def test_add_py_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlibrary import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addFile(path)
from userlibrary import UserClass
self.assertEqual("Hello World!", UserClass().hello())
def test_add_egg_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlib import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1-py2.7.egg")
self.sc.addPyFile(path)
from userlib import UserClass
self.assertEqual("Hello World from inside a package!", UserClass().hello())
class TestRDDFunctions(PySparkTestCase):
def test_save_as_textfile_with_unicode(self):
# Regression test for SPARK-970
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = ''.join(input(glob(tempFile.name + "/part-0000*")))
self.assertEqual(x, unicode(raw_contents.strip(), "utf-8"))
def test_transforming_cartesian_result(self):
# Regression test for SPARK-1034
rdd1 = self.sc.parallelize([1, 2])
rdd2 = self.sc.parallelize([3, 4])
cart = rdd1.cartesian(rdd2)
result = cart.map(lambda (x, y): x + y).collect()
def test_cartesian_on_textfile(self):
# Regression test for
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
a = self.sc.textFile(path)
result = a.cartesian(a).collect()
(x, y) = result[0]
self.assertEqual("Hello World!", x.strip())
self.assertEqual("Hello World!", y.strip())
def test_deleting_input_files(self):
# Regression test for SPARK-1025
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write("Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
self.assertRaises(Exception, lambda: filtered_data.count())
class TestIO(PySparkTestCase):
def test_stdout_redirection(self):
import subprocess
def func(x):
subprocess.check_call('ls', shell=True)
self.sc.parallelize([1]).foreach(func)
class TestDaemon(unittest.TestCase):
def connect(self, port):
from socket import socket, AF_INET, SOCK_STREAM
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('127.0.0.1', port))
        # send a split index of -1 (four 0xFF bytes) to shut down the worker
sock.send("\xFF\xFF\xFF\xFF")
sock.close()
return True
def do_termination_test(self, terminator):
from subprocess import Popen, PIPE
from errno import ECONNREFUSED
# start daemon
daemon_path = os.path.join(os.path.dirname(__file__), "daemon.py")
daemon = Popen([sys.executable, daemon_path], stdin=PIPE, stdout=PIPE)
# read the port number
port = read_int(daemon.stdout)
# daemon should accept connections
self.assertTrue(self.connect(port))
# request shutdown
terminator(daemon)
time.sleep(1)
# daemon should no longer accept connections
try:
self.connect(port)
except EnvironmentError as exception:
self.assertEqual(exception.errno, ECONNREFUSED)
else:
self.fail("Expected EnvironmentError to be raised")
def test_termination_stdin(self):
"""Ensure that daemon and workers terminate when stdin is closed."""
self.do_termination_test(lambda daemon: daemon.stdin.close())
def test_termination_sigterm(self):
"""Ensure that daemon and workers terminate on SIGTERM."""
from signal import SIGTERM
self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))
class TestSparkSubmit(unittest.TestCase):
def setUp(self):
self.programDir = tempfile.mkdtemp()
self.sparkSubmit = os.path.join(os.environ.get("SPARK_HOME"), "bin", "spark-submit")
def tearDown(self):
shutil.rmtree(self.programDir)
def createTempFile(self, name, content):
"""
Create a temp file with the given name and content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
path = os.path.join(self.programDir, name)
with open(path, "w") as f:
f.write(content)
return path
def createFileInZip(self, name, content):
"""
Create a zip archive containing a file with the given content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
path = os.path.join(self.programDir, name + ".zip")
with zipfile.ZipFile(path, 'w') as zip:
zip.writestr(name, content)
return path
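    # For example (illustrative only), content written as
    #         |from pyspark import SparkContext
    #         |sc = SparkContext()
    # is stripped by the helpers above to
    #     from pyspark import SparkContext
    #     sc = SparkContext()
    # which is how the tests below embed their submitted scripts inline.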
def test_single_script(self):
"""Submit and test a single script file"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(lambda x: x * 2).collect()
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out)
def test_script_with_local_functions(self):
"""Submit and test a single script file calling a global function"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 3
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(foo).collect()
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[3, 6, 9]", out)
def test_module_dependency(self):
"""Submit and test a script with a dependency on another module"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(myfunc).collect()
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out)
def test_module_dependency_on_cluster(self):
"""Submit and test a script with a dependency on another module on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(myfunc).collect()
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen(
[self.sparkSubmit, "--py-files", zip, "--master", "local-cluster[1,1,512]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out)
def test_single_script_on_cluster(self):
"""Submit and test a single script on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 2
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(foo).collect()
""")
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,512]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out)
if __name__ == "__main__":
unittest.main()
|
|
"""Encoding PDUs for use in testing."""
############################# A-ASSOCIATE-RQ PDU #############################
# Called AET: ANY-SCP
# Calling AET: ECHOSCU
# Application Context Name: 1.2.840.10008.3.1.1.1
# Presentation Context Items:
# Presentation Context ID: 1
# Abstract Syntax: 1.2.840.10008.1.1 Verification SOP Class
# Transfer Syntax: 1.2.840.10008.1.2 Implicit VR Little Endian
# User Information
# Max Length Received: 16382
# Implementation Class UID: 1.2.826.0.1.3680043.9.3811.0.9.0
# Implementation Version Name: PYNETDICOM_090
a_associate_rq = (
b"\x01\x00\x00\x00\x00\xd1\x00\x01\x00\x00\x41\x4e\x59\x2d"
b"\x53\x43\x50\x20\x20\x20\x20\x20\x20\x20\x20\x20\x45\x43"
b"\x48\x4f\x53\x43\x55\x20\x20\x20\x20\x20\x20\x20\x20\x20"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x10\x00\x00\x15\x31\x2e\x32\x2e\x38\x34"
b"\x30\x2e\x31\x30\x30\x30\x38\x2e\x33\x2e\x31\x2e\x31\x2e"
b"\x31\x20\x00\x00\x2e\x01\x00\x00\x00\x30\x00\x00\x11\x31"
b"\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31"
b"\x2e\x31\x40\x00\x00\x11\x31\x2e\x32\x2e\x38\x34\x30\x2e"
b"\x31\x30\x30\x30\x38\x2e\x31\x2e\x32\x50\x00\x00\x3e\x51"
b"\x00\x00\x04\x00\x00\x3f\xfe\x52\x00\x00\x20\x31\x2e\x32"
b"\x2e\x38\x32\x36\x2e\x30\x2e\x31\x2e\x33\x36\x38\x30\x30"
b"\x34\x33\x2e\x39\x2e\x33\x38\x31\x31\x2e\x30\x2e\x39\x2e"
b"\x30\x55\x00\x00\x0e\x50\x59\x4e\x45\x54\x44\x49\x43\x4f"
b"\x4d\x5f\x30\x39\x30"
)
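# Illustrative decoding sketch for the fixed-length header above (standard
# library only; not part of the original test data):
#
#     import struct
#     pdu_type, _, pdu_length = struct.unpack(">BBL", a_associate_rq[:6])
#     # pdu_type == 0x01 (A-ASSOCIATE-RQ), pdu_length == 0xd1 (209 bytes follow)
#     protocol_version = struct.unpack(">H", a_associate_rq[6:8])[0]  # 0x0001
#     called_aet = a_associate_rq[10:26]   # b"ANY-SCP" padded with spaces to 16 bytes
#     calling_aet = a_associate_rq[26:42]  # b"ECHOSCU" padded with spaces to 16 bytes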
# Called AET: ANY-SCP
# Calling AET: ECHOSCU
# Application Context Name: 1.2.840.10008.3.1.1.1
# Presentation Context Items:
# Presentation Context ID: 1
# Abstract Syntax: 1.2.840.10008.1.1 Verification SOP Class
# Transfer Syntax: 1.2.840.10008.1.2 Implicit VR Little Endian
# User Information
# Max Length Received: 16382
# Implementation Class UID: 1.2.826.0.1.3680043.9.3811.0.9.0
# Implementation Version Name: PYNETDICOM_090
# User Identity
# Type: 1
# Response requested: 1
# Primary field: pynetdicom
# Secondary field: (none)
# AsynchronousOperationsWindow
# Max operations invoked: 5
# Max operations performed: 5
a_associate_rq_user_async = (
b"\x01\x00\x00\x00\x00\xed\x00\x01\x00\x00\x41\x4e\x59\x2d\x53\x43"
b"\x50\x20\x20\x20\x20\x20\x20\x20\x20\x20\x45\x43\x48\x4f\x53\x43"
b"\x55\x20\x20\x20\x20\x20\x20\x20\x20\x20\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x15\x31\x2e"
b"\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x33\x2e\x31\x2e"
b"\x31\x2e\x31\x20\x00\x00\x2e\x01\x00\x00\x00\x30\x00\x00\x11\x31"
b"\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31\x2e\x31"
b"\x40\x00\x00\x11\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30"
b"\x38\x2e\x31\x2e\x32\x50\x00\x00\x5a\x51\x00\x00\x04\x00\x00\x3f"
b"\xfe\x52\x00\x00\x20\x31\x2e\x32\x2e\x38\x32\x36\x2e\x30\x2e\x31"
b"\x2e\x33\x36\x38\x30\x30\x34\x33\x2e\x39\x2e\x33\x38\x31\x31\x2e"
b"\x30\x2e\x39\x2e\x30\x55\x00\x00\x0e\x50\x59\x4e\x45\x54\x44\x49"
b"\x43\x4f\x4d\x5f\x30\x39\x30\x58\x00\x00\x10\x01\x01\x00\x0a\x70"
b"\x79\x6e\x65\x74\x64\x69\x63\x6f\x6d\x00\x00\x53\x00\x00\x04\x00"
b"\x05\x00\x05"
)
# Called AET: ANY-SCP
# Calling AET: GETSCU
# Application Context Name: 1.2.840.10008.3.1.1.1
# Presentation Context Items:
# Presentation Context ID: 1
# Abstract Syntax: 1.2.840.10008.5.1.4.1.1.2 CT Image Storage
# Transfer Syntax: 1.2.840.10008.1.2.1 Explicit VR Little Endian
# User Information
# Max Length Received: 16382
# Implementation Class UID: 1.2.826.0.1.3680043.9.3811.0.9.0
# Implementation Version Name: PYNETDICOM_090
# SCP/SCU Role Selection
# SOP Class: 1.2.840.10008.5.1.4.1.1.2 CT Image Storage
# SCU Role: 0
# SCP Role: 1
a_associate_rq_role = (
b"\x01\x00\x00\x00\x00\xfc\x00\x01\x00\x00\x41\x4e\x59\x2d\x53\x43"
b"\x50\x20\x20\x20\x20\x20\x20\x20\x20\x20\x47\x45\x54\x53\x43\x55"
b"\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x15\x31\x2e"
b"\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x33\x2e\x31\x2e"
b"\x31\x2e\x31\x20\x00\x00\x38\x01\x00\x00\x00\x30\x00\x00\x19\x31"
b"\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x35\x2e\x31"
b"\x2e\x34\x2e\x31\x2e\x31\x2e\x32\x40\x00\x00\x13\x31\x2e\x32\x2e"
b"\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31\x2e\x32\x2e\x31\x50"
b"\x00\x00\x5f\x51\x00\x00\x04\x00\x00\x3f\xfe\x52\x00\x00\x20\x31"
b"\x2e\x32\x2e\x38\x32\x36\x2e\x30\x2e\x31\x2e\x33\x36\x38\x30\x30"
b"\x34\x33\x2e\x39\x2e\x33\x38\x31\x31\x2e\x30\x2e\x39\x2e\x30\x55"
b"\x00\x00\x0e\x50\x59\x4e\x45\x54\x44\x49\x43\x4f\x4d\x5f\x30\x39"
b"\x30\x54\x00\x00\x1d\x00\x19\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31"
b"\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e\x34\x2e\x31\x2e\x31\x2e\x32"
b"\x00\x01"
)
# Called AET: ANY-SCP
# Calling AET: STORESCU
# Application Context Name: 1.2.840.10008.3.1.1.1
# Presentation Context Items:
# Presentation Context ID: 1
# Abstract Syntax: 1.2.840.10008.5.1.4.1.1.2 CT Image Storage
# Transfer Syntax: 1.2.840.10008.1.2 Implicit VR Little Endian
# Transfer Syntax: 1.2.840.10008.1.2.1 Explicit VR Little Endian
# Transfer Syntax: 1.2.840.10008.1.2.2 Explicit VR Big Endian
# User Information
# Max Length Received: 16384
# Implementation Class UID: 1.2.276.0.7230010.3.0.3.6.0
# Implementation Version Name: OFFIS_DCMTK_360
# User Identity
# Type: 2
# Response requested: 0
# Primary field: pynetdicom
# Secondary field: p4ssw0rd
a_associate_rq_user_id_user_pass = (
b"\x01\x00\x00\x00\x01\x1f\x00\x01\x00\x00\x41\x4e\x59\x2d\x53\x43"
b"\x50\x20\x20\x20\x20\x20\x20\x20\x20\x20\x53\x54\x4f\x52\x45\x53"
b"\x43\x55\x20\x20\x20\x20\x20\x20\x20\x20\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x15\x31\x2e"
b"\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x33\x2e\x31\x2e"
b"\x31\x2e\x31\x20\x00\x00\x64\x01\x00\xff\x00\x30\x00\x00\x19\x31"
b"\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x35\x2e\x31"
b"\x2e\x34\x2e\x31\x2e\x31\x2e\x32\x40\x00\x00\x13\x31\x2e\x32\x2e"
b"\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31\x2e\x32\x2e\x31\x40"
b"\x00\x00\x13\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38"
b"\x2e\x31\x2e\x32\x2e\x32\x40\x00\x00\x11\x31\x2e\x32\x2e\x38\x34"
b"\x30\x2e\x31\x30\x30\x30\x38\x2e\x31\x2e\x32\x50\x00\x00\x56\x51"
b"\x00\x00\x04\x00\x00\x40\x00\x52\x00\x00\x1b\x31\x2e\x32\x2e\x32"
b"\x37\x36\x2e\x30\x2e\x37\x32\x33\x30\x30\x31\x30\x2e\x33\x2e\x30"
b"\x2e\x33\x2e\x36\x2e\x30\x55\x00\x00\x0f\x4f\x46\x46\x49\x53\x5f"
b"\x44\x43\x4d\x54\x4b\x5f\x33\x36\x30\x58\x00\x00\x18\x02\x00\x00"
b"\x0a\x70\x79\x6e\x65\x74\x64\x69\x63\x6f\x6d\x00\x08\x70\x34\x73"
b"\x73\x77\x30\x72\x64"
)
# Called AET: ANY-SCP
# Calling AET: ECHOSCU
# Application Context Name: 1.2.840.10008.3.1.1.1
# Presentation Context Item:
# Presentation Context ID: 1
# Abstract Syntax: 1.2.840.10008.1.1 Verification SOP Class
# Transfer Syntax: 1.2.840.10008.1.2 Implicit VR Little Endian
# Presentation Context Item:
# Presentation Context ID: 3
# Abstract Syntax: 1.2.840.10008.5.1.4.1.1.2 CT Image Storage
# Transfer Syntax: 1.2.840.10008.1.2 Implicit VR Little Endian
# Presentation Context Item:
# Presentation Context ID: 5
# Abstract Syntax: 1.2.840.10008.5.1.4.1.1.4 MR Image Storage
# Transfer Syntax: 1.2.840.10008.1.2 Implicit VR Little Endian
# User Information
# Max Length Received: 16384
# Implementation Class UID: 1.2.826.0.1.3680043.9.3811.0.9.0
# Implementation Version Name: PYNETDICOM_090
# User Identity
# Type: 1
# Response requested: 1
# Primary field: pynetdicom
# Secondary field: (none)
# AsynchronousOperationsWindow
# Max operations invoked: 5
# Max operations performed: 5
# SOP Class Extended Negotiation Item
# SOP Class: 1.2.840.10008.5.1.4.1.1.2 CT Image Storage
# Service Class App Info: b'\x02\x00\x03\x00\x01\x00'
# SOP Class Extended Negotiation Item
# SOP Class: 1.2.840.10008.5.1.4.1.1.4 MR Image Storage
# Service Class App Info: b'\x02\x00\x03\x00\x01\x00'
a_associate_rq_user_id_ext_neg = (
b"\x01\x00\x00\x00\x01\xab\x00\x01\x00\x00\x41\x4e\x59\x2d\x53\x43"
b"\x50\x20\x20\x20\x20\x20\x20\x20\x20\x20\x45\x43\x48\x4f\x53\x43"
b"\x55\x20\x20\x20\x20\x20\x20\x20\x20\x20\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x15\x31\x2e"
b"\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x33\x2e\x31\x2e"
b"\x31\x2e\x31\x20\x00\x00\x2e\x01\x00\x00\x00\x30\x00\x00\x11\x31"
b"\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31\x2e\x31"
b"\x40\x00\x00\x11\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30"
b"\x38\x2e\x31\x2e\x32\x20\x00\x00\x36\x03\x00\x00\x00\x30\x00\x00"
b"\x19\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x35"
b"\x2e\x31\x2e\x34\x2e\x31\x2e\x31\x2e\x32\x40\x00\x00\x11\x31\x2e"
b"\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31\x2e\x32\x20"
b"\x00\x00\x36\x05\x00\x00\x00\x30\x00\x00\x19\x31\x2e\x32\x2e\x38"
b"\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e\x34\x2e\x31"
b"\x2e\x31\x2e\x34\x40\x00\x00\x11\x31\x2e\x32\x2e\x38\x34\x30\x2e"
b"\x31\x30\x30\x30\x38\x2e\x31\x2e\x32\x50\x00\x00\xa4\x51\x00\x00"
b"\x04\x00\x00\x3f\xfe\x52\x00\x00\x20\x31\x2e\x32\x2e\x38\x32\x36"
b"\x2e\x30\x2e\x31\x2e\x33\x36\x38\x30\x30\x34\x33\x2e\x39\x2e\x33"
b"\x38\x31\x31\x2e\x30\x2e\x39\x2e\x30\x55\x00\x00\x0e\x50\x59\x4e"
b"\x45\x54\x44\x49\x43\x4f\x4d\x5f\x30\x39\x30\x58\x00\x00\x10\x01"
b"\x01\x00\x0a\x70\x79\x6e\x65\x74\x64\x69\x63\x6f\x6d\x00\x00\x53"
b"\x00\x00\x04\x00\x05\x00\x05\x56\x00\x00\x21\x00\x19\x31\x2e\x32"
b"\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e\x34"
b"\x2e\x31\x2e\x31\x2e\x32\x02\x00\x03\x00\x01\x00\x56\x00\x00\x21"
b"\x00\x19\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e"
b"\x35\x2e\x31\x2e\x34\x2e\x31\x2e\x31\x2e\x34\x02\x00\x03\x00\x01"
b"\x00"
)
# Needs to be updated - no presentation context items?
a_associate_rq_com_ext_neg = (
b"\x02\x00\x00\x00\x01\x49\x00\x01\x00\x00\x41\x4e\x59\x2d\x53\x43"
b"\x50\x20\x20\x20\x20\x20\x20\x20\x20\x20\x45\x43\x48\x4f\x53\x43"
b"\x55\x20\x20\x20\x20\x20\x20\x20\x20\x20\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x15\x31\x2e"
b"\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x33\x2e\x31\x2e"
b"\x31\x2e\x31\x21\x00\x00\x19\x01\x00\x00\x00\x40\x00\x00\x11\x31"
b"\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31\x2e\x32"
b"\x21\x00\x00\x19\x03\x00\x00\x00\x40\x00\x00\x11\x31\x2e\x32\x2e"
b"\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31\x2e\x32\x21\x00\x00"
b"\x19\x05\x00\x00\x00\x40\x00\x00\x11\x31\x2e\x32\x2e\x38\x34\x30"
b"\x2e\x31\x30\x30\x30\x38\x2e\x31\x2e\x32\x50\x00\x00\x91\x51\x00"
b"\x00\x04\x00\x00\x40\x00\x52\x00\x00\x20\x31\x2e\x32\x2e\x38\x32"
b"\x36\x2e\x30\x2e\x31\x2e\x33\x36\x38\x30\x30\x34\x33\x2e\x39\x2e"
b"\x33\x38\x31\x31\x2e\x30\x2e\x39\x2e\x30\x55\x00\x00\x0e\x50\x59"
b"\x4e\x45\x54\x44\x49\x43\x4f\x4d\x5f\x30\x39\x30\x57\x00\x00\x4f"
b"\x00\x19\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e"
b"\x35\x2e\x31\x2e\x34\x2e\x31\x2e\x31\x2e\x34\x00\x11\x31\x2e\x32"
b"\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x34\x2e\x32\x00\x1f"
b"\x00\x1d\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e"
b"\x35\x2e\x31\x2e\x34\x2e\x31\x2e\x31\x2e\x38\x38\x2e\x32\x32"
)
# Called AET is 16 spaces
a_associate_rq_called = (
b"\x01\x00\x00\x00\x00\xd1\x00\x01\x00\x00"
b"\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20"
b"\x41\x4e\x59\x2d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x10\x00\x00\x15\x31\x2e\x32\x2e\x38\x34"
b"\x30\x2e\x31\x30\x30\x30\x38\x2e\x33\x2e\x31\x2e\x31\x2e"
b"\x31\x20\x00\x00\x2e\x01\x00\x00\x00\x30\x00\x00\x11\x31"
b"\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31"
b"\x2e\x31\x40\x00\x00\x11\x31\x2e\x32\x2e\x38\x34\x30\x2e"
b"\x31\x30\x30\x30\x38\x2e\x31\x2e\x32\x50\x00\x00\x3e\x51"
b"\x00\x00\x04\x00\x00\x3f\xfe\x52\x00\x00\x20\x31\x2e\x32"
b"\x2e\x38\x32\x36\x2e\x30\x2e\x31\x2e\x33\x36\x38\x30\x30"
b"\x34\x33\x2e\x39\x2e\x33\x38\x31\x31\x2e\x30\x2e\x39\x2e"
b"\x30\x55\x00\x00\x0e\x50\x59\x4e\x45\x54\x44\x49\x43\x4f"
b"\x4d\x5f\x30\x39\x30"
)
# Calling AET is 16 spaces
a_associate_rq_calling = (
b"\x01\x00\x00\x00\x00\xd1\x00\x01\x00\x00"
b"\x41\x4e\x59\x2d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20"
b"\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x10\x00\x00\x15\x31\x2e\x32\x2e\x38\x34"
b"\x30\x2e\x31\x30\x30\x30\x38\x2e\x33\x2e\x31\x2e\x31\x2e"
b"\x31\x20\x00\x00\x2e\x01\x00\x00\x00\x30\x00\x00\x11\x31"
b"\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31"
b"\x2e\x31\x40\x00\x00\x11\x31\x2e\x32\x2e\x38\x34\x30\x2e"
b"\x31\x30\x30\x30\x38\x2e\x31\x2e\x32\x50\x00\x00\x3e\x51"
b"\x00\x00\x04\x00\x00\x3f\xfe\x52\x00\x00\x20\x31\x2e\x32"
b"\x2e\x38\x32\x36\x2e\x30\x2e\x31\x2e\x33\x36\x38\x30\x30"
b"\x34\x33\x2e\x39\x2e\x33\x38\x31\x31\x2e\x30\x2e\x39\x2e"
b"\x30\x55\x00\x00\x0e\x50\x59\x4e\x45\x54\x44\x49\x43\x4f"
b"\x4d\x5f\x30\x39\x30"
)
############################# A-ASSOCIATE-AC PDU #############################
# Called AET: ANY-SCP
# Calling AET: ECHOSCU
# Application Context Name: 1.2.840.10008.3.1.1.1
# Presentation Context Items:
# Presentation Context ID: 1
# Result: Accepted
# Transfer Syntax: 1.2.840.10008.1.2 Implicit VR Little Endian
# User Information
# Max Length Received: 16384
# Implementation Class UID: 1.2.276.0.7230010.3.0.3.6.0
# Implementation Version Name: OFFIS_DCMTK_360
a_associate_ac = (
b"\x02\x00\x00\x00\x00\xb8\x00\x01\x00\x00\x41\x4e\x59\x2d"
b"\x53\x43\x50\x20\x20\x20\x20\x20\x20\x20\x20\x20\x45\x43"
b"\x48\x4f\x53\x43\x55\x20\x20\x20\x20\x20\x20\x20\x20\x20"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x10\x00\x00\x15\x31\x2e\x32\x2e\x38\x34"
b"\x30\x2e\x31\x30\x30\x30\x38\x2e\x33\x2e\x31\x2e\x31\x2e"
b"\x31\x21\x00\x00\x19\x01\x00\x00\x00\x40\x00\x00\x11\x31"
b"\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31"
b"\x2e\x32\x50\x00\x00\x3a\x51\x00\x00\x04\x00\x00\x40\x00"
b"\x52\x00\x00\x1b\x31\x2e\x32\x2e\x32\x37\x36\x2e\x30\x2e"
b"\x37\x32\x33\x30\x30\x31\x30\x2e\x33\x2e\x30\x2e\x33\x2e"
b"\x36\x2e\x30\x55\x00\x00\x0f\x4f\x46\x46\x49\x53\x5f\x44"
b"\x43\x4d\x54\x4b\x5f\x33\x36\x30"
)
# Called AET: ANY-SCP
# Calling AET: ECHOSCU
# Application Context Name: 1.2.840.10008.3.1.1.1
# Presentation Context Items:
# Presentation Context ID: 1
# Result: Accepted
# Transfer Syntax: 1.2.840.10008.1.2 Implicit VR Little Endian
# User Information
# Max Length Received: 16384
# Implementation Class UID: 1.2.276.0.7230010.3.0.3.6.0
# Implementation Version Name: OFFIS_DCMTK_360
# User Identity AC
# Server response: b'Accepted'
a_associate_ac_user = (
b"\x02\x00\x00\x00\x00\xb8\x00\x01\x00\x00"
b"\x41\x4e\x59\x2d\x53\x43\x50\x20\x20\x20"
b"\x20\x20\x20\x20\x20\x20\x45\x43\x48\x4f"
b"\x53\x43\x55\x20\x20\x20\x20\x20\x20\x20"
b"\x20\x20\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x10\x00\x00\x15\x31\x2e"
b"\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30"
b"\x38\x2e\x33\x2e\x31\x2e\x31\x2e\x31\x21"
b"\x00\x00\x19\x01\x00\x00\x00\x40\x00\x00"
b"\x11\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31"
b"\x30\x30\x30\x38\x2e\x31\x2e\x32\x50\x00"
b"\x00\x48\x51\x00\x00\x04\x00\x00\x40\x00"
b"\x52\x00\x00\x1b\x31\x2e\x32\x2e\x32\x37"
b"\x36\x2e\x30\x2e\x37\x32\x33\x30\x30\x31"
b"\x30\x2e\x33\x2e\x30\x2e\x33\x2e\x36\x2e"
b"\x30\x55\x00\x00\x0f\x4f\x46\x46\x49\x53"
b"\x5f\x44\x43\x4d\x54\x4b\x5f\x33\x36\x30"
b"\x59\x00\x00\x0a\x00\x08\x41\x63\x63\x65"
b"\x70\x74\x65\x64"
)
# Issue 342
# Called AET: ANY-SCP
# Calling AET: PYNETDICOM
# Application Context Name: 1.2.840.10008.3.1.1.1
# Presentation Context Items:
# Presentation Context ID: 1
# Abstract Syntax: Verification SOP Class
# SCP/SCU Role: Default
# Result: Accepted
# Transfer Syntax: 1.2.840.10008.1.2.1 Explicit VR Little Endian
# Presentation Context ID: 3
# Abstract Syntax: Basic Grayscale Print Management Meta SOP Class
# SCP/SCU Role: Default
# Result: Abstract Syntax Not Supported
# Transfer Syntax: None
# User Information
# Max Length Received: 28672
# Implementation Class UID: 2.16.840.1
# Implementation Version Name: MergeCOM3_390IB2
# Extended Negotiation
# SOP Extended: None
# Async Ops: None
# User ID: None
a_associate_ac_zero_ts = (
b"\x02\x00\x00\x00\x00\xb6\x00\x01\x00\x00\x41\x4e\x59\x2d\x53\x43"
b"\x50\x20\x20\x20\x20\x20\x20\x20\x20\x20\x50\x59\x4e\x45\x54\x44"
b"\x49\x43\x4f\x4d\x20\x20\x20\x20\x20\x20\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x15\x31\x2e"
b"\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x33\x2e\x31\x2e"
b"\x31\x2e\x31\x21\x00\x00\x1b\x01\x00\x00\x00\x40\x00\x00\x13\x31"
b"\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31\x2e\x32"
b"\x2e\x31\x21\x00\x00\x08\x03\x00\x03\x00\x40\x00\x00\x00\x50\x00"
b"\x00\x2a\x51\x00\x00\x04\x00\x00\x70\x00\x52\x00\x00\x0a\x32\x2e"
b"\x31\x36\x2e\x38\x34\x30\x2e\x31\x55\x00\x00\x10\x4d\x65\x72\x67"
b"\x65\x43\x4f\x4d\x33\x5f\x33\x39\x30\x49\x42\x32"
)
# Issue 361
# Called AET: ANY-SCP
# Calling AET: PYNETDICOM
# Application Context Name: 1.2.840.10008.3.1.1.1
# Presentation Context Items:
# Presentation Context ID: 1
# Abstract Syntax: Verification SOP Class
# SCP/SCU Role: Default
# Result: Reject
# Transfer Syntax: (no Transfer Syntax Sub-Item)
# User Information
# Max Length Received: 16382
# Implementation Class UID: 1.2.826.0.1.3680043.9.3811.1.4.0
# Implementation Version Name: PYNETDICOM_140
# Extended Negotiation
# SOP Extended: None
# Async Ops: None
# User ID: None
a_associate_ac_no_ts = (
b"\x02\x00\x00\x00\x00\xa7\x00\x01\x00\x00\x41\x4e\x59\x2d\x53\x43"
b"\x50\x20\x20\x20\x20\x20\x20\x20\x20\x20\x50\x59\x4e\x45\x54\x44"
b"\x49\x43\x4f\x4d\x20\x20\x20\x20\x20\x20\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x15\x31\x2e"
b"\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x33\x2e\x31\x2e"
b"\x31\x2e\x31\x21\x00\x00\x04\x01\x00\x03\x00"
b"\x50\x00\x00\x3e\x51\x00\x00\x04\x00\x00\x3f\xfe\x52\x00\x00\x20"
b"\x31\x2e\x32\x2e\x38\x32\x36\x2e\x30\x2e\x31\x2e\x33\x36\x38\x30"
b"\x30\x34\x33\x2e\x39\x2e\x33\x38\x31\x31\x2e\x31\x2e\x34\x2e\x30"
b"\x55\x00\x00\x0e\x50\x59\x4e\x45\x54\x44\x49\x43\x4f\x4d\x5f\x31"
b"\x34\x30"
)
############################# A-ASSOCIATE-RJ PDU #############################
# Result: Rejected (Permanent)
# Source: DUL service-user
# Reason: No reason given
a_associate_rj = b"\x03\x00\x00\x00\x00\x04\x00\x01\x01\x01"
############################## A-RELEASE-RJ PDU ##############################
a_release_rq = b"\x05\x00\x00\x00\x00\x04\x00\x00\x00\x00"
############################## A-RELEASE-RP PDU ##############################
a_release_rp = b"\x06\x00\x00\x00\x00\x04\x00\x00\x00\x00"
############################### A-ABORT-RQ PDU ###############################
# Source: DUL service-user
# Reason: No reason given
a_abort = b"\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00"
############################## A-P-ABORT-RQ PDU ##############################
# Source: DUL service-provider
# Reason: Unrecognised PDU parameter
a_p_abort = b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x04"
################################ P-DATA-TF PDU ###############################
# Contains a C-ECHO message
# Context ID: 1
# Data: \x03\x00\x00\x00\x00\x04\x00\x00\x00\x42\x00\x00\x00\x00\x00\x02\x00
# \x12\x00\x00\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38
# \x2e\x31\x2e\x31\x00\x00\x00\x00\x01\x02\x00\x00\x00\x30\x80\x00\x00
# \x20\x01\x02\x00\x00\x00\x01\x00\x00\x00\x00\x08\x02\x00\x00\x00\x01
# \x01\x00\x00\x00\x09\x02\x00\x00\x00\x00\x00
# P-DATA
# PDU type: 04
# Reserved: 00
# PDU Length: 00 00 00 54 (84)
# PDU Items:
# Item length: 00 00 00 50 (80)
# Context ID: 01
# PDV:
# 03 - Command information, last fragment
# 00 00 00 00 | 04 00 00 00 | 42 00 - Command Group Length (66)
# 00 00 02 00 | 12 00 00 00 | 31 2e ... 31 00 - Affected SOP Class UID
# 00 00 00 01 | 02 00 00 00 | 30 80 - Command Field (32816)
# 00 00 20 01 | 02 00 00 00 | 01 00 - MessageIDBeingRespondedTo (1)
# 00 00 00 08 | 02 00 00 00 | 01 01 - Command Data Set Type (257)
# 00 00 00 09 | 02 00 00 00 | 00 00 - Status (0)
p_data_tf = (
b"\x04\x00\x00\x00\x00\x54\x00\x00\x00\x50\x01\x03\x00\x00\x00"
b"\x00\x04\x00\x00\x00\x42\x00\x00\x00\x00\x00\x02\x00\x12\x00"
b"\x00\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38"
b"\x2e\x31\x2e\x31\x00\x00\x00\x00\x01\x02\x00\x00\x00\x30\x80"
b"\x00\x00\x20\x01\x02\x00\x00\x00\x01\x00\x00\x00\x00\x08\x02"
b"\x00\x00\x00\x01\x01\x00\x00\x00\x09\x02\x00\x00\x00\x00\x00"
)
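# Illustrative decoding sketch for the P-DATA-TF breakdown above (standard
# library only; not part of the original test data):
#
#     import struct
#     pdu_type, _, pdu_length = struct.unpack(">BBL", p_data_tf[:6])
#     # pdu_type == 0x04 (P-DATA-TF), pdu_length == 0x54 (84 bytes of items)
#     item_length, context_id, msg_ctrl = struct.unpack(">LBB", p_data_tf[6:12])
#     # item_length == 0x50 (80), context_id == 1,
#     # msg_ctrl == 0x03 (command information, last fragment)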
# C-ECHO RQ
p_data_tf_rq = (
b"\x04\x00\x00\x00\x00\x4a" # P-DATA
b"\x00\x00\x00\x46\x01" # PDV Item
b"\x03"
b"\x00\x00\x00\x00\x04\x00\x00\x00\x3a\x00" # Command Group Length
b"\x00\x00\x00\x00\x02\x00\x12\x00\x00\x00" # Affected SOP Class UID
b"\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31\x2e\x31\x00"
b"\x00\x00\x00\x01\x02\x00\x00\x00\x30\x00" # Command Field
b"\x00\x00\x10\x01\x02\x00\x00\x00\x01\x00" # Message ID
b"\x00\x00\x00\x08\x02\x00\x00\x00\x01\x01" # Command Data Set Type
)
# AsynchronousOperationsWindow
# Max operations invoked: 5
# Max operations performed: 5
asynchronous_window_ops = b"\x53\x00\x00\x04\x00\x05\x00\x05"
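# Layout sketch (illustrative): item type 0x53, reserved byte, 2-byte item
# length (4), then two big-endian uint16 values:
#
#     struct.unpack(">BBHHH", asynchronous_window_ops)  # -> (0x53, 0, 4, 5, 5)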
############################## User Identity Sub Item ########################
# -RQ
# Type: 1
# Response requested: 1
# Primary field: pynetdicom
# Secondary field: (none)
user_identity_rq_user_nopw = (
b"\x58\x00\x00\x10\x01\x01\x00\x0a\x70\x79\x6e\x65\x74\x64\x69\x63"
b"\x6f\x6d\x00\x00"
)
# -RQ
# Type: 2
# Response requested: 0
# Primary field: pynetdicom
# Secondary field: p4ssw0rd
user_identity_rq_user_pass = (
b"\x58\x00\x00\x18\x02\x00\x00\x0a\x70\x79\x6e\x65\x74\x64\x69\x63"
b"\x6f\x6d\x00\x08\x70\x34\x73\x73\x77\x30\x72\x64"
)
# -AC
# Server response: b'Accepted'
user_identity_ac = b"\x59\x00\x00\x0a\x00\x08\x41\x63\x63\x65\x70\x74\x65\x64"
########################### Application Context Item #########################
# Application Context Name: 1.2.840.10008.3.1.1.1
application_context = (
b"\x10\x00\x00\x15\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31"
b"\x30\x30\x30\x38\x2e\x33\x2e\x31\x2e\x31\x2e\x31"
)
# Application Context Name without a value
application_context_empty = b"\x10\x00\x00\x00"
########################## Presentation Context Items #######################
# -RQ
# Presentation Context ID: 1
# Abstract Syntax: 1.2.840.10008.1.1 Verification SOP Class
# Transfer Syntax: 1.2.840.10008.1.2 Implicit VR Little Endian
presentation_context_rq = (
b"\x20\x00\x00\x2e\x01\x00\x00\x00\x30\x00\x00\x11\x31\x2e\x32\x2e"
b"\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31\x2e\x31\x40\x00\x00"
b"\x11\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31"
b"\x2e\x32"
)
# Issue 560: Non-ASCII encoded abstract syntax
presentation_context_rq_utf8 = (
b"\x20\x00\x00\x51\x4d\x00\xff\x00"
b"\x30\x00\x00\x32\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38"
b"\x2e\xe2\x80\x8b\x35\x2e\xe2\x80\x8b\x31\x2e\xe2\x80\x8b\x34\x2e\xe2"
b"\x80\x8b\x31\x2e\xe2\x80\x8b\x31\x2e\xe2\x80\x8b\x31\x30\x34\x2e\xe2"
b"\x80\x8b\x33"
b"\x40\x00\x00\x13\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31"
b"\x30\x30\x30\x38\x2e\x31\x2e\x32\x2e\x31"
)
# -AC
# Presentation Context ID: 1
# Result: Accepted
# Transfer Syntax: 1.2.840.10008.1.2 Implicit VR Little Endian
presentation_context_ac = (
b"\x21\x00\x00\x19\x01\x00\x00\x00\x40\x00\x00\x11\x31\x2e\x32\x2e"
b"\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31\x2e\x32"
)
############################ Abstract Syntax Sub Item ########################
# Abstract Syntax: 1.2.840.10008.1.1 Verification SOP Class
abstract_syntax = (
b"\x30\x00\x00\x11\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30"
b"\x38\x2e\x31\x2e\x31"
)
############################ Transfer Syntax Sub Item ########################
# Transfer Syntax: 1.2.840.10008.1.2 Implicit VR Little Endian
transfer_syntax = (
b"\x40\x00\x00\x11\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30"
b"\x38\x2e\x31\x2e\x32"
)
######################## Presentation Data Value Sub Item ####################
presentation_data = (
b"\x03\x00\x00\x00\x00\x04\x00\x00\x00\x42\x00\x00\x00\x00\x00\x02"
b"\x00\x12\x00\x00\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30"
b"\x30\x38\x2e\x31\x2e\x31\x00\x00\x00\x00\x01\x02\x00\x00\x00\x30"
b"\x80\x00\x00\x20\x01\x02\x00\x00\x00\x01\x00\x00\x00\x00\x08\x02"
b"\x00\x00\x00\x01\x01\x00\x00\x00\x09\x02\x00\x00\x00\x00\x00"
)
# Context ID: 1
# Data: \x03\x00\x00\x00\x00\x04\x00\x00\x00\x42\x00\x00\x00\x00\x00\x02\x00
# \x12\x00\x00\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38
# \x2e\x31\x2e\x31\x00\x00\x00\x00\x01\x02\x00\x00\x00\x30\x80\x00\x00
# \x20\x01\x02\x00\x00\x00\x01\x00\x00\x00\x00\x08\x02\x00\x00\x00\x01
# \x01\x00\x00\x00\x09\x02\x00\x00\x00\x00\x00
presentation_data_value = b"\x00\x00\x00\x50\x01" + presentation_data
######################## Maximum Length Received Sub Item ####################
# Max Length Received: 16382
maximum_length_received = b"\x51\x00\x00\x04\x00\x00\x3f\xfe"
######################## Implementation Class UID Sub Item ###################
# Implementation Class UID: 1.2.826.0.1.3680043.9.3811.0.9.0
implementation_class_uid = (
b"\x52\x00\x00\x20\x31\x2e\x32\x2e\x38\x32\x36\x2e\x30\x2e\x31\x2e"
b"\x33\x36\x38\x30\x30\x34\x33\x2e\x39\x2e\x33\x38\x31\x31\x2e\x30"
b"\x2e\x39\x2e\x30"
)
implementation_class_uid_empty = b"\x52\x00\x00\x00"
##################### Implementation Version Name Sub Item ###################
# Implementation Version Name: PYNETDICOM_090
implementation_version_name = (
b"\x55\x00\x00\x0e\x50\x59\x4e\x45\x54\x44\x49\x43\x4f\x4d\x5f" b"\x30\x39\x30"
)
implementation_version_name_empty = b"\x55\x00\x00\x00"
########################### Role Selection Sub Item ##########################
# SOP Class: 1.2.840.10008.5.1.4.1.1.2 CT Image Storage
# SCU Role: 0
# SCP Role: 1
role_selection = (
b"\x54\x00\x00\x1e"
b"\x00\x1a"
b"\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e"
b"\x34\x2e\x31\x2e\x31\x2e\x32\x31"
b"\x00\x01"
)
role_selection_odd = (
b"\x54\x00\x00\x1d"
b"\x00\x19"
b"\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e"
b"\x34\x2e\x31\x2e\x31\x2e\x32"
b"\x00\x01"
)
############################ User Information Item ###########################
# Implementation Class UID: 1.2.826.0.1.3680043.9.3811.0.9.0
# Implementation Version Name: PYNETDICOM_090
user_information = (
b"\x50\x00\x00\x3e\x51\x00\x00\x04\x00\x00\x3f\xfe\x52\x00\x00\x20"
b"\x31\x2e\x32\x2e\x38\x32\x36\x2e\x30\x2e\x31\x2e\x33\x36\x38\x30"
b"\x30\x34\x33\x2e\x39\x2e\x33\x38\x31\x31\x2e\x30\x2e\x39\x2e\x30"
b"\x55\x00\x00\x0e\x50\x59\x4e\x45\x54\x44\x49\x43\x4f\x4d\x5f\x30"
b"\x39\x30"
)
######################## Extended Negotiation Sub Item #######################
# SOP Class: 1.2.840.10008.5.1.4.1.1.2 CT Image Storage
# Service Class App Info: b'\x02\x00\x03\x00\x01\x00'
extended_negotiation = (
b"\x56\x00\x00\x21\x00\x19\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30"
b"\x30\x30\x38\x2e\x35\x2e\x31\x2e\x34\x2e\x31\x2e\x31\x2e\x32\x02"
b"\x00\x03\x00\x01\x00"
)
#################### Common Extended Negotiation Sub Item ####################
# SOP Class: 1.2.840.10008.5.1.4.1.1.4 MR Image Storage
# Service Class: 1.2.840.10008.4.2 Storage Service Class
# Related general SOP Class ID(s):
# 1.2.840.10008.5.1.4.1.1.88.22 Enhanced SR Storage
common_extended_negotiation = (
b"\x57\x00\x00\x4f\x00\x19\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30"
b"\x30\x30\x38\x2e\x35\x2e\x31\x2e\x34\x2e\x31\x2e\x31\x2e\x34\x00"
b"\x11\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x34"
b"\x2e\x32\x00\x1f\x00\x1d\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30"
b"\x30\x30\x38\x2e\x35\x2e\x31\x2e\x34\x2e\x31\x2e\x31\x2e\x38\x38"
b"\x2e\x32\x32"
)
|
|
# Copyright (c) 2010-2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2010-2011 Advanced Micro Devices, Inc.
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Kevin Lim
from m5.objects import *
from Benchmarks import *
from m5.util import *
class CowIdeDisk(IdeDisk):
image = CowDiskImage(child=RawDiskImage(read_only=True),
read_only=False)
def childImage(self, ci):
self.image.child.image_file = ci
class MemBus(CoherentXBar):
badaddr_responder = BadAddr()
default = Self.badaddr_responder.pio
def makeLinuxAlphaSystem(mem_mode, mdesc = None, ruby = False):
class BaseTsunami(Tsunami):
ethernet = NSGigE(pci_bus=0, pci_dev=1, pci_func=0)
ide = IdeController(disks=[Parent.disk0, Parent.disk2],
pci_func=0, pci_dev=0, pci_bus=0)
self = LinuxAlphaSystem()
if not mdesc:
# generic system
mdesc = SysConfig()
self.readfile = mdesc.script()
self.tsunami = BaseTsunami()
# Create the io bus to connect all device ports
self.iobus = NoncoherentXBar()
self.tsunami.attachIO(self.iobus)
self.tsunami.ide.pio = self.iobus.master
self.tsunami.ide.config = self.iobus.master
self.tsunami.ethernet.pio = self.iobus.master
self.tsunami.ethernet.config = self.iobus.master
if ruby:
# Store the dma devices for later connection to dma ruby ports.
# Prefix dma_ports with an underscore to avoid the SimObjectVector check.
self._dma_ports = [self.tsunami.ide.dma, self.tsunami.ethernet.dma]
else:
self.membus = MemBus()
# By default the bridge responds to all addresses above the I/O
# base address (including the PCI config space)
IO_address_space_base = 0x80000000000
self.bridge = Bridge(delay='50ns',
ranges = [AddrRange(IO_address_space_base, Addr.max)])
self.bridge.master = self.iobus.slave
self.bridge.slave = self.membus.master
self.tsunami.ide.dma = self.iobus.slave
self.tsunami.ethernet.dma = self.iobus.slave
self.system_port = self.membus.slave
self.mem_ranges = [AddrRange(mdesc.mem())]
self.disk0 = CowIdeDisk(driveID='master')
self.disk2 = CowIdeDisk(driveID='master')
self.disk0.childImage(mdesc.disk())
self.disk2.childImage(disk('linux-bigswap2.img'))
self.simple_disk = SimpleDisk(disk=RawDiskImage(image_file = mdesc.disk(),
read_only = True))
self.intrctrl = IntrControl()
self.mem_mode = mem_mode
self.terminal = Terminal()
self.kernel = binary('vmlinux')
self.pal = binary('ts_osfpal')
self.console = binary('console')
self.boot_osflags = 'root=/dev/hda1 console=ttyS0'
return self
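# Illustrative only: these factory functions are normally driven from a
# top-level gem5 config script (e.g. configs/example/fs.py) rather than called
# directly. A rough sketch, with CPU/port wiring and memory controllers elided
# and all arguments treated as placeholders, looks like:
#
#     test_sys = makeLinuxAlphaSystem('timing', SysConfig())
#     test_sys.cpu = TimingSimpleCPU()
#     root = Root(full_system=True, system=test_sys)
#     m5.instantiate()
#     m5.simulate()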
def makeSparcSystem(mem_mode, mdesc = None):
# Constants from iob.cc and uart8250.cc
iob_man_addr = 0x9800000000
uart_pio_size = 8
class CowMmDisk(MmDisk):
image = CowDiskImage(child=RawDiskImage(read_only=True),
read_only=False)
def childImage(self, ci):
self.image.child.image_file = ci
self = SparcSystem()
if not mdesc:
# generic system
mdesc = SysConfig()
self.readfile = mdesc.script()
self.iobus = NoncoherentXBar()
self.membus = MemBus()
self.bridge = Bridge(delay='50ns')
self.t1000 = T1000()
self.t1000.attachOnChipIO(self.membus)
self.t1000.attachIO(self.iobus)
self.mem_ranges = [AddrRange(Addr('1MB'), size = '64MB'),
AddrRange(Addr('2GB'), size ='256MB')]
self.bridge.master = self.iobus.slave
self.bridge.slave = self.membus.master
self.rom.port = self.membus.master
self.nvram.port = self.membus.master
self.hypervisor_desc.port = self.membus.master
self.partition_desc.port = self.membus.master
self.intrctrl = IntrControl()
self.disk0 = CowMmDisk()
self.disk0.childImage(disk('disk.s10hw2'))
self.disk0.pio = self.iobus.master
# The puart0 and hvuart are placed on the IO bus, so create ranges
# for them. The remaining IO range is rather fragmented, so poke
# holes for the iob and partition descriptors etc.
self.bridge.ranges = \
[
AddrRange(self.t1000.puart0.pio_addr,
self.t1000.puart0.pio_addr + uart_pio_size - 1),
AddrRange(self.disk0.pio_addr,
self.t1000.fake_jbi.pio_addr +
self.t1000.fake_jbi.pio_size - 1),
AddrRange(self.t1000.fake_clk.pio_addr,
iob_man_addr - 1),
AddrRange(self.t1000.fake_l2_1.pio_addr,
self.t1000.fake_ssi.pio_addr +
self.t1000.fake_ssi.pio_size - 1),
AddrRange(self.t1000.hvuart.pio_addr,
self.t1000.hvuart.pio_addr + uart_pio_size - 1)
]
self.reset_bin = binary('reset_new.bin')
self.hypervisor_bin = binary('q_new.bin')
self.openboot_bin = binary('openboot_new.bin')
self.nvram_bin = binary('nvram1')
self.hypervisor_desc_bin = binary('1up-hv.bin')
self.partition_desc_bin = binary('1up-md.bin')
self.system_port = self.membus.slave
return self
def makeArmSystem(mem_mode, machine_type, mdesc = None,
dtb_filename = None, bare_metal=False,
sdcard_image = "sdcard-1g-mxplayer.img"):
assert machine_type
if bare_metal:
self = ArmSystem()
else:
self = LinuxArmSystem()
if not mdesc:
# generic system
mdesc = SysConfig()
self.readfile = mdesc.script()
self.iobus = NoncoherentXBar()
self.membus = MemBus()
self.membus.badaddr_responder.warn_access = "warn"
self.bridge = Bridge(delay='50ns')
self.bridge.master = self.iobus.slave
self.bridge.slave = self.membus.master
self.mem_mode = mem_mode
if machine_type == "RealView_PBX":
self.realview = RealViewPBX()
elif machine_type == "RealView_EB":
self.realview = RealViewEB()
elif machine_type == "VExpress_ELT":
self.realview = VExpress_ELT()
elif machine_type == "VExpress_EMM":
self.realview = VExpress_EMM()
elif machine_type == "VExpress_EMM64":
self.realview = VExpress_EMM64()
else:
fatal("Unknown machine type: %s" % machine_type)
self.cf0 = CowIdeDisk(driveID='master')
self.cf2 = CowIdeDisk(driveID='master')
self.cf0.childImage(mdesc.disk())
self.cf2.childImage(disk(sdcard_image))
# Attach any PCI devices this platform supports
self.realview.attachPciDevices()
# default to an IDE controller rather than a CF one
try:
self.realview.ide.disks = [self.cf0, self.cf2]
except AttributeError:
# platform exposes a CompactFlash controller instead of an IDE one
self.realview.cf_ctrl.disks = [self.cf0, self.cf2]
if bare_metal:
# EOT character on UART will end the simulation
self.realview.uart.end_on_eot = True
self.mem_ranges = [AddrRange(self.realview.mem_start_addr,
size = mdesc.mem())]
else:
if machine_type == "VExpress_EMM64":
self.kernel = binary('vmlinux-3.16-aarch64-vexpress-emm64-pcie')
elif machine_type == "VExpress_EMM":
self.kernel = binary('vmlinux-3.3-arm-vexpress-emm-pcie')
else:
self.kernel = binary('vmlinux.arm.smp.fb.2.6.38.8')
if dtb_filename:
self.dtb_filename = binary(dtb_filename)
self.machine_type = machine_type
# Ensure that writes to the UART actually go out early in the boot
boot_flags = 'earlyprintk=pl011,0x1c090000 console=ttyAMA0 ' + \
'lpj=19988480 norandmaps rw loglevel=8 ' + \
'mem=%s root=/dev/sda1' % mdesc.mem()
self.mem_ranges = []
size_remain = long(Addr(mdesc.mem()))
for region in self.realview._mem_regions:
if size_remain > long(region[1]):
self.mem_ranges.append(AddrRange(region[0], size=region[1]))
size_remain = size_remain - long(region[1])
else:
self.mem_ranges.append(AddrRange(region[0], size=size_remain))
size_remain = 0
break
warn("Memory size specified spans more than one region. Creating" \
" another memory controller for that range.")
if size_remain > 0:
fatal("The currently selected ARM platforms doesn't support" \
" the amount of DRAM you've selected. Please try" \
" another platform")
self.realview.setupBootLoader(self.membus, self, binary)
self.gic_cpu_addr = self.realview.gic.cpu_addr
self.flags_addr = self.realview.realview_io.pio_addr + 0x30
if mdesc.disk().lower().count('android'):
boot_flags += " init=/init "
self.boot_osflags = boot_flags
self.realview.attachOnChipIO(self.membus, self.bridge)
self.realview.attachIO(self.iobus)
self.intrctrl = IntrControl()
self.terminal = Terminal()
self.vncserver = VncServer()
self.system_port = self.membus.slave
return self
def makeLinuxMipsSystem(mem_mode, mdesc = None):
class BaseMalta(Malta):
ethernet = NSGigE(pci_bus=0, pci_dev=1, pci_func=0)
ide = IdeController(disks=[Parent.disk0, Parent.disk2],
pci_func=0, pci_dev=0, pci_bus=0)
self = LinuxMipsSystem()
if not mdesc:
# generic system
mdesc = SysConfig()
self.readfile = mdesc.script()
self.iobus = NoncoherentXBar()
self.membus = MemBus()
self.bridge = Bridge(delay='50ns')
self.mem_ranges = [AddrRange('1GB')]
self.bridge.master = self.iobus.slave
self.bridge.slave = self.membus.master
self.disk0 = CowIdeDisk(driveID='master')
self.disk2 = CowIdeDisk(driveID='master')
self.disk0.childImage(mdesc.disk())
self.disk2.childImage(disk('linux-bigswap2.img'))
self.malta = BaseMalta()
self.malta.attachIO(self.iobus)
self.malta.ide.pio = self.iobus.master
self.malta.ide.config = self.iobus.master
self.malta.ide.dma = self.iobus.slave
self.malta.ethernet.pio = self.iobus.master
self.malta.ethernet.config = self.iobus.master
self.malta.ethernet.dma = self.iobus.slave
self.simple_disk = SimpleDisk(disk=RawDiskImage(image_file = mdesc.disk(),
read_only = True))
self.intrctrl = IntrControl()
self.mem_mode = mem_mode
self.terminal = Terminal()
self.kernel = binary('mips/vmlinux')
self.console = binary('mips/console')
self.boot_osflags = 'root=/dev/hda1 console=ttyS0'
self.system_port = self.membus.slave
return self
def x86IOAddress(port):
IO_address_space_base = 0x8000000000000000
return IO_address_space_base + port
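# For example, the legacy COM1 port 0x3f8 maps into gem5's x86 I/O address
# space at IO_address_space_base + 0x3f8:
#   x86IOAddress(0x3f8) == 0x80000000000003f8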
def connectX86ClassicSystem(x86_sys, numCPUs):
# Constants similar to x86_traits.hh
IO_address_space_base = 0x8000000000000000
pci_config_address_space_base = 0xc000000000000000
interrupts_address_space_base = 0xa000000000000000
APIC_range_size = 1 << 12
x86_sys.membus = MemBus()
# North Bridge
x86_sys.iobus = NoncoherentXBar()
x86_sys.bridge = Bridge(delay='50ns')
x86_sys.bridge.master = x86_sys.iobus.slave
x86_sys.bridge.slave = x86_sys.membus.master
# Allow the bridge to pass through the IO APIC (two pages),
# everything in the IO address range up to the local APIC, and
# then the entire PCI address space and beyond
x86_sys.bridge.ranges = \
[
AddrRange(x86_sys.pc.south_bridge.io_apic.pio_addr,
x86_sys.pc.south_bridge.io_apic.pio_addr +
APIC_range_size - 1),
AddrRange(IO_address_space_base,
interrupts_address_space_base - 1),
AddrRange(pci_config_address_space_base,
Addr.max)
]
# Create a bridge from the IO bus to the memory bus to allow access to
# the local APIC (two pages)
x86_sys.apicbridge = Bridge(delay='50ns')
x86_sys.apicbridge.slave = x86_sys.iobus.master
x86_sys.apicbridge.master = x86_sys.membus.slave
x86_sys.apicbridge.ranges = [AddrRange(interrupts_address_space_base,
interrupts_address_space_base +
numCPUs * APIC_range_size
- 1)]
# connect the io bus
x86_sys.pc.attachIO(x86_sys.iobus)
x86_sys.system_port = x86_sys.membus.slave
def connectX86RubySystem(x86_sys):
# North Bridge
x86_sys.iobus = NoncoherentXBar()
# add the ide to the list of dma devices that later need to attach to
# dma controllers
x86_sys._dma_ports = [x86_sys.pc.south_bridge.ide.dma]
x86_sys.pc.attachIO(x86_sys.iobus, x86_sys._dma_ports)
def makeX86System(mem_mode, numCPUs = 1, mdesc = None, self = None,
Ruby = False):
if self is None:
self = X86System()
if not mdesc:
# generic system
mdesc = SysConfig()
self.readfile = mdesc.script()
self.mem_mode = mem_mode
# Physical memory
# On the PC platform, the memory region 0xC0000000-0xFFFFFFFF is reserved
# for various devices. Hence, if the physical memory size is greater than
# 3GB, we need to split it into two parts.
excess_mem_size = \
convert.toMemorySize(mdesc.mem()) - convert.toMemorySize('3GB')
if excess_mem_size <= 0:
self.mem_ranges = [AddrRange(mdesc.mem())]
else:
warn("Physical memory size specified is %s which is greater than " \
"3GB. Twice the number of memory controllers would be " \
"created." % (mdesc.mem()))
self.mem_ranges = [AddrRange('3GB'),
AddrRange(Addr('4GB'), size = excess_mem_size)]
# Platform
self.pc = Pc()
# Create and connect the busses required by each memory system
if Ruby:
connectX86RubySystem(self)
else:
connectX86ClassicSystem(self, numCPUs)
self.intrctrl = IntrControl()
# Disks
disk0 = CowIdeDisk(driveID='master')
disk2 = CowIdeDisk(driveID='master')
disk0.childImage(mdesc.disk())
disk2.childImage(disk('linux-bigswap2.img'))
self.pc.south_bridge.ide.disks = [disk0, disk2]
# Add in a Bios information structure.
structures = [X86SMBiosBiosInformation()]
self.smbios_table.structures = structures
# Set up the Intel MP table
base_entries = []
ext_entries = []
for i in xrange(numCPUs):
bp = X86IntelMPProcessor(
local_apic_id = i,
local_apic_version = 0x14,
enable = True,
bootstrap = (i == 0))
base_entries.append(bp)
io_apic = X86IntelMPIOAPIC(
id = numCPUs,
version = 0x11,
enable = True,
address = 0xfec00000)
self.pc.south_bridge.io_apic.apic_id = io_apic.id
base_entries.append(io_apic)
isa_bus = X86IntelMPBus(bus_id = 0, bus_type='ISA')
base_entries.append(isa_bus)
pci_bus = X86IntelMPBus(bus_id = 1, bus_type='PCI')
base_entries.append(pci_bus)
connect_busses = X86IntelMPBusHierarchy(bus_id=0,
subtractive_decode=True, parent_bus=1)
ext_entries.append(connect_busses)
pci_dev4_inta = X86IntelMPIOIntAssignment(
interrupt_type = 'INT',
polarity = 'ConformPolarity',
trigger = 'ConformTrigger',
source_bus_id = 1,
source_bus_irq = 0 + (4 << 2),
dest_io_apic_id = io_apic.id,
dest_io_apic_intin = 16)
base_entries.append(pci_dev4_inta)
def assignISAInt(irq, apicPin):
assign_8259_to_apic = X86IntelMPIOIntAssignment(
interrupt_type = 'ExtInt',
polarity = 'ConformPolarity',
trigger = 'ConformTrigger',
source_bus_id = 0,
source_bus_irq = irq,
dest_io_apic_id = io_apic.id,
dest_io_apic_intin = 0)
base_entries.append(assign_8259_to_apic)
assign_to_apic = X86IntelMPIOIntAssignment(
interrupt_type = 'INT',
polarity = 'ConformPolarity',
trigger = 'ConformTrigger',
source_bus_id = 0,
source_bus_irq = irq,
dest_io_apic_id = io_apic.id,
dest_io_apic_intin = apicPin)
base_entries.append(assign_to_apic)
assignISAInt(0, 2)
assignISAInt(1, 1)
for i in range(3, 15):
assignISAInt(i, i)
self.intel_mp_table.base_entries = base_entries
self.intel_mp_table.ext_entries = ext_entries
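# Plain-number sketch of the >3GB split performed in makeX86System above (the
# real code builds AddrRange objects; this only mirrors the arithmetic):
def _split_phys_mem(total_bytes, three_gb=3 * 1024 ** 3):
    """Return (low_range_bytes, high_range_bytes) around the PC device hole."""
    excess = total_bytes - three_gb
    if excess <= 0:
        return (total_bytes, 0)
    # The low range ends at 3GB; the high range starts at 4GB, above the
    # 0xC0000000-0xFFFFFFFF region reserved for devices.
    return (three_gb, excess)

# _split_phys_mem(4 * 1024 ** 3) == (3 * 1024 ** 3, 1024 ** 3)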
def makeLinuxX86System(mem_mode, numCPUs = 1, mdesc = None,
Ruby = False):
self = LinuxX86System()
# Build up the x86 system and then specialize it for Linux
makeX86System(mem_mode, numCPUs, mdesc, self, Ruby)
# We assume below that there's at least 1MB of memory. We'll require 2MB
# just to avoid corner cases.
phys_mem_size = sum(map(lambda r: r.size(), self.mem_ranges))
assert(phys_mem_size >= 0x200000)
assert(len(self.mem_ranges) <= 2)
entries = \
[
# Mark the first megabyte of memory as reserved
X86E820Entry(addr = 0, size = '639kB', range_type = 1),
X86E820Entry(addr = 0x9fc00, size = '385kB', range_type = 2),
# Mark the rest of physical memory as available
X86E820Entry(addr = 0x100000,
size = '%dB' % (self.mem_ranges[0].size() - 0x100000),
range_type = 1),
# Reserve the last 64kB of the 32-bit address space for the
# m5op interface
X86E820Entry(addr=0xFFFF0000, size='64kB', range_type=2),
]
# In case the physical memory is greater than 3GB, we split it into two
# parts and add a separate e820 entry for the second part. This entry
# starts at 0x100000000, which is the first address after the space
# reserved for devices.
if len(self.mem_ranges) == 2:
entries.append(X86E820Entry(addr = 0x100000000,
size = '%dB' % (self.mem_ranges[1].size()), range_type = 1))
self.e820_table.entries = entries
# Command line
self.boot_osflags = 'earlyprintk=ttyS0 console=ttyS0 lpj=7999923 ' + \
'root=/dev/hda1'
self.kernel = binary('x86_64-vmlinux-2.6.22.9')
return self
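# Worked example of the e820 map built above for a hypothetical 4GB mdesc.mem()
# (so mem_ranges == [0..3GB, 4GB..5GB]):
#   addr 0x0           size 639kB      type 1  usable low memory
#   addr 0x9fc00       size 385kB      type 2  reserved
#   addr 0x100000      size 3GB - 1MB  type 1  usable, rest of the first range
#   addr 0xFFFF0000    size 64kB       type 2  reserved for the m5op interface
#   addr 0x100000000   size 1GB        type 1  usable, second range above the
#                                              device hole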
def makeDualRoot(full_system, testSystem, driveSystem, dumpfile):
self = Root(full_system = full_system)
self.testsys = testSystem
self.drivesys = driveSystem
self.etherlink = EtherLink()
if hasattr(testSystem, 'realview'):
self.etherlink.int0 = Parent.testsys.realview.ethernet.interface
self.etherlink.int1 = Parent.drivesys.realview.ethernet.interface
elif hasattr(testSystem, 'tsunami'):
self.etherlink.int0 = Parent.testsys.tsunami.ethernet.interface
self.etherlink.int1 = Parent.drivesys.tsunami.ethernet.interface
else:
fatal("Don't know how to connect these system together")
if dumpfile:
self.etherdump = EtherDump(file=dumpfile)
self.etherlink.dump = Parent.etherdump
return self
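# Illustrative only: a dual-system (test/drive) simulation is typically built
# by creating two full systems with one of the factories above and joining
# them here; the dump file name below is just a placeholder:
#
#     root = makeDualRoot(True, test_sys, drive_sys, 'ethertrace.pcap')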
|
|
import unittest
from time import time
from nose.tools import eq_
from amon.apps.servers.models import (
ServerModel,
cloud_server_model
)
from amon.apps.devices.models import interfaces_model, volumes_model
from amon.apps.processes.models import process_model
from amon.apps.system.models import system_model
from amon.apps.tags.models import tags_model, tag_groups_model
from amon.apps.cloudservers.models import cloud_credentials_model
now = int(time())
minute_ago = (now-60)
two_minutes_ago = (now-120)
five_minutes_ago = (now-300)
class ServerModelTest(unittest.TestCase):
def setUp(self):
self.model = ServerModel()
self.collection = self.model.mongo.get_collection('servers')
def _cleanup(self):
self.collection.remove()
tags_model.collection.remove()
tag_groups_model.collection.remove()
def get_or_create_by_machine_id_test(self):
self._cleanup()
self.collection.insert({"name" : "cloud-server", "key": "somekeyhere", "instance_id": "150"})
self.model.get_or_create_by_machine_id(instance_id="150", machine_id="cloudkey")
result = self.collection.find_one()
assert result["key"] == "cloudkey"
assert result["name"] == "cloud-server"
self._cleanup()
self.collection.insert({"name" : "test", "key": "somekeyhere", "instance_id": ""})
self.model.get_or_create_by_machine_id(instance_id="", machine_id="somekeyhere")
result = self.collection.find_one()
assert result["key"] == "somekeyhere"
assert result["name"] == "test"
self._cleanup()
def get_all_with_tags_test(self):
self._cleanup()
tags = {'rds': 'value', 'ebs': 'volume'}
tags_list = tags_model.create_and_return_ids(tags)
self.collection.insert({"name" : "test", "tags": tags_list})
result = self.model.get_with_tags(tags=[tags_list[0]])
assert len(result) == 1
result = self.model.get_with_tags(tags=tags_list)
assert len(result) == 1
self._cleanup()
tags = {'rds': 'value', 'ebs': 'volume', 'region': 'uswest-1', 'provider': 'amazon'}
tags_list = tags_model.create_and_return_ids(tags)
self.collection.insert({"name" : "test", "tags": tags_list})
result = self.model.get_with_tags(tags=[tags_list[0], tags_list[1], tags_list[2]])
assert len(result) == 1
result = self.model.get_with_tags(tags=tags_list)
assert len(result) == 1
def check_server_exists_test(self):
self.collection.remove()
self.collection.insert({"name" : "test"})
result = self.model.server_exists('test')
eq_(result, 1)
self.collection.remove()
def update_server_test(self):
self.collection.remove()
self.collection.insert({"name" : "test"})
server = self.collection.find_one()
self.model.update({"name": "test_updated", "default": 1 }, server['_id'])
result = self.collection.find_one()
eq_(result['name'],'test_updated')
self.collection.remove()
def add_server_test(self):
self.collection.remove()
self.model.add('test')
result = self.collection.find_one()
eq_(result['name'],'test')
assert result['key']
self.collection.remove()
def get_server_test(self):
self.collection.remove()
self.collection.insert({"name" : "test"})
server = self.collection.find_one()
result = self.model.get_by_id(server['_id'])
eq_(result['name'],'test')
eq_(result['_id'],server['_id'])
self.collection.remove()
def get_active_last_five_minutes_test(self):
self.collection.remove()
for i in range(0, 100):
self.collection.insert({"name" : "test", 'last_check': now-i})
result = self.model.get_active_last_five_minutes(count=True)
eq_(result, 100)
self.collection.remove()
for i in range(0, 100):
self.collection.insert({"name" : "test", 'last_check': five_minutes_ago-i})
result = self.model.get_active_last_five_minutes(count=True)
eq_(result, 0)
def get_server_by_key_test(self):
self.collection.remove()
self.collection.insert({"name" : "test", "key": "test_me"})
server = self.collection.find_one()
result = self.model.get_server_by_key('test_me')
eq_(result['name'],'test')
eq_(result['key'],'test_me')
eq_(result['_id'],server['_id'])
self.collection.remove()
def delete_server_test(self):
self.collection.remove()
self.collection.insert({"name" : "test", "key": "test_me"})
server = self.collection.find_one()
self.model.delete(server['_id'])
result = self.collection.count()
eq_(result,0)
self.collection.remove()
def get_all_servers_test(self):
self.collection.remove()
for i in range(0, 1000):
name = "test-{0}".format(i)
key = "testkey-{0}".format(i)
self.collection.insert({"name" : name, "key": key, "last_check": minute_ago})
result = self.model.get_all()
eq_(len(result), 1000)
self.collection.remove()
def cleanup_test(self):
self.collection.remove()
self.collection.insert({"name" : "testserver", "key": "test_me"})
server = self.collection.find_one()
date_before = 100
process_collection = process_model.data_collection
process_collection.remove()
system_collection = system_model.data_collection
system_collection.remove()
interface_collection = interfaces_model.get_data_collection()
interface_collection.remove()
volume_collection = volumes_model.get_data_collection()
volume_collection.remove()
for i in range(0, date_before):
process_collection.insert({'i' : 'test', 't': i, 'server_id': server['_id']})
system_collection.insert({'i' : 'test', 'time': i, 'server_id': server['_id']})
interface_collection.insert({'i' : 'test', 't': i, 'server_id': server['_id']})
volume_collection.insert({'i' : 'test', 't': i, 'server_id': server['_id']})
params = {'server_id': server['_id']}
self.model.cleanup(server, date_before=date_before)
process_entries = process_collection.find(params).count()
eq_(process_entries, 0)
system_entries = system_collection.find(params).count()
eq_(system_entries, 0)
interface_entries = interface_collection.find(params).count()
eq_(interface_entries, 0)
volume_entries = volume_collection.find(params).count()
eq_(volume_entries, 0)
system_collection.remove()
process_collection.remove()
interface_collection.remove()
volume_collection.remove()
entries = interface_collection.find().count()
eq_(entries, 0)
for i in range(0, 300):
process_collection.insert({'i' : 'test', 't': i, 'server_id': server['_id']})
system_collection.insert({'i' : 'test', 'time': i, 'server_id': server['_id']})
interface_collection.insert({'i' : 'test', 't': i, 'server_id': server['_id']})
volume_collection.insert({'i' : 'test', 't': i, 'server_id': server['_id']})
process_collection.ensure_index('server_id', background=True)
process_collection.ensure_index('t', background=True)
system_collection.ensure_index('time', background=True)
system_collection.ensure_index('server_id', background=True)
interface_collection.ensure_index('t', background=True)
interface_collection.ensure_index('server_id', background=True)
volume_collection.ensure_index('t', background=True)
volume_collection.ensure_index('server_id', background=True)
self.model.cleanup(server, date_before=date_before)
process_entries = process_collection.find(params).count()
eq_(process_entries, 199)
for p in process_collection.find(sort=[('t', self.model.asc)]):
assert p['t'] > date_before
system_entries = system_collection.find(params).count()
eq_(system_entries, 199)
for p in system_collection.find(sort=[('time', self.model.asc)]):
assert p['time'] > date_before
entries = interface_collection.find(params).count()
eq_(entries, 199)
for p in interface_collection.find(sort=[('t', self.model.asc)]):
assert p['t'] > date_before
entries = volume_collection.find(params).count()
eq_(entries, 199)
for p in volume_collection.find(sort=[('t', self.model.asc)]):
assert p['t'] > date_before
process_collection.drop()
system_collection.drop()
interface_collection.drop()
volume_collection.drop()
class CloudServerModelTest(unittest.TestCase):
def setUp(self):
self.collection = cloud_server_model.mongo.get_collection('servers')
def _cleanup(self):
self.collection.remove()
tags_model.collection.remove()
cloud_credentials_model.collection.remove()
def update_cloud_server_test(self):
self._cleanup()
s = self.collection
s.remove()
s.insert({"account_id": 1, "name" : "test", "key": "server_key_test", "instance_id": 2})
result = s.find_one()
eq_(result['instance_id'], 2)
data = {'instance_id': 2, 'provider': 'amazon'}
cloud_server_model.update_server(data)
result = s.find_one()
eq_(result['provider'], 'amazon')
# Create a new server if it does not exist
self.collection.remove()
data = {"name":"create_server", 'instance_id': 3}
cloud_server_model.update_server(data, account_id=1)
result = s.find_one()
assert(result['key'])
eq_(result['instance_id'], 3)
eq_(result['account_id'], 1)
self._cleanup()
def delete_servers_for_credentials_test(self):
self._cleanup()
credentials_id = "test_credentials"
self.collection.insert({"account_id": 1, "name" : "test", "key": "server_key_test", "credentials_id": credentials_id})
server = self.collection.find_one()
eq_(server['credentials_id'], 'test_credentials')
cloud_server_model.delete_servers_for_credentials(credentials_id=credentials_id)
result = self.collection.find().count()
eq_(result, 0)
self._cleanup()
def delete_all_for_credentials_test(self):
self._cleanup()
data = {'name': 'test', 'token': 'test-token'}
credentials_id = cloud_credentials_model.save(data=data, provider_id='digitalocean')
for i in range(5):
self.collection.insert({"account_id": 1, "name" : "test", "key": "server_key_test", "credentials_id": credentials_id})
cloud_server_model.delete_all_for_provider(credentials_id=credentials_id)
result = self.collection.find().count()
eq_(result, 0)
self._cleanup()
def get_all_for_credentials_test(self):
self._cleanup()
credentials_id = "test_credentials"
for i in range(5):
self.collection.insert({"account_id": 1, "name" : "test", "key": "server_key_test", "credentials_id": credentials_id})
result = cloud_server_model.get_all_for_provider(credentials_id=credentials_id)
eq_(result.count(), 5)
self._cleanup()
def get_instance_ids_list_test(self):
self._cleanup()
credentials_id = "test_credentials"
for i in range(5):
self.collection.insert({"account_id": 1, "name" : "test",
"key": "server_key_test",
"credentials_id": credentials_id,
"instance_id": "instance_id_{0}".format(i)
})
result = cloud_server_model.get_instance_ids_list(credentials_id=credentials_id)
eq_(sorted(result), [u'instance_id_0', u'instance_id_1', u'instance_id_2', u'instance_id_3', u'instance_id_4'] )
self._cleanup()
def diff_instance_ids_test(self):
old_instances = ['test', 'test1', 'test2']
new_instances = ['somethingnew', 'test1']
result = cloud_server_model.diff_instance_ids(old_instances=old_instances, new_instances=new_instances)
eq_(sorted(result), ['test', 'test2']) # These have to be removed
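# i.e. diff_instance_ids returns the ids present in old_instances but missing
# from new_instances -- conceptually set(old_instances) - set(new_instances)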
def save_test(self):
self._cleanup()
credentials_id = "test_credentials"
data = {'name': 'test', 'token': 'test-token'}
credentials_id = cloud_credentials_model.save(data=data, provider_id='digitalocean')
credentials = cloud_credentials_model.collection.find_one()
# Empty list
instance_list = []
cloud_server_model.save(instances=instance_list, credentials=credentials)
result = self.collection.find()
eq_(result.count(), 0)
# Normal list
for i in range(5):
instance = {
'name': 'test',
'instance_id': "instance_id_{0}".format(i),
'provider': "rackspace",
'credentials_id': credentials_id,
'region': 'eu-west1',
'type': 't1-micro'
}
instance_list.append(instance)
cloud_server_model.save(instances=instance_list, credentials=credentials)
result = self.collection.find()
for r in result.clone():
assert len(r['tags']) == 3
for tag in r['tags']:
tag_object = tags_model.get_by_id(tag)
name = tag_object.get('name')
group = tag_object.get('group', {}).get('name')
assert name in ['rackspace', 'eu-west1', 't1-micro']
assert group in ['region', 'provider', 'type']
eq_(r['credentials_id'], credentials_id)
eq_(result.count(), 5)
self._cleanup()
# Filter and delete some old instances
for i in range(4):
self.collection.insert({
"account_id": 1,
"name": "test",
"key": "server_key_test",
"credentials_id": credentials_id,
"instance_id": "instance_id_{0}".format(i)
})
result = self.collection.find().count()
eq_(result, 4)
# Check if duplicate tags are being saved
for i in ['rackspace', 'bla']:
tags_model.get_or_create_by_name(name=i)
instance_list = []
for i in range(5, 10):
instance = {
'name': 'test',
'instance_id': i,
'provider': "rackspace",
'credentials_id': credentials_id,
}
instance_list.append(instance)
cloud_server_model.save(instances=instance_list, credentials=credentials)
result = self.collection.find()
eq_(result.count(), 5)
for r in result:
for tag in r['tags']:
tag_object = tags_model.get_by_id(tag)
assert tag_object['name'] in ['rackspace', 'bla']
self.assertTrue(r['key'])
assert r['instance_id'] <= 10
assert r['instance_id'] >= 5
# Filter and delete all instances, the instance list is empty
instance_list = []
cloud_server_model.save(instances=instance_list, credentials=credentials)
result = self.collection.find()
eq_(result.count(), 0)
self._cleanup()
|
|
# coding: utf-8
#
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for scripts/pre_push_hook.py."""
from __future__ import annotations
import builtins
import os
import shutil
import subprocess
import sys
import tempfile
from core import python_utils
from core.tests import test_utils
from . import common
from . import install_backend_python_libs
from . import pre_push_hook
class PrePushHookTests(test_utils.GenericTestBase):
"""Test the methods for pre push hook script."""
def setUp(self):
super(PrePushHookTests, self).setUp()
process = subprocess.Popen(
['echo', 'test'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def mock_popen( # pylint: disable=unused-argument
unused_cmd_tokens, stdout=subprocess.PIPE,
stderr=subprocess.PIPE):
return process
def mock_get_remote_name():
return b'remote'
def mock_get_refs():
return ['ref1', 'ref2']
def mock_collect_files_being_pushed(unused_refs, unused_remote):
return {
'branch1': ([b'A:file1', b'M:file2'], [b'file1', b'file2']),
'branch2': ([], [])}
def mock_has_uncommitted_files():
return False
self.print_arr = []
def mock_print(msg):
self.print_arr.append(msg)
def mock_check_output(unused_cmd_tokens):
return 'Output'
self.linter_code = 0
def mock_start_linter(unused_files_to_lint):
return self.linter_code
self.mypy_check_code = 0
def mock_execute_mypy_checks():
return self.mypy_check_code
self.does_diff_include_js_or_ts_files = False
def mock_does_diff_include_js_or_ts_files(unused_diff_files):
return self.does_diff_include_js_or_ts_files
self.does_diff_include_ts_files = False
def mock_does_diff_include_ts_files(unused_diff_files):
return self.does_diff_include_ts_files
self.does_diff_include_ci_config_or_js_files = False
def mock_does_diff_include_ci_config_or_js_files(
unused_diff_files):
return self.does_diff_include_ci_config_or_js_files
def mock_check_backend_python_library_for_inconsistencies():
return
self.swap_check_backend_python_libs = self.swap(
pre_push_hook,
'check_for_backend_python_library_inconsistencies',
mock_check_backend_python_library_for_inconsistencies)
self.popen_swap = self.swap(subprocess, 'Popen', mock_popen)
self.get_remote_name_swap = self.swap(
pre_push_hook, 'get_remote_name', mock_get_remote_name)
self.get_refs_swap = self.swap(pre_push_hook, 'get_refs', mock_get_refs)
self.collect_files_swap = self.swap(
pre_push_hook, 'collect_files_being_pushed',
mock_collect_files_being_pushed)
self.uncommitted_files_swap = self.swap(
pre_push_hook, 'has_uncommitted_files', mock_has_uncommitted_files)
self.print_swap = self.swap(builtins, 'print', mock_print)
self.check_output_swap = self.swap(
subprocess, 'check_output', mock_check_output)
self.start_linter_swap = self.swap(
pre_push_hook, 'start_linter', mock_start_linter)
self.execute_mypy_checks_swap = self.swap(
pre_push_hook, 'execute_mypy_checks', mock_execute_mypy_checks)
self.js_or_ts_swap = self.swap(
pre_push_hook, 'does_diff_include_js_or_ts_files',
mock_does_diff_include_js_or_ts_files)
self.ts_swap = self.swap(
pre_push_hook, 'does_diff_include_ts_files',
mock_does_diff_include_ts_files)
self.ci_config_or_js_files_swap = self.swap(
pre_push_hook,
'does_diff_include_ci_config_or_js_files',
mock_does_diff_include_ci_config_or_js_files)
def test_start_subprocess_for_result(self):
with self.popen_swap:
self.assertEqual(
pre_push_hook.start_subprocess_for_result('cmd'),
(b'test\n', b''))
def test_get_remote_name_without_errors(self):
process_for_remote = subprocess.Popen(
[b'echo', b'origin\nupstream'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
process_for_upstream_url = subprocess.Popen(
[b'echo', b'url.oppia/oppia.git'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
process_for_origin_url = subprocess.Popen(
[b'echo', b'url.other/oppia.git'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def mock_popen(cmd_tokens, stdout, stderr): # pylint: disable=unused-argument
if b'remote.origin.url' in cmd_tokens:
return process_for_origin_url
elif b'remote.upstream.url' in cmd_tokens:
return process_for_upstream_url
else:
return process_for_remote
popen_swap = self.swap(subprocess, 'Popen', mock_popen)
with popen_swap:
self.assertEqual(pre_push_hook.get_remote_name(), b'upstream')
def test_get_remote_name_with_error_in_obtaining_remote(self):
def mock_communicate():
return (b'test', b'Error')
process = subprocess.Popen(
[b'echo', b'test'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.communicate = mock_communicate
def mock_popen(unused_cmd_tokens, stdout, stderr): # pylint: disable=unused-argument
return process
popen_swap = self.swap(subprocess, 'Popen', mock_popen)
with popen_swap, self.assertRaisesRegex(ValueError, 'Error'):
pre_push_hook.get_remote_name()
def test_get_remote_name_with_error_in_obtaining_remote_url(self):
def mock_communicate():
return ('test', 'Error')
process_for_remote = subprocess.Popen(
[b'echo', b'origin\nupstream'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
process_for_remote_url = subprocess.Popen(
[b'echo', b'test'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process_for_remote_url.communicate = mock_communicate
def mock_popen(cmd_tokens, stdout, stderr): # pylint: disable=unused-argument
if b'config' in cmd_tokens:
return process_for_remote_url
else:
return process_for_remote
popen_swap = self.swap(subprocess, 'Popen', mock_popen)
with popen_swap, self.assertRaisesRegex(ValueError, 'Error'):
pre_push_hook.get_remote_name()
def test_get_remote_name_with_no_remote_set(self):
process_for_remote = subprocess.Popen(
[b'echo', b'origin\nupstream'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
process_for_upstream_url = subprocess.Popen(
[b'echo', b'url.other/oppia.git'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
process_for_origin_url = subprocess.Popen(
[b'echo', b'url.other/oppia.git'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def mock_popen(cmd_tokens, stdout, stderr): # pylint: disable=unused-argument
if b'remote.origin.url' in cmd_tokens:
return process_for_origin_url
elif b'remote.upstream.url' in cmd_tokens:
return process_for_upstream_url
else:
return process_for_remote
popen_swap = self.swap(subprocess, 'Popen', mock_popen)
with popen_swap, self.assertRaisesRegex(
Exception,
'Error: Please set upstream for the lint checks to run '
'efficiently. To do that follow these steps:\n'
'1. Run the command \'git remote -v\'\n'
'2a. If upstream is listed in the command output, then run the '
'command \'git remote set-url upstream '
'https://github.com/oppia/oppia.git\'\n'
'2b. If upstream is not listed in the command output, then run the '
'command \'git remote add upstream '
'https://github.com/oppia/oppia.git\'\n'):
pre_push_hook.get_remote_name()
def test_get_remote_name_with_multiple_remotes_set(self):
process_for_remote = subprocess.Popen(
[b'echo', b'origin\nupstream'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
process_for_upstream_url = subprocess.Popen(
[b'echo', b'url.oppia/oppia.git'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
process_for_origin_url = subprocess.Popen(
[b'echo', b'url.oppia/oppia.git'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def mock_popen(cmd_tokens, stdout, stderr): # pylint: disable=unused-argument
if b'remote.origin.url' in cmd_tokens:
return process_for_origin_url
elif b'remote.upstream.url' in cmd_tokens:
return process_for_upstream_url
else:
return process_for_remote
popen_swap = self.swap(subprocess, 'Popen', mock_popen)
with popen_swap, self.print_swap:
self.assertIsNone(pre_push_hook.get_remote_name())
self.assertTrue(
'Warning: Please keep only one remote branch for oppia:develop '
'to run the lint checks efficiently.\n' in self.print_arr)
def test_git_diff_name_status_without_error(self):
def mock_start_subprocess_for_result(unused_cmd_tokens):
return (b'M\tfile1\nA\tfile2', None)
subprocess_swap = self.swap(
pre_push_hook, 'start_subprocess_for_result',
mock_start_subprocess_for_result)
with subprocess_swap:
self.assertEqual(
pre_push_hook.git_diff_name_status(
'left', 'right', diff_filter='filter'),
[
pre_push_hook.FileDiff(status=b'M', name=b'file1'),
pre_push_hook.FileDiff(status=b'A', name=b'file2')])
def test_git_diff_name_status_with_error(self):
def mock_start_subprocess_for_result(unused_cmd_tokens):
return ('M\tfile1\nA\tfile2', 'Error')
subprocess_swap = self.swap(
pre_push_hook, 'start_subprocess_for_result',
mock_start_subprocess_for_result)
with subprocess_swap, self.assertRaisesRegex(ValueError, 'Error'):
pre_push_hook.git_diff_name_status(
'left', 'right', diff_filter='filter')
def test_compare_to_remote(self):
check_function_calls = {
'start_subprocess_for_result_is_called': False,
'git_diff_name_status_is_called': False,
'get_merge_base_is_called': False,
}
expected_check_function_calls = {
'start_subprocess_for_result_is_called': True,
'git_diff_name_status_is_called': True,
'get_merge_base_is_called': True,
}
def mock_start_subprocess_for_result(unused_cmd_tokens):
check_function_calls['start_subprocess_for_result_is_called'] = True
def mock_git_diff_name_status(unused_left, unused_right):
check_function_calls['git_diff_name_status_is_called'] = True
return 'Test'
def mock_get_merge_base(unused_left, unused_right):
check_function_calls['get_merge_base_is_called'] = True
return 'Merge Base'
subprocess_swap = self.swap(
pre_push_hook, 'start_subprocess_for_result',
mock_start_subprocess_for_result)
git_diff_swap = self.swap(
pre_push_hook, 'git_diff_name_status', mock_git_diff_name_status)
get_merge_base_swap = self.swap(
pre_push_hook, 'get_merge_base', mock_get_merge_base)
with subprocess_swap, git_diff_swap, get_merge_base_swap:
self.assertEqual(
pre_push_hook.compare_to_remote('remote', 'local branch'),
'Test'
)
self.assertEqual(check_function_calls, expected_check_function_calls)
def test_get_merge_base_reports_error(self):
def mock_start_subprocess_for_result(unused_cmd_tokens):
return None, 'Test'
subprocess_swap = self.swap(
pre_push_hook, 'start_subprocess_for_result',
mock_start_subprocess_for_result)
with subprocess_swap, self.assertRaisesRegex(ValueError, 'Test'):
pre_push_hook.get_merge_base('A', 'B')
def test_get_merge_base_returns_merge_base(self):
check_function_calls = {
'start_subprocess_for_result_is_called': False,
}
expected_check_function_calls = {
'start_subprocess_for_result_is_called': True,
}
def mock_start_subprocess_for_result(unused_cmd_tokens):
check_function_calls['start_subprocess_for_result_is_called'] = True
return b'Test', None
subprocess_swap = self.swap(
pre_push_hook, 'start_subprocess_for_result',
mock_start_subprocess_for_result)
with subprocess_swap:
self.assertEqual(pre_push_hook.get_merge_base('A', 'B'), 'Test')
self.assertEqual(check_function_calls, expected_check_function_calls)
def test_extract_files_to_lint_with_empty_file_diffs(self):
self.assertEqual(pre_push_hook.extract_files_to_lint([]), [])
def test_extract_files_to_lint_with_non_empty_file_diffs(self):
self.assertEqual(
pre_push_hook.extract_files_to_lint([
pre_push_hook.FileDiff(status=b'M', name=b'file1'),
pre_push_hook.FileDiff(status=b'A', name=b'file2'),
pre_push_hook.FileDiff(status=b'W', name=b'file3')]),
[b'file1', b'file2'])
def test_get_parent_branch_name_for_diff_with_hotfix_branch(self):
def mock_get_branch():
return 'release-1.2.3-hotfix-1'
get_branch_swap = self.swap(
common, 'get_current_branch_name', mock_get_branch)
with get_branch_swap:
self.assertEqual(
pre_push_hook.get_parent_branch_name_for_diff(),
'release-1.2.3')
def test_get_parent_branch_name_for_diff_with_release_branch(self):
def mock_get_branch():
return 'release-1.2.3'
get_branch_swap = self.swap(
common, 'get_current_branch_name', mock_get_branch)
with get_branch_swap:
self.assertEqual(
pre_push_hook.get_parent_branch_name_for_diff(), 'develop')
def test_get_parent_branch_name_for_diff_with_non_release_branch(self):
def mock_get_branch():
return 'branch-1'
get_branch_swap = self.swap(
common, 'get_current_branch_name', mock_get_branch)
with get_branch_swap:
self.assertEqual(
pre_push_hook.get_parent_branch_name_for_diff(), 'develop')
def test_collect_files_being_pushed_with_empty_ref_list(self):
def mock_get_branch():
return 'branch-1'
get_branch_swap = self.swap(
common, 'get_current_branch_name', mock_get_branch)
with get_branch_swap:
self.assertEqual(
pre_push_hook.collect_files_being_pushed([], 'remote'), {})
def test_collect_files_being_pushed_with_non_empty_ref_list(self):
def mock_get_branch():
return 'branch-1'
def mock_compare_to_remote(
unused_remote, unused_local_branch, remote_branch=None): # pylint: disable=unused-argument
return ['A:file1', 'M:file2']
def mock_extract_files_to_lint(unused_file_diffs):
return ['file1', 'file2']
get_branch_swap = self.swap(
common, 'get_current_branch_name', mock_get_branch)
compare_to_remote_swap = self.swap(
pre_push_hook, 'compare_to_remote', mock_compare_to_remote)
extract_files_swap = self.swap(
pre_push_hook, 'extract_files_to_lint', mock_extract_files_to_lint)
with compare_to_remote_swap, extract_files_swap, get_branch_swap:
self.assertEqual(
pre_push_hook.collect_files_being_pushed([
pre_push_hook.GitRef(
local_ref='refs/heads/branch1', local_sha1='sha1',
remote_ref='remote/ref1', remote_sha1='rsha1'),
pre_push_hook.GitRef(
local_ref='refs/branch2', local_sha1='sha2',
remote_ref='remote/ref2', remote_sha1='rsha2')
], 'remote'),
{'branch1': (['A:file1', 'M:file2'], ['file1', 'file2'])})
def test_get_refs(self):
temp_stdin_file = tempfile.NamedTemporaryFile().name
with python_utils.open_file(temp_stdin_file, 'w') as f:
f.write('local_ref local_sha1 remote_ref remote_sha1')
with python_utils.open_file(temp_stdin_file, 'r') as f:
with self.swap(sys, 'stdin', f):
self.assertEqual(
pre_push_hook.get_refs(),
[
pre_push_hook.GitRef(
local_ref='local_ref', local_sha1='local_sha1',
remote_ref='remote_ref', remote_sha1='remote_sha1'
)])
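# For reference, git feeds a pre-push hook one line per ref on stdin in the
# form "<local ref> <local sha1> <remote ref> <remote sha1>", which is why the
# fixture above writes a single whitespace-separated line. A standalone parser
# mirroring get_refs() could be as small as (sketch, not the real code):
#
#     GitRef = collections.namedtuple(
#         'GitRef', ['local_ref', 'local_sha1', 'remote_ref', 'remote_sha1'])
#     refs = [GitRef(*line.split()) for line in sys.stdin if line.strip()]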
def test_start_linter(self):
with self.popen_swap:
self.assertEqual(pre_push_hook.start_linter(['files']), 0)
def test_execute_mypy_checks(self):
with self.popen_swap:
self.assertEqual(pre_push_hook.execute_mypy_checks(), 0)
def test_run_script_and_get_returncode(self):
with self.popen_swap:
self.assertEqual(
pre_push_hook.run_script_and_get_returncode('script'), 0)
def test_has_uncommitted_files(self):
def mock_check_output(unused_cmd_tokens):
return 'file1'
check_output_swap = self.swap(
subprocess, 'check_output', mock_check_output)
with check_output_swap:
self.assertTrue(pre_push_hook.has_uncommitted_files())
def test_install_hook_with_existing_symlink(self):
def mock_islink(unused_file):
return True
def mock_exists(unused_file):
return True
def mock_start_subprocess_for_result(unused_cmd_tokens):
return ('Output', None)
islink_swap = self.swap(os.path, 'islink', mock_islink)
exists_swap = self.swap(os.path, 'exists', mock_exists)
subprocess_swap = self.swap(
pre_push_hook, 'start_subprocess_for_result',
mock_start_subprocess_for_result)
with islink_swap, exists_swap, subprocess_swap, self.print_swap:
pre_push_hook.install_hook()
self.assertTrue('Symlink already exists' in self.print_arr)
self.assertTrue(
'pre-push hook file is now executable!' in self.print_arr)
def test_install_hook_with_error_in_making_pre_push_executable(self):
def mock_islink(unused_file):
return True
def mock_exists(unused_file):
return True
def mock_start_subprocess_for_result(unused_cmd_tokens):
return ('Output', 'Error')
islink_swap = self.swap(os.path, 'islink', mock_islink)
exists_swap = self.swap(os.path, 'exists', mock_exists)
subprocess_swap = self.swap(
pre_push_hook, 'start_subprocess_for_result',
mock_start_subprocess_for_result)
with islink_swap, exists_swap, subprocess_swap, self.print_swap:
with self.assertRaisesRegex(ValueError, 'Error'):
pre_push_hook.install_hook()
self.assertTrue('Symlink already exists' in self.print_arr)
self.assertFalse(
'pre-push hook file is now executable!' in self.print_arr)
def test_install_hook_with_creation_of_symlink(self):
check_function_calls = {
'symlink_is_called': False
}
def mock_islink(unused_file):
return False
def mock_exists(unused_file):
return False
def mock_start_subprocess_for_result(unused_cmd_tokens):
return ('Output', None)
def mock_symlink(unused_path, unused_file):
check_function_calls['symlink_is_called'] = True
islink_swap = self.swap(os.path, 'islink', mock_islink)
exists_swap = self.swap(os.path, 'exists', mock_exists)
subprocess_swap = self.swap(
pre_push_hook, 'start_subprocess_for_result',
mock_start_subprocess_for_result)
symlink_swap = self.swap(os, 'symlink', mock_symlink)
with islink_swap, exists_swap, subprocess_swap, symlink_swap, (
self.print_swap):
pre_push_hook.install_hook()
self.assertTrue(check_function_calls['symlink_is_called'])
self.assertTrue(
'Created symlink in .git/hooks directory' in self.print_arr)
self.assertTrue(
'pre-push hook file is now executable!' in self.print_arr)
def test_install_hook_with_error_in_creation_of_symlink(self):
check_function_calls = {
'symlink_is_called': False,
'copy_is_called': False
}
expected_check_function_calls = {
'symlink_is_called': True,
'copy_is_called': True
}
def mock_islink(unused_file):
return False
def mock_exists(unused_file):
return False
def mock_start_subprocess_for_result(unused_cmd_tokens):
return ('Output', None)
def mock_symlink(unused_path, unused_file):
check_function_calls['symlink_is_called'] = True
raise OSError
def mock_copy(unused_type, unused_file):
check_function_calls['copy_is_called'] = True
islink_swap = self.swap(os.path, 'islink', mock_islink)
exists_swap = self.swap(os.path, 'exists', mock_exists)
subprocess_swap = self.swap(
pre_push_hook, 'start_subprocess_for_result',
mock_start_subprocess_for_result)
symlink_swap = self.swap(os, 'symlink', mock_symlink)
copy_swap = self.swap(shutil, 'copy', mock_copy)
with islink_swap, exists_swap, subprocess_swap, symlink_swap, copy_swap:
with self.print_swap:
pre_push_hook.install_hook()
self.assertEqual(check_function_calls, expected_check_function_calls)
self.assertTrue('Copied file to .git/hooks directory' in self.print_arr)
self.assertTrue(
'pre-push hook file is now executable!' in self.print_arr)
def test_install_hook_with_broken_symlink(self):
check_function_calls = {
'unlink_is_called': False,
'symlink_is_called': False
}
def mock_islink(unused_file):
return True
def mock_exists(unused_file):
return False
def mock_start_subprocess_for_result(unused_cmd_tokens):
return ('Output', None)
def mock_unlink(unused_file):
check_function_calls['unlink_is_called'] = True
def mock_symlink(unused_path, unused_file):
check_function_calls['symlink_is_called'] = True
islink_swap = self.swap(os.path, 'islink', mock_islink)
exists_swap = self.swap(os.path, 'exists', mock_exists)
subprocess_swap = self.swap(
pre_push_hook, 'start_subprocess_for_result',
mock_start_subprocess_for_result)
unlink_swap = self.swap(os, 'unlink', mock_unlink)
symlink_swap = self.swap(os, 'symlink', mock_symlink)
with islink_swap, exists_swap, subprocess_swap, self.print_swap:
with unlink_swap, symlink_swap:
pre_push_hook.install_hook()
self.assertTrue(check_function_calls['unlink_is_called'])
self.assertTrue(check_function_calls['symlink_is_called'])
self.assertTrue('Removing broken symlink' in self.print_arr)
self.assertTrue(
'pre-push hook file is now executable!' in self.print_arr)
def test_does_diff_include_js_or_ts_files_with_js_file(self):
self.assertTrue(
pre_push_hook.does_diff_include_js_or_ts_files(
[b'file1.js', b'file2.py']))
def test_does_diff_include_js_or_ts_files_with_no_file(self):
self.assertFalse(
pre_push_hook.does_diff_include_js_or_ts_files(
[b'file1.html', b'file2.py']))
def test_does_diff_include_ts_files(self):
self.assertTrue(
pre_push_hook.does_diff_include_ts_files(
[b'file1.ts', b'file2.ts', b'file3.js']))
def test_does_diff_include_ts_files_fail(self):
self.assertFalse(
pre_push_hook.does_diff_include_ts_files(
[b'file1.html', b'file2.yml', b'file3.js']))
def test_does_diff_include_ci_config_or_js_files(self):
self.assertTrue(
pre_push_hook.does_diff_include_ci_config_or_js_files(
[b'file1.js', b'protractor.conf.js', b'e2e_dummy.yml']))
def test_does_diff_include_ci_config_or_js_files_fail(self):
self.assertFalse(
pre_push_hook.does_diff_include_ci_config_or_js_files(
[b'file1.ts', b'file2.ts', b'file3.html']))
def test_repo_in_dirty_state(self):
def mock_has_uncommitted_files():
return True
uncommitted_files_swap = self.swap(
pre_push_hook, 'has_uncommitted_files', mock_has_uncommitted_files)
with self.get_remote_name_swap, self.get_refs_swap, self.print_swap:
with self.collect_files_swap, uncommitted_files_swap:
with self.assertRaisesRegex(SystemExit, '1'):
with self.swap_check_backend_python_libs:
pre_push_hook.main(args=[])
self.assertTrue(
'Your repo is in a dirty state which prevents the linting from'
' working.\nStash your changes or commit them.\n' in self.print_arr)
def test_error_while_branch_change(self):
def mock_check_output(cmd_tokens):
if 'symbolic-ref' in cmd_tokens:
return 'old-branch'
raise subprocess.CalledProcessError(1, 'cmd', output='Output')
check_output_swap = self.swap(
subprocess, 'check_output', mock_check_output)
with self.get_remote_name_swap, self.get_refs_swap, self.print_swap:
with self.collect_files_swap, self.uncommitted_files_swap:
with check_output_swap, self.assertRaisesRegex(
SystemExit, '1'
):
with self.swap_check_backend_python_libs:
pre_push_hook.main(args=[])
self.assertIn(
'\nCould not change branch to branch1. This is most probably '
'because you are in a dirty state. Change manually to the branch '
'that is being linted or stash your changes.',
self.print_arr
)
def test_lint_failure(self):
self.linter_code = 1
with self.get_remote_name_swap, self.get_refs_swap, self.print_swap:
with self.collect_files_swap, self.uncommitted_files_swap:
with self.check_output_swap, self.start_linter_swap:
with self.execute_mypy_checks_swap:
with self.assertRaisesRegex(SystemExit, '1'):
with self.swap_check_backend_python_libs:
pre_push_hook.main(args=[])
self.assertTrue(
'Push failed, please correct the linting issues above.'
in self.print_arr)
def test_mypy_check_failure(self):
self.mypy_check_code = 1
with self.get_remote_name_swap, self.get_refs_swap, self.print_swap:
with self.collect_files_swap, self.uncommitted_files_swap:
with self.check_output_swap, self.start_linter_swap:
with self.execute_mypy_checks_swap:
with self.assertRaisesRegex(SystemExit, '1'):
with self.swap_check_backend_python_libs:
pre_push_hook.main(args=[])
self.assertIn(
'Push failed, please correct the mypy type annotation issues '
'above.', self.print_arr)
def test_typescript_check_failure(self):
self.does_diff_include_ts_files = True
def mock_run_script_and_get_returncode(unused_script):
return 1
run_script_and_get_returncode_swap = self.swap(
pre_push_hook, 'run_script_and_get_returncode',
mock_run_script_and_get_returncode)
with self.get_remote_name_swap, self.get_refs_swap, self.print_swap:
with self.collect_files_swap, self.uncommitted_files_swap:
with self.check_output_swap, self.start_linter_swap:
with self.ts_swap, run_script_and_get_returncode_swap:
with self.execute_mypy_checks_swap:
with self.assertRaisesRegex(SystemExit, '1'):
with self.swap_check_backend_python_libs:
pre_push_hook.main(args=[])
self.assertTrue(
'Push aborted due to failing typescript checks.' in self.print_arr)
def test_strict_typescript_check_failure(self):
self.does_diff_include_ts_files = True
def mock_run_script_and_get_returncode(script):
if script == pre_push_hook.STRICT_TYPESCRIPT_CHECKS_CMDS:
return 1
return 0
run_script_and_get_returncode_swap = self.swap(
pre_push_hook, 'run_script_and_get_returncode',
mock_run_script_and_get_returncode)
with self.get_remote_name_swap, self.get_refs_swap, self.print_swap:
with self.collect_files_swap, self.uncommitted_files_swap:
with self.check_output_swap, self.start_linter_swap:
with self.ts_swap, run_script_and_get_returncode_swap:
with self.execute_mypy_checks_swap:
with self.assertRaisesRegex(SystemExit, '1'):
with self.swap_check_backend_python_libs:
pre_push_hook.main(args=[])
self.assertTrue(
'Push aborted due to failing typescript checks in '
'strict mode.' in self.print_arr)
def test_frontend_test_failure(self):
self.does_diff_include_js_or_ts_files = True
def mock_run_script_and_get_returncode(unused_script):
return 1
run_script_and_get_returncode_swap = self.swap(
pre_push_hook, 'run_script_and_get_returncode',
mock_run_script_and_get_returncode)
with self.get_remote_name_swap, self.get_refs_swap, self.print_swap:
with self.collect_files_swap, self.uncommitted_files_swap:
with self.check_output_swap, self.start_linter_swap:
with self.js_or_ts_swap, run_script_and_get_returncode_swap:
with self.execute_mypy_checks_swap:
with self.assertRaisesRegex(SystemExit, '1'):
with self.swap_check_backend_python_libs:
pre_push_hook.main(args=[])
self.assertTrue(
'Push aborted due to failing frontend tests.' in self.print_arr)
def test_invalid_ci_e2e_test_suites_failure(self):
self.does_diff_include_ci_config_or_js_files = True
def mock_run_script_and_get_returncode(unused_script):
return 1
run_script_and_get_returncode_swap = self.swap(
pre_push_hook, 'run_script_and_get_returncode',
mock_run_script_and_get_returncode)
with self.get_remote_name_swap, self.get_refs_swap, self.print_swap:
with self.collect_files_swap, self.uncommitted_files_swap:
with self.check_output_swap, self.start_linter_swap:
with run_script_and_get_returncode_swap:
with self.ci_config_or_js_files_swap:
with self.execute_mypy_checks_swap:
with self.assertRaisesRegex(SystemExit, '1'):
with self.swap_check_backend_python_libs:
pre_push_hook.main(args=[])
self.assertTrue(
'Push aborted due to failing e2e test configuration check.'
in self.print_arr)
def test_main_with_install_arg(self):
check_function_calls = {
'install_hook_is_called': False
}
def mock_install_hook():
check_function_calls['install_hook_is_called'] = True
with self.swap(
pre_push_hook, 'install_hook', mock_install_hook), (
self.swap_check_backend_python_libs):
pre_push_hook.main(args=['--install'])
def test_main_without_install_arg_and_errors(self):
with self.get_remote_name_swap, self.get_refs_swap, self.print_swap:
with self.collect_files_swap, self.uncommitted_files_swap:
with self.check_output_swap, self.start_linter_swap:
with self.js_or_ts_swap:
with self.execute_mypy_checks_swap:
with self.swap_check_backend_python_libs:
pre_push_hook.main(args=[])
def test_main_exits_when_mismatches_exist_in_backend_python_libs(self):
"""Test that main exits with correct error message when mismatches are
found between the installed python libraries in
`third_party/python_libs` and the compiled 'requirements.txt' file.
"""
def mock_get_mismatches():
return {
'library': ('version', 'version')
}
def mock_exit_error(error_code):
self.assertEqual(error_code, 1)
swap_get_mismatches = self.swap(
install_backend_python_libs, 'get_mismatches',
mock_get_mismatches)
swap_sys_exit = self.swap(sys, 'exit', mock_exit_error)
with self.print_swap, swap_sys_exit, swap_get_mismatches:
pre_push_hook.check_for_backend_python_library_inconsistencies()
self.assertEqual(
self.print_arr,
[
'Your currently installed python libraries do not match the\n'
'libraries listed in your "requirements.txt" file. Here is a\n'
'full list of library/version discrepancies:\n',
'Library |Requirements Version '
'|Currently Installed Version',
'library |version '
'|version ',
'\n',
'Please fix these discrepancies by editing the '
'`requirements.in`\nfile or running '
'`scripts.install_third_party` to regenerate\nthe '
'`third_party/python_libs` directory.\n\n'
])
def test_main_exits_when_missing_backend_python_lib(self):
"""Test that main exits with correct error message when a python
library required in `requirements.txt` is missing in
`third_party/python_libs`.
"""
def mock_get_mismatches():
return {
'library': ('version', None)
}
def mock_exit_error(error_code):
self.assertEqual(error_code, 1)
swap_get_mismatches = self.swap(
install_backend_python_libs, 'get_mismatches',
mock_get_mismatches)
swap_sys_exit = self.swap(sys, 'exit', mock_exit_error)
with self.print_swap, swap_sys_exit, swap_get_mismatches:
pre_push_hook.check_for_backend_python_library_inconsistencies()
self.assertEqual(
self.print_arr,
[
'Your currently installed python libraries do not match the\n'
'libraries listed in your "requirements.txt" file. Here is a\n'
'full list of library/version discrepancies:\n',
'Library |Requirements Version '
'|Currently Installed Version',
'library |version '
'|None ',
'\n',
'Please fix these discrepancies by editing the '
'`requirements.in`\nfile or running '
'`scripts.install_third_party` to regenerate\nthe '
'`third_party/python_libs` directory.\n\n'
])
def test_main_with_no_inconsistencies_in_backend_python_libs(self):
def mock_get_mismatches():
return {}
swap_get_mismatches = self.swap(
install_backend_python_libs,
'get_mismatches',
mock_get_mismatches)
with swap_get_mismatches, self.print_swap:
pre_push_hook.check_for_backend_python_library_inconsistencies()
self.assertEqual(
self.print_arr,
['Python dependencies consistency check succeeded.'])
|
|
import re
import sys
import urllib
if sys.version < '2.4':
from sets import ImmutableSet as frozenset
from routes.util import _url_quote as url_quote
class Route(object):
"""The Route object holds a route recognition and generation
routine.
See Route.__init__ docs for usage.
"""
def __init__(self, routepath, **kargs):
"""Initialize a route, with a given routepath for
matching/generation
The set of keyword args will be used as defaults.
Usage::
>>> from routes.base import Route
>>> newroute = Route(':controller/:action/:id')
>>> newroute.defaults
{'action': 'index', 'id': None}
>>> newroute = Route('date/:year/:month/:day',
... controller="blog", action="view")
>>> newroute = Route('archives/:page', controller="blog",
... action="by_page", requirements = { 'page':'\d{1,2}' })
>>> newroute.reqs
{'page': '\\\d{1,2}'}
.. Note::
Route is generally not called directly; a Mapper instance's
connect method should be used to add routes.
"""
self.routepath = routepath
self.sub_domains = False
self.prior = None
self.minimization = kargs.pop('_minimize', True)
self.encoding = kargs.pop('_encoding', 'utf-8')
self.reqs = kargs.get('requirements', {})
self.decode_errors = 'replace'
# Don't bother forming stuff we don't need if it's a static route
self.static = kargs.get('_static', False)
self.filter = kargs.pop('_filter', None)
self.absolute = kargs.pop('_absolute', False)
# Pull out the member/collection name if present, this applies only to
# map.resource
self.member_name = kargs.pop('_member_name', None)
self.collection_name = kargs.pop('_collection_name', None)
self.parent_resource = kargs.pop('_parent_resource', None)
# Pull out route conditions
self.conditions = kargs.pop('conditions', None)
# Determine if explicit behavior should be used
self.explicit = kargs.pop('_explicit', False)
# reserved keys that don't count
reserved_keys = ['requirements']
# special chars to indicate a natural split in the URL
self.done_chars = ('/', ',', ';', '.', '#')
# Strip preceding '/' if present, and not minimizing
if routepath.startswith('/') and self.minimization:
routepath = routepath[1:]
# Build our routelist, and the keys used in the route
self.routelist = routelist = self._pathkeys(routepath)
routekeys = frozenset([key['name'] for key in routelist \
if isinstance(key, dict)])
if not self.minimization:
self.make_full_route()
# Build a req list with all the regexp requirements for our args
self.req_regs = {}
for key, val in self.reqs.iteritems():
self.req_regs[key] = re.compile('^' + val + '$')
# Update our defaults and set new default keys if needed. defaults
# needs to be saved
(self.defaults, defaultkeys) = self._defaults(routekeys,
reserved_keys, kargs)
# Save the maximum keys we could utilize
self.maxkeys = defaultkeys | routekeys
# Populate our minimum keys, and save a copy of our backward keys for
# quicker generation later
(self.minkeys, self.routebackwards) = self._minkeys(routelist[:])
# Populate our hardcoded keys, these are ones that are set and don't
# exist in the route
self.hardcoded = frozenset([key for key in self.maxkeys \
if key not in routekeys and self.defaults[key] is not None])
def make_full_route(self):
"""Make a full routelist string for use with non-minimized
generation"""
regpath = ''
for part in self.routelist:
if isinstance(part, dict):
regpath += '%(' + part['name'] + ')s'
else:
regpath += part
self.regpath = regpath
def make_unicode(self, s):
"""Transform the given argument into a unicode string."""
if isinstance(s, unicode):
return s
elif isinstance(s, str):
return s.decode(self.encoding)
elif callable(s):
return s
else:
return unicode(s)
def _pathkeys(self, routepath):
"""Utility function to walk the route, and pull out the valid
dynamic/wildcard keys."""
collecting = False
current = ''
done_on = ''
var_type = ''
just_started = False
routelist = []
for char in routepath:
if char in [':', '*', '{'] and not collecting:
just_started = True
collecting = True
var_type = char
if char == '{':
done_on = '}'
just_started = False
if len(current) > 0:
routelist.append(current)
current = ''
elif collecting and just_started:
just_started = False
if char == '(':
done_on = ')'
else:
current = char
done_on = self.done_chars + ('-',)
elif collecting and char not in done_on:
current += char
elif collecting:
collecting = False
if var_type == '{':
opts = current.split(':')
if len(opts) > 1:
current = opts[0]
self.reqs[current] = opts[1]
var_type = ':'
routelist.append(dict(type=var_type, name=current))
if char in self.done_chars:
routelist.append(char)
done_on = var_type = current = ''
else:
current += char
if collecting:
routelist.append(dict(type=var_type, name=current))
elif current:
routelist.append(current)
return routelist
def _minkeys(self, routelist):
"""Utility function to walk the route backwards
Will also determine the minimum keys we can handle to generate
a working route.
routelist is a list of the '/' split route path
defaults is a dict of all the defaults provided for the route
"""
minkeys = []
backcheck = routelist[:]
# If we don't honor minimization, we need all the keys in the
# route path
if not self.minimization:
for part in backcheck:
if isinstance(part, dict):
minkeys.append(part['name'])
return (frozenset(minkeys), backcheck)
gaps = False
backcheck.reverse()
for part in backcheck:
if not isinstance(part, dict) and part not in self.done_chars:
gaps = True
continue
elif not isinstance(part, dict):
continue
key = part['name']
if self.defaults.has_key(key) and not gaps:
continue
minkeys.append(key)
gaps = True
return (frozenset(minkeys), backcheck)
def _defaults(self, routekeys, reserved_keys, kargs):
"""Creates default set with values stringified
Put together our list of defaults, stringify non-None values
and add in our action/id default if they use it and didn't
specify it.
defaultkeys is a list of the currently assumed default keys
routekeys is a list of the keys found in the route path
reserved_keys is a list of keys that are not counted as defaults
"""
defaults = {}
# Add in a controller/action default if they don't exist
if 'controller' not in routekeys and 'controller' not in kargs \
and not self.explicit:
kargs['controller'] = 'content'
if 'action' not in routekeys and 'action' not in kargs \
and not self.explicit:
kargs['action'] = 'index'
defaultkeys = frozenset([key for key in kargs.keys() \
if key not in reserved_keys])
for key in defaultkeys:
if kargs[key] is not None:
defaults[key] = self.make_unicode(kargs[key])
else:
defaults[key] = None
if 'action' in routekeys and not defaults.has_key('action') \
and not self.explicit:
defaults['action'] = 'index'
if 'id' in routekeys and not defaults.has_key('id') \
and not self.explicit:
defaults['id'] = None
newdefaultkeys = frozenset([key for key in defaults.keys() \
if key not in reserved_keys])
return (defaults, newdefaultkeys)
def makeregexp(self, clist):
"""Create a regular expression for matching purposes
Note: This MUST be called before match can function properly.
clist should be a list of valid controller strings that can be
matched, for this reason makeregexp should be called by the web
framework after it knows all available controllers that can be
utilized.
"""
if self.minimization:
reg = self.buildnextreg(self.routelist, clist)[0]
if not reg:
reg = '/'
reg = reg + '(/)?' + '$'
if not reg.startswith('/'):
reg = '/' + reg
else:
reg = self.buildfullreg(clist)
reg = '^' + reg
self.regexp = reg
self.regmatch = re.compile(reg)
def buildfullreg(self, clist):
"""Build the regexp by iterating through the routelist and
replacing dicts with the appropriate regexp match"""
regparts = []
for part in self.routelist:
if isinstance(part, dict):
var = part['name']
if var == 'controller':
partmatch = '|'.join(map(re.escape, clist))
elif part['type'] == ':':
partmatch = self.reqs.get(var) or '[^/]+?'
else:
partmatch = self.reqs.get(var) or '.+?'
regparts.append('(?P<%s>%s)' % (var, partmatch))
else:
regparts.append(re.escape(part))
regexp = ''.join(regparts) + '$'
return regexp
def buildnextreg(self, path, clist):
"""Recursively build our regexp given a path, and a controller
list.
Returns the regular expression string, and two booleans that
can be ignored as they're only used internally by buildnextreg.
"""
if path:
part = path[0]
else:
part = ''
reg = ''
# noreqs will remember whether the remainder has either a string
# match, or a non-defaulted regexp match on a key, allblank remembers
# if the rest could possibly be completely empty
(rest, noreqs, allblank) = ('', True, True)
if len(path[1:]) > 0:
self.prior = part
(rest, noreqs, allblank) = self.buildnextreg(path[1:], clist)
if isinstance(part, dict) and part['type'] == ':':
var = part['name']
partreg = ''
# First we plug in the proper part matcher
if self.reqs.has_key(var):
partreg = '(?P<' + var + '>' + self.reqs[var] + ')'
elif var == 'controller':
partreg = '(?P<' + var + '>' + '|'.join(map(re.escape, clist))
partreg += ')'
elif self.prior in ['/', '#']:
partreg = '(?P<' + var + '>[^' + self.prior + ']+?)'
else:
if not rest:
partreg = '(?P<' + var + '>[^%s]+?)' % '/'
else:
end = ''.join(self.done_chars)
rem = rest
if rem[0] == '\\' and len(rem) > 1:
rem = rem[1]
elif rem.startswith('(\\') and len(rem) > 2:
rem = rem[2]
else:
rem = end
rem = frozenset(rem) | frozenset(['/'])
partreg = '(?P<' + var + '>[^%s]+?)' % ''.join(rem)
if self.reqs.has_key(var):
noreqs = False
if not self.defaults.has_key(var):
allblank = False
noreqs = False
# Now we determine if it's optional, or required. This changes
# depending on what is in the rest of the match. If noreqs is
# true, then it's possible the entire thing is optional as there are
# no reqs or string matches.
if noreqs:
# The rest is optional, but now we have an optional with a
# regexp. Wrap to ensure that if we match anything, we match
# our regexp first. It's still possible we could be completely
# blank as we have a default
if self.reqs.has_key(var) and self.defaults.has_key(var):
reg = '(' + partreg + rest + ')?'
# Or we have a regexp match with no default, so now being
# completely blank from here on out isn't possible
elif self.reqs.has_key(var):
allblank = False
reg = partreg + rest
# If the character before this is a special char, it has to be
# followed by this
elif self.defaults.has_key(var) and \
self.prior in (',', ';', '.'):
reg = partreg + rest
# Or we have a default with no regexp, don't touch the allblank
elif self.defaults.has_key(var):
reg = partreg + '?' + rest
# Or we have a key with no default, and no reqs. Not possible
# to be all blank from here
else:
allblank = False
reg = partreg + rest
# In this case, we have something dangling that might need to be
# matched
else:
# If they can all be blank, and we have a default here, we know
# it's safe to make everything from here optional. Since
# something else in the chain does have req's though, we have
# to make the partreg here required to continue matching
if allblank and self.defaults.has_key(var):
reg = '(' + partreg + rest + ')?'
# Same as before, but they can't all be blank, so we have to
# require it all to ensure our matches line up right
else:
reg = partreg + rest
elif isinstance(part, dict) and part['type'] == '*':
var = part['name']
if noreqs:
if self.defaults.has_key(var):
reg = '(?P<' + var + '>.*)' + rest
else:
reg = '(?P<' + var + '>.*)' + rest
allblank = False
noreqs = False
else:
if allblank and self.defaults.has_key(var):
reg = '(?P<' + var + '>.*)' + rest
elif self.defaults.has_key(var):
reg = '(?P<' + var + '>.*)' + rest
else:
allblank = False
noreqs = False
reg = '(?P<' + var + '>.*)' + rest
elif part and part[-1] in self.done_chars:
if allblank:
reg = re.escape(part[:-1]) + '(' + re.escape(part[-1]) + rest
reg += ')?'
else:
allblank = False
reg = re.escape(part) + rest
# We have a normal string here, this is a req, and it prevents us from
# being all blank
else:
noreqs = False
allblank = False
reg = re.escape(part) + rest
return (reg, noreqs, allblank)
def match(self, url, environ=None, sub_domains=False,
sub_domains_ignore=None, domain_match=''):
"""Match a url to our regexp.
While the regexp might match, this operation isn't
guaranteed to succeed, as there are other factors that can cause a match to
fail even though the regexp succeeds (Default that was relied
on wasn't given, requirement regexp doesn't pass, etc.).
Therefore the calling function shouldn't assume this will
return a valid dict, the other possible return is False if a
match doesn't work out.
"""
# Static routes don't match, they generate only
if self.static:
return False
match = self.regmatch.match(url)
if not match:
return False
if not environ:
environ = {}
sub_domain = None
if environ.get('HTTP_HOST') and sub_domains:
host = environ['HTTP_HOST'].split(':')[0]
sub_match = re.compile('^(.+?)\.%s$' % domain_match)
subdomain = re.sub(sub_match, r'\1', host)
if subdomain not in sub_domains_ignore and host != subdomain:
sub_domain = subdomain
if self.conditions:
if self.conditions.has_key('method') and \
environ.get('REQUEST_METHOD') not in self.conditions['method']:
return False
# Check sub-domains?
use_sd = self.conditions.get('sub_domain')
if use_sd and not sub_domain:
return False
if isinstance(use_sd, list) and sub_domain not in use_sd:
return False
matchdict = match.groupdict()
result = {}
extras = frozenset(self.defaults.keys()) - frozenset(matchdict.keys())
for key, val in matchdict.iteritems():
if key != 'path_info' and self.encoding:
# change back into python unicode objects from the URL
# representation
try:
val = val and val.decode(self.encoding, self.decode_errors)
except UnicodeDecodeError:
return False
if not val and self.defaults.has_key(key) and self.defaults[key]:
result[key] = self.defaults[key]
else:
result[key] = val
for key in extras:
result[key] = self.defaults[key]
# Add the sub-domain if there is one
if sub_domains:
result['sub_domain'] = sub_domain
# If there's a function, call it with environ and expire if it
# returns False
if self.conditions and self.conditions.has_key('function') and \
not self.conditions['function'](environ, result):
return False
return result
def generate_non_minimized(self, kargs):
"""Generate a non-minimal version of the URL"""
# Iterate through the keys that are defaults, and NOT in the route
# path. If it's not in kargs, or doesn't match, or is None, this
# route won't work
for k in self.maxkeys - self.minkeys:
if k not in kargs:
return False
elif self.make_unicode(kargs[k]) != \
self.make_unicode(self.defaults[k]):
return False
# Ensure that all the args in the route path are present and not None
for arg in self.minkeys:
if arg not in kargs or kargs[arg] is None:
return False
return self.regpath % kargs
def generate_minimized(self, kargs):
"""Generate a minimized version of the URL"""
routelist = self.routebackwards
urllist = []
gaps = False
for part in routelist:
if isinstance(part, dict) and part['type'] == ':':
arg = part['name']
# For efficiency, check these just once
has_arg = kargs.has_key(arg)
has_default = self.defaults.has_key(arg)
# Determine if we can leave this part off
# First check if the default exists and wasn't provided in the
# call (also no gaps)
if has_default and not has_arg and not gaps:
continue
# Now check to see if there's a default and it matches the
# incoming call arg
if (has_default and has_arg) and self.make_unicode(kargs[arg]) == \
self.make_unicode(self.defaults[arg]) and not gaps:
continue
# We need to pull the value to append, if the arg is None and
# we have a default, use that
if has_arg and kargs[arg] is None and has_default and not gaps:
continue
# Otherwise if we do have an arg, use that
elif has_arg:
val = kargs[arg]
elif has_default and self.defaults[arg] is not None:
val = self.defaults[arg]
# No arg at all? This won't work
else:
return False
urllist.append(url_quote(val, self.encoding))
if has_arg:
del kargs[arg]
gaps = True
elif isinstance(part, dict) and part['type'] == '*':
arg = part['name']
kar = kargs.get(arg)
if kar is not None:
urllist.append(url_quote(kar, self.encoding))
gaps = True
elif part and part[-1] in self.done_chars:
if not gaps and part in self.done_chars:
continue
elif not gaps:
urllist.append(part[:-1])
gaps = True
else:
gaps = True
urllist.append(part)
else:
gaps = True
urllist.append(part)
urllist.reverse()
url = ''.join(urllist)
return url
def generate(self, _ignore_req_list=False, _append_slash=False, **kargs):
"""Generate a URL from ourself given a set of keyword arguments
Returns False if this set of keywords would cause a gap in
the url.
"""
# Verify that our args pass any regexp requirements
if not _ignore_req_list:
for key in self.reqs.keys():
val = kargs.get(key)
if val and not self.req_regs[key].match(self.make_unicode(val)):
return False
# Verify that if we have a method arg, its in the method accept list.
# Also, method will be changed to _method for route generation
meth = kargs.get('method')
if meth:
if self.conditions and 'method' in self.conditions \
and meth.upper() not in self.conditions['method']:
return False
kargs.pop('method')
if self.minimization:
url = self.generate_minimized(kargs)
else:
url = self.generate_non_minimized(kargs)
if url is False:
return url
if not url.startswith('/'):
url = '/' + url
extras = frozenset(kargs.keys()) - self.maxkeys
if extras:
if _append_slash and not url.endswith('/'):
url += '/'
url += '?'
fragments = []
# don't assume the 'extras' set preserves order: iterate
# through the ordered kargs instead
for key in kargs:
if key not in extras:
continue
if key == 'action' or key == 'controller':
continue
val = kargs[key]
if isinstance(val, (tuple, list)):
for value in val:
fragments.append((key, value))
else:
fragments.append((key, val))
url += urllib.urlencode(fragments)
elif _append_slash and not url.endswith('/'):
url += '/'
return url
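# --- Illustrative usage sketch (not part of the original routes module) ---
# A minimal example of driving Route directly, as the docstrings above describe:
# makeregexp() must be called with the known controller names before match()
# can work, and generate() is the inverse operation. The route path and the
# ['content', 'blog'] controller list are illustrative values only.
if __name__ == '__main__':
    route = Route(':controller/:action/:id')
    route.makeregexp(['content', 'blog'])
    # match() returns a dict of extracted values, or False when the url
    # cannot be matched.
    print(route.match('/blog/view/3'))           # e.g. {'controller': 'blog', 'action': 'view', 'id': '3'}
    print(route.match('/no/such/route/at/all'))  # False
    # generate() goes the other way: keyword args back to a url string.
    print(route.generate(controller='blog', action='view', id='3'))  # '/blog/view/3'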
|
|
# ed25519.py - Optimized version of the reference implementation of Ed25519
#
# Written in 2011? by Daniel J. Bernstein <djb@cr.yp.to>
# 2013 by Donald Stufft <donald@stufft.io>
# 2013 by Alex Gaynor <alex.gaynor@gmail.com>
# 2013 by Greg Price <price@mit.edu>
#
# To the extent possible under law, the author(s) have dedicated all copyright
# and related and neighboring rights to this software to the public domain
# worldwide. This software is distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication along
# with this software. If not, see
# <http://creativecommons.org/publicdomain/zero/1.0/>.
"""
NB: This code is not safe for use with secret keys or secret data.
The only safe use of this code is for verifying signatures on public messages.
Functions for computing the public key of a secret key and for signing
a message are included, namely publickey_unsafe and signature_unsafe,
for testing purposes only.
The root of the problem is that Python's long-integer arithmetic is
not designed for use in cryptography. Specifically, it may take more
or less time to execute an operation depending on the values of the
inputs, and its memory access patterns may also depend on the inputs.
This opens it to timing and cache side-channel attacks which can
disclose data to an attacker. We rely on Python's long-integer
arithmetic, so we cannot handle secrets without risking their disclosure.
"""
import hashlib
import operator
import sys
__version__ = "1.0.dev0"
# Useful for very coarse version differentiation.
PY3 = sys.version_info[0] == 3
if PY3:
indexbytes = operator.getitem
intlist2bytes = bytes
int2byte = operator.methodcaller("to_bytes", 1, "big")
else:
int2byte = chr
range = xrange # noqa: F821
def indexbytes(buf, i):
return ord(buf[i])
def intlist2bytes(l):
return b"".join(chr(c) for c in l)
b = 256
q = 2**255 - 19
l = 2**252 + 27742317777372353535851937790883648493
def H(m):
return hashlib.sha512(m).digest()
def pow2(x, p):
"""== pow(x, 2**p, q)"""
while p > 0:
x = x * x % q
p -= 1
return x
def inv(z):
r"""$= z^{-1} \mod q$, for z != 0"""
# Adapted from curve25519_athlon.c in djb's Curve25519.
z2 = z * z % q # 2
z9 = pow2(z2, 2) * z % q # 9
z11 = z9 * z2 % q # 11
z2_5_0 = (z11 * z11) % q * z9 % q # 31 == 2^5 - 2^0
z2_10_0 = pow2(z2_5_0, 5) * z2_5_0 % q # 2^10 - 2^0
z2_20_0 = pow2(z2_10_0, 10) * z2_10_0 % q # ...
z2_40_0 = pow2(z2_20_0, 20) * z2_20_0 % q
z2_50_0 = pow2(z2_40_0, 10) * z2_10_0 % q
z2_100_0 = pow2(z2_50_0, 50) * z2_50_0 % q
z2_200_0 = pow2(z2_100_0, 100) * z2_100_0 % q
z2_250_0 = pow2(z2_200_0, 50) * z2_50_0 % q # 2^250 - 2^0
return pow2(z2_250_0, 5) * z11 % q # 2^255 - 2^5 + 11 = q - 2
d = -121665 * inv(121666) % q
I = pow(2, (q - 1) // 4, q)
def xrecover(y):
xx = (y * y - 1) * inv(d * y * y + 1)
x = pow(xx, (q + 3) // 8, q)
if (x * x - xx) % q != 0:
x = (x * I) % q
if x % 2 != 0:
x = q - x
return x
By = 4 * inv(5)
Bx = xrecover(By)
B = (Bx % q, By % q, 1, (Bx * By) % q)
ident = (0, 1, 1, 0)
def edwards_add(P, Q):
# This is formula sequence 'addition-add-2008-hwcd-3' from
# http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
(x1, y1, z1, t1) = P
(x2, y2, z2, t2) = Q
a = (y1 - x1) * (y2 - x2) % q
b = (y1 + x1) * (y2 + x2) % q
c = t1 * 2 * d * t2 % q
dd = z1 * 2 * z2 % q
e = b - a
f = dd - c
g = dd + c
h = b + a
x3 = e * f
y3 = g * h
t3 = e * h
z3 = f * g
return (x3 % q, y3 % q, z3 % q, t3 % q)
def edwards_double(P):
# This is formula sequence 'dbl-2008-hwcd' from
# http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
(x1, y1, z1, t1) = P
a = x1 * x1 % q
b = y1 * y1 % q
c = 2 * z1 * z1 % q
# dd = -a
e = ((x1 + y1) * (x1 + y1) - a - b) % q
g = -a + b # dd + b
f = g - c
h = -a - b # dd - b
x3 = e * f
y3 = g * h
t3 = e * h
z3 = f * g
return (x3 % q, y3 % q, z3 % q, t3 % q)
def scalarmult(P, e):
if e == 0:
return ident
Q = scalarmult(P, e // 2)
Q = edwards_double(Q)
if e & 1:
Q = edwards_add(Q, P)
return Q
# Bpow[i] == scalarmult(B, 2**i)
Bpow = []
def make_Bpow():
P = B
for i in range(253):
Bpow.append(P)
P = edwards_double(P)
make_Bpow()
def scalarmult_B(e):
"""
Implements scalarmult(B, e) more efficiently.
"""
# scalarmult(B, l) is the identity
e = e % l
P = ident
for i in range(253):
if e & 1:
P = edwards_add(P, Bpow[i])
e = e // 2
assert e == 0, e
return P
def encodeint(y):
bits = [(y >> i) & 1 for i in range(b)]
return b"".join(
[
int2byte(sum([bits[i * 8 + j] << j for j in range(8)]))
for i in range(b // 8)
]
)
def encodepoint(P):
(x, y, z, t) = P
zi = inv(z)
x = (x * zi) % q
y = (y * zi) % q
bits = [(y >> i) & 1 for i in range(b - 1)] + [x & 1]
return b"".join(
[
int2byte(sum([bits[i * 8 + j] << j for j in range(8)]))
for i in range(b // 8)
]
)
def bit(h, i):
return (indexbytes(h, i // 8) >> (i % 8)) & 1
def publickey_unsafe(sk):
"""
Not safe to use with secret keys or secret data.
See module docstring. This function should be used for testing only.
"""
h = H(sk)
a = 2 ** (b - 2) + sum(2**i * bit(h, i) for i in range(3, b - 2))
A = scalarmult_B(a)
return encodepoint(A)
def Hint(m):
h = H(m)
return sum(2**i * bit(h, i) for i in range(2 * b))
def signature_unsafe(m, sk, pk):
"""
Not safe to use with secret keys or secret data.
See module docstring. This function should be used for testing only.
"""
h = H(sk)
a = 2 ** (b - 2) + sum(2**i * bit(h, i) for i in range(3, b - 2))
r = Hint(
intlist2bytes([indexbytes(h, j) for j in range(b // 8, b // 4)]) + m
)
R = scalarmult_B(r)
S = (r + Hint(encodepoint(R) + pk + m) * a) % l
return encodepoint(R) + encodeint(S)
def isoncurve(P):
(x, y, z, t) = P
return (
z % q != 0
and x * y % q == z * t % q
and (y * y - x * x - z * z - d * t * t) % q == 0
)
def decodeint(s):
return sum(2**i * bit(s, i) for i in range(0, b))
def decodepoint(s):
y = sum(2**i * bit(s, i) for i in range(0, b - 1))
x = xrecover(y)
if x & 1 != bit(s, b - 1):
x = q - x
P = (x, y, 1, (x * y) % q)
if not isoncurve(P):
raise ValueError("decoding point that is not on curve")
return P
class SignatureMismatch(Exception):
pass
def checkvalid(s, m, pk):
"""
Not safe to use when any argument is secret.
See module docstring. This function should be used only for
verifying public signatures of public messages.
"""
if len(s) != b // 4:
raise ValueError("signature length is wrong")
if len(pk) != b // 8:
raise ValueError("public-key length is wrong")
R = decodepoint(s[: b // 8])
A = decodepoint(pk)
S = decodeint(s[b // 8 : b // 4])
h = Hint(encodepoint(R) + pk + m)
(x1, y1, z1, t1) = P = scalarmult_B(S)
(x2, y2, z2, t2) = Q = edwards_add(R, scalarmult(A, h))
if (
not isoncurve(P)
or not isoncurve(Q)
or (x1 * z2 - x2 * z1) % q != 0
or (y1 * z2 - y2 * z1) % q != 0
):
raise SignatureMismatch("signature does not pass verification")
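# --- Illustrative usage sketch (not part of the original module) ---
# A short, test-only walkthrough of the unsafe API described in the module
# docstring. As stated there, none of this is safe for real secret keys; the
# 32-byte seed below is an arbitrary constant chosen purely for illustration.
if __name__ == "__main__":
    sk = b"\x01" * 32                     # arbitrary 32-byte seed (illustration only)
    pk = publickey_unsafe(sk)             # 32-byte public key derived from the seed
    msg = b"attack at dawn"
    sig = signature_unsafe(msg, sk, pk)   # 64-byte signature over msg
    checkvalid(sig, msg, pk)              # passes silently; raises SignatureMismatch on failure
    try:
        checkvalid(sig, b"attack at dusk", pk)
    except SignatureMismatch:
        print("tampered message correctly rejected")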
|
|
# ==============================================================================
# Copyright 2019 - Philip Paquette
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Base Adapter
- This module allows a player to interact with a model using a standardized interface
"""
from abc import ABCMeta
import logging
import time
from diplomacy_research.models.datasets.queue_dataset import QueueDataset
from diplomacy_research.utils.cluster import is_ioloop_running
# Note: The following is imported in `load_from_checkpoint()` to avoid cyclical imports
# from diplomacy_research.utils.checkpoint import load_frozen_graph, load_graph_from_ckpt
# Constants
LOGGER = logging.getLogger(__name__)
class BaseAdapter(metaclass=ABCMeta):
""" Allows the evaluation of a policy adapter from a TensorFlow graph and session """
def __init__(self, feedable_dataset, graph=None, session=None):
""" Initializer
:param feedable_dataset: The feedable dataset to use (must be initiated under the graph provided)
:param graph: The graph object that contains the policy model to evaluate
:param session: The session to use to interact with the graph
:type feedable_dataset: diplomacy_research.models.datasets.feedable_dataset.FeedableDataset
:type graph: tensorflow.python.framework.ops.Graph
:type session: tensorflow.python.client.session.Session
"""
self.graph = graph
self.session = session
self.feedable_dataset = feedable_dataset
self.iterator = self.feedable_dataset.iterator
self.features = {}
self.placeholders = {}
self.outputs = {}
# Checking if the IOLoop is started
if not is_ioloop_running():
LOGGER.error('This object requires a running IO-Loop. Please start it before instantiating this object.')
raise RuntimeError('IO Loop has not been started.')
# Loading features, outputs, placeholders
if graph is not None:
self._load_features_placeholders()
# Initializes the adapter
if self.session:
self.initialize(self.session)
# Creating queues
self.create_queues()
@staticmethod
def get_signature():
""" Returns the signature of all the possible calls using this adapter
Format: { method_signature_name: {'placeholders': {name: (value, numpy_dtype)},
'outputs': [output_name, output_name] } }
e.g. {'policy_evaluate': {'placeholders': {'decoder_type': ([SAMPLE_DECODER], np.uint8)},
'outputs: ['selected_tokens', 'log_probs', 'draw_prob']}}
"""
raise NotImplementedError()
def _load_features_placeholders(self):
""" Loads the features, outputs, and placeholders nodes from the model """
from diplomacy_research.utils.tensorflow import tf
graph = self.graph or tf.get_default_graph()
collection_keys = graph.get_all_collection_keys()
for key in collection_keys:
# If list, getting first element
key_value = graph.get_collection(key)
if isinstance(key_value, list) and key_value:
key_value = key_value[0]
# Setting in self.
if key.startswith('feature'):
self.features[key.replace('feature_', '')] = key_value
elif key.startswith('placeholder'):
self.placeholders[key.replace('placeholder_', '')] = key_value
else:
self.outputs[key] = key_value
@property
def is_trainable(self):
""" Returns a boolean that indicates if the policy model can be trained """
return len([key for key in self.outputs if 'is_trainable' in key]) > 0
def initialize(self, session):
""" Initialize the adapter (init global vars and the dataset)
:type session: tensorflow.python.client.session.Session
"""
if not self.feedable_dataset.can_support_iterator or not self.iterator:
return
from diplomacy_research.utils.tensorflow import tf
assert session, 'You must pass a session to initialize the adapter'
assert isinstance(self.feedable_dataset, QueueDataset), 'The dataset must be a QueueDataset'
self.session = session
# Initializes uninit global vars
graph = self.graph or tf.get_default_graph()
if not graph.finalized:
with graph.as_default():
var_to_initialize = tf.global_variables() + tf.local_variables()
is_initialized = self.session.run([tf.is_variable_initialized(var) for var in var_to_initialize])
not_initialized_vars = [var for (var, is_init) in zip(var_to_initialize, is_initialized) if not is_init]
if not_initialized_vars:
LOGGER.info('Initialized %d variables.', len(not_initialized_vars))
self.session.run(tf.variables_initializer(not_initialized_vars))
# Initializing the dataset to use the feedable model
if not self.feedable_dataset.is_started and self.session:
self.feedable_dataset.start(self.session)
elif not self.feedable_dataset.is_initialized and self.session:
self.feedable_dataset.initialize(self.session)
def load_from_checkpoint(self, checkpoint_path):
""" Loads the variable from the checkpoint into the current graph
:param checkpoint_path: Either 1) Path to a checkpoint (e.g. /path/model.ckpt-XXX) or
2) Path to a frozen graph (e.g. /path/frozen.pb)
:return: Nothing
"""
assert self.feedable_dataset.can_support_iterator, 'The dataset must be able to support an iterator'
assert isinstance(self.feedable_dataset, QueueDataset), 'The dataset must be a QueueDataset'
# ---- <Import> ----
# Loaded here to avoid cyclical imports
from diplomacy_research.utils.checkpoint import load_frozen_graph, load_graph_from_ckpt # pylint: disable=wrong-import-position
# ---- </Import> ----
# Loading graph from disk
if checkpoint_path[-3:] == '.pb':
load_frozen_graph(checkpoint_path, graph=self.graph, session=self.session)
else:
load_graph_from_ckpt(checkpoint_path, graph=self.graph, session=self.session)
# Loading features, outputs, placeholders
self._load_features_placeholders()
# Making sure we have an iterator resource
iterator_resource = [self.outputs[key] for key in self.outputs if 'iterator_resource' in key]
if not iterator_resource:
LOGGER.error('An "iterator_resource" key must be defined in checkpoints for models to be resumable.')
raise RuntimeError('"iterator_resource" not present.')
# Creating new iterator with the iterator_resource
iterator_resource = iterator_resource[0]
self.feedable_dataset.create_iterator(iterator_resource, features=self.features)
self.feedable_dataset.initialize(self.session)
# Rebuilding queues
self.create_queues()
def create_queues(self):
""" Generates queues to feed data directly in the dataset in feedable mode """
# The dataset must be a QueueDataset
if not isinstance(self.feedable_dataset, QueueDataset):
return
# We haven't loaded a model yet (probably going to load a frozen checkpoint instead)
# We can't build queues yet because the graph is not built.
if not self.outputs or not self.features:
return
# Building queues
signature = self.get_signature()
for method_name in signature:
placeholders = signature[method_name].get('placeholders', {})
outputs = signature[method_name]['outputs']
# Queue already created
if self.feedable_dataset.has_queue(method_name):
LOGGER.warning('Queue %s has already been created.', method_name)
continue
# Output not available
missing_outputs = [output_name for output_name in outputs if output_name not in self.outputs]
if missing_outputs:
LOGGER.warning('Unable to create queue "%s" - Missing outputs: %s', method_name, missing_outputs)
continue
# Placeholder not available
missing_pholders = [pholder_name for pholder_name in placeholders if pholder_name not in self.placeholders]
if missing_pholders:
LOGGER.warning('Unable to create queue "%s" - Missing placeholders: %s', method_name, missing_pholders)
continue
# Building queue
self.feedable_dataset.create_queue(method_name,
outputs=[self.outputs[output_name] for output_name in outputs],
placeholders={self.placeholders[ph_name]: placeholders[ph_name][0]
for ph_name in placeholders},
post_queue=lambda _: time.sleep(0.10)) # To collect many batches
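# --- Illustrative subclass sketch (not part of the original module) ---
# BaseAdapter is abstract; concrete adapters are expected to supply
# get_signature() in the format documented above. The method name
# 'policy_evaluate', the 'decoder_type' placeholder, the SAMPLE_DECODER
# constant and the output names below are assumptions taken from that
# docstring example, not guaranteed model keys.
import numpy as np

SAMPLE_DECODER = 0  # assumed decoder id, for the sketch only

class ExamplePolicyAdapter(BaseAdapter):
    """ Minimal concrete adapter exposing a single 'policy_evaluate' queue """
    @staticmethod
    def get_signature():
        """ Returns the signature of all the possible calls using this adapter """
        return {'policy_evaluate': {'placeholders': {'decoder_type': ([SAMPLE_DECODER], np.uint8)},
                                    'outputs': ['selected_tokens', 'log_probs', 'draw_prob']}}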
|
|
from . import AWSObject, AWSProperty
from .validators import (
boolean, integer, network_port, positive_integer, ecs_proxy_type
)
LAUNCH_TYPE_EC2 = 'EC2'
LAUNCH_TYPE_FARGATE = 'FARGATE'
SCHEDULING_STRATEGY_REPLICA = 'REPLICA'
SCHEDULING_STRATEGY_DAEMON = 'DAEMON'
class Cluster(AWSObject):
resource_type = "AWS::ECS::Cluster"
props = {
'ClusterName': (basestring, False),
}
class LoadBalancer(AWSProperty):
props = {
'ContainerName': (basestring, False),
'ContainerPort': (network_port, True),
'LoadBalancerName': (basestring, False),
'TargetGroupArn': (basestring, False),
}
class DeploymentConfiguration(AWSProperty):
props = {
'MaximumPercent': (positive_integer, False),
'MinimumHealthyPercent': (positive_integer, False),
}
def placement_strategy_validator(x):
valid_values = ['random', 'spread', 'binpack']
if x not in valid_values:
raise ValueError("Placement Strategy type must be one of: %s" %
', '.join(valid_values))
return x
def placement_constraint_validator(x):
valid_values = ['distinctInstance', 'memberOf']
if x not in valid_values:
raise ValueError("Placement Constraint type must be one of: %s" %
', '.join(valid_values))
return x
def scope_validator(x):
valid_values = ['shared', 'task']
if x not in valid_values:
raise ValueError("Scope type must be one of: %s" %
', '.join(valid_values))
return x
class PlacementConstraint(AWSProperty):
props = {
'Type': (placement_constraint_validator, True),
'Expression': (basestring, False),
}
class PlacementStrategy(AWSProperty):
props = {
'Type': (placement_strategy_validator, True),
'Field': (basestring, False),
}
class AwsvpcConfiguration(AWSProperty):
props = {
'AssignPublicIp': (basestring, False),
'SecurityGroups': (list, False),
'Subnets': (list, True),
}
class NetworkConfiguration(AWSProperty):
props = {
'AwsvpcConfiguration': (AwsvpcConfiguration, False),
}
def launch_type_validator(x):
valid_values = [LAUNCH_TYPE_EC2, LAUNCH_TYPE_FARGATE]
if x not in valid_values:
raise ValueError("Launch Type must be one of: %s" %
', '.join(valid_values))
return x
class ServiceRegistry(AWSProperty):
props = {
'ContainerName': (basestring, False),
'ContainerPort': (integer, False),
'Port': (integer, False),
'RegistryArn': (basestring, False),
}
class Service(AWSObject):
resource_type = "AWS::ECS::Service"
props = {
'Cluster': (basestring, False),
'DeploymentConfiguration': (DeploymentConfiguration, False),
'DesiredCount': (positive_integer, False),
'HealthCheckGracePeriodSeconds': (positive_integer, False),
'LaunchType': (launch_type_validator, False),
'LoadBalancers': ([LoadBalancer], False),
'NetworkConfiguration': (NetworkConfiguration, False),
'Role': (basestring, False),
'PlacementConstraints': ([PlacementConstraint], False),
'PlacementStrategies': ([PlacementStrategy], False),
'PlatformVersion': (basestring, False),
'SchedulingStrategy': (basestring, False),
'ServiceName': (basestring, False),
'ServiceRegistries': ([ServiceRegistry], False),
'TaskDefinition': (basestring, True),
}
class Environment(AWSProperty):
props = {
'Name': (basestring, True),
'Value': (basestring, True),
}
class MountPoint(AWSProperty):
props = {
'ContainerPath': (basestring, True),
'SourceVolume': (basestring, True),
'ReadOnly': (boolean, False),
}
class PortMapping(AWSProperty):
props = {
'ContainerPort': (network_port, True),
'HostPort': (network_port, False),
'Protocol': (basestring, False),
}
class VolumesFrom(AWSProperty):
props = {
'SourceContainer': (basestring, True),
'ReadOnly': (boolean, False),
}
class HostEntry(AWSProperty):
props = {
'Hostname': (basestring, True),
'IpAddress': (basestring, True),
}
class Device(AWSProperty):
props = {
'ContainerPath': (basestring, False),
'HostPath': (basestring, False),
'Permissions': ([basestring], False),
}
class HealthCheck(AWSProperty):
props = {
'Command': ([basestring], True),
'Interval': (integer, False),
'Retries': (integer, False),
'StartPeriod': (integer, False),
'Timeout': (integer, False),
}
class KernelCapabilities(AWSProperty):
props = {
'Add': ([basestring], False),
'Drop': ([basestring], False),
}
class Tmpfs(AWSProperty):
props = {
'ContainerPath': (basestring, False),
'MountOptions': ([basestring], False),
'Size': (integer, False),
}
class LinuxParameters(AWSProperty):
props = {
'Capabilities': (KernelCapabilities, False),
'Devices': ([Device], False),
'InitProcessEnabled': (boolean, False),
'SharedMemorySize': (integer, False),
'Tmpfs': ([Tmpfs], False),
}
class LogConfiguration(AWSProperty):
props = {
'LogDriver': (basestring, True),
'Options': (dict, False),
}
class RepositoryCredentials(AWSProperty):
props = {
'CredentialsParameter': (basestring, False)
}
class Ulimit(AWSProperty):
props = {
'HardLimit': (integer, True),
'Name': (basestring, False),
'SoftLimit': (integer, True),
}
class ContainerDependency(AWSProperty):
props = {
'Condition': (basestring, True),
'ContainerName': (basestring, True)
}
class ContainerDefinition(AWSProperty):
props = {
'Command': ([basestring], False),
'Cpu': (positive_integer, False),
'DependsOn': ([ContainerDependency], False),
'DisableNetworking': (boolean, False),
'DnsSearchDomains': ([basestring], False),
'DnsServers': ([basestring], False),
'DockerLabels': (dict, False),
'DockerSecurityOptions': ([basestring], False),
'EntryPoint': ([basestring], False),
'Environment': ([Environment], False),
'Essential': (boolean, False),
'ExtraHosts': ([HostEntry], False),
'HealthCheck': (HealthCheck, False),
'Hostname': (basestring, False),
'Image': (basestring, True),
'Links': ([basestring], False),
'LinuxParameters': (LinuxParameters, False),
'LogConfiguration': (LogConfiguration, False),
'Memory': (positive_integer, False),
'MemoryReservation': (positive_integer, False),
'MountPoints': ([MountPoint], False),
'Name': (basestring, True),
'PortMappings': ([PortMapping], False),
'Privileged': (boolean, False),
'ReadonlyRootFilesystem': (boolean, False),
'RepositoryCredentials': (RepositoryCredentials, False),
'StartTimeout': (integer, False),
'StopTimeout': (integer, False),
'Ulimits': ([Ulimit], False),
'User': (basestring, False),
'VolumesFrom': ([VolumesFrom], False),
'WorkingDirectory': (basestring, False),
}
class Host(AWSProperty):
props = {
'SourcePath': (basestring, False),
}
class DockerVolumeConfiguration(AWSProperty):
props = {
'Autoprovision': (boolean, False),
'Driver': (basestring, False),
'DriverOpts': (dict, False),
'Labels': (dict, False),
'Scope': (scope_validator, False)
}
class Volume(AWSProperty):
props = {
'DockerVolumeConfiguration': (DockerVolumeConfiguration, False),
'Name': (basestring, True),
'Host': (Host, False),
}
class ProxyConfiguration(AWSProperty):
props = {
'ContainerName': (basestring, True),
'ProxyConfigurationProperties': (list, False),
'Type': (ecs_proxy_type, False)
}
class TaskDefinition(AWSObject):
resource_type = "AWS::ECS::TaskDefinition"
props = {
'ContainerDefinitions': ([ContainerDefinition], True),
'Cpu': (basestring, False),
'ExecutionRoleArn': (basestring, False),
'Family': (basestring, False),
'Memory': (basestring, False),
'NetworkMode': (basestring, False),
'PlacementConstraints': ([PlacementConstraint], False),
'RequiresCompatibilities': ([basestring], False),
'TaskRoleArn': (basestring, False),
'Volumes': ([Volume], False),
'ProxyConfiguration': (ProxyConfiguration, False)
}
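# --- Illustrative template sketch (not part of the original module) ---
# Shows how the resources above compose into a minimal Fargate-style stack.
# It assumes the troposphere package's Template/Ref API (add_resource, to_json)
# is available; the logical names, image and subnet id are placeholders, not
# values taken from the original source.
if __name__ == '__main__':
    from troposphere import Ref, Template

    template = Template()
    cluster = template.add_resource(Cluster('DemoCluster'))
    task_definition = template.add_resource(TaskDefinition(
        'DemoTaskDefinition',
        RequiresCompatibilities=[LAUNCH_TYPE_FARGATE],
        Cpu='256',
        Memory='512',
        NetworkMode='awsvpc',
        ContainerDefinitions=[ContainerDefinition(
            Name='app',
            Image='nginx:latest',                       # placeholder image
            PortMappings=[PortMapping(ContainerPort=80)])]))
    template.add_resource(Service(
        'DemoService',
        Cluster=Ref(cluster),
        LaunchType=LAUNCH_TYPE_FARGATE,
        DesiredCount=1,
        TaskDefinition=Ref(task_definition),
        NetworkConfiguration=NetworkConfiguration(
            AwsvpcConfiguration=AwsvpcConfiguration(
                Subnets=['subnet-00000000']))))         # placeholder subnet
    print(template.to_json())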
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
linalg = linalg_lib
@test_util.run_all_in_graph_and_eager_modes
class SquareLinearOperatorFullMatrixTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
shape = list(build_info.shape)
matrix = linear_operator_test_util.random_positive_definite_matrix(
shape, dtype)
lin_op_matrix = matrix
if use_placeholder:
lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)
# Set the hints to none to test non-symmetric PD code paths.
operator = linalg.LinearOperatorFullMatrix(
lin_op_matrix,
is_square=True,
is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
is_positive_definite=True if ensure_self_adjoint_and_pd else None)
return operator, matrix
def test_is_x_flags(self):
# Matrix with two positive eigenvalues.
matrix = [[1., 0.], [1., 11.]]
operator = linalg.LinearOperatorFullMatrix(
matrix,
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertFalse(operator.is_self_adjoint)
# Auto-detected.
self.assertTrue(operator.is_square)
def test_assert_non_singular_raises_if_cond_too_big_but_finite(self):
with self.cached_session():
tril = linear_operator_test_util.random_tril_matrix(
shape=(50, 50), dtype=np.float32)
diag = np.logspace(-2, 2, 50).astype(np.float32)
tril = array_ops.matrix_set_diag(tril, diag)
matrix = self.evaluate(math_ops.matmul(tril, tril, transpose_b=True))
operator = linalg.LinearOperatorFullMatrix(matrix)
with self.assertRaisesOpError("Singular matrix"):
# Ensure that we have finite condition number...just HUGE.
cond = np.linalg.cond(matrix)
self.assertTrue(np.isfinite(cond))
self.assertGreater(cond, 1e12)
operator.assert_non_singular().run()
def test_assert_non_singular_raises_if_cond_infinite(self):
with self.cached_session():
matrix = [[1., 1.], [1., 1.]]
# We don't pass the is_self_adjoint hint here, which means we take the
# generic code path.
operator = linalg.LinearOperatorFullMatrix(matrix)
with self.assertRaisesOpError("Singular matrix"):
operator.assert_non_singular().run()
def test_assert_self_adjoint(self):
matrix = [[0., 1.], [0., 1.]]
operator = linalg.LinearOperatorFullMatrix(matrix)
with self.cached_session():
with self.assertRaisesOpError("not equal to its adjoint"):
operator.assert_self_adjoint().run()
@test_util.disable_xla("Assert statements in kernels not supported in XLA")
def test_assert_positive_definite(self):
matrix = [[1., 1.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(matrix, is_self_adjoint=True)
with self.cached_session():
with self.assertRaises(errors.InvalidArgumentError):
operator.assert_positive_definite().run()
def test_tape_safe(self):
matrix = variables_module.Variable([[2.]])
operator = linalg.LinearOperatorFullMatrix(matrix)
self.check_tape_safe(operator)
@test_util.run_all_in_graph_and_eager_modes
class SquareLinearOperatorFullMatrixSymmetricPositiveDefiniteTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest.
In this test, the operator is constructed with hints that invoke the use of
a Cholesky decomposition for solves/determinant.
"""
def setUp(self):
# Increase the tolerances from 1e-6 to 1e-5. This loosening is needed,
# presumably, because we are taking a different code path in the operator
# and the matrix. The operator uses a Cholesky, the matrix uses standard
# solve.
self._atol[dtypes.float32] = 1e-5
self._rtol[dtypes.float32] = 1e-5
self._atol[dtypes.float64] = 1e-10
self._rtol[dtypes.float64] = 1e-10
@staticmethod
def dtypes_to_test():
return [dtypes.float32, dtypes.float64]
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
# Matrix is always symmetric and positive definite in this class.
del ensure_self_adjoint_and_pd
shape = list(build_info.shape)
matrix = linear_operator_test_util.random_positive_definite_matrix(
shape, dtype, force_well_conditioned=True)
lin_op_matrix = matrix
if use_placeholder:
lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)
operator = linalg.LinearOperatorFullMatrix(
lin_op_matrix,
is_square=True,
is_self_adjoint=True,
is_positive_definite=True)
return operator, matrix
def test_is_x_flags(self):
# Matrix with two positive eigenvalues.
matrix = [[1., 0.], [0., 7.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_positive_definite=True, is_self_adjoint=True)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_self_adjoint)
# Should be auto-set
self.assertTrue(operator.is_non_singular)
self.assertTrue(operator._can_use_cholesky)
self.assertTrue(operator.is_square)
@test_util.disable_xla("Assert statements in kernels not supported in XLA")
def test_assert_non_singular(self):
matrix = [[1., 1.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_self_adjoint=True, is_positive_definite=True)
with self.cached_session():
# Cholesky decomposition may fail, so the error is not specific to
# non-singular.
with self.assertRaisesOpError(""):
operator.assert_non_singular().run()
def test_assert_self_adjoint(self):
matrix = [[0., 1.], [0., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_self_adjoint=True, is_positive_definite=True)
with self.cached_session():
with self.assertRaisesOpError("not equal to its adjoint"):
operator.assert_self_adjoint().run()
@test_util.disable_xla("Assert statements in kernels not supported in XLA")
def test_assert_positive_definite(self):
matrix = [[1., 1.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_self_adjoint=True, is_positive_definite=True)
with self.cached_session():
# Cholesky decomposition may fail, so the error is not specific to
# non-singular.
with self.assertRaisesOpError(""):
operator.assert_positive_definite().run()
def test_tape_safe(self):
matrix = variables_module.Variable([[2.]])
operator = linalg.LinearOperatorFullMatrix(
matrix, is_self_adjoint=True, is_positive_definite=True)
self.check_tape_safe(operator)
@test_util.run_all_in_graph_and_eager_modes
class NonSquareLinearOperatorFullMatrixTest(
linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
del ensure_self_adjoint_and_pd
shape = list(build_info.shape)
matrix = linear_operator_test_util.random_normal(shape, dtype=dtype)
lin_op_matrix = matrix
if use_placeholder:
lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)
operator = linalg.LinearOperatorFullMatrix(lin_op_matrix, is_square=True)
return operator, matrix
def test_is_x_flags(self):
matrix = [[3., 2., 1.], [1., 1., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix,
is_self_adjoint=False)
self.assertEqual(operator.is_positive_definite, None)
self.assertEqual(operator.is_non_singular, None)
self.assertFalse(operator.is_self_adjoint)
self.assertFalse(operator.is_square)
def test_matrix_must_have_at_least_two_dims_or_raises(self):
with self.assertRaisesRegex(ValueError, "at least 2 dimensions"):
linalg.LinearOperatorFullMatrix([1.])
def test_tape_safe(self):
matrix = variables_module.Variable([[2., 1.]])
operator = linalg.LinearOperatorFullMatrix(matrix)
self.check_tape_safe(operator)
if __name__ == "__main__":
linear_operator_test_util.add_tests(SquareLinearOperatorFullMatrixTest)
linear_operator_test_util.add_tests(NonSquareLinearOperatorFullMatrixTest)
linear_operator_test_util.add_tests(
SquareLinearOperatorFullMatrixSymmetricPositiveDefiniteTest)
test.main()
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class SampleList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, assistant_sid, task_sid):
"""
Initialize the SampleList
:param Version version: Version that contains the resource
:param assistant_sid: The SID of the Assistant that is the parent of the Task associated with the resource
:param task_sid: The SID of the Task associated with the resource
:returns: twilio.rest.autopilot.v1.assistant.task.sample.SampleList
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleList
"""
super(SampleList, self).__init__(version)
# Path Solution
self._solution = {'assistant_sid': assistant_sid, 'task_sid': task_sid, }
self._uri = '/Assistants/{assistant_sid}/Tasks/{task_sid}/Samples'.format(**self._solution)
def stream(self, language=values.unset, limit=None, page_size=None):
"""
Streams SampleInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode language: The ISO language-country string that specifies the language used for the sample
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(language=language, page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, language=values.unset, limit=None, page_size=None):
"""
Lists SampleInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode language: The ISO language-country string that specifies the language used for the sample
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: List that will contain up to limit results
:rtype: list[twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance]
"""
return list(self.stream(language=language, limit=limit, page_size=page_size, ))
def page(self, language=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of SampleInstance records from the API.
Request is executed immediately
:param unicode language: The ISO language-country string that specifies the language used for the sample
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of SampleInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SamplePage
"""
params = values.of({
'Language': language,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return SamplePage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of SampleInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of SampleInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SamplePage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return SamplePage(self._version, response, self._solution)
def create(self, language, tagged_text, source_channel=values.unset):
"""
Create a new SampleInstance
:param unicode language: The ISO language-country string that specifies the language used for the new sample
:param unicode tagged_text: The text example of how end users might express the task
:param unicode source_channel: The communication channel from which the new sample was captured
:returns: Newly created SampleInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance
"""
data = values.of({'Language': language, 'TaggedText': tagged_text, 'SourceChannel': source_channel, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
)
def get(self, sid):
"""
Constructs a SampleContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.autopilot.v1.assistant.task.sample.SampleContext
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleContext
"""
return SampleContext(
self._version,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a SampleContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.autopilot.v1.assistant.task.sample.SampleContext
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleContext
"""
return SampleContext(
self._version,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Autopilot.V1.SampleList>'
class SamplePage(Page):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, response, solution):
"""
Initialize the SamplePage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param assistant_sid: The SID of the Assistant that is the parent of the Task associated with the resource
:param task_sid: The SID of the Task associated with the resource
:returns: twilio.rest.autopilot.v1.assistant.task.sample.SamplePage
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SamplePage
"""
super(SamplePage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of SampleInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance
"""
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Autopilot.V1.SamplePage>'
class SampleContext(InstanceContext):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, assistant_sid, task_sid, sid):
"""
Initialize the SampleContext
:param Version version: Version that contains the resource
:param assistant_sid: The SID of the Assistant that is the parent of the Task associated with the resource to fetch
:param task_sid: The SID of the Task associated with the Sample resource to create
:param sid: The unique string that identifies the resource
:returns: twilio.rest.autopilot.v1.assistant.task.sample.SampleContext
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleContext
"""
super(SampleContext, self).__init__(version)
# Path Solution
self._solution = {'assistant_sid': assistant_sid, 'task_sid': task_sid, 'sid': sid, }
self._uri = '/Assistants/{assistant_sid}/Tasks/{task_sid}/Samples/{sid}'.format(**self._solution)
def fetch(self):
"""
Fetch a SampleInstance
:returns: Fetched SampleInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
def update(self, language=values.unset, tagged_text=values.unset,
source_channel=values.unset):
"""
Update the SampleInstance
:param unicode language: The ISO language-country string that specifies the language used for the sample
:param unicode tagged_text: The text example of how end users might express the task
:param unicode source_channel: The communication channel from which the sample was captured
:returns: Updated SampleInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance
"""
data = values.of({'Language': language, 'TaggedText': tagged_text, 'SourceChannel': source_channel, })
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the SampleInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Autopilot.V1.SampleContext {}>'.format(context)
class SampleInstance(InstanceResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, payload, assistant_sid, task_sid, sid=None):
"""
Initialize the SampleInstance
:returns: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance
"""
super(SampleInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'task_sid': payload.get('task_sid'),
'language': payload.get('language'),
'assistant_sid': payload.get('assistant_sid'),
'sid': payload.get('sid'),
'tagged_text': payload.get('tagged_text'),
'url': payload.get('url'),
'source_channel': payload.get('source_channel'),
}
# Context
self._context = None
self._solution = {
'assistant_sid': assistant_sid,
'task_sid': task_sid,
'sid': sid or self._properties['sid'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SampleContext for this SampleInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleContext
"""
if self._context is None:
self._context = SampleContext(
self._version,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def date_created(self):
"""
:returns: The RFC 2822 date and time in GMT when the resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The RFC 2822 date and time in GMT when the resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def task_sid(self):
"""
:returns: The SID of the Task associated with the resource
:rtype: unicode
"""
return self._properties['task_sid']
@property
def language(self):
"""
:returns: An ISO language-country string that specifies the language used for the sample
:rtype: unicode
"""
return self._properties['language']
@property
def assistant_sid(self):
"""
:returns: The SID of the Assistant that is the parent of the Task associated with the resource
:rtype: unicode
"""
return self._properties['assistant_sid']
@property
def sid(self):
"""
:returns: The unique string that identifies the resource
:rtype: unicode
"""
return self._properties['sid']
@property
def tagged_text(self):
"""
:returns: The text example of how end users might express the task
:rtype: unicode
"""
return self._properties['tagged_text']
@property
def url(self):
"""
:returns: The absolute URL of the Sample resource
:rtype: unicode
"""
return self._properties['url']
@property
def source_channel(self):
"""
:returns: The communication channel from which the sample was captured
:rtype: unicode
"""
return self._properties['source_channel']
def fetch(self):
"""
Fetch a SampleInstance
:returns: Fetched SampleInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance
"""
return self._proxy.fetch()
def update(self, language=values.unset, tagged_text=values.unset,
source_channel=values.unset):
"""
Update the SampleInstance
:param unicode language: The ISO language-country string that specifies the language used for the sample
:param unicode tagged_text: The text example of how end users might express the task
:param unicode source_channel: The communication channel from which the sample was captured
:returns: Updated SampleInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance
"""
return self._proxy.update(language=language, tagged_text=tagged_text, source_channel=source_channel, )
def delete(self):
"""
Deletes the SampleInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Autopilot.V1.SampleInstance {}>'.format(context)
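# A rough usage sketch (not part of the generated library): the Sample
# resources above are normally reached through the top-level Twilio Client.
# The credentials and SIDs below are placeholders.
#
# from twilio.rest import Client
#
# client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
# samples = client.autopilot.v1 \
#     .assistants('UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
#     .tasks('UDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
#     .samples
# sample = samples.create(language='en-US',
#                         tagged_text='order a large pepperoni pizza')
# for s in samples.list(language='en-US', limit=20):
#     print(s.sid, s.tagged_text)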
|
|
import math
import multiprocessing
import os
from abaqus import mdb, session
import part, mesh
from abaqusConstants import *
import regionToolset
class ImpactTestKernel():
# Initialize the impact test kernel based on the configuration passed in
def __init__(self, config, modelName="Model-1"):
# Model name - used both as model's name, job's name and input file name
self.modelName = str(modelName)
# Create new model database if not default
if modelName != "Model-1":
mdb.Model(self.modelName)
# If the model is not the default one, parts and materials must be imported again
from ImpactTestGUI import importMaterials, importParts
importMaterials(self.modelName)
importParts(self.modelName)
del mdb.models['Model-1']
# Type of projectile - describing subdirectory name
self.projectileType = str(config['projectile']['type'])
# Projectile's velocity in [m/s]
self.projectileVelocity = config['projectile']['velocity']
# Target obliquity in [deg] - 0 means normal to projectile's direction
self.targetObliquity = config['armor']['obliquity']
# Target semi-minor axis in [m]
self.targetRadius = config['armor']['radius']
# Semi-minor axis of the target's central, finely meshed region in [m]
self.targetInnerRadius = config['armor']['innerRadius']
# List of target layers - describing layers thickness in [m] and material
self.targetLayers = config['armor']['layers']
# Average mesh element size in [m] used to seed parts
self.meshElementSize = config['meshElementSize']
# Failure coefficient to adjust material properties easily
self.failureCoefficient = config['failureCoefficient']
# Auxiliary list to store layer names, thicknesses and spacings in [m]
self.assemblyOrder = []
# Auxiliary list of projectile component names
self.projectileComponents = []
# Perform all possible steps of model preparation
def run(self):
self.adjustDisplacementsAtFailure()
self.setModelConstants()
self.prepareProjectileParts()
self.createTargetParts()
self.createModelAssembly()
self.createProjectileMesh()
self.createTargetMesh()
self.createFakeSurfaceSets()
self.createInteractionProperties()
self.createInteractions()
self.createTieConstraints()
self.applyInitialFields()
self.applyBoundaryConditions()
self.createStep()
self.adjustOutputs()
self.createJob()
# self.injectContactToInput()
# Set absolute zero temperature and Stefan-Boltzmann constant
def setModelConstants(self):
mdb.models[self.modelName].setValues(
# Temperatures will be treated as [K]
absoluteZero=0,
stefanBoltzmann=5.67037E-008
)
# Create separate part for each target layer
def createTargetParts(self):
self.__createTargetSketches()
i = 1
for layer in self.targetLayers:
# Provide uniform target layer naming convention
name = 'Target-L' + str(i).zfill(3)
inner_name = name + "I"
outer_name = name + "O"
path_name = name + "P"
# Create deformable, three dimensional solids from common target sketches
# Inner part
part = mdb.models[self.modelName].Part(
inner_name,
dimensionality=THREE_D,
type=DEFORMABLE_BODY)
part.BaseShell(
mdb.models[self.modelName].sketches['Target-Sketch-Inner'],
)
part.DatumCsysByDefault(CARTESIAN)
part.ReferencePoint(
point=part.InterestingPoint(
edge=part.edges[0],
rule=CENTER
)
)
# Create sweep path
mdb.models[self.modelName].parts[inner_name].DatumPlaneByPrincipalPlane(principalPlane=YZPLANE, offset=0.0)
mdb.models[self.modelName].parts[inner_name].DatumAxisByPrincipalAxis(principalAxis=YAXIS)
part = mdb.models[self.modelName].parts[inner_name]
plane = part.datums[4]
axis = part.datums[5]
transform = part.MakeSketchTransform(
sketchPlane=plane,
sketchUpEdge=axis,
sketchPlaneSide=SIDE1,
sketchOrientation=RIGHT,
origin=(0.0, 0.0, 0.0)
)
sweepPath = mdb.models[self.modelName].ConstrainedSketch(
path_name,
layer['thickness'] * 2.0,
transform=transform
)
part.projectReferencesOntoSketch(sketch=sweepPath, filter=COPLANAR_EDGES)
sweepPath.Line(
point1=
(
0.0,
0.0
),
point2=
(
-layer['thickness'],
-layer['thickness'] * math.sin(math.pi * self.targetObliquity / 180.0)
)
)
part.SolidSweep(
pathPlane=plane,
pathUpEdge=axis,
profile=part.faces[0],
pathOrientation=RIGHT,
path=sweepPath
)
del sweepPath
# Assign target layer its material
section = mdb.models[self.modelName].HomogeneousSolidSection(
inner_name,
str(layer['material'])
)
part.SectionAssignment(
sectionName=inner_name,
region=regionToolset.Region(
cells=part.cells.getSequenceFromMask(
mask=
(
'[#1 ]',
),
)
)
)
# Outer part
part = mdb.models[self.modelName].Part(
outer_name,
dimensionality=THREE_D,
type=DEFORMABLE_BODY)
part.BaseShell(
mdb.models[self.modelName].sketches['Target-Sketch-Outer']
)
part.DatumCsysByDefault(CARTESIAN)
part.ReferencePoint(
point=part.InterestingPoint(
edge=part.edges[0],
rule=CENTER
)
)
# Create sweep path
mdb.models[self.modelName].parts[outer_name].DatumPlaneByPrincipalPlane(principalPlane=YZPLANE, offset=0.0)
mdb.models[self.modelName].parts[outer_name].DatumAxisByPrincipalAxis(principalAxis=YAXIS)
part = mdb.models[self.modelName].parts[outer_name]
plane = part.datums[4]
axis = part.datums[5]
transform = part.MakeSketchTransform(
sketchPlane=plane,
sketchUpEdge=axis,
sketchPlaneSide=SIDE1,
sketchOrientation=RIGHT,
origin=(0.0, 0.0, 0.0)
)
sweepPath = mdb.models[self.modelName].ConstrainedSketch(
path_name,
layer['thickness'] * 2.0,
transform=transform
)
part.projectReferencesOntoSketch(sketch=sweepPath, filter=COPLANAR_EDGES)
sweepPath.Line(
point1=
(
0.0,
0.0
),
point2=
(
-layer['thickness'],
-layer['thickness'] * math.sin(math.pi * self.targetObliquity / 180.0)
)
)
part.SolidSweep(
pathPlane=plane,
pathUpEdge=axis,
profile=part.faces[0],
pathOrientation=RIGHT,
path=sweepPath
)
del sweepPath
# Assign target layer its material
section = mdb.models[self.modelName].HomogeneousSolidSection(
outer_name,
str(layer['material'])
)
part.SectionAssignment(
sectionName=outer_name,
region=regionToolset.Region(
cells=part.cells.getSequenceFromMask(
mask=
(
'[#1 ]',
),
)
)
)
i += 1
# Add layer name and thickness to auxiliary layer list
self.assemblyOrder.append(
(
name,
layer['thickness'],
layer['spacing']
)
)
# Cut outer target layer in two
self.__partitionTargetLayer(part)
# Create model assembly out of target layers and projectile core and casing
def createModelAssembly(self):
assembly = mdb.models[self.modelName].rootAssembly
assembly.DatumCsysByDefault(CARTESIAN)
offset = 0.0
previousSpacing = 0.0
# Create instances of target layers placed one behind another
for element in self.assemblyOrder:
name = element[0]
inner_name = name + "I"
outer_name = name + "O"
thickness = element[1]
spacing = element[2]
offset -= thickness + previousSpacing
verticalOffset = -math.sin(math.pi * self.targetObliquity / 180.0) * offset
# Outer target part instance
part = mdb.models[self.modelName].parts[outer_name]
assembly.Instance(
name=outer_name,
part=part,
dependent=ON
)
# Inner target part instance
part = mdb.models[self.modelName].parts[inner_name]
assembly.Instance(
name=inner_name,
part=part,
dependent=ON
)
assembly.translate(
instanceList=
(
outer_name,
inner_name
),
vector=
(
0.0,
verticalOffset,
offset
)
)
previousSpacing = spacing
offset = self.assemblyOrder[0][1]
# Projectile offset preventing possible overlapping with target
stdOffset = 0.0005 + offset / math.cos(math.pi * self.targetObliquity / 180.0)
xyzOffset = (
0.0,
0.0,
stdOffset
)
# Projectile's center of rotation placed in the middle of target's first layer's thickness
axisPt = (
0.0,
0.0,
-offset / 2.0
)
axisDir = (
1.0,
0.0,
0.0
)
# Create instances of projectile casing and core
for part in self.projectileComponents:
assembly.Instance(
name=part,
part=mdb.models[self.modelName].parts[part],
dependent=ON
)
# Translate projectile away from the target
assembly.translate(
instanceList=
self.projectileComponents,
vector=xyzOffset
)
# Rotate projectile to introduce target's obliquity
assembly.rotate(
instanceList=self.projectileComponents,
axisPoint=axisPt,
axisDirection=axisDir,
angle=self.targetObliquity
)
# Translate the projectile downward to compensate for possible slipping/ricochet
xyzOffset = (
0.0,
-0.375 * self.targetInnerRadius * math.sin(math.pi * self.targetObliquity / 180.0),
0.0
)
assembly.translate(
instanceList=
self.projectileComponents,
vector=xyzOffset
)
# Create job for the model
def createJob(self):
# Allow use of multiple CPUs/cores
cpus=multiprocessing.cpu_count()
job = mdb.Job(
name=self.modelName,
model=self.modelName,
description='',
type=ANALYSIS,
atTime=None,
waitMinutes=0,
waitHours=0,
queue=None,
memory=90,
memoryUnits=PERCENTAGE,
getMemoryFromAnalysis=True,
explicitPrecision=SINGLE,
nodalOutputPrecision=SINGLE,
echoPrint=OFF,
modelPrint=OFF,
contactPrint=OFF,
historyPrint=OFF,
userSubroutine='',
scratch='',
resultsFormat=ODB,
parallelizationMethodExplicit=DOMAIN,
numDomains=cpus,
activateLoadBalancing=False,
multiprocessingMode=DEFAULT,
numCpus=cpus
)
job.writeInput(consistencyChecking=OFF)
# Create simulation step for impact and penetration phase
def createStep(self):
mdb.models[self.modelName].TempDisplacementDynamicsStep(
name='Impact',
previous='Initial',
timePeriod=self.__calculateTargetAbsoluteThickness() * 25.0 / self.projectileVelocity
)
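# Illustrative check of the step time formula above (values are examples only):
# a single 0.02 [m] layer with no spacing and an 800 [m/s] projectile gives
# 0.02 * 25.0 / 800 = 6.25e-4 [s] of simulated impact time.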
# Create proper field/history output requests
def adjustOutputs(self):
mdb.models[self.modelName].historyOutputRequests['H-Output-1'].setValues(
numIntervals=1000
)
mdb.models[self.modelName].fieldOutputRequests['F-Output-1'].setValues(
variables=(
'S',
'SVAVG',
'PE',
'ER',
'ERV',
'PEVAVG',
'PEEQ',
'PEEQVAVG',
'LE',
'U',
'V',
'A',
'RF',
'CSTRESS',
'NT',
'HFL',
'RFL',
'EVF',
'STATUS',
'SDEG',
),
numIntervals=1000
)
# Create appropriate boundary conditions for the model
def applyBoundaryConditions(self):
self.__encastreTargetSides()
# Create initial fields of velocity and temperature
def applyInitialFields(self):
self.__applyProjectileVelocity()
self.__applyInitialTemperature()
# Create common interaction properties assuming a friction coefficient equal to 0.05 [-] and a thermal
# conductivity equal to 50 [W/(m*K)]
def createInteractionProperties(self):
mdb.models[self.modelName].ContactProperty('InteractionProperties')
mdb.models[self.modelName].interactionProperties['InteractionProperties'].TangentialBehavior(
formulation=PENALTY,
directionality=ISOTROPIC,
slipRateDependency=OFF,
pressureDependency=OFF,
temperatureDependency=OFF,
dependencies=0,
table=(
(
0.05,
),
),
shearStressLimit=None,
maximumElasticSlip=FRACTION,
fraction=0.005,
elasticSlipStiffness=None
)
mdb.models[self.modelName].interactionProperties['InteractionProperties'].ThermalConductance(
definition=TABULAR,
clearanceDependency=ON,
pressureDependency=OFF,
temperatureDependencyC=OFF,
massFlowRateDependencyC=OFF,
dependenciesC=0,
clearanceDepTable=(
(
50.0,
0.0
),
(
0.0,
0.001
)
)
)
# Mesh each target layer
def createTargetMesh(self):
for element in self.assemblyOrder:
name = element[0]
inner_part = mdb.models[self.modelName].parts[name + "I"]
outer_part = mdb.models[self.modelName].parts[name + "O"]
# Make outer, coarsely meshed region structured
regions = outer_part.cells.getSequenceFromMask(
mask=
(
'[#1 ]',
),
)
outer_part.setMeshControls(
regions=regions,
technique=STRUCTURED
)
# Make inner, finely meshed region medial-axis swept
regions = inner_part.cells.getSequenceFromMask(
mask=
(
'[#1 ]',
),
)
inner_part.setMeshControls(
regions=regions,
algorithm=MEDIAL_AXIS
)
# Seed inner part with default element size
inner_part.seedPart(
size=self.meshElementSize,
deviationFactor=0.1,
minSizeFactor=0.1
)
# Seed outer part with large element size
outer_part.seedPart(
size=self.meshElementSize * 4.0,
deviationFactor=0.1,
minSizeFactor=0.1
)
# Assign all target parts C3D8RT explicit element type with hourglass control and element deletion enabled
elemType1 = mesh.ElemType(
elemCode=C3D8RT,
elemLibrary=EXPLICIT,
kinematicSplit=AVERAGE_STRAIN,
secondOrderAccuracy=OFF,
hourglassControl=ENHANCED,
distortionControl=DEFAULT,
elemDeletion=ON,
maxDegradation=0.99
)
inner_part.setElementType(
regions=(
inner_part.cells,
),
elemTypes=(
elemType1,
)
)
outer_part.setElementType(
regions=(
outer_part.cells,
),
elemTypes=(
elemType1,
)
)
# Mesh part
inner_part.generateMesh()
outer_part.generateMesh()
# Mesh projectile's core and casing
def createProjectileMesh(self):
for part in self.projectileComponents:
part = mdb.models[self.modelName].parts[part]
# Make projectile's part TET free meshed - more refined meshes have to be applied manually
part_cells = part.cells.getSequenceFromMask(
mask=
(
'[#1 ]',
),
)
part.setMeshControls(
regions=part_cells,
elemShape=TET,
technique=FREE
)
# Seed part with default mesh element size
part.seedPart(
size=self.meshElementSize,
deviationFactor=0.1,
minSizeFactor=0.1
)
# Assign part C3D4T explicit element type
part.setElementType(
regions=(
part_cells,
),
elemTypes=(
mesh.ElemType(
elemCode=C3D4T,
elemLibrary=EXPLICIT,
secondOrderAccuracy=OFF,
elemDeletion=ON,
maxDegradation=0.99
),
)
)
# Mesh part
part.generateMesh()
# Create common outer and inner target part sketches for all target layers
def __createTargetSketches(self):
# Conversion from [deg] to [rad]
radians = math.pi * self.targetObliquity / 180.0
# Stretch ratio reducing risk of projectile entering coarsely meshed area of target
stretch = 1.0 / math.cos(radians)
# Create elliptic target sketch
sketch = mdb.models[self.modelName].ConstrainedSketch('Target-Sketch-Outer', self.targetRadius * 2.0)
# Outer bound
sketch.EllipseByCenterPerimeter(
center=
(
0.0,
0.0
),
axisPoint1=
(
0.0,
self.targetRadius * stretch
),
axisPoint2=
(
self.targetRadius,
0.0
)
)
# Inner bound
sketch.EllipseByCenterPerimeter(
center=
(
0.0,
0.0
),
axisPoint1=
(
0.0,
self.targetInnerRadius * stretch
),
axisPoint2=
(
self.targetInnerRadius,
0.0
)
)
# Create elliptic target partition sketch
innerSketch = mdb.models[self.modelName].ConstrainedSketch(
'Target-Sketch-Inner',
self.targetInnerRadius
)
innerSketch.EllipseByCenterPerimeter(
center=
(
0.0,
0.0
),
axisPoint1=
(
0.0,
self.targetInnerRadius * stretch
),
axisPoint2=
(
self.targetInnerRadius,
0.0
)
)
# Combine thicknesses of all target layers
def __calculateTargetAbsoluteThickness(self):
thickness = 0.0
for layer in self.targetLayers:
thickness += layer['thickness'] + layer['spacing']
return thickness
# Partition each target layer
def __partitionTargetLayer(self, part):
# Partition target's outer cell to allow hex swept meshing
part.DatumPlaneByPrincipalPlane(
principalPlane=YZPLANE,
offset=0.0
)
cells = part.cells
pickedCells = cells.getSequenceFromMask(
mask=
(
'[#1 ]',
),
)
datums = part.datums
part.PartitionCellByDatumPlane(
datumPlane=datums[8],
cells=pickedCells
)
# Create 'encastre' boundary condition on sides of each target layer
def __encastreTargetSides(self):
assembly = mdb.models[self.modelName].rootAssembly
# Create list of selections
faces = []
for layer in self.assemblyOrder:
name = layer[0] + "O"
faces.append(assembly.instances[name].faces.getSequenceFromMask(
mask=
(
'[#48 ]',
),
)
)
# Combine selections and create set
facesSet = faces[0]
for i in range(1, len(faces)):
facesSet = facesSet + faces[i]
region = assembly.Set(
faces=facesSet,
name='Target-sides'
)
# Create boundary condition
mdb.models[self.modelName].EncastreBC(
name='Fix-sides',
createStepName='Initial',
region=region,
localCsys=None
)
# Create uniform velocity field on projectile
def __applyProjectileVelocity(self):
assembly = mdb.models[self.modelName].rootAssembly
# Create selection out of casing's and core's cells
cells = None
for part in self.projectileComponents:
if cells is None:
cells = assembly.instances[part].cells.getSequenceFromMask(
mask=
(
'[#1 ]',
),
)
else:
cells = cells + assembly.instances[part].cells.getSequenceFromMask(
mask=
(
'[#1 ]',
),
)
# Create set
region = assembly.Set(
cells=cells,
name='Projectile-volume'
)
# Convert [deg] to [rad]
radians = self.targetObliquity * math.pi / 180.0
# Compute velocity vector components
velocityY = self.projectileVelocity * math.sin(radians)
velocityZ = -self.projectileVelocity * math.cos(radians)
# Create velocity field
mdb.models[self.modelName].Velocity(
name='Projectile-velocity',
region=region,
field='',
distributionType=MAGNITUDE,
velocity1=0.0,
velocity2=velocityY,
velocity3=velocityZ,
omega=0.0
)
# Create uniform temperature field on both target and projectile
def __applyInitialTemperature(self):
assembly = mdb.models[self.modelName].rootAssembly
# Create selection out of target's and projectile's cells
cells = None
for part in self.projectileComponents:
if cells is None:
cells = assembly.instances[part].cells.getSequenceFromMask(
mask=
(
'[#1 ]',
),
)
else:
cells = cells + assembly.instances[part].cells.getSequenceFromMask(
mask=
(
'[#1 ]',
),
)
for layer in self.assemblyOrder:
name = layer[0]
cells = cells + assembly.instances[name + "I"].cells.getSequenceFromMask(
mask=
(
'[#1 ]',
),
)
cells = cells + assembly.instances[name + "O"].cells.getSequenceFromMask(
mask=
(
'[#3 ]',
),
)
# Create set
region = assembly.Set(
cells=cells,
name='Entire-mass'
)
# Create temperature field
mdb.models[self.modelName].Temperature(
name='Temperature',
createStepName='Initial',
region=region,
distributionType=UNIFORM,
crossSectionDistribution=CONSTANT_THROUGH_THICKNESS,
# 293.15 [K] equals 20 [*C]
magnitudes=
(
293.15,
)
)
# Write job input file, inject surface sets and set interactions between them - it's a workaround that will
# hopefully solve the problem with setting interior/exterior surface sets in Abaqus
def injectContactToInput(self):
job = mdb.models[self.modelName].jobs[self.modelName]
job.writeInput(
consistencyChecking=OFF
)
filename = self.__getInputFilename()
lines = self.__obtainLines(filename)
lines = self.__insertInteractions(lines)
lines = self.__insertSurfaceSet(lines)
self.__overrideInput(lines, filename)
# Load original input file and read its lines
def __obtainLines(self, filename):
with open(filename) as file:
    lines = [line.strip('\n') for line in file.readlines()]
return lines
# Insert surface set definition to input file lines
def __insertSurfaceSet(self, lines):
newlines = []
newlines.append('**')
newlines.append('** ELEMENT SURFACE SETS')
newlines.append('**')
for inst in mdb.models[self.modelName].rootAssembly.instances.values():
newlines.append('** %s' % inst.name)
newlines.append('** %s' % inst.name)
index = self.__getEndAssemblyIdx(lines)
lines[index:index] = newlines
# TODO: Implement surface set definition insertion
return lines
# Insert interaction definition to input file lines
def __insertInteractions(self, lines):
newlines = []
newlines.append('**')
newlines.append('** INTERACTIONS')
newlines.append('**')
index = self.__getLastMaterialConstantIdx(lines)
lines[index:index] = newlines
# TODO: Implement interaction definition insertion
return lines
# Override input file with modified lines
def __overrideInput(self, lines, filename):
with open(filename, 'w') as file:
    for line in lines:
        file.write("%s\n" % line)
# Obtain input filename
def __getInputFilename(self):
fname = __file__
for i in range(0, 7):
fname = os.path.dirname(fname)
fname = fname + "/Commands/"+self.modelName+".inp"
return fname
# Obtain the line index after which our interaction properties should be inserted
def __getLastMaterialConstantIdx(self, lines):
    # Scan by index from the end so duplicate line contents cannot return a wrong position
    for idx in reversed(range(len(lines))):
        if lines[idx].startswith('Entire-mass'):
            return idx + 1
# Find the insertion index just before the '*End Assembly' line
def __getEndAssemblyIdx(self, lines):
    for idx in reversed(range(len(lines))):
        if lines[idx].startswith('*End Assembly'):
            return idx - 1
# Adjust materials' displacement criterion for J-C damage evolution to failure coefficient
def adjustDisplacementsAtFailure(self):
for material in mdb.models[self.modelName].materials.values():
if hasattr(material, 'johnsonCookDamageInitiation'):
if hasattr(material.johnsonCookDamageInitiation, 'damageEvolution'):
strainAtFailure = material.johnsonCookDamageInitiation.damageEvolution.table[0][0]
displacementAtFailure = strainAtFailure * self.failureCoefficient * 0.001
material.johnsonCookDamageInitiation.damageEvolution.setValues(
table=
(
(
displacementAtFailure,
),
)
)
if hasattr(material, 'maxpeDamageInitiation'):
if hasattr(material.maxpeDamageInitiation, 'damageEvolution'):
strainAtFailure = material.maxpeDamageInitiation.damageEvolution.table[0][0]
displacementAtFailure = strainAtFailure * self.failureCoefficient * 0.001
material.maxpeDamageInitiation.damageEvolution.setValues(
table=
(
(
displacementAtFailure,
),
)
)
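# Illustrative check of the scaling above (values are examples only): a tabulated
# strain at failure of 0.8 with failureCoefficient = 1.5 yields a displacement
# at failure of 0.8 * 1.5 * 0.001 = 1.2e-3 [m].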
# Create fake surface sets - they must be reselected by the user
def createFakeSurfaceSets(self):
# FIXME: Make Surface objects actually re-definable as mesh surfaces
assembly = mdb.models[self.modelName].rootAssembly
faces = assembly.instances['Target-L001I'].faces
faces = faces.getSequenceFromMask(
mask=
(
'[#2 ]',
),
)
faceSet = assembly.Set(
faces=faces,
name='Fake-contact-set'
)
assembly.SurfaceFromElsets(
name="Interior-Brown",
elementSetSeq=
(
(
faceSet,
S1
),
(
faceSet,
S2
)
)
)
assembly.SurfaceFromElsets(
name="Interior-Purple",
elementSetSeq=
(
(
faceSet,
S1
),
(
faceSet,
S2
)
)
)
assembly.SurfaceFromElsets(
name="Exterior",
elementSetSeq=
(
(
faceSet,
S1
),
(
faceSet,
S2
)
)
)
# Create interaction to handle contact between all mesh element faces
def createInteractions(self):
mdb.models[self.modelName].ContactExp(
name='Contact',
createStepName='Initial'
)
ext = mdb.models[self.modelName].rootAssembly.surfaces['Exterior']
inb = mdb.models[self.modelName].rootAssembly.surfaces['Interior-Brown']
inp = mdb.models[self.modelName].rootAssembly.surfaces['Interior-Purple']
mdb.models[self.modelName].interactions['Contact'].includedPairs.setValuesInStep(
stepName='Initial',
useAllstar=OFF,
addPairs=(
(
ext,
SELF
),
(
ext,
inb
),
(
inb,
ext
),
(
inp,
SELF
),
(
inb,
inp
),
(
inp,
inb
),
(
inp,
SELF
)
)
)
mdb.models[self.modelName].interactions['Contact'].contactPropertyAssignments.appendInStep(
stepName='Initial',
assignments=(
(
GLOBAL,
SELF,
'InteractionProperties'
),
)
)
def createTieConstraints(self):
for layer in self.assemblyOrder:
name = layer[0]
inner_name = name + "I"
outer_name = name + "O"
assembly = mdb.models[self.modelName].rootAssembly
inner_faces = assembly.instances[inner_name].faces.getSequenceFromMask(
mask=(
'[#1 ]',
),
)
assembly.Surface(
side1Faces=inner_faces,
name=inner_name + "_TIE"
)
inner_region = assembly.surfaces[inner_name+"_TIE"]
outer_faces = assembly.instances[outer_name].faces.getSequenceFromMask(
mask=(
'[#a0 ]',
),
)
assembly.Surface(
side1Faces=outer_faces,
name=outer_name + "_TIE"
)
outer_region = assembly.surfaces[outer_name+"_TIE"]
mdb.models[self.modelName].Tie(
name=name + "_TIE",
master=outer_region,
slave=inner_region,
positionToleranceMethod=COMPUTED,
adjust=ON,
constraintEnforcement=SURFACE_TO_SURFACE
)
def prepareProjectileParts(self):
for part_name in mdb.models[self.modelName].parts.keys():
if part_name.startswith("Projectile-"+self.projectileType):
self.projectileComponents.append(part_name)
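# A rough usage sketch, intended to run inside the Abaqus/CAE Python
# environment after materials and parts have been imported. All values and
# the projectile type and material names below are illustrative placeholders.
#
# config = {
#     'projectile': {'type': 'AP', 'velocity': 800.0},
#     'armor': {
#         'obliquity': 30.0,
#         'radius': 0.1,
#         'innerRadius': 0.025,
#         'layers': [
#             {'thickness': 0.01, 'spacing': 0.0, 'material': 'Steel-4340'},
#         ],
#     },
#     'meshElementSize': 0.001,
#     'failureCoefficient': 1.0,
# }
# ImpactTestKernel(config).run()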
|
|
import hashlib
import json
import logging
import os
import re
import shutil
import stat
import StringIO
import tempfile
import zipfile
from django import forms
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.utils.translation import trans_real as translation
from tower import ugettext as _
from amo.utils import rm_local_tmp_dir, strip_bom
from mkt.translations.utils import to_language
log = logging.getLogger('files.utils')
SIGNED_RE = re.compile('^META\-INF/(\w+)\.(rsa|sf)$')
def get_filepath(fileorpath):
"""Get the actual file path of fileorpath if it's a FileUpload object."""
if hasattr(fileorpath, 'path'): # FileUpload
return fileorpath.path
return fileorpath
def get_file(fileorpath):
"""Get a file-like object, whether given a FileUpload object or a path."""
if hasattr(fileorpath, 'path'): # FileUpload
return storage.open(fileorpath.path)
if hasattr(fileorpath, 'name'):
return fileorpath
return storage.open(fileorpath)
class WebAppParser(object):
def extract_locale(self, locales, key, default=None):
"""Gets a locale item based on key.
For example, given this:
locales = {'en': {'foo': 1, 'bar': 2},
'it': {'foo': 1, 'bar': 2}}
You can get english foo like:
self.extract_locale(locales, 'foo', 'en')
"""
ex = {}
for loc, data in locales.iteritems():
ex[loc] = data.get(key, default)
return ex
def get_json_data(self, fileorpath):
path = get_filepath(fileorpath)
if zipfile.is_zipfile(path):
zf = SafeUnzip(path)
zf.is_valid() # Raises forms.ValidationError if problems.
try:
data = zf.extract_path('manifest.webapp')
except KeyError:
raise forms.ValidationError(
_('The file "manifest.webapp" was not found at the root '
'of the packaged app archive.'))
else:
file_ = get_file(fileorpath)
data = file_.read()
file_.close()
return WebAppParser.decode_manifest(data)
@classmethod
def decode_manifest(cls, manifest):
"""
Returns manifest, stripped of BOMs and UTF-8 decoded, as Python dict.
"""
try:
data = strip_bom(manifest)
# Marketplace only supports UTF-8 encoded manifests.
decoded_data = data.decode('utf-8')
except (ValueError, UnicodeDecodeError) as exc:
msg = 'Error parsing manifest (encoding: utf-8): %s: %s'
log.error(msg % (exc.__class__.__name__, exc))
raise forms.ValidationError(
_('Could not decode the webapp manifest file. '
'Check your manifest file for special non-utf-8 '
'characters.'))
try:
return json.loads(decoded_data)
except Exception:
raise forms.ValidationError(
_('The webapp manifest is not valid JSON.'))
def parse(self, fileorpath):
data = self.get_json_data(fileorpath)
loc = data.get('default_locale', translation.get_language())
default_locale = self.trans_locale(loc)
locales = data.get('locales', {})
if type(locales) == list:
raise forms.ValidationError(
_('Your specified app locales are not in the correct format.'))
localized_descr = self.extract_locale(locales, 'description',
default='')
if 'description' in data:
localized_descr.update({default_locale: data['description']})
localized_name = self.extract_locale(locales, 'name',
default=data['name'])
localized_name.update({default_locale: data['name']})
developer_info = data.get('developer', {})
developer_name = developer_info.get('name')
if not developer_name:
# Missing developer name shouldn't happen if validation took place,
# but let's be explicit about this just in case.
raise forms.ValidationError(
_("Developer name is required in the manifest in order to "
"display it on the app's listing."))
return {'guid': None,
'name': self.trans_all_locales(localized_name),
'developer_name': developer_name,
'description': self.trans_all_locales(localized_descr),
'version': data.get('version', '1.0'),
'default_locale': default_locale,
'origin': data.get('origin')}
def trans_locale(self, locale):
return to_language(settings.SHORTER_LANGUAGES.get(locale, locale))
def trans_all_locales(self, locale_dict):
trans = {}
for key, item in locale_dict.iteritems():
key = self.trans_locale(key)
trans[key] = item
return trans
class SafeUnzip(object):
def __init__(self, source, mode='r'):
self.source = source
self.info = None
self.mode = mode
def is_valid(self, fatal=True):
"""
Runs some overall archive checks.
fatal: if the archive is not valid and fatal is True, it will raise
an error, otherwise it will return False.
"""
try:
zip = zipfile.ZipFile(self.source, self.mode)
except (zipfile.BadZipfile, IOError):
if fatal:
log.info('Error extracting', exc_info=True)
raise
return False
_info = zip.infolist()
for info in _info:
if '..' in info.filename or info.filename.startswith('/'):
log.error('Extraction error, invalid file name (%s) in '
'archive: %s' % (info.filename, self.source))
# L10n: {0} is the name of the invalid file.
raise forms.ValidationError(
_('Invalid file name in archive: {0}').format(
info.filename))
if info.file_size > settings.FILE_UNZIP_SIZE_LIMIT:
log.error('Extraction error, file too big (%s) for file (%s): '
'%s' % (self.source, info.filename, info.file_size))
# L10n: {0} is the name of the invalid file.
raise forms.ValidationError(
_('File exceeding size limit in archive: {0}').format(
info.filename))
self.info = _info
self.zip = zip
return True
def is_signed(self):
"""Tells us if an addon is signed."""
finds = []
for info in self.info:
match = SIGNED_RE.match(info.filename)
if match:
name, ext = match.groups()
# If it's rsa or sf, just look for the opposite.
if (name, {'rsa': 'sf', 'sf': 'rsa'}[ext]) in finds:
return True
finds.append((name, ext))
def extract_from_manifest(self, manifest):
"""
Extracts a file given a manifest such as:
jar:chrome/de.jar!/locale/de/browser/
or
locale/de/browser
"""
type, path = manifest.split(':')
jar = self
if type == 'jar':
parts = path.split('!')
for part in parts[:-1]:
jar = self.__class__(StringIO.StringIO(jar.zip.read(part)))
jar.is_valid(fatal=True)
path = parts[-1]
return jar.extract_path(path[1:] if path.startswith('/') else path)
def extract_path(self, path):
"""Given a path, extracts the content at path."""
return self.zip.read(path)
def extract_info_to_dest(self, info, dest):
"""Extracts the given info to a directory and checks the file size."""
self.zip.extract(info, dest)
dest = os.path.join(dest, info.filename)
if not os.path.isdir(dest):
# Directories consistently report their size incorrectly.
size = os.stat(dest)[stat.ST_SIZE]
if size != info.file_size:
log.error('Extraction error, uncompressed size: %s, %s not %s'
% (self.source, size, info.file_size))
raise forms.ValidationError(_('Invalid archive.'))
def extract_to_dest(self, dest):
"""Extracts the zip file to a directory."""
for info in self.info:
self.extract_info_to_dest(info, dest)
def close(self):
self.zip.close()
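# A rough usage sketch for SafeUnzip (the path is a placeholder):
#
# unzipper = SafeUnzip('/tmp/example-package.zip')
# if unzipper.is_valid(fatal=False):
#     manifest = unzipper.extract_path('manifest.webapp')
#     unzipper.close()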
def extract_zip(source, remove=False, fatal=True):
"""Extracts the zip file. If remove is given, removes the source file."""
tempdir = tempfile.mkdtemp()
zip = SafeUnzip(source)
try:
if zip.is_valid(fatal):
zip.extract_to_dest(tempdir)
except:
rm_local_tmp_dir(tempdir)
raise
if remove:
os.remove(source)
return tempdir
def copy_over(source, dest):
"""
Copies from the source to the destination, removing the destination
if it exists and is a directory.
"""
if os.path.exists(dest) and os.path.isdir(dest):
shutil.rmtree(dest)
shutil.copytree(source, dest)
# mkdtemp sets the directory permissions to 700;
# for the webserver to read them, we need 755
os.chmod(dest, stat.S_IRWXU | stat.S_IRGRP |
stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
shutil.rmtree(source)
def extract_xpi(xpi, path, expand=False):
"""
If expand is given, will look inside the expanded file
and find anything in the whitelist and try and expand it as well.
It will do up to 10 iterations, after that you are on your own.
It will replace the expanded file with a directory and the expanded
contents. If you have 'foo.jar', that contains 'some-image.jpg', then
it will create a folder, foo.jar, with an image inside.
"""
expand_whitelist = ['.jar', '.xpi']
tempdir = extract_zip(xpi)
if expand:
for x in xrange(0, 10):
flag = False
for root, dirs, files in os.walk(tempdir):
for name in files:
if os.path.splitext(name)[1] in expand_whitelist:
src = os.path.join(root, name)
if not os.path.isdir(src):
dest = extract_zip(src, remove=True, fatal=False)
if dest:
copy_over(dest, src)
flag = True
if not flag:
break
copy_over(tempdir, path)
def parse_addon(pkg, addon=None):
"""
pkg is a filepath or a django.core.files.UploadedFile
or files.models.FileUpload.
"""
return WebAppParser().parse(pkg)
def _get_hash(filename, block_size=2 ** 20, hash=hashlib.md5):
    """Returns a hex digest of the file contents (MD5 by default)."""
    hash_ = hash()
    with open(filename, 'rb') as f:
        while True:
            data = f.read(block_size)
            if not data:
                break
            hash_.update(data)
    return hash_.hexdigest()
def get_md5(filename, **kw):
return _get_hash(filename, **kw)
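# A rough usage sketch for the helpers above (paths are placeholders):
#
# parsed = parse_addon('/tmp/uploads/app.zip')
# print parsed['name'], parsed['version'], parsed['default_locale']
# print get_md5('/tmp/uploads/app.zip')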
|
|
#!/usr/bin/env python
'''Train a simple deep CNN on the CIFAR10 small images dataset.
Distributed training.
run:
salloc -N2 -p hsw_p40 --comment=docker --qos=short --time=04:00:00
srun -l --ntasks-per-node=2 \
python examples/cifar/cifar10_cnn_distrib_v2_slurm.py --epochs=5 --rdma
# rdma default is verbs
# On psgcluster --network=ib.cluster is required for --rdma=gdr option
# couldn't get --rdma=gdr option to work.
'''
from __future__ import print_function
import sys
import os
from argparse import SUPPRESS
# from time import sleep
from parser_common import (parser_def_mgpu, remove_options)
from keras.utils import to_categorical
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
import keras.layers as KL
from keras import backend as KB
from keras.callbacks import ModelCheckpoint
from keras.optimizers import RMSprop
from keras_exp.multigpu import (get_available_gpus, print_mgpu_modelsummary)
from keras_exp.multigpu import make_parallel
import tensorflow as tf
from keras_exp.distrib.cluster_parsers.slurm import SlurmClusterParser
from keras_exp.distrib.cluster_mgrs.tfcmgr import (
TFClusterManagerFacade, JobType) # , DevType
from keras_exp.distrib.cluster_mgrs.tfclusterdefs import ProtocolType
_DEVPROF = False
def parser_(desc):
parser = parser_def_mgpu(desc)
remove_options(parser, ['--mgpu', '--nccl'])
checkptfile = 'cifar10_cnn_distrib_v2.weights.best.hdf5'
parser.add_argument(
'--checkpt', action='store', nargs='?',
const=checkptfile, default=SUPPRESS,
help='S|Save (overwrites) and load the model weights if available.'
'\nOptionally specify a file/filepath if the default name is '
'undesired.\n(default: {})'.format(checkptfile))
parser.add_argument('--aug', action='store_true', default=False,
help='S|Perform data augmentation on cifar10 set.\n')
parser.add_argument('--logdevp', action='store_true', default=False,
help='S|Log device placement in Tensorflow.\n')
args = parser.parse_args()
return args
def make_model(inshape, num_classes, weights_file=None):
model = Sequential()
model.add(KL.InputLayer(input_shape=inshape[1:]))
# model.add(KL.Conv2D(32, (3, 3), padding='same', input_shape=inshape[1:]))
model.add(KL.Conv2D(32, (3, 3), padding='same'))
model.add(KL.Activation('relu'))
model.add(KL.Conv2D(32, (3, 3)))
model.add(KL.Activation('relu'))
model.add(KL.MaxPooling2D(pool_size=(2, 2)))
model.add(KL.Dropout(0.25))
model.add(KL.Conv2D(64, (3, 3), padding='same'))
model.add(KL.Activation('relu'))
model.add(KL.Conv2D(64, (3, 3)))
model.add(KL.Activation('relu'))
model.add(KL.MaxPooling2D(pool_size=(2, 2)))
model.add(KL.Dropout(0.25))
model.add(KL.Flatten())
model.add(KL.Dense(512))
model.add(KL.Activation('relu'))
model.add(KL.Dropout(0.5))
model.add(KL.Dense(num_classes))
model.add(KL.Activation('softmax'))
if weights_file is not None and os.path.exists(weights_file):
model.load_weights(weights_file)
return model
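# Illustrative, single-process sanity check of the builder above (commented
# out; CIFAR10 images are 32x32x3 with 10 classes, and only inshape[1:] is
# used, so the leading batch dimension can be anything):
#
# model = make_model((None, 32, 32, 3), 10)
# model.summary()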
def main(argv=None):
'''
'''
main.__doc__ = __doc__
argv = sys.argv if argv is None else sys.argv + list(argv)
desc = main.__doc__ # .format(os.path.basename(__file__))
# CLI parser
args = parser_(desc)
# mgpu = 0 if getattr(args, 'mgpu', None) is None else args.mgpu
# enqueue = args.enqueue
# usenccl = args.nccl
# syncopt = args.syncopt
rdma = args.rdma
network = args.network
checkpt = getattr(args, 'checkpt', None)
checkpt_flag = False if checkpt is None else True
filepath = checkpt
# print('CHECKPT:', checkpt)
batch_size = 32
num_classes = 10
epochs = args.epochs
data_augmentation = args.aug
logdevp = args.logdevp
# ---------------------------------------------- Distributed setup on SLURM
scpar = SlurmClusterParser(network=network)
cmgr_facade = TFClusterManagerFacade(scpar)
logdevp_flag = True if _DEVPROF or logdevp else False
gpu_options = tf.GPUOptions(allow_growth=True)
config = tf.ConfigProto(log_device_placement=logdevp_flag, # True,
allow_soft_placement=True,
gpu_options=gpu_options)
# TF 1.2.x RDMA: specify protocol='grpc+verbs' in server below.
protocol = ProtocolType.get_server_protocol_str(rdma)
server = cmgr_facade.get_server(
config,
protocol=protocol)
tfsess = cmgr_facade.get_session(server)
KB.set_session(tfsess)
#: :type cluster_spec: tf.train.ClusterSpec
# cluster_spec = cmgr_facade.get_cluster_spec()
job_type = cmgr_facade.myjobtype
# task_id = cmgr_facade.mytask_id
is_chief = cmgr_facade.is_chief
if job_type == JobType.ps:
# JOIN PARAMETER SERVERS
# server.join()
cmgr_facade.join(server)
ps_device = cmgr_facade.get_mypsdevice()
print('MYPS_DEVICE: {}'.format(ps_device)) # DEBUG
# sleep(2) # Have the chief wait just in case. Occasionally get errors.
# The ngpus per host needs to be done with MPI or somehow sync'd. Currently
# assuming all hosts have the same number of GPUs.
gdev_list = get_available_gpus()
ngpus = len(gdev_list)
# List of all devices. The devices might be associated to the same worker.
wgdev_list = cmgr_facade.get_allworkers_devlist(ngpus)
# If 2 workers ea. w/ 4 devices then nworker_devices_total == 2 * 4 = 8
# If 4 workers ea. w/ 1 devices then nworker_devices_total == 4 * 1 = 4
# nworker_devices_total = len(wgdev_list)
# Number of workers, not devices. Each worker can have multiple devices.
num_workers = cmgr_facade.num_workers
# List of devices associated with current worker/task.
mydevlist = cmgr_facade.get_mydevlist(ngpus)
nmydevs = len(mydevlist)
batch_size = batch_size * nmydevs
# ------------------------------------ Data loading and basic preprocessing
# The data, shuffled and split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Convert class vectors to binary class matrices.
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
nsamples = x_train.shape[0]
steps_per_epoch = (nsamples // num_workers) // batch_size
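# Illustrative numbers: CIFAR10 has 50000 training samples, so with 2 workers
# and 4 GPUs per worker (batch_size = 32 * 4 = 128) this gives
# steps_per_epoch = (50000 // 2) // 128 = 195.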
# NOTE: Naive dataset split below. With such a naive approach the random
# sampling gets screwed up. The convergence rate is slower as a
# result (hence defeats the purpose of scaling since more iterations
# are required when using more nodes), and if scaling to very many
# nodes might not converge. Instead using a generator that
# randomly chooses the samples for "mypart". Maybe implement a
# custom ImageDataGenerator for distributed case.
# split train dataset for myrank
# mytaskid = mypart = cmgr_facade.mytask_id
# nn = x_train.shape[0] // num_workers
# i1 = mypart * nn
# if mypart == num_workers - 1:
# x_train = x_train[i1:, ...]
# y_train = y_train[i1:, ...]
# else:
# i2 = (mypart + 1) * nn
# x_train = x_train_[i1:i2, ...]
# y_train = y_train[i1:i2, ...]
# print('TASK {}: train samples {}'.format(mytaskid, x_train.shape[0]))
# print('TASK {}: test samples {}'.format(mytaskid, x_test.shape[0]))
# nsamples = x_train.shape[0]
# steps_per_epoch = nsamples // batch_size
# --------------------------------------------- Setup model and parallelize
def _load_fn(unused_op):
return 1
cspec = cmgr_facade.get_cluster_spec()
num_ps = cmgr_facade.num_ps
ps_strategy = \
tf.contrib.training.GreedyLoadBalancingStrategy(num_ps, _load_fn)
rdsetter = tf.train.replica_device_setter(
cluster=cspec,
ps_strategy=ps_strategy,
)
with tf.device(rdsetter):
model_init = make_model(
x_train.shape, num_classes,
filepath if checkpt_flag else None
)
# if using checkpointing callback enable it on chief or use unique
# filepath for each worker task.
callbacks = None
if checkpt_flag and is_chief:
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,
save_best_only=True, mode='max')
callbacks = [checkpoint]
if is_chief:
print('\n\tCLUSTER_SPEC_DICT: {}\n\tWGDEV_LIST: {}\n'
.format(cmgr_facade.clusterspec_dict,
[dev.to_string() for dev in wgdev_list])) # DEBUG
print('\n\tMYWGDEV_LIST: {}\n'
.format([dev.to_string() for dev in mydevlist])) # DEBUG
# Data-Parallelize the model via function or class.
model = make_parallel(model_init, mydevlist, ps_device=ps_device)
print_mgpu_modelsummary(model)
# ------------------------------------------------------------ Run training
lr = 0.0001 * nmydevs
# lr = 0.0001 * nworker_devices_total
opt = RMSprop(lr=lr, decay=1e-6)
# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
if not data_augmentation:
print('Not using data augmentation.')
# model.fit(x_train, y_train,
# batch_size=batch_size,
# epochs=epochs,
# validation_data=(x_test, y_test),
# shuffle=True,
# callbacks=callbacks) # verbose=is_chief)
datagen = ImageDataGenerator()
datagen.fit(x_train)
# Fit the model on the batches generated by datagen.flow().
model.fit_generator(datagen.flow(x_train, y_train,
batch_size=batch_size),
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_data=(x_test, y_test),
callbacks=callbacks)
else:
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
# divide inputs by std of the dataset
featurewise_std_normalization=False,
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
# randomly rotate images in the range (degrees, 0 to 180)
rotation_range=0,
# randomly shift images horizontally (fraction of total width)
width_shift_range=0.1,
# randomly shift images vertically (fraction of total height)
height_shift_range=0.1,
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
# Compute quantities required for feature-wise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(x_train)
# Fit the model on the batches generated by datagen.flow().
model.fit_generator(datagen.flow(x_train, y_train,
batch_size=batch_size),
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_data=(x_test, y_test),
callbacks=callbacks)
# ------------------------------------------------------------- STOP SERVER
if not is_chief:
# JOIN WORKERS EXCEPT FOR CHIEF
cmgr_facade.join(server)
cmgr_facade.stop_chief(server)
if __name__ == '__main__':
main()
|
|
from __future__ import print_function, division
from sympy import (Basic, exp, pi, Lambda, Trace, S, MatrixSymbol, Integral,
gamma, Product, Dummy, Sum, Abs, IndexedBase, I)
from sympy.core.sympify import _sympify
from sympy.stats.rv import (_symbol_converter, Density, RandomMatrixSymbol,
RandomSymbol)
from sympy.stats.joint_rv_types import JointDistributionHandmade
from sympy.stats.random_matrix import RandomMatrixPSpace
from sympy.tensor.array import ArrayComprehension
__all__ = [
'CircularEnsemble',
'CircularUnitaryEnsemble',
'CircularOrthogonalEnsemble',
'CircularSymplecticEnsemble',
'GaussianEnsemble',
'GaussianUnitaryEnsemble',
'GaussianOrthogonalEnsemble',
'GaussianSymplecticEnsemble',
'joint_eigen_distribution',
'JointEigenDistribution',
'level_spacing_distribution'
]
class RandomMatrixEnsemble(Basic):
"""
Base class for random matrix ensembles.
It acts as an umbrella and contains
the methods common to all the ensembles
defined in sympy.stats.random_matrix_models.
"""
def __new__(cls, sym, dim=None):
sym, dim = _symbol_converter(sym), _sympify(dim)
if dim.is_integer == False:
raise ValueError("Dimension of the random matrices must be "
"integers, received %s instead."%(dim))
self = Basic.__new__(cls, sym, dim)
rmp = RandomMatrixPSpace(sym, model=self)
return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
symbol = property(lambda self: self.args[0])
dimension = property(lambda self: self.args[1])
def density(self, expr):
return Density(expr)
class GaussianEnsemble(RandomMatrixEnsemble):
"""
Abstract class for Gaussian ensembles.
Contains the properties common to all the
gaussian ensembles.
References
==========
.. [1] https://en.wikipedia.org/wiki/Random_matrix#Gaussian_ensembles
.. [2] https://arxiv.org/pdf/1712.07903.pdf
"""
def _compute_normalization_constant(self, beta, n):
"""
Helper function for computing normalization
constant for joint probability density of eigen
values of Gaussian ensembles.
References
==========
.. [1] https://en.wikipedia.org/wiki/Selberg_integral#Mehta's_integral
"""
n = S(n)
prod_term = lambda j: gamma(1 + beta*S(j)/2)/gamma(S.One + beta/S(2))
j = Dummy('j', integer=True, positive=True)
term1 = Product(prod_term(j), (j, 1, n)).doit()
term2 = (2/(beta*n))**(beta*n*(n - 1)/4 + n/2)
term3 = (2*pi)**(n/2)
return term1 * term2 * term3
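# Reading off the three terms above, the constant being assembled is
#   Z_{beta, n} = (2*pi)**(n/2)
#                 * (2/(beta*n))**(beta*n*(n - 1)/4 + n/2)
#                 * Product(gamma(1 + beta*j/2)/gamma(1 + beta/2), (j, 1, n))
# i.e. the Mehta-integral form of the normalization constant referenced in [1].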
def _compute_joint_eigen_distribution(self, beta):
"""
Helper function for computing the joint
probability distribution of eigen values
of the random matrix.
"""
n = self.dimension
Zbn = self._compute_normalization_constant(beta, n)
l = IndexedBase('l')
i = Dummy('i', integer=True, positive=True)
j = Dummy('j', integer=True, positive=True)
k = Dummy('k', integer=True, positive=True)
term1 = exp((-S(n)/2) * Sum(l[k]**2, (k, 1, n)).doit())
sub_term = Lambda(i, Product(Abs(l[j] - l[i])**beta, (j, i + 1, n)))
term2 = Product(sub_term(i).doit(), (i, 1, n - 1)).doit()
syms = ArrayComprehension(l[k], (k, 1, n)).doit()
return Lambda(tuple(syms), (term1 * term2)/Zbn)
class GaussianUnitaryEnsemble(GaussianEnsemble):
"""
Represents Gaussian Unitary Ensembles.
Examples
========
>>> from sympy.stats import GaussianUnitaryEnsemble as GUE, density
>>> G = GUE('U', 2)
>>> density(G)
Lambda(H, exp(-Trace(H**2))/(2*pi**2))
"""
@property
def normalization_constant(self):
n = self.dimension
return 2**(S(n)/2) * pi**(S(n**2)/2)
def density(self, expr):
n, ZGUE = self.dimension, self.normalization_constant
h_pspace = RandomMatrixPSpace('P', model=self)
H = RandomMatrixSymbol('H', n, n, pspace=h_pspace)
return Lambda(H, exp(-S(n)/2 * Trace(H**2))/ZGUE)
def joint_eigen_distribution(self):
return self._compute_joint_eigen_distribution(S(2))
def level_spacing_distribution(self):
s = Dummy('s')
f = (32/pi**2)*(s**2)*exp((-4/pi)*s**2)
return Lambda(s, f)
class GaussianOrthogonalEnsemble(GaussianEnsemble):
"""
Represents Gaussian Orthogonal Ensembles.
Examples
========
>>> from sympy.stats import GaussianOrthogonalEnsemble as GOE, density
>>> G = GOE('U', 2)
>>> density(G)
Lambda(H, exp(-Trace(H**2)/2)/Integral(exp(-Trace(_H**2)/2), _H))
"""
@property
def normalization_constant(self):
n = self.dimension
_H = MatrixSymbol('_H', n, n)
return Integral(exp(-S(n)/4 * Trace(_H**2)))
def density(self, expr):
n, ZGOE = self.dimension, self.normalization_constant
h_pspace = RandomMatrixPSpace('P', model=self)
H = RandomMatrixSymbol('H', n, n, pspace=h_pspace)
return Lambda(H, exp(-S(n)/4 * Trace(H**2))/ZGOE)
def joint_eigen_distribution(self):
return self._compute_joint_eigen_distribution(S.One)
def level_spacing_distribution(self):
s = Dummy('s')
f = (pi/2)*s*exp((-pi/4)*s**2)
return Lambda(s, f)
class GaussianSymplecticEnsemble(GaussianEnsemble):
"""
Represents Gaussian Symplectic Ensembles.
Examples
========
>>> from sympy.stats import GaussianSymplecticEnsemble as GSE, density
>>> G = GSE('U', 2)
>>> density(G)
Lambda(H, exp(-2*Trace(H**2))/Integral(exp(-2*Trace(_H**2)), _H))
"""
@property
def normalization_constant(self):
n = self.dimension
_H = MatrixSymbol('_H', n, n)
return Integral(exp(-S(n) * Trace(_H**2)))
def density(self, expr):
n, ZGSE = self.dimension, self.normalization_constant
h_pspace = RandomMatrixPSpace('P', model=self)
H = RandomMatrixSymbol('H', n, n, pspace=h_pspace)
return Lambda(H, exp(-S(n) * Trace(H**2))/ZGSE)
def joint_eigen_distribution(self):
return self._compute_joint_eigen_distribution(S(4))
def level_spacing_distribution(self):
s = Dummy('s')
f = ((S(2)**18)/((S(3)**6)*(pi**3)))*(s**4)*exp((-64/(9*pi))*s**2)
return Lambda(s, f)
class CircularEnsemble(RandomMatrixEnsemble):
"""
Abstract class for Circular ensembles.
Contains the properties and methods
common to all the circular ensembles.
References
==========
.. [1] https://en.wikipedia.org/wiki/Circular_ensemble
"""
def density(self, expr):
# TODO : Add support for Lie groups(as extensions of sympy.diffgeom)
# and define measures on them
raise NotImplementedError("Support for Haar measure hasn't been "
"implemented yet, therefore the density of "
"%s cannot be computed."%(self))
def _compute_joint_eigen_distribution(self, beta):
"""
Helper function to compute the joint distribution of phases
of the complex eigen values of matrices belonging to any
circular ensembles.
"""
n = self.dimension
Zbn = ((2*pi)**n)*(gamma(beta*n/2 + 1)/S((gamma(beta/2 + 1)))**n)
t = IndexedBase('t')
i, j, k = (Dummy('i', integer=True), Dummy('j', integer=True),
Dummy('k', integer=True))
syms = ArrayComprehension(t[i], (i, 1, n)).doit()
f = Product(Product(Abs(exp(I*t[k]) - exp(I*t[j]))**beta, (j, k + 1, n)).doit(),
(k, 1, n - 1)).doit()
return Lambda(tuple(syms), f/Zbn)
class CircularUnitaryEnsemble(CircularEnsemble):
"""
Represents Circular Unitary Ensembles.
Examples
========
>>> from sympy.stats import CircularUnitaryEnsemble as CUE, density
>>> from sympy.stats import joint_eigen_distribution
>>> C = CUE('U', 1)
>>> joint_eigen_distribution(C)
Lambda(t[1], Product(Abs(exp(I*t[_j]) - exp(I*t[_k]))**2, (_j, _k + 1, 1), (_k, 1, 0))/(2*pi))
Note
====
As seen in the example above, the density of CircularUnitaryEnsemble
is not evaluated because its exact definition is based on the Haar measure
of the unitary group, which is not unique.
"""
def joint_eigen_distribution(self):
return self._compute_joint_eigen_distribution(S(2))
class CircularOrthogonalEnsemble(CircularEnsemble):
"""
Represents Circular Orthogonal Ensembles.
Examples
========
>>> from sympy.stats import CircularOrthogonalEnsemble as COE, density
>>> from sympy.stats import joint_eigen_distribution
>>> C = COE('O', 1)
>>> joint_eigen_distribution(C)
Lambda(t[1], Product(Abs(exp(I*t[_j]) - exp(I*t[_k])), (_j, _k + 1, 1), (_k, 1, 0))/(2*pi))
Note
====
As can be seen in the example above, the density of CircularOrthogonalEnsemble
is not evaluated because the exact definition is based on the Haar measure of
the unitary group, which is not unique.
"""
def joint_eigen_distribution(self):
return self._compute_joint_eigen_distribution(S.One)
class CircularSymplecticEnsemble(CircularEnsemble):
"""
Represents Circular Symplectic Ensembles.
Examples
========
>>> from sympy.stats import CircularSymplecticEnsemble as CSE, density
>>> from sympy.stats import joint_eigen_distribution
>>> C = CSE('S', 1)
>>> joint_eigen_distribution(C)
Lambda(t[1], Product(Abs(exp(I*t[_j]) - exp(I*t[_k]))**4, (_j, _k + 1, 1), (_k, 1, 0))/(2*pi))
Note
====
As can be seen in the example above, the density of CircularSymplecticEnsemble
is not evaluated because the exact definition is based on the Haar measure of
the unitary group, which is not unique.
"""
def joint_eigen_distribution(self):
return self._compute_joint_eigen_distribution(S(4))
def joint_eigen_distribution(mat):
"""
For obtaining joint probability distribution
of eigen values of random matrix.
Parameters
==========
mat: RandomMatrixSymbol
The matrix symbol whose eigen values are to be considered.
Returns
=======
Lambda
Examples
========
>>> from sympy.stats import GaussianUnitaryEnsemble as GUE
>>> from sympy.stats import joint_eigen_distribution
>>> U = GUE('U', 2)
>>> joint_eigen_distribution(U)
Lambda((l[1], l[2]), exp(-l[1]**2 - l[2]**2)*Product(Abs(l[_i] - l[_j])**2, (_j, _i + 1, 2), (_i, 1, 1))/pi)
"""
if not isinstance(mat, RandomMatrixSymbol):
raise ValueError("%s is not of type, RandomMatrixSymbol."%(mat))
return mat.pspace.model.joint_eigen_distribution()
def JointEigenDistribution(mat):
"""
Creates joint distribution of eigen values of matrices with random
expressions.
Parameters
==========
mat: Matrix
The matrix under consideration
Returns
=======
JointDistributionHandmade
Examples
========
>>> from sympy.stats import Normal, JointEigenDistribution
>>> from sympy import Matrix
>>> A = [[Normal('A00', 0, 1), Normal('A01', 0, 1)],
... [Normal('A10', 0, 1), Normal('A11', 0, 1)]]
>>> JointEigenDistribution(Matrix(A))
JointDistributionHandmade(-sqrt(A00**2 - 2*A00*A11 + 4*A01*A10 + A11**2)/2
+ A00/2 + A11/2, sqrt(A00**2 - 2*A00*A11 + 4*A01*A10 + A11**2)/2 + A00/2 + A11/2)
"""
eigenvals = mat.eigenvals(multiple=True)
if any(not eigenval.has(RandomSymbol) for eigenval in set(eigenvals)):
raise ValueError("Eigen values don't have any random expression, "
"joint distribution cannot be generated.")
return JointDistributionHandmade(*eigenvals)
def level_spacing_distribution(mat):
"""
For obtaining distribution of level spacings.
Parameters
==========
mat: RandomMatrixSymbol
The random matrix symbol whose eigen values are
to be considered for finding the level spacings.
Returns
=======
Lambda
Examples
========
>>> from sympy.stats import GaussianUnitaryEnsemble as GUE
>>> from sympy.stats import level_spacing_distribution
>>> U = GUE('U', 2)
>>> level_spacing_distribution(U)
Lambda(_s, 32*_s**2*exp(-4*_s**2/pi)/pi**2)
References
==========
.. [1] https://en.wikipedia.org/wiki/Random_matrix#Distribution_of_level_spacings
"""
return mat.pspace.model.level_spacing_distribution()
|
|
#!/usr/bin/env python
#
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import imp
import optparse
import os
from os.path import join, dirname, abspath, basename, isdir, exists
import platform
import re
import signal
import subprocess
import sys
import tempfile
import time
import threading
import utils
from Queue import Queue, Empty
VERBOSE = False
# ---------------------------------------------
# --- P r o g r e s s I n d i c a t o r s ---
# ---------------------------------------------
class ProgressIndicator(object):
def __init__(self, cases):
self.cases = cases
self.queue = Queue(len(cases))
for case in cases:
self.queue.put_nowait(case)
self.succeeded = 0
self.remaining = len(cases)
self.total = len(cases)
self.failed = [ ]
self.crashed = 0
self.terminate = False
self.lock = threading.Lock()
def PrintFailureHeader(self, test):
if test.IsNegative():
negative_marker = '[negative] '
else:
negative_marker = ''
print "=== %(label)s %(negative)s===" % {
'label': test.GetLabel(),
'negative': negative_marker
}
print "Path: %s" % "/".join(test.path)
def Run(self, tasks):
self.Starting()
threads = []
# Spawn N-1 threads and then use this thread as the last one.
# That way -j1 avoids threading altogether which is a nice fallback
# in case of threading problems.
for i in xrange(tasks - 1):
thread = threading.Thread(target=self.RunSingle, args=[])
threads.append(thread)
thread.start()
try:
self.RunSingle()
# Wait for the remaining threads
for thread in threads:
# Use a timeout so that signals (ctrl-c) will be processed.
thread.join(timeout=10000000)
except Exception, e:
# If there's an exception we schedule an interruption for any
# remaining threads.
self.terminate = True
# ...and then reraise the exception to bail out
raise
self.Done()
return not self.failed
def RunSingle(self):
while not self.terminate:
try:
test = self.queue.get_nowait()
except Empty:
return
case = test.case
self.lock.acquire()
self.AboutToRun(case)
self.lock.release()
try:
start = time.time()
output = case.Run()
case.duration = (time.time() - start)
except BreakNowException:
self.terminate = True
except IOError, e:
assert self.terminate
return
if self.terminate:
return
self.lock.acquire()
if output.UnexpectedOutput():
self.failed.append(output)
if output.HasCrashed():
self.crashed += 1
else:
self.succeeded += 1
self.remaining -= 1
self.HasRun(output)
self.lock.release()
def EscapeCommand(command):
parts = []
for part in command:
if ' ' in part:
# Escape spaces. We may need to escape more characters for this
# to work properly.
parts.append('"%s"' % part)
else:
parts.append(part)
return " ".join(parts)
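# Illustrative example (the command is hypothetical), assuming the behaviour
# above: EscapeCommand(['d8', '--expose-gc', 'my test.js']) returns the single
# string: d8 --expose-gc "my test.js"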
class SimpleProgressIndicator(ProgressIndicator):
def Starting(self):
print 'Running %i tests' % len(self.cases)
def Done(self):
print
for failed in self.failed:
self.PrintFailureHeader(failed.test)
if failed.output.stderr:
print "--- stderr ---"
print failed.output.stderr.strip()
if failed.output.stdout:
print "--- stdout ---"
print failed.output.stdout.strip()
print "Command: %s" % EscapeCommand(failed.command)
if failed.HasCrashed():
print "--- CRASHED ---"
if failed.HasTimedOut():
print "--- TIMEOUT ---"
if len(self.failed) == 0:
print "==="
print "=== All tests succeeded"
print "==="
else:
print
print "==="
print "=== %i tests failed" % len(self.failed)
if self.crashed > 0:
print "=== %i tests CRASHED" % self.crashed
print "==="
class VerboseProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
print 'Starting %s...' % case.GetLabel()
sys.stdout.flush()
def HasRun(self, output):
if output.UnexpectedOutput():
if output.HasCrashed():
outcome = 'CRASH'
else:
outcome = 'FAIL'
else:
outcome = 'pass'
print 'Done running %s: %s' % (output.test.GetLabel(), outcome)
class DotsProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
pass
def HasRun(self, output):
total = self.succeeded + len(self.failed)
if (total > 1) and (total % 50 == 1):
sys.stdout.write('\n')
if output.UnexpectedOutput():
if output.HasCrashed():
sys.stdout.write('C')
sys.stdout.flush()
elif output.HasTimedOut():
sys.stdout.write('T')
sys.stdout.flush()
else:
sys.stdout.write('F')
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
class CompactProgressIndicator(ProgressIndicator):
def __init__(self, cases, templates):
super(CompactProgressIndicator, self).__init__(cases)
self.templates = templates
self.last_status_length = 0
self.start_time = time.time()
def Starting(self):
pass
def Done(self):
self.PrintProgress('Done')
def AboutToRun(self, case):
self.PrintProgress(case.GetLabel())
def HasRun(self, output):
if output.UnexpectedOutput():
self.ClearLine(self.last_status_length)
self.PrintFailureHeader(output.test)
stdout = output.output.stdout.strip()
if len(stdout):
print self.templates['stdout'] % stdout
stderr = output.output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
print "Command: %s" % EscapeCommand(output.command)
if output.HasCrashed():
print "--- CRASHED ---"
if output.HasTimedOut():
print "--- TIMEOUT ---"
def Truncate(self, str, length):
if length and (len(str) > (length - 3)):
return str[:(length-3)] + "..."
else:
return str
def PrintProgress(self, name):
self.ClearLine(self.last_status_length)
elapsed = time.time() - self.start_time
status = self.templates['status_line'] % {
'passed': self.succeeded,
'remaining': (((self.total - self.remaining) * 100) // self.total),
'failed': len(self.failed),
'test': name,
'mins': int(elapsed) / 60,
'secs': int(elapsed) % 60
}
status = self.Truncate(status, 78)
self.last_status_length = len(status)
print status,
sys.stdout.flush()
class ColorProgressIndicator(CompactProgressIndicator):
def __init__(self, cases):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|\033[34m%%%(remaining) 4d\033[0m|\033[32m+%(passed) 4d\033[0m|\033[31m-%(failed) 4d\033[0m]: %(test)s",
'stdout': "\033[1m%s\033[0m",
'stderr': "\033[31m%s\033[0m",
}
super(ColorProgressIndicator, self).__init__(cases, templates)
def ClearLine(self, last_line_length):
print "\033[1K\r",
class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self, cases):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|%%%(remaining) 4d|+%(passed) 4d|-%(failed) 4d]: %(test)s",
'stdout': '%s',
'stderr': '%s',
'clear': lambda last_line_length: ("\r" + (" " * last_line_length) + "\r"),
'max_length': 78
}
super(MonochromeProgressIndicator, self).__init__(cases, templates)
def ClearLine(self, last_line_length):
print ("\r" + (" " * last_line_length) + "\r"),
PROGRESS_INDICATORS = {
'verbose': VerboseProgressIndicator,
'dots': DotsProgressIndicator,
'color': ColorProgressIndicator,
'mono': MonochromeProgressIndicator
}
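# A minimal usage sketch (the cases list is hypothetical): RunTestCases()
# below looks an indicator class up by the name passed with --progress and
# drives it with the requested number of parallel tasks, e.g.
#   indicator = PROGRESS_INDICATORS['dots'](cases)
#   all_passed = indicator.Run(tasks)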
# -------------------------
# --- F r a m e w o r k ---
# -------------------------
class BreakNowException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class CommandOutput(object):
def __init__(self, exit_code, timed_out, stdout, stderr):
self.exit_code = exit_code
self.timed_out = timed_out
self.stdout = stdout
self.stderr = stderr
self.failed = None
class TestCase(object):
def __init__(self, context, path, mode):
self.path = path
self.context = context
self.duration = None
self.mode = mode
def IsNegative(self):
return False
def TestsIsolates(self):
return False
def CompareTime(self, other):
return cmp(other.duration, self.duration)
def DidFail(self, output):
if output.failed is None:
output.failed = self.IsFailureOutput(output)
return output.failed
def IsFailureOutput(self, output):
return output.exit_code != 0
def GetSource(self):
return "(no source available)"
def RunCommand(self, command):
full_command = self.context.processor(command)
output = Execute(full_command,
self.context,
self.context.GetTimeout(self, self.mode))
self.Cleanup()
return TestOutput(self,
full_command,
output,
self.context.store_unexpected_output)
def BeforeRun(self):
pass
def AfterRun(self, result):
pass
def GetCustomFlags(self, mode):
return None
def Run(self):
self.BeforeRun()
result = None
try:
result = self.RunCommand(self.GetCommand())
except:
self.terminate = True
raise BreakNowException("User pressed CTRL+C or IO went wrong")
finally:
self.AfterRun(result)
return result
def Cleanup(self):
return
class TestOutput(object):
def __init__(self, test, command, output, store_unexpected_output):
self.test = test
self.command = command
self.output = output
self.store_unexpected_output = store_unexpected_output
def UnexpectedOutput(self):
if self.HasCrashed():
outcome = CRASH
elif self.HasTimedOut():
outcome = TIMEOUT
elif self.HasFailed():
outcome = FAIL
else:
outcome = PASS
return not outcome in self.test.outcomes
def HasPreciousOutput(self):
return self.UnexpectedOutput() and self.store_unexpected_output
def HasCrashed(self):
if utils.IsWindows():
return 0x80000000 & self.output.exit_code and not (0x3FFFFF00 & self.output.exit_code)
else:
# Timed out tests will have exit_code -signal.SIGTERM.
if self.output.timed_out:
return False
return self.output.exit_code < 0 and \
self.output.exit_code != -signal.SIGABRT
def HasTimedOut(self):
return self.output.timed_out
def HasFailed(self):
execution_failed = self.test.DidFail(self.output)
if self.test.IsNegative():
return not execution_failed
else:
return execution_failed
def KillProcessWithID(pid):
if utils.IsWindows():
os.popen('taskkill /T /F /PID %d' % pid)
else:
os.kill(pid, signal.SIGTERM)
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.0001
SLEEP_TIME_FACTOR = 1.25
SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
def Win32SetErrorMode(mode):
prev_error_mode = SEM_INVALID_VALUE
try:
import ctypes
prev_error_mode = ctypes.windll.kernel32.SetErrorMode(mode)
except ImportError:
pass
return prev_error_mode
def RunProcess(context, timeout, args, **rest):
if context.verbose: print "#", " ".join(args)
popen_args = args
prev_error_mode = SEM_INVALID_VALUE
if utils.IsWindows():
popen_args = '"' + subprocess.list2cmdline(args) + '"'
if context.suppress_dialogs:
# Try to change the error mode to avoid dialogs on fatal errors. Don't
# touch any existing error mode flags by merging the existing error mode.
# See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
error_mode = SEM_NOGPFAULTERRORBOX
prev_error_mode = Win32SetErrorMode(error_mode)
Win32SetErrorMode(error_mode | prev_error_mode)
process = subprocess.Popen(
shell = utils.IsWindows(),
args = popen_args,
**rest
)
if utils.IsWindows() and context.suppress_dialogs and prev_error_mode != SEM_INVALID_VALUE:
Win32SetErrorMode(prev_error_mode)
# Compute the end time - if the process crosses this limit we
# consider it timed out.
if timeout is None: end_time = None
else: end_time = time.time() + timeout
timed_out = False
# Repeatedly check the exit code from the process in a
# loop and keep track of whether or not it times out.
exit_code = None
sleep_time = INITIAL_SLEEP_TIME
while exit_code is None:
if (not end_time is None) and (time.time() >= end_time):
# Kill the process and wait for it to exit.
KillProcessWithID(process.pid)
exit_code = process.wait()
timed_out = True
else:
exit_code = process.poll()
time.sleep(sleep_time)
sleep_time = sleep_time * SLEEP_TIME_FACTOR
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
return (process, exit_code, timed_out)
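# Note on the polling loop above: the poll interval starts at
# INITIAL_SLEEP_TIME (0.1 ms), grows by SLEEP_TIME_FACTOR (1.25x) each
# iteration and is capped at MAX_SLEEP_TIME (100 ms); the cap is reached after
# roughly 31 polls (0.0001 * 1.25**31 ~= 0.1), so a long-running child ends up
# being polled about ten times per second.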
def PrintError(str):
sys.stderr.write(str)
sys.stderr.write('\n')
def CheckedUnlink(name):
# On Windows, when run with -jN in parallel processes,
# OS often fails to unlink the temp file. Not sure why.
# Need to retry.
# Idea from https://bugs.webkit.org/attachment.cgi?id=75982&action=prettypatch
retry_count = 0
while retry_count < 30:
try:
os.unlink(name)
return
except OSError, e:
retry_count += 1
time.sleep(retry_count * 0.1)
PrintError("os.unlink() " + str(e))
def Execute(args, context, timeout=None):
(fd_out, outname) = tempfile.mkstemp()
(fd_err, errname) = tempfile.mkstemp()
(process, exit_code, timed_out) = RunProcess(
context,
timeout,
args = args,
stdout = fd_out,
stderr = fd_err,
)
os.close(fd_out)
os.close(fd_err)
output = file(outname).read()
errors = file(errname).read()
CheckedUnlink(outname)
CheckedUnlink(errname)
return CommandOutput(exit_code, timed_out, output, errors)
def ExecuteNoCapture(args, context, timeout=None):
(process, exit_code, timed_out) = RunProcess(
context,
timeout,
args = args,
)
return CommandOutput(exit_code, False, "", "")
def CarCdr(path):
if len(path) == 0:
return (None, [ ])
else:
return (path[0], path[1:])
# Use this to run several variants of the tests, e.g.:
# VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
VARIANT_FLAGS = [[],
['--stress-opt', '--always-opt'],
['--nocrankshaft']]
class TestConfiguration(object):
def __init__(self, context, root):
self.context = context
self.root = root
def Contains(self, path, file):
if len(path) > len(file):
return False
for i in xrange(len(path)):
if not path[i].match(file[i]):
return False
return True
def GetTestStatus(self, sections, defs):
pass
def VariantFlags(self):
return VARIANT_FLAGS
class TestSuite(object):
def __init__(self, name):
self.name = name
def GetName(self):
return self.name
class TestRepository(TestSuite):
def __init__(self, path):
normalized_path = abspath(path)
super(TestRepository, self).__init__(basename(normalized_path))
self.path = normalized_path
self.is_loaded = False
self.config = None
def GetConfiguration(self, context):
if self.is_loaded:
return self.config
self.is_loaded = True
file = None
try:
(file, pathname, description) = imp.find_module('testcfg', [ self.path ])
module = imp.load_module('testcfg', file, pathname, description)
self.config = module.GetConfiguration(context, self.path)
finally:
if file:
file.close()
return self.config
def GetBuildRequirements(self, path, context):
return self.GetConfiguration(context).GetBuildRequirements()
def AddTestsToList(self, result, current_path, path, context, mode):
for v in self.GetConfiguration(context).VariantFlags():
tests = self.GetConfiguration(context).ListTests(current_path, path, mode, v)
for t in tests: t.variant_flags = v
result += tests
def GetTestStatus(self, context, sections, defs):
self.GetConfiguration(context).GetTestStatus(sections, defs)
class LiteralTestSuite(TestSuite):
def __init__(self, tests):
super(LiteralTestSuite, self).__init__('root')
self.tests = tests
def GetBuildRequirements(self, path, context):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
if not name or name.match(test.GetName()):
result += test.GetBuildRequirements(rest, context)
return result
def ListTests(self, current_path, path, context, mode, variant_flags):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
test_name = test.GetName()
if not name or name.match(test_name):
full_path = current_path + [test_name]
test.AddTestsToList(result, full_path, path, context, mode)
return result
def GetTestStatus(self, context, sections, defs):
for test in self.tests:
test.GetTestStatus(context, sections, defs)
SUFFIX = {
'debug' : '_g',
'release' : '' }
FLAGS = {
'debug' : ['--enable-slow-asserts', '--debug-code', '--verify-heap'],
'release' : []}
TIMEOUT_SCALEFACTOR = {
'debug' : 4,
'release' : 1 }
class Context(object):
def __init__(self, workspace, buildspace, verbose, vm, timeout, processor, suppress_dialogs, store_unexpected_output):
self.workspace = workspace
self.buildspace = buildspace
self.verbose = verbose
self.vm_root = vm
self.timeout = timeout
self.processor = processor
self.suppress_dialogs = suppress_dialogs
self.store_unexpected_output = store_unexpected_output
def GetVm(self, mode):
name = self.vm_root + SUFFIX[mode]
if utils.IsWindows() and not name.endswith('.exe'):
name = name + '.exe'
return name
def GetVmCommand(self, testcase, mode):
return [self.GetVm(mode)] + self.GetVmFlags(testcase, mode)
def GetVmFlags(self, testcase, mode):
flags = testcase.GetCustomFlags(mode)
if flags is None:
flags = FLAGS[mode]
return testcase.variant_flags + flags
def GetTimeout(self, testcase, mode):
result = self.timeout * TIMEOUT_SCALEFACTOR[mode]
if '--stress-opt' in self.GetVmFlags(testcase, mode):
return result * 2
else:
return result
def RunTestCases(cases_to_run, progress, tasks):
progress = PROGRESS_INDICATORS[progress](cases_to_run)
result = 0
try:
result = progress.Run(tasks)
except Exception, e:
print "\n", e
return result
def BuildRequirements(context, requirements, mode, scons_flags):
command_line = (['scons', '-Y', context.workspace, 'mode=' + ",".join(mode)]
+ requirements
+ scons_flags)
output = ExecuteNoCapture(command_line, context)
return output.exit_code == 0
# -------------------------------------------
# --- T e s t C o n f i g u r a t i o n ---
# -------------------------------------------
SKIP = 'skip'
FAIL = 'fail'
PASS = 'pass'
OKAY = 'okay'
TIMEOUT = 'timeout'
CRASH = 'crash'
SLOW = 'slow'
class Expression(object):
pass
class Constant(Expression):
def __init__(self, value):
self.value = value
def Evaluate(self, env, defs):
return self.value
class Variable(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in env: return ListSet([env[self.name]])
else: return Nothing()
def Evaluate(self, env, defs):
return env[self.name]
class Outcome(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in defs:
return defs[self.name].GetOutcomes(env, defs)
else:
return ListSet([self.name])
class Set(object):
pass
class ListSet(Set):
def __init__(self, elms):
self.elms = elms
def __str__(self):
return "ListSet%s" % str(self.elms)
def Intersect(self, that):
if not isinstance(that, ListSet):
return that.Intersect(self)
return ListSet([ x for x in self.elms if x in that.elms ])
def Union(self, that):
if not isinstance(that, ListSet):
return that.Union(self)
return ListSet(self.elms + [ x for x in that.elms if x not in self.elms ])
def IsEmpty(self):
return len(self.elms) == 0
class Everything(Set):
def Intersect(self, that):
return that
def Union(self, that):
return self
def IsEmpty(self):
return False
class Nothing(Set):
def Intersect(self, that):
return self
def Union(self, that):
return that
def IsEmpty(self):
return True
class Operation(Expression):
def __init__(self, left, op, right):
self.left = left
self.op = op
self.right = right
def Evaluate(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
elif self.op == 'if':
return False
elif self.op == '==':
inter = self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
return not inter.IsEmpty()
else:
assert self.op == '&&'
return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)
def GetOutcomes(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.GetOutcomes(env, defs).Union(self.right.GetOutcomes(env, defs))
elif self.op == 'if':
if self.right.Evaluate(env, defs): return self.left.GetOutcomes(env, defs)
else: return Nothing()
else:
assert self.op == '&&'
return self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
def IsAlpha(str):
for char in str:
if not (char.isalpha() or char.isdigit() or char == '_'):
return False
return True
class Tokenizer(object):
"""A simple string tokenizer that chops expressions into variables,
parens and operators"""
def __init__(self, expr):
self.index = 0
self.expr = expr
self.length = len(expr)
self.tokens = None
def Current(self, length = 1):
if not self.HasMore(length): return ""
return self.expr[self.index:self.index+length]
def HasMore(self, length = 1):
return self.index < self.length + (length - 1)
def Advance(self, count = 1):
self.index = self.index + count
def AddToken(self, token):
self.tokens.append(token)
def SkipSpaces(self):
while self.HasMore() and self.Current().isspace():
self.Advance()
def Tokenize(self):
self.tokens = [ ]
while self.HasMore():
self.SkipSpaces()
if not self.HasMore():
return None
if self.Current() == '(':
self.AddToken('(')
self.Advance()
elif self.Current() == ')':
self.AddToken(')')
self.Advance()
elif self.Current() == '$':
self.AddToken('$')
self.Advance()
elif self.Current() == ',':
self.AddToken(',')
self.Advance()
elif IsAlpha(self.Current()):
buf = ""
while self.HasMore() and IsAlpha(self.Current()):
buf += self.Current()
self.Advance()
self.AddToken(buf)
elif self.Current(2) == '&&':
self.AddToken('&&')
self.Advance(2)
elif self.Current(2) == '||':
self.AddToken('||')
self.Advance(2)
elif self.Current(2) == '==':
self.AddToken('==')
self.Advance(2)
else:
return None
return self.tokens
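# Illustrative example (the expression is hypothetical):
#   Tokenizer('$mode == debug && ($arch == ia32 || $arch == x64)').Tokenize()
# yields
#   ['$', 'mode', '==', 'debug', '&&', '(', '$', 'arch', '==', 'ia32',
#    '||', '$', 'arch', '==', 'x64', ')']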
class Scanner(object):
"""A simple scanner that can serve out tokens from a given list"""
def __init__(self, tokens):
self.tokens = tokens
self.length = len(tokens)
self.index = 0
def HasMore(self):
return self.index < self.length
def Current(self):
return self.tokens[self.index]
def Advance(self):
self.index = self.index + 1
def ParseAtomicExpression(scan):
if scan.Current() == "true":
scan.Advance()
return Constant(True)
elif scan.Current() == "false":
scan.Advance()
return Constant(False)
elif IsAlpha(scan.Current()):
name = scan.Current()
scan.Advance()
return Outcome(name.lower())
elif scan.Current() == '$':
scan.Advance()
if not IsAlpha(scan.Current()):
return None
name = scan.Current()
scan.Advance()
return Variable(name.lower())
elif scan.Current() == '(':
scan.Advance()
result = ParseLogicalExpression(scan)
if (not result) or (scan.Current() != ')'):
return None
scan.Advance()
return result
else:
return None
BINARIES = ['==']
def ParseOperatorExpression(scan):
left = ParseAtomicExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in BINARIES):
op = scan.Current()
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseConditionalExpression(scan):
left = ParseOperatorExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() == 'if'):
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
left = Operation(left, 'if', right)
return left
LOGICALS = ["&&", "||", ","]
def ParseLogicalExpression(scan):
left = ParseConditionalExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in LOGICALS):
op = scan.Current()
scan.Advance()
right = ParseConditionalExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseCondition(expr):
"""Parses a logical expression into an Expression object"""
tokens = Tokenizer(expr).Tokenize()
if not tokens:
print "Malformed expression: '%s'" % expr
return None
scan = Scanner(tokens)
ast = ParseLogicalExpression(scan)
if not ast:
print "Malformed expression: '%s'" % expr
return None
if scan.HasMore():
print "Malformed expression: '%s'" % expr
return None
return ast
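# Illustrative example (the expression and env are hypothetical):
#   ast = ParseCondition('PASS || FAIL if $mode == debug')
# builds Operation(pass, '||', Operation(fail, 'if', Operation($mode, '==', debug))),
# so ast.GetOutcomes({'mode': 'debug'}, {}) yields a ListSet containing 'pass'
# and 'fail', while with {'mode': 'release'} only 'pass' remains.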
class ClassifiedTest(object):
def __init__(self, case, outcomes):
self.case = case
self.outcomes = outcomes
def TestsIsolates(self):
return self.case.TestsIsolates()
class Configuration(object):
"""The parsed contents of a configuration file"""
def __init__(self, sections, defs):
self.sections = sections
self.defs = defs
def ClassifyTests(self, cases, env):
sections = [s for s in self.sections if s.condition.Evaluate(env, self.defs)]
all_rules = reduce(list.__add__, [s.rules for s in sections], [])
unused_rules = set(all_rules)
result = [ ]
all_outcomes = set([])
for case in cases:
matches = [ r for r in all_rules if r.Contains(case.path) ]
outcomes = set([])
for rule in matches:
outcomes = outcomes.union(rule.GetOutcomes(env, self.defs))
unused_rules.discard(rule)
if not outcomes:
outcomes = [PASS]
case.outcomes = outcomes
all_outcomes = all_outcomes.union(outcomes)
result.append(ClassifiedTest(case, outcomes))
return (result, list(unused_rules), all_outcomes)
class Section(object):
"""A section of the configuration file. Sections are enabled or
disabled prior to running the tests, based on their conditions"""
def __init__(self, condition):
self.condition = condition
self.rules = [ ]
def AddRule(self, rule):
self.rules.append(rule)
class Rule(object):
"""A single rule that specifies the expected outcome for a single
test."""
def __init__(self, raw_path, path, value):
self.raw_path = raw_path
self.path = path
self.value = value
def GetOutcomes(self, env, defs):
set = self.value.GetOutcomes(env, defs)
assert isinstance(set, ListSet)
return set.elms
def Contains(self, path):
if len(self.path) > len(path):
return False
for i in xrange(len(self.path)):
if not self.path[i].match(path[i]):
return False
return True
HEADER_PATTERN = re.compile(r'\[([^]]+)\]')
RULE_PATTERN = re.compile(r'\s*([^: ]*)\s*:(.*)')
DEF_PATTERN = re.compile(r'^def\s*(\w+)\s*=(.*)$')
PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w\_\.\-\/]+)$')
def ReadConfigurationInto(path, sections, defs):
current_section = Section(Constant(True))
sections.append(current_section)
prefix = []
for line in utils.ReadLinesFrom(path):
header_match = HEADER_PATTERN.match(line)
if header_match:
condition_str = header_match.group(1).strip()
condition = ParseCondition(condition_str)
new_section = Section(condition)
sections.append(new_section)
current_section = new_section
continue
rule_match = RULE_PATTERN.match(line)
if rule_match:
path = prefix + SplitPath(rule_match.group(1).strip())
value_str = rule_match.group(2).strip()
value = ParseCondition(value_str)
if not value:
return False
current_section.AddRule(Rule(rule_match.group(1), path, value))
continue
def_match = DEF_PATTERN.match(line)
if def_match:
name = def_match.group(1).lower()
value = ParseCondition(def_match.group(2).strip())
if not value:
return False
defs[name] = value
continue
prefix_match = PREFIX_PATTERN.match(line)
if prefix_match:
prefix = SplitPath(prefix_match.group(1).strip())
continue
print "Malformed line: '%s'." % line
return False
return True
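# Illustrative example (paths and names are hypothetical) of a status file
# accepted by the patterns above; ReadConfigurationInto() turns it into one
# unconditional section plus one section guarded by the bracketed condition:
#
#   prefix mjsunit
#   def FAIL_OK = FAIL, OKAY
#   regress/some-test: PASS || TIMEOUT
#   [ $mode == debug ]
#   regress/other-test: FAIL_OK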
# ---------------
# --- M a i n ---
# ---------------
ARCH_GUESS = utils.GuessArchitecture()
TIMEOUT_DEFAULT = 60;
def BuildOptions():
result = optparse.OptionParser()
result.add_option("-m", "--mode", help="The test modes in which to run (comma-separated)",
default='release')
result.add_option("-v", "--verbose", help="Verbose output",
default=False, action="store_true")
result.add_option("-S", dest="scons_flags", help="Flag to pass through to scons",
default=[], action="append")
result.add_option("-p", "--progress",
help="The style of progress indicator (verbose, dots, color, mono)",
choices=PROGRESS_INDICATORS.keys(), default="mono")
result.add_option("--no-build", help="Don't build requirements",
default=False, action="store_true")
result.add_option("--build-only", help="Only build requirements, don't run the tests",
default=False, action="store_true")
result.add_option("--build-system", help="Build system in use (scons or gyp)",
default='scons')
result.add_option("--report", help="Print a summary of the tests to be run",
default=False, action="store_true")
result.add_option("-s", "--suite", help="A test suite",
default=[], action="append")
result.add_option("-t", "--timeout", help="Timeout in seconds",
default=-1, type="int")
result.add_option("--arch", help='The architecture to run tests for',
default='none')
result.add_option("--snapshot", help="Run the tests with snapshot turned on",
default=False, action="store_true")
result.add_option("--simulator", help="Run tests with architecture simulator",
default='none')
result.add_option("--special-command", default=None)
result.add_option("--valgrind", help="Run tests through valgrind",
default=False, action="store_true")
result.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
result.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
result.add_option("-j", help="The number of parallel tasks to run",
default=1, type="int")
result.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests",
dest="suppress_dialogs", default=True, action="store_true")
result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
dest="suppress_dialogs", action="store_false")
result.add_option("--mips-arch-variant", help="mips architecture variant: mips32r1/mips32r2", default="mips32r2");
result.add_option("--shell", help="Path to V8 shell", default="d8")
result.add_option("--isolates", help="Whether to test isolates", default=False, action="store_true")
result.add_option("--store-unexpected-output",
help="Store the temporary JS files from tests that fails",
dest="store_unexpected_output", default=True, action="store_true")
result.add_option("--no-store-unexpected-output",
help="Deletes the temporary JS files from tests that fails",
dest="store_unexpected_output", action="store_false")
result.add_option("--stress-only",
help="Only run tests with --always-opt --stress-opt",
default=False, action="store_true")
result.add_option("--nostress",
help="Don't run crankshaft --always-opt --stress-op test",
default=False, action="store_true")
result.add_option("--crankshaft",
help="Run with the --crankshaft flag",
default=False, action="store_true")
result.add_option("--shard-count",
help="Split testsuites into this number of shards",
default=1, type="int")
result.add_option("--shard-run",
help="Run this shard from the split up tests.",
default=1, type="int")
result.add_option("--noprof", help="Disable profiling support",
default=False)
return result
def ProcessOptions(options):
global VERBOSE
VERBOSE = options.verbose
options.mode = options.mode.split(',')
for mode in options.mode:
if not mode in ['debug', 'release']:
print "Unknown mode %s" % mode
return False
if options.simulator != 'none':
# Simulator argument was set. Make sure arch and simulator agree.
if options.simulator != options.arch:
if options.arch == 'none':
options.arch = options.simulator
else:
print "Architecture %s does not match sim %s" %(options.arch, options.simulator)
return False
# Ensure that the simulator argument is handed down to scons.
options.scons_flags.append("simulator=" + options.simulator)
else:
# If options.arch is not set by the command line and no simulator setting
# was found, set the arch to the guess.
if options.arch == 'none':
options.arch = ARCH_GUESS
options.scons_flags.append("arch=" + options.arch)
# Simulators are slow, therefore allow a longer default timeout.
if options.timeout == -1:
if options.arch == 'arm' or options.arch == 'mips':
options.timeout = 2 * TIMEOUT_DEFAULT;
else:
options.timeout = TIMEOUT_DEFAULT;
if options.snapshot:
options.scons_flags.append("snapshot=on")
global VARIANT_FLAGS
if options.mips_arch_variant:
options.scons_flags.append("mips_arch_variant=" + options.mips_arch_variant)
if options.stress_only:
VARIANT_FLAGS = [['--stress-opt', '--always-opt']]
if options.nostress:
VARIANT_FLAGS = [[],['--nocrankshaft']]
if options.crankshaft:
if options.special_command:
options.special_command += " --crankshaft"
else:
options.special_command = "@ --crankshaft"
if options.shell.endswith("d8"):
if options.special_command:
options.special_command += " --test"
else:
options.special_command = "@ --test"
if options.noprof:
options.scons_flags.append("prof=off")
options.scons_flags.append("profilingsupport=off")
if options.build_system == 'gyp':
if options.build_only:
print "--build-only not supported for gyp, please build manually."
options.build_only = False
return True
def DoSkip(case):
return (SKIP in case.outcomes) or (SLOW in case.outcomes)
REPORT_TEMPLATE = """\
Total: %(total)i tests
* %(skipped)4d tests will be skipped
* %(timeout)4d tests are expected to timeout sometimes
* %(nocrash)4d tests are expected to be flaky but not crash
* %(pass)4d tests are expected to pass
* %(fail_ok)4d tests are expected to fail that we won't fix
* %(fail)4d tests are expected to fail that we should fix\
"""
def PrintReport(cases):
def IsFlaky(o):
return (PASS in o) and (FAIL in o) and (not CRASH in o) and (not OKAY in o)
def IsFailOk(o):
return (len(o) == 2) and (FAIL in o) and (OKAY in o)
unskipped = [c for c in cases if not DoSkip(c)]
print REPORT_TEMPLATE % {
'total': len(cases),
'skipped': len(cases) - len(unskipped),
'timeout': len([t for t in unskipped if TIMEOUT in t.outcomes]),
'nocrash': len([t for t in unskipped if IsFlaky(t.outcomes)]),
'pass': len([t for t in unskipped if list(t.outcomes) == [PASS]]),
'fail_ok': len([t for t in unskipped if IsFailOk(t.outcomes)]),
'fail': len([t for t in unskipped if list(t.outcomes) == [FAIL]])
}
class Pattern(object):
def __init__(self, pattern):
self.pattern = pattern
self.compiled = None
def match(self, str):
if not self.compiled:
pattern = "^" + self.pattern.replace('*', '.*') + "$"
self.compiled = re.compile(pattern)
return self.compiled.match(str)
def __str__(self):
return self.pattern
def SplitPath(s):
stripped = [ c.strip() for c in s.split('/') ]
return [ Pattern(s) for s in stripped if len(s) > 0 ]
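# Illustrative example (the path is hypothetical):
#   SplitPath('mjsunit/regress/*')
# gives [Pattern('mjsunit'), Pattern('regress'), Pattern('*')]; each Pattern
# compiles to an anchored regex ('*' becomes '.*'), so the last component
# matches any test name at that level.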
def GetSpecialCommandProcessor(value):
if (not value) or (value.find('@') == -1):
def ExpandCommand(args):
return args
return ExpandCommand
else:
pos = value.find('@')
import urllib
prefix = urllib.unquote(value[:pos]).split()
suffix = urllib.unquote(value[pos+1:]).split()
def ExpandCommand(args):
return prefix + args + suffix
return ExpandCommand
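# Illustrative example (the wrapper command is hypothetical): with
#   --special-command="valgrind @ --trace-children=yes"
# ExpandCommand() turns every test command [shell, testfile] into
#   ['valgrind', shell, testfile, '--trace-children=yes'],
# i.e. the '@' marks where the original command is spliced in.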
BUILT_IN_TESTS = ['mjsunit', 'cctest', 'message', 'preparser']
def GetSuites(test_root):
def IsSuite(path):
return isdir(path) and exists(join(path, 'testcfg.py'))
return [ f for f in os.listdir(test_root) if IsSuite(join(test_root, f)) ]
def FormatTime(d):
millis = round(d * 1000) % 1000
return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
def ShardTests(tests, options):
if options.shard_count < 2:
return tests
if options.shard_run < 1 or options.shard_run > options.shard_count:
print "shard-run not a valid number, should be in [1:shard-count]"
print "defaulting back to running all tests"
return tests
count = 0
shard = []
for test in tests:
if count % options.shard_count == options.shard_run - 1:
shard.append(test)
count += 1
return shard
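# Illustrative example: with --shard-count=3 and --shard-run=2 the loop above
# keeps the tests whose running index satisfies count % 3 == 1, i.e. the 2nd,
# 5th, 8th, ... tests in list order, so the three shard runs together cover
# every test exactly once.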
def Main():
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
workspace = abspath(join(dirname(sys.argv[0]), '..'))
suites = GetSuites(join(workspace, 'test'))
repositories = [TestRepository(join(workspace, 'test', name)) for name in suites]
repositories += [TestRepository(a) for a in options.suite]
root = LiteralTestSuite(repositories)
if len(args) == 0:
paths = [SplitPath(t) for t in BUILT_IN_TESTS]
else:
paths = [ ]
for arg in args:
path = SplitPath(arg)
paths.append(path)
# Check for --valgrind option. If enabled, we overwrite the special
# command flag with a command that uses the run-valgrind.py script.
if options.valgrind:
run_valgrind = join(workspace, "tools", "run-valgrind.py")
options.special_command = "python -u " + run_valgrind + " @"
if options.build_system == 'gyp':
SUFFIX['debug'] = ''
shell = abspath(options.shell)
buildspace = dirname(shell)
context = Context(workspace, buildspace, VERBOSE,
shell,
options.timeout,
GetSpecialCommandProcessor(options.special_command),
options.suppress_dialogs,
options.store_unexpected_output)
# First build the required targets
if not options.no_build:
reqs = [ ]
for path in paths:
reqs += root.GetBuildRequirements(path, context)
reqs = list(set(reqs))
if len(reqs) > 0:
if options.j != 1:
options.scons_flags += ['-j', str(options.j)]
if not BuildRequirements(context, reqs, options.mode, options.scons_flags):
return 1
# Just return if we are only building the targets for running the tests.
if options.build_only:
return 0
# Get status for tests
sections = [ ]
defs = { }
root.GetTestStatus(context, sections, defs)
config = Configuration(sections, defs)
# List the tests
all_cases = [ ]
all_unused = [ ]
unclassified_tests = [ ]
globally_unused_rules = None
for path in paths:
for mode in options.mode:
env = {
'mode': mode,
'system': utils.GuessOS(),
'arch': options.arch,
'simulator': options.simulator,
'crankshaft': options.crankshaft,
'isolates': options.isolates
}
test_list = root.ListTests([], path, context, mode, [])
unclassified_tests += test_list
(cases, unused_rules, all_outcomes) = config.ClassifyTests(test_list, env)
if globally_unused_rules is None:
globally_unused_rules = set(unused_rules)
else:
globally_unused_rules = globally_unused_rules.intersection(unused_rules)
all_cases += ShardTests(cases, options)
all_unused.append(unused_rules)
if options.cat:
visited = set()
for test in unclassified_tests:
key = tuple(test.path)
if key in visited:
continue
visited.add(key)
print "--- begin source: %s ---" % test.GetLabel()
source = test.GetSource().strip()
print source
print "--- end source: %s ---" % test.GetLabel()
return 0
if options.warn_unused:
for rule in globally_unused_rules:
print "Rule for '%s' was not used." % '/'.join([str(s) for s in rule.path])
if not options.isolates:
all_cases = [c for c in all_cases if not c.TestsIsolates()]
if options.report:
PrintReport(all_cases)
result = None
cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
if len(cases_to_run) == 0:
print "No tests to run."
return 0
else:
try:
start = time.time()
if RunTestCases(cases_to_run, options.progress, options.j):
result = 0
else:
result = 1
duration = time.time() - start
except KeyboardInterrupt:
print "Interrupted"
return 1
if options.time:
# Write the times to stderr to make it easy to separate from the
# test output.
print
sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration))
timed_tests = [ t.case for t in cases_to_run if not t.case.duration is None ]
timed_tests.sort(lambda a, b: a.CompareTime(b))
index = 1
for entry in timed_tests[:20]:
t = FormatTime(entry.duration)
sys.stderr.write("%4i (%s) %s\n" % (index, t, entry.GetLabel()))
index += 1
return result
if __name__ == '__main__':
sys.exit(Main())
|
|
"""
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> from django.utils import feedgenerator
>>> feed = feedgenerator.Rss201rev2Feed(
... title="Poynter E-Media Tidbits",
... link="http://www.poynter.org/column.asp?id=31",
... description="A group Weblog by the sharpest minds in online media/journalism/publishing.",
... language="en",
... )
>>> feed.add_item(
... title="Hello",
... link="http://www.holovaty.com/test/",
... description="Testing."
... )
>>> with open('test.rss', 'w') as fp:
... feed.write(fp, 'utf-8')
For definitions of the different versions of RSS, see:
http://web.archive.org/web/20110718035220/http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
from __future__ import unicode_literals
import datetime
try:
from urllib.parse import urlparse
except ImportError: # Python 2
from urlparse import urlparse
from .xmlutils import SimplerXMLGenerator
from .encoding import force_text, iri_to_uri
from . import datetime_safe
from . import six
from .six import StringIO
from .timezone import is_aware
def rfc2822_date(date):
# We can't use strftime() because it produces locale-dependent results, so
# we have to map English month and day names manually.
months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec',)
days = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
# Support datetime objects older than 1900
date = datetime_safe.new_datetime(date)
# We do this ourselves to be timezone aware; email.Utils is not tz aware.
dow = days[date.weekday()]
month = months[date.month - 1]
time_str = date.strftime('%s, %%d %s %%Y %%H:%%M:%%S ' % (dow, month))
if not six.PY3: # strftime returns a byte string in Python 2
time_str = time_str.decode('utf-8')
if is_aware(date):
offset = date.tzinfo.utcoffset(date)
timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
hour, minute = divmod(timezone, 60)
return time_str + '%+03d%02d' % (hour, minute)
else:
return time_str + '-0000'
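# Illustrative example (the date is arbitrary): for a naive datetime,
#   rfc2822_date(datetime.datetime(2008, 11, 14, 13, 37, 0))
# returns 'Fri, 14 Nov 2008 13:37:00 -0000', while an aware datetime gets its
# actual UTC offset appended instead (e.g. '+0100').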
def rfc3339_date(date):
# Support datetime objects older than 1900
date = datetime_safe.new_datetime(date)
time_str = date.strftime('%Y-%m-%dT%H:%M:%S')
if not six.PY3: # strftime returns a byte string in Python 2
time_str = time_str.decode('utf-8')
if is_aware(date):
offset = date.tzinfo.utcoffset(date)
timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
hour, minute = divmod(timezone, 60)
return time_str + '%+03d:%02d' % (hour, minute)
else:
return time_str + 'Z'
def get_tag_uri(url, date):
"""
Creates a TagURI.
See http://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id
"""
bits = urlparse(url)
d = ''
if date is not None:
d = ',%s' % datetime_safe.new_datetime(date).strftime('%Y-%m-%d')
return 'tag:%s%s:%s/%s' % (bits.hostname, d, bits.path, bits.fragment)
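# Illustrative example (the URL is hypothetical):
#   get_tag_uri('http://example.com/blog/post#comments', datetime.datetime(2009, 5, 28))
# returns 'tag:example.com,2009-05-28:/blog/post/comments'.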
class SyndicationFeed(object):
"Base class for all syndication feeds. Subclasses should provide write()"
def __init__(self, title, link, description, language=None, author_email=None,
author_name=None, author_link=None, subtitle=None, categories=None,
feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
to_unicode = lambda s: force_text(s, strings_only=True)
if categories:
categories = [force_text(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
self.feed = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'language': to_unicode(language),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'subtitle': to_unicode(subtitle),
'categories': categories or (),
'feed_url': iri_to_uri(feed_url),
'feed_copyright': to_unicode(feed_copyright),
'id': feed_guid or link,
'ttl': ttl,
}
self.feed.update(kwargs)
self.items = []
def add_item(self, title, link, description, author_email=None,
author_name=None, author_link=None, pubdate=None, comments=None,
unique_id=None, enclosure=None, categories=(), item_copyright=None,
ttl=None, **kwargs):
"""
Adds an item to the feed. All args are expected to be Python Unicode
objects except pubdate, which is a datetime.datetime object, and
enclosure, which is an instance of the Enclosure class.
"""
to_unicode = lambda s: force_text(s, strings_only=True)
if categories:
categories = [to_unicode(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
item = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'pubdate': pubdate,
'comments': to_unicode(comments),
'unique_id': to_unicode(unique_id),
'enclosure': enclosure,
'categories': categories or (),
'item_copyright': to_unicode(item_copyright),
'ttl': ttl,
}
item.update(kwargs)
self.items.append(item)
def num_items(self):
return len(self.items)
def root_attributes(self):
"""
Return extra attributes to place on the root (i.e. feed/channel) element.
Called from write().
"""
return {}
def add_root_elements(self, handler):
"""
Add elements in the root (i.e. feed/channel) element. Called
from write().
"""
pass
def item_attributes(self, item):
"""
Return extra attributes to place on each item (i.e. item/entry) element.
"""
return {}
def add_item_elements(self, handler, item):
"""
Add elements on each item (i.e. item/entry) element.
"""
pass
def write(self, outfile, encoding):
"""
Outputs the feed in the given encoding to outfile, which is a file-like
object. Subclasses should override this.
"""
raise NotImplementedError
def writeString(self, encoding):
"""
Returns the feed in the given encoding as a string.
"""
s = StringIO()
self.write(s, encoding)
return s.getvalue()
def latest_post_date(self):
"""
Returns the latest item's pubdate. If none of them have a pubdate,
this returns the current date/time.
"""
updates = [i['pubdate'] for i in self.items if i['pubdate'] is not None]
if len(updates) > 0:
updates.sort()
return updates[-1]
else:
return datetime.datetime.now()
class Enclosure(object):
"Represents an RSS enclosure"
def __init__(self, url, length, mime_type):
"All args are expected to be Python Unicode objects"
self.length, self.mime_type = length, mime_type
self.url = iri_to_uri(url)
class RssFeed(SyndicationFeed):
mime_type = 'application/rss+xml; charset=utf-8'
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement("rss", self.rss_attributes())
handler.startElement("channel", self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
self.endChannelElement(handler)
handler.endElement("rss")
def rss_attributes(self):
return {'xmlns:atom': 'http://www.w3.org/2005/Atom',
'version': self._version}
def write_items(self, handler):
for item in self.items:
handler.startElement('item', self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("item")
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", self.feed['link'])
handler.addQuickElement("description", self.feed['description'])
if self.feed['feed_url'] is not None:
handler.addQuickElement("atom:link", None,
{"rel": "self", "href": self.feed['feed_url']})
if self.feed['language'] is not None:
handler.addQuickElement("language", self.feed['language'])
for cat in self.feed['categories']:
handler.addQuickElement("category", cat)
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("copyright", self.feed['feed_copyright'])
handler.addQuickElement("lastBuildDate", rfc2822_date(self.latest_post_date()))
if self.feed['ttl'] is not None:
handler.addQuickElement("ttl", self.feed['ttl'])
def endChannelElement(self, handler):
handler.endElement("channel")
class RssUserland091Feed(RssFeed):
_version = "0.91"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
class Rss201rev2Feed(RssFeed):
# Spec: http://blogs.law.harvard.edu/tech/rss
_version = "2.0"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
# Author information.
if item["author_name"] and item["author_email"]:
handler.addQuickElement("author", "%s (%s)" % \
(item['author_email'], item['author_name']))
elif item["author_email"]:
handler.addQuickElement("author", item["author_email"])
elif item["author_name"]:
handler.addQuickElement("dc:creator", item["author_name"], {"xmlns:dc": "http://purl.org/dc/elements/1.1/"})
if item['pubdate'] is not None:
handler.addQuickElement("pubDate", rfc2822_date(item['pubdate']))
if item['comments'] is not None:
handler.addQuickElement("comments", item['comments'])
if item['unique_id'] is not None:
handler.addQuickElement("guid", item['unique_id'])
if item['ttl'] is not None:
handler.addQuickElement("ttl", item['ttl'])
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement("enclosure", '',
{"url": item['enclosure'].url, "length": item['enclosure'].length,
"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", cat)
class Atom1Feed(SyndicationFeed):
# Spec: http://atompub.org/2005/07/11/draft-ietf-atompub-format-10.html
mime_type = 'application/atom+xml; charset=utf-8'
ns = "http://www.w3.org/2005/Atom"
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement('feed', self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
handler.endElement("feed")
def root_attributes(self):
if self.feed['language'] is not None:
return {"xmlns": self.ns, "xml:lang": self.feed['language']}
else:
return {"xmlns": self.ns}
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", "", {"rel": "alternate", "href": self.feed['link']})
if self.feed['feed_url'] is not None:
handler.addQuickElement("link", "", {"rel": "self", "href": self.feed['feed_url']})
handler.addQuickElement("id", self.feed['id'])
handler.addQuickElement("updated", rfc3339_date(self.latest_post_date()))
if self.feed['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", self.feed['author_name'])
if self.feed['author_email'] is not None:
handler.addQuickElement("email", self.feed['author_email'])
if self.feed['author_link'] is not None:
handler.addQuickElement("uri", self.feed['author_link'])
handler.endElement("author")
if self.feed['subtitle'] is not None:
handler.addQuickElement("subtitle", self.feed['subtitle'])
for cat in self.feed['categories']:
handler.addQuickElement("category", "", {"term": cat})
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("rights", self.feed['feed_copyright'])
def write_items(self, handler):
for item in self.items:
handler.startElement("entry", self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("entry")
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", "", {"href": item['link'], "rel": "alternate"})
if item['pubdate'] is not None:
handler.addQuickElement("updated", rfc3339_date(item['pubdate']))
# Author information.
if item['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", item['author_name'])
if item['author_email'] is not None:
handler.addQuickElement("email", item['author_email'])
if item['author_link'] is not None:
handler.addQuickElement("uri", item['author_link'])
handler.endElement("author")
# Unique ID.
if item['unique_id'] is not None:
unique_id = item['unique_id']
else:
unique_id = get_tag_uri(item['link'], item['pubdate'])
handler.addQuickElement("id", unique_id)
# Summary.
if item['description'] is not None:
handler.addQuickElement("summary", item['description'], {"type": "html"})
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement("link", '',
{"rel": "enclosure",
"href": item['enclosure'].url,
"length": item['enclosure'].length,
"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", "", {"term": cat})
# Rights.
if item['item_copyright'] is not None:
handler.addQuickElement("rights", item['item_copyright'])
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
DefaultFeed = Rss201rev2Feed
|
|
"""
Oracle database backend for Django.
Requires cx_Oracle: http://cx-oracle.sourceforge.net/
"""
import datetime
import decimal
import os
import platform
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import cached_property
def _setup_environment(environ):
# Cygwin requires some special voodoo to set the environment variables
# properly so that Oracle will see them.
if platform.system().upper().startswith('CYGWIN'):
try:
import ctypes
except ImportError as e:
raise ImproperlyConfigured("Error loading ctypes: %s; "
"the Oracle backend requires ctypes to "
"operate correctly under Cygwin." % e)
kernel32 = ctypes.CDLL('kernel32')
for name, value in environ:
kernel32.SetEnvironmentVariableA(name, value)
else:
os.environ.update(environ)
_setup_environment([
# Oracle takes client-side character set encoding from the environment.
('NLS_LANG', '.AL32UTF8'),
# This prevents unicode from getting mangled by getting encoded into the
# potentially non-unicode database character set.
('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'),
])
try:
import cx_Oracle as Database
except ImportError as e:
raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)
# Some of these import cx_Oracle, so import them after checking if it's installed.
from .client import DatabaseClient # NOQA isort:skip
from .creation import DatabaseCreation # NOQA isort:skip
from .features import DatabaseFeatures # NOQA isort:skip
from .introspection import DatabaseIntrospection # NOQA isort:skip
from .operations import DatabaseOperations # NOQA isort:skip
from .schema import DatabaseSchemaEditor # NOQA isort:skip
from .utils import Oracle_datetime # NOQA isort:skip
class _UninitializedOperatorsDescriptor:
def __get__(self, instance, cls=None):
# If connection.operators is looked up before a connection has been
# created, transparently initialize connection.operators to avert an
# AttributeError.
if instance is None:
raise AttributeError("operators not available as class attribute")
# Creating a cursor will initialize the operators.
instance.cursor().close()
return instance.__dict__['operators']
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'oracle'
# This dictionary maps Field objects to their associated Oracle column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
#
# Any format strings starting with "qn_" are quoted before being used in the
# output (the "qn_" prefix is stripped before the lookup is performed.
data_types = {
'AutoField': 'NUMBER(11)',
'BigAutoField': 'NUMBER(19)',
'BinaryField': 'BLOB',
'BooleanField': 'NUMBER(1)',
'CharField': 'NVARCHAR2(%(max_length)s)',
'DateField': 'DATE',
'DateTimeField': 'TIMESTAMP',
'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'INTERVAL DAY(9) TO SECOND(6)',
'FileField': 'NVARCHAR2(%(max_length)s)',
'FilePathField': 'NVARCHAR2(%(max_length)s)',
'FloatField': 'DOUBLE PRECISION',
'IntegerField': 'NUMBER(11)',
'BigIntegerField': 'NUMBER(19)',
'IPAddressField': 'VARCHAR2(15)',
'GenericIPAddressField': 'VARCHAR2(39)',
'NullBooleanField': 'NUMBER(1)',
'OneToOneField': 'NUMBER(11)',
'PositiveIntegerField': 'NUMBER(11)',
'PositiveSmallIntegerField': 'NUMBER(11)',
'SlugField': 'NVARCHAR2(%(max_length)s)',
'SmallIntegerField': 'NUMBER(11)',
'TextField': 'NCLOB',
'TimeField': 'TIMESTAMP',
'URLField': 'VARCHAR2(%(max_length)s)',
'UUIDField': 'VARCHAR2(32)',
}
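# Illustrative sketch of the interpolation described above: for a CharField
# declared with max_length=100, the column type string
# 'NVARCHAR2(%(max_length)s)' is rendered against Field.__dict__, producing
# 'NVARCHAR2(100)'.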
data_type_check_constraints = {
'BooleanField': '%(qn_column)s IN (0,1)',
'NullBooleanField': '(%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL)',
'PositiveIntegerField': '%(qn_column)s >= 0',
'PositiveSmallIntegerField': '%(qn_column)s >= 0',
}
operators = _UninitializedOperatorsDescriptor()
_standard_operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
}
_likec_operators = _standard_operators.copy()
_likec_operators.update({
'contains': "LIKEC %s ESCAPE '\\'",
'icontains': "LIKEC UPPER(%s) ESCAPE '\\'",
'startswith': "LIKEC %s ESCAPE '\\'",
'endswith': "LIKEC %s ESCAPE '\\'",
'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'",
'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'",
})
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
_pattern_ops = {
'contains': "'%%' || {} || '%%'",
'icontains': "'%%' || UPPER({}) || '%%'",
'startswith': "{} || '%%'",
'istartswith': "UPPER({}) || '%%'",
'endswith': "'%%' || {}",
'iendswith': "'%%' || UPPER({})",
}
_standard_pattern_ops = {k: "LIKE TRANSLATE( " + v + " USING NCHAR_CS)"
" ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
for k, v in _pattern_ops.items()}
_likec_pattern_ops = {k: "LIKEC " + v + " ESCAPE '\\'"
for k, v in _pattern_ops.items()}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True)
self.features.can_return_id_from_insert = use_returning_into
def _connect_string(self):
settings_dict = self.settings_dict
if not settings_dict['HOST'].strip():
settings_dict['HOST'] = 'localhost'
if settings_dict['PORT']:
dsn = Database.makedsn(settings_dict['HOST'],
int(settings_dict['PORT']),
settings_dict['NAME'])
else:
dsn = settings_dict['NAME']
return "%s/%s@%s" % (settings_dict['USER'],
settings_dict['PASSWORD'], dsn)
def get_connection_params(self):
conn_params = self.settings_dict['OPTIONS'].copy()
if 'use_returning_into' in conn_params:
del conn_params['use_returning_into']
return conn_params
def get_new_connection(self, conn_params):
return Database.connect(self._connect_string(), **conn_params)
def init_connection_state(self):
cursor = self.create_cursor()
# Set the territory first. The territory overrides NLS_DATE_FORMAT
# and NLS_TIMESTAMP_FORMAT to the territory default. When all of
# these are set in a single statement, it isn't clear what is supposed
# to happen.
cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'")
# Set Oracle date to ANSI date format. This only needs to execute
# once when we create a new connection. We also set the Territory
# to 'AMERICA' which forces Sunday to evaluate to a '1' in
# TO_CHAR().
cursor.execute(
"ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'"
" NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'" +
(" TIME_ZONE = 'UTC'" if settings.USE_TZ else '')
)
cursor.close()
if 'operators' not in self.__dict__:
# Ticket #14149: Check whether our LIKE implementation will
# work for this connection or we need to fall back on LIKEC.
# This check is performed only once per DatabaseWrapper
# instance per thread, since subsequent connections will use
# the same settings.
cursor = self.create_cursor()
try:
cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s"
% self._standard_operators['contains'],
['X'])
except Database.DatabaseError:
self.operators = self._likec_operators
self.pattern_ops = self._likec_pattern_ops
else:
self.operators = self._standard_operators
self.pattern_ops = self._standard_pattern_ops
cursor.close()
self.connection.stmtcachesize = 20
# Ensure all changes are preserved even when AUTOCOMMIT is False.
if not self.get_autocommit():
self.commit()
def create_cursor(self, name=None):
return FormatStylePlaceholderCursor(self.connection)
def _commit(self):
if self.connection is not None:
try:
return self.connection.commit()
except Database.DatabaseError as e:
# cx_Oracle raises a cx_Oracle.DatabaseError exception
# with the following attributes and values:
# code = 2091
# message = 'ORA-02091: transaction rolled back
# 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
# _C00102056) violated - parent key not found'
# We convert that particular case to our IntegrityError exception
x = e.args[0]
if hasattr(x, 'code') and hasattr(x, 'message') \
and x.code == 2091 and 'ORA-02291' in x.message:
raise utils.IntegrityError(*tuple(e.args))
raise
# Oracle doesn't support releasing savepoints. But we fake them when query
# logging is enabled to keep query counts consistent with other backends.
def _savepoint_commit(self, sid):
if self.queries_logged:
self.queries_log.append({
'sql': '-- RELEASE SAVEPOINT %s (faked)' % self.ops.quote_name(sid),
'time': '0.000',
})
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
Check constraints by setting them to immediate. Return them to deferred
afterward.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
self.connection.ping()
except Database.Error:
return False
else:
return True
@cached_property
def oracle_full_version(self):
with self.temporary_connection():
return self.connection.version
@cached_property
def oracle_version(self):
try:
return int(self.oracle_full_version.split('.')[0])
except ValueError:
return None
class OracleParam:
"""
Wrapper object for formatting parameters for Oracle. If the string
representation of the value is large enough (greater than 4000 characters)
the input size needs to be set as CLOB. Alternatively, if the parameter
has an `input_size` attribute, then the value of the `input_size` attribute
will be used instead. Otherwise, no input size will be set for the
parameter when executing the query.
"""
def __init__(self, param, cursor, strings_only=False):
# With raw SQL queries, datetimes can reach this function
# without being converted by DateTimeField.get_db_prep_value.
if settings.USE_TZ and (isinstance(param, datetime.datetime) and
not isinstance(param, Oracle_datetime)):
param = Oracle_datetime.from_datetime(param)
string_size = 0
# Oracle doesn't recognize True and False correctly.
if param is True:
param = 1
elif param is False:
param = 0
if hasattr(param, 'bind_parameter'):
self.force_bytes = param.bind_parameter(cursor)
elif isinstance(param, (Database.Binary, datetime.timedelta)):
self.force_bytes = param
else:
# To transmit to the database, we need Unicode if supported
# To get size right, we must consider bytes.
self.force_bytes = force_text(param, cursor.charset, strings_only)
if isinstance(self.force_bytes, str):
# We could optimize by only converting up to 4000 bytes here
string_size = len(force_bytes(param, cursor.charset, strings_only))
if hasattr(param, 'input_size'):
# If parameter has `input_size` attribute, use that.
self.input_size = param.input_size
elif string_size > 4000:
# Mark any string param greater than 4000 characters as a CLOB.
self.input_size = Database.CLOB
else:
self.input_size = None
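# Illustrative sketch of the sizing rule above: OracleParam('x' * 5000, cursor)
# ends up with input_size = Database.CLOB because the encoded value exceeds
# 4000 bytes, while a short string leaves input_size as None, and a parameter
# exposing its own `input_size` attribute keeps that size.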
class VariableWrapper:
"""
An adapter class for cursor variables that prevents the wrapped object
from being converted into a string when used to instantiate an OracleParam.
This can be used generally for any other object that should be passed into
Cursor.execute as-is.
"""
def __init__(self, var):
self.var = var
def bind_parameter(self, cursor):
return self.var
def __getattr__(self, key):
return getattr(self.var, key)
def __setattr__(self, key, value):
if key == 'var':
self.__dict__[key] = value
else:
setattr(self.var, key, value)
class FormatStylePlaceholderCursor:
"""
Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
style. This fixes it -- but note that if you want to use a literal "%s" in
a query, you'll need to use "%%s".
We also do automatic conversion between Unicode on the Python side and
UTF-8 -- for talking to Oracle -- in here.
"""
charset = 'utf-8'
def __init__(self, connection):
self.cursor = connection.cursor()
# Necessary to retrieve decimal values without rounding error.
self.cursor.numbersAsStrings = True
# The default for cx_Oracle < 5.3 is 50.
self.cursor.arraysize = 100
def _format_params(self, params):
try:
return {k: OracleParam(v, self, True) for k, v in params.items()}
except AttributeError:
return tuple(OracleParam(p, self, True) for p in params)
def _guess_input_sizes(self, params_list):
# Handle params given as mappings; otherwise treat them as sequences
if hasattr(params_list[0], 'keys'):
sizes = {}
for params in params_list:
for k, value in params.items():
if value.input_size:
sizes[k] = value.input_size
self.setinputsizes(**sizes)
else:
# It's not a list of dicts; it's a list of sequences
sizes = [None] * len(params_list[0])
for params in params_list:
for i, value in enumerate(params):
if value.input_size:
sizes[i] = value.input_size
self.setinputsizes(*sizes)
def _param_generator(self, params):
# Handle params given as a mapping; otherwise treat them as a sequence
if hasattr(params, 'items'):
return {k: v.force_bytes for k, v in params.items()}
else:
return [p.force_bytes for p in params]
def _fix_for_params(self, query, params, unify_by_values=False):
# cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
# does want a trailing ';' but not a trailing '/'. However, these
# characters must be included in the original query in case the query
# is being passed to SQL*Plus.
if query.endswith(';') or query.endswith('/'):
query = query[:-1]
if params is None:
params = []
query = query
elif hasattr(params, 'keys'):
# Handle params as dict
args = {k: ":%s" % k for k in params.keys()}
query = query % args
elif unify_by_values and len(params) > 0:
# Handle params as a dict with unified query parameters by their
# values. It can be used only in single query execute() because
# executemany() shares the formatted query with each of the params
# list. e.g. for input params = [0.75, 2, 0.75, 'sth', 0.75]
# params_dict = {0.75: ':arg0', 2: ':arg1', 'sth': ':arg2'}
# args = [':arg0', ':arg1', ':arg0', ':arg2', ':arg0']
# params = {':arg0': 0.75, ':arg1': 2, ':arg2': 'sth'}
params_dict = {param: ':arg%d' % i for i, param in enumerate(set(params))}
args = [params_dict[param] for param in params]
params = {value: key for key, value in params_dict.items()}
query = query % tuple(args)
else:
# Handle params as sequence
args = [(':arg%d' % i) for i in range(len(params))]
query = query % tuple(args)
return query, self._format_params(params)
def execute(self, query, params=None):
query, params = self._fix_for_params(query, params, unify_by_values=True)
self._guess_input_sizes([params])
return self.cursor.execute(query, self._param_generator(params))
def executemany(self, query, params=None):
if not params:
# No params given, nothing to do
return None
# uniform treatment for sequences and iterables
params_iter = iter(params)
query, firstparams = self._fix_for_params(query, next(params_iter))
# we build a list of formatted params; as we're going to traverse it
# more than once, we can't make it lazy by using a generator
formatted = [firstparams] + [self._format_params(p) for p in params_iter]
self._guess_input_sizes(formatted)
return self.cursor.executemany(query, [self._param_generator(p) for p in formatted])
def fetchone(self):
row = self.cursor.fetchone()
if row is None:
return row
return _rowfactory(row, self.cursor)
def fetchmany(self, size=None):
if size is None:
size = self.arraysize
return tuple(_rowfactory(r, self.cursor) for r in self.cursor.fetchmany(size))
def fetchall(self):
return tuple(_rowfactory(r, self.cursor) for r in self.cursor.fetchall())
def close(self):
try:
self.cursor.close()
except Database.InterfaceError:
# already closed
pass
def var(self, *args):
return VariableWrapper(self.cursor.var(*args))
def arrayvar(self, *args):
return VariableWrapper(self.cursor.arrayvar(*args))
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return CursorIterator(self.cursor)
class CursorIterator:
"""
Cursor iterator wrapper that invokes our custom row factory.
"""
def __init__(self, cursor):
self.cursor = cursor
self.iter = iter(cursor)
def __iter__(self):
return self
def __next__(self):
return _rowfactory(next(self.iter), self.cursor)
def _rowfactory(row, cursor):
# Cast numeric values as the appropriate Python type based upon the
# cursor description.
casted = []
for value, desc in zip(row, cursor.description):
if value is not None and desc[1] is Database.NUMBER:
precision = desc[4] or 0
scale = desc[5] or 0
if scale == -127:
if precision == 0:
# NUMBER column: decimal-precision floating point
# This will normally be an integer from a sequence,
# but it could be a decimal value.
if '.' in value:
value = decimal.Decimal(value)
else:
value = int(value)
else:
# FLOAT column: binary-precision floating point.
# This comes from FloatField columns.
value = float(value)
elif precision > 0:
# NUMBER(p,s) column: decimal-precision fixed point.
# This comes from IntField and DecimalField columns.
if scale == 0:
value = int(value)
else:
value = decimal.Decimal(value)
elif '.' in value:
# No type information. This normally comes from a
# mathematical expression in the SELECT list. Guess int
# or Decimal based on whether it has a decimal point.
value = decimal.Decimal(value)
else:
value = int(value)
casted.append(value)
return tuple(casted)
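# Illustrative sketch of the casting rules above, for a column whose cursor
# description reports Database.NUMBER:
#   precision=0,  scale=-127, value='3'     -> int 3
#   precision=0,  scale=-127, value='3.5'   -> Decimal('3.5')
#   precision=5,  scale=-127, value='3.5'   -> float 3.5
#   precision=10, scale=0,    value='42'    -> int 42
#   precision=10, scale=2,    value='42.50' -> Decimal('42.50')
#   no type info (precision=0, scale=0), value='1.5' -> Decimal('1.5')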
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from deepctr_torch.inputs import SparseFeat, DenseFeat, get_feature_names
from deepctr_torch.models.deepfm import *
from deepctr_torch.models.basemodel import *
from evaluation import uAUC
from bigdl.friesian.feature import FeatureTable
from bigdl.orca import init_orca_context, stop_orca_context
from bigdl.orca.learn.pytorch import Estimator
from bigdl.orca.learn.metrics import Accuracy, AUC
import argparse
spark_conf = {"spark.network.timeout": "10000000",
"spark.sql.broadcastTimeout": "7200",
"spark.sql.shuffle.partitions": "2000",
"spark.locality.wait": "0s",
"spark.sql.hive.filesourcePartitionFileCacheSize": "4096000000",
"spark.sql.crossJoin.enabled": "true",
"spark.serializer": "org.apache.spark.serializer.KryoSerializer",
"spark.kryo.unsafe": "true",
"spark.kryoserializer.buffer.max": "1024m",
"spark.task.cpus": "1",
"spark.executor.heartbeatInterval": "200s",
"spark.driver.maxResultSize": "40G",
"spark.eventLog.enabled": "true",
"spark.app.name": "recsys-2tower",
"spark.executor.memoryOverhead": "120g"}
class DeepFM(BaseModel):
def __init__(self,
linear_feature_columns, dnn_feature_columns, use_fm=True,
dnn_hidden_units=(1024, 512, 128), l2_reg_linear=0.00001, l2_reg_embedding=0.00001,
l2_reg_dnn=0, init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu',
dnn_use_bn=False, task='binary', device='cpu', gpus=None):
super(DeepFM, self).__init__(linear_feature_columns, dnn_feature_columns,
l2_reg_linear=l2_reg_linear, l2_reg_embedding=l2_reg_embedding,
init_std=init_std, seed=seed, task=task,
device=device, gpus=gpus)
self.use_fm = use_fm
self.use_dnn = len(dnn_feature_columns) > 0 and len(dnn_hidden_units) > 0
if use_fm:
self.fm = FM()
if self.use_dnn:
self.dnn = DNN(self.compute_input_dim(dnn_feature_columns), dnn_hidden_units,
activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout,
use_bn=dnn_use_bn, init_std=init_std, device=device)
self.dnn_linear = nn.Linear(dnn_hidden_units[-1], 1, bias=False).to(device)
self.add_regularization_weight(
filter(lambda x: 'weight' in x[0] and 'bn' not in x[0],
self.dnn.named_parameters()), l2=l2_reg_dnn)
self.add_regularization_weight(self.dnn_linear.weight, l2=l2_reg_dnn)
self.to(device)
def forward(self, X):
sparse_embedding_list, dense_value_list = self\
.input_from_feature_columns(X, self.dnn_feature_columns, self.embedding_dict)
logit = self.linear_model(X)
if self.use_fm and len(sparse_embedding_list) > 0:
fm_input = torch.cat(sparse_embedding_list, dim=1)
logit += self.fm(fm_input)
if self.use_dnn:
dnn_input = combined_dnn_input(
sparse_embedding_list, dense_value_list)
dnn_output = self.dnn(dnn_input)
dnn_logit = self.dnn_linear(dnn_output)
logit += dnn_logit
y_pred = self.out(logit)
return y_pred
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Deep FM Training')
parser.add_argument('--cluster_mode', type=str, default="local",
help='The cluster mode, such as local, yarn or standalone.')
parser.add_argument('--master', type=str, default=None,
help='The master url, only used when cluster mode is standalone.')
parser.add_argument('--executor_cores', type=int, default=8,
help='The executor core number.')
parser.add_argument('--executor_memory', type=str, default="160g",
help='The executor memory.')
parser.add_argument('--num_executor', type=int, default=8,
help='The number of executors.')
parser.add_argument('--driver_cores', type=int, default=4,
help='The driver core number.')
parser.add_argument('--driver_memory', type=str, default="36g",
help='The driver memory.')
parser.add_argument('--lr', default=0.001, type=float, help='learning rate')
parser.add_argument('--epochs', default=1, type=int, help='train epoch')
parser.add_argument('--batch_size', default=8000, type=int, help='batch size')
parser.add_argument('--model_dir', default='snapshot', type=str,
help='snapshot directory name (default: snapshot)')
parser.add_argument('--data_dir', type=str, help='data directory')
parser.add_argument('--frequency_limit', type=int, default=25, help='frequency limit')
args = parser.parse_args()
if args.cluster_mode == "local":
sc = init_orca_context("local")
elif args.cluster_mode == "yarn":
sc = init_orca_context("yarn-client", cores=args.executor_cores,
num_nodes=args.num_executor, memory=args.executor_memory,
driver_cores=args.driver_cores, driver_memory=args.driver_memory,
conf=spark_conf, object_store_memory="40g", init_ray_on_spark=True,
extra_python_lib="evaluation.py")
elif args.cluster_mode == "spark-submit":
sc = init_orca_context("spark-submit", object_store_memory="40g")
else:
raise ValueError(
"cluster_mode should be one of 'local', 'yarn' and 'spark-submit'"
", but got " + args.cluster_mode)
num_cols = ["enaging_user_follower_count", 'enaging_user_following_count',
"engaged_with_user_follower_count", "engaged_with_user_following_count",
"len_hashtags", "len_domains", "len_links"]
cat_cols = ["engaged_with_user_is_verified", "enaging_user_is_verified", "present_media",
"tweet_type", "language", 'present_media_language']
embed_cols = ["enaging_user_id", "engaged_with_user_id", "hashtags", "present_links",
"present_domains"]
train = FeatureTable.read_parquet(args.data_dir + "/train_parquet")
test = FeatureTable.read_parquet(args.data_dir + "/test_parquet")
test_user_ids = test.select("engaged_with_user_id").cast("engaged_with_user_id", "str").\
to_list("engaged_with_user_id")
test_labels = test.select("label").to_list("label")
full = train.concat(test)
reindex_tbls = full.gen_reindex_mapping(embed_cols, freq_limit=args.frequency_limit)
full, min_max_dict = full.min_max_scale(num_cols)
sparse_dims = {}
for i, c in enumerate(embed_cols):
sparse_dims[c] = max(reindex_tbls[i].df.agg({c+"_new": "max"}).collect()[0]) + 1
cat_dims = full.max(cat_cols).to_dict()
cat_dims = dict(zip(cat_dims['column'], [dim + 1 for dim in cat_dims['max']]))
sparse_dims.update(cat_dims)
feature_columns = [SparseFeat(feat, int(sparse_dims[feat]), 16) for feat in sparse_dims] + \
[DenseFeat(feat, 1) for feat in num_cols]
feature_names = get_feature_names(feature_columns)
train = train.reindex(embed_cols, reindex_tbls)\
.transform_min_max_scale(num_cols, min_max_dict)\
.merge_cols(feature_names, "feature") \
.select(["label", "feature"])\
.apply("label", "label", lambda x: [float(x)], dtype="array<float>")
test = test.reindex(embed_cols, reindex_tbls) \
.transform_min_max_scale(num_cols, min_max_dict) \
.merge_cols(feature_names, "feature") \
.select(["label", "feature"]) \
.apply("label", "label", lambda x: [float(x)], dtype="array<float>")
test.cache()
def model_creator(config):
model = DeepFM(linear_feature_columns=config["linear_feature_columns"],
dnn_feature_columns=config["dnn_feature_columns"],
task='binary', l2_reg_embedding=1e-1)
model.float()
print(model)
return model
def optim_creator(model, config):
return torch.optim.Adam(model.parameters(), config['lr'])
criterion = torch.nn.BCELoss()
config = {'linear_feature_columns': feature_columns,
'dnn_feature_columns': feature_columns,
'feature_names': feature_names,
'lr': args.lr}
est = Estimator.from_torch(model=model_creator, optimizer=optim_creator, loss=criterion,
metrics=[Accuracy(), AUC()], use_tqdm=True, backend="torch_distributed",
config=config)
train_stats = est.fit(data=train.df, feature_cols=["feature"], label_cols=["label"],
epochs=args.epochs, batch_size=args.batch_size)
valid_stats = est.evaluate(data=test.df, feature_cols=["feature"], label_cols=["label"],
batch_size=args.batch_size)
est.save(args.model_dir)
print("Train stats: {}".format(train_stats))
print("Validation stats: {}".format(valid_stats))
predicts = est.predict(data=test.df, feature_cols=["feature"], batch_size=args.batch_size)
predicts.show(10, False)
est.shutdown()
stop_orca_context()
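# A hedged launch sketch (the script filename and data path below are
# assumptions; the command-line flags match the argparse definitions above):
#   python deepfm_train.py --cluster_mode local \
#       --data_dir /path/to/recsys_data --epochs 1 --batch_size 8000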
"""The f90nml namelist parser.
The ``Parser`` object converts the contents of a Fortran namelist into a
hierarchy of Python dicts containing equivalent intrinsic Python data types.
:copyright: Copyright 2014 Marshall Ward, see AUTHORS for details.
:license: Apache License, Version 2.0, see LICENSE for details.
"""
from __future__ import print_function
import warnings
import copy
from string import whitespace
import itertools
from f90nml.findex import FIndex
from f90nml.fpy import pyfloat, pycomplex, pybool, pystr
from f90nml.namelist import Namelist
from f90nml.tokenizer import Tokenizer
class Parser(object):
"""Fortran namelist parser."""
def __init__(self):
"""Create the parser object."""
# Token management
self.tokens = None
self.token = None
self.prior_token = None
# Patching
self.pfile = None
# Configuration
self._default_start_index = 1
self._global_start_index = None
self._comment_tokens = '!'
self._sparse_arrays = False
self._row_major = False
self._strict_logical = True
@property
def comment_tokens(self):
"""Return a string of single-character comment tokens in the namelist.
:type: ``str``
:default: ``'!'``
Some Fortran programs will introduce alternative comment tokens (e.g.
``#``) for internal preprocessing.
If you need to support these tokens, create a ``Parser`` object and set
the comment token as follows:
>>> parser = f90nml.Parser()
>>> parser.comment_tokens += '#'
>>> nml = parser.read('sample.nml')
Be aware that this is non-standard Fortran and could mangle any strings
using the ``#`` characters. Characters inside string delimiters should
be protected, however.
"""
return self._comment_tokens
@comment_tokens.setter
def comment_tokens(self, value):
"""Validate and set the comment token string."""
if not isinstance(value, str):
raise TypeError('comment_tokens attribute must be a string.')
self._comment_tokens = value
@property
def default_start_index(self):
"""Assumed starting index for a vector.
:type: ``int``
:default: 1
Since Fortran allows users to set an arbitrary start index, it is not
always possible to assign an index to values when no index range has
been provided.
For example, in the namelist ``idx.nml`` shown below, the index of the
values in the second assignment is ambiguous and depends on the
implicit starting index.
.. code-block:: fortran
&idx_nml
v(3:5) = 3, 4, 5
v = 1, 2
/
The indices of the second entry in ``v`` are ambiguous. The result for
different values of ``default_start_index`` are shown below.
>>> parser = f90nml.Parser()
>>> parser.default_start_index = 1
>>> nml = parser.read('idx.nml')
>>> nml['idx_nml']['v']
[1, 2, 3, 4, 5]
>>> parser.default_start_index = 0
>>> nml = parser.read('idx.nml')
>>> nml['idx_nml']['v']
[1, 2, None, 3, 4, 5]
"""
return self._default_start_index
@default_start_index.setter
def default_start_index(self, value):
"""Validate and set the default start index."""
if not isinstance(value, int):
raise TypeError('default_start_index attribute must be of int '
'type.')
self._default_start_index = value
@property
def global_start_index(self):
"""Define an explicit start index for all vectors.
:type: ``int``, ``None``
:default: ``None``
When set to ``None``, vectors are assumed to start at the lowest
specified index. If no index appears in the namelist, then
``default_start_index`` is used.
When ``global_start_index`` is set, then all vectors will be created
using this starting index.
For the namelist file ``idx.nml`` shown below,
.. code-block:: fortran
&idx_nml
v(3:5) = 3, 4, 5
/
the following Python code behaves as shown below.
>>> parser = f90nml.Parser()
>>> nml = parser.read('idx.nml')
>>> nml['idx_nml']['v']
[3, 4, 5]
>>> parser.global_start_index = 1
>>> nml = parser.read('idx.nml')
>>> nml['idx_nml']['v']
[None, None, 3, 4, 5]
Currently, this property expects a scalar, and applies this value to
all dimensions.
"""
return self._global_start_index
@global_start_index.setter
def global_start_index(self, value):
"""Set the global start index."""
if not isinstance(value, int) and value is not None:
raise TypeError('global_start_index attribute must be of int '
'type.')
self._global_start_index = value
@property
def row_major(self):
"""Read multidimensional arrays in row-major format.
:type: ``bool``
:default: ``False``
Multidimensional array data contiguity is preserved by default, so that
column-major Fortran data is represented as a row-major Python list of
lists.
The ``row_major`` flag will reorder the data to preserve the index
rules between Fortran to Python, but the data will be converted to
row-major form (with respect to Fortran).
"""
return self._row_major
@row_major.setter
def row_major(self, value):
"""Validate and set row-major format for multidimensional arrays."""
if value is not None:
if not isinstance(value, bool):
raise TypeError(
'f90nml: error: row_major must be a logical value.')
else:
self._row_major = value
@property
def sparse_arrays(self):
"""Store unset rows of multidimensional arrays as empty lists.
:type: ``bool``
:default: ``False``
Enabling this flag will replace rows of unset values with empty lists,
and will also not pad any existing rows when other rows are expanded.
This is not a true sparse representation, but rather is slightly more
sparse than the default dense array representation.
"""
return self._sparse_arrays
@sparse_arrays.setter
def sparse_arrays(self, value):
"""Validate and enable spare arrays."""
if not isinstance(value, bool):
raise TypeError('sparse_arrays attribute must be a logical type.')
self._sparse_arrays = value
@property
def strict_logical(self):
"""Use strict rules for parsing logical data value parsing.
:type: ``bool``
:default: ``True``
The ``strict_logical`` flag will limit the parsing of non-delimited
logical strings as logical values. The default value is ``True``.
When ``strict_logical`` is enabled, only ``.true.``, ``.t.``, ``true``,
and ``t`` are interpreted as ``True``, and only ``.false.``, ``.f.``,
``false``, and ``f`` are interpreted as ``False``.
When ``strict_logical`` is disabled, any value starting with ``.t`` or
``t`` is interpreted as ``True``, while any string starting with ``.f``
or ``f`` is interpreted as ``False``, as described in the language
standard. However, it can interfere with namelists which contain
non-delimited strings.
"""
return self._strict_logical
@strict_logical.setter
def strict_logical(self, value):
"""Validate and set the strict logical flag."""
if value is not None:
if not isinstance(value, bool):
raise TypeError(
'f90nml: error: strict_logical must be a logical value.')
else:
self._strict_logical = value
def read(self, nml_fname, nml_patch_in=None, patch_fname=None):
"""Parse a Fortran namelist file and store the contents.
>>> parser = f90nml.Parser()
>>> data_nml = parser.read('data.nml')
"""
# For switching based on files versus paths
nml_is_path = not hasattr(nml_fname, 'read')
patch_is_path = not hasattr(patch_fname, 'read')
# Convert patch data to a Namelist object
if nml_patch_in is not None:
if not isinstance(nml_patch_in, dict):
raise TypeError('Input patch must be a dict or a Namelist.')
nml_patch = copy.deepcopy(Namelist(nml_patch_in))
if not patch_fname and nml_is_path:
patch_fname = nml_fname + '~'
elif not patch_fname:
raise ValueError('f90nml: error: No output file for patch.')
elif nml_fname == patch_fname:
raise ValueError('f90nml: error: Patch filepath cannot be the '
'same as the original filepath.')
if patch_is_path:
self.pfile = open(patch_fname, 'w')
else:
self.pfile = patch_fname
else:
nml_patch = Namelist()
try:
nml_file = open(nml_fname, 'r') if nml_is_path else nml_fname
try:
return self._readstream(nml_file, nml_patch)
except StopIteration:
raise ValueError('End-of-file reached before end of namelist.')
# Close the files we opened on any exceptions within readstream
finally:
if nml_is_path:
nml_file.close()
finally:
if self.pfile and patch_is_path:
self.pfile.close()
def reads(self, nml_string):
"""Parse a namelist string and return an equivalent Namelist object.
>>> parser = f90nml.Parser()
>>> data_nml = parser.reads('&data_nml x=1 y=2 /')
"""
try:
return self._readstream(iter(nml_string.splitlines(True)))
except StopIteration:
raise ValueError('End-of-file reached before end of namelist.')
def _readstream(self, nml_file, nml_patch_in=None):
"""Parse an input stream containing a Fortran namelist."""
nml_patch = nml_patch_in if nml_patch_in is not None else Namelist()
tokenizer = Tokenizer()
tokenizer.comment_tokens = self.comment_tokens
f90lex = []
for line in nml_file:
toks = tokenizer.parse(line)
while tokenizer.prior_delim:
new_toks = tokenizer.parse(next(nml_file))
# Skip empty lines
if not new_toks:
continue
# The tokenizer always pre-tokenizes the whitespace (leftover
# behaviour from Fortran source parsing) so this must be added
# manually.
if new_toks[0].isspace():
toks[-1] += new_toks.pop(0)
# Append the rest of the string (if present)
if new_toks:
toks[-1] += new_toks[0]
# Attach the rest of the tokens
toks.extend(new_toks[1:])
toks.append('\n')
f90lex.extend(toks)
self.tokens = iter(f90lex)
nmls = Namelist()
# Attempt to get first token; abort on empty file
try:
self._update_tokens(write_token=False)
except StopIteration:
return nmls
# TODO: Replace "while True" with an update_token() iterator
while True:
try:
# Check for classic group terminator
if self.token == 'end':
self._update_tokens()
# Ignore tokens outside of namelist groups
while self.token not in ('&', '$'):
self._update_tokens()
except StopIteration:
break
# Create the next namelist
try:
self._update_tokens()
except StopIteration:
raise ValueError('End-of-file after namelist group token `&`.')
g_name = self.token
g_vars = Namelist()
v_name = None
# TODO: Edit `Namelist` to support case-insensitive `get` calls
grp_patch = nml_patch.pop(g_name.lower(), Namelist())
# Populate the namelist group
while g_name:
if self.token not in ('=', '%', '('):
try:
self._update_tokens()
except StopIteration:
raise ValueError(
'End-of-file before end of namelist group: \'&{}\''
''.format(g_name)
)
# Set the next active variable
if self.token in ('=', '(', '%'):
v_name, v_values = self._parse_variable(
g_vars,
patch_nml=grp_patch
)
if v_name in g_vars:
v_prior_values = g_vars[v_name]
v_values = merge_values(v_prior_values, v_values)
g_vars[v_name] = v_values
# Squeeze 1d list due to repeated variables
for v_name, v_values in g_vars.items():
if (
isinstance(v_values, list)
and len(v_values) == 1
and v_name not in g_vars.start_index
):
g_vars[v_name] = v_values[0]
# Deselect variable
v_name = None
v_values = []
# Finalise namelist group
if self.token in ('/', '&', '$'):
# Append any remaining patched variables
for v_name, v_val in grp_patch.items():
g_vars[v_name] = v_val
v_strs = nmls._var_strings(v_name, v_val)
for v_str in v_strs:
self.pfile.write(v_str + '\n')
# Append the grouplist to the namelist
if g_name in nmls:
nmls.add_cogroup(g_name, g_vars)
else:
nmls[g_name] = g_vars
# Reset state
g_name, g_vars = None, None
try:
self._update_tokens()
except StopIteration:
break
if nml_patch:
# Append the contents to the namelist patch
print(file=self.pfile)
print(nml_patch, file=self.pfile)
# Now append the values to the output namelist
for grp in nml_patch:
nmls[grp] = nml_patch[grp]
return nmls
def _parse_variable(self, parent, patch_nml=None):
"""Parse a variable and return its name and values."""
if not patch_nml:
patch_nml = Namelist()
v_name = self.prior_token
v_values = []
# Patch state
patch_values = None
# Derived type parent index (see notes below)
dt_idx = None
if self.token == '(':
v_idx_bounds = self._parse_indices()
v_idx = FIndex(v_idx_bounds, self.global_start_index)
# Update starting index against namelist record
if v_name.lower() in parent.start_index:
p_idx = parent.start_index[v_name.lower()]
for idx, pv in enumerate(zip(p_idx, v_idx.first)):
if all(i is None for i in pv):
i_first = None
else:
i_first = min(i for i in pv if i is not None)
v_idx.first[idx] = i_first
# Resize vector based on starting index
parent[v_name] = prepad_array(parent[v_name], p_idx,
v_idx.first)
else:
# If variable already existed without an index, then assume a
# 1-based index
# FIXME: Need to respect undefined `None` starting indexes?
if v_name in parent:
v_idx.first = [self.default_start_index
for _ in v_idx.first]
parent.start_index[v_name.lower()] = v_idx.first
self._update_tokens()
# Derived type parent check
# NOTE: This assumes single-dimension derived type vectors
# (which I think is the only case supported in Fortran)
if self.token == '%':
assert v_idx_bounds[0][1] - v_idx_bounds[0][0] == 1
dt_idx = v_idx_bounds[0][0] - v_idx.first[0]
# NOTE: This would be the sensible place to call `parse_variable`,
# but it is not yet clear how to implement that, so we currently pass
# along `dt_idx` to the `%` handler.
else:
v_idx = None
# If indexed variable already exists, then re-index this new
# non-indexed variable using the global start index
if v_name in parent.start_index:
p_start = parent.start_index[v_name.lower()]
v_start = [self.default_start_index for _ in p_start]
# Resize vector based on new starting index
for i_p, i_v in zip(p_start, v_start):
if i_v < i_p:
pad = [None for _ in range(i_p - i_v)]
parent[v_name] = pad + parent[v_name]
parent.start_index[v_name.lower()] = v_start
if self.token == '%':
# Resolve the derived type
# Check for value in patch
v_patch_nml = None
if v_name in patch_nml:
v_patch_nml = patch_nml.pop(v_name.lower())
if parent:
vpar = parent.get(v_name.lower())
if vpar and isinstance(vpar, list):
# If new element is not a list, then assume it's the first
# element of the list.
if dt_idx is None:
dt_idx = self.default_start_index
try:
v_parent = vpar[dt_idx]
except IndexError:
v_parent = Namelist()
elif vpar:
v_parent = vpar
else:
v_parent = Namelist()
else:
v_parent = Namelist()
parent[v_name] = v_parent
self._update_tokens()
self._update_tokens()
v_att, v_att_vals = self._parse_variable(
v_parent,
patch_nml=v_patch_nml
)
next_value = Namelist()
next_value[v_att] = v_att_vals
self._append_value(v_values, next_value, v_idx)
else:
# Construct the variable array
assert self.token == '='
n_vals = None
self._update_tokens()
# Check if value is in the namelist patch
# TODO: Edit `Namelist` to support case-insensitive `pop` calls
# (Currently only a problem in PyPy2)
if v_name in patch_nml:
patch_values = patch_nml.pop(v_name.lower())
if not isinstance(patch_values, list):
patch_values = [patch_values]
p_idx = 0
# Add variables until next variable trigger
while (self.token not in ('=', '(', '%') or
(self.prior_token, self.token) in (('=', '('), (',', '('))):
# Check for repeated values
if self.token == '*':
n_vals = self._parse_value()
assert isinstance(n_vals, int)
self._update_tokens()
elif not n_vals:
n_vals = 1
# First check for implicit null values
if self.prior_token in ('=', '%', ','):
if (self.token in (',', '/', '&', '$') and
not (self.prior_token == ',' and
self.token in ('/', '&', '$'))):
self._append_value(v_values, None, v_idx, n_vals)
elif self.prior_token == '*':
if self.token not in ('/', '&', '$'):
self._update_tokens()
if (self.prior_token == ',' or self.token == '='
or (self.token in ('/', '&', '$')
and self.prior_token == '*')):
next_value = None
# XXX: Repeated ,, after N*, will be off by one...
if self.prior_token == ',' and self.token == ',':
n_vals += 1
else:
next_value = self._parse_value()
self._append_value(v_values, next_value, v_idx, n_vals)
else:
next_value = self._parse_value()
self._append_value(v_values, next_value, v_idx, n_vals)
# Reset default repeat factor for subsequent values
n_vals = 1
# Exit for end of nml group (/, &, $) or null broadcast (=)
if self.token in ('/', '&', '$', '='):
break
else:
# NOTE: it is probably very inefficient to keep re-creating
# iterators upon every element; this solution reflects the
# absence of mature lookahead in the script.
#
# This is a temporary fix to address errors caused by
# patches of different length from the original value, and
# represents a direction to fully rewrite the parser using
# `tee`.
# NOTE: We may be able to assume that self.token is a value
# rather than prepending it to the iterator.
self.tokens, pre_lookahead = itertools.tee(self.tokens)
lookahead = itertools.chain([self.token], pre_lookahead)
if patch_values:
# TODO: Patch indices that are not set in the namelist
if (p_idx < len(patch_values)
and check_for_value(lookahead)
and self.token != ','):
p_val = patch_values[p_idx]
p_repr = patch_nml._f90repr(patch_values[p_idx])
p_idx += 1
self._update_tokens(override=p_repr)
if isinstance(p_val, complex):
# Skip over the complex content
# NOTE: Assumes input and patch are complex
self._update_tokens(write_token=False)
self._update_tokens(write_token=False)
self._update_tokens(write_token=False)
self._update_tokens(write_token=False)
else:
# Skip any values beyond the patch size
skip = (p_idx >= len(patch_values))
self._update_tokens(patch_skip=skip)
else:
self._update_tokens()
if patch_values:
v_values = patch_values
if not v_idx:
v_values = delist(v_values)
return v_name, v_values
def _parse_indices(self):
"""Parse a sequence of Fortran vector indices as a list of tuples."""
v_name = self.prior_token
v_indices = []
while self.token in (',', '('):
v_indices.append(self._parse_index(v_name))
return v_indices
def _parse_index(self, v_name):
"""Parse Fortran vector indices into a tuple of Python indices."""
i_start = i_end = i_stride = None
# Start index
self._update_tokens()
try:
i_start = int(self.token)
self._update_tokens()
except ValueError:
if self.token in (',', ')'):
raise ValueError('{0} index cannot be empty.'.format(v_name))
elif not self.token == ':':
raise
# End index
if self.token == ':':
self._update_tokens()
try:
i_end = 1 + int(self.token)
self._update_tokens()
except ValueError:
if self.token == ':':
raise ValueError('{0} end index cannot be implicit '
'when using stride.'.format(v_name))
elif self.token not in (',', ')'):
raise
elif self.token in (',', ')'):
# Replace index with single-index range
if i_start:
i_end = 1 + i_start
# Stride index
if self.token == ':':
self._update_tokens()
try:
i_stride = int(self.token)
except ValueError:
if self.token == ')':
raise ValueError('{0} stride index cannot be '
'implicit.'.format(v_name))
else:
raise
if i_stride == 0:
raise ValueError('{0} stride index cannot be zero.'
''.format(v_name))
self._update_tokens()
if self.token not in (',', ')'):
raise ValueError('{0} index did not terminate '
'correctly.'.format(v_name))
idx_triplet = (i_start, i_end, i_stride)
return idx_triplet
def _parse_value(self, write_token=True, override=None):
"""Convert string repr of Fortran type to equivalent Python type."""
v_str = self.prior_token
# Construct the complex string
if v_str == '(':
v_re = self.token
self._update_tokens(write_token)
assert self.token == ','
self._update_tokens(write_token)
v_im = self.token
self._update_tokens(write_token)
assert self.token == ')'
self._update_tokens(write_token, override)
v_str = '({0}, {1})'.format(v_re, v_im)
recast_funcs = [int, pyfloat, pycomplex, pybool, pystr]
for f90type in recast_funcs:
try:
# Unclever hack.. integrate this better
if f90type == pybool:
value = pybool(v_str, self.strict_logical)
else:
value = f90type(v_str)
return value
except ValueError:
continue
def _update_tokens(self, write_token=True, override=None,
patch_skip=False):
"""Update tokens to the next available values."""
next_token = next(self.tokens)
patch_value = ''
patch_tokens = ''
if self.pfile and write_token:
token = override if override else self.token
patch_value += token
while next_token[0] in self.comment_tokens + whitespace:
if self.pfile:
if next_token[0] in self.comment_tokens:
while not next_token == '\n':
patch_tokens += next_token
next_token = next(self.tokens)
patch_tokens += next_token
# Several sections rely on StopIteration to terminate token search
# If that occurs, dump the patched tokens immediately
try:
next_token = next(self.tokens)
except StopIteration:
if not patch_skip or next_token in ('=', '(', '%'):
patch_tokens = patch_value + patch_tokens
if self.pfile:
self.pfile.write(patch_tokens)
raise
# Write patched values and whitespace + comments to file
if not patch_skip or next_token in ('=', '(', '%'):
patch_tokens = patch_value + patch_tokens
if self.pfile:
self.pfile.write(patch_tokens)
# Update tokens, ignoring padding
self.token, self.prior_token = next_token, self.token
def _append_value(self, v_values, next_value, v_idx=None, n_vals=1):
"""Update a list of parsed values with a new value."""
for _ in range(n_vals):
if v_idx:
try:
v_i = next(v_idx)
except StopIteration:
# Repeating commas are null-statements and can be ignored
# Otherwise, we warn the user that this is a bad namelist
if next_value is not None:
warnings.warn(
'f90nml: warning: Value {v} is not assigned to '
'any variable and has been removed.'
''.format(v=next_value)
)
# There are more values than indices, so we stop here
break
v_s = [self.default_start_index if idx is None else idx
for idx in v_idx.first]
if not self.row_major:
v_i = v_i[::-1]
v_s = v_s[::-1]
# Multidimensional arrays
if not self.sparse_arrays:
pad_array(v_values, list(zip(v_i, v_s)))
# We iterate inside the v_values and inspect successively
# deeper lists within the list tree. If the requested index is
# missing, we re-size that particular entry.
# (NOTE: This is unnecessary when sparse_arrays is disabled.)
v_subval = v_values
for (i_v, i_s) in zip(v_i[:-1], v_s[:-1]):
try:
v_subval = v_subval[i_v - i_s]
except IndexError:
size = len(v_subval)
v_subval.extend([] for _ in range(size, i_v - i_s + 1))
v_subval = v_subval[i_v - i_s]
# On the deepest level, we explicitly assign the value
i_v, i_s = v_i[-1], v_s[-1]
try:
v_subval[i_v - i_s] = next_value
except IndexError:
size = len(v_subval)
v_subval.extend(None for _ in range(size, i_v - i_s + 1))
v_subval[i_v - i_s] = next_value
else:
v_values.append(next_value)
# Support functions
def prepad_array(var, v_start_idx, new_start_idx):
"""Return a resized vector based on the new start index."""
prior_var = var[:]
# Read the outer values
i_p = v_start_idx[-1]
i_v = new_start_idx[-1]
# Compute the outer index padding
if i_p is not None and i_v is not None and i_v < i_p:
pad = [None for _ in range(i_p - i_v)]
else:
pad = []
# Apply prepad rules to interior arrays
for i, v in enumerate(var):
if isinstance(v, list):
prior_var[i] = prepad_array(v, v_start_idx[:-1],
new_start_idx[:-1])
return pad + prior_var
def pad_array(v, idx):
"""Expand lists in multidimensional arrays to pad unset values."""
i_v, i_s = idx[0]
if len(idx) > 1:
# Append missing subarrays
v.extend([[] for _ in range(len(v), i_v - i_s + 1)])
# Pad elements
for e in v:
pad_array(e, idx[1:])
else:
v.extend([None for _ in range(len(v), i_v - i_s + 1)])
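# Example of the helper above: with v = [1] and idx = [(3, 1)], pad_array
# extends v in place to [1, None, None], so the element at Fortran index 3
# (start index 1) can then be assigned.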
def merge_values(src, new):
"""Merge two lists or dicts into a single element."""
if isinstance(src, dict) and isinstance(new, dict):
return merge_dicts(src, new)
else:
if not isinstance(src, list):
src = [src]
if not isinstance(new, list):
new = [new]
return merge_lists(src, new)
def merge_lists(src, new):
"""Update a value list with a list of new or updated values."""
l_min, l_max = (src, new) if len(src) < len(new) else (new, src)
l_min.extend(None for i in range(len(l_min), len(l_max)))
for i, val in enumerate(new):
if isinstance(val, dict) and isinstance(src[i], dict):
new[i] = merge_dicts(src[i], val)
elif isinstance(val, list) and isinstance(src[i], list):
new[i] = merge_lists(src[i], val)
elif val is not None:
new[i] = val
else:
new[i] = src[i]
return new
def merge_dicts(src, patch):
"""Merge contents of dict `patch` into `src`."""
for key in patch:
if key in src:
if isinstance(src[key], dict) and isinstance(patch[key], dict):
merge_dicts(src[key], patch[key])
else:
src[key] = merge_values(src[key], patch[key])
else:
src[key] = patch[key]
return src
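# Examples of the merge helpers above:
#   merge_values([1, None, 3], [None, 2])                 -> [1, 2, 3]
#   merge_dicts({'a': 1, 'b': {'x': 1}}, {'b': {'y': 2}}) -> {'a': 1, 'b': {'x': 1, 'y': 2}}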
def delist(values):
"""Reduce lists of zero or one elements to individual values."""
assert isinstance(values, list)
if not values:
return None
elif len(values) == 1:
return values[0]
return values
def check_for_value(tokens):
"""Return True if the next token is a value to be assigned."""
ntoks = 0
for tok in tokens:
if tok.isspace() or tok == ',':
continue
elif tok in ('=', '/', '$', '&'):
break
else:
ntoks += 1
# If ntoks reaches 2, then there must be at least one value.
if ntoks > 1:
break
return ntoks > 0