c=k=0
for ch in input():
    if ch.isupper():c+=k%4;k+=k%4+1
    k+=1
print(c)
|
#! /usr/bin/python
'''Script that extracts the fish classification for the entire possible grid of bounding boxes. Stores the results in a bag file.'''
usage='ExtractGridClassification.py [options]'
import roslib; roslib.load_manifest('species_id')
import rospy
import rosbag
import numpy as np
from optparse import OptionParser
import os
import os.path
import random
import re
import csv
import subprocess
import string
import time
import gc
from reefbot_msgs.msg import ImageRegion
from sensor_msgs.msg import RegionOfInterest
from objdetect_msgs.msg import DetectObjectGrid
from species_id.srv import SpeciesIDGrid
from species_id.msg import SpeciesIDScoring
from ParameterSetter import ParameterSetter
import copy
import cv2
from cv_blobs import Blob
import cv_bridge
from multiprocessing import Process
def ParseFishLabels(filename, videoRegexp, blobIdRegexp, blobRegexp):
'''Parses the input csv so that each entry becomes a tuple of (blobFile, blobId, videoId, label).'''
retVal = []
f = open(filename)
try:
reader = csv.reader(f)
for line in reader:
imageFile = line[0]
label = line[1]
tup = (blobRegexp.search(imageFile).groups()[0],
int(blobIdRegexp.search(imageFile).groups()[0]),
videoRegexp.search(imageFile).groups()[0],
label)
retVal.append(tup)
finally:
f.close()
return retVal
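# Example with the script's default regexps (hypothetical filename): with
# blob_regexp '/*(.+\.blob)', blobid_regexp 'blob\.([0-9]+)\.' and
# video_regexp '(([0-9][0-9]-){3})', an image file named
# '01-02-03-fish.blob.5.png' yields the tuple
# ('01-02-03-fish.blob', 5, '01-02-03-', <csv label>).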
def ParseNegList(filename, blobRegexp, videoRegexp):
'''Parses a list of negative examples, one per line, so that each entry is a tuple of (blobFile, videoId).'''
retVal = []
f = open(filename)
try:
for line in f:
imageFile = line.strip()
tup = (blobRegexp.search(imageFile).groups()[0],
videoRegexp.search(imageFile).groups()[0])
retVal.append(tup)
finally:
f.close()
return retVal
def WriteNegativeExamples(labelFile, negEntries, nDesired, blobDir):
entriesAdded = 0
mixEntries = copy.deepcopy(negEntries)
random.shuffle(mixEntries)
for blobFile, videoId in mixEntries:
if entriesAdded >= nDesired:
break
# Open the blob file and count the number of lines because the
# number of blobs in there will be nLines - 1
curBlobId = 0
blobStream = open(os.path.join(blobDir, blobFile))
try:
garb = blobStream.readline()
for line in blobStream:
labelFile.write('%s,%i,1\n' % (blobFile, curBlobId))
curBlobId += 1
finally:
blobStream.close()
entriesAdded += curBlobId
rospy.loginfo('Using %s negative examples' % entriesAdded)
def GetRandomRunName():
retval = ''.join(random.choice(string.letters) for i in xrange(10))
rospy.loginfo('Name of this run is ' + retval)
return retval
def BuildFishIndex(labelFilename, indexFilename, blobDir, paramList):
retval = 1
try:
procSequence = ['bin/BuildFishIndex', '--input', labelFilename,
'--output', indexFilename,
'--blob_prefix', blobDir + '/'] + \
['--%s=%s' % (x[0], x[1]) for x in paramList]
rospy.loginfo("Running %s", procSequence)
retval = subprocess.call(procSequence)
finally:
if retval != 0:
raise Exception("Could not build the fish index. Return code: %i\n %s" %
(retval, ' '.join(procSequence)))
def StartSpeciesIDNode(outputDir, namespace, outBag, bagTime, paramList,
indexFilename):
rospy.loginfo('Running the SpeciesIDNode')
nodeName = '%s_species_id_node' % namespace
serviceName = '%s/detect_object_grid_service' % nodeName
params = copy.deepcopy(paramList)
params.append(('index_file', indexFilename))
parameterHandler = ParameterSetter(nodeName, params, outBag, 'parameters',
bagTime)
vuEnv = os.environ
#vuEnv['CPUPROFILE'] = '/home/mdesnoye/tmp/fish_classifier.prof'
proc = subprocess.Popen(['bin/SpeciesIDRosNode',
'__name:=' + nodeName,
'__log:=%s/%s_log.out' % (outputDir, nodeName),
'species_id_grid_service:=%s' % serviceName],
env=vuEnv)
rospy.loginfo('Waiting for the SpeciesIDNode to startup')
rospy.wait_for_service(serviceName, timeout=6000)
rospy.loginfo('SpeciesIDNode is running')
serviceProxy = rospy.ServiceProxy(serviceName, SpeciesIDGrid)
return (proc, parameterHandler, serviceProxy)
def RunOneExtraction(fishEntries,
negEntries,
blobDir,
experimentDir,
outputPrefix,
videoId=None,
shapeDictFilename=None,
useSurfDescriptor=False,
useSiftDescriptor=False,
useOpponentSurf=False,
useCInvariantSurf=False,
saveIndex=False,
hessThresh=400.0,
numNegBlobs=5.0,
options=None):
# Split up the training and test entries
if videoId is not None:
trainEntries = filter(lambda x: x[2] != videoId, fishEntries)
trainNegEntries = filter(lambda x: x[1] != videoId, negEntries)
testEntries = filter(lambda x: x[2] == videoId, fishEntries)
testBlobFiles = sorted(set([x[0] for x in testEntries]))
rospy.loginfo('Testing with entries of video id ' + videoId)
else:
rospy.logfatal('Must have a video id. We are done')
return
runName = GetRandomRunName()
# Now build up a list of training and negative entries entries to be
# used by the BuildFishIndex program.
labelFilename = os.path.join(experimentDir, 'train_%s.label' % runName)
labelFile = open(labelFilename, 'w')
try:
for blobFile, blobId, trainVideoId, label in trainEntries:
labelFile.write('%s,%i,%s\n' % (blobFile, blobId, label))
WriteNegativeExamples(labelFile, trainNegEntries,
numNegBlobs*len(trainEntries),
blobDir)
finally:
labelFile.close()
# Setup the parameters used by the routines
paramList = [
('color_dict_filename', ''),
('color_converter', ''),
('color_frac', 0),
('shape_dict_filename', shapeDictFilename),
('surf_detector', True),
('surf_hessian_threshold', hessThresh),
('surf_octaves', 3),
('surf_octave_layers', 4),
('surf_extended', False),
('surf_descriptor', useSurfDescriptor),
('sift_descriptor', useSiftDescriptor),
('shape_weight', 1.0),
('min_shape_val', 0.01),
('min_color_val', 0.01),
('min_score', 0.005),
('opponent_color_surf', useOpponentSurf),
('cinvariant_color_surf', useCInvariantSurf),
('use_bounding_box', True),
('bounding_box_expansion', 1.0),
('geometric_rerank', False),
('geo_rerank_inlier_thresh', 3.0)]
rospy.loginfo('Using parameters: ' + str(paramList))
indexFilename = os.path.join(experimentDir, 'train_%s.index' % runName)
# Build the search index for the fish
BuildFishIndex(labelFilename, indexFilename, blobDir, paramList)
# Open up the bag which we will output stuff to
resultFile = '%s_%s.bag' % (outputPrefix, videoId)
outBag = rosbag.Bag(os.path.join(experimentDir, resultFile), 'w', 'bz2')
# Start a SpeciesIDNode to serve the requests and grab the service
# hook to it
speciesIdProc, speciesIdParams, ClassifyGrid = StartSpeciesIDNode(
experimentDir, runName, outBag, rospy.Time.from_sec(0.0), paramList,
indexFilename)
try:
blobSerializer = Blob.BlobSerializer()
cvBridge = cv_bridge.CvBridge()
npBridge = cv_bridge.NumpyBridge()
curFrameNum = 0
for blobFile in testBlobFiles:
# The result message to store
resultMsg = SpeciesIDScoring()
# Read the blob file
blobStream = open(os.path.join(blobDir, blobFile), 'r')
try:
blobs, imageFile = blobSerializer.Deserialize(blobStream, blobDir)
finally:
blobStream.close()
rospy.loginfo('Processing %s' % imageFile)
# Fill out the ground truth in the result message
curEntries = filter(lambda x: x[0] == blobFile, testEntries)
curEntries = sorted(curEntries, key=lambda x: x[1])
curBlobId = 0
for blob in blobs:
curEntry = [x for x in curEntries if x[1] == curBlobId]
if len(curEntry) == 1 and int(curEntry[0][3]) > 3:
# The human label says it's a fish so add the label to the results
resultMsg.labels.append(int(curEntry[0][3]))
bbox = RegionOfInterest()
bbox.x_offset = blob.minX
bbox.y_offset = blob.minY
bbox.height = blob.maxY - blob.minY
bbox.width = blob.maxX - blob.minX
resultMsg.regions.append(bbox)
curBlobId += 1
# Open up the image and package it into a message
cvImage = cv2.imread(imageFile)
imgMsg = cvBridge.cv_to_imgmsg(cvImage, 'bgr8')
imgMsg.header.stamp = rospy.Time.from_sec(curFrameNum)
imgMsg.header.seq = curFrameNum
# Build up the request
request = DetectObjectGrid()
request.header = imgMsg.header
request.image = imgMsg
request.grid.minX = 0
request.grid.minY = 0
request.grid.minH = options.min_region_height
request.grid.minW = options.min_region_width
request.grid.strideX = options.win_stride
request.grid.strideY = options.win_stride
request.grid.strideH = options.scale_stride
request.grid.strideW = options.scale_stride
request.grid.fixAspect = False
request.mask.encoding = "8U"
# Process this image
response = ClassifyGrid(request)
# Build up the result message to store
resultMsg.image = imageFile
resultMsg.grid = response.grid
resultMsg.confidence = response.confidence
resultMsg.top_label = response.top_label
resultMsg.not_fish_confidence = response.not_fish_confidence
resultMsg.processing_time = response.processing_time
# Record the result in the bag file
outBag.write('results', resultMsg, request.header.stamp)
outBag.flush()
curFrameNum += 1
finally:
outBag.close()
speciesIdParams.RemoveParameters()
ClassifyGrid.close()
speciesIdProc.send_signal(2)
tries = 0
while speciesIdProc.returncode is None and tries < 10:
speciesIdProc.poll()
tries = tries+1
time.sleep(1)
if speciesIdProc.returncode is None:
speciesIdProc.kill()
if not options.save_index:
os.remove(indexFilename)
gc.collect()
if __name__ == '__main__':
# All of the command line flags
parser = OptionParser(usage=usage)
parser.add_option('--blob_dir', dest='blob_dir',
help='Directory where the blob files reside. Must include positive and negative blobs.',
default=None)
parser.add_option('--experiment_dir', dest='experiment_dir',
help='Directory to put the files used in the experiment and the results',
default=None)
parser.add_option('--output_prefix', default='',
help='The prefix for the output bag filename')
parser.add_option('--image_list', dest='image_list',
help='CSV file of <image_filename>,<label> listing all the entries for known fish')
parser.add_option('--neg_blob_list', default=None,
help='Text file with a filename on each line. One per blob file that contains negative blobs.')
parser.add_option('--num_neg_blobs', type='float', default=5.0,
help='Number of negative examples to add to the index as a multiple of positive examples')
parser.add_option('--video_regexp', dest='video_regexp',
help='Regular expression used to extract the video id from the filename',
default='(([0-9][0-9]-){3})')
parser.add_option('--blobid_regexp', dest='blobid_regexp',
help='Regular expression used to extract the blob id from the filename',
default='blob\.([0-9]+)\.')
parser.add_option('--blob_regexp', dest='blob_regexp',
help='Regular expression used to extract the blob filename from the image filename',
default='/*(.+\.blob)')
parser.add_option('--shape_dict_filename',
help='Filename of the shape dictionary',
default='/data/mdesnoye/fish/experiments/extraction081011/20110102/osurf.dict')
parser.add_option('--use_surf_descriptor', action='store_true',
default=False, help='Use SURF descriptors?')
parser.add_option('--use_sift_descriptor', action='store_true',
default=False, help='Use SIFT descriptors?')
parser.add_option('--use_opponent_surf', action='store_true',
default=False, help='Use Opponent color SURF descriptors?')
parser.add_option('--use_cinvariant_surf', action='store_true',
default=False, help='Use C-Invariant SURF descriptors?')
parser.add_option('--video_ids', default=None,
help='Python code specifying which video ids to use')
parser.add_option('--save_index', action='store_true', default=False,
help="Save the index that was built")
parser.add_option('--hess_thresh', default=300, type="float",
help='Hessian threshold used to find interest points.')
parser.add_option('--random_seed', default=495198, type='int')
# Sampling parameters
parser.add_option('--min_region_width', type='int', default=32,
help='Width of the minimum region to evaluate in pixels')
parser.add_option('--min_region_height', type='int', default=32,
help='Height of the minimum region to evaluate in pixels')
parser.add_option('--win_stride', type='int', default=16,
help='When sampling, the stride in pixels for identifying regions')
parser.add_option('--scale_stride', type='float', default=1.50,
help='When sampling, the scaling factor between levels')
parser.add_option('--do_testing', action='store_true', default=False,
help='Set to be in a testing mode')
(options, args) = parser.parse_args()
random.seed(options.random_seed)
rospy.init_node('ExtractGridClassification', anonymous=True)
fishEntries = ParseFishLabels(options.image_list,
re.compile(options.video_regexp),
re.compile(options.blobid_regexp),
re.compile(options.blob_regexp))
negEntries = ParseNegList(options.neg_blob_list,
re.compile(options.blob_regexp),
re.compile(options.video_regexp))
# Get the list of unique video ids
if options.video_ids is None:
videoIds = set([x[2] for x in fishEntries])
else:
videoIds = eval(options.video_ids)
for videoId in videoIds:
args = (fishEntries,
negEntries,
options.blob_dir,
options.experiment_dir,
options.output_prefix)
kwargs = {'videoId': videoId,
'shapeDictFilename': options.shape_dict_filename,
'useSurfDescriptor': options.use_surf_descriptor,
'useSiftDescriptor': options.use_sift_descriptor,
'useOpponentSurf': options.use_opponent_surf,
'useCInvariantSurf': options.use_cinvariant_surf,
'saveIndex': options.save_index,
'hessThresh': options.hess_thresh,
'numNegBlobs': options.num_neg_blobs,
'options': options}
if options.do_testing:
RunOneExtraction(*args, **kwargs)
else:
p = Process(target=RunOneExtraction,
args=args, kwargs=kwargs)
p.start()
p.join()
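# Example invocation (hypothetical paths), using the flags defined above:
# ./ExtractGridClassification.py --blob_dir /data/blobs \
#     --experiment_dir /data/experiment --image_list fish_labels.csv \
#     --neg_blob_list negatives.txt --output_prefix grid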
|
# -*- coding: utf-8 -*-
import logging
import traceback
import easyjoblite.exception
from easyjoblite import constants
from easyjoblite.consumers.base_rmq_consumer import BaseRMQConsumer
from easyjoblite.job import EasyJob
from easyjoblite.job_response import JobResponse
from easyjoblite.response import EasyResponse
class WorkQueueConsumer(BaseRMQConsumer):
"""
worker consumes from worker queue, calls underlying crs booking apis, and
based on error responses decides whether to retry, how many times, and what
to do if the work item fails too many times
"""
def consume_from_work_queue(self, queue):
"""
starts the process of consuming jobs from the queue
:param queue: the queue from which to consume
:return: NA
"""
self.consume(queue)
def process_message(self, body, message):
"""
gets called back once a message arrives in the work queue
1. calls embedded api with the payload as its parameters when a message arrives
2. if the call is successful, acks the message
3. for remote call
a. in case the call fails with 4XX, just acks the message, no further action
b. in case the call fails with a 5XX,
- adds the error to error-log header
- if num-retries are more than max_retries,
- puts the message in dead-letter-queue
- else
- increases num-retries by 1
- puts the message in error-queue
4. for local call
a. in case the call fails with a exception then adds the call to a dead letter queue
:param body: message payload
:param message: queued message with headers and other metadata (contains a EasyJob object in headers)
"""
logger = logging.getLogger(self.__class__.__name__)
try:
job = EasyJob.create_from_dict(message.headers)
except easyjoblite.exception.UnableToCreateJob as e:
logger.error(e.message + " data: " + str(e.data))
message.ack()
self.__push_raw_msg_to_dlq(body=body,
message=message,
err_msg=e.message,
)
return
try:
api = job.api
logger.debug("received api: " + str(api))
response = job.execute(body, self.get_config().async_timeout)
message.ack()
status = JobResponse(response.status_code)
if status == JobResponse.IGNORE_RESPONSE_AND_RETRY:
# retry job without incrementing retry count
logger.info("{status}: {resp}".format(status=response.status_code,
resp=response.message))
self._push_message_to_error_queue(body=body, message=message,
job=job, update_retry_count=False)
elif status == JobResponse.RETRYABLE_FAILURE:
# we have a retry-able failure
logger.info("{status}: {resp}".format(status=response.status_code,
resp=response.message))
self._push_message_to_error_queue(body=body, message=message, job=job)
elif status == JobResponse.NON_RETRYABLE_FAILURE:
# push non retry-able error to dead letter queue
self._push_msg_to_dlq(body=body, message=message, job=job)
except (Exception, easyjoblite.exception.ApiTimeoutException) as e:
traceback.print_exc()
logger.error(str(e))
message.ack()
self._push_message_to_error_queue(body, message, job)
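# Decision summary for process_message (mirrors the docstring above):
#   IGNORE_RESPONSE_AND_RETRY -> error queue, retry count unchanged
#   RETRYABLE_FAILURE         -> error queue, retry count incremented
#   NON_RETRYABLE_FAILURE     -> dead letter queue
#   unhandled exception       -> error queue, falling through to the DLQ
#                                once max_retries is exhausted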
def __push_raw_msg_to_dlq(self, body, message, err_msg):
"""
pushes the raw message to dead letter queue for manual intervention and notification
:param body: body of the message
:param message: kombu amqp message object with headers and other metadata
:param err_msg: what error caused this push to error queue
"""
logger = logging.getLogger(self.__class__.__name__)
try:
logger.info("Moving raw item to DLQ for notification and manual intervention")
job = EasyJob.create_dummy_clone_from_dict(message.headers)
job.add_error(EasyResponse(400, err_msg).__dict__)
self.produce_to_queue(constants.DEAD_LETTER_QUEUE, body, job)
except Exception as e:
traceback.print_exc()
logger.error("Error moving the raw-error to dead-letter-queue: {err}".format(err=str(e)))
def _push_msg_to_dlq(self, body, message, job):
"""
pushes the message to dead letter queue for manual intervention and notification
:param body: body of the message
:param message: kombu amqp message object with headers and other metadata
:param job: what job to be moved to dlq
"""
logger = logging.getLogger(self.__class__.__name__)
try:
logger.info("Moving item to DLQ for notification and manual intervention")
self.produce_to_queue(constants.DEAD_LETTER_QUEUE, body, job)
except Exception as e:
traceback.print_exc()
err_msg = "Error moving the work-item to dead-letter-queue: {err}".format(err=str(e))
logger.error(err_msg)
self.__push_raw_msg_to_dlq(body, message, err_msg)
def _push_message_to_error_queue(self, body, message, job, update_retry_count=True):
"""
pushes the message to appropriate error queue based on number of
retries on the message so far
:param body: body of the message
:param message: kombu amqp message object with headers and other metadata
:param job: the job which failed
"""
logger = logging.getLogger(self.__class__.__name__)
if job.no_of_retries < self.get_config().max_retries:
# we are allowed more retries, so move this to error queue
logger.debug("Moving work-item {t}:'{d}' to error-queue for retry later".format(t=job.tag,
d=body))
try:
if update_retry_count:
job.increment_retries()
self.produce_to_queue(constants.RETRY_QUEUE, body, job)
except Exception as e:
traceback.print_exc()
logger.error("Error moving the work-item to error-queue: {err}".format(err=str(e)))
self.__push_raw_msg_to_dlq(body, message, str(e))
else:
er_message = "Max retries exceeded, moving work-item to DLQ for manual intervention."
logger.info(er_message)
self._push_msg_to_dlq(body=body, message=message, job=job)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from storm.locals import *
from mdcorpus.orm import *
database = create_database("sqlite:")
store = Store(database)
store.execute(MovieConversation.CREATE_SQL)
store.execute(MovieLine.CREATE_SQL)
conversation = store.add(MovieConversation(0, 2, 0))
line194 = store.add(MovieLine(
194, "Can we make this quick? Roxanne Korrine and Andrew Barrett are having an incredibly horrendous public break- up on the quad. Again."))
line195 = store.add(MovieLine(
195, "Well, I thought we'd start with pronunciation, if that's okay with you."))
line196 = store.add(MovieLine(
196, "Not the hacking and gagging and spitting part. Please."))
line197 = store.add(MovieLine(
197, "Okay... then how 'bout we try out some French cuisine. Saturday? Night?"))
store.flush()
line_id_list = [194, 195, 196, 197]
for (i, line_id) in enumerate(line_id_list):
line = store.find(MovieLine, MovieLine.id == line_id).one()
line.number = i + 1
conversation.lines.add(line)
store.commit()
for line in conversation.line_list():
print "'" + line.text + "'"
|
from __future__ import division
import numpy as np
import cv2  # required by compare() below
import time, io
# from matplotlib import pyplot as plt
from google.cloud import vision
from google.cloud.vision.feature import Feature,FeatureTypes
MIN_MATCH_COUNT = 200
# only using match count right now
MIN_MATCH_RATIO = .2
NUM_LABELS = 20
NUM_LANDMARKS = 3
NUM_LOGOS = 3
MIN_SCORE = 0
def compare(img1_name, img2_name):
"""
Return whether img1 and img2 match significantly.
Determined through feature matching and comparison
(the number of good matches must be greater than MIN_MATCH_COUNT).
"""
img1 = cv2.imread(img1_name)
img2 = cv2.imread(img2_name)
# Initiate SURF detector (the variable is named sift, but this is SURF)
sift = cv2.xfeatures2d.SURF_create()
# find the keypoints and descriptors with SURF
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1,des2,k=2)
# count the number of good matches using Lowe's ratio test
num_good_matches = 0
for m,n in matches:
if m.distance < 0.7*n.distance:
num_good_matches += 1
print('Number of good features matched: ' + str(num_good_matches))
return num_good_matches>MIN_MATCH_COUNT
def get_features(img_path):
"""
Returns a list of feature descriptions (labels, landmarks and logos)
detected in an image; entries scoring below the module-level
MIN_SCORE threshold are dropped.
"""
v_c = vision.Client()
with io.open(img_path, 'rb') as image_file:
content = image_file.read()
img = v_c.image(content=content)
output = []
features = [Feature(FeatureTypes.LABEL_DETECTION, NUM_LABELS),
Feature(FeatureTypes.LANDMARK_DETECTION, NUM_LANDMARKS),
Feature(FeatureTypes.LOGO_DETECTION, NUM_LOGOS)]
annotations = img.detect(features)
for label in annotations[0].labels:
if label.score >= MIN_SCORE:
output.append(label.description.encode('utf-8'))
for landmark in annotations[0].landmarks:
if landmark.score >= MIN_SCORE:
output.append(landmark.description.encode('utf-8'))
for logo in annotations[0].logos:
if logo.score >= MIN_SCORE:
output.append(logo.description.encode('utf-8'))
return output
def has_features(img_path, features):
"""
Returns whether or not features are in the image provided.
"""
f = get_features(img_path)
for feature in features:
if feature in f:
return True
return False
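# Minimal usage sketch (hypothetical file names):
#
# if compare("room_before.png", "room_after.png"):
#     print("the two images share many matching features")
# if has_features("holiday.jpg", ["beach", "Eiffel Tower"]):
#     print("at least one requested feature was detected")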
|
import glob
import os
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from lxml import etree
from ead.constants import NS_MAP
from ead.models import EAD, Relation, RelationEntry
class Command(BaseCommand):
help = "Re-imports sh_connection items from specified XML, overwriting existing connections."
def add_arguments(self, parser):
parser.add_argument("xml_dir", metavar="DIR",
help="Directory containing XML to import.")
@transaction.atomic
def handle(self, *args, **options):
parser = etree.XMLParser(ns_clean=True, remove_blank_text=True)
xml_pattern = os.path.join(options["xml_dir"], "*.xml")
for xml_path in glob.glob(xml_pattern):
self.reimport_file(xml_path, parser)
def reimport_file(self, xml_path, parser):
"""Reimport connections from EAD3 XML file at `xml_path`."""
self.stdout.write("Re-importing file at {}.".format(xml_path))
tree = etree.parse(xml_path, parser=parser)
ead = tree.getroot()
recordid = ead.xpath("e:control/e:recordid/text()",
namespaces=NS_MAP)[0]
try:
record = EAD.objects.get(recordid=recordid)
except EAD.DoesNotExist:
raise CommandError(
"Record with record ID {} does not exist.".format(recordid))
self.stdout.write("Deleting existing connections.")
for relation in record.relation_set.filter(
otherrelationtype="sh_connection"):
relation.delete()
for relation_el in ead.xpath("e:archdesc/e:relations/e:relation[@otherrelationtype='sh_connection']", namespaces=NS_MAP):
relation = Relation(
relations=record, relationtype="otherrelationtype",
otherrelationtype="sh_connection")
relation.save()
for entry_el in relation_el.xpath('e:relationentry',
namespaces=NS_MAP):
entry = RelationEntry(
relation=relation, relationentry=entry_el.text,
localtype=entry_el.get("localtype"))
entry.save()
|
import pytest
import numpy as np
from numpy import testing
from gammy import utils
from gammy.arraymapper import ArrayMapper, lift
np.random.seed(42)
data = np.random.randn(42, 8)
@pytest.mark.parametrize("op", [
(lambda _, y: y),
(lambda x, _: x[:, 2]),
(lambda x, y: x + y),
(lambda x, y: x - y),
(lambda x, y: x * y),
(lambda x, y: x / y),
(lambda x, _: x ** 2),
(lambda x, _: -x)
])
def test_arithmetic(op):
def function(t):
return t + 42
x = ArrayMapper()
y = ArrayMapper(function)
testing.assert_almost_equal(
op(x, y)(data),
op(data, function(data)),
decimal=8
)
return
def test_lift():
def function(t):
return t ** 2
def f(t):
return t - 2
x = ArrayMapper(function)
testing.assert_almost_equal(
lift(f)(x)(data),
utils.compose(f, function)(data),
decimal=8
)
return
def test_ravel():
x = ArrayMapper()
testing.assert_array_equal(
x.ravel()(data),
data.ravel()
)
def test_reshape():
x = ArrayMapper()
testing.assert_array_equal(
x.reshape(4, 84)(data),
data.reshape(4, 84)
)
|
import sys
sys.path.append("../../utils") # Use predefined utility functions
import cv2
import numpy as np
from scipy.ndimage import convolve, gaussian_filter
from scipy.interpolate import splprep, splev, RectBivariateSpline
from contour import getContour
## Sobel kernels
# Here, we are going to use image coordinates matching
# array coordinates like (x:height, y:width)
SOBEL_X = .125*np.array([[ 1., 2., 1.],
[ 0., 0., 0.],
[-1., -2., -1.]])
SOBEL_Y = .125*np.array([[ 1., 0., -1.],
[ 2., 0., -2.],
[ 1., 0., -1.]])
def _calcEdgeEnergy(u):
''' Return image edge energy by using Sobel operator
'''
# Extract image edges with Sobel filter
gradx = convolve(u, SOBEL_X, mode="nearest")
grady = convolve(u, SOBEL_Y, mode="nearest")
'''
# Pad input domain
pad = np.pad(u, pad_width=1, mode="edge")
# Calculate derivatives
fx = .5*(pad[2:, 1:-1] - pad[:-2, 1:-1])
fy = .5*(pad[1:-1, 2:] - pad[1:-1, :-2])
return -(fx**2. + fy**2.)
'''
return (gradx**2. + grady**2.)
def _calcScaleEnergy(u, sigma=3):
''' Return image scale energy
'''
# Apply gaussian filter
u = gaussian_filter(u, sigma=sigma)
# Pad input domain
pad = np.pad(u, pad_width=1, mode="edge")
# Calculate derivatives
fxx = pad[2:, 1:-1] - 2.*u + pad[:-2, 1:-1]
fyy = pad[1:-1, 2:] - 2.*u + pad[1:-1, :-2]
return -(fxx**2. + fyy**2.)
def _calcCurvatureEnergy(u, eta=1.e-8):
''' Return curvature energy: the isophote curvature
kappa = (fxx*fy**2 - 2*fxy*fx*fy + fyy*fx**2) / (fx**2 + fy**2)**1.5,
with eta regularizing the denominator in flat regions.
'''
# Pad input domain
pad = np.pad(u, pad_width=1, mode="edge")
# Calculate derivatives
fx = .5*(pad[2:, 1:-1] - pad[:-2, 1:-1])
fy = .5*(pad[1:-1, 2:] - pad[1:-1, :-2])
fxx = pad[2:, 1:-1] - 2.*u + pad[:-2, 1:-1]
fyy = pad[1:-1, 2:] - 2.*u + pad[1:-1, :-2]
fxy = .25*(pad[2:, 2:] + pad[:-2, :-2]
- pad[:-2, 2:] - pad[2:, :-2])
return ((fxx*fy**2 - 2*fxy*fx*fy + fyy*fx**2)
/ (np.power(fx**2. + fy**2., 1.5) + eta))
class Snake(object):
''' Snake algorithm for active-contour based image segmentation
Parameters
----------------
image: (H, W) ndarray
input image.
seed: (H, W) ndarray
input seed.
Returns
----------------
Region (H, W) ndarray
segmentation label
'''
def __init__(self, alpha=.01, beta=.1, gamma=.01,
wline=0., wedge=1., wscale=0., wcurv=0.,
maxIter=1000,
maxDispl=1.,
eps=.1,
period=10):
# Model parameters
self.alpha = alpha # continuity parameter
self.beta = beta # smoothness parameter
self.gamma = gamma # artificial time step
# Constraint weights
self.wline = wline # weight of line functional
self.wedge = wedge # weight of edge functional
self.wscale = wscale # weight of scale functional
self.wcurv = wcurv # weight of curvature functional
# Numerical parameters
self.maxIter = int(maxIter)
self.maxDispl = float(maxDispl)
self.eps = eps
# Evolution history
self.period = period
self.xhistory = [0] * period # containers of recent snake coordinates
self.yhistory = [0] * period # used by the convergence check in run()
def run(self, image, seed):
# Convert input image format to be a float container
image = np.array(image, dtype=np.float32)
image = (image - image.min())/(image.max() - image.min())
# Get input dimensions
if len(image.shape) == 2:
height, width = image.shape
elif len(image.shape) == 3:
height, width, _ = image.shape
image = np.mean(image, axis=-1)
# Get contour from seed region
contour = np.array(getContour(seed), dtype=np.float32)
# Initialize snake pivots
tck, _ = splprep(contour.T, s=0)
snake = splev(np.linspace(0, 1, 2*len(contour)), tck)
snake = np.array(snake).T.astype(np.float32)
snake = np.array(contour).astype(np.float32)
# Discretize snake
xx, yy = snake[:, 0], snake[:, 1]
for p in range(self.period):
self.xhistory[p] = np.zeros(len(snake), dtype=np.float32)
self.yhistory[p] = np.zeros(len(snake), dtype=np.float32)
# Evaluate image energies
Eedge = _calcEdgeEnergy(image)
Escale = _calcScaleEnergy(image)
Ecurv = _calcCurvatureEnergy(image)
# Get total image energy
Etot = (self.wline*image + self.wedge*Eedge
+ self.wscale*Escale + self.wcurv*Ecurv)
# Get continuous image field
interp = RectBivariateSpline(np.arange(height),
np.arange(width),
Etot,
kx=2, ky=2, s=0)
# Build snake shape matrix
matrix = np.eye(len(snake), dtype=float)
a = (np.roll(matrix, -1, axis=0)
+ np.roll(matrix, -1, axis=1)
- 2.*matrix) # second order derivative, central difference
b = (np.roll(matrix, -2, axis=0)
+ np.roll(matrix, -2, axis=1)
- 4.*np.roll(matrix, -1, axis=0)
- 4.*np.roll(matrix, -1, axis=1)
+ 6.*matrix) # fourth order derivative, central difference
A = -self.alpha*a + self.beta*b
# Make inverse matrix needed for the numerical scheme
inv = np.linalg.inv(A + self.gamma*matrix).astype(np.float32)
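# The loop below performs the semi-implicit Euler step of the classic
# Kass-Witkin-Terzopoulos snake: internal (shape) forces are handled
# implicitly through inv = (A + gamma*I)^-1, external image forces
# explicitly, i.e. x_{t+1} = inv @ (gamma*x_t + fx).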
# Do optimization
for step in range(self.maxIter):
# Get point-wise energy values
fx = interp(xx, yy, dx=1, grid=False).astype(np.float32)
fy = interp(xx, yy, dy=1, grid=False).astype(np.float32)
# Evaluate new snake
xn = inv @ (self.gamma*xx + fx)
yn = inv @ (self.gamma*yy + fy)
# Confine displacements
dx = self.maxDispl*np.tanh(xn - xx)
dy = self.maxDispl*np.tanh(yn - yy)
# Update snake
xx, yy = xx + dx, yy + dy
# Check numerical convergence
# Update histories
index = step % (self.period + 1)
if index < self.period:
self.xhistory[index] = xx
self.yhistory[index] = yy
else:
distance = np.min(np.max(np.abs(np.array(self.xhistory) - xx)
+ np.abs(np.array(self.yhistory) - yy), axis=1))
if distance < self.eps: break
# Remark region of snake contour into pixel map
seed = cv2.fillConvexPoly(seed, points=snake.astype(np.int32), color=1)
return seed, np.stack([xx, yy], axis=1)
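# Minimal usage sketch (hypothetical file names; assumes the seed is a
# binary uint8 mask whose boundary getContour can trace):
#
# image = cv2.imread("cells.png", cv2.IMREAD_GRAYSCALE)
# seed = (cv2.imread("seed_mask.png", cv2.IMREAD_GRAYSCALE) > 0).astype(np.uint8)
# label, points = Snake(wedge=1., maxIter=500).run(image, seed)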
|
#!/usr/bin/env python3
# Dump Android Verified Boot Signature (c) B.Kerler 2017-2018
import hashlib
import struct
import binascii
import rsa
import sys
import argparse
from rsa import common, transform, core
from Crypto.Util.asn1 import DerSequence
from Crypto.PublicKey import RSA
version="v1.3"
def extract_hash(pub_key,data):
hashlen = 32 #SHA256
keylen = common.byte_size(pub_key.n)
encrypted = transform.bytes2int(data)
decrypted = transform.int2bytes(core.decrypt_int(encrypted, pub_key.e, pub_key.n),keylen)
hash = decrypted[-hashlen:]
if (decrypted[0:2] != b'\x00\x01') or (len(hash) != hashlen):
raise Exception('Signature error')
return hash
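# Note: extract_hash above relies on the PKCS#1 v1.5 signature layout of
# the decrypted block: 0x00 0x01, 0xFF padding, 0x00, then an ASN.1
# DigestInfo whose trailing 32 bytes are the raw SHA-256 digest.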
def dump_signature(data):
#print (binascii.hexlify(data[0:10]))
if data[0:2] == b'\x30\x82':
slen = struct.unpack('>H', data[2:4])[0]
total = slen + 4
cert = struct.unpack('<%ds' % total, data[0:total])[0]
der = DerSequence()
der.decode(cert)
cert0 = DerSequence()
cert0.decode(bytes(der[1]))
pk = DerSequence()
pk.decode(bytes(cert0[0]))
subjectPublicKeyInfo = pk[6]
meta = DerSequence().decode(bytes(der[3]))
name = meta[0][2:]
length = meta[1]
signature = bytes(der[4])[4:0x104]
pub_key = RSA.importKey(subjectPublicKeyInfo)
pub_key = rsa.PublicKey(int(pub_key.n), int(pub_key.e))
hash=extract_hash(pub_key,signature)
return [name,length,hash,pub_key,bytes(der[3])[1:2]]
class androidboot:
magic="ANDROID!" #BOOT_MAGIC_SIZE 8
kernel_size=0
kernel_addr=0
ramdisk_size=0
ramdisk_addr=0
second_addr=0
second_size=0
tags_addr=0
page_size=0
qcdt_size=0
os_version=0
name="" #BOOT_NAME_SIZE 16
cmdline="" #BOOT_ARGS_SIZE 512
id=[] #uint*8
extra_cmdline="" #BOOT_EXTRA_ARGS_SIZE 1024
def getheader(inputfile):
param = androidboot()
with open(inputfile, 'rb') as rf:
header = rf.read(0x660)
fields = struct.unpack('<8sIIIIIIIIII16s512s8I1024s', header)
param.magic = fields[0]
param.kernel_size = fields[1]
param.kernel_addr = fields[2]
param.ramdisk_size = fields[3]
param.ramdisk_addr = fields[4]
param.second_size = fields[5]
param.second_addr = fields[6]
param.tags_addr = fields[7]
param.page_size = fields[8]
param.qcdt_size = fields[9]
param.os_version = fields[10]
param.name = fields[11]
param.cmdline = fields[12]
param.id = [fields[13],fields[14],fields[15],fields[16],fields[17],fields[18],fields[19],fields[20]]
param.extra_cmdline = fields[21]
return param
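# The unpack format '<8sIIIIIIIIII16s512s8I1024s' spans
# 8 + 10*4 + 16 + 512 + 8*4 + 1024 = 1632 = 0x660 bytes,
# matching the rf.read(0x660) above.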
def int_to_bytes(x):
return x.to_bytes((x.bit_length() + 7) // 8, 'big')
def main(argv):
print("\nBoot Signature Tool (c) B.Kerler 2017-2018")
print("------------------------------------------------------")
parser = argparse.ArgumentParser(description='Boot Signature Tool (c) B.Kerler 2017-2018')
parser.add_argument('--file','-f', dest='filename', default="", action='store', help='boot or recovery image filename')
parser.add_argument('--length','-l', dest='inject', action='store_true', default=False, help='adapt signature length')
args = parser.parse_args()
if args.filename=="":
print("Usage: verify_signature.py -f [boot.img]")
exit(0)
param=getheader(args.filename)
kernelsize = ((param.kernel_size + param.page_size - 1) // param.page_size) * param.page_size
ramdisksize = ((param.ramdisk_size + param.page_size - 1) // param.page_size) * param.page_size
secondsize = ((param.second_size + param.page_size - 1) // param.page_size) * param.page_size
qcdtsize = ((param.qcdt_size + param.page_size - 1) // param.page_size) * param.page_size
print("Kernel=0x%08X, length=0x%08X" % (param.page_size, kernelsize))
print("Ramdisk=0x%08X, length=0x%08X" % ((param.page_size+kernelsize),ramdisksize))
print("Second=0x%08X, length=0x%08X" % ((param.page_size+kernelsize+ramdisksize),secondsize))
print("QCDT=0x%08X, length=0x%08X" % ((param.page_size+kernelsize+ramdisksize+secondsize),qcdtsize))
length=param.page_size+kernelsize+ramdisksize+secondsize+qcdtsize
print("Signature start=0x%08X" % length)
sha256=hashlib.sha256()
with open(args.filename,'rb') as fr:
data=fr.read(length)
sha256.update(data)
signature = fr.read()
target,siglength,hash,pub_key,flag=dump_signature(signature)
id=binascii.hexlify(data[576:576+32])
print("ID: "+id.decode('utf-8'))
print("\nImage-Target: "+str(target))
print("Image-Length: "+hex(length))
print("Signature-Length: "+hex(siglength))
meta=b"\x30"+flag+b"\x13"+bytes(struct.pack('B',len(target)))+target+b"\x02\x04"+bytes(struct.pack(">I",length))
print(meta)
sha256.update(meta)
digest=sha256.digest()
print("\nImage-Hash: "+str(binascii.hexlify(digest)))
print("Signature-Hash: " + str(binascii.hexlify(hash)))
if str(binascii.hexlify(digest))==str(binascii.hexlify(hash)):
print("AVB-Status: VERIFIED, 0")
else:
print("AVB-Status: RED, 3 or ORANGE, 1")
modulus=int_to_bytes(pub_key.n)
exponent=int_to_bytes(pub_key.e)
mod=str(binascii.hexlify(modulus).decode('utf-8'))
print("\nSignature-RSA-Modulus (n): "+mod)
print("Signature-RSA-Exponent (e): " + str(binascii.hexlify(exponent).decode('utf-8')))
if mod=="eb0478815591b50e090702347db475af966f886ba5d3c1baa273851400aea7cc8481398defb7b747c33fda93512b9aefa538ea4ffc907b4836410782e57dbf7241080f5f380dd2362345fc09c3f15e122176951d07d06802fa5f2a821856dd002a8699fedad774d60be1ebc6c05e0db849375a43228c54d6c2fe28e88d530d971604ef7dc1a4e4faad79bff2e4bcc783dddcc798bbf7e0b9fc43e0d74930f8ae93d5c3f5971b0ddbcc881b9117267cdfa3d29d276fc8909440ef0cfa410a866ece65be77c551a3c838d629cebd27c7d62f38535f68484d248703c686359fa6ab3fdc6591153d79c50af6972d2b02fd3ddabef019d5da8699367ceceb853e4d3f":
print("\n!!!! Image seems to be signed by google test keys, yay !!!!")
sha256 = hashlib.sha256()
sha256.update(modulus+exponent)
pubkey_hash=sha256.digest()
locked=pubkey_hash+struct.pack('<I',0x0)
unlocked = pubkey_hash + struct.pack('<I', 0x1)
sha256 = hashlib.sha256()
sha256.update(locked)
root_of_trust_locked=sha256.digest()
sha256 = hashlib.sha256()
sha256.update(unlocked)
root_of_trust_unlocked=sha256.digest()
print("\nTZ Root of trust (locked): " + str(binascii.hexlify(root_of_trust_locked)))
print("TZ Root of trust (unlocked): " + str(binascii.hexlify(root_of_trust_unlocked)))
if args.inject:
pos = signature.find(target)
if (pos != -1):
lenpos = signature.find(struct.pack(">I",length)[0],pos)
if (lenpos!=-1):
with open(args.filename[0:-4]+"_signed.bin",'wb') as wf:
wf.write(data)
wf.write(signature[0:lenpos])
wf.write(struct.pack(">I",length))
wf.write(signature[lenpos+4:])
print("Successfully injected !")
if __name__ == "__main__":
main(sys.argv[1:])
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas_rhino
from compas_rhino.geometry import RhinoMesh
from compas_rv2.datastructures import Pattern
from compas_rv2.rhino import get_scene
from compas_rv2.rhino import rv2_undo
from compas_rv2.rhino import rv2_error
__commandname__ = "RV2pattern_from_mesh"
@rv2_error()
@rv2_undo
def RunCommand(is_interactive):
scene = get_scene()
if not scene:
return
guid = compas_rhino.select_mesh()
if not guid:
return
pattern = RhinoMesh.from_guid(guid).to_compas(cls=Pattern)
compas_rhino.rs.HideObject(guid)
scene.clear()
scene.add(pattern, name='pattern')
scene.update()
print("Pattern object successfully created. Input mesh has been hidden.")
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
RunCommand(True)
|
#!/usr/bin/env python
from bob.bio.face.database import ReplayBioDatabase
from bob.bio.base.pipelines.vanilla_biometrics import DatabaseConnector
from bob.extension import rc
replay_attack_directory = rc["bob.db.replay.directory"]
database = DatabaseConnector(
ReplayBioDatabase(
original_directory=replay_attack_directory,
original_extension=".mov",
protocol="grandtest-spoof",
),
annotation_type="bounding-box",
fixed_positions=None,
# Only compare with spoofs from the same target identity
allow_scoring_with_all_biometric_references=False,
)
|
import unittest
from ptools.lipytools.little_methods import stamp, print_nested_dict
class TestStamp(unittest.TestCase):
def test_stamp(self):
print(stamp())
print(stamp(letters=None))
print(stamp(year=True))
def test_print_nested_dict(self):
dc = {
'a0': {
'a1': {
'a2': ['el1','el2']
}
},
'b0': ['el1','el2','el3']
}
print_nested_dict(dc)
if __name__ == '__main__':
unittest.main()
|
from .constant import Service as Service_keys
from .services.base import Services
from .services import (
resource,
config,
logger,
)
class Application(object):
def __init__(self):
self.services = Services()
self.services.init()
def init(self):
services_tuple = (
(Service_keys.RESOURCE, resource.ResourceService),
(Service_keys.CONFIG_LOCAL, config.ConfigServiceLocal),
(Service_keys.LOGGER, logger.LoggerService),
)
for key, service in services_tuple:
self.services.register_service(key, service())
app = Application()
|
# Sum of multiples of 3 and 5
# OPTIMAL (<0.1s)
#
# APPROACH:
# Simple brute force is enough.
DUMMY_LIMIT = 10
DUMMY_RESULT = 23
LIMIT = 1000
def sum_multiples(limit):
return sum(i for i in range(1, limit) if not i % 3 or not i % 5)
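# A constant-time alternative via inclusion-exclusion over arithmetic
# series, shown as a sketch for comparison (the name sum_multiples_closed
# is ours, not part of the original solution):
def sum_multiples_closed(limit):
    def s(k):
        # sum of the positive multiples of k strictly below limit
        n = (limit - 1) // k
        return k * n * (n + 1) // 2
    return s(3) + s(5) - s(15)
assert sum_multiples_closed(DUMMY_LIMIT) == DUMMY_RESULT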
assert sum_multiples(DUMMY_LIMIT) == DUMMY_RESULT
result = sum_multiples(LIMIT)
|
from DAL.DAL_Prescription import DAL_Prescription
class BUS_Prescription():
def __init__(self):
self.dalPrescription = DAL_Prescription()
def loadPrescription(self, IDPatient):
return self.dalPrescription.selectAllPrescription(IDPatient=IDPatient)
def addPrescription(self, dtoPrescription):
try:
self.dalPrescription.addPrescription(dtoPrescription=dtoPrescription)
return 1
except Exception:
return 0
def updatePrescription(self, dtoPrescription):
try:
self.dalPrescription.updatePrescription(dtoPrescription=dtoPrescription)
return 1
except Exception:
return 0
def deletePrescription(self, IDPrescription):
try:
self.dalPrescription.deletePrescription(IDPrescription=IDPrescription)
return 1
except Exception:
return 0
|
import csv
class Helper:
def __init__(self,symbol,volume,min,max):
self.symbol = symbol
self.volume = volume
self.min = min
self.max = max
with open("example.csv", "r") as f:
reader = csv.reader(f)
header = next(reader) # skip the header row
d=dict()
for x in reader:
if len(x)<1:
continue
if x[0] not in d:
# first sighting of this symbol: seed min and max with its price
d[x[0]] = Helper(x[0], int(x[5]), float(x[1]), float(x[1]))
continue
if float(x[1]) > d[x[0]].max:
d[x[0]].max = float(x[1])
if float(x[1]) < d[x[0]].min:
d[x[0]].min = float(x[1])
d[x[0]].volume = int(x[5]) + d[x[0]].volume
with open("example1.csv", "w") as f:
writer = csv.writer(f)
writer.writerow(["Symbol","Volume","Max","Min"])
for x in sorted(d.keys()):
writer.writerow([d[x].symbol,d[x].volume,d[x].max,d[x].min])
|
import numpy as np
import sys
sys.path.insert(0, "../")
from nn_models import model2 as md
from data import formatImage
import matplotlib.image as image
"""
This initializes a neural net from a given file and tests it on a given image.
"""
inputSize = 28 * 28 # the number of input pixels
outputSize = 10 # the number of choices for objects
# labels for the data : this order matters!!
BALL = 0
LIGHTBULB = 1
SUN = 2
CLOUD = 3
EYE = 4
BIKE = 5
DOG = 6
FLOWER = 7
#labels = ["ball", "lightbulb", "sun", "cloud", "eye", "bike", "dog", "flower"]
labels = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
imgs = [
"../data/pics/basketball.png",
"../data/pics/LightBulb.png",
"../data/pics/sun.png",
"../data/pics/cloud.png",
"../data/pics/eye.png",
"../data/pics/bike.png",
"../data/pics/dog.png",
"../data/pics/flower.png"
]
def main():
composition = [inputSize, 100, 10,
outputSize] # the network composition
nn = md.Network(composition)
while True:
name = input(
"please enter the name of the file to initialize the network with (ex. net1): ")
try:
nn.load(name)
break
except Exception as e:
print(e)
while True:
imgName = input(
"please enter the file path of the png pic to test (enter q to quit): ")
if imgName == "q":
break
testImage(imgName, nn)
def testImage(img, nn):
"""
Test an image with the network
"""
if ".png" in img:
try:
if img in imgs:
pic = image.imread(img)
else:
formatImage.format(img, invert=True)
pic = image.imread(img)
pixels = pic.reshape(28 * 28, 1)
v = nn.test(pixels)
# print(str(v))
ans = np.argmax(v)
label = img.split("/")[-1]
print(labels[ans] + " " + str(v[ans]) +
" : EXPECTED " + label + "\n")
except Exception as e:
print(e)
pass
if __name__ == "__main__":
main()
|
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#
#  * Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the
#    distribution.
#
#  * Neither the name of the German Aerospace Center nor the names of
#    its contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
The view component of the data store configuration wizard.
"""
from qt import QPalette
from qt import QColorGroup
from qt import Qt
from datafinder.gui.gen.DataStoreConfigurationWizard import DataStoreConfigurationWizard
from datafinder.gui.admin.datastore_configuration_wizard import constants
from datafinder.gui.admin.common.utils import getPixmapForImageName
__version__ = "$Revision-Id:$"
# constant tuple that holds the "error color red" (RGB)
_errorRgbColorCode = (255, 0, 0)
class DataStoreConfigurationWizardView(DataStoreConfigurationWizard):
""" This class visualizes the Data Store configuration options. """
# pylint: disable=R0901
# There are too many parent classes as it inherits from QPopupMenu
def __init__(self, parentFrame):
"""
Constructor.
"""
DataStoreConfigurationWizard.__init__(self, parentFrame)
self.helpButton().hide()
# init dictionary to access page by the page constants
self.pageDictionary = {}
for pageIndex in range(self.pageCount()):
page = self.page(pageIndex)
title = unicode(self.title(page))
self.pageDictionary[title] = page
# init dictionary to access the error displaying elements by the page constants
self.errorLabelDictionary = {
constants.standardOptionsPage: (self.errorMessageLabel0, self.errorMessagePixmapLabel0),
constants.storageOptionsPage: (self.errorMessageLabel1, self.errorMessagePixmapLabel1),
constants.securityOptionsPage: (self.errorMessageLabel2, self.errorMessagePixmapLabel2),
constants.authenticationOptionsPage: (self.errorMessageLabel3, self.errorMessagePixmapLabel3),
constants.performanceOptionsPage: (self.errorMessageLabel4, self.errorMessagePixmapLabel4)
}
# save standard colors to be able to reset them
self.backGroundColor = self.palette().color(QPalette.Active, QColorGroup.Base)
def showCurrentErrorLabels(self, showIt, pageType, errorMessage=""):
""" Shows the given error message. """
errorMessageLabel = self.errorLabelDictionary[pageType][0]
errorMessagePixmapLabel = self.errorLabelDictionary[pageType][1]
if showIt:
errorMessagePixmapLabel.show()
else:
errorMessagePixmapLabel.hide()
errorMessageLabel.setText(errorMessage)
# resize wizard if the whole error message cannot be shown
errorMessageLabel.adjustSize()
if errorMessageLabel.width() > errorMessageLabel.minimumWidth():
self.adjustSize()
def showErrorSource(self, source, showIt):
""" Indicates the given error source with an "error color" border or removes this border. """
if source:
if showIt:
source.palette().setColor(QPalette.Active, QColorGroup.Base, Qt.red)
else:
source.palette().setColor(QPalette.Active, QColorGroup.Base, self.backGroundColor)
def setPageSequence(self, pageSequenceList):
"""
Adapts the sequence of the wizard pages.
@param pageSequenceList: list of page titles
"""
for index in range(self.pageCount() - 1, -1, -1):
page = self.page(index)
if page:
self.removePage(page)
count = 0
for pageTitle in pageSequenceList:
self.insertPage(self.pageDictionary[pageTitle],
pageTitle,
count)
count = count + 1
self.showPage(self.page(0))
def checkFinishButtonEnabledState(self):
"""
Checks the enabled state of the "Finish" button.
"""
currentPageIndex = self.indexOf(self.currentPage())
if self.pageCount() - 1 == currentPageIndex:
self.setFinishEnabled(self.currentPage(), True)
else:
self.setFinishEnabled(self.currentPage(), False)
def setDatastoreIcon(self, iconName):
"""
Shows the specified DataStore icon.
@param iconName: Absolute name of the icon.
@type iconName: C{unicode}
"""
pixmap = getPixmapForImageName(iconName, False)
self.selectedIconLabel.setPixmap(pixmap)
def transitionEnabled(self, enabled):
""" Enables or disables the transition to the next wizard page. """
currentPageIndex = self.indexOf(self.currentPage())
if self.pageCount() - 1 == currentPageIndex:
self.setFinishEnabled(self.currentPage(), enabled)
else:
self.setNextEnabled(self.currentPage(), enabled)
if currentPageIndex != 0:
self.setBackEnabled(self.currentPage(), enabled)
|
# standard library
from datetime import date
# Django
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required, user_passes_test
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, redirect, get_object_or_404
# from django.template.loader import render_to_string
from django.urls import reverse_lazy
# local Django
from accounts.models import Profile
from home.models import Calendar, Schedule
from home.views import has_authenticated_profile, is_volunteer
from .models import (
UpdateScheduleRequest, Volunteer,
VolunteerAttendance, VolunteerSchedule,
)
User = get_user_model()
# VIEWS FUNCTIONS
@login_required
@user_passes_test(
has_authenticated_profile,
login_url=reverse_lazy('accounts:complete_profile')
)
def index(request):
return HttpResponse('Hello there!')
@login_required
@user_passes_test(
has_authenticated_profile,
login_url=reverse_lazy('accounts:complete_profile')
)
def profile(request, pk):
volunteer = get_object_or_404(Volunteer, id=pk)
volun_schedule = VolunteerSchedule.objects.filter(volun=volunteer).order_by('day')
context = {
'volunteer': volunteer,
'self_profile': volunteer.profile.user == request.user,
'volun_schedule' : volun_schedule,
}
return render(request, 'volunteers/profile.html', context)
@login_required
@user_passes_test(
has_authenticated_profile,
login_url=reverse_lazy('accounts:complete_profile')
)
@user_passes_test(
is_volunteer, redirect_field_name=None,
login_url=reverse_lazy('home:dashboard')
)
# @permissions_required
def attendance(request):
today_cal = Calendar.objects.filter(date=date.today())
# TO BE REMOVED...
# Update today's date in Calendar if not already there
if today_cal.exists():
today_cal = today_cal[0]
else:
today_cal_new = Calendar(date=date.today())
today_cal_new.save()
today_cal = Calendar.objects.get(date=date.today())
# ...TILL HERE
context = {
'today_date': date.today(),
}
if not today_cal.class_scheduled:
context['no_class_today'] = True
return render(request, 'volunteers/attendance.html', context)
if not VolunteerAttendance.objects.filter(cal_date__date=date.today()).exists():
# Create Empty Volunteer Attendance Instances
today_vol_sch = VolunteerSchedule.objects.filter(
day=date.today().strftime("%w"))
for vol_sch in today_vol_sch:
vol_attendance = VolunteerAttendance(
volun=vol_sch.volun, cal_date=today_cal)
# TODO: Use bulk_create command instead.
vol_attendance.save()
context['today_vol_att'] = VolunteerAttendance.objects.filter(
cal_date=today_cal).order_by('volun__roll_no')
return render(request, 'volunteers/attendance.html', context)
@login_required
@user_passes_test(
has_authenticated_profile,
login_url=reverse_lazy('accounts:complete_profile')
)
@user_passes_test(
is_volunteer, redirect_field_name=None,
login_url=reverse_lazy('home:dashboard')
)
# @permissions_required
def ajax_mark_attendance(request):
"""Mark/unmark volunteer attendance."""
today_cal = Calendar.objects.get(date=date.today())
vol_id = request.GET['volun_id']
is_present = request.GET['is_present']
vol_att = VolunteerAttendance.objects.get(
volun__id=vol_id, cal_date=today_cal)
vol_att.present = (is_present == 'true')
vol_att.save()
data = {'success': True}
return JsonResponse(data)
@login_required
@user_passes_test(
has_authenticated_profile,
login_url=reverse_lazy('accounts:complete_profile')
)
@user_passes_test(
is_volunteer, redirect_field_name=None,
login_url=reverse_lazy('home:dashboard')
)
# @permissions_required
def ajax_add_extra_vol(request):
"""Add an extra volunteer to today's attendance list."""
today_cal = Calendar.objects.get(date=date.today())
roll_no = request.GET['roll_no']
volun = Volunteer.objects.filter(roll_no=roll_no).first()
if not volun:
data = {'success': False, 'error': 'Volunteer not found.'}
return JsonResponse(data)
vol_att = VolunteerAttendance.objects.filter(
volun=volun, cal_date=today_cal)
if vol_att.exists():
data = {'success': False, 'error': 'Volunteer already present in the list.'}
return JsonResponse(data)
# Create a new VolunteerAttendance instance
VolunteerAttendance.objects.create(
volun=volun, cal_date=today_cal, present=True, extra=True)
data = {
'success': True,
'roll_no': volun.roll_no,
'name': volun.profile.get_full_name,
'volun_id': volun.id
}
return JsonResponse(data)
@login_required
@user_passes_test(
has_authenticated_profile,
login_url=reverse_lazy('accounts:complete_profile')
)
# @permissions_required
def volunteers_list(request):
context = {
'day': Schedule.DAY,
}
return render(request, 'volunteers/volunteers_list.html', context)
@login_required
@user_passes_test(
has_authenticated_profile, redirect_field_name=None,
login_url=reverse_lazy('accounts:complete_profile')
)
# @permissions_required
def ajax_volunteers_list(request):
data = {}
vol_list_day = request.GET.get('vol_list_day', None)
if not vol_list_day:
return JsonResponse(data)
vol_to_show = VolunteerSchedule.objects.filter(day=vol_list_day)
for vol_sch in vol_to_show:
section_id = vol_sch.schedule.section.section_id
section_name = vol_sch.schedule.section.name
volun_id = vol_sch.volun_id
volun_roll_no = vol_sch.volun.roll_no
volun_name = vol_sch.volun.profile.get_full_name
# Use 'section_id' as key to display volunteers sorted by 'section_id'
# and 'roll_no' to make every key unique.
key = str(section_id) + str(volun_roll_no)
data[key] = [volun_id, volun_roll_no, volun_name, section_name]
return JsonResponse(data)
@login_required
@user_passes_test(
has_authenticated_profile,
login_url=reverse_lazy('accounts:complete_profile')
)
@user_passes_test(
is_volunteer, redirect_field_name=None,
login_url=reverse_lazy('home:dashboard')
)
def update_profile(request):
profile = Profile.objects.get(user=request.user)
volun = Volunteer.objects.get(profile=profile)
context = {
'profile': profile,
'volun': volun,
}
if request.method == 'POST':
roll_no = request.POST['roll_no']
if volun.roll_no != roll_no:
duplicate_roll_check = Volunteer.objects.filter(roll_no=roll_no)
if duplicate_roll_check.exists():
context['update_error'] = "A volunteer with entered roll no. already exists."
messages.error(request, 'Profile update failed!')
return render(request, 'volunteers/update_profile.html', context)
else:
volun.roll_no = roll_no
profile.first_name = request.POST['first_name']
profile.last_name = request.POST['last_name']
profile.gender = request.POST['gender']
profile.alt_email = request.POST['alt_email']
profile.street_address1 = request.POST['street_address1']
profile.street_address2 = request.POST['street_address2']
profile.pincode = request.POST['pincode']
profile.city = request.POST['city']
profile.state = request.POST['state']
profile.contact_no = request.POST['contact_no']
if 'profile_image' in request.FILES:
# Delete the previous profile image.
profile.profile_image.delete(False)
profile.profile_image = request.FILES.get('profile_image')
profile.save()
volun.batch = request.POST['batch']
volun.programme = request.POST['programme']
volun.dob = request.POST['dob']
volun.save()
messages.success(request, 'Profile updated successfully!')
return redirect('volunteers:update_profile')
return render(request, 'volunteers/update_profile.html', context)
@login_required
@user_passes_test(
has_authenticated_profile,
login_url=reverse_lazy('accounts:complete_profile')
)
@user_passes_test(
is_volunteer, redirect_field_name=None,
login_url=reverse_lazy('home:dashboard')
)
def update_schedule(request):
volun = Volunteer.objects.get(profile__user=request.user)
last_pending_req = UpdateScheduleRequest.objects.filter(
volun=volun, approved=False, declined=False, by_admin=False, cancelled=False)
if request.method == 'POST':
if request.POST.get('submit') == 'update-schedule':
new_day = request.POST['day']
new_section_id = request.POST['section']
schedule = Schedule.objects.get(
day=new_day, section__section_id=new_section_id)
# Cancel last pending request.
if last_pending_req.exists():
last_pending_req = last_pending_req[0]
last_pending_req.cancelled = True
last_pending_req.save()
# Create new request
update_req = UpdateScheduleRequest(
volun=volun, new_schedule=schedule)
prev_vol_sch = VolunteerSchedule.objects.filter(volun=volun)
if prev_vol_sch.exists():
update_req.previous_schedule = prev_vol_sch[0].schedule
update_req.save()
messages.success(
request, 'Schedule update requested successfully!')
return redirect('volunteers:update_schedule')
elif request.POST.get('submit') == 'cancel-last-req':
if last_pending_req.exists():
last_pending_req = last_pending_req[0]
last_pending_req.cancelled = True
last_pending_req.save()
messages.success(
request, 'Last request cancelled successfully!')
else:
messages.error(request, 'No pending requests.')
return redirect('volunteers:update_schedule')
context = {
'day': Schedule.DAY,
'update_req': UpdateScheduleRequest.objects.filter(volun=volun).order_by('-date'),
'last_pending_req': last_pending_req.exists(),
}
return render(request, 'volunteers/update_schedule.html', context)
@login_required
@user_passes_test(
has_authenticated_profile, redirect_field_name=None,
login_url=reverse_lazy('accounts:complete_profile')
)
@user_passes_test(
is_volunteer, redirect_field_name=None,
login_url=reverse_lazy('home:dashboard')
)
def ajax_update_schedule(request):
sch_day = request.GET.get('sch_day', None)
data = {}
schedule = Schedule.objects.filter(
day=sch_day).order_by('section__section_id')
for sch in schedule:
data[sch.section.section_id] = sch.section.name
return JsonResponse(data)
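# For reference, the JSON payload returned above maps section_id -> section
# name for the requested day; an illustrative example only:
# {"1": "Section A", "2": "Section B"}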
|
#!/usr/bin/env python3
##############################################################################
# Copyright 2020 spcnvdr <spcnvdrr@protonmail.com> #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# 1. Redistributions of source code must retain the above copyright notice, #
# this list of conditions and the following disclaimer. #
# #
# 2. Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in the #
# documentation and/or other materials provided with the distribution. #
# #
# 3. Neither the name of the copyright holder nor the names of its #
# contributors may be used to endorse or promote products derived from #
# this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED #
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR #
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING #
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS #
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
# #
# This file is meant to hold the functions used to print the route delays #
# in a nice looking ASCII art box. #
# #
##############################################################################
# Print the start of the table used to display hop delays
#
def print_heading():
print("\n")
print("\u250C" + "\u2500"*7 + "\u252C" + "\u2500"*20 + "\u2510")
print("\u2502" + " Hop # " + "\u2502" + " Delay (in seconds) " + "\u2502")
print("\u251c" + "\u2500" * 7 + "\u253c" + "\u2500" * 20 + "\u2524")
# Print a row in the hop delay table
# @param hop the hop number to display in the row
# @param delay the delay time to display in the row
#
def print_row(hop, delay):
h_format = "{0:{align}{width}}".format(hop, align="^", width=7)
d_format = "{0:{align}{width}}".format(delay, align="^", width=20)
print("\u2502" + h_format + "\u2502" + d_format + "\u2502")
# Print the separator between rows of data in the table
#
def print_sep():
print("\u251c" + "\u2500" * 7 + "\u253c" + "\u2500" * 20 + "\u2524")
# Print the end of the table
#
def print_end():
print("\u2514" + "\u2500" * 7 + "\u2534" + "\u2500" * 20 + "\u2518")
# Print the total delay time centered in a nice format
#
def print_total(time):
if time//60 != 0:
mins = int(time // 60)
sec = int(time % 60)
output = "Total: " + str(mins) + " min. " + str(sec) + " sec."
total = "{0:{align}{width}}".format(output, align="^", width=30)
print(total)
else:
output = "Total: " + str(time) + " sec."
print("{0:{align}{width}}".format(output, align="^", width=30))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-09 11:18
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('houses', '0087_auto_20170109_1214'),
]
operations = [
migrations.RenameField(
model_name='room',
old_name='people_now_living_female',
new_name='looking_for_female',
),
migrations.RenameField(
model_name='room',
old_name='people_now_living_male',
new_name='looking_for_male',
),
migrations.RenameField(
model_name='room',
old_name='looking_for_gender',
new_name='people_now_living_gender',
),
]
|
from typing import List, Optional
from pypika import Table
from pypika.enums import Dialects
from pypika.functions import Lower
from pypika.queries import QueryBuilder
from app.models.division_model import Division, DivisionStatus
from app.repositories.base_repository import BaseRepository
from app.utils.postgres import db_connection
class DivisionRepository(BaseRepository):
ENTITY = Division
TABLE_NAME = "divisions"
TABLE = Table(TABLE_NAME)
@classmethod
async def index(
cls,
limit: int = 10,
offset: int = 0,
name_prefix: Optional[str] = None,
status: Optional[DivisionStatus] = None,
) -> List[Division]:
query = QueryBuilder(dialect=Dialects.POSTGRESQL).from_(cls.TABLE).select("*")
        if name_prefix:
            # Case-insensitive prefix match: lowercase the pattern and anchor a
            # trailing wildcard, since LIKE without '%' is an exact match.
            query = query.where(Lower(cls.TABLE.name).like(name_prefix.lower() + "%"))
        if status:
            query = query.where(cls.TABLE.status == status)
        # pypika slice syntax: query[offset:limit] translates to OFFSET/LIMIT.
        query = query[offset:limit]
sql = query.get_sql()
async with db_connection() as connection:
records = await connection.fetch(sql)
divisions = [
Division(id=record["id"], status=record["status"], name=record["name"], lead_id=record["lead_id"],)
for record in records
]
return divisions
@classmethod
async def upsert(cls, division: Division):
async with db_connection() as connection:
record = await connection.fetchrow(
f"""
INSERT INTO {cls.TABLE_NAME}
(status, name, lead_id)
VALUES ($1, $2, $3)
ON CONFLICT (id) DO UPDATE SET
status = EXCLUDED.status,
name = EXCLUDED.name,
lead_id = EXCLUDED.lead_id
RETURNING *
""",
*cls.model_to_tuple(division),
)
return cls.record_to_entity(record)
@classmethod
def model_to_tuple(cls, model):
return (model.status, model.name, model.lead_id)
@classmethod
def record_to_entity(cls, record) -> Division:
division = cls.ENTITY(id=record["id"], status=record["status"], name=record["name"], lead_id=record["lead_id"],)
return division
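# A minimal usage sketch, assuming a reachable database configured for
# db_connection() and that DivisionStatus exposes the member used below;
# all names and values here are illustrative only.
#
# import asyncio
#
# async def demo():
#     saved = await DivisionRepository.upsert(
#         Division(id=None, status=DivisionStatus.ACTIVE, name="Platform", lead_id=1))
#     divisions = await DivisionRepository.index(limit=5, name_prefix="plat")
#     print(saved, divisions)
#
# asyncio.run(demo())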
|
import pytest
import numpy as np
import pybnesian as pbn
from pybnesian import BayesianNetwork, GaussianNetwork
import util_test
df = util_test.generate_normal_data(10000)
def test_create_bn():
gbn = GaussianNetwork(['a', 'b', 'c', 'd'])
assert gbn.num_nodes() == 4
assert gbn.num_arcs() == 0
assert gbn.nodes() == ['a', 'b', 'c', 'd']
gbn = GaussianNetwork(['a', 'b', 'c', 'd'], [('a', 'c')])
assert gbn.num_nodes() == 4
assert gbn.num_arcs() == 1
assert gbn.nodes() == ['a', 'b', 'c', 'd']
gbn = GaussianNetwork([('a', 'c'), ('b', 'd'), ('c', 'd')])
assert gbn.num_nodes() == 4
assert gbn.num_arcs() == 3
assert gbn.nodes() == ['a', 'c', 'b', 'd']
with pytest.raises(TypeError) as ex:
gbn = GaussianNetwork(['a', 'b', 'c'], [('a', 'c', 'b')])
assert "incompatible constructor arguments" in str(ex.value)
with pytest.raises(IndexError) as ex:
gbn = GaussianNetwork(['a', 'b', 'c'], [('a', 'd')])
assert "not present in the graph" in str(ex.value)
with pytest.raises(ValueError) as ex:
gbn = GaussianNetwork([('a', 'b'), ('b', 'c'), ('c', 'a')])
assert "must be a DAG" in str(ex.value)
with pytest.raises(ValueError) as ex:
gbn = GaussianNetwork(['a', 'b', 'c', 'd'], [('a', 'b'), ('b', 'c'), ('c', 'a')])
assert "must be a DAG" in str(ex.value)
with pytest.raises(ValueError) as ex:
gbn = BayesianNetwork(pbn.GaussianNetworkType(), ['a', 'b', 'c', 'd'], [], [('a', pbn.CKDEType())])
assert "Wrong factor type" in str(ex.value)
def gbn_generator():
# Test different Networks created with different constructors.
gbn = GaussianNetwork(['a', 'b', 'c', 'd'])
yield gbn
gbn = GaussianNetwork([('a', 'c'), ('b', 'd'), ('c', 'd')])
yield gbn
gbn = GaussianNetwork(['a', 'b', 'c', 'd'], [('a', 'b'), ('b', 'c')])
yield gbn
def test_nodes_util():
for gbn in gbn_generator():
assert gbn.num_nodes() == 4
nodes = gbn.nodes()
indices = gbn.indices()
assert nodes[gbn.index('a')] == 'a'
assert nodes[gbn.index('b')] == 'b'
assert nodes[gbn.index('c')] == 'c'
assert nodes[gbn.index('d')] == 'd'
assert indices[gbn.name(0)] == 0
assert indices[gbn.name(1)] == 1
assert indices[gbn.name(2)] == 2
assert indices[gbn.name(3)] == 3
assert gbn.contains_node('a')
assert gbn.contains_node('b')
assert gbn.contains_node('c')
assert gbn.contains_node('d')
assert not gbn.contains_node('e')
def test_parent_children():
gbn = GaussianNetwork(['a', 'b', 'c', 'd'])
assert gbn.num_parents('a') == 0
assert gbn.num_parents('b') == 0
assert gbn.num_parents('c') == 0
assert gbn.num_parents('d') == 0
assert gbn.parents('a') == []
assert gbn.parents('b') == []
assert gbn.parents('c') == []
assert gbn.parents('d') == []
assert gbn.num_children('a') == 0
assert gbn.num_children('b') == 0
assert gbn.num_children('c') == 0
assert gbn.num_children('d') == 0
gbn = GaussianNetwork([('a', 'c'), ('b', 'd'), ('c', 'd')])
assert gbn.num_parents('a') == 0
assert gbn.num_parents('b') == 0
assert gbn.num_parents('c') == 1
assert gbn.num_parents('d') == 2
assert gbn.parents('a') == []
assert gbn.parents('b') == []
assert gbn.parents('c') == ['a']
assert set(gbn.parents('d')) == set(['b', 'c'])
assert gbn.num_children('a') == 1
assert gbn.num_children('b') == 1
assert gbn.num_children('c') == 1
assert gbn.num_children('d') == 0
gbn = GaussianNetwork(['a', 'b', 'c', 'd'], [('a', 'b'), ('b', 'c')])
assert gbn.num_parents('a') == 0
assert gbn.num_parents('b') == 1
assert gbn.num_parents('c') == 1
assert gbn.num_parents('d') == 0
assert gbn.parents('a') == []
assert gbn.parents('b') == ['a']
assert gbn.parents('c') == ['b']
assert gbn.parents('d') == []
assert gbn.num_children('a') == 1
assert gbn.num_children('b') == 1
assert gbn.num_children('c') == 0
assert gbn.num_children('d') == 0
def test_arcs():
gbn = GaussianNetwork(['a', 'b', 'c', 'd'])
assert gbn.num_arcs() == 0
assert gbn.arcs() == []
assert not gbn.has_arc('a', 'b')
gbn.add_arc('a', 'b')
assert gbn.num_arcs() == 1
assert gbn.arcs() == [('a', 'b')]
assert gbn.parents('b') == ['a']
assert gbn.num_parents('b') == 1
assert gbn.num_children('a') == 1
assert gbn.has_arc('a', 'b')
gbn.add_arc('b', 'c')
assert gbn.num_arcs() == 2
assert set(gbn.arcs()) == set([('a', 'b'), ('b', 'c')])
assert gbn.parents('c') == ['b']
assert gbn.num_parents('c') == 1
assert gbn.num_children('b') == 1
assert gbn.has_arc('b', 'c')
gbn.add_arc('d', 'c')
assert gbn.num_arcs() == 3
assert set(gbn.arcs()) == set([('a', 'b'), ('b', 'c'), ('d', 'c')])
assert set(gbn.parents('c')) == set(['b', 'd'])
assert gbn.num_parents('c') == 2
assert gbn.num_children('d') == 1
assert gbn.has_arc('d', 'c')
assert gbn.has_path('a', 'c')
assert not gbn.has_path('a', 'd')
assert gbn.has_path('b', 'c')
assert gbn.has_path('d', 'c')
assert not gbn.can_add_arc('c', 'a')
    # This arc already exists, so adding it again is a no-op and is considered allowed.
assert gbn.can_add_arc('b', 'c')
assert gbn.can_add_arc('d', 'a')
gbn.add_arc('b', 'd')
assert gbn.num_arcs() == 4
assert set(gbn.arcs()) == set([('a', 'b'), ('b', 'c'), ('d', 'c'), ('b', 'd')])
assert gbn.parents('d') == ['b']
assert gbn.num_parents('d') == 1
assert gbn.num_children('b') == 2
assert gbn.has_arc('b', 'd')
assert gbn.has_path('a', 'd')
assert not gbn.can_add_arc('d', 'a')
assert not gbn.can_flip_arc('b', 'c')
assert gbn.can_flip_arc('a', 'b')
    # This arc does not exist, but it could be flipped if it did.
assert gbn.can_flip_arc('d', 'a')
    # Adding the same arc twice leaves the graph unchanged.
gbn.add_arc('b', 'd')
assert gbn.num_arcs() == 4
assert set(gbn.arcs()) == set([('a', 'b'), ('b', 'c'), ('d', 'c'), ('b', 'd')])
assert gbn.parents('d') == ['b']
assert gbn.num_parents('d') == 1
assert gbn.num_children('b') == 2
assert gbn.has_arc('b', 'd')
gbn.remove_arc('b', 'c')
assert gbn.num_arcs() == 3
assert set(gbn.arcs()) == set([('a', 'b'), ('d', 'c'), ('b', 'd')])
assert gbn.parents('c') == ['d']
assert gbn.num_parents('c') == 1
assert gbn.num_children('b') == 1
assert not gbn.has_arc('b', 'c')
assert gbn.can_add_arc('b', 'c')
assert not gbn.can_add_arc('c', 'b')
assert gbn.has_path('a', 'c')
assert gbn.has_path('b', 'c')
gbn.remove_arc('d', 'c')
assert gbn.num_arcs() == 2
assert set(gbn.arcs()) == set([('a', 'b'), ('b', 'd')])
assert gbn.parents('c') == []
assert gbn.num_parents('c') == 0
assert gbn.num_children('d') == 0
assert not gbn.has_arc('d', 'c')
assert gbn.can_add_arc('b', 'c')
assert gbn.can_add_arc('c', 'b')
assert not gbn.has_path('a', 'c')
assert not gbn.has_path('b', 'c')
def test_bn_fit():
gbn = GaussianNetwork([('a', 'b'), ('a', 'c'), ('a', 'd'), ('b', 'c'), ('b', 'd'), ('c', 'd')])
with pytest.raises(ValueError) as ex:
for n in gbn.nodes():
cpd = gbn.cpd(n)
assert "not added" in str(ex.value)
gbn.fit(df)
for n in gbn.nodes():
cpd = gbn.cpd(n)
assert cpd.variable() == n
assert cpd.evidence() == gbn.parents(n)
gbn.fit(df)
gbn.remove_arc('a', 'b')
cpd_b = gbn.cpd('b')
    # The cached CPD is stale until fit() is called again.
    assert cpd_b.evidence() != gbn.parents('b')
gbn.fit(df)
cpd_b = gbn.cpd('b')
assert cpd_b.evidence() == gbn.parents('b')
def test_add_cpds():
gbn = GaussianNetwork([('a', 'b'), ('a', 'c'), ('a', 'd'), ('b', 'c'), ('b', 'd'), ('c', 'd')])
with pytest.raises(ValueError) as ex:
gbn.add_cpds([pbn.LinearGaussianCPD('e', [])])
assert "variable which is not present" in str(ex.value)
with pytest.raises(ValueError) as ex:
gbn.add_cpds([pbn.LinearGaussianCPD('a', ['e'])])
assert "Evidence variable" in str(ex.value)
with pytest.raises(ValueError) as ex:
gbn.add_cpds([pbn.LinearGaussianCPD('a', ['b'])])
assert "CPD do not have the model's parent set as evidence" in str(ex.value)
with pytest.raises(ValueError) as ex:
gbn.add_cpds([pbn.LinearGaussianCPD('b', [])])
assert "CPD do not have the model's parent set as evidence" in str(ex.value)
with pytest.raises(ValueError) as ex:
gbn.add_cpds([pbn.LinearGaussianCPD('b', ['c'])])
assert "CPD do not have the model's parent set as evidence" in str(ex.value)
lg = pbn.LinearGaussianCPD('b', ['a'], [2.5, 1.65], 4)
assert lg.fitted()
gbn.add_cpds([lg])
cpd_b = gbn.cpd('b')
assert cpd_b.variable() == 'b'
assert cpd_b.evidence() == ['a']
assert cpd_b.fitted()
assert np.all(cpd_b.beta == np.asarray([2.5, 1.65]))
assert cpd_b.variance == 4
with pytest.raises(ValueError) as ex:
cpd_a = gbn.cpd('a')
assert "CPD of variable \"a\" not added. Call add_cpds() or fit() to add the CPD." in str(ex.value)
with pytest.raises(ValueError) as ex:
cpd_c = gbn.cpd('c')
assert "CPD of variable \"c\" not added. Call add_cpds() or fit() to add the CPD." in str(ex.value)
with pytest.raises(ValueError) as ex:
cpd_d = gbn.cpd('d')
assert "CPD of variable \"d\" not added. Call add_cpds() or fit() to add the CPD." in str(ex.value)
with pytest.raises(ValueError) as ex:
gbn.add_cpds([pbn.LinearGaussianCPD('e', [])])
assert "variable which is not present" in str(ex.value)
def test_bn_logl():
gbn = GaussianNetwork([('a', 'b'), ('a', 'c'), ('a', 'd'), ('b', 'c'), ('b', 'd'), ('c', 'd')])
gbn.fit(df)
test_df = util_test.generate_normal_data(5000)
ll = gbn.logl(test_df)
sll = gbn.slogl(test_df)
sum_ll = np.zeros((5000,))
sum_sll = 0
for n in gbn.nodes():
cpd = gbn.cpd(n)
l = cpd.logl(test_df)
s = cpd.slogl(test_df)
assert np.all(np.isclose(s, l.sum()))
sum_ll += l
sum_sll += s
assert np.all(np.isclose(ll, sum_ll))
assert np.isclose(sll, ll.sum())
assert sll == sum_sll
def test_bn_sample():
gbn = GaussianNetwork(['a', 'c', 'b', 'd'], [('a', 'b'), ('a', 'c'), ('a', 'd'), ('b', 'c'), ('b', 'd'), ('c', 'd')])
gbn.fit(df)
sample = gbn.sample(1000, 0, False)
    # Not ordered, so the columns follow a topological sort of the graph.
assert sample.schema.names == ['a', 'b', 'c', 'd']
assert sample.num_rows == 1000
sample_ordered = gbn.sample(1000, 0, True)
assert sample_ordered.schema.names == ['a', 'c', 'b', 'd']
assert sample_ordered.num_rows == 1000
assert sample.column(0).equals(sample_ordered.column(0))
assert sample.column(1).equals(sample_ordered.column(2))
assert sample.column(2).equals(sample_ordered.column(1))
assert sample.column(3).equals(sample_ordered.column(3))
other_seed = gbn.sample(1000, 1, False)
assert not sample.column(0).equals(other_seed.column(0))
assert not sample.column(1).equals(other_seed.column(2))
assert not sample.column(2).equals(other_seed.column(1))
assert not sample.column(3).equals(other_seed.column(3))
|
from urllib.parse import urlparse
def check_if_legit(domain_name):
    if domain_name in illegit_sites:
        return "Fake / Malicious"
    if domain_name in legit_sites:
        return "Authentic"
    return "Not found as a news aggregator"
def checker(complete_link):
global legit_sites
global illegit_sites
with open("fakenewsFE\\true_dataset.txt") as true_data:
legit_sites = true_data.readlines()
legit_sites = [x.strip() for x in legit_sites]
with open("fakenewsFE\\fake_dataset.txt") as fake_data:
illegit_sites = fake_data.readlines()
illegit_sites = [x.strip() for x in illegit_sites]
broken_url = urlparse(complete_link)
domain_name = '{uri.scheme}://{uri.netloc}/'.format(uri=broken_url)
if(domain_name[:4] != 'http'):
domain_name = "http://" + domain_name
print(domain_name)
if(domain_name[:5] == 'https'):
domain_name = domain_name.replace('https://www.', '')
domain_name = domain_name.replace('https://', '')
if(domain_name[-1] == '/'):
domain_name = domain_name[:-1]
outputx = check_if_legit(domain_name)
return outputx
    if(domain_name[:4] == 'http'):
        domain_name = domain_name.replace('http://www.', '')
        domain_name = domain_name.replace('http://', '')
        if(domain_name[-1] == '/'):
            domain_name = domain_name[:-1]
        outputx = check_if_legit(domain_name)
        return outputx
domain_name = domain_name.replace('www.', '')
if(domain_name[-1] == '/'):
domain_name = domain_name[:-1]
outputx = check_if_legit(domain_name)
return outputx
legit_sites = []
illegit_sites = []
pp = checker("http://www.ndtv.com")
#print(legit_sites)
print(pp)
|
#!/usr/bin/env python3
# coding: utf8
print('Hello Python!!')
|
#!/usr/bin/env python3
#
# Note that we do not deploy pip as part of the installation, since it does take
# quite a bit of space (> 8 MB), and using pip together with an "embedded Python"
# isn't officially supported. If we do want to ship pip in a future version,
# please see the following as a starting point on how to do this:
#
# https://stackoverflow.com/questions/42666121/pip-with-embedded-python
#
# Note that instead of "shipping VGC with pip", one option could be to ship a
# script that downloads get-pip.py, runs it, and adds to pythonXY._pth:
#
# Lib/site-packages
# import site
#
from pathlib import Path
import argparse
import shutil
import sys
import urllib.request
import zipfile
# Copies a file from the given src Path to the given dest Path, creating
# directories as necessary.
#
def copyFile(src, dest):
dest.parent.mkdir(parents=True, exist_ok=True)
shutil.copyfile(str(src), str(dest))
# Script entry point.
#
if __name__ == "__main__":
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("buildDir", help="path to the build directory")
parser.add_argument("config", help="build configuration (e.g.: 'Release', 'Debug')")
args = parser.parse_args()
# Import arguments into the global namespace.
    # This lets us write `foo` instead of `args.foo`.
globals().update(vars(args))
# Get useful paths
buildDir = Path(buildDir)
pythonDir = Path(sys.executable).parent
embedDir = pythonDir / "embed"
binDir = buildDir / config / "bin"
# Get version-dependent python name, e.g.:
# pythonXY = "python37"
# versionXYZ = "3.7.4"
version = sys.version_info
pythonXY = "python{}{}".format(version.major, version.minor)
versionXYZ = "{}.{}.{}".format(version.major, version.minor, version.micro)
# We first look inside the python installation to see if the embed folder is
# there. If not, we download the embed folder directly in the build dir.
if not embedDir.exists():
embedDir = buildDir / "embed"
if not embedDir.exists():
embedZipFileName = "python-{}-embed-amd64.zip".format(versionXYZ)
embedZipUrl = "https://www.python.org/ftp/python/{}/{}".format(versionXYZ, embedZipFileName)
embedZipPath = buildDir / embedZipFileName
if not embedZipPath.exists():
print("Downloading {}".format(embedZipUrl))
print(" to {}".format(str(embedZipPath)))
urllib.request.urlretrieve(embedZipUrl, str(embedZipPath))
print("Done.")
embedZipFile = zipfile.ZipFile(str(embedZipPath), 'r')
embedZipFile.extractall(str(embedDir))
embedZipFile.close()
# Extract Python built-in libraries to <build>/<config>/python
# We don't keep them zipped for runtime performance
zipPath = embedDir / (pythonXY + ".zip")
zipFile = zipfile.ZipFile(str(zipPath), 'r')
zipFile.extractall(str(buildDir / config / "python"))
zipFile.close()
    # Copy all other embed files to <build>/<config>/bin
for child in embedDir.iterdir():
if child != zipPath and child.is_file():
copyFile(child, binDir / child.name)
# Modify the default pythonXY._pth file:
# - add correct location of python libs
# - uncomment 'import site' so that functions like exit() and help() work
pthPath = binDir / (pythonXY + "._pth")
pthPath.write_text("../python\n.\nimport site")
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utils."""
import os
from absl.testing import absltest
from alphafold_paddle.common import protein
from alphafold_paddle.relax import utils
import numpy as np
# Internal import (7716).
class UtilsTest(absltest.TestCase):
def test_overwrite_b_factors(self):
testdir = os.path.join(
absltest.get_default_test_srcdir(),
'alphafold/relax/testdata/'
'multiple_disulfides_target.pdb')
with open(testdir) as f:
test_pdb = f.read()
n_residues = 191
bfactors = np.stack([np.arange(0, n_residues)] * 37, axis=-1)
output_pdb = utils.overwrite_b_factors(test_pdb, bfactors)
# Check that the atom lines are unchanged apart from the B-factors.
    atom_lines_original = [l for l in test_pdb.split('\n') if l[:4] == 'ATOM']
    atom_lines_new = [l for l in output_pdb.split('\n') if l[:4] == 'ATOM']
for line_original, line_new in zip(atom_lines_original, atom_lines_new):
self.assertEqual(line_original[:60].strip(), line_new[:60].strip())
self.assertEqual(line_original[66:].strip(), line_new[66:].strip())
# Check B-factors are correctly set for all atoms present.
as_protein = protein.from_pdb_string(output_pdb)
np.testing.assert_almost_equal(
np.where(as_protein.atom_mask > 0, as_protein.b_factors, 0),
np.where(as_protein.atom_mask > 0, bfactors, 0))
if __name__ == '__main__':
absltest.main()
|
import pytest
from helpers.utils import (
NAMESPACE,
VALUES_DISABLE_EVERYTHING,
cleanup_helm,
cleanup_k8s,
exec_subprocess,
install_chart,
install_custom_resources,
merge_yaml,
wait_for_pods_to_be_ready,
)
VALUES_ACCESS_CLICKHOUSE = merge_yaml(
VALUES_DISABLE_EVERYTHING,
"""
clickhouse:
enabled: true
zookeeper:
enabled: true
web:
enabled: true
migrate:
enabled: true
postgresql:
enabled: true
redis:
enabled: true
pgbouncer:
enabled: true
""",
)
VALUES_EXTERNAL_CLICKHOUSE = merge_yaml(
VALUES_DISABLE_EVERYTHING,
"""
clickhouse:
enabled: true
cluster: kubetest
database: kubetest_db
user: kubeuser
password: kubetestpw
zookeeper:
enabled: true
""",
)
VALUES_ACCESS_EXTERNAL_CLICKHOUSE_VIA_PASSWORD = merge_yaml(
VALUES_DISABLE_EVERYTHING,
"""
web:
enabled: true
migrate:
enabled: true
postgresql:
enabled: true
redis:
enabled: true
pgbouncer:
enabled: true
externalClickhouse:
host: "clickhouse-posthog.clickhouse.svc.cluster.local"
cluster: kubetest
database: kubetest_db
user: kubeuser
password: kubetestpw
""",
)
VALUES_ACCESS_EXTERNAL_CLICKHOUSE_VIA_SECRET = merge_yaml(
VALUES_ACCESS_EXTERNAL_CLICKHOUSE_VIA_PASSWORD,
"""
externalClickhouse:
host: "clickhouse-posthog.clickhouse.svc.cluster.local"
cluster: kubetest
database: kubetest_db
user: kubeuser
existingSecret: clickhouse-existing-secret
existingSecretPasswordKey: clickhouse-password
""",
)
def test_can_connect_from_web_pod(kube):
install_chart(VALUES_ACCESS_CLICKHOUSE)
wait_for_pods_to_be_ready(kube)
verify_can_connect_to_clickhouse(kube)
def test_can_connect_external_clickhouse_via_password(kube):
setup_external_clickhouse()
install_chart(VALUES_ACCESS_EXTERNAL_CLICKHOUSE_VIA_PASSWORD)
wait_for_pods_to_be_ready(kube)
verify_can_connect_to_clickhouse(kube)
def test_can_connect_external_clickhouse_via_secret(kube):
install_custom_resources("./custom_k8s_resources/clickhouse_external_secret.yaml")
setup_external_clickhouse()
install_chart(VALUES_ACCESS_EXTERNAL_CLICKHOUSE_VIA_SECRET)
wait_for_pods_to_be_ready(kube)
verify_can_connect_to_clickhouse(kube)
def setup_external_clickhouse():
# :TRICKY: We can't use a single docker image since posthog relies on clickhouse being installed in a cluster
install_chart(VALUES_EXTERNAL_CLICKHOUSE, namespace="clickhouse")
def verify_can_connect_to_clickhouse(kube):
"Checks whether clickhouse is connectable from the web pod"
pods = kube.get_pods(namespace=NAMESPACE, labels={"role": "web"})
pod = list(pods.values())[0]
command = " ".join(
[
f"kubectl exec --stdin --tty {pod.name} -n posthog",
"--",
"python manage.py shell_plus -c",
"\"print('connection check success', sync_execute('select count() from events')[0][0])\"",
]
)
# This command will exit with an error code if clickhouse is not connectable
exec_subprocess(command)
@pytest.fixture(autouse=True)
def before_each_cleanup():
cleanup_k8s([NAMESPACE, "clickhouse"])
cleanup_helm([NAMESPACE, "clickhouse"])
|
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
# fmt: off
class Tests:
camera_creation = ("Camera Entity successfully created", "Camera Entity failed to be created")
camera_component_added = ("Camera component was added to entity", "Camera component failed to be added to entity")
camera_component_check = ("Entity has a Camera component", "Entity failed to find Camera component")
creation_undo = ("UNDO Entity creation success", "UNDO Entity creation failed")
creation_redo = ("REDO Entity creation success", "REDO Entity creation failed")
directional_light_creation = ("Directional Light Entity successfully created", "Directional Light Entity failed to be created")
directional_light_component = ("Entity has a Directional Light component", "Entity failed to find Directional Light component")
shadow_camera_check = ("Directional Light component Shadow camera set", "Directional Light component Shadow camera was not set")
enter_game_mode = ("Entered game mode", "Failed to enter game mode")
exit_game_mode = ("Exited game mode", "Couldn't exit game mode")
is_visible = ("Entity is visible", "Entity was not visible")
is_hidden = ("Entity is hidden", "Entity was not hidden")
entity_deleted = ("Entity deleted", "Entity was not deleted")
deletion_undo = ("UNDO deletion success", "UNDO deletion failed")
deletion_redo = ("REDO deletion success", "REDO deletion failed")
no_error_occurred = ("No errors detected", "Errors were detected")
# fmt: on
def AtomEditorComponents_DirectionalLight_AddedToEntity():
"""
Summary:
Tests the Directional Light component can be added to an entity and has the expected functionality.
Test setup:
- Wait for Editor idle loop.
- Open the "Base" level.
Expected Behavior:
The component can be added, used in game mode, hidden/shown, deleted, and has accurate required components.
Creation and deletion undo/redo should also work.
Test Steps:
1) Create a Directional Light entity with no components.
2) Add Directional Light component to Directional Light entity.
3) UNDO the entity creation and component addition.
4) REDO the entity creation and component addition.
5) Enter/Exit game mode.
6) Test IsHidden.
7) Test IsVisible.
8) Add Camera entity.
9) Add Camera component to Camera entity
10) Set the Directional Light component property Shadow|Camera to the Camera entity.
11) Delete Directional Light entity.
12) UNDO deletion.
13) REDO deletion.
14) Look for errors.
:return: None
"""
import azlmbr.legacy.general as general
import azlmbr.math as math
from editor_python_test_tools.editor_entity_utils import EditorEntity
from editor_python_test_tools.utils import Report, Tracer, TestHelper as helper
with Tracer() as error_tracer:
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
helper.init_idle()
helper.open_level("", "Base")
# Test steps begin.
# 1. Create a Directional Light entity with no components.
directional_light_name = "Directional Light"
directional_light_entity = EditorEntity.create_editor_entity_at(
math.Vector3(512.0, 512.0, 34.0), directional_light_name)
Report.critical_result(Tests.directional_light_creation, directional_light_entity.exists())
# 2. Add Directional Light component to Directional Light entity.
directional_light_component = directional_light_entity.add_component(directional_light_name)
Report.critical_result(
Tests.directional_light_component, directional_light_entity.has_component(directional_light_name))
# 3. UNDO the entity creation and component addition.
# -> UNDO component addition.
general.undo()
# -> UNDO naming entity.
general.undo()
# -> UNDO selecting entity.
general.undo()
# -> UNDO entity creation.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.creation_undo, not directional_light_entity.exists())
# 4. REDO the entity creation and component addition.
# -> REDO entity creation.
general.redo()
# -> REDO selecting entity.
general.redo()
# -> REDO naming entity.
general.redo()
# -> REDO component addition.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.creation_redo, directional_light_entity.exists())
# 5. Enter/Exit game mode.
helper.enter_game_mode(Tests.enter_game_mode)
general.idle_wait_frames(1)
helper.exit_game_mode(Tests.exit_game_mode)
# 6. Test IsHidden.
directional_light_entity.set_visibility_state(False)
Report.result(Tests.is_hidden, directional_light_entity.is_hidden() is True)
# 7. Test IsVisible.
directional_light_entity.set_visibility_state(True)
general.idle_wait_frames(1)
Report.result(Tests.is_visible, directional_light_entity.is_visible() is True)
# 8. Add Camera entity.
camera_name = "Camera"
camera_entity = EditorEntity.create_editor_entity_at(math.Vector3(512.0, 512.0, 34.0), camera_name)
Report.result(Tests.camera_creation, camera_entity.exists())
# 9. Add Camera component to Camera entity.
camera_entity.add_component(camera_name)
Report.result(Tests.camera_component_added, camera_entity.has_component(camera_name))
# 10. Set the Directional Light component property Shadow|Camera to the Camera entity.
shadow_camera_property_path = "Controller|Configuration|Shadow|Camera"
directional_light_component.set_component_property_value(shadow_camera_property_path, camera_entity.id)
shadow_camera_set = directional_light_component.get_component_property_value(shadow_camera_property_path)
Report.result(Tests.shadow_camera_check, camera_entity.id == shadow_camera_set)
        # 11. Delete Directional Light entity.
directional_light_entity.delete()
Report.result(Tests.entity_deleted, not directional_light_entity.exists())
# 12. UNDO deletion.
general.undo()
Report.result(Tests.deletion_undo, directional_light_entity.exists())
# 13. REDO deletion.
general.redo()
Report.result(Tests.deletion_redo, not directional_light_entity.exists())
# 14. Look for errors.
helper.wait_for_condition(lambda: error_tracer.has_errors, 1.0)
Report.result(Tests.no_error_occurred, not error_tracer.has_errors)
if __name__ == "__main__":
from editor_python_test_tools.utils import Report
Report.start_test(AtomEditorComponents_DirectionalLight_AddedToEntity)
|
from django.conf import settings
from django.db import models
from django.utils import timezone
class Post(models.Model):
title = models.CharField(max_length=200)
post_url = models.TextField()
img_url = models.TextField()
published_date = models.DateField()
created_date = models.DateTimeField(default=timezone.now)
def __str__(self):
        return self.title
|
# Generated by Django 2.2.13 on 2020-07-29 10:06
import django.contrib.postgres.fields
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('commodities', '0002_auto_20200723_1020'),
('barriers', '0063_auto_20200428_1008'),
]
operations = [
migrations.AddField(
model_name='historicalbarrierinstance',
name='commodities_cache',
field=django.contrib.postgres.fields.ArrayField(base_field=django.contrib.postgres.fields.jsonb.JSONField(), default=list, size=None),
),
migrations.CreateModel(
name='BarrierCommodity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=10)),
('country', models.UUIDField()),
('created_on', models.DateTimeField(auto_now_add=True)),
('barrier', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='barrier_commodities', to='barriers.BarrierInstance')),
('commodity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='barrier_commodities', to='commodities.Commodity')),
],
),
migrations.AddField(
model_name='barrierinstance',
name='commodities',
field=models.ManyToManyField(through='barriers.BarrierCommodity', to='commodities.Commodity'),
),
]
|
import win32api
import win32gui
import win32con
import win32ui
from .base_handler import Base
class FrontEnd(Base):
name = "frontend"
@classmethod
def get_point(cls, hwnd):
return win32gui.GetCursorPos()
    @classmethod
    def move_to(cls, x, y):
        # Absolute mouse move; coordinates are scaled against a hardcoded
        # 3840x2160 desktop, as in the original code.
        win32api.mouse_event(win32con.MOUSEEVENTF_ABSOLUTE|win32con.MOUSEEVENTF_MOVE, x * 65535 // 3840, y * 65535 // 2160)
        return True
    @classmethod
    def left_click_at_point(cls, hwnd, point):
        # Absolute left click; note this method scales against 2560x1440,
        # unlike move_to above.
        win32api.mouse_event(win32con.MOUSEEVENTF_ABSOLUTE|win32con.MOUSEEVENTF_LEFTDOWN|win32con.MOUSEEVENTF_LEFTUP, point[0] * 65535 // 2560, point[1] * 65535 // 1440)
        return True
    @classmethod
    def right_click_at_point(cls, hwnd, point):
        # Renamed from a duplicate definition of left_click_at_point, which
        # silently replaced the method above. Performs a right click at the
        # current cursor position (point is unused by the mouse_event call).
        win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTDOWN|win32con.MOUSEEVENTF_RIGHTUP, 0, 0)
        return True
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""helper functions for unit tests"""
import tensorflow as tf
import numpy as np
def _create_random_point_cloud_segmented(batch_size,
num_points,
dimension=3,
sizes=None,
scale=1,
clean_aabb=False,
equal_sized_batches=False):
points = np.random.uniform(0, scale, [num_points, dimension])
if sizes is None:
if not equal_sized_batches:
batch_ids = np.random.randint(0, batch_size, num_points)
batch_ids[:batch_size] = np.arange(0, batch_size)
else:
batch_ids = np.repeat(np.arange(0, batch_size), num_points // batch_size)
# batch_ids = np.sort(batch_ids)
else:
sizes = np.array(sizes, dtype=int)
batch_ids = np.repeat(np.arange(0, batch_size), sizes)
if clean_aabb:
# adds points such that the aabb is [0,0,0] [1,1,1]*scale
# to prevent rounding errors
points = np.concatenate(
(points, scale * np.ones([batch_size, dimension]) - 1e-9,
1e-9 + np.zeros([batch_size, dimension])))
batch_ids = np.concatenate(
(batch_ids, np.arange(0, batch_size), np.arange(0, batch_size)))
return points, batch_ids
def _create_random_point_cloud_padded(max_num_points,
batch_shape,
dimension=3,
sizes=None,
scale=1):
batch_size = np.prod(batch_shape)
points = np.random.uniform(
0, scale, [max_num_points * batch_size, dimension])
points = points.reshape(batch_shape + [max_num_points, dimension])
if sizes is None:
sizes = np.random.randint(1, max_num_points, batch_shape)
return points, sizes
def _create_uniform_distributed_point_cloud_2D(num_points_sqrt,
scale=1,
flat=False):
ticks = np.linspace(0, scale, num=num_points_sqrt)
points = np.array(np.meshgrid(ticks, ticks)).T
if flat:
points = points.reshape(-1, 2)
return points
def _create_uniform_distributed_point_cloud_3D(num_points_root,
bb_min=0,
bb_max=1,
flat=False):
ticks = np.linspace(bb_min, bb_max, num=num_points_root, endpoint=False)
points = np.array(np.meshgrid(ticks, ticks, ticks)).T
if flat:
points = points.reshape(-1, 3)
return points
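# A minimal usage sketch of the helpers above (values are random; only the
# shapes are checked):
#
# points, batch_ids = _create_random_point_cloud_segmented(batch_size=4, num_points=100)
# assert points.shape == (100, 3) and batch_ids.shape == (100,)
#
# grid = _create_uniform_distributed_point_cloud_2D(10, flat=True)
# assert grid.shape == (100, 2)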
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-11-01 07:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Cate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cate_name', models.CharField(max_length=64, verbose_name='名字')),
],
options={
'verbose_name_plural': '新闻类别表',
'db_table': 'cate',
},
),
migrations.CreateModel(
name='New',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_time', models.DateTimeField(verbose_name='发表时间')),
('new_seenum', models.IntegerField(default=0, verbose_name='浏览次数')),
('new_disnum', models.IntegerField(default=0, verbose_name='跟帖次数')),
('index_image_url', models.ImageField(default='SOME STRING', upload_to='', verbose_name='新闻列表图片路径')),
('new_title', models.CharField(max_length=100, verbose_name='标题')),
('new_source', models.TextField(default='Fantasy News', max_length=20, verbose_name='新闻来源')),
('digest', models.CharField(default='SOME STRING', max_length=500)),
('new_content', models.TextField(verbose_name='新闻内容')),
('new_cate', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='类别', to='index.Cate')),
],
options={
'verbose_name_plural': '新闻信息表',
'db_table': 'new',
},
),
]
|
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random
import uuid
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
DB_HOST = os.getenv("MYSQL_DB_HOST", "mysql.platiagro")
DB_NAME = os.getenv("MYSQL_DB_NAME", "platiagro")
DB_USER = os.getenv("MYSQL_DB_USER", "root")
DB_PASS = os.getenv("MYSQL_DB_PASSWORD", "")
DB_URL = f"mysql+pymysql://{DB_USER}:{DB_PASS}@{DB_HOST}/{DB_NAME}"
engine = create_engine(DB_URL,
convert_unicode=True,
pool_size=5,
pool_recycle=300)
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
def insert_task(**kwargs):
"""
    Inserts a new task in the database. Avoids duplicate task names.
Parameters
----------
**kwargs
Arbitrary keyword arguments.
    Returns
    -------
    str
        The uuid of the newly inserted task, or the uuid of the existing
        task when a task with the same name already exists.
"""
name = kwargs.get("name")
description = kwargs.get("description")
category = kwargs.get("category", "DEFAULT")
tags = kwargs.get("tags", [])
image = kwargs.get("image")
commands = kwargs.get("commands")
arguments = kwargs.get("arguments")
is_default = kwargs.get("is_default")
parameters = kwargs.get("parameters", [])
experiment_notebook_path = kwargs.get("experiment_notebook_path")
deployment_notebook_path = kwargs.get("deployment_notebook_path")
cpu_limit = kwargs.get("cpu_limit")
cpu_request = kwargs.get("cpu_request")
memory_limit = kwargs.get("memory_limit")
memory_request = kwargs.get("memory_request")
readiness_probe_initial_delay_seconds = kwargs.get("readiness_probe_initial_delay_seconds", 60)
conn = engine.connect()
    # Use a bound parameter instead of string interpolation to avoid SQL injection.
    text = "SELECT uuid FROM tasks WHERE name=%s LIMIT 1"
    result = conn.execute(text, name)
row = result.fetchone()
if row:
return row[0]
# saves task info to the database
task_id = str(uuid_alpha())
created_at = datetime.datetime.now()
arguments_json = json.dumps(arguments)
commands_json = json.dumps(commands)
parameters_json = json.dumps(parameters)
tags_json = json.dumps(tags)
text = (
"INSERT INTO tasks (uuid, name, description, category, image, commands, arguments, parameters, tags, "
"experiment_notebook_path, deployment_notebook_path, cpu_limit, cpu_request, memory_limit, memory_request, "
"readiness_probe_initial_delay_seconds, is_default, created_at, updated_at) "
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
)
conn.execute(
text,
task_id,
name,
description,
category,
image,
commands_json,
arguments_json,
parameters_json,
tags_json,
experiment_notebook_path,
deployment_notebook_path,
cpu_limit,
cpu_request,
memory_limit,
memory_request,
readiness_probe_initial_delay_seconds,
is_default,
created_at,
created_at,
)
conn.close()
return task_id
def uuid_alpha():
"""
    Generates a uuid that always starts with an alpha char.
Returns
-------
str
"""
uuid_ = str(uuid.uuid4())
if not uuid_[0].isalpha():
c = random.choice(["a", "b", "c", "d", "e", "f"])
uuid_ = f"{c}{uuid_[1:]}"
return uuid_
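# A minimal usage sketch for insert_task(); the values are illustrative and
# a reachable MySQL instance (configured via the env vars above) is assumed.
#
# task_id = insert_task(
#     name="Example task",
#     description="Created for illustration only",
#     image="example/image:latest",
#     commands=["echo", "hello"],
#     arguments=[],
#     is_default=False,
# )
# print(task_id)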
|
__all__ = ["LarkParser", "ParserError", "Lark2Django"]
#from .lark_parser import LarkParser, ParserError
from QueryLarkDjangoParser import LarkParser, ParserError, Lark2Django
|
class Pessoa:
def __init__(self,nome=None,idade=0,*filhos):
self.nome = nome
self.idade = idade
self.filhos = list(filhos)
def cumprimentar(self):
return 'Olá'
if __name__ == '__main__':
matheus = Pessoa('Matheus',25)
edmilson = Pessoa("Edmilson",60,matheus)
    for filho in edmilson.filhos:
        print(filho.nome)
|
import datetime
from datetime import date
from io import BytesIO
import pandas as pd
import numpy as np
import re
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.operators.bash import BashOperator
from airflow.models import Variable
from minio import Minio
DEFAULT_ARGS = {
'owner': 'Airflow',
'depends_on_past': False,
'start_date': datetime.datetime(2021, 1, 13),
}
dag = DAG('etl_webscraping_acomodacoes',
default_args=DEFAULT_ARGS,
schedule_interval="@once"
)
data_lake_server = Variable.get("data_lake_server")
data_lake_login = Variable.get("data_lake_login")
data_lake_password = Variable.get("data_lake_password")
client = Minio(
data_lake_server,
access_key=data_lake_login,
secret_key=data_lake_password,
secure=False
)
today = date.today()
current_date = today.strftime("%d_%m_%Y")
def extract():
    # Extract the data from the Data Lake.
url_acomodacoes = client.presigned_get_object("landing", 'webscraping-acomodacoes/webscraping_acomodacoeshash_' + current_date + '.csv')
url_bairros = client.presigned_get_object("processing", "categorias-bairros/bairros_eleitos.csv")
df_acomodacoes = pd.read_csv(url_acomodacoes)
df_bairros = pd.read_csv(url_bairros)
    # Persist the files in the staging area.
df_acomodacoes.to_csv("/tmp/acomodacoeshash.csv", index=False)
df_bairros.to_csv("/tmp/bairros_eleitos.csv", index=False)
def transform():
    # Read the data from the staging area.
df = pd.read_csv("/tmp/acomodacoeshash.csv")
df_bairros = pd.read_csv("/tmp/bairros_eleitos.csv")
##############################################################
    # Column - vaga_garagem (parking space)
##############################################################
map_vaga_garagem = {
'Não': 0,
'Sim': 1
}
df['vaga_garagem'] = df['vaga_garagem'].map(map_vaga_garagem)
df['vaga_garagem'] = df['vaga_garagem'].fillna(0)
print('Coluna - Vaga de garagem: OK')
##############################################################
    # Column - area
##############################################################
    # Keep only the number and convert it to int
areas = []
for i in range(len(df['area'])):
try:
            area_value = int(re.findall(r'\d[0-9]+', df['area'][i])[0])
except:
area_value = np.nan
areas.append(area_value)
df['area'] = areas
print('Coluna - Área: OK')
##############################################################
    # Column - aluguel (rent)
##############################################################
    # Extract numbers containing ',' and convert to float
values_extracted = []
for i in range(len(df['aluguel'])):
try:
value_extracted = re.findall('[0-9]+[,]*', df['aluguel'][i])
value_extracted = float(''.join(value_extracted).replace(',', '.'))
except:
value_extracted = np.nan
values_extracted.append(value_extracted)
df['aluguel'] = values_extracted
print('Coluna - Aluguel: OK')
##############################################################
    # Column - condominio (condo fee)
##############################################################
    # Extract only the numbers and convert to float
values_extracted = []
for i in range(len(df['condominio'])):
try:
value_extracted = re.findall('[0-9]+[,.][0-9]+', df['condominio'][i])
value_extracted = [float(x.replace(',', '.')) for x in value_extracted]
value_extracted = sum(value_extracted)
except:
value_extracted = np.nan
values_extracted.append(value_extracted)
df['condominio'] = values_extracted
print('Coluna - Condomínio: OK')
##############################################################
    # Column - bairro (neighborhood)
##############################################################
texts_extracted = [x.split('-')[0].strip() for x in df['bairro']]
df['bairro'] = texts_extracted
bairro_map = {
'Jardim Cidade Universitária I': 'JD. Cidade Universitária I',
'CIDADE UNIVERSITARIA': 'JD. Cidade Universitária I',
' CIDADE UNIVERSITARIA': 'JD. Cidade Universitária I',
'Jardim Cidade Universitaria I': 'JD. Cidade Universitária I',
'Jardim Cidade Universitaria I - Limeira/SP': 'JD. Cidade Universitária I',
'Jardim Cidade Universitária I - Limeira/Sp ': 'JD. Cidade Universitária I',
'Jardim Paulista': 'JD. Paulista',
'JD. PAULISTA': 'JD. Paulista',
'Jardim Paulista - Limeira/Sp': 'JD. Paulista',
'Jardim Morro Azul': 'JD. Morro Azul',
'Jardim Morro Azul - Limeira/Sp ': 'JD. Morro Azul',
'CHACARA ANTONIETA': 'Chácara Antonieta',
' CHACARA ANTONIETA': 'Chácara Antonieta',
'Chácaras Antonieta - Limeira/Sp': 'Chácara Antonieta',
'Chácaras Antonieta - Limeira/Sp ': 'Chácara Antonieta',
'Jardim São Paulo': 'JD. São Paulo'
}
df['bairro'] = df['bairro'].map(bairro_map)
print('Coluna - Bairro: OK')
##############################################################
    # Filter columns
##############################################################
df = df.drop(columns=['imovel_url', 'nome'])
print('Filtrando colunas: OK')
##############################################################
    # Remap the real estate agency codes
##############################################################
imob_map = {
'53dd1202c5ef8ce3878ffbd4b3c79bd2': 'A',
'ef23a7e1738f4b316011bbdd88e514a2': 'B',
'c7234506476bbf0aff48eda764ff9eba': 'C'
}
df['imob'] = df['imob'].map(imob_map)
print('Imobiliárias códigos: OK')
##############################################################
    # Create column: total
##############################################################
df['total'] = df['aluguel'] + df['condominio']
print('Criação da coluna total: OK')
##############################################################
    # Create column: price per square meter
##############################################################
df['preco_m2'] = df['total'] / df['area']
df['preco_m2'] = df['preco_m2'].replace(np.inf, np.nan)
print('Criação da coluna preço por metro quadrado: OK')
##############################################################
    # Clean up the elected-neighborhoods dataframe
##############################################################
df_bairros = df_bairros.drop(columns=['lat', 'lon'])
df_bairros = df_bairros.iloc[[7, 11, 10, 19],:]
df_bairros = df_bairros.sort_values(by='dist').reset_index(drop=True)
print('Tratando dataframe dos bairros eleitos: OK')
##############################################################
    # Create column: distance to the university
##############################################################
dists_unicamp = []
for bairro in df['bairro']:
if bairro == 'JD. Cidade Universitária I':
dists_unicamp.append(df_bairros.loc[2, 'dist'])
elif bairro == 'JD. Paulista':
dists_unicamp.append(df_bairros.loc[0, 'dist'])
elif bairro == 'JD. Morro Azul':
dists_unicamp.append(df_bairros.loc[1, 'dist'])
elif bairro == 'Chácara Antonieta':
dists_unicamp.append(df_bairros.loc[3, 'dist'])
elif bairro == 'JD. São Paulo':
dists_unicamp.append(np.nan)
else:
dists_unicamp.append(np.nan)
df['dist_unicamp'] = dists_unicamp
    print('Criação da coluna distância da faculdade: OK')
##############################################################
    # Remove rows with null rent
##############################################################
df = df[df['aluguel'].notna()]
print('Remoção das linhas com aluguel nulo: OK')
    # Persist the transformed data in the staging area.
df.to_csv("/tmp/acomodacoeshash.csv", index=False)
def load():
    # Load the data from the staging area.
df_ = pd.read_csv("/tmp/acomodacoeshash.csv")
acomodacoes_filename = 'acomodacoes_hashcode_' + current_date + '.parquet'
    # Convert the data to parquet format.
df_.to_parquet("/tmp/" + acomodacoes_filename, index=False)
    # Upload the data to the Data Lake.
client.fput_object(
"processing",
acomodacoes_filename,
"/tmp/" + acomodacoes_filename
)
extract_task = PythonOperator(
task_id='extract_file_from_data_lake',
provide_context=True,
python_callable=extract,
dag=dag
)
transform_task = PythonOperator(
task_id='transform_data',
provide_context=True,
python_callable=transform,
dag=dag
)
load_task = PythonOperator(
task_id='load_file_to_data_lake',
provide_context=True,
python_callable=load,
dag=dag
)
clean_task = BashOperator(
task_id="clean_files_on_staging",
bash_command="rm -f /tmp/*.csv;rm -f /tmp/*.json;rm -f /tmp/*.parquet;",
dag=dag
)
extract_task >> transform_task >> load_task >> clean_task
|
from PyQt5.QtWidgets import QWidget, QGridLayout, QPushButton
from PyQt5.QtCore import Qt, QSize
from PyQt5.QtGui import QPixmap, QIcon
from app.editor.lib.state_editor.state_enums import MainEditorScreenStates
from app.resources.resources import RESOURCES
from app.data.database import DB
from app.data.overworld import OverworldPrefab
from app.extensions.custom_gui import RightClickListView
from app.editor.base_database_gui import DragDropCollectionModel
import app.editor.tilemap_editor as tilemap_editor
from app.utilities import str_utils
class OverworldDatabase(QWidget):
def __init__(self, state_manager):
super().__init__()
self.state_manager = state_manager
self.grid = QGridLayout()
self.setLayout(self.grid)
def deletion_func(model, index):
return len(DB.overworlds) > 1
self.view = RightClickListView((deletion_func, None, None), self)
self.view.setMinimumSize(128, 320)
self.view.setIconSize(QSize(64, 64))
self.view.currentChanged = self.on_map_changed
self.view.doubleClicked.connect(self.on_double_click)
self.model = OverworldModel(DB.overworlds, self)
self.view.setModel(self.model)
self.model.drag_drop_finished.connect(self.catch_drag)
self.button = QPushButton("Create New Overworld...")
self.button.clicked.connect(self.model.append)
self.grid.addWidget(self.view, 0, 0)
self.grid.addWidget(self.button, 1, 0)
self.state_manager.subscribe_to_key(
OverworldDatabase.__name__, 'ui_refresh_signal', self.update_view)
def on_map_changed(self, curr, prev):
if DB.overworlds:
new_overworld = DB.overworlds[curr.row()]
self.state_manager.change_and_broadcast(
'selected_overworld', new_overworld.nid)
def catch_drag(self):
if DB.overworlds:
index = self.view.currentIndex()
new_overworld = DB.overworlds[index.row()]
self.state_manager.change_and_broadcast(
'selected_overworld', new_overworld.nid)
def on_double_click(self, index):
if DB.overworlds:
selected_overworld = DB.overworlds[index.row()]
self.state_manager.change_and_broadcast(
'selected_overworld', selected_overworld.nid)
self.state_manager.change_and_broadcast(
'main_editor_mode', MainEditorScreenStates.OVERWORLD_EDITOR)
def create_initial_overworld(self):
nids = [m.nid for m in DB.overworlds]
nid = str(str_utils.get_next_int("0", nids))
DB.overworlds.append(OverworldPrefab(nid, 'Overworld'))
self.model.dataChanged.emit(self.model.index(
0), self.model.index(self.model.rowCount()))
first_index = self.model.index(0)
self.view.setCurrentIndex(first_index)
def update_view(self, _=None):
self.model.layoutChanged.emit()
class OverworldModel(DragDropCollectionModel):
def data(self, index, role):
if not index.isValid():
return None
if role == Qt.DisplayRole:
overworld = self._data[index.row()]
text = overworld.nid + " : " + overworld.name
return text
elif role == Qt.DecorationRole:
overworld = self._data[index.row()]
res = RESOURCES.tilemaps.get(overworld.tilemap)
if res:
image = tilemap_editor.draw_tilemap(res)
img = QIcon(QPixmap.fromImage(image))
return img
return None
def create_new(self):
nids = [m.nid for m in DB.overworlds]
nid = str(str_utils.get_next_int("0", nids))
name = "Overworld %s" % nid
# Create new overworld
new_overworld = OverworldPrefab(nid, name)
DB.overworlds.append(new_overworld)
return new_overworld
|
import requests
res = requests.get("http://api.aoikujira.com/time/get.php")
text = res.text
print(text)
binary = res.content
print(binary)
|
from typing import List
import os
from sqlalchemy.orm.exc import NoResultFound
import sqlalchemy.sql
from ping_dashboard.data import db_session
from ping_dashboard.data.location import Location
def init_db():
top_folder = os.path.dirname(__file__)
rel_file = os.path.join('..', 'db', 'ping_dashboard.sqlite')
db_file = os.path.abspath(os.path.join(top_folder, rel_file))
db_session.global_init(db_file)
def get_server_urls() -> List[Location]:
init_db()
session = db_session.create_session()
locations = session.query(Location). \
all()
session.close()
return locations
def sorted_server_urls():
init_db()
session = db_session.create_session()
locations = session.query(Location).\
order_by(Location.ping.desc()).\
all()
session.close()
return locations
def does_customer_exist(location_id):
init_db()
session = db_session.create_session()
    exists = session.query(sqlalchemy.exists().where(Location.id == location_id)).scalar()
    if exists:
        print(f'Customer {location_id} exists.')
    else:
        print(f'Customer {location_id} does not exist.')
    return exists
def remove_customer(location_id):
init_db()
session = db_session.create_session()
location = session.query(Location) \
.filter(Location.id == location_id) \
.first()
session.delete(location)
session.commit()
session.close()
def get_anonymize_customers(locations) -> List:
customer_number = 1
anonymize_customers = []
for _ in locations:
customer_name = f'Cust_{customer_number}'
anonymize_customers.append(customer_name)
customer_number += 1
return anonymize_customers
def update_server(friendly_name, anonymize_name, response_time, ping, location_status, url, status_color, type):
init_db()
session = db_session.create_session()
try:
s = session.query(Location).filter(Location.id == friendly_name).one()
# print(f'Location {friendly_name} exists.')
s.id = friendly_name
s.anonymized_name = anonymize_name
s.response_time = response_time
s.ping = ping
s.status = location_status
s.url = url
s.status_color = status_color
s.type = type
session.commit()
    except NoResultFound:
        # print(f'Location {friendly_name} does not exist.')
        s = Location()
        s.id = friendly_name
        s.anonymized_name = anonymize_name
        s.url = url
        s.status = location_status
        s.response_time = response_time
        s.ping = ping
        s.status_color = status_color
        s.type = type
        # Reuse the session opened above instead of creating a second one,
        # which previously leaked the first session.
        session.add(s)
        session.commit()
    session.close()
|
import heapq
from collections import defaultdict
# Custom object to store the required values
class Element:
def __init__(self, frequency, sequence, number):
self.frequency = frequency
self.sequence = sequence
self.number = number
    def __lt__(self, other):
        # heapq implements a min-heap, so __lt__ is inverted to make the
        # element with the higher frequency sort first (a max-heap).
        if self.frequency != other.frequency:
            return self.frequency > other.frequency
        # On equal frequency, the element pushed later (higher sequence) wins.
        return self.sequence > other.sequence
class FreqStack(object):
def __init__(self):
self.sequence = 0
self.frequencyMap = defaultdict(int)
self.maxHeap = []
def push(self, x):
"""
:type x: int
:rtype: None
"""
self.frequencyMap[x] += 1
heapq.heappush(self.maxHeap, Element(self.frequencyMap[x], self.sequence, x))
self.sequence += 1
def pop(self):
"""
:rtype: int
"""
numberToPop = heapq.heappop(self.maxHeap).number
self.frequencyMap[numberToPop] -= 1
return numberToPop
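# Illustrative trace (hypothetical values): push(5), push(7), push(5), push(7),
# push(4), push(5); then pop() returns 5 (freq 3), 7 (freq 2, pushed later than
# the remaining 5), 5, and finally 4 -- the max-heap order defined by
# Element.__lt__ above (heapq is a min-heap, so the comparison is inverted).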
# Your FreqStack object will be instantiated and called as such:
# obj = FreqStack()
# obj.push(x)
# param_2 = obj.pop() |
lambda x: x + 1
|
from lockfish.nc import nc
from testing import *
from lockfish.clangparser import *
import unittest  # ensure the stdlib module is available for unittest.main() below
rdr()
res = parse_folder('csourcelim', '.c')
rdrstop()
rdrv=rdrval()
r2=[r.cursor for r in res]
restus=res
res=r2
#Testing node collection
class TestNC(tc):
#1
def testPreps(self):
global res
global rdrv
self.assertTrue('Done' in rdrv)
self.assertTrue(len(restus) == 5)
#2
def testNCInit(self):
global res
c1 = nc(res)
self.assertTrue(c1.l == res)
#3
def testNCAddAppendIndexPlusLenInit(self):
global res
c1 = nc([])
c1.append(res[0])
c2 = nc([])
c2.append(res[1])
c3 = c1 + c2
self.assertTrue(c3[0] == res[0])
self.assertTrue(c3[1] == res[1])
self.assertTrue(len(c3) == 2)
self.assertTrue(c3.count() == 2)
i = 0
for n in c3:
self.assertTrue(n == res[i])
i = i + 1
rdr()
c3.pprint()
rdrstop()
self.assertTrue("sys_socket.c" in rdrval())
c4=nc(c3)
for n in c4:
self.assertTrue(n in c3)
if __name__ == '__main__':
unittest.main()
|
expected_output = {
"interface": {
"Loopback0": {
"interface_status": "Up",
"ip_address": "200.0.7.1",
"protocol_status": "Up"
},
"MgmtEth0/RSP0/CPU0/0": {
"interface_status": "Up",
"ip_address": "5.25.27.1",
"protocol_status": "Up"
},
"MgmtEth0/RSP0/CPU0/1": {
"interface_status": "Shutdown",
"ip_address": "unassigned",
"protocol_status": "Down"
},
"MgmtEth0/RSP1/CPU0/0": {
"interface_status": "Up",
"ip_address": "5.25.27.2",
"protocol_status": "Up"
},
"MgmtEth0/RSP1/CPU0/1": {
"interface_status": "Shutdown",
"ip_address": "unassigned",
"protocol_status": "Down"
}
}
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Fetches line spectra from the NIST Atomic Spectra Database.
"""
from astropy import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astroquery.nist`.
"""
server = _config.ConfigItem(
['http://physics.nist.gov/cgi-bin/ASD/lines1.pl'],
'Name of the NIST URL to query.'
)
timeout = _config.ConfigItem(
30,
'Time limit for connecting to NIST server.'
)
conf = Conf()
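# Usage sketch (assumed astropy config conventions): values can be overridden at
# runtime, e.g. ``conf.timeout = 60``, or via the astropy configuration file.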
from .core import Nist, NistClass
__all__ = ['Nist', 'NistClass',
'Conf', 'conf',
]
|
"""
This parent playbook collects data and launches appropriate child playbooks to gather threat intelligence information about indicators. After the child playbooks have run, this playbook posts the notes to the container and prompts the analyst to add tags to each enriched indicator based on the intelligence provided.
"""
import phantom.rules as phantom
import json
from datetime import datetime, timedelta
def on_start(container):
phantom.debug('on_start() called')
# call 'list_investigate_playbooks' block
list_investigate_playbooks(container=container)
return
def list_investigate_playbooks(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("list_investigate_playbooks() called")
parameters = []
parameters.append({
"name": None,
"repo": "local",
"tags": "investigate, threat_intel",
"category": None,
"playbook_type": "input",
})
################################################################################
## Custom Code Start
################################################################################
# Write your custom code here...
################################################################################
## Custom Code End
################################################################################
phantom.custom_function(custom_function="community/playbooks_list", parameters=parameters, name="list_investigate_playbooks", callback=decision_1)
return
def decision_1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("decision_1() called")
# check for 'if' condition 1
found_match_1 = phantom.decision(
container=container,
conditions=[
["list_investigate_playbooks:custom_function_result.data.*.name", "!=", ""]
])
# call connected blocks if condition 1 matched
if found_match_1:
collect_all_indicators(action=action, success=success, container=container, results=results, handle=handle)
return
return
def collect_all_indicators(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("collect_all_indicators() called")
id_value = container.get("id", None)
parameters = []
parameters.append({
"container": id_value,
})
################################################################################
## Custom Code Start
################################################################################
# Write your custom code here...
################################################################################
## Custom Code End
################################################################################
phantom.custom_function(custom_function="community/indicator_collect", parameters=parameters, name="collect_all_indicators", callback=launch_investigate_playbooks)
return
def launch_investigate_playbooks(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("launch_investigate_playbooks() called")
################################################################################
# Determine if any investigate playbooks are available with input types matching
# the indicators in the container, and synchronously launch any playbooks that
# are found. By default, this will look for local playbooks only, but it can be
# changed to use community playbooks.
################################################################################
list_investigate_playbooks_data = phantom.collect2(container=container, datapath=["list_investigate_playbooks:custom_function_result.data.*.full_name","list_investigate_playbooks:custom_function_result.data.*.input_spec"])
collect_all_indicators_data_all_indicators = phantom.collect2(container=container, datapath=["collect_all_indicators:custom_function_result.data.all_indicators.*.cef_value","collect_all_indicators:custom_function_result.data.all_indicators.*.data_types"])
list_investigate_playbooks_data___full_name = [item[0] for item in list_investigate_playbooks_data]
list_investigate_playbooks_data___input_spec = [item[1] for item in list_investigate_playbooks_data]
collect_all_indicators_data_all_indicators___cef_value = [item[0] for item in collect_all_indicators_data_all_indicators]
collect_all_indicators_data_all_indicators___data_types = [item[1] for item in collect_all_indicators_data_all_indicators]
launch_investigate_playbooks__playbooks_launched = None
################################################################################
## Custom Code Start
################################################################################
playbooks_launched = []
# loop through each playbook with the matching tags
for playbook in list_investigate_playbooks_data:
playbook_name = playbook[0]
input_spec = playbook[1]
phantom.debug(playbook_name)
inputs_to_provide = []
# loop through each input parameter, matching only the "indicators" input
for param in input_spec:
if param['name'] == 'indicators':
# loop through each accepted data type
for accepted_data_type in param['contains']:
# loop through each indicator in the container and add any indicators with matching data types to the "inputs_to_provide" list
for indicator in collect_all_indicators_data_all_indicators:
for indicator_type in indicator[1]:
# if the types match and the indicator value is not already in the inputs_to_provide then add it now
if indicator_type == accepted_data_type and indicator[0] not in inputs_to_provide:
inputs_to_provide.append(indicator[0])
# back in the playbook loop, call the playbook if there are any inputs
if inputs_to_provide != []:
playbook_run_name = playbook_name.split('/')[1].replace(' ','_').lower()
playbook_input = {'indicators': inputs_to_provide}
phantom.debug('launching playbook {} with input {}'.format(playbook_name, playbook_input))
phantom.playbook(playbook=playbook_name, container=container, name=playbook_run_name, inputs=playbook_input, callback=add_notes)
playbooks_launched.append(playbook_run_name)
launch_investigate_playbooks__playbooks_launched = playbooks_launched
################################################################################
## Custom Code End
################################################################################
phantom.save_run_data(key="launch_investigate_playbooks:playbooks_launched", value=json.dumps(launch_investigate_playbooks__playbooks_launched))
add_notes(container=container)
return
def add_notes(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("add_notes() called")
################################################################################
# Add notes to the container if any were generated by playbooks from the previous
# step.
################################################################################
launch_investigate_playbooks__playbooks_launched = json.loads(phantom.get_run_data(key="launch_investigate_playbooks:playbooks_launched"))
input_parameter_0 = ""
################################################################################
## Custom Code Start
################################################################################
playbooks_launched = launch_investigate_playbooks__playbooks_launched
# return early if any of the launched playbooks are not completed
if not phantom.completed(playbook_names=launch_investigate_playbooks__playbooks_launched):
return
playbook_outputs = []
for playbook_name in playbooks_launched:
note_title = phantom.collect2(container=container, datapath=["{}:playbook_output:note_title".format(playbook_name)])[0][0]
note_content = phantom.collect2(container=container, datapath=["{}:playbook_output:note_content".format(playbook_name)])[0][0]
phantom.add_note(container=container, content=note_content, note_format="markdown", note_type="general", title=note_title)
#phantom.add_note(container=container, content=note, note_format="markdown", note_type="general", title='trustar test note')
################################################################################
## Custom Code End
################################################################################
threat_intel_indicator_review(container=container)
return
def threat_intel_indicator_review(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("threat_intel_indicator_review() called")
# set user and message variables for phantom.prompt call
user = "Administrator"
message = """For each indicator below, please review the gathered information and mark the indicator for further action."""
# add the note from each of the launched playbooks
playbooks_launched = json.loads(phantom.get_run_data(key="launch_investigate_playbooks:playbooks_launched"))
for playbook in playbooks_launched:
message += '\n\n'
message += phantom.collect2(container=container, datapath=["{}:playbook_output:note_title".format(playbook)])[0][0] + '\n'
message += phantom.collect2(container=container, datapath=["{}:playbook_output:note_content".format(playbook)])[0][0] + '\n'
# no parameters to add
parameters = []
# create two questions and responses for each indicator. the first chooses a tag from a preconfigured list, and the second accepts a freeform comma-separated list of tags
response_types = []
all_indicators = phantom.collect2(container=container, datapath=["collect_all_indicators:custom_function_result.data.all_indicators.*.cef_value","collect_all_indicators:custom_function_result.data.all_indicators.*.data_types"])
for index, indicator in enumerate(all_indicators):
response_types.append({
"prompt": "Choose a tag for the indicator [{0}]".format(indicator[0]),
"options": {
"type": "list",
"choices": [
"Tag to block",
"Tag as safe",
"Do nothing"]}})
response_types.append({
"prompt": "Add any other comma-separated freeform tags for the indicator [{}], or enter 'n' to not add more tags.".format(indicator[0]),
"options": {
"type": "message"}})
phantom.prompt2(container=container, user=user, message=message, respond_in_mins=30, name="threat_intel_indicator_review", parameters=parameters, response_types=response_types, callback=process_responses)
return
def process_responses(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("process_responses() called")
threat_intel_indicator_review_result_data = phantom.collect2(container=container, datapath=["threat_intel_indicator_review:action_result.summary.responses","threat_intel_indicator_review:action_result.parameter.context.artifact_id"], action_results=results)
collect_all_indicators_data_all_indicators = phantom.collect2(container=container, datapath=["collect_all_indicators:custom_function_result.data.all_indicators.*.cef_value"])
threat_intel_indicator_review_summary_responses = [item[0] for item in threat_intel_indicator_review_result_data]
collect_all_indicators_data_all_indicators___cef_value = [item[0] for item in collect_all_indicators_data_all_indicators]
parameters = []
parameters.append({
"input_1": threat_intel_indicator_review_summary_responses,
"input_2": collect_all_indicators_data_all_indicators___cef_value,
"input_3": None,
"input_4": None,
"input_5": None,
"input_6": None,
"input_7": None,
"input_8": None,
"input_9": None,
"input_10": None,
})
################################################################################
## Custom Code Start
################################################################################
responses = threat_intel_indicator_review_summary_responses[0]
indicator_values = collect_all_indicators_data_all_indicators___cef_value
# lookup table to turn prompt responses into tags to add. "Do nothing" is not included, so no tags will be added
response_to_tag_map = {
"Tag to block": "marked_for_block",
"Tag as safe": "safe"
}
# overwrite the parameters list with a list of one indicator and one tag per parameter dictionary
parameters = []
for indicator_index, indicator_value in enumerate(indicator_values):
preconfigured_response = responses[indicator_index * 2]
freeform_response = responses[indicator_index * 2 + 1]
# handle the preconfigured responses
if preconfigured_response in response_to_tag_map:
phantom.comment(comment="Tagging the indicator {} with the preconfigured tag {}".format(indicator_value, response_to_tag_map[preconfigured_response]))
parameters.append({"input_1": [indicator_value, response_to_tag_map[preconfigured_response]]})
elif preconfigured_response != 'Do nothing':
phantom.error('The response {} was chosen for the indicator {}, but that response is not in the set of allowed responses.'.format(preconfigured_response, indicator_value))
# handle the freeform responses
if freeform_response.lower() not in ['n', 'none', 'na', 'n/a']:
freeform_tags = freeform_response.replace(' ','').split(',')
for tag in freeform_tags:
phantom.comment(comment="Tagging the indicator {} with the freeform tag {}".format(indicator_value, tag))
parameters.append({"input_1": [indicator_value, tag]})
################################################################################
## Custom Code End
################################################################################
phantom.custom_function(custom_function="community/passthrough", parameters=parameters, name="process_responses", callback=tag_indicators)
return
def tag_indicators(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("tag_indicators() called")
process_responses__result = phantom.collect2(container=container, datapath=["process_responses:custom_function_result.data"])
parameters = []
# build parameters list for 'tag_indicators' call
for process_responses__result_item in process_responses__result:
parameters.append({
"tags": process_responses__result_item[0],
"indicator": process_responses__result_item[0],
"overwrite": None,
})
################################################################################
## Custom Code Start
################################################################################
# overwrite the parameters, extracting the indicator and tag for each result from process_responses
parameters = []
for item in process_responses__result:
parameters.append({
"indicator": item[0][0]['item'],
"tags": item[0][1]['item'],
"overwrite": None
})
################################################################################
## Custom Code End
################################################################################
phantom.custom_function(custom_function="community/indicator_tag", parameters=parameters, name="tag_indicators")
return
def on_finish(container, summary):
phantom.debug("on_finish() called")
################################################################################
## Custom Code Start
################################################################################
# This function is called after all actions are completed.
# summary of all the action and/or all details of actions
# can be collected here.
# summary_json = phantom.get_summary()
# if 'result' in summary_json:
# for action_result in summary_json['result']:
# if 'action_run_id' in action_result:
# action_results = phantom.get_action_results(action_run_id=action_result['action_run_id'], result_data=False, flatten=False)
# phantom.debug(action_results)
################################################################################
## Custom Code End
################################################################################
return |
# import Node as nd
# import time
# node1 = nd.Node(10,12,1,1,1,1)
# print(node1.get_sensed_data())
# time.sleep(3)
# print(node1.get_sensed_data())
# time.sleep(3)
# print(node1.get_sensed_data())
# import time, threading
# def foo():
# print(time.ctime())
# threading.Timer(10, foo).start()
# foo()
# import math,random
# def getpoints(startx,starty,endx,endy,id):
# rangeX = (startx,endx)
# rangeY = (starty,endy)
# randPoints = []
# excluded = set()
# i = 0
# while i<20:
# x = random.randrange(*rangeX)
# y = random.randrange(*rangeY)
# if (x,y) in excluded: continue
# randPoints.append((x,y))
# i += 1
# excluded.update((x, y))
# secure_random = random.SystemRandom()
# nodeswithenergy = []#(x,y),energy,id
# for j in range(len(nodesinlevel)):
# for i in range(nodesinlevel[j]):
# list1 = []
# list1.append(secure_random.choice(randPoints))
# randPoints.remove(list1[0])
# list1.append(Et[j])
# list1.append(id)
# id = id+1
# nodeswithenergy.append(list1)
# return(nodeswithenergy)
# Z=10
# node_objects = []
# for i in range(0,int(math.sqrt(int(Z)))):#for y axis
# for j in range(0,int(math.sqrt(int(Z)))):#for x axis
# for k in getpoints(j*20,i*20,(j+1)*20,(i+1)*20,j*20+1+200*i):
# node_objects.append(k[0][0],k[0][1],k[1],k[2])
# for i in node_objects:
# print(i.get_node_id(),end=" ")
# L = 200
# R=121
# import EH_relay as relay_nd
# EH = []
# count = 1
# for i in range(0,L+1,20):
# for j in range(0,L+1,20):
# print(i,j)
# if count <= R:
# EH.append(relay_nd.EH_relay(i,j,count))
# count+=1
# print(len(EH))
# EHx = []
# EHy = []
# for j in EH:
# EHx.append(j.getlocation()[0])
# EHy.append(j.getlocation()[1])
# print(EHx,EHy)
# p = [1,2,3,4,5]
# q = p[:]
# print(p,q)
# q[0] =10
# q[4] = 30
# p = q[:]
# print(p,q)
# f = open("node_energies.txt", "a")
# h = 0
# # hp.network.is_Network_alive()
# while(h < 10):
# f.write("round :"+str(h+1)+"\n")
# h +=1
# print(h)
# f.close()
# (a,b) = (1,2)
# print(a,b)
# m = 12
# d = 3
# import numpy as np
# import matplotlib.pyplot as plt
# import matplotlib.patches as patches
# from matplotlib import animation
# x = [0, 1, 2]
# y = [0, 1, 2]
# yaw = [0.0, 0.5, 1.3]
# fig = plt.figure()
# plt.axis('equal')
# plt.grid()
# ax = fig.add_subplot(111)
# ax.set_xlim(-10, 10)
# ax.set_ylim(-10, 10)
# patch = patches.Rectangle((0, 0), 0, 0, fc='y')
# def init():
# ax.add_patch(patch)
# return patch,
# def animate(i):
# patch.set_width(1.2)
# patch.set_height(1.0)
# patch.set_xy([x[i], y[i]])
# patch._angle = -np.rad2deg(yaw[i])
# return patch,
# anim = animation.FuncAnimation(fig, animate,
# init_func=init,
# frames=len(x),
# interval=500,
# blit=True)
# plt.show()
# import numpy as np
# import matplotlib.pyplot as plt
# import mpl_toolkits.mplot3d.axes3d as p3
# import matplotlib.animation as animation
# # Fixing random state for reproducibility
# np.random.seed(19680801)
# def Gen_RandLine(length, dims=2):
# """
# Create a line using a random walk algorithm
# length is the number of points for the line.
# dims is the number of dimensions the line has.
# """
# lineData = np.empty((dims, length))
# lineData[:, 0] = np.random.rand(dims)
# for index in range(1, length):
# # scaling the random numbers by 0.1 so
# # movement is small compared to position.
# # subtraction by 0.5 is to change the range to [-0.5, 0.5]
# # to allow a line to move backwards.
# step = ((np.random.rand(dims) - 0.5) * 0.1)
# lineData[:, index] = lineData[:, index - 1] + step
# return lineData
# def update_lines(num, dataLines, lines):
# for line, data in zip(lines, dataLines):
# # NOTE: there is no .set_data() for 3 dim data...
# line.set_data(data[0:2, :num])
# line.set_3d_properties(data[2, :num])
# return lines
# # Attaching 3D axis to the figure
# fig = plt.figure()
# ax = p3.Axes3D(fig)
# # Fifty lines of random 3-D lines
# data = [Gen_RandLine(25, 3) for index in range(50)]
# # Creating fifty line objects.
# # NOTE: Can't pass empty arrays into 3d version of plot()
# lines = [ax.plot(dat[0, 0:1], dat[1, 0:1], dat[2, 0:1])[0] for dat in data]
# # Setting the axes properties
# ax.set_xlim3d([0.0, 1.0])
# ax.set_xlabel('X')
# ax.set_ylim3d([0.0, 1.0])
# ax.set_ylabel('Y')
# ax.set_zlim3d([0.0, 1.0])
# ax.set_zlabel('Z')
# ax.set_title('3D Test')
# # Creating the Animation object
# line_ani = animation.FuncAnimation(fig, update_lines, 25, fargs=(data, lines),
# interval=50, blit=False)
# plt.show()
# t = [1,2,3]
# t = []
# print(t)
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
def updatefig(i):
    # redraw the whole figure each frame with two fresh random traces
    fig.clear()
    plt.plot(np.random.random(100))
    plt.plot(np.random.random(100))
anim = animation.FuncAnimation(fig, updatefig, 100)
plt.show() |
import data_mine as dm
import json
import jsonlines
import os
import pandas as pd
import sys
import unittest
from data_mine import Collection
from data_mine.nlp.CSQA import CSQAType
from data_mine.nlp.CSQA.utils import type_to_data_file
from data_mine.utils import datamine_cache_dir
from pyfakefs.fake_filesystem_unittest import TestCase
if sys.version_info >= (3, 3):
from unittest.mock import ANY, patch
else:
from mock import ANY, patch
# Regular format.
TRAIN_QUESTION1 = json.loads("""{
"answerKey": "D",
"id": "3e792834df2aa7ae2a9070b494e37c26",
"question": {
"question_concept": "steam",
"choices": [
{
"label": "A",
"text": "condensate"
},
{
"label": "B",
"text": "electric smoke"
},
{
"label": "C",
"text": "smoke"
},
{
"label": "D",
"text": "liquid water"
},
{
"label": "E",
"text": "cold air"
}
],
"stem": "John cooled the steam. What did the steam become?"
}
}""")
# Answers not in normal order. Tests that choices are sorted.
TRAIN_QUESTION2 = json.loads("""{
"answerKey": "A",
"id": "6c84e79d0595efd99596faa07c4961d0",
"question": {
"question_concept": "climb",
"choices": [
{
"label": "E",
"text": "may fall"
},
{
"label": "A",
"text": "grab"
},
{
"label": "D",
"text": "falling"
},
{
"label": "C",
"text": "throw"
},
{
"label": "B",
"text": "look down"
}
],
"stem": "What would you do to a rock when climb up a cliff?"
}
}""")
# The "answerKey" field is missing. Simulate real CSQA test data.
TEST_QUESTION = json.loads("""{
"id": "9082b65f2bc5328ea991f734f930ddb5",
"question": {
"question_concept": "children",
"choices": [
{
"label": "A",
"text": "watch television"
},
{
"label": "B",
"text": "play basketball"
},
{
"label": "C",
"text": "cut and paste"
},
{
"label": "D",
"text": "swimming"
},
{
"label": "E",
"text": "reach over"
}
],
"stem": "If children were in a gym, would they be doing?"
}
}""")
# Answer 'F' should not be accepted.
INVALID_CORRECT_ANSWER = json.loads("""{
"answerKey": "F",
"id": "aaaabbbbccccdddd2a9070b494e37373",
"question": {
"question_concept": "soccer",
"choices": [
{
"label": "A",
"text": "text A"
},
{
"label": "B",
"text": "text B"
},
{
"label": "C",
"text": "text C"
},
{
"label": "D",
"text": "text D"
},
{
"label": "E",
"text": "text E"
}
],
"stem": "Some question?"
}
}""")
class TestCSQADatasetLoader(TestCase):
def setUp(self):
self.setUpPyfakefs()
dataset_dir = os.path.join(datamine_cache_dir(), "CSQA")
os.makedirs(dataset_dir, mode=0o755)
def write_questions(self, csqa_type, question_list):
datafile = type_to_data_file(csqa_type)
with jsonlines.open(datafile, "w") as writer:
writer.write_all(question_list)
@patch('data_mine.nlp.CSQA.loader.download_dataset')
def test_empty_dataset(self, mock_download_dataset):
self.write_questions(CSQAType.TRAIN, [])
df = dm.CSQA(CSQAType.TRAIN)
mock_download_dataset.assert_called_once_with(Collection.CSQA, ANY)
self.assertEqual(len(df), 0)
@patch('data_mine.nlp.CSQA.loader.download_dataset')
def test_load_in_train_and_dev_format(self, mock_download_dataset):
self.write_questions(CSQAType.TRAIN, [
TRAIN_QUESTION1, TRAIN_QUESTION2
])
expected_df = pd.DataFrame(json.loads("""[
{
"id": "3e792834df2aa7ae2a9070b494e37c26",
"question": "John cooled the steam. What did the steam become?",
"answers": [
"condensate",
"electric smoke",
"smoke",
"liquid water",
"cold air"
],
"correct": "D",
"question_concept": "steam"
},
{
"id": "6c84e79d0595efd99596faa07c4961d0",
"question": "What would you do to a rock when climb up a cliff?",
"answers": [
"grab",
"look down",
"throw",
"falling",
"may fall"
],
"correct": "A",
"question_concept": "climb"
}
]"""))
pd.testing.assert_frame_equal(dm.CSQA(CSQAType.TRAIN), expected_df)
mock_download_dataset.assert_called_once_with(Collection.CSQA, ANY)
@patch('data_mine.nlp.CSQA.loader.download_dataset')
def test_load_in_test_format(self, mock_download_dataset):
self.write_questions(CSQAType.TEST, [TEST_QUESTION])
expected_df = pd.DataFrame(json.loads("""[
{
"id": "9082b65f2bc5328ea991f734f930ddb5",
"question": "If children were in a gym, would they be doing?",
"answers": [
"watch television",
"play basketball",
"cut and paste",
"swimming",
"reach over"
],
"correct": null,
"question_concept": "children"
}
]"""))
self.assertIsNone(next(expected_df.iterrows())[1].correct)
pd.testing.assert_frame_equal(dm.CSQA(CSQAType.TEST), expected_df)
mock_download_dataset.assert_called_once_with(Collection.CSQA, ANY)
@patch('data_mine.nlp.CSQA.loader.download_dataset')
def test_missing_correct_answer_on_train(self, mock_download_dataset):
self.assertEqual(len(TEST_QUESTION), 2)
self.assertNotIn("answerKey", TEST_QUESTION)
self.write_questions(CSQAType.TRAIN, [TEST_QUESTION])
with self.assertRaises(AssertionError):
dm.CSQA(CSQAType.TRAIN)
mock_download_dataset.assert_called_once_with(Collection.CSQA, ANY)
@patch('data_mine.nlp.CSQA.loader.download_dataset')
def test_missing_correct_answer_on_dev(self, mock_download_dataset):
self.assertEqual(len(TEST_QUESTION), 2)
self.assertNotIn("answerKey", TEST_QUESTION)
self.write_questions(CSQAType.DEV, [TEST_QUESTION])
with self.assertRaises(AssertionError):
dm.CSQA(CSQAType.DEV)
mock_download_dataset.assert_called_once_with(Collection.CSQA, ANY)
@patch('data_mine.nlp.CSQA.loader.download_dataset')
def test_has_correct_answer_on_test(self, mock_download_dataset):
self.assertEqual(len(TRAIN_QUESTION1), 3)
self.assertIn("answerKey", TRAIN_QUESTION1)
self.write_questions(CSQAType.TEST, [TRAIN_QUESTION1])
with self.assertRaises(AssertionError):
dm.CSQA(CSQAType.TEST)
mock_download_dataset.assert_called_once_with(Collection.CSQA, ANY)
@patch('data_mine.nlp.CSQA.loader.download_dataset')
def test_invalid_correct_answer(self, mock_download_dataset):
self.write_questions(CSQAType.DEV, [
TRAIN_QUESTION1, TRAIN_QUESTION2, INVALID_CORRECT_ANSWER
])
with self.assertRaises(AssertionError):
dm.CSQA(CSQAType.DEV)
mock_download_dataset.assert_called_once_with(Collection.CSQA, ANY)
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
def sol():
"""
Solution module
"""
return "/SOLU"
def load_step_solve(n1,n2,inc=1):
"""
Solve from load step n1 to n2
"""
_lss = "LSSOLVE,%g,%g,%g"%(n1,n2,inc)
return _lss
def solve():
"""
Solve
"""
return "SOLVE"
if __name__ == '__main__':
    print(load_step_solve(1, 10, 1))
|
# Generated by Django 3.0 on 2020-02-22 13:26
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('profiles_api', '0002_profilefeediten'),
]
operations = [
migrations.RenameModel(
old_name='ProfileFeedIten',
new_name='ProfileFeedItem',
),
]
|
import numpy as np
from borderDetector import borderDetector
if __name__ == '__main__':
bd = borderDetector(imgPath="car.bmp", sigmas=np.array([0.73, 0.84]))
bd.detect()
|
import unittest
from troposphere import Join
from troposphere.sqs import Queue
class TestQueue(unittest.TestCase):
def test_QueueName(self):
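        # AWS requires FIFO queue names to end in ".fifo"; as the cases below
        # show, the validator only checks literal string names, so a Join()
        # expression passes through unvalidated.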
Queue(
"q",
FifoQueue=False,
).validate()
Queue(
"q",
FifoQueue=True,
QueueName="foobar.fifo",
).validate()
Queue(
"q",
FifoQueue=True,
QueueName=Join("foo", "bar"),
).validate()
Queue(
"q",
FifoQueue=True,
).validate()
with self.assertRaises(ValueError):
Queue(
"q",
FifoQueue=True,
QueueName="foobar",
).validate()
if __name__ == "__main__":
unittest.main()
|
# coding: utf-8
'''
Created on 11.05.2013
@author: кей
'''
from dals.local_host.local_host_io_wrapper import file2list
from dals.local_host.local_host_io_wrapper import get_utf8_template
if __name__=="__main__":
sets = get_utf8_template()
sets['name'] = '../info/preposition_ru.txt'
readed = file2list(sets)
result_set = set(readed[0][0].split(" "))
    # Now append the stop-words file as well
sets['name'] = '../info/stop_words_ru.txt'
readed, err = file2list(sets)
for line in readed:
work_copy = line[2:]
result_set |= set(work_copy.split(" "))
for it in result_set:
print '\"'+it+'\", '
print len(result_set)
print 'Done'
|
"""Helpful utilities for the AiiDA lab tools."""
import sys
from os import path
from importlib import import_module
from markdown import markdown
import requests
import ipywidgets as ipw
from IPython.lib import backgroundjobs as bg
from .config import AIIDALAB_APPS, AIIDALAB_REGISTRY
def update_cache():
    """Refresh the requests cache; meant to run as a background job.

    Relies on ``requests_cache``, which is imported in the try block below,
    so this function is only scheduled when that import succeeds."""
    requests_cache.install_cache(cache_name='apps_meta', backend='sqlite', expire_after=3600, old_data_on_error=True)
    requests.get(AIIDALAB_REGISTRY)
    requests_cache.install_cache(cache_name='apps_meta', backend='sqlite')
# Warning: try-except is a fix for Quantum Mobile release v19.03.0 that does not have requests_cache installed
try:
import requests_cache
# At start getting data from cache
requests_cache.install_cache(cache_name='apps_meta', backend='sqlite')
# If requests_cache is installed, upgrade the cache in the background.
UPDATE_CACHE_BACKGROUND = bg.BackgroundJobFunc(update_cache)
UPDATE_CACHE_BACKGROUND.start()
except ImportError:
pass
def load_app_registry():
"""Load apps' information from the AiiDA lab registry."""
try:
return requests.get(AIIDALAB_REGISTRY).json()
except ValueError:
print("Registry server is unavailable! Can't check for the updates")
return {}
def load_widget(name):
if path.exists(path.join(AIIDALAB_APPS, name, 'start.py')):
return load_start_py(name)
return load_start_md(name)
def load_start_py(name):
"""Load app appearance from a Python file."""
try:
mod = import_module('apps.%s.start' % name)
appbase = "../" + name
jupbase = "../../.."
notebase = jupbase + "/notebooks/apps/" + name
try:
return mod.get_start_widget(appbase=appbase, jupbase=jupbase, notebase=notebase)
except TypeError:
return mod.get_start_widget(appbase=appbase, jupbase=jupbase)
except Exception: # pylint: disable=broad-except
return ipw.HTML("<pre>{}</pre>".format(sys.exc_info()))
def load_start_md(name):
"""Load app appearance from a Markdown file."""
fname = path.join(AIIDALAB_APPS, name, 'start.md')
try:
md_src = open(fname).read()
md_src = md_src.replace("](./", "](../{}/".format(name))
html = markdown(md_src)
# open links in new window/tab
html = html.replace('<a ', '<a target="_blank" ')
# downsize headings
html = html.replace("<h3", "<h4")
return ipw.HTML(html)
except Exception as exc: # pylint: disable=broad-except
return ipw.HTML("Could not load start.md: {}".format(str(exc)))
|
"""Module for plotting summarised data with matplotlib."""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from typing import List, Optional
from ..checks import check_type, check_condition
def plot_summarised_variable(
summary_df: pd.DataFrame,
axis_right: int,
axis_left: Optional[List[int]] = None,
title: Optional[str] = None,
figsize_h: int = 14,
figsize_w: int = 8,
legend: bool = True,
):
"""Produce one way summary plot from pre-summarised data.
Parameters
----------
summary_df : pd.DataFrame
DataFrame with summarised info to plot.
axis_right : int
The index of the column in summary_df to plot on the right axis.
Typically this would be a weights column.
axis_left : Optional[List[int]], default = None
The index of the columns in summary_df to plot on the left axis.
Currently the maximum number of left axis columns supported is 5.
title : str, default = None
Title for the plot. If None summary_df.index.name is used as the title.
    figsize_h : int, default = 14
        First element of the matplotlib.pyplot.subplots figsize tuple (figure width).
    figsize_w : int, default = 8
        Second element of the matplotlib.pyplot.subplots figsize tuple (figure height).
legend : bool, default = True
Should a legend be added to the plot?
"""
LEFT_Y_AXIS_COLOURS = ["magenta", "forestgreen", "lime", "orangered", "dodgerblue"]
check_type(summary_df, pd.DataFrame, "summary_df")
check_type(axis_right, int, "axis_right")
check_type(axis_left, list, "axis_left", none_allowed=True)
check_type(title, str, "title", none_allowed=True)
check_type(figsize_h, int, "figsize_h", none_allowed=True)
check_type(figsize_w, int, "figsize_w", none_allowed=True)
check_type(legend, bool, "legend")
check_condition(
axis_right <= summary_df.shape[1] - 1,
f"only {summary_df.shape[1]} columns in summary_df but axis_right = {axis_right}",
)
if axis_left is not None:
if axis_right in axis_left:
raise ValueError(
f"column index {axis_right} specified for both right and left axes"
)
if len(axis_left) > len(LEFT_Y_AXIS_COLOURS):
raise ValueError(
f"only {len(LEFT_Y_AXIS_COLOURS)} plots supports for the left axis but {len(axis_left)} given"
)
for axis_left_no, axis_left_index in enumerate(axis_left):
check_type(axis_left_index, int, f"axis_left_index[{axis_left_no}]")
check_condition(
axis_left_index <= summary_df.shape[1] - 1,
f"only {summary_df.shape[1]} columns in summary_df but axis_left[{axis_left_no}] = {axis_left_index}",
)
if title is None:
title = summary_df.index.name
_, ax1 = plt.subplots(figsize=(figsize_h, figsize_w))
# plot bin counts on 1st axis
ax1.bar(
np.arange(summary_df.shape[0]),
summary_df[summary_df.columns[axis_right]].reset_index(drop=True),
color="gold",
label=summary_df.columns[axis_right],
)
plt.xticks(np.arange(summary_df.shape[0]), summary_df.index, rotation=270)
ax2 = ax1.twinx()
if axis_left is not None:
for column_no, left_axis_column_index in enumerate(axis_left):
ax2.plot(
summary_df[summary_df.columns[left_axis_column_index]]
.reset_index(drop=True)
.dropna()
.index,
summary_df[summary_df.columns[left_axis_column_index]]
.reset_index(drop=True)
.dropna(),
color=LEFT_Y_AXIS_COLOURS[column_no],
linestyle="-",
marker="D",
label=summary_df.columns[left_axis_column_index],
)
if legend:
ax1.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
if axis_left is not None:
ax2.legend(bbox_to_anchor=(1.05, 0.94), loc=2, borderaxespad=0.0)
plt.title(title, fontsize=20)
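# Illustrative usage (hypothetical column layout): for a summary_df indexed by
# feature bin with columns ["mean_target", "row_count"], plot counts as bars on
# the right axis and the mean as a line on the left axis:
# plot_summarised_variable(summary_df, axis_right=1, axis_left=[0])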
def plot_summarised_variable_2way(
summary_df: pd.DataFrame,
axis_right: int,
axis_left: Optional[List[int]] = None,
bar_type: Optional[str] = "stacked",
bars_percent: Optional[bool] = False,
title: Optional[str] = None,
figsize_h: int = 14,
figsize_w: int = 8,
legend: bool = True,
):
"""Produce one way summary plot from pre-summarised data.
Parameters
----------
summary_df : pd.DataFrame
DataFrame with summarised info to plot.
axis_right : int
The index of the column in summary_df to plot on the right axis.
Typically this would be a weights column.
axis_left : Optional[List[int]], default = None
The index of the columns in summary_df to plot on the left axis.
Currently only 3 left axis lines are supported.
bar_type : Optional[str], default = "stacked"
Type of bars to plot on the right axis. Must be either "stacked" or
"side_by_side".
bars_percent : Optional[bool], default = False
Should bars on the right axis be plotted as percentage of total within
each bar?
title : str, default = None
Title for the plot. If None summary_df.index.name is used as the title.
    figsize_h : int, default = 14
        First element of the matplotlib.pyplot.subplots figsize tuple (figure width).
    figsize_w : int, default = 8
        Second element of the matplotlib.pyplot.subplots figsize tuple (figure height).
legend : bool, default = True
Should a legend be added to the plot?
"""
BIN_COLOURS = [
"gold",
"khaki",
"goldenrod",
"darkkhaki",
"darkgoldenrod",
"olive",
"y",
]
LEFT_AXIS_COLOURS = [
[
"magenta",
"m",
"orchid",
"mediumvioletred",
"deeppink",
"darkmagenta",
"darkviolet",
],
[
"forestgreen",
"darkgreen",
"seagreen",
"green",
"darkseagreen",
"g",
"mediumseagreen",
],
[
"lime",
"limegreen",
"greenyellow",
"lawngreen",
"chartreuse",
"lightgreen",
"springgreen",
],
]
check_type(summary_df, pd.DataFrame, "summary_df")
check_type(axis_right, int, "axis_right")
check_type(axis_left, list, "axis_left", none_allowed=True)
check_type(bar_type, str, "bar_type", none_allowed=True)
check_type(bars_percent, bool, "bars_percent", none_allowed=True)
check_type(title, str, "title", none_allowed=True)
check_type(figsize_h, int, "figsize_h", none_allowed=True)
check_type(figsize_w, int, "figsize_w", none_allowed=True)
check_type(legend, bool, "legend")
check_condition(
axis_right <= summary_df.shape[1] - 1,
f"only {summary_df.shape[1]} columns in summary_df but axis_right = {axis_right}",
)
if axis_left is not None:
if axis_right in axis_left:
raise ValueError(
f"column index {axis_right} specified for both right and left axes"
)
if len(axis_left) > len(LEFT_AXIS_COLOURS):
raise ValueError(
f"only {len(LEFT_AXIS_COLOURS)} plots supported for the left axis but {len(axis_left)} given"
)
for axis_left_no, axis_left_index in enumerate(axis_left):
check_type(axis_left_index, int, f"axis_left_index[{axis_left_no}]")
check_condition(
axis_left_index <= summary_df.shape[1] - 1,
f"only {summary_df.shape[1]} columns in summary_df but axis_left[{axis_left_no}] = {axis_left_index}",
)
if len(summary_df.index.levels[1]) > len(BIN_COLOURS):
raise ValueError(
f"only {len(BIN_COLOURS)} levels supported for the second groupby column but {len(summary_df.index.levels[1])} given in summary_df"
)
by_col = summary_df.index.names[0]
split_by_col = summary_df.index.names[1]
if title is None:
title = f"{by_col} by {split_by_col}"
_, ax1 = plt.subplots(figsize=(figsize_h, figsize_w))
# turn data into by_col x split_by_col table and fill in levels
# with no weight (i.e. nulls) with 0
unstack_weights = summary_df[summary_df.columns[axis_right]].unstack()
unstack_weights.fillna(0, inplace=True)
if bars_percent:
row_totals = unstack_weights.sum(axis=1)
for col in unstack_weights.columns.values:
unstack_weights[col] = unstack_weights[col] / row_totals
split_levels = unstack_weights.columns.values
unstack_weights.columns = pd.Index(
[
"("
+ split_by_col
+ " = "
+ str(x)
+ ") "
+ str(summary_df.columns[axis_right])
for x in unstack_weights.columns.values
]
)
if bar_type == "stacked":
top_bins = np.zeros(unstack_weights.shape[0])
# plot bin counts on 1st axis
for i in range(unstack_weights.shape[1]):
heights = unstack_weights.loc[
:, unstack_weights.columns.values[i]
].reset_index(drop=True)
ax1.bar(
x=np.arange(unstack_weights.shape[0]),
height=heights,
color=BIN_COLOURS[i],
bottom=top_bins,
label=unstack_weights.columns.values[i],
)
top_bins = top_bins + heights
plt.xticks(
np.arange(unstack_weights.shape[0]), unstack_weights.index, rotation=270
)
x_ticket_offset = 0
elif bar_type == "side_by_side":
bar_width = 0.8 / unstack_weights.shape[1]
x_offset = 0
for i in range(unstack_weights.shape[1]):
ax1.bar(
np.arange(unstack_weights.shape[0]) + x_offset,
unstack_weights.loc[:, unstack_weights.columns.values[i]].reset_index(
drop=True
),
color=BIN_COLOURS[i],
width=bar_width,
label=unstack_weights.columns.values[i],
)
x_offset += bar_width
x_ticket_offset = (bar_width * (unstack_weights.shape[1] / 2)) - (
bar_width * 0.5
)
plt.xticks(
np.arange(unstack_weights.shape[0]) + x_ticket_offset,
unstack_weights.index,
rotation=270,
)
else:
raise ValueError(f"unexpected value for bar_type; {bar_type}")
ax2 = ax1.twinx()
if axis_left is not None:
for column_no, axis_left_column_index in enumerate(axis_left):
unstacked_left_axis_column = summary_df[
summary_df.columns[axis_left_column_index]
].unstack()
unstacked_left_axis_column.columns = pd.Index(
[
"("
+ split_by_col
+ " = "
+ str(x)
+ ") "
+ str(summary_df.columns[axis_left_column_index])
for x in unstacked_left_axis_column.columns.values
]
)
for i in range(unstacked_left_axis_column.shape[1]):
ax2.plot(
unstacked_left_axis_column.loc[
:, unstacked_left_axis_column.columns.values[i]
]
.reset_index(drop=True)
.dropna()
.index
+ x_ticket_offset,
unstacked_left_axis_column.loc[
:, unstacked_left_axis_column.columns.values[i]
]
.reset_index(drop=True)
.dropna(),
color=LEFT_AXIS_COLOURS[column_no][i],
linestyle="-",
marker="D",
label=unstacked_left_axis_column.columns.values[i],
)
if legend:
ax1.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
if axis_left is not None:
plt.legend(
bbox_to_anchor=(1.05, (0.94 - (0.03 * len(split_levels)))),
loc=2,
borderaxespad=0.0,
)
plt.title(title, fontsize=20)
|
import cv2
import numpy as np
from functools import reduce  # needed by __closest_factor on Python 3
from optical_flow import OpticalFlow, OpticalFlowFeatures
class VideoFeatures:
def __init__(self, filepath):
self.filepath = filepath
def get_feature_vector(self):
hists, magnitudes = self.__get_feature()
avg_hists = np.nanmean(hists, 0)
avg_magnitudes = np.nanmean(magnitudes, 0)
avg_magnitudes = (avg_magnitudes - np.nanmin(avg_magnitudes)) / (
np.nanmax(avg_magnitudes) - np.nanmin(avg_magnitudes))
variances = np.sum(np.nanvar(hists, 0), 2)
variances = (variances - np.nanmin(variances)) / (
np.nanmax(variances) - np.nanmin(variances))
feature_vector = np.concatenate((avg_hists.flatten(), avg_magnitudes.flatten(), variances.flatten()))
return feature_vector
def __get_feature(self):
hists = list()
magnitudes = list()
x_cells, y_cells = self.__get_cells(3, 3)
for pos, flow in enumerate(OpticalFlow(self.filepath).farneback()):
flow_features = OpticalFlowFeatures(flow)
hist = flow_features.get_hof(x_cells, y_cells, 8)
magnitude = flow_features.get_magnitude(x_cells, y_cells)
hists.append(hist)
magnitudes.append(magnitude)
return hists, magnitudes
def __get_cells(self, x_guess, y_guess):
video = cv2.VideoCapture(self.filepath)
im = cv2.cvtColor(video.read()[1], cv2.COLOR_BGR2GRAY)
h, w = im.shape[:2]
return self.__closest_factor(x_guess, w), self.__closest_factor(y_guess, h)
def __closest_factor(self, p, q):
factors_of_q = set(reduce(list.__add__,
([i, q // i] for i in range(1, int(q ** 0.5) + 1) if q % i == 0)))
return min(factors_of_q, key=lambda x : abs(x - p))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Announcement',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=50, verbose_name='title')),
('content', models.TextField(verbose_name='content')),
('creation_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='creation_date')),
('site_wide', models.BooleanField(default=False, verbose_name='site wide')),
('members_only', models.BooleanField(default=False, verbose_name='members only')),
('dismissal_type', models.IntegerField(default=2, choices=[(1, 'No Dismissals Allowed'), (2, 'Session Only Dismissal'), (3, 'Permanent Dismissal Allowed')])),
('publish_start', models.DateTimeField(default=django.utils.timezone.now, verbose_name='publish_start')),
('publish_end', models.DateTimeField(null=True, verbose_name='publish_end', blank=True)),
('creator', models.ForeignKey(verbose_name='creator', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'announcement',
'verbose_name_plural': 'announcements',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Dismissal',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('dismissed_at', models.DateTimeField(default=django.utils.timezone.now)),
('announcement', models.ForeignKey(related_name='dismissals', to='announcements.Announcement')),
('user', models.ForeignKey(related_name='announcement_dismissals', to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
]
|
"""Test Cloud preferences."""
from unittest.mock import patch
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.components.cloud.prefs import CloudPreferences, STORAGE_KEY
async def test_set_username(hass):
"""Test we clear config if we set different username."""
prefs = CloudPreferences(hass)
await prefs.async_initialize()
assert prefs.google_enabled
await prefs.async_update(google_enabled=False)
assert not prefs.google_enabled
await prefs.async_set_username("new-username")
assert prefs.google_enabled
async def test_set_username_migration(hass):
"""Test we not clear config if we had no username."""
prefs = CloudPreferences(hass)
with patch.object(prefs, "_empty_config", return_value=prefs._empty_config(None)):
await prefs.async_initialize()
assert prefs.google_enabled
await prefs.async_update(google_enabled=False)
assert not prefs.google_enabled
await prefs.async_set_username("new-username")
assert not prefs.google_enabled
async def test_load_invalid_cloud_user(hass, hass_storage):
"""Test loading cloud user with invalid storage."""
hass_storage[STORAGE_KEY] = {"version": 1, "data": {"cloud_user": "non-existing"}}
prefs = CloudPreferences(hass)
await prefs.async_initialize()
cloud_user_id = await prefs.get_cloud_user()
assert cloud_user_id != "non-existing"
cloud_user = await hass.auth.async_get_user(
hass_storage[STORAGE_KEY]["data"]["cloud_user"]
)
assert cloud_user
assert cloud_user.groups[0].id == GROUP_ID_ADMIN
async def test_setup_remove_cloud_user(hass, hass_storage):
"""Test creating and removing cloud user."""
hass_storage[STORAGE_KEY] = {"version": 1, "data": {"cloud_user": None}}
prefs = CloudPreferences(hass)
await prefs.async_initialize()
await prefs.async_set_username("user1")
cloud_user = await hass.auth.async_get_user(await prefs.get_cloud_user())
assert cloud_user
assert cloud_user.groups[0].id == GROUP_ID_ADMIN
await prefs.async_set_username("user2")
cloud_user2 = await hass.auth.async_get_user(await prefs.get_cloud_user())
assert cloud_user2
assert cloud_user2.groups[0].id == GROUP_ID_ADMIN
assert cloud_user2.id != cloud_user.id
|
from django.urls import path
from . import views
app_name = 'sell'
urlpatterns = [
# path('', views.index, name='index'),
path('', views.ProtectView.as_view(), name='index'),
path('create_sell/', views.InvoiceCreateView.as_view(), name='create'),
path('sell_list/', views.InvoiceListView.as_view(), name='list'),
path('pdf/<str:pk>', views.some_view, name='pdf'),
]
|
import os
import pytest
import subprocess
import tempfile
import time
import warnings
import docker
import girder_client
def getTestFilePath(name):
"""
Return the path to a file in the tests/test_files directory.
:param name: The name of the file.
:returns: the path to the file.
"""
return os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'test_files', name)
def _get_htk_ipaddr(dclient):
# search docker containers for a DSA docker container
# and fetch its IP address
return list(dclient.containers.list(
filters={'label': 'HISTOMICSTK_GC_TEST'})[0].attrs[
'NetworkSettings']['Networks'].values())[0]['IPAddress']
def _connect_girder_client_to_local_dsa(ip):
# connect a girder client to the local DSA docker
apiUrl = 'http://%s:8080/api/v1' % ip
gc = girder_client.GirderClient(apiUrl=apiUrl)
gc.authenticate('admin', 'password')
return gc
def _connect_to_existing_local_dsa():
client = docker.from_env(version='auto')
ipaddr = _get_htk_ipaddr(client)
return _connect_girder_client_to_local_dsa(ipaddr)
def _create_and_connect_to_local_dsa():
# create a local dsa docker and connect to it
cwd = os.getcwd()
thisDir = os.path.dirname(os.path.realpath(__file__))
externdatadir = os.path.join(thisDir, '..', '.tox', 'externaldata')
if not os.path.exists(externdatadir):
os.makedirs(externdatadir)
os.chdir(thisDir)
outfilePath = os.path.join(
tempfile.gettempdir(), 'histomicstk_test_girder_log.txt')
with open(outfilePath, 'w') as outfile:
# build a DSA docker container locally
proc = subprocess.Popen([
'docker-compose', 'up', '--build'],
close_fds=True, stdout=outfile, stderr=outfile)
os.chdir(cwd)
timeout = time.time() + 1200
# connect to docker and take a look at all its containers
    client = docker.from_env(version='auto')
    gc = None  # remains None if the server never comes up within the timeout
    while time.time() < timeout:
try:
ipaddr = _get_htk_ipaddr(client)
if ipaddr:
gc = _connect_girder_client_to_local_dsa(ipaddr)
break
except Exception: # noqa
# warnings.warn(
# "Looks like the local DSA docker image is still "
# "initializing. Will wait a few seconds and try again.",
# )
time.sleep(0.1)
return gc, proc
# TODO -- refactor to session scope by figuring out pytest issue (bug?)
# See https://docs.pytest.org/en/latest/fixture.html
# Note:
# We could use scope='session' to create the DSA docker instance once
# and reuse it for all test modules. However, pytest seems to have a
# bug when yield is used (as opposed to return) and it does not run the
# teardown code properly. Instead, the girderClient fixture is called
# again between modules, causing a StopIteration error.
# Until this bug is fixed, we restrict the scope to the "module" level
# to ensure: 1. Safe teardown, and 2. That edits to the database
# done by one module do not carry over to the next module.
#
# @pytest.fixture(scope='session')
@pytest.fixture(scope='module')
def girderClient():
"""
Yield an authenticated girder client that points to the server.
If a local girder server docker is running, this will connect to it,
otherwise, this will spin up a local girder server, load it with some
initial data, and connect to it.
NOTE: The default behavior initializes the docker image once per module and
re-uses it for all tests. This means whatever one unit test changes in
the DSA database is persistent for the next unit test. So if, for example,
you remove one annotation as part of the first unit test, the next unit
test will not have access to that annotation. Once all the unit tests are
done, the database is torn down.
    If, instead, you would like to run tests *repeatedly* (i.e. prototyping),
    or you would like the changes written by tests in one module to be
carried over to the next test module, you may prefer to start the server
manually. That way you won't worry about unknown wait time till the local
server is fully initialized. To manually start a DSA docker image, navigate
to the directory where this file exists, and start the container:
$ cd HistomicsTK/tests/
$ docker-compose up --build
Of course, you need to have docker installed and to either
run this as sudo or be added to the docker group by the system admins.
"""
try:
# First we try to connect to any existing local DSA docker
yield _connect_to_existing_local_dsa()
except Exception as e:
warnings.warn(
e.__repr__() + "\n"
"Looks like there's no existing local DSA docker running; "
"will create one now and try again.",
)
# create a local dsa docker and connect to it
gc, proc = _create_and_connect_to_local_dsa()
yield gc
proc.terminate()
proc.wait()
|
import frappe
# def get_or_create_doc(params):
# print(params)
#
# dn = params[params['key']]
# doc = frappe.db.exists(params['doctype'], dn)
# if doc:
# created = False
# else:
# created = True
# doc = frappe.get_doc(params).insert()
#
# return created, doc
def get_or_create_doc(fields=None, dt=None, dn=None):
if fields:
if not dt:
dt = fields.pop('doctype')
key = fields.get('key')
if key:
dn = fields[key]
try:
if dn:
doc = frappe.get_doc(dt, dn)
created = False
else:
doc = frappe.get_doc(dt, fields)
created = False
    except frappe.exceptions.DoesNotExistError:
        doc = frappe.new_doc(dt)
        if fields:
            for key, value in fields.items():
                setattr(doc, key, value)
        doc.insert()  # insert() already persists the new document; no separate save() needed
        created = True
return created, doc
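# Illustrative usage (hypothetical doctype and field names):
# created, doc = get_or_create_doc(
#     fields={'doctype': 'ToDo', 'key': 'description', 'description': 'Write tests'})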
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from . import user_pb2 as user_dot_user__pb2
class UserServiceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Create = channel.unary_unary(
'/chaosplatform.user.UserService/Create',
request_serializer=user_dot_user__pb2.CreateRequest.SerializeToString,
response_deserializer=user_dot_user__pb2.CreateReply.FromString,
)
self.Delete = channel.unary_unary(
'/chaosplatform.user.UserService/Delete',
request_serializer=user_dot_user__pb2.DeleteRequest.SerializeToString,
response_deserializer=user_dot_user__pb2.DeleteReply.FromString,
)
class UserServiceServicer(object):
# missing associated documentation comment in .proto file
pass
def Create(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Delete(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_UserServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Create': grpc.unary_unary_rpc_method_handler(
servicer.Create,
request_deserializer=user_dot_user__pb2.CreateRequest.FromString,
response_serializer=user_dot_user__pb2.CreateReply.SerializeToString,
),
'Delete': grpc.unary_unary_rpc_method_handler(
servicer.Delete,
request_deserializer=user_dot_user__pb2.DeleteRequest.FromString,
response_serializer=user_dot_user__pb2.DeleteReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'chaosplatform.user.UserService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
|
# DHANUSH H V https://www.github.com/DHANUSH-web
# Both decimal to binary and binary to decimal in a single program
# Programmed in: Python
# binary to decimal without using built-in method int()
def decimal(binary):
sz = len(binary)
dec, index = 0, sz-1
for bit in binary:
if bit == '1':
dec += pow(2, index)
index -= 1
return dec
# decimal to binary without using built-in method bin()
def binary(decimal):
    if decimal == 0:  # edge case: 0 would otherwise produce an empty string
        return "0"
    _bin = []
    while decimal > 0:
        _bin.append(str(decimal % 2))
        decimal //= 2
    return "".join(reversed(_bin))
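# Quick sanity check: decimal("1011") == 11 and binary(11) == "1011"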
_input = input("Enter input type (b/d): ").lower()
if _input == 'b':
print(f"Decimal: {decimal(input('Enter a binary value: '))}")
else:
print(f"Binary: {binary(int(input('Enter a decimal value : ')))}")
|
import six
import types
class Field(object):
""":class:`Field` is used to define what attributes will be serialized.
A :class:`Field` maps a property or function on an object to a value in the
serialized result. Subclass this to make custom fields. For most simple
cases, overriding :meth:`Field.to_value` should give enough flexibility. If
more control is needed, override :meth:`Field.as_getter`.
:param str attr: The attribute to get on the object, using the same format
as ``operator.attrgetter``. If this is not supplied, the name this
field was assigned to on the serializer will be used.
:param bool call: Whether the value should be called after it is retrieved
from the object. Useful if an object has a method to be serialized.
:param str label: A label to use as the name of the serialized field
instead of using the attribute name of the field.
:param bool required: Whether the field is required. If set to ``False``,
:meth:`Field.to_value` will not be called if the value is ``None``.
"""
#: Set to ``True`` if the value function returned from
#: :meth:`Field.as_getter` requires the serializer to be passed in as the
#: first argument. Otherwise, the object will be the only parameter.
getter_takes_serializer = False
def __init__(self, attr=None, call=False, label=None, required=True):
self.attr = attr
self.call = call
self.label = label
self.required = required
def to_value(self, value):
"""Transform the serialized value.
Override this method to clean and validate values serialized by this
field. For example to implement an ``int`` field: ::
def to_value(self, value):
return int(value)
:param value: The value fetched from the object being serialized.
"""
return value
to_value._serpy_base_implementation = True
def _is_to_value_overridden(self):
to_value = self.to_value
# If to_value isn't a method, it must have been overridden.
if not isinstance(to_value, types.MethodType):
return True
return not getattr(to_value, '_serpy_base_implementation', False)
def as_getter(self, serializer_field_name, serializer_cls):
"""Returns a function that fetches an attribute from an object.
Return ``None`` to use the default getter for the serializer defined in
:attr:`Serializer.default_getter`.
When a :class:`Serializer` is defined, each :class:`Field` will be
converted into a getter function using this method. During
serialization, each getter will be called with the object being
serialized, and the return value will be passed through
:meth:`Field.to_value`.
If a :class:`Field` has ``getter_takes_serializer = True``, then the
getter returned from this method will be called with the
:class:`Serializer` instance as the first argument, and the object
being serialized as the second.
:param str serializer_field_name: The name this field was assigned to
on the serializer.
:param serializer_cls: The :class:`Serializer` this field is a part of.
"""
return None
class StrField(Field):
"""A :class:`Field` that converts the value to a string."""
to_value = staticmethod(six.text_type)
class IntField(Field):
"""A :class:`Field` that converts the value to an integer."""
to_value = staticmethod(int)
class FloatField(Field):
"""A :class:`Field` that converts the value to a float."""
to_value = staticmethod(float)
class BoolField(Field):
"""A :class:`Field` that converts the value to a boolean."""
to_value = staticmethod(bool)
class MethodField(Field):
"""A :class:`Field` that calls a method on the :class:`Serializer`.
This is useful if a :class:`Field` needs to serialize a value that may come
from multiple attributes on an object. For example: ::
class FooSerializer(Serializer):
plus = MethodField()
minus = MethodField('do_minus')
def get_plus(self, foo_obj):
return foo_obj.bar + foo_obj.baz
def do_minus(self, foo_obj):
return foo_obj.bar - foo_obj.baz
foo = Foo(bar=5, baz=10)
FooSerializer(foo).data
# {'plus': 15, 'minus': -5}
:param str method: The method on the serializer to call. Defaults to
``'get_<field name>'``.
"""
getter_takes_serializer = True
def __init__(self, method=None, **kwargs):
super(MethodField, self).__init__(**kwargs)
self.method = method
def as_getter(self, serializer_field_name, serializer_cls):
method_name = self.method
if method_name is None:
method_name = 'get_{0}'.format(serializer_field_name)
return getattr(serializer_cls, method_name)
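# Usage sketch (editorial addition, not part of the library): a custom field
# overriding to_value, as described in the Field docstring above.
class TruncatedStrField(Field):
    """A Field that serializes the value as a string capped at 10 characters."""
    def to_value(self, value):
        return str(value)[:10]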
|
#!/usr/bin/env python3
# Author: Volodymyr Shymanskyy
import os
import re, fnmatch
import pathlib
class ansi:
ENDC = '\033[0m'
HEADER = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class dotdict(dict):
def __init__(self, *args, **kwargs):
super(dotdict, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, v in arg.items():
self[k] = v
if kwargs:
for k, v in kwargs.items():
self[k] = v
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
class Blacklist:
    """Compiles fnmatch-style patterns into one regex for fast membership tests."""
    def __init__(self, patterns):
self._patterns = list(map(fnmatch.translate, patterns))
self.update()
def add(self, patterns):
self._patterns += list(map(fnmatch.translate, patterns))
self.update()
def update(self):
self._regex = re.compile('|'.join(self._patterns))
def __contains__(self, item):
return self._regex.match(item) is not None
def filename(p):
_, fn = os.path.split(p)
return fn
def pathname(p):
pn, _ = os.path.split(p)
return pn
def ensure_path(p):
pathlib.Path(p).mkdir(parents=True, exist_ok=True)
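# Usage sketch (editorial addition): fnmatch-style patterns compiled by Blacklist.
if __name__ == '__main__':
    bl = Blacklist(['*.pyc', 'build/*'])
    bl.add(['*.tmp'])
    assert 'module.pyc' in bl
    assert 'src/main.py' not in bl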
|
from ebl.transliteration.domain.atf import Atf
from ebl.fragmentarium.domain.museum_number import MuseumNumber
from ebl.transliteration.domain.sign import (
Sign,
SignListRecord,
SignName,
Value,
Logogram,
Fossey,
)
def test_logogram():
logogram = Logogram(
"AŠ-IKU", Atf("AŠ-IKU"), ["ikû I"], "AŠ-IKU; *iku* (Deich); ZL 290 (Lit.)"
)
assert logogram.logogram == "AŠ-IKU"
assert logogram.atf == Atf("AŠ-IKU")
assert logogram.word_id == ["ikû I"]
assert logogram.schramm_logogramme == "AŠ-IKU; *iku* (Deich); ZL 290 (Lit.)"
def test_fossey():
fossey = Fossey(
405,
25728,
"B",
"Mai: MDP, VI, 11.I, 11",
"Paulus AOAT 50, 981",
"NABU 1997/1",
"P123456",
MuseumNumber("K", "4562"),
"dcclt",
"Das Zeichen ist eigentlich ZA₇",
"Marduk-apla-iddina I, 1171-1159 BC",
"me-luḫ-ḫa",
"M15,21.7c-0.1-0.1-0.2-0.4-0.2-0.8c-0.1-1-0.1-1.2-0.5-1.3c-0.2",
)
assert fossey.page == 405
assert fossey.number == 25728
assert fossey.suffix == "B"
assert fossey.reference == "Mai: MDP, VI, 11.I, 11"
assert fossey.new_edition == "Paulus AOAT 50, 981"
assert fossey.secondary_literature == "NABU 1997/1"
assert fossey.cdli_number == "P123456"
assert fossey.museum_number == MuseumNumber("K", "4562")
assert fossey.external_project == "dcclt"
assert fossey.notes == "Das Zeichen ist eigentlich ZA₇"
assert fossey.date == "Marduk-apla-iddina I, 1171-1159 BC"
assert fossey.transliteration == "me-luḫ-ḫa"
assert (
fossey.sign == "M15,21.7c-0.1-0.1-0.2-0.4-0.2-0.8c-0.1-1-0.1-1.2-0.5-1.3c-0.2"
)
def test_sign():
name = SignName("KUR")
lists = (SignListRecord("FOO", "123"),)
values = (Value("kur", 8), Value("ruk"))
logogram = Logogram(
"AŠ-IKU", Atf("AŠ-IKU"), ["ikû I"], "AŠ-IKU; *iku* (Deich); ZL 290 (Lit.)"
)
fossey = Fossey(
405,
25728,
"B",
"Mai: MDP, VI, 11.I, 11",
"Paulus AOAT 50, 981",
"NABU 1997/1",
"P123456",
MuseumNumber("K", "4562"),
"dcclt",
"Das Zeichen ist eigentlich ZA₇",
"Marduk-apla-iddina I, 1171-1159 BC",
"me-luḫ-ḫa",
"M15,21.7c-0.1-0.1-0.2-0.4-0.2-0.8c-0.1-1-0.1-1.2-0.5-1.3c-0.2",
)
sign = Sign(
name,
lists=lists,
values=values,
logograms=logogram,
fossey=fossey,
mes_zl="test_mesZl",
labasi="test_LaBaSi",
)
assert sign.name == name
assert sign.lists == lists
assert sign.values == values
assert sign.logograms == logogram
assert sign.fossey == fossey
assert sign.mes_zl == "test_mesZl"
assert sign.labasi == "test_LaBaSi"
def test_standardization_abz():
name = "ABZ"
number = "123"
sign = Sign(SignName("KUR"), lists=(SignListRecord(name, number),))
assert sign.standardization == f"{name}{number}"
def test_standardization_multiple_abz():
name = "ABZ"
number = "123"
sign = Sign(
SignName("KUR"),
lists=(SignListRecord(name, number), SignListRecord(name, "999")),
)
assert sign.standardization == f"{name}{number}"
def test_standardization_no_abz():
sign = Sign(SignName("KUR"))
assert sign.standardization == sign.name
|
import urllib.request
from typing import List
import re, os, json
from courseDB import CourseDB
from bs4 import BeautifulSoup
from pprint import pprint
from utils import get_page, change_keys, parse_day
from utils import bcolors
from halo import Halo
from shared_course_web_ananlyzer import download_course_description_single_page
def download_engineering_course_description(url: str, db: CourseDB, col_name: str, exceptionKeys: dict={}, startIndex=0, courseUrl=None):
spinner = Halo(text='Downloading UTSG Engineering Course Description')
spinner.start()
count = 0
    if '$page' not in url:
count = download_course_description_single_page(url, db, col_name, spinner=spinner, departmentHint="ENG", exceptionKeys=exceptionKeys, courseUrl=courseUrl)
    else:
        page_index = startIndex
        total_size = 0
        size = download_course_description_single_page(url.replace("$page", str(page_index)), db, col_name, spinner=spinner, departmentHint="ENG", exceptionKeys=exceptionKeys, courseUrl=courseUrl)
        while size > 0:
            total_size += size
            page_index += 1  # advance before the next request so the first page is not fetched twice
            size = download_course_description_single_page(url.replace("$page", str(page_index)), db, col_name, spinner=spinner, departmentHint="ENG", exceptionKeys=exceptionKeys, courseUrl=courseUrl)
        count = total_size  # report the grand total, not the size of the final (empty) page
spinner.stop()
return count
def download_engineering_table(url: str, db: CourseDB, col_name: str, save_year_course: bool = True, drop_first: bool = True) -> None:
spinner = Halo(text='Downloading UTSG Engineering Course Timetable')
spinner.start()
page = get_page(url)
soup = BeautifulSoup(page, 'html.parser')
course_groups_html = soup.find_all('table')[1:]
course_table = []
if drop_first:
db.drop_col(col_name)
for course_group_html in course_groups_html:
course_headers = [tag.string for tag in course_group_html.tr.find_all('th')]
all_courses = course_group_html.find_all('tr')[1:]
for index, meeting_html in enumerate(all_courses):
meeting_info = [info.string if info.string != '\xa0' else 'NONE' for info in meeting_html.find_all('font')]
course_type = meeting_info[0][-1]
course_name = meeting_info[0]
if not save_year_course and course_type.capitalize() == 'Y':
continue
course_found = False
detail_raw = {header: context for header, context in zip(course_headers[2:], meeting_info[2:])}
detail_info = change_keys(detail_raw, {
"START DATE": 'meetingStartDate',
"DAY": 'meetingDay',
"START": 'meetingStartTime',
"FINISH": 'meetingEndTime',
"LOCATION": 'meetingLocation',
"PROFESSOR(S)": 'instructor',
"SCHEDULING NOTES": 'notes',
"DELIVERY MODE": 'deliveryMode'
})
deliveryMode = detail_info['deliveryMode'] if 'deliveryMode' in detail_info else "None"
detail_info.update({'meetingDay': parse_day(detail_info['meetingDay'])})
instructor = detail_info['instructor']
meeting = {'meetingName': meeting_info[1],
'meetingType': meeting_info[1][:3],
'instructors': [] if instructor == 'NONE' else [instructor],
'deliveryMode': deliveryMode,
'detail': [detail_info]}
meeting_type = meeting.pop('meetingType')
# check for previous course name
for previous_course in course_table:
if previous_course['courseName'] == meeting_info[0]:
# check for previous meeting type first
meeting_type_found = False
for previous_meetings in previous_course['meetings']:
previous_meeting_type, meetings = previous_meetings['meetingType'], previous_meetings['activities']
if previous_meeting_type == meeting_type:
# check for previous meeting name
meeting_found = False
for previous_meeting in meetings:
if previous_meeting['meetingName'] == meeting['meetingName']:
# update instructor list
instructor_found = False
for previous_instructor in previous_meeting['instructors']:
if previous_instructor == meeting['instructors'][0]:
instructor_found = True
break
if not instructor_found:
previous_meeting['instructors'].extend(meeting['instructors'])
previous_meeting['detail'].extend(meeting['detail'])
meeting_found = True
break
if not meeting_found:
# no previous meeting found
meetings.append(meeting)
meeting_type_found = True
break
if not meeting_type_found:
# add a new type
previous_course['meetings'].append({'meetingType': meeting_type, 'activities': [meeting]})
course_found = True
break
if not course_found:
# add a new course
course_table.append({
'courseName': course_name,
'courseType': course_type,
'orgName': 'Engineering',
'meetings': [{'meetingType': meeting_type, 'activities': [meeting]}]
})
spinner.succeed('[ENG] Reading Session Detail - ' + course_name + ' - ' + bcolors.OKBLUE + 'Progress {} of {}'.format(
index + 1, len(all_courses)) + bcolors.ENDC)
db.insert_many(col_name, course_table)
spinner.stop()
def get_engineering_exception_dict() -> dict:
excep = {
"Humanities and Social Science elective.": ""
}
return excep
if __name__ == '__main__':
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(f"{dir_path}/../../secrets.json") as f:
data = json.load(f)
db = CourseDB(data['dbname'], data['dbuser'], data['dbpwd'], useAuth=False)
url = 'https://engineering.calendar.utoronto.ca/search-courses?course_keyword=&field_section_value=All&field_subject_area_target_id=All&page=$page'
courseUrl = "https://engineering.calendar.utoronto.ca/course/$course"
    excep = get_engineering_exception_dict()
download_engineering_course_description(url, db, 'test', exceptionKeys=excep, startIndex=0, courseUrl=courseUrl) |
def ReadInt(msg):
while True:
try:
number = int(input(msg))
except (ValueError, TypeError):
print('\033[1;31mERRO: Digite um número inteiro válido!\033[m')
continue
except (KeyboardInterrupt):
            print('\n\033[1;31mO usuário preferiu não informar esse número.\033[m')
return 0
else:
return number
# text formatting helpers
def linha(tam=42):
return '=' * tam
# header
def cabeçalho(txt):
print(linha())
print(txt.center(42))
print(linha())
def menu(lista):
cabeçalho('MENU PRINCIPAL')
cont = 1
for item in lista:
        print(f'{cont} - {item}')
cont += 1
print(linha())
    option = ReadInt('Sua opção: ')  # reuse the validated integer reader defined above
return option |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from stopover import Stopover
endpoint = 'http://localhost:5704'
receiver_group = 'group0'
stream = 'stream0'
# all the messages with the same key will fall under the same partition
key = None
stopover = Stopover(endpoint)
stopover.put('hi 0', stream, key=key)
index = 1
for message in stopover.listen(stream, receiver_group):
print(message.index, message.value)
stopover.commit(message, receiver_group)
stopover.put(f'hi {index}', stream, key=key)
index += 1
|
s = input()  # avoid shadowing the built-in str
n = int(input())
# rotate the string left by n characters, e.g. 'python', 2 -> 'thonpy'
print(s[n:] + s[:n])
|
from __future__ import print_function, absolute_import, unicode_literals
from zope.interface import implementer
import contextlib
from ._interfaces import IJournal
@implementer(IJournal)
class Journal(object):
def __init__(self, save_checkpoint):
self._save_checkpoint = save_checkpoint
self._outbound_queue = []
self._processing = False
def queue_outbound(self, fn, *args, **kwargs):
assert self._processing
self._outbound_queue.append((fn, args, kwargs))
@contextlib.contextmanager
def process(self):
assert not self._processing
assert not self._outbound_queue
self._processing = True
yield # process inbound messages, change state, queue outbound
self._save_checkpoint()
for (fn, args, kwargs) in self._outbound_queue:
fn(*args, **kwargs)
self._outbound_queue[:] = []
self._processing = False
@implementer(IJournal)
class ImmediateJournal(object):
def __init__(self):
pass
def queue_outbound(self, fn, *args, **kwargs):
fn(*args, **kwargs)
@contextlib.contextmanager
def process(self):
yield
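# Usage sketch (editorial addition): outbound messages queued inside process()
# are only delivered after the checkpoint has been saved.
if __name__ == '__main__':
    sent = []
    journal = Journal(save_checkpoint=lambda: sent.append('checkpoint'))
    with journal.process():
        journal.queue_outbound(sent.append, 'message')
    assert sent == ['checkpoint', 'message']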
|
import requests
import json
import time
import hmac
import base64
import hashlib
import urllib.parse
def create_sign_for_dingtalk(secret: str):
"""
docstring
"""
timestamp = str(round(time.time() * 1000))
secret_enc = secret.encode('utf-8')
string_to_sign = '{}\n{}'.format(timestamp, secret)
string_to_sign_enc = string_to_sign.encode('utf-8')
hmac_code = hmac.new(secret_enc, string_to_sign_enc, digestmod=hashlib.sha256).digest()
sign = urllib.parse.quote_plus(base64.b64encode(hmac_code))
return timestamp, sign
DINGTALK_API_URL="https://oapi.dingtalk.com/robot/send?access_token={}"
DINGTAIL_HEADERS = {'Content-Type': 'application/json'}
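# Usage sketch (editorial addition): how the signed URL is assembled from the
# helper above. The token and secret are caller-supplied placeholders.
def example_signed_url(token: str, secret: str) -> str:
    timestamp, sign = create_sign_for_dingtalk(secret)
    return DINGTALK_API_URL.format(token) + f'&timestamp={timestamp}&sign={sign}'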
def do_notify_by_ding_talk(dingtalk_config: dict, data: dict):
"""发消息给钉钉机器人
"""
token = dingtalk_config['token']
secret = dingtalk_config['secret']
assert token and secret
url = DINGTALK_API_URL.format(token)
timestamp, sign = create_sign_for_dingtalk(secret)
    url += f'&timestamp={timestamp}&sign={sign}'  # '&times' had been mangled into '×' by HTML-entity decoding
    # APP.debug(f'DingTalk robot payload ===> {data}')
return requests.post(url=url, headers = DINGTAIL_HEADERS, data=json.dumps(data))
# for examples:
DINGTAIL_SUBJECT = "[GITHOOK] {pusher}推送项目{rep_name}{result}"
DINGTAIL_BODY = """## {pusher}推送项目[{rep_name}]({url}){result}\n
### <font color=red>COMMITS:</font>\n
{comment_li}\n
### <font color=red>COMMANDS:</font>\n
{command_li}\n
### <font color=red>STDOUT:</font>\n
{stdout_li}\n
### <font color=red>STDERR:</font>\n
{stderr_li}
"""
#dt_data = data.copy()
#dt_data['comment_li'] = '\n'.join((f'- {c}' for c in data['comments']))
#dt_data['command_li'] = '\n'.join((f'- {c}' for c in data['commands']))
#dt_data['stdout_li'] = '\n'.join((f'- {c}' for c in data['stdout_list']))
#dt_data['stderr_li'] = '\n'.join((f'- {c}' for c in data['stderr_list']))
def notify_by_ding_talk(dingtalk_config: dict, title: str, text: str):
"""发消息给钉钉机器人
"""
dt_msg = {
"msgtype": 'markdown',
"markdown": {
'title': title,
'text': text
}
}
res = do_notify_by_ding_talk(dingtalk_config, dt_msg)
return res.json() |
from frogsay.client import open_client
from .util import temp_dir_name
def test_empty_cache_fetches_more_tips():
with temp_dir_name() as db_dir:
with open_client(db_dir) as client:
# Exhaust all tips
tries = 0
max_tips = 49
client.frog_tip()
while not client.should_refresh:
tries += 1
client.frog_tip()
assert tries == max_tips
with open_client(db_dir) as client:
assert client.num_cached_tips == 0
|
import json
import pathlib
from pathlib import Path
from dask.array.routines import shape
import distributed
from ..parse import utilities
import h5py
from dask import array as da
from dask import bag as db
from distributed import progress
from dask import delayed
from dask.diagnostics import ProgressBar
import dask
from .. import Array, Scan
import numpy as np
from copy import deepcopy as copy
import logging
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from tqdm.autonotebook import tqdm
from threading import Thread
from time import sleep
try:
    import bitshuffle.h5
except ImportError:
    print('Could not import bitshuffle.h5!')
from dask_jobqueue import SLURMCluster
from distributed import Client
import socket
import getpass
logger = logging.getLogger(__name__)
# cluster helpers
class SwissFelCluster:
def __init__(self, local=True, cores=8, memory="24 GB", workers=5):
if local:
self.client = distributed.Client()
else:
self.cluster = SLURMCluster(cores=cores, memory=memory)
self.client = Client(self.cluster)
self.ip = socket.gethostbyname(socket.gethostname())
self.dashboard_port_scheduler = self.client._scheduler_identity.get("services")[
"dashboard"
]
self.username = getpass.getuser()
def _repr_html_(self):
return self.client._repr_html_()
def scale_workers(self, N_workers):
self.cluster.scale(N_workers)
def create_dashboard_tunnel(self, ssh_host="ra"):
print(
"type following commant in a terminal, if port is taken, change first number in command."
)
print(
" ".join(
[
f"jupdbport={self.dashboard_port_scheduler}",
"&&",
"ssh",
"-f",
"-L",
f"$jupdbport:{self.ip}:{self.dashboard_port_scheduler}",
f"{self.username}@{ssh_host}",
"sleep 10",
"&&",
"firefox",
"http://localhost:$jupdbport",
]
)
)
# parsing stuff
@delayed
def parse_bs_h5_file(fina, memlimit_MB=100):
"""Data parser assuming the standard swissfel h5 format for raw data"""
# if (type(files) is str) or (not np.iterable(files)):
# files = [files]
fina = Path(fina)
try:
with h5py.File(fina.resolve(), mode="r") as fh:
datasets = utilities.findItemnamesGroups(fh, ["data", "pulse_id"])
logger.info("Successfully parsed file %s" % fina.resolve())
dstores = {}
for name, (ds_data, ds_index) in datasets.items():
if ds_data.size == 0:
logger.debug("Found empty dataset in {}".format(name))
continue
# data first
dtype = np.dtype(ds_data.dtype)
size_element = (
np.dtype(ds_data.dtype).itemsize
* np.prod(ds_data.shape[1:])
/ 1024 ** 2
)
chunk_length = int(memlimit_MB // size_element)
dset_size = ds_data.shape
chunk_shapes = []
slices = []
for chunk_start in range(0, dset_size[0], chunk_length):
slice_0dim = [
chunk_start,
min(chunk_start + chunk_length, dset_size[0]),
]
chunk_shape = list(dset_size)
chunk_shape[0] = slice_0dim[1] - slice_0dim[0]
slices.append(slice_0dim)
chunk_shapes.append(chunk_shape)
dstores[name] = {
"file_path": fina.resolve().as_posix(),
"data_dsp": ds_data.name,
"data_shape": ds_data.shape,
"data_dtype": dtype.str,
"data_chunks": {"slices": slices, "shapes": chunk_shapes},
"index_dsp": ds_index.name,
"index_dtype": ds_index.dtype.str,
"index_shape": ds_index.shape,
}
# dstores[name]["stepLengths"] = []
# dstores[name]["stepLengths"].append(len(datasets[name][0]))
return dstores
    except Exception:
        logger.warning("Could not parse file %s" % fina.resolve())
        return {}
@delayed
def read_h5_chunk(fina, ds_path, slice_args):
with h5py.File(fina, "r") as fh:
dat = fh[ds_path][slice(*slice_args)]
return dat
def dstore_to_darray(dstore):
fina = pathlib.Path(dstore["file_path"])
index = dask.array.from_delayed(
read_h5_chunk(fina, dstore["index_dsp"], [None]),
dstore["index_shape"],
dtype=np.dtype(dstore["index_dtype"]),
)
arrays = [
dask.array.from_delayed(
read_h5_chunk(fina, dstore["data_dsp"], tslice),
tshape,
dtype=np.dtype(dstore["data_dtype"]),
)
for tslice, tshape in zip(
dstore["data_chunks"]["slices"], dstore["data_chunks"]["shapes"]
)
]
data = dask.array.concatenate(arrays, axis=0)
return index, data
def parse_filelist(flist):
return dask.compute([parse_bs_h5_file(fina) for fina in flist])[0]
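# Usage sketch (editorial addition): expose one parsed channel as (index, data)
# dask arrays. The file list and channel name are caller-supplied assumptions.
def example_channel_arrays(flist, channel):
    for dstore in parse_filelist(flist):
        if channel in dstore:
            return dstore_to_darray(dstore[channel])
    return None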
def readScanEcoJson_v01(file_name_json, exclude_from_files=None):
p = pathlib.Path(file_name_json)
assert p.is_file(), "Input string does not describe a valid file path."
with p.open(mode="r") as f:
s = json.load(f)
assert len(s["scan_files"]) == len(
s["scan_values"]
), "number of files and scan values don't match in {}".format(file_name_json)
assert len(s["scan_files"]) == len(
s["scan_readbacks"]
), "number of files and scan readbacks don't match in {}".format(file_name_json)
for step in s["scan_files"]:
for sstr in exclude_from_files:
kill = []
for i, tf in enumerate(step):
if sstr in tf:
kill.append(i)
for k in kill[-1::-1]:
step.pop(k)
return s, p
def parseScanEcoV01(
file_name_json=None,
search_paths=["./", "./scan_data/", "../scan_data"],
memlimit_MB=100,
createEscArrays=True,
scan_info=None,
scan_info_filepath=None,
exclude_from_files=[],
checknstore_parsing_result=False,
clear_parsing_result=False,
return_json_info=False,
):
if file_name_json:
"""Data parser assuming eco-written files from pilot phase 1"""
s, scan_info_filepath = readScanEcoJson_v01(
file_name_json, exclude_from_files=exclude_from_files
)
else:
s = scan_info
# breakpoint()
if checknstore_parsing_result:
if checknstore_parsing_result == "same_directory":
parse_res_file = scan_info_filepath.parent.resolve() / Path(
scan_info_filepath.stem + "_parse_result.json"
)
elif checknstore_parsing_result == "work_directory":
tp = scan_info_filepath.parent.resolve()
while tp.stem not in ["res", "raw", "work"]:
tp = tp.parent
tp = tp.parent
parse_res_file = (
tp
/ Path("work/scan_info")
/ Path(scan_info_filepath.stem + "_parse_result.json")
)
            parse_res_file.parent.mkdir(parents=True, exist_ok=True)
else:
parse_res_file = (
Path(checknstore_parsing_result)
/ Path(".escape_parse_result")
/ Path(scan_info_filepath.stem + "_parse_result.json")
)
parse_res_file.parent.mkdir(parents=True, exist_ok=True)
if checknstore_parsing_result and Path(parse_res_file).exists():
with open(parse_res_file, "r") as fp:
dstores_flat = json.load(fp)
else:
dstores = []
for files_step in s["scan_files"]:
dstores_step = []
lastpath = None
searchpaths = None
for fina in files_step:
fp = pathlib.Path(fina)
fn = pathlib.Path(fp.name)
if not searchpaths:
searchpaths = [fp.parent] + [
scan_info_filepath.parent / pathlib.Path(tp.format(fp.parent.name))
for tp in search_paths
]
for path in searchpaths:
file_path = path / fn
if file_path.is_file():
if not lastpath:
lastpath = path
searchpaths.insert(0, path)
break
dstores_step.append(parse_bs_h5_file(file_path))
dstores.append(dstores_step)
with ProgressBar():
dstores = dask.compute(dstores, scheduler="processes")[0]
# flatten files in step
dstores_flat = []
for dstore in dstores:
tmp = {}
for i in dstore:
tmp.update(i)
dstores_flat.append(tmp)
if checknstore_parsing_result and dstores_flat:
with open(parse_res_file, "w") as fp:
json.dump(dstores_flat, fp)
    chs = set()
    for dstore in dstores_flat:
        chs |= set(dstore.keys())
# general scan info
parameter = {
parname: {"values": [], "attributes": {"Id": id_name}}
for parname, id_name in zip(
s["scan_parameters"]["name"], s["scan_parameters"]["Id"]
)
}
parameter.update(
{
f"{parname}_readback": {"values": [], "attributes": {"Id": id_name}}
for parname, id_name in zip(
s["scan_parameters"]["name"], s["scan_parameters"]["Id"]
)
}
)
parameter.update({"scan_step_info": {"values": []}})
escArrays = {}
for ch in chs:
arrays = []
s_sl = []
scan = []
tparameter = copy(parameter)
for stepNo, (scan_values, scan_readbacks, scan_step_info, dstore) in enumerate(
zip(
s["scan_values"], s["scan_readbacks"], s["scan_step_info"], dstores_flat
)
):
if ch not in dstore.keys():
continue
arrays.append(dstore_to_darray(dstore[ch]))
s_sl.append(len(arrays[-1][0]))
for par_name, value in zip(
parameter.keys(),
copy(scan_values) + copy(scan_readbacks) + [copy(scan_step_info)],
):
tparameter[par_name]["values"].append(value)
index_array = dask.array.concatenate([tr[0] for tr in arrays], axis=0).ravel()
data_array = dask.array.concatenate([tr[1] for tr in arrays], axis=0)
try:
escArrays[ch] = Array(
data=data_array,
index=index_array,
step_lengths=s_sl,
parameter=tparameter,
)
except Exception as e:
print(f"Could not create escape.Array for {ch};\nError: {str(e)}")
if return_json_info:
return escArrays, s
else:
return escArrays
# datasets_scan = []
# def get_datasets_from_files(n):
# lastpath = None
# searchpaths = None
# files = s["scan_files"][n]
# datasets = {}
# for n_file, f in enumerate(files):
# fp = pathlib.Path(f)
# fn = pathlib.Path(fp.name)
# if not searchpaths:
# searchpaths = [fp.parent] + [
# scan_info_filepath.parent / pathlib.Path(tp.format(fp.parent.name))
# for tp in search_paths
# ]
# for path in searchpaths:
# file_path = path / fn
# if file_path.is_file():
# if not lastpath:
# lastpath = path
# searchpaths.insert(0, path)
# break
# # assert file_path.is_file(), 'Could not find file {} '.format(fn)
# try:
# fh = h5py.File(file_path.resolve(), mode="r")
# datasets.update(utilities.findItemnamesGroups(fh, ["data", "pulse_id"]))
# logger.info("Successfully parsed file %s" % file_path.resolve())
# except:
# logger.warning(f"could not read {file_path.absolute().as_posix()}.")
# all_parses[n] = datasets
# ts = []
# for n in range(len(s["scan_files"])):
# ts.append(Thread(target=get_datasets_from_files, args=[n]))
# for t in ts:
# t.start()
# while len(all_parses) < len(s["scan_files"]):
# m = len(s["scan_files"])
# n = len(all_parses)
# files_bar.update(n - files_bar.n)
# # files_bar.update(n)
# sleep(0.01)
# for t in ts:
# t.join()
# # while not files_bar.n==files_bar.total:
# # sleep(.01)
# files_bar.update(files_bar.total - files_bar.n)
# # datasets_scan.append(datasets)
# names = set()
# dstores = {}
# # general scan info
# parameter = {
# parname: {"values": [], "attributes": {"Id": id_name}}
# for parname, id_name in zip(
# s["scan_parameters"]["name"], s["scan_parameters"]["Id"]
# )
# }
# parameter.update(
# {
# f"{parname}_readback": {"values": [], "attributes": {"Id": id_name}}
# for parname, id_name in zip(
# s["scan_parameters"]["name"], s["scan_parameters"]["Id"]
# )
# }
# )
# parameter.update({"scan_step_info": {"values": []}})
# for stepNo, (scan_values, scan_readbacks, scan_step_info) in enumerate(
# zip(s["scan_values"], s["scan_readbacks"], s["scan_step_info"])
# ):
# datasets = all_parses[stepNo]
# tnames = set(datasets.keys())
# newnames = tnames.difference(names)
# oldnames = names.intersection(tnames)
# for name in newnames:
# if datasets[name][0].size == 0:
# logger.debug(
# "Found empty dataset in {} in cycle {}".format(name, stepNo)
# )
# else:
# size_data = (
# np.dtype(datasets[name][0].dtype).itemsize
# * datasets[name][0].size
# / 1024 ** 2
# )
# size_element = (
# np.dtype(datasets[name][0].dtype).itemsize
# * np.prod(datasets[name][0].shape[1:])
# / 1024 ** 2
# )
# if datasets[name][0].chunks:
# chunk_size = list(datasets[name][0].chunks)
# else:
# chunk_size = list(datasets[name][0].shape)
# if chunk_size[0] == 1:
# chunk_size[0] = int(memlimit_mD_MB // size_element)
# dstores[name] = {}
# # ToDo: get rid of bad definition in eco scan! (readbacks are just added as values but not as names).
# dstores[name]["parameter"] = copy(parameter)
# for par_name, value in zip(
# parameter.keys(),
# copy(scan_values) + copy(scan_readbacks) + [copy(scan_step_info)],
# ):
# dstores[name]["parameter"][par_name]["values"].append(value)
# dstores[name]["data"] = []
# dstores[name]["data"].append(datasets[name][0])
# dstores[name]["data_chunks"] = chunk_size
# dstores[name]["eventIds"] = []
# dstores[name]["eventIds"].append(datasets[name][1])
# dstores[name]["stepLengths"] = []
# dstores[name]["stepLengths"].append(len(datasets[name][0]))
# names.add(name)
# for name in oldnames:
# if datasets[name][0].size == 0:
# logger.debug(
# "Found empty dataset in {} in cycle {}".format(name, stepNo)
# )
# elif not len(datasets[name][0].shape) == len(
# dstores[name]["data"][0].shape
# ):
# logger.debug("Found inconsistent dataset in {}".format(name))
# elif not datasets[name][0].shape[0] == datasets[name][1].shape[0]:
# logger.debug("Found inconsistent dataset in {}".format(name))
# else:
# for par_name, value in zip(
# parameter.keys(),
# copy(scan_values) + copy(scan_readbacks) + [copy(scan_step_info)],
# ):
# dstores[name]["parameter"][par_name]["values"].append(value)
# dstores[name]["data"].append(datasets[name][0])
# dstores[name]["eventIds"].append(datasets[name][1])
# dstores[name]["stepLengths"].append(len(datasets[name][0]))
# if createEscArrays:
# escArrays = {}
# containers = {}
# for name, dat in dstores.items():
# containers[name] = LazyContainer(dat)
# escArrays[name] = Array(
# containers[name].get_data,
# index=containers[name].get_eventIds,
# step_lengths=dat["stepLengths"],
# parameter=dat["parameter"],
# )
# return escArrays
# else:
# return dstores
class LazyContainer:
def __init__(self, dat):
self.dat = dat
def get_data(self, **kwargs):
return da.concatenate(
[
da.from_array(td, chunks=self.dat["data_chunks"])
for td in self.dat["data"]
]
)
def get_eventIds(self):
ids = {}
def getids(n, dset):
ids[n] = dset[...].ravel()
ts = [
Thread(target=getids, args=[n, td])
for n, td in enumerate(self.dat["eventIds"])
]
for t in ts:
t.start()
for t in ts:
t.join()
return np.concatenate([ids[n] for n in range(len(self.dat["eventIds"]))])
|
import logging
import time
import requests
def get_tweets(query, token, batch_size=100, **options):
"""
Yield all tweets matching query
:param query: query (string)
:param token: bearer token for academic api
:param batch_size: number of tweets per batch (max=100)
:return:
"""
data = {'query': query,
'expansions': "attachments.media_keys",
'media.fields': "url",
'tweet.fields': 'created_at',
'max_results': batch_size,
**options}
url = "https://api.twitter.com/2/tweets/search/all"
headers = {"Authorization": f"Bearer {token}"}
while True:
r = requests.get(url, params=data, headers=headers)
r.raise_for_status()
d = r.json()
yield parse_result(d)
        if 'next_token' not in d['meta']:
return
data['next_token'] = d['meta']['next_token']
time.sleep(1) # rate limit: 1 request per second
def parse_result(d: dict):
urls = {x['media_key']: x['url'] for x in d['includes']['media']}
for tweet in d['data']:
media = tweet['attachments']['media_keys']
        if len(media) != 1:
            raise Exception(f"Expected exactly one media attachment, got {len(media)}")
url = urls[media[0]]
yield {'created_at': tweet['created_at'],
'id': tweet['id'],
'text': tweet['text'],
'image': url}
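# Usage sketch (editorial addition; TWITTER_TOKEN is an assumed environment
# variable, and the query string is a placeholder).
if __name__ == '__main__':
    import os
    token = os.environ['TWITTER_TOKEN']
    for batch in get_tweets('#frogs has:images', token):
        for tweet in batch:
            print(tweet['created_at'], tweet['image'])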
|
from PIL import Image, ImageEnhance
img = Image.open("Sample3.jpeg")
img_con = ImageEnhance.Contrast(img)  # ImageEnhance has no 'Bri'; Contrast matches the intent below
img_con.enhance(2.0).show("100% more contrast")  # factor 2.0 doubles contrast; 1.0 would be a no-op
|
'''
Python reducer UDO definition
'''
def usqlml_main(df):
    # identity reducer: U-SQL's Python extension passes in a pandas DataFrame
    # and expects a DataFrame back; replace the body with real reduction logic
    return df
|
import pyperclip  # needed by copy_username below
from login import Locker
class Credential:
"""
Class that generates new instances of credential
"""
list_cred = []
list_credentials= []
def __init__(self,username,password,new_account,email):
'''
__init__ method that helps us define properties for our objects.
'''
self.username = username
self.password = password
self.new_account = new_account
self.email = email
# def save_cred(self):
# '''
# to save all credentials
# ''''
# Credential.
def keeped(self):
'''
        saves this credential into list_cred
'''
Credential.list_cred.append(self)
def remove(self):
'''
        removes this credential from list_cred
'''
Credential.list_cred.remove(self)
@classmethod
def find_username(cls,username):
'''
        Method that takes a username and returns the credential that matches it.
        Args:
            username: username to search for
        Returns:
            the credential whose username matches, or None
'''
for credentials in cls.list_cred:
if credentials.username == username:
return credentials
@classmethod
def cred_exists(cls,username):
'''
        Method that checks whether a credential with the given username exists in the list.
Args:
name to search if it exists
Returns :
Boolean: True or false depending if the our object exists
'''
for credentials in cls.list_cred:
if credentials.username == username:
return True
return False
@classmethod
def excute(cls,username):
'''
        method that returns all credentials stored under the given username
'''
list_credentials= []
for credentials in cls.list_cred:
if credentials.username == username:
list_credentials.append(credentials)
return list_credentials
@classmethod
def copy_username(cls,username):
checking = Credential.find_username(username)
pyperclip.copy(checking.username)
@classmethod
def checking(cls,email,password):
login_user=''
for login in Locker.locker_list:
if(login.email== email and login.password==password):
login_user=login.email
return login_user
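# Usage sketch (editorial addition, placeholder values): store and look up a credential.
if __name__ == '__main__':
    cred = Credential('johndoe', 's3cret', 'twitter', 'john@example.com')
    cred.keeped()
    assert Credential.cred_exists('johndoe')
    print(Credential.find_username('johndoe').email)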
|
class Employee:
def __init__(self, eid, did, name, money):
self.did = did
self.eid = eid
self.name = name
self.money = money
list_employees = [Employee(1001, 9002, "师父", 60000),
Employee(1002, 9001, "孙悟空", 50000),
Employee(1003, 9002, "猪八戒", 20000),
Employee(1004, 9001, "沙僧", 30000),
Employee(1005, 9001, "小白龙", 15000)]
def edis(n):
    # truthy only when the employee has both an id and a salary (currently unused)
    return n.eid and n.money
def select(items, selector):
    # yield selector(item) for every item whose selected value is truthy
    for item in items:
        value = selector(item)
        if value:
            yield value
for name in select(list_employees, lambda g: g.name):
print(name)
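# Another usage sketch (editorial addition): yield employee ids above a salary cutoff.
for eid in select(list_employees, lambda e: e.eid if e.money >= 30000 else None):
    print(eid)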
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from matplotlib.collections import LineCollection
import seaborn as sns
# Download data shapes from:
# http://www.gadm.org/download
# More details on how to read the .shp files
# https://gis.stackexchange.com/questions/113799/how-to-read-a-shapefile-in-python
# Other datasets:
# https://www.townlands.ie/page/download/
KM = 1000.
clat = 53.5
clon = -8
wid = 500. * KM
hgt = 500. * KM
lon1 = -11.146
lon2 = -5.2992
lat1 = 51.242709
lat2 = 55.456703
fig = plt.figure()
ax = fig.add_subplot(111)
m = Basemap(width=wid, height=hgt, ax=ax,
area_thresh=2500., projection='lcc',
lat_0=clat, lon_0=clon)
# This contains the country border
data_shape = 'IRL_adm_shp/IRL_adm0'
shp_info = m.readshapefile(data_shape, 'country',
drawbounds=True, color='k')
# This contains the counties
data_shape1 = 'IRL_adm_shp/IRL_adm1'
#data_shape2 = 'counties/counties'
shp_info = m.readshapefile(data_shape1, 'counties',
drawbounds=True, color='lightgrey')
import shapefile
sf = shapefile.Reader(data_shape1)
# https://stackoverflow.com/questions/15968762/shapefile-and-matplotlib-plot-polygon-collection-of-shapefile-coordinates
import matplotlib.patches as patches
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
recs = sf.records()
shapes = sf.shapes()
Nshp = len(shapes)
cns = []
for nshp in range(Nshp):  # xrange is Python 2 only
    cns.append(recs[nshp][1])
cns = np.array(cns)
cm = plt.get_cmap('Dark2')
cccol = cm(1.*np.arange(Nshp)/Nshp)
for nshp in range(Nshp):
    ptchs = []
    pts = np.array(shapes[nshp].points)
    x, y = m(pts[:, 0], pts[:, 1])
    pts = np.array(list(zip(x, y)))  # zip returns an iterator on Python 3
    prt = shapes[nshp].parts
    par = list(prt) + [pts.shape[0]]
    for pij in range(len(prt)):
        ptchs.append(Polygon(pts[par[pij]:par[pij + 1]]))
    ax.add_collection(PatchCollection(ptchs, facecolor=cccol[nshp, :], edgecolor='k', linewidths=.1))
fig.savefig('map1.png')
####### CITIES ########
# Cities, etc
## http://www.naturalearthdata.com/downloads/10m-cultural-vectors/
#
#cities_file = 'populated/ne_10m_populated_places_simple.shp'
#
#sf = shapefile.Reader(cities_file)
##grab the shapefile's field names (omit the first pseudo field)
#fields = [x[0] for x in sf.fields][1:]
#records = sf.records()
#shps = [s.points for s in sf.shapes()]
#
##write the records into a dataframe
#shapefile_dataframe = pd.DataFrame(columns=fields, data=records)
#
##add the coordinate data to a column called "coords"
#shapefile_dataframe = shapefile_dataframe.assign(coords=shps)
#
#df = shapefile_dataframe
#
#def cond(x):
# cond1 = x[0][0] < -5.5
# cond2 = x[0][0] > -11.
# cond3 = x[0][1] < 56.
# cond4 = x[0][1] > 54.
# return cond1*cond2*cond3*cond4
#
#cond_ir = df['coords'].apply(cond).astype(bool)
#
#df = df[cond_ir]
#
#df['x'] = df['coords'].apply(lambda x:x[0][0])
#df['y'] = df['coords'].apply(lambda x:x[0][1])
#
#x, y = m(df['x'].values, df['y'].values)
##print x, y
#m.plot(x,y,'bo')
# Other tests:
#fields = sf.fields
#records = sf.records()
#
#def draw_screen_poly( lats, lons, m, ax, i):
# x, y = m( lons, lats )
# xy = zip(x,y)
# poly = Polygon( xy, facecolor=palette[i], alpha=0.4 )
# ax.add_patch(poly)
# return ax, poly, xy
#
#palette = sns.color_palette(None, len(sf.shapes()))
#
# #for i, shape in enumerate(sf.shapes()):
## points = shape.points
## p = np.array(points).T
## ax, poly, xy = draw_screen_poly(p[1], p[0], m, ax, i)
|
import unittest
from katas.kyu_7.pythons_dynamic_classes_1 import class_name_changer
class ClassNameChangerTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(str(MyClass),
'<class \'tests.kyu_7_tests.test_pythons_dynamic_c'
'lasses_1.MyClass\'>')
def test_equals_2(self):
class_name_changer(MyClass, 'UsefulClass')
self.assertEqual(str(MyClass),
'<class \'tests.kyu_7_tests.test_pythons_dynamic_c'
'lasses_1.UsefulClass\'>')
def test_equals_3(self):
class_name_changer(MyClass, 'SecondUsefulClass')
self.assertEqual(str(MyClass),
'<class \'tests.kyu_7_tests.test_pythons_dynamic_c'
'lasses_1.SecondUsefulClass\'>')
def test_raises_1(self):
self.assertRaises(NameError, class_name_changer, MyClass, 'bad_name')
def test_raises_2(self):
self.assertRaises(NameError, class_name_changer, MyClass, '!@#$%%^')
def test_my_class(self):
self.assertIsInstance(MyClass().__str__(), str)
class MyClass(object):
def __str__(self):
return str(type(self))
|
#!/usr/bin/env python
import rospy
import math
import tf
from std_msgs.msg import Bool
from geometry_msgs.msg import Point
from geometry_msgs.msg import Pose
from robomuse_gazebo.srv import *
def callback2(msg):
global pos
pos = msg
def callback1(msg):
global state
state = msg.data
def move_arm_client(pn):
global moving
rospy.wait_for_service('move_arm')
try:
pose = Pose()
moving = True
pose.position.x = pn
ser = rospy.ServiceProxy('move_arm', MoveArm)
resp1 = ser(pose)
return resp1.result
except rospy.ServiceException as e:
print("Service call failed: %s"%e)
if __name__ == '__main__':
global moved, state, pos, moving
moved = False
moving = False
state = False
pos = Point()
pn = [2, 1, 3]
cnt = 0
rospy.init_node('screen_tf_listener')
rospy.Subscriber("/robot/processed_image",Bool,callback1)
rospy.Subscriber("/robot/processed_image_pos",Point,callback2)
listener = tf.TransformListener()
while not rospy.is_shutdown():
if state and not moved:
moved = True
try:
tp, quat = listener.lookupTransform('arm_base_link', 'display_screen_link', rospy.Time(0))
print("moved to pos:")
goal = [tp[0]+pos.x, tp[1]+pos.y, tp[2]+pos.z]
print(pn[cnt])
move_arm_client(pn[cnt])
cnt+=1
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
elif moved and not state:
moved = False
print("moved Back")
move_arm_client(0)
print(0)
        if cnt == len(pn): exit()  # stop once every target position has been used
|
"""Tests that the LEDs are working, by writing to the appropriate GPIO pins"""
from periphery import GPIO # pylint: disable=W0403
IN = "in"
OUT = "out"
HIGH = "high"
LOW = "low"
CCP_OK = GPIO(pin=13, direction=OUT)
IED_OK = GPIO(pin=12, direction=OUT)
FAULT = GPIO(pin=11, direction=OUT)
CCP_DATA_TX = GPIO(pin=25, direction=OUT)
CCP_DATA_RX = GPIO(pin=24, direction=OUT)
IED_DATA_TX = GPIO(pin=5, direction=OUT)
IED_DATA_RX = GPIO(pin=4, direction=OUT)
def test_ccp_ok(level, logging):
"""Tests that the CCP OK LED can be turned on"""
#return _test(CCP_OK, level, logging)
return _write(CCP_OK, level, logging)
def test_ied_ok(level, logging):
"""Tests that the IED OK LED can be turned on"""
#return _test(IED_OK, level, logging)
return _write(IED_OK, level, logging)
def test_fault(level, logging):
"""Tests that the Fault LED can be turned on"""
#return _test(FAULT, level, logging)
return _write(FAULT, level, logging)
def test_ccp_data_tx(level, logging):
"""Tests that the CCP Data Tx (transmit) LED can be turned on"""
return _test(CCP_DATA_TX, level, logging)
def test_ccp_data_rx(level, logging):
"""Tests that the CCP Data Rx (receive) LED can be turned on"""
return _test(CCP_DATA_RX, level, logging)
def test_ied_data_tx(level, logging):
"""Tests that the IED Data Tx (transmit) LED can be turned on"""
return _test(IED_DATA_TX, level, logging)
def test_ied_data_rx(level, logging):
"""Tests that the IED Data Rx (receive) LED can be turned on"""
return _test(IED_DATA_RX, level, logging)
def _write(pin, level, logging):
try:
pin.write(level)
logging.debug("Success")
return True
except Exception as ex:
logging.error(ex)
return False
def _test(pin, level, logging):
try:
pin.write(level)
logging.debug("Value written")
val = pin.read()
        if val == level:  # compare by value; identity ('is') is unreliable for this check
            logging.debug("Value read is the same as the one written")
            return True
        else:
            message = "Value read is different to the one written: " + str(val) + " != " + str(level)
            logging.fatal(message)
return False
except Exception as ex:
logging.fatal(ex)
return False |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 3 20:12:48 2021
@author: ShiningStone
"""
import numpy as np
class CCoreCuda:
def __init__(self):
self.cp = self.getCupy()
self.DEBUG = False
self.memPool = self.cp.get_default_memory_pool()#self.cp.cuda.MemoryPool(self.cp.cuda.malloc_managed)
# self.cp.cuda.set_allocator(self.memPool.malloc)
# mempool = self.cp.get_default_memory_pool()
# mempool.set_limit(size=10.5*1024**3)
def getCupy(self):
import cupy as cp
return cp
def calCovariance(self,x,y):
        '''
        calculate the covariance of two matrices
        x: left matrix
        y: right matrix
        if x and y are 1-D vectors, reshape them to (len(vector), 1) before calling
        '''
if isinstance(x, np.ndarray):
x = self.cp.asarray(x)
if isinstance(y, np.ndarray):
y = self.cp.asarray(y)
if self.DEBUG:
mempool = self.memPool
pinned_mempool = self.cp.get_default_pinned_memory_pool()
print(mempool.get_limit())
print(mempool.used_bytes()) # 0
print(mempool.total_bytes()) # 0
print(pinned_mempool.n_free_blocks()) # 0
temp = self.cp.matmul(x.T,y)
self.cp.cuda.Stream.null.synchronize()
if self.cp.cuda.runtime.getDeviceCount() == 1:
out = self.cp.asnumpy(temp)
else:
with self.cp.cuda.Device(1):
out = self.cp.array(temp)
del x
del y
del temp
self.memPool.free_all_blocks()
return out
def calSelfCovariance(self,x):
        '''
        calculate the covariance of a matrix with itself (x.T @ x)
        x: input matrix
        if x is a 1-D vector, reshape it to (len(vector), 1) before calling
        '''
if isinstance(x, np.ndarray):
x = self.cp.asarray(x)
if self.DEBUG:
mempool = self.memPool
pinned_mempool = self.cp.get_default_pinned_memory_pool()
print(mempool.get_limit())
print(mempool.used_bytes()) # 0
print(mempool.total_bytes()) # 0
print(pinned_mempool.n_free_blocks()) # 0
temp = self.cp.matmul(x.T,x)
self.cp.cuda.Stream.null.synchronize()
out = self.cp.asnumpy(temp)
del x
del temp
self.memPool.free_all_blocks()
return out |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides Spinnaker interactions (using http requests).
This module is intended to provide a base class supported
by specializations for the individual subsystems.
(e.g. gate.py)
To talk to spinnaker, we make HTTP calls (via SpinnakerAgent abstraction).
To talk to GCE we use the GcpAgent.
In order to talk to spinnaker, it must have network access. If you are
running outside the project (e.g. on a laptop) then you'll probably need
to create an ssh tunnel into the spinnaker VM because the ports are not
exposed by default.
Rather than setting up this tunnel yourself, the test will set up the
tunnel itself. This not only guarantees the tunnel is available, but
also ensures that the tunnel is in fact going to the instance being tested
as opposed to some other stray tunnel. Furthermore, the test will use
a unique local port so running this test will not interfere with other
accesses to spinnaker.
When using ssh, you must provide ssh a passphrase for the credentials.
You can either run eval `ssh-agent -s` > /dev/null then ssh-add with the
credentials, or you can create a file that contains the passphrase and
pass the file with --ssh_passphrase_file. If you create a file, chmod 400
it to keep it safe.
If spinnaker is reachable without tunnelling then it will talk directly to
it. This is determined by looking up the IP address of the instance, and
trying to connect to gate directly.
In short, run the test with the spinnaker instance project/zone/instance
name and it will figure out the rest of the configuration needed. To do so,
you will also need to provide it ssh credentials, either implicitly by
running ssh-agent, or explicitly by giving it a passphrase (via file for
security).
"""
# Standard python modules.
import base64
import logging
import os
import os.path
import re
import sys
import tarfile
from json import JSONDecoder
from io import BytesIO
import citest.gcp_testing.gce_util as gce_util
import citest.service_testing as service_testing
import citest.gcp_testing as gcp
from citest.base import JournalLogger
import spinnaker_testing.yaml_accumulator as yaml_accumulator
from spinnaker_testing.expression_dict import ExpressionDict
from .scrape_spring_config import scrape_spring_config
def name_value_to_dict(content):
"""Converts a list of name=value pairs to a dictionary.
Args:
content: [string] A list of name=value pairs with one per line.
This is blank lines ignored, as is anything to right of '#'.
"""
result = {}
for match in re.finditer('^([A-Za-z_][A-Za-z0-9_]*) *= *([^#]*)',
content, re.MULTILINE):
result[match.group(1)] = match.group(2).strip()
return result
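# Example (editorial addition):
#   name_value_to_dict('a = 1  # one\nb=two\n') == {'a': '1', 'b': 'two'}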
class SpinnakerStatus(service_testing.HttpOperationStatus):
"""Provides access to Spinnaker's asynchronous task status.
This class can be used to track an asynchronous task status.
It can wait until the task completes, and provide current status state
from its bound reference.
This instance must explicitly refresh() in order to update its value.
It will only poll the server within refresh().
"""
@property
def current_state(self):
"""The value of the JSON "state" field, or None if not known."""
return self.__current_state
@current_state.setter
def current_state(self, state):
"""Updates the current state."""
self.__current_state = state
@property
def error(self):
"""Returns the error, if any."""
return self.__error
def _bind_error(self, error):
"""Sets the error, if any."""
self.__error = error
@property
def exception_details(self):
"""The exceptions clause from the detail if the task status is an error."""
return self.__exception_details
def _bind_exception_details(self, details):
"""Sets the exception details."""
self.__exception_details = details
@property
def id(self):
"""The underlying request ID."""
return self.__request_id
def _bind_id(self, request_id):
"""Bind the request id.
Args:
request_id: [string] The request ID is obtained in the subsystem
response.
"""
self.__request_id = request_id
@property
def detail_path(self):
return self.__detail_path
@property
def detail_doc(self):
return self.__json_doc
def _bind_detail_path(self, path):
"""Bind the detail path."""
self.__detail_path = path
def export_to_json_snapshot(self, snapshot, entity):
super(SpinnakerStatus, self).export_to_json_snapshot(snapshot, entity)
snapshot.edge_builder.make_output(entity, 'Status Detail', self.__json_doc,
format='json')
def __init__(self, operation, original_response=None):
"""Initialize status tracker.
Args:
operation: [AgentOperation] The operation returning the status.
original_response: [HttpResponseType] Contains JSON identifier object
returned from the Spinnaker request to track. This can be none to
indicate an error making the original request.
"""
super(SpinnakerStatus, self).__init__(operation, original_response)
if original_response is not None:
# The request ID is typically the response payload.
self.__request_id = original_response.output
self.__current_state = None # Last known state (after last refresh()).
self.__detail_path = None # The URL path on spinnaker for this status.
self.__exception_details = None
self.__error = None
self.__json_doc = None
if not original_response or original_response.http_code is None:
self.__current_state = 'REQUEST_FAILED'
return
def __str__(self):
"""Convert status to string"""
return ('id={id} current_state={current}'
' error=[{error}] detail=[{detail}]').format(
id=self.id, current=self.__current_state,
error=self.error, detail=self.detail)
def refresh(self):
"""Refresh the status with the current data from spinnaker."""
if self.finished:
return
http_response = self.agent.get(self.detail_path)
try:
self.set_http_response(http_response)
except BaseException as bex:
# TODO(ewiseblatt): 20160122
# This is temporary to help track down a transient error.
      # Normally we don't want to do this because we want to scrub the output.
sys.stderr.write('Bad response from agent={0}\n'
'CAUGHT {1}\nRESPONSE: {2}\n'
.format(self.agent, bex, http_response))
raise
def set_http_response(self, http_response):
"""Updates specialized fields from http_response.
Args:
http_response: [HttpResponseType] From the last status update.
"""
# super(SpinnakerStatus, self).set_http_response(http_response)
if http_response.http_code is None:
self.__current_state = 'Unknown'
return
decoder = JSONDecoder()
self.__json_doc = decoder.decode(http_response.output)
self._update_response_from_json(self.__json_doc)
def _update_response_from_json(self, doc):
"""Updates abstract SpinnakerStatus attributes.
This is called by the base class.
Args:
doc: [dict] JSON document object from response payload.
"""
# pylint: disable=unused-argument
raise Exception("_update_response_from_json is not specialized.")
# helper for producing snapshot json
# maps relation to priority when determining outermost from list
# We treat None (not started) higher than valid.
_RELATION_SCORE = {
'VALID': 1,
None: 2,
'INVALID': 3,
'ERROR': 4,
}
# Inversion of _RELATION_SCORE
_SCORE_TO_RELATION = {value: key for key, value in _RELATION_SCORE.items()}
# Convert standard spinnaker status to citest journal relation qualifier
# names.
_STATUS_TO_RELATION = {
'SUCCEEDED': 'VALID',
'TERMINAL': 'INVALID',
'NOT_STARTED': None,
}
def _export_status(self, info, builder, entity):
"""Helper function for writing spinnaker status into SnapshotableEntity.
Args:
info [dict]: The spinnaker Status schema with a 'status' attribute.
Has no effect if there is no "status" attribute.
Returns:
The spinnaker status attribute value.
"""
status = info.get('status')
if not status:
return None
builder.make(entity, 'Status', status,
relation=self._STATUS_TO_RELATION.get(status))
return status
def _export_time_info(self, info, base_time, builder, entity):
"""Helper function to write Status entry timing info to SnapshotableEntity.
This writes a timestamp offset and delta where the offset is relative to
an absolute base_time. The delta are the being/end time in the info.
Args:
info [dict]: The spinnaker Status schema with start/endTime attributes.
There start/endTime attributes are optional.
base_time [int]: The base timestamp (in ms) for timestamp offset.
"""
start_time = info.get('startTime')
if start_time is None:
return
offset = (start_time - base_time) / 1000.0
end_time = info.get('endTime')
if not end_time:
builder.make_data(entity, 'Time', 'Running since {0}'.format(offset))
else:
builder.make_data(
entity, 'Time',
'{0} secs + {1}'.format(offset, (end_time - start_time) / 1000.0))
def _export_error_info(self, container, builder, entity):
"""Helper function to write Status entry error info to SnapshotableEntity.
Has no effect if errors could not be found in the container.
Args:
container [dict]: Excerpt from Status response to look for errors.
"""
message = []
if 'error' in container:
message.append(container['error'])
details = container.get('details', {})
error_list = details.get('errors', [])
if error_list:
message.extend(['* ' + str(err) for err in error_list])
if not message:
return
builder.make(entity, 'Error(s)',
'\n'.join(message), relation='ERROR', format='pre')
class SpinnakerAgent(service_testing.HttpAgent):
"""A BaseAgent to a spinnaker subsystem.
The agent supports POST using the standard spinnaker subsystem protocol
of returning status references and obtaining details through followup GETs.
Class instances should be created using one of the new* factory methods.
"""
@classmethod
def __determine_host_platform(cls, bindings):
"""Helper function to determine the platform spinnaker is hosted on.
This is used while figuring out how to connect to the instance.
"""
host_platform = bindings.get('HOST_PLATFORM', None)
if not host_platform:
if bindings['GCE_PROJECT']:
host_platform = 'gce'
elif bindings['NATIVE_HOSTNAME']:
host_platform = 'native'
else:
bindings['NATIVE_HOSTNAME'] = 'localhost'
logging.getLogger(__name__).info('Assuming --native_hostname=localhost')
host_platform = 'native'
return host_platform
@classmethod
def __determine_native_base_url(cls, bindings, default_port):
"""Helper function to determine a native host platform's base URL.
The returned base URL may be None if bindings['NATIVE_BASE_URL'] and
bindings['NATIVE_HOSTNAME'] are both missing.
Args:
bindings: [dict] List of bindings to configure the endpoint
NATIVE_BASE_URL: The base URL to use, if given. If NATIVE_BASE_URL
is not provided, the URL will be constructed using NATIVE_HOSTNAME
and NATIVE_PORT using http.
NATIVE_HOSTNAME: The host of the base URL to use, if NATIVE_BASE_URL
is not given.
NATIVE_PORT: The port of the base URL to use, if NATIVE_BASE_URL
is not given.
default_port: The port to use if bindings['NATIVE_BASE_URL'] is not given
and bindings['NATIVE_PORT'] is not given.
"""
base_url = None
if bindings['NATIVE_BASE_URL']:
base_url = bindings['NATIVE_BASE_URL']
elif bindings['NATIVE_HOSTNAME']:
base_url = 'http://{host}:{port}'.format(
host=bindings['NATIVE_HOSTNAME'],
port=bindings['NATIVE_PORT'] or default_port)
return base_url
@classmethod
def new_instance_from_bindings(cls, name, status_factory, bindings, port):
"""Create a new Spinnaker HttpAgent talking to the specified server port.
Args:
name:[string] The name of agent we are creating for reporting only.
status_factory: [SpinnakerStatus (SpinnakerAgent, HttpResponseType)]
Factory method for creating specialized SpinnakerStatus instances.
bindings: [dict] Specify how to connect to the server.
The actual parameters used depend on the hosting platform.
The hosting platform is specified with 'host_platform'
port: [int] The port of the endpoint we want to connect to.
Returns:
A SpinnakerAgent connected to the specified instance port.
"""
host_platform = cls.__determine_host_platform(bindings)
if host_platform == 'native':
base_url = cls.__determine_native_base_url(bindings, port)
return cls.new_native_instance(
name, status_factory=status_factory, base_url=base_url, bindings=bindings)
if host_platform == 'gce':
return cls.new_gce_instance_from_bindings(
name, status_factory, bindings, port)
raise ValueError('Unknown host_platform={0}'.format(host_platform))
@classmethod
def new_gce_instance_from_bindings(
cls, name, status_factory, bindings, port):
"""Create a new Spinnaker HttpAgent talking to the specified server port.
Args:
      name: [string] The name of the agent we are creating, for reporting only.
status_factory: [SpinnakerStatus (SpinnakerAgent, HttpResponseType)]
Factory method for creating specialized SpinnakerStatus instances.
      bindings: [dict] Dictionary of bindings used to configure the endpoint
GCE_PROJECT: The GCE project ID that the endpoint is in.
GCE_ZONE: The GCE zone that the endpoint is in.
GCE_INSTANCE: The GCE instance that the endpoint is in.
GCE_SSH_PASSPHRASE_FILE: If not empty, the SSH passphrase key
for tunneling if needed to connect through a GCE firewall.
GCE_SERVICE_ACCOUNT: If not empty, the GCE service account to use
when interacting with the GCE instance.
IGNORE_SSL_CERT_VERIFICATION: If True, ignores SSL certificate
verification when scraping spring config.
port: [int] The port of the endpoint we want to connect to.
Returns:
A SpinnakerAgent connected to the specified instance port.
"""
project = bindings['GCE_PROJECT']
zone = bindings['GCE_ZONE']
instance = bindings['GCE_INSTANCE']
ssh_passphrase_file = bindings.get('GCE_SSH_PASSPHRASE_FILE', None)
service_account = bindings.get('GCE_SERVICE_ACCOUNT', None)
ignore_ssl_cert_verification = bindings['IGNORE_SSL_CERT_VERIFICATION']
logger = logging.getLogger(__name__)
JournalLogger.begin_context('Locating {0}...'.format(name))
context_relation = 'ERROR'
try:
gcloud = gcp.GCloudAgent(
project=project, zone=zone, service_account=service_account,
ssh_passphrase_file=ssh_passphrase_file)
netloc = gce_util.establish_network_connectivity(
gcloud=gcloud, instance=instance, target_port=port)
if not netloc:
error = 'Could not locate {0}.'.format(name)
logger.error(error)
context_relation = 'INVALID'
raise RuntimeError(error)
protocol = bindings['NETWORK_PROTOCOL']
base_url = '{protocol}://{netloc}'.format(protocol=protocol,
netloc=netloc)
logger.info('%s is available at %s. Using %s', name, netloc, base_url)
deployed_config = scrape_spring_config(
os.path.join(base_url, 'resolvedEnv'),
ignore_ssl_cert_verification=ignore_ssl_cert_verification)
JournalLogger.journal_or_log_detail(
'{0} configuration'.format(name), deployed_config)
spinnaker_agent = cls(base_url, status_factory)
spinnaker_agent.__deployed_config = deployed_config
context_relation = 'VALID'
except:
logger.exception('Failed to create spinnaker agent.')
raise
finally:
JournalLogger.end_context(relation=context_relation)
return spinnaker_agent
@classmethod
def new_native_instance(cls, name, status_factory, base_url, bindings):
"""Create a new Spinnaker HttpAgent talking to the specified server port.
Args:
      name: [string] The name of the agent we are creating, for reporting only.
status_factory: [SpinnakerStatus (SpinnakerAgent, HttpResponseType)]
Factory method for creating specialized SpinnakerStatus instances.
base_url: [string] The service base URL to send messages to.
      bindings: [dict] Dictionary of bindings used to configure the endpoint
BEARER_AUTH_TOKEN: The token used to authenticate request to a
protected host.
IGNORE_SSL_CERT_VERIFICATION: If True, ignores SSL certificate
verification when making requests.
Returns:
A SpinnakerAgent connected to the specified instance port.
"""
bearer_auth_token = bindings.get('BEARER_AUTH_TOKEN', None)
ignore_ssl_cert_verification = bindings['IGNORE_SSL_CERT_VERIFICATION']
logger = logging.getLogger(__name__)
logger.info('Locating %s...', name)
if not base_url:
logger.error('Could not locate %s.', name)
return None
logger.info('%s is available at %s', name, base_url)
env_url = os.path.join(base_url, 'resolvedEnv')
headers = {}
if bearer_auth_token:
headers['Authorization'] = 'Bearer {}'.format(bearer_auth_token)
    deployed_config = scrape_spring_config(
        env_url, headers=headers,
        ignore_ssl_cert_verification=ignore_ssl_cert_verification)
JournalLogger.journal_or_log_detail(
'{0} configuration'.format(name), deployed_config)
spinnaker_agent = cls(base_url, status_factory)
spinnaker_agent.ignore_ssl_cert_verification = ignore_ssl_cert_verification
spinnaker_agent.__deployed_config = deployed_config
if bearer_auth_token:
spinnaker_agent.add_header('Authorization', 'Bearer {}'.format(bearer_auth_token))
return spinnaker_agent
@property
def deployed_config(self):
"""The configuration dictionary gleaned from the deployed service."""
return self.__deployed_config
@property
def runtime_config(self):
"""Confguration dictionary approxmation from static config files.
This might not be available at all, depending on how we can access the
service. This does not consider how the service was actually invoked
so may be incomplete or wrong. However it is probably close enough for
our needs, and certainly close enough to locate the service to obtain
the actual |deploy_config| data.
"""
return self.config_dict
def __init__(self, base_url, status_factory):
"""Construct a an agent for talking to spinnaker.
This could really be any spinnaker subsystem, not just the master process.
The important consideration is that the protocol for this server is that
posting requests returns a reference url for status updates, and accepts
GET requests on those urls to return status details.
Args:
base_url: [string] The base URL string spinnaker is running on.
status_factory: [SpinnakerStatus (SpinnakerAgent, HttpResponseType)]
Factory method for creating specialized SpinnakerStatus instances.
"""
super(SpinnakerAgent, self).__init__(base_url)
self.__deployed_config = {}
self.__default_status_factory = status_factory
    # 6 minutes is a long time, but starting VMs can take 2-3 mins,
    # especially with internal polling, so platform sluggishness combined
    # with a missed poll can go higher. We still don't expect to come
    # near this, but care more about eventual correctness than timeliness
    # here. We can capture timing information and look at it after the fact
    # to draw performance-related conclusions.
self.default_max_wait_secs = 600
def _new_messaging_status(self, operation, http_response):
"""Implements HttpAgent interface."""
return (operation.status_class(operation, http_response)
if operation.status_class
else self.__default_status_factory(operation, http_response))
@staticmethod
def __get_deployed_local_yaml_bindings(gcloud, instance):
"""Return the contents of the spinnaker-local.yml configuration file.
Args:
gcloud: [GCloudAgent] Specifies project and zone.
Capable of remote fetching if needed.
instance: [string] The GCE instance name containing the deployment.
Returns:
None or the configuration file contents.
"""
config_dict = ExpressionDict()
logger = logging.getLogger(__name__)
if gce_util.am_i(gcloud.project, gcloud.zone, instance):
yaml_file = os.path.expanduser('~/.spinnaker/spinnaker-local.yml')
logger.debug('We are the instance. Config from %s', yaml_file)
if not os.path.exists(yaml_file):
logger.debug('%s does not exist', yaml_file)
return None
try:
yaml_accumulator.load_path(yaml_file, config_dict)
return config_dict
except IOError as ex:
logger.error('Failed to load from %s: %s', yaml_file, ex)
return None
logger.debug('Load spinnaker-local.yml from instance %s', instance)
# If this is a production installation, look in:
# /home/spinnaker/.spinnaker
# or /opt/spinnaker/config
# or /etc/default/spinnaker (name/value)
# Otherwise look in ~/.spinnaker for a development installation.
# pylint: disable=bad-continuation
response = gcloud.remote_command(
instance,
'LIST=""'
'; for i in /etc/default/spinnaker'
' /home/spinnaker/.spinnaker/spinnaker-local.yml'
' /opt/spinnaker/config/spinnaker-local.yml'
' $HOME/.spinnaker/spinnaker-local.yml'
'; do'
' if sudo stat $i >& /dev/null; then'
' LIST="$LIST $i"'
'; fi'
'; done'
# tar emits warnings about the absolute paths, so we'll filter them out
# We need to base64 the binary results so we return text.
'; (sudo tar czf - $LIST 2> /dev/null | base64)')
if not response.ok():
logger.error(
'Could not determine configuration:\n%s', response.error)
return None
# gcloud prints an info message about upgrades to the output stream.
# There seems to be no way to suppress this!
# Look for it and truncate the stream there if we see it.
got = response.output
update_msg_offset = got.find('Updates are available')
    if update_msg_offset >= 0:
got = got[0:update_msg_offset]
# When we ssh in, there may be a message written warning us that the host
# was added to known hosts. If so, this will be the first line. Remove it.
eoln = got.find('\n')
if eoln > 0 and re.match('^Warning: .+$', got[0:eoln]):
got = got[eoln + 1:]
if not got:
return None
tar = tarfile.open(mode='r', fileobj=BytesIO(base64.b64decode(got)))
try:
entry = tar.extractfile('etc/default/spinnaker')
except KeyError:
pass
else:
logger.info('Importing configuration from /etc/default/spinnaker')
config_dict.update(name_value_to_dict(entry.read()))
file_list = ['home/spinnaker/.spinnaker/spinnaker-local.yml',
'opt/spinnaker/config/spinnaker-local.yml']
log_name = os.environ.get('LOGNAME')
if log_name is not None:
file_list.append(os.path.join('home', log_name,
'.spinnaker/spinnaker-local.yml'))
for member in file_list:
try:
entry = tar.extractfile(member)
except KeyError:
continue
logger.info('Importing configuration from %s', member)
yaml_accumulator.load_string(entry.read(), config_dict)
return config_dict
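# Hedged usage sketch: wiring the factory methods above together for a
# locally running service. The binding values and the SpinnakerStatus
# factory name are hypothetical placeholders, not values mandated by this
# module.
#
#   bindings = {'HOST_PLATFORM': None, 'GCE_PROJECT': '',
#               'NATIVE_BASE_URL': '', 'NATIVE_HOSTNAME': 'localhost',
#               'NATIVE_PORT': 8084, 'IGNORE_SSL_CERT_VERIFICATION': False}
#   agent = SpinnakerAgent.new_instance_from_bindings(
#       'gate', SpinnakerStatus, bindings, 8084)
#   print(agent.deployed_config)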
|
# code for streaming twitter to a mysql db
# written for Python 3; supports emoji characters (utf8mb4)
# based on the Python 2 code
# supplied by http://pythonprogramming.net/twitter-api-streaming-tweets-python-tutorial/
# for further information on how to use python 3, twitter's api, and
# mysql together visit: http://miningthedetails.com/blog/python/TwitterStreamsPythonMySQL/
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
import mysql.connector
from mysql.connector import errorcode
import time
import json
# set up connection to db
# make sure to set charset to 'utf8mb4' to support emoji
cnx = mysql.connector.connect(user='root', password='',
host='localhost',
database='dbname',
charset = 'utf8mb4')
cursor=cnx.cursor()
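# The table itself must also use utf8mb4 or emoji inserts will still fail.
# A minimal sketch of the assumed schema (table name and column sizes are
# placeholders chosen to match the INSERT statement further below):
#
# cursor.execute("""
#     CREATE TABLE IF NOT EXISTS tableName (
#         created_at VARCHAR(64), username VARCHAR(64), tweet TEXT,
#         coordinates VARCHAR(255), userTimeZone VARCHAR(64),
#         userLocation VARCHAR(255), retweeted BOOLEAN
#     ) DEFAULT CHARSET=utf8mb4
# """)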
#Twitter consumer key, consumer secret, access token, access secret
ckey="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
csecret="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
atoken="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
asecret="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
# set up stream listener
class listener(StreamListener):
def on_data(self, data):
all_data = json.loads(data)
# collect all desired data fields
if 'text' in all_data:
tweet = all_data["text"]
created_at = all_data["created_at"]
retweeted = all_data["retweeted"]
username = all_data["user"]["screen_name"]
user_tz = all_data["user"]["time_zone"]
user_location = all_data["user"]["location"]
user_coordinates = all_data["coordinates"]
            # if coordinates are not present, store None
            # otherwise get the coordinates.coordinates value
if user_coordinates is None:
final_coordinates = user_coordinates
else:
final_coordinates = str(all_data["coordinates"]["coordinates"])
            # insert values into the db
cursor.execute("INSERT INTO tableName (created_at, username, tweet, coordinates, userTimeZone, userLocation, retweeted) VALUES (%s,%s,%s,%s,%s,%s,%s)",
(created_at, username, tweet, final_coordinates, user_tz, user_location, retweeted))
cnx.commit()
print((username,tweet))
return True
else:
return True
def on_error(self, status):
print(status)
auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)
# create stream and filter on a searchterm
twitterStream = Stream(auth, listener())
twitterStream.filter(track=["searchterm"],
languages = ["en"], stall_warnings = True) |
from typing import Any, Dict
from labfunctions import types
from labfunctions.executors.docker_exec import docker_exec
from labfunctions.runtimes.builder import builder_exec
def notebook_dispatcher(data: Dict[str, Any]):
ctx = types.ExecutionNBTask(**data)
result = docker_exec(ctx)
return result.dict()
def build_dispatcher(data: Dict[str, Any]):
ctx = types.runtimes.BuildCtx(**data)
result = builder_exec(ctx)
return result.dict()
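# Hedged usage sketch: a queue consumer would pass the raw job payload
# straight through. The payload keys must match the fields of
# types.ExecutionNBTask (or types.runtimes.BuildCtx), which are not shown
# here, so job_payload is a hypothetical placeholder:
#
#   result = notebook_dispatcher(job_payload)  # job_payload: Dict[str, Any]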
|
"""
(C) Copyright 2021 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on June 30, 2021
"""
from typing import Tuple
import os
import SimpleITK as sitk
import numpy as np
import torch
import logging
from scipy.ndimage.morphology import binary_dilation
from fuse.data.processor.processor_base import FuseProcessorBase
from fuse_examples.classification.prostate_x.data_utils import FuseProstateXUtilsData
# from fuse_examples.classification.prostate_x.processor_dicom_mri import FuseDicomMRIProcessor
from fuse.data.processor.processor_dicom_mri import FuseDicomMRIProcessor
class FuseProstateXPatchProcessor(FuseProcessorBase):
"""
    This processor crops the lesion volume from within a 4D MRI volume, based
    on the lesion location as it appears in the database.
:returns a sample that includes:
'patient_num': patient id
'lesion_num': one MRI volume may include more than one lesion
'input': vol_tensor as extracted from MRI volume processor
'input_lesion_mask': mask_tensor,
'ggg': row['ggg']: in prostate - lesion grade
'zone': row['zone']: zone in prostate
    'ClinSig': row['ClinSig']: clinically significant (0 for benign and 3+3 lesions, 1 for the rest)
"""
def __init__(self,
vol_processor: FuseDicomMRIProcessor = FuseDicomMRIProcessor(),
path_to_db: str = None,
data_path: str = None,
ktrans_data_path: str = None,
db_name: str = None,
db_version: str = None,
fold_no : int = None,
lsn_shape: Tuple[int, int, int] = (16, 120, 120),
lsn_spacing: Tuple[float, float, float] = (3, 0.5, 0.5),
):
"""
        :param vol_processor: extracts a 4D tensor from a path to MRI dicoms
:param path_to_db: path to data pickle
:param data_path: path to directory in which dicom data is located
:param ktrans_data_path: path to directory of Ktrans seq (prostate x)
        :param db_name: 'prostatex' for this example
        :param db_version: version of the database to use
        :param fold_no: cross validation fold
:param lsn_shape: shape of volume to extract from full volume (pixels)
:param lsn_spacing: spacing of volume to extract from full volume (mm)
"""
# store input parameters
self.vol_processor = vol_processor
self.path_to_db = path_to_db
self.data_path = data_path
self.ktrans_data_path = ktrans_data_path
self.lsn_shape = lsn_shape
self.lsn_spacing = lsn_spacing
self.db_name = db_name
self.db_ver = db_version
self.fold_no=fold_no
self.prostate_data_path = os.path.join(self.data_path,'PROSTATEx/')
# ========================================================================
def create_resample(self,vol_ref:sitk.sitkFloat32, interpolation: str, size:Tuple[int,int,int], spacing: Tuple[float,float,float]):
"""
create_resample create resample operator
:param vol_ref: sitk vol to use as a ref
:param interpolation:['linear','nn','bspline']
        :param size: in pixels
        :param spacing: in mm
:return: resample sitk operator
"""
if interpolation == 'linear':
interpolator = sitk.sitkLinear
elif interpolation == 'nn':
interpolator = sitk.sitkNearestNeighbor
elif interpolation == 'bspline':
interpolator = sitk.sitkBSpline
resample = sitk.ResampleImageFilter()
resample.SetReferenceImage(vol_ref)
resample.SetOutputSpacing(spacing)
resample.SetInterpolator(interpolator)
resample.SetSize(size)
return resample
# ========================================================================
def apply_resampling(self,img:sitk.sitkFloat32, mask:sitk.sitkFloat32,
spacing: Tuple[float,float,float] =(0.5, 0.5, 3), size: Tuple[int,int,int] =(160, 160, 32),
transform:sitk=None, interpolation:str='bspline',
label_interpolator:sitk=sitk.sitkLabelGaussian,
):
ref = img if img != [] else mask
size = [int(s) for s in size]
resample = self.create_resample(ref, interpolation, size=size, spacing=spacing)
        if transform is not None:
            resample.SetTransform(transform)
img_r = resample.Execute(img)
resample.SetInterpolator(label_interpolator)
mask_r = resample.Execute(mask)
return img_r, mask_r
# ========================================================================
def crop_lesion_vol(self,vol:sitk.sitkFloat32, position:Tuple[float,float,float], ref:sitk.sitkFloat32, size:Tuple[int,int,int]=(160, 160, 32),
spacing:Tuple[int,int,int]=(1, 1, 3), center_slice=None):
"""
crop_lesion_vol crop tensor around position
:param vol: vol to crop
:param position: point to crop around
:param ref: reference volume
:param size: size in pixels to crop
        :param spacing: spacing to resample the vol
:param center_slice: z coordinates of position
:return: cropped volume
"""
def get_lesion_mask(position, ref):
mask = np.zeros_like(sitk.GetArrayViewFromImage(ref), dtype=np.uint8)
            coords = np.round(position[::-1]).astype(int)
mask[coords[0], coords[1], coords[2]] = 1
mask = binary_dilation(mask, np.ones((3, 5, 5))) + 0
mask_sitk = sitk.GetImageFromArray(mask)
mask_sitk.CopyInformation(ref)
return mask_sitk
mask = get_lesion_mask(position, ref)
vol.SetOrigin((0,) * 3)
mask.SetOrigin((0,) * 3)
vol.SetDirection(np.eye(3).flatten())
mask.SetDirection(np.eye(3).flatten())
ma_centroid = mask > 0.5
        label_analysis_filter = sitk.LabelShapeStatisticsImageFilter()
        label_analysis_filter.Execute(ma_centroid)
        centroid = label_analysis_filter.GetCentroid(1)
offset_correction = np.array(size) * np.array(spacing)/2
corrected_centroid = np.array(centroid)
corrected_centroid[2] = center_slice * np.array(spacing[2])
offset = corrected_centroid - np.array(offset_correction)
translation = sitk.TranslationTransform(3, offset)
img, mask = self.apply_resampling(vol, mask, spacing=spacing, size=size, transform=translation)
return img, mask
# ========================================================================
def __call__(self,
sample_desc,
*args, **kwargs):
"""
Return list of samples (lesions) giving a patient level descriptor
:param sample_desc: (db_ver, set_type, patient_id)
:return: list of lesions, see TorchClassificationAlgo.create_lesion_sample()
"""
samples = []
        # decode descriptor: (db_ver, set_type, patient_id, ...)
        patient_id = sample_desc[2]
# ========================================================================
# get db - lesions
db_full = FuseProstateXUtilsData.get_dataset(self.path_to_db,'other',self.db_ver,self.db_name,self.fold_no)
db = FuseProstateXUtilsData.get_lesions_prostate_x(db_full)
# ========================================================================
# get patient
patient = db[db['Patient ID'] == patient_id]
# ========================================================================
lgr = logging.getLogger('Fuse')
lgr.info(f'patient={patient_id}', {'color': 'magenta'})
# ========================================================================
# all seq paths for a certain patient
patient_directories = os.listdir(os.path.join(self.prostate_data_path, patient_id))
patient_directories = patient_directories[0]
images_path = os.path.join(self.prostate_data_path, patient_id, patient_directories)
# ========================================================================
# vol_4D is multichannel volume (z,x,y,chan(sequence))
vol_4D,vol_ref = self.vol_processor((images_path,self.ktrans_data_path,patient_id))
# ========================================================================
# each row contains one lesion, iterate over lesions
for index, row in patient.iterrows():
            # read original position
            pos_orig = np.array(row.values[1].split(), dtype=np.float32)
# transform to pixel coordinate in ref coords
pos_vol = np.array(vol_ref.TransformPhysicalPointToContinuousIndex(pos_orig.astype(np.float64)))
# crop lesion vol
vol_cropped, mask_cropped = self.crop_lesion_vol(
vol_4D, pos_vol,vol_ref ,center_slice=pos_vol[2],
size=(self.lsn_shape[2], self.lsn_shape[1], self.lsn_shape[0]),
spacing=(self.lsn_spacing[2], self.lsn_spacing[1], self.lsn_spacing[0]))
vol_cropped_tmp = sitk.GetArrayFromImage(vol_cropped)
if len(vol_cropped_tmp.shape)<4:
# fix dimensions in case of one seq
vol_cropped_tmp = vol_cropped_tmp[:,:,:,np.newaxis]
vol = np.moveaxis(vol_cropped_tmp, 3, 0)
else:
vol = np.moveaxis(sitk.GetArrayFromImage(vol_cropped), 3, 0)
            if np.isnan(vol).any():
                vol[np.isnan(vol)] = 0
mask = sitk.GetArrayFromImage(mask_cropped)
vol_tensor = torch.from_numpy(vol).type(torch.FloatTensor)
mask_tensor = torch.from_numpy(mask).unsqueeze(0).type(torch.FloatTensor)
# sample
sample = {
'patient_num': patient_id,
'lesion_num': row['fid'],
'input': vol_tensor,
'input_lesion_mask': mask_tensor,
'ggg': row['ggg'],
'zone': row['zone'],
'ClinSig': row['ClinSig'],
}
samples.append(sample)
return samples
if __name__ == "__main__":
import matplotlib.pyplot as plt
import pandas as pd
path_to_db = '/gpfs/haifa/projects/m/msieve_dev3/usr/Tal/my_research/virtual_biopsy/prostate/experiments/V4/'
dataset = 'prostate_x'
if dataset=='prostate_x':
# for ProstateX
path_to_dataset = '/projects/msieve/MedicalSieve/PatientData/ProstateX/manifest-A3Y4AE4o5818678569166032044/'
prostate_data_path = path_to_dataset
Ktrain_data_path = path_to_dataset + '/ProstateXKtrains-train-fixed/'
sample = ('29062021', 'train', 'ProstateX-0148', 'pred')
a = FuseProstateXPatchProcessor(vol_processor=FuseDicomMRIProcessor(reference_inx=0),path_to_db = path_to_db,
data_path=prostate_data_path,ktrans_data_path=Ktrain_data_path,
db_name=dataset,fold_no=1,lsn_shape=(13, 74, 74))
samples = a.__call__(sample)
l_seq = pd.read_csv('/gpfs/haifa/projects/m/msieve_dev3/usr/Tal/my_research/virtual_biopsy/prostate/prostate_x/metadata.csv')
for sample_id in list(l_seq['Subject ID'].unique()):
# sample_id = 'ACRIN-6698-760011'
sample = ('29062021', 'validation', sample_id, 'pred')
samples = a.__call__(sample)
if len(samples)==0:
sample = ('29062021', 'train', sample_id, 'pred')
samples = a.__call__(sample)
path2save = '/gpfs/haifa/projects/m/msieve_dev3/usr/Tal/my_research/virtual_biopsy/prostate/prostate_x/data_visualization/'
        fig, ax = plt.subplots(nrows=5, ncols=13, sharex=True, sharey=True)
for idx in range(5):
for jdx in range(13):
ll = samples[0]['input'].cpu().detach().numpy()[idx, jdx, :, :]
ax[idx, jdx].imshow(ll, cmap='gray')
        fig.suptitle(sample_id)
        fig.savefig(path2save + sample_id + '.jpg') |
from flask import current_app, url_for, g
from app import db, geolocator
from sqlalchemy import event
class Address(db.Model):
__tablename__ = 'address'
id = db.Column(db.Integer, primary_key=True)
line_1 = db.Column(db.String(255))
line_2 = db.Column(db.String(255))
line_3 = db.Column(db.String(255))
line_4 = db.Column(db.String(255))
city = db.Column(db.String(255))
state_code = db.Column(db.String(2))
postal_code = db.Column(db.String(20))
county = db.Column(db.String(255))
country = db.Column(db.String(255))
latitude = db.Column(db.Numeric(precision=9,scale=6))
longitude = db.Column(db.Numeric(precision=9,scale=6))
def __init__(self, **kwargs):
super(Address, self).__init__(**kwargs)
def __repr__(self):
return '{}, {}, {} {}'.format(self.line_1, self.city, self.state_code, self.postal_code)
def to_dict(self):
data = {
'id': self.id,
'line_1': self.line_1,
'line_2': self.line_2,
'line_3': self.line_3,
'line_4': self.line_4,
'city': self.city,
'state_code': self.state_code,
'postal_code': self.postal_code,
'county': self.county,
'country': self.country
}
return data
def from_dict(self, data):
        for field in ['line_1', 'line_2', 'line_3', 'line_4', 'city',
                      'state_code', 'postal_code', 'county', 'country']:
if field in data:
setattr(self, field, data[field])
def geocode(self, **kwargs):
line_1 = kwargs.get("line_1",self.line_1)
city = kwargs.get("city",self.city)
state_code = kwargs.get("state_code",self.state_code)
postal_code = kwargs.get("postal_code",self.postal_code)
self.latitude = None
self.longitude = None
try:
location = geolocator.geocode('{} {} {} {}'.format(line_1, city, state_code, postal_code))
if location is not None:
self.latitude = location.latitude
self.longitude = location.longitude
        except Exception:
            current_app.logger.error('Unable to geocode address')
class Deal(db.Model):
__tablename__ = 'deal'
id = db.Column(db.Integer, primary_key=True)
address_id = db.Column(db.Integer, db.ForeignKey('address.id'))
address = db.relationship('Address', uselist=False)
property_tax = db.Column(db.Integer)
sq_feet = db.Column(db.Integer)
bedrooms = db.Column(db.Integer)
bathrooms = db.Column(db.Integer)
after_repair_value = db.Column(db.Integer)
rehab_estimate = db.Column(db.Integer)
purchase_price = db.Column(db.Integer)
list_price = db.Column(db.Integer)
under_contract_ind = db.Column(db.Boolean)
contacts = db.relationship("DealContact")
def add_contact(self, contact):
if self.contacts is None:
self.contacts = []
self.contacts.append(contact)
def get_submitter(self):
return [deal_contact for deal_contact in self.contacts if deal_contact.is_submitter()][0]
def to_dict(self):
data = {
'id': self.id,
'address': self.address.to_dict(),
'property_tax': self.property_tax,
'sq_feet': self.sq_feet,
'bedrooms': self.bedrooms,
'bathrooms': self.bathrooms,
'after_repair_value': self.after_repair_value,
'rehab_estimate': self.rehab_estimate,
'purchase_price': self.purchase_price,
'list_price': self.list_price,
'under_contract_ind': self.under_contract_ind,
'contacts' : [contact.to_dict() for contact in self.contacts],
'_links': {
'self': url_for('api.get_deal', id=self.id)
}
}
return data
def from_dict(self, data):
for field in ['address', 'property_tax', 'sq_feet', 'bedrooms',
'bathrooms', 'after_repair_value', 'rehab_estimate',
'purchase_price', 'list_price',
'under_contract_ind', 'submitted_by']:
if field == 'address' and field in data:
self.address = Address()
for address_field in data['address']:
setattr(self.address, address_field, data[field][address_field])
elif field == 'submitted_by' and field in data:
contact = Contact()
for contact_field in data[field]:
setattr(contact, contact_field, data[field][contact_field])
deal_contact = DealContact()
deal_contact_role = DealContactRole(name="Submitted By")
deal_contact.add_role(deal_contact_role)
deal_contact.contact = contact
self.add_contact(deal_contact)
elif field in data:
setattr(self, field, data[field])
deal_contact = DealContact()
deal_contact_role = DealContactRole(name="Submitted To")
deal_contact.add_role(deal_contact_role)
deal_contact.contact = g.current_user.contact
self.add_contact(deal_contact)
class DealContact(db.Model):
__tablename__ = "deal_contact"
id = db.Column(db.Integer, primary_key=True)
deal_id = db.Column(db.Integer, db.ForeignKey('deal.id'))
deal = db.relationship("Deal", back_populates="contacts")
contact_id = db.Column(db.Integer, db.ForeignKey('contact.id'))
contact = db.relationship("Contact", back_populates="deal_contacts")
roles = db.relationship("DealContactRole")
def add_role(self, role):
if self.roles is None:
self.roles = []
self.roles.append(role)
def is_submitter(self):
        return any(role.name == "Submitted By" for role in self.roles)
def __repr__(self):
return str(self.contact)
def to_dict(self):
data = {
'id': self.id,
'contact': self.contact.to_dict(),
'roles': [role.to_dict() for role in self.roles]
}
return data
class DealContactRole(db.Model):
__tablename__ = "deal_contact_role"
id = db.Column(db.Integer, primary_key=True)
deal_contact_id = db.Column(db.Integer, db.ForeignKey('deal_contact.id'))
name = db.Column(db.String(255))
def __repr__(self):
return self.name
def to_dict(self):
data = {
'id': self.id,
'name': self.name
}
return data
class Contact(db.Model):
__tablename__ = 'contact'
id = db.Column(db.Integer, primary_key=True)
deal_contacts = db.relationship("DealContact")
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
user = db.relationship("User", back_populates="contact")
first_name = db.Column(db.String(255))
last_name = db.Column(db.String(255))
phone = db.Column(db.String(255))
email = db.Column(db.String(255))
def to_dict(self):
data = {
'id': self.id,
'first_name': self.first_name,
'last_name': self.last_name,
'phone': self.phone,
'email': self.email
}
return data
def __repr__(self):
return "{} {}".format(self.first_name, self.last_name)
def update_geocoding(mapper, connection, target):
target.geocode()
event.listen(Address, 'before_insert', update_geocoding)
event.listen(Address, 'before_update', update_geocoding)
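# Hedged usage sketch: because of the event hooks above, geocoding happens
# automatically on flush, so no explicit geocode() call is needed. The
# field values are hypothetical.
#
#   addr = Address(line_1='1 Main St', city='Springfield', state_code='IL',
#                  postal_code='62701')
#   db.session.add(addr)
#   db.session.commit()  # update_geocoding() fires before the INSERT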
|
from sense_hat import SenseHat
import json
def main():
sense = SenseHat()
sense.set_imu_config(True, True, True)
gyro = sense.get_gyroscope_raw()
acc = sense.get_accelerometer_raw()
comp = sense.get_compass_raw()
temp = sense.temp
humidity = sense.humidity
pressure = sense.pressure
data = [
{
"id" : "gyro",
"values" : [gyro['x'], gyro['y'], gyro['z']]
},
{
"id" : "acc",
"values" : [acc['x'], acc['y'], acc['z']]
},
{
"id" : "comp",
"values" : [comp['x'], comp['y'], comp['z']]
},
{
"id" : "temp",
"values" : [temp]
},
{
"id" : "humidity",
"values" : [humidity]
},
{
"id" : "pressure",
"values" : [pressure]
}
]
print(json.dumps(data))
if __name__ == '__main__':
main() |
#!/usr/bin/env python3
# Loads metadata about a release into the database.
import sqlite3
import sys
import re
if len(sys.argv) < 4:
print("usage: load-release.py <DB_FILE> <TAG> <GIT_COMMIT_HASH>")
sys.exit(1)
dbfile, tag, githash = sys.argv[1:4]
# Extract major.minor.patch from tag.
m = re.match(r'v(\d+)\.(\d+)\.(\d+)', tag)
if tag == 'master':
# HACK: this causes "master" to sort after all the other tags.
major, minor, patch = 9999, 9999, 9999
elif m:
    # Store components as ints so version ordering works in the database.
    major, minor, patch = int(m[1]), int(m[2]), int(m[3])
else:
sys.exit('Unrecognized tag format: {}'.format(tag))
# Load into database.
conn = sqlite3.connect(dbfile)
c = conn.cursor()
c.execute('''
insert into rels (tag, major, minor, patch, githash)
values (?, ?, ?, ?, ?)
''', [tag, major, minor, patch, githash])
conn.commit()
conn.close()
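# A sketch of the schema this script assumes (hypothetical column types;
# the real DDL lives wherever the database is initialised):
#
#   CREATE TABLE IF NOT EXISTS rels (
#       tag TEXT PRIMARY KEY,
#       major INTEGER, minor INTEGER, patch INTEGER,
#       githash TEXT
#   );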
|
import numpy as np
import time
from kid_readout.utils import data_block,roach_interface,data_file
from sim900 import sim900Client
ri = roach_interface.RoachBaseband()
df = data_file.DataFile()
sc = sim900Client.sim900Client()
ri.set_adc_attenuator(31)
ri.set_dac_attenuator(26)
ri.set_tone_freqs(np.array([67.001,150.001]), nsamp=2**18)
ri._sync()
df.log_hw_state(ri)
df.log_adc_snap(ri)
tsg = None
while True:
try:
dmod,addr = ri.get_data(64)
chids = ri.fpga_fft_readout_indexes+1
tones = ri.tone_bins[ri.readout_selection]
nsamp = ri.tone_nsamp
for m in range(len(chids)):
block = data_block.DataBlock(data = dmod[:,m], tone=tones[m], fftbin = chids[m],
nsamp = nsamp, nfft = ri.nfft, t0 = time.time(), fs = ri.fs)
tsg = df.add_block_to_timestream(block, tsg=tsg)
sc.fetchDict()
df.add_cryo_data(sc.data)
df.nc.sync()
time.sleep(120.)
except KeyboardInterrupt:
df.nc.close()
break |
import asyncio
import json
import multiprocessing
import aioredis
import deserialize
from common.logger.logger import get_logger
from common.queue.push.fcm import blocking_get_fcm_job
from common.queue.result.push import publish_push_result_job
from common.structure.job.fcm import FCMJob
from worker.push.fcm.config import config
from worker.push.fcm.external.fcm.abstract import AbstractFCM
from worker.push.fcm.external.fcm.legacy import FCMClientLegacy
from worker.push.fcm.external.fcm.v1 import FCMClientV1
logger = get_logger(__name__)
class Replica:
REDIS_TIMEOUT = 0 # Infinite
def __init__(self, pid):
self.fcm: AbstractFCM = self.create_fcm_client()
self.redis_host = config.push_worker.redis.host
self.redis_port = config.push_worker.redis.port
self.redis_password = config.push_worker.redis.password
self.redis_pool = None
logger.debug(f'Worker {pid} up')
loop = asyncio.get_event_loop()
loop.run_until_complete(self.job())
def create_fcm_client(self) -> AbstractFCM:
fcm_config = config.push_worker.fcm
if config.push_worker.fcm.client == 'legacy':
return FCMClientLegacy(fcm_config.legacy.server_key)
elif config.push_worker.fcm.client == 'v1':
return FCMClientV1(fcm_config.v1.project_id, fcm_config.v1.key_file_name)
else:
            raise ValueError(f'unsupported fcm client: {config.push_worker.fcm.client}')
    async def process_job(self, job_json):  # does the actual work once a job is published
try:
logger.debug(job_json)
job: FCMJob = deserialize.deserialize(
FCMJob, json.loads(job_json)
)
if not job.push_tokens:
return
sent, failed = await self.fcm.send_data(
targets=job.push_tokens,
data={
'notification': {
'title': job.title,
'body': job.body,
'image': job.image_url,
}
}
)
logger.info(f'sent: {sent}, failed: {failed}')
if job.id:
with await self.redis_pool as redis_conn:
await publish_push_result_job(
redis_conn=redis_conn,
job={
'id': job.id,
'sent': sent,
'failed': failed,
}
)
except Exception:
logger.exception(f'Fatal Error! {job_json}')
    async def job(self):  # main loop: blocks on the queue and schedules jobs
self.redis_pool = await aioredis.create_pool(
f'redis://{self.redis_host}:{self.redis_port}',
password=self.redis_password,
db=int(config.push_worker.redis.notification_queue.database),
minsize=5,
maxsize=10,
)
while True:
with await self.redis_pool as redis_conn:
job_json = await blocking_get_fcm_job(
redis_conn=redis_conn,
timeout=self.REDIS_TIMEOUT
)
logger.debug(multiprocessing.current_process())
if not job_json:
continue
logger.info('new task')
asyncio.create_task(self.process_job(job_json))
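# Hedged usage sketch: Replica blocks on its own event loop, so a parent
# process would typically fork one instance per worker. The pool size and
# the __main__ entry point below are assumptions, not part of this module.
if __name__ == '__main__':
    processes = [
        multiprocessing.Process(target=Replica, args=(pid,))
        for pid in range(4)
    ]
    for proc in processes:
        proc.start()
    for proc in processes:
        proc.join()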
|
import graphene
import hero.schema
# Our Project Level Schema 🚒 🔥
# If we had multiple apps, we'd import them here ✈
# Then, inherit from their Queries and Mutations 👦
# And, finally return them as one object 💧
class Query(hero.schema.Query, graphene.ObjectType):
# This class will inherit from multiple Queries
# as we begin to add more apps to our project
pass
class Mutation(hero.schema.Mutation, graphene.ObjectType):
pass
schema = graphene.Schema(query=Query, mutation=Mutation)
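# Hedged sketch of the multi-app case described above (the 'blog' app is
# hypothetical):
#
#   import blog.schema
#
#   class Query(hero.schema.Query, blog.schema.Query, graphene.ObjectType):
#       pass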
|
"""
Utilities related to reading DSM2 data
"""
import numpy as np
from .. import utils
from ..grid import unstructured_grid
from ..spatial import wkb2shp
def line_parser(fn):
"""
Generator to read lines, removing comments along the way
"""
with open(fn,'rt') as fp:
for line in fp:
cmt=line.find('#')
if cmt>=0:
line=line[:cmt]
line=line.strip()
if not line:
continue
yield line
class DSM2Grid(unstructured_grid.UnstructuredGrid):
def __init__(self,grid_fn,node_shp,channel_shp=None):
self.src_fn=grid_fn
self.node_shp=node_shp
self.channel_shp=channel_shp
nodes=self.read_nodes()
self.tok=line_parser(grid_fn)
self.sections={}
for section in self.tok:
self.sections[section]=self.read_section()
extra_node_fields=[]
extra_edge_fields=[]
        for field in nodes.dtype.names:
            if field in ['x','id']: continue
            ftype=np.float64
            extra_node_fields.append( (field,ftype) )
for field in self.sections['CHANNEL'].dtype.names:
if field in ['CHAN_NO','UPNODE','DOWNNODE']:
continue
else:
ftype=np.float64
extra_edge_fields.append( (field,ftype) )
super(DSM2Grid,self).__init__(extra_node_fields=extra_node_fields,
extra_edge_fields=extra_edge_fields)
        # Some extra work to make sure that ids line up with DSM ids, even
        # if DSM nodes are missing, and accounting for DSM ids being 1-based
        # (so self.nodes[0] and any missing ids will be marked 'deleted').
Nnodes=1+nodes['id'].max()
self.nodes=np.zeros(Nnodes,self.node_dtype)
self.nodes['deleted']=True
self.nodes['x'][nodes['id']]=nodes['x']
self.nodes['deleted'][nodes['id']]=False
edges=self.sections['CHANNEL']
eid=edges['CHAN_NO'].astype(np.int32)
Nedges=1+eid.max()
self.edges=np.zeros(Nedges,self.edge_dtype)
self.edges['deleted']=True
self.edges['deleted'][eid]=False
self.edges['nodes'][eid,0]=edges['UPNODE'].astype(np.int32)
self.edges['nodes'][eid,1]=edges['DOWNNODE'].astype(np.int32)
for field in edges.dtype.names:
if field in ['CHAN_NO','UPNODE','DOWNNODE']: continue
self.edges[field][eid]=edges[field]
if channel_shp:
self.init_channel_geometry(channel_shp)
def init_channel_geometry(self,channel_shp):
# Load the channel centerlines
channels=wkb2shp.shp2geom(channel_shp)
centerline=np.full(self.Nedges(),None,dtype=np.object_)
for rec in channels:
centerline[int(rec['id'])]=np.array(rec['geom'])
self.add_edge_field('centerline',centerline)
def read_section(self):
"""
Read header line with field names, and records up until 'END'
is encountered. Returns result as numpy struct array.
Currently assumes that everything is a float.
"""
header=next(self.tok)
fields=header.split()
rows=[]
dtype=[(f,np.float64) for f in fields]
dest=np.zeros(0,dtype=dtype)
        while True:
l=next(self.tok)
if l=='END':
break
dest=utils.array_append(dest)
for fld,s in zip(fields,l.split()):
dest[fld][-1]=float(s)
return dest
def read_nodes(self):
node_data=wkb2shp.shp2geom(self.node_shp)
# => dtype=[('id', '<f8'), ('geom', 'O')]
nodes=np.zeros( len(node_data), dtype=[('id',np.int32),
('x',np.float64,2)])
nodes['id']=node_data['id'].astype(np.int32)
nodes['x']=[np.array(p) for p in node_data['geom']]
return nodes
def plot_edges(self,*a,centerlines=False,**k):
if centerlines:
orig_return_mask=k.pop('return_mask',False)
k['return_mask']=True
lcoll,mask=super(DSM2Grid,self).plot_edges(*a,**k)
segs=lcoll.get_segments()
for ji,j in enumerate(np.nonzero(mask)[0]):
if self.edges['centerline'][j] is not None:
segs[ji]=self.edges['centerline'][j]
lcoll.set_segments(segs)
if orig_return_mask:
return lcoll,mask
else:
return lcoll
else:
return super(DSM2Grid,self).plot_edges(*a,**k)
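# Hedged usage sketch (the file paths are hypothetical placeholders):
#
#   grid = DSM2Grid('dsm2_channels.inp', 'dsm2_nodes.shp',
#                   channel_shp='dsm2_channels.shp')
#   grid.plot_edges(centerlines=True)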
|
import logging
import random
from django.conf import settings
from django.db import transaction
from django_redis import get_redis_connection
from rest_framework import status
from rest_framework.generics import CreateAPIView, ListAPIView, GenericAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework_jwt.views import ObtainJSONWebToken
import constants
from courses.models import Course, CourseLesson
from courses.paginations import CourseListPageNumberPagination
from fuguangapi.utils.tencentcloudapi import TencentCloudAPI, TencentCloudSDKException
from .models import User, UserCourse, StudyProgress
from .serializers import UserRegisterModelSerializer, UserCourseModelSerializer
# from ronglianyunapi import send_sms
# from mycelery.sms.tasks import send_sms
from .tasks import send_sms
logger = logging.getLogger('django')
class LoginAPIView(ObtainJSONWebToken):
"""
    User login view.
"""
def post(self, request, *args, **kwargs):
"""
        Validate the temporary ticket issued after the user passes the captcha.
"""
if settings.IS_TEST:
return super().post(request, *args, **kwargs)
try:
api = TencentCloudAPI()
result = api.captcha(
request.data.get("ticket"),
request.data.get("randstr"),
request._request.META.get("REMOTE_ADDR"),
)
if result:
return super().post(request, *args, **kwargs)
else:
raise TencentCloudSDKException
except TencentCloudSDKException as err:
return Response({"errmsg": "验证码校验失败"}, status=status.HTTP_400_BAD_REQUEST)
class MobileAPIView(APIView):
"""
    Check whether a mobile number is already registered.
"""
def get(self, request, mobile):
"""
        Check whether the mobile number is already registered.
        :param request:
        :param mobile: mobile number
        :return:
"""
try:
user = User.objects.get(mobile=mobile)
return Response({"errmsg": "手机号码已注册"}, status=status.HTTP_400_BAD_REQUEST)
except User.DoesNotExist:
            # not found, so the mobile number is not registered yet
return Response({"errmsg": "ok"}, status=status.HTTP_200_OK)
class UserAPIView(CreateAPIView):
queryset = User.objects.all()
serializer_class = UserRegisterModelSerializer
class SMSAPIView(APIView):
"""
    Send an SMS verification code.
"""
def get(self, request, mobile):
"""
:param request:
        :param mobile: mobile number
:return:
"""
redis = get_redis_connection("sms_code")
        # check whether SMS sending is still cooling down [only one send per 60 seconds]
        interval = redis.ttl(f'interval_{mobile}')
        if interval != -2:
            return Response({"errmsg": f"SMS requests are too frequent; please retry in {interval} seconds!"},
                            status=status.HTTP_400_BAD_REQUEST)
        # generate a random verification code
        code = f"{random.randint(0, 999999):06d}"
        # SMS code expiry time
        time = settings.RONGLIANYUN.get("sms_expire")
        # interval between SMS sends
        sms_interval = settings.RONGLIANYUN.get("sms_interval")
        # send the SMS asynchronously
        # send_sms(settings.RONGLIANYUN.get('reg_tid'), mobile, datas=(code, time // 60))
        send_sms.delay(settings.RONGLIANYUN.get("reg_tid"), mobile, datas=(code, time // 60))
        # store the verification code in redis
        # use a redis pipeline to execute multiple commands at once
pipe = redis.pipeline()
pipe.multi()
pipe.setex(f"sms_{mobile}", time, code)
pipe.setex(f"interval_{mobile}", sms_interval, 1)
pipe.execute()
return Response({"errmsg": "OK"}, status=status.HTTP_200_OK)
class CourseListAPiView(ListAPIView):
"""
    Get the current user's course list.
"""
permission_classes = [IsAuthenticated]
serializer_class = UserCourseModelSerializer
pagination_class = CourseListPageNumberPagination
def get_queryset(self):
user = self.request.user
query = UserCourse.objects.filter(user=user)
course_type = int(self.request.query_params.get("type", -1))
course_type_list = [item[0] for item in Course.COURSE_TYPE]
if course_type in course_type_list:
query = query.filter(course__course_type=course_type)
return query.order_by("-id").all()
class UserCourseAPIView(GenericAPIView):
"""学习进度"""
permission_classes = [IsAuthenticated]
serializers_class = UserCourseModelSerializer
def get(self, request, course_id):
"""获取用户的课程学习进度"""
user = request.user
try:
user_course = UserCourse.objects.get(user=user, course_id=course_id)
except UserCourse.DoesNotExist:
return Response({"errmsg": "当前课程未购买"}, status=status.HTTP_400_BAD_REQUEST)
# 章节id
chapter_id = user_course.chapter_id
if chapter_id:
"""学习过课程"""
lesson = user_course.lesson
else:
"""未学习过课程"""
# 获取课程第1个章节
chapter = user_course.course.chapter_list.order_by('orders').first()
# 获取课程第1个章节的第一个课时
lesson = chapter.lesson_list.order_by('orders').first()
# 更新用户学习进度
user_course.chapter = chapter
user_course.lesson = lesson
user_course.save()
serializer = self.get_serializer(user_course)
data = serializer.data
        # get the lesson type and link
data['lesson_type'] = lesson.lesson_type
data['lesson_link'] = lesson.lesson_link
return Response(data)
class StudyLessonAPIView(APIView):
"""用户在当前课时的学习进度"""
permission_classes = [IsAuthenticated]
def get(self, request):
lesson_id = int(request.query_params.get("lesson"))
user = request.user
        # look up the lesson
lesson = CourseLesson.objects.get(id=lesson_id)
progress = StudyProgress.objects.filter(user=user, lesson=lesson).first()
        # if no progress record exists, default the progress to 0
if progress is None:
progress = StudyProgress.objects.create(user=user, lesson=lesson, study_time=0)
return Response(progress.study_time)
class StudyProgressAPIView(APIView):
permission_classes = [IsAuthenticated]
def post(self, request):
"""添加课时学习进度"""
try:
# 1. 接收客户端提交的视频进度和课时ID
study_time = int(request.data.get("time"))
lesson_id = int(request.data.get("lesson"))
user = request.user
# 判断当前课时是否免费或者当前课时所属的课程是否被用户购买了
# 判断本次更新学习时间是否超出阈值,当超过阈值,则表示用户已经违规快进了。
if study_time > constants.MAV_SEEK_TIME:
raise Exception
# 查找课时
lesson = CourseLesson.objects.get(pk=lesson_id)
except:
return Response({"error": "无效的参数或当前课程信息不存在!"})
with transaction.atomic():
save_id = transaction.savepoint()
try:
                # 2. record the lesson study progress
                progress = StudyProgress.objects.filter(user=user, lesson=lesson).first()
                if progress is None:
                    """Create a new study record for this user and lesson."""
                    progress = StudyProgress(
                        user=user,
                        lesson=lesson,
                        study_time=study_time
                    )
                else:
                    """Update the existing study time directly."""
                    progress.study_time = int(progress.study_time) + int(study_time)
                progress.save()
                # 3. record the overall course study progress
                user_course = UserCourse.objects.get(user=user, course=lesson.course)
                user_course.study_time = int(user_course.study_time) + int(study_time)
                # if the user has moved on to a later chapter, record it
                if lesson.chapter.orders > user_course.chapter.orders:
                    user_course.chapter = lesson.chapter
                # if the user has moved on to a later lesson, record it
                if lesson.orders > user_course.lesson.orders:
                    user_course.lesson = lesson
                user_course.save()
                return Response({"message": "lesson study progress updated!"})
            except Exception as e:
                logger.error(f"failed to update lesson progress: {e}")
                transaction.savepoint_rollback(save_id)
                return Response({"error": "the current lesson study progress was lost!"})
|
from datetime import datetime
from app.main.model.user import User
from app.main.model.users_chat import UsersChat
from app.main import db
from app.main.util.extract_resource import extract_resource
from app.main.model.message import Message
def __make_users_chat(user1, user2):
c1 = UsersChat(of_user=user1)
c2 = UsersChat(of_user=user2)
c1.other_user_chat = c2
c2.other_user_chat = c1
db.session.add(c1)
db.session.add(c2)
db.session.commit()
return c1
def get_user_chats_with_users(user):
return user.chats_with_users.all(), 200
def get_user_chat_with_user(user, user_id):
other_user = User.query.filter(User.id == user_id).first_or_404()
try:
chat = user.chat_with_user(other_user).one()
chat.last_opened_on = datetime.utcnow()
db.session.commit()
return chat, 200
except:
return {}, 300
def send_message_to_user(user, user_id, request):
other_user = User.query.filter(User.id == user_id).first_or_404()
try:
message_body = extract_resource(request, 'body')
except:
return {}, 400
try:
chat = other_user.chat_with_user(user).one()
except:
chat = __make_users_chat(other_user, user)
new_message = Message(body=message_body, sender_user=user, receiver_chat=chat)
db.session.add(new_message)
db.session.commit()
return new_message, 201
|
"""
Colour Correction
=================
Defines various objects for colour correction, like colour matching two images:
- :func:`colour.characterisation.matrix_augmented_Cheung2004` : Polynomial
expansion using *Cheung, Westland, Connah and Ripamonti (2004)* method.
- :func:`colour.characterisation.polynomial_expansion_Finlayson2015` :
Polynomial expansion using *Finlayson, MacKiewicz and Hurlbert (2015)*
method.
- :func:`colour.characterisation.polynomial_expansion_Vandermonde` :
Polynomial expansion using *Vandermonde* method.
- :attr:`colour.POLYNOMIAL_EXPANSION_METHODS`: Supported polynomial expansion
methods.
- :func:`colour.polynomial_expansion`: Polynomial expansion of given
:math:`a` array.
- :func:`colour.characterisation.matrix_colour_correction_Cheung2004` :
Colour correction matrix computation using *Cheung et al. (2004)* method.
- :func:`colour.characterisation.matrix_colour_correction_Finlayson2015` :
Colour correction matrix computation using *Finlayson et al. (2015)*
method.
- :func:`colour.characterisation.matrix_colour_correction_Vandermonde`
Colour correction matrix computation using *Vandermonde* method.
- :attr:`colour.MATRIX_COLOUR_CORRECTION_METHODS`: Supported colour
correction matrix methods.
- :func:`colour.matrix_colour_correction`: Colour correction matrix
computation from given :math:`M_T` colour array to :math:`M_R` colour
array.
- :func:`colour.characterisation.colour_correction_Cheung2004` :
Colour correction using *Cheung et al. (2004)* method.
- :func:`colour.characterisation.colour_correction_Finlayson2015` :
Colour correction using *Finlayson et al. (2015)* method.
- :func:`colour.characterisation.colour_correction_Vandermonde` :
Colour correction using *Vandermonde* method.
- :attr:`colour.COLOUR_CORRECTION_METHODS`: Supported colour correction
methods.
- :func:`colour.colour_correction`: Colour correction of given *RGB*
colourspace array using the colour correction matrix from given
:math:`M_T` colour array to :math:`M_R` colour array.
References
----------
- :cite:`Cheung2004` : Cheung, V., Westland, S., Connah, D., & Ripamonti, C.
(2004). A comparative study of the characterisation of colour cameras by
means of neural networks and polynomial transforms. Coloration Technology,
120(1), 19-25. doi:10.1111/j.1478-4408.2004.tb00201.x
- :cite:`Finlayson2015` : Finlayson, G. D., MacKiewicz, M., & Hurlbert, A.
(2015). Color Correction Using Root-Polynomial Regression. IEEE
Transactions on Image Processing, 24(5), 1460-1470.
doi:10.1109/TIP.2015.2405336
- :cite:`Westland2004` : Westland, S., & Ripamonti, C. (2004). Table 8.2. In
Computational Colour Science Using MATLAB (1st ed., p. 137). John Wiley &
Sons, Ltd. doi:10.1002/0470020326
- :cite:`Wikipedia2003e` : Wikipedia. (2003). Vandermonde matrix. Retrieved
May 2, 2018, from https://en.wikipedia.org/wiki/Vandermonde_matrix
"""
from __future__ import annotations
import numpy as np
from colour.algebra import least_square_mapping_MoorePenrose, spow
from colour.hints import (
ArrayLike,
Any,
Boolean,
Integer,
Literal,
NDArray,
Union,
)
from colour.utilities import (
CaseInsensitiveMapping,
as_float_array,
as_int,
closest,
filter_kwargs,
ones,
tsplit,
tstack,
validate_method,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright (C) 2013-2022 - Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"matrix_augmented_Cheung2004",
"polynomial_expansion_Finlayson2015",
"polynomial_expansion_Vandermonde",
"POLYNOMIAL_EXPANSION_METHODS",
"polynomial_expansion",
"matrix_colour_correction_Cheung2004",
"matrix_colour_correction_Finlayson2015",
"matrix_colour_correction_Vandermonde",
"MATRIX_COLOUR_CORRECTION_METHODS",
"matrix_colour_correction",
"colour_correction_Cheung2004",
"colour_correction_Finlayson2015",
"colour_correction_Vandermonde",
"COLOUR_CORRECTION_METHODS",
"colour_correction",
]
def matrix_augmented_Cheung2004(
RGB: ArrayLike,
terms: Literal[3, 5, 7, 8, 10, 11, 14, 16, 17, 19, 20, 22] = 3,
) -> NDArray:
"""
Performs polynomial expansion of given *RGB* colourspace array using
*Cheung et al. (2004)* method.
Parameters
----------
RGB
*RGB* colourspace array to expand.
terms
Number of terms of the expanded polynomial.
Returns
-------
:class:`numpy.ndarray`
Expanded *RGB* colourspace array.
Notes
-----
- This definition combines the augmented matrices given in
:cite:`Cheung2004` and :cite:`Westland2004`.
References
----------
:cite:`Cheung2004`, :cite:`Westland2004`
Examples
--------
>>> RGB = np.array([0.17224810, 0.09170660, 0.06416938])
>>> matrix_augmented_Cheung2004(RGB, terms=5) # doctest: +ELLIPSIS
array([ 0.1722481..., 0.0917066..., 0.0641693..., 0.0010136..., 1...])
"""
RGB = as_float_array(RGB)
R, G, B = tsplit(RGB)
tail = ones(R.shape)
existing_terms = np.array([3, 5, 7, 8, 10, 11, 14, 16, 17, 19, 20, 22])
closest_terms = as_int(closest(existing_terms, terms))
if closest_terms != terms:
raise ValueError(
f'"Cheung et al. (2004)" method does not define an augmented '
f"matrix with {terms} terms, closest augmented matrix has "
f"{closest_terms} terms!"
)
if terms == 3:
return RGB
elif terms == 5:
return tstack(
[
R,
G,
B,
R * G * B,
tail,
]
)
elif terms == 7:
return tstack(
[
R,
G,
B,
R * G,
R * B,
G * B,
tail,
]
)
elif terms == 8:
return tstack(
[
R,
G,
B,
R * G,
R * B,
G * B,
R * G * B,
tail,
]
)
elif terms == 10:
return tstack(
[
R,
G,
B,
R * G,
R * B,
G * B,
R ** 2,
G ** 2,
B ** 2,
tail,
]
)
elif terms == 11:
return tstack(
[
R,
G,
B,
R * G,
R * B,
G * B,
R ** 2,
G ** 2,
B ** 2,
R * G * B,
tail,
]
)
elif terms == 14:
return tstack(
[
R,
G,
B,
R * G,
R * B,
G * B,
R ** 2,
G ** 2,
B ** 2,
R * G * B,
R ** 3,
G ** 3,
B ** 3,
tail,
]
)
elif terms == 16:
return tstack(
[
R,
G,
B,
R * G,
R * B,
G * B,
R ** 2,
G ** 2,
B ** 2,
R * G * B,
R ** 2 * G,
G ** 2 * B,
B ** 2 * R,
R ** 3,
G ** 3,
B ** 3,
]
)
elif terms == 17:
return tstack(
[
R,
G,
B,
R * G,
R * B,
G * B,
R ** 2,
G ** 2,
B ** 2,
R * G * B,
R ** 2 * G,
G ** 2 * B,
B ** 2 * R,
R ** 3,
G ** 3,
B ** 3,
tail,
]
)
elif terms == 19:
return tstack(
[
R,
G,
B,
R * G,
R * B,
G * B,
R ** 2,
G ** 2,
B ** 2,
R * G * B,
R ** 2 * G,
G ** 2 * B,
B ** 2 * R,
R ** 2 * B,
G ** 2 * R,
B ** 2 * G,
R ** 3,
G ** 3,
B ** 3,
]
)
elif terms == 20:
return tstack(
[
R,
G,
B,
R * G,
R * B,
G * B,
R ** 2,
G ** 2,
B ** 2,
R * G * B,
R ** 2 * G,
G ** 2 * B,
B ** 2 * R,
R ** 2 * B,
G ** 2 * R,
B ** 2 * G,
R ** 3,
G ** 3,
B ** 3,
tail,
]
)
elif terms == 22:
return tstack(
[
R,
G,
B,
R * G,
R * B,
G * B,
R ** 2,
G ** 2,
B ** 2,
R * G * B,
R ** 2 * G,
G ** 2 * B,
B ** 2 * R,
R ** 2 * B,
G ** 2 * R,
B ** 2 * G,
R ** 3,
G ** 3,
B ** 3,
R ** 2 * G * B,
R * G ** 2 * B,
R * G * B ** 2,
]
)
def polynomial_expansion_Finlayson2015(
RGB: ArrayLike,
degree: Literal[1, 2, 3, 4] = 1,
root_polynomial_expansion: Boolean = True,
) -> NDArray:
"""
Performs polynomial expansion of given *RGB* colourspace array using
*Finlayson et al. (2015)* method.
Parameters
----------
RGB
*RGB* colourspace array to expand.
degree
Expanded polynomial degree.
root_polynomial_expansion
Whether to use the root-polynomials set for the expansion.
Returns
-------
:class:`numpy.ndarray`
Expanded *RGB* colourspace array.
References
----------
:cite:`Finlayson2015`
Examples
--------
>>> RGB = np.array([0.17224810, 0.09170660, 0.06416938])
>>> polynomial_expansion_Finlayson2015(RGB, degree=2) # doctest: +ELLIPSIS
array([ 0.1722481..., 0.0917066..., 0.0641693..., 0.1256832..., \
0.0767121...,
0.1051335...])
"""
RGB = as_float_array(RGB)
R, G, B = tsplit(RGB)
# TODO: Generalise polynomial expansion.
existing_degrees = np.array([1, 2, 3, 4])
closest_degree = as_int(closest(existing_degrees, degree))
if closest_degree != degree:
raise ValueError(
f'"Finlayson et al. (2015)" method does not define a polynomial '
f"expansion for {degree} degree, closest polynomial expansion is "
f"{closest_degree} degree!"
)
if degree == 1:
return RGB
elif degree == 2:
if root_polynomial_expansion:
return tstack(
[
R,
G,
B,
spow(R * G, 1 / 2),
spow(G * B, 1 / 2),
spow(R * B, 1 / 2),
]
)
else:
return tstack(
[
R,
G,
B,
R ** 2,
G ** 2,
B ** 2,
R * G,
G * B,
R * B,
]
)
elif degree == 3:
if root_polynomial_expansion:
return tstack(
[
R,
G,
B,
spow(R * G, 1 / 2),
spow(G * B, 1 / 2),
spow(R * B, 1 / 2),
spow(R * G ** 2, 1 / 3),
spow(G * B ** 2, 1 / 3),
spow(R * B ** 2, 1 / 3),
spow(G * R ** 2, 1 / 3),
spow(B * G ** 2, 1 / 3),
spow(B * R ** 2, 1 / 3),
spow(R * G * B, 1 / 3),
]
)
else:
return tstack(
[
R,
G,
B,
R ** 2,
G ** 2,
B ** 2,
R * G,
G * B,
R * B,
R ** 3,
G ** 3,
B ** 3,
R * G ** 2,
G * B ** 2,
R * B ** 2,
G * R ** 2,
B * G ** 2,
B * R ** 2,
R * G * B,
]
)
elif degree == 4:
if root_polynomial_expansion:
return tstack(
[
R,
G,
B,
spow(R * G, 1 / 2),
spow(G * B, 1 / 2),
spow(R * B, 1 / 2),
spow(R * G ** 2, 1 / 3),
spow(G * B ** 2, 1 / 3),
spow(R * B ** 2, 1 / 3),
spow(G * R ** 2, 1 / 3),
spow(B * G ** 2, 1 / 3),
spow(B * R ** 2, 1 / 3),
spow(R * G * B, 1 / 3),
spow(R ** 3 * G, 1 / 4),
spow(R ** 3 * B, 1 / 4),
spow(G ** 3 * R, 1 / 4),
spow(G ** 3 * B, 1 / 4),
spow(B ** 3 * R, 1 / 4),
spow(B ** 3 * G, 1 / 4),
spow(R ** 2 * G * B, 1 / 4),
spow(G ** 2 * R * B, 1 / 4),
spow(B ** 2 * R * G, 1 / 4),
]
)
else:
return tstack(
[
R,
G,
B,
R ** 2,
G ** 2,
B ** 2,
R * G,
G * B,
R * B,
R ** 3,
G ** 3,
B ** 3,
R * G ** 2,
G * B ** 2,
R * B ** 2,
G * R ** 2,
B * G ** 2,
B * R ** 2,
R * G * B,
R ** 4,
G ** 4,
B ** 4,
R ** 3 * G,
R ** 3 * B,
G ** 3 * R,
G ** 3 * B,
B ** 3 * R,
B ** 3 * G,
R ** 2 * G ** 2,
G ** 2 * B ** 2,
R ** 2 * B ** 2,
R ** 2 * G * B,
G ** 2 * R * B,
B ** 2 * R * G,
]
)
def polynomial_expansion_Vandermonde(
a: ArrayLike, degree: Integer = 1
) -> NDArray:
"""
Performs polynomial expansion of given :math:`a` array using *Vandermonde*
method.
Parameters
----------
a
:math:`a` array to expand.
degree
Expanded polynomial degree.
Returns
-------
:class:`numpy.ndarray`
Expanded :math:`a` array.
References
----------
:cite:`Wikipedia2003e`
Examples
--------
>>> RGB = np.array([0.17224810, 0.09170660, 0.06416938])
>>> polynomial_expansion_Vandermonde(RGB) # doctest: +ELLIPSIS
array([ 0.1722481 , 0.0917066 , 0.06416938, 1. ])
"""
a = as_float_array(a)
a_e = np.transpose(np.vander(np.ravel(a), int(degree) + 1))
a_e = np.hstack(list(a_e.reshape(a_e.shape[0], -1, 3)))
return np.squeeze(a_e[:, 0 : a_e.shape[-1] - a.shape[-1] + 1])
POLYNOMIAL_EXPANSION_METHODS: CaseInsensitiveMapping = CaseInsensitiveMapping(
{
"Cheung 2004": matrix_augmented_Cheung2004,
"Finlayson 2015": polynomial_expansion_Finlayson2015,
"Vandermonde": polynomial_expansion_Vandermonde,
}
)
POLYNOMIAL_EXPANSION_METHODS.__doc__ = """
Supported polynomial expansion methods.
References
----------
:cite:`Cheung2004`, :cite:`Finlayson2015`, :cite:`Westland2004`,
:cite:`Wikipedia2003e`
"""
def polynomial_expansion(
a: ArrayLike,
method: Union[
Literal["Cheung 2004", "Finlayson 2015", "Vandermonde"], str
] = "Cheung 2004",
**kwargs: Any,
) -> NDArray:
"""
Performs polynomial expansion of given :math:`a` array.
Parameters
----------
a
:math:`a` array to expand.
method
Computation method.
Other Parameters
----------------
degree
{:func:`colour.characterisation.polynomial_expansion_Finlayson2015`,
:func:`colour.characterisation.polynomial_expansion_Vandermonde`},
Expanded polynomial degree, must be one of *[1, 2, 3, 4]* for
:func:`colour.characterisation.polynomial_expansion_Finlayson2015`
definition.
root_polynomial_expansion
{:func:`colour.characterisation.polynomial_expansion_Finlayson2015`},
Whether to use the root-polynomials set for the expansion.
terms
{:func:`colour.characterisation.matrix_augmented_Cheung2004`},
Number of terms of the expanded polynomial.
Returns
-------
:class:`numpy.ndarray`
Expanded :math:`a` array.
References
----------
:cite:`Cheung2004`, :cite:`Finlayson2015`, :cite:`Westland2004`,
:cite:`Wikipedia2003e`
Examples
--------
>>> RGB = np.array([0.17224810, 0.09170660, 0.06416938])
>>> polynomial_expansion(RGB) # doctest: +ELLIPSIS
array([ 0.1722481..., 0.0917066..., 0.0641693...])
>>> polynomial_expansion(RGB, 'Cheung 2004', terms=5) # doctest: +ELLIPSIS
array([ 0.1722481..., 0.0917066..., 0.0641693..., 0.0010136..., 1...])
"""
method = validate_method(method, POLYNOMIAL_EXPANSION_METHODS)
function = POLYNOMIAL_EXPANSION_METHODS[method]
return function(a, **filter_kwargs(function, **kwargs))
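# Illustrative sketch (not part of the original module): the dispatcher
# forwards only the keyword arguments accepted by the selected definition
# (via ``filter_kwargs``), so method-specific keywords such as ``terms``
# never have to be pruned manually by the caller.
def _demo_polynomial_expansion_dispatch():
    RGB = np.array([0.17224810, 0.09170660, 0.06416938])
    # 'terms' is consumed by the Cheung (2004) expansion only; per the
    # doctest above, five terms are returned for a single triplet.
    a_e = polynomial_expansion(RGB, "Cheung 2004", terms=5)
    assert a_e.shape == (5,)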
def matrix_colour_correction_Cheung2004(
M_T: ArrayLike,
M_R: ArrayLike,
terms: Literal[3, 5, 7, 8, 10, 11, 14, 16, 17, 19, 20, 22] = 3,
) -> NDArray:
"""
Computes a colour correction matrix from given :math:`M_T` colour array to
:math:`M_R` colour array using *Cheung et al. (2004)* method.
Parameters
----------
M_T
Test array :math:`M_T` to fit onto array :math:`M_R`.
M_R
Reference array the array :math:`M_T` will be colour fitted against.
terms
Number of terms of the expanded polynomial.
Returns
-------
:class:`numpy.ndarray`
Colour correction matrix.
References
----------
:cite:`Cheung2004`, :cite:`Westland2004`
Examples
--------
>>> prng = np.random.RandomState(2)
>>> M_T = prng.random_sample((24, 3))
>>> M_R = M_T + (prng.random_sample((24, 3)) - 0.5) * 0.5
>>> matrix_colour_correction_Cheung2004(M_T, M_R) # doctest: +ELLIPSIS
array([[ 1.0526376..., 0.1378078..., -0.2276339...],
[ 0.0739584..., 1.0293994..., -0.1060115...],
[ 0.0572550..., -0.2052633..., 1.1015194...]])
"""
return least_square_mapping_MoorePenrose(
matrix_augmented_Cheung2004(M_T, terms), M_R
)
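# Illustrative sketch (not part of the original module): the correction
# matrix is the Moore-Penrose least-squares fit of the expanded test
# samples onto the reference samples. Assuming the module's solver follows
# the usual pinv-based convention, the same matrix can be reproduced with
# plain NumPy.
def _demo_moore_penrose_fit():
    prng = np.random.RandomState(2)
    M_T = prng.random_sample((24, 3))
    M_R = M_T + (prng.random_sample((24, 3)) - 0.5) * 0.5
    M_T_e = matrix_augmented_Cheung2004(M_T, 3)
    # Unique least-squares solution mapping expanded samples to reference.
    CCM = np.transpose(np.dot(np.linalg.pinv(M_T_e), M_R))
    assert np.allclose(CCM, matrix_colour_correction_Cheung2004(M_T, M_R))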
def matrix_colour_correction_Finlayson2015(
M_T: ArrayLike,
M_R: ArrayLike,
degree: Literal[1, 2, 3, 4] = 1,
root_polynomial_expansion: Boolean = True,
) -> NDArray:
"""
Computes a colour correction matrix from given :math:`M_T` colour array to
:math:`M_R` colour array using *Finlayson et al. (2015)* method.
Parameters
----------
M_T
Test array :math:`M_T` to fit onto array :math:`M_R`.
M_R
Reference array the array :math:`M_T` will be colour fitted against.
degree
Expanded polynomial degree.
root_polynomial_expansion
Whether to use the root-polynomials set for the expansion.
Returns
-------
:class:`numpy.ndarray`
Colour correction matrix.
References
----------
:cite:`Finlayson2015`
Examples
--------
>>> prng = np.random.RandomState(2)
>>> M_T = prng.random_sample((24, 3))
>>> M_R = M_T + (prng.random_sample((24, 3)) - 0.5) * 0.5
>>> matrix_colour_correction_Finlayson2015(M_T, M_R) # doctest: +ELLIPSIS
array([[ 1.0526376..., 0.1378078..., -0.2276339...],
[ 0.0739584..., 1.0293994..., -0.1060115...],
[ 0.0572550..., -0.2052633..., 1.1015194...]])
"""
return least_square_mapping_MoorePenrose(
polynomial_expansion_Finlayson2015(
M_T, degree, root_polynomial_expansion
),
M_R,
)
def matrix_colour_correction_Vandermonde(
M_T: ArrayLike, M_R: ArrayLike, degree: Integer = 1
) -> NDArray:
"""
Computes a colour correction matrix from given :math:`M_T` colour array to
:math:`M_R` colour array using *Vandermonde* method.
Parameters
----------
M_T
Test array :math:`M_T` to fit onto array :math:`M_R`.
M_R
Reference array the array :math:`M_T` will be colour fitted against.
degree
Expanded polynomial degree.
Returns
-------
:class:`numpy.ndarray`
Colour correction matrix.
References
----------
:cite:`Wikipedia2003e`
Examples
--------
>>> prng = np.random.RandomState(2)
>>> M_T = prng.random_sample((24, 3))
>>> M_R = M_T + (prng.random_sample((24, 3)) - 0.5) * 0.5
>>> matrix_colour_correction_Vandermonde(M_T, M_R) # doctest: +ELLIPSIS
array([[ 1.0300256..., 0.1141770..., -0.2621816..., 0.0418022...],
[ 0.0670209..., 1.0221494..., -0.1166108..., 0.0128250...],
[ 0.0744612..., -0.1872819..., 1.1278078..., -0.0318085...]])
"""
return least_square_mapping_MoorePenrose(
polynomial_expansion_Vandermonde(M_T, degree), M_R
)
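# Illustrative sketch (not part of the original module): because the
# degree 1 Vandermonde expansion appends a constant term, the resulting
# correction matrix is a 3x4 affine transform (linear matrix plus a
# per-channel offset), unlike the purely linear 3x3 defaults of the
# Cheung (2004) and Finlayson (2015) methods.
def _demo_vandermonde_affine_correction():
    prng = np.random.RandomState(2)
    M_T = prng.random_sample((24, 3))
    M_R = M_T + (prng.random_sample((24, 3)) - 0.5) * 0.5
    CCM = matrix_colour_correction_Vandermonde(M_T, M_R)
    assert CCM.shape == (3, 4)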
MATRIX_COLOUR_CORRECTION_METHODS: CaseInsensitiveMapping = (
CaseInsensitiveMapping(
{
"Cheung 2004": matrix_colour_correction_Cheung2004,
"Finlayson 2015": matrix_colour_correction_Finlayson2015,
"Vandermonde": matrix_colour_correction_Vandermonde,
}
)
)
MATRIX_COLOUR_CORRECTION_METHODS.__doc__ = """
Supported colour correction matrix methods.
References
----------
:cite:`Cheung2004`, :cite:`Finlayson2015`, :cite:`Westland2004`,
:cite:`Wikipedia2003e`
"""
def matrix_colour_correction(
M_T: ArrayLike,
M_R: ArrayLike,
method: Union[
Literal["Cheung 2004", "Finlayson 2015", "Vandermonde"], str
] = "Cheung 2004",
**kwargs: Any,
) -> NDArray:
"""
Computes a colour correction matrix from given :math:`M_T` colour array to
:math:`M_R` colour array.
    The resulting colour correction matrix is computed using multiple linear
    or polynomial regression with the given method. A typical use is
    matching two *ColorChecker* colour rendition charts together.
Parameters
----------
M_T
Test array :math:`M_T` to fit onto array :math:`M_R`.
M_R
Reference array the array :math:`M_T` will be colour fitted against.
method
Computation method.
Other Parameters
----------------
degree
{:func:`colour.characterisation.polynomial_expansion_Finlayson2015`,
:func:`colour.characterisation.polynomial_expansion_Vandermonde`},
Expanded polynomial degree, must be one of *[1, 2, 3, 4]* for
:func:`colour.characterisation.polynomial_expansion_Finlayson2015`
definition.
root_polynomial_expansion
{:func:`colour.characterisation.polynomial_expansion_Finlayson2015`},
Whether to use the root-polynomials set for the expansion.
terms
{:func:`colour.characterisation.matrix_augmented_Cheung2004`},
Number of terms of the expanded polynomial.
Returns
-------
:class:`numpy.ndarray`
Colour correction matrix.
References
----------
:cite:`Cheung2004`, :cite:`Finlayson2015`, :cite:`Westland2004`,
:cite:`Wikipedia2003e`
Examples
--------
>>> M_T = np.array(
... [[0.17224810, 0.09170660, 0.06416938],
... [0.49189645, 0.27802050, 0.21923399],
... [0.10999751, 0.18658946, 0.29938611],
... [0.11666120, 0.14327905, 0.05713804],
... [0.18988879, 0.18227649, 0.36056247],
... [0.12501329, 0.42223442, 0.37027445],
... [0.64785606, 0.22396782, 0.03365194],
... [0.06761093, 0.11076896, 0.39779139],
... [0.49101797, 0.09448929, 0.11623839],
... [0.11622386, 0.04425753, 0.14469986],
... [0.36867946, 0.44545230, 0.06028681],
... [0.61632937, 0.32323906, 0.02437089],
... [0.03016472, 0.06153243, 0.29014596],
... [0.11103655, 0.30553067, 0.08149137],
... [0.41162190, 0.05816656, 0.04845934],
... [0.73339206, 0.53075188, 0.02475212],
... [0.47347718, 0.08834792, 0.30310315],
... [0.00000000, 0.25187016, 0.35062450],
... [0.76809639, 0.78486240, 0.77808297],
... [0.53822392, 0.54307997, 0.54710883],
... [0.35458526, 0.35318419, 0.35524431],
... [0.17976704, 0.18000531, 0.17991488],
... [0.09351417, 0.09510603, 0.09675027],
... [0.03405071, 0.03295077, 0.03702047]]
... )
>>> M_R = np.array(
... [[0.15579559, 0.09715755, 0.07514556],
... [0.39113140, 0.25943419, 0.21266708],
... [0.12824821, 0.18463570, 0.31508023],
... [0.12028974, 0.13455659, 0.07408400],
... [0.19368988, 0.21158946, 0.37955964],
... [0.19957425, 0.36085439, 0.40678123],
... [0.48896605, 0.20691688, 0.05816533],
... [0.09775522, 0.16710693, 0.47147724],
... [0.39358649, 0.12233400, 0.10526425],
... [0.10780332, 0.07258529, 0.16151473],
... [0.27502671, 0.34705454, 0.09728099],
... [0.43980441, 0.26880559, 0.05430533],
... [0.05887212, 0.11126272, 0.38552469],
... [0.12705825, 0.25787860, 0.13566464],
... [0.35612929, 0.07933258, 0.05118732],
... [0.48131976, 0.42082843, 0.07120612],
... [0.34665585, 0.15170714, 0.24969804],
... [0.08261116, 0.24588716, 0.48707733],
... [0.66054904, 0.65941137, 0.66376412],
... [0.48051509, 0.47870296, 0.48230082],
... [0.33045354, 0.32904184, 0.33228886],
... [0.18001305, 0.17978567, 0.18004416],
... [0.10283975, 0.10424680, 0.10384975],
... [0.04742204, 0.04772203, 0.04914226]]
... )
>>> matrix_colour_correction(M_T, M_R) # doctest: +ELLIPSIS
array([[ 0.6982266..., 0.0307162..., 0.1621042...],
[ 0.0689349..., 0.6757961..., 0.1643038...],
[-0.0631495..., 0.0921247..., 0.9713415...]])
"""
method = validate_method(method, MATRIX_COLOUR_CORRECTION_METHODS)
function = MATRIX_COLOUR_CORRECTION_METHODS[method]
return function(M_T, M_R, **filter_kwargs(function, **kwargs))
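# Illustrative sketch (not part of the original module): the default
# method is Cheung (2004) with 3 terms, so the wrapper and the underlying
# definition agree exactly; unrelated keyword arguments are filtered out
# before the call, as with ``polynomial_expansion``.
def _demo_matrix_colour_correction_default():
    prng = np.random.RandomState(2)
    M_T = prng.random_sample((24, 3))
    M_R = M_T + (prng.random_sample((24, 3)) - 0.5) * 0.5
    assert np.allclose(
        matrix_colour_correction(M_T, M_R),
        matrix_colour_correction_Cheung2004(M_T, M_R),
    )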
def colour_correction_Cheung2004(
RGB: ArrayLike,
M_T: ArrayLike,
M_R: ArrayLike,
terms: Literal[3, 5, 7, 8, 10, 11, 14, 16, 17, 19, 20, 22] = 3,
) -> NDArray:
"""
Performs colour correction of given *RGB* colourspace array using the
colour correction matrix from given :math:`M_T` colour array to
:math:`M_R` colour array using *Cheung et al. (2004)* method.
Parameters
----------
RGB
*RGB* colourspace array to colour correct.
M_T
Test array :math:`M_T` to fit onto array :math:`M_R`.
M_R
Reference array the array :math:`M_T` will be colour fitted against.
terms
Number of terms of the expanded polynomial.
Returns
-------
:class:`numpy.ndarray`
Colour corrected *RGB* colourspace array.
References
----------
:cite:`Cheung2004`, :cite:`Westland2004`
Examples
--------
>>> RGB = np.array([0.17224810, 0.09170660, 0.06416938])
>>> prng = np.random.RandomState(2)
>>> M_T = prng.random_sample((24, 3))
>>> M_R = M_T + (prng.random_sample((24, 3)) - 0.5) * 0.5
>>> colour_correction_Cheung2004(RGB, M_T, M_R) # doctest: +ELLIPSIS
array([ 0.1793456..., 0.1003392..., 0.0617218...])
"""
RGB = as_float_array(RGB)
shape = RGB.shape
RGB = np.reshape(RGB, (-1, 3))
RGB_e = matrix_augmented_Cheung2004(RGB, terms)
CCM = matrix_colour_correction_Cheung2004(M_T, M_R, terms)
return np.reshape(np.transpose(np.dot(CCM, np.transpose(RGB_e))), shape)
def colour_correction_Finlayson2015(
RGB: ArrayLike,
M_T: ArrayLike,
M_R: ArrayLike,
degree: Literal[1, 2, 3, 4] = 1,
root_polynomial_expansion: Boolean = True,
) -> NDArray:
"""
Performs colour correction of given *RGB* colourspace array using the
colour correction matrix from given :math:`M_T` colour array to
:math:`M_R` colour array using *Finlayson et al. (2015)* method.
Parameters
----------
RGB
*RGB* colourspace array to colour correct.
M_T
Test array :math:`M_T` to fit onto array :math:`M_R`.
M_R
Reference array the array :math:`M_T` will be colour fitted against.
degree
Expanded polynomial degree.
root_polynomial_expansion
Whether to use the root-polynomials set for the expansion.
Returns
-------
:class:`numpy.ndarray`
Colour corrected *RGB* colourspace array.
References
----------
:cite:`Finlayson2015`
Examples
--------
>>> RGB = np.array([0.17224810, 0.09170660, 0.06416938])
>>> prng = np.random.RandomState(2)
>>> M_T = prng.random_sample((24, 3))
>>> M_R = M_T + (prng.random_sample((24, 3)) - 0.5) * 0.5
>>> colour_correction_Finlayson2015(RGB, M_T, M_R) # doctest: +ELLIPSIS
array([ 0.1793456..., 0.1003392..., 0.0617218...])
"""
RGB = as_float_array(RGB)
shape = RGB.shape
RGB = np.reshape(RGB, (-1, 3))
RGB_e = polynomial_expansion_Finlayson2015(
RGB, degree, root_polynomial_expansion
)
CCM = matrix_colour_correction_Finlayson2015(
M_T, M_R, degree, root_polynomial_expansion
)
return np.reshape(np.transpose(np.dot(CCM, np.transpose(RGB_e))), shape)
def colour_correction_Vandermonde(
RGB: ArrayLike, M_T: ArrayLike, M_R: ArrayLike, degree: Integer = 1
) -> NDArray:
"""
Performs colour correction of given *RGB* colourspace array using the
colour correction matrix from given :math:`M_T` colour array to
:math:`M_R` colour array using *Vandermonde* method.
Parameters
----------
RGB
*RGB* colourspace array to colour correct.
M_T
Test array :math:`M_T` to fit onto array :math:`M_R`.
M_R
Reference array the array :math:`M_T` will be colour fitted against.
degree
Expanded polynomial degree.
Returns
-------
:class:`numpy.ndarray`
Colour corrected *RGB* colourspace array.
References
----------
:cite:`Wikipedia2003e`
Examples
--------
>>> RGB = np.array([0.17224810, 0.09170660, 0.06416938])
>>> prng = np.random.RandomState(2)
>>> M_T = prng.random_sample((24, 3))
>>> M_R = M_T + (prng.random_sample((24, 3)) - 0.5) * 0.5
>>> colour_correction_Vandermonde(RGB, M_T, M_R) # doctest: +ELLIPSIS
array([ 0.2128689..., 0.1106242..., 0.036213 ...])
"""
RGB = as_float_array(RGB)
shape = RGB.shape
RGB = np.reshape(RGB, (-1, 3))
RGB_e = polynomial_expansion_Vandermonde(RGB, degree)
CCM = matrix_colour_correction_Vandermonde(M_T, M_R, degree)
return np.reshape(np.transpose(np.dot(CCM, np.transpose(RGB_e))), shape)
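# Illustrative sketch (not part of the original module): all three
# correction definitions share the same pattern, i.e. flatten the input to
# (-1, 3), expand it, apply the fitted matrix and restore the original
# shape. Chaining the public expansion and matrix definitions by hand
# therefore reproduces the packaged result.
def _demo_colour_correction_pattern():
    prng = np.random.RandomState(2)
    M_T = prng.random_sample((24, 3))
    M_R = M_T + (prng.random_sample((24, 3)) - 0.5) * 0.5
    RGB = np.array([0.17224810, 0.09170660, 0.06416938])
    RGB_e = matrix_augmented_Cheung2004(RGB, 3)
    CCM = matrix_colour_correction_Cheung2004(M_T, M_R)
    manual = np.dot(CCM, RGB_e)
    assert np.allclose(manual, colour_correction_Cheung2004(RGB, M_T, M_R))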
COLOUR_CORRECTION_METHODS: CaseInsensitiveMapping = CaseInsensitiveMapping(
{
"Cheung 2004": colour_correction_Cheung2004,
"Finlayson 2015": colour_correction_Finlayson2015,
"Vandermonde": colour_correction_Vandermonde,
}
)
COLOUR_CORRECTION_METHODS.__doc__ = """
Supported colour correction methods.
References
----------
:cite:`Cheung2004`, :cite:`Finlayson2015`, :cite:`Westland2004`,
:cite:`Wikipedia2003e`
"""
def colour_correction(
RGB: ArrayLike,
M_T: ArrayLike,
M_R: ArrayLike,
method: Union[
Literal["Cheung 2004", "Finlayson 2015", "Vandermonde"], str
] = "Cheung 2004",
**kwargs: Any,
) -> NDArray:
"""
Performs colour correction of given *RGB* colourspace array using the
colour correction matrix from given :math:`M_T` colour array to
:math:`M_R` colour array.
Parameters
----------
RGB
*RGB* colourspace array to colour correct.
M_T
Test array :math:`M_T` to fit onto array :math:`M_R`.
M_R
Reference array the array :math:`M_T` will be colour fitted against.
method
Computation method.
Other Parameters
----------------
degree
{:func:`colour.characterisation.polynomial_expansion_Finlayson2015`,
:func:`colour.characterisation.polynomial_expansion_Vandermonde`},
Expanded polynomial degree, must be one of *[1, 2, 3, 4]* for
:func:`colour.characterisation.polynomial_expansion_Finlayson2015`
definition.
root_polynomial_expansion
{:func:`colour.characterisation.polynomial_expansion_Finlayson2015`},
Whether to use the root-polynomials set for the expansion.
terms
{:func:`colour.characterisation.matrix_augmented_Cheung2004`},
Number of terms of the expanded polynomial.
Returns
-------
:class:`numpy.ndarray`
Colour corrected *RGB* colourspace array.
References
----------
:cite:`Cheung2004`, :cite:`Finlayson2015`, :cite:`Westland2004`,
:cite:`Wikipedia2003e`
Examples
--------
>>> RGB = np.array([0.17224810, 0.09170660, 0.06416938])
>>> M_T = np.array(
... [[0.17224810, 0.09170660, 0.06416938],
... [0.49189645, 0.27802050, 0.21923399],
... [0.10999751, 0.18658946, 0.29938611],
... [0.11666120, 0.14327905, 0.05713804],
... [0.18988879, 0.18227649, 0.36056247],
... [0.12501329, 0.42223442, 0.37027445],
... [0.64785606, 0.22396782, 0.03365194],
... [0.06761093, 0.11076896, 0.39779139],
... [0.49101797, 0.09448929, 0.11623839],
... [0.11622386, 0.04425753, 0.14469986],
... [0.36867946, 0.44545230, 0.06028681],
... [0.61632937, 0.32323906, 0.02437089],
... [0.03016472, 0.06153243, 0.29014596],
... [0.11103655, 0.30553067, 0.08149137],
... [0.41162190, 0.05816656, 0.04845934],
... [0.73339206, 0.53075188, 0.02475212],
... [0.47347718, 0.08834792, 0.30310315],
... [0.00000000, 0.25187016, 0.35062450],
... [0.76809639, 0.78486240, 0.77808297],
... [0.53822392, 0.54307997, 0.54710883],
... [0.35458526, 0.35318419, 0.35524431],
... [0.17976704, 0.18000531, 0.17991488],
... [0.09351417, 0.09510603, 0.09675027],
... [0.03405071, 0.03295077, 0.03702047]]
... )
>>> M_R = np.array(
... [[0.15579559, 0.09715755, 0.07514556],
... [0.39113140, 0.25943419, 0.21266708],
... [0.12824821, 0.18463570, 0.31508023],
... [0.12028974, 0.13455659, 0.07408400],
... [0.19368988, 0.21158946, 0.37955964],
... [0.19957425, 0.36085439, 0.40678123],
... [0.48896605, 0.20691688, 0.05816533],
... [0.09775522, 0.16710693, 0.47147724],
... [0.39358649, 0.12233400, 0.10526425],
... [0.10780332, 0.07258529, 0.16151473],
... [0.27502671, 0.34705454, 0.09728099],
... [0.43980441, 0.26880559, 0.05430533],
... [0.05887212, 0.11126272, 0.38552469],
... [0.12705825, 0.25787860, 0.13566464],
... [0.35612929, 0.07933258, 0.05118732],
... [0.48131976, 0.42082843, 0.07120612],
... [0.34665585, 0.15170714, 0.24969804],
... [0.08261116, 0.24588716, 0.48707733],
... [0.66054904, 0.65941137, 0.66376412],
... [0.48051509, 0.47870296, 0.48230082],
... [0.33045354, 0.32904184, 0.33228886],
... [0.18001305, 0.17978567, 0.18004416],
... [0.10283975, 0.10424680, 0.10384975],
... [0.04742204, 0.04772203, 0.04914226]]
... )
>>> colour_correction(RGB, M_T, M_R) # doctest: +ELLIPSIS
array([ 0.1334872..., 0.0843921..., 0.0599014...])
"""
method = validate_method(method, COLOUR_CORRECTION_METHODS)
function = COLOUR_CORRECTION_METHODS[method]
return function(RGB, M_T, M_R, **filter_kwargs(function, **kwargs))
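# Illustrative sketch (not part of the original module): correcting the
# test samples themselves should bring them closer to the reference than
# they were before the fit (the identity matrix is always a feasible
# solution, so the least-squares optimum can only improve on it). This
# gives a quick sanity check of the whole pipeline.
def _demo_colour_correction_residual():
    prng = np.random.RandomState(2)
    M_T = prng.random_sample((24, 3))
    M_R = M_T + (prng.random_sample((24, 3)) - 0.5) * 0.5
    corrected = colour_correction(M_T, M_T, M_R)
    assert np.linalg.norm(corrected - M_R) < np.linalg.norm(M_T - M_R)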
|
# Generated by Django 3.1.4 on 2021-03-31 22:48
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('nlpviewer_backend', '0015_remove_job_document'),
]
operations = [
migrations.AlterModelOptions(
name='project',
            options={
                'permissions': (
                    ('read_project', 'Can read the project'),
                    ('edit_annotation', 'Can edit annotation'),
                    ('edit_text', 'Can edit the document'),
                    ('edit_project', 'Can edit the project'),
                    ('remove_project', 'Can remove the project'),
                    ('new_project', 'Can create in the project'),
                    ('create_job', 'Can create a job'),
                    ('query_job', 'Can query jobs'),
                    ('update_job', 'Can update jobs'),
                )
            },
),
]
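# Usage sketch (illustrative, not part of the generated migration): once
# applied, these model-level permissions can be granted and checked with
# Django's standard auth API, e.g.:
#
#   from django.contrib.auth.models import Permission
#   permission = Permission.objects.get(codename='read_project')
#   user.user_permissions.add(permission)
#   user.has_perm('nlpviewer_backend.read_project')
#
# Object-level (per-project) checks would require a separate permission
# backend such as django-guardian; Django's built-in checks are model-wide.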
|