code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/env python
#
# Copyright (C) 2013-2015 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import dxpy
import json
import string
import random
import sys
import argparse
import os
# to find the magic library
import magic
import subprocess
def id_generator(size=10, chars=string.ascii_uppercase + string.digits):
    """Build a random identifier string of length ``size`` from ``chars``."""
    picked = []
    for _ in range(size):
        picked.append(random.choice(chars))
    return "".join(picked)
def unpack(input):
    """Decompress *input* if necessary and return the name of a plain-text file.

    Uses libmagic to detect the compression format (xz / bzip2 / gzip). Tar
    archives are rejected with dxpy.AppError, as is anything that is not
    plain text after decompression. Already-uncompressed files are returned
    unchanged; otherwise a randomly named decompressed copy is written to
    the working directory and its name returned.
    """
    m = magic.Magic()
    # determine compression format
    try:
        file_type = m.from_file(input)
    except Exception as e:
        raise dxpy.AppError("Error while identifying compression format: " + str(e))
    # if we find a tar file throw a program error telling the user to unpack it
    if file_type == 'application/x-tar':
        raise dxpy.AppError("App does not support tar files. Please unpack.")
    # since we haven't returned, the file is compressed. Determine what program to use to uncompress
    uncomp_util = None
    if file_type == 'XZ compressed data':
        uncomp_util = 'xzcat'
    elif file_type[:21] == 'bzip2 compressed data':
        uncomp_util = 'bzcat'
    elif file_type[:20] == 'gzip compressed data':
        uncomp_util = 'zcat'
    elif file_type == 'POSIX tar archive (GNU)' or 'tar' in file_type:
        raise dxpy.AppError("Found a tar archive. Please untar your sequences before importing")
    else:
        # just return input filename since it's already uncompressed
        return input
    if uncomp_util is not None:
        # bzcat does not support -t. Use non streaming decompressors for testing input
        test_util = {'xzcat': 'xz', 'bzcat': 'bzip2', 'zcat': 'gzip'}[uncomp_util]
        try:
            # SECURITY: pass args as a list (no shell) so odd filenames cannot
            # inject shell commands
            subprocess.check_call([test_util, "-t", input])
        except subprocess.CalledProcessError:
            raise dxpy.AppError("File failed integrity check by "+uncomp_util+". Compressed file is corrupted.")
    # with that in hand, unzip file. If we find a tar archive then exit with error.
    try:
        with subprocess.Popen([uncomp_util, input], stdout=subprocess.PIPE).stdout as pipe:
            # BUG FIX: pipe.next() is Python 2 only; next(pipe) works on 2 and 3
            line = next(pipe)
        uncomp_type = m.from_buffer(line)
    except Exception as e:
        raise dxpy.AppError("Error detecting file format after decompression: " + str(e))
    if uncomp_type == 'POSIX tar archive (GNU)' or 'tar' in uncomp_type:
        raise dxpy.AppError("Found a tar archive after decompression. Please untar your files before importing")
    elif 'ASCII text' not in uncomp_type:
        raise dxpy.AppError("After decompression found file type other than plain text")
    try:
        out_name = id_generator()
        # write decompressed output straight to out_name instead of relying
        # on shell redirection (avoids shell=True on a user-supplied filename)
        with open(out_name, "wb") as out_fh:
            subprocess.check_call([uncomp_util, "--stdout", input], stdout=out_fh)
        return out_name
    except subprocess.CalledProcessError as e:
        raise dxpy.AppError("Unable to open compressed input for reading: " + str(e))
def detect_type(bed_file):
    """Classify *bed_file* as a bedDetail, genes, or spans BED file.

    Returns a dict {"type": ..., "delimiter": ...}. A track line carrying
    type=bedDetail marks a bedDetail file; otherwise files with at least 12
    columns are treated as gene models, anything narrower as plain spans.
    """
    delimiter = find_delimiter(bed_file)
    with open(bed_file, 'rU') as bf:
        # scan past any leading "browser" lines looking for a "track" header
        header = ""
        while "track" not in header:
            header = bf.readline()
            # if this isn't a browser line either then there isn't a header
            if "browser" not in header:
                break
    if "type=bedDetail" in header:
        print("File is a BED detail file", file=sys.stderr)
        return {"type": "bedDetail", "delimiter": delimiter}
    kind = "genes" if find_num_columns(bed_file, delimiter) >= 12 else "spans"
    return {"type": kind, "delimiter": delimiter}
# takes the whole bed file and splits into separate files for each track contained in it
def split_on_track(bed_file):
    """Split *bed_file* into one temporary file per "track" section.

    Each output file gets a random name (via id_generator) and contains its
    track line followed by that track's data lines. Returns the list of
    filenames created. A file with no track lines yields a single output file.
    """
    files = []
    current_filename = id_generator()
    # open bed file
    with open(bed_file, 'rU') as bf:
        curr_file = open(current_filename, "w")
        line = bf.readline()
        # skip a single leading "browser" line if present
        if line.startswith("browser"):
            line = bf.readline()
        curr_file.write(line)
        line = bf.readline()
        while True:
            if line.startswith("track"):
                # close and save our last track as a new file
                curr_file.close()
                files.append(current_filename)
                # open a new file for the next track
                current_filename = id_generator()
                curr_file = open(current_filename, "w")
            elif line == "":
                # EOF: close and record the final track file
                curr_file.close()
                files.append(current_filename)
                break
            # write the current line (the track line goes into the NEW file)
            curr_file.write(line)
            line = bf.readline()
    return files
def find_num_columns(bed_file, delimiter="\t"):
    """Return the maximum number of *delimiter*-separated columns in *bed_file*.

    "track" header lines are skipped. The result is printed to stderr for
    progress logging.
    """
    num_cols = 0
    # NOTE: the original used mode 'rU', which was removed in Python 3.11;
    # plain 'r' already performs universal-newline translation on Python 3.
    with open(bed_file, "r") as bf:
        for line in bf:
            if line.startswith("track"):
                continue
            fields = line.split(delimiter)
            if len(fields) > num_cols:
                num_cols = len(fields)
    print("Found num cols: " + str(num_cols), file=sys.stderr)
    return num_cols
def find_delimiter(bed_file):
    """Detect whether *bed_file* is tab- or space-delimited.

    Inspects the first data line (skipping one leading "track" line) and
    returns "\\t" or " ". Raises dxpy.AppError if fewer than three fields
    are present under either interpretation (not a valid BED line).
    """
    # 'rU' was removed in Python 3.11; 'r' does newline translation on Py3
    with open(bed_file, "r") as bf:
        line = bf.readline()
        # skip a single track header line if present
        if line.startswith("track"):
            line = bf.readline()
        if len(line.split("\t")) >= 3:
            print("Bed file is tab delimited", file=sys.stderr)
            return "\t"
        if len(line.split()) < 3:
            raise dxpy.AppError("File is not a valid bed file (neither space delimited nor tab delimited)")
        print("Bed file is space delimited", file=sys.stderr)
        return " "
def import_spans(bed_file, table_name, ref_id, file_id, additional_types, property_keys, property_values, tags, isBedDetail, delimiter="\t"):
    """Create a Spans GTable from *bed_file* and return a dxlink to it.

    The nine standard BED columns map to typed table columns; any further
    columns are stored as extra string columns. bedDetail files contribute
    two trailing ID/description columns. Malformed numeric fields cause the
    row to be skipped; short rows (< 3 columns) raise dxpy.AppError, as does
    a mismatch between property_keys and property_values.
    """
    num_cols = find_num_columns(bed_file, delimiter)
    # if this is a bedDetail file we should treat the last two columns separately
    if isBedDetail:
        num_cols -= 2
    possible_columns = [("chr", "string"),
                        ("lo", "int32"),
                        ("hi", "int32"),
                        ("name", "string"),
                        ("score", "float"),
                        ("strand", "string"),
                        ("thick_start", "int32"),
                        ("thick_end", "int32"),
                        ("item_rgb", "string")]
    bedDetail_columns = [("bedDetail_ID", "string"),
                        ("bedDetail_desc", "string")]
    possible_default_row = ["", 0, 0, "", 0, ".", 0, 0, ""]
    columns = possible_columns[:num_cols]
    if isBedDetail:
        columns.extend(bedDetail_columns)
    # BED file has more than the nine standard columns: keep extras as strings
    if num_cols > len(columns):
        for i in range(len(columns), num_cols):
            columns.append(("BED_column_"+str(i+1), "string"))
            possible_default_row.append("")
    default_row = possible_default_row[:num_cols]
    if isBedDetail:
        default_row.extend(["",""])
    column_descs = [dxpy.DXGTable.make_column_desc(name, type) for name, type in columns]
    indices = [dxpy.DXGTable.genomic_range_index("chr","lo","hi", 'gri')]
    # add the lexicographic "search" index only when a name column exists
    for c in columns:
        if "name" in c:
            indices.append(dxpy.DXGTable.lexicographic_index([
                dxpy.DXGTable.lexicographic_index_column("name", True, False),
                dxpy.DXGTable.lexicographic_index_column("chr"),
                dxpy.DXGTable.lexicographic_index_column("lo"),
                dxpy.DXGTable.lexicographic_index_column("hi")], "search"))
            break
    with open(bed_file, 'rU') as bed, dxpy.new_dxgtable(column_descs, indices=indices, mode='w') as span:
        details = {"original_contigset": dxpy.dxlink(ref_id)}
        if file_id != None:
            details["original_file"] = dxpy.dxlink(file_id)
        if len(property_keys) != len(property_values):
            raise dxpy.AppError("Expected each provided property to have a corresponding value.")
        # attach user-supplied key/value properties to the table details
        for i in range(len(property_keys)):
            details[property_keys[i]] = property_values[i]
        span.set_details(details)
        span.add_types(["Spans", "gri"])
        span.rename(table_name)
        for line in bed:
            row = list(default_row)
            if line.startswith("track"):
                # record the track header itself in the table details
                details = span.get_details()
                details['track'] = line
                span.set_details(details)
                continue
            line = line.rstrip("\n")
            line = line.split(delimiter)
            if isBedDetail:
                # only the first 4 columns are guaranteed to be defined by UCSC
                validate_line(line[:4])
                # save last two fields separately
                bedDetailFields = line[-2:]
                line = line[:-2]
            else:
                validate_line(line[:num_cols])
            # check to see if this is a weird line
            if len(line) == 0:
                break
            if len(line) < 3:
                raise dxpy.AppError("Line: "+"\t".join(line)+" in BED file contains less than the minimum 3 columns. Invalid BED file.")
            try:
                row[0] = line[0]
                row[1] = int(line[1])
                row[2] = int(line[2])
                row[3] = line[3]
                # dashes are sometimes used when field is invalid
                if line[4] == "-" or line[4] == ".":
                    line[4] = 0
                row[4] = float(line[4])
                row[5] = line[5]
                # dashes are sometimes used when field is invalid
                if line[6] == "-" or line[6] == ".":
                    line[6] = 0
                row[6] = int(line[6])
                # dashes are sometimes used when field is invalid
                if line[7] == "-" or line[7] == ".":
                    line[7] = 0
                row[7] = int(line[7])
                row[8] = line[8]
            # an index error would come from having fewer columns in a row, which we should handle ok
            except IndexError:
                pass
            # value error when fields are messed up and string gets converted to int, etc. Throw these out.
            except ValueError:
                continue
            if isBedDetail:
                # add these in at the end if we have a bedDetail file
                row[num_cols] = bedDetailFields[0]
                row[num_cols+1] = bedDetailFields[1]
            span.add_row(row)
        span.flush()
    return dxpy.dxlink(span.get_id())
##########named spans###############END
def generate_gene_row(line, block_size, block_start, span_type, default_row, parent_id, span_id):
    """Build one Genes-table row from a 12+ column BED line.

    Child spans (parent_id != -1) are positioned at the gene start plus
    block_start and sized by block_size; the parent transcript takes its
    lo/hi straight from the BED line. Returns None for malformed lines
    (non-numeric coordinates or too few columns).
    """
    row = list(default_row)
    try:
        is_child = parent_id != -1
        row[0] = line[0]                          # chr
        start = int(line[1])
        if is_child:
            start += block_start                  # child offset within the gene
        row[1] = start                            # lo
        # hi: children are sized relative to their own start
        row[2] = start + block_size if is_child else int(line[2])
        row[3] = line[3]                          # name
        row[4] = span_id
        row[5] = span_type
        row[6] = line[5]                          # strand
        row[7] = (span_type == "CDS")             # is_coding
        row[8] = parent_id
        row[9] = -1                               # frame: BED carries none
        row[10] = "\t".join(line[12:])            # description: trailing columns, if any
    except (ValueError, IndexError):
        # bad int field or missing columns: caller drops the row
        return None
    return row
def import_genes(bed_file, table_name, ref_id, file_id, additional_types, property_keys, property_values, tags, delimiter="\t"):
    """Create a Genes GTable from a 12+ column gene-model BED file.

    Each BED line yields one "transcript" parent row plus one child row per
    block (exon). When thickStart/thickEnd define a coding region, exons are
    partitioned into CDS / 5' UTR / 3' UTR around those boundaries (strand
    aware); otherwise each block is emitted as a plain "exon". Returns a
    dxlink to the new table. Raises dxpy.AppError on malformed input.
    """
    # implement BED importing from this format:
    # http://genome.ucsc.edu/FAQ/FAQformat.html#format1
    columns = [("chr", "string"),
               ("lo", "int32"),
               ("hi", "int32"),
               ("name", "string"),
               ("span_id", "int32"),
               ("type", "string"),
               ("strand", "string"),
               ("is_coding", "boolean"),
               ("parent_id", "int32"),
               ("frame", "int16"),
               ("description", "string")]
    column_descs = [dxpy.DXGTable.make_column_desc(name, type) for name, type in columns]
    indices = [dxpy.DXGTable.genomic_range_index("chr","lo","hi", 'gri'),
               dxpy.DXGTable.lexicographic_index([
                   dxpy.DXGTable.lexicographic_index_column("name", True, False),
                   dxpy.DXGTable.lexicographic_index_column("chr"),
                   dxpy.DXGTable.lexicographic_index_column("lo"),
                   dxpy.DXGTable.lexicographic_index_column("hi"),
                   dxpy.DXGTable.lexicographic_index_column("type")], "search")]
    default_row = ["", 0, 0, "", -1, "", ".", False, -1, -1, ""]
    with open(bed_file, 'rU') as bed, dxpy.new_dxgtable(column_descs, indices=indices, mode='w') as span:
        span_table_id = span.get_id()
        details = {"original_contigset": dxpy.dxlink(ref_id)}
        if file_id != None:
            details["original_file"] = dxpy.dxlink(file_id)
        if len(property_keys) != len(property_values):
            raise dxpy.AppError("Expected each provided property to have a corresponding value.")
        # attach user-supplied key/value properties to the table details
        for i in range(len(property_keys)):
            details[property_keys[i]] = property_values[i]
        span.set_details(details)
        span.add_types(["gri", "Genes"])
        span.rename(table_name)
        current_span_id = 0
        # where the parsing magic happens
        for line in bed:
            if line.startswith("track"):
                # stash the track header in the table details
                details = span.get_details()
                details['track'] = line
                span.set_details(details)
                continue
            line = line.rstrip("\n")
            row = list(default_row)
            line = line.split(delimiter)
            validate_line(line)
            if len(line) < 12:
                raise dxpy.AppError("Line: "+"\t".join(line)+" in gene model-like BED file contains less than 12 columns. Invalid BED file.")
            # add parent gene track
            row = generate_gene_row(line, 0, 0, "transcript", default_row, -1, current_span_id)
            if row != None:
                span.add_row(row)
            current_parent_id = current_span_id
            current_span_id += 1
            # add all children
            blockCount = int(line[9])
            line[10] = line[10].rstrip(",").split(",")
            blockSizes = [int(line[10][n]) for n in range(blockCount)]
            line[11] = line[11].rstrip(",").split(",")
            blockStarts = [int(line[11][n]) for n in range(blockCount)]
            gene_lo = int(line[1])
            gene_hi = int(line[2])
            # set thick* to be within the gene if outside
            thickStart = min(max(int(line[6]), gene_lo), gene_hi)
            thickEnd = max(min(int(line[7]), gene_hi), gene_lo)
            for i in range(blockCount):
                # look to thickStart and thickEnd to get information about the type of this region
                # if thick* are the same or cover the whole transcript then we ignore them
                # else, we partition the exons into CDS and UTR based on their boundaries
                if thickStart == thickEnd or (thickStart == gene_lo and thickEnd == gene_hi):
                    span.add_row(generate_gene_row(line,
                                                   blockSizes[i],
                                                   blockStarts[i],
                                                   "exon",
                                                   default_row,
                                                   current_parent_id,
                                                   current_span_id))
                    current_span_id += 1
                else:
                    exon_lo = int(line[1])+blockStarts[i]
                    exon_hi = int(exon_lo+blockSizes[i])
                    # we're all UTR if we enter either of these
                    if (exon_hi <= thickStart and line[5] == '+') or (exon_lo >= thickEnd and line[5] == '-'):
                        span.add_row(generate_gene_row(line,
                                                       blockSizes[i],
                                                       blockStarts[i],
                                                       "5' UTR",
                                                       default_row,
                                                       current_parent_id,
                                                       current_span_id))
                        current_span_id += 1
                    elif (exon_hi <= thickStart and line[5] == '-') or (exon_lo >= thickEnd and line[5] == '+'):
                        span.add_row(generate_gene_row(line,
                                                       blockSizes[i],
                                                       blockStarts[i],
                                                       "3' UTR",
                                                       default_row,
                                                       current_parent_id,
                                                       current_span_id))
                        current_span_id += 1
                    # if this is true then we overlap CDS partially or completely
                    elif (exon_lo < thickEnd and exon_hi > thickStart):
                        # entirely contained
                        if exon_lo >= thickStart and exon_hi <= thickEnd:
                            span.add_row(generate_gene_row(line,
                                                           blockSizes[i],
                                                           blockStarts[i],
                                                           "CDS",
                                                           default_row,
                                                           current_parent_id,
                                                           current_span_id))
                            current_span_id += 1
                        else:
                            # left portion is UTR
                            if exon_lo < thickStart:
                                if line[5] == '+':
                                    UTR_type = "5' UTR"
                                else:
                                    UTR_type = "3' UTR"
                                UTR_size = (min(blockSizes[i], thickStart - exon_lo))
                                span.add_row(generate_gene_row(line,
                                                               UTR_size,
                                                               blockStarts[i],
                                                               UTR_type,
                                                               default_row,
                                                               current_parent_id,
                                                               current_span_id))
                                current_span_id += 1
                            # CDS portion
                            CDS_size = blockSizes[i] - (max(exon_lo, thickStart) - exon_lo)
                            CDS_size -= (exon_hi - min(exon_hi, thickEnd))
                            CDS_start = (max(exon_lo, thickStart) - exon_lo) + blockStarts[i]
                            span.add_row(generate_gene_row(line,
                                                           CDS_size,
                                                           CDS_start,
                                                           "CDS",
                                                           default_row,
                                                           current_parent_id,
                                                           current_span_id))
                            current_span_id += 1
                            # right portion is UTR
                            if exon_hi > thickEnd:
                                if line[5] == '+':
                                    UTR_type = "3' UTR"
                                else:
                                    UTR_type = "5' UTR"
                                UTR_size = (min(blockSizes[i], exon_hi - thickEnd))
                                UTR_start = blockStarts[i] + thickEnd - exon_lo
                                span.add_row(generate_gene_row(line,
                                                               UTR_size,
                                                               UTR_start,
                                                               UTR_type,
                                                               default_row,
                                                               current_parent_id,
                                                               current_span_id))
                                current_span_id += 1
    return dxpy.dxlink(span.get_id())
# Command-line interface: one Spans/Genes object is produced per track found
# in the input BED file.
parser = argparse.ArgumentParser(description='Import a local BED file as a Spans or Genes object. If multiple tracks exist in the BED file, one object will be created for each.')
parser.add_argument('filename', help='local filename to import')
parser.add_argument('reference', help='ID of ContigSet object (reference) that this BED file annotates')
parser.add_argument('--file_id', default=None, help='the DNAnexus file-id of the original file. If provided, a link to this id will be added in the type details')
parser.add_argument('--additional_type', default=[], action='append', help='This will be added to the list of object types (in addition to the type \"Spans\", or \"Genes\" which is added automatically)')
parser.add_argument('--property_key', default=[], action='append', help='The keys in key-value pairs that will be added to the details of the object. The nth property key will be paired with the nth property value. The number of keys must equal the number of values provided')
parser.add_argument('--property_value', default=[], action='append', help='The values in key-value pairs that will be added to the details of the object. The nth property key will be paired with the nth property value. The number of keys must equal the number of values provided')
# BUG FIX: removed stray leading '"' that appeared in the --tag help text
parser.add_argument('--tag', default=[], action='append', help='A set of tags (string labels) that will be added to the resulting Variants table object. (You can use tags and properties to better describe and organize your data)')
def import_BED(**args):
    """Top-level driver: import a local BED file as Spans/Genes objects.

    With no keyword arguments the command line is parsed instead. The input
    is decompressed if needed, split into one temporary file per track, and
    each track imported as the appropriate object type. Prints the list of
    created dxlinks as JSON to stdout and returns it. Raises dxpy.AppError
    when the file type cannot be determined.
    """
    if len(args) == 0:
        cmd_line_args = parser.parse_args(sys.argv[1:])
        args['filename'] = cmd_line_args.filename
        args['reference'] = cmd_line_args.reference
        args['file_id'] = cmd_line_args.file_id
        args['additional_type'] = cmd_line_args.additional_type
        args['property_key'] = cmd_line_args.property_key
        args['property_value'] = cmd_line_args.property_value
        args['tag'] = cmd_line_args.tag
    bed_filename = args['filename']
    reference = args['reference']
    file_id = args['file_id']
    additional_types = args['additional_type']
    property_keys = args['property_key']
    property_values = args['property_value']
    tags = args['tag']
    job_outputs = []
    # uncompresses file if necessary. Returns new filename
    bed_filename_uncomp = unpack(bed_filename)
    # hoisted out of the loop: the basename never changes between tracks
    try:
        bed_basename = os.path.basename(bed_filename)
    except Exception:
        bed_basename = bed_filename
    current_file = 1
    for import_filename in split_on_track(bed_filename_uncomp):
        if current_file == 1:
            name = bed_basename
        else:
            # subsequent tracks get a numeric suffix
            name = bed_basename + "_" + str(current_file)
        current_file += 1
        # PERF FIX: detect_type() parses the whole file; call it once per
        # track instead of twice
        file_info = detect_type(import_filename)
        bed_type = file_info["type"]
        delimiter = file_info["delimiter"]
        print("Bed type is : " + bed_type, file=sys.stderr)
        if bed_type == "genes":
            print("Importing as Genes Type", file=sys.stderr)
            job_outputs.append(import_genes(import_filename, name, reference, file_id, additional_types, property_keys, property_values, tags, delimiter))
        elif bed_type == "spans" or bed_type == "bedDetail":
            print("Importing as Spans Type", file=sys.stderr)
            bedDetail = (bed_type == "bedDetail")
            if bedDetail:
                print("input file is in 'bedDetails' format...", file=sys.stderr)
            job_outputs.append(import_spans(import_filename, name, reference, file_id, additional_types, property_keys, property_values, tags, bedDetail, delimiter))
        else:
            raise dxpy.AppError("Unable to determine type of BED file")
        # ROBUSTNESS: delete temp files directly instead of shelling out to rm
        os.remove(import_filename)
    if bed_filename != bed_filename_uncomp:
        os.remove(bed_filename_uncomp)
    print(json.dumps(job_outputs))
    return job_outputs
def validate_line(line):
    """Validate the fields of one split BED line, raising dxpy.AppError on bad data.

    *line* is a list of column strings. Only the columns actually present
    are checked: start/end must be non-negative integers; score numeric (or
    "."/"-"); strand one of "+", "-", "."; thickStart/thickEnd non-negative
    integers (or "."/"-"); blockCount a non-negative integer; blockSizes and
    blockStarts comma-separated integer lists with blockCount entries.
    Returns None on success; the input list is not modified.
    """
    line_str = "\t".join(line)
    entries = list(line)  # work on a copy; columns 10/11 are re-split below
    if len(entries) > 1:
        try:
            if int(entries[1]) < 0:
                raise dxpy.AppError("The start position for one entry was unexpectedly negative. \nOffending line_str: " + line_str + "\nOffending value: " + str(entries[1]))
        except ValueError:
            raise dxpy.AppError("One of the start values could not be translated to an integer. " + "\nOffending line_str: " + line_str + "\nOffending value: " + str(entries[1]))
    if len(entries) > 2:
        try:
            if int(entries[2]) < 0:
                raise dxpy.AppError("The end position for one entry was unexpectedly negative. \nOffending line_str: " + line_str + "\nOffending value: " + str(entries[2]))
        except ValueError:
            raise dxpy.AppError("One of the end values could not be translated to an integer. " + "\nOffending line_str: " + line_str + "\nOffending value: " + str(entries[2]))
    if len(entries) > 4:
        try:
            # "." and "-" are accepted placeholders for a missing score
            if entries[4] != "." and entries[4] != "-":
                float(entries[4])
        except ValueError:
            raise dxpy.AppError("One of the score values for one entry could not be translated to a number. " + "\nOffending line_str: " + line_str + "\nOffending value: " + str(entries[4]))
    if len(entries) > 5:
        if entries[5] != "+" and entries[5] != "-" and entries[5] != ".":
            raise dxpy.AppError("The strand indicated for an element was not \"+\", \"-\", or \".\"" + "\nOffending line_str: " + line_str + "\nOffending value: " + str(entries[5]))
    if len(entries) > 6:
        try:
            if entries[6] != "." and entries[6] != "-":
                if int(entries[6]) < 0:
                    raise dxpy.AppError("The thickStart position for one entry was unexpectedly negative. \nOffending line_str: " + line_str + "\nOffending value: " + str(entries[6]))
        except ValueError:
            raise dxpy.AppError("One of the thickStart values could not be translated to an integer. " + "\nOffending line_str: " + line_str + "\nOffending value: " + str(entries[6]))
    if len(entries) > 7:
        try:
            if entries[7] != "." and entries[7] != "-":
                if int(entries[7]) < 0:
                    raise dxpy.AppError("The thickEnd position for one entry was unexpectedly negative. \nOffending line_str: " + line_str + "\nOffending value: " + str(entries[7]))
        except ValueError:
            raise dxpy.AppError("One of the thickEnd values could not be translated to an integer. " + "\nOffending line_str: " + line_str + "\nOffending value: " + str(entries[7]))
    if len(entries) > 9:
        try:
            if int(entries[9]) < 0:
                raise dxpy.AppError("The number of exons (blockCount) for one entry was unexpectedly negative. \nOffending line_str: " + line_str + "\nOffending value: " + str(entries[9]))
        except ValueError:
            # BUG FIX: this message previously said "thickEnd" instead of blockCount
            raise dxpy.AppError("One of the blockCount values could not be translated to an integer. " + "\nOffending line_str: " + line_str + "\nOffending value: " + str(entries[9]))
    if len(entries) > 10:
        try:
            # BUG FIX: column 10 is blockSizes (the variable was misnamed
            # blockStarts); also narrowed the bare except
            entries[10] = entries[10].rstrip(",").split(",")
            blockSizes = [int(entries[10][n]) for n in range(int(entries[9]))]
        except (ValueError, IndexError):
            raise dxpy.AppError("Could not parse the blockSizes entry as a comma-separated list of integers \nOffending line_str: " + line_str + "\nOffending value: " + str(entries[10]))
    if len(entries) > 11:
        try:
            entries[11] = entries[11].rstrip(",").split(",")
            blockStarts = [int(entries[11][n]) for n in range(int(entries[9]))]
        except (ValueError, IndexError):
            raise dxpy.AppError("Could not parse the blockStarts entry as a comma-separated list of integers \nOffending line_str: " + line_str + "\nOffending value: " + str(entries[11]))
def main(**args):
    """Entry point wrapper so the module can be invoked as a dx app."""
    import_BED(**args)
# when run as a script, parse arguments from the command line
if __name__ == '__main__':
    import_BED()
| andyshinn/dx-toolkit | src/python/dxpy/scripts/dx_bed_to_spans.py | Python | apache-2.0 | 31,038 |
import os
import sys
class Widget(object):
    """A rectangular user-interface component.

    A widget claims a rectangular region of its content and is responsible
    for all drawing within that region.
    """

    def __init__(self, name, width=50, height=50):
        self.name = name
        self.resize(width, height)

    def size(self):
        """Return the current (width, height) tuple."""
        return (self.width, self.height)

    def resize(self, width, height):
        """Set the widget's width and height."""
        self.width = width
        self.height = height
#!/usr/bin/env python
# -*- coding:utf-8 -*-
__version__ = '0.2.1'
| josegonzalez/docker-travis-php | docker_travis_php/__init__.py | Python | mit | 68 |
from __future__ import absolute_import
from django.conf import settings
from django.db import IntegrityError, models, transaction
from django.db.models import Q
from django.utils import timezone
from sentry.db.models import (
BaseManager, BoundedPositiveIntegerField, FlexibleForeignKey, Model, sane_repr
)
class GroupSubscriptionReason(object):
    """Enumerates why a user is subscribed to an issue.

    Negative values are virtual and never persisted; non-negative values
    are stored in GroupSubscription.reason.
    """
    implicit = -1  # not for use as a persisted field value
    committed = -2  # not for use as a persisted field value
    processing_issue = -3  # not for use as a persisted field value
    unknown = 0
    comment = 1
    assigned = 2
    bookmark = 3
    status_change = 4
    deploy_setting = 5
    mentioned = 6

    # Human-readable sentence fragments keyed by reason, completing
    # "You are receiving this because you <description>".
    descriptions = {
        implicit:
        u"have opted to receive updates for all issues within "
        "projects that you are a member of",
        committed:
        u"were involved in a commit that is part of this release",
        processing_issue:
        u"are subscribed to alerts for this project",
        comment:
        u"have commented on this issue",
        assigned:
        u"have been assigned to this issue",
        bookmark:
        u"have bookmarked this issue",
        status_change:
        u"have changed the resolution status of this issue",
        deploy_setting:
        u"opted to receive all deploy notifications for this organization",
        mentioned:
        u"have been mentioned in this issue",
    }
class GroupSubscriptionManager(BaseManager):
    """Manager providing subscription helpers for GroupSubscription."""

    def subscribe(self, group, user, reason=GroupSubscriptionReason.unknown):
        """
        Subscribe a user to an issue, but only if the user has not explicitly
        unsubscribed.
        """
        try:
            with transaction.atomic():
                self.create(
                    user=user,
                    group=group,
                    project=group.project,
                    is_active=True,
                    reason=reason,
                )
        except IntegrityError:
            # a row already exists (possibly with is_active=False, i.e. an
            # explicit unsubscribe) -- leave it untouched
            pass

    def get_participants(self, group):
        """
        Identify all users who are participating with a given issue.
        """
        from sentry.models import User, UserOption, UserOptionValue
        # Identify all members of a project -- we'll use this to start figuring
        # out who could possibly be associated with this group due to implied
        # subscriptions.
        users = User.objects.filter(
            sentry_orgmember_set__teams=group.project.team,
            is_active=True,
        )
        # Obviously, users who have explicitly unsubscribed from this issue
        # aren't considered participants.
        users = users.exclude(
            id__in=GroupSubscription.objects.filter(
                group=group,
                is_active=False,
                user__in=users,
            ).values('user')
        )
        # Fetch all of the users that have been explicitly associated with this
        # issue.
        participants = {
            subscription.user: subscription.reason
            for subscription in GroupSubscription.objects.filter(
                group=group,
                is_active=True,
                user__in=users,
            ).select_related('user')
        }
        # Find users which by default do not subscribe: their
        # workflow:notifications option is participating_only and is not
        # overridden to all_conversations at the project level.
        participating_only = set(
            uo.user_id
            for uo in UserOption.objects.filter(
                Q(project__isnull=True) | Q(project=group.project),
                user__in=users,
                key='workflow:notifications',
            ).exclude(
                user__in=[
                    uo.user_id for uo in UserOption.objects.filter(
                        project=group.project,
                        user__in=users,
                        key='workflow:notifications',
                    ) if uo.value == UserOptionValue.all_conversations
                ]
            ) if uo.value == UserOptionValue.participating_only
        )
        if participating_only:
            # drop opted-out members unless they explicitly participate
            excluded = participating_only.difference(participants.keys())
            if excluded:
                users = users.exclude(id__in=excluded)
        # implicit membership first; explicit subscription reasons override it
        results = {}
        for user in users:
            results[user] = GroupSubscriptionReason.implicit
        for user, reason in participants.items():
            results[user] = reason
        return results
class GroupSubscription(Model):
    """
    Identifies a subscription relationship between a user and an issue.
    """
    __core__ = False

    project = FlexibleForeignKey('sentry.Project', related_name="subscription_set")
    group = FlexibleForeignKey('sentry.Group', related_name="subscription_set")
    # namespace related_name on User since we don't own the model
    user = FlexibleForeignKey(settings.AUTH_USER_MODEL)
    # False records an explicit unsubscribe; such rows suppress re-subscription
    is_active = models.BooleanField(default=True)
    # one of the non-negative GroupSubscriptionReason values
    reason = BoundedPositiveIntegerField(
        default=GroupSubscriptionReason.unknown,
    )
    date_added = models.DateTimeField(default=timezone.now, null=True)

    objects = GroupSubscriptionManager()

    class Meta:
        app_label = 'sentry'
        db_table = 'sentry_groupsubscription'
        unique_together = (('group', 'user'), )

    __repr__ = sane_repr('project_id', 'group_id', 'user_id')
| jean/sentry | src/sentry/models/groupsubscription.py | Python | bsd-3-clause | 5,262 |
#! /usr/bin/env python3
import sys
import os
import collections
import gdb_helper
programs = {}
def ocl_prepare_program(uid, lines):
    """Register an OpenCL program's source under *uid* for later parsing.

    Elements of *lines* may contain embedded newlines; the text is
    flattened into a list of single source lines.
    """
    flattened = []
    for chunk in lines:
        flattened.extend(chunk.split("\n"))
    programs[uid] = flattened
def ocl_parse(program_uid, kernel_name):
    """Parse the registered program *program_uid* for *kernel_name*'s arguments."""
    def line_source():
        yield from programs[program_uid]
    return parse(line_source(), kernel_name)
def cuda_parse(source_name, kernel_name):
    """Parse *source_name* and return [(type, name), ...] for *kernel_name*."""
    def line_source():
        with open(source_name, "r") as fsource:
            yield from fsource.readlines()
    flat = parse(line_source(), kernel_name)
    # pair up the flat [type, name, type, name, ...] list
    return list(zip(flat[0::2], flat[1::2]))
def parse(lines, kernel_name):
    """Scan *lines* (a generator of source lines) for *kernel_name*'s signature.

    Template kernels ("name<...>") are matched on the base name; plain
    kernels are matched as "void name". Returns the kernel's parameters as
    a flat list [type0, name0, type1, name1, ...]. __-prefixed qualifiers
    are dropped from the type; leading '*' on a name is moved to the type.
    """
    def strip_attribute(text):
        # remove one " __attribute__((...))" annotation, honoring nested parens
        start = text.index(" __attribute__")
        pos = start
        depth = 0
        entered = False
        while not (entered and depth == 0):
            ch = text[pos]
            if ch == "(":
                entered = True
                depth += 1
            elif ch == ")":
                depth -= 1
            pos += 1
        return text[:start] + text[pos:]

    needle = kernel_name.split("<")[0] if "<" in kernel_name else "void {}".format(kernel_name)
    while True:
        line = lines.send(None)
        if " __attribute__" in line:
            line = strip_attribute(line)
        if needle in line:
            # pull in continuation lines until the opening brace appears
            while "{" not in line:
                line += " " + lines.send(None).strip()
            break
    arglist = []
    for raw in line.split("(")[1].split(")")[0].split(","):
        words = raw.strip().split()
        type_ = " ".join(w for w in words[:-1] if not w.startswith("__"))
        name = words[-1]
        while name.startswith("*"):
            name = name[1:]
            type_ += " *"
        arglist.append(type_)
        arglist.append(name)
    return arglist
def cuda_get_raw_lookup_table(binary=None):
    """Like cuda_get_lookup_table, but keyed by integer addresses."""
    table = collections.OrderedDict()
    for addr_str, info in cuda_get_lookup_table(binary).items():
        # keys arrive as hex strings; convert to int for raw lookups
        table[int(addr_str, 16)] = info
    return table
def cuda_get_lookup_table(binary=None):
    """Map kernel address -> (symbol, [(type, name), ...]) for *binary*.

    Defaults to the currently running executable when no binary is given.
    """
    if binary is None:
        binary = os.readlink("/proc/self/exe")
    table = collections.OrderedDict()
    kernels = gdb_helper.get_cuda_kernel_names(binary)
    for symbol, location, address in gdb_helper.get_symbol_location(kernels, binary):
        table[address] = symbol, cuda_parse(location, symbol)
    return table
if __name__ == "__main__":
    # Demo: dump the parsed argument list of every CUDA kernel in a binary
    # (defaults to ./hello when no path is given on the command line).
    binary = sys.argv[1] if len(sys.argv) > 1 else "./hello"
    symbols = gdb_helper.get_cuda_kernel_names()
    for symb, loc, address in gdb_helper.get_symbol_location(symbols):
        print(symb)
        print(cuda_parse(loc, symb))
| mpbl/specfem3d_globe | utils/BOAST_framework_to_develop_the_CUDA_and_OpenCL_routines_of_SPECFEM/gpuTrace/parse_gpu_program.py | Python | gpl-3.0 | 2,849 |
#!/usr/bin/python
# snakeoil.py
# Chris X Edwards <snakeoil@xed.ch>
# Snake Oil is a Python library for interfacing with a TORCS
# race car simulator which has been patched with the server
# extentions used in the Simulated Car Racing competitions.
# http://scr.geccocompetitions.com/
#
# To use it, you must import it and create a "drive()" function.
# This will take care of option handling and server connecting, etc.
# To see how to write your own client do something like this which is
# a complete working client:
# /-----------------------------------------------\
# |#!/usr/bin/python |
# |import snakeoil |
# |if __name__ == "__main__": |
# | C= snakeoil.Client() |
# | for step in xrange(C.maxSteps,0,-1): |
# | C.get_servers_input() |
# | snakeoil.drive_example(C) |
# | C.respond_to_server() |
# | C.shutdown() |
# \-----------------------------------------------/
# This should then be a full featured client. The next step is to
# replace 'snakeoil.drive_example()' with your own. There is a
# dictionary which holds various option values (see `default_options`
# variable for all the details) but you probably only need a few
# things from it. Mainly the `trackname` and `stage` are important
# when developing a strategic bot.
#
# This dictionary also contains a ServerState object
# (key=S) and a DriverAction object (key=R for response). This allows
# you to get at all the information sent by the server and to easily
# formulate your reply. These objects contain a member dictionary "d"
# (for data dictionary) which contain key value pairs based on the
# server's syntax. Therefore, you can read the following:
# angle, curLapTime, damage, distFromStart, distRaced, focus,
# fuel, gear, lastLapTime, opponents, racePos, rpm,
# speedX, speedY, speedZ, track, trackPos, wheelSpinVel, z
# The syntax specifically would be something like:
# X= o[S.d['tracPos']]
# And you can set the following:
# accel, brake, clutch, gear, steer, focus, meta
# The syntax is:
# o[R.d['steer']]= X
# Note that it is 'steer' and not 'steering' as described in the manual!
# All values should be sensible for their type, including lists being lists.
# See the SCR manual or http://xed.ch/help/torcs.html for details.
#
# If you just run the snakeoil.py base library itself it will implement a
# serviceable client with a demonstration drive function that is
# sufficient for getting around most tracks.
# Try `snakeoil.py --help` to get started.
# for Python3-based torcs python robot client
import socket
import sys
import getopt
import os
import time
PI= 3.14159265359
data_size = 2**17
# Initialize help messages
ophelp= 'Options:\n'
ophelp+= ' --host, -H <host> TORCS server host. [localhost]\n'
ophelp+= ' --port, -p <port> TORCS port. [3001]\n'
ophelp+= ' --id, -i <id> ID for server. [SCR]\n'
ophelp+= ' --steps, -m <#> Maximum simulation steps. 1 sec ~ 50 steps. [100000]\n'
ophelp+= ' --episodes, -e <#> Maximum learning episodes. [1]\n'
ophelp+= ' --track, -t <track> Your name for this track. Used for learning. [unknown]\n'
ophelp+= ' --stage, -s <#> 0=warm up, 1=qualifying, 2=race, 3=unknown. [3]\n'
ophelp+= ' --debug, -d Output full telemetry.\n'
ophelp+= ' --help, -h Show this help.\n'
ophelp+= ' --version, -v Show current version.'
usage= 'Usage: %s [ophelp [optargs]] \n' % sys.argv[0]
usage= usage + ophelp
version= "20130505-2"
def clip(v, lo, hi):
    """Clamp v into the closed interval [lo, hi] and return the result."""
    if v < lo:
        return lo
    if v > hi:
        return hi
    return v
def bargraph(x,mn,mx,w,c='X'):
    '''Draws a simple asciiart bar graph. Very handy for
    visualizing what's going on with the data.
    x= Value from sensor, mn= minimum plottable value,
    mx= maximum plottable value, w= width of plot in chars,
    c= the character to plot with.
    Returns a bracketed string such as "[--XXX_____]": '-' is empty
    negative range, c is the plotted magnitude, '_' is empty positive
    range.  Degenerate bounds return the strings 'backwards'/'what?'.'''
    if not w: return '' # No width!
    if x<mn: x= mn # Clip to bounds.
    if x>mx: x= mx # Clip to bounds.
    tx= mx-mn # Total real units possible to show on graph.
    if tx<=0: return 'backwards' # Stupid bounds.
    upw= tx/float(w) # X Units per output char width.
    if upw<=0: return 'what?' # Don't let this happen.
    # pu = "plot units" (filled), nonpu = empty units, split by sign.
    negpu, pospu, negnonpu, posnonpu= 0,0,0,0
    if mn < 0: # Then there is a negative part to graph.
        if x < 0: # And the plot is on the negative side.
            negpu= -x + min(0,mx)
            negnonpu= -mn + x
        else: # Plot is on pos. Neg side is empty.
            negnonpu= -mn + min(0,mx) # But still show some empty neg.
    if mx > 0: # There is a positive part to the graph
        if x > 0: # And the plot is on the positive side.
            pospu= x - max(0,mn)
            posnonpu= mx - x
        else: # Plot is on neg. Pos side is empty.
            posnonpu= mx - max(0,mn) # But still show some empty pos.
    # Convert unit counts to character runs (int() truncates, so the
    # rendered bar can be up to one char short of w).
    nnc= int(negnonpu/upw)*'-'
    npc= int(negpu/upw)*c
    ppc= int(pospu/upw)*c
    pnc= int(posnonpu/upw)*'_'
    return '[%s]' % (nnc+npc+ppc+pnc)
class Client():
    '''UDP client for a TORCS/SCR practice server.

    Holds the socket plus a ServerState (self.S) for incoming telemetry
    and a DriverAction (self.R) for the outgoing reply.  Option defaults
    are overridden first by command-line flags, then by constructor
    arguments.  Constructing a Client immediately performs the network
    handshake (setup_connection).'''
    def __init__(self,H=None,p=None,i=None,e=None,t=None,s=None,d=None,vision=False):
        # If you don't like the option defaults, change them here.
        self.vision = vision
        self.host= 'localhost'
        self.port= 3001
        self.sid= 'SCR'
        self.maxEpisodes=1 # "Maximum number of learning episodes to perform"
        self.trackname= 'unknown'
        self.stage= 3 # 0=Warm-up, 1=Qualifying 2=Race, 3=unknown <Default=3>
        self.debug= False
        self.maxSteps= 100000 # 50steps/second
        # Command-line options are applied first; explicit constructor
        # arguments below take precedence over them.
        self.parse_the_command_line()
        if H: self.host= H
        if p: self.port= p
        if i: self.sid= i
        if e: self.maxEpisodes= e
        if t: self.trackname= t
        if s: self.stage= s
        if d: self.debug= d
        self.S= ServerState()
        self.R= DriverAction()
        self.setup_connection()
    def setup_connection(self):
        '''Create the UDP socket and handshake until the server answers.

        Retries the "(init ...)" message until the reply contains
        ***identified***.  After repeated 1-second timeouts it kills and
        relaunches torcs via os.system (assumes torcs and autostart.sh
        exist on this machine).'''
        # == Set Up UDP Socket ==
        try:
            self.so= socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        except socket.error as emsg:
            print('Error: Could not create socket...')
            sys.exit(-1)
        # == Initialize Connection To Server ==
        self.so.settimeout(1)
        n_fail = 5
        while True:
            # This string establishes track sensor angles! You can customize them.
            #a= "-90 -75 -60 -45 -30 -20 -15 -10 -5 0 5 10 15 20 30 45 60 75 90"
            # xed- Going to try something a bit more aggressive...
            a= "-45 -19 -12 -7 -4 -2.5 -1.7 -1 -.5 0 .5 1 1.7 2.5 4 7 12 19 45"
            initmsg='%s(init %s)' % (self.sid,a)
            try:
                self.so.sendto(initmsg.encode(), (self.host, self.port))
            except socket.error as emsg:
                sys.exit(-1)
            sockdata= str()
            try:
                sockdata,addr= self.so.recvfrom(data_size)
                sockdata = sockdata.decode('utf-8')
            except socket.error as emsg:
                # Timeout: count down toward a full torcs relaunch.
                print("Waiting for server on %d............" % self.port)
                print("Count Down : " + str(n_fail))
                if n_fail < 0:
                    print("relaunch torcs")
                    os.system('pkill torcs')
                    time.sleep(1.0)
                    if self.vision is False:
                        os.system('torcs -nofuel -nodamage -nolaptime -t 100000 &')
                    else:
                        os.system('torcs -nofuel -nodamage -nolaptime -t 100000 -vision &')
                    time.sleep(1.0)
                    os.system('sh autostart.sh')
                    n_fail = 5
                n_fail -= 1
            identify = '***identified***'
            if identify in sockdata:
                print("Client connected on %d.............." % self.port)
                break
    def parse_the_command_line(self):
        '''Apply getopt command-line flags (see module-level `ophelp`)
        onto this instance.  Exits on --help/--version/bad arguments.'''
        try:
            (opts, args) = getopt.getopt(sys.argv[1:], 'H:p:i:m:e:t:s:dhv',
                       ['host=','port=','id=','steps=',
                        'episodes=','track=','stage=',
                        'debug','help','version'])
        except getopt.error as why:
            print('getopt error: %s\n%s' % (why, usage))
            sys.exit(-1)
        try:
            for opt in opts:
                if opt[0] == '-h' or opt[0] == '--help':
                    print(usage)
                    sys.exit(0)
                if opt[0] == '-d' or opt[0] == '--debug':
                    self.debug= True
                if opt[0] == '-H' or opt[0] == '--host':
                    self.host= opt[1]
                if opt[0] == '-i' or opt[0] == '--id':
                    self.sid= opt[1]
                if opt[0] == '-t' or opt[0] == '--track':
                    self.trackname= opt[1]
                if opt[0] == '-s' or opt[0] == '--stage':
                    self.stage= int(opt[1])
                if opt[0] == '-p' or opt[0] == '--port':
                    self.port= int(opt[1])
                if opt[0] == '-e' or opt[0] == '--episodes':
                    self.maxEpisodes= int(opt[1])
                if opt[0] == '-m' or opt[0] == '--steps':
                    self.maxSteps= int(opt[1])
                if opt[0] == '-v' or opt[0] == '--version':
                    print('%s %s' % (sys.argv[0], version))
                    sys.exit(0)
        except ValueError as why:
            print('Bad parameter \'%s\' for option %s: %s\n%s' % (
                opt[1], opt[0], why, usage))
            sys.exit(-1)
        if len(args) > 0:
            print('Superflous input? %s\n%s' % (', '.join(args), usage))
            sys.exit(-1)
    def get_servers_input(self):
        '''Server's input is stored in a ServerState object'''
        # Blocks (with 1s timeouts) until one full telemetry packet has
        # been parsed into self.S, or returns after calling shutdown()
        # when the server reports ***shutdown***/***restart***.
        if not self.so: return
        sockdata= str()
        while True:
            try:
                # Receive server data
                sockdata,addr= self.so.recvfrom(data_size)
                sockdata = sockdata.decode('utf-8')
            except socket.error as emsg:
                print('.', end=' ')
                #print "Waiting for data on %d.............." % self.port
            if '***identified***' in sockdata:
                print("Client connected on %d.............." % self.port)
                continue
            elif '***shutdown***' in sockdata:
                print((("Server has stopped the race on %d. "+
                        "You were in %d place.") %
                        (self.port,self.S.d['racePos'])))
                self.shutdown()
                return
            elif '***restart***' in sockdata:
                # What do I do here?
                print("Server has restarted the race on %d." % self.port)
                # I haven't actually caught the server doing this.
                self.shutdown()
                return
            elif not sockdata: # Empty?
                continue # Try again.
            else:
                self.S.parse_server_str(sockdata)
                if self.debug:
                    sys.stderr.write("\x1b[2J\x1b[H") # Clear for steady output.
                    print(self.S)
                break # Can now return from this function.
    def respond_to_server(self):
        '''Serialize self.R (DriverAction) and send it to the server.'''
        if not self.so: return
        try:
            message = repr(self.R)
            self.so.sendto(message.encode(), (self.host, self.port))
        except socket.error as emsg:
            # NOTE(review): emsg[1]/emsg[0] indexing is a Python 2 idiom;
            # under Python 3, OSError is not subscriptable so this line
            # would itself raise — confirm and rework if this path matters.
            print("Error sending to server: %s Message %s" % (emsg[1],str(emsg[0])))
            sys.exit(-1)
        if self.debug: print(self.R.fancyout())
        # Or use this for plain output:
        #if self.debug: print self.R
    def shutdown(self):
        '''Close the socket and mark the client unusable (self.so=None).
        Safe to call repeatedly; subsequent calls are no-ops.'''
        if not self.so: return
        print(("Race terminated or %d steps elapsed. Shutting down %d."
               % (self.maxSteps,self.port)))
        self.so.close()
        self.so = None
        #sys.exit() # No need for this really.
class ServerState():
    '''What the server is reporting right now.

    parse_server_str() fills self.d, a dict mapping sensor name ->
    parsed value (float, or list of floats for multi-valued sensors),
    via the module-level destringify() helper.'''
    def __init__(self):
        self.servstr= str()  # last raw server string (trailing char dropped)
        self.d= dict()       # sensor name -> parsed value(s)
    def parse_server_str(self, server_string):
        '''Parse the server string.'''
        # [:-1] drops the final character of the stripped payload
        # (NOTE(review): presumably a trailing terminator byte — confirm).
        self.servstr= server_string.strip()[:-1]
        # Payload looks like "(name v v ...)(name v)..." — split on ")(".
        sslisted= self.servstr.strip().lstrip('(').rstrip(')').split(')(')
        for i in sslisted:
            w= i.split(' ')
            self.d[w[0]]= destringify(w[1:])
    def __repr__(self):
        # Comment the next line for raw output:
        return self.fancyout()
        # -------------------------------------
        # (Unreached while fancyout() is returned above: plain key/value dump.)
        out= str()
        for k in sorted(self.d):
            strout= str(self.d[k])
            if type(self.d[k]) is list:
                strlist= [str(i) for i in self.d[k]]
                strout= ', '.join(strlist)
            out+= "%s: %s\n" % (k,strout)
        return out
    def fancyout(self):
        '''Specialty output for useful ServerState monitoring.'''
        out= str()
        sensors= [ # Select the ones you want in the order you want them.
        #'curLapTime',
        #'lastLapTime',
        'stucktimer',
        #'damage',
        #'focus',
        'fuel',
        #'gear',
        'distRaced',
        'distFromStart',
        #'racePos',
        'opponents',
        'wheelSpinVel',
        'z',
        'speedZ',
        'speedY',
        'speedX',
        'targetSpeed',
        'rpm',
        'skid',
        'slip',
        'track',
        'trackPos',
        'angle',
        'c_angle',          # affordances
        'toMarking_L',
        'toMarking_M',
        'toMarking_R',
        'dist_L',
        'dist_R',
        'toMarking_LL',
        'toMarking_ML',
        'toMarking_MR',
        'toMarking_RR',
        'dist_LL',
        'dist_MM',
        'dist_RR',
        'fast',
        ]
        #for k in sorted(self.d): # Use this to get all sensors.
        for k in sensors:
            # self.d.get(k) -> missing sensors fall through to the final
            # else branch and print as "None".
            if type(self.d.get(k)) is list: # Handle list type data.
                if k == 'track': # Nice display for track sensors.
                    strout= str()
                   # for tsensor in self.d['track']:
                   #     if   tsensor >180: oc= '|'
                   #     elif tsensor > 80: oc= ';'
                   #     elif tsensor > 60: oc= ','
                   #     elif tsensor > 39: oc= '.'
                   #     #elif tsensor > 13: oc= chr(int(tsensor)+65-13)
                   #     elif tsensor > 13: oc= chr(int(tsensor)+97-13)
                   #     elif tsensor >  3: oc= chr(int(tsensor)+48-3)
                   #     else: oc= '_'
                   #     strout+= oc
                   # strout= ' -> '+strout[:9] +' ' + strout[9] + ' ' + strout[10:]+' <-'
                    raw_tsens= ['%.1f'%x for x in self.d['track']]
                    strout+= ' '.join(raw_tsens[:9])+'_'+raw_tsens[9]+'_'+' '.join(raw_tsens[10:])
                elif k == 'opponents': # Nice display for opponent sensors.
                    strout= str()
                    for osensor in self.d['opponents']:
                        if   osensor >190: oc= '_'
                        elif osensor > 90: oc= '.'
                        elif osensor > 39: oc= chr(int(osensor/2)+97-19)
                        elif osensor > 13: oc= chr(int(osensor)+65-13)
                        elif osensor >  3: oc= chr(int(osensor)+48-3)
                        else: oc= '?'
                        strout+= oc
                    strout= ' -> '+strout[:18] + ' ' + strout[18:]+' <-'
                else:
                    strlist= [str(i) for i in self.d[k]]
                    strout= ', '.join(strlist)
            else: # Not a list type of value.
                if k == 'gear': # This is redundant now since it's part of RPM.
                    gs= '_._._._._._._._._'
                    p= int(self.d['gear']) * 2 + 2  # Position
                    l= '%d'%self.d['gear'] # Label
                    if l=='-1': l= 'R'
                    if l=='0':  l= 'N'
                    strout= gs[:p]+ '(%s)'%l + gs[p+3:]
                elif k == 'damage':
                    strout= '%6.0f %s' % (self.d[k], bargraph(self.d[k],0,10000,50,'~'))
                elif k == 'fuel':
                    strout= '%6.0f %s' % (self.d[k], bargraph(self.d[k],0,100,50,'f'))
                elif k == 'speedX':
                    cx= 'X'
                    if self.d[k]<0: cx= 'R'
                    strout= '%6.1f %s' % (self.d[k], bargraph(self.d[k],-30,300,50,cx))
                elif k == 'speedY': # This gets reversed for display to make sense.
                    strout= '%6.1f %s' % (self.d[k], bargraph(self.d[k]*-1,-25,25,50,'Y'))
                elif k == 'speedZ':
                    strout= '%6.1f %s' % (self.d[k], bargraph(self.d[k],-13,13,50,'Z'))
                elif k == 'z':
                    strout= '%6.3f %s' % (self.d[k], bargraph(self.d[k],.3,.5,50,'z'))
                elif k == 'trackPos': # This gets reversed for display to make sense.
                    cx='<'
                    if self.d[k]<0: cx= '>'
                    strout= '%6.3f %s' % (self.d[k], bargraph(self.d[k]*-1,-1,1,50,cx))
                elif k == 'stucktimer':
                    if self.d[k]:
                        strout= '%3d %s' % (self.d[k], bargraph(self.d[k],0,300,50,"'"))
                    else: strout= 'Not stuck!'
                elif k == 'rpm':
                    g= self.d['gear']
                    if g < 0:
                        g= 'R'
                    else:
                        g= '%1d'% g
                    strout= bargraph(self.d[k],0,10000,50,g)
                elif k == 'angle':
                    # 24 ascii "needles", one per 15-degree sector.
                    asyms= [
                          "  !  ", ".|'  ", "./'  ", "_.-  ", ".--  ", "..-  ",
                          "---  ", ".__  ", "-._  ", "'-.  ", "'\.  ", "'|.  ",
                          "  |  ", "  .|'", "  ./'", "  .-'", "  _.-", "  __.",
                          "  ---", "  --.", "  -._", "  -..", "  '\.", "  '|." ]
                    rad= self.d[k]
                    deg= int(rad*180/PI)
                    symno= int(.5+ (rad+PI) / (PI/12) )
                    symno= symno % (len(asyms)-1)
                    strout= '%5.2f %3d (%s)' % (rad,deg,asyms[symno])
                elif k == 'skid': # A sensible interpretation of wheel spin.
                    frontwheelradpersec= self.d['wheelSpinVel'][0]
                    skid= 0
                    if frontwheelradpersec:
                        skid= .5555555555*self.d['speedX']/frontwheelradpersec - .66124
                    strout= bargraph(skid,-.05,.4,50,'*')
                elif k == 'slip': # A sensible interpretation of wheel spin.
                    frontwheelradpersec= self.d['wheelSpinVel'][0]
                    slip= 0
                    if frontwheelradpersec:
                        # Rear wheel speed minus front wheel speed.
                        slip= ((self.d['wheelSpinVel'][2]+self.d['wheelSpinVel'][3]) -
                               (self.d['wheelSpinVel'][0]+self.d['wheelSpinVel'][1]))
                    strout= bargraph(slip,-5,150,50,'@')
                else:
                    strout= str(self.d[k])
            out+= "%s: %s\n" % (k,strout)
        return out
class DriverAction():
    '''What the driver is intending to do (i.e. send to the server).
    Composes something like this for the server:
    (accel 1)(brake 0)(gear 1)(steer 0)(clutch 0)(focus 0)(meta 0) or
    (accel 1)(brake 0)(gear 1)(steer 0)(clutch 0)(focus -90 -45 0 45 90)(meta 0)'''
    def __init__(self):
        self.actionstr= str()
        # "d" is for data dictionary: the effector values serialized by
        # __repr__ into the server's reply syntax.
        self.d= { 'accel':0.2,
                   'brake':0,
                  'clutch':0,
                    'gear':1,
                   'steer':0,
                   'focus':[-90,-45,0,45,90],
                    'meta':0
                    }
    def clip_to_limits(self):
        """There pretty much is never a reason to send the server
        something like (steer 9483.323). This comes up all the time
        and it's probably just more sensible to always clip it than to
        worry about when to. The "clip" command is still a snakeoil
        utility function, but it should be used only for non standard
        things or non obvious limits (limit the steering to the left,
        for example). For normal limits, simply don't worry about it."""
        self.d['steer']= clip(self.d['steer'], -1, 1)
        self.d['brake']= clip(self.d['brake'], 0, 1)
        self.d['accel']= clip(self.d['accel'], 0, 1)
        self.d['clutch']= clip(self.d['clutch'], 0, 1)
        if self.d['gear'] not in [-1, 0, 1, 2, 3, 4, 5, 6]:
            self.d['gear']= 0
        if self.d['meta'] not in [0,1]:
            self.d['meta']= 0
        # An out-of-range focus request is replaced by the scalar 0.
        if type(self.d['focus']) is not list or min(self.d['focus'])<-180 or max(self.d['focus'])>180:
            self.d['focus']= 0
    def __repr__(self):
        '''Serialize the action dict into "(key value)..." form after
        clipping.  Scalars get three decimals; lists are space-joined.'''
        self.clip_to_limits()
        out= str()
        for k in self.d:
            out+= '('+k+' '
            v= self.d[k]
            if not type(v) is list:
                out+= '%.3f' % v
            else:
                out+= ' '.join([str(x) for x in v])
            out+= ')'
        # Fix: an unreachable duplicate "return out+'\n'" that followed
        # this return has been removed (dead code).
        return out
# == Misc Utility Functions
def destringify(s):
    '''Turn a server token into a float, or a list of tokens into a list
    of floats, where possible.  A string that does not parse is returned
    unchanged after printing a warning; a one-element list collapses to
    a single value; empty input comes back untouched.'''
    if not s:
        return s
    if isinstance(s, str):
        try:
            return float(s)
        except ValueError:
            print("Could not find a value in %s" % s)
            return s
    if isinstance(s, list):
        if len(s) < 2:
            return destringify(s[0])
        return [destringify(token) for token in s]
def drive_example(c):
    '''This is only an example. It will get around the track but the
    correct thing to do is write your own `drive()` function.
    Mutates c.R.d (the outgoing DriverAction dict) in place based on
    the latest telemetry in c.S.d.'''
    S,R= c.S.d,c.R.d  # Shortcuts to sensor and actuator dicts.
    target_speed=100
    # Steer To Corner
    R['steer']= S['angle']*10 / PI
    # Steer To Center
    R['steer']-= S['trackPos']*.10
    # Throttle Control: back off target speed while steering hard.
    if S['speedX'] < target_speed - (R['steer']*50):
        R['accel']+= .01
    else:
        R['accel']-= .01
    if S['speedX']<10:
        R['accel']+= 1/(S['speedX']+.1)
    # Traction Control System: cut throttle when rear wheels spin
    # noticeably faster than front wheels.
    if ((S['wheelSpinVel'][2]+S['wheelSpinVel'][3]) -
       (S['wheelSpinVel'][0]+S['wheelSpinVel'][1]) > 5):
       R['accel']-= .2
    # Automatic Transmission: simple speed-thresholded gear choice.
    R['gear']=1
    if S['speedX']>50:
        R['gear']=2
    if S['speedX']>80:
        R['gear']=3
    if S['speedX']>110:
        R['gear']=4
    if S['speedX']>140:
        R['gear']=5
    if S['speedX']>170:
        R['gear']=6
    return
# ================ MAIN ================
# Demonstration entry point: connect to a server on port 3101 and run
# the example driver for maxSteps telemetry/response cycles.
if __name__ == "__main__":
    C= Client(p=3101)
    for step in range(C.maxSteps,0,-1):
        C.get_servers_input()
        drive_example(C)
        C.respond_to_server()
    C.shutdown()
| babraham123/deepdriving | gym/snakeoil3_gym.py | Python | mit | 23,725 |
from yowsup.layers.interface import YowInterfaceLayer, ProtocolEntityCallback
from yowsup.layers.protocol_messages.protocolentities import TextMessageProtocolEntity
from yowsup.layers.protocol_media.protocolentities import ImageDownloadableMediaMessageProtocolEntity
from yowsup.layers.protocol_receipts.protocolentities import OutgoingReceiptProtocolEntity
from yowsup.layers.protocol_media.protocolentities import LocationMediaMessageProtocolEntity
from yowsup.layers.protocol_acks.protocolentities import OutgoingAckProtocolEntity
from yowsup.layers.protocol_media.protocolentities import VCardMediaMessageProtocolEntity
class EchoLayer(YowInterfaceLayer):
    '''Yowsup interface layer that echoes every direct (non-group)
    message back to its sender.  Text, image, location and vcard
    payloads are supported; each echo is paired with a delivery receipt
    so the server stops re-sending the original message.'''
    @ProtocolEntityCallback("message")
    def onMessage(self, messageProtocolEntity):
        # Group chats are deliberately ignored; only 1-to-1 messages echo.
        if not messageProtocolEntity.isGroupMessage():
            if messageProtocolEntity.getType() == 'text':
                self.onTextMessage(messageProtocolEntity)
            elif messageProtocolEntity.getType() == 'media':
                self.onMediaMessage(messageProtocolEntity)
    @ProtocolEntityCallback("receipt")
    def onReceipt(self, entity):
        # Acknowledge delivery receipts so the server stops resending them.
        ack = OutgoingAckProtocolEntity(entity.getId(), "receipt", "delivery")
        self.toLower(ack)
    def onTextMessage(self,messageProtocolEntity):
        # Echo the text body back to the original sender.
        receipt = OutgoingReceiptProtocolEntity(messageProtocolEntity.getId(), messageProtocolEntity.getFrom())
        outgoingMessageProtocolEntity = TextMessageProtocolEntity(
            messageProtocolEntity.getBody(),
            to = messageProtocolEntity.getFrom())
        print("Echoing %s to %s" % (messageProtocolEntity.getBody(), messageProtocolEntity.getFrom(False)))
        #send receipt otherwise we keep receiving the same message over and over
        self.toLower(receipt)
        self.toLower(outgoingMessageProtocolEntity)
    def onMediaMessage(self, messageProtocolEntity):
        # Rebuild an outgoing entity of the same media type and echo it.
        # NOTE(review): image sends receipt before the echo; location and
        # vcard send it after — presumably harmless, but confirm.
        if messageProtocolEntity.getMediaType() == "image":
            receipt = OutgoingReceiptProtocolEntity(messageProtocolEntity.getId(), messageProtocolEntity.getFrom())
            outImage = ImageDownloadableMediaMessageProtocolEntity(
                messageProtocolEntity.getMimeType(), messageProtocolEntity.fileHash, messageProtocolEntity.url, messageProtocolEntity.ip,
                messageProtocolEntity.size, messageProtocolEntity.fileName, messageProtocolEntity.encoding, messageProtocolEntity.width, messageProtocolEntity.height,
                messageProtocolEntity.getCaption(),
                to = messageProtocolEntity.getFrom(), preview = messageProtocolEntity.getPreview())
            print("Echoing image %s to %s" % (messageProtocolEntity.url, messageProtocolEntity.getFrom(False)))
            #send receipt otherwise we keep receiving the same message over and over
            self.toLower(receipt)
            self.toLower(outImage)
        elif messageProtocolEntity.getMediaType() == "location":
            receipt = OutgoingReceiptProtocolEntity(messageProtocolEntity.getId(), messageProtocolEntity.getFrom())
            outLocation = LocationMediaMessageProtocolEntity(messageProtocolEntity.getLatitude(),
                messageProtocolEntity.getLongitude(), messageProtocolEntity.getLocationName(),
                messageProtocolEntity.getLocationURL(), messageProtocolEntity.encoding,
                to = messageProtocolEntity.getFrom(), preview=messageProtocolEntity.getPreview())
            print("Echoing location (%s, %s) to %s" % (messageProtocolEntity.getLatitude(), messageProtocolEntity.getLongitude(), messageProtocolEntity.getFrom(False)))
            #send receipt otherwise we keep receiving the same message over and over
            self.toLower(outLocation)
            self.toLower(receipt)
        elif messageProtocolEntity.getMediaType() == "vcard":
            receipt = OutgoingReceiptProtocolEntity(messageProtocolEntity.getId(), messageProtocolEntity.getFrom())
            outVcard = VCardMediaMessageProtocolEntity(messageProtocolEntity.getName(),messageProtocolEntity.getCardData(),to = messageProtocolEntity.getFrom())
            print("Echoing vcard (%s, %s) to %s" % (messageProtocolEntity.getName(), messageProtocolEntity.getCardData(), messageProtocolEntity.getFrom(False)))
            #send receipt otherwise we keep receiving the same message over and over
            self.toLower(outVcard)
            self.toLower(receipt)
| felix-dumit/campusbot | yowsup2/yowsup/demos/echoclient/layer.py | Python | mit | 4,454 |
# -*- coding: utf-8 -*-
#
# mazi-guides documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 9 17:33:10 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.ifconfig',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'mazi-guides'
copyright = u'2016-2018, MAZI project'
author = u'MAZI project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'3.1'
# The full version, including alpha/beta/rc tags.
#release = u'1.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["."]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/Mazi-Logo.png'
#html_logo = '_static/photo2.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_context = {
'css_files': [
'_static/theme_overrides.css', # override wide tables in RTD theme
],
}
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'mazi-guidesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'mazi-guides.tex', u'mazi-guides Documentation',
u'NITlab', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mazi-guides', u'mazi-guides Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'mazi-guides', u'mazi-guides Documentation',
author, 'mazi-guides', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| mazi-project/guides | tech/source/conf.py | Python | mit | 9,625 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
from tempfile import gettempdir
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
# pylint: disable=redefined-outer-name
def maybe_download(filename, expected_bytes):
  """Download a file if not present, and make sure it's the right size.

  Args:
    filename: basename to fetch (appended to the module-level `url`)
      and store in the system temp directory.
    expected_bytes: exact byte size the local file must have.

  Returns:
    Absolute path of the verified local file.

  Raises:
    Exception: if the local file's size differs from expected_bytes.
  """
  local_filename = os.path.join(gettempdir(), filename)
  if not os.path.exists(local_filename):
    # Network fetch; skipped when a cached copy already exists.
    local_filename, _ = urllib.request.urlretrieve(url + filename,
                                                   local_filename)
  statinfo = os.stat(local_filename)
  if statinfo.st_size == expected_bytes:
    print('Found and verified', filename)
  else:
    print(statinfo.st_size)
    raise Exception('Failed to verify ' + local_filename +
                    '. Can you get to it with a browser?')
  return local_filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
  """Extract the first file enclosed in a zip file as a list of words."""
  with zipfile.ZipFile(filename) as archive:
    first_member = archive.namelist()[0]
    text = tf.compat.as_str(archive.read(first_member))
  return text.split()
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words):
  """Process raw inputs into a dataset.

  Encodes `words` as integer ids, keeping the n_words - 1 most frequent
  words and mapping everything else to the UNK bucket at id 0.

  Returns:
    data: list of ids, one per input word.
    count: (word, frequency) pairs; count[0] is the mutable
      ['UNK', <total unk occurrences>] entry.
    dictionary: word -> id.
    reversed_dictionary: id -> word.
  """
  count = [['UNK', -1]]
  count.extend(collections.Counter(words).most_common(n_words - 1))
  # Ids are assigned by frequency rank, with UNK pinned at 0.
  dictionary = {word: rank for rank, (word, _) in enumerate(count)}
  data = [dictionary.get(word, 0) for word in words]
  count[0][1] = data.count(0)  # Total occurrences that fell into UNK.
  reversed_dictionary = {rank: word for word, rank in dictionary.items()}
  return data, count, dictionary, reversed_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
vocabulary_size)
del vocabulary # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
  """Generate one skip-gram batch from the module-level `data` id list.

  batch_size: number of (target, context) pairs to emit; must be a multiple
    of num_skips.
  num_skips: how many context words to draw for each target word.
  skip_window: how many words to consider left and right of the target.

  Returns (batch, labels): int32 arrays of shapes (batch_size,) and
  (batch_size, 1). Advances the global `data_index` cursor, wrapping around
  the corpus.
  """
  global data_index
  assert batch_size % num_skips == 0
  assert num_skips <= 2 * skip_window
  batch = np.ndarray(shape=(batch_size), dtype=np.int32)
  labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
  span = 2 * skip_window + 1  # [ skip_window target skip_window ]
  buffer = collections.deque(maxlen=span)
  if data_index + span > len(data):
    data_index = 0
  buffer.extend(data[data_index:data_index + span])
  data_index += span
  for i in range(batch_size // num_skips):
    context_words = [w for w in range(span) if w != skip_window]
    random.shuffle(context_words)
    words_to_use = collections.deque(context_words)
    for j in range(num_skips):
      batch[i * num_skips + j] = buffer[skip_window]
      context_word = words_to_use.pop()
      labels[i * num_skips + j, 0] = buffer[context_word]
    if data_index == len(data):
      # BUG FIX: the original did `buffer[:] = data[:span]`, but deque does
      # not support slice assignment, so hitting the end of the corpus raised
      # TypeError. extend() on a maxlen deque evicts the old contents, which
      # refills the window equivalently.
      buffer.extend(data[0:span])
      data_index = span
    else:
      buffer.append(data[data_index])
      data_index += 1
  # Backtrack a little bit to avoid skipping words in the end of a batch
  data_index = (data_index + len(data) - span) % len(data)
  return batch, labels
# Demo: print a tiny batch so the (target -> context) pairing is visible.
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
  print(batch[i], reverse_dictionary[batch[i]],
        '->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128  # Dimension of the embedding vector.
skip_window = 1  # How many words to consider left and right.
num_skips = 2  # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16  # Random set of words to evaluate similarity on.
valid_window = 100  # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64  # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
  # Input data.
  train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
  train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
  valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
  # Ops and variables pinned to the CPU because of missing GPU implementation
  with tf.device('/cpu:0'):
    # Look up embeddings for inputs.
    embeddings = tf.Variable(
        tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
    embed = tf.nn.embedding_lookup(embeddings, train_inputs)
    # Construct the variables for the NCE loss
    nce_weights = tf.Variable(
        tf.truncated_normal([vocabulary_size, embedding_size],
                            stddev=1.0 / math.sqrt(embedding_size)))
    nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
  # Compute the average NCE loss for the batch.
  # tf.nce_loss automatically draws a new sample of the negative labels each
  # time we evaluate the loss.
  loss = tf.reduce_mean(
      tf.nn.nce_loss(weights=nce_weights,
                     biases=nce_biases,
                     labels=train_labels,
                     inputs=embed,
                     num_sampled=num_sampled,
                     num_classes=vocabulary_size))
  # Construct the SGD optimizer using a learning rate of 1.0.
  optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
  # Compute the cosine similarity between minibatch examples and all embeddings.
  # Row-wise L2 norms; keep_dims so the division below broadcasts per row.
  norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
  normalized_embeddings = embeddings / norm
  valid_embeddings = tf.nn.embedding_lookup(
      normalized_embeddings, valid_dataset)
  # similarity[i, j] = cosine similarity of validation word i vs vocab word j.
  similarity = tf.matmul(
      valid_embeddings, normalized_embeddings, transpose_b=True)
  # Add variable initializer.
  init = tf.global_variables_initializer()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
  # We must initialize all variables before we use them.
  init.run()
  print('Initialized')
  average_loss = 0
  for step in xrange(num_steps):
    batch_inputs, batch_labels = generate_batch(
        batch_size, num_skips, skip_window)
    feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
    # We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run()
    _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
    average_loss += loss_val
    if step % 2000 == 0:
      if step > 0:
        average_loss /= 2000
      # The average loss is an estimate of the loss over the last 2000 batches.
      print('Average loss at step ', step, ': ', average_loss)
      average_loss = 0
    # Note that this is expensive (~20% slowdown if computed every 500 steps)
    if step % 10000 == 0:
      sim = similarity.eval()
      for i in xrange(valid_size):
        valid_word = reverse_dictionary[valid_examples[i]]
        top_k = 8  # number of nearest neighbors
        # Slice starts at 1: index 0 is the word itself (similarity 1.0).
        nearest = (-sim[i, :]).argsort()[1:top_k + 1]
        log_str = 'Nearest to %s:' % valid_word
        for k in xrange(top_k):
          close_word = reverse_dictionary[nearest[k]]
          log_str = '%s %s,' % (log_str, close_word)
        print(log_str)
  final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
# pylint: disable=missing-docstring
# Function to draw visualization of distance between embeddings.
def plot_with_labels(low_dim_embs, labels, filename):
  """Scatter-plot 2-D embeddings, annotating each point with its word."""
  assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
  plt.figure(figsize=(18, 18))  # in inches
  for idx, word in enumerate(labels):
    x_coord, y_coord = low_dim_embs[idx, :]
    plt.scatter(x_coord, y_coord)
    plt.annotate(word,
                 xy=(x_coord, y_coord),
                 xytext=(5, 2),
                 textcoords='offset points',
                 ha='right',
                 va='bottom')
  plt.savefig(filename)
try:
  # pylint: disable=g-import-not-at-top
  from sklearn.manifold import TSNE
  import matplotlib.pyplot as plt
  tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
  # Ids are frequency-ordered, so this plots the 500 most common words.
  plot_only = 500
  low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
  labels = [reverse_dictionary[i] for i in xrange(plot_only)]
  plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(), 'tsne.png'))
except ImportError as ex:
  print('Please install sklearn, matplotlib, and scipy to show embeddings.')
  print(ex)
| with-git/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | Python | apache-2.0 | 9,909 |
# Copyright (c) 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from stevedore import extension
from manila import network
from manila.share import configuration as conf
from manila.share.drivers.emc import driver as emcdriver
from manila.share.drivers.emc.plugins import base
from manila import test
class FakeConnection(base.StorageConnection):
    """Minimal StorageConnection stub used to exercise the EMC driver
    framework without real storage: every hook is a no-op except connect(),
    which raises NotImplementedError so tests must mock it before do_setup().
    """
    def __init__(self, logger):
        self.logger = logger
    @property
    def driver_handles_share_servers(self):
        return True
    def create_share(self, context, share, share_server):
        """Is called to create share."""
    def create_snapshot(self, context, snapshot, share_server):
        """Is called to create snapshot."""
    def delete_share(self, context, share, share_server):
        """Is called to remove share."""
    def extend_share(self, share, new_size, share_server):
        """Is called to extend share."""
    def delete_snapshot(self, context, snapshot, share_server):
        """Is called to remove snapshot."""
    def ensure_share(self, context, share, share_server):
        """Invoked to ensure that share is exported."""
    def allow_access(self, context, share, access, share_server):
        """Allow access to the share."""
    def deny_access(self, context, share, access, share_server):
        """Deny access to the share."""
    def raise_connect_error(self):
        """Check for setup error."""
    def connect(self, emc_share_driver, context):
        """Any initialization the share driver does while starting."""
        raise NotImplementedError()
    def update_share_stats(self, stats_dict):
        """Add key/values to stats_dict."""
    def get_network_allocations_number(self):
        """Returns number of network allocations for creating VIFs."""
        return 0
    def setup_server(self, network_info, metadata=None):
        """Set up and configures share server with given network parameters."""
    def teardown_server(self, server_details, security_services=None):
        """Teardown share server."""
FAKE_BACKEND = 'fake_backend'  # backend name wired through config and the fake registry
class FakeEMCExtensionManager(object):
    """Stand-in for stevedore's ExtensionManager exposing one fake backend."""

    def __init__(self):
        fake_ext = extension.Extension(name=FAKE_BACKEND,
                                       plugin=FakeConnection,
                                       entry_point=None,
                                       obj=None)
        self.extensions = [fake_ext]
class EMCShareFrameworkTestCase(test.TestCase):
    """Tests for the EMC share driver's plugin-loading framework."""
    @mock.patch('stevedore.extension.ExtensionManager',
                mock.Mock(return_value=FakeEMCExtensionManager()))
    def setUp(self):
        super(EMCShareFrameworkTestCase, self).setUp()
        self.configuration = conf.Configuration(None)
        self.configuration.append_config_values = mock.Mock(return_value=0)
        self.configuration.share_backend_name = FAKE_BACKEND
        self.mock_object(self.configuration, 'safe_get', self._fake_safe_get)
        self.mock_object(network, 'API')
        self.driver = emcdriver.EMCShareDriver(
            configuration=self.configuration)
    def test_driver_setup(self):
        """do_setup() must load the fake plugin and call its connect()."""
        FakeConnection.connect = mock.Mock()
        self.driver.do_setup(None)
        self.assertIsInstance(self.driver.plugin, FakeConnection,
                              "Not an instance of FakeConnection")
        FakeConnection.connect.assert_called_with(self.driver, None)
    def test_update_share_stats(self):
        """_update_share_stats() must publish the expected capability dict."""
        data = {}
        self.driver.plugin = mock.Mock()
        self.driver._update_share_stats()
        data["share_backend_name"] = FAKE_BACKEND
        data["driver_handles_share_servers"] = True
        data["vendor_name"] = 'EMC'
        data["driver_version"] = '1.0'
        data["storage_protocol"] = 'NFS_CIFS'
        data['total_capacity_gb'] = 'unknown'
        data['free_capacity_gb'] = 'unknown'
        data['reserved_percentage'] = 0
        data['qos'] = False
        data['pools'] = None
        data['snapshot_support'] = True
        data['replication_domain'] = None
        data['filter_function'] = None
        data['goodness_function'] = None
        self.assertEqual(data, self.driver._stats)
    def _fake_safe_get(self, value):
        # Config lookup stub used in place of Configuration.safe_get.
        if value in ['emc_share_backend', 'share_backend_name']:
            return FAKE_BACKEND
        elif value == 'driver_handles_share_servers':
            return True
        return None
| NetApp/manila | manila/tests/share/drivers/emc/test_driver.py | Python | apache-2.0 | 4,948 |
# -*- coding: cp1252 -*-
##
# <p> Portions copyright © 2005-2009 Stephen John Machin, Lingfo Pty Ltd</p>
# <p>This module is part of the xlrd package, which is released under a BSD-style licence.</p>
##
# 2009-05-31 SJM Fixed problem with no CODEPAGE record on extremely minimal BIFF2.x 3rd-party file
# 2009-04-27 SJM Integrated on_demand patch by Armando Serrano Lombillo
# 2008-02-09 SJM Excel 2.0: build XFs on the fly from cell attributes
# 2007-12-04 SJM Added support for Excel 2.x (BIFF2) files.
# 2007-10-11 SJM Added missing entry for blank cell type to ctype_text
# 2007-07-11 SJM Allow for BIFF2/3-style FORMAT record in BIFF4/8 file
# 2007-04-22 SJM Remove experimental "trimming" facility.
from biffh import *
from timemachine import *
from struct import unpack
from formula import dump_formula, decompile_formula, rangename2d
from formatting import nearest_colour_index, Format
import time
DEBUG = 0  # module-wide debug switch
OBJ_MSO_DEBUG = 0  # separate debug switch; presumably for OBJ/MSO record handling (not used in this chunk)
_WINDOW2_options = (
    # Attribute names and initial values to use in case
    # a WINDOW2 record is not written.
    # (Each pair is applied as a Sheet attribute in Sheet.__init__.)
    ("show_formulas", 0),
    ("show_grid_lines", 1),
    ("show_sheet_headers", 1),
    ("panes_are_frozen", 0),
    ("show_zero_values", 1),
    ("automatic_grid_line_colour", 1),
    ("columns_from_right_to_left", 0),
    ("show_outline_symbols", 1),
    ("remove_splits_if_pane_freeze_is_removed", 0),
    ("sheet_selected", 0),
    # "sheet_visible" appears to be merely a clone of "sheet_selected".
    # The real thing is the visibility attribute from the BOUNDSHEET record.
    ("sheet_visible", 0),
    ("show_in_page_break_preview", 0),
    )
##
# <p>Contains the data for one worksheet.</p>
#
# <p>In the cell access functions, "rowx" is a row index, counting from zero, and "colx" is a
# column index, counting from zero.
# Negative values for row/column indexes and slice positions are supported in the expected fashion.</p>
#
# <p>For information about cell types and cell values, refer to the documentation of the Cell class.</p>
#
# <p>WARNING: You don't call this class yourself. You access Sheet objects via the Book object that
# was returned when you called xlrd.open_workbook("myfile.xls").</p>
class Sheet(BaseObject):
##
# Name of sheet.
name = ''
##
# Number of rows in sheet. A row index is in range(thesheet.nrows).
nrows = 0
##
# Number of columns in sheet. A column index is in range(thesheet.ncols).
ncols = 0
##
# The map from a column index to a Colinfo object. Often there is an entry
# in COLINFO records for all column indexes in range(257).
# Note that xlrd ignores the entry for the non-existent
# 257th column. On the other hand, there may be no entry for unused columns.
# <br /> -- New in version 0.6.1
colinfo_map = {}
##
# The map from a row index to a Rowinfo object. Note that it is possible
# to have missing entries -- at least one source of XLS files doesn't
# bother writing ROW records.
# <br /> -- New in version 0.6.1
rowinfo_map = {}
##
# List of address ranges of cells containing column labels.
# These are set up in Excel by Insert > Name > Labels > Columns.
# <br> -- New in version 0.6.0
# <br>How to deconstruct the list:
# <pre>
# for crange in thesheet.col_label_ranges:
# rlo, rhi, clo, chi = crange
# for rx in xrange(rlo, rhi):
# for cx in xrange(clo, chi):
# print "Column label at (rowx=%d, colx=%d) is %r" \
# (rx, cx, thesheet.cell_value(rx, cx))
# </pre>
col_label_ranges = []
##
# List of address ranges of cells containing row labels.
# For more details, see <i>col_label_ranges</i> above.
# <br> -- New in version 0.6.0
row_label_ranges = []
##
# List of address ranges of cells which have been merged.
# These are set up in Excel by Format > Cells > Alignment, then ticking
# the "Merge cells" box.
# <br> -- New in version 0.6.1. Extracted only if open_workbook(..., formatting_info=True)
# <br>How to deconstruct the list:
# <pre>
# for crange in thesheet.merged_cells:
# rlo, rhi, clo, chi = crange
# for rowx in xrange(rlo, rhi):
# for colx in xrange(clo, chi):
# # cell (rlo, clo) (the top left one) will carry the data
# # and formatting info; the remainder will be recorded as
# # blank cells, but a renderer will apply the formatting info
# # for the top left cell (e.g. border, pattern) to all cells in
# # the range.
# </pre>
merged_cells = []
##
# Default column width from DEFCOLWIDTH record, else None.
# From the OOo docs:<br />
# """Column width in characters, using the width of the zero character
# from default font (first FONT record in the file). Excel adds some
# extra space to the default width, depending on the default font and
# default font size. The algorithm how to exactly calculate the resulting
# column width is not known.<br />
# Example: The default width of 8 set in this record results in a column
# width of 8.43 using Arial font with a size of 10 points."""<br />
# For the default hierarchy, refer to the Colinfo class above.
# <br /> -- New in version 0.6.1
defcolwidth = None
##
# Default column width from STANDARDWIDTH record, else None.
# From the OOo docs:<br />
# """Default width of the columns in 1/256 of the width of the zero
# character, using default font (first FONT record in the file)."""<br />
# For the default hierarchy, refer to the Colinfo class above.
# <br /> -- New in version 0.6.1
standardwidth = None
##
# Default value to be used for a row if there is
# no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_row_height = None
##
# Default value to be used for a row if there is
# no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_row_height_mismatch = None
##
# Default value to be used for a row if there is
# no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_row_hidden = None
##
# Default value to be used for a row if there is
# no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_additional_space_above = None
##
# Default value to be used for a row if there is
# no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_additional_space_below = None
##
# Visibility of the sheet. 0 = visible, 1 = hidden (can be unhidden
# by user -- Format/Sheet/Unhide), 2 = "very hidden" (can be unhidden
# only by VBA macro).
visibility = 0
##
# A 256-element tuple corresponding to the contents of the GCW record for this sheet.
# If no such record, treat as all bits zero.
# Applies to BIFF4-7 only. See docs of Colinfo class for discussion.
gcw = (0, ) * 256
    def __init__(self, book, position, name, number):
        """Build an empty Sheet; cell data is filled in later by read().

        book: owning Book object.
        position: offset in the book's data stream where this sheet's
            records start (read() seeks here).
        name: sheet name; number: 0-based sheet index.
        """
        self.book = book
        self.biff_version = book.biff_version
        self._position = position
        self.logfile = book.logfile
        self.pickleable = book.pickleable
        # Use plain lists instead of array.array when arrays are unavailable
        # or cannot be pickled on this platform.
        self.dont_use_array = not(array_array and (CAN_PICKLE_ARRAY or not book.pickleable))
        self.name = name
        self.number = number
        self.verbosity = book.verbosity
        self.formatting_info = book.formatting_info
        self._xf_index_to_xl_type_map = book._xf_index_to_xl_type_map
        self.nrows = 0 # actual, including possibly empty cells
        self.ncols = 0
        self._maxdatarowx = -1 # highest rowx containing a non-empty cell
        self._maxdatacolx = -1 # highest colx containing a non-empty cell
        self._dimnrows = 0 # as per DIMENSIONS record
        self._dimncols = 0
        self._cell_values = []
        self._cell_types = []
        self._cell_xf_indexes = []
        self._need_fix_ragged_rows = 0
        self.defcolwidth = None
        self.standardwidth = None
        self.default_row_height = None
        self.default_row_height_mismatch = 0
        self.default_row_hidden = 0
        self.default_additional_space_above = 0
        self.default_additional_space_below = 0
        self.colinfo_map = {}
        self.rowinfo_map = {}
        self.col_label_ranges = []
        self.row_label_ranges = []
        self.merged_cells = []
        # Counters for which fallback level answered cell_xf_index():
        # [cell, row, column, hardwired-default].
        self._xf_index_stats = [0, 0, 0, 0]
        self.visibility = book._sheet_visibility[number] # from BOUNDSHEET record
        # Seed WINDOW2-derived attributes with defaults in case the record
        # is absent from the file.
        for attr, defval in _WINDOW2_options:
            setattr(self, attr, defval)
        self.first_visible_rowx = 0
        self.first_visible_colx = 0
        self.gridline_colour_index = 0x40
        self.gridline_colour_rgb = None # pre-BIFF8
        self.cached_page_break_preview_mag_factor = 0
        self.cached_normal_view_mag_factor = 0
        self._ixfe = None # BIFF2 only
        self._cell_attr_to_xfx = {} # BIFF2.0 only
        #### Don't initialise this here, use class attribute initialisation.
        #### self.gcw = (0, ) * 256 ####
        # Hard sheet-size limits depend on the BIFF version (BIFF8: 64K rows).
        if self.biff_version >= 80:
            self.utter_max_rows = 65536
        else:
            self.utter_max_rows = 16384
        self.utter_max_cols = 256
##
# Cell object in the given row and column.
def cell(self, rowx, colx):
if self.formatting_info:
xfx = self.cell_xf_index(rowx, colx)
else:
xfx = None
return Cell(
self._cell_types[rowx][colx],
self._cell_values[rowx][colx],
xfx,
)
##
# Value of the cell in the given row and column.
def cell_value(self, rowx, colx):
return self._cell_values[rowx][colx]
##
# Type of the cell in the given row and column.
# Refer to the documentation of the Cell class.
def cell_type(self, rowx, colx):
return self._cell_types[rowx][colx]
##
# XF index of the cell in the given row and column.
# This is an index into Book.xf_list.
# <br /> -- New in version 0.6.1
    def cell_xf_index(self, rowx, colx):
        """XF (formatting) index of a cell: the cell's own XF if set,
        else the row's, else the column's, else the hardwired default 15.
        Each hit is tallied in self._xf_index_stats by fallback level.
        """
        self.req_fmt_info()
        xfx = self._cell_xf_indexes[rowx][colx]
        if xfx > -1:
            self._xf_index_stats[0] += 1
            return xfx
        # Check for a row xf_index
        try:
            xfx = self.rowinfo_map[rowx].xf_index
            if xfx > -1:
                self._xf_index_stats[1] += 1
                return xfx
        except KeyError:
            pass
        # Check for a column xf_index
        try:
            xfx = self.colinfo_map[colx].xf_index
            assert xfx > -1
            self._xf_index_stats[2] += 1
            return xfx
        except KeyError:
            # If all else fails, 15 is used as hardwired global default xf_index.
            self._xf_index_stats[3] += 1
            return 15
##
# Returns a sequence of the Cell objects in the given row.
def row(self, rowx):
return [
self.cell(rowx, colx)
for colx in xrange(self.ncols)
]
##
# Returns a slice of the types
# of the cells in the given row.
def row_types(self, rowx, start_colx=0, end_colx=None):
if end_colx is None:
return self._cell_types[rowx][start_colx:]
return self._cell_types[rowx][start_colx:end_colx]
##
# Returns a slice of the values
# of the cells in the given row.
def row_values(self, rowx, start_colx=0, end_colx=None):
if end_colx is None:
return self._cell_values[rowx][start_colx:]
return self._cell_values[rowx][start_colx:end_colx]
##
# Returns a slice of the Cell objects in the given row.
def row_slice(self, rowx, start_colx=0, end_colx=None):
nc = self.ncols
if start_colx < 0:
start_colx += nc
if start_colx < 0:
start_colx = 0
if end_colx is None or end_colx > nc:
end_colx = nc
elif end_colx < 0:
end_colx += nc
return [
self.cell(rowx, colx)
for colx in xrange(start_colx, end_colx)
]
##
# Returns a slice of the Cell objects in the given column.
def col_slice(self, colx, start_rowx=0, end_rowx=None):
nr = self.nrows
if start_rowx < 0:
start_rowx += nr
if start_rowx < 0:
start_rowx = 0
if end_rowx is None or end_rowx > nr:
end_rowx = nr
elif end_rowx < 0:
end_rowx += nr
return [
self.cell(rowx, colx)
for rowx in xrange(start_rowx, end_rowx)
]
##
# Returns a slice of the values of the cells in the given column.
def col_values(self, colx, start_rowx=0, end_rowx=None):
nr = self.nrows
if start_rowx < 0:
start_rowx += nr
if start_rowx < 0:
start_rowx = 0
if end_rowx is None or end_rowx > nr:
end_rowx = nr
elif end_rowx < 0:
end_rowx += nr
return [
self._cell_values[rowx][colx]
for rowx in xrange(start_rowx, end_rowx)
]
##
# Returns a slice of the types of the cells in the given column.
def col_types(self, colx, start_rowx=0, end_rowx=None):
nr = self.nrows
if start_rowx < 0:
start_rowx += nr
if start_rowx < 0:
start_rowx = 0
if end_rowx is None or end_rowx > nr:
end_rowx = nr
elif end_rowx < 0:
end_rowx += nr
return [
self._cell_types[rowx][colx]
for rowx in xrange(start_rowx, end_rowx)
]
##
# Returns a sequence of the Cell objects in the given column.
    def col(self, colx):
        """Return all Cell objects in the given column (alias of col_slice)."""
        return self.col_slice(colx)
    # Above two lines just for the docs. Here's the real McCoy:
    col = col_slice
# === Following methods are used in building the worksheet.
# === They are not part of the API.
    def extend_cells(self, nr, nc):
        """Grow the cell storage so it covers nr rows and nc columns.

        Cells created here are placeholders (XL_CELL_EMPTY type, '' value,
        XF index -1); real contents are written afterwards by put_*_cell.
        """
        # print "extend_cells_2", self.nrows, self.ncols, nr, nc
        assert 1 <= nc <= self.utter_max_cols
        assert 1 <= nr <= self.utter_max_rows
        if nr <= self.nrows:
            # New cell is in an existing row, so extend that row (if necessary).
            # Note that nr < self.nrows means that the cell data
            # is not in ascending row order!!
            self._need_fix_ragged_rows = 1
            nrx = nr - 1
            trow = self._cell_types[nrx]
            tlen = len(trow)
            nextra = max(nc, self.ncols) - tlen
            if nextra > 0:
                xce = XL_CELL_EMPTY
                if self.dont_use_array:
                    trow.extend([xce] * nextra)
                    if self.formatting_info:
                        self._cell_xf_indexes[nrx].extend([-1] * nextra)
                else:
                    aa = array_array
                    trow.extend(aa('B', [xce]) * nextra)
                    if self.formatting_info:
                        self._cell_xf_indexes[nrx].extend(aa('h', [-1]) * nextra)
                self._cell_values[nrx].extend([''] * nextra)
        if nc > self.ncols:
            self.ncols = nc
            # Existing rows are now too short; fix_ragged_rows() will pad them.
            self._need_fix_ragged_rows = 1
        if nr > self.nrows:
            # Append whole new rows, each ncols wide.
            scta = self._cell_types.append
            scva = self._cell_values.append
            scxa = self._cell_xf_indexes.append
            fmt_info = self.formatting_info
            xce = XL_CELL_EMPTY
            nc = self.ncols
            if self.dont_use_array:
                for _unused in xrange(self.nrows, nr):
                    scta([xce] * nc)
                    scva([''] * nc)
                    if fmt_info:
                        scxa([-1] * nc)
            else:
                aa = array_array
                for _unused in xrange(self.nrows, nr):
                    scta(aa('B', [xce]) * nc)
                    scva([''] * nc)
                    if fmt_info:
                        scxa(aa('h', [-1]) * nc)
            self.nrows = nr
    def fix_ragged_rows(self):
        """Pad every short row out to self.ncols with empty placeholder cells
        so all rows have equal length. Records elapsed time in
        self._fix_ragged_rows_time."""
        t0 = time.time()
        ncols = self.ncols
        xce = XL_CELL_EMPTY
        aa = array_array
        # Hoist attribute lookups out of the per-row loop.
        s_cell_types = self._cell_types
        s_cell_values = self._cell_values
        s_cell_xf_indexes = self._cell_xf_indexes
        s_dont_use_array = self.dont_use_array
        s_fmt_info = self.formatting_info
        totrowlen = 0
        for rowx in xrange(self.nrows):
            trow = s_cell_types[rowx]
            rlen = len(trow)
            totrowlen += rlen
            nextra = ncols - rlen
            if nextra > 0:
                s_cell_values[rowx][rlen:] = [''] * nextra
                if s_dont_use_array:
                    trow[rlen:] = [xce] * nextra
                    if s_fmt_info:
                        s_cell_xf_indexes[rowx][rlen:] = [-1] * nextra
                else:
                    trow.extend(aa('B', [xce]) * nextra)
                    if s_fmt_info:
                        s_cell_xf_indexes[rowx][rlen:] = aa('h', [-1]) * nextra
        self._fix_ragged_rows_time = time.time() - t0
        # Disabled debug report (note the "if 0 and ..." guard).
        if 0 and self.nrows:
            avgrowlen = float(totrowlen) / self.nrows
            print >> self.logfile, \
                "sheet %d: avg row len %.1f; max row len %d" \
                % (self.number, avgrowlen, self.ncols)
    def tidy_dimensions(self):
        """Post-load clean-up: grow the sheet to cover all MERGEDCELLS
        ranges, warn when actual extents disagree with the DIMENSIONS
        record, and square off ragged rows if flagged."""
        if self.verbosity >= 3:
            fprintf(self.logfile,
                "tidy_dimensions: nrows=%d ncols=%d _need_fix_ragged_rows=%d\n",
                self.nrows, self.ncols, self._need_fix_ragged_rows,
                )
        if 1 and self.merged_cells:
            # Merged ranges may extend past the last cell record; make sure
            # the cell matrix covers them.
            nr = nc = 0
            umaxrows = self.utter_max_rows
            umaxcols = self.utter_max_cols
            for crange in self.merged_cells:
                rlo, rhi, clo, chi = crange
                if not (0 <= rlo < rhi <= umaxrows) \
                or not (0 <= clo < chi <= umaxcols):
                    fprintf(self.logfile,
                        "*** WARNING: sheet #%d (%r), MERGEDCELLS bad range %r\n",
                        self.number, self.name, crange)
                if rhi > nr: nr = rhi
                if chi > nc: nc = chi
            self.extend_cells(nr, nc)
        if self.verbosity >= 1 \
        and (self.nrows != self._dimnrows or self.ncols != self._dimncols):
            fprintf(self.logfile,
                "NOTE *** sheet %d (%r): DIMENSIONS R,C = %d,%d should be %d,%d\n",
                self.number,
                self.name,
                self._dimnrows,
                self._dimncols,
                self.nrows,
                self.ncols,
                )
        if self._need_fix_ragged_rows:
            self.fix_ragged_rows()
    def put_cell(self, rowx, colx, ctype, value, xf_index):
        """Store one cell (type, value, and XF index if formatting_info).

        Optimistically writes in place; an IndexError means the cell matrix
        is too small, in which case it is grown and the write retried.
        """
        try:
            self._cell_types[rowx][colx] = ctype
            self._cell_values[rowx][colx] = value
            if self.formatting_info:
                self._cell_xf_indexes[rowx][colx] = xf_index
        except IndexError:
            # print >> self.logfile, "put_cell extending", rowx, colx
            self.extend_cells(rowx+1, colx+1)
            try:
                self._cell_types[rowx][colx] = ctype
                self._cell_values[rowx][colx] = value
                if self.formatting_info:
                    self._cell_xf_indexes[rowx][colx] = xf_index
            except:
                print >> self.logfile, "put_cell", rowx, colx
                raise
        except:
            print >> self.logfile, "put_cell", rowx, colx
            raise
    def put_blank_cell(self, rowx, colx, xf_index):
        """Store a blank cell (XL_CELL_BLANK, '').

        Same grow-and-retry scheme as put_cell; note the XF index is written
        unconditionally here, without checking self.formatting_info.
        """
        # This is used for cells from BLANK and MULBLANK records
        ctype = XL_CELL_BLANK
        value = ''
        try:
            self._cell_types[rowx][colx] = ctype
            self._cell_values[rowx][colx] = value
            self._cell_xf_indexes[rowx][colx] = xf_index
        except IndexError:
            # print >> self.logfile, "put_cell extending", rowx, colx
            self.extend_cells(rowx+1, colx+1)
            try:
                self._cell_types[rowx][colx] = ctype
                self._cell_values[rowx][colx] = value
                self._cell_xf_indexes[rowx][colx] = xf_index
            except:
                print >> self.logfile, "put_cell", rowx, colx
                raise
        except:
            print >> self.logfile, "put_cell", rowx, colx
            raise
    def put_number_cell(self, rowx, colx, value, xf_index):
        """Store a numeric cell; its ctype is looked up from the cell's XF
        index via the book's _xf_index_to_xl_type_map.

        Same grow-and-retry scheme as put_cell.
        """
        ctype = self._xf_index_to_xl_type_map[xf_index]
        try:
            self._cell_types[rowx][colx] = ctype
            self._cell_values[rowx][colx] = value
            if self.formatting_info:
                self._cell_xf_indexes[rowx][colx] = xf_index
        except IndexError:
            # print >> self.logfile, "put_number_cell extending", rowx, colx
            self.extend_cells(rowx+1, colx+1)
            try:
                self._cell_types[rowx][colx] = ctype
                self._cell_values[rowx][colx] = value
                if self.formatting_info:
                    self._cell_xf_indexes[rowx][colx] = xf_index
            except:
                print >> self.logfile, "put_number_cell", rowx, colx
                raise
        except:
            print >> self.logfile, "put_number_cell", rowx, colx
            raise
# === Methods after this line neither know nor care about how cells are stored.
def read(self, bk):
global rc_stats
DEBUG = 0
blah = DEBUG or self.verbosity >= 2
blah_rows = DEBUG or self.verbosity >= 4
blah_formulas = 1 and blah
oldpos = bk._position
bk._position = self._position
XL_SHRFMLA_ETC_ETC = (
XL_SHRFMLA, XL_ARRAY, XL_TABLEOP, XL_TABLEOP2,
XL_ARRAY2, XL_TABLEOP_B2,
)
self_put_number_cell = self.put_number_cell
self_put_cell = self.put_cell
self_put_blank_cell = self.put_blank_cell
local_unpack = unpack
bk_get_record_parts = bk.get_record_parts
bv = self.biff_version
fmt_info = self.formatting_info
eof_found = 0
while 1:
# if DEBUG: print "SHEET.READ: about to read from position %d" % bk._position
rc, data_len, data = bk_get_record_parts()
# if rc in rc_stats:
# rc_stats[rc] += 1
# else:
# rc_stats[rc] = 1
# if DEBUG: print "SHEET.READ: op 0x%04x, %d bytes %r" % (rc, data_len, data)
if rc == XL_NUMBER:
rowx, colx, xf_index, d = local_unpack('<HHHd', data)
# if xf_index == 0:
# fprintf(self.logfile,
# "NUMBER: r=%d c=%d xfx=%d %f\n", rowx, colx, xf_index, d)
self_put_number_cell(rowx, colx, d, xf_index)
elif rc == XL_LABELSST:
rowx, colx, xf_index, sstindex = local_unpack('<HHHi', data)
# print "LABELSST", rowx, colx, sstindex, bk._sharedstrings[sstindex]
self_put_cell(rowx, colx, XL_CELL_TEXT, bk._sharedstrings[sstindex], xf_index)
elif rc == XL_LABEL or rc == XL_RSTRING:
# RSTRING has extra richtext info at the end, but we ignore it.
rowx, colx, xf_index = local_unpack('<HHH', data[0:6])
if bv < BIFF_FIRST_UNICODE:
strg = unpack_string(data, 6, bk.encoding or bk.derive_encoding, lenlen=2)
else:
strg = unpack_unicode(data, 6, lenlen=2)
self_put_cell(rowx, colx, XL_CELL_TEXT, strg, xf_index)
elif rc == XL_RK:
rowx, colx, xf_index = local_unpack('<HHH', data[:6])
d = unpack_RK(data[6:10])
self_put_number_cell(rowx, colx, d, xf_index)
elif rc == XL_MULRK:
mulrk_row, mulrk_first = local_unpack('<HH', data[0:4])
mulrk_last, = local_unpack('<H', data[-2:])
pos = 4
for colx in xrange(mulrk_first, mulrk_last+1):
xf_index, = local_unpack('<H', data[pos:pos+2])
d = unpack_RK(data[pos+2:pos+6])
pos += 6
self_put_number_cell(mulrk_row, colx, d, xf_index)
elif rc == XL_ROW:
# Version 0.6.0a3: ROW records are just not worth using (for memory allocation).
# Version 0.6.1: now used for formatting info.
if not fmt_info: continue
rowx, bits1, bits2 = local_unpack('<H4xH4xi', data[0:16])
if not(0 <= rowx < self.utter_max_rows):
print >> self.logfile, \
"*** NOTE: ROW record has row index %d; " \
"should have 0 <= rowx < %d -- record ignored!" \
% (rowx, self.utter_max_rows)
continue
r = Rowinfo()
# Using upkbits() is far too slow on a file
# with 30 sheets each with 10K rows :-(
# upkbits(r, bits1, (
# ( 0, 0x7FFF, 'height'),
# (15, 0x8000, 'has_default_height'),
# ))
# upkbits(r, bits2, (
# ( 0, 0x00000007, 'outline_level'),
# ( 4, 0x00000010, 'outline_group_starts_ends'),
# ( 5, 0x00000020, 'hidden'),
# ( 6, 0x00000040, 'height_mismatch'),
# ( 7, 0x00000080, 'has_default_xf_index'),
# (16, 0x0FFF0000, 'xf_index'),
# (28, 0x10000000, 'additional_space_above'),
# (29, 0x20000000, 'additional_space_below'),
# ))
# So:
r.height = bits1 & 0x7fff
r.has_default_height = (bits1 >> 15) & 1
r.outline_level = bits2 & 7
r.outline_group_starts_ends = (bits2 >> 4) & 1
r.hidden = (bits2 >> 5) & 1
r.height_mismatch = (bits2 >> 6) & 1
r.has_default_xf_index = (bits2 >> 7) & 1
r.xf_index = (bits2 >> 16) & 0xfff
r.additional_space_above = (bits2 >> 28) & 1
r.additional_space_below = (bits2 >> 29) & 1
if not r.has_default_xf_index:
r.xf_index = -1
self.rowinfo_map[rowx] = r
if 0 and r.xf_index > -1:
fprintf(self.logfile,
"**ROW %d %d %d\n",
self.number, rowx, r.xf_index)
if blah_rows:
print >> self.logfile, 'ROW', rowx, bits1, bits2
r.dump(self.logfile,
header="--- sh #%d, rowx=%d ---" % (self.number, rowx))
elif rc in XL_FORMULA_OPCODES: # 06, 0206, 0406
# DEBUG = 1
# if DEBUG: print "FORMULA: rc: 0x%04x data: %r" % (rc, data)
if bv >= 50:
rowx, colx, xf_index, result_str, flags = local_unpack('<HHH8sH', data[0:16])
lenlen = 2
tkarr_offset = 20
elif bv >= 30:
rowx, colx, xf_index, result_str, flags = local_unpack('<HHH8sH', data[0:16])
lenlen = 2
tkarr_offset = 16
else: # BIFF2
rowx, colx, cell_attr, result_str, flags = local_unpack('<HH3s8sB', data[0:16])
xf_index = self.fixed_BIFF2_xfindex(cell_attr, rowx, colx)
lenlen = 1
tkarr_offset = 16
if blah_formulas: # testing formula dumper
#### XXXX FIXME
fprintf(self.logfile, "FORMULA: rowx=%d colx=%d\n", rowx, colx)
fmlalen = local_unpack("<H", data[20:22])[0]
decompile_formula(bk, data[22:], fmlalen,
reldelta=0, browx=rowx, bcolx=colx, blah=1)
if result_str[6:8] == "\xFF\xFF":
if result_str[0] == '\x00':
# need to read next record (STRING)
gotstring = 0
# if flags & 8:
if 1: # "flags & 8" applies only to SHRFMLA
# actually there's an optional SHRFMLA or ARRAY etc record to skip over
rc2, data2_len, data2 = bk.get_record_parts()
if rc2 == XL_STRING or rc2 == XL_STRING_B2:
gotstring = 1
elif rc2 == XL_ARRAY:
row1x, rownx, col1x, colnx, array_flags, tokslen = \
local_unpack("<HHBBBxxxxxH", data2[:14])
if blah_formulas:
fprintf(self.logfile, "ARRAY: %d %d %d %d %d\n",
row1x, rownx, col1x, colnx, array_flags)
dump_formula(bk, data2[14:], tokslen, bv, reldelta=0, blah=1)
elif rc2 == XL_SHRFMLA:
row1x, rownx, col1x, colnx, nfmlas, tokslen = \
local_unpack("<HHBBxBH", data2[:10])
if blah_formulas:
fprintf(self.logfile, "SHRFMLA (sub): %d %d %d %d %d\n",
row1x, rownx, col1x, colnx, nfmlas)
decompile_formula(bk, data2[10:], tokslen, reldelta=1, blah=1)
elif rc2 not in XL_SHRFMLA_ETC_ETC:
raise XLRDError(
"Expected SHRFMLA, ARRAY, TABLEOP* or STRING record; found 0x%04x" % rc2)
# if DEBUG: print "gotstring:", gotstring
# now for the STRING record
if not gotstring:
rc2, _unused_len, data2 = bk.get_record_parts()
if rc2 not in (XL_STRING, XL_STRING_B2):
raise XLRDError("Expected STRING record; found 0x%04x" % rc2)
# if DEBUG: print "STRING: data=%r BIFF=%d cp=%d" % (data2, self.biff_version, bk.encoding)
if self.biff_version < BIFF_FIRST_UNICODE:
strg = unpack_string(data2, 0, bk.encoding or bk.derive_encoding, lenlen=1 + int(bv > 20))
else:
strg = unpack_unicode(data2, 0, lenlen=2)
self.put_cell(rowx, colx, XL_CELL_TEXT, strg, xf_index)
# if DEBUG: print "FORMULA strg %r" % strg
elif result_str[0] == '\x01':
# boolean formula result
value = ord(result_str[2])
self.put_cell(rowx, colx, XL_CELL_BOOLEAN, value, xf_index)
elif result_str[0] == '\x02':
# Error in cell
value = ord(result_str[2])
self.put_cell(rowx, colx, XL_CELL_ERROR, value, xf_index)
elif result_str[0] == '\x03':
# empty ... i.e. empty (zero-length) string, NOT an empty cell.
self.put_cell(rowx, colx, XL_CELL_TEXT, u"", xf_index)
else:
raise XLRDError("unexpected special case (0x%02x) in FORMULA" % ord(result_str[0]))
else:
# it is a number
d = local_unpack('<d', result_str)[0]
self_put_number_cell(rowx, colx, d, xf_index)
elif rc == XL_BOOLERR:
rowx, colx, xf_index, value, is_err = local_unpack('<HHHBB', data[:8])
# Note OOo Calc 2.0 writes 9-byte BOOLERR records.
# OOo docs say 8. Excel writes 8.
cellty = (XL_CELL_BOOLEAN, XL_CELL_ERROR)[is_err]
# if DEBUG: print "XL_BOOLERR", rowx, colx, xf_index, value, is_err
self.put_cell(rowx, colx, cellty, value, xf_index)
elif rc == XL_COLINFO:
if not fmt_info: continue
c = Colinfo()
first_colx, last_colx, c.width, c.xf_index, flags \
= local_unpack("<HHHHH", data[:10])
#### Colinfo.width is denominated in 256ths of a character,
#### *not* in characters.
if not(0 <= first_colx <= last_colx <= 256):
# Note: 256 instead of 255 is a common mistake.
# We silently ignore the non-existing 257th column in that case.
print >> self.logfile, \
"*** NOTE: COLINFO record has first col index %d, last %d; " \
"should have 0 <= first <= last <= 255 -- record ignored!" \
% (first_colx, last_colx)
del c
continue
upkbits(c, flags, (
( 0, 0x0001, 'hidden'),
( 1, 0x0002, 'bit1_flag'),
# *ALL* colinfos created by Excel in "default" cases are 0x0002!!
# Maybe it's "locked" by analogy with XFProtection data.
( 8, 0x0700, 'outline_level'),
(12, 0x1000, 'collapsed'),
))
for colx in xrange(first_colx, last_colx+1):
if colx > 255: break # Excel does 0 to 256 inclusive
self.colinfo_map[colx] = c
if 0:
fprintf(self.logfile,
"**COL %d %d %d\n",
self.number, colx, c.xf_index)
if blah:
fprintf(
self.logfile,
"COLINFO sheet #%d cols %d-%d: wid=%d xf_index=%d flags=0x%04x\n",
self.number, first_colx, last_colx, c.width, c.xf_index, flags,
)
c.dump(self.logfile, header='===')
elif rc == XL_DEFCOLWIDTH:
self.defcolwidth, = local_unpack("<H", data[:2])
if 0: print >> self.logfile, 'DEFCOLWIDTH', self.defcolwidth
elif rc == XL_STANDARDWIDTH:
if data_len != 2:
print >> self.logfile, '*** ERROR *** STANDARDWIDTH', data_len, repr(data)
self.standardwidth, = local_unpack("<H", data[:2])
if 0: print >> self.logfile, 'STANDARDWIDTH', self.standardwidth
elif rc == XL_GCW:
if not fmt_info: continue # useless w/o COLINFO
assert data_len == 34
assert data[0:2] == "\x20\x00"
iguff = unpack("<8i", data[2:34])
gcw = []
for bits in iguff:
for j in xrange(32):
gcw.append(bits & 1)
bits >>= 1
self.gcw = tuple(gcw)
if 0:
showgcw = "".join(map(lambda x: "F "[x], gcw)).rstrip().replace(' ', '.')
print "GCW:", showgcw
elif rc == XL_BLANK:
if not fmt_info: continue
rowx, colx, xf_index = local_unpack('<HHH', data[:6])
if 0: print >> self.logfile, "BLANK", rowx, colx, xf_index
self_put_blank_cell(rowx, colx, xf_index)
elif rc == XL_MULBLANK: # 00BE
if not fmt_info: continue
mul_row, mul_first = local_unpack('<HH', data[0:4])
mul_last, = local_unpack('<H', data[-2:])
if 0:
print >> self.logfile, "MULBLANK", mul_row, mul_first, mul_last
pos = 4
for colx in xrange(mul_first, mul_last+1):
xf_index, = local_unpack('<H', data[pos:pos+2])
pos += 2
self_put_blank_cell(mul_row, colx, xf_index)
elif rc == XL_DIMENSION or rc == XL_DIMENSION2:
# if data_len == 10:
# Was crashing on BIFF 4.0 file w/o the two trailing unused bytes.
# Reported by Ralph Heimburger.
if bv < 80:
dim_tuple = local_unpack('<HxxH', data[2:8])
else:
dim_tuple = local_unpack('<ixxH', data[4:12])
self.nrows, self.ncols = 0, 0
self._dimnrows, self._dimncols = dim_tuple
if not self.book._xf_epilogue_done:
# Needed for bv <= 40
self.book.xf_epilogue()
if blah:
fprintf(self.logfile,
"sheet %d(%r) DIMENSIONS: ncols=%d nrows=%d\n",
self.number, self.name, self._dimncols, self._dimnrows
)
elif rc == XL_EOF:
DEBUG = 0
if DEBUG: print >> self.logfile, "SHEET.READ: EOF"
eof_found = 1
break
elif rc == XL_OBJ:
# handle SHEET-level objects; note there's a separate Book.handle_obj
self.handle_obj(data)
elif rc == XL_MSO_DRAWING:
self.handle_msodrawingetc(rc, data_len, data)
elif rc == XL_TXO:
self.handle_txo(data)
elif rc == XL_NOTE:
self.handle_note(data)
elif rc == XL_FEAT11:
self.handle_feat11(data)
elif rc in bofcodes: ##### EMBEDDED BOF #####
version, boftype = local_unpack('<HH', data[0:4])
if boftype != 0x20: # embedded chart
print >> self.logfile, \
"*** Unexpected embedded BOF (0x%04x) at offset %d: version=0x%04x type=0x%04x" \
% (rc, bk._position - data_len - 4, version, boftype)
while 1:
code, data_len, data = bk.get_record_parts()
if code == XL_EOF:
break
if DEBUG: print >> self.logfile, "---> found EOF"
elif rc == XL_COUNTRY:
bk.handle_country(data)
elif rc == XL_LABELRANGES:
pos = 0
pos = unpack_cell_range_address_list_update_pos(
self.row_label_ranges, data, pos, bv, addr_size=8,
)
pos = unpack_cell_range_address_list_update_pos(
self.col_label_ranges, data, pos, bv, addr_size=8,
)
assert pos == data_len
elif rc == XL_ARRAY:
row1x, rownx, col1x, colnx, array_flags, tokslen = \
local_unpack("<HHBBBxxxxxH", data[:14])
if blah_formulas:
print "ARRAY:", row1x, rownx, col1x, colnx, array_flags
dump_formula(bk, data[14:], tokslen, bv, reldelta=0, blah=1)
elif rc == XL_SHRFMLA:
row1x, rownx, col1x, colnx, nfmlas, tokslen = \
local_unpack("<HHBBxBH", data[:10])
if blah_formulas:
print "SHRFMLA (main):", row1x, rownx, col1x, colnx, nfmlas
decompile_formula(bk, data[10:], tokslen, reldelta=0, blah=1)
elif rc == XL_CONDFMT:
if not fmt_info: continue
assert bv >= 80
num_CFs, needs_recalc, browx1, browx2, bcolx1, bcolx2 = \
unpack("<6H", data[0:12])
if self.verbosity >= 1:
fprintf(self.logfile,
"\n*** WARNING: Ignoring CONDFMT (conditional formatting) record\n" \
"*** in Sheet %d (%r).\n" \
"*** %d CF record(s); needs_recalc_or_redraw = %d\n" \
"*** Bounding box is %s\n",
self.number, self.name, num_CFs, needs_recalc,
rangename2d(browx1, browx2+1, bcolx1, bcolx2+1),
)
olist = [] # updated by the function
pos = unpack_cell_range_address_list_update_pos(
olist, data, 12, bv, addr_size=8)
# print >> self.logfile, repr(result), len(result)
if self.verbosity >= 1:
fprintf(self.logfile,
"*** %d individual range(s):\n" \
"*** %s\n",
len(olist),
", ".join([rangename2d(*coords) for coords in olist]),
)
elif rc == XL_CF:
if not fmt_info: continue
cf_type, cmp_op, sz1, sz2, flags = unpack("<BBHHi", data[0:10])
font_block = (flags >> 26) & 1
bord_block = (flags >> 28) & 1
patt_block = (flags >> 29) & 1
if self.verbosity >= 1:
fprintf(self.logfile,
"\n*** WARNING: Ignoring CF (conditional formatting) sub-record.\n" \
"*** cf_type=%d, cmp_op=%d, sz1=%d, sz2=%d, flags=0x%08x\n" \
"*** optional data blocks: font=%d, border=%d, pattern=%d\n",
cf_type, cmp_op, sz1, sz2, flags,
font_block, bord_block, patt_block,
)
# hex_char_dump(data, 0, data_len)
pos = 12
if font_block:
(font_height, font_options, weight, escapement, underline,
font_colour_index, two_bits, font_esc, font_underl) = \
unpack("<64x i i H H B 3x i 4x i i i 18x", data[pos:pos+118])
font_style = (two_bits > 1) & 1
posture = (font_options > 1) & 1
font_canc = (two_bits > 7) & 1
cancellation = (font_options > 7) & 1
if self.verbosity >= 1:
fprintf(self.logfile,
"*** Font info: height=%d, weight=%d, escapement=%d,\n" \
"*** underline=%d, colour_index=%d, esc=%d, underl=%d,\n" \
"*** style=%d, posture=%d, canc=%d, cancellation=%d\n",
font_height, weight, escapement, underline,
font_colour_index, font_esc, font_underl,
font_style, posture, font_canc, cancellation,
)
pos += 118
if bord_block:
pos += 8
if patt_block:
pos += 4
fmla1 = data[pos:pos+sz1]
pos += sz1
if blah and sz1:
fprintf(self.logfile,
"*** formula 1:\n",
)
dump_formula(bk, fmla1, sz1, bv, reldelta=0, blah=1)
fmla2 = data[pos:pos+sz2]
pos += sz2
assert pos == data_len
if blah and sz2:
fprintf(self.logfile,
"*** formula 2:\n",
)
dump_formula(bk, fmla2, sz2, bv, reldelta=0, blah=1)
elif rc == XL_DEFAULTROWHEIGHT:
if data_len == 4:
bits, self.default_row_height = unpack("<HH", data[:4])
elif data_len == 2:
self.default_row_height, = unpack("<H", data)
bits = 0
fprintf(self.logfile,
"*** WARNING: DEFAULTROWHEIGHT record len is 2, " \
"should be 4; assuming BIFF2 format\n")
else:
bits = 0
fprintf(self.logfile,
"*** WARNING: DEFAULTROWHEIGHT record len is %d, " \
"should be 4; ignoring this record\n",
data_len)
self.default_row_height_mismatch = bits & 1
self.default_row_hidden = (bits >> 1) & 1
self.default_additional_space_above = (bits >> 2) & 1
self.default_additional_space_below = (bits >> 3) & 1
elif rc == XL_MERGEDCELLS:
if not fmt_info: continue
pos = unpack_cell_range_address_list_update_pos(
self.merged_cells, data, 0, bv, addr_size=8)
if blah:
fprintf(self.logfile,
"MERGEDCELLS: %d ranges\n", int_floor_div(pos - 2, 8))
assert pos == data_len, \
"MERGEDCELLS: pos=%d data_len=%d" % (pos, data_len)
elif rc == XL_WINDOW2:
if bv >= 80:
(options,
self.first_visible_rowx, self.first_visible_colx,
self.gridline_colour_index,
self.cached_page_break_preview_mag_factor,
self.cached_normal_view_mag_factor
) = unpack("<HHHHxxHH", data[:14])
else: # BIFF3-7
(options,
self.first_visible_rowx, self.first_visible_colx,
) = unpack("<HHH", data[:6])
self.gridline_colour_rgb = unpack("<BBB", data[6:9])
self.gridline_colour_index = \
nearest_colour_index(
self.book.colour_map,
self.gridline_colour_rgb,
debug=0)
self.cached_page_break_preview_mag_factor = 0 # default (60%)
self.cached_normal_view_mag_factor = 0 # default (100%)
# options -- Bit, Mask, Contents:
# 0 0001H 0 = Show formula results 1 = Show formulas
# 1 0002H 0 = Do not show grid lines 1 = Show grid lines
# 2 0004H 0 = Do not show sheet headers 1 = Show sheet headers
# 3 0008H 0 = Panes are not frozen 1 = Panes are frozen (freeze)
# 4 0010H 0 = Show zero values as empty cells 1 = Show zero values
# 5 0020H 0 = Manual grid line colour 1 = Automatic grid line colour
# 6 0040H 0 = Columns from left to right 1 = Columns from right to left
# 7 0080H 0 = Do not show outline symbols 1 = Show outline symbols
# 8 0100H 0 = Keep splits if pane freeze is removed 1 = Remove splits if pane freeze is removed
# 9 0200H 0 = Sheet not selected 1 = Sheet selected (BIFF5-BIFF8)
# 10 0400H 0 = Sheet not visible 1 = Sheet visible (BIFF5-BIFF8)
# 11 0800H 0 = Show in normal view 1 = Show in page break preview (BIFF8)
# The freeze flag specifies, if a following PANE record (6.71) describes unfrozen or frozen panes.
for attr, _unused_defval in _WINDOW2_options:
setattr(self, attr, options & 1)
options >>= 1
# print "WINDOW2: visible=%d selected=%d" \
# % (self.sheet_visible, self.sheet_selected)
#### all of the following are for BIFF <= 4W
elif bv <= 45:
if rc == XL_FORMAT or rc == XL_FORMAT2:
bk.handle_format(data, rc)
elif rc == XL_FONT or rc == XL_FONT_B3B4:
bk.handle_font(data)
elif rc == XL_STYLE:
if not self.book._xf_epilogue_done:
self.book.xf_epilogue()
bk.handle_style(data)
elif rc == XL_PALETTE:
bk.handle_palette(data)
elif rc == XL_BUILTINFMTCOUNT:
bk.handle_builtinfmtcount(data)
elif rc == XL_XF4 or rc == XL_XF3 or rc == XL_XF2: #### N.B. not XL_XF
bk.handle_xf(data)
elif rc == XL_DATEMODE:
bk.handle_datemode(data)
elif rc == XL_CODEPAGE:
bk.handle_codepage(data)
elif rc == XL_FILEPASS:
bk.handle_filepass(data)
elif rc == XL_WRITEACCESS:
bk.handle_writeaccess(data)
elif rc == XL_IXFE:
self._ixfe = local_unpack('<H', data)[0]
elif rc == XL_NUMBER_B2:
rowx, colx, cell_attr, d = local_unpack('<HH3sd', data)
self_put_number_cell(rowx, colx, d, self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_INTEGER:
rowx, colx, cell_attr, d = local_unpack('<HH3sH', data)
self_put_number_cell(rowx, colx, float(d), self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_LABEL_B2:
rowx, colx, cell_attr = local_unpack('<HH3s', data[0:7])
strg = unpack_string(data, 7, bk.encoding or bk.derive_encoding(), lenlen=1)
self_put_cell(rowx, colx, XL_CELL_TEXT, strg, self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_BOOLERR_B2:
rowx, colx, cell_attr, value, is_err = local_unpack('<HH3sBB', data)
cellty = (XL_CELL_BOOLEAN, XL_CELL_ERROR)[is_err]
# if DEBUG: print "XL_BOOLERR_B2", rowx, colx, cell_attr, value, is_err
self.put_cell(rowx, colx, cellty, value, self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_BLANK_B2:
if not fmt_info: continue
rowx, colx, cell_attr = local_unpack('<HH3s', data[:7])
self_put_blank_cell(rowx, colx, self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_EFONT:
bk.handle_efont(data)
elif rc == XL_ROW_B2:
if not fmt_info: continue
rowx, bits1, has_defaults = local_unpack('<H4xH2xB', data[0:11])
if not(0 <= rowx < self.utter_max_rows):
print >> self.logfile, \
"*** NOTE: ROW_B2 record has row index %d; " \
"should have 0 <= rowx < %d -- record ignored!" \
% (rowx, self.utter_max_rows)
continue
r = Rowinfo()
r.height = bits1 & 0x7fff
r.has_default_height = (bits1 >> 15) & 1
r.outline_level = 0
r.outline_group_starts_ends = 0
r.hidden = 0
r.height_mismatch = 0
r.has_default_xf_index = has_defaults & 1
r.additional_space_above = 0
r.additional_space_below = 0
if not r.has_default_xf_index:
r.xf_index = -1
elif data_len == 18:
# Seems the XF index in the cell_attr is dodgy
xfx = local_unpack('<H', data[16:18])[0]
r.xf_index = self.fixed_BIFF2_xfindex(cell_attr=None, rowx=rowx, colx=-1, true_xfx=xfx)
else:
cell_attr = data[13:16]
r.xf_index = self.fixed_BIFF2_xfindex(cell_attr, rowx, colx=-1)
self.rowinfo_map[rowx] = r
if 0 and r.xf_index > -1:
fprintf(self.logfile,
"**ROW %d %d %d\n",
self.number, rowx, r.xf_index)
if blah_rows:
print >> self.logfile, 'ROW_B2', rowx, bits1, has_defaults
r.dump(self.logfile,
header="--- sh #%d, rowx=%d ---" % (self.number, rowx))
elif rc == XL_COLWIDTH: # BIFF2 only
if not fmt_info: continue
first_colx, last_colx, width\
= local_unpack("<BBH", data[:4])
if not(first_colx <= last_colx):
print >> self.logfile, \
"*** NOTE: COLWIDTH record has first col index %d, last %d; " \
"should have first <= last -- record ignored!" \
% (first_colx, last_colx)
continue
for colx in xrange(first_colx, last_colx+1):
if self.colinfo_map.has_key(colx):
c = self.colinfo_map[colx]
else:
c = Colinfo()
self.colinfo_map[colx] = c
c.width = width
if blah:
fprintf(
self.logfile,
"COLWIDTH sheet #%d cols %d-%d: wid=%d\n",
self.number, first_colx, last_colx, width
)
elif rc == XL_COLUMNDEFAULT: # BIFF2 only
if not fmt_info: continue
first_colx, last_colx = local_unpack("<HH", data[:4])
#### Warning OOo docs wrong; first_colx <= colx < last_colx
if blah:
fprintf(
self.logfile,
"COLUMNDEFAULT sheet #%d cols in range(%d, %d)\n",
self.number, first_colx, last_colx
)
if not(0 <= first_colx < last_colx <= 256):
print >> self.logfile, \
"*** NOTE: COLUMNDEFAULT record has first col index %d, last %d; " \
"should have 0 <= first < last <= 256" \
% (first_colx, last_colx)
last_colx = min(last_colx, 256)
for colx in xrange(first_colx, last_colx):
offset = 4 + 3 * (colx - first_colx)
cell_attr = data[offset:offset+3]
xf_index = self.fixed_BIFF2_xfindex(cell_attr, rowx=-1, colx=colx)
if self.colinfo_map.has_key(colx):
c = self.colinfo_map[colx]
else:
c = Colinfo()
self.colinfo_map[colx] = c
c.xf_index = xf_index
else:
# if DEBUG: print "SHEET.READ: Unhandled record type %02x %d bytes %r" % (rc, data_len, data)
pass
if not eof_found:
raise XLRDError("Sheet %d (%r) missing EOF record" \
% (self.number, self.name))
self.tidy_dimensions()
bk._position = oldpos
return 1
    def fixed_BIFF2_xfindex(self, cell_attr, rowx, colx, true_xfx=None):
        # Resolve the effective XF index for a BIFF2 cell record.
        # For "real" BIFF 2.1 files with XF records, the 6-bit index in
        # cell_attr (or true_xfx when supplied) is used, with 0x3F acting
        # as an escape meaning "use the value of the preceding IXFE
        # record". For Excel 2.0 (or broken 2.1 without XF records), an
        # XF is faked from the 3-byte cell_attr and cached, so repeated
        # identical cell_attr values share one XF entry.
        DEBUG = 0
        blah = DEBUG or self.verbosity >= 2
        if self.biff_version == 21:
            if self._xf_index_to_xl_type_map:
                if true_xfx is not None:
                    xfx = true_xfx
                else:
                    xfx = ord(cell_attr[0]) & 0x3F
                if xfx == 0x3F:
                    if self._ixfe is None:
                        raise XLRDError("BIFF2 cell record has XF index 63 but no preceding IXFE record.")
                    xfx = self._ixfe
                    # OOo docs are capable of interpretation that each
                    # cell record is preceded immediately by its own IXFE record.
                    # Empirical evidence is that (sensibly) an IXFE record applies to all
                    # following cell records until another IXFE comes along.
                return xfx
            # Have either Excel 2.0, or broken 2.1 w/o XF records -- same effect.
            # Downgrade the whole book so later records take the 2.0 path directly.
            self.biff_version = self.book.biff_version = 20
        #### check that XF slot in cell_attr is zero
        xfx_slot = ord(cell_attr[0]) & 0x3F
        assert xfx_slot == 0
        # Cache hit: this exact 3-byte attribute has already been faked.
        xfx = self._cell_attr_to_xfx.get(cell_attr)
        if xfx is not None:
            return xfx
        if blah:
            fprintf(self.logfile, "New cell_attr %r at (%r, %r)\n", cell_attr, rowx, colx)
        book = self.book
        xf = self.fake_XF_from_BIFF20_cell_attr(cell_attr)
        xfx = len(book.xf_list)
        xf.xf_index = xfx
        book.xf_list.append(xf)
        if blah:
            xf.dump(self.logfile, header="=== Faked XF %d ===" % xfx, footer="======")
        if not book.format_map.has_key(xf.format_key):
            # Unknown format key: log it and install a default "General" format
            # so later lookups can't fail.
            msg = "ERROR *** XF[%d] unknown format key (%d, 0x%04x)\n"
            fprintf(self.logfile, msg,
                xf.xf_index, xf.format_key, xf.format_key)
            fmt = Format(xf.format_key, FUN, u"General")
            book.format_map[xf.format_key] = fmt
            while len(book.format_list) <= xf.format_key:
                book.format_list.append(fmt)
        # Record the cell type (NUMBER vs DATE) implied by the format,
        # and cache the cell_attr -> xfx mapping for subsequent cells.
        cellty_from_fmtty = {
            FNU: XL_CELL_NUMBER,
            FUN: XL_CELL_NUMBER,
            FGE: XL_CELL_NUMBER,
            FDT: XL_CELL_DATE,
            FTX: XL_CELL_NUMBER, # Yes, a number can be formatted as text.
            }
        fmt = book.format_map[xf.format_key]
        cellty = cellty_from_fmtty[fmt.type]
        self._xf_index_to_xl_type_map[xf.xf_index] = cellty
        self._cell_attr_to_xfx[cell_attr] = xfx
        return xfx
    def fake_XF_from_BIFF20_cell_attr(self, cell_attr):
        # Synthesise a full XF object from the 3-byte BIFF 2.0 cell
        # attribute field (protection byte, font/format byte, alignment/
        # border/shading byte), since BIFF 2.0 files have no XF records.
        from formatting import XF, XFAlignment, XFBorder, XFBackground, XFProtection
        xf = XF()
        xf.alignment = XFAlignment()
        xf.alignment.indent_level = 0
        xf.alignment.shrink_to_fit = 0
        xf.alignment.text_direction = 0
        xf.border = XFBorder()
        xf.border.diag_up = 0
        xf.border.diag_down = 0
        xf.border.diag_colour_index = 0
        xf.border.diag_line_style = 0 # no line
        xf.background = XFBackground()
        xf.protection = XFProtection()
        (prot_bits, font_and_format, halign_etc) = unpack('<BBB', cell_attr)
        # Byte 2: low 6 bits = format key, high 2 bits = font index.
        xf.format_key = font_and_format & 0x3F
        xf.font_index = (font_and_format & 0xC0) >> 6
        upkbits(xf.protection, prot_bits, (
            (6, 0x40, 'cell_locked'),
            (7, 0x80, 'formula_hidden'),
            ))
        # Byte 3: bits 0-2 horizontal alignment, bits 3-6 border sides,
        # bit 7 shading.
        xf.alignment.hor_align = halign_etc & 0x07
        for mask, side in ((0x08, 'left'), (0x10, 'right'), (0x20, 'top'), (0x40, 'bottom')):
            if halign_etc & mask:
                colour_index, line_style = 8, 1 # black, thin
            else:
                colour_index, line_style = 0, 0 # none, none
            setattr(xf.border, side + '_colour_index', colour_index)
            setattr(xf.border, side + '_line_style', line_style)
        bg = xf.background
        if halign_etc & 0x80:
            bg.fill_pattern = 17
        else:
            bg.fill_pattern = 0
        bg.background_colour_index = 9 # white
        bg.pattern_colour_index = 8 # black
        xf.parent_style_index = 0 # ???????????
        xf.alignment.vert_align = 2 # bottom
        xf.alignment.rotation = 0
        # Mark every attribute group as explicitly present on this XF.
        for attr_stem in \
            "format font alignment border background protection".split():
            attr = "_" + attr_stem + "_flag"
            setattr(xf, attr, 1)
        return xf
def req_fmt_info(self):
if not self.formatting_info:
raise XLRDError("Feature requires open_workbook(..., formatting_info=True)")
##
# Determine column display width.
# <br /> -- New in version 0.6.1
# <br />
# @param colx Index of the queried column, range 0 to 255.
# Note that it is possible to find out the width that will be used to display
# columns with no cell information e.g. column IV (colx=255).
# @return The column width that will be used for displaying
# the given column by Excel, in units of 1/256th of the width of a
# standard character (the digit zero in the first font).
def computed_column_width(self, colx):
self.req_fmt_info()
if self.biff_version >= 80:
colinfo = self.colinfo_map.get(colx, None)
if colinfo is not None:
return colinfo.width
if self.standardwidth is not None:
return self.standardwidth
elif self.biff_version >= 40:
if self.gcw[colx]:
if self.standardwidth is not None:
return self.standardwidth
else:
colinfo = self.colinfo_map.get(colx, None)
if colinfo is not None:
return colinfo.width
elif self.biff_version == 30:
colinfo = self.colinfo_map.get(colx, None)
if colinfo is not None:
return colinfo.width
# All roads lead to Rome and the DEFCOLWIDTH ...
if self.defcolwidth is not None:
return self.defcolwidth * 256
return 8 * 256 # 8 is what Excel puts in a DEFCOLWIDTH record
    def handle_msodrawingetc(self, recid, data_len, data):
        # Debug-only walk of an MSODRAWING record's escher sub-record
        # stream; populates an MSODrawing object and dumps it. Does
        # nothing unless OBJ_MSO_DEBUG is set and the file is BIFF8+.
        if not OBJ_MSO_DEBUG:
            return
        DEBUG = 1
        if self.biff_version < 80:
            return
        o = MSODrawing()
        pos = 0
        while pos < data_len:
            # Each sub-record: 2 bytes ver/inst, 2 bytes type (fbt),
            # 4 bytes payload length (cb).
            tmp, fbt, cb = unpack('<HHI', data[pos:pos+8])
            ver = tmp & 0xF
            inst = (tmp >> 4) & 0xFFF
            if ver == 0xF:
                ndb = 0 # container
            else:
                ndb = cb
            if DEBUG:
                hex_char_dump(data, pos, ndb + 8, base=0, fout=self.logfile)
                fprintf(self.logfile,
                    "fbt:0x%04X inst:%d ver:0x%X cb:%d (0x%04X)\n",
                    fbt, inst, ver, cb, cb)
            if fbt == 0xF010: # Client Anchor
                assert ndb == 18
                (o.anchor_unk,
                o.anchor_colx_lo, o.anchor_rowx_lo,
                o.anchor_colx_hi, o.anchor_rowx_hi) = unpack('<Hiiii', data[pos+8:pos+8+ndb])
            elif fbt == 0xF011: # Client Data
                # must be followed by an OBJ record
                assert cb == 0
                assert pos + 8 == data_len
            else:
                pass
            pos += ndb + 8
        else:
            # didn't break out of while loop
            assert pos == data_len
        if DEBUG:
            o.dump(self.logfile, header="=== MSODrawing ===", footer= " ")
    def handle_obj(self, data):
        # Debug-only parse of a sheet-level OBJ record's ft/cb sub-record
        # list into an MSObj object, then dump it. No-op unless
        # OBJ_MSO_DEBUG is set and the file is BIFF8+.
        if not OBJ_MSO_DEBUG:
            return
        DEBUG = 1
        if self.biff_version < 80:
            return
        o = MSObj()
        data_len = len(data)
        pos = 0
        if DEBUG:
            fprintf(self.logfile, "... OBJ record ...\n")
        while pos < data_len:
            # Each sub-record: 2-byte type (ft) then 2-byte length (cb).
            ft, cb = unpack('<HH', data[pos:pos+4])
            if DEBUG:
                hex_char_dump(data, pos, cb, base=0, fout=self.logfile)
            if ft == 0x15: # ftCmo ... s/b first
                assert pos == 0
                o.type, o.id, option_flags = unpack('<HHH', data[pos+4:pos+10])
                upkbits(o, option_flags, (
                    ( 0, 0x0001, 'locked'),
                    ( 4, 0x0010, 'printable'),
                    ( 8, 0x0100, 'autofilter'), # not documented in Excel 97 dev kit
                    ( 9, 0x0200, 'scrollbar_flag'), # not documented in Excel 97 dev kit
                    (13, 0x2000, 'autofill'),
                    (14, 0x4000, 'autoline'),
                    ))
            elif ft == 0x00:
                # ftEnd: must terminate the record exactly.
                assert cb == 0
                assert pos + 4 == data_len
            elif ft == 0x0C: # Scrollbar
                values = unpack('<5H', data[pos+8:pos+18])
                for value, tag in zip(values, ('value', 'min', 'max', 'inc', 'page')):
                    setattr(o, 'scrollbar_' + tag, value)
            elif ft == 0x0D: # "Notes structure" [used for cell comments]
                pass ############## not documented in Excel 97 dev kit
            elif ft == 0x13: # list box data
                if o.autofilter: # non standard exit. NOT documented
                    break
            else:
                pass
            pos += cb + 4
        else:
            # didn't break out of while loop
            assert pos == data_len
        if DEBUG:
            o.dump(self.logfile, header="=== MSOBj ===", footer= " ")
    def handle_note(self, data):
        # Debug-only parse of a NOTE (cell comment) record into an
        # MSNote object, then dump it. No-op unless OBJ_MSO_DEBUG is set
        # and the file is BIFF8+.
        if not OBJ_MSO_DEBUG:
            return
        DEBUG = 1
        if self.biff_version < 80:
            return
        if DEBUG:
            fprintf(self.logfile, '... NOTE record ...\n')
            hex_char_dump(data, 0, len(data), base=0, fout=self.logfile)
        o = MSNote()
        data_len = len(data)
        o.rowx, o.colx, option_flags, o.object_id = unpack('<4H', data[:8])
        o.show = (option_flags >> 1) & 1
        # Docs say NULL [sic] bytes padding between string count and string data
        # to ensure that string is word-aligned. Appears to be nonsense.
        # There also seems to be a random(?) byte after the string (not counted in the
        # string length.
        o.original_author, endpos = unpack_unicode_update_pos(data, 8, lenlen=2)
        assert endpos == data_len - 1
        o.last_byte = data[-1]
        if DEBUG:
            o.dump(self.logfile, header="=== MSNote ===", footer= " ")
    def handle_txo(self, data):
        # Debug-only parse of a TXO (text object) record plus its two
        # mandatory CONTINUE records (text, then formatting runs) into an
        # MSTxo object. No-op unless OBJ_MSO_DEBUG is set and the file is
        # BIFF8+.
        if not OBJ_MSO_DEBUG:
            return
        DEBUG = 1
        if self.biff_version < 80:
            return
        o = MSTxo()
        data_len = len(data)
        option_flags, o.rot, cchText, cbRuns = unpack('<HH6xHH4x', data)
        upkbits(o, option_flags, (
            (3, 0x000E, 'horz_align'),
            (6, 0x0070, 'vert_align'),
            (9, 0x0200, 'lock_text'),
            ))
        # First CONTINUE record carries the text itself.
        rc2, data2_len, data2 = self.book.get_record_parts()
        assert rc2 == XL_CONTINUE
        o.text, endpos = unpack_unicode_update_pos(data2, 0, known_len=cchText)
        assert endpos == data2_len
        # Second CONTINUE record carries the formatting runs.
        rc3, data3_len, data3 = self.book.get_record_parts()
        assert rc3 == XL_CONTINUE
        # ignore the formatting runs for the moment
        if DEBUG:
            o.dump(self.logfile, header="=== MSTxo ===", footer= " ")
    def handle_feat11(self, data):
        # Debug-only decode of a FEAT11 (shared feature / Table) record:
        # unpacks and prints the header and fixed Table data, asserting
        # the documented invariants. No-op unless OBJ_MSO_DEBUG is set.
        if not OBJ_MSO_DEBUG:
            return
        # rt: Record type; this matches the BIFF rt in the first two bytes of the record; =0872h
        # grbitFrt: FRT cell reference flag (see table below for details)
        # Ref0: Range reference to a worksheet cell region if grbitFrt=1 (bitFrtRef). Otherwise blank.
        # isf: Shared feature type index =5 for Table
        # fHdr: =0 since this is for feat not feat header
        # reserved0: Reserved for future use =0 for Table
        # cref: Count of ref ranges this feature is on
        # cbFeatData: Count of byte for the current feature data.
        # reserved1: =0 currently not used
        # Ref1: Repeat of Ref0. UNDOCUMENTED
        rt, grbitFrt, Ref0, isf, fHdr, reserved0, cref, cbFeatData, reserved1, Ref1 = unpack('<HH8sHBiHiH8s', data[0:35])
        assert reserved0 == 0
        assert reserved1 == 0
        assert isf == 5
        assert rt == 0x872
        assert fHdr == 0
        assert Ref1 == Ref0
        print "FEAT11: grbitFrt=%d Ref0=%r cref=%d cbFeatData=%d" % (grbitFrt, Ref0, cref, cbFeatData)
        # lt: Table data source type:
        #   =0 for Excel Worksheet Table =1 for read-write SharePoint linked List
        #   =2 for XML mapper Table =3 for Query Table
        # idList: The ID of the Table (unique per worksheet)
        # crwHeader: How many header/title rows the Table has at the top
        # crwTotals: How many total rows the Table has at the bottom
        # idFieldNext: Next id to try when assigning a unique id to a new field
        # cbFSData: The size of the Fixed Data portion of the Table data structure.
        # rupBuild: the rupBuild that generated the record
        # unusedShort: UNUSED short that can be used later. The value is reserved during round-tripping.
        # listFlags: Collection of bit flags: (see listFlags' bit setting table below for detail.)
        # lPosStmCache: Table data stream position of cached data
        # cbStmCache: Count of bytes of cached data
        # cchStmCache: Count of characters of uncompressed cached data in the stream
        # lem: Table edit mode (see List (Table) Editing Mode (lem) setting table below for details.)
        # rgbHashParam: Hash value for SharePoint Table
        # cchName: Count of characters in the Table name string rgbName
        (lt, idList, crwHeader, crwTotals, idFieldNext, cbFSData,
            rupBuild, unusedShort, listFlags, lPosStmCache, cbStmCache,
            cchStmCache, lem, rgbHashParam, cchName) = unpack('<iiiiiiHHiiiii16sH', data[35:35+66])
        print "lt=%d idList=%d crwHeader=%d crwTotals=%d idFieldNext=%d cbFSData=%d\n"\
            "rupBuild=%d unusedShort=%d listFlags=%04X lPosStmCache=%d cbStmCache=%d\n"\
            "cchStmCache=%d lem=%d rgbHashParam=%r cchName=%d" % (
            lt, idList, crwHeader, crwTotals, idFieldNext, cbFSData,
            rupBuild, unusedShort,listFlags, lPosStmCache, cbStmCache,
            cchStmCache, lem, rgbHashParam, cchName)
class MSODrawing(BaseObject):
    # Empty shell for an MSODRAWING record; attributes are presumably
    # attached dynamically by a record handler elsewhere -- confirm.
    pass


class MSObj(BaseObject):
    # Empty shell for an OBJ record; presumably populated dynamically
    # elsewhere -- confirm.
    pass


class MSTxo(BaseObject):
    # Empty shell for a TXO (text object) record; presumably populated
    # dynamically elsewhere -- confirm.
    pass


class MSNote(BaseObject):
    # Empty shell for a NOTE (cell comment) record; presumably populated
    # dynamically elsewhere -- confirm.
    pass
# === helpers ===
def unpack_RK(rk_str):
    """Decode an Excel "RK" value (compressed 4-byte number) to a float.

    @param rk_str the raw 4-byte string from the record (Python 2 str).
        Bit 0 of the first byte set: the decoded value must be divided
        by 100. Bit 1 set: the payload is a signed 30-bit integer;
        otherwise it is the top 30 bits of an IEEE 754 64-bit float.
    @return the decoded value as a float
    """
    flags = ord(rk_str[0])
    if flags & 2:
        # There's a SIGNED 30-bit integer in there!
        i, = unpack('<i', rk_str)
        i >>= 2  # div by 4 to drop the 2 flag bits
        if flags & 1:
            return i / 100.0
        return float(i)
    else:
        # It's the most significant 30 bits of an IEEE 754 64-bit FP number.
        # Rebuild the double: four zero bytes stand in for the discarded
        # low-order mantissa bits, then the original bytes with the two
        # flag bits masked off (& 252 == & ~3).
        d, = unpack('<d', '\0\0\0\0' + chr(flags & 252) + rk_str[1:4])
        if flags & 1:
            return d / 100.0
        return d
##### =============== Cell ======================================== #####

# Maps a format's deduced type classification (FNU/FUN/FGE/FDT/FTX
# constants) to the cell type that a numeric cell carrying that format
# should be reported as.
cellty_from_fmtty = {
    FNU: XL_CELL_NUMBER,
    FUN: XL_CELL_NUMBER,
    FGE: XL_CELL_NUMBER,
    FDT: XL_CELL_DATE,
    FTX: XL_CELL_NUMBER,  # Yes, a number can be formatted as text.
    }

# Human-readable names for the XL_CELL_* cell type constants.
ctype_text = {
    XL_CELL_EMPTY: 'empty',
    XL_CELL_TEXT: 'text',
    XL_CELL_NUMBER: 'number',
    XL_CELL_DATE: 'xldate',
    XL_CELL_BOOLEAN: 'bool',
    XL_CELL_ERROR: 'error',
    XL_CELL_BLANK: 'blank',
    }
##
# <p>Contains the data for one cell.</p>
#
# <p>WARNING: You don't call this class yourself. You access Cell objects
# via methods of the Sheet object(s) that you found in the Book object that
# was returned when you called xlrd.open_workbook("myfile.xls").</p>
# <p> Cell objects have three attributes: <i>ctype</i> is an int, <i>value</i>
# (which depends on <i>ctype</i>) and <i>xf_index</i>.
# If "formatting_info" is not enabled when the workbook is opened, xf_index will be None.
# The following table describes the types of cells and how their values
# are represented in Python.</p>
#
# <table border="1" cellpadding="7">
# <tr>
# <th>Type symbol</th>
# <th>Type number</th>
# <th>Python value</th>
# </tr>
# <tr>
# <td>XL_CELL_EMPTY</td>
# <td align="center">0</td>
# <td>empty string u''</td>
# </tr>
# <tr>
# <td>XL_CELL_TEXT</td>
# <td align="center">1</td>
# <td>a Unicode string</td>
# </tr>
# <tr>
# <td>XL_CELL_NUMBER</td>
# <td align="center">2</td>
# <td>float</td>
# </tr>
# <tr>
# <td>XL_CELL_DATE</td>
# <td align="center">3</td>
# <td>float</td>
# </tr>
# <tr>
# <td>XL_CELL_BOOLEAN</td>
# <td align="center">4</td>
# <td>int; 1 means TRUE, 0 means FALSE</td>
# </tr>
# <tr>
# <td>XL_CELL_ERROR</td>
# <td align="center">5</td>
# <td>int representing internal Excel codes; for a text representation,
# refer to the supplied dictionary error_text_from_code</td>
# </tr>
# <tr>
# <td>XL_CELL_BLANK</td>
# <td align="center">6</td>
# <td>empty string u''. Note: this type will appear only when
# open_workbook(..., formatting_info=True) is used.</td>
# </tr>
# </table>
#<p></p>
class Cell(BaseObject):
    """Holds the data for one cell: its type code, its value and an
    optional XF (formatting) index.

    xf_index is None when the workbook was opened without
    formatting_info. See the comment block above for how 'value'
    depends on 'ctype'.
    """

    __slots__ = ['ctype', 'value', 'xf_index']

    def __init__(self, ctype, value, xf_index=None):
        self.ctype = ctype
        self.value = value
        self.xf_index = xf_index

    def __repr__(self):
        text = "%s:%r" % (ctype_text[self.ctype], self.value)
        if self.xf_index is not None:
            text += " (XF:%r)" % self.xf_index
        return text
##
# There is one and only one instance of an empty cell -- it's a singleton.
# This is it. You may use an identity test like "acell is empty_cell".
empty_cell = Cell(XL_CELL_EMPTY, '')
##### =============== Colinfo and Rowinfo ============================== #####
##
# Width and default formatting information that applies to one or
# more columns in a sheet. Derived from COLINFO records.
#
# <p> Here is the default hierarchy for width, according to the OOo docs:
#
# <br />"""In BIFF3, if a COLINFO record is missing for a column,
# the width specified in the record DEFCOLWIDTH is used instead.
#
# <br />In BIFF4-BIFF7, the width set in this [COLINFO] record is only used,
# if the corresponding bit for this column is cleared in the GCW
# record, otherwise the column width set in the DEFCOLWIDTH record
# is used (the STANDARDWIDTH record is always ignored in this case [see footnote!]).
#
# <br />In BIFF8, if a COLINFO record is missing for a column,
# the width specified in the record STANDARDWIDTH is used.
# If this [STANDARDWIDTH] record is also missing,
# the column width of the record DEFCOLWIDTH is used instead."""
# <br />
#
# Footnote: The docs on the GCW record say this:
# """<br />
# If a bit is set, the corresponding column uses the width set in the STANDARDWIDTH
# record. If a bit is cleared, the corresponding column uses the width set in the
# COLINFO record for this column.
# <br />If a bit is set, and the worksheet does not contain the STANDARDWIDTH record, or if
# the bit is cleared, and the worksheet does not contain the COLINFO record, the DEFCOLWIDTH
# record of the worksheet will be used instead.
# <br />"""<br />
# At the moment (2007-01-17) xlrd is going with the GCW version of the story.
# Reference to the source may be useful: see the computed_column_width(colx) method
# of the Sheet class.
# <br />-- New in version 0.6.1
# </p>
class Colinfo(BaseObject):
    """
    Width and default formatting information applying to one or more
    columns in a sheet; derived from COLINFO records. See the comment
    block above for the width lookup hierarchy across BIFF versions.
    """
    ##
    # Width of the column in 1/256 of the width of the zero character,
    # using default font (first FONT record in the file).
    width = 0
    ##
    # XF index to be used for formatting empty cells.
    xf_index = -1
    ##
    # 1 = column is hidden
    hidden = 0
    ##
    # Value of a 1-bit flag whose purpose is unknown
    # but is often seen set to 1
    bit1_flag = 0
    ##
    # Outline level of the column, in range(7).
    # (0 = no outline)
    outline_level = 0
    ##
    # 1 = column is collapsed
    collapsed = 0
##
# Height and default formatting information that applies to a row in a sheet.
# Derived from ROW records.
# <br /> -- New in version 0.6.1
class Rowinfo(BaseObject):
    """
    Height and default formatting information that applies to a row in
    a sheet; derived from ROW records.
    """
    ##
    # Height of the row, in twips. One twip == 1/20 of a point
    height = 0
    ##
    # 0 = Row has custom height; 1 = Row has default height
    has_default_height = 0
    ##
    # Outline level of the row
    outline_level = 0
    ##
    # 1 = Outline group starts or ends here (depending on where the
    # outline buttons are located, see WSBOOL record [TODO ??]),
    # <i>and</i> is collapsed
    outline_group_starts_ends = 0
    ##
    # 1 = Row is hidden (manually, or by a filter or outline group)
    hidden = 0
    ##
    # 1 = Row height and default font height do not match
    height_mismatch = 0
    ##
    # 1 = the xf_index attribute is usable; 0 = ignore it
    has_default_xf_index = 0
    ##
    # Index to default XF record for empty cells in this row.
    # Don't use this if has_default_xf_index == 0.
    xf_index = -9999
    ##
    # This flag is set, if the upper border of at least one cell in this row
    # or if the lower border of at least one cell in the row above is
    # formatted with a thick line style. Thin and medium line styles are not
    # taken into account.
    additional_space_above = 0
    ##
    # This flag is set, if the lower border of at least one cell in this row
    # or if the upper border of at least one cell in the row below is
    # formatted with a medium or thick line style. Thin line styles are not
    # taken into account.
    additional_space_below = 0
| ev1l0rd/yalnpv | quickfort/src/qfconvert/xlrd/sheet.py | Python | mit | 79,267 |
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from base64 import b64encode
import mock
from gratipay.elsewhere import UserInfo
from gratipay.models.account_elsewhere import AccountElsewhere
from gratipay.testing import Harness, P
import gratipay.testing.elsewhere as user_info_examples
class TestElsewhere(Harness):
    """Tests for the /on/{platform}/ pages and the OAuth connect flow."""

    def test_associate_csrf(self):
        # A callback whose state has no matching cookie is a CSRF failure.
        response = self.client.GxT('/on/github/associate?state=49b7c66246c7')
        assert response.code == 400

    def test_associate_with_empty_cookie_raises_400(self):
        self.client.cookie[b'github_deadbeef'] = b''
        response = self.client.GxT('/on/github/associate?state=deadbeef')
        assert response.code == 400

    def test_extract_user_info(self):
        # Every platform must turn its example API payload into a UserInfo
        # carrying a non-empty user_id.
        for platform in self.platforms:
            user_info = getattr(user_info_examples, platform.name)()
            r = platform.extract_user_info(user_info)
            assert isinstance(r, UserInfo)
            assert r.user_id is not None
            assert len(r.user_id) > 0

    @mock.patch('gratipay.elsewhere.Platform.api_get')
    def test_get_user_info_quotes_values_in_query_string(self, api_get):
        # The quote character must come out percent-encoded (%27).
        self.platforms.twitter.get_user_info('user_name', "'")
        api_get.assert_called_with("/users/show.json?screen_name=%27", sess=None)

    @mock.patch('gratipay.elsewhere.bitbucket.Bitbucket.api_get')
    def test_get_user_info_does_not_quotes_values_in_url(self, api_get):
        # Bitbucket embeds the name in the path, unquoted.
        self.platforms.bitbucket.get_user_info('user_name', "'")
        api_get.assert_called_with("/2.0/users/'", sess=None)

    def test_opt_in_can_change_username(self):
        account = self.make_elsewhere('twitter', 1, 'alice')
        expected = 'bob'
        actual = account.opt_in('bob')[0].participant.username
        assert actual == expected

    def test_opt_in_doesnt_have_to_change_username(self):
        # 'bob' is already taken, so the participant keeps its name.
        self.make_participant('bob')
        account = self.make_elsewhere('twitter', 1, 'alice')
        expected = account.participant.username  # A random one.
        actual = account.opt_in('bob')[0].participant.username
        assert actual == expected

    def test_opt_in_resets_is_closed_to_false(self):
        alice = self.make_elsewhere('twitter', 1, 'alice')
        alice.participant.update_is_closed(True)
        user = alice.opt_in('alice')[0]
        assert not user.participant.is_closed
        assert not P('alice').is_closed

    @mock.patch('requests_oauthlib.OAuth2Session.fetch_token')
    @mock.patch('gratipay.elsewhere.Platform.get_user_self_info')
    @mock.patch('gratipay.elsewhere.Platform.get_user_info')
    def test_connect_might_need_confirmation(self, gui, gusi, ft):
        # The authenticated GitHub identity (id 2) differs from the account
        # being connected (id 1), so the flow redirects to the confirm page.
        self.make_participant('alice', claimed_time='now')
        self.make_participant('bob', claimed_time='now')
        gusi.return_value = self.client.website.platforms.github.extract_user_info({'id': 2})
        gui.return_value = self.client.website.platforms.github.extract_user_info({'id': 1})
        ft.return_value = None
        cookie = b64encode(json.dumps(['query_data', 'connect', '', '2']))
        response = self.client.GxT('/on/github/associate?state=deadbeef',
                                   auth_as='alice',
                                   cookies={b'github_deadbeef': cookie})
        assert response.code == 302
        assert response.headers['Location'].startswith('/on/confirm.html?id=')

    def test_redirect_csrf(self):
        response = self.client.GxT('/on/github/redirect')
        assert response.code == 405

    # NOTE(review): *classes appears unused -- possibly left over from
    # removed mock.patch decorators; confirm before cleaning up.
    def test_redirects(self, *classes):
        self.make_participant('alice')
        data = dict(action='opt-in', then='/', user_id='')
        for platform in self.platforms:
            platform.get_auth_url = lambda *a, **kw: ('', '', '')
            response = self.client.PxST('/on/%s/redirect' % platform.name,
                                        data, auth_as='alice')
            assert response.code == 302

    def test_upsert(self):
        for platform in self.platforms:
            user_info = getattr(user_info_examples, platform.name)()
            account = AccountElsewhere.upsert(platform.extract_user_info(user_info))
            assert isinstance(account, AccountElsewhere)

    @mock.patch('gratipay.elsewhere.Platform.get_user_info')
    def test_user_pages(self, get_user_info):
        for platform in self.platforms:
            alice = UserInfo( platform=platform.name
                            , user_id='0'
                            , user_name='alice'
                            , is_team=False
                             )
            get_user_info.side_effect = lambda *a: alice
            response = self.client.GET('/on/%s/alice/' % platform.name)
            assert response.code == 200
            assert 'has not joined' in response.body.decode('utf8')

    def test_user_pages_are_404_for_unknown_elsewhere_user(self):
        for platform in self.platforms:
            if not hasattr(platform, 'api_user_name_info_path'):
                # Platform has no username lookup endpoint; skip it.
                continue
            r = self.client.GxT("/on/%s/%s/" % (platform.name, 'ijroioifeef'))
            assert "Account not found on %s." % (platform.display_name) in r.body
            assert r.code == 404

    def test_user_pages_are_400_for_invalid_characters(self):
        platform = self.platforms.twitter
        # URL-encoded TAB, CR and LF respectively.
        for username in ('AA%09BB', 'AA%0DBB', 'AA%0ABB'):
            r = self.client.GxT('/on/{}/{}/'.format(platform.name, username))
            assert "Invalid character in elsewhere account username." in r.body
            assert r.code == 400

    def test_failure_page_accepts_valid_username(self):
        self.client.GET('/on/twitter/Gratipay/')  # normal case will have the db primed
        response = self.client.GET('/on/twitter/Gratipay/failure.html')
        assert response.code == 200
class TestConfirmTakeOver(Harness):
    """Tests for confirming and executing the take-over of an
    elsewhere account by another participant."""

    def setUp(self):
        Harness.setUp(self)
        self.alice_elsewhere = self.make_elsewhere('twitter', -1, 'alice')
        token, expires = self.alice_elsewhere.make_connect_token()
        # Cookie name is keyed by the elsewhere account id.
        self.connect_cookie = {b'connect_%s' % self.alice_elsewhere.id: token}
        self.bob = self.make_participant('bob', claimed_time='now')

    def test_confirm(self):
        url = '/on/confirm.html?id=%s' % self.alice_elsewhere.id
        # Anonymous: forbidden.
        response = self.client.GxT(url)
        assert response.code == 403
        # Authenticated, but no connect-token cookie: bad request.
        response = self.client.GxT(url, auth_as='bob')
        assert response.code == 400
        assert 'bad connect token' in response.body
        # Authenticated with a valid token: confirmation page is shown.
        response = self.client.GET(url, auth_as='bob', cookies=self.connect_cookie)
        assert response.code == 200
        assert 'Please Confirm' in response.body

    def test_confirm_gives_400_for_garbage(self):
        assert self.client.GxT('/on/confirm.html?id=garbage', auth_as='bob').code == 400

    def test_take_over(self):
        data = {'account_id': self.alice_elsewhere.id, 'should_transfer': 'yes'}
        # Anonymous: forbidden.
        response = self.client.PxST('/on/take-over.html', data=data)
        assert response.code == 403
        # No connect token: bad request.
        response = self.client.PxST('/on/take-over.html', data=data, auth_as='bob')
        assert response.code == 400
        assert 'bad connect token' in response.body
        # With the token: take-over succeeds and redirects to bob's page.
        response = self.client.PxST('/on/take-over.html', data=data, auth_as='bob',
                                    cookies=self.connect_cookie)
        assert response.code == 302
        assert response.headers['Location'] == '/bob/'
class TestFriendFinder(Harness):
    """Tests for Platform.get_friends_for across the supported platforms."""

    def _check_friends(self, platform_name):
        # Shared body of the per-platform tests: upsert the example
        # elsewhere account for the platform and assert that it reports
        # at least one friend.
        platform = getattr(self.platforms, platform_name)
        user_info = platform.extract_user_info(
            getattr(user_info_examples, platform_name)())
        account = AccountElsewhere.upsert(user_info)
        friends, nfriends, pages_urls = platform.get_friends_for(account)
        assert nfriends > 0

    def test_twitter_get_friends_for(self):
        self._check_friends('twitter')

    def test_github_get_friends_for(self):
        self._check_friends('github')
| gratipay/gratipay.com | tests/py/test_elsewhere.py | Python | mit | 8,165 |
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
# Sample query for counting all the Person entries between dates.
from __future__ import print_function
import datetime
query = Person.all(filter_expired=False).filter(
'entry_date >=', datetime.datetime(2013, 1, 1, 0, 0, 0)).filter(
'entry_date <', datetime.datetime(2014, 1, 1, 0, 0, 0))
count = 0
while True:
current_count = query.count()
if current_count == 0:
break
count += current_count
query.with_cursor(query.cursor())
print('# of persons =', count)
| google/personfinder | tools/sample_queries/count_all_persons.py | Python | apache-2.0 | 1,108 |
# -*- coding: utf-8 -*-
# Copyright (c) 2002 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the listspace viewmanager class.
"""
from __future__ import unicode_literals
import os
from PyQt5.QtCore import pyqtSignal, QFileInfo, QEvent, Qt
from PyQt5.QtWidgets import QStackedWidget, QSplitter, QListWidget, \
QListWidgetItem, QSizePolicy, QMenu, QApplication
from ViewManager.ViewManager import ViewManager
import QScintilla.Editor
from QScintilla.Editor import Editor
import UI.PixmapCache
class StackedWidget(QStackedWidget):
    """
    Class implementing a customized StackedWidget.

    The managed editors are kept in the 'editors' list in most recently
    used order, i.e. index 0 is the editor shown last.
    """
    def __init__(self, parent):
        """
        Constructor

        @param parent parent widget (QWidget)
        """
        super(StackedWidget, self).__init__(parent)

        self.editors = []

    def addWidget(self, assembly):
        """
        Public method to add a new widget.

        @param assembly editor assembly object to be added
            (QScintilla.EditorAssembly.EditorAssembly)
        """
        editor = assembly.getEditor()
        super(StackedWidget, self).addWidget(assembly)
        if editor not in self.editors:
            self.editors.append(editor)

    def removeWidget(self, widget):
        """
        Public method to remove a widget.

        @param widget widget to be removed (QWidget)
        """
        if isinstance(widget, QScintilla.Editor.Editor):
            # The stack holds the editor's assembly (its parent), not
            # the editor itself.
            self.editors.remove(widget)
            widget = widget.parent()
        super(StackedWidget, self).removeWidget(widget)

    def currentWidget(self):
        """
        Public method to get a reference to the current editor.

        @return reference to the current editor (Editor)
        """
        widget = super(StackedWidget, self).currentWidget()
        if widget is not None:
            widget = widget.getEditor()
        return widget

    def setCurrentWidget(self, widget):
        """
        Public method to set the current widget.

        @param widget widget to be made current (QWidget)
        """
        if widget is not None:
            if isinstance(widget, QScintilla.Editor.Editor):
                # Move the editor to the front of the MRU list before
                # switching to its assembly.
                self.editors.remove(widget)
                self.editors.insert(0, widget)
                widget = widget.parent()
            super(StackedWidget, self).setCurrentWidget(widget)

    def setCurrentIndex(self, index):
        """
        Public method to set the current widget by its index.

        @param index index of widget to be made current (integer)
        """
        widget = self.widget(index)
        if widget is not None:
            self.setCurrentWidget(widget)

    def nextTab(self):
        """
        Public slot used to show the next tab.
        """
        ind = self.currentIndex() + 1
        if ind == self.count():
            ind = 0  # wrap around to the first tab
        self.setCurrentIndex(ind)
        self.currentWidget().setFocus()

    def prevTab(self):
        """
        Public slot used to show the previous tab.
        """
        ind = self.currentIndex() - 1
        if ind == -1:
            ind = self.count() - 1  # wrap around to the last tab
        self.setCurrentIndex(ind)
        self.currentWidget().setFocus()

    def hasEditor(self, editor):
        """
        Public method to check for an editor.

        @param editor editor object to check for
        @return flag indicating, whether the editor to be checked belongs
            to the list of editors managed by this stacked widget.
        """
        return editor in self.editors

    def firstEditor(self):
        """
        Public method to retrieve the first editor in the list of managed
        editors.

        @return most recently used editor, or None if the stack is empty
            (QScintilla.Editor.Editor)
        """
        # Replaces the old "len(self.editors) and self.editors[0] or None"
        # and/or idiom, which would yield None for a falsy first element.
        return self.editors[0] if self.editors else None
class Listspace(QSplitter, ViewManager):
"""
Class implementing the listspace viewmanager class.
@signal changeCaption(str) emitted if a change of the caption is necessary
@signal editorChanged(str) emitted when the current editor has changed
@signal editorChangedEd(Editor) emitted when the current editor has changed
@signal lastEditorClosed() emitted after the last editor window was closed
@signal editorOpened(str) emitted after an editor window was opened
@signal editorOpenedEd(Editor) emitted after an editor window was opened
@signal editorClosed(str) emitted just before an editor window gets closed
@signal editorClosedEd(Editor) emitted just before an editor window gets
closed
@signal editorRenamed(str) emitted after an editor was renamed
@signal editorRenamedEd(Editor) emitted after an editor was renamed
@signal editorSaved(str) emitted after an editor window was saved
@signal editorSavedEd(Editor) emitted after an editor window was saved
@signal checkActions(Editor) emitted when some actions should be checked
for their status
@signal cursorChanged(Editor) emitted after the cursor position of the
active window has changed
@signal breakpointToggled(Editor) emitted when a breakpoint is toggled.
@signal bookmarkToggled(Editor) emitted when a bookmark is toggled.
@signal syntaxerrorToggled(Editor) emitted when a syntax error is toggled.
@signal previewStateChanged(bool) emitted to signal a change in the
preview state
@signal editorLanguageChanged(Editor) emitted to signal a change of an
editors language
@signal editorTextChanged(Editor) emitted to signal a change of an
editor's text
@signal editorLineChanged(str,int) emitted to signal a change of an
editor's current line (line is given one based)
"""
# Qt signal declarations; see the class docstring above for the meaning
# of each signal and its argument.
changeCaption = pyqtSignal(str)
editorChanged = pyqtSignal(str)
editorChangedEd = pyqtSignal(Editor)
lastEditorClosed = pyqtSignal()
editorOpened = pyqtSignal(str)
editorOpenedEd = pyqtSignal(Editor)
editorClosed = pyqtSignal(str)
editorClosedEd = pyqtSignal(Editor)
editorRenamed = pyqtSignal(str)
editorRenamedEd = pyqtSignal(Editor)
editorSaved = pyqtSignal(str)
editorSavedEd = pyqtSignal(Editor)
checkActions = pyqtSignal(Editor)
cursorChanged = pyqtSignal(Editor)
breakpointToggled = pyqtSignal(Editor)
bookmarkToggled = pyqtSignal(Editor)
syntaxerrorToggled = pyqtSignal(Editor)
previewStateChanged = pyqtSignal(bool)
editorLanguageChanged = pyqtSignal(Editor)
editorTextChanged = pyqtSignal(Editor)
editorLineChanged = pyqtSignal(str, int)
def __init__(self, parent):
    """
    Constructor

    @param parent parent widget (QWidget)
    """
    # Created before base-class init -- presumably because ViewManager
    # initialization may call back into methods using it; confirm.
    self.stacks = []

    QSplitter.__init__(self, parent)
    ViewManager.__init__(self)
    self.setChildrenCollapsible(False)

    # Left-hand pane: the list of open editors.
    self.viewlist = QListWidget(self)
    policy = self.viewlist.sizePolicy()
    policy.setHorizontalPolicy(QSizePolicy.Ignored)
    self.viewlist.setSizePolicy(policy)
    self.addWidget(self.viewlist)
    self.viewlist.setContextMenuPolicy(Qt.CustomContextMenu)
    self.viewlist.currentRowChanged.connect(self.__showSelectedView)
    self.viewlist.customContextMenuRequested.connect(self.__showMenu)

    # Right-hand pane: a splitter holding one editor stack per split.
    self.stackArea = QSplitter(self)
    self.stackArea.setChildrenCollapsible(False)
    self.addWidget(self.stackArea)
    self.stackArea.setOrientation(Qt.Vertical)
    stack = StackedWidget(self.stackArea)
    self.stackArea.addWidget(stack)
    self.stacks.append(stack)
    self.currentStack = stack
    stack.currentChanged.connect(self.__currentChanged)
    stack.installEventFilter(self)
    self.setSizes([int(self.width() * 0.2), int(self.width() * 0.8)])
    # 20% for viewlist, 80% for the editors

    # Guard flag consulted while a view is being removed.
    self.__inRemoveView = False

    self.__initMenu()
    self.contextMenuEditor = None
    self.contextMenuIndex = -1
def __initMenu(self):
    """
    Private method to initialize the viewlist context menu.

    Actions whose enabled state depends on the clicked editor are kept
    as instance attributes so __showMenu() can update them.
    """
    self.__menu = QMenu(self)
    self.__menu.addAction(
        UI.PixmapCache.getIcon("tabClose.png"),
        self.tr('Close'), self.__contextMenuClose)
    self.closeOthersMenuAct = self.__menu.addAction(
        UI.PixmapCache.getIcon("tabCloseOther.png"),
        self.tr("Close Others"),
        self.__contextMenuCloseOthers)
    self.__menu.addAction(
        self.tr('Close All'), self.__contextMenuCloseAll)
    self.__menu.addSeparator()
    self.saveMenuAct = self.__menu.addAction(
        UI.PixmapCache.getIcon("fileSave.png"),
        self.tr('Save'), self.__contextMenuSave)
    self.__menu.addAction(
        UI.PixmapCache.getIcon("fileSaveAs.png"),
        self.tr('Save As...'), self.__contextMenuSaveAs)
    self.__menu.addAction(
        UI.PixmapCache.getIcon("fileSaveAll.png"),
        self.tr('Save All'), self.__contextMenuSaveAll)
    self.__menu.addSeparator()
    self.openRejectionsMenuAct = self.__menu.addAction(
        self.tr("Open 'rejection' file"),
        self.__contextMenuOpenRejections)
    self.__menu.addSeparator()
    self.__menu.addAction(
        UI.PixmapCache.getIcon("print.png"),
        self.tr('Print'), self.__contextMenuPrintFile)
    self.__menu.addSeparator()
    self.copyPathAct = self.__menu.addAction(
        self.tr("Copy Path to Clipboard"),
        self.__contextMenuCopyPathToClipboard)
def __showMenu(self, point):
    """
    Private slot to handle the customContextMenuRequested signal of
    the viewlist.

    @param point position to open the menu at (QPoint)
    """
    if self.editors:
        itm = self.viewlist.itemAt(point)
        if itm is not None:
            # Remember the clicked editor for the menu action handlers.
            row = self.viewlist.row(itm)
            self.contextMenuEditor = self.editors[row]
            self.contextMenuIndex = row
        if self.contextMenuEditor:
            # Enable/disable actions according to the state of the
            # clicked editor.
            self.saveMenuAct.setEnabled(
                self.contextMenuEditor.isModified())
            fileName = self.contextMenuEditor.getFileName()
            self.copyPathAct.setEnabled(bool(fileName))
            if fileName:
                rej = "{0}.rej".format(fileName)
                self.openRejectionsMenuAct.setEnabled(
                    os.path.exists(rej))
            else:
                self.openRejectionsMenuAct.setEnabled(False)
            self.closeOthersMenuAct.setEnabled(
                self.viewlist.count() > 1)
            self.__menu.popup(self.viewlist.mapToGlobal(point))
def canCascade(self):
    """
    Public method indicating whether managed windows can be cascaded.

    @return always False; the listspace layout has no cascading
    """
    return False
def canTile(self):
    """
    Public method indicating whether managed windows can be tiled.

    @return always False; the listspace layout has no tiling
    """
    return False
def canSplit(self):
    """
    Public method indicating whether the view can be split.

    @return always True; the listspace layout supports split views
    """
    return True
def tile(self):
    """
    Public method to tile the managed windows.

    Not supported by the listspace layout; this is a no-op.
    """
    pass
def cascade(self):
    """
    Public method to cascade the managed windows.

    Not supported by the listspace layout; this is a no-op.
    """
    pass
def _removeAllViews(self):
    """
    Protected method removing every view (i.e. editor window).
    """
    self.viewlist.clear()
    for win in self.editors:
        # Detach the editor from whichever stack currently holds it,
        # then let it close itself.
        owner = next((s for s in self.stacks if s.hasEditor(win)), None)
        if owner is not None:
            owner.removeWidget(win)
        win.closeIt()
def _removeView(self, win):
    """
    Protected method to remove a view (i.e. window).

    @param win editor window to be removed
    """
    # Guard so that list-selection signals fired during removal
    # are ignored.
    self.__inRemoveView = True
    ind = self.editors.index(win)
    itm = self.viewlist.takeItem(ind)
    if itm:
        # Release our reference to the removed list item.
        del itm
    for stack in self.stacks:
        if stack.hasEditor(win):
            stack.removeWidget(win)
            break
    win.closeIt()
    self.__inRemoveView = False

    # Pick the editor to show next: the previous one if possible,
    # otherwise the following one; bail out if none is left.
    if ind > 0:
        ind -= 1
    else:
        if len(self.editors) > 1:
            ind = 1
        else:
            return
    # NOTE(review): 'stack' is the loop variable from above -- if no stack
    # contained 'win' this is the last stack in the list; confirm that
    # fallback is intended.
    stack.setCurrentWidget(stack.firstEditor())
    self._showView(self.editors[ind].parent())

    # Re-announce the now-active editor.
    aw = self.activeWindow()
    fn = aw and aw.getFileName() or None
    if fn:
        self.changeCaption.emit(fn)
        self.editorChanged.emit(fn)
        self.editorLineChanged.emit(fn, aw.getCursorPosition()[0] + 1)
    else:
        self.changeCaption.emit("")
    self.editorChangedEd.emit(aw)
def _addView(self, win, fn=None, noName=""):
    """
    Protected method to add a view (i.e. window).

    @param win editor assembly to be added
    @param fn filename of this editor (string)
    @param noName name to be used for an unnamed editor (string)
    """
    editor = win.getEditor()
    if fn is None:
        # Editor without a file: generate and register an
        # "Untitled N" name.
        if not noName:
            self.untitledCount += 1
            noName = self.tr("Untitled {0}").format(self.untitledCount)
        self.viewlist.addItem(noName)
        editor.setNoName(noName)
    else:
        # Show just the base name; mark read-only files with "(ro)".
        txt = os.path.basename(fn)
        if not QFileInfo(fn).isWritable():
            txt = self.tr("{0} (ro)").format(txt)
        itm = QListWidgetItem(txt)
        itm.setToolTip(fn)
        self.viewlist.addItem(itm)
    self.currentStack.addWidget(win)
    self.currentStack.setCurrentWidget(win)
    editor.captionChanged.connect(self.__captionChange)
    editor.cursorLineChanged.connect(self.__cursorLineChanged)

    index = self.editors.index(editor)
    self.viewlist.setCurrentRow(index)
    editor.setFocus()

    # Announce the new editor to interested parties.
    if fn:
        self.changeCaption.emit(fn)
        self.editorChanged.emit(fn)
        self.editorLineChanged.emit(fn, editor.getCursorPosition()[0] + 1)
    else:
        self.changeCaption.emit("")
    self.editorChangedEd.emit(editor)
def __captionChange(self, cap, editor):
    """
    Private slot handling caption change signals from an editor.

    The list widget entry is updated to show the new file name.

    @param cap caption of the editor (string); unused here
    @param editor editor whose caption changed
    """
    newName = editor.getFileName()
    if not newName:
        return
    self.setEditorName(editor, newName)
def __cursorLineChanged(self, lineno):
    """
    Private slot handling a cursor line change of the sending editor.

    @param lineno cursor line of the sending editor (zero based)
    """
    editor = self.sender()
    if not editor:
        return
    fileName = editor.getFileName()
    if fileName:
        # Signal carries one-based line numbers.
        self.editorLineChanged.emit(fileName, lineno + 1)
def _showView(self, win, fn=None):
    """
    Protected method to show a view (i.e. window).

    @param win editor assembly to be shown
    @param fn filename of this editor (string); note it is recomputed
        from the editor below
    """
    editor = win.getEditor()
    # Find the stack owning this editor and make it the current stack.
    for stack in self.stacks:
        if stack.hasEditor(editor):
            stack.setCurrentWidget(win)
            self.currentStack = stack
            break
    index = self.editors.index(editor)
    self.viewlist.setCurrentRow(index)
    editor.setFocus()

    # Announce the now-visible editor.
    fn = editor.getFileName()
    if fn:
        self.changeCaption.emit(fn)
        self.editorChanged.emit(fn)
        self.editorLineChanged.emit(fn, editor.getCursorPosition()[0] + 1)
    else:
        self.changeCaption.emit("")
    self.editorChangedEd.emit(editor)
def __showSelectedView(self, row):
    """
    Private slot showing the view selected in the editor list.

    @param row row number of the selected item (integer); -1 means
        no selection
    """
    if row == -1:
        return
    editor = self.editors[row]
    self._showView(editor.parent())
    self._checkActions(editor)
def activeWindow(self):
    """
    Public method returning the active (i.e. current) editor.

    @return reference to the active editor of the current stack
    """
    return self.currentStack.currentWidget()
def showWindowMenu(self, windowMenu):
    """
    Public method to set up the viewmanager part of the Window menu.

    Nothing needs to be added for the listspace layout.

    @param windowMenu reference to the window menu
    """
    pass
def _initWindowActions(self):
    """
    Protected method to define the user interface actions for window
    handling.

    The listspace layout defines no extra window actions.
    """
    pass
def setEditorName(self, editor, newName):
    """
    Public method to change the displayed name of the editor.

    @param editor editor window to be changed
    @param newName new name to be shown (string); ignored if empty
    """
    if newName:
        # Remember the selection and restore it after the item update.
        currentRow = self.viewlist.currentRow()
        index = self.editors.index(editor)
        # Show just the base name; mark read-only files with "(ro)".
        txt = os.path.basename(newName)
        if not QFileInfo(newName).isWritable():
            txt = self.tr("{0} (ro)").format(txt)
        itm = self.viewlist.item(index)
        itm.setText(txt)
        itm.setToolTip(newName)
        self.viewlist.setCurrentRow(currentRow)
        self.changeCaption.emit(newName)
def _modificationStatusChanged(self, m, editor):
    """
    Protected slot to handle the modificationStatusChanged signal.

    The list entry's icon is rebuilt to reflect the modification state
    combined with any syntax error or warning annotation.

    @param m flag indicating the modification status (boolean)
    @param editor editor window changed
    """
    currentRow = self.viewlist.currentRow()
    index = self.editors.index(editor)
    keys = []
    if m:
        keys.append("fileModified.png")
    if editor.hasSyntaxErrors():
        keys.append("syntaxError22.png")
    elif editor.hasWarnings():
        keys.append("warning22.png")
    if not keys:
        # No status at all: placeholder icon keeps the layout stable.
        keys.append("empty.png")
    self.viewlist.item(index).setIcon(
        UI.PixmapCache.getCombinedIcon(keys))
    self.viewlist.setCurrentRow(currentRow)
    self._checkActions(editor)
def _syntaxErrorToggled(self, editor):
    """
    Protected slot to handle the syntaxerrorToggled signal.

    Rebuilds the list entry's icon (same scheme as
    _modificationStatusChanged) and forwards to the base class.

    @param editor editor that sent the signal
    """
    currentRow = self.viewlist.currentRow()
    index = self.editors.index(editor)
    keys = []
    if editor.isModified():
        keys.append("fileModified.png")
    if editor.hasSyntaxErrors():
        keys.append("syntaxError22.png")
    elif editor.hasWarnings():
        keys.append("warning22.png")
    if not keys:
        # No status at all: placeholder icon keeps the layout stable.
        keys.append("empty.png")
    self.viewlist.item(index).setIcon(
        UI.PixmapCache.getCombinedIcon(keys))
    self.viewlist.setCurrentRow(currentRow)
    ViewManager._syntaxErrorToggled(self, editor)
def addSplit(self):
    """
    Public method used to split the current view.

    A new editor stack is appended to the stack area and becomes the
    current stack.
    """
    stack = StackedWidget(self.stackArea)
    stack.show()
    self.stackArea.addWidget(stack)
    self.stacks.append(stack)
    self.currentStack = stack
    stack.currentChanged.connect(self.__currentChanged)
    stack.installEventFilter(self)
    # Distribute the available space evenly over all splits.
    if self.stackArea.orientation() == Qt.Horizontal:
        size = self.stackArea.width()
    else:
        size = self.stackArea.height()
    self.stackArea.setSizes(
        [int(size / len(self.stacks))] * len(self.stacks))
    # With more than one split these actions become meaningful.
    self.splitRemoveAct.setEnabled(True)
    self.nextSplitAct.setEnabled(True)
    self.prevSplitAct.setEnabled(True)
def removeSplit(self):
    """
    Public method used to remove the current split view.

    All editors of the current stack are closed first; the stack is
    only removed if every editor could be closed.

    @return flag indicating successful removal
    """
    if len(self.stacks) > 1:
        stack = self.currentStack
        res = True
        savedEditors = stack.editors[:]
        for editor in savedEditors:
            res &= self.closeEditor(editor)
        if res:
            try:
                i = self.stacks.index(stack)
            except ValueError:
                # Stack already gone; nothing left to do.
                return True
            if i == len(self.stacks) - 1:
                i -= 1
            self.stacks.remove(stack)
            stack.close()
            self.currentStack = self.stacks[i]
            if len(self.stacks) == 1:
                # Only one view left: disable the split actions.
                self.splitRemoveAct.setEnabled(False)
                self.nextSplitAct.setEnabled(False)
                self.prevSplitAct.setEnabled(False)
            return True
    return False
def getSplitOrientation(self):
    """
    Public method returning the orientation of the split view.

    @return orientation of the split (Qt.Horizontal or Qt.Vertical)
    """
    return self.stackArea.orientation()
def setSplitOrientation(self, orientation):
    """
    Public method setting the orientation of the split view.

    @param orientation orientation of the split
        (Qt.Horizontal or Qt.Vertical)
    """
    self.stackArea.setOrientation(orientation)
def nextSplit(self):
    """
    Public slot used to move to the next split.
    """
    aw = self.activeWindow()
    _hasFocus = aw and aw.hasFocus()
    ind = self.stacks.index(self.currentStack) + 1
    if ind == len(self.stacks):
        ind = 0  # wrap around to the first split
    self.currentStack = self.stacks[ind]
    if _hasFocus:
        # Move the keyboard focus along with the split change.
        aw = self.activeWindow()
        if aw:
            aw.setFocus()

    # Keep the editor list selection in sync with the new split.
    index = self.editors.index(self.currentStack.currentWidget())
    self.viewlist.setCurrentRow(index)
    def prevSplit(self):
        """
        Public slot used to move to the previous split.
        """
        aw = self.activeWindow()
        # Remember whether the active editor had keyboard focus so it can
        # be restored on the new stack afterwards.
        _hasFocus = aw and aw.hasFocus()
        # Step back to the previous stack, wrapping around at the start.
        ind = self.stacks.index(self.currentStack) - 1
        if ind == -1:
            ind = len(self.stacks) - 1
        self.currentStack = self.stacks[ind]
        if _hasFocus:
            aw = self.activeWindow()
            if aw:
                aw.setFocus()
        # Keep the editor list selection in sync with the new current editor.
        index = self.editors.index(self.currentStack.currentWidget())
        self.viewlist.setCurrentRow(index)
def __contextMenuClose(self):
"""
Private method to close the selected editor.
"""
if self.contextMenuEditor:
self.closeEditorWindow(self.contextMenuEditor)
def __contextMenuCloseOthers(self):
"""
Private method to close the other editors.
"""
index = self.contextMenuIndex
for i in list(range(self.viewlist.count() - 1, index, -1)) + \
list(range(index - 1, -1, -1)):
editor = self.editors[i]
self.closeEditorWindow(editor)
def __contextMenuCloseAll(self):
"""
Private method to close all editors.
"""
savedEditors = self.editors[:]
for editor in savedEditors:
self.closeEditorWindow(editor)
def __contextMenuSave(self):
"""
Private method to save the selected editor.
"""
if self.contextMenuEditor:
self.saveEditorEd(self.contextMenuEditor)
def __contextMenuSaveAs(self):
"""
Private method to save the selected editor to a new file.
"""
if self.contextMenuEditor:
self.saveAsEditorEd(self.contextMenuEditor)
def __contextMenuSaveAll(self):
"""
Private method to save all editors.
"""
self.saveEditorsList(self.editors)
def __contextMenuOpenRejections(self):
"""
Private slot to open a rejections file associated with the selected
editor.
"""
if self.contextMenuEditor:
fileName = self.contextMenuEditor.getFileName()
if fileName:
rej = "{0}.rej".format(fileName)
if os.path.exists(rej):
self.openSourceFile(rej)
def __contextMenuPrintFile(self):
"""
Private method to print the selected editor.
"""
if self.contextMenuEditor:
self.printEditor(self.contextMenuEditor)
def __contextMenuCopyPathToClipboard(self):
"""
Private method to copy the file name of the selected editor to the
clipboard.
"""
if self.contextMenuEditor:
fn = self.contextMenuEditor.getFileName()
if fn:
cb = QApplication.clipboard()
cb.setText(fn)
    def __currentChanged(self, index):
        """
        Private slot to handle the currentChanged signal.
        
        @param index index of the current editor
        """
        # index is -1 when the stack became empty; nothing to update then.
        if index == -1 or not self.editors:
            return
        editor = self.activeWindow()
        if editor is None:
            return
        self._checkActions(editor)
        editor.setFocus()
        fn = editor.getFileName()
        if fn:
            self.changeCaption.emit(fn)
            # Suppress change notifications while a view is being removed.
            if not self.__inRemoveView:
                self.editorChanged.emit(fn)
                self.editorLineChanged.emit(
                    fn, editor.getCursorPosition()[0] + 1)
        else:
            self.changeCaption.emit("")
        self.editorChangedEd.emit(editor)
        # Mirror the selection in the editor list widget.
        cindex = self.editors.index(editor)
        self.viewlist.setCurrentRow(cindex)
    def eventFilter(self, watched, event):
        """
        Public method called to filter the event queue.
        
        @param watched the QObject being watched
        @param event the event that occurred
        @return flag indicating, if we handled the event
        """
        # Track which stack becomes current when the user presses any mouse
        # button except the right one (which opens context menus).
        if event.type() == QEvent.MouseButtonPress and \
                not event.button() == Qt.RightButton:
            switched = True
            if isinstance(watched, QStackedWidget):
                switched = watched is not self.currentStack
                self.currentStack = watched
            elif isinstance(watched, QScintilla.Editor.Editor):
                # Find the stack containing the clicked editor.
                for stack in self.stacks:
                    if stack.hasEditor(watched):
                        switched = stack is not self.currentStack
                        self.currentStack = stack
                        break
            currentWidget = self.currentStack.currentWidget()
            if currentWidget:
                index = self.editors.index(currentWidget)
                self.viewlist.setCurrentRow(index)
            aw = self.activeWindow()
            if aw is not None:
                self._checkActions(aw)
                aw.setFocus()
                fn = aw.getFileName()
                if fn:
                    self.changeCaption.emit(fn)
                    # Emit change signals only if the current stack changed.
                    if switched:
                        self.editorChanged.emit(fn)
                        self.editorLineChanged.emit(
                            fn, aw.getCursorPosition()[0] + 1)
                else:
                    self.changeCaption.emit("")
                self.editorChangedEd.emit(aw)
        # Never consume the event; let Qt continue processing it.
        return False
| paulmadore/Eric-IDE | 6-6.0.9/eric/Plugins/ViewManagerPlugins/Listspace/Listspace.py | Python | gpl-3.0 | 28,425 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os, glob
# NOTE: this string appears after the imports, so it is a plain expression
# rather than the module docstring; kept in place to avoid behaviour changes.
"""
Runs process_logdata_ekf.py on the .ulg files in the supplied directory. ulog files are skipped from the analysis, if a
corresponding .pdf file already exists (unless the overwrite flag was set).
"""
# Command-line interface: a positional directory plus an optional overwrite flag.
parser = argparse.ArgumentParser(description='Analyse the estimator_status and ekf2_innovation message data for the'
                                             ' .ulg files in the specified directory')
parser.add_argument("directory_path")
parser.add_argument('-o', '--overwrite', action='store_true',
                    help='Whether to overwrite an already analysed file. If a file with .pdf extension exists for a .ulg'
                         'file, the log file will be skipped from analysis unless this flag has been set.')
def is_valid_directory(parser, arg):
    """Return ``arg`` unchanged if it names an existing directory.

    Otherwise abort argument parsing via ``parser.error`` (which prints the
    message and exits).
    """
    if not os.path.isdir(arg):
        parser.error('The directory {} does not exist'.format(arg))
    return arg
args = parser.parse_args()
ulog_directory = args.directory_path
print("\n"+"analysing the .ulg files in "+ulog_directory)
# get all the ulog files found in the specified directory
ulog_files = glob.glob(os.path.join(ulog_directory, '*.ulg'))
# remove the files already analysed unless the overwrite flag was specified. A ulog file is considered to be analysed
# if a corresponding .pdf file exists.
if not args.overwrite:
    print("skipping already analysed ulg files.")
    ulog_files = [ulog_file for ulog_file in ulog_files if not os.path.exists('{}.pdf'.format(ulog_file))]
# analyse all ulog files
for ulog_file in ulog_files:
    print("\n"+"loading "+ulog_file +" for analysis")
    # NOTE(review): the file name is interpolated into a shell command; a path
    # containing a single quote would break this os.system call — consider
    # subprocess.run with an argument list instead.
    os.system("python process_logdata_ekf.py '{}'".format(ulog_file))
| mcgill-robotics/Firmware | Tools/ecl_ekf/batch_process_logdata_ekf.py | Python | bsd-3-clause | 1,794 |
# Template for /etc/nova/nova-compute.conf on a Ceph/RBD-backed KVM compute
# node. "{{ rbd_secret_uuid }}" is a template placeholder substituted at
# render time; the string content itself must not be altered.
conf_nova_compute_conf = """[DEFAULT]
compute_driver = libvirt.LibvirtDriver
glance_api_version = 2
[libvirt]
virt_type = kvm
inject_password = False
inject_key = False
inject_partition = -2
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
rbd_secret_uuid = {{ rbd_secret_uuid }}
disk_cachemodes= "network=writeback"
block_migration_flag = "VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_NON_SHARED_INC"
live_migration_flag = "VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
live_migration_uri = qemu+tcp://%s/system
"""
| jiasir/playback | playback/templates/nova_compute_conf.py | Python | mit | 672 |
'''
Created on 24.08.2017

@author: Peer
'''
from setuptools import setup

setup(name='gitexplorer',
      version='0.1',
      description='Git repository analyzer',
      author='Peer Wagner',
      author_email='wagnerpeer@gmail.com',
      url='http://gitexplorer.readthedocs.io',
      packages=['gitexplorer'],
      # Fix: 'requires' is the legacy distutils keyword and is ignored by pip;
      # the MongoDB driver is published on PyPI as 'pymongo', not 'pymongodb'.
      install_requires=['pymongo'],
      provides=['gitexplorer'],
      download_url='https://github.com/wagnerpeer/gitexplorer',
      license='MIT',
      exclude_package_data={'gitexplorer': ['__main__.py']},
      )
| wagnerpeer/gitexplorer | setup.py | Python | mit | 534 |
#!/usr/bin/env python
# Dumps results to a CSV file
import sys, os
sys.path.append('..')
import cPickle
import pandas as pd
from misc.config import c
results_dir = c['RESULTS_DIR']
all_results = {}
datasets = ['segment', 'satimage', 'pendigits']
models = ['test_r2svm', 'test_r2elm', 'random_r2svm', 'fixed_r2svm']
paths = [ os.path.join(results_dir, model + '_' + dataset) for model in models for dataset in datasets ]
for path in paths:
if os.path.isdir(path):
print path
results = {}
for exp in os.listdir(path):
name = exp[:-11]
try:
exp_res = cPickle.load(open(os.path.join(path, exp),'r'))
except:
print exp
continue
merged_res = exp_res['monitors']
merged_res.update(exp_res['results'])
merged_res.update(exp_res['config']['params'])
results[name] = merged_res
name = path.split('/')[-1]
all_results[name] = results
for k, v in all_results.iteritems():
pd.DataFrame.from_dict(v).transpose().to_csv(os.path.join(results_dir, 'csv', k))
| kudkudak/r2-learner | scripts/to_csv.py | Python | mit | 1,129 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reusable implementation of basic RL algorithms."""
from absl import logging
import numpy as np
def get_state_xy(idx, num_cols):
  """Given state index this method returns its equivalent coordinate (x,y).

  Args:
    idx: index uniquely identifying a state
    num_cols: number of colums

  Returns:
    values x, y describing the state's location in the grid
  """
  return divmod(int(idx), int(num_cols))
def get_state_idx(x, y, num_cols):
  """Given state (x,y), returns the index that uniquely identifies this state.

  Args:
    x: value of the coordinate x
    y: value of the coordinate y
    num_cols: number of colums

  Returns:
    unique index identifying a position in the grid
  """
  return x * num_cols + y
def policy_random(env):
  r"""Uniformly random policy on env.

  Args:
    env: a MiniGrid environment, including the MDPWrapper.

  Returns:
    Numpy array S \times A: every action has probability 1/A in every state.
  """
  uniform_prob = 1.0 / env.num_actions
  return np.full((env.num_states, env.num_actions), uniform_prob)
def policy_eps_suboptimal(env, optimal_policy, epsilon=0):
  r"""Epsilon suboptimal policy.

  Takes a random action with probability epsilon and the optimal action
  with probability 1 - epsilon on env.

  Args:
    env: a MiniGrid environment, including the MDPWrapper.
    optimal_policy: Numpy array S \times A with optimal policy
    epsilon: float in [0, 1]

  Returns:
    Numpy array S \times A: policy followed by the agent
  """
  # Inlined uniform policy (same values as policy_random(env)).
  uniform = np.ones((env.num_states, env.num_actions)) / env.num_actions
  return epsilon * uniform + (1 - epsilon) * optimal_policy
def policy_iteration(env, gamma=0.99, tolerance=1e-5, verbose=False):
  """Run policy iteration on env.

  Args:
    env: a MiniGrid environment, including the MDPWrapper.
    gamma: float, discount factor.
    tolerance: float, evaluation stops when the value function change is less
      than the tolerance.
    verbose: bool, whether to print verbose messages.

  Returns:
    Tuple (values, policy): Numpy array with V* and the greedy policy
    (one-hot rows, shape S x A) attaining it.
  """
  values = np.zeros(env.num_states)
  # Random policy
  policy = np.ones((env.num_states, env.num_actions)) / env.num_actions
  policy_stable = False
  i = 0
  while not policy_stable:
    # Policy evaluation: in-place (Gauss-Seidel style) sweeps until the
    # largest single-state value change drops below the tolerance.
    while True:
      delta = 0.
      for s in range(env.num_states):
        v = np.sum(env.rewards[s, :] * policy[s, :] + gamma * policy[s, :] *
                   np.matmul(env.transition_probs[s, :, :], values))
        delta = max(delta, abs(v - values[s]))
        values[s] = v
      if delta < tolerance:
        break
    # Policy improvement: make the policy greedy w.r.t. the current values.
    policy_stable = True
    for s in range(env.num_states):
      old = policy[s].copy()
      # g[a] is the one-step lookahead value of taking action a in state s.
      g = np.zeros(env.num_actions, dtype=float)
      for a in range(env.num_actions):
        g[a] = (
            env.rewards[s, a] +
            gamma * np.matmul(env.transition_probs[s, a, :], values))
      action = np.argmax(g)
      for a in range(env.num_actions):
        if a == action:
          policy[s, a] = 1.
        else:
          policy[s, a] = 0
      if not np.array_equal(policy[s], old):
        policy_stable = False
    i += 1
    if i % 1000 == 0 and verbose:
      logging.info('Error after %d iterations: %f', i, delta)
  if verbose:
    logging.info('Found V* in %d iterations', i)
    logging.info(values)
  return values, policy
| google-research/google-research | generalization_representations_rl_aistats22/minigrid/rl_basics.py | Python | apache-2.0 | 3,853 |
# -*- flake8: noqa -*-
from metrology.reporter.graphite import GraphiteReporter
from metrology.reporter.librato import LibratoReporter
from metrology.reporter.logger import LoggerReporter
| zenoss/metrology | metrology/reporter/__init__.py | Python | mit | 188 |
import os
import sys
import json
import subprocess
__author__ = 'Agostino Sturaro'
def run_batches(batches, conf_dir='../Simulations/test_mp'):
    """Validate per-batch JSON configs, then run all batches concurrently.

    Each batch number maps to a ``batch_<n>.json`` file inside ``conf_dir``.
    All configuration files are checked up front so that no child process is
    spawned when any configuration is missing or malformed.

    Args:
        batches: iterable of integer batch numbers to run.
        conf_dir: directory holding the batch configuration files; the
            default preserves the previously hard-coded location.

    Raises:
        ValueError: if a configuration file is not valid JSON.
        RuntimeError: if a configuration file does not exist.
    """
    # Materialize so generators can be iterated twice (validate + spawn).
    batches = list(batches)

    # Check that all configuration files exist and are valid JSON.
    for batch_no in batches:
        batch_conf_fpath = os.path.normpath(os.path.join(conf_dir, 'batch_{}.json'.format(batch_no)))
        if os.path.exists(batch_conf_fpath):
            try:
                with open(batch_conf_fpath, 'rt') as f:
                    json.load(f)  # parsed only for validation; result unused
            except Exception:
                raise ValueError('The batch configuration file {} is not a valid json file'.format(batch_conf_fpath))
        else:
            raise RuntimeError('The batch configuration file {} does not exist'.format(batch_conf_fpath))

    procs = []
    # Spawn one child process per batch.
    for batch_no in batches:
        batch_conf_fpath = os.path.normpath(os.path.join(conf_dir, 'batch_{}.json'.format(batch_no)))
        proc = subprocess.Popen([sys.executable, 'batch_sim_runner_2.py', str(batch_no), batch_conf_fpath])
        procs.append(proc)

    # Wait for all child processes to finish.
    for proc in procs:
        proc.wait()
# TODO: ask before overwrite
# each of these processes must have its own configuration file
# if we need them to run concurrently, all of their output files must be different
# Kick off the first eight batches; the second group is run manually if needed.
run_batches(range(0, 8))
# run_batches(range(8, 16))
| TiedNets/TiedNets | multi_proc_runner.py | Python | gpl-3.0 | 1,452 |
# -*- coding: utf-8 -*-
"""Tests the features of xblock/runtime"""
# Allow tests to access private members of classes
# pylint: disable=W0212
from collections import namedtuple
from datetime import datetime
from mock import Mock, patch
from unittest import TestCase
from xblock.core import XBlock, XBlockMixin
from xblock.fields import BlockScope, Scope, String, ScopeIds, List, UserScope, Integer
from xblock.exceptions import (
NoSuchDefinition,
NoSuchHandlerError,
NoSuchServiceError,
NoSuchUsage,
NoSuchViewError,
FieldDataDeprecationWarning,
)
from xblock.runtime import (
DictKeyValueStore,
IdReader,
KeyValueStore,
KvsFieldData,
Mixologist,
ObjectAggregator,
)
from xblock.fragment import Fragment
from xblock.field_data import DictFieldData, FieldData
from xblock.test.tools import (
assert_equals, assert_false, assert_true, assert_raises,
assert_raises_regexp, assert_is, assert_is_not, assert_in, unabc,
WarningTestMixin, TestRuntime
)
class TestMixin(object):
    """
    Set up namespaces for each scope to use.

    Declares one String field for every (user, block) scope combination so
    tests can verify that mixin fields are stored under the correct keys.
    """
    mixin_content = String(scope=Scope.content, default='mixin_c')
    mixin_settings = String(scope=Scope.settings, default='mixin_s')
    mixin_user_state = String(scope=Scope.user_state, default='mixin_ss')
    mixin_preferences = String(scope=Scope.preferences, default='mixin_sp')
    mixin_user_info = String(scope=Scope.user_info, default='mixin_si')
    mixin_by_type = String(scope=Scope(UserScope.NONE, BlockScope.TYPE), default='mixin_bt')
    mixin_for_all = String(scope=Scope(UserScope.NONE, BlockScope.ALL), default='mixin_fa')
    mixin_user_def = String(scope=Scope(UserScope.ONE, BlockScope.DEFINITION), default='mixin_sd')
    mixin_agg_global = String(scope=Scope(UserScope.ALL, BlockScope.ALL), default='mixin_ag')
    mixin_agg_type = String(scope=Scope(UserScope.ALL, BlockScope.TYPE), default='mixin_at')
    mixin_agg_def = String(scope=Scope(UserScope.ALL, BlockScope.DEFINITION), default='mixin_ad')
    mixin_agg_usage = String(scope=Scope.user_state_summary, default='mixin_au')
class TestXBlockNoFallback(XBlock):
    """
    Set up a class that contains ModelTypes as fields, but no views or handlers

    One field per (user, block) scope combination, mirroring TestMixin but
    declared directly on the block.
    """
    content = String(scope=Scope.content, default='c')
    settings = String(scope=Scope.settings, default='s')
    user_state = String(scope=Scope.user_state, default='ss')
    preferences = String(scope=Scope.preferences, default='sp')
    user_info = String(scope=Scope.user_info, default='si')
    by_type = String(scope=Scope(UserScope.NONE, BlockScope.TYPE), default='bt')
    for_all = String(scope=Scope(UserScope.NONE, BlockScope.ALL), default='fa')
    user_def = String(scope=Scope(UserScope.ONE, BlockScope.DEFINITION), default='sd')
    agg_global = String(scope=Scope(UserScope.ALL, BlockScope.ALL), default='ag')
    agg_type = String(scope=Scope(UserScope.ALL, BlockScope.TYPE), default='at')
    agg_def = String(scope=Scope(UserScope.ALL, BlockScope.DEFINITION), default='ad')
    agg_usage = String(scope=Scope.user_state_summary, default='au')

    def handler_without_correct_decoration(self, request, suffix=''):
        """a handler which is missing the @XBlock.handler decoration."""
        pass
class TestXBlock(TestXBlockNoFallback):
    """
    Test xblock class with fallback methods

    Adds a decorated handler plus fallback handler/view implementations on
    top of TestXBlockNoFallback's fields.
    """
    @XBlock.handler
    def existing_handler(self, request, suffix=''):  # pylint: disable=unused-argument
        """ an existing handler to be used; stores the request in user_state """
        self.user_state = request
        return "I am the existing test handler"

    @XBlock.handler
    def fallback_handler(self, handler_name, request, suffix=''):  # pylint: disable=unused-argument
        """ test fallback handler; invoked when a named handler is missing """
        self.user_state = request
        if handler_name == 'test_fallback_handler':
            return "I have been handled"
        if handler_name == 'handler_without_correct_decoration':
            return "gone to fallback"

    def student_view(self, context):
        """ an existing view to be used; stores context[0] in preferences """
        self.preferences = context[0]
        return Fragment(self.preferences)

    def fallback_view(self, view_name, context):
        """ test fallback view; invoked when a named view is missing """
        self.preferences = context[0]
        if view_name == 'test_fallback_view':
            return Fragment(self.preferences)
        else:
            return Fragment(u"{} default".format(view_name))
# Allow this tuple to be named as if it were a class
# Lightweight stand-in for a usage record: a usage id plus its definition id.
TestUsage = namedtuple('TestUsage', 'id, def_id')  # pylint: disable=C0103
def check_field(collection, field):
    """
    Test method.

    Asserts that the given `field` is present in `collection`.
    Sets the field to a new value and asserts that the update properly occurs.
    Deletes the new value, and asserts that the default value is properly restored.
    """
    print "Getting %s from %r" % (field.name, collection)
    assert_equals(field.default, getattr(collection, field.name))
    new_value = 'new ' + field.name
    print "Setting %s to %s on %r" % (field.name, new_value, collection)
    setattr(collection, field.name, new_value)
    print "Checking %s on %r" % (field.name, collection)
    assert_equals(new_value, getattr(collection, field.name))
    print "Deleting %s from %r" % (field.name, collection)
    delattr(collection, field.name)
    # Deleting must fall back to the field's declared default.
    print "Back to defaults for %s in %r" % (field.name, collection)
    assert_equals(field.default, getattr(collection, field.name))
def test_db_model_keys():
    # Tests that updates to fields are properly recorded in the KeyValueStore,
    # and that the keys have been constructed correctly
    key_store = DictKeyValueStore()
    field_data = KvsFieldData(key_store)
    runtime = TestRuntime(Mock(), mixins=[TestMixin], services={'field-data': field_data})
    tester = runtime.construct_xblock_from_class(TestXBlock, ScopeIds('s0', 'TestXBlock', 'd0', 'u0'))

    assert_false(field_data.has(tester, 'not a field'))

    # Assign a distinct value ("new <field name>") to every field.
    for field in tester.fields.values():
        new_value = 'new ' + field.name
        assert_false(field_data.has(tester, field.name))
        if isinstance(field, List):
            new_value = [new_value]
        setattr(tester, field.name, new_value)

    # Write out the values
    tester.save()

    # Make sure everything saved correctly
    for field in tester.fields.values():
        assert_true(field_data.has(tester, field.name))

    def get_key_value(scope, user_id, block_scope_id, field_name):
        """Gets the value, from `key_store`, of a Key with the given values."""
        new_key = KeyValueStore.Key(scope, user_id, block_scope_id, field_name)
        return key_store.db_dict[new_key]

    # Examine each value in the database and ensure that keys were constructed correctly
    assert_equals('new content', get_key_value(Scope.content, None, 'd0', 'content'))
    assert_equals('new settings', get_key_value(Scope.settings, None, 'u0', 'settings'))
    assert_equals('new user_state', get_key_value(Scope.user_state, 's0', 'u0', 'user_state'))
    assert_equals('new preferences', get_key_value(Scope.preferences, 's0', 'TestXBlock', 'preferences'))
    assert_equals('new user_info', get_key_value(Scope.user_info, 's0', None, 'user_info'))
    assert_equals('new by_type', get_key_value(Scope(UserScope.NONE, BlockScope.TYPE), None, 'TestXBlock', 'by_type'))
    assert_equals('new for_all', get_key_value(Scope(UserScope.NONE, BlockScope.ALL), None, None, 'for_all'))
    assert_equals('new user_def', get_key_value(Scope(UserScope.ONE, BlockScope.DEFINITION), 's0', 'd0', 'user_def'))
    assert_equals('new agg_global', get_key_value(Scope(UserScope.ALL, BlockScope.ALL), None, None, 'agg_global'))
    assert_equals('new agg_type', get_key_value(Scope(UserScope.ALL, BlockScope.TYPE), None, 'TestXBlock', 'agg_type'))
    assert_equals('new agg_def', get_key_value(Scope(UserScope.ALL, BlockScope.DEFINITION), None, 'd0', 'agg_def'))
    assert_equals('new agg_usage', get_key_value(Scope.user_state_summary, None, 'u0', 'agg_usage'))
    # Mixin fields must be keyed exactly like natively declared fields.
    assert_equals('new mixin_content', get_key_value(Scope.content, None, 'd0', 'mixin_content'))
    assert_equals('new mixin_settings', get_key_value(Scope.settings, None, 'u0', 'mixin_settings'))
    assert_equals('new mixin_user_state', get_key_value(Scope.user_state, 's0', 'u0', 'mixin_user_state'))
    assert_equals('new mixin_preferences', get_key_value(Scope.preferences, 's0', 'TestXBlock', 'mixin_preferences'))
    assert_equals('new mixin_user_info', get_key_value(Scope.user_info, 's0', None, 'mixin_user_info'))
    assert_equals(
        'new mixin_by_type',
        get_key_value(Scope(UserScope.NONE, BlockScope.TYPE), None, 'TestXBlock', 'mixin_by_type')
    )
    assert_equals(
        'new mixin_for_all',
        get_key_value(Scope(UserScope.NONE, BlockScope.ALL), None, None, 'mixin_for_all')
    )
    assert_equals(
        'new mixin_user_def',
        get_key_value(Scope(UserScope.ONE, BlockScope.DEFINITION), 's0', 'd0', 'mixin_user_def')
    )
    assert_equals(
        'new mixin_agg_global',
        get_key_value(Scope(UserScope.ALL, BlockScope.ALL), None, None, 'mixin_agg_global')
    )
    assert_equals(
        'new mixin_agg_type',
        get_key_value(Scope(UserScope.ALL, BlockScope.TYPE), None, 'TestXBlock', 'mixin_agg_type')
    )
    assert_equals(
        'new mixin_agg_def',
        get_key_value(Scope(UserScope.ALL, BlockScope.DEFINITION), None, 'd0', 'mixin_agg_def')
    )
    assert_equals('new mixin_agg_usage', get_key_value(Scope.user_state_summary, None, 'u0', 'mixin_agg_usage'))
@unabc("{} shouldn't be used in tests")
class MockRuntimeForQuerying(TestRuntime):
    """Mock out a runtime for querypath_parsing test"""
    # unabc doesn't squash pylint errors
    # pylint: disable=abstract-method
    def __init__(self, **kwargs):
        # Single shared mock so tests can assert on the calls made by query().
        self.mock_query = Mock()
        super(MockRuntimeForQuerying, self).__init__(**kwargs)

    def query(self, block):
        # Always return the same mock, regardless of the block queried.
        return self.mock_query
def test_querypath_parsing():
    # "..//@hello" should translate to parent() -> descendants() -> attr("hello").
    mrun = MockRuntimeForQuerying()
    block = Mock()
    mrun.querypath(block, "..//@hello")
    print mrun.mock_query.mock_calls
    expected = Mock()
    expected.parent().descendants().attr("hello")
    assert mrun.mock_query.mock_calls == expected.mock_calls
def test_runtime_handle():
    # Test a simple handler and a fallback handler
    key_store = DictKeyValueStore()
    field_data = KvsFieldData(key_store)
    runtime = TestRuntime(services={'field-data': field_data})
    tester = TestXBlock(runtime, scope_ids=Mock(spec=ScopeIds))
    # NOTE: `runtime` is rebound here; handling below goes through the mock
    # runtime, while `tester` was constructed with the first one.
    runtime = MockRuntimeForQuerying()
    # string we want to update using the handler
    update_string = "user state update"
    assert_equals(runtime.handle(tester, 'existing_handler', update_string),
                  'I am the existing test handler')
    assert_equals(tester.user_state, update_string)

    # when the handler needs to use the fallback as given name can't be found
    new_update_string = "new update"
    assert_equals(runtime.handle(tester, 'test_fallback_handler', new_update_string),
                  'I have been handled')
    assert_equals(tester.user_state, new_update_string)

    # request to use a handler which doesn't have XBlock.handler decoration
    # should use the fallback
    new_update_string = "new update"
    assert_equals(runtime.handle(tester, 'handler_without_correct_decoration', new_update_string),
                  'gone to fallback')
    assert_equals(tester.user_state, new_update_string)

    # handler can't be found & no fallback handler supplied, should throw an exception
    tester = TestXBlockNoFallback(runtime, scope_ids=Mock(spec=ScopeIds))
    ultimate_string = "ultimate update"
    with assert_raises(NoSuchHandlerError):
        runtime.handle(tester, 'test_nonexistant_fallback_handler', ultimate_string)

    # request to use a handler which doesn't have XBlock.handler decoration
    # and no fallback should raise NoSuchHandlerError
    with assert_raises(NoSuchHandlerError):
        runtime.handle(tester, 'handler_without_correct_decoration', 'handled')
def test_runtime_render():
    # Exercises named views, the fallback view and the missing-view error.
    key_store = DictKeyValueStore()
    field_data = KvsFieldData(key_store)
    runtime = MockRuntimeForQuerying(services={'field-data': field_data})
    block_type = 'test'
    def_id = runtime.id_generator.create_definition(block_type)
    usage_id = runtime.id_generator.create_usage(def_id)
    tester = TestXBlock(runtime, scope_ids=ScopeIds('user', block_type, def_id, usage_id))
    # string we want to update using the handler
    update_string = u"user state update"

    # test against the student view
    frag = runtime.render(tester, 'student_view', [update_string])
    assert_in(update_string, frag.body_html())
    assert_equals(tester.preferences, update_string)

    # test against the fallback view
    update_string = u"new update"
    frag = runtime.render(tester, 'test_fallback_view', [update_string])
    assert_in(update_string, frag.body_html())
    assert_equals(tester.preferences, update_string)

    # test block-first
    update_string = u"penultimate update"
    frag = tester.render('student_view', [update_string])
    assert_in(update_string, frag.body_html())
    assert_equals(tester.preferences, update_string)

    # test against the no-fallback XBlock
    update_string = u"ultimate update"
    tester = TestXBlockNoFallback(Mock(), scope_ids=Mock(spec=ScopeIds))
    with assert_raises(NoSuchViewError):
        runtime.render(tester, 'test_nonexistent_view', [update_string])
class SerialDefaultKVS(DictKeyValueStore):
    """
    A kvs which gives each call to default the next int (nonsensical but for testing default fn)
    """
    def __init__(self, *args, **kwargs):
        super(SerialDefaultKVS, self).__init__(*args, **kwargs)
        # Monotonic counter, bumped once per default() call.
        self.default_counter = 0

    def default(self, _key):
        # The key is ignored; every lookup miss yields the next integer.
        self.default_counter += 1
        return self.default_counter
class TestIntegerXblock(XBlock):
    """
    XBlock with an integer field, for testing.
    """
    # Content-scoped so the value is shared by all users of a definition.
    counter = Integer(scope=Scope.content)
def test_default_fn():
    # Verifies that a computed default is stable per-object, never persisted,
    # and independently computed for a second object.
    key_store = SerialDefaultKVS()
    field_data = KvsFieldData(key_store)
    runtime = TestRuntime(services={'field-data': field_data})
    tester = TestIntegerXblock(runtime, scope_ids=Mock(spec=ScopeIds))
    tester2 = TestIntegerXblock(runtime, scope_ids=Mock(spec=ScopeIds))

    # ensure value is not in tester before any actions
    assert_false(field_data.has(tester, 'counter'))

    # ensure value is same over successive calls for same DbModel
    first_call = tester.counter
    assert_equals(first_call, 1)
    assert_equals(first_call, tester.counter)

    # ensure the value is not saved in the object
    assert_false(field_data.has(tester, 'counter'))

    # ensure save does not save the computed default back to the object
    tester.save()
    assert_false(field_data.has(tester, 'counter'))

    # ensure second object gets another value
    second_call = tester2.counter
    assert_equals(second_call, 2)
class TestSimpleMixin(object):
    """Toy class for mixin testing"""
    field_x = List(scope=Scope.content)
    field_y = String(scope=Scope.user_state, default="default_value")

    @property
    def field_x_with_default(self):
        """
        Test method for generating programmatic default values for fields
        (falls back to [1, 2, 3] when field_x is falsy).
        """
        return self.field_x or [1, 2, 3]
class FieldTester(XBlock):
    """Test XBlock for field access testing"""
    # One field per scope of interest; field_b/field_c carry declared defaults.
    field_a = Integer(scope=Scope.settings)
    field_b = Integer(scope=Scope.content, default=10)
    field_c = Integer(scope=Scope.user_state, default=42)
# Test that access to fields from mixins works as expected
def test_mixin_field_access():
    field_data = DictFieldData({
        'field_a': 5,
        'field_x': [1, 2, 3],
    })
    runtime = TestRuntime(Mock(), mixins=[TestSimpleMixin], services={'field-data': field_data})

    field_tester = runtime.construct_xblock_from_class(FieldTester, Mock())

    # Stored values win; unset fields fall back to their declared defaults.
    assert_equals(5, field_tester.field_a)
    assert_equals(10, field_tester.field_b)
    assert_equals(42, field_tester.field_c)
    assert_equals([1, 2, 3], field_tester.field_x)
    assert_equals('default_value', field_tester.field_y)

    field_tester.field_x = ['a', 'b']
    field_tester.save()
    assert_equals(['a', 'b'], field_tester._field_data.get(field_tester, 'field_x'))

    # Deleting restores the field default; the property computes its own.
    del field_tester.field_x
    assert_equals([], field_tester.field_x)
    assert_equals([1, 2, 3], field_tester.field_x_with_default)

    with assert_raises(AttributeError):
        getattr(field_tester, 'field_z')
    with assert_raises(AttributeError):
        delattr(field_tester, 'field_z')

    # A non-field attribute is a plain attribute: usable but never persisted.
    field_tester.field_z = 'foo'
    assert_equals('foo', field_tester.field_z)
    assert_false(field_tester._field_data.has(field_tester, 'field_z'))
class Dynamic(object):
    """
    Test helper whose instances expose every __init__ keyword argument as an
    attribute of the same name.
    """
    def __init__(self, **kwargs):
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class TestObjectAggregator(object):
    """
    Test that the ObjectAggregator behaves correctly
    """
    def setUp(self):
        # Create some objects that only have single attributes
        self.first = Dynamic(first=1)
        self.second = Dynamic(second=2)
        self.agg = ObjectAggregator(self.first, self.second)

    def test_get(self):
        # Reads are routed to whichever wrapped object owns the attribute.
        assert_equals(1, self.agg.first)
        assert_equals(2, self.agg.second)
        assert_false(hasattr(self.agg, 'other'))
        with assert_raises(AttributeError):
            self.agg.other  # pylint: disable=W0104

    def test_set(self):
        # Writes go through to the owning object; unknown attributes raise.
        assert_equals(1, self.agg.first)
        self.agg.first = 10
        assert_equals(10, self.agg.first)
        assert_equals(10, self.first.first)  # pylint: disable=E1101
        with assert_raises(AttributeError):
            self.agg.other = 99
        assert_false(hasattr(self.first, 'other'))
        assert_false(hasattr(self.second, 'other'))

    def test_delete(self):
        # Deletes remove the attribute from the owning object.
        assert_equals(1, self.agg.first)
        del self.agg.first
        assert_false(hasattr(self.first, 'first'))
        with assert_raises(AttributeError):
            self.agg.first  # pylint: disable=W0104
        with assert_raises(AttributeError):
            del self.agg.other
class FirstMixin(XBlockMixin):
    """Test class for mixin ordering; earliest mixin should win conflicts."""
    number = 1
    field = Integer(default=1)


class SecondMixin(XBlockMixin):
    """Test class for mixin ordering."""
    number = 2
    field = Integer(default=2)


class ThirdMixin(XBlockMixin):
    """Test class for mixin ordering; declares only the conflicting field."""
    field = Integer(default=3)
class TestMixologist(object):
    """Test that the Mixologist class behaves correctly."""
    def setUp(self):
        self.mixologist = Mixologist([FirstMixin, SecondMixin])

    # Test that the classes generated by the mixologist are cached
    # (and only generated once)
    def test_only_generate_classes_once(self):
        assert_is(
            self.mixologist.mix(FieldTester),
            self.mixologist.mix(FieldTester),
        )
        assert_is_not(
            self.mixologist.mix(FieldTester),
            self.mixologist.mix(TestXBlock),
        )

    # Test that mixins are applied in order
    def test_mixin_order(self):
        assert_is(1, self.mixologist.mix(FieldTester).number)
        assert_is(1, self.mixologist.mix(FieldTester).fields['field'].default)

    def test_unmixed_class(self):
        # The generated class remembers the class it was mixed from.
        assert_is(FieldTester, self.mixologist.mix(FieldTester).unmixed_class)

    def test_mixin_fields(self):
        assert_is(FirstMixin.fields['field'], FirstMixin.field)

    def test_mixed_fields(self):
        mixed = self.mixologist.mix(FieldTester)
        assert_is(mixed.fields['field'], FirstMixin.field)
        assert_is(mixed.fields['field_a'], FieldTester.field_a)

    def test_duplicate_mixins(self):
        # Mixing an already-mixed class must be a no-op.
        singly_mixed = self.mixologist.mix(FieldTester)
        doubly_mixed = self.mixologist.mix(singly_mixed)
        assert_is(singly_mixed, doubly_mixed)
        assert_is(FieldTester, singly_mixed.unmixed_class)

    def test_multiply_mixed(self):
        # The outermost Mixologist's mixins take precedence.
        mixalot = Mixologist([ThirdMixin, FirstMixin])

        pre_mixed = mixalot.mix(self.mixologist.mix(FieldTester))
        post_mixed = self.mixologist.mix(mixalot.mix(FieldTester))

        assert_is(pre_mixed.fields['field'], FirstMixin.field)
        assert_is(post_mixed.fields['field'], ThirdMixin.field)

        assert_is(FieldTester, pre_mixed.unmixed_class)
        assert_is(FieldTester, post_mixed.unmixed_class)

        assert_equals(4, len(pre_mixed.__bases__))  # 1 for the original class + 3 mixin classes
        assert_equals(4, len(post_mixed.__bases__))
@XBlock.needs("i18n", "no_such_service")
@XBlock.wants("secret_service", "another_not_service")
class XBlockWithServices(XBlock):
    """
    Test XBlock class with service declarations.

    The student_view performs the actual assertions: needed-but-missing
    services raise, wanted-but-missing services return None.
    """
    def student_view(self, _context):
        """Try out some services."""
        # i18n is available, and works.
        def assert_equals_unicode(str1, str2):
            """`str1` equals `str2`, and both are Unicode strings."""
            assert_equals(str1, str2)
            assert isinstance(str1, unicode)
            assert isinstance(str2, unicode)

        i18n = self.runtime.service(self, "i18n")
        assert_equals_unicode(u"Welcome!", i18n.ugettext("Welcome!"))

        assert_equals_unicode(u"Plural", i18n.ungettext("Singular", "Plural", 0))
        assert_equals_unicode(u"Singular", i18n.ungettext("Singular", "Plural", 1))
        assert_equals_unicode(u"Plural", i18n.ungettext("Singular", "Plural", 2))

        when = datetime(2013, 2, 14, 22, 30, 17)
        assert_equals_unicode(u"2013-02-14", i18n.strftime(when, "%Y-%m-%d"))
        assert_equals_unicode(u"Feb 14, 2013", i18n.strftime(when, "SHORT_DATE"))
        assert_equals_unicode(u"Thursday, February 14, 2013", i18n.strftime(when, "LONG_DATE"))
        assert_equals_unicode(u"Feb 14, 2013 at 22:30", i18n.strftime(when, "DATE_TIME"))
        assert_equals_unicode(u"10:30:17 PM", i18n.strftime(when, "TIME"))

        # secret_service is available.
        assert_equals(self.runtime.service(self, "secret_service"), 17)

        # no_such_service is not available, and raises an exception, because we
        # said we needed it.
        with assert_raises_regexp(NoSuchServiceError, "is not available"):
            self.runtime.service(self, "no_such_service")

        # another_not_service is not available, and returns None, because we
        # didn't need it, we only wanted it.
        assert_is(self.runtime.service(self, "another_not_service"), None)
        return Fragment()
def test_service():
    """Render XBlockWithServices so the assertions in its view run."""
    runtime = TestRuntime(services={
        'secret_service': 17,
        'field-data': DictFieldData({}),
    })
    block_type = 'test'
    def_id = runtime.id_generator.create_definition(block_type)
    usage_id = runtime.id_generator.create_usage(def_id)
    tester = XBlockWithServices(runtime, scope_ids=ScopeIds('user', block_type, def_id, usage_id))
    # Call the student_view to run its assertions.
    runtime.render(tester, 'student_view')
@XBlock.needs("no_such_service_sub")
@XBlock.wants("another_not_service_sub")
class SubXBlockWithServices(XBlockWithServices):
"""
Test that subclasses can use services declared on the parent.
"""
def student_view(self, context):
"""Try the services."""
# First, call the super class, its assertions should still pass.
super(SubXBlockWithServices, self).student_view(context)
# no_such_service_sub is not available, and raises an exception,
# because we said we needed it.
with assert_raises_regexp(NoSuchServiceError, "is not available"):
self.runtime.service(self, "no_such_service_sub")
# another_not_service_sub is not available, and returns None,
# because we didn't need it, we only wanted it.
assert_is(self.runtime.service(self, "another_not_service_sub"), None)
return Fragment()
def test_sub_service():
    """Render SubXBlockWithServices to check inherited service declarations."""
    runtime = TestRuntime(id_reader=Mock(), services={
        'secret_service': 17,
        'field-data': DictFieldData({}),
    })
    tester = SubXBlockWithServices(runtime, scope_ids=Mock(spec=ScopeIds))
    # Call the student_view to run its assertions.
    runtime.render(tester, 'student_view')
class TestRuntimeGetBlock(TestCase):
    """
    Test the get_block default method on Runtime.
    """
    def setUp(self):
        # Patch construct_xblock so get_block never builds a real block;
        # we only assert on how it is called.
        patcher = patch.object(TestRuntime, 'construct_xblock')
        self.construct_block = patcher.start()
        self.addCleanup(patcher.stop)
        self.id_reader = Mock(IdReader)
        self.user_id = Mock()
        self.field_data = Mock(FieldData)
        self.runtime = TestRuntime(self.id_reader, services={'field-data': self.field_data})
        self.runtime.user_id = self.user_id
        self.usage_id = 'usage_id'
        # Can only get a definition id from the id_reader
        self.def_id = self.id_reader.get_definition_id.return_value
        # Can only get a block type from the id_reader
        self.block_type = self.id_reader.get_block_type.return_value
    def test_basic(self):
        """get_block resolves usage -> definition -> block type, then constructs."""
        self.runtime.get_block(self.usage_id)
        self.id_reader.get_definition_id.assert_called_with(self.usage_id)
        self.id_reader.get_block_type.assert_called_with(self.def_id)
        self.construct_block.assert_called_with(
            self.block_type,
            ScopeIds(self.user_id, self.block_type, self.def_id, self.usage_id),
            for_parent=None,
        )
    def test_missing_usage(self):
        """An unknown usage id propagates NoSuchUsage from the id_reader."""
        self.id_reader.get_definition_id.side_effect = NoSuchUsage
        with self.assertRaises(NoSuchUsage):
            self.runtime.get_block(self.usage_id)
    def test_missing_definition(self):
        self.id_reader.get_block_type.side_effect = NoSuchDefinition
        # If we don't have a definition, then the usage doesn't exist
        with self.assertRaises(NoSuchUsage):
            self.runtime.get_block(self.usage_id)
class TestRuntimeDeprecation(WarningTestMixin, TestCase):
    """
    Tests to make sure that deprecated Runtime apis stay usable,
    but raise warnings.
    """
    def test_passed_field_data(self):
        """Passing field_data to the constructor warns, but still works."""
        field_data = Mock(spec=FieldData)
        with self.assertWarns(FieldDataDeprecationWarning):
            runtime = TestRuntime(Mock(spec=IdReader), field_data)
        with self.assertWarns(FieldDataDeprecationWarning):
            self.assertEquals(runtime.field_data, field_data)
    def test_set_field_data(self):
        """Assigning runtime.field_data warns on both the set and the get."""
        field_data = Mock(spec=FieldData)
        runtime = TestRuntime(Mock(spec=IdReader), None)
        with self.assertWarns(FieldDataDeprecationWarning):
            runtime.field_data = field_data
        with self.assertWarns(FieldDataDeprecationWarning):
            self.assertEquals(runtime.field_data, field_data)
| nagyistoce/edx-XBlock | xblock/test/test_runtime.py | Python | apache-2.0 | 27,041 |
from django.contrib.gis.db import models
import datetime
class Area(models.Model):
    """A named polygonal map area with a contact e-mail (GeoDjango model)."""
    name = models.CharField(max_length=255)
    # NOTE(review): plain CharField rather than EmailField, so no e-mail
    # format validation is applied -- confirm this is intentional.
    email = models.CharField(max_length=255)
    poly = models.PolygonField()
    # GeoManager is required (pre Django 1.9) for spatial queryset lookups.
    objects = models.GeoManager()
| mariohmol/django-google-polygon | dgpolygon/gmappolygons/models.py | Python | mit | 245 |
#
# Copyright 2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
import os
import tempfile
from functools import wraps
# Gluster submodules scanned by listPublicFunctions() for exposed functions.
MODULE_LIST = ('cli', 'hooks', 'services')
def makePublic(func):
    """Decorator marking *func* for exposure through superVdsm.

    The returned proxy behaves exactly like *func* (functools.wraps keeps
    its name and docstring) and carries a ``superVdsm`` attribute that
    listPublicFunctions() looks for.
    """
    @wraps(func)
    def _proxy(*args, **kwargs):
        return func(*args, **kwargs)
    _proxy.superVdsm = True
    return _proxy
def listPublicFunctions():
    """Collect all gluster functions marked with @makePublic.

    Returns a list of ``(exposedName, function)`` pairs, where the exposed
    name is the original name prefixed with ``gluster`` and capitalized
    (e.g. ``hostAdd`` -> ``glusterHostAdd``). Modules in MODULE_LIST that
    cannot be imported are silently skipped.
    """
    exposed = []
    for modName in MODULE_LIST:
        try:
            module = __import__('gluster.' + modName, fromlist=['gluster'])
        except ImportError:
            continue
        for attrName in dir(module):
            candidate = getattr(module, attrName)
            if getattr(candidate, 'superVdsm', False):
                publicName = 'gluster' + attrName[0].upper() + attrName[1:]
                exposed.append((publicName, candidate))
    return exposed
def safeWrite(fileName, content):
    """Atomically replace *fileName* with *content*.

    Writes into a temporary file created in the same directory (so the
    final rename stays on one filesystem) and then renames it over the
    target, so readers never observe a partially written file.
    """
    tmp = tempfile.NamedTemporaryFile(dir=os.path.dirname(fileName),
                                      delete=False)
    try:
        tmp.write(content)
    finally:
        tmp.close()
    os.rename(tmp.name, fileName)
| edwardbadboy/vdsm-ubuntu | vdsm/gluster/__init__.py | Python | gpl-2.0 | 1,832 |
# -*- coding: utf-8 -*-
#
# Author: François Rossigneux <francois.rossigneux@inria.fr>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
Functions in this module are imported into the blazar.db namespace. Call these
functions from blazar.db namespace, not the blazar.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface.
**Related Flags**
:db_backend: string to lookup in the list of LazyPluggable backends.
`sqlalchemy` is the only supported backend right now.
:sql_connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/blazar/blazar.sqlite`.
"""
import functools

from oslo_config import cfg
from oslo_db import api as db_api
from oslo_log import log as logging
# Maps config backend names to the module implementing the DB API.
_BACKEND_MAPPING = {
    'sqlalchemy': 'blazar.db.sqlalchemy.utils',
}
# Lazily-loaded backend implementation; every public function below
# delegates to it.
IMPL = db_api.DBAPI(cfg.CONF.database.backend,
                    backend_mapping=_BACKEND_MAPPING)
LOG = logging.getLogger(__name__)
def setup_db():
    """Set up database, create tables, etc.
    Return True on success, False otherwise
    """
    # Delegates to the configured backend implementation (IMPL above).
    return IMPL.setup_db()
def drop_db():
    """Drop database.
    Return True on success, False otherwise
    """
    return IMPL.drop_db()
# Helpers for building constraints / equality checks.
# All three simply delegate to the active backend (IMPL).
def constraint(**conditions):
    """Return a constraint object suitable for use with some updates."""
    return IMPL.constraint(**conditions)
def equal_any(*values):
    """Return an equality condition object suitable for use in a constraint.
    Equal_any conditions require that a model object's attribute equal any
    one of the given values.
    """
    return IMPL.equal_any(*values)
def not_equal(*values):
    """Return an inequality condition object suitable for use in a constraint.
    Not_equal conditions require that a model object's attribute differs from
    all of the given values.
    """
    return IMPL.not_equal(*values)
def to_dict(func):
    """Decorator converting a DB-API result into plain dictionaries.

    Wraps *func* so that a returned model object becomes ``obj.to_dict()``,
    a list of model objects becomes a list of dicts, and a falsy result
    (e.g. None) becomes None. The original's semantics are preserved.

    Fix: apply functools.wraps so the wrapped function keeps its __name__,
    __doc__ and other metadata (the bare inner function previously showed
    up as "decorator" in logs, docs and debuggers).
    """
    @functools.wraps(func)
    def decorator(*args, **kwargs):
        res = func(*args, **kwargs)
        if isinstance(res, list):
            return [item.to_dict() for item in res]
        if res:
            return res.to_dict()
        return None
    return decorator
def get_reservations_by_host_id(host_id, start_date, end_date):
    """Delegate to the backend: reservations for one host in the period."""
    return IMPL.get_reservations_by_host_id(host_id, start_date, end_date)
def get_reservations_by_host_ids(host_ids, start_date, end_date):
    """Delegate to the backend: reservations for several hosts in the period."""
    return IMPL.get_reservations_by_host_ids(host_ids, start_date, end_date)
def get_reservation_allocations_by_host_ids(host_ids, start_date, end_date,
                                            lease_id=None,
                                            reservation_id=None):
    """Delegate to the backend: host allocations in the period, optionally
    narrowed to a specific lease and/or reservation.
    """
    return IMPL.get_reservation_allocations_by_host_ids(host_ids,
                                                        start_date, end_date,
                                                        lease_id,
                                                        reservation_id)
def get_reservation_allocations_by_fip_ids(fip_ids, start_date, end_date,
                                           lease_id=None, reservation_id=None):
    """Delegate to the backend: floating-IP allocations in the period,
    optionally narrowed to a specific lease and/or reservation.
    """
    return IMPL.get_reservation_allocations_by_fip_ids(
        fip_ids, start_date, end_date, lease_id, reservation_id)
def get_plugin_reservation(resource_type, resource_id):
    """Delegate to the backend: plugin-specific reservation for a resource."""
    return IMPL.get_plugin_reservation(resource_type, resource_id)
def get_free_periods(resource_id, start_date, end_date, duration,
                     resource_type='host'):
    """Returns a list of free periods."""
    return IMPL.get_free_periods(resource_id, start_date, end_date, duration,
                                 resource_type=resource_type)
def get_reserved_periods(resource_id, start_date, end_date, duration,
                         resource_type='host'):
    """Returns a list of reserved periods."""
    return IMPL.get_reserved_periods(resource_id, start_date, end_date,
                                     duration, resource_type=resource_type)
| openstack/blazar | blazar/db/utils.py | Python | apache-2.0 | 4,552 |
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openfisca_france_data import default_config_files_directory as config_files_directory
from openfisca_survey_manager.survey_collections import SurveyCollection
def get_input_data_frame(year):
    """Load the OpenFisca input table for *year* and normalize column names.

    Reads the "input" table from the ``openfisca_data_<year>`` survey of
    the "openfisca" collection, renames legacy column names to their
    current equivalents, resets the index in place and returns the frame.
    """
    survey_collection = SurveyCollection.load(
        collection = "openfisca", config_files_directory = config_files_directory)
    survey = survey_collection.get_survey("openfisca_data_{}".format(year))
    data_frame = survey.get_values(table = "input")
    # Legacy -> current column names.
    column_renames = {
        'alr': 'pensions_alimentaires_percues',
        'choi': 'cho',
        'cho_ld': 'chomeur_longue_duree',
        'fra': 'frais_reels',
        'rsti': 'rst',
        'sali': 'salaire_imposable',
        }
    data_frame.rename(columns = column_renames, inplace = True)
    data_frame.reset_index(inplace = True)
    return data_frame
| MalkIPP/openfisca-france-data | openfisca_france_data/input_data_builders/__init__.py | Python | agpl-3.0 | 1,803 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: drop Participant and move its user M2M onto ReportingPeriod."""
        # Deleting model 'Participant'
        db.delete_table(u'pa_participant')
        # Removing M2M table for field user on 'Participant'
        db.delete_table('pa_participant_user')
        # Adding M2M table for field user on 'ReportingPeriod'
        db.create_table(u'pa_reportingperiod_user', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('reportingperiod', models.ForeignKey(orm[u'pa.reportingperiod'], null=False)),
            ('user', models.ForeignKey(orm[u'pa.user'], null=False))
        ))
        db.create_unique(u'pa_reportingperiod_user', ['reportingperiod_id', 'user_id'])
    def backwards(self, orm):
        """Revert: recreate Participant (and its user M2M) and drop the
        ReportingPeriod user M2M table. Note: the data removed by
        forwards() is not restored."""
        # Adding model 'Participant'
        db.create_table(u'pa_participant', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('reporting_period', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['pa.ReportingPeriod'])),
        ))
        db.send_create_signal(u'pa', ['Participant'])
        # Adding M2M table for field user on 'Participant'
        db.create_table(u'pa_participant_user', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('participant', models.ForeignKey(orm[u'pa.participant'], null=False)),
            ('user', models.ForeignKey(orm[u'pa.user'], null=False))
        ))
        db.create_unique(u'pa_participant_user', ['participant_id', 'user_id'])
        # Removing M2M table for field user on 'ReportingPeriod'
        db.delete_table('pa_reportingperiod_user')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'pa.activity': {
'Meta': {'object_name': 'Activity'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.Category']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'pa.activityentry': {
'Meta': {'object_name': 'ActivityEntry'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.Activity']"}),
'day': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'hour': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.User']"})
},
u'pa.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'grouping': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '15'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reporting_period': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.ReportingPeriod']"})
},
u'pa.profession': {
'Meta': {'object_name': 'Profession'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
u'pa.reportingperiod': {
'Meta': {'object_name': 'ReportingPeriod'},
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'slots_per_hour': ('django.db.models.fields.IntegerField', [], {}),
'start_date': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pa.User']", 'symmetrical': 'False'})
},
u'pa.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.Profession']", 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
}
}
complete_apps = ['pa'] | Mathew/psychoanalysis | psychoanalysis/apps/pa/migrations/0002_auto__del_participant.py | Python | mit | 7,476 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
from .error_detail import ErrorDetail
from .error_response import ErrorResponse
from .error_response_wrapper import ErrorResponseWrapper, ErrorResponseWrapperException
from .storage_account_properties import StorageAccountProperties
from .container_registry_properties import ContainerRegistryProperties
from .service_principal_properties import ServicePrincipalProperties
from .kubernetes_cluster_properties import KubernetesClusterProperties
from .system_service import SystemService
from .acs_cluster_properties import AcsClusterProperties
from .app_insights_properties import AppInsightsProperties
from .ssl_configuration import SslConfiguration
from .service_auth_configuration import ServiceAuthConfiguration
from .auto_scale_configuration import AutoScaleConfiguration
from .global_service_configuration import GlobalServiceConfiguration
from .operationalization_cluster import OperationalizationCluster
from .operationalization_cluster_update_parameters import OperationalizationClusterUpdateParameters
from .storage_account_credentials import StorageAccountCredentials
from .container_registry_credentials import ContainerRegistryCredentials
from .container_service_credentials import ContainerServiceCredentials
from .app_insights_credentials import AppInsightsCredentials
from .operationalization_cluster_credentials import OperationalizationClusterCredentials
from .check_system_services_updates_available_response import CheckSystemServicesUpdatesAvailableResponse
from .update_system_services_response import UpdateSystemServicesResponse
from .resource_operation_display import ResourceOperationDisplay
from .resource_operation import ResourceOperation
from .available_operations import AvailableOperations
from .operationalization_cluster_paged import OperationalizationClusterPaged
from .machine_learning_compute_management_client_enums import (
OperationStatus,
ClusterType,
OrchestratorType,
SystemServiceType,
AgentVMSizeTypes,
Status,
UpdatesAvailable,
)
__all__ = [
'Resource',
'ErrorDetail',
'ErrorResponse',
'ErrorResponseWrapper', 'ErrorResponseWrapperException',
'StorageAccountProperties',
'ContainerRegistryProperties',
'ServicePrincipalProperties',
'KubernetesClusterProperties',
'SystemService',
'AcsClusterProperties',
'AppInsightsProperties',
'SslConfiguration',
'ServiceAuthConfiguration',
'AutoScaleConfiguration',
'GlobalServiceConfiguration',
'OperationalizationCluster',
'OperationalizationClusterUpdateParameters',
'StorageAccountCredentials',
'ContainerRegistryCredentials',
'ContainerServiceCredentials',
'AppInsightsCredentials',
'OperationalizationClusterCredentials',
'CheckSystemServicesUpdatesAvailableResponse',
'UpdateSystemServicesResponse',
'ResourceOperationDisplay',
'ResourceOperation',
'AvailableOperations',
'OperationalizationClusterPaged',
'OperationStatus',
'ClusterType',
'OrchestratorType',
'SystemServiceType',
'AgentVMSizeTypes',
'Status',
'UpdatesAvailable',
]
| lmazuel/azure-sdk-for-python | azure-mgmt-machinelearningcompute/azure/mgmt/machinelearningcompute/models/__init__.py | Python | mit | 3,596 |
import re
from datetime import datetime
from marshmallow import Schema, fields
from theatrics.scoring import get_default_score_functions
from theatrics.utils.handlers import with_params, json_response
from theatrics.utils.collections import get_all
from ..helpers import get_list
from .events import EXPANDABLE_RELATIONS
__all__ = ['search']
# Matches the gap between two adjacent <em> fragments that is made up only
# of spaces/punctuation, so they can be merged into one highlight.
BROKEN_UP_EM_REGEX = re.compile(r'<\/em>([ \-"“”«»\.,;:–—]*)<em>')
# Captures the text inside a single <em>...</em> highlight fragment.
EM_CONTENT_REGEX = re.compile(r'<em>(.*?)<\/em>')
class SearchParams(Schema):
    """Query-string parameters accepted by the search endpoint."""
    # The free-text query; the only required parameter.
    q = fields.String(required=True)
    location = fields.String()
    include_past = fields.Boolean()
@json_response
async def search(request):
    """HTTP handler: full-text search over events/places, as JSON."""
    results = await get_list(
        request,
        query=build_query_from_request(request),
        relations=EXPANDABLE_RELATIONS,
    )
    # Normalize the highlight fragments Elasticsearch attached to each hit.
    for item in results['items']:
        highlight = item.get('highlight')
        if highlight:
            item['highlight'] = cleanup_highlight(highlight)
    return results
@with_params(SearchParams)
def build_query_from_request(request, q, location=None, include_past=False):
    """Build the Elasticsearch query body for a text search.

    *q* is the user's query; *location* optionally restricts results, and
    unless *include_past* is set, documents whose `end` is in the past are
    filtered out (documents with no `end` field are kept).
    """
    # Exclude stub documents; entries lacking an is_stub field count as real.
    filters = [
        {
            'bool': {
                'should': [
                    {'term': {'is_stub': False}},
                    {'missing': {'field': 'is_stub'}},
                ]
            }
        }
    ]
    if location:
        filters.append({'term': {'location': location}})
    if not include_past:
        now = datetime.now().isoformat()
        filters.append({
            'bool': {
                'should': [
                    {'range': {'end': {'gte': now}}},
                    {'missing': {'field': 'end'}}
                ]
            }
        })
    return {
        'query': {
            'function_score': {
                'query': {'bool': {
                    # Phrase match with generous slop across name fields.
                    'must': [
                        {'multi_match': {
                            'query': q,
                            'type': 'phrase',
                            'fields': [
                                'name.ngram',
                                'name.text',
                                'full_name.ngram',
                                'full_name.text'
                            ],
                            'slop': 50
                        }},
                    ],
                    'filter': filters,
                    # Soft preferences: places and non-kids entries score higher.
                    'should': [
                        {'term': {'_type': 'place'}},
                        {'term': {'is_for_kids': False}},
                    ],
                }},
                'functions': get_default_score_functions()
            }
        },
        # Ask for highlights on the same fields used for matching.
        'highlight': {
            'fields': {
                'name.ngram': {},
                'name.text': {},
                'full_name.text': {},
                'full_name.ngram': {},
            }
        }
    }
def cleanup_highlight(highlight):
    """Reduce raw ES highlight data to one best fragment per logical field.

    For each of `name` and `full_name`, picks the sub-field variant with
    the most highlighted characters and merges adjacent <em> fragments.
    Fields with no matches are omitted from the result.
    """
    cleaned = {}
    for field, sources in (
        ('name', ('name.ngram', 'name.text')),
        ('full_name', ('full_name.text', 'full_name.ngram')),
    ):
        matches = get_all(highlight, sources)
        if matches:
            best = max(matches, key=count_highlighted_chars)
            cleaned[field] = combine_broken_up_ems(best)
    return cleaned
def count_highlighted_chars(string):
    """Total number of characters wrapped in <em>...</em> in *string*."""
    highlighted = EM_CONTENT_REGEX.findall(string)
    return sum(len(fragment) for fragment in highlighted)
def combine_broken_up_ems(string):
    """Merge adjacent <em> fragments separated only by spaces/punctuation.

    The separating characters (captured group 1) are kept inside the
    single resulting <em> span.
    """
    return BROKEN_UP_EM_REGEX.sub(r'\1', string)
| despawnerer/theatrics | api/theatrics/handlers/v1/search.py | Python | mit | 3,519 |
from django.contrib.syndication.feeds import Feed, FeedDoesNotExist
from brainstorm.models import Subsite
class SubsiteFeed(Feed):
    """Syndication feed of the latest ideas submitted to a Subsite."""
    title_template = 'brainstorm/feed_title.html'
    description_template = 'brainstorm/feed_description.html'
    def get_object(self, bits):
        # bits[0] is the subsite slug taken from the feed URL.
        return Subsite.objects.get(slug__exact=bits[0])
    def title(self, obj):
        return 'Latest ideas submitted for %s' % obj.name
    def description(self, obj):
        return 'Latest ideas submitted for %s' % obj.name
    def link(self, obj):
        if not obj:
            raise FeedDoesNotExist
        return obj.get_absolute_url()
    def items(self, obj):
        # Most recently submitted 30 ideas, newest first.
        return obj.ideas.order_by('-submit_date')[:30]
    def item_link(self, item):
        return item.get_absolute_url()
    def item_author_name(self, item):
        return item.user
    def item_pubdate(self, item):
        return item.submit_date
| gandalfar/django-brainstorm | brainstorm/feeds.py | Python | bsd-3-clause | 914 |
# coding: utf8
# Copyright 2012-2015 Vincent Jacques <vincent@vincent-jacques.net>
import unittest
import textwrap
import MockMockMock
from InteractiveCommandLine import Program, Command, Option
class CommandGrouping(unittest.TestCase):
    """Help output and dispatch for commands placed in (nested) groups."""
    def setUp(self):
        unittest.TestCase.setUp(self)
        # Mocked input/output streams let us assert on the exact help text.
        self.mocks = MockMockMock.Engine()
        self.input = self.mocks.create("input")
        self.output = self.mocks.create("output")
        self.p = Program("program", self.input.object, self.output.object)
        self.command = Command("command", "A command in a group")
        # Replace execute with a mock so tests can assert it was invoked.
        self.commandExecute = self.mocks.create("commandExecute")
        self.command.execute = self.commandExecute.object
    def tearDown(self):
        unittest.TestCase.tearDown(self)
        self.mocks.tearDown()
    def testDoc(self):
        """A grouped command is listed under its group heading in help."""
        g = self.p.createCommandGroup("Command group")
        g.addCommand(self.command)
        self.output.expect.write(textwrap.dedent("""\
            Usage:
              Command-line mode: program command [options]
              Interactive mode: program
            Commands:
              help Display this help message
              Command group:
                command A command in a group
            """))
        self.p._execute("help")
    def testExecute(self):
        """Grouping does not change how a command is dispatched."""
        g = self.p.createCommandGroup("Command group")
        g.addCommand(self.command)
        self.commandExecute.expect()
        self.p._execute("command")
    def testExecuteWithRecursiveGroups(self):
        """Dispatch also works through several levels of nested groups."""
        g1 = self.p.createCommandGroup("Command group 1")
        g2 = g1.createCommandGroup("Command group 2")
        g3 = g2.createCommandGroup("Command group 3")
        g3.addCommand(self.command)
        self.commandExecute.expect()
        self.p._execute("command")
    def testDocWithRecursiveGroups(self):
        """Nested groups are indented one level per group in help output."""
        g1 = self.p.createCommandGroup("Command group 1")
        g2 = g1.createCommandGroup("Command group 2")
        g3 = g2.createCommandGroup("Command group 3")
        g3.addCommand(self.command)
        self.output.expect.write(textwrap.dedent("""\
            Usage:
              Command-line mode: program command [options]
              Interactive mode: program
            Commands:
              help Display this help message
              Command group 1:
                Command group 2:
                  Command group 3:
                    command A command in a group
            """))
        self.p._execute("help")
class OptionGrouping(unittest.TestCase):
    """Help output for global options placed in (nested) option groups."""
    def setUp(self):
        unittest.TestCase.setUp(self)
        # Mocked input/output streams let us assert on the exact help text.
        self.mocks = MockMockMock.Engine()
        self.input = self.mocks.create("input")
        self.output = self.mocks.create("output")
        self.p = Program("program", self.input.object, self.output.object)
        self.option = Option("option", "An option in a group")
    def tearDown(self):
        unittest.TestCase.tearDown(self)
        self.mocks.tearDown()
    def testDoc(self):
        """A grouped option is listed under its group heading in help."""
        g = self.p.createOptionGroup("Option group")
        g.addOption(self.option)
        self.output.expect.write(textwrap.dedent("""\
            Usage:
              Command-line mode: program [global-options] command [options]
              Interactive mode: program [global-options]
            Global options:
              Option group:
                --option An option in a group
            Commands:
              help Display this help message
            """))
        self.p._execute("help")
    def testDocWithRecursiveGroups(self):
        """Nested option groups are indented one level per group in help."""
        g1 = self.p.createOptionGroup("Option group 1")
        g2 = g1.createOptionGroup("Option group 2")
        g3 = g2.createOptionGroup("Option group 3")
        g3.addOption(self.option)
        self.output.expect.write(textwrap.dedent("""\
            Usage:
              Command-line mode: program [global-options] command [options]
              Interactive mode: program [global-options]
            Global options:
              Option group 1:
                Option group 2:
                  Option group 3:
                    --option An option in a group
            Commands:
              help Display this help message
            """))
        self.p._execute("help")
| jacquev6/InteractiveCommandLine | InteractiveCommandLine/tests/Grouping.py | Python | mit | 4,252 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Modules (also called addons) management.
"""
import base64
import imp
import itertools
import logging
import os
import re
import sys
import threading
import zipfile
import zipimport
from cStringIO import StringIO
from os.path import join as opj
from zipfile import PyZipFile, ZIP_DEFLATED
import openerp
import openerp.modules.db
import openerp.modules.graph
import openerp.modules.migration
import openerp.netsvc as netsvc
import openerp.osv as osv
import openerp.pooler as pooler
import openerp.release as release
import openerp.tools as tools
import openerp.tools.osutil as osutil
from openerp import SUPERUSER_ID
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
from openerp.modules.module import \
get_modules, get_modules_with_version, \
load_information_from_description_file, \
get_module_resource, zip_directory, \
get_module_path, initialize_sys_path, \
load_openerp_module, init_module_models
_logger = logging.getLogger(__name__)
def open_openerp_namespace():
    """Alias every loaded ``openerp.*`` submodule at the top level.

    Only runs when the deprecation flag ``open_openerp_namespace`` is set;
    existing top-level modules are never overwritten (only names that are
    absent or mapped to None are filled in).
    """
    if not openerp.conf.deprecation.open_openerp_namespace:
        return
    for name, module in list(sys.modules.items()):
        if not name.startswith('openerp.'):
            continue
        short = name[8:]
        if sys.modules.get(short) is None:
            sys.modules[short] = module
def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=None, report=None):
    """Migrates+Updates or Installs all module nodes from ``graph``

    :param cr: database cursor used for all reads/writes
    :param graph: graph of module nodes to load
    :param status: status dictionary for keeping track of progress
    :param perform_checks: whether module descriptors should be checked for validity (prints warnings
                           for same cases, and even raise osv_except if certificate is invalid)
    :param skip_modules: optional list of module names (packages) which have previously been loaded and can be skipped
    :param report: assertion report object passed through to the XML importer
    :return: tuple ``(loaded_modules, processed_modules)`` -- all module
             names loaded, and the subset that was installed or updated
    """
    def process_sql_file(cr, fp):
        # Naive SQL runner: statements are split on ';' and
        # whitespace-normalized (no handling of ';' inside quoted strings).
        queries = fp.read().split(';')
        for query in queries:
            new_query = ' '.join(query.split())
            if new_query:
                cr.execute(new_query)

    # Shorthand loaders: each data "kind" delegates to _load_data below.
    load_init_xml = lambda *args: _load_data(cr, *args, kind='init_xml')
    load_update_xml = lambda *args: _load_data(cr, *args, kind='update_xml')
    load_demo_xml = lambda *args: _load_data(cr, *args, kind='demo_xml')
    load_data = lambda *args: _load_data(cr, *args, kind='data')
    load_demo = lambda *args: _load_data(cr, *args, kind='demo')

    def load_test(module_name, idref, mode):
        # Commit first so the tests see everything loaded so far; test
        # changes themselves are only kept when 'test_commit' is enabled.
        cr.commit()
        if not tools.config.options['test_disable']:
            try:
                threading.currentThread().testing = True
                _load_data(cr, module_name, idref, mode, 'test')
            except Exception, e:
                _logger.exception(
                    'Tests failed to execute in module %s', module_name)
            finally:
                threading.currentThread().testing = False
                if tools.config.options['test_commit']:
                    cr.commit()
                else:
                    cr.rollback()

    def _load_data(cr, module_name, idref, mode, kind):
        """
        kind: data, demo, test, init_xml, update_xml, demo_xml.

        noupdate is False, unless it is demo data or it is csv data in
        init mode.

        NOTE: reads ``package`` from the enclosing for-loop below; this
        helper is only called from inside that loop.
        """
        for filename in package.data[kind]:
            _logger.info("module %s: loading %s", module_name, filename)
            _, ext = os.path.splitext(filename)
            pathname = os.path.join(module_name, filename)
            fp = tools.file_open(pathname)
            noupdate = False
            if kind in ('demo', 'demo_xml'):
                noupdate = True
            try:
                # dispatch on file extension: csv / sql / yml / xml (default)
                if ext == '.csv':
                    if kind in ('init', 'init_xml'):
                        noupdate = True
                    tools.convert_csv_import(cr, module_name, pathname, fp.read(), idref, mode, noupdate)
                elif ext == '.sql':
                    process_sql_file(cr, fp)
                elif ext == '.yml':
                    tools.convert_yaml_import(cr, module_name, fp, idref, mode, noupdate)
                else:
                    tools.convert_xml_import(cr, module_name, fp, idref, mode, noupdate, report)
            finally:
                fp.close()

    if status is None:
        status = {}

    processed_modules = []
    loaded_modules = []
    pool = pooler.get_pool(cr.dbname)
    migrations = openerp.modules.migration.MigrationManager(cr, graph)
    _logger.debug('loading %d packages...', len(graph))

    # get db timestamp -- used at the end to mark res_log rows created
    # during this load as already read
    cr.execute("select (now() at time zone 'UTC')::timestamp")
    dt_before_load = cr.fetchone()[0]

    # register, instantiate and initialize models for each modules
    for index, package in enumerate(graph):
        module_name = package.name
        module_id = package.id

        if skip_modules and module_name in skip_modules:
            continue

        _logger.info('module %s: loading objects', package.name)
        migrations.migrate_module(package, 'pre')
        load_openerp_module(package.name)

        models = pool.load(cr, package)
        loaded_modules.append(package.name)
        if hasattr(package, 'init') or hasattr(package, 'update') or package.state in ('to install', 'to upgrade'):
            init_module_models(cr, package.name, models)

        status['progress'] = float(index) / len(graph)

        # Can't put this line out of the loop: ir.module.module will be
        # registered by init_module_models() above.
        modobj = pool.get('ir.module.module')

        if perform_checks:
            modobj.check(cr, 1, [module_id])

        # idref maps external ids resolved while importing this module's data
        idref = {}

        mode = 'update'
        if hasattr(package, 'init') or package.state == 'to install':
            mode = 'init'

        if hasattr(package, 'init') or hasattr(package, 'update') or package.state in ('to install', 'to upgrade'):
            if package.state=='to upgrade':
                # upgrading the module information
                modobj.write(cr, 1, [module_id], modobj.get_values_from_terp(package.data))
            load_init_xml(module_name, idref, mode)
            load_update_xml(module_name, idref, mode)
            load_data(module_name, idref, mode)
            if hasattr(package, 'demo') or (package.dbdemo and package.state != 'installed'):
                status['progress'] = (index + 0.75) / len(graph)
                load_demo_xml(module_name, idref, mode)
                load_demo(module_name, idref, mode)
                cr.execute('update ir_module_module set demo=%s where id=%s', (True, module_id))

                # launch tests only in demo mode, as most tests will depend
                # on demo data. Other tests can be added into the regular
                # 'data' section, but should probably not alter the data,
                # as there is no rollback.
                load_test(module_name, idref, mode)

            processed_modules.append(package.name)

            migrations.migrate_module(package, 'post')

            ver = release.major_version + '.' + package.data['version']
            # Set new modules and dependencies
            modobj.write(cr, 1, [module_id], {'state': 'installed', 'latest_version': ver})
            # Update translations for all installed languages
            #modobj.update_translations(cr, 1, [module_id], None)
            modobj.update_translations(cr, SUPERUSER_ID, [module_id], None, {'overwrite': openerp.tools.config["overwrite_existing_translations"]})

            package.state = 'installed'
            # consume the one-shot init/demo/update flags on the package
            for kind in ('init', 'demo', 'update'):
                if hasattr(package, kind):
                    delattr(package, kind)

        # commit once per module so a later failure does not roll back
        # modules that were already fully loaded
        cr.commit()

    # mark new res_log records as read
    cr.execute("update res_log set read=True where create_date >= %s", (dt_before_load,))

    cr.commit()

    return loaded_modules, processed_modules
def _check_module_names(cr, module_names):
    """Log a warning for any requested module name unknown to the database."""
    requested = set(module_names)
    # the dummy 'all' pseudo-module only makes sense together with 'base'
    if 'base' in requested and 'all' in requested:
        requested.remove('all')
    if not requested:
        return
    cr.execute("SELECT count(id) AS count FROM ir_module_module WHERE name in %s", (tuple(requested),))
    if cr.dictfetchone()['count'] == len(requested):
        return
    # find out what module name(s) are incorrect:
    cr.execute("SELECT name FROM ir_module_module")
    known_names = [row['name'] for row in cr.dictfetchall()]
    unknown_names = requested.difference(known_names)
    _logger.warning('invalid module names, ignored: %s', ", ".join(unknown_names))
def load_marked_modules(cr, graph, states, force, progressdict, report, loaded_modules):
    """Loads modules marked with ``states``, adding them to ``graph`` and
    ``loaded_modules`` and returns a list of installed/upgraded modules."""
    installed_or_upgraded = []
    while True:
        # pick up modules in the requested states that the graph lacks
        cr.execute("SELECT name from ir_module_module WHERE state IN %s", (tuple(states),))
        pending = [name for (name,) in cr.fetchall() if name not in graph]
        new_modules_in_graph = graph.add_modules(cr, pending, force)
        _logger.debug('Updating graph with %d more modules', len(pending))
        loaded, processed = load_module_graph(
            cr, graph, progressdict, report=report, skip_modules=loaded_modules)
        installed_or_upgraded.extend(processed)
        loaded_modules.extend(loaded)
        # fixpoint: stop once a pass processes nothing new
        if not processed:
            break
    return installed_or_upgraded
def load_modules(db, force_demo=False, status=None, update_module=False):
    """Load (and optionally install/upgrade/remove) all modules of database ``db``.

    :param db: database object providing ``cursor()``
    :param force_demo: force loading of demo data for every module
    :param status: progress dict passed down to :func:`load_module_graph`
    :param update_module: whether to process pending install/upgrade/removal
    """
    # TODO status['progress'] reporting is broken: used twice (and reset each
    # time to zero) in load_module_graph, not fine-grained enough.
    # It should be a method exposed by the pool.
    initialize_sys_path()
    open_openerp_namespace()
    force = []
    if force_demo:
        force.append('demo')
    cr = db.cursor()
    try:
        # brand-new database: create the base schema and schedule a full init
        if not openerp.modules.db.is_initialized(cr):
            _logger.info("init db")
            openerp.modules.db.initialize(cr)
            tools.config["init"]["all"] = 1
            tools.config['update']['all'] = 1
            if not tools.config['without_demo']:
                tools.config["demo"]['all'] = 1

        # This is a brand new pool, just created in pooler.get_db_and_pool()
        pool = pooler.get_pool(cr.dbname)

        report = tools.assertion_report()
        if 'base' in tools.config['update'] or 'all' in tools.config['update']:
            cr.execute("update ir_module_module set state=%s where name=%s and state=%s", ('to upgrade', 'base', 'installed'))

        # STEP 1: LOAD BASE (must be done before module dependencies can be computed for later steps)
        graph = openerp.modules.graph.Graph()
        graph.add_module(cr, 'base', force)
        if not graph:
            _logger.critical('module base cannot be loaded! (hint: verify addons-path)')
            raise osv.osv.except_osv(_('Could not load base module'), _('module base cannot be loaded! (hint: verify addons-path)'))

        # processed_modules: for cleanup step after install
        # loaded_modules: to avoid double loading
        loaded_modules, processed_modules = load_module_graph(cr, graph, status, perform_checks=(not update_module), report=report)

        if tools.config['load_language']:
            for lang in tools.config['load_language'].split(','):
                tools.load_language(cr, lang)

        # STEP 2: Mark other modules to be loaded/updated
        if update_module:
            modobj = pool.get('ir.module.module')
            if ('base' in tools.config['init']) or ('base' in tools.config['update']):
                _logger.info('updating modules list')
                modobj.update_list(cr, 1)

            _check_module_names(cr, itertools.chain(tools.config['init'].keys(), tools.config['update'].keys()))

            # schedule installation of modules requested via --init
            mods = [k for k in tools.config['init'] if tools.config['init'][k]]
            if mods:
                ids = modobj.search(cr, 1, ['&', ('state', '=', 'uninstalled'), ('name', 'in', mods)])
                if ids:
                    modobj.button_install(cr, 1, ids)

            # schedule upgrade of modules requested via --update
            mods = [k for k in tools.config['update'] if tools.config['update'][k]]
            if mods:
                ids = modobj.search(cr, 1, ['&', ('state', '=', 'installed'), ('name', 'in', mods)])
                if ids:
                    modobj.button_upgrade(cr, 1, ids)

            cr.execute("update ir_module_module set state=%s where name=%s", ('installed', 'base'))

        # STEP 3: Load marked modules (skipping base which was done in STEP 1)
        # IMPORTANT: this is done in two parts, first loading all installed or
        #            partially installed modules (i.e. installed/to upgrade), to
        #            offer a consistent system to the second part: installing
        #            newly selected modules.
        states_to_load = ['installed', 'to upgrade']
        processed = load_marked_modules(cr, graph, states_to_load, force, status, report, loaded_modules)
        processed_modules.extend(processed)
        if update_module:
            states_to_load = ['to install']
            processed = load_marked_modules(cr, graph, states_to_load, force, status, report, loaded_modules)
            processed_modules.extend(processed)

        # load custom models
        cr.execute('select model from ir_model where state=%s', ('manual',))
        for model in cr.dictfetchall():
            pool.get('ir.model').instanciate(cr, 1, model['model'], {})

        # STEP 4: Finish and cleanup
        if processed_modules:
            # warn about persistent models lacking any access rule
            cr.execute("""select model,name from ir_model where id NOT IN (select distinct model_id from ir_model_access)""")
            for (model, name) in cr.fetchall():
                model_obj = pool.get(model)
                if model_obj and not model_obj.is_transient():
                    #_logger.warning('Model %s (%s) has no access rules!', model, name)
                    _logger.warning('The model %s has no access rules, consider adding one. E.g. access_%s,access_%s,model_%s,,1,1,1,1',
                        model, model.replace('.', '_'), model.replace('.', '_'), model.replace('.', '_'))

            # Temporary warning while we remove access rights on osv_memory objects, as they have
            # been replaced by owner-only access rights
            cr.execute("""select distinct mod.model, mod.name from ir_model_access acc, ir_model mod where acc.model_id = mod.id""")
            for (model, name) in cr.fetchall():
                model_obj = pool.get(model)
                if model_obj and model_obj.is_transient():
                    _logger.warning('The transient model %s (%s) should not have explicit access rules!', model, name)

            cr.execute("SELECT model from ir_model")
            for (model,) in cr.fetchall():
                obj = pool.get(model)
                if obj:
                    obj._check_removed_columns(cr, log=True)
                else:
                    _logger.warning("Model %s is declared but cannot be loaded! (Perhaps a module was partially removed or renamed)", model)

            # Cleanup orphan records
            pool.get('ir.model.data')._process_end(cr, 1, processed_modules)

        # reset the one-shot init/demo/update requests
        for kind in ('init', 'demo', 'update'):
            tools.config[kind] = {}

        cr.commit()
        if update_module:
            # Remove records referenced from ir_model_data for modules to be
            # removed (and removed the references from ir_model_data).
            cr.execute("select id,name from ir_module_module where state=%s", ('to remove',))
            for mod_id, mod_name in cr.fetchall():
                cr.execute('select model,res_id from ir_model_data where noupdate=%s and module=%s order by id desc', (False, mod_name,))
                for rmod, rid in cr.fetchall():
                    uid = 1
                    rmod_module= pool.get(rmod)
                    if rmod_module:
                        # TODO group by module so that we can delete multiple ids in a call
                        rmod_module.unlink(cr, uid, [rid])
                    else:
                        _logger.error('Could not locate %s to remove res=%d' % (rmod,rid))
                cr.execute('delete from ir_model_data where noupdate=%s and module=%s', (False, mod_name,))
                cr.commit()

            # Remove menu items that are not referenced by any of other
            # (child) menu item, ir_values, or ir_model_data.
            # This code could be a method of ir_ui_menu.
            # TODO: remove menu without actions of children
            while True:
                cr.execute('''delete from
                        ir_ui_menu
                    where
                        (id not IN (select parent_id from ir_ui_menu where parent_id is not null))
                    and
                        (id not IN (select res_id from ir_values where model='ir.ui.menu'))
                    and
                        (id not IN (select res_id from ir_model_data where model='ir.ui.menu'))''')
                cr.commit()
                if not cr.rowcount:
                    break
                else:
                    _logger.info('removed %d unused menus', cr.rowcount)

            # Pretend that modules to be removed are actually uninstalled.
            cr.execute("update ir_module_module set state=%s where state=%s", ('uninstalled', 'to remove',))
            cr.commit()

        _logger.info('Modules loaded.')
        # STEP 7: call _register_hook on every model
        for model in pool.models.values():
            model._register_hook(cr, SUPERUSER_ID, [])

    finally:
        cr.close()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| iw3hxn/server | openerp/modules/loading.py | Python | agpl-3.0 | 19,023 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ray.core.src.local_scheduler.liblocal_scheduler_library import (
Task, LocalSchedulerClient, ObjectID, check_simple_value, task_from_string,
task_to_string)
from .local_scheduler_services import start_local_scheduler
__all__ = ["Task", "LocalSchedulerClient", "ObjectID", "check_simple_value",
"task_from_string", "task_to_string", "start_local_scheduler"]
| alanamarzoev/ray | python/ray/local_scheduler/__init__.py | Python | apache-2.0 | 492 |
#!/usr/bin/env python
# Author:
# Contact: grubert@users.sf.net
# Copyright: This module has been placed in the public domain.
"""
man.py
======
This module provides a simple command line interface that uses the
man page writer to output from ReStructuredText source.
"""
import locale
# Switch to the user's preferred locale so docutils can emit localized
# text; if the environment specifies an unknown/unsupported locale, keep
# the interpreter's default "C" locale instead of aborting.
# BUG FIX: the original used a bare `except:`, which also swallows
# SystemExit and KeyboardInterrupt; only locale.Error is expected here.
try:
    locale.setlocale(locale.LC_ALL, '')
except locale.Error:
    pass
from docutils.core import publish_cmdline, default_description
# $Id: manpage.py 5645 2008-09-21 08:25:13Z grubert $
# Author: Engelbert Gruber <grubert@users.sourceforge.net>
# Copyright: This module is put into the public domain.
"""
Simple man page writer for reStructuredText.
Man pages (short for "manual pages") contain system documentation on unix-like
systems. The pages are grouped in numbered sections:
1 executable programs and shell commands
2 system calls
3 library functions
4 special files
5 file formats
6 games
7 miscellaneous
8 system administration
Man pages are written in *troff*, a text file formatting system.
See http://www.tldp.org/HOWTO/Man-Page for a start.
Man pages have no subsection only parts.
Standard parts
NAME ,
SYNOPSIS ,
DESCRIPTION ,
OPTIONS ,
FILES ,
SEE ALSO ,
BUGS ,
and
AUTHOR .
A unix-like system keeps an index of the DESCRIPTIONs, which is accessible
by the command whatis or apropos.
"""
# NOTE: the macros only work when at line start, so try the rule
# start new lines in visit_ functions.
__docformat__ = 'reStructuredText'
import sys
import os
import time
import re
from types import ListType
import docutils
from docutils import nodes, utils, writers, languages
# roff indentation amounts used by the Translator's indent()/dedent() calls
FIELD_LIST_INDENT = 7
DEFINITION_LIST_INDENT = 7
OPTION_LIST_INDENT = 7
# NOTE: name keeps its historical misspelling of "blockquote"; other code
# in this module refers to it by this name.
BLOCKQOUTE_INDENT = 3.5

# Define two macros so man/roff can calculate the
# indent/unindent margins by itself
MACRO_DEF = (r"""
.nr rst2man-indent-level 0
.
.de1 rstReportMargin
\\$1 \\n[an-margin]
level \\n[rst2man-indent-level]
level magin: \\n[rst2man-indent\\n[rst2man-indent-level]]
-
\\n[rst2man-indent0]
\\n[rst2man-indent1]
\\n[rst2man-indent2]
..
.de1 INDENT
.\" .rstReportMargin pre:
. RS \\$1
. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
. nr rst2man-indent-level +1
.\" .rstReportMargin post:
..
.de UNINDENT
. RE
.\" indent \\n[an-margin]
.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
.nr rst2man-indent-level -1
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
""")
class Writer(writers.Writer):
    """Docutils Writer that renders a document as a man page via `Translator`."""

    # BUG FIX: the original read ``supported = ('manpage')`` -- parentheses
    # around a string do not make a tuple, so ``fmt in supported`` performed
    # a substring test (e.g. 'man' would match). A 1-tuple is intended.
    supported = ('manpage',)
    """Formats this writer supports."""

    output = None
    """Final translated form of `document`."""

    def __init__(self):
        writers.Writer.__init__(self)
        # subclasses may override the translator class
        self.translator_class = Translator

    def translate(self):
        """Walk the document tree with a Translator and store the result."""
        visitor = self.translator_class(self.document)
        self.document.walkabout(visitor)
        self.output = visitor.astext()
class Table:
    """Accumulate rows of cells and render them as a roff ``tbl`` block
    (``.TS`` ... ``.TE``), one horizontal rule above each row and one at
    the bottom."""

    def __init__(self):
        self._rows = []
        self._options = ['center', ]
        self._tab_char = '\t'
        self._coldefs = []

    def new_row(self):
        """Start a fresh (empty) row."""
        self._rows.append([])

    def append_cell(self, cell_lines):
        """cell_lines is an array of lines"""
        current_row = self._rows[-1]
        current_row.append(cell_lines)
        # grow the column definitions to match the widest row seen so far
        while len(self._coldefs) < len(current_row):
            self._coldefs.append('l')

    def astext(self):
        """Render the accumulated table as tbl markup."""
        parts = ['.TS\n',
                 ' '.join(self._options) + ';\n',
                 '|%s|.\n' % ('|'.join(self._coldefs))]
        for cells in self._rows:
            # horizontal rule above each row
            parts.append('_\n')
            # rows are rendered line-by-line; shorter cells are padded
            # with a single space
            heights = [len(cell) for cell in cells]
            row_height = max(heights) if heights else 0
            for line_no in range(row_height):
                columns = [cell[line_no] if line_no < len(cell) else " "
                           for cell in cells]
                parts.append(self._tab_char.join(columns) + '\n')
        parts.append('_\n')
        parts.append('.TE\n')
        return ''.join(parts)
class Translator(nodes.NodeVisitor):
""""""
words_and_spaces = re.compile(r'\S+| +|\n')
document_start = """Man page generated from reStructeredText."""
    def __init__(self, document):
        """Set up all per-document translation state."""
        nodes.NodeVisitor.__init__(self, document)
        self.settings = settings = document.settings
        lcode = settings.language_code
        self.language = languages.get_language(lcode)
        # output accumulators; the final text is head + body + foot
        self.head = []
        self.body = []
        self.foot = []
        self.section_level = 0
        self.context = []
        self.topic_class = ''
        self.colspecs = []
        self.compact_p = 1
        self.compact_simple = None
        # the list style "*" bullet or "#" numbered
        self._list_char = []
        # writing the header .TH and .SH NAME is postponed until after
        # docinfo.
        self._docinfo = {
                "title" : "", "subtitle" : "",
                "manual_section" : "", "manual_group" : "",
                "author" : "",
                "date" : "",
                "copyright" : "",
                "version" : "",
                    }
        self._in_docinfo = None
        self._active_table = None
        self._in_entry = None
        self.header_written = 0
        self.authors = []
        self.section_level = 0
        # stack of indentation amounts; bottom entry is the base level
        self._indent = [0]
        # central definition of simple processing rules
        # what to output on : visit, depart
        self.defs = {
                'indent' : ('.INDENT %.1f\n', '.UNINDENT\n'),
                'definition' : ('', ''),
                'definition_list' : ('', '.TP 0\n'),
                'definition_list_item' : ('\n.TP', ''),
                #field_list
                #field
                'field_name' : ('\n.TP\n.B ', '\n'),
                'field_body' : ('', '.RE\n', ),
                'literal' : ('\\fB', '\\fP'),
                'literal_block' : ('\n.nf\n', '\n.fi\n'),

                #option_list
                'option_list_item' : ('\n.TP', ''),
                #option_group, option
                'description' : ('\n', ''),

                'reference' : (r'\fI\%', r'\fP'),
                #'target' : (r'\fI\%', r'\fP'),
                'emphasis': ('\\fI', '\\fP'),
                'strong' : ('\\fB', '\\fP'),
                'term' : ('\n.B ', '\n'),
                'title_reference' : ('\\fI', '\\fP'),

                'problematic' : ('\n.nf\n', '\n.fi\n'),
                    }
# TODO dont specify the newline before a dot-command, but ensure
# check it is there.
def comment_begin(self, text):
"""Return commented version of the passed text WITHOUT end of line/comment."""
prefix = '\n.\\" '
return prefix+prefix.join(text.split('\n'))
def comment(self, text):
"""Return commented version of the passed text."""
return self.comment_begin(text)+'\n'
def astext(self):
"""Return the final formatted document as a string."""
if not self.header_written:
# ensure we get a ".TH" as viewers require it.
self.head.append(self.header())
return ''.join(self.head + self.body + self.foot)
def visit_Text(self, node):
text = node.astext().replace('-','\-')
text = text.replace("'","\\'")
self.body.append(text)
    def depart_Text(self, node):
        # nothing to emit when leaving a text node
        pass
    def list_start(self, node):
        """Push an enumerator for a (possibly nested) list and indent."""
        class enum_char:
            # (marker width, first marker) per docutils enumeration type
            enum_style = {
                    'arabic' : (3,1),
                    'loweralpha' : (3,'a'),
                    'upperalpha' : (3,'A'),
                    'lowerroman' : (5,'i'),
                    'upperroman' : (5,'I'),
                    'bullet' : (2,'\\(bu'),
                    'emdash' : (2,'\\(em'),
                     }

            def __init__(self, style):
                if style == 'arabic':
                    # honor an explicit 'start' attribute on the node
                    if node.has_key('start'):
                        start = node['start']
                    else:
                        start = 1
                    # width grows with the number of digits of the last item
                    self._style = (
                            len(str(len(node.children)))+2,
                            start )
                    # BUG: fix start for alpha
                else:
                    self._style = self.enum_style[style]
                self._cnt = -1

            def next(self):
                self._cnt += 1
                # BUG add prefix postfix
                try:
                    # numeric markers: arithmetic works directly
                    return "%d." % (self._style[1] + self._cnt)
                except:
                    # non-numeric marker: roff escapes ('\(bu', '\(em')
                    # are emitted verbatim, letters are advanced by ord()
                    if self._style[1][0] == '\\':
                        return self._style[1]
                    # BUG romans dont work
                    # BUG alpha only a...z
                    return "%c." % (ord(self._style[1])+self._cnt)

            def get_width(self):
                return self._style[0]

            def __repr__(self):
                return 'enum_style%r' % list(self._style)

        if node.has_key('enumtype'):
            self._list_char.append(enum_char(node['enumtype']))
        else:
            # bullet_list nodes carry no 'enumtype'
            self._list_char.append(enum_char('bullet'))
        if len(self._list_char) > 1:
            # indent nested lists
            # BUG indentation depends on indentation of parent list.
            self.indent(self._list_char[-2].get_width())
        else:
            self.indent(self._list_char[-1].get_width())
    def list_end(self):
        # close the innermost list: drop its indent and its enumerator
        self.dedent()
        self._list_char.pop()
def header(self):
tmpl = (".TH %(title)s %(manual_section)s"
" \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n"
".SH NAME\n"
"%(title)s \- %(subtitle)s\n")
return tmpl % self._docinfo
def append_header(self):
"""append header with .TH and .SH NAME"""
# TODO before everything
# .TH title section date source manual
if self.header_written:
return
self.body.append(self.header())
self.body.append(MACRO_DEF)
self.header_written = 1
def visit_address(self, node):
raise NotImplementedError, node.astext()
self.visit_docinfo_item(node, 'address', meta=None)
def depart_address(self, node):
self.depart_docinfo_item()
def visit_admonition(self, node, name):
raise NotImplementedError, node.astext()
self.body.append(self.starttag(node, 'div', CLASS=name))
self.body.append('<p class="admonition-title">'
+ self.language.labels[name] + '</p>\n')
def depart_admonition(self):
raise NotImplementedError, node.astext()
self.body.append('</div>\n')
def visit_attention(self, node):
self.visit_admonition(node, 'attention')
def depart_attention(self, node):
self.depart_admonition()
def visit_author(self, node):
self._docinfo['author'] = node.astext()
raise nodes.SkipNode
def depart_author(self, node):
pass
def visit_authors(self, node):
self.body.append(self.comment('visit_authors'))
def depart_authors(self, node):
self.body.append(self.comment('depart_authors'))
def visit_block_quote(self, node):
#self.body.append(self.comment('visit_block_quote'))
# BUG/HACK: indent alway uses the _last_ indention,
# thus we need two of them.
self.indent(BLOCKQOUTE_INDENT)
self.indent(0)
def depart_block_quote(self, node):
#self.body.append(self.comment('depart_block_quote'))
self.dedent()
self.dedent()
def visit_bullet_list(self, node):
self.list_start(node)
def depart_bullet_list(self, node):
self.list_end()
def visit_caption(self, node):
raise NotImplementedError, node.astext()
self.body.append(self.starttag(node, 'p', '', CLASS='caption'))
def depart_caption(self, node):
raise NotImplementedError, node.astext()
self.body.append('</p>\n')
def visit_caution(self, node):
self.visit_admonition(node, 'caution')
def depart_caution(self, node):
self.depart_admonition()
def visit_citation(self, node):
raise NotImplementedError, node.astext()
self.body.append(self.starttag(node, 'table', CLASS='citation',
frame="void", rules="none"))
self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
'<col />\n'
'<tbody valign="top">\n'
'<tr>')
self.footnote_backrefs(node)
def depart_citation(self, node):
raise NotImplementedError, node.astext()
self.body.append('</td></tr>\n'
'</tbody>\n</table>\n')
def visit_citation_reference(self, node):
raise NotImplementedError, node.astext()
href = ''
if node.has_key('refid'):
href = '#' + node['refid']
elif node.has_key('refname'):
href = '#' + self.document.nameids[node['refname']]
self.body.append(self.starttag(node, 'a', '[', href=href,
CLASS='citation-reference'))
def depart_citation_reference(self, node):
raise NotImplementedError, node.astext()
self.body.append(']</a>')
def visit_classifier(self, node):
raise NotImplementedError, node.astext()
self.body.append(' <span class="classifier-delimiter">:</span> ')
self.body.append(self.starttag(node, 'span', '', CLASS='classifier'))
def depart_classifier(self, node):
raise NotImplementedError, node.astext()
self.body.append('</span>')
def visit_colspec(self, node):
self.colspecs.append(node)
def depart_colspec(self, node):
pass
def write_colspecs(self):
self.body.append("%s.\n" % ('L '*len(self.colspecs)))
def visit_comment(self, node,
sub=re.compile('-(?=-)').sub):
self.body.append(self.comment(node.astext()))
raise nodes.SkipNode
def visit_contact(self, node):
self.visit_docinfo_item(node, 'contact')
def depart_contact(self, node):
self.depart_docinfo_item()
def visit_copyright(self, node):
self._docinfo['copyright'] = node.astext()
raise nodes.SkipNode
def visit_danger(self, node):
self.visit_admonition(node, 'danger')
def depart_danger(self, node):
self.depart_admonition()
def visit_date(self, node):
self._docinfo['date'] = node.astext()
raise nodes.SkipNode
def visit_decoration(self, node):
pass
def depart_decoration(self, node):
pass
def visit_definition(self, node):
self.body.append(self.defs['definition'][0])
def depart_definition(self, node):
self.body.append(self.defs['definition'][1])
def visit_definition_list(self, node):
self.indent(DEFINITION_LIST_INDENT)
def depart_definition_list(self, node):
self.dedent()
def visit_definition_list_item(self, node):
self.body.append(self.defs['definition_list_item'][0])
def depart_definition_list_item(self, node):
self.body.append(self.defs['definition_list_item'][1])
def visit_description(self, node):
self.body.append(self.defs['description'][0])
def depart_description(self, node):
self.body.append(self.defs['description'][1])
def visit_docinfo(self, node):
self._in_docinfo = 1
def depart_docinfo(self, node):
self._in_docinfo = None
# TODO nothing should be written before this
self.append_header()
def visit_docinfo_item(self, node, name):
self.body.append(self.comment('%s: ' % self.language.labels[name]))
if len(node):
return
if isinstance(node[0], nodes.Element):
node[0].set_class('first')
if isinstance(node[0], nodes.Element):
node[-1].set_class('last')
def depart_docinfo_item(self):
pass
def visit_doctest_block(self, node):
raise NotImplementedError, node.astext()
self.body.append(self.starttag(node, 'pre', CLASS='doctest-block'))
def depart_doctest_block(self, node):
raise NotImplementedError, node.astext()
self.body.append('\n</pre>\n')
def visit_document(self, node):
self.body.append(self.comment(self.document_start).lstrip())
# writing header is postboned
self.header_written = 0
def depart_document(self, node):
if self._docinfo['author']:
self.body.append('\n.SH AUTHOR\n%s\n'
% self._docinfo['author'])
if self._docinfo['copyright']:
self.body.append('\n.SH COPYRIGHT\n%s\n'
% self._docinfo['copyright'])
self.body.append(
self.comment(
'Generated by docutils manpage writer on %s.\n'
% (time.strftime('%Y-%m-%d %H:%M')) ) )
def visit_emphasis(self, node):
self.body.append(self.defs['emphasis'][0])
def depart_emphasis(self, node):
self.body.append(self.defs['emphasis'][1])
def visit_entry(self, node):
# BUG entries have to be on one line separated by tab force it.
self.context.append(len(self.body))
self._in_entry = 1
def depart_entry(self, node):
start = self.context.pop()
self._active_table.append_cell(self.body[start:])
del self.body[start:]
self._in_entry = 0
def visit_enumerated_list(self, node):
self.list_start(node)
def depart_enumerated_list(self, node):
self.list_end()
def visit_error(self, node):
self.visit_admonition(node, 'error')
def depart_error(self, node):
self.depart_admonition()
def visit_field(self, node):
#self.body.append(self.comment('visit_field'))
pass
def depart_field(self, node):
#self.body.append(self.comment('depart_field'))
pass
def visit_field_body(self, node):
#self.body.append(self.comment('visit_field_body'))
if self._in_docinfo:
self._docinfo[
self._field_name.lower().replace(" ","_")] = node.astext()
raise nodes.SkipNode
def depart_field_body(self, node):
pass
def visit_field_list(self, node):
self.indent(FIELD_LIST_INDENT)
def depart_field_list(self, node):
self.dedent('depart_field_list')
def visit_field_name(self, node):
if self._in_docinfo:
self._field_name = node.astext()
raise nodes.SkipNode
else:
self.body.append(self.defs['field_name'][0])
def depart_field_name(self, node):
self.body.append(self.defs['field_name'][1])
def visit_figure(self, node):
raise NotImplementedError, node.astext()
def depart_figure(self, node):
raise NotImplementedError, node.astext()
def visit_footer(self, node):
raise NotImplementedError, node.astext()
def depart_footer(self, node):
raise NotImplementedError, node.astext()
start = self.context.pop()
footer = (['<hr class="footer"/>\n',
self.starttag(node, 'div', CLASS='footer')]
+ self.body[start:] + ['</div>\n'])
self.body_suffix[:0] = footer
del self.body[start:]
def visit_footnote(self, node):
raise NotImplementedError, node.astext()
self.body.append(self.starttag(node, 'table', CLASS='footnote',
frame="void", rules="none"))
self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
'<tbody valign="top">\n'
'<tr>')
self.footnote_backrefs(node)
def footnote_backrefs(self, node):
raise NotImplementedError, node.astext()
if self.settings.footnote_backlinks and node.hasattr('backrefs'):
backrefs = node['backrefs']
if len(backrefs) == 1:
self.context.append('')
self.context.append('<a class="fn-backref" href="#%s" '
'name="%s">' % (backrefs[0], node['id']))
else:
i = 1
backlinks = []
for backref in backrefs:
backlinks.append('<a class="fn-backref" href="#%s">%s</a>'
% (backref, i))
i += 1
self.context.append('<em>(%s)</em> ' % ', '.join(backlinks))
self.context.append('<a name="%s">' % node['id'])
else:
self.context.append('')
self.context.append('<a name="%s">' % node['id'])
def depart_footnote(self, node):
raise NotImplementedError, node.astext()
self.body.append('</td></tr>\n'
'</tbody>\n</table>\n')
def visit_footnote_reference(self, node):
raise NotImplementedError, node.astext()
href = ''
if node.has_key('refid'):
href = '#' + node['refid']
elif node.has_key('refname'):
href = '#' + self.document.nameids[node['refname']]
format = self.settings.footnote_references
if format == 'brackets':
suffix = '['
self.context.append(']')
elif format == 'superscript':
suffix = '<sup>'
self.context.append('</sup>')
else: # shouldn't happen
suffix = '???'
self.content.append('???')
self.body.append(self.starttag(node, 'a', suffix, href=href,
CLASS='footnote-reference'))
def depart_footnote_reference(self, node):
raise NotImplementedError, node.astext()
self.body.append(self.context.pop() + '</a>')
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_header(self, node):
raise NotImplementedError, node.astext()
self.context.append(len(self.body))
def depart_header(self, node):
raise NotImplementedError, node.astext()
start = self.context.pop()
self.body_prefix.append(self.starttag(node, 'div', CLASS='header'))
self.body_prefix.extend(self.body[start:])
self.body_prefix.append('<hr />\n</div>\n')
del self.body[start:]
def visit_hint(self, node):
self.visit_admonition(node, 'hint')
def depart_hint(self, node):
self.depart_admonition()
    def visit_image(self, node):
        # Images cannot appear in a man page; dead HTML-writer code follows.
        raise NotImplementedError, node.astext()
        atts = node.attributes.copy()
        atts['src'] = atts['uri']
        del atts['uri']
        if not atts.has_key('alt'):
            atts['alt'] = atts['src']
        if isinstance(node.parent, nodes.TextElement):
            self.context.append('')
        else:
            self.body.append('<p>')
            self.context.append('</p>\n')
        self.body.append(self.emptytag(node, 'img', '', **atts))
    def depart_image(self, node):
        raise NotImplementedError, node.astext()
        self.body.append(self.context.pop())
    def visit_important(self, node):
        self.visit_admonition(node, 'important')
    def depart_important(self, node):
        self.depart_admonition()
    def visit_label(self, node):
        # Footnote/citation labels are unsupported.
        raise NotImplementedError, node.astext()
        self.body.append(self.starttag(node, 'td', '%s[' % self.context.pop(),
                                       CLASS='label'))
    def depart_label(self, node):
        raise NotImplementedError, node.astext()
        self.body.append(']</a></td><td>%s' % self.context.pop())
    def visit_legend(self, node):
        # Figure legends are unsupported.
        raise NotImplementedError, node.astext()
        self.body.append(self.starttag(node, 'div', CLASS='legend'))
    def depart_legend(self, node):
        raise NotImplementedError, node.astext()
        self.body.append('</div>\n')
    def visit_line_block(self, node):
        # Line blocks are delimited with blank lines in roff output.
        self.body.append('\n')
    def depart_line_block(self, node):
        self.body.append('\n')
    def visit_line(self, node):
        pass
    def depart_line(self, node):
        # ".br" forces a line break after each line of the block.
        self.body.append('\n.br\n')
    def visit_list_item(self, node):
        # man 7 man argues to use ".IP" instead of ".TP"
        # NOTE: `.next()` is a Python-2 iterator call on the list-enumerator
        # object tracking bullet/number characters per nesting level.
        self.body.append('\n.IP %s %d\n' % (
                self._list_char[-1].next(),
                self._list_char[-1].get_width(),) )
    def depart_list_item(self, node):
        pass
    def visit_literal(self, node):
        # Inline literals toggle the font escape stored in self.defs.
        self.body.append(self.defs['literal'][0])
    def depart_literal(self, node):
        self.body.append(self.defs['literal'][1])
    def visit_literal_block(self, node):
        # Literal blocks use the indented no-fill macros from self.defs.
        self.body.append(self.defs['literal_block'][0])
    def depart_literal_block(self, node):
        self.body.append(self.defs['literal_block'][1])
    def visit_meta(self, node):
        # <meta> has no roff equivalent; dead HTML-writer code follows.
        raise NotImplementedError, node.astext()
        self.head.append(self.emptytag(node, 'meta', **node.attributes))
    def depart_meta(self, node):
        pass
    def visit_note(self, node):
        self.visit_admonition(node, 'note')
    def depart_note(self, node):
        self.depart_admonition()
    def indent(self, by=0.5):
        """Open a roff indent region (.RS); `by` is pushed for the nested level."""
        # if we are in a section ".SH" there already is a .RS
        #self.body.append('\n[[debug: listchar: %r]]\n' % map(repr, self._list_char))
        #self.body.append('\n[[debug: indent %r]]\n' % self._indent)
        # The emitted width is the *current* level's step, not `by`;
        # `by` only becomes the step for children pushed after it.
        step = self._indent[-1]
        self._indent.append(by)
        self.body.append(self.defs['indent'][0] % step)
    def dedent(self, name=''):
        """Close the innermost indent region (.RE). `name` is debug-only."""
        #self.body.append('\n[[debug: dedent %s %r]]\n' % (name, self._indent))
        self._indent.pop()
        self.body.append(self.defs['indent'][1])
    def visit_option_list(self, node):
        # Option lists get their own, wider indent step.
        self.indent(OPTION_LIST_INDENT)
    def depart_option_list(self, node):
        self.dedent()
    def visit_option_list_item(self, node):
        # one item of the list
        self.body.append(self.defs['option_list_item'][0])
    def depart_option_list_item(self, node):
        self.body.append(self.defs['option_list_item'][1])
    def visit_option_group(self, node):
        # as one option could have several forms it is a group
        # options without parameter bold only, .B, -v
        # options with parameter bold italic, .BI, -f file
        # we do not know if .B or .BI
        # Three context slots are pushed: macro guess, splice position,
        # and a counter of options seen so far in this group.
        self.context.append('.B')           # blind guess
        self.context.append(len(self.body)) # to be able to insert later
        self.context.append(0)              # option counter
    def depart_option_group(self, node):
        # Pop the three slots in reverse and prepend the (possibly
        # upgraded) macro to the buffered option text.
        self.context.pop()  # the counter
        start_position = self.context.pop()
        text = self.body[start_position:]
        del self.body[start_position:]
        self.body.append('\n%s%s' % (self.context.pop(), ''.join(text)))
    def visit_option(self, node):
        # each form of the option will be presented separately
        # context[-1] is the per-group option counter pushed by
        # visit_option_group; context[-3] is the macro guess (.B/.BI).
        if self.context[-1]>0:
            self.body.append(' ,')
            if self.context[-3] == '.BI':
                self.body.append('\\')
            self.body.append(' ')
    def depart_option(self, node):
        self.context[-1] += 1
    def visit_option_string(self, node):
        # do not know if .B or .BI
        pass
    def depart_option_string(self, node):
        pass
    def visit_option_argument(self, node):
        # An argument upgrades the group's macro to bold/italic alternation.
        self.context[-3] = '.BI' # bold/italic alternate
        if node['delimiter'] != ' ':
            # NOTE(review): '\\fn' is not a standard groff font escape;
            # presumably '\\fB' or '\\fn'-like font switch was intended —
            # verify against groff_man(7) before changing.
            self.body.append('\\fn%s ' % node['delimiter'] )
        elif self.body[len(self.body)-1].endswith('='):
            # a blank only means no blank in output, just changing font
            self.body.append(' ')
        else:
            # backslash blank blank
            self.body.append('\\  ')
    def depart_option_argument(self, node):
        pass
    def visit_organization(self, node):
        # Unsupported docinfo field; dead code after the raise.
        raise NotImplementedError, node.astext()
        self.visit_docinfo_item(node, 'organization')
    def depart_organization(self, node):
        raise NotImplementedError, node.astext()
        self.depart_docinfo_item()
    def visit_paragraph(self, node):
        # BUG every but the first paragraph in a list must be intended
        # TODO .PP or new line
        return
    def depart_paragraph(self, node):
        # TODO .PP or an empty line
        # Inside a table entry (_in_entry) no paragraph break is emitted.
        if not self._in_entry:
            self.body.append('\n\n')
    def visit_problematic(self, node):
        self.body.append(self.defs['problematic'][0])
    def depart_problematic(self, node):
        self.body.append(self.defs['problematic'][1])
    def visit_raw(self, node):
        # Pass raw content through only when explicitly targeted at
        # man pages; SkipNode is raised unconditionally so the node's
        # children are never visited.
        if node.get('format') == 'manpage':
            self.body.append(node.astext())
        # Keep non-manpage raw text out of output:
        raise nodes.SkipNode
    def visit_reference(self, node):
        """E.g. link or email address."""
        self.body.append(self.defs['reference'][0])
    def depart_reference(self, node):
        self.body.append(self.defs['reference'][1])
    def visit_revision(self, node):
        self.visit_docinfo_item(node, 'revision')
    def depart_revision(self, node):
        self.depart_docinfo_item()
    def visit_row(self, node):
        self._active_table.new_row()
    def depart_row(self, node):
        pass
    def visit_section(self, node):
        # Track nesting depth; visit_title maps it to .SH / .SS.
        self.section_level += 1
    def depart_section(self, node):
        self.section_level -= 1
    def visit_status(self, node):
        # Unsupported docinfo field; dead code after the raise.
        raise NotImplementedError, node.astext()
        self.visit_docinfo_item(node, 'status', meta=None)
    def depart_status(self, node):
        self.depart_docinfo_item()
def visit_strong(self, node):
self.body.append(self.defs['strong'][1])
    def depart_strong(self, node):
        # Close bold markup with the second element of the escape pair.
        self.body.append(self.defs['strong'][1])
    def visit_substitution_definition(self, node):
        """Internal only."""
        raise nodes.SkipNode
    def visit_substitution_reference(self, node):
        self.unimplemented_visit(node)
    def visit_subtitle(self, node):
        # Subtitle is captured for the .TH header, not rendered inline.
        self._docinfo["subtitle"] = node.astext()
        raise nodes.SkipNode
def visit_system_message(self, node):
# TODO add report_level
#if node['level'] < self.document.reporter['writer'].report_level:
# Level is too low to display:
# raise nodes.SkipNode
self.body.append('\.SH system-message\n')
attr = {}
backref_text = ''
if node.hasattr('id'):
attr['name'] = node['id']
if node.hasattr('line'):
line = ', line %s' % node['line']
else:
line = ''
self.body.append('System Message: %s/%s (%s:%s)\n'
% (node['type'], node['level'], node['source'], line))
    def depart_system_message(self, node):
        self.body.append('\n')
    def visit_table(self, node):
        # A Table helper object accumulates rows; rendered on depart.
        self._active_table = Table()
    def depart_table(self, node):
        self.body.append(self._active_table.astext())
        self._active_table = None
    def visit_target(self, node):
        # Hyperlink targets are invisible in man output; leave a comment.
        self.body.append(self.comment('visit_target'))
        #self.body.append(self.defs['target'][0])
        #self.body.append(node['refuri'])
    def depart_target(self, node):
        self.body.append(self.comment('depart_target'))
        #self.body.append(self.defs['target'][1])
    def visit_tbody(self, node):
        pass
    def depart_tbody(self, node):
        pass
    def visit_term(self, node):
        self.body.append(self.defs['term'][0])
    def depart_term(self, node):
        self.body.append(self.defs['term'][1])
    def visit_tgroup(self, node):
        pass
    def depart_tgroup(self, node):
        pass
    def visit_thead(self, node):
        # Table heads are unsupported; dead HTML-writer code follows.
        raise NotImplementedError, node.astext()
        self.write_colspecs()
        self.body.append(self.context.pop()) # '</colgroup>\n'
        # There may or may not be a <thead>; this is for <tbody> to use:
        self.context.append('')
        self.body.append(self.starttag(node, 'thead', valign='bottom'))
    def depart_thead(self, node):
        raise NotImplementedError, node.astext()
        self.body.append('</thead>\n')
    def visit_tip(self, node):
        self.visit_admonition(node, 'tip')
    def depart_tip(self, node):
        self.depart_admonition()
    def visit_title(self, node):
        # Titles render differently depending on the parent node:
        # topic/sidebar/admonition titles become comments, the document
        # title feeds the .TH header, level-1 sections get .SH,
        # deeper sections get .SS.
        if isinstance(node.parent, nodes.topic):
            self.body.append(self.comment('topic-title'))
        elif isinstance(node.parent, nodes.sidebar):
            self.body.append(self.comment('sidebar-title'))
        elif isinstance(node.parent, nodes.admonition):
            self.body.append(self.comment('admonition-title'))
        elif self.section_level == 0:
            # document title for .TH
            self._docinfo['title'] = node.astext()
            raise nodes.SkipNode
        elif self.section_level == 1:
            self.body.append('\n.SH ')
        else:
            self.body.append('\n.SS ')
    def depart_title(self, node):
        self.body.append('\n')
    def visit_title_reference(self, node):
        """inline citation reference"""
        self.body.append(self.defs['title_reference'][0])
    def depart_title_reference(self, node):
        self.body.append(self.defs['title_reference'][1])
    def visit_topic(self, node):
        # Topics are dropped from man output; only a comment remains.
        self.body.append(self.comment('topic: '+node.astext()))
        raise nodes.SkipNode
        ##self.topic_class = node.get('class')
    def depart_topic(self, node):
        ##self.topic_class = ''
        pass
    def visit_transition(self, node):
        # .PP      Begin a new paragraph and reset prevailing indent.
        # .sp N    leaves N lines of blank space.
        # .ce      centers the next line
        self.body.append('\n.sp\n.ce\n----\n')
    def depart_transition(self, node):
        self.body.append('\n.ce 0\n.sp\n')
    def visit_version(self, node):
        # Captured for the .TH header; not rendered inline.
        self._docinfo["version"] = node.astext()
        raise nodes.SkipNode
    def visit_warning(self, node):
        self.visit_admonition(node, 'warning')
    def depart_warning(self, node):
        self.depart_admonition()
    def unimplemented_visit(self, node):
        raise NotImplementedError('visiting unimplemented node type: %s'
                                  % node.__class__.__name__)
# vim: set et ts=4 ai :
# Command-line entry point: run the docutils publisher with this man writer.
description = ("Generates plain man. " + default_description)
publish_cmdline(writer=Writer(), description=description)
| leigh123linux/nemo-dropbox | rst2man.py | Python | gpl-3.0 | 34,803 |
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
import numpy as np
def get_demo_image():
    """Load matplotlib's bundled 15x15 bivariate-normal sample array.

    Returns the array together with a fixed (left, right, bottom, top)
    extent tuple for imshow.
    """
    from matplotlib.cbook import get_sample_data
    sample_path = get_sample_data("axes_grid/bivariate_normal.npy",
                                  asfileobj=False)
    grid_data = np.load(sample_path)
    # z is a numpy array of 15x15
    return grid_data, (-3, 4, -4, 3)
def add_inner_title(ax, title, loc, size=None, **kwargs):
    """Place `title` inside axes `ax` at AnchoredText location `loc`.

    `size` is a font-properties dict (defaults to the legend font size);
    a white stroke outline keeps the text readable over the image.
    Returns the AnchoredText artist.
    """
    from matplotlib.offsetbox import AnchoredText
    from matplotlib.patheffects import withStroke
    if size is None:
        size = dict(size=plt.rcParams['legend.fontsize'])
    at = AnchoredText(title, loc=loc, prop=size,
                      pad=0., borderpad=0.5,
                      frameon=False, **kwargs)
    ax.add_artist(at)
    at.txt._text.set_path_effects([withStroke(foreground="w", linewidth=3)])
    return at
if 1:
    F = plt.figure(1, (6, 6))
    F.clf()
    # prepare images: split the sample array into three row-interleaved
    # slices and shrink the x-extent accordingly.
    Z, extent = get_demo_image()
    ZS = [Z[i::3, :] for i in range(3)]
    extent = extent[0], extent[1]/3., extent[2], extent[3]
    # demo 1 : colorbar at each axes
    grid = ImageGrid(F, 211, # similar to subplot(111)
                     nrows_ncols=(1, 3),
                     direction="row",
                     axes_pad=0.05,
                     add_all=True,
                     label_mode="1",
                     share_all=True,
                     cbar_location="top",
                     cbar_mode="each",
                     cbar_size="7%",
                     cbar_pad="1%",
                     )
    for ax, z in zip(grid, ZS):
        im = ax.imshow(
            z, origin="lower", extent=extent, interpolation="nearest")
        ax.cax.colorbar(im)
    # Overlay a semi-transparent inner title on each panel.
    for ax, im_title in zip(grid, ["Image 1", "Image 2", "Image 3"]):
        t = add_inner_title(ax, im_title, loc=3)
        t.patch.set_alpha(0.5)
    for ax, z in zip(grid, ZS):
        ax.cax.toggle_label(True)
        #axis = ax.cax.axis[ax.cax.orientation]
        #axis.label.set_text("counts s$^{-1}$")
        #axis.label.set_size(10)
        #axis.major_ticklabels.set_size(6)
    # changing the colorbar ticks
    grid[1].cax.set_xticks([-1, 0, 1])
    grid[2].cax.set_xticks([-1, 0, 1])
    grid[0].set_xticks([-2, 0])
    grid[0].set_yticks([-2, 0, 2])
    # demo 2 : shared colorbar
    grid2 = ImageGrid(F, 212,
                      nrows_ncols=(1, 3),
                      direction="row",
                      axes_pad=0.05,
                      add_all=True,
                      label_mode="1",
                      share_all=True,
                      cbar_location="right",
                      cbar_mode="single",
                      cbar_size="10%",
                      cbar_pad=0.05,
                      )
    grid2[0].set_xlabel("X")
    grid2[0].set_ylabel("Y")
    # Normalize all panels to a common color scale for the shared bar.
    vmax, vmin = np.max(ZS), np.min(ZS)
    import matplotlib.colors
    norm = matplotlib.colors.Normalize(vmax=vmax, vmin=vmin)
    for ax, z in zip(grid2, ZS):
        im = ax.imshow(z, norm=norm,
                       origin="lower", extent=extent,
                       interpolation="nearest")
        # With cbar_mode="single", cax attribute of all axes are identical.
        ax.cax.colorbar(im)
        ax.cax.toggle_label(True)
    for ax, im_title in zip(grid2, ["(a)", "(b)", "(c)"]):
        t = add_inner_title(ax, im_title, loc=2)
        t.patch.set_ec("none")
        t.patch.set_alpha(0.5)
    grid2[0].set_xticks([-2, 0])
    grid2[0].set_yticks([-2, 0, 2])
    plt.draw()
    plt.show()
| bundgus/python-playground | matplotlib-playground/examples/axes_grid/demo_axes_grid2.py | Python | mit | 3,488 |
from django_filters import ModelMultipleChoiceFilter, FilterSet
from django_filters.fields import ModelChoiceField
from geotrek.authent.models import Structure
from geotrek.common.models import TargetPortal
from geotrek.trekking.models import POI, Trek
from django.forms import ValidationError
class ComaSeparatedMultipleModelChoiceField(ModelChoiceField):
    """ModelChoiceField variant accepting a comma-separated list of keys.

    ``to_python`` returns a queryset of all matching objects (or None for
    empty input) instead of a single instance.
    """
    def to_python(self, value):
        if value in self.empty_values:
            return None
        try:
            # Filter by pk (or to_field_name) against the split values;
            # unknown keys are silently dropped by the filter.
            key = self.to_field_name or 'pk'
            value = self.queryset.filter(**{f'{key}__in': value.split(',')})
        except (ValueError, TypeError):
            raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
        return value
class ComaSeparatedMultipleModelChoiceFilter(ModelMultipleChoiceFilter):
    # Multiple-choice filter whose form field parses "a,b,c" query values.
    field_class = ComaSeparatedMultipleModelChoiceField
class CirkwiPOIFilterSet(FilterSet):
    # Filter POIs by one or more structures (?structures=1,2,3).
    structures = ComaSeparatedMultipleModelChoiceFilter(field_name='structure', required=False,
                                                        queryset=Structure.objects.all())

    class Meta:
        model = POI
        fields = ('structures', )
class CirkwiTrekFilterSet(FilterSet):
    # Filter treks by structures and/or target portals, both accepting
    # comma-separated id lists in the query string.
    structures = ComaSeparatedMultipleModelChoiceFilter(field_name='structure', required=False,
                                                        queryset=Structure.objects.all())
    portals = ComaSeparatedMultipleModelChoiceFilter(field_name='portal', required=False,
                                                     queryset=TargetPortal.objects.all())

    class Meta:
        model = Trek
        fields = ('structures', 'portals', )
| GeotrekCE/Geotrek-admin | geotrek/cirkwi/filters.py | Python | bsd-2-clause | 1,662 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from abc import ABCMeta, abstractmethod
from datetime import datetime
from pytz import utc
import superdesk
from superdesk import get_resource_service
from superdesk.errors import SuperdeskApiError, SuperdeskIngestError
from superdesk.io.registry import registered_feed_parsers, restricted_feeding_service_parsers
from superdesk.utc import utcnow
from superdesk.utils import Timer
logger = logging.getLogger(__name__)
class FeedingService(metaclass=ABCMeta):
    """
    Base Class for all Feeding Service classes.

    A Feeding Service class must have the following attributes:
        1. `NAME` - unique name under which to register the class.
        2. `ERRORS` - representing a list of <error_number, error_message> pairs that might be raised by the class
        instances' methods.

    Optional attributes:
        1. label: service label for UI view
        2. fields: list of dictionaries; contains the descriptions of configuration fields. All fields must
        have the following properties:
            - id: field identifier
            - type: valid values: text, password, boolean, mapping, choices

            Optional properties:
                - label: field label for UI view
                - required: if true the field is required
                - errors: dictionary of with key being the error code and value the error description
                - required_expression: if the evaluation of the expression is true the field is required
                    on validation. Field values can be referred by enclosing the field identifier in
                    accolades: {field_id}
                - readonly: if true, the field is not editable
                - show_expression: if the evaluation of the expression is true the field is displayed.
                    Field values can be referred by enclosing the field identifier in accolades: {field_id}
                - default_value: value to use

            The fields can be of the following types:
            1. text: has the following properties besides the generic ones:
                - placeholder: placeholder text
            2. password
            3. boolean
            4. mapping: defines a mapping from a list of controlled values to values inputed by the user
                and has the following properties besides the generic ones:
                - add_mapping_label: label for add mapping button
                - remove_mapping_label: label for mapping removal button
                - empty_label: label to display when the mapping is empty
                - first_field_options: dictionary with the following keys:
                    - label
                    - values: list of available options
                - second_field_options: dictionary with the following keys:
                    - label
                    - placeholder
            5. choices: render field as a dropdown. Has the following properties besides the generic ones:
                - choices: a tuple of tuples which defines keys and values for dropdown. Example:
                    'choices': (
                        ('key_one', 'Key one'),
                        ('key_two', 'Key two'),
                    )
                - default: preselect value in dropdown. Must be value from 'choices' properties.
    """

    def __init__(self):
        # Timer instrumentation for update runs; _provider is set on update().
        self._timer = Timer()
        self._provider = None

    @abstractmethod
    def _update(self, provider, update):
        """
        Subclasses must override this method and get items from the provider as per the configuration.

        :param provider: Ingest Provider Details.
        .. seealso:: :class: `superdesk.io.ingest_provider_model.IngestProviderResource`
        :type provider: dict
        :param update: Any update that is required on provider.
        :type update: dict
        :return: a list of articles which can be saved in Ingest Collection.
        """
        raise NotImplementedError()

    def _test_feed_parser(self, provider):
        """
        Checks if the feed_parser value was in the restricted values list.

        :param provider: ingest provider document
        """
        # If the feeding service restricts its parsers, the configured
        # parser must appear (truthy) in that restriction map.
        feeding_service = provider.get('feeding_service')
        feed_parser = provider.get('feed_parser')

        if feeding_service and feed_parser and restricted_feeding_service_parsers.get(feeding_service) and \
                not restricted_feeding_service_parsers.get(feeding_service).get(feed_parser):
            raise SuperdeskIngestError.invalidFeedParserValue(provider=provider)

    def _test(self, provider):
        """
        Subclasses should override this method and do specific config test.

        :param provider: ingest provider document
        """
        return

    def config_test(self, provider=None):
        """Test provider configuration.

        :param provider: provider data
        """
        if not provider:  # nosetests run this for some reason
            return

        if self._is_closed(provider):
            return

        self._test_feed_parser(provider)
        return self._test(provider)

    def _is_closed(self, provider):
        """Test if provider is closed.

        :param provider: provider data
        :return bool: True if is closed
        """
        # A datetime in is_closed means "last closed at", not currently closed.
        is_closed = provider.get('is_closed', False)

        if isinstance(is_closed, datetime):
            is_closed = False

        return is_closed

    def _log_msg(self, msg, level='info'):
        # Log with provider id/name prefix at the given logger level.
        getattr(logger, level)(
            "Ingest:{} '{}': {}".format(self._provider['_id'], self._provider['name'], msg)
        )

    def update(self, provider, update):
        """
        Clients consuming Ingest Services should invoke this to get items from the provider.

        :param provider: Ingest Provider Details.
        :type provider: dict :py:class: `superdesk.io.ingest_provider_model.IngestProviderResource`
        :param update: Any update that is required on provider.
        :type update: dict
        :return: a list of articles which can be saved in Ingest Collection.
        :raises SuperdeskApiError.internalError if Provider is closed
        :raises SuperdeskIngestError if failed to get items from provider
        """
        if self._is_closed(provider):
            raise SuperdeskApiError.internalError('Ingest Provider is closed')
        else:
            try:
                self._provider = provider
                self._log_msg("Start update execution.")
                self._timer.start('update')

                return self._update(provider, update) or []
            except SuperdeskIngestError as error:
                # Critical errors may close the provider permanently.
                self.close_provider(provider, error)
                raise error
            finally:
                self._log_msg("Stop update execution. Exec time: {:.4f} secs.".format(self._timer.stop('update')))
                # just in case stop all timers
                self._timer.stop_all()

    def close_provider(self, provider, error, force=False):
        """Closes the provider and uses error as reason for closing.

        :param provider: Ingest Provider Details.
        .. seealso:: :class: `superdesk.io.ingest_provider_model.IngestProviderResource`
        :param error: ingest error
        :param force: force closing of provider, no matter how it's configured
        """
        if provider.get('critical_errors', {}).get(str(error.code)) or force:
            updates = {
                'is_closed': True,
                'last_closed': {
                    'closed_at': utcnow(),
                    'message': 'Channel closed due to critical error: {}'.format(error)
                }
            }

            get_resource_service('ingest_providers').system_update(provider[superdesk.config.ID_FIELD],
                                                                   updates, provider)

    def add_timestamps(self, item):
        """
        Adds firstcreated and versioncreated timestamps to item

        :param item: object which can be saved to ingest collection
        :type item: dict
        """
        # Existing naive timestamps are localized to UTC; missing ones
        # default to "now".
        item['firstcreated'] = utc.localize(item['firstcreated']) if item.get('firstcreated') else utcnow()
        item['versioncreated'] = utc.localize(item['versioncreated']) if item.get('versioncreated') else utcnow()

    def log_item_error(self, err, item, provider):
        """TODO: put item into provider error basket."""
        logger.warning('ingest error msg={} item={} provider={}'.format(
            str(err),
            item.get('guid'),
            provider.get('name')
        ))

    def prepare_href(self, href, mimetype=None):
        """Prepare a link to an external resource (e.g. an image file).

        It can be directly used by the ingest machinery for fetching it.

        Sub-classes can override this method if properties like HTTP Authentication need to be added to the href.

        :param href: the original URL as extracted from an RSS entry
        :type href: str
        :return: prepared URL
        :rtype: str
        """
        return href

    def get_feed_parser(self, provider, article=None):
        """
        Returns instance of configured feed parser for the given provider.

        :param provider: Ingest Provider Details.
        :type provider: dict :py:class: `superdesk.io.ingest_provider_model.IngestProviderResource`
        :param article: article which needs to parsed by the parser, defaults to None. When None, the registered parser
            will be returned without checking if the parser can parse the article.
        :return: Feed Parser instance.
        :rtype: :py:class:`superdesk.io.feed_parsers.FeedParser`
        :raises: SuperdeskIngestError.parserNotFoundError()
            if either feed_parser value is empty or Feed Parser not found.
        """
        parser = registered_feed_parsers.get(provider.get('feed_parser', ''))

        if not parser:
            raise SuperdeskIngestError.parserNotFoundError(provider=provider)

        if article is not None and not parser.can_parse(article):
            raise SuperdeskIngestError.parserNotFoundError(provider=provider)

        if article is not None:
            # Fresh instance per article so parsers can keep per-item state.
            parser = parser.__class__()

        return parser
# must be imported for registration
from superdesk.io.feeding_services.email import EmailFeedingService # NOQA
from superdesk.io.feeding_services.file_service import FileFeedingService # NOQA
from superdesk.io.feeding_services.ftp import FTPFeedingService # NOQA
from superdesk.io.feeding_services.ritzau import RitzauFeedingService # NOQA
from superdesk.io.feeding_services.http_service import HTTPFeedingService # NOQA
from superdesk.io.feeding_services.rss import RSSFeedingService # NOQA
from superdesk.io.feeding_services.twitter import TwitterFeedingService # NOQA
from superdesk.io.feeding_services.ap import APFeedingService # NOQA
from superdesk.io.feeding_services.bbc_ldrs import BBCLDRSFeedingService # NOQA
| mdhaman/superdesk-core | superdesk/io/feeding_services/__init__.py | Python | agpl-3.0 | 11,417 |
import os
import tempfile
import numpy as np
import xarray as xr
from typhon.files import NetCDF4
class TestNetCDF4:
    """Round-trip tests for typhon's NetCDF4 file handler."""

    def test_dimension_mapping(self):
        """
        If a subgroup has not defined a dimension, but its parent group has one
        with the same size and name, the subgroup should use that one.
        Otherwise it should use the one of the subgroup (test with dim1 in the
        root and subgroups).
        """
        fh = NetCDF4()
        with tempfile.TemporaryDirectory() as tdir:
            tfile = os.path.join(tdir, 'testfile')
            before = xr.Dataset({
                "var1": ("dim1", np.arange(5)),
                "group1/var1": ("group1/dim1", np.arange(5)),
                "group1/var2": ("group1/dim2", np.arange(5)),
                "group1/subgroup1/var1":
                    ("group1/subgroup1/dim1", np.arange(5)),
                "group1/subgroup1/var2":
                    ("group1/subgroup1/dim2", np.arange(5)),
                "group2/var1": ("group2/dim1", np.arange(5)),
                "group2/subgroup1/var1":
                    ("group1/subgroup1/dim1", np.arange(5)),
                "group3/var1": ("group3/dim1", np.arange(10)),
            }, coords={
                "dim1": ("dim1", np.arange(5)),
                "group1/dim1": ("group1/dim1", np.arange(5))
            })

            # Save the dataset and load it again:
            fh.write(before, tfile)
            after = fh.read(tfile)

            # How it should be after loading: same-sized dims collapse onto
            # the nearest ancestor's dimension of the same name.
            check = xr.Dataset({
                "var1": ("dim1", np.arange(5)),
                "group1/var1": ("group1/dim1", np.arange(5)),
                "group1/var2": ("group1/dim2", np.arange(5)),
                "group1/subgroup1/var1": ("group1/dim1", np.arange(5)),
                "group1/subgroup1/var2": ("group1/dim2", np.arange(5)),
                "group2/var1": ("dim1", np.arange(5)),
                "group2/subgroup1/var1": ("dim1", np.arange(5)),
                "group3/var1": ("group3/dim1", np.arange(10)),
            }, coords={
                "dim1": ("dim1", np.arange(5)),
                "group1/dim1": ("group1/dim1", np.arange(5))
            })

            assert after.equals(check)

    def test_scalar_masked(self):
        """Test if scalar masked values read OK

        Test for issue #277
        """
        fh = NetCDF4()
        with tempfile.TemporaryDirectory() as tdir:
            tfile = os.path.join(tdir, "testfile.nc")
            before = xr.Dataset({"a": xr.DataArray(42)})
            before["a"].encoding = {"_FillValue": 42}
            fh.write(before, tfile)
            after = fh.read(tfile)
            assert np.isnan(after["a"])  # fill value should become nan

    def test_times(self):
        """Test if times are read correctly
        """
        fh = NetCDF4()
        with tempfile.TemporaryDirectory() as tdir:
            tfile = os.path.join(tdir, "testfile.nc")
            # Encode with sub-second resolution (scale_factor 0.1s) to
            # check the CF time decoding round-trips exactly.
            before = xr.Dataset(
                {"a":
                    xr.DataArray(
                        np.array(
                            ["2019-02-14T09:00:00", "2019-02-14T09:00:01"],
                            dtype="M8[ns]"))})
            before["a"].encoding = {
                "units": "seconds since 2019-02-14 09:00:00",
                "scale_factor": 0.1}
            fh.write(before, tfile)
            after = fh.read(tfile)
            assert np.array_equal(before["a"], after["a"])

    def test_scalefactor(self):
        """Test if scale factors written/read correctly
        """
        fh = NetCDF4()
        with tempfile.TemporaryDirectory() as tdir:
            tfile = os.path.join(tdir, "testfile.nc")
            # Floats packed into int16 with scale 0.1 must unpack to
            # (approximately) the original values.
            before = xr.Dataset(
                {"a":
                    xr.DataArray(
                        np.array([0.1, 0.2]))})
            before["a"].encoding = {
                "scale_factor": 0.1,
                "_FillValue": 42,
                "dtype": "int16"}
            fh.write(before, tfile)
            after = fh.read(tfile)
            assert np.allclose(before["a"], after["a"])
| atmtools/typhon | typhon/tests/files/handlers/test_netcdf4.py | Python | mit | 4,163 |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from aldryn_github_stats import __version__
# Runtime dependencies of the package.
REQUIREMENTS = [
    'Django',
    'django-cms',
    'PyGithub',
]

# Trove classifiers for PyPI.
CLASSIFIERS = [
    'Development Status :: 2 - Pre-Alpha',
    'Environment :: Web Environment',
    'Framework :: Django',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: BSD License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    'Topic :: Software Development',
    'Topic :: Software Development :: Libraries :: Application Frameworks',
]

setup(
    name='aldryn-github-stats',
    version=__version__,
    description='Simple plugin to add dynamic stats from GitHub repos.',
    author='Divio AG',
    author_email='info@divio.ch',
    url='https://github.com/aldryn/aldryn-github-stats',
    packages=find_packages(),
    # NOTE(review): `license` conventionally holds the license *name*
    # (e.g. 'BSD'), not a file name — verify before changing metadata.
    license='LICENSE.txt',
    platforms=['OS Independent'],
    install_requires=REQUIREMENTS,
    classifiers=CLASSIFIERS,
    include_package_data=True,
    zip_safe=False,
    # test_suite="test_settings.run",
)
| aldryn/aldryn-github-stats | setup.py | Python | bsd-3-clause | 1,139 |
import py, pytest
import sys, os
import execnet
import xdist.remote
from _pytest import runner # XXX load dynamically
class NodeManager(object):
    """Manages the execnet gateways/slave nodes for a distributed test run."""
    EXIT_TIMEOUT = 10

    def __init__(self, config, specs=None, defaultchdir="pyexecnetcache"):
        self.config = config
        self._nodesready = py.std.threading.Event()
        self.trace = self.config.trace.get("nodemanager")
        self.group = execnet.Group()
        if specs is None:
            specs = self._getxspecs()
        self.specs = []
        for spec in specs:
            if not isinstance(spec, execnet.XSpec):
                spec = execnet.XSpec(spec)
            # Remote (non-popen) nodes get a default working directory.
            if not spec.chdir and not spec.popen:
                spec.chdir = defaultchdir
            self.group.allocate_id(spec)
            self.specs.append(spec)
        self.roots = self._getrsyncdirs()

    def rsync_roots(self):
        """ make sure that all remote gateways
            have the same set of roots in their
            current directory.
        """
        options = {
            'ignores': self.config.getini("rsyncignore"),
            'verbose': self.config.option.verbose,
        }
        if self.roots:
            # send each rsync root
            for root in self.roots:
                self.rsync(root, **options)

    def makegateways(self):
        # Create one execnet gateway per spec; hooks allow plugins to
        # observe/modify the setup.
        assert not list(self.group)
        self.config.hook.pytest_xdist_setupnodes(config=self.config,
            specs=self.specs)
        for spec in self.specs:
            gw = self.group.makegateway(spec)
            self.config.hook.pytest_xdist_newgateway(gateway=gw)

    def setup_nodes(self, putevent):
        # Full node bring-up: gateways, rsync, then one SlaveController
        # per gateway reporting events into `putevent`.
        self.makegateways()
        self.rsync_roots()
        self.trace("setting up nodes")
        for gateway in self.group:
            node = SlaveController(self, gateway, self.config, putevent)
            gateway.node = node  # to keep node alive
            node.setup()
            self.trace("started node %r" % node)

    def teardown_nodes(self):
        self.group.terminate(self.EXIT_TIMEOUT)

    def _getxspecs(self):
        # Expand "N*spec" multipliers from the --tx option list.
        xspeclist = []
        for xspec in self.config.getvalue("tx"):
            i = xspec.find("*")
            try:
                num = int(xspec[:i])
            except ValueError:
                xspeclist.append(xspec)
            else:
                xspeclist.extend([xspec[i+1:]] * num)
        if not xspeclist:
            raise pytest.UsageError(
                "MISSING test execution (tx) nodes: please specify --tx")
        return [execnet.XSpec(x) for x in xspeclist]

    def _getrsyncdirs(self):
        # No rsync needed when every node is a local popen without chdir.
        for spec in self.specs:
            if not spec.popen or spec.chdir:
                break
        else:
            return []
        import pytest, _pytest
        pytestpath = pytest.__file__.rstrip("co")
        pytestdir = py.path.local(_pytest.__file__).dirpath()
        config = self.config
        candidates = [py._pydir,pytestpath,pytestdir]
        candidates += config.option.rsyncdir
        rsyncroots = config.getini("rsyncdirs")
        if rsyncroots:
            candidates.extend(rsyncroots)
        roots = []
        for root in candidates:
            root = py.path.local(root).realpath()
            if not root.check():
                raise pytest.UsageError("rsyncdir doesn't exist: %r" %(root,))
            if root not in roots:
                roots.append(root)
        return roots

    def rsync(self, source, notify=None, verbose=False, ignores=None):
        """ perform rsync to all remote hosts.
        """
        rsync = HostRSync(source, verbose=verbose, ignores=ignores)
        seen = py.builtin.set()
        gateways = []
        for gateway in self.group:
            spec = gateway.spec
            if spec.popen and not spec.chdir:
                # XXX this assumes that sources are python-packages
                # and that adding the basedir does not hurt
                gateway.remote_exec("""
                    import sys ; sys.path.insert(0, %r)
                """ % os.path.dirname(str(source))).waitclose()
                continue
            if spec not in seen:
                def finished():
                    if notify:
                        notify("rsyncrootready", spec, source)
                rsync.add_target_host(gateway, finished=finished)
                seen.add(spec)
                gateways.append(gateway)
        if seen:
            self.config.hook.pytest_xdist_rsyncstart(
                source=source,
                gateways=gateways,
            )
            rsync.send()
            self.config.hook.pytest_xdist_rsyncfinish(
                source=source,
                gateways=gateways,
            )
class HostRSync(execnet.RSync):
    """ RSyncer that filters out common files
    """
    def __init__(self, sourcedir, *args, **kwargs):
        self._synced = {}
        ignores= None
        if 'ignores' in kwargs:
            ignores = kwargs.pop('ignores')
        self._ignores = ignores or []
        super(HostRSync, self).__init__(sourcedir=sourcedir, **kwargs)

    def filter(self, path):
        # Exclude bytecode, editor backups, dotfiles and explicit ignores;
        # return True only for paths that should be transferred.
        path = py.path.local(path)
        if not path.ext in ('.pyc', '.pyo'):
            if not path.basename.endswith('~'):
                if path.check(dotfile=0):
                    for x in self._ignores:
                        if path == x:
                            break
                    else:
                        return True

    def add_target_host(self, gateway, finished=None):
        # Sync into a directory named after the source's basename on the
        # remote side; delete stale remote files.
        remotepath = os.path.basename(self._sourcedir)
        super(HostRSync, self).add_target(gateway, remotepath,
                                          finishedcallback=finished,
                                          delete=True,)

    def _report_send_file(self, gateway, modified_rel_path):
        if self._verbose:
            path = os.path.basename(self._sourcedir) + "/" + modified_rel_path
            remotepath = gateway.spec.chdir
            py.builtin.print_('%s:%s <= %s' %
                              (gateway.spec, remotepath, path))
def make_reltoroot(roots, args):
    """Rewrite each test arg's filesystem part relative to an rsync root.

    Raises ValueError when an arg's path is under none of the roots.
    """
    # XXX introduce/use public API for splitting py.test args
    separator = "::"
    rewritten = []
    for arg in args:
        pieces = arg.split(separator)
        fspath = py.path.local(pieces[0])
        matched = False
        for root in roots:
            relpath = fspath.relto(root)
            if relpath or fspath == root:
                pieces[0] = root.basename + "/" + relpath
                matched = True
                break
        if not matched:
            raise ValueError("arg %s not relative to an rsync root" % (arg,))
        rewritten.append(separator.join(pieces))
    return rewritten
class SlaveController(object):
ENDMARK = -1
def __init__(self, nodemanager, gateway, config, putevent):
self.nodemanager = nodemanager
self.putevent = putevent
self.gateway = gateway
self.config = config
self.slaveinput = {'slaveid': gateway.id}
self._down = False
self.log = py.log.Producer("slavectl-%s" % gateway.id)
if not self.config.option.debug:
py.log.setconsumer(self.log._keywords, None)
def __repr__(self):
return "<%s %s>" %(self.__class__.__name__, self.gateway.id,)
def setup(self):
self.log("setting up slave session")
spec = self.gateway.spec
args = self.config.args
if not spec.popen or spec.chdir:
args = make_reltoroot(self.nodemanager.roots, args)
option_dict = vars(self.config.option)
if spec.popen:
name = "popen-%s" % self.gateway.id
basetemp = self.config._tmpdirhandler.getbasetemp()
option_dict['basetemp'] = str(basetemp.join(name))
self.config.hook.pytest_configure_node(node=self)
self.channel = self.gateway.remote_exec(xdist.remote)
self.channel.send((self.slaveinput, args, option_dict))
if self.putevent:
self.channel.setcallback(self.process_from_remote,
endmarker=self.ENDMARK)
def ensure_teardown(self):
if hasattr(self, 'channel'):
if not self.channel.isclosed():
self.log("closing", self.channel)
self.channel.close()
#del self.channel
if hasattr(self, 'gateway'):
self.log("exiting", self.gateway)
self.gateway.exit()
#del self.gateway
def send_runtest(self, nodeid):
self.sendcommand("runtests", ids=[nodeid])
def send_runtest_all(self):
self.sendcommand("runtests_all",)
def shutdown(self):
if not self._down:
try:
self.sendcommand("shutdown")
except IOError:
pass
def sendcommand(self, name, **kwargs):
""" send a named parametrized command to the other side. """
self.log("sending command %s(**%s)" % (name, kwargs))
self.channel.send((name, kwargs))
def notify_inproc(self, eventname, **kwargs):
self.log("queuing %s(**%s)" % (eventname, kwargs))
self.putevent((eventname, kwargs))
def process_from_remote(self, eventcall):
""" this gets called for each object we receive from
the other side and if the channel closes.
Note that channel callbacks run in the receiver
thread of execnet gateways - we need to
avoid raising exceptions or doing heavy work.
"""
try:
if eventcall == self.ENDMARK:
err = self.channel._getremoteerror()
if not self._down:
if not err or isinstance(err, EOFError):
err = "Not properly terminated" # lost connection?
self.notify_inproc("errordown", node=self, error=err)
self._down = True
return
eventname, kwargs = eventcall
if eventname in ("collectionstart"):
self.log("ignoring %s(%s)" %(eventname, kwargs))
elif eventname == "slaveready":
self.notify_inproc(eventname, node=self, **kwargs)
elif eventname == "slavefinished":
self._down = True
self.slaveoutput = kwargs['slaveoutput']
self.notify_inproc("slavefinished", node=self)
#elif eventname == "logstart":
# self.notify_inproc(eventname, node=self, **kwargs)
elif eventname in ("testreport", "collectreport", "teardownreport"):
rep = unserialize_report(eventname, kwargs['data'])
self.notify_inproc(eventname, node=self, rep=rep)
elif eventname == "collectionfinish":
self.notify_inproc(eventname, node=self, ids=kwargs['ids'])
else:
raise ValueError("unknown event: %s" %(eventname,))
except KeyboardInterrupt:
# should not land in receiver-thread
raise
except:
excinfo = py.code.ExceptionInfo()
py.builtin.print_("!" * 20, excinfo)
self.config.pluginmanager.notify_exception(excinfo)
def unserialize_report(name, reportdict):
d = reportdict
if name == "testreport":
return runner.TestReport(**d)
elif name == "collectreport":
return runner.CollectReport(**d)
| curzona/pytest-xdist | xdist/slavemanage.py | Python | mit | 11,298 |
# Software License Agreement (BSD License)
#
# Copyright (c) 2009-2011, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: vic.iglesias@eucalyptus.com
import time
from prettytable import PrettyTable
from boto.ec2.tag import TagSet
class TaggedResource():
    """Mixin providing EC2 tag helpers for a cloud resource.

    The host class must supply ``self.tester`` (a test harness exposing
    ``debug``, ``create_tags``, ``delete_tags`` and ``ec2``), plus
    ``self.id``, ``self.tags``, ``self.update``, ``self.markup`` and
    ``self.debug``.
    """
    def __init__(self):
        pass

    def create_tags(self, tags, timeout=600):
        """Apply ``tags`` to this resource and block until they appear."""
        self.tester.debug("Current tags: " + str(self.tags))
        self.tester.create_tags([self.id], tags)
        self.wait_for_tags(tags, timeout=timeout)

    def wait_for_tags(self, tags, creation=True, timeout=60):
        """Poll until every key in ``tags`` is applied (creation=True) or
        none remain (creation=False); raise after ``timeout`` seconds."""
        started = time.time()
        elapsed = 0
        while elapsed < timeout:
            self.update()
            applied_tags = self.convert_tag_list_to_dict(
                self.tester.ec2.get_all_tags(filters={u'resource_id': self.id}))
            self.tester.debug("Current tags: " + str(applied_tags))
            found_keys = 0
            for key, value in tags.iteritems():  # NOTE: Python 2 dict API
                if key in applied_tags:
                    found_keys += 1
                    self.tester.debug(
                        "Found key # " + str(found_keys) + " out of " + str(len(tags)) + ":" + key)
            if creation and found_keys == len(tags):
                return True
            if not creation and found_keys == 0:
                return True
            elapsed = int(time.time() - started)
            time.sleep(5)
        raise Exception("Did not apply tags within " + str(timeout) + " seconds")

    def convert_tag_list_to_dict(self, list):
        """Convert a boto tag list into a plain ``{name: value}`` dict."""
        return dict((tag.name, tag.value) for tag in list)

    def delete_tags(self, tags, timeout=600):
        """Remove ``tags`` from this resource and block until gone."""
        self.tester.debug("Current tags: " + str(self.tags))
        self.tester.delete_tags([self.id], tags)
        self.wait_for_tags(tags, creation=False, timeout=timeout)

    def show_tags(self, tags, printmethod=None, printme=True):
        """Render ``tags`` as a two-column table; print it via
        ``printmethod`` (default ``self.debug``) or return the table
        when ``printme`` is False."""
        if not isinstance(tags, TagSet) and not isinstance(tags, dict):
            if hasattr(tags, 'tags'):
                tags = tags.tags
            else:
                raise ValueError('unknown tags object of type "{0}" passed to show_tags'
                                 .format(type(tags)))
        name_header = self.markup("TAG NAME")
        value_header = self.markup("TAG VALUE")
        table = PrettyTable([name_header, value_header])
        table.padding_width = 0
        table.align = 'l'
        table.hrules = 1
        table.max_width[name_header] = 20
        table.max_width[value_header] = 80
        for tag in tags:
            table.add_row([str(tag), str(tags.get(tag, None))])
        if printme:
            printmethod = printmethod or self.debug
            printmethod("\n" + str(table) + "\n")
        else:
            return table
#1strand Bushing Tool
#Standalone program for minimized cruft
#
# Python 2 interactive script: builds a continuous single-strand toolpath
# for a circular bushing (one brim layer plus concentric perimeter rings)
# and writes it out as G-code.  Points accumulate in the parallel lists
# xpath/ypath/zpath; extrusion amounts are derived afterwards.
import math
print "This program is for printing the best possible circular bushings"
print "Printer config values are hardcoded for ease of use (for me)"
xpath = [] #These are initialized and default values
ypath = []
zpath = []
step = []
epath = []
xstart = 10.0
ystart = 10.0
zstart = 0.5
height = 0.0
LayerHeight = 0.3
ExtrusionWidth = 0.6
FilamentDiameter=3
FilamentArea = FilamentDiameter * FilamentDiameter * 3.14159 / 4.0
# Filament (mm) fed per mm of travel for the configured bead cross-section.
GooCoefficient = LayerHeight * ExtrusionWidth / FilamentArea
configlist = [LayerHeight, ExtrusionWidth, FilamentDiameter, GooCoefficient]
BrimDiameter = 0.0
OuterDiameter = 0.0
InnerDiameter = 0.0
N = 1
ActualExtrusionWidth = ExtrusionWidth
print "Current values are:"
print "LayerHeight =", configlist[0] #This assignment is super important
print "ExtrusionWidth=", configlist[1] #and needs to be consistent with
print "FilamentDiameter=", configlist[2] #with other code blocks related
print "GooCoefficient=", configlist[3] #to these options.
BrimDiameter = float(raw_input("Enter brim diameter in mm:"))
OuterDiameter = float(raw_input("Enter Outer Diameter in mm:"))
InnerDiameter = float(raw_input("Enter Inner Diameter in mm:"))
N = int(raw_input("Enter number of line segments in your alleged circles"))
anglestep = 2 * math.pi / N
height = float(raw_input("Enter Height"))
centerx = (BrimDiameter / 2.0)+5 #Center is chosen so brim is 5mm from edge
centery = (BrimDiameter / 2.0)+5 #Center is chosen so brim is 5mm from edge
# The wall is split into a whole number of perimeters, then the extrusion
# width is widened so those perimeters fill the wall exactly.
thickness = (OuterDiameter-InnerDiameter)/2
perimeters = thickness/ExtrusionWidth
print "Thickness = ", thickness
print "Needed perimeters = ", perimeters
perimeters = int(perimeters)
ActualExtrusionWidth = thickness/perimeters
print "Revised perimeters = ", perimeters
print "Revised extrusion width = ", ActualExtrusionWidth
BrimThickness = (BrimDiameter-InnerDiameter)/2
BrimPerimeters = int(BrimThickness/ActualExtrusionWidth)
print "Brim Thickness = ", BrimThickness
print "Brim Perimeters = ", BrimPerimeters
#Brim layer is first, and treated separately.
# Rings step inward from the brim edge toward the bore, each drawn as N
# straight segments at z = one layer height.
j=0
i=0
radius = BrimDiameter/2 - (j+0.5)*ActualExtrusionWidth
xpath.append(centerx+radius)
ypath.append(centery)
zpath.append(LayerHeight)
while (j<BrimPerimeters):
 radius = BrimDiameter/2 - (j+0.5)*ActualExtrusionWidth
 j=j+1
 i=0
 while (i<N):
  i=i+1
  #print "i=", i, "j=", j, "radius=", radius
  xpath.append(centerx+radius*math.cos(i*anglestep))
  ypath.append(centery+radius*math.sin(i*anglestep))
  zpath.append(LayerHeight)
#
#
#
#Now the actual bushing begins printing.
#
#
#
CurrentLayer=2
CurrentHeight=LayerHeight*CurrentLayer
k=0
#Even layers (1st bushing layer is 2) are inside to outside
#odd layers are outside to inside, to maintain strand continuity
j=0
i=0
radius = InnerDiameter/2 + (j-0.5)*ActualExtrusionWidth
xpath.append(centerx+radius)
ypath.append(centery)
zpath.append(CurrentHeight)
# NOTE(review): this first even layer starts at j=0 and loops while
# j<=perimeters, so it draws one more ring (beginning half a width inside
# the bore) than the later even layers, which start at j=1 -- confirm
# this is intentional.
while (j<=perimeters):
 radius = InnerDiameter/2 + (j-0.5)*ActualExtrusionWidth
 j=j+1
 i=0
 while (i<N):
  i=i+1
  #print "i=", i, "j=", j, "radius=", radius
  xpath.append(centerx+radius*math.cos(i*anglestep))
  ypath.append(centery+radius*math.sin(i*anglestep))
  zpath.append(CurrentHeight)
#odd layers are outside to inside, to maintain strand continuity
CurrentLayer=3
CurrentHeight=LayerHeight*CurrentLayer
j=0
i=0
radius = OuterDiameter/2 - (j+0.5)*ActualExtrusionWidth
xpath.append(centerx+radius)
ypath.append(centery)
zpath.append(CurrentHeight)
while (j<perimeters):
 radius = OuterDiameter/2 - (j+0.5)*ActualExtrusionWidth
 j=j+1
 i=0
 while (i<N):
  i=i+1
  #print "i=", i, "j=", j, "radius=", radius
  xpath.append(centerx+radius*math.cos(i*anglestep))
  ypath.append(centery+radius*math.sin(i*anglestep))
  zpath.append(CurrentHeight)
# Remaining layers are added in even/odd pairs so the strand never has to
# travel without extruding: even = inside-out, odd = outside-in.
while (CurrentLayer*LayerHeight < height):
 CurrentLayer=CurrentLayer+1
 CurrentHeight=LayerHeight*CurrentLayer
 #Even layers (1st bushing layer is 2) are inside to outside
 #odd layers are outside to inside, to maintain strand continuity
 j=1
 i=0
 radius = InnerDiameter/2 + (j-0.5)*ActualExtrusionWidth
 xpath.append(centerx+radius)
 ypath.append(centery)
 zpath.append(CurrentHeight)
 while (j<=perimeters):
  radius = InnerDiameter/2 + (j-0.5)*ActualExtrusionWidth
  j=j+1
  i=0
  while (i<N):
   i=i+1
   #print "i=", i, "j=", j, "layer=", CurrentLayer, "radius=", radius
   xpath.append(centerx+radius*math.cos(i*anglestep))
   ypath.append(centery+radius*math.sin(i*anglestep))
   zpath.append(CurrentHeight)
 #odd layers are outside to inside, to maintain strand continuity
 CurrentLayer=CurrentLayer+1
 CurrentHeight=LayerHeight*CurrentLayer
 j=0
 i=0
 radius = OuterDiameter/2 - (j+0.5)*ActualExtrusionWidth
 xpath.append(centerx+radius)
 ypath.append(centery)
 zpath.append(CurrentHeight)
 while (j<perimeters):
  radius = OuterDiameter/2 - (j+0.5)*ActualExtrusionWidth
  j=j+1
  i=0
  while (i<N):
   i=i+1
   #print "i=", i, "j=", j, "layer=", CurrentLayer, "radius=", radius
   xpath.append(centerx+radius*math.cos(i*anglestep))
   ypath.append(centery+radius*math.sin(i*anglestep))
   zpath.append(CurrentHeight)
#Extrusion is only handled here temporarily for testing
# Pre-size step/epath so they can be written by index below.
for x in xrange(len(xpath)): # This initializes the arrays so I can
 step.append(0.0) #avoid that append() bullshit where I dont
 epath.append(0.0) #know where I'm writing.
# Each segment's extrusion is its 3D length times GooCoefficient; epath is
# the running (absolute, M82-mode) extruder position.
# NOTE(review): the loop starts at x=2, so the segment from point 0 to
# point 1 gets no extrusion -- confirm it is the intended travel move.
for x in xrange(2, len(xpath)): # This calculates how much extruder movement per step
 distance=((xpath[x]-xpath[x-1])**2+(ypath[x]-ypath[x-1])**2+(zpath[x]-zpath[x-1])**2)**0.5
 step[x]=distance*GooCoefficient
 epath[x]=epath[x-1]+step[x]
#for x in range(len(xpath)): #Human readable raw output
# print xpath[x-1], ypath[x-1], zpath[x-1], step[x-1], epath[x-1]
goutput = open("output1.gcode", "wb") #Now save to output1.gcode
# Preamble: G28 home, G21 mm units, G90 absolute moves, G92 E0 reset
# extruder, M82 absolute extrusion.
goutput.write("G28 \nG21 \nG90 \nG92 E0 \nM82")
x=0
for x in range(len(xpath)):
 goutput.write("G1 X" );
 goutput.write( str(xpath[x]) );
 goutput.write( " Y" );
 goutput.write( str(ypath[x]) );
 goutput.write( " Z" );
 goutput.write( str(zpath[x]) );
 goutput.write( " E" );
 goutput.write( str(epath[x]) );
 goutput.write( " F2000 \n" );
goutput.close()
| kanethemediocre/1strand | 1strandbushinga001.py | Python | gpl-2.0 | 6,501 |
#!/usr/bin/env python
from tincanradar.fwdmodel import friis
from argparse import ArgumentParser
def main():
    """Command-line entry point: print one-way Friis path loss in dB."""
    parser = ArgumentParser()
    parser.add_argument("freq_Hz", help="frequency [Hz]", type=float)
    parser.add_argument("dist_m", help="distance (one-way) [meters]", type=float)
    args = parser.parse_args()
    print(f"{friis(args.dist_m, args.freq_Hz):.2f} dB")


if __name__ == "__main__":
    main()
| scivision/tincanradar | Friis.py | Python | gpl-3.0 | 392 |
from django.urls import path
from .views import ProfileView
# URL namespace, so routes reverse as "dashboard:dashboard".
app_name = "dashboard"
urlpatterns = [
    # App root serves the profile/dashboard view.
    path("", ProfileView.as_view(), name="dashboard"),
]
| nmunro/azathoth | dashboard/urls.py | Python | agpl-3.0 | 158 |
#!/usr/bin/env python3
import sys
import logging
from argparse import ArgumentParser, RawTextHelpFormatter
import requests
import socket
import boto3
DNS_RR_TYPE = 'A'  # only A records are managed by this script
URL_META = 'http://169.254.169.254/latest/meta-data/public-ipv4'  # EC2 metadata endpoint
URL_TMOUT = 5  # metadata request timeout, seconds
def main():
    """Update a Route53 A record with this EC2 instance's current public
    IPv4 address.  Returns 1 on any failure (logged), None on success."""
    try:
        logging.basicConfig(filename='route53_update.log', format='%(asctime)s %(message)s', level=logging.INFO)
        zone, a_rr = arg_params()
        public_ip = ec2_local_pub_ip()
        client = boto3.client('route53')
        zone_id, record_sets = get_dns_rrs(client, zone, a_rr)
        change_dns_rr(client, zone_id, record_sets, public_ip, a_rr)
    except Exception as e:
        logging.critical("Route53Update: Invalid: " + str(e))
        return 1
def arg_params():
    """Parse command line arguments.

    Returns:
        tuple(str, str): (hosted zone name, A record name).

    Raises:
        ValueError: if either value is empty.
    """
    parser = ArgumentParser(description="AWS Route53 A RR update script",
                            formatter_class=RawTextHelpFormatter)
    parser.add_argument('-z', dest='zone_name', help='route53 hosted zone', required=True)
    parser.add_argument('-a', dest='a_record', help='A resource record to be updated', required=True)
    args = parser.parse_args()
    zone_name = args.zone_name
    a_record = args.a_record
    if not (a_record and zone_name):
        # BUGFIX: this used to fall through and return None, making the
        # caller fail with an opaque unpacking TypeError.
        raise ValueError("zone (-z) and A record (-a) must be non-empty")
    logging.info("Route53Update: Gathering parameters. Zone: {zone} and A record: {a_rr}".format(
        zone=zone_name,
        a_rr=a_record
    ))
    return zone_name, a_record
def ec2_local_pub_ip():
    """Fetch this instance's public IPv4 address from the EC2 metadata
    service and validate its format.

    Returns:
        str: dotted-quad public IPv4 address.

    Raises:
        ValueError: if the metadata value is not a valid IPv4 address.
        requests.RequestException: if the metadata service is unreachable
            (handled by main()'s catch-all).
    """
    response = requests.get(URL_META, timeout=URL_TMOUT)
    ipaddress = response.content.decode("utf-8")
    logging.info("Route53Update: Local Public IPv4 address: " + ipaddress)
    try:
        socket.inet_aton(ipaddress)
    except socket.error:
        # BUGFIX: this used to log and `return 2`, so callers received the
        # integer 2 where an IP address string was expected.
        logging.critical("Route53Update: EC2 metadata public ipv4 is unavailable or not IP address format")
        raise ValueError("not an IPv4 address: " + ipaddress)
    return ipaddress
def get_dns_rrs(client, zone, a_record):
    """Look up the hosted zone id for ``zone`` and fetch the record set
    starting at ``a_record``.

    :param client: boto3 route53 client
    :param zone: hosted zone name
    :param a_record: A record name to look up
    :return: (hosted_zone_id, list_resource_record_sets response)
    :raises ValueError: when ``zone`` matches no hosted zone
    """
    response_hosted_zone = client.list_hosted_zones_by_name(DNSName=zone)
    if not response_hosted_zone['HostedZones']:
        raise ValueError("Zone {zone} provided does not match any route53 hosted zone. "
                         "Response: {response}".format(zone=zone, response=str(response_hosted_zone)))
    hosted_zone_id = str(response_hosted_zone['HostedZones'][0]['Id'])
    logging.info("Route53Update: Hosted Zone id: " + hosted_zone_id)
    dns_rrs = client.list_resource_record_sets(HostedZoneId=hosted_zone_id,
                                               StartRecordName=a_record,
                                               StartRecordType=DNS_RR_TYPE,
                                               MaxItems='1')
    return hosted_zone_id, dns_rrs
def change_dns_rr(client, hostedzoneid, dns_rrs, local_pub_ip, a_record):
    """UPSERT the matching A record to ``local_pub_ip`` when it differs
    from the currently published value.

    :param client: boto3 route53 client
    :param hostedzoneid: hosted zone id owning the record
    :param dns_rrs: list_resource_record_sets() response to scan
    :param local_pub_ip: this host's public IPv4 address
    :param a_record: A record name (without the trailing dot)
    """
    if not dns_rrs['ResourceRecordSets']:
        logging.info("Route53Update: A record {a_rr} "
                     "does not match any records in the hosted zone {zone}".format(a_rr=a_record,
                                                                                   zone=hostedzoneid
                                                                                   ))
        return
    for dns_rr in dns_rrs['ResourceRecordSets']:
        # Records of a different type or name are silently skipped,
        # matching the original behavior.
        if dns_rr['Type'] != DNS_RR_TYPE or dns_rr['Name'] != (a_record + "."):
            continue
        rr_ip = dns_rr['ResourceRecords'][0]['Value']
        logging.info("Route53Update: Current Host zone RR: " + str(dns_rr))
        if rr_ip == local_pub_ip:
            logging.info("Route53Update: DNS address is the same as the local public ipv4 address. "
                         "Change is not required.")
            continue
        dns_rr['ResourceRecords'][0]['Value'] = local_pub_ip
        client.change_resource_record_sets(
            HostedZoneId=hostedzoneid,
            ChangeBatch={'Changes': [
                {'Action': 'UPSERT',
                 'ResourceRecordSet': dns_rr
                 }
            ]})
        logging.info("A record {a_rr} in zone {zone} update with new IP public ipv4: {local_pip}".format(
            a_rr=a_record,
            zone=hostedzoneid,
            local_pip=local_pub_ip
        ))
# Script entry point; exit status 1 signals failure.
if __name__ == '__main__':
    sys.exit(main())
| nenni/aws_playground | aws_route53_rr_update.py | Python | mit | 4,752 |
from django.core.management.base import BaseCommand
from dimagi.utils.couch.database import iter_docs
from corehq.apps.domain.models import Domain
from corehq.apps.users.models import CouchUser, CommCareUser
class Command(BaseCommand):
    """One-off backfill: set CommCareUser.location_id from the user's
    first location, for users in location/commtrack-enabled domains."""
    help = ''

    def handle(self, *args, **options):
        self.stdout.write("Population location_id field...\n")
        relevant_ids = set([r['id'] for r in CouchUser.get_db().view(
            'users/by_username',
            reduce=False,
        ).all()])

        to_save = []
        domain_cache = {}

        # Domains whose users must not be touched.
        exclude = (
            "drewpsi",
            "psi",
            "psi-ors",
            "psi-test",
            "psi-test2",
            "psi-test3",
            "psi-unicef",
            "psi-unicef-wb",
        )

        def _is_location_domain(domain):
            # Memoized check: commtrack or locations enabled for domain?
            try:
                return domain_cache[domain]
            except KeyError:
                domain_obj = Domain.get_by_name(domain)
                enabled = domain_obj.commtrack_enabled or domain_obj.locations_enabled
                domain_cache[domain] = enabled
                return enabled

        for user_doc in iter_docs(CommCareUser.get_db(), relevant_ids):
            if user_doc['doc_type'] == 'WebUser':
                continue
            if user_doc['domain'] in exclude:
                continue
            if not _is_location_domain(user_doc['domain']):
                continue

            user = CommCareUser.get(user_doc['_id'])
            if user._locations:
                user_doc['location_id'] = user._locations[0]._id
                to_save.append(user_doc)

                # Flush in batches to keep memory bounded.
                if len(to_save) > 500:
                    self.stdout.write("Saving 500")
                    CouchUser.get_db().bulk_save(to_save)
                    to_save = []

        if to_save:
            CouchUser.get_db().bulk_save(to_save)
| puttarajubr/commcare-hq | corehq/apps/locations/management/commands/set_location_id.py | Python | bsd-3-clause | 1,867 |
# -*- coding: utf-8 -*-
# $Id: testboxstatus.py $
"""
Test Manager - TestBoxStatus.
"""
__copyright__ = \
"""
Copyright (C) 2012-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 107843 $"
# Standard python imports.
import unittest;
# Validation Kit imports.
from testmanager.core.base import ModelDataBase, ModelDataBaseTestCase, ModelLogicBase, TMTooManyRows, TMRowNotFound;
from testmanager.core.testbox import TestBoxData;
class TestBoxStatusData(ModelDataBase):
    """
    TestBoxStatus Data.

    In-memory mirror of one TestBoxStatuses table row.
    """

    ## @name TestBoxState_T
    # @{
    ksTestBoxState_Idle                     = 'idle'
    ksTestBoxState_Testing                  = 'testing'
    ksTestBoxState_GangGathering            = 'gang-gathering'
    ksTestBoxState_GangGatheringTimedOut    = 'gang-gathering-timedout'
    ksTestBoxState_GangTesting              = 'gang-testing'
    ksTestBoxState_GangCleanup              = 'gang-cleanup'
    ksTestBoxState_Rebooting                = 'rebooting'
    ksTestBoxState_Upgrading                = 'upgrading'
    ksTestBoxState_UpgradingAndRebooting    = 'upgrading-and-rebooting'
    ksTestBoxState_DoingSpecialCmd          = 'doing-special-cmd'
    ## @}

    ksParam_idTestBox       = 'TestBoxStatus_idTestBox'
    ksParam_idGenTestBox    = 'TestBoxStatus_idGenTestBox'
    ksParam_tsUpdated       = 'TestBoxStatus_tsUpdated'
    ksParam_enmState        = 'TestBoxStatus_enmState'
    ksParam_idTestSet       = 'TestBoxStatus_idTestSet'
    ksParam_iWorkItem       = 'TestBoxStatus_iWorkItem'

    kasAllowNullAttributes  = ['idTestSet', ]
    kasValidValues_enmState = \
    [
        ksTestBoxState_Idle, ksTestBoxState_Testing, ksTestBoxState_GangGathering,
        ksTestBoxState_GangGatheringTimedOut, ksTestBoxState_GangTesting, ksTestBoxState_GangCleanup,
        ksTestBoxState_Rebooting, ksTestBoxState_Upgrading, ksTestBoxState_UpgradingAndRebooting,
        ksTestBoxState_DoingSpecialCmd,
    ]

    ## Number of columns in a TestBoxStatuses row.
    kcDbColumns = 6

    def __init__(self):
        ModelDataBase.__init__(self)

        #
        # Initialize with defaults.
        # See the database for explanations of each of these fields.
        #
        self.idTestBox      = None
        self.idGenTestBox   = None
        self.tsUpdated      = None
        self.enmState       = self.ksTestBoxState_Idle
        self.idTestSet      = None
        self.iWorkItem      = None

    def initFromDbRow(self, aoRow):
        """
        Internal worker for initFromDbWithId and initFromDbWithGenId as well as
        TestBoxStatusLogic.
        """
        if aoRow is None:
            raise TMRowNotFound('TestBoxStatus not found.')

        self.idTestBox      = aoRow[0]
        self.idGenTestBox   = aoRow[1]
        self.tsUpdated      = aoRow[2]
        self.enmState       = aoRow[3]
        self.idTestSet      = aoRow[4]
        self.iWorkItem      = aoRow[5]
        return self

    def initFromDbWithId(self, oDb, idTestBox):
        """
        Initialize the object from the database, looking up by testbox ID.
        """
        oDb.execute('SELECT *\n'
                    'FROM TestBoxStatuses\n'
                    'WHERE idTestBox = %s\n'
                    , (idTestBox, ) )
        return self.initFromDbRow(oDb.fetchOne())

    def initFromDbWithGenId(self, oDb, idGenTestBox):
        """
        Initialize the object from the database, looking up by generation ID.
        """
        oDb.execute('SELECT *\n'
                    'FROM TestBoxStatuses\n'
                    'WHERE idGenTestBox = %s\n'
                    , (idGenTestBox, ) )
        return self.initFromDbRow(oDb.fetchOne())
class TestBoxStatusLogic(ModelLogicBase):
    """
    TestBoxStatus logic.

    Query and update helpers operating on the TestBoxStatuses table.
    """

    ## The number of seconds between each time to call touchStatus() when
    # returning CMD_IDLE.
    kcSecIdleTouchStatus = 120

    def __init__(self, oDb):
        ModelLogicBase.__init__(self, oDb)

    def tryFetchStatus(self, idTestBox):
        """
        Attempts to fetch the status of the given testbox.

        Returns a TestBoxStatusData object on success.
        Returns None if no status was found.
        Raises exception on other errors.
        """
        self._oDb.execute('SELECT *\n'
                          'FROM TestBoxStatuses\n'
                          'WHERE idTestBox = %s\n',
                          (idTestBox,))
        if self._oDb.getRowCount() == 0:
            return None
        return TestBoxStatusData().initFromDbRow(self._oDb.fetchOne())

    def tryFetchStatusAndConfig(self, idTestBox, sTestBoxUuid, sTestBoxAddr):
        """
        Tries to fetch the testbox status and current testbox config.
        Returns (TestBoxStatusData, TestBoxData) on success, (None, None) if
        not found. May throw an exception on database error.
        """
        self._oDb.execute('SELECT TestBoxStatuses.*,\n'
                          ' TestBoxesWithStrings.*\n'
                          'FROM TestBoxStatuses,\n'
                          ' TestBoxesWithStrings\n'
                          'WHERE TestBoxStatuses.idTestBox = %s\n'
                          ' AND TestBoxesWithStrings.idTestBox = %s\n'
                          ' AND TestBoxesWithStrings.tsExpire = \'infinity\'::TIMESTAMP\n'
                          ' AND TestBoxesWithStrings.uuidSystem = %s\n'
                          ' AND TestBoxesWithStrings.ip = %s\n'
                          , ( idTestBox,
                              idTestBox,
                              sTestBoxUuid,
                              sTestBoxAddr,) )
        cRows = self._oDb.getRowCount()
        if cRows != 1:
            if cRows != 0:
                raise TMTooManyRows('tryFetchStatusForCommandReq got %s rows for idTestBox=%s' % (cRows, idTestBox))
            return (None, None)
        aoRow = self._oDb.fetchOne()
        # Split the combined row into its two component objects.
        return (TestBoxStatusData().initFromDbRow(aoRow[:TestBoxStatusData.kcDbColumns]),
                TestBoxData().initFromDbRow(aoRow[TestBoxStatusData.kcDbColumns:]))

    def insertIdleStatus(self, idTestBox, idGenTestBox, fCommit = False):
        """
        Inserts an idle status for the specified testbox.
        """
        self._oDb.execute('INSERT INTO TestBoxStatuses (\n'
                          ' idTestBox,\n'
                          ' idGenTestBox,\n'
                          ' enmState,\n'
                          ' idTestSet,\n'
                          ' iWorkItem)\n'
                          'VALUES ( %s,\n'
                          ' %s,\n'
                          ' \'idle\'::TestBoxState_T,\n'
                          ' NULL,\n'
                          ' 0)\n'
                          , (idTestBox, idGenTestBox) )
        self._oDb.maybeCommit(fCommit)
        return True

    def touchStatus(self, idTestBox, fCommit = False):
        """
        Touches the testbox status row, i.e. sets tsUpdated to the current time.
        """
        self._oDb.execute('UPDATE TestBoxStatuses\n'
                          'SET tsUpdated = CURRENT_TIMESTAMP\n'
                          'WHERE idTestBox = %s\n'
                          , (idTestBox,))
        self._oDb.maybeCommit(fCommit)
        return True

    def updateState(self, idTestBox, sNewState, idTestSet = None, fCommit = False):
        """
        Updates the testbox state (and optionally its current test set).
        """
        self._oDb.execute('UPDATE TestBoxStatuses\n'
                          'SET enmState = %s,\n'
                          ' idTestSet = %s,\n'
                          ' tsUpdated = CURRENT_TIMESTAMP\n'
                          'WHERE idTestBox = %s\n',
                          (sNewState, idTestSet, idTestBox))
        self._oDb.maybeCommit(fCommit)
        return True

    def updateGangStatus(self, idTestSetGangLeader, sNewState, fCommit = False):
        """
        Update the state of all members of a gang.
        """
        self._oDb.execute('UPDATE TestBoxStatuses\n'
                          'SET enmState = %s,\n'
                          ' tsUpdated = CURRENT_TIMESTAMP\n'
                          'WHERE idTestBox IN (SELECT idTestBox\n'
                          ' FROM TestSets\n'
                          ' WHERE idTestSetGangLeader = %s)\n'
                          , (sNewState, idTestSetGangLeader,) )
        self._oDb.maybeCommit(fCommit)
        return True

    def updateWorkItem(self, idTestBox, iWorkItem, fCommit = False):
        """
        Updates the testbox's current work item number.
        """
        self._oDb.execute('UPDATE TestBoxStatuses\n'
                          'SET iWorkItem = %s\n'
                          'WHERE idTestBox = %s\n'
                          , ( iWorkItem, idTestBox,))
        self._oDb.maybeCommit(fCommit)
        return True

    def isWholeGangDoneTesting(self, idTestSetGangLeader):
        """
        Checks if the whole gang is done testing.
        """
        self._oDb.execute('SELECT COUNT(*)\n'
                          'FROM TestBoxStatuses, TestSets\n'
                          'WHERE TestBoxStatuses.idTestSet = TestSets.idTestSet\n'
                          ' AND TestSets.idTestSetGangLeader = %s\n'
                          ' AND TestBoxStatuses.enmState IN (%s, %s)\n'
                          , ( idTestSetGangLeader,
                              TestBoxStatusData.ksTestBoxState_GangGathering,
                              TestBoxStatusData.ksTestBoxState_GangTesting))
        return self._oDb.fetchOne()[0] == 0

    def isTheWholeGangThere(self, idTestSetGangLeader):
        """
        Checks if the whole gang is done testing.
        """
        # NOTE(review): this is byte-for-byte the same query and test as
        # isWholeGangDoneTesting above (docstring included) -- looks like a
        # copy-paste; confirm the intended semantics before relying on it.
        self._oDb.execute('SELECT COUNT(*)\n'
                          'FROM TestBoxStatuses, TestSets\n'
                          'WHERE TestBoxStatuses.idTestSet = TestSets.idTestSet\n'
                          ' AND TestSets.idTestSetGangLeader = %s\n'
                          ' AND TestBoxStatuses.enmState IN (%s, %s)\n'
                          , ( idTestSetGangLeader,
                              TestBoxStatusData.ksTestBoxState_GangGathering,
                              TestBoxStatusData.ksTestBoxState_GangTesting))
        return self._oDb.fetchOne()[0] == 0

    def timeSinceLastChangeInSecs(self, oStatusData):
        """
        Figures the time since the last status change (whole seconds).
        """
        tsNow = self._oDb.getCurrentTimestamp()
        oDelta = tsNow - oStatusData.tsUpdated
        return oDelta.seconds + oDelta.days * 24 * 3600
#
# Unit testing.
#
# pylint: disable=C0111
class TestBoxStatusDataTestCase(ModelDataBaseTestCase):
    # Supplies sample instances for the generic ModelDataBaseTestCase checks.
    def setUp(self):
        self.aoSamples = [TestBoxStatusData(),];

if __name__ == '__main__':
    unittest.main();
    # not reached.
| miguelinux/vbox | src/VBox/ValidationKit/testmanager/core/testboxstatus.py | Python | gpl-2.0 | 12,082 |
#!/usr/bin/python
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Presidentielcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function, division
import json
import struct
import re
import os
import os.path
import base64
import httplib
import sys
import hashlib
import datetime
import time
from collections import namedtuple
# Parsed key=value config options (populated from the config file in __main__).
settings = {}
def uint32(x):
    """Truncate *x* to an unsigned 32-bit value."""
    return x & 0xffffffff
def bytereverse(x):
    """Swap the byte order of a 32-bit word (0xAABBCCDD -> 0xDDCCBBAA)."""
    swapped = ((x << 24) | ((x << 8) & 0x00ff0000) |
               ((x >> 8) & 0x0000ff00) | (x >> 24))
    # Mask back down to 32 bits (inlined uint32()).
    return swapped & 0xffffffff
def bufreverse(in_buf):
    """Byte-swap every 32-bit word of *in_buf*, keeping word order."""
    out_words = []
    for offset in range(0, len(in_buf), 4):
        (word,) = struct.unpack('@I', in_buf[offset:offset + 4])
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 4-byte words of *in_buf* (the buffer
    length is expected to be a multiple of 4)."""
    out_words = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
    out_words.reverse()
    return ''.join(out_words)
def calc_hdr_hash(blk_hdr):
    """Return the double-SHA256 digest of a raw 80-byte block header."""
    first_pass = hashlib.sha256(blk_hdr).digest()
    return hashlib.sha256(first_pass).digest()
def calc_hash_str(blk_hdr):
    """Return the block hash as the conventional big-endian hex string."""
    digest = calc_hdr_hash(blk_hdr)
    # Byte-swap each word, then reverse word order, to get display order.
    digest = wordreverse(bufreverse(digest))
    return digest.encode('hex')  # NOTE: Python 2 str API
def get_blk_dt(blk_hdr):
    """Extract the header timestamp (bytes 68..71, little-endian).

    Returns (datetime for the first day of that month, raw unix time).
    """
    (nTime,) = struct.unpack("<I", blk_hdr[68:68 + 4])
    dt = datetime.datetime.fromtimestamp(nTime)
    return (datetime.datetime(dt.year, dt.month, 1), nTime)
def get_block_hashes(settings):
    """Read the hash list file named by settings['hashlist'].

    Returns the hashes as a list of strings, one per input line, with
    trailing whitespace stripped.
    """
    blkindex = []
    # BUGFIX: use a context manager -- the file handle used to be leaked.
    with open(settings['hashlist'], "r") as f:
        for line in f:
            blkindex.append(line.rstrip())
    print("Read " + str(len(blkindex)) + " hashes")
    return blkindex
def mkblockmap(blkindex):
    """Map block hash -> chain height for every entry of *blkindex*."""
    return dict((blkhash, height) for height, blkhash in enumerate(blkindex))
# Block header and extent on disk:
#   fn     - input blk file number, offset - byte position of the payload,
#   inhdr  - 8-byte magic+length prefix, blkhdr - 80-byte block header,
#   size   - payload size in bytes (length field minus the 80-byte header).
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
	"""Copies blocks from the input blk*.dat files to linearized output.

	Blocks are emitted in chain order (per self.blkindex).  Blocks that
	appear on disk ahead of their height are buffered: in memory up to
	settings['out_of_order_cache_sz'] bytes, otherwise re-read from disk
	later via their recorded BlockExtent.
	"""
	def __init__(self, settings, blkindex, blkmap):
		self.settings = settings
		self.blkindex = blkindex	# list: height -> block hash
		self.blkmap = blkmap		# dict: block hash -> height
		self.inFn = 0			# current input blk file number
		self.inF = None			# current input file object
		self.outFn = 0			# current output file number
		self.outsz = 0			# bytes written to current output file
		self.outF = None
		self.outFname = None
		self.blkCountIn = 0
		self.blkCountOut = 0
		self.lastDate = datetime.datetime(2000, 1, 1)
		self.highTS = 1408893517 - 315360000	# highest block timestamp seen
		self.timestampSplit = False
		self.fileOutput = True
		self.setFileTime = False
		self.maxOutSz = settings['max_out_sz']
		if 'output' in settings:
			self.fileOutput = False
		if settings['file_timestamp'] != 0:
			self.setFileTime = True
		if settings['split_timestamp'] != 0:
			self.timestampSplit = True
		# Extents and cache for out-of-order blocks
		self.blockExtents = {}
		self.outOfOrderData = {}
		self.outOfOrderSize = 0 # running total size for items in outOfOrderData

	def writeBlock(self, inhdr, blk_hdr, rawblock):
		"""Append one block (magic+length prefix, header, payload) to the
		output, rolling to a new file on size or month boundaries."""
		blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
		if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
			self.outF.close()
			if self.setFileTime:
				# BUGFIX: these were bare names (outFname/highTS) and
				# raised NameError whenever file timestamping was on.
				os.utime(self.outFname, (int(time.time()), self.highTS))
			self.outF = None
			self.outFname = None
			self.outFn = self.outFn + 1
			self.outsz = 0

		(blkDate, blkTS) = get_blk_dt(blk_hdr)
		if self.timestampSplit and (blkDate > self.lastDate):
			# BUGFIX: hash_str was undefined in this scope; derive it
			# from the block header instead.
			print("New month " + blkDate.strftime("%Y-%m") + " @ " + calc_hash_str(blk_hdr))
			# BUGFIX: assignment was to a bare local and never persisted,
			# so every block started a "new month".
			self.lastDate = blkDate
			# BUGFIX: outF/setFileTime/outFname/highTS were bare names.
			if self.outF:
				self.outF.close()
			if self.setFileTime:
				os.utime(self.outFname, (int(time.time()), self.highTS))
			self.outF = None
			self.outFname = None
			self.outFn = self.outFn + 1
			self.outsz = 0

		if not self.outF:
			if self.fileOutput:
				outFname = self.settings['output_file']
			else:
				outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
			print("Output file " + outFname)
			self.outF = open(outFname, "wb")
			# BUGFIX: remember the name so os.utime() above can use it.
			self.outFname = outFname

		self.outF.write(inhdr)
		self.outF.write(blk_hdr)
		self.outF.write(rawblock)
		self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)

		self.blkCountOut = self.blkCountOut + 1
		if blkTS > self.highTS:
			self.highTS = blkTS

		if (self.blkCountOut % 1000) == 0:
			print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
					(self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))

	def inFileName(self, fn):
		"""Path of input file blk<fn>.dat under settings['input']."""
		return os.path.join(self.settings['input'], "blk%05d.dat" % fn)

	def fetchBlock(self, extent):
		'''Fetch block contents from disk given extents'''
		with open(self.inFileName(extent.fn), "rb") as f:
			f.seek(extent.offset)
			return f.read(extent.size)

	def copyOneBlock(self):
		'''Find the next block to be written in the input, and copy it to the output.'''
		extent = self.blockExtents.pop(self.blkCountOut)
		if self.blkCountOut in self.outOfOrderData:
			# If the data is cached, use it from memory and remove from the cache
			rawblock = self.outOfOrderData.pop(self.blkCountOut)
			self.outOfOrderSize -= len(rawblock)
		else: # Otherwise look up data on disk
			rawblock = self.fetchBlock(extent)

		self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)

	def run(self):
		"""Main loop: scan the input files, emitting blocks in height order."""
		while self.blkCountOut < len(self.blkindex):
			if not self.inF:
				fname = self.inFileName(self.inFn)
				print("Input file " + fname)
				try:
					self.inF = open(fname, "rb")
				except IOError:
					print("Premature end of block data")
					return

			inhdr = self.inF.read(8)
			if (not inhdr or (inhdr[0] == "\0")):
				self.inF.close()
				self.inF = None
				self.inFn = self.inFn + 1
				continue

			inMagic = inhdr[:4]
			if (inMagic != self.settings['netmagic']):
				print("Invalid magic: " + inMagic.encode('hex'))
				return
			inLenLE = inhdr[4:]
			su = struct.unpack("<I", inLenLE)
			inLen = su[0] - 80 # length without header
			blk_hdr = self.inF.read(80)
			inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)

			hash_str = calc_hash_str(blk_hdr)
			# BUGFIX: this used to test the bare global name 'blkmap',
			# which only worked by accident when run as a script.
			if not hash_str in self.blkmap:
				print("Skipping unknown block " + hash_str)
				self.inF.seek(inLen, os.SEEK_CUR)
				continue

			blkHeight = self.blkmap[hash_str]
			self.blkCountIn += 1

			if self.blkCountOut == blkHeight:
				# If in-order block, just copy
				rawblock = self.inF.read(inLen)
				self.writeBlock(inhdr, blk_hdr, rawblock)

				# See if we can catch up to prior out-of-order blocks
				while self.blkCountOut in self.blockExtents:
					self.copyOneBlock()

			else: # If out-of-order, skip over block data for now
				self.blockExtents[blkHeight] = inExtent
				if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
					# If there is space in the cache, read the data
					# Reading the data in file sequence instead of seeking and fetching it later is preferred,
					# but we don't want to fill up memory
					self.outOfOrderData[blkHeight] = self.inF.read(inLen)
					self.outOfOrderSize += inLen
				else: # If no space in cache, seek forward
					self.inF.seek(inLen, os.SEEK_CUR)

		print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
	if len(sys.argv) != 2:
		print("Usage: linearize-data.py CONFIG-FILE")
		sys.exit(1)
	# Parse the config file: "key = value" lines, '#' starts a comment line.
	f = open(sys.argv[1])
	for line in f:
		# skip comment lines
		m = re.search('^\s*#', line)
		if m:
			continue
		# parse key=value lines
		m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
		if m is None:
			continue
		settings[m.group(1)] = m.group(2)
	f.close()
	# Fill in defaults: mainnet magic bytes, genesis hash, 1 GB output chunks,
	# 100 MB out-of-order block cache.
	if 'netmagic' not in settings:
		settings['netmagic'] = 'f9beb4d9'
	if 'genesis' not in settings:
		settings['genesis'] = '000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f'
	if 'input' not in settings:
		settings['input'] = 'input'
	if 'hashlist' not in settings:
		settings['hashlist'] = 'hashlist.txt'
	if 'file_timestamp' not in settings:
		settings['file_timestamp'] = 0
	if 'split_timestamp' not in settings:
		settings['split_timestamp'] = 0
	if 'max_out_sz' not in settings:
		settings['max_out_sz'] = 1000L * 1000 * 1000
	if 'out_of_order_cache_sz' not in settings:
		settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
	# Normalize the string values read from the file (Python 2: long()/
	# str.decode('hex') turn the net magic into raw bytes).
	settings['max_out_sz'] = long(settings['max_out_sz'])
	settings['split_timestamp'] = int(settings['split_timestamp'])
	settings['file_timestamp'] = int(settings['file_timestamp'])
	settings['netmagic'] = settings['netmagic'].decode('hex')
	settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
	if 'output_file' not in settings and 'output' not in settings:
		print("Missing output file / directory")
		sys.exit(1)
	# Build the height-ordered hash index, then the hash -> height map.
	blkindex = get_block_hashes(settings)
	blkmap = mkblockmap(blkindex)
	if not settings['genesis'] in blkmap:
		print("Genesis block not found in hashlist")
	else:
		BlockDataCopier(settings, blkindex, blkmap).run()
| presidentielcoin/presidentielcoin | contrib/linearize/linearize-data.py | Python | mit | 8,836 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import vtk
import vtk.test.Testing
# ------------------------------------------------------------
# Purpose: Test more parametric functions.
# ------------------------------------------------------------
class TestMoreParametricFunctions(vtk.test.Testing.vtkTest):
    def _makeSurfaceActor(self, parametricFn, scalarMode, position):
        """Build the function-source -> mapper -> actor chain for one surface.

        *scalarMode* is the suffix of the vtkParametricFunctionSource
        SetScalarModeTo* method to invoke (e.g. "U", "V", "Y", "Z").
        *position* is the (x, y, z) world position of the actor.
        """
        source = vtk.vtkParametricFunctionSource()
        source.SetParametricFunction(parametricFn)
        getattr(source, "SetScalarModeTo" + scalarMode)()
        mapper = vtk.vtkPolyDataMapper()
        mapper.SetInputConnection(source.GetOutputPort())
        actor = vtk.vtkActor()
        actor.SetMapper(mapper)
        actor.SetPosition(*position)
        return actor

    def _makeLabel(self, text, x, y):
        """Build a red, centered, 14pt 2D text label anchored at world (x, y, 0)."""
        textMapper = vtk.vtkTextMapper()
        textMapper.SetInput(text)
        prop = textMapper.GetTextProperty()
        prop.SetJustificationToCentered()
        prop.SetVerticalJustificationToCentered()
        prop.SetColor(1, 0, 0)
        prop.SetFontSize(14)
        textActor = vtk.vtkActor2D()
        textActor.SetMapper(textMapper)
        textActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
        textActor.GetPositionCoordinate().SetValue(x, y, 0)
        return textActor

    def testMoreParametricFunctions(self):
        """Test more parametric functions.

        For each parametric surface:
        1) Create it
        2) Assign mappers and actors (via _makeSurfaceActor)
        3) Position the object
        4) Add a label (via _makeLabel)
        Then render all of them and compare against the baseline image.
        """
        # Kuen's surface.
        kuen = vtk.vtkParametricKuen()
        kuenActor = self._makeSurfaceActor(kuen, "U", (0, -19, 0))
        kuenActor.RotateX(90)
        kuenTextActor = self._makeLabel("Kuen", 0, -22.5)

        # Pseudosphere, restricted in U.
        pseudo = vtk.vtkParametricPseudosphere()
        pseudo.SetMinimumU(-3)
        pseudo.SetMaximumU(3)
        pseudoActor = self._makeSurfaceActor(pseudo, "Y", (8, -19, 0))
        pseudoActor.RotateX(90)
        pseudoTextActor = self._makeLabel("Pseudosphere", 8, -22.5)

        # Bohemian dome.
        bdome = vtk.vtkParametricBohemianDome()
        bdomeActor = self._makeSurfaceActor(bdome, "U", (16, -19, 0))
        bdomeActor.RotateY(90)
        bdomeTextActor = self._makeLabel("Bohemian Dome", 16, -22.5)

        # Henneberg's minimal surface, restricted in U.
        hberg = vtk.vtkParametricHenneberg()
        hberg.SetMinimumU(-.3)
        hberg.SetMaximumU(.3)
        hbergActor = self._makeSurfaceActor(hberg, "V", (24, -19, 0))
        hbergActor.RotateY(90)
        hbergTextActor = self._makeLabel("Henneberg", 24, -22.5)

        # Catalan's minimal surface over two full periods, at half scale.
        catalan = vtk.vtkParametricCatalanMinimal()
        catalan.SetMinimumU(-2.*math.pi)
        catalan.SetMaximumU( 2.*math.pi)
        catalanActor = self._makeSurfaceActor(catalan, "V", (0, -27, 0))
        catalanActor.SetScale(.5, .5, .5)
        catalanTextActor = self._makeLabel("Catalan", 0, -30.5)

        # Bour's minimal surface.
        bour = vtk.vtkParametricBour()
        bourActor = self._makeSurfaceActor(bour, "U", (8, -27, 0))
        bourTextActor = self._makeLabel("Bour", 8, -30.5)

        # Plucker's conoid.
        plucker = vtk.vtkParametricPluckerConoid()
        pluckerActor = self._makeSurfaceActor(plucker, "Z", (16, -27, 0))
        pluckerTextActor = self._makeLabel("Plucker", 16, -30.5)

        # Create the RenderWindow, Renderer and interactor.
        ren = vtk.vtkRenderer()
        renWin = vtk.vtkRenderWindow()
        renWin.AddRenderer(ren)
        iren = vtk.vtkRenderWindowInteractor()
        iren.SetRenderWindow(renWin)

        # Add the surface actors, then the text actors.
        for actor in (kuenActor, pseudoActor, bdomeActor, hbergActor,
                      catalanActor, bourActor, pluckerActor):
            ren.AddViewProp(actor)
        for actor in (kuenTextActor, pseudoTextActor, bdomeTextActor,
                      hbergTextActor, catalanTextActor, bourTextActor,
                      pluckerTextActor):
            ren.AddViewProp(actor)

        ren.SetBackground(0.9, 0.9, 0.9)
        renWin.SetSize(500, 500)
        ren.ResetCamera()
        iren.Initialize()
        renWin.Render()

        # Compare the rendered image to the stored baseline.
        img_file = "TestMoreParametricFunctions.png"
        vtk.test.Testing.compareImage(iren.GetRenderWindow(),
                vtk.test.Testing.getAbsImagePath(img_file), threshold=10)
        vtk.test.Testing.interact()
if __name__ == "__main__":
    # Delegate to the VTK test harness (handles image comparison and -I interact).
    vtk.test.Testing.main([(TestMoreParametricFunctions, 'test')])
| hlzz/dotfiles | graphics/VTK-7.0.0/Common/ComputationalGeometry/Testing/Python/TestMoreParametricFunctions.py | Python | bsd-3-clause | 11,232 |
# Sample picture data: each inner list is one COLUMN of the picture
# ('O' = filled, '.' = empty); print_grid transposes it for display.
grid = [['.', '.', '.', '.', '.', '.'],
        ['.', 'O', 'O', '.', '.', '.'],
        ['O', 'O', 'O', 'O', '.', '.'],
        ['O', 'O', 'O', 'O', 'O', '.'],
        ['.', 'O', 'O', 'O', 'O', 'O'],
        ['O', 'O', 'O', 'O', 'O', '.'],
        ['O', 'O', 'O', 'O', '.', '.'],
        ['.', 'O', 'O', '.', '.', '.'],
        ['.', '.', '.', '.', '.', '.']]
def print_grid(some_grid):
    """Print *some_grid* transposed: each inner list is rendered as a column.

    Bug fix: the body previously indexed the module-level ``grid`` instead of
    the ``some_grid`` parameter, so any other argument was silently ignored.
    An empty grid now prints nothing instead of raising IndexError.
    """
    if not some_grid:
        return
    row_length = len(some_grid)
    column_length = len(some_grid[0])
    for i in range(column_length):
        for j in range(row_length):
            print(some_grid[j][i], end='')
        print('')
print_grid(grid)
| sallyyoo/ced2 | py/practice/print_grid.py | Python | mit | 541 |
#!/usr/bin/env python
#encoding: utf-8
import chardataeffect, inkex, string
# Map of each Latin letter to its Grade-1 braille cell (Unicode braille
# pattern characters); Python 2 unicode() decodes the UTF-8 source bytes.
convert_table = {\
'a': unicode("⠁", "utf-8"),\
'b': unicode("⠃", "utf-8"),\
'c': unicode("⠉", "utf-8"),\
'd': unicode("⠙", "utf-8"),\
'e': unicode("⠑", "utf-8"),\
'f': unicode("⠋", "utf-8"),\
'g': unicode("⠛", "utf-8"),\
'h': unicode("⠓", "utf-8"),\
'i': unicode("⠊", "utf-8"),\
'j': unicode("⠚", "utf-8"),\
'k': unicode("⠅", "utf-8"),\
'l': unicode("⠇", "utf-8"),\
'm': unicode("⠍", "utf-8"),\
'n': unicode("⠝", "utf-8"),\
'o': unicode("⠕", "utf-8"),\
'p': unicode("⠏", "utf-8"),\
'q': unicode("⠟", "utf-8"),\
'r': unicode("⠗", "utf-8"),\
's': unicode("⠎", "utf-8"),\
't': unicode("⠞", "utf-8"),\
'u': unicode("⠥", "utf-8"),\
'v': unicode("⠧", "utf-8"),\
'w': unicode("⠺", "utf-8"),\
'x': unicode("⠭", "utf-8"),\
'y': unicode("⠽", "utf-8"),\
'z': unicode("⠵", "utf-8"),\
}
class C(chardataeffect.CharDataEffect):
  """Inkscape text effect that replaces every Latin letter with its braille cell.

  Characters without a braille mapping (digits, punctuation, whitespace)
  pass through unchanged; case is ignored.
  """
  def process_chardata(self, text, line, par):
    # dict.get with the character itself as fallback replaces the deprecated
    # has_key() call, and join avoids the quadratic string concatenation of
    # the original loop. Works on Python 2 (the file's dialect).
    return ''.join(convert_table.get(c.lower(), c) for c in text)
# Run the extension: affect() applies process_chardata to the selected text.
c = C()
c.affect()
| piksels-and-lines-orchestra/inkscape | share/extensions/text_braille.py | Python | gpl-2.0 | 1,169 |
from django.core.urlresolvers import reverse
from django.db import models
from .base import BaseDatedModel
from .staff import Staff
class BaseItem(BaseDatedModel):
    """Abstract base for authored site content (news, blog posts, publications).

    Concrete subclasses add their payload fields and a ``category`` letter
    used when reversing the item view URL.
    """
    # Editorial workflow flags; authorize()/suspend() only mutate the
    # in-memory instance -- callers are expected to save() afterwards.
    authorized = models.BooleanField(default=False)
    deleted = models.BooleanField(default=False)
    author = models.ForeignKey(Staff)
    # NOTE(review): Django does not enforce max_length on TextField at the
    # database level -- presumably these limits are meant for form validation;
    # confirm before relying on them.
    title = models.TextField(max_length=140)
    description = models.TextField(max_length=280)
    class Meta:
        abstract = True
    def authorize(self):
        """Mark the item as authorized (does not save)."""
        self.authorized = True
    def suspend(self):
        """Clear the authorized flag again (does not save)."""
        self.authorized = False
    def __str__(self):
        return self.title
    def get_url(self):
        """Return the item's canonical URL; concrete subclasses must implement."""
        raise NotImplementedError
class NewsItem(BaseItem):
    """A news item, routed through the generic item view with category 'N'."""
    category = 'N'
    text = models.TextField()
    def get_url(self):
        url_kwargs = {'category': self.category, 'item_id': str(self.id)}
        return reverse('revuo:item_view', kwargs=url_kwargs)
class BlogItem(BaseItem):
    """A blog post, routed through the generic item view with category 'B'."""
    category = 'B'
    text = models.TextField()
    def get_url(self):
        url_kwargs = {'category': self.category, 'item_id': str(self.id)}
        return reverse('revuo:item_view', kwargs=url_kwargs)
def publication_destination(instance, filename):
    """Upload path for a Publication attachment: '<author id>_<original filename>'."""
    return "%s_%s" % (instance.author.id, filename)
class Publication(BaseItem):
    """A publication with an uploaded attachment, category 'P'."""
    category = 'P'
    attachment = models.FileField(upload_to=publication_destination)
    def get_url(self):
        url_kwargs = {'category': self.category, 'item_id': str(self.id)}
        return reverse('revuo:item_view', kwargs=url_kwargs)
| Lasanha/revuo | revuo/models/item.py | Python | bsd-3-clause | 1,425 |
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from cairis.core.ARM import ARMException
from cairis.tools.quickSetup import quick_setup
from sqlalchemy.exc import SQLAlchemyError
import npyscreen as np
import os
import sys
__author__ = 'Shamal Faily'
class CAIRISDatabaseConfigurationForm(np.ActionForm):
  """npyscreen form that collects database/server settings plus an initial
  user account and hands them to quick_setup when OK is pressed."""
  # quick_setup_headless.py --dbHost self.theHost --dbport self.thePort --dbRootPassword self.theRootPassword --tmpdir self.theTmpDir --rootDir self.theRootDir --imageDir self.theImageDir --configFile self.theFileName --webPort self.theWebPort --logLevel self.theLogLevel --staticDir self.theStaticDir --uploadDir self.theUploadDir --user self.theUsername --password self.thePassword
  def create(self):
    """Build the form widgets; declaration order is the on-screen order."""
    self.findRootDir()
    # Directory containing this script, used to default the root/static dirs.
    pathName = os.path.realpath(__file__)
    pathName = pathName.replace("quick_setup.py", "")
    self.name = "Configure CAIRIS database and initial account"
    self.theHost = self.add(np.TitleText, name = "Database host:", value = "localhost")
    self.thePort = self.add(np.TitleText, name = "Database port:", value = "3306")
    self.theRootPassword = self.add(np.TitlePassword, name = "Database root password:", value = "")
    self.theTmpDir = self.add(np.TitleText, name = "Temp directory:", value = "/tmp")
    self.theRootDir = self.add(np.TitleText, name = "Root directory:", value = pathName + "cairis")
    self.theImageDir = self.add(np.TitleText, name = "Default image directory:", value = "/tmp")
    self.theFileName = self.add(np.TitleText, name = "CAIRIS configuration file name:", value = os.environ.get("HOME") + "/cairis.cnf")
    self.theWebPort = self.add(np.TitleText,name = "Web port:", value = "7071")
    self.theLogLevel = self.add(np.TitleText,name = "Log level:", value = "debug");
    self.theStaticDir = self.add(np.TitleText,name = "Static directory:", value = pathName + "cairis/dist")
    self.theAssetDir = self.add(np.TitleText,name = "Asset directory:", value = pathName + "cairis/dist")
    self.theUploadDir = self.add(np.TitleText,name = "Upload directory:", value = "/tmp")
    self.theUsername = self.add(np.TitleText, name = "Initial Username:", value = "")
    self.thePassword = self.add(np.TitlePassword, name = "Initial Password:", value = "")
  def findRootDir(self):
    """Locate the installed cairis package directory on sys.path.

    NOTE(review): self.defaultRootDir is computed here but not referenced by
    create(), which derives its default from __file__ instead -- possibly
    vestigial; confirm before removing.
    """
    self.defaultRootDir = "/usr/local/lib/python2.7/dist-packages/cairis"
    for cpath in sys.path:
      if "/dist-packages/cairis-" in cpath and cpath.endswith(".egg"):
        self.defaultRootDir = os.path.join(cpath, "cairis")
        break
  def on_ok(self):
    """Run quick_setup with the entered values; stay on the form on error."""
    try:
      quick_setup(self.theHost.value,int(self.thePort.value),self.theRootPassword.value,self.theTmpDir.value,self.theRootDir.value,self.theImageDir.value,self.theFileName.value,int(self.theWebPort.value),self.theLogLevel.value,self.theStaticDir.value,self.theAssetDir.value,self.theUploadDir.value,self.theUsername.value,self.thePassword.value)
      # Setting the next form to None exits the npyscreen application.
      self.parentApp.setNextForm(None)
    except ARMException as e:
      np.notify_confirm(str(e), title = 'Error')
    except SQLAlchemyError as e:
      np.notify_confirm('Error adding CAIRIS user: ' + str(e), title = 'Error')
  def on_cancel(self):
    """Abort configuration and exit the application."""
    self.parentApp.setNextForm(None)
class CAIRISConfigurationApp(np.NPSAppManaged):
  """npyscreen application wrapper that shows the configuration form."""
  def onStart(self):
    # "MAIN" is the form npyscreen displays first.
    self.addForm("MAIN",CAIRISDatabaseConfigurationForm)
def main(args=None):
  """Entry point: run the curses-based configuration UI.

  NOTE(review): *args* is normalized from sys.argv but never used afterwards
  -- presumably kept for a console_scripts-style signature; confirm.
  """
  if args is None:
    args = sys.argv[1:]
  App = CAIRISConfigurationApp()
  try:
    App.run()
  except np.wgwidget.NotEnoughSpaceForWidget:
    # Raised by npyscreen when the terminal is too small to draw the form.
    # (Python 2 print statement -- this file targets Python 2.)
    print "The terminal window is too small to display the configuration form, please resize it and try again."
if __name__ == '__main__':
  main()
| nathanbjenx/cairis | quick_setup.py | Python | apache-2.0 | 4,394 |
"Django admin support for treebeard"
from django.contrib import admin, messages
from django.contrib.admin.views.main import ChangeList
from django.conf.urls.defaults import url, patterns
from django.http import HttpResponseBadRequest, HttpResponse
from treebeard.forms import MoveNodeForm
from treebeard.templatetags.admin_tree import check_empty_dict
from treebeard.exceptions import InvalidPosition, MissingNodeOrderBy, InvalidMoveToDescendant, PathOverflow
class TreeChangeList(ChangeList):
    def get_ordering(self):
        """
        Overriding default's ChangeList.get_ordering so we don't sort the
        results by '-id' as default: with no active filters/params, keep the
        tree's natural ascending order.
        """
        if check_empty_dict(self.params):
            return None, 'asc'
        return super(TreeChangeList, self).get_ordering()
class TreeAdmin(admin.ModelAdmin):
    "Django Admin class for treebeard"
    change_list_template = 'admin/tree_change_list.html'
    form = MoveNodeForm
    def get_changelist(self, request):
        # Use the changelist that suppresses the default '-id' ordering.
        return TreeChangeList
    def queryset(self, request):
        """Return the nodes to list: the full tree for non-AL tree types."""
        from treebeard.al_tree import AL_Node
        if issubclass(self.model, AL_Node):
            # AL Trees return a list instead of a QuerySet for .get_tree()
            # So we're returning the regular .queryset cause we will use
            # the old admin
            return super(TreeAdmin, self).queryset(request)
        else:
            return self.model.get_tree()
    def changelist_view(self, request, extra_context=None):
        from treebeard.al_tree import AL_Node
        if issubclass(self.model, AL_Node):
            # For AL trees, use the old admin display
            self.change_list_template = 'admin/tree_list.html'
        return super(TreeAdmin, self).changelist_view(request, extra_context)
    def get_urls(self):
        """
        Adds a url to move nodes to this admin
        """
        urls = super(TreeAdmin, self).get_urls()
        new_urls = patterns('',
            url('^move/$',
                self.admin_site.admin_view(self.move_node),),
        )
        return new_urls + urls
    def move_node(self, request):
        """AJAX endpoint: move node_id next to (or as a child of) sibling_id.

        Expects POST params node_id, parent_id, sibling_id and an optional
        as_child flag ("0"/"1"). Returns 200 "OK" on success or 400 on
        failure (the client reloads to show the queued message).
        Note: uses Python 2 ``except X, e`` syntax.
        """
        try:
            node_id = request.POST['node_id']
            parent_id = request.POST['parent_id']
            sibling_id = request.POST['sibling_id']
            as_child = request.POST.get('as_child', False)
            as_child = bool(int(as_child))
        except (KeyError, ValueError), e:
            # Some parameters were missing return a BadRequest
            return HttpResponseBadRequest(u'Malformed POST params')
        node = self.model.objects.get(pk=node_id)
        # Parent is not used at this time, need to handle special case
        # for root elements that do not have a parent
        #parent = self.model.objects.get(pk=parent_id)
        sibling = self.model.objects.get(pk=sibling_id)
        try:
            try:
                if as_child:
                    node.move(sibling, pos='target')
                else:
                    node.move(sibling, pos='left')
            except InvalidPosition, e:
                # This could be due two reasons (from the docs):
                # :raise InvalidPosition: when passing an invalid ``pos`` parm
                # :raise InvalidPosition: when :attr:`node_order_by` is enabled and
                # the``pos`` parm wasn't ``sorted-sibling`` or ``sorted-child``
                #
                # If it happened because the node is not a 'sorted-sibling' or
                # 'sorted-child' then try to move just a child without preserving the
                # order, so try a different move
                if as_child:
                    try:
                        # Try as unsorted tree
                        node.move(sibling, pos='last-child')
                    except InvalidPosition:
                        # We are talking about a sorted tree
                        node.move(sibling, pos='sorted-child')
                else:
                    node.move(sibling)
            # If we are here, means that we moved it in onf of the tries
            messages.info(request, u'Moved node "%s" as %s of "%s"' % (node,
                ('sibling', 'child')[as_child], sibling))
        except (MissingNodeOrderBy, PathOverflow, InvalidMoveToDescendant,
                InvalidPosition), e:
            # An error was raised while trying to move the node, then set an
            # error message and return 400, this will cause a reload on the client
            # to show the message
            messages.error(request, u'Exception raised while moving node: %s' % e)
            return HttpResponseBadRequest(u'Exception raised during move')
        return HttpResponse('OK')
| Ernsting/django-treebeard | treebeard/admin.py | Python | apache-2.0 | 4,728 |
# encoding: utf-8
# module gtk.gdk
# from /usr/lib/python2.7/dist-packages/gtk-2.0/gtk/_gtk.so
# by generator 1.135
# no doc
# imports
from exceptions import Warning
import gio as __gio
import gobject as __gobject
import gobject._gobject as __gobject__gobject
import pango as __pango
import pangocairo as __pangocairo
class DisplayManager(__gobject__gobject.GObject):
    # Auto-generated IDE stub: the real implementation lives in the gtk._gtk
    # C extension, so every body below is a placeholder and the generator
    # could not recover the real signatures.
    """
    Object GdkDisplayManager
    Signals from GdkDisplayManager:
    display-opened (GdkDisplay)
    Properties from GdkDisplayManager:
    default-display -> GdkDisplay: Default Display
    The default display for GDK
    Signals from GObject:
    notify (GParam)
    """
    @classmethod
    def do_display_opened(cls, *args, **kwargs): # real signature unknown
        pass
    def get_default_display(self, *args, **kwargs): # real signature unknown
        pass
    def list_displays(self, *args, **kwargs): # real signature unknown
        pass
    def set_default_display(self, *args, **kwargs): # real signature unknown
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    __gtype__ = None # (!) real value is ''
| ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/gtk/gdk/DisplayManager.py | Python | gpl-2.0 | 1,176 |
# Copyright 2011 Colin Scott
# Copyright 2011 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
These are example uses of the recoco cooperative threading library. Hopefully
they will save time for developers getting used to the POX environment.
"""
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.revent import *
from pox.lib.recoco import *
class EventLoopExample (Task):
  """
  Suppose we have a component of our application that uses it's own event
  loop. recoco allows us to "add" our select loop to the other event
  loops running within pox.
  First note that we inherit from Task. The Task class is recoco's equivalent
  of python's threading.thread interface.
  """
  def __init__(self):
    Task.__init__(self) # call our superconstructor
    self.sockets = self.get_sockets() # ... the sockets to listen to events on
    # Note! We can't start our event loop until the core is up. Therefore,
    # we'll add an event handler.
    # Bug fix: the module only did `from pox.core import core`, so the bare
    # name `pox` was unbound and `pox.core.GoingUpEvent` raised a NameError.
    # Importing the module here makes the attribute reference valid.
    import pox.core
    core.addListener(pox.core.GoingUpEvent, self.start_event_loop)
  def start_event_loop(self, event):
    """
    Takes a second parameter: the GoingUpEvent object (which we ignore)
    """
    # This causes us to be added to the scheduler's recurring Task queue
    Task.start(self)
  def get_sockets(self):
    """Return the sockets to select on (none in this example)."""
    return []
  def handle_read_events(self):
    """Hook invoked when at least one of our sockets is readable."""
    pass
  def run(self):
    """
    run() is the method that gets called by the scheduler to execute this task
    """
    while core.running:
      """
      This looks almost exactly like python's select.select, except that it's
      it's handled cooperatively by recoco
      The only difference in Syntax is the "yield" statement, and the
      capital S on "Select"
      """
      rlist,wlist,elist = yield Select(self.sockets, [], [], 3)
      events = []
      for read_sock in rlist:
        if read_sock in self.sockets:
          events.append(read_sock)
      if events:
        self.handle_read_events() # ...
"""
And that's it!
TODO: write example usages of the other recoco BlockingTasks, e.g. recoco.Sleep
"""
| twood02/adv-net-samples | sdn/pox/pox/lib/recoco/examples.py | Python | mit | 2,636 |
""" Utilities for writing plugins.
This is different from bokeh.pluginutils because these are ways of
patching routes and objects directly into the bokeh server. You
would run this type of code using the --script option
"""
import uuid
from flask import abort, render_template
from bokeh.exceptions import DataIntegrityException
from bokeh.resources import Resources
from ..app import bokeh_app
from ..views.backbone import init_bokeh
from ..views.main import _makedoc
def object_page(prefix):
    """ Decorator for a function which turns an object into a web page
    from bokeh.server.app import bokeh_app
    @bokeh_app.route("/myapp")
    @object_page("mypage")
    def make_object():
        #make some bokeh object here
        return obj
    This decorator will
    - create a randomized title for a bokeh document using the prefix
    - initialize bokeh plotting libraries to use that document
    - call the function you pass in, add that object to the plot context
    - render that object in a web page
    """
    from functools import wraps
    def decorator(func):
        # functools.wraps preserves __name__ (which flask uses for the route
        # endpoint) as well as __doc__/__module__, unlike the previous manual
        # `wrapper.__name__ = func.__name__` assignment.
        @wraps(func)
        def wrapper(*args, **kwargs):
            ## setup the randomly titled document
            docname = prefix + str(uuid.uuid4())
            bokehuser = bokeh_app.current_user()
            try:
                doc = _makedoc(bokeh_app.servermodel_storage, bokehuser, docname)
            except DataIntegrityException as e:
                # 409: a document with this (random) name already exists.
                return abort(409, e.message)
            docid = doc.docid
            clientdoc = bokeh_app.backbone_storage.get_document(docid)
            ## initialize our plotting APIs to use that document
            init_bokeh(clientdoc)
            obj = func(*args, **kwargs)
            clientdoc.add(obj)
            bokeh_app.backbone_storage.store_document(clientdoc)
            # Some objects carry extra generated JS classes; default to none.
            extra_generated_classes = getattr(obj, 'extra_generated_classes', [])
            resources = Resources()
            return render_template("oneobj.html",
                                   elementid=str(uuid.uuid4()),
                                   docid=docid,
                                   objid=obj._id,
                                   hide_navbar=True,
                                   extra_generated_classes=extra_generated_classes,
                                   splitjs=bokeh_app.splitjs,
                                   username=bokehuser.username,
                                   loglevel=resources.log_level)
        return wrapper
    return decorator
| jakevdp/bokeh | bokeh/server/utils/plugins.py | Python | bsd-3-clause | 2,634 |
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re, threading
from future_builtins import map
from calibre import browser, random_user_agent
from calibre.customize import Plugin
from calibre.utils.icu import capitalize, lower, upper
from calibre.ebooks.metadata import check_isbn
def create_log(ostream=None):
    """Create a thread-safe DEBUG-level log whose single output writes to *ostream*."""
    from calibre.utils.logging import ThreadSafeLog, FileStream
    ans = ThreadSafeLog(level=ThreadSafeLog.DEBUG)
    ans.outputs = [FileStream(ostream)]
    return ans
# Comparing Metadata objects for relevance {{{
# Leading articles/conjunctions stripped from titles before comparison.
words = ("the", "a", "an", "of", "and")
prefix_pat = re.compile(r'^(%s)\s+'%("|".join(words)))
# Trailing parenthesised qualifier, e.g. "Title (unabridged)".
trailing_paren_pat = re.compile(r'\(.*\)$')
whitespace_pat = re.compile(r'\s+')
def cleanup_title(s):
    """Normalize a title for comparison: lowercase, strip a leading article,
    drop a trailing parenthesised qualifier and collapse whitespace."""
    if not s:
        s = _('Unknown')
    title = s.strip().lower()
    title = prefix_pat.sub(' ', title)
    title = trailing_paren_pat.sub('', title)
    return whitespace_pat.sub(' ', title).strip()
class InternalMetadataCompareKeyGen(object):
    '''
    Generate a sort key for comparison of the relevance of Metadata objects,
    given a search query. This is used only to compare results from the same
    metadata source, not across different sources.
    The sort key ensures that an ascending order sort is a sort by order of
    decreasing relevance.
    The algorithm is:
    * Prefer results that have the same ISBN as specified in the query
    * Prefer results with a cached cover URL
    * Prefer results with all available fields filled in
    * Prefer results that are an exact title match to the query
    * Prefer results with longer comments (greater than 10% longer)
    * Use the relevance of the result as reported by the metadata source's search
    engine
    '''
    def __init__(self, mi, source_plugin, title, authors, identifiers):
        # Each criterion is encoded as 1 (preferred) or 2 (not preferred) so
        # that an ascending sort places the most relevant results first.
        isbn = 1 if mi.isbn and mi.isbn == identifiers.get('isbn', None) else 2
        all_fields = 1 if source_plugin.test_fields(mi) is None else 2
        exact_title = 1 if title and \
                cleanup_title(title) == cleanup_title(mi.title) else 2
        has_cover = 2 if (not source_plugin.cached_cover_url_is_reliable or
                source_plugin.get_cached_cover_url(mi.identifiers) is None) else 1
        self.base = (isbn, has_cover, all_fields, exact_title)
        self.comments_len = len(mi.comments.strip() if mi.comments else '')
        self.extra = (getattr(mi, 'source_relevance', 0), )
    def __cmp__(self, other):
        # Python 2 three-way comparison (__cmp__ has no effect on Python 3).
        result = cmp(self.base, other.base)
        if result == 0:
            # Now prefer results with the longer comments, within 10%
            cx, cy = self.comments_len, other.comments_len
            t = (cx + cy) / 20
            delta = cy - cx
            if abs(delta) > t:
                result = delta
            else:
                # Tie-break on the source engine's own relevance ordering.
                result = cmp(self.extra, other.extra)
        return result
# }}}
def get_cached_cover_urls(mi):
    '''
    Yield ``(plugin, cover_url)`` pairs for every identify-capable metadata
    plugin that has a cached cover URL for the book described by *mi*.
    '''
    from calibre.customize.ui import metadata_plugins
    for plugin in list(metadata_plugins(['identify'])):
        cached = plugin.get_cached_cover_url(mi.identifiers)
        if cached:
            yield (plugin, cached)
def dump_caches():
    '''
    Return a mapping of plugin name -> that plugin's serialized caches, for
    every identify-capable metadata plugin.
    '''
    from calibre.customize.ui import metadata_plugins
    result = {}
    for plugin in metadata_plugins(['identify']):
        result[plugin.name] = plugin.dump_caches()
    return result
def load_caches(dump):
    '''
    Restore per-plugin caches from a dict produced by :func:`dump_caches`.
    Plugins missing from *dump* are left untouched.
    '''
    from calibre.customize.ui import metadata_plugins
    for plugin in list(metadata_plugins(['identify'])):
        cached = dump.get(plugin.name, None)
        if cached:
            plugin.load_caches(cached)
def cap_author_token(token):
    # Capitalize a single token of an author name, handling name particles,
    # initials, Scottish prefixes and hyphen/apostrophe compounds.
    lt = lower(token)
    # Name particles stay lowercase.
    if lt in ('von', 'de', 'el', 'van', 'le'):
        return lt
    # no digits no spez. characters
    if re.match(r'([^\d\W]\.){2,}$', lt, re.UNICODE) is not None:
        # Normalize tokens of the form J.K. to J. K.
        parts = token.split('.')
        return '. '.join(map(capitalize, parts)).strip()
    # Detect Mc/Mac prefixes: remember the index of the letter that must be
    # re-uppercased after capitalize() lowercases the rest of the token.
    scots_name = None
    for x in ('mc', 'mac'):
        if (token.lower().startswith(x) and len(token) > len(x) and
                (
                    # Either the next letter was already uppercase (McDonald)
                    # or the whole token was lowercase input.
                    token[len(x)] == upper(token[len(x)]) or
                    lt == token
                )):
            scots_name = len(x)
            break
    ans = capitalize(token)
    if scots_name is not None:
        ans = ans[:scots_name] + upper(ans[scots_name]) + ans[scots_name+1:]
    # Uppercase the letter following a hyphen or apostrophe (e.g. O'Brien).
    for x in ('-', "'"):
        idx = ans.find(x)
        if idx > -1 and len(ans) > idx+2:
            ans = ans[:idx+1] + upper(ans[idx+1]) + ans[idx+2:]
    return ans
def fixauthors(authors):
    '''
    Return a new list of author names with every whitespace-separated token
    capitalized via :func:`cap_author_token`. Falsy input is returned as-is.
    '''
    if not authors:
        return authors
    return [' '.join(cap_author_token(tok) for tok in author.split())
            for author in authors]
def fixcase(x):
    '''
    Title-case *x* when it is a non-empty string; falsy values are passed
    through unchanged.
    '''
    if not x:
        return x
    from calibre.utils.titlecase import titlecase
    return titlecase(x)
class Option(object):
    __slots__ = ['type', 'default', 'label', 'desc', 'name', 'choices']
    def __init__(self, name, type_, default, label, desc, choices=None):
        '''
        :param name: The name of this option. Must be a valid python identifier
        :param type_: The type of this option, one of ('number', 'string',
                      'bool', 'choices')
        :param default: The default value for this option
        :param label: A short (few words) description of this option
        :param desc: A longer description of this option
        :param choices: A dict of possible values, used only if type='choices'.
                        dict is of the form {key:human readable label, ...}
        '''
        self.name = name
        self.type = type_
        self.default = default
        self.label = label
        self.desc = desc
        # A non-empty, non-dict iterable becomes an identity mapping.
        if choices and not isinstance(choices, dict):
            choices = {x: x for x in choices}
        self.choices = choices
class Source(Plugin):

    '''
    Base class for metadata source plugins. Subclasses implement
    :meth:`identify` (and optionally :meth:`download_cover`) and describe
    their abilities via the class attributes below.
    '''

    type = _('Metadata source')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']

    #: Set of capabilities supported by this plugin.
    #: Useful capabilities are: 'identify', 'cover'
    capabilities = frozenset()

    #: List of metadata fields that can potentially be download by this plugin
    #: during the identify phase
    touched_fields = frozenset()

    #: Set this to True if your plugin returns HTML formatted comments
    has_html_comments = False

    #: Setting this to True means that the browser object will add
    #: Accept-Encoding: gzip to all requests. This can speedup downloads
    #: but make sure that the source actually supports gzip transfer encoding
    #: correctly first
    supports_gzip_transfer_encoding = False

    #: Cached cover URLs can sometimes be unreliable (i.e. the download could
    #: fail or the returned image could be bogus. If that is often the case
    #: with this source set to False
    cached_cover_url_is_reliable = True

    #: A list of :class:`Option` objects. They will be used to automatically
    #: construct the configuration widget for this plugin
    options = ()

    #: A string that is displayed at the top of the config widget for this
    #: plugin
    config_help_message = None

    #: If True this source can return multiple covers for a given query
    can_get_multiple_covers = False

    def __init__(self, *args, **kwargs):
        Plugin.__init__(self, *args, **kwargs)
        self.running_a_test = False  # Set to True when using identify_test()
        self._isbn_to_identifier_cache = {}
        self._identifier_to_cover_url_cache = {}
        # Guards both caches; sources may be queried from multiple threads.
        self.cache_lock = threading.RLock()
        self._config_obj = None
        self._browser = None
        self.prefs.defaults['ignore_fields'] = []
        for opt in self.options:
            self.prefs.defaults[opt.name] = opt.default

    # Configuration {{{
    def is_configured(self):
        '''
        Return False if your plugin needs to be configured before it can be
        used. For example, it might need a username/password/API key.
        '''
        return True

    def is_customizable(self):
        return True

    def customization_help(self):
        return 'This plugin can only be customized using the GUI'

    def config_widget(self):
        from calibre.gui2.metadata.config import ConfigWidget
        return ConfigWidget(self)

    def save_settings(self, config_widget):
        config_widget.commit()

    @property
    def prefs(self):
        # Lazily-created JSON-backed config store, one file per source.
        if self._config_obj is None:
            from calibre.utils.config import JSONConfig
            self._config_obj = JSONConfig('metadata_sources/%s.json'%self.name)
        return self._config_obj
    # }}}

    # Browser {{{
    @property
    def user_agent(self):
        # Pass in an index to random_user_agent() to test with a particular
        # user agent
        return random_user_agent()

    @property
    def browser(self):
        if self._browser is None:
            self._browser = browser(user_agent=self.user_agent)
            if self.supports_gzip_transfer_encoding:
                self._browser.set_handle_gzip(True)
        # Return a clone so concurrent users do not share cookie/connection
        # state of the master browser object.
        return self._browser.clone_browser()
    # }}}

    # Caching {{{
    def get_related_isbns(self, id_):
        # Snapshot the cache under the lock; yielding while holding the lock
        # would keep it held for the lifetime of the generator.
        with self.cache_lock:
            items = list(self._isbn_to_identifier_cache.iteritems())
        for isbn, q in items:
            if q == id_:
                yield isbn

    def cache_isbn_to_identifier(self, isbn, identifier):
        with self.cache_lock:
            self._isbn_to_identifier_cache[isbn] = identifier

    def cached_isbn_to_identifier(self, isbn):
        with self.cache_lock:
            return self._isbn_to_identifier_cache.get(isbn, None)

    def cache_identifier_to_cover_url(self, id_, url):
        with self.cache_lock:
            self._identifier_to_cover_url_cache[id_] = url

    def cached_identifier_to_cover_url(self, id_):
        with self.cache_lock:
            return self._identifier_to_cover_url_cache.get(id_, None)

    def dump_caches(self):
        # Copies are returned so callers cannot mutate the live caches.
        with self.cache_lock:
            return {'isbn_to_identifier':self._isbn_to_identifier_cache.copy(),
                    'identifier_to_cover':self._identifier_to_cover_url_cache.copy()}

    def load_caches(self, dump):
        with self.cache_lock:
            self._isbn_to_identifier_cache.update(dump['isbn_to_identifier'])
            self._identifier_to_cover_url_cache.update(dump['identifier_to_cover'])
    # }}}

    # Utility functions {{{
    def get_author_tokens(self, authors, only_first_author=True):
        '''
        Take a list of authors and return a list of tokens useful for an
        AND search query. This function tries to return tokens in
        first name middle names last name order, by assuming that if a comma is
        in the author name, the name is in lastname, other names form.
        '''
        if authors:
            # Leave ' in there for Irish names
            remove_pat = re.compile(r'[!@#$%^&*(){}`~"\s\[\]/]')
            replace_pat = re.compile(r'[-+.:;,]')
            if only_first_author:
                authors = authors[:1]
            for au in authors:
                has_comma = ',' in au
                au = replace_pat.sub(' ', au)
                parts = au.split()
                if has_comma:
                    # au probably in ln, fn form
                    parts = parts[1:] + parts[:1]
                for tok in parts:
                    tok = remove_pat.sub('', tok).strip()
                    # Skip very short tokens, particles and "Unknown".
                    if len(tok) > 2 and tok.lower() not in ('von', 'van',
                            _('Unknown').lower()):
                        yield tok

    def get_title_tokens(self, title, strip_joiners=True, strip_subtitle=False):
        '''
        Take a title and return a list of tokens useful for an AND search query.
        Excludes connectives(optionally) and punctuation.
        '''
        if title:
            # strip sub-titles
            if strip_subtitle:
                subtitle = re.compile(r'([\(\[\{].*?[\)\]\}]|[/:\\].*$)')
                if len(subtitle.sub('', title)) > 1:
                    title = subtitle.sub('', title)
            title_patterns = [(re.compile(pat, re.IGNORECASE), repl) for pat, repl in
            [
                # Remove things like: (2010) (Omnibus) etc.
                (r'(?i)[({\[](\d{4}|omnibus|anthology|hardcover|audiobook|audio\scd|paperback|turtleback|mass\s*market|edition|ed\.)[\])}]', ''),
                # Remove any strings that contain the substring edition inside
                # parentheses
                (r'(?i)[({\[].*?(edition|ed.).*?[\]})]', ''),
                # Remove commas used a separators in numbers
                (r'(\d+),(\d+)', r'\1\2'),
                # Remove hyphens only if they have whitespace before them
                (r'(\s-)', ' '),
                # Remove single quotes not followed by 's'
                (r"'(?!s)", ''),
                # Replace other special chars with a space
                (r'''[:,;!@$%^&*(){}.`~"\s\[\]/]''', ' '),
            ]]
            for pat, repl in title_patterns:
                title = pat.sub(repl, title)
            tokens = title.split()
            for token in tokens:
                token = token.strip()
                if token and (not strip_joiners or token.lower() not in ('a',
                    'and', 'the', '&')):
                    yield token

    def split_jobs(self, jobs, num):
        'Split a list of jobs into at most num groups, as evenly as possible'
        groups = [[] for i in range(num)]
        jobs = list(jobs)
        # Deal jobs round-robin until the list is exhausted.
        while jobs:
            for gr in groups:
                try:
                    job = jobs.pop()
                except IndexError:
                    break
                gr.append(job)
        return [g for g in groups if g]

    def test_fields(self, mi):
        '''
        Return the first field from self.touched_fields that is null on the
        mi object
        '''
        for key in self.touched_fields:
            if key.startswith('identifier:'):
                key = key.partition(':')[-1]
                if not mi.has_identifier(key):
                    return 'identifier: ' + key
            elif mi.is_null(key):
                return key

    def clean_downloaded_metadata(self, mi):
        '''
        Call this method in your plugin's identify method to normalize metadata
        before putting the Metadata object into result_queue. You can of
        course, use a custom algorithm suited to your metadata source.
        '''
        # Only case-normalize English (or unknown-language) records.
        docase = mi.language == 'eng' or mi.is_null('language')
        if docase and mi.title:
            mi.title = fixcase(mi.title)
        mi.authors = fixauthors(mi.authors)
        if mi.tags and docase:
            mi.tags = list(map(fixcase, mi.tags))
        mi.isbn = check_isbn(mi.isbn)

    def download_multiple_covers(self, title, authors, urls, get_best_cover, timeout, result_queue, abort, log, prefs_name='max_covers'):
        if not urls:
            log('No images found for, title: %r and authors: %r'%(title, authors))
            return
        from threading import Thread
        import time
        if prefs_name:
            urls = urls[:self.prefs[prefs_name]]
        if get_best_cover:
            urls = urls[:1]
        log('Downloading %d covers'%len(urls))
        workers = [Thread(target=self.download_image, args=(u, timeout, log, result_queue)) for u in urls]
        for w in workers:
            w.daemon = True
            w.start()
        # Poll the workers until they all finish, the caller aborts, or the
        # overall timeout expires. Daemon threads are abandoned on timeout.
        alive = True
        start_time = time.time()
        while alive and not abort.is_set() and time.time() - start_time < timeout:
            alive = False
            for w in workers:
                if w.is_alive():
                    alive = True
                    break
            abort.wait(0.1)

    def download_image(self, url, timeout, log, result_queue):
        try:
            ans = self.browser.open_novisit(url, timeout=timeout).read()
            result_queue.put((self, ans))
            log('Downloaded cover from: %s'%url)
        except Exception:
            # BUG FIX: was self.log.exception(...), but Source has no `log`
            # attribute -- the log object is the method parameter.
            log.exception('Failed to download cover from: %r'%url)
    # }}}

    # Metadata API {{{

    def get_book_url(self, identifiers):
        '''
        Return a 3-tuple or None. The 3-tuple is of the form:
        (identifier_type, identifier_value, URL).
        The URL is the URL for the book identified by identifiers at this
        source. identifier_type, identifier_value specify the identifier
        corresponding to the URL.
        This URL must be browseable to by a human using a browser. It is meant
        to provide a clickable link for the user to easily visit the books page
        at this source.
        If no URL is found, return None. This method must be quick, and
        consistent, so only implement it if it is possible to construct the URL
        from a known scheme given identifiers.
        '''
        return None

    def get_book_url_name(self, idtype, idval, url):
        '''
        Return a human readable name from the return value of get_book_url().
        '''
        return self.name

    def get_cached_cover_url(self, identifiers):
        '''
        Return cached cover URL for the book identified by
        the identifiers dict or None if no such URL exists.
        Note that this method must only return validated URLs, i.e. not URLS
        that could result in a generic cover image or a not found error.
        '''
        return None

    def identify_results_keygen(self, title=None, authors=None,
            identifiers={}):
        '''
        Return a function that is used to generate a key that can sort Metadata
        objects by their relevance given a search query (title, authors,
        identifiers).
        These keys are used to sort the results of a call to :meth:`identify`.
        For details on the default algorithm see
        :class:`InternalMetadataCompareKeyGen`. Re-implement this function in
        your plugin if the default algorithm is not suitable.
        '''
        def keygen(mi):
            return InternalMetadataCompareKeyGen(mi, self, title, authors,
                identifiers)
        return keygen

    def identify(self, log, result_queue, abort, title=None, authors=None,
            identifiers={}, timeout=30):
        '''
        Identify a book by its title/author/isbn/etc.
        If identifiers(s) are specified and no match is found and this metadata
        source does not store all related identifiers (for example, all ISBNs
        of a book), this method should retry with just the title and author
        (assuming they were specified).
        If this metadata source also provides covers, the URL to the cover
        should be cached so that a subsequent call to the get covers API with
        the same ISBN/special identifier does not need to get the cover URL
        again. Use the caching API for this.
        Every Metadata object put into result_queue by this method must have a
        `source_relevance` attribute that is an integer indicating the order in
        which the results were returned by the metadata source for this query.
        This integer will be used by :meth:`compare_identify_results`. If the
        order is unimportant, set it to zero for every result.
        Make sure that any cover/isbn mapping information is cached before the
        Metadata object is put into result_queue.
        :param log: A log object, use it to output debugging information/errors
        :param result_queue: A result Queue, results should be put into it.
                             Each result is a Metadata object
        :param abort: If abort.is_set() returns True, abort further processing
                      and return as soon as possible
        :param title: The title of the book, can be None
        :param authors: A list of authors of the book, can be None
        :param identifiers: A dictionary of other identifiers, most commonly
                            {'isbn':'1234...'}
        :param timeout: Timeout in seconds, no network request should hang for
                        longer than timeout.
        :return: None if no errors occurred, otherwise a unicode representation
                 of the error suitable for showing to the user
        '''
        return None

    def download_cover(self, log, result_queue, abort,
            title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):
        '''
        Download a cover and put it into result_queue. The parameters all have
        the same meaning as for :meth:`identify`. Put (self, cover_data) into
        result_queue.
        This method should use cached cover URLs for efficiency whenever
        possible. When cached data is not present, most plugins simply call
        identify and use its results.
        If the parameter get_best_cover is True and this plugin can get
        multiple covers, it should only get the "best" one.
        '''
        pass

    # }}}
| sss/calibre-at-bzr | src/calibre/ebooks/metadata/sources/base.py | Python | gpl-3.0 | 21,218 |
from app import Handler
from handlers.auth import Auth
class SignUpHandler(Handler):
    """Renders the registration form and processes new-user submissions."""

    def get(self):
        # Show the empty signup form.
        self.render("signup.html")

    def post(self):
        # Collect the submitted form fields.
        fields = {key: self.request.get(key)
                  for key in ("username", "password", "password_verify", "email")}
        try:
            user = Auth.signup(fields["username"], fields["password"],
                               fields["password_verify"], fields["email"])
            # sets the auth cookie
            self.response.headers.add_header(
                'Set-Cookie', 'user_id=' + Auth.make_secure_cookie(user))
            self.redirect("/welcome")
        except Exception as e:
            # Re-render the form with the error, preserving entered values.
            self.render("signup.html", username=fields["username"],
                        email=fields["email"], error=e)
| diegopettengill/multiuserblog | handlers/signup.py | Python | mit | 830 |
# Generated by Django 1.11.28 on 2020-02-27 16:38
from django.db import migrations
import django.db.models.manager
class Migration(migrations.Migration):
    # Auto-generated migration: registers two proxy models. Since proxy=True,
    # no database tables are created -- the models reuse the tables of
    # course_metadata.Course and course_metadata.Program.
    dependencies = [
        ('course_metadata', '0236_add_program_type_uuid_and_coaching'),
    ]
    operations = [
        migrations.CreateModel(
            name='AlgoliaProxyCourse',
            fields=[
            ],
            options={
                'indexes': [],
                'proxy': True,
            },
            bases=('course_metadata.course',),
            managers=[
                # Custom manager name declared on the proxy model.
                ('everything', django.db.models.manager.Manager()),
            ],
        ),
        migrations.CreateModel(
            name='AlgoliaProxyProgram',
            fields=[
            ],
            options={
                'indexes': [],
                'proxy': True,
            },
            bases=('course_metadata.program',),
        ),
    ]
| edx/course-discovery | course_discovery/apps/course_metadata/migrations/0237_algoliaproxycourse_algoliaproxyprogram.py | Python | agpl-3.0 | 920 |
#!/usr/bin/env python
import os
from app import create_app, db
from app.models import User, Role, Permission
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand
# Application and CLI wiring: config name comes from FLASK_CONFIG, falling
# back to the 'default' configuration.
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
    # Objects pre-imported into every `python manage.py shell` session.
    return dict(app=app, db=db, User=User, Role=Role, Permission=Permission)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test():
    """Run the unit tests."""
    import unittest
    suite = unittest.TestLoader().discover('tests')
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
if __name__ == '__main__':
    # Dispatch to Flask-Script's CLI (shell, db, test, runserver, ...).
    manager.run()
| onekeeper/okp-analysis-web | manage.py | Python | mit | 754 |
# The Craftr build system
# Copyright (C) 2016 Niklas Rosenstein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
:mod:`craftr.defaults`
======================
This module provides the default global namespace for Craftr modules. Names
starting with an underscore will be ignored.
"""
from craftr.core import build as _build
from craftr.core.logging import logger
from craftr.core.manifest import Namespace
from craftr.core.session import session, ModuleNotFound
from craftr.utils import path, shell
from craftr.targetbuilder import gtn, TargetBuilder, Framework
from craftr import platform
import builtins as _builtins
import itertools as _itertools
import os as _os
import require
import sys as _sys
# Module-level loader instance used by include_defs(); bytecode caching is
# disabled so loaded build scripts never leave .pyc files behind.
require = require.Require(write_bytecode=False)
class ToolDetectionError(Exception):
  # Error type for failures while detecting/identifying an external tool.
  pass
class ModuleError(Exception):
  # Raised by error() to abort execution of a Craftr module.
  pass
def include_defs(filename, globals=None):
  """
  Uses :mod:`require` to load a Python file and then copies all symbols
  that do not start with an underscore into the *globals* dictionary. If
  *globals* is not specified, it will fall back to the globals of the frame
  that calls the function.
  """
  # _stackdepth=1: resolve *filename* relative to our caller, not to this
  # module -- presumably; confirm against the require library's docs.
  module = require(filename, _stackdepth=1)
  if globals is None:
    # Frame 1 is the direct caller of include_defs().
    globals = _sys._getframe(1).f_globals
  for key, value in vars(module).items():
    if not key.startswith('_'):
      globals[key] = value
def glob(patterns, parent=None, exclude=(), include_dotfiles=False):
  """
  Wrapper around :func:`path.glob` that, when *parent* is not given, falls
  back to the current module's project directory (if a session with an
  active module exists).
  """
  if parent is None:
    # Only touch the session when no explicit parent was supplied.
    if session and session.module:
      parent = session.module.project_dir
  return path.glob(patterns, parent, exclude, include_dotfiles)
def local(rel_path):
  """
  Resolve *rel_path* against the current module's project directory and
  return the normalized path.
  """
  return path.norm(rel_path, session.module.project_dir)
def buildlocal(rel_path):
  """
  Map *rel_path* into the current module's build directory by prefixing it
  with the module ident. Absolute paths are returned unchanged.
  """
  if not path.isabs(rel_path):
    rel_path = path.canonical(path.join(session.module.ident, rel_path))
  return rel_path
def relocate_files(files, outdir, suffix, replace_suffix=True, parent=None):
  """
  Converts a list of filenames, relocating them to *outdir* and replacing
  their existing suffix. If *suffix* is a callable, it will be passed the
  new filename and expected to return the same filename, eventually with
  a different suffix.
  """
  base = session.module.project_dir if parent is None else parent
  return [
    path.addsuffix(path.join(outdir, path.rel(f, base)), suffix,
                   replace=replace_suffix)
    for f in files
  ]
def filter(predicate, iterable):
  """
  Alternative for the built-in ``filter()`` function that returns a list
  instead of an iterable (which is the behaviour since Python 3).
  """
  return [item for item in iterable if predicate(item)]
def map(procedure, iterable):
  """
  Alternative for the built-in ``map()`` function that returns a list instead
  of an iterable (which is the behaviour since Python 3).
  """
  return [procedure(item) for item in iterable]
def zip(*iterables, fill=NotImplemented):
  """
  Alternative to the Python built-in ``zip()`` function. This function returns
  a list rather than an iterable and also supports swapping to the
  :func:`itertools.zip_longest` version if the *fill* parameter is specified.
  """
  if fill is NotImplemented:
    combined = _builtins.zip(*iterables)
  else:
    combined = _itertools.zip_longest(*iterables, fillvalue=fill)
  return list(combined)
def load_module(name, into=None, get_namespace=True, _stackframe=1):
  """
  Load a Craftr module by name and return it. If *into* is specified, it must
  be a dictionary that will be filled with all the members of the module. Note
  that this function returns the namespace object of the module rather than
  the actual :class:`craftr.core.session.Module` object that wraps the module
  information unless *get_namespace* is False.
  The version criteria is read from the current module's manifest.
  :param name: The name of the module to load. If this name is suffixed
    with the two characters ``.*`` and the *into* parameter is :const:`None`,
    the contents of the module will be exported into the globals of the
    calling frame.
  :param into: If specified, must be a dictionary.
  :param get_namespace:
  :return: The module namespace object (of type :class:`types.ModuleType`)
    or the actual :class:`craftr.core.session.Module` if *get_namespace*
    is False.
  :raise ModuleNotFound: If the module could not be found.
  :raise RuntimeError: If the module that is attempted to be loaded is not
    declared in the current module's manifest.
  Examples:
  .. code:: python
    cxx = load_module('lang.cxx')
    load_module('lang.cxx.*')
    assert cxx.c_compile is c_compile
  """
  if name.endswith('.*') and into is None:
    # "name.*" form: export into the caller's globals (at *_stackframe*).
    name = name[:-2]
    into = _sys._getframe(_stackframe).f_globals
  if not session:
    raise RuntimeError('no session context')
  module = session.module
  if not module:
    raise RuntimeError('no current module')
  # Only modules declared as dependencies in the manifest may be loaded.
  if name not in module.manifest.dependencies:
    raise RuntimeError('"{}" can not load "{}", make sure that it is listed '
        'in the dependencies'.format(module.ident, name))
  loaded_module = session.find_module(name, module.manifest.dependencies[name])
  if not loaded_module.executed:
    loaded_module.run()
  if into is not None:
    # Names that every module namespace carries and must not be exported.
    module_builtins = frozenset('loader project_dir options'.split())
    all_vars = getattr(loaded_module.namespace, '__all__', None)
    for key, value in vars(loaded_module.namespace).items():
      if all_vars is not None:
        # Module declares __all__: export exactly those names.
        if key in all_vars:
          into[key] = value
      else:
        # No __all__: export public names that are not module built-ins and
        # do not shadow names from this (craftr.defaults) module.
        if not key.startswith('_') and key not in module_builtins and key not in globals():
          into[key] = value
  if get_namespace:
    return loaded_module.namespace
  return loaded_module
def load_file(filename):
  """
  Loads a Python file into a new module-like object and returns it. The
  *filename* is assumed relative to the currently executed module's
  directory (NOT the project directory which can be different).
  """
  if not path.isabs(filename):
    filename = path.join(session.module.directory, filename)
  with open(filename, 'r') as fp:
    code = compile(fp.read(), filename, 'exec')
  # Seed the new namespace with this module's globals so the loaded file
  # sees the same default environment as a Craftr module.
  scope = Namespace()
  vars(scope).update(globals())
  exec(code, vars(scope))
  return scope
def gentool(commands, preamble=None, environ=None, name=None):
  """
  Create a :class:`~_build.Tool`, register it with the session's build graph
  and return it. The tool name is derived from the variable it is assigned
  to unless *name* is specified.
  """
  new_tool = _build.Tool(gtn(name), commands, preamble, environ)
  session.graph.add_tool(new_tool)
  return new_tool
def gentarget(commands, inputs=(), outputs=(), *args, **kwargs):
  """
  Create a :class:`~_build.Target`, register it with the session's build
  graph and return it. The target name is derived from the variable it is
  assigned to unless ``name`` is passed as a keyword argument.
  """
  target_name = gtn(kwargs.pop('name', None))
  new_target = _build.Target(target_name, commands, inputs, outputs,
                             *args, **kwargs)
  session.graph.add_target(new_target)
  return new_target
def runtarget(target, *args, inputs=(), outputs=(), **kwargs):
  """
  Convenience wrapper around :func:`gentarget` that makes it explicit that
  an existing *target* is being executed (as an explicit build step).
  """
  resolved_name = gtn(kwargs.pop('name', None))
  kwargs.setdefault('explicit', True)
  command_line = [[target] + list(args)]
  return gentarget(command_line, inputs, outputs, name=resolved_name, **kwargs)
def write_response_file(arguments, builder=None, name=None, force_file=False):
  """
  Creates a response-file with the specified *name* in the ``buildfiles/``
  directory and writes the *arguments* list quoted into the file. If
  *builder* is specified, it must be a :class:`TargetBuilder` and the
  response file will be added to the implicit dependencies.
  If *force_file* is set to True, a file will always be written. Otherwise,
  the function will look into possible limitations of the platform and decide
  whether to write a response file or to return the *arguments* as is.
  Returns a tuple of ``(filename, arguments)``. If a response file is written,
  the returned *arguments* will be a list with a single string that is the
  filename prepended with ``@``. The *filename* part can be None if no
  response file needed to be exported.
  """
  # NOTE(review): force_file is currently not honored -- a file is only ever
  # written on Windows when the command line gets long. Confirm intent.
  if not name:
    if not builder:
      # Fixed typo in error message ("bot" -> "not").
      raise ValueError('builder must be specified if name is not')
    name = builder.name + '.response.txt'
  if platform.name != 'win':
    return None, arguments
  # We'll just assume that there won't be more than 2048 characters for
  # other flags. The windows max buffer size is 8192.
  content = shell.join(arguments)
  if len(content) < 6144:
    return None, arguments
  filename = buildlocal(path.join('buildfiles', name))
  if builder:
    builder.implicit_deps.append(filename)
  if session.builddir:
    path.makedirs(path.dirname(filename))
  with open(filename, 'w') as fp:
    fp.write(content)
  return filename, ['@' + filename]
def error(*message):
  """
  Abort module execution by raising a :class:`ModuleError` built from the
  space-joined string representations of *message*.
  """
  text = ' '.join(str(part) for part in message)
  raise ModuleError(text)
def append_PATH(*paths):
  """
  Helper that builds a ``PATH`` environment variable value from the current
  ``PATH`` with the given truthy *paths* appended. Typically used to extend
  the search path for a generated run target, e.g.::
      run = gentarget(
        commands = [[main, local('example.ini')]],
        explicit=True,
        environ = {'PATH': append_PATH(qt5.bin_dir if qt5 else None)}
      )
  """
  current = _os.getenv('PATH')
  # Falsy entries (None, '') are silently skipped.
  extra = _os.path.pathsep.join(p for p in paths if p)
  if extra:
    current += _os.path.pathsep + extra
  return current
| winksaville/craftr | craftr/defaults.py | Python | gpl-3.0 | 10,759 |
"""The tests for the hassio component."""
from homeassistant.const import HTTP_INTERNAL_SERVER_ERROR
from homeassistant.exceptions import HomeAssistantError
from tests.async_mock import Mock, patch
# A supervisor-authenticated client may call the auth endpoint; the login
# provider is invoked with the submitted credentials.
async def test_auth_success(hass, hassio_client_supervisor):
    """Test no auth needed for ."""
    with patch(
        "homeassistant.auth.providers.homeassistant."
        "HassAuthProvider.async_validate_login",
    ) as mock_login:
        resp = await hassio_client_supervisor.post(
            "/api/hassio_auth",
            json={"username": "test", "password": "123456", "addon": "samba"},
        )
        # Check we got right response
        assert resp.status == 200
        mock_login.assert_called_with("test", "123456")
# Non-supervisor clients are rejected with 401 before the provider is called.
async def test_auth_fails_no_supervisor(hass, hassio_client):
    """Test if only supervisor can access."""
    with patch(
        "homeassistant.auth.providers.homeassistant."
        "HassAuthProvider.async_validate_login",
    ) as mock_login:
        resp = await hassio_client.post(
            "/api/hassio_auth",
            json={"username": "test", "password": "123456", "addon": "samba"},
        )
        # Check we got right response
        assert resp.status == 401
        assert not mock_login.called
# Unauthenticated clients are likewise rejected with 401.
async def test_auth_fails_no_auth(hass, hassio_noauth_client):
    """Test if only supervisor can access."""
    with patch(
        "homeassistant.auth.providers.homeassistant."
        "HassAuthProvider.async_validate_login",
    ) as mock_login:
        resp = await hassio_noauth_client.post(
            "/api/hassio_auth",
            json={"username": "test", "password": "123456", "addon": "samba"},
        )
        # Check we got right response
        assert resp.status == 401
        assert not mock_login.called
# A provider-side login failure surfaces as 401 to the supervisor client.
async def test_login_error(hass, hassio_client_supervisor):
    """Test no auth needed for error."""
    with patch(
        "homeassistant.auth.providers.homeassistant."
        "HassAuthProvider.async_validate_login",
        Mock(side_effect=HomeAssistantError()),
    ) as mock_login:
        resp = await hassio_client_supervisor.post(
            "/api/hassio_auth",
            json={"username": "test", "password": "123456", "addon": "samba"},
        )
        # Check we got right response
        assert resp.status == 401
        mock_login.assert_called_with("test", "123456")
# Missing request body -> 400 without ever touching the provider.
async def test_login_no_data(hass, hassio_client_supervisor):
    """Test auth with no data -> error."""
    with patch(
        "homeassistant.auth.providers.homeassistant."
        "HassAuthProvider.async_validate_login",
        Mock(side_effect=HomeAssistantError()),
    ) as mock_login:
        resp = await hassio_client_supervisor.post("/api/hassio_auth")
        # Check we got right response
        assert resp.status == 400
        assert not mock_login.called
# Missing username field -> 400 without ever touching the provider.
async def test_login_no_username(hass, hassio_client_supervisor):
    """Test auth with no username in data -> error."""
    with patch(
        "homeassistant.auth.providers.homeassistant."
        "HassAuthProvider.async_validate_login",
        Mock(side_effect=HomeAssistantError()),
    ) as mock_login:
        resp = await hassio_client_supervisor.post(
            "/api/hassio_auth", json={"password": "123456", "addon": "samba"}
        )
        # Check we got right response
        assert resp.status == 400
        assert not mock_login.called
# Extra, unknown fields in the payload are tolerated.
async def test_login_success_extra(hass, hassio_client_supervisor):
    """Test auth with extra data."""
    with patch(
        "homeassistant.auth.providers.homeassistant."
        "HassAuthProvider.async_validate_login",
    ) as mock_login:
        resp = await hassio_client_supervisor.post(
            "/api/hassio_auth",
            json={
                "username": "test",
                "password": "123456",
                "addon": "samba",
                "path": "/share",
            },
        )
        # Check we got right response
        assert resp.status == 200
        mock_login.assert_called_with("test", "123456")
async def test_password_success(hass, hassio_client_supervisor):
"""Test no auth needed for ."""
with patch(
"homeassistant.components.hassio.auth.HassIOPasswordReset._change_password",
) as mock_change:
resp = await hassio_client_supervisor.post(
"/api/hassio_auth/password_reset",
json={"username": "test", "password": "123456"},
)
# Check we got right response
assert resp.status == 200
mock_change.assert_called_with("test", "123456")
async def test_password_fails_no_supervisor(hass, hassio_client):
"""Test if only supervisor can access."""
with patch(
"homeassistant.auth.providers.homeassistant.Data.async_save",
) as mock_save:
resp = await hassio_client.post(
"/api/hassio_auth/password_reset",
json={"username": "test", "password": "123456"},
)
# Check we got right response
assert resp.status == 401
assert not mock_save.called
async def test_password_fails_no_auth(hass, hassio_noauth_client):
"""Test if only supervisor can access."""
with patch(
"homeassistant.auth.providers.homeassistant.Data.async_save",
) as mock_save:
resp = await hassio_noauth_client.post(
"/api/hassio_auth/password_reset",
json={"username": "test", "password": "123456"},
)
# Check we got right response
assert resp.status == 401
assert not mock_save.called
async def test_password_no_user(hass, hassio_client_supervisor):
    """Resetting a non-existent user surfaces an internal error."""
    with patch(
        "homeassistant.auth.providers.homeassistant.Data.async_save",
    ) as mock_save:
        resp = await hassio_client_supervisor.post(
            "/api/hassio_auth/password_reset",
            json={"username": "test", "password": "123456"},
        )
        # No such user: endpoint reports HTTP 500 and saves nothing.
        assert resp.status == HTTP_INTERNAL_SERVER_ERROR
        assert not mock_save.called
| pschmitt/home-assistant | tests/components/hassio/test_auth.py | Python | apache-2.0 | 6,090 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class snmpoid(base_resource) :
    """ Configuration for SNMP Object Identifier resource. """
    # Auto-generated NITRO wrapper for the read-only "snmpoid" NetScaler
    # resource (SNMP OIDs of vservers / services / servicegroups).  The
    # try/except-reraise wrappers below are generated boilerplate.
    def __init__(self) :
        # Backing fields for the NITRO payload properties.
        self._entitytype = ""
        self._name = ""
        self._Snmpoid = ""
        # Record count filled in by count/count_filtered responses.
        self.___count = 0

    @property
    def entitytype(self) :
        ur"""The type of entity whose SNMP OIDs you want to displayType of entity whose SNMP OIDs you want the NetScaler appliance to display.<br/>Possible values = VSERVER, SERVICE, SERVICEGROUP.
        """
        try :
            return self._entitytype
        except Exception as e:
            raise e

    @entitytype.setter
    def entitytype(self, entitytype) :
        ur"""The type of entity whose SNMP OIDs you want to displayType of entity whose SNMP OIDs you want the NetScaler appliance to display.<br/>Possible values = VSERVER, SERVICE, SERVICEGROUP
        """
        try :
            self._entitytype = entitytype
        except Exception as e:
            raise e

    @property
    def name(self) :
        ur"""Name of the entity whose SNMP OID you want the NetScaler appliance to display.<br/>Minimum length = 1.
        """
        try :
            return self._name
        except Exception as e:
            raise e

    @name.setter
    def name(self, name) :
        ur"""Name of the entity whose SNMP OID you want the NetScaler appliance to display.<br/>Minimum length = 1
        """
        try :
            self._name = name
        except Exception as e:
            raise e

    @property
    def Snmpoid(self) :
        ur"""The snmp oid.
        """
        # Read-only: populated from appliance responses, no setter generated.
        try :
            return self._Snmpoid
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        ur""" converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(snmpoid_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                if (result.errorcode == 444) :
                    # Error 444: the NITRO session expired; drop it locally.
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    # No severity reported: treat any non-zero code as fatal.
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.snmpoid
        except Exception as e :
            raise e

    def _get_object_name(self) :
        ur""" Returns the value of object identifier argument
        """
        try :
            if self.name is not None :
                return str(self.name)
            return None
        except Exception as e :
            raise e

    @classmethod
    def get(cls, client, name="", option_="") :
        ur""" Use this API to fetch all the snmpoid resources that are configured on netscaler.
        """
        # NOTE(review): generated dispatch expects *name* to be a snmpoid
        # instance (or list of instances); with the default empty string
        # neither branch runs and the method returns None — confirm against
        # other generated nitro resources before relying on that path.
        try :
            if type(name) == cls :
                if type(name) is not list :
                    option_ = options()
                    option_.args = nitro_util.object_to_string_withoutquotes(name)
                    response = name.get_resource(client, option_)
                else :
                    if name and len(name) > 0 :
                        response = [snmpoid() for _ in range(len(name))]
                        for i in range(len(name)) :
                            option_ = options()
                            option_.args = nitro_util.object_to_string_withoutquotes(name[i])
                            response[i] = name[i].get_resource(client, option_)
                return response
        except Exception as e :
            raise e

    @classmethod
    def get_args(cls, client, args) :
        ur""" Use this API to fetch all the snmpoid resources that are configured on netscaler.
        # This uses snmpoid_args which is a way to provide additional arguments while fetching the resources.
        """
        try :
            obj = snmpoid()
            option_ = options()
            option_.args = nitro_util.object_to_string_withoutquotes(args)
            response = obj.get_resources(client, option_)
            return response
        except Exception as e :
            raise e

    @classmethod
    def get_filtered(cls, client, filter_, obj) :
        ur""" Use this API to fetch filtered set of snmpoid resources.
        filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            option_ = options()
            option_.filter = filter_
            option_.args = nitro_util.object_to_string_withoutquotes(obj)
            response = obj.getfiltered(client, option_)
            return response
        except Exception as e :
            raise e

    @classmethod
    def count(cls, client, obj) :
        ur""" Use this API to count the snmpoid resources configured on NetScaler.
        """
        try :
            option_ = options()
            option_.count = True
            option_.args = nitro_util.object_to_string_withoutquotes(obj)
            response = obj.get_resources(client, option_)
            if response :
                # The count is carried on the first record's ___count field.
                return response[0].__dict__['___count']
            return 0
        except Exception as e :
            raise e

    @classmethod
    def count_filtered(cls, client, filter_, obj) :
        ur""" Use this API to count filtered the set of snmpoid resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            option_ = options()
            option_.count = True
            option_.filter = filter_
            option_.args = nitro_util.object_to_string_withoutquotes(obj)
            response = obj.getfiltered(client, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e :
            raise e

    class Entitytype:
        # Allowed values for the entitytype argument.
        VSERVER = "VSERVER"
        SERVICE = "SERVICE"
        SERVICEGROUP = "SERVICEGROUP"
class snmpoid_response(base_response) :
    """ NITRO response envelope for snmpoid GET requests.

    Holds the standard NITRO status fields plus one placeholder snmpoid
    resource per expected record so the payload formatter can fill them in.
    """
    def __init__(self, length=1) :
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # The original code first assigned ``self.snmpoid = []`` and then
        # immediately overwrote it; the dead assignment has been removed.
        self.snmpoid = [snmpoid() for _ in range(length)]
| benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/snmp/snmpoid.py | Python | apache-2.0 | 6,179 |
# -*- coding: utf-8 -*-
""" MPR Missing Person Registry - Controllers
@author: nursix
"""
# Resolve the current controller name and bail out early when this module
# has been disabled in the deployment settings.  (The original code
# redundantly assigned ``module = request.controller`` a second time.)
module = request.controller
if module not in deployment_settings.modules:
    session.error = T("Module disabled!")
    redirect(URL(r=request, c="default", f="index"))
# -----------------------------------------------------------------------------
# Options Menu (available in all Functions" Views)
def shn_menu():
    """Build the module's option menu, adding an "Open recent" shortcut
    when a person record is held in the session."""
    menu = [
        [T("Search for a Person"), False,
         URL(r=request, f="person", args="search_simple")],
        [T("Missing Persons"), False, URL(r=request, f="person"), [
            [T("List"), False, URL(r=request, f="person")],
            [T("Add"), False, URL(r=request, f="person", args="create")],
        ]],
    ]
    response.menu_options = menu
    recent = []
    if session.rcvars and "pr_person" in session.rcvars:
        table = db.pr_person
        row = db(table.id == session.rcvars["pr_person"]).select(
            table.id, limitby=(0, 1)).first()
        if row:
            label = "%s: %s" % (T("Person"), shn_pr_person_represent(row.id))
            recent.append([label, False,
                           URL(r=request, f="person", args=[row.id])])
    if recent:
        response.menu_options.append([T("Open recent"), True, None, recent])
# Build the menu once per request (web2py re-executes controllers each time).
shn_menu()
# -----------------------------------------------------------------------------
def index():
    """Module's Home Page: expose the module's human-readable name.

    Falls back to a generic title when the module is not configured in
    the deployment settings.
    """
    try:
        module_name = deployment_settings.modules[module].name_nice
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; any lookup failure still falls back.
        module_name = T("Person Registry")
    return dict(module_name=module_name)
# -----------------------------------------------------------------------------
def person():
    """ RESTful CRUD controller for missing-person records. """
    resource = request.function

    def person_prep(jr):
        # Pre-processor hook run before the REST request is handled.
        if jr.component_name == "config":
            # Seed the GIS config component's field defaults from record #1.
            _config = db.gis_config
            defaults = db(_config.id == 1).select(limitby=(0, 1)).first()
            for key in defaults.keys():
                if key not in ["id", "uuid", "mci", "update_record", "delete_record"]:
                    _config[key].default = defaults[key]
        if jr.http == "POST" and jr.method == "create" and not jr.component:
            # Don't know why web2py always adds that,
            # remove it here as we want to manually redirect
            jr.request.post_vars.update(_next=None)
        return True
    response.s3.prep = person_prep
    # Trim group-membership list views down to the essential columns.
    s3xrc.model.configure(db.pr_group_membership,
                          list_fields=["id",
                                       "group_id",
                                       "group_head",
                                       "description"])

    def person_postp(jr, output):
        # Post-processor hook: decorate list views with action buttons.
        if jr.representation in ("html", "popup"):
            if not jr.component:
                label = READ
            else:
                label = UPDATE
            linkto = shn_linkto(jr, sticky=True)("[id]")
            report = URL(r=request, f="person", args=("[id]", "missing_report"))
            response.s3.actions = [
                dict(label=str(label), _class="action-btn", url=str(linkto)),
                dict(label=str(T("Report")), _class="action-btn", url=str(report))
            ]
        if jr.http == "POST" and jr.method == "create" and not jr.component:
            # If a new person gets added, redirect to mpr_next
            if response.s3.mpr_next:
                jr.next = response.s3.mpr_next
                response.s3.mpr_next = None
        return output
    response.s3.postp = person_postp
    # The "missing" flag is managed by this module, not edited directly.
    db.pr_person.missing.readable = False
    db.pr_person.missing.writable = False
    db.pr_person.missing.default = True
    db.mpr_missing_report.person_id.readable = False
    db.mpr_missing_report.person_id.writable = False
    # Show only missing persons in list views
    if len(request.args) == 0:
        response.s3.filter = (db.pr_person.missing == True)
    # Component tabs shown in the record header.
    mpr_tabs = [
        (T("Person Details"), None),
        (T("Missing Report"), "missing_report"),
        (T("Physical Description"), "physical_description"),
        (T("Images"), "image"),
        (T("Identity"), "identity"),
        (T("Address"), "address"),
        (T("Contact Data"), "pe_contact"),
        (T("Presence Log"), "presence"),
    ]
    rheader = lambda r: shn_pr_rheader(r, tabs=mpr_tabs)
    response.s3.pagination = True
    output = shn_rest_controller("pr", resource,
                                 main="first_name",
                                 extra="last_name",
                                 listadd=False,
                                 rheader=rheader)
    shn_menu()
    return output
# -----------------------------------------------------------------------------
def download():
    """ Download a file. """
    # Delegates to web2py's built-in streaming helper for upload fields.
    return response.download(request, db)
# -----------------------------------------------------------------------------
def tooltip():
    """Ajax tooltips: select the per-field tooltip view, return empty context."""
    field = request.vars.get("formfield")
    if field is not None:
        # Each form field has its own tooltip template under pr/ajaxtips/.
        response.view = "pr/ajaxtips/%s.html" % field
    return dict()
# -----------------------------------------------------------------------------
def shn_mpr_person_onvalidate(form):
    """Validation hook for person forms; deliberately a no-op placeholder."""
    return None
#
# -----------------------------------------------------------------------------
| luisibanez/SahanaEden | controllers/mpr.py | Python | mit | 5,474 |
"""
@date: 27/Nov/2014
@author: Haridas N<haridas.nss@gmail.com>
Active Testcases on this files are -
1. Test authentication API.
2. Test Upload API
3. Test the Core data structure and Image Model's methods.
4. AWS S3 behavior
5. Full Integration Test
All these test will run without Http Server or Celery.
"""
import os
import json
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth import SESSION_KEY
from django.utils.module_loading import import_string
from django.utils import timezone
from django.core.files import File
from django.test import TestCase
from django.test import Client
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from PIL import Image as PILImage
from django.conf import settings
from .models import Image
from .tasks import FILE_PATH_PARSER
class TestAuthAPI(TestCase):
    """Exercise the token-issuing authentication endpoint."""
    def setUp(self):
        self.client = Client()
        self.auth_url = reverse("authenticate")
        self.auth_data = {
            'username': 'haridas',
            'password': 'haridas'
        }
    def test_auth_failure(self):
        # No user exists yet, so valid-looking credentials must be rejected.
        reply = self.client.post(self.auth_url, data=self.auth_data)
        body = json.loads(reply.content)
        self.assertTrue(reply.status_code == 200)
        self.assertFalse(body['success'])  # No User created yet.
    def test_auth_success(self):
        self._create_user()
        reply = self.client.post(self.auth_url, data=self.auth_data)
        body = json.loads(reply.content)
        self.assertTrue(reply.status_code == 200)
        self.assertTrue(body['success'])
    def test_authentication_using_auth_token(self):
        user = self._create_user()
        reply = self.client.post(self.auth_url, data=self.auth_data)
        token = json.loads(reply.content)['auth_token']
        # The returned token must be a valid session key on the configured
        # backend, unexpired and bound to the created user.
        session = import_string(
            settings.SESSION_ENGINE).SessionStore(token)
        self.assertTrue(session.get_expiry_date() > timezone.now())
        self.assertTrue(SESSION_KEY in session)
        self.assertTrue(session[SESSION_KEY] == user.pk)
    def test_api_with_junk_data(self):
        self._create_user()
        # Mangle the username so authentication must fail.
        self.auth_data['username'] = 'haridas1'
        reply = self.client.post(self.auth_url, data=self.auth_data)
        self.assertFalse(json.loads(reply.content)['success'])
    def _create_user(self):
        # Helper: persist the fixture user with a properly hashed password.
        account = User(username=self.auth_data['username'])
        account.set_password(self.auth_data['password'])
        account.save()
        return account
class TestUploadAPI(TestCase):
    """Exercise the authenticated image-upload endpoint."""
    def setUp(self):
        self.upload_url = reverse("upload_image")
        self.auth_url = reverse("authenticate")
        self.auth_data = {
            'username': 'haridas',
            'password': 'haridas'
        }
        self.client = Client()
        fixture = os.path.join(os.path.dirname(__file__),
                               "fixtures/images/me.jpg")
        self.image = open(fixture, 'rb')
    def test_upload_image(self):
        payload = {
            'auth_token': self._get_auth_token(),
            'image': self.image
        }
        reply = json.loads(
            self.client.post(self.upload_url, data=payload).content)
        self.assertTrue(reply['success'])
        self.assertTrue(not reply['error_msg'])
        # One URL per configured variant plus the original upload.
        self.assertEqual(len(reply['image_urls']),
                         len(settings.IMAGE_VARIANTS) + 1)
        self.assertEqual(reply['id'], 1)
    def test_error_upload(self):
        # Wrong form-field name ('image1'): the API must fail cleanly.
        payload = {
            'auth_token': self._get_auth_token(),
            'image1': self.image
        }
        reply = json.loads(
            self.client.post(self.upload_url, data=payload).content)
        self.assertFalse(reply['success'])
        self.assertTrue(reply['error_msg'])
    def _get_auth_token(self):
        # Helper: create the fixture user, then log in to obtain a token.
        account = User(username=self.auth_data['username'])
        account.set_password(self.auth_data['password'])
        account.save()
        reply = self.client.post(self.auth_url, data=self.auth_data)
        return json.loads(reply.content)['auth_token']
class TestImageModel(TestCase):
    # Unit tests for the Image model: file storage, cleanup on delete, and
    # the resized-variant URL/path bookkeeping.
    def setUp(self):
        # A saved user is required because Image references a user.
        self.user = User()
        self.user.username = "haridas"
        self.user.set_password("haridas")
        self.user.save()
        self.file_name = os.path.join(os.path.dirname(__file__),
                                      "fixtures/images/me.jpg")
    def test_create_new_image(self):
        """
        Test creation of new image.
        """
        img = self._create_new_img()
        # Model name must match the stored file's basename, and the file
        # must exist on disk after save().
        self.assertEqual(img.name, os.path.basename(img.image.name))
        self.assertTrue(os.path.exists(img.image.path))
        img.delete()
    def test_image_delete(self):
        """ Test Image delete and cleanup operation"""
        img = self._create_new_img()
        img.delete()
        # Deleting the model row must also remove the file from disk.
        self.assertFalse(os.path.exists(img.image.path))
    def test_resized_image_urls(self):
        """ Test the validity of the resized image urls."""
        img = self._create_new_img()
        # One URL per configured variant plus the original upload.
        self.assertEqual(len(img.resized_image_urls),
                         len(settings.IMAGE_VARIANTS) + 1)
        img.delete()
    def test_resized_image_path(self):
        """ Check all image paths exists """
        img = self._create_new_img()
        resized_img_paths = img.resized_image_paths
        for resized in settings.IMAGE_VARIANTS:
            # The variant label (first tuple element) keys into the paths.
            self.assertTrue(resized[0] in resized_img_paths)
    def test_resized_img_creation(self):
        img = self._create_new_img()
        img.delete()
    def _create_new_img(self):
        # Helper: build and save an Image backed by the jpg fixture.
        img = Image()
        img.name = "test1.png"
        img.user = self.user
        img.image = File(open(self.file_name))
        img.save()
        return img
class AwsS3Tests(TestCase):
    """Integration checks against the configured S3 bucket (boto2 API)."""
    def setUp(self):
        self.conn = S3Connection(
            aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
            aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
        )
        self.bucket_name = settings.S3_IMAGE_BUCKET_NAME
        self.filename = os.path.join(os.path.dirname(__file__),
                                     "fixtures/images/me.jpg")
    def test_bucket_creation_and_access(self):
        try:
            bucket = self.conn.create_bucket(self.bucket_name)
        except Exception as ex:
            # Fail loudly: the old code only printed the error and then
            # crashed with an unbound ``bucket`` NameError.
            self.fail("Could not create/access bucket: %s" % ex)
        key = Key(bucket)
        key.key = 'me.jpg'
        key.set_contents_from_filename(self.filename)
        # Collect listed names under a *different* variable.  The old loop
        # reused ``key``, shadowing the uploaded Key, so the final assertion
        # compared the last listed key against itself and could never fail.
        names = [k.name for k in bucket.list()]
        self.assertTrue(key.key in names)
#
# Kinda full integration test to cover all the components of the system.
#
class FullIntegrationTest(TestCase):
    # End-to-end flow: authenticate, upload synchronously, then verify the
    # API response, resize results, log file, and S3 sync in sequence.
    def setUp(self):
        self.upload_url = reverse("upload_image")
        self.auth_url = reverse("authenticate")
        self.auth_data = {
            'username': 'haridas',
            'password': 'haridas'
        }
        self.client = Client()
        self.filename = os.path.join(os.path.dirname(__file__),
                                     "fixtures/images/me.jpg")
        self.image = open(self.filename, 'rb')
        # S3 Configurations Details.
        self.conn = S3Connection(
            aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
            aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
        )
        self.bucket_name = settings.S3_IMAGE_BUCKET_NAME
    def test_upload_api_and_cloud_syncing(self):
        """
        End-to-End Testing.
        """
        payload = {
            'auth_token': self._get_auth_token(),
            'image': self.image,
            # Force synchronous processing so the checks below can run
            # immediately (no Celery worker involved).
            'async_operation': 'false'
        }
        response = json.loads(
            self.client.post(self.upload_url, data=payload).content)
        # Logger file got updated after the image upload.
        self.logger_size_before = os.path.getsize(os.path.join(
            settings.BASE_DIR, "logs/image_resize.log"))
        # Check created images are being generated as per the specification.
        self.image_variants = dict([(a[0],
                                     a[1]) for a in settings.IMAGE_VARIANTS])
        self._check_api_response(response)
        self._check_image_resize_operation(response)
        self._check_image_logger_operation(response)
        self._check_could_syncing_operation(response)
    def _check_api_response(self, response):
        self.assertTrue(response['success'])
    def _check_image_resize_operation(self, response):
        # iteritems() -- this test module is Python 2.
        for name, img in response['image_urls'].iteritems():
            img_file = os.path.join(settings.MEDIA_ROOT, img)
            # File exists or not test
            self.assertTrue(os.path.exists(img_file))
            # Check the resize operation was done properly.
            if name != Image.IMG_LABEL:
                self.assertTrue(
                    PILImage.open(img_file).size == self.image_variants.get(
                        name))
    def _check_image_logger_operation(self, response):
        curr_size = os.path.getsize(os.path.join(
            settings.BASE_DIR, "logs/image_resize.log"))
        # print curr_size, self.logger_size_before
        # Checking that the logger got updated via its modification time.
        # self.assertTrue(curr_size > self.logger_size_before)
        # TODO: Since there is a disk buffering effect the size or time change
        # may not be sync on time, so we can't test the file change using time
        # or size change of the logfile effectively. Find some alternate.
    def _check_could_syncing_operation(self, response):
        # Every generated image path must appear among the bucket's keys.
        bucket = self.conn.create_bucket(self.bucket_name)
        file_list = [key.name for key in bucket.list()]
        img_file = ["/".join(FILE_PATH_PARSER.findall(
            os.path.join(settings.MEDIA_ROOT, img))[0])
            for img in response['image_urls'].values()]
        self.assertTrue(set(file_list).issuperset(set(img_file)))
    def _get_auth_token(self):
        # Helper: create the fixture user and log in to obtain a token.
        u = User(username=self.auth_data['username'])
        u.set_password(self.auth_data['password'])
        u.save()
        response = self.client.post(self.auth_url, data=self.auth_data)
        content = json.loads(response.content)
        auth_token = content['auth_token']
        return auth_token
| haridas/image_uploader_py | uploader/tests.py | Python | mit | 10,460 |
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
# Directory containing this setup.py; used to resolve the README path.
ROOT = os.path.dirname(__file__)
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Uses a context manager so the file handle is closed promptly instead
    of leaking until garbage collection.
    """
    with open(os.path.join(ROOT, fname)) as fh:
        return fh.read()
# Package metadata.  The long description is read from the README so the
# PyPI page mirrors the repository documentation.
setup(
    name="django-soundcheck",
    version="0.9",
    url="https://github.com/bsvetchine/django-soundcheck",
    license="MIT",
    description="",
    long_description=read("README.md"),
    author="Bertrand Svetchine",
    author_email="bertrand.svetchine@gmail.com",
    packages=find_packages(),
    include_package_data=True,
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Framework :: Django",
    ],
)
| bsvetchine/django-soundcheck | setup.py | Python | mit | 841 |
# -*- coding: utf-8 -*-
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import csv, gzip, os
from pyqtgraph import Point
class GlassDB:
    """
    Database of dispersion coefficients for Schott glasses
    + Corning 7980
    """
    def __init__(self, fileName='schott_glasses.csv'):
        # NOTE(review): *fileName* is ignored; the gzipped catalogue path is
        # hard-coded below.  Also, gzip.open(..., 'rb') yields bytes, so
        # map(str, ...) produces "b'...'" strings on Python 3 — confirm the
        # intended Python version.
        path = os.path.dirname(__file__)
        fh = gzip.open(os.path.join(path, 'schott_glasses.csv.gz'), 'rb')
        r = csv.reader(map(str, fh.readlines()))
        lines = [x for x in r]
        self.data = {}
        header = lines[0]
        # One dict per glass, keyed by the CSV header columns.
        for l in lines[1:]:
            info = {}
            for i in range(1, len(l)):
                info[header[i]] = l[i]
            self.data[l[0]] = info
        self.data['Corning7980'] = { ## Thorlabs UV fused silica--not in schott catalog.
            'B1': 0.68374049400,
            'B2': 0.42032361300,
            'B3': 0.58502748000,
            'C1': 0.00460352869,
            'C2': 0.01339688560,
            'C3': 64.49327320000,
            'TAUI25/250': 0.95, ## transmission data is fabricated, but close.
            'TAUI25/1400': 0.98,
        }
        # Per-glass memoization cache for ior().
        for k in self.data:
            self.data[k]['ior_cache'] = {}

    def ior(self, glass, wl):
        """
        Return the index of refraction for *glass* at wavelength *wl*.

        The *glass* argument must be a key in self.data.
        """
        info = self.data[glass]
        cache = info['ior_cache']
        if wl not in cache:
            # Sellmeier dispersion formula with the catalogue's B/C terms;
            # wavelength is converted from nm to um before squaring.
            B = list(map(float, [info['B1'], info['B2'], info['B3']]))
            C = list(map(float, [info['C1'], info['C2'], info['C3']]))
            w2 = (wl/1000.)**2
            n = np.sqrt(1.0 + (B[0]*w2 / (w2-C[0])) + (B[1]*w2 / (w2-C[1])) + (B[2]*w2 / (w2-C[2])))
            cache[wl] = n
        return cache[wl]

    def transmissionCurve(self, glass):
        # Build a (2, N) array of (wavelength, transmission) pairs from the
        # catalogue's TAUI25/<wavelength> columns, sorted by wavelength.
        data = self.data[glass]
        keys = [int(x[7:]) for x in data.keys() if 'TAUI25' in x]
        keys.sort()
        curve = np.empty((2,len(keys)))
        for i in range(len(keys)):
            curve[0][i] = keys[i]
            key = 'TAUI25/%d' % keys[i]
            val = data[key]
            # Missing entries are treated as fully opaque.
            if val == '':
                val = 0
            else:
                val = float(val)
            curve[1][i] = val
        return curve
# Shared module-level glass catalogue instance.
GLASSDB = GlassDB()
def wlPen(wl):
    """Return a pen whose color approximates wavelength *wl* (nm)."""
    lo, hi = 400, 700
    # Map the visible band [400, 700] onto hues [0.8, 0] (violet -> red).
    hue = np.clip(((hi - lo) - (wl - lo)) * 0.8 / (hi - lo), 0, 0.8)
    if wl > 700:
        # Fade brightness beyond the red end of the spectrum.
        val = 1.0 * (((700 - wl) / 700.) + 1)
    elif wl < 400:
        # Fade brightness below the violet end.
        val = wl * 1.0 / 400.
    else:
        val = 1.0
    return pg.mkPen(pg.hsvColor(hue, 1.0, val))
class ParamObj(object):
    """Mixin that stores named parameters and notifies on change."""

    def __init__(self):
        self.__params = {}

    def setParams(self, **params):
        """Merge *params* into the store, then fire the change hook once."""
        self.__params.update(params)
        self.paramStateChanged()

    def setParam(self, param, val):
        """Set a single parameter (delegates to setParams)."""
        self.setParams(**{param: val})

    def __setitem__(self, item, val):
        self.setParam(item, val)

    def getParam(self, param):
        """Return the stored value for *param* (KeyError if unknown)."""
        return self.__params[param]

    def __getitem__(self, item):
        # Keep this a thin delegation: a PySide 1.2.2 bug causes __getitem__
        # to be called from QGraphicsObject.parentItem.
        # PySide bug: https://bugreports.qt.io/browse/PYSIDE-441
        return self.getParam(item)

    def paramStateChanged(self):
        """Hook for subclasses; called after every parameter update."""
        pass
class Optic(pg.GraphicsObject, ParamObj):
    # Base class for optical elements: pairs a graphics item (the drawn
    # lens/mirror shape) with an interactive ROI used to drag/rotate it.
    sigStateChanged = QtCore.Signal()

    def __init__(self, gitem, **params):
        ParamObj.__init__(self)
        pg.GraphicsObject.__init__(self) #, [0,0], [1,1])
        self.gitem = gitem
        self.surfaces = gitem.surfaces
        gitem.setParentItem(self)
        # ROI overlay provides the user's drag/rotate handles.
        self.roi = pg.ROI([0,0], [1,1])
        self.roi.addRotateHandle([1, 1], [0.5, 0.5])
        self.roi.setParentItem(self)
        defaults = {
            'pos': Point(0,0),
            'angle': 0,
        }
        defaults.update(params)
        self._ior_cache = {}
        self.roi.sigRegionChanged.connect(self.roiChanged)
        self.setParams(**defaults)

    def updateTransform(self):
        # Rebuild this item's transform from the 'pos'/'angle' parameters.
        self.resetTransform()
        self.setPos(0, 0)
        self.translate(Point(self['pos']))
        self.rotate(self['angle'])

    def setParam(self, param, val):
        ParamObj.setParam(self, param, val)

    def paramStateChanged(self):
        """Some parameters of the optic have changed."""
        # Move graphics item
        self.gitem.setPos(Point(self['pos']))
        self.gitem.resetTransform()
        self.gitem.rotate(self['angle'])

        # Move ROI to match; disconnect first so this does not re-enter
        # roiChanged while we reposition it programmatically.
        try:
            self.roi.sigRegionChanged.disconnect(self.roiChanged)
            br = self.gitem.boundingRect()
            o = self.gitem.mapToParent(br.topLeft())
            self.roi.setAngle(self['angle'])
            self.roi.setPos(o)
            self.roi.setSize([br.width(), br.height()])
        finally:
            self.roi.sigRegionChanged.connect(self.roiChanged)
        self.sigStateChanged.emit()

    def roiChanged(self, *args):
        # User moved/rotated the ROI: translate that back into parameters.
        pos = self.roi.pos()
        # rotate gitem temporarily so we can decide where it will need to move
        self.gitem.resetTransform()
        self.gitem.rotate(self.roi.angle())
        br = self.gitem.boundingRect()
        o1 = self.gitem.mapToParent(br.topLeft())
        self.setParams(angle=self.roi.angle(), pos=pos + (self.gitem.pos() - o1))

    def boundingRect(self):
        # The child items (gitem, roi) provide the visible geometry.
        return QtCore.QRectF()

    def paint(self, p, *args):
        pass

    def ior(self, wavelength):
        # Index of refraction of this optic's glass at *wavelength* (nm).
        return GLASSDB.ior(self['glass'], wavelength)
class Lens(Optic):
    # Refractive element with two circular/flat surfaces.
    def __init__(self, **params):
        defaults = {
            'dia': 25.4,  ## diameter of lens
            'r1': 50.,    ## positive means convex, use 0 for planar
            'r2': 0,      ## negative means convex
            'd': 4.0,
            'glass': 'N-BK7',
            'reflect': False,
        }
        defaults.update(params)
        # Convert center thickness 'd' into surface x-offsets about origin.
        d = defaults.pop('d')
        defaults['x1'] = -d/2.
        defaults['x2'] = d/2.
        gitem = CircularSolid(brush=(100, 100, 130, 100), **defaults)
        Optic.__init__(self, gitem, **defaults)

    def propagateRay(self, ray):
        """Refract, reflect, absorb, and/or scatter ray. This function may create and return new rays"""

        """
        NOTE:: We can probably use this to compute refractions faster: (from GLSL 120 docs)

            For the incident vector I and surface normal N, and the
            ratio of indices of refraction eta, return the refraction
            vector. The result is computed by
            k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I))
            if (k < 0.0)
                return genType(0.0)
            else
                return eta * I - (eta * dot(N, I) + sqrt(k)) * N
            The input parameters for the incident vector I and the
            surface normal N must already be normalized to get the
            desired results. eta == ratio of IORs

            For reflection:
            For the incident vector I and surface orientation N,
            returns the reflection direction:
            I – 2 ∗ dot(N, I) ∗ N
            N must already be normalized in order to achieve the
            desired result.
        """
        # Trace through both surfaces in turn: into the glass (glass ior),
        # then back out into air (ior 1.0).
        iors = [self.ior(ray['wl']), 1.0]
        for i in [0,1]:
            surface = self.surfaces[i]
            ior = iors[i]
            p1, ai = surface.intersectRay(ray)
            #print "surface intersection:", p1, ai*180/3.14159
            #trans = self.sceneTransform().inverted()[0] * surface.sceneTransform()
            #p1 = trans.map(p1)
            if p1 is None:
                # Ray misses this surface: terminate it.
                ray.setEnd(None)
                break
            p1 = surface.mapToItem(ray, p1)
            #print "adjusted position:", p1
            #ior = self.ior(ray['wl'])
            rd = ray['dir']
            a1 = np.arctan2(rd[1], rd[0])
            # Snell's law: new direction from incidence angle and ior ratio.
            ar = a1 - ai + np.arcsin((np.sin(ai) * ray['ior'] / ior))
            #print [x for x in [a1, ai, (np.sin(ai) * ray['ior'] / ior), ar]]
            #print ai, np.sin(ai), ray['ior'],  ior
            ray.setEnd(p1)
            dp = Point(np.cos(ar), np.sin(ar))
            #p2 = p1+dp
            #p1p = self.mapToScene(p1)
            #p2p = self.mapToScene(p2)
            #dpp = Point(p2p-p1p)
            ray = Ray(parent=ray, ior=ior, dir=dp)
        return [ray]
class Mirror(Optic):
    # Reflective element; modeled as a very thin solid (d = 0.01).
    def __init__(self, **params):
        defaults = {
            'r1': 0,
            'r2': 0,
            'd': 0.01,
        }
        defaults.update(params)
        d = defaults.pop('d')
        defaults['x1'] = -d/2.
        defaults['x2'] = d/2.
        gitem = CircularSolid(brush=(100,100,100,255), **defaults)
        Optic.__init__(self, gitem, **defaults)

    def propagateRay(self, ray):
        """Refract, reflect, absorb, and/or scatter ray. This function may create and return new rays"""
        # Only the first surface reflects; rays that miss are terminated.
        surface = self.surfaces[0]
        p1, ai = surface.intersectRay(ray)
        if p1 is not None:
            p1 = surface.mapToItem(ray, p1)
            rd = ray['dir']
            a1 = np.arctan2(rd[1], rd[0])
            # Specular reflection: flip the direction about the normal.
            ar = a1 + np.pi - 2*ai
            ray.setEnd(p1)
            dp = Point(np.cos(ar), np.sin(ar))
            ray = Ray(parent=ray, dir=dp)
        else:
            ray.setEnd(None)
        return [ray]
class CircularSolid(pg.GraphicsObject, ParamObj):
    """GraphicsObject with two circular or flat surfaces."""
    def __init__(self, pen=None, brush=None, **opts):
        """
        Arguments for each surface are:
           x1,x2 - position of center of _physical surface_
           r1,r2 - radius of curvature
           d1,d2 - diameter of optic
        """
        defaults = dict(x1=-2, r1=100, d1=25.4, x2=2, r2=100, d2=25.4)
        defaults.update(opts)
        ParamObj.__init__(self)
        # Second surface gets a negated radius so both curve "outward".
        self.surfaces = [CircleSurface(defaults['r1'], defaults['d1']), CircleSurface(-defaults['r2'], defaults['d2'])]
        pg.GraphicsObject.__init__(self)
        for s in self.surfaces:
            s.setParentItem(self)

        if pen is None:
            self.pen = pg.mkPen((220,220,255,200), width=1, cosmetic=True)
        else:
            self.pen = pg.mkPen(pen)

        if brush is None:
            self.brush = pg.mkBrush((230, 230, 255, 30))
        else:
            self.brush = pg.mkBrush(brush)

        self.setParams(**defaults)

    def paramStateChanged(self):
        self.updateSurfaces()

    def updateSurfaces(self):
        # Push current parameters into both surfaces, then rebuild the
        # closed outline path from their two arcs.
        self.surfaces[0].setParams(self['r1'], self['d1'])
        self.surfaces[1].setParams(-self['r2'], self['d2'])
        self.surfaces[0].setPos(self['x1'], 0)
        self.surfaces[1].setPos(self['x2'], 0)

        self.path = QtGui.QPainterPath()
        self.path.connectPath(self.surfaces[0].path.translated(self.surfaces[0].pos()))
        self.path.connectPath(self.surfaces[1].path.translated(self.surfaces[1].pos()).toReversed())
        self.path.closeSubpath()

    def boundingRect(self):
        return self.path.boundingRect()

    def shape(self):
        return self.path

    def paint(self, p, *args):
        p.setRenderHints(p.renderHints() | p.Antialiasing)
        p.setPen(self.pen)
        p.fillPath(self.path, self.brush)
        p.drawPath(self.path)
class CircleSurface(pg.GraphicsObject):
    def __init__(self, radius=None, diameter=None):
        """center of physical surface is at 0,0
        radius is the radius of the surface. If radius is None, the surface is flat.
        diameter is of the optic's edge."""
        pg.GraphicsObject.__init__(self)

        self.r = radius
        self.d = diameter
        self.mkPath()

    def setParams(self, r, d):
        self.r = r
        self.d = d
        self.mkPath()

    def mkPath(self):
        # Rebuild the painter path: a vertical line for a flat surface,
        # otherwise a circular arc spanning the optic's aperture.
        self.prepareGeometryChange()
        r = self.r
        d = self.d
        h2 = d/2.
        self.path = QtGui.QPainterPath()
        if r == 0:  ## flat surface
            self.path.moveTo(0, h2)
            self.path.lineTo(0, -h2)
        else:
            ## half-height of surface can't be larger than radius
            h2 = min(h2, abs(r))
            #dx = abs(r) - (abs(r)**2 - abs(h2)**2)**0.5
            #p.moveTo(-d*w/2.+ d*dx, d*h2)
            arc = QtCore.QRectF(0, -r, r*2, r*2)
            #self.surfaces.append((arc.center(), r, h2))
            a1 = np.arcsin(h2/r) * 180. / np.pi
            a2 = -2*a1
            a1 += 180.
            self.path.arcMoveTo(arc, a1)
            self.path.arcTo(arc, a1, a2)
            #if d == -1:
                #p1 = QtGui.QPainterPath()
                #p1.addRect(arc)
                #self.paths.append(p1)
        self.h2 = h2

    def boundingRect(self):
        return self.path.boundingRect()

    def paint(self, p, *args):
        return  ## usually we let the optic draw.
        #p.setPen(pg.mkPen('r'))
        #p.drawPath(self.path)

    def intersectRay(self, ray):
        ## return the point of intersection and the angle of incidence
        #print "intersect ray"
        h = self.h2
        r = self.r
        p, dir = ray.currentState(relativeTo=self)  # position and angle of ray in local coords.
        #print " ray: ", p, dir
        p = p - Point(r, 0)  ## move position so center of circle is at 0,0
        #print " adj: ", p, r

        if r == 0:
            #print "  flat"
            # Flat surface: intersect the ray with the x=0 line.
            if dir[0] == 0:
                y = 0
            else:
                y = p[1] - p[0] * dir[1]/dir[0]
            if abs(y) > h:
                # Intersection falls outside the aperture.
                return None, None
            else:
                return (Point(0, y), np.arctan2(dir[1], dir[0]))
        else:
            #print "  curve"
            ## find intersection of circle and line (quadratic formula)
            dx = dir[0]
            dy = dir[1]
            dr = (dx**2 + dy**2) ** 0.5
            D = p[0] * (p[1]+dy) - (p[0]+dx) * p[1]
            idr2 = 1.0 / dr**2
            disc = r**2 * dr**2 - D**2
            if disc < 0:
                # Line misses the circle entirely.
                return None, None
            disc2 = disc**0.5
            if dy < 0:
                sgn = -1
            else:
                sgn = 1

            # Pick whichever of the two intersection points lies on the
            # drawn arc (within the path's bounding rect).
            br = self.path.boundingRect()
            x1 = (D*dy + sgn*dx*disc2) * idr2
            y1 = (-D*dx + abs(dy)*disc2) * idr2
            if br.contains(x1+r, y1):
                pt = Point(x1, y1)
            else:
                x2 = (D*dy - sgn*dx*disc2) * idr2
                y2 = (-D*dx - abs(dy)*disc2) * idr2
                pt = Point(x2, y2)
                if not br.contains(x2+r, y2):
                    return None, None
                    # NOTE(review): unreachable after the return above.
                    raise Exception("No intersection!")

            norm = np.arctan2(pt[1], pt[0])
            if r < 0:
                norm += np.pi
            #print "  norm:", norm*180/3.1415
            dp = p - pt
            #print "  dp:", dp
            ang = np.arctan2(dp[1], dp[0])
            #print "  ang:", ang*180/3.1415
            #print "  ai:", (ang-norm)*180/3.1415
            #print "  intersection:", pt
            # Return point in surface coords and the angle of incidence.
            return pt + Point(r, 0), ang-norm
class Ray(pg.GraphicsObject, ParamObj):
    """Represents a single straight segment of a ray"""
    # Rays form a tree: each refraction/reflection creates a child segment
    # that starts where its parent ends and inherits wl/ior/dir.

    sigStateChanged = QtCore.Signal()

    def __init__(self, **params):
        ParamObj.__init__(self)
        defaults = {
            'ior': 1.0,
            'wl': 500,
            'end': None,
            'dir': Point(1,0),
        }
        self.params = {}
        pg.GraphicsObject.__init__(self)
        self.children = []
        parent = params.get('parent', None)
        if parent is not None:
            # Continue from the parent's endpoint with inherited properties.
            defaults['start'] = parent['end']
            defaults['wl'] = parent['wl']
            self['ior'] = parent['ior']
            self['dir'] = parent['dir']
            parent.addChild(self)

        defaults.update(params)
        defaults['dir'] = Point(defaults['dir'])
        self.setParams(**defaults)
        self.mkPath()

    def clearChildren(self):
        # Recursively detach and remove all continuation segments.
        for c in self.children:
            c.clearChildren()
            c.setParentItem(None)
            self.scene().removeItem(c)
        self.children = []

    def paramStateChanged(self):
        pass

    def addChild(self, ch):
        self.children.append(ch)
        ch.setParentItem(self)

    def currentState(self, relativeTo=None):
        # Return (position, direction), optionally mapped into another
        # item's coordinate system.
        pos = self['start']
        dir = self['dir']
        if relativeTo is None:
            return pos, dir
        else:
            trans = self.itemTransform(relativeTo)[0]
            p1 = trans.map(pos)
            p2 = trans.map(pos + dir)
            return Point(p1), Point(p2-p1)

    def setEnd(self, end):
        self['end'] = end
        self.mkPath()

    def boundingRect(self):
        return self.path.boundingRect()

    def paint(self, p, *args):
        #p.setPen(pg.mkPen((255,0,0, 150)))
        p.setRenderHints(p.renderHints() | p.Antialiasing)
        # Additive composition so overlapping rays brighten each other.
        p.setCompositionMode(p.CompositionMode_Plus)
        p.setPen(wlPen(self['wl']))
        p.drawPath(self.path)

    def mkPath(self):
        self.prepareGeometryChange()
        self.path = QtGui.QPainterPath()
        self.path.moveTo(self['start'])
        if self['end'] is not None:
            self.path.lineTo(self['end'])
        else:
            # Unterminated ray: draw far off in its travel direction.
            self.path.lineTo(self['start']+500*self['dir'])
def trace(rays, optics):
    """Recursively propagate *rays* through *optics*, depth-first.

    Each ray is cleared of previously traced children, propagated through
    the first optic, and the resulting rays are traced through the
    remaining optics. Recursion stops when either list is empty.
    """
    if not rays or not optics:
        return
    first, remaining = optics[0], optics[1:]
    for ray in rays:
        ray.clearChildren()
        trace(first.propagateRay(ray), remaining)
class Tracer(QtCore.QObject):
    """
    Simple ray tracer.

    Holds a list of rays and a list of optics. Every optic's
    sigStateChanged signal is connected to trace(), so the ray diagram is
    recomputed whenever any optic changes; an initial trace is performed
    immediately on construction.
    """
    def __init__(self, rays, optics):
        QtCore.QObject.__init__(self)
        self.rays = rays
        self.optics = optics
        for optic in self.optics:
            optic.sigStateChanged.connect(self.trace)
        self.trace()
    def trace(self):
        # Delegate to the module-level trace() with the stored objects.
        trace(self.rays, self.optics)
| pmaunz/pyqtgraph | examples/optics/pyoptic.py | Python | mit | 18,598 |
#!/usr/bin/env python3
import requests
import sys
from optparse import OptionParser, BadOptionError, AmbiguousOptionError
# BUGFIX: save_file() calls dump(obj, file_object, indent=2), which is the
# json.dump signature. orjson only provides dumps()/loads() (no `dump`), so
# `from orjson import dump` failed at import time.
from json import dump
from time import sleep
import six
class PhiloLogicRequest(object):
    """
    Process a PhiloLogic request and save the full result set in a text file
    in JSON or tab delimited format.

    Results are fetched in batches (the server caps hits per request),
    concatenated in memory according to the report type, and written to
    "<report>_results.json" by save_file().
    """

    def __init__(self, db_url, report, export_format="json", **args):
        """
        :param db_url: base URL of the PhiloLogic database
        :param report: "concordance", "kwic", "collocation" or "time_series"
        :param export_format: "json" (or "tab", not implemented yet)
        :param args: extra query parameters forwarded to the server
        """
        self.db_url = db_url
        self.report = report
        self.export_format = export_format.lower()
        self.philo_args = args
        ## Hardcoded to JSON since no other export function is currently available
        self.query_params = {"format": "json"}
        self.query_params["report"] = report
        self.query_params.update(args)
        self.query_params.setdefault("method", "proxy")
        self.query_params.setdefault("results_per_page", 500)
        self.query_params.setdefault("q", "")
        self.total_hits()
        print("\nTotal hits: %d" % self.total, file=sys.stderr)
        ## Count number of queries needed to retrieve the full result set
        if report == "concordance" or report == "kwic":
            self.interval = self.query_params["results_per_page"]
        else:
            self.interval = 1000
        # BUGFIX: floor division. Plain "/" yields a float on Python 3,
        # which later breaks range(self.steps) in the concatenate_* methods.
        self.steps = self.total // self.interval
        remainder = self.total % self.interval
        if remainder:
            self.steps += 1
        print(
            "%d queries in batches of %d hits will be performed to retrieve the full result set:"
            % (self.steps, self.interval),
            file=sys.stderr,
        )

    def total_hits(self):
        """Ask the server for the total hit count and store it in self.total."""
        # BUGFIX: removed leftover debugging (print of the params/response
        # and an unconditional exit()) that aborted the whole program right
        # after the first request.
        try:
            r = requests.get(self.db_url + "/scripts/get_total_results.py", params=self.query_params, timeout=5)
        except requests.exceptions.ReadTimeout:
            print("\nGiving a couple seconds to compute hits...", file=sys.stderr)
            sleep(5)
            r = requests.get(self.db_url + "/scripts/get_total_results.py", params=self.query_params, timeout=30)
        except ValueError:
            # BUGFIX: requests raises ValueError on malformed URLs, in which
            # case the response object was never bound -- printing r.url here
            # raised NameError. Report the base URL instead.
            print("Invalid URL:", file=sys.stderr)
            print(self.db_url, file=sys.stderr)
            exit()
        if self.report == "time_series":
            # Time-series totals come from the report payload itself.
            r = self.query(start=0, end=1000)
            self.total = r.json()["results_length"]
        else:
            self.total = int(r.json())

    def query(self, timeout=60, params=None, **args):
        """Run one /query request and return the requests.Response.

        Extra keyword arguments are merged into *params* (defaulting to the
        stored query parameters) before the request is issued.
        """
        if params is None:
            params = self.query_params
        for k, v in args.items():
            params[k] = v
        response = requests.get(self.db_url + "/query", params=params, timeout=timeout)
        return response

    def build_result_set(self):
        """Fetch and concatenate all batches for the selected report type."""
        if self.report == "concordance" or self.report == "kwic":
            self.concatenate_concordance()
        elif self.report == "collocation":
            self.concatenate_collocation()
        elif self.report == "time_series":
            self.concatenate_time_series()

    def concatenate_concordance(self):
        """Merge concordance/KWIC batches into one results dict."""
        params = dict(self.query_params)
        self.results = {"results": []}
        for i in range(self.steps):
            start = i * self.interval
            end = start + self.interval - 1
            if end > self.total:
                end = self.total - 1
            params["start"] = start
            params["end"] = end
            print("Retrieving results %d-%d..." % (start + 1, end + 1), end=" ", file=sys.stderr)
            response = self.query(params=params)
            print("done.", file=sys.stderr)
            for k, v in six.iteritems(response.json()):
                if k == "results":
                    self.results["results"] += v
                    continue
                # Keep the metadata (query info, etc.) from the first batch.
                if k not in self.results:
                    self.results[k] = v
        # Patch the reported range so the merged payload covers everything.
        self.results["query"]["end"] = str(self.total - 1)

    def concatenate_collocation(self):
        """Merge collocation batches, summing per-word counts across batches."""
        params = dict(self.query_params)
        params["start"] = 0
        params["end"] = self.interval
        directions = ("all_collocates", "right_collocates", "left_collocates")
        self.results = {}
        for i in range(self.steps):
            start = i * self.interval
            end = start + self.interval - 1
            if end > self.total:
                end = self.total - 1
            params["start"] = start
            params["end"] = end
            print("Retrieving results %d-%d..." % (start + 1, end + 1), end=" ", file=sys.stderr)
            response = self.query(params=params)
            print("done.", file=sys.stderr)
            results_object = response.json()
            if not self.results:
                self.results = results_object
                continue
            # Identical merge logic for all three collocate directions.
            for direction in directions:
                merged = self.results[direction]
                for word, word_obj in six.iteritems(results_object[direction]):
                    if word in merged:
                        merged[word]["count"] += word_obj["count"]
                    else:
                        merged[word] = word_obj
        for direction in directions:
            # BUGFIX: sort on the numeric count; comparing the raw word_obj
            # dicts (key=lambda x: x[1]) raises TypeError under Python 3.
            self.results[direction] = sorted(
                six.iteritems(self.results[direction]),
                key=lambda x: x[1]["count"],
                reverse=True)

    def concatenate_time_series(self):
        """Merge time-series batches, summing per-year absolute counts."""
        params = dict(self.query_params)
        if "year_interval" not in params:
            params["year_interval"] = "10"
        params["start"] = 0
        params["end"] = self.interval
        self.results = {}
        for i in range(self.steps):
            start = i * self.interval
            end = start + self.interval - 1
            if end > self.total:
                end = self.total - 1
            params["start"] = start
            params["end"] = end
            print("Retrieving results %d-%d..." % (start + 1, end + 1), end=" ", file=sys.stderr)
            response = self.query(params=params)
            print("done.", file=sys.stderr)
            results_object = response.json()
            if not self.results:
                self.results = results_object
                continue
            for year, count in six.iteritems(results_object["results"]["absolute_count"]):
                if year in self.results["results"]["absolute_count"]:
                    self.results["results"]["absolute_count"][year] += count
                else:
                    self.results["results"]["absolute_count"][year] = count
            # date_count is a per-year total, not a per-batch value: keep the
            # first occurrence rather than summing.
            for year, count in six.iteritems(results_object["results"]["date_count"]):
                if year not in self.results["results"]["date_count"]:
                    self.results["results"]["date_count"][year] = count
        self.results["results"]["absolute_count"] = sorted(
            six.iteritems(self.results["results"]["absolute_count"]), key=lambda x: x[0]
        )
        self.results["results"]["date_count"] = sorted(
            six.iteritems(self.results["results"]["date_count"]), key=lambda x: x[0]
        )

    def save_file(self):
        """Write self.results to "<report>_results.json" (JSON only for now)."""
        if self.export_format == "json":
            # Local import: json.dump writes to a file object with indent=,
            # which orjson cannot do.
            import json
            filename = "%s_results.json" % self.report
            print("\nSaving %s to disk..." % filename, end=" ", file=sys.stderr)
            # BUGFIX: context manager guarantees the file is flushed/closed.
            with open(filename, "w") as output_file:
                json.dump(self.results, output_file, indent=2)
        elif self.export_format == "tab":
            print("TAB export not implemented yet, try JSON output", file=sys.stderr)
            exit()
        print("done.", file=sys.stderr)
class PassThroughOptionParser(OptionParser):
    """
    An OptionParser variant that tolerates unknown options.

    Whenever optparse raises for an unrecognized or ambiguous option, the
    offending option string is moved to the leftover-arguments list and
    parsing resumes, until all remaining arguments have been consumed.
    Known options that are used incorrectly (missing values, bad types,
    etc.) still trigger the usual sys.exit(status).
    """
    def _process_args(self, largs, rargs, values):
        while rargs:
            try:
                super()._process_args(largs, rargs, values)
            except (BadOptionError, AmbiguousOptionError) as err:
                # Keep the unknown option around as a positional argument.
                largs.append(err.opt_str)
def parse_command_line(arguments):
    """Parse known CLI options plus arbitrary key/value PhiloLogic parameters.

    Unknown options are interpreted pairwise as PhiloLogic query parameters:
    each leftover argument becomes a key (leading dashes stripped) whose
    value is the following argument.

    :param arguments: argument list, typically sys.argv[1:]
    :returns: (db_url, report, export_format, arg_dict) tuple
    Exits the process when no database URL is supplied.
    """
    parser = PassThroughOptionParser()
    parser.add_option(
        "-r",
        "--report",
        action="store",
        default="",
        type="string",
        dest="report",
        help="select PhiloLogic search report",
    )
    parser.add_option(
        "-d",
        "--db-url",
        action="store",
        default="",
        type="string",
        dest="db_url",
        help="select database URL for search",
    )
    parser.add_option(
        "-e",
        "--export-format",
        action="store",
        default="",
        type="string",
        dest="export_format",
        help="select output format. Options are JSON or TAB.",
    )
    ## Parse command-line arguments
    options, args = parser.parse_args(arguments)
    ## Parse all other options
    arg_dict = {}
    while args:
        # BUGFIX: strip only *leading* dashes so parameter names that
        # contain hyphens are not mangled (replace("-", "") removed all).
        key = args.pop(0).lstrip("-")
        if not args:
            # BUGFIX: a trailing key without a value used to raise
            # IndexError; warn and stop instead.
            print("Ignoring option '%s': no value supplied" % key, file=sys.stderr)
            break
        arg_dict[key] = args.pop(0)
    if not options.db_url:
        print("No database URL provided, exiting...", file=sys.stderr)
        exit()
    if not options.report:
        print("No search report selected, defaulting to concordance...", file=sys.stderr)
        report = "concordance"
    else:
        report = options.report
    if not options.export_format:
        print("No export format selected, exporting to JSON...", file=sys.stderr)
        export_format = "json"
    else:
        export_format = options.export_format
    return options.db_url, report, export_format, arg_dict
if __name__ == "__main__":
    # Command-line entry point: fetch the full result set and write it out.
    url, report_name, out_format, extra_params = parse_command_line(sys.argv[1:])
    request = PhiloLogicRequest(url, report_name, export_format=out_format, **extra_params)
    request.build_result_set()
    request.save_file()
| ARTFL-Project/PhiloLogic4 | extras/exportResults.py | Python | gpl-3.0 | 11,018 |
# -*- coding: utf-8 -*-
from demo_app import controller
import actions
def register_actions():
    """Register the demo action packs with the global action controller."""
    packs = [
        actions.GaragePack(),
        actions.ToolPack(),
        actions.StaffPack(),
    ]
    controller.action_controller.packs.extend(packs)
| barsgroup/objectpack | tests/objectpack_demo/demo_app/slaves/app_meta.py | Python | mit | 241 |
import os
import time
from unittest import TestCase
from mwscan import scan, settings
from mwscan.ruleset import Files
from collections import namedtuple
try:
from unittest import mock
except ImportError:
import mock
class TestWebMalwareScanner(TestCase):
    """Exercises mwscan.scan against the bundled fixture files and rules."""
    def _load_file_rules(self, path):
        # Files() expects an argparse-like object exposing a .rules attribute.
        args = namedtuple('Args', 'rules')(rules=path)
        return Files(args=args).get()
    def setUp(self):
        settings.CACHEDIR = '/tmp'
        settings.LAST_RUN_FILE = '/tmp/last_run'
        fixtures = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'fixtures')
        self.fixture_path = fixtures
        self.rules_path = os.path.join(fixtures, 'rules.yar')
        self.target_path = os.path.join(fixtures, 'files')
        self.state_file = scan.scanpath_to_runfile(self.target_path)
        self.new_file = os.path.join(self.target_path, 'newer_malware')
        assert self.state_file.startswith('/tmp')
        # Remove leftovers from a previously cancelled test run.
        for stale in (self.new_file, self.state_file):
            try:
                os.unlink(stale)
            except OSError:
                pass
        self.rules, self.whitelist = self._load_file_rules(self.rules_path)
    def test_normal_scan(self):
        targets = scan.find_targets(self.target_path)
        malware, whitelisted = scan.scan_files(targets, self.rules, self.whitelist)
        self.assertEqual(len(malware), 2)
        self.assertEqual(len(whitelisted), 1)
    def test_scan_callback(self):
        # The callback must fire once per malware hit.
        targets = scan.find_targets(self.target_path)
        callback = mock.MagicMock()
        scan.scan_files(targets, self.rules, self.whitelist, callback)
        self.assertEqual(callback.call_count, 2)
    def test_filter_extensions(self):
        targets = scan.find_targets(self.target_path, req_ext=['php'])
        malware, whitelisted = scan.scan_files(targets, self.rules, self.whitelist)
        self.assertEqual(len(malware), 1)
        self.assertEqual(len(whitelisted), 0)
    def test_external_rule_file(self):
        # A vanilla rule set without whitelist entries.
        targets = scan.find_targets(self.target_path)
        vanilla_rules = os.path.join(self.fixture_path, 'rules-vanilla.yar')
        self.rules, self.whitelist = self._load_file_rules(vanilla_rules)
        malware, whitelisted = scan.scan_files(targets, self.rules, self.whitelist)
        self.assertEqual(len(malware), 2)
        self.assertEqual(len(whitelisted), 0)
    def test_scan_targets_for_new_files_only(self):
        cutoff = time.time() - 5
        with open(self.new_file, 'w') as fh:
            fh.write('BAD\n')
        targets = scan.find_targets(self.target_path, newer_than=cutoff)
        self.assertEqual(len(list(targets)), 1)
        os.unlink(self.new_file)
| davidalger/magento-malware-scanner | mwscan/tests/test_mwscan.py | Python | gpl-3.0 | 2,739 |
"""
Explicit mapping of NumPy array operations to Chapel array operations.
"""
from pych.extern import Chapel
import numpy as np
# pylint: disable=no-member
# The ndarray member is added dynamically and therefore not visible to pylint.
# pylint: disable=unused-argument
# The interoperability interface exploits arguments, their use are therefore
# not visible to pylint.
@Chapel()
def pych_ewise_add(in1=np.ndarray, in2=np.ndarray, res=np.ndarray):
    """
    res = in1 + in2;
    """
    # The docstring above is the Chapel kernel compiled by @Chapel():
    # element-wise addition of in1 and in2 stored into res.
    # The Python body is never executed.
    return None
#@Chapel()
#def pych_ewise_assign(in1=np.ndarray, res=np.ndarray):
# """
# res = in1;
# """
@Chapel()
def pych_ewise_assign(in1=int, res=np.ndarray):
    """
    res = in1;
    """
    # Chapel kernel: broadcast-assign the scalar in1 to every element of
    # res. (An ndarray-to-ndarray variant is commented out above.)
    return None
@Chapel()
def pych_ewise_subtract(in1=np.ndarray, in2=np.ndarray, res=np.ndarray):
    """
    res = in1 - in2;
    """
    # Chapel kernel: element-wise subtraction of in2 from in1 into res.
    return None
@Chapel()
def pych_scan_add(in1=np.ndarray, in2=np.ndarray, res=np.ndarray):
    """
    res = in1 - in2;
    """
    # NOTE(review): despite the name, the Chapel body above performs a
    # subtraction (identical to pych_ewise_subtract) rather than a scan
    # (prefix-sum) operation -- this looks like a copy-paste placeholder.
    # Confirm the intended semantics before relying on this kernel.
    return None
@Chapel()
def pych_reduce_add(in1=np.ndarray, axis=int, res=np.ndarray):
    """
    res = +reduce(in1);
    """
    # Chapel kernel: sum-reduction of in1 into res.
    # NOTE(review): the `axis` parameter is not referenced by the Chapel
    # body -- presumably a full reduction is always performed; confirm.
    return None
| chapel-lang/pychapel | module/pych/array_ops.py | Python | apache-2.0 | 1,111 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para uploadable
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def test_video_exists(page_url):
    """Report whether the video exists; this connector always answers yes."""
    logger.info("pelisalacarta.servers.uploadable test_video_exists(page_url='%s')" % page_url)
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve playable media URLs for a page; not implemented, returns []."""
    logger.info("pelisalacarta.servers.uploadable get_video_url(page_url='%s')" % page_url)
    return []
# Encuentra vídeos del servidor en el texto pasado
def find_videos(data):
    """Scan *data* for uploadable.ch list links.

    Returns a list of [title, url, server] triples, skipping duplicate URLs.
    """
    seen = set()
    found = []
    # e.g. https://www.uploadable.ch/list/cKMCXrm7gZqv
    patronvideos = 'uploadable.ch/list/([A-Za-z0-9]+)'
    logger.info("pelisalacarta.servers.uploadable find_videos #"+patronvideos+"#")
    for match in re.findall(patronvideos, data, re.DOTALL):
        url = "https://www.uploadable.ch/list/" + match
        if url in seen:
            logger.info("    url duplicada="+url)
            continue
        logger.info("    url="+url)
        found.append(["[uploadable]", url, 'uploadable'])
        seen.add(url)
    return found
| conejoninja/pelisalacarta | python/main-classic/servers/uploadable.py | Python | gpl-3.0 | 1,505 |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from flexget import plugin
from flexget.event import event
from flexget.components.archives import utils
log = logging.getLogger('archives')
class FilterArchives(object):
    """
    Accepts entries that are valid Zip or RAR archives.

    This plugin requires the rarfile Python module and the unrar command
    line utility to handle RAR archives.

    Configuration:
        unrar_tool: path to the unrar binary; only needed when it is not
            discoverable through the PATH environment variable.
    """

    schema = {
        'anyOf': [
            {'type': 'boolean'},
            {
                'type': 'object',
                'properties': {'unrar_tool': {'type': 'string'}},
                'additionalProperties': False,
            },
        ]
    }

    def prepare_config(self, config):
        """Normalize the configuration to a dict with defaults filled in."""
        cfg = config if isinstance(config, dict) else {}
        cfg.setdefault('unrar_tool', '')
        return cfg

    @plugin.priority(200)
    def on_task_filter(self, task, config):
        """Accept entries pointing at archives, reject everything else."""
        if config is False:
            # `archives: no` disables the plugin entirely.
            return
        utils.rarfile_set_tool_path(self.prepare_config(config))
        for entry in task.entries:
            location = entry.get('location', '')
            if utils.is_archive(location):
                entry.accept()
            else:
                entry.reject()
@event('plugin.register')
def register_plugin():
    # Expose this filter under the plugin name 'archives' (plugin API v2).
    plugin.register(FilterArchives, 'archives', api_ver=2)
import re
from hqm_helpers import load_legacy_chapters, dump_chapters
from migration_map import load_remap_as_dict
class IdRules(object):
    """An ordered list of regex substitution rules applied to item ids.

    Each rule is a (pattern, replacement) pair; patterns are compiled once
    at construction time and applied in order by apply_rules().
    """
    def __init__(self, sub_rules=None):
        """:param sub_rules: iterable of (regex, replacement) pairs."""
        # BUGFIX: use None instead of a shared mutable default argument ([]).
        if sub_rules is None:
            sub_rules = []
        self.sub_rules = [(re.compile(regex), sub) for regex, sub in sub_rules]
    def apply_rules(self, in_id):
        """Return *in_id* with every substitution rule applied in sequence."""
        transformed = in_id
        for pattern, replacement in self.sub_rules:
            transformed = pattern.sub(replacement, transformed)
        return transformed
# Let's keep a tally of all the stuff we muck with!
change_count = 0
def iterate_all_ids(json_obj, id_rules, meta_remaps, remap_dict):
    """Walk a parsed HQM JSON tree and rewrite legacy item ids in place.

    Three remap strategies are tried in order for every dict node:
      1. an exact "id" (or "id:damage") lookup in *remap_dict*,
      2. the id+damage rules in *meta_remaps* (MetaBlockRemap-like objects),
      3. the regex substitutions in *id_rules*, applied to "id" values,
    then the walk recurses into nested dicts and lists. The module-level
    change_count tally is incremented for every rewrite performed.
    """
    global change_count
    if type(json_obj) is dict:
        # First let's see if we have a 1:1 map for it.
        if "id" in json_obj:
            old_id = json_obj["id"]
            remap_key = old_id
            if "damage" in json_obj:
                remap_key = "%s:%s" % (old_id, json_obj["damage"])
            if remap_key in remap_dict:
                remapped = remap_dict[remap_key]
                new_name = remapped.name
                json_obj["id"] = new_name
                if hasattr(remapped, "metadata"):
                    # BUGFIX: actually store the remapped damage value;
                    # previously it only appeared in the log message.
                    json_obj["damage"] = remapped.metadata
                    new_name += " metadata=%s" % remapped.metadata
                elif "damage" in json_obj:
                    # BUGFIX: drop the stale legacy damage value. We are not
                    # iterating this dict at this point, so deletion is safe.
                    del json_obj["damage"]
                print("Remap %s => %s" % (old_id, new_name))
                change_count += 1
                return
        # Try the metadata-based remap rules.
        for meta_remap in meta_remaps:
            if meta_remap.matches_json_dict(json_obj):
                meta_remap.remap(json_obj)
                change_count += 1
                return
        # Fall back to the regular expressions and recurse into children.
        for key, val in json_obj.items():
            if type(val) is str and key.lower() == "id":
                transformed = id_rules.apply_rules(val)
                print("Regex %s => %s" % (val, transformed))
                change_count += 1
                json_obj[key] = transformed
            elif type(val) is dict or type(val) is list:
                iterate_all_ids(val, id_rules, meta_remaps, remap_dict)
    elif type(json_obj) is list:
        for list_item in json_obj:
            iterate_all_ids(list_item, id_rules, meta_remaps, remap_dict)
class MetaBlockRemap(object):
    """Maps a specific (id, damage) pair onto a new id and optional damage.

    When *new_damage* is None, remap() removes the "damage" key entirely.
    """
    def __init__(self, original_id, original_damage, new_id, new_damage=None):
        self.original_id = original_id
        self.original_damage = original_damage
        self.new_id = new_id
        self.new_damage = new_damage
    def matches_json_dict(self, json_dict):
        """Return True when *json_dict* carries exactly the targeted id/damage pair."""
        if "id" not in json_dict or "damage" not in json_dict:
            return False
        return (json_dict["id"] == self.original_id
                and json_dict["damage"] == self.original_damage)
    def remap(self, json_dict):
        """Rewrite *json_dict* in place with the new id (and damage, if any)."""
        json_dict["id"] = self.new_id
        if self.new_damage is None:
            del json_dict["damage"]
        else:
            json_dict["damage"] = self.new_damage
if __name__ == "__main__":
    sets = load_legacy_chapters()
    # Regex fallback rules: (pattern, replacement) pairs applied to any "id"
    # value not covered by the 1:1 remap table or the metadata rules below.
    rules = [
        ("GalacticraftCore:tile\.", "galacticraftcore:"),
        ("GalacticraftCore:item\.", "galacticraftcore:"),
        ("galacticraftcore:oxygenCollector", "galacticraftcore:collector"),
        ("galacticraftcore:oxygenPipe", "galacticraftcore:fluid_pipe"),
        ("galacticraftcore:rocketWorkbench", "galacticraftcore:rocket_workbench"),
        ("galacticraftcore:oxygenDetector", "galacticraftcore:oxygen_detector"),
        ("galacticraftcore:glowstoneTorch", "galacticraftcore:glowstone_torch"),
        ("galacticraftcore:item.itemTier1Rocket", "galacticraftcore:rocket_t1"),
        ("GalacticraftMars:item.itemTier2Rocket", "galacticraftplanets:rocket_t2"),
        ("GalacticraftMars:item.itemTier3Rocket", "galacticraftplanets:rocket_t3"),
        ("GalacticraftMars:item.itemAstroMiner", "galacticraftplanets:astro_miner"),
        ("GalacticraftMars:tile.mars", "galacticraftplanets:mars"),
        ("galacticraftcore:landingPad", "galacticraftcore:landing_pad"),
        ("galacticraftcore:oxygenGear", "galacticraftcore:oxygen_gear"),
        ("galacticraftcore:rocketWorkbench", "galacticraftcore:rocket_workbench"),
        ("galacticraftcore:spinThruster", "galacticraftcore:spin_thruster"),
        ("galacticraftcore:airLockFrame", "galacticraftcore:air_lock_frame"),
        ("galacticraftcore:meteoricIronRaw", "galacticraftcore:meteoric_iron_raw"),
        ("appliedenergistics2:item\.", "appliedenergistics2:"),
        ("appliedenergistics2:tile\.", "appliedenergistics2:"),
        ("appliedenergistics2:BlockCharger", "appliedenergistics2:charger"),
        ("appliedenergistics2:BlockSecurity", "appliedenergistics2:security_station"),
        ("appliedenergistics2:BlockEnergyAcceptor", "appliedenergistics2:energy_acceptor"),
        ("appliedenergistics2:BlockMolecularAssembler", "appliedenergistics2:molecular_assembler"),
        ("appliedenergistics2:BlockController", "appliedenergistics2:controller"),
        ("appliedenergistics2:BlockSkyCompass", "appliedenergistics2:sky_compass"),
        ("appliedenergistics2:BlockCraftingStorage", "appliedenergistics2:crafting_storage_1k"),
        ("appliedenergistics2:BlockChest", "appliedenergistics2:chest"),
        ("appliedenergistics2:BlockDrive", "appliedenergistics2:drive"),
        ("appliedenergistics2:ToolWirelessTerminal", "appliedenergistics2:wireless_terminal"),
        ("appliedenergistics2:BlockQuartzGrowthAccelerator", "appliedenergistics2:quartz_growth_accelerator"),
        ("appliedenergistics2:ItemBasicStorageCell.1k", "appliedenergistics2:storage_cell_1k"),
        ("appliedenergistics2:ItemEncodedPattern", "appliedenergistics2:encoded_pattern"),
        ("appliedenergistics2:BlockQuantumLinkChamber", "appliedenergistics2:quantum_link_chamber"),
        ("appliedenergistics2:BlockSpatialPylon", "appliedenergistics2:spatial_pylon"),
        ("appliedenergistics2:ItemMultiPart", "appliedenergistics2:part"),
        ("appliedenergistics2:ItemMultiMaterial", "appliedenergistics2:material"),
        ("BiblioCraft:item\.", "bibliocraft:"),
        ("TConstruct:Smeltery", "tconstruct:seared"),
        ("TConstruct:GlassBlock", "tconstruct:clear_glass"),
        ("TConstruct:SearedBrick", "tconstruct:materials"),
        ("TConstruct:ToolStationBlock", "tconstruct:tooltables"),
        ("TConstruct:ToolForgeBlock", "tconstruct:toolforge"),
        ("TConstruct:Crossbow", "tconstruct:crossbow"),
        ("TConstruct:jerky", "tconstruct:edible"),
        ("TConstruct:metalPattern", "tconstruct:pattern"),
        ("ExtraUtilities", "extrautils2"),
        ("ExtraUtilities:dark_portal", "extrautils2:teleporter"),
        ("ExtraUtilities:watering_can", "extrautils2:wateringcan"),
        ("ExtraUtilities:", "extrautils2:"),
        ("ExtraUtilities:", "extrautils2:"),
        ("ExtraUtilities:", "extrautils2:"),
        ("MineFactoryReloaded:machine.0", "minefactoryreloaded:machine_0"),
        ("MineFactoryReloaded:machine.1", "minefactoryreloaded:machine_1"),
        ("MineFactoryReloaded:machine.2", "minefactoryreloaded:machine_2"),
        ("MineFactoryReloaded:laserfocus", "minefactoryreloaded:laser_focus"),
        ("MineFactoryReloaded:plastic.bag", "minefactoryreloaded:plastic_bag"),
        ("MineFactoryReloaded:plastic.cup", "minefactoryreloaded:plastic_cup"),
        ("MineFactoryReloaded:upgrade.radius", "minefactoryreloaded:upgrade_radius"),
        ("MineFactoryReloaded:rubber\.", "minefactoryreloaded:rubber_"),
        ("chisel:factoryblock", "chisel:factory"),
        ("chisel:paperwall", "chisel:paper"),
        ("BigReactors:BROre", "bigreactors:minerals"),
        ("BigReactors:BRIngot", "bigreactors:ingotmetals"),
        ("BigReactors:BRMetalBlock", "bigreactors:blockmetals"),
        ("BigReactors:YelloriumFuelRod", "bigreactors:reactorfuelrod"),
        ("BigReactors:BRReactorRedstonePort", "bigreactors:reactorredstoneport"),
        ("BigReactors:BRDevice", "bigreactors:reactorredstoneport"),
        ("BigReactors:BRReactorPart", "bigreactors:reactorcasing"), # Sometimes BRReactorPart shows up without metadata. It looks like it's indicating reactor casings.
        ("Natura:natura.axe.bloodwood", "natura:bloodwood_axe"),
        ("Natura:NetherFurnace", "natura:netherrack_furnace"),
        ("Natura:berryMedley", "natura:soups"),
        ("Natura:berry", "natura:edibles"),
        ("Natura:barley", "natura:materials"),
        ("Natura:barleyFood", "natura:barley_crop"),
        ("rftools:remoteStorageBlock", "rftools:remote_storage"),
        ("rftools:dimensionEnscriberBlock", "rftools:dimension_enscriber"),
        ("rftools:dialingDeviceBlock", "rftools:dialing_device"),
        ("rftools:matterTransmitterBlock", "rftools:matter_transmitter"),
        ("rftools:remoteStorageBlock", "rftools:remote_storage"),
        ("rftools:modularStorageBlock", "rftools:modular_storage"),
        ("rftools:unknownDimlet", "rftools:unknown_dimlet"),
        ("Mekanism:PartTransmitter", "mekanism:MultipartTransmitter"),
        ("OpenBlocks:paintmixer", "openblocks:paint_mixer"),
        ("OpenBlocks:hangglider", "openblocks:hang_glider"),
        ("ThermalFoundation:FluidEnder", "thermalfoundation:fluid_ender"),
        ("ThermalFoundation:", "thermalfoundation:"),
        ("ThermalExpansion:", "thermalexpansion:"),
        ("CompactMachines:machine", "cm2:machine"),
        ("CompactMachines:innerwallcreative", "cm2:wall"),
        ("StorageDrawers:halfDrawers4", "storagedrawers:basicDrawers")
        #("", ""),
    ]
    id_rules = IdRules(rules)
    # Blocks whose new id depends on the legacy damage (metadata) value.
    id_damage_remaps = [MetaBlockRemap("BigReactors:BRReactorPart", 0, "bigreactors:reactorcasing"),
                        MetaBlockRemap("BigReactors:BRReactorPart", 1, "bigreactors:reactorcontroller"),
                        MetaBlockRemap("BigReactors:BRReactorPart", 2, "bigreactors:reactorcontrolrod"),
                        MetaBlockRemap("BigReactors:BRReactorPart", 3, "bigreactors:reactorpowertaprf"),
                        MetaBlockRemap("BigReactors:BRReactorPart", 4, "bigreactors:reactoraccessport"),
                        MetaBlockRemap("BigReactors:BRReactorPart", 5, "bigreactors:reactorcoolantport"),
                        MetaBlockRemap("BigReactors:BRReactorPart", 6, "bigreactors:reactorrednetport"),
                        MetaBlockRemap("BigReactors:BRReactorPart", 7, "bigreactors:reactorcomputerport"),
                        MetaBlockRemap("BigReactors:BRMultiblockGlass", 0, "bigreactors:reactorglass"),
                        MetaBlockRemap("BigReactors:BRMultiblockGlass", 1, "bigreactors:turbineglass"),
                        MetaBlockRemap("BigReactors:BRTurbinePart", 0, "bigreactors:turbinehousing"),
                        MetaBlockRemap("BigReactors:BRTurbinePart", 1, "bigreactors:turbinecontroller"),
                        MetaBlockRemap("BigReactors:BRTurbinePart", 2, "bigreactors:turbinepowertaprf"),
                        MetaBlockRemap("BigReactors:BRTurbinePart", 3, "bigreactors:turbinefluidport"),
                        MetaBlockRemap("BigReactors:BRTurbinePart", 4, "bigreactors:turbinebearing"),
                        MetaBlockRemap("BigReactors:BRTurbinePart", 5, "bigreactors:turbinecomputerport"),
                        MetaBlockRemap("BigReactors:BRTurbineRotorPart", 0, "bigreactors:turbinerotorshaft"),
                        MetaBlockRemap("BigReactors:BRTurbineRotorPart", 1, "bigreactors:turbinerotorblade"),
                        ]
    # 1:1 id remap table exported by the id-migration tool (hard-coded path).
    remap_dict = load_remap_as_dict(r"C:\temp\20170906\bfsr_ids_map.json")
    iterate_all_ids(sets, id_rules, id_damage_remaps, remap_dict)
    dump_chapters(sets)
print("Performend %s changes" % change_count) | rockobonaparte/bfsr | scripts/patch_quest_book.py | Python | cc0-1.0 | 12,219 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for BigQuery file loads utilities."""
# pytype: skip-file
from __future__ import absolute_import
import logging
import os
import random
import sys
import time
import unittest
import mock
from hamcrest.core import assert_that as hamcrest_assert
from hamcrest.core.core.allof import all_of
from hamcrest.core.core.is_ import is_
from nose.plugins.attrib import attr
from parameterized import param
from parameterized import parameterized
import apache_beam as beam
from apache_beam.io.filebasedsink_test import _TestCaseWithTempDirCleanUp
from apache_beam.io.gcp import bigquery_file_loads as bqfl
from apache_beam.io.gcp import bigquery
from apache_beam.io.gcp import bigquery_tools
from apache_beam.io.gcp.internal.clients import bigquery as bigquery_api
from apache_beam.io.gcp.tests.bigquery_matcher import BigqueryFullResultMatcher
from apache_beam.io.gcp.tests.bigquery_matcher import BigqueryFullResultStreamingMatcher
from apache_beam.runners.dataflow.test_dataflow_runner import TestDataflowRunner
from apache_beam.runners.runner import PipelineState
from apache_beam.testing.pipeline_verifiers import PipelineStateMatcher
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.test_stream import TestStream
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms import combiners
from apache_beam.typehints.typehints import Tuple
try:
from apitools.base.py.exceptions import HttpError
except ImportError:
raise unittest.SkipTest('GCP dependencies are not installed')
_LOGGER = logging.getLogger(__name__)
# Sample (destination table, row) pairs used by the tests below. Rows are
# deliberately interleaved across three destination tables.
_DESTINATION_ELEMENT_PAIRS = [
    # DESTINATION 1
    ('project1:dataset1.table1', {
        'name': 'beam', 'language': 'py'
    }),
    ('project1:dataset1.table1', {
        'name': 'beam', 'language': 'java'
    }),
    ('project1:dataset1.table1', {
        'name': 'beam', 'language': 'go'
    }),
    ('project1:dataset1.table1', {
        'name': 'flink', 'language': 'java'
    }),
    ('project1:dataset1.table1', {
        'name': 'flink', 'language': 'scala'
    }),
    # DESTINATION 3
    ('project1:dataset1.table3', {
        'name': 'spark', 'language': 'scala'
    }),
    # DESTINATION 1
    ('project1:dataset1.table1', {
        'name': 'spark', 'language': 'py'
    }),
    ('project1:dataset1.table1', {
        'name': 'spark', 'language': 'scala'
    }),
    # DESTINATION 2
    ('project1:dataset1.table2', {
        'name': 'beam', 'foundation': 'apache'
    }),
    ('project1:dataset1.table2', {
        'name': 'flink', 'foundation': 'apache'
    }),
    ('project1:dataset1.table2', {
        'name': 'spark', 'foundation': 'apache'
    }),
]
# The unique destination tables appearing in the pairs above.
_DISTINCT_DESTINATIONS = list({elm[0] for elm in _DESTINATION_ELEMENT_PAIRS})
# Just the row payloads, without their destinations.
_ELEMENTS = [elm[1] for elm in _DESTINATION_ELEMENT_PAIRS]
# Dict-form table schema shared by every destination ("name" is required).
_ELEMENTS_SCHEMA = bigquery.WriteToBigQuery.get_dict_table_schema(
    bigquery_api.TableSchema(
        fields=[
            bigquery_api.TableFieldSchema(
                name="name", type="STRING", mode="REQUIRED"),
            bigquery_api.TableFieldSchema(name="language", type="STRING"),
            bigquery_api.TableFieldSchema(name="foundation", type="STRING"),
        ]))
class TestWriteRecordsToFile(_TestCaseWithTempDirCleanUp):
maxDiff = None
  def _consume_input(self, fn, checks=None):
    """Run *fn* over the sample destination/element pairs in a test pipeline.

    *checks* is a callback receiving the tagged output PCollections; when it
    is None, the pipeline is skipped entirely.
    """
    if checks is None:
      return
    with TestPipeline() as p:
      output_pcs = (
          p
          | beam.Create(_DESTINATION_ELEMENT_PAIRS, reshuffle=False)
          | beam.ParDo(fn, self.tmpdir).with_outputs(
              fn.WRITTEN_FILE_TAG, fn.UNWRITTEN_RECORD_TAG))
      checks(output_pcs)
    return output_pcs
  @parameterized.expand([
      param(file_format=bigquery_tools.FileFormat.AVRO),
      param(file_format=bigquery_tools.FileFormat.JSON),
      param(file_format=None),
  ])
  def test_files_created(self, file_format):
    """Test that the files are created and written."""
    fn = bqfl.WriteRecordsToFile(
        schema=_ELEMENTS_SCHEMA, file_format=file_format)
    self.tmpdir = self._new_tempdir()
    def check_files_created(output_pcs):
      # Expect exactly one file per destination (three total), each of which
      # must exist on disk, and the set of reported destinations must match
      # the distinct tables in the fixture data.
      dest_file_pc = output_pcs[bqfl.WriteRecordsToFile.WRITTEN_FILE_TAG]
      files = dest_file_pc | "GetFiles" >> beam.Map(lambda x: x[1][0])
      file_count = files | "CountFiles" >> combiners.Count.Globally()
      _ = files | "FilesExist" >> beam.Map(
          lambda x: hamcrest_assert(os.path.exists(x), is_(True)))
      assert_that(file_count, equal_to([3]), label='check file count')
      destinations = (
          dest_file_pc
          | "GetDests" >>
          beam.Map(lambda x: bigquery_tools.get_hashable_destination(x[0])))
      assert_that(
          destinations,
          equal_to(list(_DISTINCT_DESTINATIONS)),
          label='check destinations ')
    self._consume_input(fn, check_files_created)
def test_many_files(self):
"""Forces records to be written to many files.
For each destination multiple files are necessary. This is because the max
file length is very small, so only a couple records fit in each file.
"""
fn = bqfl.WriteRecordsToFile(schema=_ELEMENTS_SCHEMA, max_file_size=50)
self.tmpdir = self._new_tempdir()
def check_many_files(output_pcs):
dest_file_pc = output_pcs[bqfl.WriteRecordsToFile.WRITTEN_FILE_TAG]
files_per_dest = (
dest_file_pc
| beam.Map(lambda x: x).with_output_types(
beam.typehints.KV[str, Tuple[str, int]])
| combiners.Count.PerKey())
files_per_dest = (
files_per_dest
| "GetDests" >> beam.Map(
lambda x: (bigquery_tools.get_hashable_destination(x[0]), x[1])))
assert_that(
files_per_dest,
equal_to([('project1:dataset1.table1', 4),
('project1:dataset1.table2', 2),
('project1:dataset1.table3', 1)]))
# Check that the files exist
_ = dest_file_pc | beam.Map(lambda x: x[1][0]) | beam.Map(
lambda x: hamcrest_assert(os.path.exists(x), is_(True)))
self._consume_input(fn, check_many_files)
@parameterized.expand([
param(file_format=bigquery_tools.FileFormat.AVRO),
param(file_format=bigquery_tools.FileFormat.JSON),
])
def test_records_are_spilled(self, file_format):
"""Forces records to be written to many files.
For each destination multiple files are necessary, and at most two files
can be created. This forces records to be spilled to the next stage of
processing.
"""
fn = bqfl.WriteRecordsToFile(
schema=_ELEMENTS_SCHEMA,
max_files_per_bundle=2,
file_format=file_format)
self.tmpdir = self._new_tempdir()
def check_many_files(output_pcs):
dest_file_pc = output_pcs[bqfl.WriteRecordsToFile.WRITTEN_FILE_TAG]
spilled_records_pc = output_pcs[
bqfl.WriteRecordsToFile.UNWRITTEN_RECORD_TAG]
spilled_records_count = (spilled_records_pc | combiners.Count.Globally())
assert_that(spilled_records_count, equal_to([3]), label='spilled count')
files_per_dest = (
dest_file_pc
| beam.Map(lambda x: x).with_output_types(
beam.typehints.KV[str, Tuple[str, int]])
| combiners.Count.PerKey())
files_per_dest = (
files_per_dest
| "GetDests" >> beam.Map(
lambda x: (bigquery_tools.get_hashable_destination(x[0]), x[1])))
# Only table1 and table3 get files. table2 records get spilled.
assert_that(
files_per_dest,
equal_to([('project1:dataset1.table1', 1),
('project1:dataset1.table3', 1)]),
label='file count')
# Check that the files exist
_ = dest_file_pc | beam.Map(lambda x: x[1][0]) | beam.Map(
lambda x: hamcrest_assert(os.path.exists(x), is_(True)))
self._consume_input(fn, check_many_files)
class TestWriteGroupedRecordsToFile(_TestCaseWithTempDirCleanUp):
  """Tests for bqfl.WriteGroupedRecordsToFile.

  The DoFn under test receives records already grouped by destination
  (hence the GroupByKey in _consume_input) and writes them to files under
  self.tmpdir.
  """

  def _consume_input(self, fn, input, checks):
    """Group ``input`` by destination, run ``fn``, and apply ``checks``.

    ``checks`` is invoked while the TestPipeline is still open so it can
    attach additional transforms and assert_that checks to the result.
    """
    if checks is None:
      return
    with TestPipeline() as p:
      res = (
          p
          | beam.Create(input)
          | beam.GroupByKey()
          | beam.ParDo(fn, self.tmpdir))
      checks(res)
      return res

  @parameterized.expand([
      param(file_format=bigquery_tools.FileFormat.AVRO),
      param(file_format=bigquery_tools.FileFormat.JSON),
      param(file_format=None),
  ])
  def test_files_are_created(self, file_format):
    """Test that the files are created and written."""
    fn = bqfl.WriteGroupedRecordsToFile(
        schema=_ELEMENTS_SCHEMA, file_format=file_format)
    self.tmpdir = self._new_tempdir()

    def check_files_created(output_pc):
      # Output elements look like (destination, (path, size)).
      files = output_pc | "GetFiles" >> beam.Map(lambda x: x[1][0])
      file_count = files | "CountFiles" >> combiners.Count.Globally()
      _ = files | "FilesExist" >> beam.Map(
          lambda x: hamcrest_assert(os.path.exists(x), is_(True)))
      # Three distinct destination tables -> three files.
      assert_that(file_count, equal_to([3]), label='check file count')
      destinations = (
          output_pc
          | "GetDests" >>
          beam.Map(lambda x: bigquery_tools.get_hashable_destination(x[0])))
      assert_that(
          destinations,
          equal_to(list(_DISTINCT_DESTINATIONS)),
          label='check destinations ')
    self._consume_input(fn, _DESTINATION_ELEMENT_PAIRS, check_files_created)

  def test_multiple_files(self):
    """Forces records to be written to many files.
    For each destination multiple files are necessary. This is because the max
    file length is very small, so only a couple records fit in each file.
    """
    fn = bqfl.WriteGroupedRecordsToFile(
        schema=_ELEMENTS_SCHEMA, max_file_size=50)
    self.tmpdir = self._new_tempdir()

    def check_multiple_files(output_pc):
      files_per_dest = output_pc | combiners.Count.PerKey()
      files_per_dest = (
          files_per_dest
          | "GetDests" >> beam.Map(
              lambda x: (bigquery_tools.get_hashable_destination(x[0]), x[1])))
      assert_that(
          files_per_dest,
          equal_to([
              ('project1:dataset1.table1', 4),
              ('project1:dataset1.table2', 2),
              ('project1:dataset1.table3', 1),
          ]))
      # Check that the files exist. Previously the os.path.exists result was
      # mapped but discarded, so a missing file would go unnoticed; assert it
      # explicitly, matching the equivalent check in TestWriteRecordsToFile.
      _ = output_pc | beam.Map(lambda x: x[1][0]) | beam.Map(
          lambda x: hamcrest_assert(os.path.exists(x), is_(True)))
    self._consume_input(fn, _DESTINATION_ELEMENT_PAIRS, check_multiple_files)
class TestPartitionFiles(unittest.TestCase):
  """Unit tests for bqfl.PartitionFiles and its inner Partition helper."""

  # Two destinations: destination0 has four 50-byte files, destination1 two.
  _ELEMENTS = [(
      'destination0', [('file0', 50), ('file1', 50), ('file2', 50),
                       ('file3', 50)]),
               ('destination1', [('file0', 50), ('file1', 50)])]

  def test_partition(self):
    """A Partition accepts files until its size or file-count limit is hit."""
    partition = bqfl.PartitionFiles.Partition(1000, 1)
    # Use assertTrue/assertFalse instead of assertEqual(x, True/False) for
    # clearer intent and failure messages.
    self.assertTrue(partition.can_accept(50))
    self.assertFalse(partition.can_accept(2000))
    self.assertTrue(partition.can_accept(1000))
    partition.add('file1', 50)
    self.assertEqual(partition.files, ['file1'])
    self.assertEqual(partition.size, 50)
    # The single file slot is used up, so nothing else fits -- not even a
    # zero-byte file.
    self.assertFalse(partition.can_accept(50))
    self.assertFalse(partition.can_accept(0))

  def test_partition_files_dofn_file_split(self):
    """Force partitions to split based on max_files"""
    # With max_files=2, destination0's four files split into two partitions;
    # destination1's two files fit in a single partition.
    multiple_partitions_result = [('destination0', ['file0', 'file1']),
                                  ('destination0', ['file2', 'file3'])]
    single_partition_result = [('destination1', ['file0', 'file1'])]
    with TestPipeline() as p:
      destination_file_pairs = p | beam.Create(self._ELEMENTS, reshuffle=False)
      partitioned_files = (
          destination_file_pairs
          | beam.ParDo(bqfl.PartitionFiles(1000, 2)).with_outputs(
              bqfl.PartitionFiles.MULTIPLE_PARTITIONS_TAG,
              bqfl.PartitionFiles.SINGLE_PARTITION_TAG))
      # Parenthesized subscripts instead of backslash line continuations.
      multiple_partitions = partitioned_files[
          bqfl.PartitionFiles.MULTIPLE_PARTITIONS_TAG]
      single_partition = partitioned_files[
          bqfl.PartitionFiles.SINGLE_PARTITION_TAG]
      assert_that(
          multiple_partitions,
          equal_to(multiple_partitions_result),
          label='CheckMultiplePartitions')
      assert_that(
          single_partition,
          equal_to(single_partition_result),
          label='CheckSinglePartition')

  def test_partition_files_dofn_size_split(self):
    """Force partitions to split based on max_partition_size"""
    # With max_partition_size=150, only three 50-byte files fit per
    # partition, so destination0 splits 3 + 1.
    multiple_partitions_result = [('destination0', ['file0', 'file1', 'file2']),
                                  ('destination0', ['file3'])]
    single_partition_result = [('destination1', ['file0', 'file1'])]
    with TestPipeline() as p:
      destination_file_pairs = p | beam.Create(self._ELEMENTS, reshuffle=False)
      partitioned_files = (
          destination_file_pairs
          | beam.ParDo(bqfl.PartitionFiles(150, 10)).with_outputs(
              bqfl.PartitionFiles.MULTIPLE_PARTITIONS_TAG,
              bqfl.PartitionFiles.SINGLE_PARTITION_TAG))
      multiple_partitions = partitioned_files[
          bqfl.PartitionFiles.MULTIPLE_PARTITIONS_TAG]
      single_partition = partitioned_files[
          bqfl.PartitionFiles.SINGLE_PARTITION_TAG]
      assert_that(
          multiple_partitions,
          equal_to(multiple_partitions_result),
          label='CheckMultiplePartitions')
      assert_that(
          single_partition,
          equal_to(single_partition_result),
          label='CheckSinglePartition')
class TestBigQueryFileLoads(_TestCaseWithTempDirCleanUp):
  """Tests for the end-to-end BigQueryBatchFileLoads transform using a
  mocked BigQuery client (no real service calls)."""

  def test_records_traverse_transform_with_mocks(self):
    """Elements flow through the transform; one file and one load job are
    produced for the single destination."""
    destination = 'project1:dataset1.table1'
    job_reference = bigquery_api.JobReference()
    job_reference.projectId = 'project1'
    job_reference.jobId = 'job_name1'
    result_job = bigquery_api.Job()
    result_job.jobReference = job_reference
    # The mocked client reports every job as already DONE without errors.
    mock_job = mock.Mock()
    mock_job.status.state = 'DONE'
    mock_job.status.errorResult = None
    mock_job.jobReference = job_reference
    bq_client = mock.Mock()
    bq_client.jobs.Get.return_value = mock_job
    bq_client.jobs.Insert.return_value = result_job
    transform = bqfl.BigQueryBatchFileLoads(
        destination,
        custom_gcs_temp_location=self._new_tempdir(),
        test_client=bq_client,
        validate=False,
        temp_file_format=bigquery_tools.FileFormat.JSON)
    # Need to test this with the DirectRunner to avoid serializing mocks
    with TestPipeline('DirectRunner') as p:
      outputs = p | beam.Create(_ELEMENTS) | transform
      dest_files = outputs[bqfl.BigQueryBatchFileLoads.DESTINATION_FILE_PAIRS]
      dest_job = outputs[bqfl.BigQueryBatchFileLoads.DESTINATION_JOBID_PAIRS]
      jobs = dest_job | "GetJobs" >> beam.Map(lambda x: x[1])
      files = dest_files | "GetFiles" >> beam.Map(lambda x: x[1][0])
      destinations = (
          dest_files
          | "GetDests" >> beam.Map(
              lambda x: (bigquery_tools.get_hashable_destination(x[0]), x[1]))
          | "GetUniques" >> combiners.Count.PerKey()
          | "GetFinalDests" >> beam.Keys())
      # All files exist
      _ = (
          files
          | beam.Map(lambda x: hamcrest_assert(os.path.exists(x), is_(True))))
      # One file per destination
      assert_that(
          files | combiners.Count.Globally(), equal_to([1]), label='CountFiles')
      assert_that(
          destinations, equal_to([destination]), label='CheckDestinations')
      assert_that(jobs, equal_to([job_reference]), label='CheckJobs')

  @unittest.skipIf(sys.version_info[0] == 2, 'Mock pickling problems in Py 2')
  @mock.patch('time.sleep')
  def test_wait_for_job_completion(self, sleep_mock):
    """WaitForBQJobs polls until every job reports DONE, sleeping once
    while job 1 is still RUNNING."""
    job_references = [bigquery_api.JobReference(), bigquery_api.JobReference()]
    job_references[0].projectId = 'project1'
    job_references[0].jobId = 'jobId1'
    job_references[1].projectId = 'project1'
    job_references[1].jobId = 'jobId2'
    job_1_waiting = mock.Mock()
    job_1_waiting.status.state = 'RUNNING'
    job_2_done = mock.Mock()
    job_2_done.status.state = 'DONE'
    job_2_done.status.errorResult = None
    job_1_done = mock.Mock()
    job_1_done.status.state = 'DONE'
    job_1_done.status.errorResult = None
    # First polling round: job1 RUNNING, job2 DONE; second round: both DONE.
    bq_client = mock.Mock()
    bq_client.jobs.Get.side_effect = [
        job_1_waiting, job_2_done, job_1_done, job_2_done
    ]
    waiting_dofn = bqfl.WaitForBQJobs(bq_client)
    dest_list = [(i, job) for i, job in enumerate(job_references)]
    with TestPipeline('DirectRunner') as p:
      references = beam.pvalue.AsList(p | 'job_ref' >> beam.Create(dest_list))
      outputs = (p | beam.Create(['']) | beam.ParDo(waiting_dofn, references))
      assert_that(outputs, equal_to(dest_list))
    sleep_mock.assert_called_once()

  @unittest.skipIf(sys.version_info[0] == 2, 'Mock pickling problems in Py 2')
  @mock.patch('time.sleep')
  def test_one_job_failed_after_waiting(self, sleep_mock):
    """A job finishing with an errorResult makes the pipeline raise."""
    job_references = [bigquery_api.JobReference(), bigquery_api.JobReference()]
    job_references[0].projectId = 'project1'
    job_references[0].jobId = 'jobId1'
    job_references[1].projectId = 'project1'
    job_references[1].jobId = 'jobId2'
    job_1_waiting = mock.Mock()
    job_1_waiting.status.state = 'RUNNING'
    job_2_done = mock.Mock()
    job_2_done.status.state = 'DONE'
    job_2_done.status.errorResult = None
    # Job 1 eventually completes, but with an error result.
    job_1_error = mock.Mock()
    job_1_error.status.state = 'DONE'
    job_1_error.status.errorResult = 'Some problems happened'
    bq_client = mock.Mock()
    bq_client.jobs.Get.side_effect = [
        job_1_waiting, job_2_done, job_1_error, job_2_done
    ]
    waiting_dofn = bqfl.WaitForBQJobs(bq_client)
    dest_list = [(i, job) for i, job in enumerate(job_references)]
    with self.assertRaises(Exception):
      with TestPipeline('DirectRunner') as p:
        references = beam.pvalue.AsList(p | 'job_ref' >> beam.Create(dest_list))
        _ = (p | beam.Create(['']) | beam.ParDo(waiting_dofn, references))
    sleep_mock.assert_called_once()

  def test_multiple_partition_files(self):
    """Small size limits force multiple files/partitions per destination,
    so per-partition temp-table load jobs plus copy jobs are issued."""
    destination = 'project1:dataset1.table1'
    job_reference = bigquery_api.JobReference()
    job_reference.projectId = 'project1'
    job_reference.jobId = 'job_name1'
    result_job = mock.Mock()
    result_job.jobReference = job_reference
    mock_job = mock.Mock()
    mock_job.status.state = 'DONE'
    mock_job.status.errorResult = None
    mock_job.jobReference = job_reference
    bq_client = mock.Mock()
    bq_client.jobs.Get.return_value = mock_job
    bq_client.jobs.Insert.return_value = result_job
    # Temp-table cleanup calls tables.Delete; make it a no-op.
    bq_client.tables.Delete.return_value = None
    with TestPipeline('DirectRunner') as p:
      outputs = (
          p
          | beam.Create(_ELEMENTS, reshuffle=False)
          | bqfl.BigQueryBatchFileLoads(
              destination,
              custom_gcs_temp_location=self._new_tempdir(),
              test_client=bq_client,
              validate=False,
              temp_file_format=bigquery_tools.FileFormat.JSON,
              max_file_size=45,
              max_partition_size=80,
              max_files_per_partition=2))
      dest_files = outputs[bqfl.BigQueryBatchFileLoads.DESTINATION_FILE_PAIRS]
      dest_load_jobs = outputs[
          bqfl.BigQueryBatchFileLoads.DESTINATION_JOBID_PAIRS]
      dest_copy_jobs = outputs[
          bqfl.BigQueryBatchFileLoads.DESTINATION_COPY_JOBID_PAIRS]
      load_jobs = dest_load_jobs | "GetLoadJobs" >> beam.Map(lambda x: x[1])
      copy_jobs = dest_copy_jobs | "GetCopyJobs" >> beam.Map(lambda x: x[1])
      files = dest_files | "GetFiles" >> beam.Map(lambda x: x[1][0])
      destinations = (
          dest_files
          | "GetDests" >> beam.Map(
              lambda x: (bigquery_tools.get_hashable_destination(x[0]), x[1]))
          | "GetUniques" >> combiners.Count.PerKey()
          | "GetFinalDests" >> beam.Keys())
      # All files exist
      _ = (
          files
          | beam.Map(lambda x: hamcrest_assert(os.path.exists(x), is_(True))))
      # One file per destination
      assert_that(
          files | "CountFiles" >> combiners.Count.Globally(),
          equal_to([6]),
          label='CheckFileCount')
      assert_that(
          destinations, equal_to([destination]), label='CheckDestinations')
      assert_that(
          load_jobs | "CountLoadJobs" >> combiners.Count.Globally(),
          equal_to([6]),
          label='CheckLoadJobCount')
      assert_that(
          copy_jobs | "CountCopyJobs" >> combiners.Count.Globally(),
          equal_to([6]),
          label='CheckCopyJobCount')
class BigQueryFileLoadsIT(unittest.TestCase):
  """Integration tests that run file-loads writes against a real BigQuery
  dataset created per-run in setUp and deleted in tearDown."""

  # Prefix for the per-run dataset name built in setUp.
  BIG_QUERY_DATASET_ID = 'python_bq_file_loads_'
  # JSON schema strings consumed via parse_table_schema_from_json.
  BIG_QUERY_SCHEMA = (
      '{"fields": [{"name": "name","type": "STRING"},'
      '{"name": "language","type": "STRING"}]}')
  BIG_QUERY_SCHEMA_2 = (
      '{"fields": [{"name": "name","type": "STRING"},'
      '{"name": "foundation","type": "STRING"}]}')
  # Dict-form schema used by the streaming file-loads test.
  BIG_QUERY_STREAMING_SCHEMA = ({
      'fields': [{
          'name': 'Integr', 'type': 'INTEGER', 'mode': 'NULLABLE'
      }]
  })

  def setUp(self):
    """Create a uniquely-named BigQuery dataset for this test run."""
    self.test_pipeline = TestPipeline(is_integration_test=True)
    self.runner_name = type(self.test_pipeline.runner).__name__
    self.project = self.test_pipeline.get_option('project')
    # Timestamp + random suffix keeps concurrent runs from colliding.
    self.dataset_id = '%s%s%d' % (
        self.BIG_QUERY_DATASET_ID,
        str(int(time.time())),
        random.randint(0, 10000))
    self.bigquery_client = bigquery_tools.BigQueryWrapper()
    self.bigquery_client.get_or_create_dataset(self.project, self.dataset_id)
    self.output_table = "%s.output_table" % (self.dataset_id)
    _LOGGER.info(
        "Created dataset %s in project %s", self.dataset_id, self.project)

  @attr('IT')
  def test_multiple_destinations_transform(self):
    """Write to four tables, routing rows and schemas via callables and
    side inputs; verify table contents with BigqueryFullResultMatcher."""
    output_table_1 = '%s%s' % (self.output_table, 1)
    output_table_2 = '%s%s' % (self.output_table, 2)
    output_table_3 = '%s%s' % (self.output_table, 3)
    output_table_4 = '%s%s' % (self.output_table, 4)
    schema1 = bigquery.WriteToBigQuery.get_dict_table_schema(
        bigquery_tools.parse_table_schema_from_json(self.BIG_QUERY_SCHEMA))
    schema2 = bigquery.WriteToBigQuery.get_dict_table_schema(
        bigquery_tools.parse_table_schema_from_json(self.BIG_QUERY_SCHEMA_2))
    schema_kv_pairs = [(output_table_1, schema1), (output_table_2, schema2),
                       (output_table_3, schema1), (output_table_4, schema2)]
    # Rows with a 'language' key go to tables 1/3; rows with 'foundation'
    # go to tables 2/4.
    pipeline_verifiers = [
        BigqueryFullResultMatcher(
            project=self.project,
            query="SELECT name, language FROM %s" % output_table_1,
            data=[(d['name'], d['language']) for d in _ELEMENTS
                  if 'language' in d]),
        BigqueryFullResultMatcher(
            project=self.project,
            query="SELECT name, foundation FROM %s" % output_table_2,
            data=[(d['name'], d['foundation']) for d in _ELEMENTS
                  if 'foundation' in d]),
        BigqueryFullResultMatcher(
            project=self.project,
            query="SELECT name, language FROM %s" % output_table_3,
            data=[(d['name'], d['language']) for d in _ELEMENTS
                  if 'language' in d]),
        BigqueryFullResultMatcher(
            project=self.project,
            query="SELECT name, foundation FROM %s" % output_table_4,
            data=[(d['name'], d['foundation']) for d in _ELEMENTS
                  if 'foundation' in d])
    ]
    args = self.test_pipeline.get_full_options_as_args(
        on_success_matcher=all_of(*pipeline_verifiers),
        experiments='use_beam_bq_sink')
    with beam.Pipeline(argv=args) as p:
      input = p | beam.Create(_ELEMENTS, reshuffle=False)
      schema_map_pcv = beam.pvalue.AsDict(
          p | "MakeSchemas" >> beam.Create(schema_kv_pairs))
      table_record_pcv = beam.pvalue.AsDict(
          p | "MakeTables" >> beam.Create([('table1', output_table_1),
                                           ('table2', output_table_2)]))
      # Get all input in same machine
      input = (
          input
          | beam.Map(lambda x: (None, x))
          | beam.GroupByKey()
          | beam.FlatMap(lambda elm: elm[1]))
      # Destination and schema both resolved at runtime from side inputs.
      _ = (
          input | "WriteWithMultipleDestsFreely" >> bigquery.WriteToBigQuery(
              table=lambda x,
              tables:
              (tables['table1'] if 'language' in x else tables['table2']),
              table_side_inputs=(table_record_pcv, ),
              schema=lambda dest,
              schema_map: schema_map.get(dest, None),
              schema_side_inputs=(schema_map_pcv, ),
              create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
              write_disposition=beam.io.BigQueryDisposition.WRITE_EMPTY))
      # Second write path: fixed destination names, tiny max_file_size and
      # unlimited files per bundle.
      _ = (
          input | "WriteWithMultipleDests" >> bigquery.WriteToBigQuery(
              table=lambda x:
              (output_table_3 if 'language' in x else output_table_4),
              schema=lambda dest,
              schema_map: schema_map.get(dest, None),
              schema_side_inputs=(schema_map_pcv, ),
              create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
              write_disposition=beam.io.BigQueryDisposition.WRITE_EMPTY,
              max_file_size=20,
              max_files_per_bundle=-1))

  @attr('IT')
  def test_bqfl_streaming(self):
    """Stream 100 rows through FILE_LOADS with a triggering frequency and
    verify the table contents."""
    if isinstance(self.test_pipeline.runner, TestDataflowRunner):
      self.skipTest("TestStream is not supported on TestDataflowRunner")
    output_table = '%s_%s' % (self.output_table, 'ints')
    _SIZE = 100
    schema = self.BIG_QUERY_STREAMING_SCHEMA
    # NOTE(review): 'l' is easy to misread as '1'; consider renaming.
    l = [{'Integr': i} for i in range(_SIZE)]
    state_matcher = PipelineStateMatcher(PipelineState.RUNNING)
    # range(100) here matches _SIZE above.
    bq_matcher = BigqueryFullResultStreamingMatcher(
        project=self.project,
        query="SELECT Integr FROM %s" % output_table,
        data=[(i, ) for i in range(100)])
    args = self.test_pipeline.get_full_options_as_args(
        on_success_matcher=all_of(state_matcher, bq_matcher),
        experiments='use_beam_bq_sink',
        streaming=True)
    with beam.Pipeline(argv=args) as p:
      # Emit the rows in four quarters, advancing processing time and the
      # watermark between batches.
      stream_source = (
          TestStream().advance_watermark_to(0).advance_processing_time(
              100).add_elements(l[:_SIZE // 4]).
          advance_processing_time(100).advance_watermark_to(100).add_elements(
              l[_SIZE // 4:2 * _SIZE // 4]).advance_processing_time(
                  100).advance_watermark_to(200).add_elements(
                      l[2 * _SIZE // 4:3 * _SIZE // 4]).advance_processing_time(
                          100).advance_watermark_to(300).add_elements(
                              l[3 * _SIZE // 4:]).advance_processing_time(
                                  100).advance_watermark_to_infinity())
      _ = (p
           | stream_source
           | bigquery.WriteToBigQuery(output_table,
                                      schema=schema,
                                      method=bigquery.WriteToBigQuery \
                                          .Method.FILE_LOADS,
                                      triggering_frequency=100))

  @attr('IT')
  def test_one_job_fails_all_jobs_fail(self):
    # If one of the import jobs fails, then other jobs must not be performed.
    # This is to avoid reinsertion of some records when a pipeline fails and
    # is rerun.
    output_table_1 = '%s%s' % (self.output_table, 1)
    output_table_2 = '%s%s' % (self.output_table, 2)
    self.bigquery_client.get_or_create_table(
        self.project,
        self.dataset_id,
        output_table_1.split('.')[1],
        bigquery_tools.parse_table_schema_from_json(self.BIG_QUERY_SCHEMA),
        None,
        None)
    self.bigquery_client.get_or_create_table(
        self.project,
        self.dataset_id,
        output_table_2.split('.')[1],
        bigquery_tools.parse_table_schema_from_json(self.BIG_QUERY_SCHEMA_2),
        None,
        None)
    # Both tables must remain empty after the failed pipeline.
    pipeline_verifiers = [
        BigqueryFullResultMatcher(
            project=self.project,
            query="SELECT name, language FROM %s" % output_table_1,
            data=[]),
        BigqueryFullResultMatcher(
            project=self.project,
            query="SELECT name, foundation FROM %s" % output_table_2,
            data=[])
    ]
    args = self.test_pipeline.get_full_options_as_args(
        experiments='use_beam_bq_sink')
    with self.assertRaises(Exception):
      # The pipeline below fails because neither a schema nor SCHEMA_AUTODETECT
      # are specified.
      with beam.Pipeline(argv=args) as p:
        input = p | beam.Create(_ELEMENTS)
        input2 = p | "Broken record" >> beam.Create(['language_broken_record'])
        input = (input, input2) | beam.Flatten()
        _ = (
            input | "WriteWithMultipleDests" >> bigquery.WriteToBigQuery(
                table=lambda x:
                (output_table_1 if 'language' in x else output_table_2),
                create_disposition=(
                    beam.io.BigQueryDisposition.CREATE_IF_NEEDED),
                write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND,
                temp_file_format=bigquery_tools.FileFormat.JSON))
        hamcrest_assert(p, all_of(*pipeline_verifiers))

  def tearDown(self):
    """Delete the per-run dataset (best effort; failures are logged)."""
    request = bigquery_api.BigqueryDatasetsDeleteRequest(
        projectId=self.project, datasetId=self.dataset_id, deleteContents=True)
    try:
      _LOGGER.info(
          "Deleting dataset %s in project %s", self.dataset_id, self.project)
      self.bigquery_client.client.datasets.Delete(request)
    except HttpError:
      _LOGGER.debug(
          'Failed to clean up dataset %s in project %s',
          self.dataset_id,
          self.project)
# Script entry point: raise the log level so pipeline progress is visible,
# then hand off to unittest's test discovery/runner.
if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
| iemejia/incubator-beam | sdks/python/apache_beam/io/gcp/bigquery_file_loads_test.py | Python | apache-2.0 | 30,663 |
"""
Acceptance tests for the Import and Export pages
"""
from nose.plugins.attrib import attr
from datetime import datetime
from flaky import flaky
from abc import abstractmethod
from common.test.acceptance.tests.studio.base_studio_test import StudioLibraryTest, StudioCourseTest
from common.test.acceptance.pages.studio.import_export import (
ExportLibraryPage,
ExportCoursePage,
ImportLibraryPage,
ImportCoursePage)
from common.test.acceptance.pages.studio.library import LibraryEditPage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.staff_view import StaffPage
class ExportTestMixin(object):
    """
    Shared export checks, run against both the course and library export pages.
    """
    def test_export(self):
        """
        Scenario: I am able to export a course or library
        Given that I have a course or library
        And I click the download button
        The download will succeed
        And the file will be of the right MIME type.
        """
        download_ok, mimetype_ok = self.export_page.download_tarball()
        self.assertTrue(download_ok)
        self.assertTrue(mimetype_ok)
@attr(shard=7)
class TestCourseExport(ExportTestMixin, StudioCourseTest):
    """
    Course-specific export tests (the download check comes from ExportTestMixin).
    """
    def setUp(self):  # pylint: disable=arguments-differ
        super(TestCourseExport, self).setUp()
        info = self.course_info
        self.export_page = ExportCoursePage(
            self.browser, info['org'], info['number'], info['run'])
        self.export_page.visit()

    def test_header(self):
        """
        Scenario: I should see the correct text when exporting a course.
        Given that I have a course to export from
        When I visit the export page
        The correct header should be shown
        """
        self.assertEqual(self.export_page.header_text, 'Course Export')
@attr(shard=7)
class TestLibraryExport(ExportTestMixin, StudioLibraryTest):
    """
    Library-specific export tests (the download check comes from ExportTestMixin).
    """
    def setUp(self):
        """
        Ensure a library exists and navigate to the library edit page.
        """
        super(TestLibraryExport, self).setUp()
        page = ExportLibraryPage(self.browser, self.library_key)
        page.visit()
        self.export_page = page

    def test_header(self):
        """
        Scenario: I should see the correct text when exporting a library.
        Given that I have a library to export from
        When I visit the export page
        The correct header should be shown
        """
        self.assertEqual(self.export_page.header_text, 'Library Export')
@attr(shard=7)
class ImportTestMixin(object):
    """
    Tests to run for both course and library import pages.

    Subclasses must provide ``import_page_class``, ``landing_page_class``,
    ``tarball_name``, ``bad_tarball_name``, and implement ``page_args()``.
    """
    def setUp(self):
        super(ImportTestMixin, self).setUp()
        self.import_page = self.import_page_class(*self.page_args())
        self.landing_page = self.landing_page_class(*self.page_args())
        self.import_page.visit()

    @abstractmethod
    def page_args(self):
        """
        Generates the args for initializing a page object.
        """
        return []

    def test_upload(self):
        """
        Scenario: I want to upload a course or library for import.
        Given that I have a library or course to import into
        And I have a valid .tar.gz file containing data to replace it with
        I can select the file and upload it
        And the page will give me confirmation that it uploaded successfully
        """
        self.import_page.upload_tarball(self.tarball_name)
        self.import_page.wait_for_upload()

    def test_import_timestamp(self):
        """
        Scenario: I perform a course / library import
        On import success, the page displays a UTC timestamp previously not visible
        And if I refresh the page, the timestamp is still displayed
        """
        self.assertFalse(self.import_page.is_timestamp_visible())
        # Get the time when the import has started.
        # import_page timestamp is in (MM/DD/YYYY at HH:mm) so replacing (second, microsecond) to
        # keep the comparison consistent
        upload_start_time = datetime.utcnow().replace(microsecond=0, second=0)
        self.import_page.upload_tarball(self.tarball_name)
        self.import_page.wait_for_upload()
        # Get the time when the import has finished.
        # import_page timestamp is in (MM/DD/YYYY at HH:mm) so replacing (second, microsecond) to
        # keep the comparison consistent
        upload_finish_time = datetime.utcnow().replace(microsecond=0, second=0)
        import_timestamp = self.import_page.parsed_timestamp
        # NOTE(review): parsed_timestamp is read *before* waiting for the
        # timestamp to become visible -- confirm the page object tolerates
        # being read early, or whether these two lines should be swapped.
        self.import_page.wait_for_timestamp_visible()
        # Verify that 'import_timestamp' is between start and finish upload time
        self.assertLessEqual(
            upload_start_time,
            import_timestamp,
            "Course import timestamp should be upload_start_time <= import_timestamp <= upload_end_time"
        )
        self.assertGreaterEqual(
            upload_finish_time,
            import_timestamp,
            "Course import timestamp should be upload_start_time <= import_timestamp <= upload_end_time"
        )
        # After a page refresh the timestamp must still be displayed.
        self.import_page.visit()
        self.import_page.wait_for_tasks(completed=True)
        self.import_page.wait_for_timestamp_visible()

    def test_landing_url(self):
        """
        Scenario: When uploading a library or course, a link appears for me to view the changes.
        Given that I upload a library or course
        A button will appear that contains the URL to the library or course's main page
        """
        self.import_page.upload_tarball(self.tarball_name)
        self.assertEqual(self.import_page.finished_target_url(), self.landing_page.url)

    def test_bad_filename_error(self):
        """
        Scenario: I should be reprimanded for trying to upload something that isn't a .tar.gz file.
        Given that I select a file that is an .mp4 for upload
        An error message will appear
        """
        self.import_page.upload_tarball('funny_cat_video.mp4')
        self.import_page.wait_for_filename_error()

    def test_task_list(self):
        """
        Scenario: I should see feedback checkpoints when uploading a course or library
        Given that I am on an import page
        No task checkpoint list should be showing
        When I upload a valid tarball
        Each task in the checklist should be marked confirmed
        And the task list should be visible
        """
        # The task list shouldn't be visible to start.
        self.assertFalse(self.import_page.is_task_list_showing(), "Task list shown too early.")
        self.import_page.wait_for_tasks()
        self.import_page.upload_tarball(self.tarball_name)
        self.import_page.wait_for_tasks(completed=True)
        self.assertTrue(self.import_page.is_task_list_showing(), "Task list did not display.")

    def test_bad_import(self):
        """
        Scenario: I should see a failed checklist when uploading an invalid course or library
        Given that I am on an import page
        And I upload a tarball with a broken XML file
        The tasks should be confirmed up until the 'Updating' task
        And the 'Updating' task should be marked failed
        And the remaining tasks should not be marked as started
        """
        self.import_page.upload_tarball(self.bad_tarball_name)
        self.import_page.wait_for_tasks(fail_on='Updating')
@attr(shard=7)
class TestEntranceExamCourseImport(ImportTestMixin, StudioCourseTest):
    """
    Tests the Course import page
    """
    # Fixture tarball containing an 'Entrance Exam' section plus a 'Section'.
    tarball_name = 'entrance_exam_course.2015.tar.gz'
    bad_tarball_name = 'bad_course.tar.gz'
    import_page_class = ImportCoursePage
    landing_page_class = CourseOutlinePage

    def page_args(self):
        """Args for constructing the course import / outline page objects."""
        return [self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']]

    @flaky  # TODO fix this, see TNL-6009
    def test_course_updated_with_entrance_exam(self):
        """
        Given that I visit an empty course before import
        I should not see a section named 'Section' or 'Entrance Exam'
        When I visit the import page
        And I upload a course that has an entrance exam section named 'Entrance Exam'
        And I visit the course outline page again
        The section named 'Entrance Exam' should now be available.
        And when I switch the view mode to student view and Visit CourseWare
        Then I see one section in the sidebar that is 'Entrance Exam'
        """
        self.landing_page.visit()
        # Should not exist yet.
        self.assertRaises(IndexError, self.landing_page.section, "Section")
        self.assertRaises(IndexError, self.landing_page.section, "Entrance Exam")
        self.import_page.visit()
        self.import_page.upload_tarball(self.tarball_name)
        self.import_page.wait_for_upload()
        self.landing_page.visit()
        # There should be two sections. 'Entrance Exam' and 'Section' on the landing page.
        self.landing_page.section("Entrance Exam")
        self.landing_page.section("Section")
        self.landing_page.view_live()
        courseware = CoursewarePage(self.browser, self.course_id)
        courseware.wait_for_page()
        # In student view only the entrance exam is shown until it is passed.
        StaffPage(self.browser, self.course_id).set_staff_view_mode('Student')
        self.assertEqual(courseware.num_sections, 1)
        self.assertIn(
            "To access course materials, you must score", courseware.entrance_exam_message_selector.text[0]
        )
@attr(shard=7)
class TestCourseImport(ImportTestMixin, StudioCourseTest):
    """
    Tests the Course import page
    """
    # Fixture tarball containing a single section named 'Section'.
    tarball_name = '2015.lzdwNM.tar.gz'
    bad_tarball_name = 'bad_course.tar.gz'
    import_page_class = ImportCoursePage
    landing_page_class = CourseOutlinePage

    def page_args(self):
        """Args for constructing the course import / outline page objects."""
        return [self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']]

    @flaky  # TNL-6042
    def test_course_updated(self):
        """
        Given that I visit an empty course before import
        I should not see a section named 'Section'
        When I visit the import page
        And I upload a course that has a section named 'Section'
        And I visit the course outline page again
        The section named 'Section' should now be available
        """
        self.landing_page.visit()
        # Should not exist yet.
        self.assertRaises(IndexError, self.landing_page.section, "Section")
        self.import_page.visit()
        self.import_page.upload_tarball(self.tarball_name)
        self.import_page.wait_for_upload()
        self.landing_page.visit()
        # There's a section named 'Section' in the tarball.
        self.landing_page.section("Section")

    def test_header(self):
        """
        Scenario: I should see the correct text when importing a course.
        Given that I have a course to import to
        When I visit the import page
        The correct header should be shown
        """
        self.assertEqual(self.import_page.header_text, 'Course Import')

    def test_multiple_course_import_message(self):
        """
        Given that I visit an empty course before import
        When I visit the import page
        And I upload a course with file name 2015.lzdwNM.tar.gz
        Then timestamp is visible after course is updated successfully
        And then I create a new course
        When I visit the import page of this new course
        Then timestamp is not visible
        """
        self.import_page.visit()
        self.import_page.upload_tarball(self.tarball_name)
        self.import_page.wait_for_upload()
        self.assertTrue(self.import_page.is_timestamp_visible())
        # Create a new course and visit the import page
        self.course_info = {
            'org': 'orgX',
            'number': self.unique_id + '_2',
            'run': 'test_run_2',
            'display_name': 'Test Course 2' + self.unique_id
        }
        self.install_course_fixture()
        self.import_page = self.import_page_class(*self.page_args())
        self.import_page.visit()
        # As this is new course which is never import so timestamp should not present
        self.assertFalse(self.import_page.is_timestamp_visible())
@attr(shard=7)
class TestLibraryImport(ImportTestMixin, StudioLibraryTest):
    """
    Tests the Library import page
    """
    # Fixture tarballs: the first contains three XBlocks, the second is broken.
    tarball_name = 'library.HhJfPD.tar.gz'
    bad_tarball_name = 'bad_library.tar.gz'
    import_page_class = ImportLibraryPage
    landing_page_class = LibraryEditPage

    def page_args(self):
        """Args for constructing the library import / edit page objects."""
        return [self.browser, self.library_key]

    @flaky  # TODO: SOL-430
    def test_library_updated(self):
        """
        Given that I visit an empty library
        No XBlocks should be shown
        When I visit the import page
        And I upload a library that contains three XBlocks
        And I visit the library page
        Three XBlocks should be shown
        """
        landing = self.landing_page
        landing.visit()
        landing.wait_until_ready()
        # The freshly-created library starts out empty.
        self.assertEqual(len(landing.xblocks), 0)
        self.import_page.visit()
        self.import_page.upload_tarball(self.tarball_name)
        self.import_page.wait_for_upload()
        # Re-open the library: the tarball's three blocks should now be there.
        landing.visit()
        landing.wait_until_ready()
        self.assertEqual(len(landing.xblocks), 3)

    def test_header(self):
        """
        Scenario: I should see the correct text when importing a library.
        Given that I have a library to import to
        When I visit the import page
        The correct header should be shown
        """
        self.assertEqual(self.import_page.header_text, 'Library Import')
| synergeticsedx/deployment-wipro | common/test/acceptance/tests/studio/test_import_export.py | Python | agpl-3.0 | 14,174 |
import demistomock as demisto
from CommonServerPython import *
import base64
import json
import os
import re
import urllib.parse
import requests
import traceback
from typing import Dict, Any, Tuple, Pattern
# Disable insecure warnings
requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
''' CONSTANTS '''

# Timestamp format (second precision) shared by the fetch-incidents helpers.
DATE_FORMAT = "%Y-%m-%dT%H:%M:%S"  # ISO8601 format with UTC, default in XSOAR
STANDARD_INVESTIGATIVE_DETAILS_OSX = { # pragma: no cover
"commands": [
{
"name": "sysinfo"
},
{
"name": "disks"
},
{
"name": "volumes"
},
{
"name": "useraccounts"
},
{
"name": "groups"
},
{
"name": "files-api",
"parameters": [
{
"name": "Path",
"value": "/"
},
{
"name": "Regex",
"value": "^(?:Applications|Library|System|User|bin|cores|opt|private|sbin|usr)+"
},
{
"name": "Include Remote Locations",
"value": False
},
{
"name": "Depth",
"value": -1
},
{
"name": "MD5",
"value": True
},
{
"name": "SHA1",
"value": False
},
{
"name": "SHA256",
"value": False
},
{
"name": "Verify Digital Signatures",
"value": False
},
{
"name": "AND Operator",
"value": False
},
{
"name": "Include Files",
"value": True
},
{
"name": "Include Directories",
"value": True
},
{
"name": "Preserve Times",
"value": False
}
]
},
{
"name": "persistence",
"parameters": [
{
"name": "MD5",
"value": True
},
{
"name": "SHA1",
"value": False
},
{
"name": "SHA256",
"value": False
},
{
"name": "Preserve Times",
"value": False
},
{
"name": "Verify Digital Signatures",
"value": False
}
]
},
{
"name": "tasks",
"parameters": [
{
"name": "MD5",
"value": True
},
{
"name": "SHA1",
"value": False
},
{
"name": "SHA256",
"value": False
},
{
"name": "Verify Digital Signatures",
"value": True
},
{
"name": "Preserve Times",
"value": False
}
]
},
{
"name": "processes-api"
},
{
"name": "urlhistory",
"parameters": [
{
"name": "TargetBrowser",
"value": "Chrome"
},
{
"name": "TargetBrowser",
"value": "Firefox"
},
{
"name": "TargetBrowser",
"value": "Safari"
}
]
},
{
"name": "quarantine-events"
},
{
"name": "ports"
},
{
"name": "services",
"parameters": [
{
"name": "MD5",
"value": True
},
{
"name": "SHA1",
"value": False
},
{
"name": "SHA256",
"value": False
},
{
"name": "Verify Digital Signatures",
"value": True
},
{
"name": "Preserve Times",
"value": False
}
]
},
{
"name": "stateagentinspector",
"parameters": [
{
"name": "eventTypes",
"value": []
}
]
},
{
"name": "syslog"
}
]
}
STANDARD_INVESTIGATIVE_DETAILS_LINUX = {
"commands": [
{
"name": "sysinfo"
},
{
"name": "files-api",
"parameters": [
{
"name": "Path",
"value": "/"
},
{
"name": "Regex",
"value": "^(?:usr|lib|lib64|opt|home|sbin|bin|etc|root)+"
},
{
"name": "Include Remote Locations",
"value": False
},
{
"name": "Depth",
"value": -1
},
{
"name": "MD5",
"value": True
},
{
"name": "SHA1",
"value": False
},
{
"name": "SHA256",
"value": False
},
{
"name": "AND Operator",
"value": False
},
{
"name": "Include Files",
"value": True
},
{
"name": "Include Directories",
"value": True
},
{
"name": "Preserve Times",
"value": False
}
]
},
{
"name": "processes-api"
},
{
"name": "ports"
},
{
"name": "shell-history",
"parameters": [
{
"name": "ShellList",
"value": [
"bash",
"zsh",
"ksh93"
]
}
]
}
]
}
STANDARD_INVESTIGATIVE_DETAILS_WIN = {
"commands": [
{
"name": "sysinfo"
},
{
"name": "disks",
"parameters": [
{
"name": "Prevent Hibernation",
"value": True
}
]
},
{
"name": "volumes",
"parameters": [
{
"name": "Prevent Hibernation",
"value": True
}
]
},
{
"name": "useraccounts",
"parameters": [
{
"name": "Prevent Hibernation",
"value": True
}
]
},
{
"name": "prefetch",
"parameters": [
{
"name": "Prevent Hibernation",
"value": True
}
]
},
{
"name": "files-raw",
"parameters": [
{
"name": "Prevent Hibernation",
"value": True
},
{
"name": "Active Files",
"value": True
},
{
"name": "Deleted Files",
"value": True
},
{
"name": "Parse NTFS INDX Buffers",
"value": True
},
{
"name": "Path",
"value": "%systemdrive%"
},
{
"name": "Depth",
"value": -1
},
{
"name": "MD5",
"value": True
},
{
"name": "SHA1",
"value": False
},
{
"name": "SHA256",
"value": False
},
{
"name": "Analyze Entropy",
"value": False
},
{
"name": "Enumerate Imports",
"value": False
},
{
"name": "Enumerate Exports",
"value": False
},
{
"name": "Analyze File Anomalies",
"value": False
},
{
"name": "Verify Digital Signatures",
"value": False
},
{
"name": "Strings",
"value": False
},
{
"name": "AND Operator",
"value": False
},
{
"name": "Include Files",
"value": True
},
{
"name": "Include Directories",
"value": True
},
{
"name": "Get Resources",
"value": False
},
{
"name": "Get Resource Data",
"value": False
},
{
"name": "Get Version Info",
"value": False
}
]
},
{
"name": "persistence",
"parameters": [
{
"name": "MD5",
"value": True
},
{
"name": "SHA1",
"value": False
},
{
"name": "SHA256",
"value": False
},
{
"name": "Preserve Times",
"value": False
},
{
"name": "Enumerate Imports",
"value": False
},
{
"name": "Enumerate Exports",
"value": False
},
{
"name": "Verify Digital Signatures",
"value": True
},
{
"name": "Analyze Entropy",
"value": False
},
{
"name": "Analyze File Anomalies",
"value": False
},
{
"name": "Get Resources",
"value": False
},
{
"name": "Get Version Info",
"value": False
},
{
"name": "Prevent Hibernation",
"value": True
}
]
},
{
"name": "registry-raw",
"parameters": [
{
"name": "Prevent Hibernation",
"value": True
},
{
"name": "Type",
"value": "All"
}
]
},
{
"name": "tasks",
"parameters": [
{
"name": "Prevent Hibernation",
"value": True
},
{
"name": "MD5",
"value": True
},
{
"name": "SHA1",
"value": False
},
{
"name": "SHA256",
"value": False
},
{
"name": "Verify Digital Signatures",
"value": True
},
{
"name": "Preserve Times",
"value": False
},
{
"name": "raw mode",
"value": False
}
]
},
{
"name": "eventlogs",
"parameters": [
{
"name": "Prevent Hibernation",
"value": True
}
]
},
{
"name": "processes-memory",
"parameters": [
{
"name": "Preserve Times",
"value": False
},
{
"name": "Prevent Hibernation",
"value": True
},
{
"name": "MD5",
"value": True
},
{
"name": "SHA1",
"value": False
},
{
"name": "SHA256",
"value": False
},
{
"name": "MemD5",
"value": False
},
{
"name": "enumerate imports",
"value": True
},
{
"name": "enumerate exports",
"value": True
},
{
"name": "Verify Digital Signatures",
"value": True
},
{
"name": "sections",
"value": True
},
{
"name": "ports",
"value": True
},
{
"name": "handles",
"value": True
},
{
"name": "detect injected dlls",
"value": True
},
{
"name": "raw mode",
"value": False
},
{
"name": "strings",
"value": False
}
]
},
{
"name": "urlhistory",
"parameters": [
{
"name": "Prevent Hibernation",
"value": True
},
{
"name": "GetThumbnails",
"value": False
},
{
"name": "GetIndexedPageContent",
"value": False
}
]
},
{
"name": "ports",
"parameters": [
{
"name": "Prevent Hibernation",
"value": True
}
]
},
{
"name": "services",
"parameters": [
{
"name": "Prevent Hibernation",
"value": True
},
{
"name": "MD5",
"value": True
},
{
"name": "SHA1",
"value": False
},
{
"name": "SHA256",
"value": False
},
{
"name": "Verify Digital Signatures",
"value": True
},
{
"name": "Preserve Times",
"value": False
},
{
"name": "raw mode",
"value": False
}
]
},
{
"name": "stateagentinspector",
"parameters": [
{
"name": "eventTypes",
"value": []
}
]
}
]
}
# Default "standard investigative details" acquisition script per OS platform;
# consumed by get_data_acquisition when args['defaultSystemScript'] is given.
SYS_SCRIPT_MAP = {
    'osx': STANDARD_INVESTIGATIVE_DETAILS_OSX,
    'win': STANDARD_INVESTIGATIVE_DETAILS_WIN,
    'linux': STANDARD_INVESTIGATIVE_DETAILS_LINUX
}
# Per polling scenario: the context key that carries the id being polled
# ('type') and the user-facing message prefix shown while polling continues.
TABLE_POLLING_COMMANDS = {
    'searching': {
        'type': 'searchId',
        'message': 'Searching... , started polling for id '
    },
    'acquisition': {
        'type': 'acquisition_id',
        'message': 'Acquisition is not yet ready, started polling for id '
    }
}
''' CLIENT CLASS '''
class Client(BaseClient):
    """Thin wrapper around the FireEye HX REST API.

    Construction performs HTTP basic auth against GET /token and stores the
    returned X-FeApi-Token header, which authenticates every later request.
    """

    def __init__(self, base_url: str, verify: bool = True, proxy: bool = False, auth: Optional[tuple] = None):
        headers = {'Accept': 'application/json'}
        # ok_codes up to 204: several HX endpoints answer 201/204 with an empty body.
        super().__init__(base_url, verify=verify, proxy=proxy, ok_codes=range(200, 205), headers=headers, auth=auth)
        # Exchange the basic-auth credentials for an API token used on every call.
        self._headers['X-FeApi-Token'] = self.get_token_request()

    def get_token_request(self):
        """
        returns a token on successful request
        """
        # basic authentication
        try:
            response = self._http_request(
                method='GET',
                url_suffix='token',
                resp_type='response'
            )
        except Exception as e:
            demisto.debug(f'Encountered an error for url {self._base_url}/token: {e}')
            raise ValueError("Server URL incorrect")
        # successful request: the token travels in a response header, not the body.
        response_headers = response.headers
        token = response_headers.get('X-FeApi-Token')
        return token

    """
    POLICIES REQUEST
    """

    def list_policy_request(self, offset: int, limit: int, policy_id: str = None, name: str = None, enabled: bool = None):
        """GET /policies, optionally filtered by id, name and/or enabled state."""
        params = assign_params(_id=policy_id, name=name, offset=offset, limit=limit, enabled=enabled)
        return self._http_request(
            method='GET',
            url_suffix='policies',
            params=params,
        )

    def list_host_set_policy_request(self, offset: int, limit: int, policy_id: str = ''):
        """GET /host_set_policies, optionally filtered by policy id."""
        params = assign_params(policy_id=policy_id, offset=offset, limit=limit)
        return self._http_request(
            method="GET",
            url_suffix="host_set_policies",
            params=params
        )

    def list_host_set_policy_by_hostSetId_request(self, host_set_id):
        """GET the policy assignments of a single host set."""
        return self._http_request(
            method="GET",
            url_suffix=f"host_sets/{host_set_id}/host_set_policies"
        )

    def assign_host_set_policy_request(self, body: Dict[str, Any]):
        """POST a host-set/policy assignment; the API answers with an empty body."""
        return self._http_request(
            method="POST",
            url_suffix="host_set_policies",
            json_data=body,
            return_empty_response=True)

    def delete_host_set_policy_request(self, host_set_id, policy_id):
        """DELETE a host-set/policy assignment (empty response on success)."""
        return self._http_request(
            method="DELETE",
            url_suffix=f'host_sets/{host_set_id}/host_set_policies/{policy_id}',
            return_empty_response=True
        )

    """
    HOST INFORMATION REQUEST
    """

    def get_hosts_by_agentId_request(self, agent_id: str):
        """GET a single host record by its agent id."""
        return self._http_request(
            method="GET",
            url_suffix=f"hosts/{agent_id}"
        )

    def get_hosts_request(self, limit=None, offset=None, has_active_threats=None, has_alerts=None,
                          agent_version=None, containment_queued=None, containment_state=None,
                          host_name=None, os_platform=None, reported_clone=None, time_zone=None):
        """GET /hosts with any combination of the supported filters.

        os_platform maps to the dotted query key 'os.platform', which cannot
        be expressed as a keyword argument to assign_params.
        """
        params = assign_params(
            limit=limit,
            offset=offset,
            has_active_threats=has_active_threats,
            has_alerts=has_alerts,
            agent_version=agent_version,
            containment_queued=containment_queued,
            containment_state=containment_state,
            hostname=host_name,
            reported_clone=reported_clone,
            time_zone=time_zone)
        if os_platform:
            params['os.platform'] = os_platform
        return self._http_request(
            method="GET",
            url_suffix="hosts",
            params=params,
            headers=self._headers
        )

    def get_host_set_information_request(self, body, host_set_id):
        """GET a single host set (when host_set_id is given) or list host sets."""
        url = f"host_sets/{host_set_id}" if host_set_id else "host_sets"
        return self._http_request(
            method='GET',
            url_suffix=url,
            params=body
        )

    """
    HOST CONTAINMENT REQUESTS
    """

    def host_containmet_request(self, agent_id: str):
        """Request containment of a host (POST). Returns nothing.

        NOTE(review): method name has a typo ('containmet'); kept as-is since
        callers elsewhere in the file use this exact name.
        """
        self._http_request(
            method="POST",
            url_suffix=f"hosts/{agent_id}/containment",
        )

    def approve_containment_request(self, agent_id: str):
        """Approve a pending containment request (PATCH, empty response)."""
        return self._http_request(
            method="PATCH",
            url_suffix=f"hosts/{agent_id}/containment",
            json_data={"state": "contain"},
            return_empty_response=True
        )

    def cancel_containment_request(self, agent_id: str):
        """Release a host from containment (DELETE, empty response)."""
        self._http_request(
            method="DELETE",
            url_suffix=f"hosts/{agent_id}/containment",
            return_empty_response=True
        )

    def get_list_containment_request(self, offset: int, limit: int, state_update_time: str):
        """GET /containment_states, optionally filtered by state update time."""
        params = assign_params(offset=offset, limit=limit, state_update_time=state_update_time)
        return self._http_request(
            method="GET",
            url_suffix="containment_states",
            params=params
        )

    """
    ACQUISITION REQUEST
    """

    def data_acquisition_request(self, agent_id: str, body: Dict):
        """Start a live data acquisition on a host (script payload in body)."""
        return self._http_request(
            method="POST",
            url_suffix=f"hosts/{agent_id}/live",
            json_data=body
        )

    def data_acquisition_information_request(self, acquisition_id):
        """GET the status/details of a live data acquisition."""
        return self._http_request(
            method='GET',
            url_suffix=f'acqs/live/{acquisition_id}'
        ).get('data')

    def delete_data_acquisition_request(self, acquisition_id):
        """Delete a live data acquisition (empty response on success)."""
        self._http_request(
            method="DELETE",
            url_suffix=f"acqs/live/{acquisition_id}",
            return_empty_response=True
        )

    def data_collection_request(self, acquisition_id):
        """Download the collected .mans package as raw bytes."""
        return self._http_request(
            method='GET',
            url_suffix=f"acqs/live/{acquisition_id}.mans",
            resp_type='content'
        )

    def file_acquisition_request(self, agent_id, file_name, file_path, comment=None, external_id=None, req_use_api=None):
        """Start acquisition of a single file from a host."""
        body = assign_params(req_path=file_path, req_filename=file_name,
                             comment=comment, external_id=external_id, req_use_api=req_use_api)
        return self._http_request(
            method='POST',
            url_suffix=f'hosts/{agent_id}/files',
            json_data=body
        ).get('data')

    def file_acquisition_information_request(self, acquisition_id):
        """GET the status/details of a file acquisition."""
        return self._http_request(
            method='GET',
            url_suffix=f'acqs/files/{acquisition_id}'
        ).get('data')

    def file_acquisition_package_request(self, acquisition_id):
        """Download the acquired-file package (.zip).

        NOTE(review): this indexes ["content"] into the default JSON-parsed
        response; a binary zip is presumably not JSON. Compare with
        data_collection_request, which uses resp_type='content' -- confirm
        whether that was intended here as well.
        """
        return self._http_request(
            method='GET',
            url_suffix=f"acqs/files/{acquisition_id}.zip"
        )["content"]

    def delete_file_acquisition_request(self, acquisition_id):
        """
        no return value on successful request
        """
        self._http_request(
            method='DELETE',
            url_suffix=f"acqs/files/{acquisition_id}",
            return_empty_response=True
        )

    """
    ALERTS REQUEST
    """

    def get_alerts_request(self, has_share_mode=None, resolution=None, agent_id=None,
                           condition_id=None, limit=None, offset=None, sort=None, min_id=None,
                           event_at=None, alert_id=None, matched_at=None, reported_at=None, source=None, filter_query=None):
        """
        returns the response body on successful request
        """
        params = assign_params(
            has_share_mode=has_share_mode,
            resolution=resolution,
            event_at=event_at,
            min_id=min_id,
            _id=alert_id,
            matched_at=matched_at,
            reported_at=reported_at,
            source=source,
            limit=limit,
            offset=offset,
            sort=sort
        )
        # Dotted query keys cannot be passed as keyword args to assign_params.
        if agent_id:
            params["agent._id"] = agent_id
        if condition_id:
            params["condition._id"] = condition_id
        # filterQuery is embedded in the URL (already-encoded JSON filter).
        if filter_query:
            return self._http_request(
                'GET',
                url_suffix=f"alerts?filterQuery={filter_query}",
                params=params,
                headers=self._headers
            )
        else:
            return self._http_request(
                'GET',
                url_suffix="alerts",
                params=params,
                headers=self._headers
            )

    def get_alert_request(self, alert_id: int):
        """GET a single alert by id."""
        return self._http_request(
            method='GET',
            url_suffix=f'/alerts/{alert_id}',
            headers=self._headers
        )

    def suppress_alert_request(self, alert_id: int):
        """
        no return value on successful request
        """
        return self._http_request(
            method='DELETE',
            url_suffix=f'/alerts/{alert_id}',
            return_empty_response=True
        )

    """
    INDICATORS REQUEST
    """

    def get_indicator_request(self, category, name):
        """
        returns a json object representing an indicator
        """
        try:
            return self._http_request(
                method='GET',
                url_suffix=f"/indicators/{category}/{name}"
            )["data"]
        except Exception as e:
            # 404 -> indicator does not exist; surface a friendlier error.
            if '404' in str(e):
                raise ValueError(f"The indicator '{name}' was not found")
            else:
                raise ValueError(e)

    def get_indicators_request(self, params):
        """GET /indicators, optionally narrowed to one category."""
        try:
            return self._http_request(
                method='GET',
                url_suffix="/indicators" if not params.get("category") else f"/indicators/{params.get('category')}",
                params=params,
            )
        except Exception as e:
            demisto.debug(str(e))
            raise ValueError('Failed to parse response body')

    def get_indicator_conditions_request(self, category, name, offset):
        """
        returns a list of json objects, each representing an indicator condition
        if no results are found- returns None
        """
        # Only enabled conditions are requested.
        params = {'offset': offset, 'enabled': True}
        try:
            return self._http_request(
                method='GET',
                url_suffix=f'/indicators/{category}/{name}/conditions',
                params=params
            )
        except Exception as e:
            demisto.debug(str(e))
            raise ValueError('Failed to parse response body')

    def append_conditions_request(self, name: str, category: str, body: str):
        """PATCH new conditions onto an indicator; the API expects plain text."""
        self._headers['Content-Type'] = 'text/plain'
        return self._http_request(
            method="PATCH",
            url_suffix=f"/indicators/{category}/{name}/conditions",
            data=body
        )

    def new_indicator_request(self, category):
        """
        Create a new indicator
        """
        try:
            return self._http_request(
                method='POST',
                url_suffix=f"indicators/{category}"
            )
        except Exception as e:
            demisto.debug(str(e))
            raise ValueError('Failed to parse response body, unexpected response structure from the server.')

    """
    SEARCHES REQUEST
    """

    def get_search_by_id_request(self, search_id: int):
        """GET a single enterprise search by id."""
        return self._http_request(
            method="GET",
            url_suffix=f"searches/{search_id}"
        )

    def get_search_list_request(self, offset: int, limit: int, state: str = None, host_set_id: int = None,
                                actor_username: str = None, sort: str = None):
        """GET /searches with optional state/host-set/actor filters."""
        params = assign_params(offset=offset, limit=limit,
                               state=state, sort=sort)
        # Dotted query keys cannot be passed as keyword args to assign_params.
        if actor_username:
            params['update_actor.username'] = actor_username
        if host_set_id:
            params['host_set._id'] = host_set_id
        return self._http_request(
            method='GET',
            url_suffix="searches",
            params=params
        )

    def search_stop_request(self, search_id: str):
        """Stop a running enterprise search."""
        return self._http_request(
            method="POST",
            url_suffix=f"searches/{search_id}/actions/stop",
        )

    def delete_search_request(self, search_id):
        """
        no return value on successful request
        """
        self._http_request(
            method='DELETE',
            url_suffix=f"searches/{search_id}",
            return_empty_response=True
        )

    def search_result_get_request(self, search_id: str):
        """GET the results of a (completed) enterprise search."""
        return self._http_request(
            method="GET",
            url_suffix=f"searches/{search_id}/results",
        )

    def search_request(self, body: Dict):
        """Create a new enterprise search from a prepared request body."""
        return self._http_request(
            method="POST",
            url_suffix="searches",
            json_data=body
        )
''' HELPER FUNCTIONS '''
def get_alerts(client: Client, args: Dict[str, Any]) -> List:
    """Page through the alerts endpoint and return at most args['limit'] alerts
    (all alerts when no limit is given).

    Fix: the overshoot trim previously did `alerts[max_records - 1:-1] = []`,
    which dropped the max-th..second-to-last entries but KEPT the last fetched
    alert. It now keeps the first `max_records` alerts in order.
    """
    offset = 0
    alerts = []  # type: List[Dict[str, str]]
    max_records = args.get("limit") or float('inf')
    while len(alerts) < max_records:
        alerts_partial_results = client.get_alerts_request(
            has_share_mode=args.get("hasShareMode"),
            resolution=args.get("resolution"),
            agent_id=args.get("agentId"),
            condition_id=args.get("conditionId"),
            event_at=args.get("eventAt"),
            alert_id=args.get("alertId"),
            matched_at=args.get("matchedAt"),
            reported_at=args.get("reportedAt"),
            source=args.get("source"),
            offset=offset,
            limit=args.get("limit") or 100,
            sort=args.get("sort"),
            filter_query=args.get("filterQuery")
        )
        entries = alerts_partial_results['data']['entries']
        # An empty page means the server has no more results.
        if not entries:
            break
        alerts.extend(entries)
        offset = len(alerts)
    # Trim any overshoot from the final page, keeping the FIRST max_records.
    if len(alerts) > max_records:
        del alerts[int(max_records):]
    return alerts
def get_agent_id_by_host_name(client: Client, host_name: str):
    """Resolve a host name to the agent id of the first matching host."""
    response = client.get_hosts_request(host_name=host_name, limit=1)
    first_match = response["data"]["entries"][0]
    return first_match["_id"]
def host_set_entry(host_sets: List[Dict]) -> List[Dict]:
    """Map raw host-set records to display rows with Name/ID/Type columns."""
    rows = []
    for record in host_sets:
        rows.append({
            'Name': record.get('name'),
            'ID': record.get('_id'),
            'Type': record.get('type'),
        })
    return rows
def general_context_from_event(alert: Dict):
    """Build a File or IP indicator object from an alert's event values.

    Returns None for event types that carry no indicator context.
    """
    def _file_indicator(values: Dict):
        dbot = Common.DBotScore(values.get('fileWriteEvent/md5'), DBotScoreType.FILE,
                                integration_name="FireEye-HX", score=Common.DBotScore.NONE)
        return Common.File(
            dbot,
            name=values.get('fileWriteEvent/fileName'),
            md5=values.get('fileWriteEvent/md5'),
            extension=values.get('fileWriteEvent/fileExtension'),
            path=values.get('fileWriteEvent/fullPath')
        )

    def _ip_indicator(values: Dict):
        dbot = Common.DBotScore(
            values.get("ipv4NetworkEvent/remoteIP"),
            DBotScoreType.IP,
            integration_name="FireEye-HX",
            score=Common.DBotScore.NONE
        )
        return Common.IP(values.get("ipv4NetworkEvent/remoteIP"), dbot_score=dbot)

    builders = {
        'fileWriteEvent': _file_indicator,
        'ipv4NetworkEvent': _ip_indicator
    }
    builder = builders.get(alert['event_type'])
    if builder is None:
        return None
    return builder(alert['event_values'])
def oneFromList(list_of_args, args):
    """Return (arg_name, value) when EXACTLY one of list_of_args has a truthy
    value in args; otherwise return False."""
    present = [(name, args.get(name)) for name in list_of_args if args.get(name)]
    return present[0] if len(present) == 1 else False
def organize_search_body_host(client: Client, arg: Tuple, body: Dict):
    """Populate the search request body with the target hosts or host set,
    based on which host-selection argument was supplied (arg = (name, value))."""
    key, value = arg
    if key == "hostsNames":
        agents = []
        for host_name in value.split(","):
            try:
                agents.append({"_id": get_agent_id_by_host_name(client, host_name)})
            except Exception:
                raise ValueError(f"Host Name {host_name} is not valid")
        body["hosts"] = agents
    elif key == "agentsIds":
        body["hosts"] = [{"_id": agent_id} for agent_id in value.split(",")]
    elif key == "hostSetName":
        entries = client.get_host_set_information_request({"name": value}, None)["data"]["entries"]
        body["host_set"] = {"_id": entries[0]["_id"]}
    elif key == "hostSet":
        body["host_set"] = {"_id": int(value)}
    return body
def organize_search_body_query(argForQuery: Tuple, args: Dict):
    """Translate a search argument (name, value) into the list of query dicts
    expected by the searches API, validating the matching operator argument."""
    key, raw_value = argForQuery
    query = []
    if key == "fieldSearchName":
        if not args.get("fieldSearchOperator") or not args.get("fieldSearchValue"):
            raise ValueError("fieldSearchOperator and fieldSearchValue are required arguments")
        operator = args.get("fieldSearchOperator")
        for search_value in argToList(args.get("fieldSearchValue", "")):
            query.append(assign_params(field=raw_value, operator=operator, value=search_value))
    else:
        if not args.get(f"{key}Operator"):
            raise ValueError(f"{key}Operator is required argument")
        # Map the CLI argument name to the HX query field name.
        field_names = {
            'dnsHostname': 'DNS Hostname',
            'fileFullPath': 'File Full Path',
            'fileMD5Hash': 'File MD5 Hash',
            'ipAddress': 'IP Address'
        }
        for search_value in argToList(raw_value):
            query.append(assign_params(field=field_names[key],
                                       operator=args.get(f"{key}Operator"),
                                       value=search_value))
    return query
def get_collect_endpoint_contxt(host: Dict):
    """Build the standard Endpoint context entry from a raw host record."""
    os_info = host.get('os', {})
    return {
        'Hostname': host.get('hostname'),
        'ID': host.get('_id'),
        'IPAddress': host.get('primary_ip_address'),
        'Domain': host.get('domain'),
        'MACAddress': host.get('primary_mac'),
        'OS': os_info.get('platform'),
        'OSVersion': os_info.get('product_name')
    }
def get_data_acquisition(client: Client, args: Dict[str, Any]) -> Dict:
    """Validate the acquisition arguments, resolve the target agent and start a
    live data acquisition; returns the acquisition record.

    Either a custom script (plus scriptName) or defaultSystemScript
    (osx/win/linux) must be supplied, and either agentId or hostName.
    """
    host_name = args.get("hostName", "")
    agent_id = args.get("agentId")
    script = args.get("script", "")
    script_name = args.get("scriptName")
    default_system_script = args.get("defaultSystemScript")

    # Argument validation (guard clauses).
    if not host_name and not agent_id:
        raise ValueError('Please provide either agentId or hostName')
    if not default_system_script and not script:
        raise ValueError('If the script is not provided, defaultSystemScript must be specified')
    if script and not script_name:
        raise ValueError('If the script is provided, script name must be specified as well')

    if not agent_id:
        agent_id = get_agent_id_by_host_name(client, host_name)

    # Fall back to the canned per-OS investigative script when requested.
    if default_system_script:
        script = json.dumps(SYS_SCRIPT_MAP[default_system_script])
        script_name = f'{default_system_script}DefaultScript'

    body = {
        'name': script_name,
        'script': {'b64': base64.b64encode(bytes(script, 'utf-8')).decode()}
    }
    return client.data_acquisition_request(agent_id, body)["data"]
def get_alert_entry(alert: Dict):
    """Summarize a raw alert record for table display."""
    agent = alert.get('agent', {})
    return {
        'Alert ID': alert.get('_id'),
        'Reported': alert.get('reported_at'),
        'Event Type': alert.get('event_type'),
        'Agent ID': agent.get('_id')
    }
def get_indicator_entry(indicator: Dict):
    """Summarize a raw indicator record for table display."""
    stats = indicator.get('stats', {})
    return {
        'OS': ', '.join(indicator.get('platforms', [])),
        'Name': indicator.get('name'),
        'Created By': indicator.get('created_by'),
        'Active Since': indicator.get('active_since'),
        'Category': indicator.get('category', {}).get('name'),
        'Signature': indicator.get('signature'),
        'Active Condition': stats.get('active_conditions'),
        'Hosts With Alerts': stats.get('alerted_agents'),
        'Source Alerts': stats.get('source_alerts')
    }
def get_indicator_command_result(alert: Dict[str, Any]) -> CommandResults:
    """Build a CommandResults (File or Ip) for the indicator extracted from an
    alert, chosen by the alert's event type."""
    indicator = general_context_from_event(alert)
    event_values: Dict[str, Any] = alert.get('event_values', {})

    if alert.get("event_type") == 'fileWriteEvent':
        file_row = {
            'Name': event_values.get('fileWriteEvent/fileName'),
            'md5': event_values.get('fileWriteEvent/md5'),
            'Extension': event_values.get('fileWriteEvent/fileExtension'),
            'Path': event_values.get('fileWriteEvent/fullPath')
        }
        return CommandResults(
            outputs_prefix="File",
            indicator=indicator,
            readable_output=tableToMarkdown(
                name="File",
                t=file_row,
                headers=['Name', 'md5', 'Extension', 'Path']
            )
        )

    # Any other event type is rendered as an IP indicator.
    return CommandResults(
        outputs_prefix="Ip",
        indicator=indicator,
        readable_output=tableToMarkdown(
            name="Ip",
            t={'Ipv4': event_values.get('ipv4NetworkEvent/remoteIP')}
        )
    )
def get_condition_entry(condition: Dict):
    """Summarize an indicator condition (only its first test) for table display.

    Fix: the original did `condition.get('tests', {})[0]`, which raised
    KeyError when 'tests' was absent (indexing into the {} default) and
    IndexError when it was an empty list; both now yield None values.
    """
    tests = condition.get('tests') or [{}]
    first_test = tests[0]
    return {
        'Event Type': condition.get('event_type'),
        'Operator': first_test.get('operator'),
        'Value': first_test.get('value'),
    }
def get_all_indicators(client: Client, category=None, search=None,
                       share_mode=None, sort=None, created_by=None,
                       alerted=None, limit=None):
    """Page through the indicators endpoint and return up to `limit` indicators.

    Fixes:
    - the 'stats.alerted_agents' filter was assigned from `share_mode`
      instead of `alerted` (copy/paste bug);
    - the overshoot trim `indicators[max-1:-1] = []` kept the LAST fetched
      indicator in place of the limit-th one; it now keeps the first `limit`.
    """
    max_records = limit or float('inf')
    indicators = []  # type: List[Dict[str, str]]
    params = assign_params(category=category, search=search, sort=sort, created_by=created_by, offset=0, limit=limit or 100)
    # Dotted query keys cannot be passed as keyword args to assign_params.
    if share_mode:
        params["category.share_mode"] = share_mode
    if alerted:
        params["stats.alerted_agents"] = alerted
    # get all results
    while len(indicators) < max_records:
        indicators_partial_results = client.get_indicators_request(params)["data"]["entries"]
        if not indicators_partial_results:
            break
        indicators.extend(indicators_partial_results)
        params["offset"] = len(indicators)
    # trim any overshoot from the final page, keeping the first max_records
    if len(indicators) > max_records:
        del indicators[int(max_records):]
    return indicators
def get_all_enabled_conditions(client: Client, indicator_category, indicator_name):
    """Collect every enabled condition of the given indicator by paging the
    conditions endpoint until an empty page is returned."""
    conditions = []  # type: List[Dict[str, str]]
    while True:
        page = client.get_indicator_conditions_request(
            indicator_category,
            indicator_name,
            offset=len(conditions)
        )['data']['entries']
        if not page:
            break
        conditions.extend(page)
    return conditions
def get_indicator_conditions(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    returns a list of enabled conditions assosiated with a specific indicator to the war room
    """
    conditions = get_all_enabled_conditions(
        client,
        args.get('category'),
        args.get('name')
    )
    readable = tableToMarkdown(
        name=f"Indicator '{args.get('name')}' Alerts on",
        t=[get_condition_entry(condition) for condition in conditions]
    )
    return CommandResults(
        outputs_prefix="FireEyeHX.Conditions",
        outputs_key_field="_id",
        outputs=conditions,
        readable_output=readable
    )
"""helper fetch-incidents"""
def organize_reported_at(reported_at):
    """Advance an ISO timestamp string ('...HH:MM:SS.mmmZ') by one millisecond,
    carrying into the seconds when the milliseconds roll over past 999."""
    next_ms = int(reported_at[-4:-1]) + 1
    if next_ms == 1000:
        # Roll over: add a full second and reset the millisecond field.
        as_ts = date_to_timestamp(reported_at[:-5], date_format=DATE_FORMAT) + 1000
        return timestamp_to_datestring(as_ts, date_format=DATE_FORMAT) + ".000Z"
    # Re-pad the millisecond field to exactly three digits.
    return reported_at[:-4] + str(next_ms).zfill(3) + reported_at[-1]
def query_fetch(reported_at=None, first_fetch: str = None):
    """Build the JSON filter-query string '{reported_at between [start, now]}'
    used when fetching incidents; start is `reported_at` when given, otherwise
    derived from the first_fetch range."""
    if reported_at:
        start = reported_at
    else:
        start = timestamp_to_datestring(parse_date_range(first_fetch, to_timestamp=True, utc=False)[0])
    end = timestamp_to_datestring(parse_date_range("1 days", to_timestamp=True, utc=False)[1])
    return '{"operator":"between","arg":["' + start + '","' + end + '"],"field":"reported_at"}'
def parse_alert_to_incident(alert: Dict, pattern: Pattern) -> Dict:
    """Convert a FireEye HX alert into an XSOAR incident dict.

    The incident name is '<Event Type>: <indicator>', where the event type is
    split from camelCase by `pattern` and title-cased, and the indicator is
    the event value most representative of the event type (file name, remote
    IP, DNS host name or registry value name; empty when unknown).

    Fix: the regex replacement "\\g<1> \\g<2>" was a plain string literal with
    invalid escape sequences (SyntaxWarning on modern Python) -- now a raw
    string; the legacy u''.format() call is now an f-string.
    """
    event_type = alert.get('event_type') or 'NewEvent'
    event_values = alert.get('event_values', {})
    event_indicators_map = {
        'fileWriteEvent': 'fileWriteEvent/fileName',
        'ipv4NetworkEvent': 'ipv4NetworkEvent/remoteIP',
        'dnsLookupEvent': 'dnsLookupEvent/hostname',
        'regKeyEvent': 'regKeyEvent/valueName'
    }
    event_indicator = event_indicators_map.get(event_type) or 'No Indicator'
    indicator = ''
    if isinstance(event_values, dict):
        indicator = event_values.get(event_indicator, '')
    # Raw string keeps \g<N> group references intact.
    event_type_parsed = pattern.sub(r"\g<1> \g<2>", event_type).title()
    incident_name = f'{event_type_parsed}: {indicator}'
    incident = {
        'name': incident_name,
        'occurred': alert.get("event_at"),
        'rawJSON': json.dumps(alert)
    }
    return incident
def run_commands_without_polling(client: Client, args: Dict[str, Any]):
    """Run the underlying command once (no polling loop) and return its first
    CommandResults; returns None for unrecognized commands."""
    dispatch = {
        'fireeye-hx-search': start_search_command,
        'fireeye-hx-data-acquisition': data_acquisition_command,
        'fireeye-hx-file-acquisition': file_acquisition_command,
    }
    command = dispatch.get(args.get('cmd'))
    if command:
        return command(client, args)[0]
''' COMMAND FUNCTIONS '''
"""
POLICIES
"""
def list_policy_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """List HX policies, optionally filtered by name OR id (mutually exclusive)."""
    offset = args.get('offset', 0)
    limit = args.get('limit', 50)
    name = args.get('policyName')
    policy_id = args.get('policyId')
    enabled = args.get('enabled')
    if name and policy_id:
        raise ValueError("Enter a name or ID but not both")
    response = client.list_policy_request(offset=offset, limit=limit, policy_id=policy_id, name=name, enabled=enabled)
    rows = [{
        "Policy Id": entry["_id"],
        "Policy Name": entry["name"],
        "Description": entry["description"],
        "Priority": entry["priority"],
        "Enabled": entry["enabled"],
    } for entry in response['data']["entries"]]
    readable = tableToMarkdown(
        name="FireEye HX List Policies",
        t=rows,
        headers=["Policy Name", "Policy Id", "Description", "Priority", "Enabled"]
    )
    return CommandResults(
        outputs_prefix='FireEyeHX.Policy',
        outputs_key_field='_id',
        outputs=response,
        raw_response=response,
        readable_output=readable
    )
def list_host_set_policy_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """List host-set/policy assignments, either for one host set or page-wise
    by policy id (the two filters are mutually exclusive)."""
    offset = args.get("offset", 0)
    limit = args.get("limit", 50)
    host_set_id = args.get("hostSetId")
    policy_id = args.get("policyId", "")
    if host_set_id and policy_id:
        raise ValueError("Enter a Policy Id or Host Set Id but not both")
    if host_set_id:
        response = client.list_host_set_policy_by_hostSetId_request(host_set_id)
    else:
        response = client.list_host_set_policy_request(offset=offset, limit=limit, policy_id=policy_id)
    entries = response["data"]["entries"]
    rows = [{
        "Policy Id": entry["policy_id"],
        "Host Set Id": entry["persist_id"]
    } for entry in entries]
    readable = tableToMarkdown(name="FireEye HX Host Set Policies", t=rows, headers=["Policy Id", "Host Set Id"])
    return CommandResults(
        outputs_prefix="FireEyeHX.HostSets.Policy",
        outputs_key_field="_id",
        outputs=entries,
        readable_output=readable
    )
def assign_host_set_policy_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """Assign a host set to a policy; a 400 from the API (assignment already
    exists) is reported as a readable message rather than raised."""
    host_set_id = args.get("hostSetId")
    policy_id = args.get("policyId")
    if not policy_id or not host_set_id:
        raise ValueError("policy ID and hostSetId are required")
    response = None
    try:
        response = client.assign_host_set_policy_request({
            "persist_id": host_set_id,
            "policy_id": policy_id})
        message = "Success"
    except Exception as e:
        if '400' not in str(e):
            raise ValueError(e)
        demisto.debug(str(e))
        message = "This hostset may already be included in this policy"
    return CommandResults(
        readable_output=message,
        outputs_prefix="FireEyeHX.Policy",
        outputs=response
    )
def delete_host_set_policy_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Delete a policy assignment from a host set.

    A 404 response is reported as a not-found message; any other failure is
    re-raised as ValueError.
    """
    host_set_id = int(args.get('hostSetId', ''))
    policy_id = args.get('policyId')
    message = ''
    try:
        client.delete_host_set_policy_request(host_set_id, policy_id)
        message = 'Success'
    except Exception as e:
        if '404' in str(e):
            # BUGFIX: corrected user-facing typo "polisy ID" -> "policy ID".
            message = f'policy ID - {policy_id} or Host Set ID - {host_set_id} Not Found'
        else:
            raise ValueError(e)
    return CommandResults(readable_output=message)
"""
HOST INFORMAITION
"""
def get_all_hosts_information_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Return information for hosts known to FireEye HX, up to `limit` entries.

    Args (in `args`):
        offset: index of the first host to fetch (default 0).
        limit: maximum number of hosts to return, capped at 1000 (default 1000).
    """
    offset = int(args.get('offset', 0))
    limit = min(int(args.get('limit', 1000)), 1000)  # the command caps results at 1000
    hosts: List[Dict] = []
    # Page through the endpoint until an empty page is returned or we have enough.
    # BUGFIX: the previous loop kept paging until the server was exhausted even
    # after `limit` hosts were already collected.
    while len(hosts) < limit:
        hosts_partial = client.get_hosts_request(offset=offset, limit=limit)
        entries = hosts_partial["data"]["entries"]
        if not entries:
            break
        hosts.extend(entries)
        # BUGFIX: advance the offset by the number of entries received instead of
        # resetting it to len(hosts), which ignored a non-zero starting offset.
        offset += len(entries)
    # BUGFIX: keep exactly the first `limit` hosts. The previous slice assignment
    # (hosts[limit - 1:-1] = []) removed the middle of the list but kept the
    # final fetched host in place of the limit-th one.
    del hosts[limit:]
    outputs = [{
        'Host Name': host.get('hostname'),
        'Last Poll': host.get('last_poll_timestamp'),
        'Agent ID': host.get('_id'),
        'Agent Version': host.get('agent_version'),
        'Host IP': host.get('primary_ip_address'),
        'OS': host.get('os', {}).get('platform'),
        'Containment State': host.get('containment_state'),
        'Domain': host.get('domain'),
        'Last Alert': host.get('last_alert')
    } for host in hosts]
    headers_for_table = ['Host Name', 'Host IP', 'Agent ID', 'Agent Version',
                         'OS', 'Last Poll', 'Containment State', 'Domain', 'Last Alert']
    md = tableToMarkdown(
        name="FireEye HX Get Hosts Information",
        t=outputs,
        headers=headers_for_table
    )
    return CommandResults(
        outputs_prefix="FireEyeHX.Hosts",
        outputs_key_field="_id",
        outputs=outputs,
        raw_response=hosts,
        readable_output=md
    )
def get_host_information_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Return information on a single host, looked up by agent id or by host name.
    One of agentId / hostName is required.
    """
    agent_id = args.get("agentId")
    host_name = args.get("hostName")
    if not agent_id and not host_name:
        raise ValueError("Please provide either agentId or hostName")
    host: Dict
    if agent_id:
        try:
            host = client.get_hosts_by_agentId_request(agent_id)["data"]
        except Exception:
            raise ValueError(f"agentId {agent_id} is not correct")
    else:
        try:
            host = client.get_hosts_request(limit=1, host_name=host_name)["data"]["entries"][0]
        except Exception:
            raise ValueError(f"{host_name} is not found")
    table_headers = ['Host Name', 'Host IP', 'Agent ID', 'Agent Version',
                     'OS', 'Last Poll', 'Containment State', 'Domain', 'Last Alert']
    row = {
        'Host Name': host.get('hostname'),
        'Last Poll': host.get('last_poll_timestamp'),
        'Agent ID': host.get('_id'),
        'Agent Version': host.get('agent_version'),
        'Host IP': host.get('primary_ip_address'),
        'OS': host.get('os', {}).get('platform'),
        'Containment State': host.get('containment_state'),
        'Domain': host.get('domain'),
        'Last Alert': host.get('last_alert')
    }
    readable = tableToMarkdown(
        name="FireEye HX Get Host Information",
        t=[row],
        headers=table_headers
    )
    return CommandResults(
        outputs_prefix="FireEyeHX.Hosts",
        outputs_key_field="_id",
        outputs=host,
        readable_output=readable
    )
def get_host_set_information_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Return host set information to the war room according to given id or filters.
    """
    host_set_id = args.get('hostSetID')
    body = assign_params(
        limit=args.get('limit'),
        offset=args.get('offset'),
        search=args.get('search'),
        sort=args.get('sort'),
        name=args.get('name'),
        type=args.get('type')
    )
    response = client.get_host_set_information_request(body, host_set_id)
    host_set = []  # type: List[Dict[str, str]]
    try:
        data = response['data']
        # A by-id lookup returns a single host set; a filtered query returns entries.
        host_set = [data] if host_set_id else data.get('entries', [])
    except Exception as e:
        demisto.debug(str(e))
        # BUGFIX: `response` is a parsed payload (it is subscripted with ['data']
        # above), not a requests.Response, so it has no `.text` attribute — the
        # old handler raised AttributeError instead of the intended error.
        raise ValueError('Failed to get host set information - unexpected response from the server.\n' + str(response))
    md_table = "No host sets found"
    if len(host_set) > 0:
        md_table = tableToMarkdown(
            name='FireEye HX Get Host Sets Information',
            t=host_set_entry(host_set),
            headers=['Name', 'ID', 'Type']
        )
    return CommandResults(
        outputs_prefix="FireEyeHX.HostSets",
        outputs_key_field="_id",
        outputs=host_set,
        readable_output=md_table
    )
"""
HOST CONTAINMENT
"""
def get_list_containment_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    List containment states across hosts, optionally filtered by state update time.
    """
    entries = client.get_list_containment_request(
        offset=args.get("offset", 0),
        limit=args.get("limit", 50),
        state_update_time=args.get("state_update_time", "")
    )["data"]["entries"]
    rows = [{
        "Id": entry["_id"],
        "State": entry["state"],
        "Request Origin": entry["requested_by_actor"],
        "Request Date": entry["requested_on"],
        "Containment Origin": entry["contained_by_actor"],
        "Containment Date": entry["contained_on"],
        "Last System information date": entry["last_sysinfo"]
    } for entry in entries]
    headers = ["Id", "State", "Request Origin", "Request Date",
               "Containment Origin", "Containment Date", "Last System information date"]
    return CommandResults(
        outputs_prefix="FireEyeHX.Hosts",
        outputs_key_field="_id",
        outputs=entries,
        readable_output=tableToMarkdown(name="List Containment", t=rows, headers=headers)
    )
def host_containment_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
    """
    Request containment for a host and immediately try to approve the request.

    The host is identified by agentId or hostName (one of them is required).
    Returns two entries: the host record and a standard Endpoint context entry.
    """
    agent_id = args.get("agentId")
    host_name = args.get("hostName", "")
    if not agent_id and not host_name:
        raise ValueError("Please provide either agentId or hostName")
    if not agent_id:
        # Resolve the agent id from the host name.
        agent_id = get_agent_id_by_host_name(client, host_name)
    try:
        client.host_containmet_request(agent_id)
    except Exception as e:
        raise ValueError(e)
    message = ""
    try:
        client.approve_containment_request(agent_id)
        message = "Containment request for the host was sent and approved successfully"
    except Exception as e:
        # 422 -> the caller lacks approval permission; the request itself was still sent.
        if '422' in str(e):
            message = "You do not have the required permissions for containment approve\n"\
                "The containment request sent, but it is not approve."
        # 409 -> the host is most likely already contained.
        elif '409' in str(e):
            message = "This host may already in containment"
        else:
            raise ValueError(e)
    host = client.get_hosts_by_agentId_request(agent_id)
    return [CommandResults(
        outputs_prefix="FireEyeHX.Hosts",
        outputs_key_field="_id",
        outputs=host['data'],
        readable_output=message), CommandResults(outputs_prefix="Endpoint", outputs=get_collect_endpoint_contxt(host["data"]))]
def approve_containment_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Approve a pending containment request for the given agent.
    """
    agent_id = args.get("agentId")
    if not agent_id:
        raise ValueError("Agent ID is required")
    message = "Containment for the host was approved successfully"
    try:
        client.approve_containment_request(agent_id)
    except Exception as e:
        # 409 -> most likely already contained; anything else is a permission/API failure.
        if '409' in str(e):
            message = "This host may already in containment"
        else:
            message = "Containment for the host failed, check if you have the necessary permissions"
    return CommandResults(
        outputs_prefix="FireEyeHX.Hosts",
        readable_output=message
    )
def cancel_containment_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Release a host from containment, identified by agent id or host name.
    """
    agent_id = args.get("agentId")
    host_name = args.get("hostName", "")
    if not agent_id and not host_name:
        raise ValueError("One of the following arguments is required -> [agentId, hostName]")
    if not agent_id:
        agent_id = get_agent_id_by_host_name(client, host_name)
    message = "Success"
    try:
        client.cancel_containment_request(agent_id)
    except Exception as e:
        if '409' not in str(e):
            raise ValueError(e)
        # 409 -> the host is most likely not contained anymore.
        message = "This host may already in uncontain"
    return CommandResults(readable_output=message)
"""
ACQUISITION
"""
def data_acquisition_command(client: Client, args: Dict[str, Any]) -> Tuple[CommandResults, bool, str]:
    """
    Start (or poll) a data acquisition.

    Returns a tuple (CommandResults, is_complete, acquisition_id) consumed by the
    polling runner: is_complete is False until the acquisition state is COMPLETE,
    at which point the acquisition info is stashed in args for the result handler.
    """
    if 'acquisition_id' not in args:
        # First invocation: kick off a new acquisition.
        acquisition_info = get_data_acquisition(client, args)
        acquisition_id = acquisition_info.get('_id')
        demisto.debug('Acquisition request was successful. Waiting for acquisition process to be complete.')
    # Subsequent polling invocations carry acquisition_id in args.
    acquisition_id = args.get('acquisition_id') if args.get('acquisition_id') else acquisition_id
    acquisition_info = client.data_acquisition_information_request(acquisition_id)
    if acquisition_info.get('state') != 'COMPLETE':
        return CommandResults(
            readable_output=f'Acquisition request was successful\nAcquisition ID: {acquisition_id}'), False, str(acquisition_id)
    # Hand the final acquisition info to the result handler via args.
    args['acquisition_info'] = acquisition_info
    return CommandResults(
        readable_output=f'Acquisition request was successful\nAcquisition ID: {acquisition_id}'), True, str(acquisition_id)
def data_acquisition_with_polling_command(client: Client, args: Dict[str, Any]):
    """
    Run the data-acquisition flow with scheduled polling until it completes.
    """
    return run_polling_command(
        client=client,
        args=args,
        cmd='fireeye-hx-data-acquisition',
        post_func=data_acquisition_command,
        get_func=result_data_acquisition,
        t='acquisition')
def result_data_acquisition(client: Client, args: Dict[str, Any]) -> List:
    """
    Final step of the data-acquisition polling flow: download the .mans package
    and return it to the war room alongside the acquisition information.
    """
    demisto.debug('Acquisition process has been complete. Fetching mans file.')
    message = f'{args.get("fileName")} acquired successfully'
    if args.get('acquisition_info', {}).get('error_message'):
        # Surface the server-side error instead of the success message.
        message = args.get('acquisition_info', {}).get('error_message', '')
    # output file and acquisition information to the war room
    data = client.data_collection_request(args.get('acquisition_id'))
    return [CommandResults(
        outputs_prefix="FireEyeHX.Acquisitions.Data",
        outputs=args.get('acquisition_info', {}),
        readable_output=f'{message}\nacquisition ID: {args.get("acquisition_id")}'),
        fileResult(f'agent_{args.get("agentId")}_data.mans', data)]
def delete_data_acquisition_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Delete a data acquisition by id and report success to the war room.
    """
    if "acquisitionId" not in args:
        raise ValueError("Acquisition Id is required")
    acquisition_id = args.get("acquisitionId")
    client.delete_data_acquisition_request(acquisition_id)
    return CommandResults(
        readable_output=f"data acquisition {acquisition_id} deleted successfully"
    )
def file_acquisition_command(client: Client, args: Dict[str, Any]) -> Tuple[CommandResults, bool, str]:
    """
    Start (or poll) a file acquisition from a host.

    Returns a tuple (CommandResults, is_done, acquisition_id) consumed by the
    polling runner: is_done becomes True once the acquisition reaches a terminal
    state (COMPLETE, ERROR or FAILED), and the acquisition info is stashed in
    args for the result handler.
    """
    if "acquisition_id" not in args:
        # First invocation: validate host identification and start the acquisition.
        if not args.get('hostName') and not args.get('agentId'):
            raise ValueError('Please provide either agentId or hostName')
        if args.get('hostName'):
            args['agentId'] = get_agent_id_by_host_name(client, args.get('hostName', ""))
        use_api = args.get('acquireUsing') == 'API'
        acquisition_info = client.file_acquisition_request(
            args.get('agentId'),
            args.get('fileName'),
            args.get('filePath'),
            req_use_api=use_api
        )
        acquisition_id = acquisition_info.get('_id')
        demisto.debug('acquisition request was successful. Waiting for acquisition process to be complete.')
    # Subsequent polling invocations carry acquisition_id in args.
    acquisition_id = args.get('acquisition_id') if args.get('acquisition_id') else str(acquisition_id)
    acquisition_info = client.file_acquisition_information_request(acquisition_id)
    state = acquisition_info.get('state')
    if state not in ['COMPLETE', 'ERROR', 'FAILED']:
        return CommandResults(
            readable_output=f'acquisition request was successful, Acquisition Id: {acquisition_id}'), False, acquisition_id
    # Hand the final acquisition info to the result handler via args.
    args['acquisition_info'] = acquisition_info
    return CommandResults(
        readable_output=f'acquisition request was successful, Acquisition Id: {acquisition_id}'), True, acquisition_id
def file_acquisition_with_polling_command(client: Client, args: Dict[str, Any]):
    """
    Run the file-acquisition flow with scheduled polling until it completes.
    """
    return run_polling_command(
        client=client,
        args=args,
        cmd='fireeye-hx-file-acquisition',
        post_func=file_acquisition_command,
        get_func=result_file_acquisituon,
        t='acquisition')
def result_file_acquisituon(client: Client, args: Dict[str, Any]) -> List:
    """
    Final step of the file-acquisition polling flow: download the acquired file
    package (zip) and return it with the acquisition information.

    NOTE(review): the function name carries a typo ("acquisituon") but is kept
    as-is because the polling wrapper references it by this name.
    """
    demisto.debug('acquisition process has been complete. Fetching zip file.')
    acquired_file = client.file_acquisition_package_request(args.get('acquisition_id'))
    message = f"{args.get('fileName')} acquired successfully"
    if args.get('acquisition_info', {}).get('error_message'):
        # Surface the server-side error instead of the success message.
        message = args.get('acquisition_info', {}).get('error_message')
    return [CommandResults(
        outputs_prefix="FireEyeHX.Acquisitions.Files",
        outputs_key_field="_id",
        outputs=args.get('acquisition_info'),
        readable_output=f"{message}\nacquisition ID: {args.get('acquisition_id')}"
    ), fileResult(f"{os.path.splitext(args.get('fileName',''))[0]}.zip", acquired_file)]
def get_data_acquisition_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
    """
    Wait for acquisition process to complete and fetch the data.

    Requires acquisitionId. When the acquisition state is COMPLETE the .mans
    package is downloaded and returned as a file entry; otherwise only the
    acquisition information and its current state are returned.
    """
    # validate the acquisitionId was passed
    if not args.get('acquisitionId'):
        raise ValueError('Please provide acquisitionId')
    acquisition_id = args.get("acquisitionId")
    acquisition_info = client.data_acquisition_information_request(acquisition_id)
    agent_id = acquisition_info.get('host').get('_id')
    host_info = client.get_hosts_by_agentId_request(agent_id)["data"]
    hostname = host_info.get('hostname')
    # Add hostname to the host info of acquisition_info
    acquisition_info["host"]["hostname"] = hostname
    # Add Integration Instance to the acquisition_info
    acquisition_info["instance"] = demisto.integrationInstance()
    # if `state` equals to 'COMPLETE'
    if acquisition_info.get('state') == 'COMPLETE':
        message = 'Acquisition completed successfully.'
        if acquisition_info.get('error_message'):
            # Prefer the server-side error over the generic success message.
            message = acquisition_info.get('error_message')
        # output file and acquisition information to the war room
        data = client.data_collection_request(acquisition_id)
        return [CommandResults(
            outputs_prefix="FireEyeHX.Acquisitions.Data",
            outputs_key_field="_id",
            outputs=acquisition_info,
            readable_output=f"{message}\nacquisition ID: {acquisition_id}"
        ), fileResult('{}_agent_{}_data.mans'.format(acquisition_id, agent_id), data)]
    # else return message for states in [ NEW, ERROR, QUEUED, RUNNING, FAILED ]
    state = acquisition_info.get('state')
    message = "Acquisition process not yet completed."
    if acquisition_info.get('error_message'):
        message = acquisition_info.get('error_message')
    return [CommandResults(
        outputs_prefix="FireEyeHX.Acquisitions.Data",
        outputs_key_field="_id",
        outputs=acquisition_info,
        readable_output=f"{message}\nacquisition ID: {acquisition_id}\nstate: {state}"
    )]
def initiate_data_acquisition_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Kick off a data acquisition and return its id without waiting for completion.
    """
    acquisition_info: Dict = get_data_acquisition(client, args)
    # Enrich the acquisition record with the host name and this integration instance.
    acquisition_info["host"]["hostname"] = args.get("hostName")
    acquisition_info["instance"] = demisto.integrationInstance()
    readable = f'Acquisition ID: {acquisition_info.get("_id")} on Instance: {acquisition_info.get("instance")}'
    return CommandResults(
        outputs_prefix="FireEyeHX.Acquisitions.Data",
        outputs=acquisition_info,
        readable_output=readable
    )
def delete_file_acquisition_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Delete a file acquisition by id; returns a success message to the war room.
    """
    acquisition_id = args.get('acquisitionId')
    client.delete_file_acquisition_request(acquisition_id)
    # No exception raised -> the deletion succeeded.
    return CommandResults(readable_output=f'file acquisition {acquisition_id} deleted successfully')
"""
ALERTS
"""
def get_all_alerts_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Returns a list of alerts, all results up to limit.

    Optional source toggles (MALsource/EXDsource/IOCsource) narrow the alert
    sources; sort/sortOrder and hostName are translated into API parameters.
    Besides the alert list, registry-key / file / IP artifacts are extracted
    from the matching event types into dedicated context outputs.
    """
    source = []
    # add source type
    if args.get('MALsource'):
        source.append('mal')
    if args.get('EXDsource'):
        source.append('exd')
    if args.get('IOCsource'):
        source.append('ioc')
    if source:
        args['source'] = source
    # Map user-facing sort field names to API field names.
    sort_map = {
        'agentId': 'agent._id',
        'conditionId': 'condition._id',
        'eventAt': 'event_at',
        'alertId': '_id',
        'matchedAt': 'matched_at',
        'id': '_id',
        'reportedAt': 'reported_at'
    }
    if args.get('sort'):
        args['sort'] = f"{sort_map.get(args['sort'])}+{args.get('sortOrder', 'ascending')}"
    if args.get('hostName'):
        # Resolve the agent id from the host name.
        args['agentId'] = get_agent_id_by_host_name(client, args.get('hostName', ''))
    args['limit'] = int(args.get('limit', '50'))
    alerts = get_alerts(client, args)
    # parse each alert to a record displayed in the human readable table
    alerts_entries = [get_alert_entry(alert) for alert in alerts]
    headers_for_table = ['Alert ID', 'Reported', 'Event Type', 'Agent ID']
    md_table = tableToMarkdown(
        name='FireEye HX Get Alerts',
        t=alerts_entries,
        headers=headers_for_table
    )
    # Collect artifacts per event type for dedicated context paths.
    registry_key = []
    ips = []
    files = []
    for alert in alerts:
        if alert["event_type"] == 'regKeyEvent':
            registry_key.append({
                'Path': alert.get("event_values").get('regKeyEvent/path'),
                'Name': alert.get("event_values").get('regKeyEvent/valueName'),
                'Value': alert.get("event_values").get('regKeyEvent/value')
            })
        elif alert["event_type"] == 'fileWriteEvent':
            files.append(
                {'Name': alert.get("event_values", {}).get('fileWriteEvent/fileName'),
                 'md5': alert.get("event_values", {}).get('fileWriteEvent/md5'),
                 'Extension': alert.get("event_values", {}).get('fileWriteEvent/fileExtension'),
                 'Path': alert.get("event_values", {}).get('fileWriteEvent/fullPath')}
            )
        elif alert["event_type"] == 'ipv4NetworkEvent':
            ips.append({'Ipv4': alert.get("event_values", {}).get('ipv4NetworkEvent/remoteIP')})
    # assign_params drops empty values, so only populated artifact types appear.
    results_outputs = assign_params(FireEyeHX={"Alerts": alerts}, RegistryKey=registry_key, File=files, Ip=ips)
    return CommandResults(
        outputs_key_field="_id",
        outputs=results_outputs,
        readable_output=md_table
    )
def get_alert_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
    """
    Fetch a single alert by id and render it (plus its event details) to the war room.

    Returns the alert CommandResults and, when the alert maps to an indicator,
    an additional indicator CommandResults entry.
    """
    alert_id = int(args.get('alertId', ""))
    alert: Dict = client.get_alert_request(alert_id)["data"]
    alert_entry = get_alert_entry(alert)
    headers_for_table = ['Alert ID', 'Reported', 'Event Type', 'Agent ID']
    alert_table = tableToMarkdown(
        name=f'FireEye HX Get Alert # {alert_id}',
        t=alert_entry,
        headers=headers_for_table
    )
    event_type = alert.get('event_type')
    event_type = 'NewEvent' if not event_type else event_type
    # Split camelCase into words, e.g. 'fileWriteEvent' -> 'File Write Event'.
    # BUGFIX: use a raw string for the replacement so "\g<1>" is not treated as an
    # invalid string escape (which raises a DeprecationWarning in modern Python).
    event_type = re.sub("([a-z])([A-Z])", r"\g<1> \g<2>", event_type).title()
    event_table = tableToMarkdown(
        name=event_type,
        t=alert.get('event_values')
    )
    result = [CommandResults(
        outputs_prefix="FireEyeHX.Alerts",
        outputs_key_field="_id",
        outputs=alert,
        readable_output=f'{alert_table}\n{event_table}'
    )]
    indicator = get_indicator_command_result(alert)
    if indicator:
        result.append(indicator)
    return result
def suppress_alert_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Suppress an alert by id; returns a success message to the war room.
    """
    alert_id = int(args.get('alertId', ''))
    try:
        client.suppress_alert_request(alert_id)
    except Exception as e:
        if '404' in str(e):
            raise ValueError(f"Alert {alert_id} Not Found")
        raise ValueError(e)
    # No exception raised -> the request succeeded.
    return CommandResults(
        readable_output=f'Alert {alert_id} suppressed successfully.'
    )
"""
INDICATORS
"""
def get_indicators_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    List indicators matching the given filters, rendered as a war-room table.
    """
    # Map user-facing sort names to API field names.
    sort_map = {
        'category': 'category',
        'activeSince': 'active_since',
        'createdBy': 'created_by',
        'alerted': 'stats.alerted_agents'
    }
    # Normalize argument types in place before querying.
    if args.get('limit'):
        args['limit'] = int(args['limit'])
    if args.get('alerted'):
        args['alerted'] = args['alerted'] == 'yes'
    if args.get('sort'):
        args['sort'] = sort_map.get(args.get('sort', ''))
    # Fetch every matching indicator (up to limit).
    indicators = get_all_indicators(
        client=client,
        category=args.get('category'),
        search=args.get('searchTerm'),
        share_mode=args.get('shareMode'),
        sort=args.get('sort'),
        created_by=args.get('createdBy'),
        alerted=args.get('alerted'),
        limit=args.get('limit')
    )
    table_headers = ['OS', 'Name', 'Created By', 'Active Since', 'Category', 'Signature', 'Active Condition',
                     'Hosts With Alerts', 'Source Alerts']
    readable = tableToMarkdown(
        name=f"FireEye HX Get Indicator- {args.get('name')}",
        t=[get_indicator_entry(indicator) for indicator in indicators],
        headers=table_headers
    )
    return CommandResults(
        outputs_prefix="FireEyeHX.Indicators",
        outputs_key_field="_id",
        outputs=indicators,
        readable_output=readable
    )
def get_indicator_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
    """
    Fetch a single indicator by category and name, together with its conditions.
    """
    if not args.get("category") or not args.get("name"):
        raise ValueError("The category and name arguments are required")
    indicator = client.get_indicator_request(
        args.get('category'),
        args.get('name')
    )
    table_headers = ['OS', 'Name', 'Created By', 'Active Since', 'Category', 'Signature',
                     'Active Condition', 'Hosts With Alerts', 'Source Alerts']
    readable = tableToMarkdown(
        name=f"FireEye HX Get Indicator- {args.get('name')}",
        t=get_indicator_entry(indicator),
        headers=table_headers
    )
    indicator_result = CommandResults(
        outputs_prefix="FireEyeHX.Indicators",
        outputs_key_field="_id",
        outputs=indicator,
        readable_output=readable
    )
    # Second entry: the indicator's conditions.
    return [indicator_result, get_indicator_conditions(client, args)]
def append_conditions_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Append conditions to an indicator.

    The API returns no payload on a successful request. The `condition` argument
    is a comma-separated list; the endpoint expects one condition per line.
    """
    name = args.get('name')
    category = args.get('category')
    body = args.get('condition')
    if not (name and category and body):
        raise ValueError("All of the following arguments are required -> ['name','category','condition']")
    body = body.replace(',', '\n')  # newline-separate the conditions for the API
    response = client.append_conditions_request(name, category, body)
    readable = tableToMarkdown(name="The conditions were added successfully", t={
        'Name': name,
        'Category': category,
        'Conditions': body
    })
    return CommandResults(
        outputs_prefix="FireEyeHX.Conditions",
        outputs=response,
        readable_output=readable
    )
def create_indicator_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Create a new (empty) indicator in the given category and report its id.
    """
    response = client.new_indicator_request(args.get('category'))["data"]
    readable = tableToMarkdown(
        'FireEye HX New Indicator created successfully',
        {'ID': response.get('_id')}
    )
    return CommandResults(
        outputs_prefix="FireEyeHX.Indicators",
        outputs_key_field="_id",
        outputs=response,
        readable_output=readable
    )
"""
SEARCHES
"""
def start_search_command(client: Client, args: Dict[str, Any]) -> Tuple[CommandResults, bool, str]:
    """
    Start (or poll) an enterprise search.

    Returns a tuple (CommandResults, is_done, search_id) for the polling runner:
    is_done is True once the search stopped, reached the requested match limit,
    or has no pending hosts left.
    """
    if 'searchId' not in args:
        # First invocation: build the search body and submit it.
        list_of_args = ["agentsIds", "hostsNames", "hostSet", "hostSetName"]
        arg = oneFromList(list_of_args=list_of_args, args=args)
        if arg is False:
            raise ValueError("One of the following arguments is required -> [agentsIds, hostsNames, hostSet, hostSetName]")
        # Build the search body; the helper validates that only one host argument
        # was provided and returns a dict keyed Host_name or Hosts.
        body = organize_search_body_host(client, arg, {})
        # Exactly one of the following query arguments may be provided.
        list_of_args = ['dnsHostname', 'fileFullPath', 'fileMD5Hash', 'ipAddress', 'fieldSearchName']
        arg_for_query = oneFromList(list_of_args=list_of_args, args=args)
        if arg_for_query is False:
            raise ValueError("One of the following arguments is required ->"
                             " [dnsHostname, fileFullPath, fileMD5Hash, ipAddress, fieldSearchName]")
        # Build the query part of the request body (a list of query dicts).
        body["query"] = organize_search_body_query(arg_for_query, args)
        body["exhaustive"] = False if args.get("exhaustive") == "false" else True
        try:
            search_id = client.search_request(body)["data"]["_id"]
        except Exception as e:
            raise ValueError(e)
    if not args.get("limit"):
        args['limit'] = 1000
    # Subsequent polling invocations carry searchId in args.
    search_id = str(args.get('searchId')) if args.get('searchId') else str(search_id)
    searchInfo = client.get_search_by_id_request(search_id)["data"]
    matched = searchInfo.get('stats', {}).get('search_state', {}).get('MATCHED', 0)
    pending = searchInfo.get('stats', {}).get('search_state', {}).get('PENDING', 0)
    # Keep polling while the search is running, under the limit, and hosts are still pending.
    if searchInfo.get("state") != "STOPPED" and matched < int(args.get('limit', '')) and pending != 0:
        return CommandResults(readable_output=f"Search started,\nSearch ID: {search_id}"), False, search_id
    return CommandResults(readable_output=f"Search started,\nSearch ID: {search_id}"), True, search_id
def start_search_with_polling_command(client: Client, args: Dict[str, Any]) -> Union[CommandResults, List[CommandResults]]:
    """
    Run an enterprise search with scheduled polling until it finishes.
    """
    return run_polling_command(
        client=client,
        args=args,
        cmd='fireeye-hx-search',
        post_func=start_search_command,
        get_func=search_result_get_command,
        t='searching')
def get_search_list_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    List enterprise searches, either by explicit ids or by filter parameters.
    """
    if args.get("searchId"):
        # Explicit ids: fetch each one, newest id first.
        response = [
            client.get_search_by_id_request(search_id)["data"]
            for search_id in sorted(args.get("searchId", "").split(","), reverse=True)
        ]
    else:
        response = client.get_search_list_request(
            offset=args.get("offset") or 0,
            limit=args.get("limit") or 50,
            state=args.get("state"),
            host_set_id=args.get("hostSetId"),
            actor_username=args.get("actorUsername"),
            sort=args.get("sort")
        )["data"]["entries"]
    rows = []
    for search in response:
        host_set = None
        if search.get("host_set"):
            # Drop the url field from the table copy only.
            host_set = search["host_set"].copy()
            del host_set["url"]
        rows.append({
            "Id": search.get("_id"),
            "State": search.get("state"),
            "Host Set": host_set,
            "Created By": search.get("create_actor"),
            "Created At": search.get("create_time"),
            "Updated By": search.get("update_actor"),
            "Updated At": search.get("update_time")
        })
    headers = ["Id", "State", "Host Set", "Created By", "Created At", "Updated By", "Updated At"]
    return CommandResults(
        outputs_prefix="FireEyeHX.Search",
        outputs_key_field="_id",
        outputs=response,
        readable_output=tableToMarkdown(name="", t=rows, headers=headers)
    )
def search_stop_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Stop one or more running enterprise searches.

    Accepts a comma-separated list of ids in `searchId`; each id is reported
    individually as stopped or not found.
    """
    if not args.get("searchId"):
        # BUGFIX: replaced the garbled error message ("Search Id is must be").
        raise ValueError("Search Id is required")
    searches_ids = argToList(str(args.get("searchId")))
    responses = []
    md = "Results"
    for search_id in searches_ids:
        try:
            response = client.search_stop_request(search_id)
            md += f"\nSearch Id {search_id}: Success"
            responses.append(response["data"])
        except Exception:
            md += f"\nSearch Id {search_id}: Not Found"
    return CommandResults(
        outputs_prefix="FireEyeHX.Search",
        outputs_key_field="_id",
        outputs=responses,
        readable_output=md
    )
def search_result_get_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
    """
    Fetch results for one or more searches and render one war-room entry per host.

    When `stopSearch` is 'stop' or 'stopAndDelete', the first search id is stopped
    or deleted after fetching results, and the outcome is appended to the first
    entry's readable output.
    """
    if not args.get("searchId"):
        # BUGFIX: replaced the garbled error message ("Search Id is must be").
        raise ValueError("Search Id is required")
    searches_ids = argToList(str(args.get("searchId")))
    results: List[List[Dict]] = []
    for search_id in searches_ids:
        result = client.search_result_get_request(search_id)["data"]["entries"]
        if result:
            results.append(result)
    commandsResults: List = []
    for result in results:
        for entry in result:
            title = f"Host Id {entry.get('host', {}).get('_id')}\nHost Name {entry.get('host', {}).get('hostname')}"
            for_table = [{
                "Item Type": res.get("type"),
                "Summary": [f"**{k}:** {v}" for k, v in res.get("data", {}).items()]
            } for res in entry.get("results", [])]
            md = tableToMarkdown(
                name=title,
                t=for_table,
                headers=["Item Type", "Summary"]
            )
            commandsResults.append(CommandResults(
                outputs_prefix="FireEyeHX.Search",
                outputs_key_field="_id",
                outputs=entry,
                readable_output=md
            ))
    if 'stopSearch' in args:
        # BUGFIX: `message` was unbound (NameError) when stopSearch held neither
        # 'stop' nor 'stopAndDelete'; initialize it before the try block.
        message = ''
        try:
            if args.get('stopSearch') == 'stop':
                message = 'Failed to stop search'
                client.search_stop_request(searches_ids[0])
                message = "The search was stopped successfully"
            # no need to stop a search before deleting it.
            if args.get('stopSearch') == 'stopAndDelete':
                message = 'Failed to delete search'
                client.delete_search_request(searches_ids[0])
                message = "The search was deleted successfully"
        except Exception as e:
            demisto.debug(f'{message}\n{e}')
        # BUGFIX: guard against IndexError when no results were returned.
        if message and commandsResults:
            commandsResults[0].readable_output += f"\n\n{message}"
    return commandsResults if commandsResults else [CommandResults(readable_output="No Results")]
def search_delete_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Delete one or more enterprise searches and report per-id outcomes.
    """
    message_lines = ['Results']
    for search_id in argToList(str(args.get('searchId'))):
        try:
            client.delete_search_request(search_id)
            message_lines.append(f'Search Id {search_id}: Deleted successfully')
        except Exception as e:
            if '404' in str(e):
                message_lines.append(f'Search Id {search_id}: Not Found')
            else:
                message_lines.append(f'Search Id {search_id}: Failed to delete search')
    return CommandResults(readable_output='\n'.join(message_lines))
"""
FETCH INCIDENT
"""
def fetch_incidents(client: Client, args: Dict[str, Any]) -> List:
    """
    Fetch FireEye HX alerts as XSOAR incidents.

    Alerts are fetched in ascending reported_at order, at most `max_fetch`
    (capped at 50) per run; the newest reported_at is persisted in lastRun so
    the next run continues from where this one left off.
    """
    last_run = demisto.getLastRun()
    alerts = []  # type: List[Dict[str, str]]
    fetch_limit = min([int(args.get('max_fetch') or '50'), 50])
    args["sort"] = "reported_at+ascending"
    args["limit"] = fetch_limit
    # Checks if this is the first call to a function or not
    if last_run and last_run.get('reported_at'):
        # Design the filterQuery argument with last reported_at, and convert it to urlEncoding
        query = query_fetch(reported_at=organize_reported_at(last_run.get('reported_at')))
        demisto.debug(f'fetch-incident query -> {query}')
        args["filterQuery"] = urllib.parse.quote_plus(query)
        # Get all alerts with reported_at greater than last reported_at
        alerts = get_alerts(client, args)
    else:
        # First run: design the filterQuery argument from first_fetch, and url-encode it
        first_fetch = args.get("first_fetch") if args.get("first_fetch") else "3 days"
        query = query_fetch(first_fetch=first_fetch)
        demisto.debug(f'fetch-incident query -> {query}')
        args["filterQuery"] = urllib.parse.quote_plus(query)
        # Receive alerts from last 3 days - if they are more than 50 return the 50 older alerts
        alerts = get_alerts(client, args)
    # Results are sorted in ascending order - the last alert holds the greatest time
    reported_at = alerts[-1].get("reported_at") if alerts else None
    # Parse the alerts as the incidents
    pattern = re.compile("([a-z])([A-Z])")
    incidents = [parse_alert_to_incident(alert, pattern) for alert in alerts]
    # Keeps the last reported_at for next time
    if reported_at is not None:
        demisto.setLastRun({'reported_at': reported_at})
    return incidents
''' POLLING '''
def run_polling_command(client, args, cmd, post_func, get_func, t):
    """
    Generic scheduled-polling driver.

    Calls `post_func` to start (or re-check) the operation; while it is not done,
    re-schedules `cmd` via ScheduledCommand, and once done delegates to `get_func`
    to produce the final results.

    Args:
        client: the integration client.
        args: command arguments; the operation id is injected under the key
            defined in TABLE_POLLING_COMMANDS[t]['type'].
        cmd: the command name to re-schedule.
        post_func: starter/poller returning (CommandResults, is_ready, item_id).
        get_func: result handler invoked once the operation completes.
        t: key into TABLE_POLLING_COMMANDS (e.g. 'acquisition' / 'searching').
    """
    ScheduledCommand.raise_error_if_not_supported()
    interval_in_secs = int(args.get('interval_in_seconds', 60))
    _, is_ready, item_id = post_func(client, args)
    if not is_ready:
        type_id = TABLE_POLLING_COMMANDS[t]['type']
        # Only emit the "started" message on the first scheduling round.
        readable_output = f"{TABLE_POLLING_COMMANDS[t]['message']}{item_id}" if type_id not in args else None
        if not args.get(type_id):
            args[type_id] = item_id
        scheduled_command = ScheduledCommand(
            command=cmd,
            next_run_in_seconds=interval_in_secs,
            args=args,
            timeout_in_seconds=600)
        # result with scheduled_command only - no update to the war room
        return CommandResults(readable_output=readable_output, scheduled_command=scheduled_command)
    return get_func(client, args)
''' MAIN FUNCTION '''
def main() -> None:
    """main function, parses params and runs command functions

    Builds the FireEye HX ``Client`` from the integration parameters and
    dispatches the incoming command to its handler.

    :return: None - results are pushed via ``return_results``/``demisto``
    :rtype: None
    """
    # Commands that support scheduled (polling) execution.
    polling_commands = {
        "fireeye-hx-search": start_search_with_polling_command,
        "fireeye-hx-data-acquisition": data_acquisition_with_polling_command,
        "fireeye-hx-file-acquisition": file_acquisition_with_polling_command
    }
    # One-shot command handlers, keyed by command name.
    commands = {
        "fireeye-hx-get-host-information": get_host_information_command,
        "fireeye-hx-get-all-hosts-information": get_all_hosts_information_command,
        "fireeye-hx-host-containment": host_containment_command,
        "fireeye-hx-cancel-containment": cancel_containment_command,
        "fireeye-hx-get-host-set-information": get_host_set_information_command,
        "fireeye-hx-search": run_commands_without_polling,
        "fireeye-hx-search-list": get_search_list_command,
        "fireeye-hx-search-stop": search_stop_command,
        "fireeye-hx-search-result-get": search_result_get_command,
        "fireeye-hx-search-delete": search_delete_command,
        "fireeye-hx-append-conditions": append_conditions_command,
        "fireeye-hx-get-indicators": get_indicators_command,
        "fireeye-hx-get-indicator": get_indicator_command,
        "fireeye-hx-create-indicator": create_indicator_command,
        "fireeye-hx-data-acquisition": run_commands_without_polling,
        "fireeye-hx-delete-data-acquisition": delete_data_acquisition_command,
        "fireeye-hx-file-acquisition": run_commands_without_polling,
        "fireeye-hx-delete-file-acquisition": delete_file_acquisition_command,
        "fireeye-hx-get-data-acquisition": get_data_acquisition_command,
        "fireeye-hx-initiate-data-acquisition": initiate_data_acquisition_command,
        "fireeye-hx-get-alert": get_alert_command,
        "fireeye-hx-get-alerts": get_all_alerts_command,
        "fireeye-hx-suppress-alert": suppress_alert_command,
        "fireeye-hx-list-policy": list_policy_command,
        "fireeye-hx-list-host-set-policy": list_host_set_policy_command,
        "fireeye-hx-assign-host-set-policy": assign_host_set_policy_command,
        "fireeye-hx-delete-host-set-policy": delete_host_set_policy_command,
        "fireeye-hx-approve-containment": approve_containment_command,
        "fireeye-hx-list-containment": get_list_containment_command
    }
    params = demisto.params()
    # The credential parameter may be absent (None) when never configured;
    # fall back to an empty dict so the explicit ValueError below is raised
    # instead of an AttributeError on ``None.get``.
    credentials = params.get("userName") or {}
    user_name = credentials.get('identifier')
    password = credentials.get('password')
    if not user_name or not password:
        raise ValueError("User Name and Password are required")
    # get the service API url
    base_url = urljoin(params.get('server'), '/hx/api/v3/')
    # if your Client class inherits from BaseClient, SSL verification is
    # handled out of the box by it, just pass ``verify_certificate`` to
    # the Client constructor (read from the already-bound ``params``).
    verify_certificate = not params.get('insecure', False)
    # if your Client class inherits from BaseClient, system proxy is handled
    # out of the box by it, just pass ``proxy`` to the Client constructor
    proxy = params.get('proxy', False)
    command = demisto.command()
    args = demisto.args()
    demisto.debug(f'Command being called is {command}')
    try:
        client = Client(
            base_url=base_url,
            verify=verify_certificate,
            proxy=proxy,
            auth=(user_name, password))
        if command == 'test-module':
            # A one-alert fetch validates both connectivity and credentials.
            get_alerts(client, {"limit": 1})
            return_results('ok')
        elif command == 'fetch-incidents':
            incidents = fetch_incidents(client, params)
            demisto.incidents(incidents)
        elif args.get('polling', 'false') == 'true':
            result = polling_commands[command](client, args)
            return_results(result)
        else:
            # These commands must know their own name so the polling driver
            # can reschedule them.
            if command in ["fireeye-hx-search", "fireeye-hx-data-acquisition", "fireeye-hx-file-acquisition"]:
                args['cmd'] = command
            result = commands[command](client, args)
            return_results(result)
    # Log exceptions and return errors
    except Exception as e:
        demisto.error(traceback.format_exc())  # print the traceback
        return_error(f'Failed to execute {command} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
# Run when executed directly; the '__builtin__'/'builtins' names cover the
# case where this file is exec'd (presumably by the hosting engine) rather
# than imported or run as a script.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| demisto/content | Packs/FireEyeHX/Integrations/FireEyeHXv2/FireEyeHXv2.py | Python | mit | 87,756 |
# vim:ai:et:ff=unix:fileencoding=utf-8:sw=4:ts=4:
# conveyor/src/main/python/conveyor/platform/linux.py
#
# conveyor - Printing dispatch engine for 3D objects and their friends.
# Copyright © 2012 Matthew W. Samsonoff <matthew.samsonoff@makerbot.com>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, print_function, unicode_literals)
import os.path
# Linux-specific default filesystem locations for conveyor's configuration,
# runtime state, and helper executables.
DEFAULT_CONFIG_FILE = '/etc/conveyor.conf'
# Daemon endpoint (unix pipe address) and pid file.
DEFAULT_CONFIG_COMMON_ADDRESS = 'pipe:/var/run/conveyor/conveyord.socket'
DEFAULT_CONFIG_COMMON_PID_FILE = '/var/run/conveyor/conveyord.pid'
# avrdude executable and its configuration file (used by the makerbot driver).
DEFAULT_CONFIG_MAKERBOT_DRIVER_AVRDUDE_EXE = '/usr/bin/avrdude'
DEFAULT_CONFIG_MAKERBOT_DRIVER_AVRDUDE_CONF_FILE = '/etc/avrdude.conf'
# s3g driver printer profiles.
DEFAULT_CONFIG_MAKERBOT_DRIVER_PROFILE_DIR = '/usr/share/makerbot/s3g/profiles/'
# Miracle Grue executable and profile directory.
DEFAULT_CONFIG_MIRACLE_GRUE_EXE = '/usr/bin/miracle_grue'
DEFAULT_CONFIG_MIRACLE_GRUE_PROFILE_DIR = '/usr/share/makerbot/miraclegrue/'
# Skeinforge entry-point script and profile directory.
DEFAULT_CONFIG_SKEINFORGE_FILE = '/usr/share/makerbot/skeinforge/skeinforge_application/skeinforge.py'
DEFAULT_CONFIG_SKEINFORGE_PROFILE_DIR = '/usr/share/makerbot/skeinforge/'
# Daemon log file and the unified-mesh helper executable.
DEFAULT_CONFIG_SERVER_LOGGING_FILE = '/var/log/conveyor/conveyord.log'
DEFAULT_CONFIG_SERVER_UNIFIED_MESH_HACK_EXE = '/usr/bin/unified_mesh_hack'
| makerbot/conveyor | src/main/python/conveyor/platform/linux.py | Python | agpl-3.0 | 1,881 |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from datetime import datetime
from lxml import etree
from django.core.urlresolvers import reverse
from django.test import TestCase
# Prefer the stdlib ``json`` module; fall back to Django's bundled
# ``simplejson`` where the stdlib module is unavailable.
try:
    import json
except ImportError:
    from django.utils import simplejson as json
from django.contrib.contenttypes.models import ContentType
from agon_ratings.models import OverallRating
from django.contrib.auth import get_user_model
from django.conf import settings
from geonode.layers.models import Layer
from geonode.maps.models import Map
from geonode.maps.utils import fix_baselayers
from geonode.utils import default_map_config
from geonode.base.populate_test_data import create_models
from geonode.maps.tests_populate_maplayers import create_maplayers
from geonode.tests.utils import NotificationsTestsHelper
from geonode.maps import MapsAppConfig
from django.contrib.auth.models import Group
from geonode.base.models import License, Region
VIEWER_CONFIG = """
{
"defaultSourceType": "gx_wmssource",
"about": {
"title": "Title",
"abstract": "Abstract"
},
"sources": {
"capra": {
"url":"http://localhost:8080/geoserver/wms"
}
},
"map": {
"projection":"EPSG:900913",
"units":"m",
"maxResolution":156543.0339,
"maxExtent":[-20037508.34,-20037508.34,20037508.34,20037508.34],
"center":[-9428760.8688778,1436891.8972581],
"layers":[{
"source":"capra",
"buffer":0,
"wms":"capra",
"name":"base:nic_admin"
}],
"keywords":["saving", "keywords"],
"zoom":7
}
}
"""
class MapsTest(TestCase):
    """Tests geonode.maps app/module

    Exercises the map JSON create/update/fetch endpoints, WMC export,
    detail/metadata/remove/embed/view pages, permissions, ratings and
    batch metadata editing through the Django test client.
    """
    fixtures = ['initial_data.json', 'bobby']
    def setUp(self):
        # Admin credentials used by most tests; populate map/layer fixtures.
        self.user = 'admin'
        self.passwd = 'admin'
        create_models(type='map')
        create_models(type='layer')
        create_maplayers()
    # NOTE(review): default_abstract/default_title are not referenced inside
    # this class - confirm external use before removing.
    default_abstract = "This is a demonstration of GeoNode, an application \
for assembling and publishing web based maps. After adding layers to the map, \
use the Save Map button above to contribute your map to the GeoNode \
community."
    default_title = "GeoNode Default Map"
    # This is a valid map viewer config, based on the sample data provided
    # by andreas in issue 566. -dwins
    viewer_config = VIEWER_CONFIG
    viewer_config_alternative = """
    {
      "defaultSourceType": "gx_wmssource",
      "about": {
          "title": "Title2",
          "abstract": "Abstract2"
      },
      "sources": {
        "capra": {
          "url":"http://localhost:8080/geoserver/wms"
        }
      },
      "map": {
        "projection":"EPSG:900913",
        "units":"m",
        "maxResolution":156543.0339,
        "maxExtent":[-20037508.34,-20037508.34,20037508.34,20037508.34],
        "center":[-9428760.8688778,1436891.8972581],
        "layers":[{
          "source":"capra",
          "buffer":0,
          "wms":"capra",
          "name":"base:nic_admin"
        }],
        "zoom":7
      }
    }
    """
    # Permission payload granting admin full control; used by the ajax
    # permissions test below.
    perm_spec = {
        "users": {
            "admin": [
                "change_resourcebase",
                "change_resourcebase_permissions",
                "view_resourcebase"]},
        "groups": {}}
    def test_map_json(self):
        """PUT /maps/[id]/data -> Test updating a map's JSON configuration"""
        # Test that saving a map when not logged in gives 401
        response = self.client.put(
            reverse(
                'map_json',
                args=(
                    '1',
                )),
            data=self.viewer_config,
            content_type="text/json")
        self.assertEqual(response.status_code, 401)
        self.client.login(username=self.user, password=self.passwd)
        response = self.client.put(
            reverse(
                'map_json',
                args=(
                    '1',
                )),
            data=self.viewer_config_alternative,
            content_type="text/json")
        self.assertEqual(response.status_code, 200)
        map_obj = Map.objects.get(id=1)
        self.assertEquals(map_obj.title, "Title2")
        self.assertEquals(map_obj.abstract, "Abstract2")
        self.assertEquals(map_obj.layer_set.all().count(), 1)
    def test_map_save(self):
        """POST /maps/new/data -> Test saving a new map"""
        new_map = reverse("new_map_json")
        # Test that saving a map when not logged in gives 401
        response = self.client.post(
            new_map,
            data=self.viewer_config,
            content_type="text/json")
        self.assertEqual(response.status_code, 401)
        # Test successful new map creation
        self.client.login(username=self.user, password=self.passwd)
        response = self.client.post(
            new_map,
            data=self.viewer_config,
            content_type="text/json")
        self.assertEquals(response.status_code, 200)
        map_id = int(json.loads(response.content)['id'])
        self.client.logout()
        # We have now 9 maps and 8 layers so the next pk will be 18
        self.assertEquals(map_id, 18)
        map_obj = Map.objects.get(id=map_id)
        self.assertEquals(map_obj.title, "Title")
        self.assertEquals(map_obj.abstract, "Abstract")
        self.assertEquals(map_obj.layer_set.all().count(), 1)
        self.assertEquals(map_obj.keyword_list(), [u"keywords", u"saving"])
        self.assertNotEquals(map_obj.bbox_x0, None)
        # Test an invalid map creation request
        self.client.login(username=self.user, password=self.passwd)
        response = self.client.post(
            new_map,
            data="not a valid viewer config",
            content_type="text/json")
        self.assertEquals(response.status_code, 400)
        self.client.logout()
    def test_map_fetch(self):
        """/maps/[id]/data -> Test fetching a map in JSON"""
        map_obj = Map.objects.get(id=1)
        map_obj.set_default_permissions()
        response = self.client.get(reverse('map_json', args=(map_obj.id,)))
        self.assertEquals(response.status_code, 200)
        cfg = json.loads(response.content)
        self.assertEquals(
            cfg["about"]["abstract"],
            'GeoNode default map abstract')
        self.assertEquals(cfg["about"]["title"], 'GeoNode Default Map')
        self.assertEquals(len(cfg["map"]["layers"]), 5)
    def test_map_to_json(self):
        """ Make some assertions about the data structure produced for serialization
            to a JSON map configuration"""
        map_obj = Map.objects.get(id=1)
        cfg = map_obj.viewer_json(None, None)
        self.assertEquals(
            cfg['about']['abstract'],
            'GeoNode default map abstract')
        self.assertEquals(cfg['about']['title'], 'GeoNode Default Map')
        # Helper: a layer entry counts as WMS only when its source's ptype
        # is gxp_wmscsource.
        def is_wms_layer(x):
            if 'source' in x:
                return cfg['sources'][x['source']]['ptype'] == 'gxp_wmscsource'
            return False
        layernames = [x['name']
                      for x in cfg['map']['layers'] if is_wms_layer(x)]
        self.assertEquals(layernames, ['geonode:CA', ])
    def test_map_to_wmc(self):
        """ /maps/1/wmc -> Test map WMC export
            Make some assertions about the data structure produced
            for serialization to a Web Map Context Document
        """
        map_obj = Map.objects.get(id=1)
        map_obj.set_default_permissions()
        response = self.client.get(reverse('map_wmc', args=(map_obj.id,)))
        self.assertEquals(response.status_code, 200)
        # check specific XPaths
        wmc = etree.fromstring(response.content)
        namespace = '{http://www.opengis.net/context}'
        title = '{ns}General/{ns}Title'.format(ns=namespace)
        abstract = '{ns}General/{ns}Abstract'.format(ns=namespace)
        self.assertEquals(wmc.attrib.get('id'), '1')
        self.assertEquals(wmc.find(title).text, 'GeoNode Default Map')
        self.assertEquals(
            wmc.find(abstract).text,
            'GeoNode default map abstract')
    def test_newmap_to_json(self):
        """ Make some assertions about the data structure produced for serialization
            to a new JSON map configuration"""
        response = self.client.get(reverse('new_map_json'))
        cfg = json.loads(response.content)
        self.assertEquals(cfg['defaultSourceType'], "gxp_wmscsource")
    def test_map_details(self):
        """/maps/1 -> Test accessing the map browse view function"""
        map_obj = Map.objects.get(id=1)
        map_obj.set_default_permissions()
        response = self.client.get(reverse('map_detail', args=(map_obj.id,)))
        self.assertEquals(response.status_code, 200)
    def test_new_map_without_layers(self):
        # TODO: Should this test have asserts in it?
        self.client.get(reverse('new_map'))
    def test_new_map_with_layer(self):
        # Smoke test: the new-map page accepts a ?layer= parameter.
        layer = Layer.objects.all()[0]
        self.client.get(reverse('new_map') + '?layer=' + layer.typename)
    def test_new_map_with_empty_bbox_layer(self):
        # Smoke test: same request shape as above (no assertion made).
        layer = Layer.objects.all()[0]
        self.client.get(reverse('new_map') + '?layer=' + layer.typename)
    def test_ajax_map_permissions(self):
        """Verify that the ajax_layer_permissions view is behaving as expected
        """
        # Setup some layer names to work with
        mapid = Map.objects.all()[0].pk
        invalid_mapid = "42"
        def url(id):
            return reverse('resource_permissions', args=[id])
        # Test that an invalid layer.typename is handled for properly
        response = self.client.post(
            url(invalid_mapid),
            data=json.dumps(self.perm_spec),
            content_type="application/json")
        self.assertEquals(response.status_code, 404)
        # Test that GET returns permissions
        response = self.client.get(url(mapid))
        assert('permissions' in response.content)
        # Test that a user is required to have permissions
        # First test un-authenticated
        response = self.client.post(
            url(mapid),
            data=json.dumps(self.perm_spec),
            content_type="application/json")
        self.assertEquals(response.status_code, 401)
        # Next Test with a user that does NOT have the proper perms
        logged_in = self.client.login(username='bobby', password='bob')
        self.assertEquals(logged_in, True)
        response = self.client.post(
            url(mapid),
            data=json.dumps(self.perm_spec),
            content_type="application/json")
        self.assertEquals(response.status_code, 401)
        # Login as a user with the proper permission and test the endpoint
        logged_in = self.client.login(username='admin', password='admin')
        self.assertEquals(logged_in, True)
        response = self.client.post(
            url(mapid),
            data=json.dumps(self.perm_spec),
            content_type="application/json")
        # Test that the method returns 200
        self.assertEquals(response.status_code, 200)
        # Test that the permissions specification is applied
    def test_map_metadata(self):
        """Test that map metadata can be properly rendered
        """
        # first create a map
        # Test successful new map creation
        self.client.login(username=self.user, password=self.passwd)
        new_map = reverse('new_map_json')
        response = self.client.post(
            new_map,
            data=self.viewer_config,
            content_type="text/json")
        self.assertEquals(response.status_code, 200)
        map_id = int(json.loads(response.content)['id'])
        self.client.logout()
        url = reverse('map_metadata', args=(map_id,))
        # test unauthenticated user to modify map metadata
        response = self.client.post(url)
        self.assertEquals(response.status_code, 302)
        # test a user without metadata modify permission
        self.client.login(username='norman', password='norman')
        response = self.client.post(url)
        self.assertEquals(response.status_code, 302)
        self.client.logout()
        # Now test with a valid user using GET method
        self.client.login(username=self.user, password=self.passwd)
        response = self.client.get(url)
        self.assertEquals(response.status_code, 200)
        # Now test with a valid user using POST method
        self.client.login(username=self.user, password=self.passwd)
        response = self.client.post(url)
        self.assertEquals(response.status_code, 200)
        # TODO: only invalid mapform is tested
    def test_map_remove(self):
        """Test that map can be properly removed
        """
        # first create a map
        # Test successful new map creation
        self.client.login(username=self.user, password=self.passwd)
        new_map = reverse('new_map_json')
        response = self.client.post(
            new_map,
            data=self.viewer_config,
            content_type="text/json")
        self.assertEquals(response.status_code, 200)
        map_id = int(json.loads(response.content)['id'])
        self.client.logout()
        url = reverse('map_remove', args=(map_id,))
        # test unauthenticated user to remove map
        response = self.client.post(url)
        self.assertEquals(response.status_code, 302)
        # test a user without map removal permission
        self.client.login(username='norman', password='norman')
        response = self.client.post(url)
        self.assertEquals(response.status_code, 302)
        self.client.logout()
        # Now test with a valid user using GET method
        self.client.login(username=self.user, password=self.passwd)
        response = self.client.get(url)
        self.assertEquals(response.status_code, 200)
        # Now test with a valid user using POST method,
        # which removes map and associated layers, and redirects webpage
        response = self.client.post(url)
        self.assertEquals(response.status_code, 302)
        self.assertEquals(response['Location'], 'http://testserver/maps/')
        # After removal, map is not existent
        response = self.client.get(url)
        self.assertEquals(response.status_code, 404)
        # Prepare map object for later test that if it is completely removed
        # map_obj = Map.objects.get(id=1)
        # TODO: Also associated layers are not existent
        # self.assertEquals(map_obj.layer_set.all().count(), 0)
    def test_map_embed(self):
        """Test that map can be properly embedded
        """
        # first create a map
        # Test successful new map creation
        self.client.login(username=self.user, password=self.passwd)
        new_map = reverse('new_map_json')
        response = self.client.post(
            new_map,
            data=self.viewer_config,
            content_type="text/json")
        self.assertEquals(response.status_code, 200)
        map_id = int(json.loads(response.content)['id'])
        self.client.logout()
        url = reverse('map_embed', args=(map_id,))
        url_no_id = reverse('map_embed')
        # Now test with a map id
        self.client.login(username=self.user, password=self.passwd)
        response = self.client.get(url)
        self.assertEquals(response.status_code, 200)
        # The embedded map is exempt from X-FRAME-OPTIONS restrictions.
        if hasattr(response, 'xframe_options_exempt'):
            self.assertTrue(response.xframe_options_exempt)
        # Config equals to that of the map whose id is given
        map_obj = Map.objects.get(id=map_id)
        config_map = map_obj.viewer_json(None, None)
        response_config_dict = json.loads(response.context['config'])
        self.assertEquals(
            config_map['about']['abstract'],
            response_config_dict['about']['abstract'])
        self.assertEquals(
            config_map['about']['title'],
            response_config_dict['about']['title'])
        # Now test without a map id
        response = self.client.get(url_no_id)
        self.assertEquals(response.status_code, 200)
        # Config equals to that of the default map
        config_default = default_map_config(None)[0]
        response_config_dict = json.loads(response.context['config'])
        self.assertEquals(
            config_default['about']['abstract'],
            response_config_dict['about']['abstract'])
        self.assertEquals(
            config_default['about']['title'],
            response_config_dict['about']['title'])
    def test_map_view(self):
        """Test that map view can be properly rendered
        """
        # first create a map
        # Test successful new map creation
        self.client.login(username=self.user, password=self.passwd)
        new_map = reverse('new_map_json')
        response = self.client.post(
            new_map,
            data=self.viewer_config,
            content_type="text/json")
        self.assertEquals(response.status_code, 200)
        map_id = int(json.loads(response.content)['id'])
        self.client.logout()
        url = reverse('map_view', args=(map_id,))
        # test unauthenticated user to view map
        response = self.client.get(url)
        self.assertEquals(response.status_code, 200)
        # TODO: unauthenticated user can still access the map view
        # test a user without map view permission
        self.client.login(username='norman', password='norman')
        response = self.client.get(url)
        self.assertEquals(response.status_code, 200)
        self.client.logout()
        # TODO: the user can still access the map view without permission
        # Now test with a valid user using GET method
        self.client.login(username=self.user, password=self.passwd)
        response = self.client.get(url)
        self.assertEquals(response.status_code, 200)
        # Config equals to that of the map whose id is given
        map_obj = Map.objects.get(id=map_id)
        config_map = map_obj.viewer_json(None, None)
        response_config_dict = json.loads(response.context['config'])
        self.assertEquals(
            config_map['about']['abstract'],
            response_config_dict['about']['abstract'])
        self.assertEquals(
            config_map['about']['title'],
            response_config_dict['about']['title'])
    def test_new_map_config(self):
        """Test that new map config can be properly assigned
        """
        self.client.login(username='admin', password='admin')
        # Test successful new map creation
        m = Map()
        admin_user = get_user_model().objects.get(username='admin')
        layer_name = Layer.objects.all()[0].typename
        m.create_from_layer_list(admin_user, [layer_name], "title", "abstract")
        map_id = m.id
        url = reverse('new_map_json')
        # Test GET method with COPY
        response = self.client.get(url, {'copy': map_id})
        self.assertEquals(response.status_code, 200)
        map_obj = Map.objects.get(id=map_id)
        config_map = map_obj.viewer_json(None, None)
        response_config_dict = json.loads(response.content)
        self.assertEquals(
            config_map['map']['layers'],
            response_config_dict['map']['layers'])
        # Test GET method no COPY and no layer in params
        response = self.client.get(url)
        self.assertEquals(response.status_code, 200)
        config_default = default_map_config(None)[0]
        response_config_dict = json.loads(response.content)
        self.assertEquals(
            config_default['about']['abstract'],
            response_config_dict['about']['abstract'])
        self.assertEquals(
            config_default['about']['title'],
            response_config_dict['about']['title'])
        # Test GET method no COPY but with layer in params
        response = self.client.get(url, {'layer': layer_name})
        self.assertEquals(response.status_code, 200)
        response_dict = json.loads(response.content)
        self.assertEquals(response_dict['fromLayer'], True)
        # Test POST method without authentication
        self.client.logout()
        response = self.client.post(url, {'layer': layer_name})
        self.assertEquals(response.status_code, 401)
        # Test POST method with authentication and a layer in params
        self.client.login(username='admin', password='admin')
        response = self.client.post(url, {'layer': layer_name})
        # Should not accept the request
        self.assertEquals(response.status_code, 400)
        # Test POST method with map data in json format
        response = self.client.post(
            url,
            data=self.viewer_config,
            content_type="text/json")
        self.assertEquals(response.status_code, 200)
        map_id = int(json.loads(response.content)['id'])
        # Test methods other than GET or POST and no layer in params
        response = self.client.put(url)
        self.assertEquals(response.status_code, 405)
    def test_rating_map_remove(self):
        """Test map rating is removed on map remove
        """
        self.client.login(username=self.user, password=self.passwd)
        new_map = reverse('new_map_json')
        # Create the map
        response = self.client.post(
            new_map,
            data=self.viewer_config,
            content_type="text/json")
        map_id = int(json.loads(response.content)['id'])
        # Create the rating with the correct content type
        ctype = ContentType.objects.get(model='map')
        OverallRating.objects.create(
            category=1,
            object_id=map_id,
            content_type=ctype,
            rating=3)
        # Remove the map
        response = self.client.post(reverse('map_remove', args=(map_id,)))
        self.assertEquals(response.status_code, 302)
        # Check there are no ratings matching the removed map
        rating = OverallRating.objects.filter(category=1, object_id=map_id)
        self.assertEquals(rating.count(), 0)
    def test_fix_baselayers(self):
        """Test fix_baselayers function, used by the fix_baselayers command
        """
        map_id = 1
        map_obj = Map.objects.get(id=map_id)
        # number of base layers (we remove the local geoserver entry from the total)
        n_baselayers = len(settings.MAP_BASELAYERS) - 1
        # number of local layers
        n_locallayers = map_obj.layer_set.filter(local=True).count()
        fix_baselayers(map_id)
        self.assertEquals(map_obj.layer_set.all().count(), n_baselayers + n_locallayers)
    def test_batch_edit(self):
        """Test bulk metadata editing of several maps via the
        map_batch_metadata view (group, owner, license, regions, date,
        language and keywords)."""
        Model = Map
        view = 'map_batch_metadata'
        resources = Model.objects.all()[:3]
        ids = ','.join([str(element.pk) for element in resources])
        # test non-admin access
        self.client.login(username="bobby", password="bob")
        response = self.client.get(reverse(view, args=(ids,)))
        self.assertEquals(response.status_code, 401)
        # test group change
        group = Group.objects.first()
        self.client.login(username='admin', password='admin')
        response = self.client.post(
            reverse(view, args=(ids,)),
            data={'group': group.pk},
        )
        self.assertEquals(response.status_code, 302)
        resources = Model.objects.filter(id__in=[r.pk for r in resources])
        for resource in resources:
            self.assertEquals(resource.group, group)
        # test owner change
        owner = get_user_model().objects.first()
        response = self.client.post(
            reverse(view, args=(ids,)),
            data={'owner': owner.pk},
        )
        self.assertEquals(response.status_code, 302)
        resources = Model.objects.filter(id__in=[r.pk for r in resources])
        for resource in resources:
            self.assertEquals(resource.owner, owner)
        # test license change
        license = License.objects.first()
        response = self.client.post(
            reverse(view, args=(ids,)),
            data={'license': license.pk},
        )
        self.assertEquals(response.status_code, 302)
        resources = Model.objects.filter(id__in=[r.pk for r in resources])
        for resource in resources:
            self.assertEquals(resource.license, license)
        # test regions change
        region = Region.objects.first()
        response = self.client.post(
            reverse(view, args=(ids,)),
            data={'region': region.pk},
        )
        self.assertEquals(response.status_code, 302)
        resources = Model.objects.filter(id__in=[r.pk for r in resources])
        for resource in resources:
            self.assertTrue(region in resource.regions.all())
        # test date change
        date = datetime.now()
        response = self.client.post(
            reverse(view, args=(ids,)),
            data={'date': date},
        )
        self.assertEquals(response.status_code, 302)
        resources = Model.objects.filter(id__in=[r.pk for r in resources])
        for resource in resources:
            self.assertEquals(resource.date, date)
        # test language change
        language = 'eng'
        response = self.client.post(
            reverse(view, args=(ids,)),
            data={'language': language},
        )
        self.assertEquals(response.status_code, 302)
        resources = Model.objects.filter(id__in=[r.pk for r in resources])
        for resource in resources:
            self.assertEquals(resource.language, language)
        # test keywords change
        keywords = 'some,thing,new'
        response = self.client.post(
            reverse(view, args=(ids,)),
            data={'keywords': keywords},
        )
        self.assertEquals(response.status_code, 302)
        resources = Model.objects.filter(id__in=[r.pk for r in resources])
        for resource in resources:
            for word in resource.keywords.all():
                self.assertTrue(word.name in keywords.split(','))
class MapModerationTestCase(TestCase):
    """Checks that ADMIN_MODERATE_UPLOADS controls whether a newly created
    map is published immediately or held for moderation."""
    fixtures = ['initial_data.json', 'bobby']
    def setUp(self):
        super(MapModerationTestCase, self).setUp()
        self.user = 'admin'
        self.passwd = 'admin'
        create_models(type='layer')
        create_models(type='map')
        admin = get_user_model().objects.get(username=self.user)
        admin.email = 'test@email.com'
        admin.is_active = True
        admin.save()
        self.u = admin
    def _post_new_map(self):
        # Create a map through the JSON API and return the saved Map object.
        self.client.login(username=self.user, password=self.passwd)
        response = self.client.post(reverse('new_map_json'),
                                    data=VIEWER_CONFIG,
                                    content_type="text/json")
        self.assertEquals(response.status_code, 200)
        map_id = int(json.loads(response.content)['id'])
        return Map.objects.get(id=map_id)
    def test_moderated_upload(self):
        """
        Test if moderation flag works
        """
        with self.settings(ADMIN_MODERATE_UPLOADS=False):
            # Moderation off: maps are published as soon as they are saved.
            self.assertTrue(self._post_new_map().is_published)
        with self.settings(ADMIN_MODERATE_UPLOADS=True):
            # Moderation on: freshly created maps await approval.
            self.assertFalse(self._post_new_map().is_published)
class MapsNotificationsTestCase(NotificationsTestsHelper):
    """Verifies that map creation, update and commenting each queue the
    corresponding notification for the acting user."""
    fixtures = ['initial_data.json', 'bobby']
    def setUp(self):
        super(MapsNotificationsTestCase, self).setUp()
        self.user = 'admin'
        self.passwd = 'admin'
        create_models(type='layer')
        create_models(type='map')
        admin = get_user_model().objects.get(username=self.user)
        admin.email = 'test@email.com'
        admin.is_active = True
        admin.save()
        self.u = admin
        self.setup_notifications_for(MapsAppConfig.NOTIFICATIONS, self.u)
    def testMapsNotifications(self):
        with self.settings(NOTIFICATION_QUEUE_ALL=True):
            self.clear_notifications_queue()
            self.client.login(username=self.user, password=self.passwd)
            response = self.client.post(reverse('new_map_json'),
                                        data=VIEWER_CONFIG,
                                        content_type="text/json")
            self.assertEquals(response.status_code, 200)
            map_id = int(json.loads(response.content)['id'])
            map_obj = Map.objects.get(id=map_id)
            # Creating a map queues 'map_created'.
            self.assertTrue(self.check_notification_out('map_created', self.u))
            # Saving with a changed title queues 'map_updated'.
            map_obj.title = 'test notifications 2'
            map_obj.save()
            self.assertTrue(self.check_notification_out('map_updated', self.u))
            # Commenting on the map queues 'map_comment'.
            from dialogos.models import Comment
            map_ctype = ContentType.objects.get_for_model(map_obj)
            comment = Comment(author=self.u, name=self.u.username,
                              content_type=map_ctype, object_id=map_obj.id,
                              content_object=map_obj, comment='test comment')
            comment.save()
            self.assertTrue(self.check_notification_out('map_comment', self.u))
| jj0hns0n/geonode | geonode/maps/tests.py | Python | gpl-3.0 | 30,184 |
###########################################################
#
# Copyright (c) 2009, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ["ADSearchWdg", "ADInputWdg", 'ADCacheUserCbk']
from pyasm.web import *
from pyasm.common import Config
from tactic.ui.common import BaseRefreshWdg
from pyasm.security import Login
from pyasm.web import DivWdg, Widget, Table
from pyasm.widget import ButtonWdg, CheckboxWdg, HiddenWdg, TextWdg, ProdIconButtonWdg, IconButtonWdg, IconWdg
from pyasm.search import Search, SearchType
from pyasm.common import Environment
from tactic.ui.widget import TextBtnSetWdg
# Active-directory resources live under the TACTIC install tree.
INSTALL_DIR = Environment.get_install_dir()
BASE_DIR = "%s/src/tactic/active_directory" % INSTALL_DIR
class ADSearchWdg(BaseRefreshWdg):
    def init(self):
        # No extra initialization needed beyond the base widget's.
        pass
def get_args_keys(self):
return {
'cbjs_action': 'callback when a user is clicked',
}
def get_display(self):
web = WebContainer.get_web()
key = web.get_form_value('name')
top = DivWdg()
top.add_class('ad_search_wdg_top')
self.set_as_panel(top)
text = TextWdg("name")
text.set_value(key)
close_wdg = SpanWdg()
close_wdg.add( IconWdg("Close", IconWdg.POPUP_WIN_CLOSE) )
close_wdg.add_style("float: right")
close_wdg.add_class("hand")
# NOTE: the div we are looking for to hide on 'close' is outside of the this widget and
# is part of the parent widget
close_wdg.add_behavior({
'type': 'click_up',
'cbjs_action': '''
var ad_input_content = bvr.src_el.getParent(".ad_input_content");
spt.toggle_show_hide(ad_input_content);
'''
})
top.add( close_wdg )
top.add("Active Directory Search:<br clear='all'/> ")
table = Table()
table.add_row()
table.add_cell(text)
td = table.add_cell(self.get_search_wdg())
td.add_style("display", "")
top.add(table)
results_div = DivWdg()
top.add(results_div)
results_div.add_style("border: solid 1px #444")
results_div.add_style("margin: 10px")
results_div.add_style("padding: 5px")
#results_div.add_style("max-height: 400px")
results_div.add_style("overflow: auto")
if not key:
results_div.add("Please enter search criteria")
return top
results_div.add("Results Found ...")
users = self.find_users(key)
max_num_users = 20
if len(users) > max_num_users:
display_users = users[:max_num_users]
else:
display_users = users
for user in display_users:
user_div = DivWdg()
user_div.add_style("margin: 5px")
user_div.add_class("hand")
user_div.add_event("onmouseover", "$(this).setStyle('background','#444')")
user_div.add_event("onmouseout", "$(this).setStyle('background','#222')")
checkbox = CheckboxWdg()
user_div.add(checkbox)
display_name = user.get('display_name')
if not display_name:
display_name = "%s %s" % (user.get('first_name'), user.get('last_name'))
email = user.get('email')
login = user.get('login')
phone_number = user.get('phone_number')
user_div.add(display_name)
if email:
user_div.add(" (%s) " % email)
self.cbjs_action = self.kwargs.get('cbjs_action')
if self.cbjs_action:
user_behavior = {
'type': 'click_up',
'cbjs_action': self.cbjs_action
}
user_div.add_behavior( user_behavior )
else:
user_behavior = {
'type': 'click_up',
'cbjs_action': 'alert("Not implemented")'
}
user_div.add_behavior( user_behavior )
user_div.add_attr("spt_input_value", login)
user_div.add_attr("spt_display_value", display_name)
user_div.add_attr("spt_phone_number", phone_number)
user_div.add_attr("spt_email", email)
results_div.add(user_div)
num_users = len(users)
if num_users > max_num_users:
results_div.add("... and %s more results matched" % (num_users-max_num_users))
results_div.add("<br/>Please narrow your search")
#nav_div = DivWdg()
#num_categories = num_users / max_num_users + 1
#if num_categories > 10:
# nav_div.add("<br/>Please narrow your search")
#else:
# for i in range(0, num_categories):
# span = SpanWdg()
# span.add(i)
# span.add(" ")
# nav_div.add(span)
#results_div.add(nav_div)
if not users:
user_div = DivWdg()
user_div.add_style("margin: 5px")
user_div.add("No Results")
results_div.add(user_div)
return top
def find_users(self, key):
# find users in the current database
users = []
try:
import active_directory
has_ad = True
except ImportError:
has_ad = False
python = Config.get_value('services', 'python')
if not python:
python = 'python'
has_ad = True
if has_ad:
# look for defined domains
domains_str = Config.get_value("active_directory", "domains")
if not domains_str:
domains = [None]
else:
domains = domains_str.split("|")
print("domains: ", domains)
from subprocess import Popen, PIPE
for domain in domains:
# get the info from a separate process
if domain:
cmd = [python, "%s/ad_get_user_list.py" % BASE_DIR, '-d', domain, "-k", key]
else:
cmd = [python, "%s/ad_get_user_list.py" % BASE_DIR, "-k", key,]
output = Popen( cmd, stdout=PIPE).communicate()[0]
#import StringIO
#output = StringIO.StringIO(output)
attrs_map = {
'sAMAccountName': 'login',
'displayName': 'display_name',
'telephoneNumber': 'phone_number',
'l': 'location',
'mail': 'email'
}
import simplejson
print("outpu: ", output0)
ad_users = simplejson.loads(output)
for ad_user in ad_users:
user = {}
for ad_key, key in attrs_map.items():
user[key] = ad_user.get(ad_key)
users.append(user)
# otherwise use sthpw login table
else:
if key:
logins = Search.eval("@SOBJECT(sthpw/login['login','like','%%%s%%'])" % key)
else:
logins = Search.eval("@SOBJECT(sthpw/login)")
if not logins:
return []
for i, login in enumerate(logins):
user = {
'login': login.get_value('login'),
'display_name': login.get_value('display_name'),
'email': login.get_value('email'),
'phone_number': login.get_value('phone_number')
}
users.append(user)
# sort the users
def sort(a, b):
return cmp( a.get('display_name'), b.get('display_name') )
users.sort(cmp)
return users
def get_search_wdg(self):
filter_div = DivWdg()
filter_div.add_style("width: 100px")
buttons_list = [
{'label': 'Run Search', 'tip': 'Run search with this criteria' },
]
txt_btn_set = TextBtnSetWdg( position='', buttons=buttons_list, spacing=6, size='large', side_padding=4 )
run_search_bvr = {
'type': 'click_up',
'cbjs_action': '''
spt.app_busy.show('Search ...', 'Searching Active Directory for matching users.');
setTimeout( function() {
var top = bvr.src_el.getParent('.ad_search_wdg_top');
var values = spt.api.Utility.get_input_values(top);
spt.panel.refresh(top, values);
spt.app_busy.hide();
}, 100);
'''
}
txt_btn_set.get_btn_by_label('Run Search').add_behavior( run_search_bvr )
#filter_div.add( txt_btn_set )
div = DivWdg()
div.add_behavior(run_search_bvr)
button = ProdIconButtonWdg("Run Search")
button.add_behavior(run_search_bvr)
div.add(button)
filter_div.add(div)
return filter_div
from pyasm.widget import BaseInputWdg
class ADInputWdg(BaseInputWdg):
    """Input widget showing a read-only user name plus, for authorized
    users, a button that pops up an ADSearchWdg to pick a different user.

    option:
        groups_allowed_to_search: Python list literal of login_group codes
            that are allowed to open the AD search popup.
    """

    def get_display(self):
        top = DivWdg()
        top.add_class("ad_input_top")

        text = TextWdg(self.get_input_name())

        # get the login: either the sobject's contact or the current user
        sobject = self.get_current_sobject()
        client = sobject.get_value("contact_name")
        print("client: ", client)
        if client:
            login_sobj = Login.get_by_code(client)
        else:
            login_sobj = Environment.get_login()

        # build the display_name
        login = login_sobj.get_value("login")
        display_name = login_sobj.get_value("display_name")
        if not display_name:
            # FIX: referenced an undefined ``user`` variable (NameError);
            # fall back to the login sobject's own name columns instead
            display_name = "%s %s" % (login_sobj.get_value('first_name'), login_sobj.get_value('last_name'))
        display_name = display_name.replace('"', "'")
        print("login: ", login)

        # hidden field carries the actual login value that gets committed
        hidden = HiddenWdg(self.get_input_name())
        hidden.set_options( self.options.copy() )
        hidden.add_class("spt_ad_input")
        if login:
            hidden.set_value(login)
        top.add(hidden)

        # the visible text box only shows the display name
        if login:
            text.set_value(display_name)
        # NOTE(review): read_only applied unconditionally -- confirm an
        # empty login should not leave the field editable
        text.set_option("read_only", "true")
        text.add_class("spt_ad_display")
        top.add(text)
        top.add(" ")

        groups_str = self.get_option("groups_allowed_to_search")
        if groups_str:
            # FIX: replaced exec() of a synthesized assignment with a plain
            # eval of the configured expression (same result, less machinery).
            # SECURITY: this still evaluates widget-config text as Python;
            # the config must remain trusted.
            groups_list = eval(groups_str)
        else:
            groups_list = None

        # search is allowed for everyone unless a group whitelist is set
        allow_search = True
        if groups_list:
            allow_search = False
            login_in_group_list = Search.eval("@SOBJECT(sthpw/login_in_group['login','=','%s'])" % login)
            for login_in_group in login_in_group_list:
                group = login_in_group.get_value("login_group")
                if group in groups_list:
                    allow_search = True
                    break
            # admin can always search
            if login == 'admin':
                allow_search = True

        if allow_search:
            button = IconButtonWdg('Search for User', IconWdg.USER)
            #button = ButtonWdg()
            button.add_behavior( {
                'type': 'click_up',
                'cbjs_action': '''
                var top = bvr.src_el.getParent('.ad_input_top');
                var content = top.getElement('.ad_input_content');
                spt.toggle_show_hide(content);
                '''
            } )
            top.add(button)

            ad_top = DivWdg()
            ad_top.add_class("ad_input_content")
            ad_top.add_style("display: none")
            ad_top.add_style("position: absolute")
            ad_top.add_style("background: #222")
            ad_top.add_style("min-width: 300px")
            ad_top.add_style("border: solid 1px #000")
            ad_top.add_style("padding: 20px")

            # javascript run when a search result row is clicked.
            # FIX: read 'spt_email' -- ADSearchWdg sets the attribute as
            # spt_email, the old code read a non-existent 'spt_mail'.
            cbjs_action = '''
            var value = bvr.src_el.getAttribute('spt_input_value');
            var display_value = bvr.src_el.getAttribute('spt_display_value');
            var phone_number = bvr.src_el.getAttribute('spt_phone_number');
            var email = bvr.src_el.getAttribute('spt_email');
            var top = bvr.src_el.getParent('.ad_input_top');
            var content = top.getElement('.ad_input_content');
            var input = top.getElement('.spt_ad_input');
            var display = top.getElement('.spt_ad_display');
            input.value = value;
            display.value = display_value;
            server = TacticServerStub.get()
            server.execute_cmd("tactic.active_directory.ADCacheUserCbk", {login: value})
            spt.toggle_show_hide(content);
            '''
            ad_search_wdg = ADSearchWdg(cbjs_action=cbjs_action)
            ad_top.add(ad_search_wdg)
            top.add(ad_top)

        return top
from pyasm.command import Command
class ADCacheUserCbk(Command):
    """Server-side callback that copies a user picked from the AD search
    popup into the local sthpw/login table so TACTIC can reference it.

    kwargs:
        login: the login code of the user to cache.
    """

    def execute(self):
        # disabling for now
        print("caching user ...")

        web = WebContainer.get_web()
        login = self.kwargs.get("login")

        # NOTE(review): two-element filter ['login','%s'] relies on the
        # expression language's implicit equality; elsewhere in this module
        # the explicit three-element form ['login','=',...] is used --
        # confirm both are equivalent
        login_sobj = Search.eval("@SOBJECT(sthpw/login['login','%s'])" % login, show_retired=True)
        if login_sobj:
            # already cached (possibly retired): nothing to do
            print("login %s already exists" % login)
            return

        # cache the user: create the login row and let the AD authenticator
        # fill in the user's attributes
        try:
            from ad_authenticate import ADAuthenticate
            authenticate = ADAuthenticate()
            login_sobj = SearchType.create("sthpw/login")
            login_sobj.set_value("login", login)
            authenticate.add_user_info(login_sobj, password=None)
            login_sobj.commit()
        except Exception as e:
            # best-effort: a failure to cache is logged, not raised
            print("Error: ", str(e))
            return
| diegocortassa/TACTIC | src/tactic/active_directory/ad_search_wdg.py | Python | epl-1.0 | 13,954 |
from openpyxl.writer.charts import PieChartWriter
from openpyxl.xml.constants import CHART_NS
from openpyxl.xml.functions import safe_iterator, fromstring
import pytest
from openpyxl.tests.helper import compare_xml
from openpyxl.tests.schema import chart_schema
class TestPieChart:
def test_ctor(self, PieChart):
c = PieChart()
assert c.TYPE, "pieChart"
@pytest.fixture
def pie_chart(ws, Reference, Series, PieChart):
    """Build a PieChart over a small worksheet holding the numbers 1..4."""
    ws.title = 'Pie'
    for value in range(1, 5):
        ws.append([value])
    data = Reference(ws, (1, 1), (10, 1))
    chart = PieChart()
    chart.add_serie(Series(data, labels=data))
    return chart
class TestPieChartWriter(object):
    """Checks on the XML produced by PieChartWriter."""

    def test_write_chart(self, pie_chart):
        """check if some characteristic tags of PieChart are there"""
        cw = PieChartWriter(pie_chart)
        cw._write_chart()
        # tags that must appear somewhere in the generated tree
        tagnames = ['{%s}pieChart' % CHART_NS,
                    '{%s}varyColors' % CHART_NS
                    ]
        root = safe_iterator(cw.root)
        chart_tags = [e.tag for e in root]
        for tag in tagnames:
            assert tag in chart_tags
        # pie charts must not emit a category axis
        assert 'c:catAx' not in chart_tags

    @pytest.mark.lxml_required
    def test_serialised(self, pie_chart, datadir):
        """Check the serialised file against sample"""
        cw = PieChartWriter(pie_chart)
        xml = cw.write()
        # the output must validate against the chart schema before the
        # byte-level comparison with the reference file
        tree = fromstring(xml)
        chart_schema.assertValid(tree)
        datadir.chdir()
        with open("PieChart.xml") as expected:
            diff = compare_xml(xml, expected.read())
            assert diff is None, diff
| Hitachi-Data-Systems/org-chart-builder | openpyxl/charts/tests/test_pie.py | Python | apache-2.0 | 1,630 |
#!/usr/bin/python
# TODO: issues with new oauth2 stuff. Keep using older version of Python for now.
# #!/usr/bin/env python
from participantCollection import ParticipantCollection
import string
import re
import datetime
import pyperclip
# Edit Me!
# Remember, this is during signup, so current month is not March, it's February.
# --- month that is wrapping up (signups happen during its final week) ---
currentMonthTotalDays = 29
currentMonthURL = "https://www.reddit.com/r/pornfree/comments/ex6nis/stay_clean_february_this_thread_updated_daily/"
currentMonthIndex = datetime.date.today().month
currentMonthPenultimateDayIndex = currentMonthTotalDays - 1
currentMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[currentMonthIndex]
# --- month the challenge itself is for (wraps December -> January) ---
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[nextMonthIndex]
# NOTE: string.upper() only exists on Python 2; this script targets py2
# (see the print statements at the bottom of the file)
uppercaseMonth = string.upper(nextMonthName)
currentDayOfMonthIndex = datetime.date.today().day
currentDayOfMonthName = {1: 'first', 2: 'second', 3: 'third', 4: 'fourth', 5: 'fifth', 6: 'sixth', 7: 'seventh', 8: 'eighth', 9: 'ninth', 10: 'tenth', 11: 'eleventh', 12: 'twelfth', 13: 'thirteenth', 14: 'fourteenth', 15: 'fifteenth', 16: 'sixteenth', 17: 'seventeenth', 18: 'eighteenth', 19: 'nineteenth', 20: 'twentieth', 21: 'twenty-first', 22: 'twenty-second', 23: 'twenty-third', 24: 'twenty-fourth', 25: 'twenty-fifth', 26: 'twenty-sixth', 27: 'twenty-seventh', 28: 'twenty-eighth', 29: 'twenty-ninth', 30: 'thirtieth', 31: 'thirty-first'}[currentDayOfMonthIndex]
currentDayOfWeekName = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}[datetime.date.today().weekday()]
# TODO: testing
# currentDayOfMonthIndex = 28
# participant list persisted by the ParticipantCollection module
participants = ParticipantCollection()
initialNumber = participants.size()
def templateForParticipants():
    """Render the markdown list of everyone who has signed up so far."""
    chunks = ["Here are the **INITIAL_NUMBER participants** who have already signed up:\n\n"]
    for participant in participants.participants:
        chunks.append("/u/" + participant.name)
        chunks.append("\n\n")
    return "".join(chunks)
def templateForTooEarly():
    """Placeholder shown before the signup window opens."""
    openingDay = str(currentMonthTotalDays - 6)
    return "(Too early. Come back on CURRENT_MONTH_NAME " + openingDay + ")\n"
def templateForFirstSignupDay():
    """Post body for the day the signup thread opens."""
    parts = [
        "STAY CLEAN UPPERCASE_MONTH! Sign up here! (CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX)\n",
        "Hey everybody, we had a great turnout for [Stay Clean CURRENT_MONTH_NAME](CURRENT_MONTH_URL) - let's see if we can knock it out of the park for NEXT_MONTH_NAME. Have you been clean for the month of CURRENT_MONTH_NAME? Great! Join us here, and let's keep our streak going. Did you slip in CURRENT_MONTH_NAME? Then NEXT_MONTH_NAME is your month to shine, and we will gladly fight the good fight along with you. Did you miss out on the CURRENT_MONTH_NAME challenge? Well then here is your opportunity to join us.\n",
        "\n",
        "If you would like to be included in this challenge, please post a brief comment to this thread, and I will include you. After midnight, NEXT_MONTH_NAME 1, the sign up window will close, and the challenge will begin.",
    ]
    return "".join(parts)
def templateForMiddleSignupDays():
    """Post body for the days between the first and the last signup day."""
    parts = [
        "STAY CLEAN UPPERCASE_MONTH! Sign up here! (CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX)\n",
        "Hey everybody, so far **INITIAL_NUMBER participants** have signed up. Have you been clean for **[the month of CURRENT_MONTH_NAME](CURRENT_MONTH_URL)**? Great! Join us here, and let's keep our streak going. Did you slip in CURRENT_MONTH_NAME? Then NEXT_MONTH_NAME is your month to shine, and we will gladly fight the good fight along with you. Did you miss out on the CURRENT_MONTH_NAME challenge? Well then here is your opportunity to join us.\n",
        "\n",
        "If you would like to be included in this challenge, please post a brief comment to this thread (if you haven't already done so on an earlier signup thread), and I will include you. After midnight, NEXT_MONTH_NAME 1, the sign up window will close, and the challenge will begin.\n",
        "\n",
        templateForParticipants(),
    ]
    return "".join(parts)
def templateForLastSignupDay():
    """Post body for the final signup day."""
    parts = [
        "LAST CHANCE TO SIGN UP FOR STAY CLEAN UPPERCASE_MONTH! Sign up here!\n",
        "The Stay Clean NEXT_MONTH_NAME challenge **begins tomorrow**! So far, we have **INITIAL_NUMBER participants** signed up. If you would like to be included in the challenge, please post a brief comment to this thread (if you haven't already done so on an earlier signup thread), and we will include you. After midnight tonight, we will not be accepting any more participants. I will create the official update post tomorrow.\n",
        "\n",
        templateForParticipants(),
    ]
    return "".join(parts)
def templateToUse():
    """Pick today's template based on how close we are to month end."""
    lastDay = currentMonthTotalDays
    today = currentDayOfMonthIndex
    if today <= lastDay - 7:
        return templateForTooEarly()
    if today == lastDay - 6:
        return templateForFirstSignupDay()
    if lastDay - 5 <= today <= lastDay - 1:
        return templateForMiddleSignupDays()
    if today == lastDay:
        return templateForLastSignupDay()
def stringToPrint():
    """Expand every placeholder in today's template and return the post."""
    replacements = (
        ('INITIAL_NUMBER', str(initialNumber)),
        ('CURRENT_MONTH_INDEX', str(currentMonthIndex)),
        ('CURRENT_MONTH_TOTAL_DAYS', str(currentMonthTotalDays)),
        ('CURRENT_MONTH_PENULTIMATE_DAY_INDEX', str(currentMonthPenultimateDayIndex)),
        ('CURRENT_MONTH_NAME', currentMonthName),
        ('CURRENT_MONTH_URL', currentMonthURL),
        ('NEXT_MONTH_INDEX', str(nextMonthIndex)),
        ('NEXT_MONTH_NAME', nextMonthName),
        ('CURRENT_DAY_OF_MONTH_INDEX', str(currentDayOfMonthIndex)),
        ('CURRENT_DAY_OF_MONTH_NAME', currentDayOfMonthName),
        ('CURRENT_DAY_OF_WEEK_NAME', currentDayOfWeekName),
        ('UPPERCASE_MONTH', uppercaseMonth),
    )
    answer = templateToUse()
    # same substitution order as the original re.sub chain; the placeholder
    # names contain no regex metacharacters
    for placeholder, value in replacements:
        answer = re.sub(placeholder, value, answer)
    return answer
# Print today's post between markers and put it on the clipboard so it can
# be pasted straight into reddit.
outputString = stringToPrint()
# Python 2 print statements -- this script deliberately targets py2 (see the
# interpreter note at the top of the file).
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString)
| foobarbazblarg/stayclean | stayclean-2020-march/display-during-signup.py | Python | mit | 6,945 |
import discord
from discord.ext import commands
import asyncio
import requests
from bs4 import BeautifulSoup
from urllib import parse
from urllib.parse import parse_qs, quote_plus
from .utils.chat_formatting import pagify, box
from urllib.request import Request, urlopen
import traceback
import inspect
import textwrap
from contextlib import redirect_stdout
import io
import aiohttp
from lxml import etree
from mtranslate import translate
import random
# FIX: resolved an unmerged conflict (<<<<<<< HEAD ... ======= ... >>>>>>>)
# that made this module unparseable.  The HEAD side -- the two imports and
# the language table below -- is kept; the other side of the conflict was
# empty.
from cogs.utils import checks
import unicodedata

# ISO 639-1 code -> language name, as accepted by the translate command.
codes = {
    'ab': 'Abkhaz', 'aa': 'Afar', 'af': 'Afrikaans', 'ak': 'Akan',
    'sq': 'Albanian', 'am': 'Amharic', 'ar': 'Arabic', 'an': 'Aragonese',
    'hy': 'Armenian', 'as': 'Assamese', 'av': 'Avaric', 'ae': 'Avestan',
    'ay': 'Aymara', 'az': 'Azerbaijani', 'bm': 'Bambara', 'ba': 'Bashkir',
    'eu': 'Basque', 'be': 'Belarusian', 'bn': 'Bengali', 'bh': 'Bihari',
    'bi': 'Bislama', 'bs': 'Bosnian', 'br': 'Breton', 'bg': 'Bulgarian',
    'my': 'Burmese', 'ca': 'Catalan', 'ch': 'Chamorro', 'ce': 'Chechen',
    'ny': 'Nyanja', 'zh': 'Chinese', 'cv': 'Chuvash', 'kw': 'Cornish',
    'co': 'Corsican', 'cr': 'Cree', 'hr': 'Croatian', 'cs': 'Czech',
    'da': 'Danish', 'dv': 'Divehi', 'nl': 'Dutch', 'dz': 'Dzongkha',
    'en': 'English', 'eo': 'Esperanto', 'et': 'Estonian', 'ee': 'Ewe',
    'fo': 'Faroese', 'fj': 'Fijian', 'fi': 'Finnish', 'fr': 'French',
    'ff': 'Fula', 'gl': 'Galician', 'ka': 'Georgian', 'de': 'German',
    'el': 'Greek', 'gn': 'Guarani', 'gu': 'Gujarati', 'ht': 'Haitian',
    'ha': 'Hausa', 'he': 'Hebrew', 'hz': 'Herero', 'hi': 'Hindi',
    'ho': 'Hiri-Motu', 'hu': 'Hungarian', 'ia': 'Interlingua', 'id': 'Indonesian',
    'ie': 'Interlingue', 'ga': 'Irish', 'ig': 'Igbo', 'ik': 'Inupiaq',
    'io': 'Ido', 'is': 'Icelandic', 'it': 'Italian', 'iu': 'Inuktitut',
    'ja': 'Japanese', 'jv': 'Javanese', 'kl': 'Kalaallisut', 'kn': 'Kannada',
    'kr': 'Kanuri', 'ks': 'Kashmiri', 'kk': 'Kazakh', 'km': 'Khmer',
    'ki': 'Kikuyu', 'rw': 'Kinyarwanda', 'ky': 'Kyrgyz', 'kv': 'Komi',
    'kg': 'Kongo', 'ko': 'Korean', 'ku': 'Kurdish', 'kj': 'Kwanyama',
    'la': 'Latin', 'lb': 'Luxembourgish', 'lg': 'Luganda', 'li': 'Limburgish',
    'ln': 'Lingala', 'lo': 'Lao', 'lt': 'Lithuanian', 'lu': 'Luba-Katanga',
    'lv': 'Latvian', 'gv': 'Manx', 'mk': 'Macedonian', 'mg': 'Malagasy',
    'ms': 'Malay', 'ml': 'Malayalam', 'mt': 'Maltese', 'mi': 'Māori',
    'mr': 'Marathi', 'mh': 'Marshallese', 'mn': 'Mongolian', 'na': 'Nauru',
    'nv': 'Navajo', 'nb': 'Norwegian Bokmål', 'nd': 'North-Ndebele', 'ne': 'Nepali',
    'ng': 'Ndonga', 'nn': 'Norwegian-Nynorsk', 'no': 'Norwegian', 'ii': 'Nuosu',
    'nr': 'South-Ndebele', 'oc': 'Occitan', 'oj': 'Ojibwe', 'cu': 'Old-Church-Slavonic',
    'om': 'Oromo', 'or': 'Oriya', 'os': 'Ossetian', 'pa': 'Panjabi',
    'pi': 'Pāli', 'fa': 'Persian', 'pl': 'Polish', 'ps': 'Pashto',
    'pt': 'Portuguese', 'qu': 'Quechua', 'rm': 'Romansh', 'rn': 'Kirundi',
    'ro': 'Romanian', 'ru': 'Russian', 'sa': 'Sanskrit', 'sc': 'Sardinian',
    'sd': 'Sindhi', 'se': 'Northern-Sami', 'sm': 'Samoan', 'sg': 'Sango',
    'sr': 'Serbian', 'gd': 'Scottish-Gaelic', 'sn': 'Shona', 'si': 'Sinhala',
    'sk': 'Slovak', 'sl': 'Slovene', 'so': 'Somali', 'st': 'Southern-Sotho',
    'es': 'Spanish', 'su': 'Sundanese', 'sw': 'Swahili', 'ss': 'Swati',
    'sv': 'Swedish', 'ta': 'Tamil', 'te': 'Telugu', 'tg': 'Tajik',
    'th': 'Thai', 'ti': 'Tigrinya', 'bo': 'Tibetan', 'tk': 'Turkmen',
    'tl': 'Tagalog', 'tn': 'Tswana', 'to': 'Tonga', 'tr': 'Turkish',
    'ts': 'Tsonga', 'tt': 'Tatar', 'tw': 'Twi', 'ty': 'Tahitian',
    'ug': 'Uighur', 'uk': 'Ukrainian', 'ur': 'Urdu', 'uz': 'Uzbek',
    've': 'Venda', 'vi': 'Vietnamese', 'vo': 'Volapuk', 'wa': 'Walloon',
    'cy': 'Welsh', 'wo': 'Wolof', 'fy': 'Western-Frisian', 'xh': 'Xhosa',
    'yi': 'Yiddish', 'yo': 'Yoruba', 'za': 'Zhuang', 'zu': 'Zulu',
}
class Utility2:
def __init__(self, bot):
    self.bot = bot
    # presumably the last value produced by an eval-style debug command
    # defined later in this cog -- TODO confirm
    self._last_result = None
    # presumably channels with an active interactive session -- TODO confirm
    self.sessions = set()
<<<<<<< HEAD
@checks.is_owner()
@commands.command(pass_context=True)
async def invitelink(self, ctx):
    # Post the bot's OAuth2 invite URL.
    # FIX: with pass_context=True discord.py prepends a Context argument,
    # which the old signature (self only) could not accept.
    data = None
    try:
        data = await self.bot.application_info()
    except Exception as e:
        print("Couldn't retrieve invite link.Error: {}".format(e))
        # FIX: bail out -- the old code fell through and crashed on data.id
        return
    await self.bot.say(discord.utils.oauth_url(data.id))
@commands.command(pass_context=True, aliases=['googlecalc', 'gcal', 'calc'])
async def gcalc(self, ctx,*, query):
    """Searches google and gives you top result."""
    await self.bot.type()
    try:
        card, entries = await self.get_google_entries(query)
    except RuntimeError as e:
        await self.bot.say(str(e))
    else:
        if card:
            value = '\n'.join(entries[:3])
            if value:
                # the calculator card carries its own result; anything else
                # gets the top search hits attached
                if card.title != 'Calculator':
                    card.add_field(name='Search Results', value=value, inline=False)
            await self.bot.say(embed=card)
            # FIX: asyncio.sleep(2) was not awaited, so the coroutine was
            # created and discarded and the pause never happened
            await asyncio.sleep(2)
            await self.bot.delete_message(ctx.message)
            return
        await self.bot.say("Error: could not calculate expression")
        await asyncio.sleep(2)
        # find the caller's most recent message so it can be cleaned up
        # along with the command invocation
        message = None
        async for m in self.bot.logs_from(ctx.message.channel, limit=2):
            if m.author.id == ctx.message.author.id:
                message = m
                break
        await self.bot.delete_message(ctx.message)
        # FIX: guard against no match / the match being the invocation
        # itself (the old code could NameError or double-delete)
        if message is not None and message.id != ctx.message.id:
            await self.bot.delete_message(message)
        return
@checks.is_owner()
@commands.command(pass_context=True)
async def copyembed(self, ctx):
    # Repost the embed of the message directly above the command as a code
    # block (the repr of the embed data).
    channel = ctx.message.channel
    messages = []
    # limit=2: [0] is the command invocation, [1] is the target message
    async for m in self.bot.logs_from(channel, limit=2):
        messages.append(m)
    message = messages[1]
    # NOTE(review): raises IndexError when the channel has fewer than two
    # messages or the target has no embeds -- confirm this is acceptable
    # for an owner-only tool
    await self.bot.say('```'+str(message.embeds[0])+'```')
@checks.is_owner()
@commands.command(pass_context=True)
async def edit(self, ctx, *msg):
    '''edit your previous message
    works up to 20 messages ago'''
    # join the raw arguments back into the replacement text
    msg = ' '.join(msg)
    channel = ctx.message.channel
    # use the 2nd last message because the last message would be the command
    messages = []
    async for m in self.bot.logs_from(channel, limit=20):
        messages.append(m)
    message = None
    for m in messages[1:]:
        if m.author.id == ctx.message.author.id:
            message = m
            break
    # FIX: guard against no own message in range (the old code raised
    # NameError below)
    if message is None:
        await self.bot.delete_message(ctx.message)
        return
    # FIX: the old guard was ``if msg == None`` which could never trigger
    # (msg is always a str after join); treat *no arguments* as "keep the
    # original text".  Also dropped the leftover ``print('{}')`` debug line.
    if not msg:
        msg = message.content
    # '{}' in the new text is replaced by the old content
    msg = msg.replace('{}', message.content)
    await self.bot.delete_message(ctx.message)
    await self.bot.edit_message(message, new_content=msg)
@checks.is_owner()
@commands.command(pass_context=True)
async def replace(self, ctx, old, *newphrase):
    '''replace one phrase to another in your previous message
    works up to 20 messages ago'''
    # join the remaining arguments into the replacement text
    new = list(newphrase)
    new = ' '.join(new)
    channel = ctx.message.channel
    # use the 2nd last message because the last message would be the command
    messages = []
    async for m in self.bot.logs_from(channel, limit=20):
        messages.append(m)
    # scan past the invocation for the caller's most recent message
    for m in messages[1:]:
        if m.author.id == ctx.message.author.id :
            message = m
            break
    # NOTE(review): if no own message is found within 20, ``message`` is
    # unbound and the next line raises NameError -- confirm intended
    msg = message.content.replace(old, new)
    await self.bot.delete_message(ctx.message)
    await self.bot.edit_message(message, new_content=msg)
@checks.is_owner()
@commands.command(pass_context=True)
async def reverse(self, ctx):
    '''reverse your previous message
    works up to 20 messages ago'''
    channel = ctx.message.channel
    # use the 2nd last message because the last message would be the command
    messages = []
    async for m in self.bot.logs_from(channel, limit=20):
        messages.append(m)
    for m in messages[1:]:
        # NOTE(review): hard-coded author id, unlike edit/replace which use
        # ctx.message.author.id -- presumably the owner's user id; confirm
        if m.author.id == '222925389641547776':
            message = m
            break
    await self.bot.delete_message(ctx.message)
    # rewrite the found message with its content reversed
    await self.bot.edit_message(message, new_content=message.content[::-1])
@checks.is_owner()
@commands.command(pass_context=True)
async def merge(self, ctx, msgs:int, join_with='\n'):
    # Merge the caller's last ``msgs`` messages into a single message,
    # joined with ``join_with`` (newline by default).
    if msgs>10:
        # hard cap on how many messages can be merged
        msgs = 10
    elif msgs < 2:
        msg = await self.bot.say('can only merge 2 or more messages')
        await asyncio.sleep(2)
        await self.bot.delete_message(msg)
        return
    channel = ctx.message.channel
    messages = []
    await self.bot.delete_message(ctx.message)
    n = 0
    # scan back far enough to find ``msgs`` own messages among others
    async for m in self.bot.logs_from(channel, limit=2*msgs+50):
        if n < msgs:
            pass
        else:
            break
        if m.author.id == ctx.message.author.id:
            messages.append(m)
            n += 1
    # logs come newest-first; reverse to rebuild chronological order
    pastmsgs = []
    for m in list(reversed(messages)):
        pastmsgs.append(m.content)
    newmsg = join_with.join(pastmsgs)
    # delete everything but the newest own message, then rewrite it with
    # the merged text
    for m in messages[1:]:
        await self.bot.delete_message(m)
    await self.bot.edit_message(messages[0], new_content=newmsg)
@checks.is_owner()
@commands.command(pass_context=True)
async def findcmd(self, ctx, command):
    # Report which cog class and extension module define ``command``.
    cog = ''
    for cogclass in self.bot.cogs:
        for cmd in dir(self.bot.cogs[cogclass]):
            if cmd.lower() == command.lower():
                # locate the extension module whose namespace holds the class
                for cog in self.bot.extensions:
                    if cogclass in dir(self.bot.extensions[cog]):
                        break
                await self.bot.say("Command `{}` is in class `{}` in cog `{}`".format(command, cogclass, cog))
                return
    await self.bot.say("Couldn't find command `{}`".format(command))
@commands.command(pass_context=True)
async def urb(self,ctx, *, search_terms : str, definition_number : int=1):
    """Urban Dictionary search
    Definition number must be between 1 and 10"""
    # echo the query by rewriting the invoking message
    await self.bot.edit_message(ctx.message, new_content=search_terms + ':')
    def encode(s):
        return quote_plus(s, encoding='utf-8', errors='replace')
    # definition_number is just there to show up in the help
    # all this mess is to avoid forcing double quotes on the user
    search_terms = search_terms.split(" ")
    try:
        if len(search_terms) > 1:
            # a trailing number selects which definition to show (1-based)
            pos = int(search_terms[-1]) - 1
            search_terms = search_terms[:-1]
        else:
            pos = 0
        if pos not in range(0, 11):  # API only provides the
            pos = 0                  # top 10 definitions
    except ValueError:
        pos = 0
    search_terms = "+".join([encode(s) for s in search_terms])
    url = "http://api.urbandictionary.com/v0/define?term=" + search_terms
    try:
        async with aiohttp.get(url) as r:
            result = await r.json()
            if result["list"]:
                definition = result['list'][pos]['definition']
                example = result['list'][pos]['example']
                defs = len(result['list'])
                msg = ("**Definition #{} out of {}:**\n{}\n\n"
                       "**Example:**\n{}".format(pos+1, defs, definition,
                                                 example))
                # split the pagified text back into individual lines
                msg = pagify(msg, ["\n"])
                pages = []
                for page in msg:
                    x = page.split('\n')
                    pages.extend(x)
                em = discord.Embed(color=discord.Color.blue())
                # em = discord.Embed(color=discord.Color(0xE86222))
                em.set_author(name="Urban Dictionary", icon_url='http://i.imgur.com/6nJnuM4.png', url='http://www.urbandictionary.com/')
                # rebuild the text as embed fields: a '**...**' line starts
                # a new field, other lines accumulate into the current one
                n = 0
                prevn = n
                lastfieldname = ''
                lastfieldval = ''
                for x in pages:
                    if x.startswith('**'):
                        lastfieldname = x.replace('**','')
                        # 'lol' is a placeholder; the real value is written
                        # by set_field_at below
                        em.add_field(name=lastfieldname, value='lol')
                        n += 1
                    else:
                        if n == prevn:
                            lastfieldval += x
                            lastfieldval +='\n'
                        else:
                            prevn = n
                            lastfieldval = x
                    # keep the most recent field's value up to date
                    em.set_field_at(n-1, name=lastfieldname, value=lastfieldval)
                await self.bot.say(embed=em)
            else:
                await self.bot.say("Your search terms gave no results.")
    except IndexError:
        await self.bot.say("There is no definition #{}".format(pos+1))
    except:
        # NOTE(review): bare except hides real failures -- confirm intended
        await self.bot.say("Error.")
@checks.admin_or_permissions()
@commands.command(aliases=['nick'], pass_context=True, no_pm=True)
async def nickname(self, ctx, *, nick):
    """Change your nickname on a server."""
    # remove the invoking message first so the command stays invisible
    await self.bot.delete_message(ctx.message)
    try:
        await self.bot.change_nickname(ctx.message.author, nick)
        await self.bot.say('Changed nickname to: `{}`'.format(nick), delete_after=5)
    except:
        # e.g. missing permissions or role hierarchy blocks the change
        await self.bot.say('Unable to change nickname.', delete_after=5)
@checks.is_owner()
@commands.command(pass_context=True)
async def raw(self, ctx, ID, chan : discord.Channel=None):
    """Get the raw content of someones message!"""
    # FIX: the annotation was ``discord.channel`` (the module), which is
    # not a usable converter; use the Channel class like quote() does.
    channel = chan or ctx.message.channel
    await self.bot.delete_message(ctx.message)
    msg = None
    async for m in self.bot.logs_from(channel, limit=1000):
        if m.id == ID:
            msg = m
            break
    # FIX: guard against the message not being found (the old code crashed
    # with AttributeError on msg.content)
    if msg is None:
        await self.bot.say('Could not find the message.')
        return
    # escape markdown and angle brackets so the content displays verbatim
    out = msg.content.replace('*','\\*').replace('`','\\`').replace('~~','\\~~').replace('_','\\_').replace('<','\\<').replace('>','\\>')
    try:
        await self.bot.say(out)
    except:
        await self.bot.say('Message too long.')
@commands.group(pass_context=True, aliases=['t'], invoke_without_command=True)
async def translate(self, ctx, lang, *, text):
    """Translate text! Do .translate langs to get available languages!"""
    # inside the body the name ``translate`` resolves to the module-level
    # mtranslate.translate import, not this method
    if lang in codes:
        return await self.bot.say('```{}```'.format(translate(text, lang)))
    # also accept full language names (e.g. "french") by inverting the table
    lang = dict(zip(codes.values(),codes.keys())).get(lang.lower().title())
    if lang:
        await self.bot.say('```{}```'.format(translate(text, lang)))
    else:
        await self.bot.say('```That is not an available language.```')
@translate.command(pass_context=True, name='langs')
async def _get(self, ctx):
    # Subcommand of translate: list every supported language name.
    em = discord.Embed(color=discord.Color.blue(),
                       title='Available Languages',
                       description=', '.join(codes.values()))
    await self.bot.say(embed=em)
@checks.mod_or_permissions()
@commands.command(pass_context=True)
async def charinfo(self, ctx, *, characters: str):
    """Shows you information about a number of characters."""
    if len(characters) > 15:
        await self.bot.say('Too many characters ({}/15)'.format(len(characters)))
        return
    # per character: zero-padded hex codepoint, unicode name, the char
    # itself, plus a fileformat.info reference link
    fmt = '`\\U{0:>08}`: {1} - {2} \N{EM DASH} <http://www.fileformat.info/info/unicode/char/{0}>'
    def to_string(c):
        digit = format(ord(c), 'x')
        name = unicodedata.name(c, 'Name not found.')
        return fmt.format(digit, name, c)
    await self.bot.say('\n'.join(map(to_string, characters)))
@checks.is_owner()
@commands.command(pass_context=True)
async def quote(self, ctx, id : str, chan : discord.Channel=None):
    """Quote someone's message by ID"""
    channel = chan or ctx.message.channel
    await self.bot.delete_message(ctx.message)
    msg = None
    # linear scan of recent history for the requested message id
    async for message in self.bot.logs_from(channel, limit=1000):
        if message.id == id:
            msg = message
            break
    if msg is None:
        await self.bot.say('Could not find the message.')
        return
    auth = msg.author
    channel = msg.channel
    ts = msg.timestamp
    # repost as an embed attributed to the original author and channel
    em = discord.Embed(color=0x00FFFF,description=msg.clean_content,timestamp=ts)
    em.set_author(name=str(auth),icon_url=auth.avatar_url or auth.default_avatar_url)
    em.set_footer(text='#'+channel.name)
    await self.bot.say(embed=em)
@commands.command(pass_context=True, aliases=['yt', 'vid', 'video'])
async def youtube(self, ctx, *, msg):
    """Search for videos on YouTube."""
    search = parse.quote(msg)
    # NOTE(review): requests is synchronous and blocks the event loop while
    # the page downloads -- consider aiohttp; confirm this is acceptable
    response = requests.get("https://www.youtube.com/results?search_query={}".format(search)).text
    result = BeautifulSoup(response, "lxml")
    # the first result tile's href is taken as the top match
    url="**Result:**\nhttps://www.youtube.com{}".format(result.find_all(attrs={'class': 'yt-uix-tile-link'})[0].get('href'))
    await self.bot.say(url)
@checks.is_owner()
@commands.command(pass_context=True,description='Do .embed to see how to use it.')
async def embed(self, ctx, *, msg: str = None):
    '''Embed complex rich embeds as the bot.'''
    # Input format: '|'-separated key=value parts, e.g.
    #   title=Hi | desc=text | color=#ff0000 | field=name=A value=B
    try:
        if msg:
            ptext = title = description = image = thumbnail = color = footer = author = None
            timestamp = discord.Embed.Empty
            def_color = False
            embed_values = msg.split('|')
            # First pass: collect the scalar embed attributes.
            for i in embed_values:
                if i.strip().lower().startswith('ptext='):
                    # 'everyone'/'here' are expanded into real mentions.
                    if i.strip()[6:].strip() == 'everyone':
                        ptext = '@everyone'
                    elif i.strip()[6:].strip() == 'here':
                        ptext = '@here'
                    else:
                        ptext = i.strip()[6:].strip()
                elif i.strip().lower().startswith('title='):
                    title = i.strip()[6:].strip()
                elif i.strip().lower().startswith('description='):
                    description = i.strip()[12:].strip()
                elif i.strip().lower().startswith('desc='):
                    description = i.strip()[5:].strip()
                elif i.strip().lower().startswith('image='):
                    image = i.strip()[6:].strip()
                elif i.strip().lower().startswith('thumbnail='):
                    thumbnail = i.strip()[10:].strip()
                elif i.strip().lower().startswith('colour='):
                    color = i.strip()[7:].strip()
                elif i.strip().lower().startswith('color='):
                    color = i.strip()[6:].strip()
                elif i.strip().lower().startswith('footer='):
                    footer = i.strip()[7:].strip()
                elif i.strip().lower().startswith('author='):
                    author = i.strip()[7:].strip()
                elif i.strip().lower().startswith('timestamp'):
                    timestamp = ctx.message.timestamp
            # Normalize '#rrggbb' / 'rrggbb' into a '0x'-prefixed hex string.
            if color:
                if color.startswith('#'):
                    color = color[1:]
                if not color.startswith('0x'):
                    color = '0x' + color
            # No recognised keys at all: treat the whole text as a plain
            # description-only embed.
            if ptext is title is description is image is thumbnail is color is footer is author is None and 'field=' not in msg:
                await self.bot.delete_message(ctx.message)
                return await self.bot.send_message(ctx.message.channel, content=None,
                                                   embed=discord.Embed(description=msg))
            if color:
                em = discord.Embed(timestamp=timestamp, title=title, description=description, color=int(color, 16))
            else:
                em = discord.Embed(timestamp=timestamp, title=title, description=description)
            # Second pass: fields of the form field=name=X value=Y [inline=no].
            for i in embed_values:
                if i.strip().lower().startswith('field='):
                    field_inline = True
                    # NOTE(review): lstrip strips a *character set*, not a
                    # prefix — 'field=' here also eats leading f/i/e/l/d/=
                    # characters from the name; confirm intended.
                    field = i.strip().lstrip('field=')
                    field_name, field_value = field.split('value=')
                    if 'inline=' in field_value:
                        field_value, field_inline = field_value.split('inline=')
                        if 'false' in field_inline.lower() or 'no' in field_inline.lower():
                            field_inline = False
                    field_name = field_name.strip().lstrip('name=')
                    em.add_field(name=field_name, value=field_value.strip(), inline=field_inline)
            # Author supports optional icon= and url= sub-keys.
            if author:
                if 'icon=' in author:
                    text, icon = author.split('icon=')
                    if 'url=' in icon:
                        print("here")  # NOTE(review): leftover debug print
                        em.set_author(name=text.strip()[5:], icon_url=icon.split('url=')[0].strip(), url=icon.split('url=')[1].strip())
                    else:
                        em.set_author(name=text.strip()[5:], icon_url=icon)
                else:
                    if 'url=' in author:
                        print("here")  # NOTE(review): leftover debug print
                        em.set_author(name=author.split('url=')[0].strip()[5:], url=author.split('url=')[1].strip())
                    else:
                        em.set_author(name=author)
            if image:
                em.set_image(url=image)
            if thumbnail:
                em.set_thumbnail(url=thumbnail)
            # Footer supports an optional icon= sub-key.
            if footer:
                if 'icon=' in footer:
                    text, icon = footer.split('icon=')
                    em.set_footer(text=text.strip()[5:], icon_url=icon)
                else:
                    em.set_footer(text=footer)
            await self.bot.send_message(ctx.message.channel, content=ptext, embed=em)
        else:
            # Called with no arguments: show usage help.
            msg = '*Params:*\n```bf\n[title][author][desc][field][footer][thumbnail][image][timestamp][ptext]```'
            await self.bot.send_message(ctx.message.channel, msg)
        # Best-effort cleanup of the invoking message.
        try:
            await self.bot.delete_message(ctx.message)
        except:
            pass
    except:
        await self.bot.send_message(ctx.message.channel, 'looks like something fucked up. or i dont have embed perms')
@commands.command(aliases=['g'])
async def google(self, *, query):
    """Searches google and gives you top result."""
    await self.bot.type()
    try:
        card, entries = await self.get_google_entries(query)
    except RuntimeError as e:
        await self.bot.say(str(e))
    else:
        # If Google returned a "card" (calculator, weather, ...), show it
        # as an embed with up to three plain results attached.
        if card:
            value = '\n'.join(entries[:3])
            if value:
                card.add_field(name='Search Results', value=value, inline=False)
            return await self.bot.say(embed=card)
        if len(entries) == 0:
            return await self.bot.say('No results found... sorry.')
        next_two = entries[1:3]
        first_entry = entries[0]
        # Escape a trailing ')' so Discord's markdown link parsing
        # doesn't truncate the URL.
        if first_entry[-1] == ')':
            first_entry = first_entry[:-1] + '%29'
        if next_two:
            # Wrap the extra links in <> to suppress Discord previews.
            formatted = '\n'.join(map(lambda x: '<%s>' % x, next_two))
            msg = '{}\n\n**See also:**\n{}'.format(first_entry, formatted)
        else:
            msg = first_entry
        await self.bot.say(msg)
@checks.is_owner()
@commands.command(pass_context=True)
async def source(self, ctx, *, command):
    '''See the source code for any command.'''
    # (Removed the unresolved '=======' / '>>>>>>>' merge-conflict markers
    # that followed this command and made the module unimportable.)
    await self.bot.say('```py\n'+str(inspect.getsource(self.bot.get_command(command).callback)+'```'))
def parse_google_card(self, node):
    # Parse Google's "answer card" (the box above the organic results) out
    # of the lxml node for div#topstuff. Returns a discord.Embed describing
    # the card, or None when no recognised card is present.
    # NOTE(review): every XPath below is tied to Google's (obfuscated) CSS
    # class names at the time of writing and silently stops matching when
    # Google changes its markup.
    if node is None:
        return None
    e = discord.Embed(colour=0x00FFFF)
    # check if it's a calculator card:
    calculator = node.find(".//table/tr/td/span[@class='nobr']/h2[@class='r']")
    if calculator is not None:
        e.title = 'Calculator'
        e.description = ''.join(calculator.itertext())
        return e
    parent = node.getparent()
    # check for unit conversion card
    unit = parent.find(".//ol//div[@class='_Tsb']")
    if unit is not None:
        e.title = 'Unit Conversion'
        e.description = ''.join(''.join(n.itertext()) for n in unit)
        return e
    # check for currency conversion card
    currency = parent.find(".//ol/table[@class='std _tLi']/tr/td/h2")
    if currency is not None:
        e.title = 'Currency Conversion'
        e.description = ''.join(currency.itertext())
        return e
    # check for release date card
    release = parent.find(".//div[@id='_vBb']")
    if release is not None:
        try:
            e.description = ''.join(release[0].itertext()).strip()
            e.title = ''.join(release[1].itertext()).strip()
            return e
        except:
            return None
    # check for definition card
    words = parent.find(".//ol/div[@class='g']/div/h3[@class='r']/div")
    if words is not None:
        try:
            definition_info = words.getparent().getparent()[1] # yikes
        except:
            pass
        else:
            try:
                # inside is a <div> with two <span>
                # the first is the actual word, the second is the pronunciation
                e.title = words[0].text
                e.description = words[1].text
            except:
                return None
            # inside the table there's the actual definitions
            # they're separated as noun/verb/adjective with a list
            # of definitions
            for row in definition_info:
                if len(row.attrib) != 0:
                    # definitions are empty <tr>
                    # if there is something in the <tr> then we're done
                    # with the definitions
                    break
                try:
                    data = row[0]
                    lexical_category = data[0].text
                    body = []
                    for index, definition in enumerate(data[1], 1):
                        body.append('%s. %s' % (index, definition.text))
                    e.add_field(name=lexical_category, value='\n'.join(body), inline=False)
                except:
                    continue
            return e
    # check for "time in" card
    time_in = parent.find(".//ol//div[@class='_Tsb _HOb _Qeb']")
    if time_in is not None:
        try:
            time_place = ''.join(time_in.find("span[@class='_HOb _Qeb']").itertext()).strip()
            the_time = ''.join(time_in.find("div[@class='_rkc _Peb']").itertext()).strip()
            the_date = ''.join(time_in.find("div[@class='_HOb _Qeb']").itertext()).strip()
        except:
            return None
        else:
            e.title = time_place
            e.description = '%s\n%s' % (the_time, the_date)
            return e
    # check for weather card
    # this one is the most complicated of the group lol
    # everything is under a <div class="e"> which has a
    # <h3>{{ weather for place }}</h3>
    # string, the rest is fucking table fuckery.
    weather = parent.find(".//ol//div[@class='e']")
    if weather is None:
        return None
    location = weather.find('h3')
    if location is None:
        return None
    e.title = ''.join(location.itertext())
    table = weather.find('table')
    if table is None:
        return None
    # This is gonna be a bit fucky.
    # So the part we care about is on the second data
    # column of the first tr
    try:
        tr = table[0]
        img = tr[0].find('img')
        category = img.get('alt')
        image = 'https:' + img.get('src')
        temperature = tr[1].xpath("./span[@class='wob_t']//text()")[0]
    except:
        return None # RIP
    else:
        e.set_thumbnail(url=image)
        e.description = '*%s*' % category
        e.add_field(name='Temperature', value=temperature)
    # On the 4th column it tells us our wind speeds
    try:
        wind = ''.join(table[3].itertext()).replace('Wind: ', '')
    except:
        return None
    else:
        e.add_field(name='Wind', value=wind)
    # On the 5th column it tells us our humidity
    try:
        humidity = ''.join(table[4][0].itertext()).replace('Humidity: ', '')
    except:
        return None
    else:
        e.add_field(name='Humidity', value=humidity)
    return e
async def get_google_entries(self, query):
    # Fetch a Google results page for *query* and return
    # (card, entries): `card` is a discord.Embed built from Google's
    # answer card (or None), `entries` is a list of result URLs.
    # Raises RuntimeError on a non-200 response.
    params = {
        'q': query,
        'safe': 'on',
        'lr': 'lang_en',
        'hl': 'en'
    }
    # A browser-like User-Agent; Google serves different (scrape-hostile)
    # markup to unknown clients.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64)'
    }
    # list of URLs
    entries = []
    # the result of a google card, an embed
    card = None
    async with aiohttp.get('https://www.google.com.au/search', params=params, headers=headers) as resp:
        if resp.status != 200:
            raise RuntimeError('Google somehow failed to respond.')
        root = etree.fromstring(await resp.text(), etree.HTMLParser())
        # with open('google.html', 'w', encoding='utf-8') as f:
        #     f.write(etree.tostring(root, pretty_print=True).decode('utf-8'))
        """
        Tree looks like this.. sort of..
        <div class="g">
            ...
            <h3>
                <a href="/url?q=<url>" ...>title</a>
            </h3>
            ...
            <span class="st">
                <span class="f">date here</span>
                summary here, can contain <em>tag</em>
            </span>
        </div>
        """
        card_node = root.find(".//div[@id='topstuff']")
        card = self.parse_google_card(card_node)
        search_nodes = root.findall(".//div[@class='g']")
        for node in search_nodes:
            url_node = node.find('.//h3/a')
            if url_node is None:
                continue
            url = url_node.attrib['href']
            # Organic results link through Google's redirector as
            # '/url?q=<real url>&...'; anything else is skipped.
            if not url.startswith('/url?'):
                continue
            url = parse_qs(url[5:])['q'][0] # get the URL from ?q query string
            # if I ever cared about the description, this is how
            entries.append(url)
            # short = node.find(".//span[@class='st']")
            # if short is None:
            #     entries.append((url, ''))
            # else:
            #     text = ''.join(short.itertext())
            #     entries.append((url, text.replace('...', '')))
    return card, entries
@commands.command(pass_context=True, aliases=['googlecalc', 'gcal', 'calc'])
async def gcalc(self, ctx, *, query):
    """Evaluate an expression via Google's calculator card.

    Posts the card embed on success, otherwise reports an error; the
    invoking message (and on failure the triggering message) is removed
    after a short pause.
    """
    # (Removed unresolved '<<<<<<<'/'=======' merge-conflict markers that
    # preceded this command and made the module unimportable.)
    await self.bot.type()
    try:
        card, entries = await self.get_google_entries(query)
    except RuntimeError as e:
        await self.bot.say(str(e))
    else:
        if card:
            value = '\n'.join(entries[:3])
            if value:
                # A pure calculator card needs no extra search results.
                if card.title != 'Calculator':
                    card.add_field(name='Search Results', value=value, inline=False)
            await self.bot.say(embed=card)
            # BUG FIX: the sleep coroutine was previously not awaited, so
            # the pause before cleanup never actually happened.
            await asyncio.sleep(2)
            await self.bot.delete_message(ctx.message)
            return
        await self.bot.say("Error: could not calculate expression")
        await asyncio.sleep(2)
        # Find the invoking user's most recent message so it can be
        # cleaned up along with the command invocation.
        async for m in self.bot.logs_from(ctx.message.channel, limit=2):
            if m.author.id == ctx.message.author.id:
                message = m
                break
        await self.bot.delete_message(ctx.message)
        await self.bot.delete_message(message)
        return
@commands.command(pass_context=True)
async def copyembed(self, ctx):
    """Dump the raw embed data of the message directly above the command."""
    history = []
    async for entry in self.bot.logs_from(ctx.message.channel, limit=2):
        history.append(entry)
    # history[0] is this command invocation; history[1] is the target.
    target = history[1]
    await self.bot.say('```' + str(target.embeds[0]) + '```')
@commands.command(pass_context=True)
async def edit(self, ctx, *msg):
    '''edit your previous message
    works up to 20 messages ago'''
    msg = ' '.join(msg)
    channel = ctx.message.channel
    # use the 2nd last message because the last message would be the command
    messages = []
    async for m in self.bot.logs_from(channel, limit=20):
        messages.append(m)
    for m in messages[1:]:
        if m.author.id == ctx.message.author.id:
            message = m
            break
    # BUG FIX: ' '.join() yields '' (never None) when no text is given, so
    # the old `if msg == None` fallback could never fire; test emptiness.
    if not msg:
        msg = message.content
    # A literal '{}' in the new text is replaced with the old content.
    # (Also removed a stray debug print('{}') here.)
    msg = msg.replace('{}', message.content)
    await self.bot.delete_message(ctx.message)
    await self.bot.edit_message(message, new_content=msg)
@commands.command(pass_context=True)
async def replace(self, ctx, old, *newphrase):
    '''replace one phrase to another in your previous message
    works up to 20 messages ago'''
    replacement = ' '.join(newphrase)
    history = []
    # The newest entry is this command itself, hence the [1:] skip below.
    async for entry in self.bot.logs_from(ctx.message.channel, limit=20):
        history.append(entry)
    for entry in history[1:]:
        if entry.author.id == ctx.message.author.id:
            target = entry
            break
    updated = target.content.replace(old, replacement)
    await self.bot.delete_message(ctx.message)
    await self.bot.edit_message(target, new_content=updated)
@commands.command(pass_context=True)
async def reverse(self, ctx):
    '''reverse your previous message
    works up to 20 messages ago'''
    channel = ctx.message.channel
    # use the 2nd last message because the last message would be the command
    messages = []
    async for m in self.bot.logs_from(channel, limit=20):
        messages.append(m)
    for m in messages[1:]:
        # BUG FIX: a single user id was hard-coded here, so the command only
        # ever worked for one account; match the invoking author instead,
        # consistent with the sibling `edit`/`replace` commands and the
        # docstring ("your previous message").
        if m.author.id == ctx.message.author.id:
            message = m
            break
    await self.bot.delete_message(ctx.message)
    await self.bot.edit_message(message, new_content=message.content[::-1])
@commands.command(pass_context=True)
async def merge(self, ctx, msgs: int, join_with='\n'):
    """Merge your last *msgs* messages (2-10) into one, joined by *join_with*."""
    # (Removed the unresolved '>>>>>>>' merge-conflict marker that followed
    # this command and made the module unimportable.)
    if msgs > 10:
        msgs = 10  # hard cap keeps the history scan cheap
    elif msgs < 2:
        msg = await self.bot.say('can only merge 2 or more messages')
        await asyncio.sleep(2)
        await self.bot.delete_message(msg)
        return
    channel = ctx.message.channel
    messages = []
    await self.bot.delete_message(ctx.message)
    n = 0
    # Collect the author's last `msgs` messages from recent history
    # (simplified from the old `if n < msgs: pass / else: break` shape).
    async for m in self.bot.logs_from(channel, limit=2*msgs+50):
        if n >= msgs:
            break
        if m.author.id == ctx.message.author.id:
            messages.append(m)
            n += 1
    # logs_from yields newest first, so reverse to chronological order.
    pastmsgs = []
    for m in list(reversed(messages)):
        pastmsgs.append(m.content)
    newmsg = join_with.join(pastmsgs)
    # Delete everything but the newest collected message, then rewrite it
    # in place with the merged content.
    for m in messages[1:]:
        await self.bot.delete_message(m)
    await self.bot.edit_message(messages[0], new_content=newmsg)
def cleanup_code(content):
    """Strip Markdown code fencing from *content*.

    A triple-backtick block loses its first and last lines (the fences);
    otherwise surrounding backticks and whitespace are trimmed.
    """
    fenced = content.startswith('```') and content.endswith('```')
    if fenced:
        inner_lines = content.split('\n')[1:-1]
        return '\n'.join(inner_lines)
    return content.strip('` \n')
def get_syntax_error(e):
    """Render *e* (a SyntaxError) as a Markdown ``py`` code block."""
    if e.text is None:
        return '```py\n{}: {}\n```'.format(type(e).__name__, e)
    # Right-align the caret under the offending column.
    pointer = '{0:>{1}}'.format('^', e.offset)
    return '```py\n{}{}\n{}: {}```'.format(e.text, pointer, type(e).__name__, e)
async def to_code_block(ctx, body):
    # Normalize *body* into a ```py fenced block and edit the invoking
    # message in place.
    # NOTE(review): this references a module-level `bot` and takes no
    # `self`, unlike the surrounding methods — presumably a leftover
    # self-bot helper; confirm `bot` exists at module scope before use.
    if body.startswith('```') and body.endswith('```'):
        content = '\n'.join(body.split('\n')[1:-1])
    else:
        content = body.strip('`')
    await bot.edit_message(ctx.message, '```py\n'+content+'```')
@commands.command(pass_context=True)
async def findcmd(self, ctx, command):
    """Report which cog class (and extension module) defines *command*."""
    # (Removed unresolved '<<<<<<<'/'=======' merge-conflict markers that
    # preceded this command and made the module unimportable.)
    cog = ''
    for cogclass in self.bot.cogs:
        for cmd in dir(self.bot.cogs[cogclass]):
            if cmd.lower() == command.lower():
                # Locate the loaded extension that exports this cog class.
                for cog in self.bot.extensions:
                    if cogclass in dir(self.bot.extensions[cog]):
                        break
                await self.bot.say("Command `{}` is in class `{}` in cog `{}`".format(command, cogclass, cog))
                return
    await self.bot.say("Couldn't find command `{}`".format(command))
@commands.command(pass_context=True, aliases=['d'])
async def debug(self, ctx, *, code):
    """Evaluates code"""
    # Owner REPL: eval()s arbitrary input with bot context injected.
    # Paginated output waits for the user to type "more" every 4 pages.
    def check(m):
        # wait_for_message predicate: continue when the user says "more".
        if m.content.strip().lower() == "more":
            return True
    author = ctx.message.author
    channel = ctx.message.channel
    code = code.strip('` ')
    result = None
    # Expose convenient names (bot, ctx, message, ...) to the evaluated code.
    global_vars = globals().copy()
    global_vars['bot'] = self.bot
    global_vars['ctx'] = ctx
    global_vars['message'] = ctx.message
    global_vars['author'] = ctx.message.author
    global_vars['channel'] = ctx.message.channel
    global_vars['server'] = ctx.message.server
    try:
        # SECURITY: eval of raw input — acceptable only because the command
        # is implicitly owner-only; never expose this to other users.
        result = eval(code, global_vars, locals())
    except Exception as e:
        await self.bot.say(box('{}: {}'.format(type(e).__name__, str(e)),
                               lang="py"))
        return
    # Allow evaluating coroutine expressions (e.g. API calls).
    if asyncio.iscoroutine(result):
        result = await result
    result = str(result)
    # if not ctx.message.channel.is_private:
    #     censor = (self.bot.settings.email,
    #               self.bot.settings.password,
    #               self.bot.settings.token)
    #     r = "[EXPUNGED]"
    #     for w in censor:
    #         if w is None or w == "":
    #             continue
    #         result = result.replace(w, r)
    #         result = result.replace(w.lower(), r)
    #         result = result.replace(w.upper(), r)
    # Split long output into message-sized pages.
    result = list(pagify(result, shorten_by=16))
    for i, page in enumerate(result):
        if i != 0 and i % 4 == 0:
            last = await self.bot.say("There are still {} messages. "
                                      "Type `more` to continue."
                                      "".format(len(result) - (i+1)))
            msg = await self.bot.wait_for_message(author=author,
                                                  channel=channel,
                                                  check=check,
                                                  timeout=10)
            # Timed out: tidy the prompt and stop paginating.
            if msg is None:
                try:
                    await self.bot.delete_message(last)
                except:
                    pass
                finally:
                    break
        await self.bot.say(box(page, lang="py"))
@commands.command(pass_context=True)
async def urban2(self,ctx, *, search_terms : str, definition_number : int=1):
    """Urban Dictionary search
    Definition number must be between 1 and 10"""
    # Self-bot style: rewrite the invoking message into a prompt line.
    await self.bot.edit_message(ctx.message, new_content=search_terms + ':')
    def encode(s):
        return quote_plus(s, encoding='utf-8', errors='replace')
    # definition_number is just there to show up in the help
    # all this mess is to avoid forcing double quotes on the user
    search_terms = search_terms.split(" ")
    try:
        # A trailing integer selects which definition to show (1-based).
        if len(search_terms) > 1:
            pos = int(search_terms[-1]) - 1
            search_terms = search_terms[:-1]
        else:
            pos = 0
        if pos not in range(0, 11): # API only provides the
            pos = 0 # top 10 definitions
    except ValueError:
        pos = 0
    search_terms = "+".join([encode(s) for s in search_terms])
    url = "http://api.urbandictionary.com/v0/define?term=" + search_terms
    try:
        async with aiohttp.get(url) as r:
            result = await r.json()
        if result["list"]:
            definition = result['list'][pos]['definition']
            example = result['list'][pos]['example']
            defs = len(result['list'])
            msg = ("**Definition #{} out of {}:**\n{}\n\n"
                   "**Example:**\n{}".format(pos+1, defs, definition,
                                             example))
            # Break the text into message-sized pages, then into lines.
            msg = pagify(msg, ["\n"])
            pages = []
            for page in msg:
                x = page.split('\n')
                pages.extend(x)
            em = discord.Embed(color=discord.Color.blue())
            # em = discord.Embed(color=discord.Color(0xE86222))
            em.set_author(name="Urban Dictionary", icon_url='http://i.imgur.com/6nJnuM4.png', url='http://www.urbandictionary.com/')
            # Re-assemble the lines into embed fields: a '**...**' line
            # starts a new field; other lines accumulate into its value.
            n = 0
            prevn = n
            lastfieldname = ''
            lastfieldval = ''
            for x in pages:
                if x.startswith('**'):
                    lastfieldname = x.replace('**','')
                    # placeholder value, overwritten by set_field_at below
                    em.add_field(name=lastfieldname, value='lol')
                    n += 1
                else:
                    if n == prevn:
                        lastfieldval += x
                        lastfieldval +='\n'
                    else:
                        prevn = n
                        lastfieldval = x
                    # print("hi")
                    # print("name={}\nvalue={}".format(lastfieldname, lastfieldval))
                    em.set_field_at(n-1, name=lastfieldname, value=lastfieldval)
                    # print("name={}\nvalue={}".format(lastfieldname, lastfieldval))
                    # print("hi2")
            await self.bot.say(embed=em)
        else:
            await self.bot.say("Your search terms gave no results.")
    except IndexError:
        await self.bot.say("There is no definition #{}".format(pos+1))
    except:
        await self.bot.say("Error.")
def setup(bot):
    """discord.py extension entry point: register the Utility2 cog on *bot*."""
    # (Removed the unresolved '>>>>>>>' merge-conflict marker that preceded
    # this function and made the module unimportable.)
    bot.add_cog(Utility2(bot))
| Dino0631/RedRain-Bot | cogs/cogs/utils2.py | Python | gpl-3.0 | 44,691 |
__author__ = 'stephen'
# ===============================================================================
# GLOBAL IMPORTS:
import os,sys
import numpy as np
import argparse
import time
# ===============================================================================
# LOCAL IMPORTS:
HK_DataMiner_Path = os.path.relpath(os.pardir)
#HK_DataMiner_Path = os.path.abspath("/Users/stephen/Dropbox/projects/work-2018.12/HK_DataMiner/")
#print HK_DataMiner_Path
sys.path.append(HK_DataMiner_Path)
from cluster import Faiss_DBSCAN, KCenters
from utils import plot_cluster, XTCReader, VectorReader
# ===============================================================================
# Command-line interface. All options have defaults so the script can run bare.
# NOTE(review): eps / min_samples / nlist / nprobe are parsed but never used
# below — presumably leftovers from a DBSCAN variant of this script.
cli = argparse.ArgumentParser()
cli.add_argument('-t', '--trajListFns', default ='tica_trajlist',
                 help='List of trajectory files to read in, separated by spaces.')
cli.add_argument('-o', '--homedir', help='Home dir.', default=".", type=str)
cli.add_argument('-d', '--device', help='Device No. of GPU.', default=0, type=int)
#cli.add_argument('-n', '--n_size', help='database size.', default=10000, type=int)
cli.add_argument('-e', '--eps', help='eps', default=1, type=float)
cli.add_argument('-m', '--min_samples', help='min_samples', default=5, type=int)
cli.add_argument('-l', '--nlist', help='nlist', default=1000, type=int)
cli.add_argument('-p', '--nprobe', help='nprob', default=10, type=int)
cli.add_argument('-s', '--stride', help='Subsample stride', default = None, type=int)
# ===========================================================================
args = cli.parse_args()
trajlistName = args.trajListFns
homedir = args.homedir
device = args.device
#dimension = args.dimension # dimension
#n_size = args.n_size # database size
#np.random.seed(1234)             # make reproducible
#X = np.random.random((n_size, dimension)).astype('float32') * 10.0
#X[:, 0] += np.arange(n_size) / 100.0
#print(X)
#if args.stride is not None:
#    trajreader = VectorReader(trajlistName=trajlistName, homedir=homedir, trajExt='txt', stride=args.stride)
#else:
#    trajreader = VectorReader(trajlistName=trajlistName, homedir=homedir, trajExt='txt')
#X = trajreader.trajs
# Load one tICA-projected trajectory array per line listed in 'trajlist_ala'.
# NOTE(review): both input files ('trajlist_ala', 'diheds.txt' below) must
# exist in the working directory; there is no error handling here.
X = []
for line in open('trajlist_ala'):
    X.append(np.loadtxt(line.strip()))
#traj_len = trajreader.traj_len
#np.savetxt("./traj_len.txt", traj_len, fmt="%d")
'''
trajreader = XTCReader('trajlist', 'atom_indices', '.', 'xtc', 'native.pdb', nSubSample=None)
trajs = trajreader.trajs
traj_len = trajreader.traj_len
xyz = trajs
from msmbuilder.featurizer import DihedralFeaturizer
featurizer = DihedralFeaturizer(types=['phi', 'psi'], sincos=False)
diheds_list = featurizer.fit_transform(xyz)
diheds = diheds_list[0].tolist()
for i in range(1, len(diheds_list)):
    diheds.append(diheds_list[i][0].tolist())
diheds_array = np.asarray(diheds) * 180.0 / np.pi
print(diheds_array)
#print(diheds_list)
#print(diheds)
np.savetxt('diheds.txt', diheds_array, fmt="%8f")
#with open('diheds.txt', 'w') as f:
#    for item in diheds:
#        f.write("%s\n" % item)
print(diheds_array.shape)
'''
# Precomputed phi/psi dihedrals (degrees), used only for plotting.
diheds_array = np.loadtxt('diheds.txt', dtype=np.float32)
phi_angles = diheds_array[:,0]
psi_angles = diheds_array[:,1]
print(phi_angles.shape)
# ===========================================================================
#if os.path.isfile("./phi_angles.txt") and os.path.isfile("./psi_angles.txt") is True:
#    phi_angles = np.loadtxt("./phi_angles.txt", dtype=np.float32)
#    psi_angles = np.loadtxt("./psi_angles.txt", dtype=np.float32)
#X = np.column_stack((phi_angles, psi_angles))
# print(X.shape)
#phi_angles = np.degrees(diheds[:, 0])
#psi_angles = np.degrees(diheds[:, 1])
#print(phi_angles)
#X = diheds_array
#X = tica_trajs[0].astype(np.float32)
# print(X)
'''
#if os.path.isfile("./phi_angles.txt") and os.path.isfile("./psi_angles.txt") is True:
#    phi_angles = np.loadtxt("./phi_angles.txt", dtype=np.float32)
#    psi_angles = np.loadtxt("./psi_angles.txt", dtype=np.float32)
#X=np.column_stack((phi_angles, psi_angles))
#print(X.shape)
#n_size = X.shape[0]
#dimension = X.shape[1]
'''
# ===========================================================================
# Cluster the tICA trajectories into microstates with k-centers.
n_clusters = 10
print('---------------------------------------------------------------------------------')
# NOTE(review): this import shadows the local `KCenters` imported from
# `cluster` at the top of the file — confirm the msmbuilder one is intended.
from msmbuilder.cluster import KCenters
cluster = KCenters(n_clusters=n_clusters, metric="euclidean", random_state=0)
print(cluster)
#cluster.fit(phi_psi)
cluster.fit(X)
labels = cluster.labels_
print(labels)
# msmbuilder returns one label array per trajectory; flatten them.
labels = np.concatenate(labels)
n_microstates = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_microstates)
#cluster_centers_ = cluster.cluster_centers_
# plot micro states
clustering_name = "kcenters_n_" + str(n_microstates)
#splited_assignments =split_assignments(labels, traj_len)
#np.savetxt("assignments_"+clustering_name+".txt", labels, fmt="%d")
np.savetxt("assignments_"+clustering_name+".txt", labels , fmt="%d")
#np.savetxt("cluster_centers_"+clustering_name+".txt", cluster_centers_, fmt="%d")
# Plot in dihedral space and in the first two tICA dimensions.
plot_cluster(labels=labels, phi_angles=phi_angles, psi_angles=psi_angles, name=clustering_name)
X = np.concatenate(X)
plot_cluster(labels=labels, phi_angles=X[:, 0], psi_angles=X[:, 1], name='tica_clustering.png')
#trajs[cluster_centers_].save("cluster_centers.pdb")
#trajs_sub_atoms[cluster_centers_].save("cluster_centers_sub_atoms.pdb")
| stephenliu1989/HK_DataMiner | hkdataminer/scripts/test_kcenter_tica.py | Python | apache-2.0 | 5,371 |
#!/usr/bin/env python
# encoding: utf-8
from itertools import permutations
from math import sqrt, pow
class AssertRouteError(Exception):
    """Raised when a proposed route violates the task's constraints."""
    pass
class AssertTaskDataError(Exception):
    """Raised when the task definition itself is inconsistent (e.g. an unreachable node)."""
    pass
class Node(object):
    """A named waypoint on the 2-D plane."""

    def __init__(self, name, x, y):
        # Identifying label (must not contain ':') and cartesian coordinates.
        self.name, self.x, self.y = name, x, y
class BaseTask(object):
    """A travelling-salesman-style routing task.

    Holds a start node, a finish node and intermediate nodes, computes (or
    accepts) pairwise distances, and can validate candidate routes.

    Fixes vs. the previous revision:
    - ``verify_route`` no longer rejects every valid circular route (the
      duplicate-node check now accounts for start == finish appearing twice);
    - ``run_Floyd`` and ``get_path_distance`` now work on both Python 2 and
      3 (``list(dict.keys())`` instead of ``dict.keys().index``; ``range``
      instead of ``xrange``).
    """

    # base data (needs to be passed to init)
    start = None
    finish = None
    mid_nodes = []
    name = 'base'
    timeout = None
    # Distances can be passed, but, if paths_only is false,
    # then all missing distances will be calculated.
    # uses keys like 'start_node_name:end_node_name'
    # (so no ':' in node names!)
    distances = {}
    paths_only = False
    symetric = True  # if true, distance from A to B == B to A unless
                     # specyfied otherwise in distances
    # helper data (calculated)
    all_nodes = {}
    is_circle = None

    def __init__(self, **kwargs):
        """Accept task data as keyword args, then derive helper structures."""
        # Re-bind mutable containers per instance so instances never share
        # the class-level defaults.
        self.mid_nodes = []
        self.all_nodes = {}
        self.distances = {}
        for key, val in kwargs.items():
            setattr(self, key, val)
        self.is_circle = self.start == self.finish
        self.prepare_data()

    def prepare_data(self):
        """Build the node index and fill in the distance table."""
        # fill all_nodes dict
        self.all_nodes[self.start.name] = self.start
        self.all_nodes[self.finish.name] = self.finish
        for node in self.mid_nodes:
            self.all_nodes[node.name] = node
        if self.paths_only:
            # Assumes that some nodes can be reached only by going thru other
            # nodes. If that is the case, then use Floyd algorithm to find
            # shortest paths between every two nodes.
            self.run_Floyd()
        else:  # calculate missing distances
            # get a list of stop names (including start and finish)
            stop_names = self.all_nodes.keys()
            for a, b in permutations(stop_names, 2):
                key = '%s:%s' % (a, b)
                self.distances[key] = self.calculate_distance(a, b)

    def get_distance(self, a, b):
        """Return the distance from node name *a* to node name *b*.

        Unknown pairs are infinite in paths_only mode, otherwise computed
        lazily and cached.
        """
        key = '%s:%s' % (a, b)
        dist = self.distances.get(key)
        if dist is None:
            if self.paths_only:
                return float('inf')
            else:
                dist = self.calculate_distance(a, b)
                self.distances[key] = dist
        return dist

    def calculate_distance(self, a, b):
        """Euclidean distance between the nodes named *a* and *b*."""
        node_a = self.all_nodes[a]
        node_b = self.all_nodes[b]
        return sqrt(pow((node_a.x-node_b.x), 2) + pow((node_a.y-node_b.y), 2))

    def get_path_distance(self, path):
        """Total length of *path*, a sequence of node names."""
        distance = 0
        # range (not xrange) so this runs on Python 3 as well
        for i in range(1, len(path)):
            distance += self.get_distance(path[i-1], path[i])
        return distance

    def pop_closest_to(self, origin, nodes):
        """Remove and return the name in *nodes* nearest to *origin*."""
        distance = float('inf')
        i = 0
        closest_idx = 0
        for node_name in nodes:
            d = self.get_distance(origin, node_name)
            if d < distance:
                distance = d
                closest_idx = i
            i += 1
        return nodes.pop(closest_idx)

    def pop_furthest_to(self, origin, nodes):
        """Remove and return the name in *nodes* farthest from *origin*."""
        distance = 0
        i = 0
        closest_idx = 0
        for node_name in nodes:
            d = self.get_distance(origin, node_name)
            if d > distance:
                distance = d
                closest_idx = i
            i += 1
        return nodes.pop(closest_idx)

    def verify_route(self, route, solver=None):
        """Raise AssertRouteError unless *route* is a valid solution.

        A valid route starts at the task's start node, ends at its finish
        node, visits every mid node exactly once, and contains no unknown
        or repeated nodes (the start node legitimately appears twice when
        the task is circular).
        """
        # verify start node
        if route[0] != self.start.name:
            raise AssertRouteError(
                u"Start node of route doesn't match one from task")
        # verify finish node
        if route[-1] != self.finish.name:
            raise AssertRouteError(
                u"Finish node of route doesn't match one from task")
        # verify if there are any repeted nodes on route
        route_as_set = set(route)
        if (self.start.name != self.finish.name and
                len(route_as_set) != len(route)):
            raise AssertRouteError(u'Some nodes on route are repeted')
        elif (self.start.name == self.finish.name and
                len(route_as_set) != len(route) - 1):
            # BUG FIX: on a circular route the start node appears twice, so
            # the set is exactly one smaller than the route; the old check
            # (len(set) != len(route)) rejected every valid circular route.
            raise AssertRouteError(u'Some nodes on route are repeted')
        # verify if all mid nodes are included
        nodes_as_set = set([n.name for n in self.mid_nodes])
        nodes_as_set.add(self.start.name)
        nodes_as_set.add(self.finish.name)
        if nodes_as_set - route_as_set:
            raise AssertRouteError(u'Some nodes are missing from the route (%s)' % solver)
        if route_as_set - nodes_as_set:
            raise AssertRouteError(u'Unknown nodes are included to the route')

    def run_Floyd(self):
        """All-pairs shortest paths (Floyd-Warshall) over the known edges.

        Fills self.distances with the shortest path between every reachable
        pair and raises AssertTaskDataError if any node (other than the
        finish) cannot reach some other node.
        """
        # prepare reference matrix (list() so .index works on Python 3 too)
        ref = list(self.all_nodes.keys())
        size = len(ref)
        # prepare distance matrix
        N = float('inf')
        distances = [[N, ] * size for i in range(size)]
        for key, dist in self.distances.items():
            names = key.split(':')
            idx1 = ref.index(names[0])
            idx2 = ref.index(names[1])
            distances[idx1][idx2] = dist
            if self.symetric and distances[idx1][idx2] == N:
                distances[idx2][idx1] = dist
        # run Floyd algorithm
        enum = range(size)
        for i in enum:
            for j in enum:
                for k in enum:
                    if (distances[j][i] + distances[i][k]) < distances[j][k]:
                        distances[j][k] = (distances[j][i] + distances[i][k])
        # write distances back to distances dict
        for i in enum:
            for j in enum:
                dist = distances[i][j]
                if dist != N:
                    key = ':'.join([ref[i], ref[j]])
                    self.distances[key] = dist
        # validate if all nodes are accesible
        for i in enum:
            # destination node doesn't need to have a route to other nodes
            if ref[i] == self.finish.name:
                continue
            for j in enum:
                if i != j and distances[i][j] == N:
                    raise AssertTaskDataError(
                        u'There is no route from {} to {}'.format(ref[i], ref[j]))
| Cosiek/KombiVojager | base_task.py | Python | mit | 6,203 |
import locale
import os
import re
from posixpath import join as posix_join
from pelican.settings import DEFAULT_CONFIG
from pelican.tests.support import (mute, skipIfNoExecutable, temporary_folder,
unittest)
from pelican.tools.pelican_import import (blogger2fields, build_header,
build_markdown_header,
decode_wp_content,
download_attachments, fields2pelican,
get_attachments, wp2fields)
from pelican.utils import path_to_file_url, slugify
CUR_DIR = os.path.abspath(os.path.dirname(__file__))
BLOGGER_XML_SAMPLE = os.path.join(CUR_DIR, 'content', 'bloggerexport.xml')
WORDPRESS_XML_SAMPLE = os.path.join(CUR_DIR, 'content', 'wordpressexport.xml')
WORDPRESS_ENCODED_CONTENT_SAMPLE = os.path.join(CUR_DIR,
'content',
'wordpress_content_encoded')
WORDPRESS_DECODED_CONTENT_SAMPLE = os.path.join(CUR_DIR,
'content',
'wordpress_content_decoded')
try:
from bs4 import BeautifulSoup
except ImportError:
BeautifulSoup = False # NOQA
try:
import bs4.builder._lxml as LXML
except ImportError:
LXML = False
@skipIfNoExecutable(['pandoc', '--version'])
@unittest.skipUnless(BeautifulSoup, 'Needs BeautifulSoup module')
class TestBloggerXmlImporter(unittest.TestCase):
    """Tests for blogger2fields against the bundled Blogger export fixture."""

    def setUp(self):
        # Pin the locale so date parsing in the importer is deterministic.
        self.old_locale = locale.setlocale(locale.LC_ALL)
        locale.setlocale(locale.LC_ALL, 'C')
        self.posts = blogger2fields(BLOGGER_XML_SAMPLE)

    def tearDown(self):
        locale.setlocale(locale.LC_ALL, self.old_locale)

    def test_recognise_kind_and_title(self):
        """Check that importer only outputs pages, articles and comments,
        that these are correctly identified and that titles are correct.
        """
        # Field tuple layout: index 0 is the title, index 8 is the kind.
        test_posts = list(self.posts)
        kinds = {x[8] for x in test_posts}
        self.assertEqual({'page', 'article', 'comment'}, kinds)
        page_titles = {x[0] for x in test_posts if x[8] == 'page'}
        self.assertEqual({'Test page', 'Test page 2'}, page_titles)
        article_titles = {x[0] for x in test_posts if x[8] == 'article'}
        self.assertEqual({'Black as Egypt\'s Night', 'The Steel Windpipe'},
                         article_titles)
        comment_titles = {x[0] for x in test_posts if x[8] == 'comment'}
        self.assertEqual({'Mishka, always a pleasure to read your '
                          'adventures!...'},
                         comment_titles)

    def test_recognise_status_with_correct_filename(self):
        """Check that importerer outputs only statuses 'published' and 'draft',
        that these are correctly identified and that filenames are correct.
        """
        # Field tuple layout: index 2 is the filename, index 7 is the status.
        test_posts = list(self.posts)
        statuses = {x[7] for x in test_posts}
        self.assertEqual({'published', 'draft'}, statuses)
        draft_filenames = {x[2] for x in test_posts if x[7] == 'draft'}
        # draft filenames are id-based
        self.assertEqual({'page-4386962582497458967',
                          'post-1276418104709695660'}, draft_filenames)
        published_filenames = {x[2] for x in test_posts if x[7] == 'published'}
        # published filenames are url-based, except comments
        self.assertEqual({'the-steel-windpipe',
                          'test-page',
                          'post-5590533389087749201'}, published_filenames)
@skipIfNoExecutable(['pandoc', '--version'])
@unittest.skipUnless(BeautifulSoup, 'Needs BeautifulSoup module')
class TestWordpressXmlImporter(unittest.TestCase):
    """Tests for converting a WordPress XML export into Pelican fields."""

    def setUp(self):
        # Pin the locale so date parsing/formatting is deterministic.
        self.old_locale = locale.setlocale(locale.LC_ALL)
        locale.setlocale(locale.LC_ALL, 'C')
        self.posts = wp2fields(WORDPRESS_XML_SAMPLE)
        self.custposts = wp2fields(WORDPRESS_XML_SAMPLE, True)

    def tearDown(self):
        locale.setlocale(locale.LC_ALL, self.old_locale)

    def test_ignore_empty_posts(self):
        """Posts with empty titles must not be imported."""
        self.assertTrue(self.posts)
        for (title, content, fname, date, author,
             categ, tags, status, kind, format) in self.posts:
            self.assertTrue(title.strip())

    def test_recognise_page_kind(self):
        """ Check that we recognise pages in wordpress, as opposed to posts """
        self.assertTrue(self.posts)
        # Collect (title, filename, kind) of non-empty posts recognised as page
        pages_data = []
        for (title, content, fname, date, author,
             categ, tags, status, kind, format) in self.posts:
            if kind == 'page':
                pages_data.append((title, fname))
        self.assertEqual(2, len(pages_data))
        self.assertEqual(('Page', 'contact'), pages_data[0])
        self.assertEqual(('Empty Page', 'empty'), pages_data[1])

    def test_dirpage_directive_for_page_kind(self):
        """Pages are written under a pages/ directory when dirpage=True."""
        silent_f2p = mute(True)(fields2pelican)
        test_post = filter(lambda p: p[0].startswith("Empty Page"), self.posts)
        with temporary_folder() as temp:
            fname = list(silent_f2p(test_post, 'markdown',
                                    temp, dirpage=True))[0]
            self.assertTrue(fname.endswith('pages%sempty.md' % os.path.sep))

    def test_dircat(self):
        """With dircat=True posts are placed in per-category directories."""
        silent_f2p = mute(True)(fields2pelican)
        test_posts = []
        for post in self.posts:
            # check post kind
            if len(post[5]) > 0:  # Has a category
                test_posts.append(post)
        with temporary_folder() as temp:
            fnames = list(silent_f2p(test_posts, 'markdown',
                                     temp, dircat=True))
        subs = DEFAULT_CONFIG['SLUG_REGEX_SUBSTITUTIONS']
        index = 0
        for post in test_posts:
            name = post[2]
            category = slugify(post[5][0], regex_subs=subs, preserve_case=True)
            name += '.md'
            filename = os.path.join(category, name)
            out_name = fnames[index]
            self.assertTrue(out_name.endswith(filename))
            index += 1

    def test_unless_custom_post_all_items_should_be_pages_or_posts(self):
        self.assertTrue(self.posts)
        pages_data = []
        for (title, content, fname, date, author, categ,
             tags, status, kind, format) in self.posts:
            if kind == 'page' or kind == 'article':
                pass
            else:
                pages_data.append((title, fname))
        self.assertEqual(0, len(pages_data))

    def test_recognise_custom_post_type(self):
        """Custom post types survive when custom posts are requested."""
        self.assertTrue(self.custposts)
        cust_data = []
        for (title, content, fname, date, author, categ,
             tags, status, kind, format) in self.custposts:
            if kind == 'article' or kind == 'page':
                pass
            else:
                cust_data.append((title, kind))
        self.assertEqual(3, len(cust_data))
        self.assertEqual(
            ('A custom post in category 4', 'custom1'),
            cust_data[0])
        self.assertEqual(
            ('A custom post in category 5', 'custom1'),
            cust_data[1])
        self.assertEqual(
            ('A 2nd custom post type also in category 5', 'custom2'),
            cust_data[2])

    def test_custom_posts_put_in_own_dir(self):
        """With wp_custpost=True custom posts go into a per-kind directory."""
        silent_f2p = mute(True)(fields2pelican)
        test_posts = []
        for post in self.custposts:
            # check post kind
            if post[8] == 'article' or post[8] == 'page':
                pass
            else:
                test_posts.append(post)
        with temporary_folder() as temp:
            fnames = list(silent_f2p(test_posts, 'markdown',
                                     temp, wp_custpost=True))
        index = 0
        for post in test_posts:
            name = post[2]
            kind = post[8]
            name += '.md'
            filename = os.path.join(kind, name)
            out_name = fnames[index]
            self.assertTrue(out_name.endswith(filename))
            index += 1

    def test_custom_posts_put_in_own_dir_and_catagory_sub_dir(self):
        """wp_custpost + dircat nests the category dir inside the kind dir."""
        silent_f2p = mute(True)(fields2pelican)
        test_posts = []
        for post in self.custposts:
            # check post kind
            if post[8] == 'article' or post[8] == 'page':
                pass
            else:
                test_posts.append(post)
        with temporary_folder() as temp:
            fnames = list(silent_f2p(test_posts, 'markdown', temp,
                                     wp_custpost=True, dircat=True))
        subs = DEFAULT_CONFIG['SLUG_REGEX_SUBSTITUTIONS']
        index = 0
        for post in test_posts:
            name = post[2]
            kind = post[8]
            category = slugify(post[5][0], regex_subs=subs, preserve_case=True)
            name += '.md'
            filename = os.path.join(kind, category, name)
            out_name = fnames[index]
            self.assertTrue(out_name.endswith(filename))
            index += 1

    def test_wp_custpost_true_dirpage_false(self):
        # pages should only be put in their own directory when dirpage = True
        silent_f2p = mute(True)(fields2pelican)
        test_posts = []
        for post in self.custposts:
            # check post kind
            if post[8] == 'page':
                test_posts.append(post)
        with temporary_folder() as temp:
            fnames = list(silent_f2p(test_posts, 'markdown', temp,
                                     wp_custpost=True, dirpage=False))
        index = 0
        for post in test_posts:
            name = post[2]
            name += '.md'
            filename = os.path.join('pages', name)
            out_name = fnames[index]
            self.assertFalse(out_name.endswith(filename))

    def test_can_toggle_raw_html_code_parsing(self):
        """strip_raw removes raw HTML from markdown output (rst unaffected)."""
        test_posts = list(self.posts)

        def r(f):
            with open(f, encoding='utf-8') as infile:
                return infile.read()
        silent_f2p = mute(True)(fields2pelican)
        with temporary_folder() as temp:
            rst_files = (r(f) for f
                         in silent_f2p(test_posts, 'markdown', temp))
            self.assertTrue(any('<iframe' in rst for rst in rst_files))
            rst_files = (r(f) for f
                         in silent_f2p(test_posts, 'markdown',
                                       temp, strip_raw=True))
            self.assertFalse(any('<iframe' in rst for rst in rst_files))
            # no effect in rst
            rst_files = (r(f) for f in silent_f2p(test_posts, 'rst', temp))
            self.assertFalse(any('<iframe' in rst for rst in rst_files))
            rst_files = (r(f) for f in silent_f2p(test_posts, 'rst', temp,
                                                  strip_raw=True))
            self.assertFalse(any('<iframe' in rst for rst in rst_files))

    def test_decode_html_entities_in_titles(self):
        test_posts = [post for post
                      in self.posts if post[2] == 'html-entity-test']
        self.assertEqual(len(test_posts), 1)
        post = test_posts[0]
        title = post[0]
        # BUG FIX: this used assertTrue(title, "..."), where the expected
        # string was silently treated as the failure *message*, so the
        # check passed for any non-empty title.  The intent is equality.
        self.assertEqual(title,
                         "A normal post with some <html> entities in "
                         "the title. You can't miss them.")
        self.assertNotIn('&', title)

    def test_decode_wp_content_returns_empty(self):
        """ Check that given an empty string we return an empty string."""
        self.assertEqual(decode_wp_content(""), "")

    def test_decode_wp_content(self):
        """ Check that we can decode a wordpress content string."""
        with open(WORDPRESS_ENCODED_CONTENT_SAMPLE) as encoded_file:
            encoded_content = encoded_file.read()
            with open(WORDPRESS_DECODED_CONTENT_SAMPLE) as decoded_file:
                decoded_content = decoded_file.read()
                self.assertEqual(
                    decode_wp_content(encoded_content, br=False),
                    decoded_content)

    def test_preserve_verbatim_formatting(self):
        """Verbatim code blocks keep their indentation and line order."""
        def r(f):
            with open(f, encoding='utf-8') as infile:
                return infile.read()
        silent_f2p = mute(True)(fields2pelican)
        test_post = filter(
            lambda p: p[0].startswith("Code in List"),
            self.posts)
        with temporary_folder() as temp:
            md = [r(f) for f in silent_f2p(test_post, 'markdown', temp)][0]
            self.assertTrue(re.search(r'\s+a = \[1, 2, 3\]', md))
            self.assertTrue(re.search(r'\s+b = \[4, 5, 6\]', md))
            for_line = re.search(r'\s+for i in zip\(a, b\):', md).group(0)
            print_line = re.search(r'\s+print i', md).group(0)
            self.assertTrue(
                for_line.rindex('for') < print_line.rindex('print'))

    def test_code_in_list(self):
        """A code block nested in a list item stays indented under it."""
        def r(f):
            with open(f, encoding='utf-8') as infile:
                return infile.read()
        silent_f2p = mute(True)(fields2pelican)
        test_post = filter(
            lambda p: p[0].startswith("Code in List"),
            self.posts)
        with temporary_folder() as temp:
            md = [r(f) for f in silent_f2p(test_post, 'markdown', temp)][0]
            sample_line = re.search(r'- This is a code sample', md).group(0)
            code_line = re.search(r'\s+a = \[1, 2, 3\]', md).group(0)
            self.assertTrue(sample_line.rindex('This') < code_line.rindex('a'))

    def test_dont_use_smart_quotes(self):
        """Pandoc must not emit escaped smart quotes in the markdown."""
        def r(f):
            with open(f, encoding='utf-8') as infile:
                return infile.read()
        silent_f2p = mute(True)(fields2pelican)
        test_post = filter(
            lambda p: p[0].startswith("Post with raw data"),
            self.posts)
        with temporary_folder() as temp:
            md = [r(f) for f in silent_f2p(test_post, 'markdown', temp)][0]
            escaped_quotes = re.search(r'\\[\'"“”‘’]', md)
            self.assertFalse(escaped_quotes)
class TestBuildHeader(unittest.TestCase):
    """Tests for the reST and Markdown header builders of the importer."""

    def test_build_header(self):
        # A bare title gets a '#' underline of the same width.
        self.assertEqual(build_header('test', None, None, None, None, None),
                         'test\n####\n\n')

    def test_build_header_with_fields(self):
        fields = ('Test Post', '2014-11-04', 'Alexis Métaireau',
                  ['Programming'], ['Pelican', 'Python'], 'test-post')
        self.assertEqual(
            build_header(*fields),
            'Test Post\n'
            '#########\n'
            ':date: 2014-11-04\n'
            ':author: Alexis Métaireau\n'
            ':category: Programming\n'
            ':tags: Pelican, Python\n'
            ':slug: test-post\n'
            '\n')
        self.assertEqual(
            build_markdown_header(*fields),
            'Title: Test Post\n'
            'Date: 2014-11-04\n'
            'Author: Alexis Métaireau\n'
            'Category: Programming\n'
            'Tags: Pelican, Python\n'
            'Slug: test-post\n'
            '\n')

    def test_build_header_with_east_asian_characters(self):
        title = 'これは広い幅の文字だけで構成されたタイトルです'
        header = build_header(title, None, None, None, None, None)
        # Every character is double-width, so the underline is twice as
        # long as the number of characters in the title (23 * 2 = 46).
        self.assertEqual(header, title + '\n' + '#' * 46 + '\n\n')

    def test_galleries_added_to_header(self):
        header = build_header('test', None, None, None, None, None,
                              attachments=['output/test1', 'output/test2'])
        self.assertEqual(
            header,
            'test\n####\n:attachments: output/test1, output/test2\n\n')

    def test_galleries_added_to_markdown_header(self):
        header = build_markdown_header('test', None, None, None, None, None,
                                       attachments=['output/test1',
                                                    'output/test2'])
        self.assertEqual(
            header,
            'Title: test\nAttachments: output/test1, output/test2\n\n')
@unittest.skipUnless(BeautifulSoup, 'Needs BeautifulSoup module')
@unittest.skipUnless(LXML, 'Needs lxml module')
class TestWordpressXMLAttachements(unittest.TestCase):
    """Tests for collecting and downloading attachments from a WP export."""

    def setUp(self):
        self.old_locale = locale.setlocale(locale.LC_ALL)
        locale.setlocale(locale.LC_ALL, 'C')
        self.attachments = get_attachments(WORDPRESS_XML_SAMPLE)

    def tearDown(self):
        locale.setlocale(locale.LC_ALL, self.old_locale)

    def test_recognise_attachments(self):
        self.assertTrue(self.attachments)
        self.assertTrue(len(self.attachments.keys()) == 3)

    def test_attachments_associated_with_correct_post(self):
        self.assertTrue(self.attachments)
        # Map each expected post key to the URL set it should carry.
        expected_by_post = {
            None: {
                ('https://upload.wikimedia.org/wikipedia/commons/'
                 'thumb/2/2c/Pelican_lakes_entrance02.jpg/'
                 '240px-Pelican_lakes_entrance02.jpg'),
            },
            'with-excerpt': {
                ('http://thisurlisinvalid.notarealdomain/'
                 'not_an_image.jpg'),
                ('http://en.wikipedia.org/wiki/'
                 'File:Pelikan_Walvis_Bay.jpg'),
            },
            'with-tags': {
                'http://thisurlisinvalid.notarealdomain',
            },
        }
        for post in self.attachments.keys():
            if post not in expected_by_post:
                self.fail('all attachments should match to a '
                          'filename or None, {}'
                          .format(post))
            self.assertEqual(self.attachments[post], expected_by_post[post])

    def test_download_attachments(self):
        real_file = os.path.join(CUR_DIR, 'content/article.rst')
        good_url = path_to_file_url(real_file)
        bad_url = 'http://localhost:1/not_a_file.txt'
        silent_da = mute()(download_attachments)
        with temporary_folder() as temp:
            # Only the reachable URL should yield a downloaded file.
            locations = list(silent_da(temp, [good_url, bad_url]))
            self.assertEqual(1, len(locations))
            directory = locations[0]
            self.assertTrue(
                directory.endswith(posix_join('content', 'article.rst')),
                directory)
| getpelican/pelican | pelican/tests/test_importer.py | Python | agpl-3.0 | 18,963 |
from rest_framework.views import APIView
from rest_framework.response import Response
from jwt import token_factory
from authtoken.serializers import AuthTokenSerializer
from authtoken.settings import api_settings, secret_key
from authtoken.authentication import get_token_instance
class ObtainAuthToken(APIView):
    """Exchange posted credentials for a signed authentication token."""

    serializer_class = AuthTokenSerializer

    def post(self, request):
        # Validate the credentials; a validation error becomes a 400 response.
        serializer = self.serializer_class(data=request.data)
        serializer.is_valid(raise_exception=True)
        profile = serializer.validated_data['user'].user_profile
        signed_token = get_token_instance(profile).build(
            secret_key(),
            api_settings.TOKEN_VERIFICATION_ALGORITHM_INSTANCE,
        )
        return Response({'token': signed_token})
| MenloAthertonCoding/cruzebase | authtoken/views.py | Python | apache-2.0 | 771 |
from polyphony import testbench
from polyphony.typing import List, bit, int8
def typing03_a(xs:List[bit][8], i:int8) -> bit:
    # Indexed read from a fixed-size (8-element) list annotated as bits.
    return xs[i]
def typing03_b(xs:List[int][8], i:int8) -> bit:
    # Same lookup, but the list is annotated with plain int elements while
    # still returning a bit -- presumably exercising the type checker's
    # int->bit narrowing; confirm against the polyphony typing docs.
    return xs[i]
@testbench
def test():
    # Drive both typed lookups with the same bit pattern and check that
    # each returns exactly the element stored at the queried index.
    data = [0, 1, 1, 0,
            1, 0, 1, 0] # type: List[bit][8]
    for i in range(len(data)):
        d = data[i]
        assert d == typing03_a(data, i)
        assert d == typing03_b(data, i)
test()
| ktok07b6/polyphony | tests/typing/typing03.py | Python | mit | 446 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Empty South migration for the f1 app: defines no schema changes."""
    def forwards(self, orm):
        # Nothing to create.
        pass
    def backwards(self, orm):
        # Nothing to undo.
        pass
    # No models are frozen by this migration.
    models = {
    }
    complete_apps = ['f1']
complete_apps = ['f1'] | sar009/formula-1 | f1/migrations/0001_initial.py | Python | mit | 311 |
# -*- coding: utf-8 -*-
import sys
import socket
import intelmq.lib.harmonization
from intelmq.lib.bot import Bot
class GethostbynameExpertBot(Bot):
    """Expert bot that fills in missing IP fields by resolving FQDNs.

    For each of ``source.``/``destination.``: when the event carries an
    ``fqdn`` but no ``ip``, resolve the name and store the result if it
    is a valid IP address.
    """

    def process(self):
        event = self.receive_message()
        for prefix in ("source.", "destination."):
            fqdn_key = prefix + "fqdn"
            ip_key = prefix + "ip"
            # Nothing to resolve, or the IP is already present.
            if not event.contains(fqdn_key) or event.contains(ip_key):
                continue
            # NOTE(review): a failed lookup raises socket.gaierror and
            # propagates out of process() -- presumably handled by the
            # framework's retry logic; confirm.
            resolved = socket.gethostbyname(event.get(fqdn_key))
            if intelmq.lib.harmonization.IPAddress.is_valid(resolved,
                                                            sanitize=True):
                event.add(ip_key, resolved, sanitize=True)
        self.send_message(event)
        self.acknowledge_message()
if __name__ == "__main__":
    # The bot id is passed as the first command-line argument.
    bot = GethostbynameExpertBot(sys.argv[1])
    bot.start()
| robcza/intelmq | intelmq/bots/experts/gethostbyname/expert.py | Python | agpl-3.0 | 818 |
from django.views.generic import FormView
from sample.forms import SampleForm
class SampleFormView(FormView):
    # Class-based view rendering SampleForm with the sample/index.html template.
    form_class = SampleForm
    template_name = "sample/index.html"
| madisona/django-google-maps | sample/views.py | Python | bsd-2-clause | 181 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>, Alexander Bulimov <lazywolf0@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author:
- Jeroen Hoekx (@jhoekx)
- Alexander Bulimov (@abulimov)
module: lvol
short_description: Configure LVM logical volumes
description:
- This module creates, removes or resizes logical volumes.
version_added: "1.1"
options:
vg:
description:
- The volume group this logical volume is part of.
lv:
description:
- The name of the logical volume.
size:
description:
- The size of the logical volume, according to lvcreate(8) --size, by
default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or
according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE];
Float values must begin with a digit.
Resizing using percentage values was not supported prior to 2.1.
state:
description:
- Control if the logical volume exists. If C(present) and the
volume does not already exist then the C(size) option is required.
choices: [ absent, present ]
default: present
active:
description:
- Whether the volume is activate and visible to the host.
type: bool
default: 'yes'
version_added: "2.2"
force:
description:
- Shrink or remove operations of volumes requires this switch. Ensures that
that filesystems get never corrupted/destroyed by mistake.
type: bool
default: 'no'
version_added: "1.5"
opts:
description:
- Free-form options to be passed to the lvcreate command.
version_added: "2.0"
snapshot:
description:
- The name of the snapshot volume
version_added: "2.1"
pvs:
description:
- Comma separated list of physical volumes (e.g. /dev/sda,/dev/sdb).
version_added: "2.2"
thinpool:
description:
- The thin pool volume name. When you want to create a thin provisioned volume, specify a thin pool volume name.
version_added: "2.5"
shrink:
description:
- Shrink if current size is higher than size requested.
type: bool
default: 'yes'
version_added: "2.2"
resizefs:
description:
- Resize the underlying filesystem together with the logical volume.
type: bool
default: 'yes'
version_added: "2.5"
notes:
- You must specify lv (when managing the state of logical volumes) or thinpool (when managing a thin provisioned volume).
'''
EXAMPLES = '''
- name: Create a logical volume of 512m
lvol:
vg: firefly
lv: test
size: 512
- name: Create a logical volume of 512m with disks /dev/sda and /dev/sdb
lvol:
vg: firefly
lv: test
size: 512
pvs: /dev/sda,/dev/sdb
- name: Create cache pool logical volume
lvol:
vg: firefly
lv: lvcache
size: 512m
opts: --type cache-pool
- name: Create a logical volume of 512g.
lvol:
vg: firefly
lv: test
size: 512g
- name: Create a logical volume the size of all remaining space in the volume group
lvol:
vg: firefly
lv: test
size: 100%FREE
- name: Create a logical volume with special options
lvol:
vg: firefly
lv: test
size: 512g
opts: -r 16
- name: Extend the logical volume to 1024m.
lvol:
vg: firefly
lv: test
size: 1024
- name: Extend the logical volume to consume all remaining space in the volume group
lvol:
vg: firefly
lv: test
size: +100%FREE
- name: Extend the logical volume to take all remaining space of the PVs
lvol:
vg: firefly
lv: test
size: 100%PVS
resizefs: true
- name: Resize the logical volume to % of VG
lvol:
vg: firefly
lv: test
size: 80%VG
force: yes
- name: Reduce the logical volume to 512m
lvol:
vg: firefly
lv: test
size: 512
force: yes
- name: Set the logical volume to 512m and do not try to shrink if size is lower than current one
lvol:
vg: firefly
lv: test
size: 512
shrink: no
- name: Remove the logical volume.
lvol:
vg: firefly
lv: test
state: absent
force: yes
- name: Create a snapshot volume of the test logical volume.
lvol:
vg: firefly
lv: test
snapshot: snap1
size: 100m
- name: Deactivate a logical volume
lvol:
vg: firefly
lv: test
active: false
- name: Create a deactivated logical volume
lvol:
vg: firefly
lv: test
size: 512g
active: false
- name: Create a thin pool of 512g
lvol:
vg: firefly
thinpool: testpool
size: 512g
- name: Create a thin volume of 128g
lvol:
vg: firefly
lv: test
thinpool: testpool
size: 128g
'''
import re
from ansible.module_utils.basic import AnsibleModule
# Extracts the leading integer from a size string such as "512.00m".
decimal_point = re.compile(r"(\d+)")


def mkversion(major, minor, patch):
    """Collapse a dotted version triple into one comparable integer."""
    return int(major) * 1000000 + int(minor) * 1000 + int(patch)


def parse_lvs(data):
    """Parse ';'-separated ``lvs`` output into a list of LV dicts."""
    lvs = []
    for line in data.splitlines():
        cols = line.strip().split(';')
        attr = cols[2]
        lvs.append({
            # lvs prints hidden volumes as "[name]"; strip the brackets.
            'name': cols[0].replace('[', '').replace(']', ''),
            'size': int(decimal_point.match(cols[1]).group(1)),
            # lv_attr flags: position 4 is the activation state,
            # position 0 is the type ('t' thin pool, 'V' thin volume).
            'active': attr[4] == 'a',
            'thinpool': attr[0] == 't',
            'thinvol': attr[0] == 'V',
        })
    return lvs


def parse_vgs(data):
    """Parse ';'-separated ``vgs`` output into a list of VG dicts."""
    vgs = []
    for line in data.splitlines():
        cols = line.strip().split(';')
        vgs.append({
            'name': cols[0],
            'size': int(decimal_point.match(cols[1]).group(1)),
            'free': int(decimal_point.match(cols[2]).group(1)),
            'ext_size': int(decimal_point.match(cols[3]).group(1)),
        })
    return vgs


def get_lvm_version(module):
    """Return the installed LVM version as an integer, or None on failure."""
    lvm_bin = module.get_bin_path("lvm", required=True)
    rc, out, err = module.run_command("%s version" % (lvm_bin))
    if rc != 0:
        return None
    match = re.search(
        r"LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out)
    if not match:
        return None
    return mkversion(match.group(1), match.group(2), match.group(3))
def main():
    """Entry point: create, resize, (de)activate or remove an LVM logical
    volume, snapshot or thin pool according to the module parameters."""
    module = AnsibleModule(
        argument_spec=dict(
            vg=dict(type='str', required=True),
            lv=dict(type='str'),
            size=dict(type='str'),
            opts=dict(type='str'),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            force=dict(type='bool', default=False),
            shrink=dict(type='bool', default=True),
            active=dict(type='bool', default=True),
            snapshot=dict(type='str'),
            pvs=dict(type='str'),
            resizefs=dict(type='bool', default=False),
            thinpool=dict(type='str'),
        ),
        supports_check_mode=True,
        required_one_of=(
            ['lv', 'thinpool'],
        ),
    )

    # Determine if the "--yes" option should be used
    version_found = get_lvm_version(module)
    if version_found is None:
        module.fail_json(msg="Failed to get LVM version number")
    version_yesopt = mkversion(2, 2, 99)  # First LVM with the "--yes" option
    if version_found >= version_yesopt:
        yesopt = "--yes"
    else:
        yesopt = ""

    vg = module.params['vg']
    lv = module.params['lv']
    size = module.params['size']
    opts = module.params['opts']
    state = module.params['state']
    force = module.boolean(module.params['force'])
    shrink = module.boolean(module.params['shrink'])
    active = module.boolean(module.params['active'])
    resizefs = module.boolean(module.params['resizefs'])
    thinpool = module.params['thinpool']
    size_opt = 'L'
    size_unit = 'm'
    snapshot = module.params['snapshot']
    pvs = module.params['pvs']

    if pvs is None:
        pvs = ""
    else:
        # lvcreate/lvextend expect a space-separated device list.
        pvs = pvs.replace(",", " ")

    if opts is None:
        opts = ""

    # Add --test option when running in check-mode
    if module.check_mode:
        test_opt = ' --test'
    else:
        test_opt = ''

    if size:
        # LVCREATE(8) -l --extents option with percentage
        if '%' in size:
            size_parts = size.split('%', 1)
            size_percent = int(size_parts[0])
            if size_percent > 100:
                module.fail_json(msg="Size percentage cannot be larger than 100%")
            size_whole = size_parts[1]
            if size_whole == 'ORIGIN':
                module.fail_json(msg="Snapshot Volumes are not supported")
            elif size_whole not in ['VG', 'PVS', 'FREE']:
                module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE")
            size_opt = 'l'
            size_unit = ''

        if '%' not in size:
            # LVCREATE(8) -L --size option unit
            if size[-1].lower() in 'bskmgtpe':
                size_unit = size[-1].lower()
                size = size[0:-1]

            try:
                # Must be a number, and a leading '+'/'-' sign is rejected
                # (only the percentage form supports '+').
                float(size)
                if not size[0].isdigit():
                    raise ValueError()
            except ValueError:
                module.fail_json(msg="Bad size specification of '%s'" % size)

    # when no unit, megabytes by default
    if size_opt == 'l':
        unit = 'm'
    else:
        unit = size_unit

    # Get information on volume group requested
    vgs_cmd = module.get_bin_path("vgs", required=True)
    rc, current_vgs, err = module.run_command(
        "%s --noheadings -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)

    vgs = parse_vgs(current_vgs)
    this_vg = vgs[0]

    # Get information on logical volume requested
    lvs_cmd = module.get_bin_path("lvs", required=True)
    rc, current_lvs, err = module.run_command(
        "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit, vg))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)

    changed = False

    lvs = parse_lvs(current_lvs)

    # Work out which existing LV (if any) this invocation is about:
    # the snapshot name, the thin volume, the thin pool, or the plain LV.
    if snapshot:
        # Check snapshot pre-conditions
        for test_lv in lvs:
            if test_lv['name'] == lv or test_lv['name'] == thinpool:
                if not test_lv['thinpool'] and not thinpool:
                    break
                else:
                    module.fail_json(msg="Snapshots of thin pool LVs are not supported.")
        else:
            module.fail_json(msg="Snapshot origin LV %s does not exist in volume group %s." % (lv, vg))
        check_lv = snapshot
    elif thinpool:
        if lv:
            # Check thin volume pre-conditions
            for test_lv in lvs:
                if test_lv['name'] == thinpool:
                    break
            else:
                module.fail_json(msg="Thin pool LV %s does not exist in volume group %s." % (thinpool, vg))
            check_lv = lv
        else:
            check_lv = thinpool
    else:
        check_lv = lv

    for test_lv in lvs:
        # Accept either a bare name or a "vg/lv" style reference.
        if test_lv['name'] in (check_lv, check_lv.rsplit('/', 1)[-1]):
            this_lv = test_lv
            break
    else:
        this_lv = None

    msg = ''
    if this_lv is None:
        if state == 'present':
            # Require size argument except for snapshot of thin volumes
            if (lv or thinpool) and not size:
                for test_lv in lvs:
                    if test_lv['name'] == lv and test_lv['thinvol'] and snapshot:
                        break
                else:
                    module.fail_json(msg="No size given.")

            # create LV
            lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
            if snapshot is not None:
                if size:
                    cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv)
                else:
                    cmd = "%s %s %s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, snapshot, opts, vg, lv)
            elif thinpool and lv:
                if size_opt == 'l':
                    module.fail_json(changed=False, msg="Thin volume sizing with percentage not supported.")
                size_opt = 'V'
                cmd = "%s %s -n %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, lv, size_opt, size, size_unit, opts, vg, thinpool)
            elif thinpool and not lv:
                cmd = "%s %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, size_opt, size, size_unit, opts, vg, thinpool)
            else:
                cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs)
            rc, _, err = module.run_command(cmd)
            if rc == 0:
                changed = True
            else:
                module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err)
    else:
        if state == 'absent':
            # remove LV
            if not force:
                module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name']))
            lvremove_cmd = module.get_bin_path("lvremove", required=True)
            rc, _, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=True)
            else:
                module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err)

        elif not size:
            # Existing LV, no size requested: nothing to resize.
            pass

        elif size_opt == 'l':
            # Resize LV based on % value
            tool = None
            size_free = this_vg['free']
            if size_whole == 'VG' or size_whole == 'PVS':
                size_requested = size_percent * this_vg['size'] / 100
            else:  # size_whole == 'FREE':
                size_requested = size_percent * this_vg['free'] / 100
            if '+' in size:
                # '+N%...' means grow by that amount on top of current size.
                size_requested += this_lv['size']
            if this_lv['size'] < size_requested:
                if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))):
                    tool = module.get_bin_path("lvextend", required=True)
                else:
                    module.fail_json(
                        msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" %
                            (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit)
                    )
            elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']:  # more than an extent too large
                if size_requested == 0:
                    module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
                elif not force:
                    module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name']))
                else:
                    tool = module.get_bin_path("lvreduce", required=True)
                    tool = '%s %s' % (tool, '--force')

            if tool:
                if resizefs:
                    tool = '%s %s' % (tool, '--resizefs')
                cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
                rc, out, err = module.run_command(cmd)
                if "Reached maximum COW size" in out:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
                elif rc == 0:
                    changed = True
                    msg = "Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit)
                elif "matches existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
                elif "not larger than existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
                else:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)

        else:
            # resize LV based on absolute values
            tool = None
            if int(size) > this_lv['size']:
                tool = module.get_bin_path("lvextend", required=True)
            elif shrink and int(size) < this_lv['size']:
                if int(size) == 0:
                    module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
                if not force:
                    module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name']))
                else:
                    tool = module.get_bin_path("lvreduce", required=True)
                    tool = '%s %s' % (tool, '--force')

            if tool:
                if resizefs:
                    tool = '%s %s' % (tool, '--resizefs')
                cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
                rc, out, err = module.run_command(cmd)
                if "Reached maximum COW size" in out:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
                elif rc == 0:
                    changed = True
                elif "matches existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
                elif "not larger than existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
                else:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)

    # Finally apply the requested activation state to a pre-existing LV.
    if this_lv is not None:
        if active:
            lvchange_cmd = module.get_bin_path("lvchange", required=True)
            rc, _, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
            else:
                module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err)
        else:
            lvchange_cmd = module.get_bin_path("lvchange", required=True)
            rc, _, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
            else:
                module.fail_json(msg="Failed to deactivate logical volume %s" % (lv), rc=rc, err=err)

    module.exit_json(changed=changed, msg=msg)
if __name__ == '__main__':
    # Standard Ansible module entry point.
    main()
| mheap/ansible | lib/ansible/modules/system/lvol.py | Python | gpl-3.0 | 19,498 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.conf import settings
from django.db.models import F
# For each instance, cache "adjunct" objects -- frequently-accessed objects
# which change rarely -- by storing them in local memory. Track cache validity
# via a database timestamp on each instance (instance.adjunct_timestamp).
# Check the timestamp before returning any object from the cache; if it's stale
# invalidate all adjunct objects for the instance.
#
# When an adjunct object is modified (saved to the db or deleted), invalidate
# the appropriate instance's cache and update its timestamp. The timestamp
# update will cause the change to propagate to any other servers.
_adjuncts = {}
# ------------------------------------------------------------------------
# Interface functions
def field_permissions(user, instance, model_name=None):
    """Return the field permissions `user` has on `instance`.

    Served from the per-instance adjunct cache when object caching is
    enabled, otherwise fetched straight from the database.
    """
    if not settings.USE_OBJECT_CACHES:
        return _permissions_from_db(user, instance, model_name)
    return _get_adjuncts(instance).permissions(user, model_name)


# Backwards-compatible alias.
permissions = field_permissions
def role_field_permissions(role, instance=None, model_name=None):
    """Return the field permissions granted to `role`, optionally filtered
    by model name; `instance` defaults to the role's own instance."""
    if not settings.USE_OBJECT_CACHES:
        return _role_permissions_from_db(role, model_name)
    return _get_adjuncts(instance or role.instance).role_field_permissions(
        role.id, model_name)
def udf_defs(instance, model_name=None):
    """Return the UDF definitions for `instance` (cached or from the db)."""
    if not settings.USE_OBJECT_CACHES:
        return _udf_defs_from_db(instance, model_name)
    return _get_adjuncts(instance).udf_defs(model_name)
def clear_caches():
    # Reset the whole adjunct cache by rebinding the module-level dict;
    # entries are lazily rebuilt by _get_adjuncts on next access.
    global _adjuncts
    _adjuncts = {}
def invalidate_adjuncts(*args, **kwargs):
    """Drop the cached adjuncts of the instance owning a changed adjunct
    object, and bump its db timestamp so other servers refresh too."""
    # Called by 'save' and 'delete' signal handlers for adjunct objects
    if settings.USE_OBJECT_CACHES:
        adjunct_object = kwargs['instance']  # 'instance' is a Django term here
        instance = adjunct_object.instance
        if instance.id in _adjuncts:
            del _adjuncts[instance.id]
        increment_adjuncts_timestamp(instance)
def increment_adjuncts_timestamp(instance):
    """Atomically bump `instance.adjuncts_timestamp` in the database and
    refresh the in-memory copy from the stored value."""
    # Increment the timestamp carefully.
    # Don't call save(), to avoid storing possibly-stale data in "instance".
    # Use a SQL increment, to prevent race conditions between servers.
    from treemap.models import Instance
    qs = Instance.objects.filter(pk=instance.id)
    qs.update(adjuncts_timestamp=F('adjuncts_timestamp') + 1)
    # Update timestamp from DB to prevent saving stale timestamps
    instance.adjuncts_timestamp = qs[0].adjuncts_timestamp
# ------------------------------------------------------------------------
# Fetch info from database when not using cache
def _permissions_from_db(user, instance, model_name):
    """Uncached lookup: resolve the user's role, then its permissions."""
    from treemap.audit import Role
    role = Role.objects.get_role(instance, user)
    return _role_permissions_from_db(role, model_name)
def _role_permissions_from_db(role, model_name):
if model_name:
perms = role.fieldpermission_set.filter(model_name=model_name)
else:
perms = role.fieldpermission_set.all()
return list(perms)
def _udf_defs_from_db(instance, model_name):
    """Uncached lookup of an instance's UDF definitions, as a list."""
    from treemap.udf import UserDefinedFieldDefinition
    qs = UserDefinedFieldDefinition.objects.filter(instance=instance)
    if model_name:
        qs = qs.filter(model_type=model_name)
    return list(qs)
# ------------------------------------------------------------------------
# Fetch info from cache
def _get_adjuncts(instance):
    """Return the cached _InstanceAdjuncts for *instance*, rebuilding it
    when absent or stale (its timestamp is behind the instance's
    DB-driven adjuncts_timestamp)."""
    cached = _adjuncts.get(instance.id)
    if cached is None or cached.timestamp < instance.adjuncts_timestamp:
        cached = _InstanceAdjuncts(instance)
        _adjuncts[instance.id] = cached
    return cached
class _InstanceAdjuncts:
def __init__(self, instance):
self._instance = instance
self._user_role_ids = {}
self._permissions = {}
self._udf_defs = {}
self.timestamp = instance.adjuncts_timestamp
def permissions(self, user, model_name):
if not self._user_role_ids:
self._load_roles()
if user and user.id in self._user_role_ids:
role_id = self._user_role_ids[user.id]
else:
role_id = self._user_role_ids[None]
return self.role_field_permissions(role_id, model_name)
def role_field_permissions(self, role_id, model_name):
if not self._permissions:
self._load_permissions()
return self._permissions.get((role_id, model_name), [])
def udf_defs(self, model_name):
if not self._udf_defs:
self._load_udf_defs()
return self._udf_defs.get(model_name, [])
def _load_roles(self):
from treemap.models import InstanceUser
for iu in InstanceUser.objects.filter(instance=self._instance):
self._user_role_ids[iu.user_id] = iu.role_id
self._user_role_ids[None] = self._instance.default_role_id
def _load_permissions(self):
from treemap.audit import FieldPermission
for fp in FieldPermission.objects.filter(instance=self._instance):
dict = self._permissions
self._append_value(dict, (fp.role_id, fp.model_name), fp)
self._append_value(dict, (fp.role_id, None), fp)
def _append_value(self, dict, key, value):
if key not in dict:
dict[key] = []
dict[key].append(value)
def _load_udf_defs(self):
from treemap.udf import UserDefinedFieldDefinition
qs = UserDefinedFieldDefinition.objects.filter(instance=self._instance)
for udfd in qs:
self._append_value(self._udf_defs, udfd.model_type, udfd)
# Add to the "None" key for looking up UDF defs without model name
self._append_value(self._udf_defs, None, udfd)
| jwalgran/otm-core | opentreemap/treemap/lib/object_caches.py | Python | gpl-3.0 | 5,896 |
# -*- coding: utf-8 -*-
#
# HDF5 Tutorial documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 1 12:33:46 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'HDF5 Tutorial'
copyright = u'2014, The University of Edinburgh'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'HDF5Tutorialdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'HDF5Tutorial.tex', u'HDF5 Tutorial Documentation',
u'Amy Krause', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'hdf5tutorial', u'HDF5 Tutorial Documentation',
[u'Amy Krause'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# One Texinfo document covering the whole tutorial.  The autogenerated
# placeholder description ("One line description of project.") has been
# replaced with a real one.
texinfo_documents = [
    ('index', 'HDF5Tutorial', u'HDF5 Tutorial Documentation',
     u'Amy Krause', 'HDF5Tutorial',
     'Introductory tutorial material for HDF5.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Default language for syntax highlighting in literal code blocks.
highlight_language = 'c'
from psrd.stat_block.utils import colon_filter, default_closure, collapse_text
from psrd.universal import StatBlockSection, StatBlockHeading, filter_name
def is_animal_companion(sb, book):
    """Return True when stat block *sb* looks like an animal companion.

    A block qualifies when it has an 'AC' or 'Ability Scores' key and
    either contains an 'Nth-Level Advancement' sub-section or carries
    'Ability Scores' directly.  *book* is unused here but kept for the
    common detector signature.
    """
    fields = dict(sb.keys)
    # dict.has_key() is Python-2-only; the 'in' operator behaves the
    # same and also works on Python 3.
    if 'AC' in fields or 'Ability Scores' in fields:
        for detail in sb.details:
            if detail.__class__ == StatBlockSection \
                    and detail.name.endswith('th-Level Advancement'):
                return True
        if 'Ability Scores' in fields:
            return True
    return False
def animal_companion_parse_function(field):
    """Return the parser closure for a stat-block field label.

    Field labels are matched case-insensitively; an unrecognized label
    raises KeyError.
    """
    attribute_names = {
        'ac': 'ac',
        'cmd': 'cmd',
        'attack': 'attack',
        'ability scores': 'ability_scores',
        'special abilities': 'special_abilities',
        'special qualities': 'special_qualities',
        'sq': 'special_qualities',
        'special attacks': 'special_attacks',
        'size': 'size',
        'speed': 'speed',
        'bonus feat': 'bonus_feat',
    }
    # Build only the closure that was asked for, rather than all eleven.
    return default_closure(attribute_names[field.lower()])
def parse_animal_companion(sb, book):
    """Convert stat block *sb* into an animal-companion dict for *book*.

    Top-level keys are parsed into the companion dict; each
    'Nth-Level Advancement' sub-section becomes a nested 'advancement'
    section; all other details are collapsed into free text.
    """
    ac = {'type': 'animal_companion', 'subtype': 'base', 'source': book, 'name': filter_name(sb.name.strip()), 'sections': []}
    text = []
    for key, value in sb.keys:
        # Each closure mutates ac in place with the parsed value.
        animal_companion_parse_function(key)(ac, value)
    for detail in sb.details:
        if detail.__class__ in (StatBlockSection, StatBlockHeading) and detail.name.endswith('th-Level Advancement'):
            # NOTE(review): name[:3] assumes a single-digit level prefix
            # like '4th'; a name like '12th-Level Advancement' would
            # yield '12t' -- confirm source levels are always < 10.
            advancement = {'level': detail.name[:3], 'source': book, 'type': 'animal_companion', 'name': filter_name(ac['name'])}
            for key, value in detail.keys:
                animal_companion_parse_function(key)(advancement, value)
            advancement['subtype'] = 'advancement'
            ac['sections'].append(advancement)
        else:
            text.append(detail)
    # The whole block may itself be an advancement section.
    if ac["name"].endswith('th-Level Advancement'):
        ac['level'] = ac['name'][:3]
        ac['subtype'] = "advancement"
    if len(text) > 0:
        collapse_text(ac, text)
    return ac
| devonjones/PSRD-Parser | src/psrd/stat_block/animal_companion.py | Python | gpl-3.0 | 1,966 |
from django_google_charts.utils import OptionsDict
from django_google_charts.charts import Chart
# Sentinel option names/values shared by the chart test cases below.
KEY, VALUE, OVERRIDE = 'key', 'value', 'override'
class SuperChart(Chart):
    # Base chart declaring a single option; subclasses below exercise
    # option inheritance and overriding.
    options = {KEY: VALUE}
class SubChart1(SuperChart):
    # Adds a distinct option key -- the parent's KEY should still be
    # visible through inheritance.
    options = {'other_key': 'other_value'}
class SubChart2(SuperChart):
    # Redefines the parent's KEY -- the subclass value should win.
    options = {KEY: OVERRIDE}
def test_chart_options_wrapped():
    # Instantiating a Chart should wrap its options in an OptionsDict.
    assert isinstance(SuperChart().options, OptionsDict)
def test_chart_inherits_options():
    # Options declared on the parent class remain visible on a subclass
    # that declares unrelated options.
    assert SubChart1().options[KEY] == VALUE
def test_chart_overrides_superclass_options():
    # A subclass redefining an option key masks the parent's value.
    assert SubChart2().options[KEY] == OVERRIDE
| danpalmer/django-google-charts | tests/test_chart_options.py | Python | mit | 675 |
import time
class SizedDict(dict):
    '''Dictionary bounded to *size* entries (no timeout).

    Insertion order is tracked in ``_stack``; when the dict is full the
    oldest key is evicted.  Re-setting an existing key refreshes its
    stack position instead of appending a duplicate entry -- the
    original duplicates made a later eviction delete an already-removed
    key and raise KeyError.
    '''
    def __init__(self, size=1000):
        dict.__init__(self)
        self._maxsize = size
        self._stack = []

    def __setitem__(self, name, value):
        if name in self:
            # Overwrite: move the key to the newest position; no
            # eviction is needed since the size does not grow.
            self._stack.remove(name)
        elif len(self._stack) >= self._maxsize:
            # Full: evict the oldest entry to make room.
            dict.__delitem__(self, self._stack.pop(0))
        self._stack.append(name)
        return dict.__setitem__(self, name, value)

    # Recommended but not required:
    def get(self, name, default=None, do_set=False):
        '''Like dict.get, except a missing key with default=None
        re-raises KeyError, and do_set=True stores *default* under
        *name* before returning it.'''
        try:
            return self.__getitem__(name)
        except KeyError:
            if default is not None:
                if do_set:
                    self.__setitem__(name, default)
                return default
            else:
                raise
class CacheDict(dict):
    '''Sized dictionary whose entries expire after *timeout* seconds.

    Values are stored internally as ``(value, deadline)`` tuples, where
    deadline is an absolute time.time() value, or None for "never".
    As with SizedDict, re-setting an existing key refreshes its stack
    position instead of appending a duplicate entry (duplicates made
    later evictions raise KeyError in the original).
    '''
    def __init__(self, size=1000, timeout=None):
        dict.__init__(self)
        self._maxsize = size
        self._stack = []
        self._timeout = timeout

    def __setitem__(self, name, value, timeout=None):
        if name in self:
            # Overwrite: refresh position, no eviction needed.
            self._stack.remove(name)
        elif len(self._stack) >= self._maxsize:
            # Full: evict the oldest entry.
            dict.__delitem__(self, self._stack.pop(0))
        if timeout is None:
            timeout = self._timeout
        if timeout is not None:
            # Convert the relative timeout to an absolute deadline.
            timeout = time.time() + timeout
        self._stack.append(name)
        dict.__setitem__(self, name, (value, timeout))

    def get(self, name, default=None):
        '''Return the live value for *name*, or *default* when the key
        is missing or expired (expired entries are removed).'''
        try:
            value, deadline = self.__getitem__(name)
        except KeyError:
            return default
        if deadline is not None and deadline < time.time():
            dict.__delitem__(self, name)
            self._stack.remove(name)
            return default
        return value
#sample usage:
# d = SizedDict()
# for i in xrange(10000): d[i] = 'test'
# print len(d)
| ActiveState/code | recipes/Python/496842_Sized_Dictionary/recipe-496842.py | Python | mit | 1,920 |
import logging
import logging.config
from vFense import VFENSE_LOGGING_CONFIG
from vFense.core.queue.queue import AgentQueue
from vFense.settings import Default
from vFense.operations.agent_operations import AgentOperation
from vFense.errorz.error_messages import AgentOperationCodes
from vFense.plugins import ra
from vFense.plugins.ra import DesktopProtocol
logging.config.fileConfig(VFENSE_LOGGING_CONFIG)
logger = logging.getLogger('raapi')
def save_operation(operation):
    """Create an AgentOperation record for *operation* and attach the
    target agent to it.

    Returns the new operation id on success, None on failure.
    """
    agent_oper = AgentOperation(
        operation.username, operation.customer,
        operation.uri, operation.method
    )
    creation = agent_oper.create_operation(
        operation.operation_type,
        ra.PluginName,
        [operation.agent_id],  # expects a list of agent ids
        None  # no tag ids
    )
    if creation['http_status'] != 200:
        return None
    operation_id = creation['data']['operation_id']
    agent_oper.add_agent_to_operation(operation.agent_id, operation_id)
    logger.info(
        '%s - %s operation created by user %s' %
        (
            operation.username,
            operation.operation_type,
            operation.username
        )
    )
    return operation_id
def save_result(
        agent_id,
        operation_id,
        error,
        data,
        uri,
        method,
        operation_type
        ):
    """Record the agent's results for an operation.

    Status is ResultsReceived when *error* is falsy, otherwise
    ResultsReceivedWithErrors.  Returns the update results, or None on
    unexpected failure.  (*data* is currently unused; kept for the
    caller-facing signature.)
    """
    try:
        operation = AgentOperation(
            Default.User,
            None,
            uri,
            method
        )
        # The two branches differed only in the status code; collapse
        # the duplicated update call.
        if not error:
            status = AgentOperationCodes.ResultsReceived
        else:
            status = AgentOperationCodes.ResultsReceivedWithErrors
        return operation.update_operation_results(
            operation_id,
            agent_id,
            status,
            operation_type,
            error
        )
    except Exception:
        # Log with traceback instead of printing to stdout (the module
        # configures a logger; 'print e' also lost the stack trace and
        # was Python-2-only syntax).
        logger.exception(
            'Failed to save results for operation %s on agent %s',
            operation_id, agent_id
        )
        return None
def store_in_agent_queue(operation):
    """Queue *operation* (an RaOperation) for delivery to its agent.

    Bug fix: the original rebound ``operation`` to the dict returned by
    to_dict() and then read ``.agent_id``/``.customer_name`` attributes
    from that dict, which raises AttributeError.  Read the attributes
    from the object and serialize only for the queue payload.
    """
    # NOTE(review): RaOperation defines 'customer', not 'customer_name';
    # using the attribute that actually exists on the object -- confirm
    # AgentQueue expects the customer name here.
    agent_queue = AgentQueue(operation.agent_id, operation.customer)
    agent_queue.add(operation.to_dict())
class RaOperation():
    """Remote-assistance (RA) operation targeting a single agent.

    Instances are serialized with to_dict() and placed on the agent's
    message queue (see store_in_agent_queue)."""
    def __init__(
        self,
        operation_type,
        agent_id,
        username=None,
        customer='default',
        password='',
        uri=None,
        method=None
    ):
        """Creates a RaOperation for the ra plugin.
        Args:
            - operation_type: Specific operation to be performed.
            - agent_id: The ID of the agent to communicate with.
            - username: User performing the operation.
            - customer: Customer for which user is performing the operation.
            - password: Optional password included in the payload.
            - uri: Request URI that triggered this operation.
            - method: HTTP method of the triggering request.
        """
        self.agent_id = agent_id
        self.username = username
        self.customer = customer
        self.operation_type = operation_type
        self.password = password
        self.uri = uri
        self.method = method
        self.operation_id = ''
        self.plugin = ra.PluginName
        self.data = {}
        # Tunnel settings stay disabled until set_tunnel() is called.
        self.tunnel_needed = False
        self.host_port = ''
        self.ssh_port = ''
    def set_tunnel(self, host_port, ssh_port):
        # Enable a tunnel for this operation with the given ports.
        self.tunnel_needed = True
        self.host_port = host_port
        self.ssh_port = ssh_port
    def to_dict(self):
        """Serialize for the agent queue.  Optional keys (password and
        tunnel settings) are included only when truthy."""
        data = {}
        data['protocol'] = DesktopProtocol.Vnc
        if self.password:
            data['password'] = self.password
        if self.tunnel_needed:
            data['tunnel_needed'] = self.tunnel_needed
        if self.host_port:
            data['host_port'] = self.host_port
        if self.ssh_port:
            data['ssh_port'] = self.ssh_port
        d = {
            'operation': self.operation_type,
            'operation_id': self.operation_id,
            'username': self.username,
            'plugin': self.plugin,
            'agent_id': self.agent_id,
            'data': data
        }
        return d
| dtklein/vFense | tp/src/plugins/ra/raoperation.py | Python | lgpl-3.0 | 4,192 |
import pytest
from dateutil import tz
import arrow
from unifispot.core.models import Wifisite,Device,Guesttrack,Guest
from unifispot.core.guestutils import init_track,validate_track,redirect_guest
from tests.helpers import randomMAC
@pytest.fixture(scope='function')
def populate_analytics_tracks(request, session):
    '''Create 20 guesttracks each for today, yesterday, and tomorrow in
    site1's timezone, spaced one minute apart from midnight.'''
    site1 = Wifisite.query.filter_by(id=1).first()
    tzinfo = tz.gettz(site1.timezone)
    day_start = arrow.now(tzinfo).floor('day').to('UTC')
    apmac = randomMAC()
    # create 20 tracks, starting from day start and spaced 1 minute apart
    # NOTE(review): the original built an unused ``track_dict`` in this
    # loop and the "yesterday" one -- possibly intended as
    # ``track.loginstat`` (as the "tomorrow" loop does).  The dead
    # assignment was removed; confirm whether these batches should also
    # carry a loginstat.
    for i in range(20):
        track = init_track(site1, guestmac=randomMAC(), apmac=apmac)
        track.timestamp = day_start.replace(minutes=+i * 1).naive
        track.save()
    day_start = arrow.now(tzinfo).floor('day').to('UTC').replace(days=-1)
    # create 20 tracks, starting from day start and spaced 1 minute apart
    # on the previous day
    for i in range(20):
        track = init_track(site1, guestmac=randomMAC(), apmac=apmac)
        track.timestamp = day_start.replace(minutes=+i * 1).naive
        track.save()
    day_start = arrow.now(tzinfo).floor('day').to('UTC').replace(days=+1)
    # create 20 tracks, starting from day start and spaced 1 minute apart
    # on the next day; these carry a minimal loginstat
    for i in range(20):
        track = init_track(site1, guestmac=randomMAC(), apmac=apmac)
        track.loginstat = {'num_visits': 1}
        track.timestamp = day_start.replace(minutes=+i * 1).naive
        track.save()
@pytest.fixture(scope='function')
def populate_analytics_logins(request, session):
    '''Create 20 email-login and 20 facebook-login guesttracks for
    site1, starting at today's midnight (site timezone) one minute
    apart; every other track is additionally marked as a new guest
    (and, for facebook, as having liked the page).'''
    site1 = Wifisite.query.filter_by(id=1).first()
    tzinfo = tz.gettz(site1.timezone)
    day_start = arrow.now(tzinfo).floor('day').to('UTC')
    apmac = randomMAC()
    # (loginstat template, extra stats applied to every other track)
    batches = [
        ({'num_visits': 1, 'auth_email': 1}, ['newguest']),
        ({'num_visits': 1, 'auth_facebook': 1, 'fbcheckedin': 1},
         ['newguest', 'fbliked']),
    ]
    for loginstat, odd_stats in batches:
        for minute in range(20):
            track = init_track(site1, guestmac=randomMAC(), apmac=apmac)
            track.timestamp = day_start.replace(minutes=+minute).naive
            track.loginstat = dict(loginstat)
            track.save()
            if minute % 2:
                for stat in odd_stats:
                    track.updatestat(stat, 1)
                track.save()
@pytest.fixture(scope='function')
def populate_analytics_logins_site3(request, session):
    '''Same as populate_analytics_logins, but targets the site with
    id=3: 20 email-login and 20 facebook-login guesttracks from today's
    midnight, one minute apart, every other track marked as new (and,
    for facebook, as having liked the page).'''
    site = Wifisite.query.filter_by(id=3).first()
    tzinfo = tz.gettz(site.timezone)
    day_start = arrow.now(tzinfo).floor('day').to('UTC')
    apmac = randomMAC()
    # (loginstat template, extra stats applied to every other track)
    batches = [
        ({'num_visits': 1, 'auth_email': 1}, ['newguest']),
        ({'num_visits': 1, 'auth_facebook': 1, 'fbcheckedin': 1},
         ['newguest', 'fbliked']),
    ]
    for loginstat, odd_stats in batches:
        for minute in range(20):
            track = init_track(site, guestmac=randomMAC(), apmac=apmac)
            track.timestamp = day_start.replace(minutes=+minute).naive
            track.loginstat = dict(loginstat)
            track.save()
            if minute % 2:
                for stat in odd_stats:
                    track.updatestat(stat, 1)
                track.save()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.