# -*- coding: utf-8 -*-
import os
import asyncio
import struct
from .logger import get_logger
from .protocol import PeerStreamIterator
from .message import (MessageID,
InterestedMessage,
HandshakeMessage,
BitFieldMessage,
NotInterestedMessage,
ChokeMessage,
UnchokeMessage,
HaveMessage,
RequestMessage,
PieceMessage,
CancelMessage,
KeepAliveMessage)
logger = get_logger()
class SourceFileReader:
def __init__(self, torrent):
self.torrent = torrent
self.fd = os.open(self.torrent.name, os.O_RDONLY)
def read(self, begin, index, length):
# offset into the file: piece offset plus the block offset within the piece
pos = index * self.torrent.info.piece_length + begin
os.lseek(self.fd, pos, os.SEEK_SET)
return os.read(self.fd, length)
def has_all_pieces(self):
"""Check the size on the disk is equal or greater than
(piece_length - 1) * piece_length.
The assumption is clients wrote the last piece to disk
after checking integrating
Returns True or False.
"""
min_length = (len(self.torrent.info.pieces) - 1) * self.torrent.info.piece_length
return os.path.getsize(self.torrent.name) > min_length
def calculate_have_pieces(self):
pass
def get_have_pieces(self):
"""Get all have pieces
Returns list of all bool values with size of piece+1.
The last element in the list is False and other positions contains
True or False.
Available piece is represented as True and missing piece
is represented as False.
"""
if self.has_all_pieces():
pieces_availability = [True] * len(self.torrent.info.pieces)
pieces_availability.append(False)
return pieces_availability
return self.calculate_have_pieces()
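# Usage sketch (hypothetical `torrent` object; assumes the complete source
# file is already present on disk):
#
#     reader = SourceFileReader(torrent=torrent)
#     if reader.has_all_pieces():
#         bitfield = reader.get_have_pieces()  # e.g. [True, True, ..., False]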
class RequestHandler:
def __init__(self, torrent):
self.torrent = torrent
self.file_reader = SourceFileReader(torrent=self.torrent)
def parse(self, buffer):
"""
Tries to parse a protocol message if there are enough bytes read in the
buffer.
:return The parsed message, or None if no message could be parsed
"""
# Each message is structured as:
# <length prefix><message ID><payload>
#
# The `length prefix` is a four byte big-endian value
# The `message ID` is a single decimal byte
# The `payload` is `length prefix` - 1 bytes long (the message ID
# accounts for the remaining byte)
#
# The length prefix does not count its own four bytes, so another
# 4 bytes need to be included when slicing the buffer.
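# Illustrative example (bytes assumed, not captured from a real peer): an
# Interested message arrives as b'\x00\x00\x00\x01\x02' -- a length prefix
# of 1 followed by message ID 2 and no payload. Slicing
# header_length + message_length = 4 + 1 = 5 bytes consumes it entirely.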
self.buffer = buffer
header_length = 4
if len(self.buffer) == 68:
return HandshakeMessage.decode(self.buffer)
elif len(self.buffer) > 4: # 4 bytes is needed to identify the message
message_length = struct.unpack('>I', self.buffer[0:4])[0]
if message_length == 0:
return KeepAliveMessage()
if len(self.buffer) >= message_length:
message_id = struct.unpack('>b', self.buffer[4:5])[0]
def _consume():
"""Consume the current message from the read buffer"""
self.buffer = self.buffer[header_length + message_length:]
def _data():
""""Extract the current message from the read buffer"""
return self.buffer[:header_length + message_length]
if message_id == MessageID.BitField.value:
data = _data()
_consume()
return BitFieldMessage.decode(data)
elif message_id == MessageID.Interested.value:
_consume()
return InterestedMessage()
elif message_id == MessageID.NotInterested.value:
_consume()
return NotInterestedMessage()
elif message_id == MessageID.Choke.value:
_consume()
return ChokeMessage()
elif message_id == MessageID.Unchoke.value:
_consume()
return UnchokeMessage()
elif message_id == MessageID.Have.value:
data = _data()
_consume()
return HaveMessage.decode(data)
elif message_id == MessageID.Piece.value:
data = _data()
_consume()
return PieceMessage.decode(data)
elif message_id == MessageID.Request.value:
data = _data()
_consume()
return RequestMessage.decode(data)
elif message_id == MessageID.Cancel.value:
data = _data()
_consume()
return CancelMessage.decode(data)
else:
logger.debug('Unsupported message!')
else:
#import ipdb;ipdb.set_trace()
return None
logger.debug('Not enough in buffer in order to parse')
return None
def get_piece(self, begin, index, length):
data = self.file_reader.read(begin=begin, index=index, length=length)
return PieceMessage(begin=begin, index=index, block=data)
def handle_message(self, buffer):
message = self.parse(buffer)
if isinstance(message, NotInterestedMessage):
logger.debug('Remove interested state')
elif isinstance(message, HandshakeMessage):
logger.debug('Received Handshake')
elif isinstance(message, ChokeMessage):
logger.debug('Received choke message')
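# NOTE: `self.current_state` and `PeerState` are not defined or imported
# in this module, so the line below would raise at runtime as written.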
self.current_state.append(PeerState.Choked.value)
elif isinstance(message, UnchokeMessage):
logger.debug('Received unchoke message')
elif isinstance(message, HaveMessage):
logger.debug('Received have message')
elif isinstance(message, BitFieldMessage):
logger.debug('Received bit field message: {}'.format(message))
elif isinstance(message, PieceMessage):
pass
elif isinstance(message, InterestedMessage):
return BitFieldMessage(val=self.file_reader.get_have_pieces())
elif isinstance(message, RequestMessage):
return self.get_piece(begin=message.begin, index=message.index,
length=message.length)
elif isinstance(message, CancelMessage):
# TODO: Implement cancel data
pass
return message
class TorrentServer(asyncio.Protocol):
def __init__(self, torrent):
self.torrent = torrent
super().__init__()
def __call__(self):
self.connections = set([])
self.request_handler = RequestHandler(torrent=self.torrent)
logger.debug('Init server')
return self
def connection_made(self, transport):
self.transport = transport
peer = transport.get_extra_info('peername')
self.connections.add(peer)
def data_received(self, data):
message = self.request_handler.handle_message(data)
logger.debug(message)
if message:
logger.info('Serving {}'.format(message))
self.transport.write(message.encode())
def eof_received(self):
logger.debug('eof received')
def connection_lost(self, exc):
logger.debug('connection lost')
async def run_server(port, torrent):
"""Run a server to respond to all clients
"""
logger.info('Starting server on port {}'.format(port))
loop = asyncio.get_event_loop()
server = await loop.create_server(
TorrentServer(torrent), host='127.0.0.1', port=port)
return server
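# Usage sketch (assumes `torrent` is the parsed torrent object used
# elsewhere in this package; port 6881 is just an example value):
#
#     loop = asyncio.get_event_loop()
#     server = loop.run_until_complete(run_server(6881, torrent))
#     loop.run_forever()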
# --- end of bt/server.py (repo: kracekumar/bt, Python, gpl-3.0, 8,012 bytes) ---
import logging
import time
import zlib
from collections import defaultdict
from datetime import datetime
from hashlib import sha1
from operator import itemgetter
import simplejson as json
from _mysql_exceptions import IntegrityError
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from treeherder.etl.common import get_guid_root
from treeherder.events.publisher import JobStatusPublisher
from treeherder.model import error_summary, utils
from treeherder.model.models import Datasource, ExclusionProfile
from treeherder.model.tasks import (populate_error_summary, publish_job_action,
publish_resultset,
publish_resultset_action)
from .artifacts import ArtifactsModel
from .base import ObjectNotFoundException, TreeherderModelBase
logger = logging.getLogger(__name__)
class JobsModel(TreeherderModelBase):
"""
Represent a job repository
"""
INCOMPLETE_STATES = ["running", "pending"]
STATES = INCOMPLETE_STATES + ["completed", "coalesced"]
# indexes of specific items in the ``job_placeholder`` objects
JOB_PH_JOB_GUID = 0
JOB_PH_COALESCED_TO_GUID = 2
JOB_PH_RESULT_SET_ID = 3
JOB_PH_BUILD_PLATFORM_KEY = 4
JOB_PH_MACHINE_PLATFORM_KEY = 5
JOB_PH_MACHINE_NAME = 6
JOB_PH_DEVICE_NAME = 7
JOB_PH_OPTION_COLLECTION_HASH = 8
JOB_PH_TYPE_KEY = 9
JOB_PH_PRODUCT_TYPE = 10
JOB_PH_WHO = 11
JOB_PH_REASON = 12
JOB_PH_RESULT = 13
JOB_PH_STATE = 14
JOB_PH_START_TIMESTAMP = 16
JOB_PH_END_TIMESTAMP = 17
JOB_PH_RUNNING_AVG = 18
# list of searchable columns, i.e. those who have an index
# it would be nice to get this directly from the db and cache it
INDEXED_COLUMNS = {
"job": {
"id": "j.id",
"job_guid": "j.job_guid",
"job_coalesced_to_guid": "j.job_coalesced_to_guid",
"result_set_id": "j.result_set_id",
"build_platform_id": "j.build_platform_id",
"build_system_type": "j.build_system_type",
"machine_platform_id": "j.machine_platform_id",
"machine_id": "j.machine_id",
"option_collection_hash": "j.option_collection_hash",
"job_type_id": "j.job_type_id",
"product_id": "j.product_id",
"failure_classification_id": "j.failure_classification_id",
"who": "j.who",
"reason": "j.reason",
"result": "j.result",
"state": "j.state",
"submit_timestamp": "j.submit_timestamp",
"start_timestamp": "j.start_timestamp",
"end_timestamp": "j.end_timestamp",
"last_modified": "j.last_modified",
"signature": "j.signature",
"tier": "j.tier"
},
"result_set": {
"id": "rs.id",
"revision_hash": "rs.revision_hash",
"revision": "revision.revision",
"author": "rs.author",
"push_timestamp": "rs.push_timestamp"
},
"bug_job_map": {
"job_id": "job_id",
"bug_id": "bug_id",
"type": "type",
"who": "who",
"submit_timestamp": "submit_timestamp"
}
}
# jobs cycle targets
# NOTE: There is an order dependency here, cycle_job and
# cycle_result_set should be after any tables with foreign keys
# to their ids.
JOBS_CYCLE_TARGETS = [
"jobs.deletes.cycle_job_artifact",
"jobs.deletes.cycle_performance_artifact",
"jobs.deletes.cycle_job_log_url",
"jobs.deletes.cycle_job_note",
"jobs.deletes.cycle_bug_job_map",
"jobs.deletes.cycle_job",
"jobs.deletes.cycle_revision",
"jobs.deletes.cycle_revision_map",
"jobs.deletes.cycle_result_set"
]
PERFORMANCE_SERIES_JSON_KEYS = [
"subtest_signatures",
"test_options"
]
@classmethod
def create(cls, project):
"""
Create all the datasource tables for this project.
"""
source = Datasource(project=project)
source.save()
return cls(project=project)
def execute(self, **kwargs):
return utils.retry_execute(self.get_dhub(), logger, **kwargs)
##################
#
# Job schema data methods
#
##################
def get_job(self, id):
"""Return the job row for this ``job_id``"""
repl = [self.refdata_model.get_db_name()]
data = self.execute(
proc="jobs.selects.get_job",
placeholders=[id],
debug_show=self.DEBUG,
replace=repl,
)
return data
def get_job_reference_data(self, signature):
# Retrieve associated data in reference_data_signatures
result = self.refdata_model.get_reference_data([signature])
if result and signature in result:
return result[signature]
return None
def get_job_list(self, offset, limit,
conditions=None, exclusion_profile=None,
visibility="included"):
"""
Retrieve a list of jobs. It's mainly used by the restful api to list
the jobs. The conditions parameter is a dict containing a set of
conditions for each key. e.g.:
{
'who': set([('=', 'john')]),
'result': set([('IN', ("success", "retry"))])
}
"""
replace_str, placeholders = self._process_conditions(
conditions, self.INDEXED_COLUMNS['job']
)
if exclusion_profile:
try:
if exclusion_profile is "default":
profile = ExclusionProfile.objects.get(
is_default=True
)
else:
profile = ExclusionProfile.objects.get(
name=exclusion_profile
)
signatures = profile.flat_exclusion[self.project]
# NOT here means "not part of the exclusion profile"
inclusion = "NOT" if visibility == "included" else ""
replace_str += " AND j.signature {0} IN ({1})".format(
inclusion,
",".join(["%s"] * len(signatures))
)
placeholders += signatures
except KeyError:
# this repo/project has no hidden signatures
pass
except ExclusionProfile.DoesNotExist:
# Either there's no default profile setup or the profile
# specified is not available
pass
repl = [self.refdata_model.get_db_name(), replace_str]
data = self.execute(
proc="jobs.selects.get_job_list",
replace=repl,
placeholders=placeholders,
limit=limit,
offset=offset,
debug_show=self.DEBUG,
)
return data
def set_state(self, job_id, state):
"""Update the state of an existing job"""
self.execute(
proc='jobs.updates.set_state',
placeholders=[state, job_id],
debug_show=self.DEBUG
)
def get_incomplete_job_guids(self, resultset_id):
"""Get list of ids for jobs of resultset that are not in complete state."""
return self.execute(
proc='jobs.selects.get_incomplete_job_guids',
placeholders=[resultset_id],
debug_show=self.DEBUG,
return_type='dict',
key_column='job_guid'
)
def cancel_all_resultset_jobs(self, requester, resultset_id):
"""Set all pending/running jobs in resultset to usercancel."""
job_guids = list(self.get_incomplete_job_guids(resultset_id))
jobs = self.get_job_ids_by_guid(job_guids).values()
# Cancel all the jobs in the database...
self.execute(
proc='jobs.updates.cancel_all',
placeholders=[resultset_id],
debug_show=self.DEBUG
)
# Sending 'cancel_all' action to pulse. Right now there is no listener
# for this, so we cannot remove 'cancel' action for each job below.
publish_resultset_action.apply_async(
args=[self.project, 'cancel_all', resultset_id, requester],
routing_key='publish_to_pulse'
)
# Notify the build systems which created these jobs...
for job in jobs:
self._job_action_event(job, 'cancel', requester)
# Notify the UI.
status_publisher = JobStatusPublisher(settings.BROKER_URL)
try:
status_publisher.publish(job_guids, self.project, 'processed')
finally:
status_publisher.disconnect()
def trigger_missing_resultset_jobs(self, requester, resultset_id, project):
publish_resultset_action.apply_async(
args=[self.project, "trigger_missing_jobs", resultset_id, requester],
routing_key='publish_to_pulse'
)
def trigger_all_talos_jobs(self, requester, resultset_id, project, times):
publish_resultset_action.apply_async(
args=[self.project, "trigger_all_talos_jobs", resultset_id, requester, times],
routing_key='publish_to_pulse'
)
def _job_action_event(self, job, action, requester):
"""
Helper for issuing an 'action' for a given job (such as
cancel/retrigger)
:param job dict: The job which this action was issued to.
:param action str: Name of the action (cancel, etc..).
:param requester str: Email address of the user who caused action.
"""
publish_job_action.apply_async(
args=[self.project, action, job['id'], requester],
routing_key='publish_to_pulse'
)
def retrigger(self, requester, job):
"""
Issue a retrigger to the given job
:param requester str: The email address associated with the user who
made this request
:param job dict: A job object (typically a result of get_job)
"""
self._job_action_event(job, 'retrigger', requester)
def backfill(self, requester, job):
"""
Issue a "backfill" to the underlying build_system_type by scheduling a
pulse message.
:param requester str: The email address associated with the user who
made this request
:param job dict: A job object (typically a result of get_job)
"""
self._job_action_event(job, 'backfill', requester)
def cancel_job(self, requester, job):
"""
Cancel the given job and send an event to notify the build_system type
who created it to do the actual work.
:param requester str: The email address associated with the user who
made this request
:param job dict: A job object (typically a result of get_job)
"""
self._job_action_event(job, 'cancel', requester)
self.execute(
proc='jobs.updates.cancel_job',
placeholders=[job['job_guid']],
debug_show=self.DEBUG
)
status_publisher = JobStatusPublisher(settings.BROKER_URL)
try:
status_publisher.publish([job['job_guid']], self.project, 'processed')
finally:
status_publisher.disconnect()
def get_log_references(self, job_id):
"""Return the log references for the given ``job_id``."""
data = self.execute(
proc="jobs.selects.get_log_references",
placeholders=[job_id],
debug_show=self.DEBUG,
)
return data
def get_max_job_id(self):
"""Get the maximum job id."""
data = self.get_dhub().execute(
proc="jobs.selects.get_max_job_id",
debug_show=self.DEBUG,
)
return int(data[0]['max_id'] or 0)
@staticmethod
def get_performance_series_cache_key(project, interval_seconds,
machine_platform=None, hash=False):
if machine_platform is None:
key = 'performance-series-summary-%s-%s' % (project,
interval_seconds)
else:
key = 'performance-series-summary-%s-%s-%s' % (project,
interval_seconds,
machine_platform)
if hash:
key += '-hash'
return key
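# For example (hypothetical values), project='mozilla-central',
# interval_seconds=604800 and machine_platform='linux64' yield
# 'performance-series-summary-mozilla-central-604800-linux64',
# with '-hash' appended when hash=True.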
def get_performance_series_summary(self, interval_seconds, machine_platform=None):
"""
Retrieve a summary of all of the property/value list pairs found
in the series_signature table, organized by the signature summaries
that they belong to.
{
'signature1': {
'property1': 'value1',
'property2': 'value2',
...
},
'signature2': {
'property1': 'value1',
'property2': 'value2',
...
}
...
}
This data structure can be used to build a comprehensive set of
options to browse all available performance data in a repository.
"""
# Only retrieve signatures with property/values that have
# received data for the time interval requested
last_updated_limit = utils.get_now_timestamp() - interval_seconds
cache_key = self.get_performance_series_cache_key(self.project, interval_seconds,
machine_platform)
series_summary = cache.get(cache_key, None)
if series_summary:
series_summary = json.loads(utils.decompress_if_needed(series_summary))
else:
data = self.get_dhub().execute(
proc="jobs.selects.get_perf_series_properties",
placeholders=[last_updated_limit, interval_seconds],
debug_show=self.DEBUG,
)
series_summary = defaultdict(dict)
for datum in data:
key, val = datum['property'], datum['value']
if key in self.PERFORMANCE_SERIES_JSON_KEYS:
val = json.loads(val)
series_summary[datum['signature']][key] = val
if machine_platform:
series_summary = dict((key, val) for key, val in series_summary.items()
if val['machine_platform'] == machine_platform)
# HACK: take this out when we're using pylibmc and can use
# compression automatically
series_summary_json = json.dumps(series_summary, sort_keys=True)
cache.set(cache_key, zlib.compress(series_summary_json))
sha = sha1()
sha.update(series_summary_json)
hash_cache_key = self.get_performance_series_cache_key(
self.project, interval_seconds, machine_platform,
hash=True)
cache.set(hash_cache_key, sha.hexdigest())
return series_summary
def get_performance_platforms(self, interval_seconds):
last_updated_limit = utils.get_now_timestamp() - interval_seconds
data = self.get_dhub().execute(
proc="jobs.selects.get_perf_series_properties",
placeholders=[last_updated_limit, interval_seconds],
debug_show=self.DEBUG,
)
platforms = set()
for datum in [datum for datum in data if datum['property'] == 'machine_platform']:
platforms.add(datum['value'])
return platforms
def get_job_note(self, id):
"""Return the job note by id."""
data = self.execute(
proc="jobs.selects.get_job_note",
placeholders=[id],
debug_show=self.DEBUG,
)
return data
def get_job_note_list(self, job_id):
"""Return the job notes by job_id."""
data = self.execute(
proc="jobs.selects.get_job_note_list",
placeholders=[job_id],
debug_show=self.DEBUG,
)
return data
def update_last_job_classification(self, job_id):
"""
Update failure_classification_id on the job table according to
the latest annotation. If none is present, it is reverted to the
default value.
"""
self.execute(
proc='jobs.updates.update_last_job_classification',
placeholders=[
job_id,
],
debug_show=self.DEBUG
)
def insert_job_note(self, job_id, failure_classification_id, who, note):
"""insert a new note for a job and updates its failure classification"""
self.execute(
proc='jobs.inserts.insert_note',
placeholders=[
job_id,
failure_classification_id,
who,
note,
utils.get_now_timestamp(),
],
debug_show=self.DEBUG
)
self.update_last_job_classification(job_id)
def delete_job_note(self, note_id, job_id):
"""
Delete a job note and update the failure classification for that job
"""
self.execute(
proc='jobs.deletes.delete_note',
placeholders=[
note_id,
],
debug_show=self.DEBUG
)
self.update_last_job_classification(job_id)
def insert_bug_job_map(self, job_id, bug_id, assignment_type, submit_timestamp, who):
"""
Store a new relation between the given job and bug ids.
"""
try:
self.execute(
proc='jobs.inserts.insert_bug_job_map',
placeholders=[
job_id,
bug_id,
assignment_type,
submit_timestamp,
who
],
debug_show=self.DEBUG
)
except IntegrityError as e:
raise JobDataIntegrityError(e)
if settings.MIRROR_CLASSIFICATIONS:
job = self.get_job(job_id)[0]
if job["state"] == "completed":
# importing here to avoid an import loop
from treeherder.etl.tasks import (submit_elasticsearch_doc,
submit_bugzilla_comment)
# Submit bug associations to Bugzilla/Elasticsearch using async tasks.
submit_elasticsearch_doc.apply_async(
args=[
self.project,
job_id,
bug_id,
submit_timestamp,
who
],
routing_key='classification_mirroring'
)
submit_bugzilla_comment.apply_async(
args=[
self.project,
job_id,
bug_id,
who,
],
routing_key='classification_mirroring'
)
def delete_bug_job_map(self, job_id, bug_id):
"""
Delete a bug-job entry identified by bug_id and job_id
"""
self.execute(
proc='jobs.deletes.delete_bug_job_map',
placeholders=[
job_id,
bug_id
],
debug_show=self.DEBUG
)
def calculate_eta(self, sample_window_seconds, debug):
# Get the most recent timestamp from jobs
max_timestamp = self.execute(
proc='jobs.selects.get_max_job_submit_timestamp',
return_type='iter',
debug_show=self.DEBUG
).get_column_data('submit_timestamp')
if max_timestamp:
time_window = int(max_timestamp) - sample_window_seconds
eta_groups = self.execute(
proc='jobs.selects.get_eta_groups',
placeholders=[time_window],
key_column='signature',
return_type='dict',
debug_show=self.DEBUG
)
placeholders = []
submit_timestamp = int(time.time())
for signature in eta_groups:
running_samples = map(
lambda x: int(x or 0),
eta_groups[signature]['running_samples'].split(','))
running_median = self.get_median_from_sorted_list(
sorted(running_samples))
placeholders.append(
[
signature,
'running',
eta_groups[signature]['running_avg_sec'],
running_median,
eta_groups[signature]['running_min_sec'],
eta_groups[signature]['running_max_sec'],
eta_groups[signature]['running_std'],
len(running_samples),
submit_timestamp
])
self.execute(
proc='jobs.inserts.set_job_eta',
placeholders=placeholders,
executemany=True,
debug_show=self.DEBUG
)
def get_median_from_sorted_list(self, sorted_list):
length = len(sorted_list)
if length == 0:
return 0
# Cannot take the median with only one sample,
# return it
elif length == 1:
return sorted_list[0]
elif not length % 2:
return round(
(sorted_list[length / 2] + sorted_list[length / 2 - 1]) / 2, 0
)
return round(sorted_list[length / 2], 0)
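# For example (Python 2 semantics, where `/` on ints is integer division):
#   get_median_from_sorted_list([1, 3, 5])    -> 3.0
#   get_median_from_sorted_list([1, 3, 5, 7]) -> 4.0  (mean of 3 and 5)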
def cycle_data(self, cycle_interval, chunk_size, sleep_time):
"""Delete data older than cycle_interval, splitting the target data
into chunks of size chunk_size. Returns the number of result sets deleted."""
jobs_max_timestamp = self._get_max_timestamp(cycle_interval)
# Retrieve list of result sets to delete
result_set_data = self.execute(
proc='jobs.selects.get_result_sets_to_cycle',
placeholders=[jobs_max_timestamp],
debug_show=self.DEBUG
)
if not result_set_data:
return 0
# group the result_set data in chunks
result_set_chunk_list = zip(*[iter(result_set_data)] * chunk_size)
# append the remaining result_set not fitting in a complete chunk
result_set_chunk_list.append(
result_set_data[-(len(result_set_data) % chunk_size):])
for result_set_chunks in result_set_chunk_list:
# Retrieve list of revisions associated with result sets
rs_placeholders = [x['id'] for x in result_set_chunks]
rs_where_in_clause = [','.join(['%s'] * len(rs_placeholders))]
revision_data = self.execute(
proc='jobs.selects.get_revision_ids_to_cycle',
placeholders=rs_placeholders,
replace=rs_where_in_clause,
debug_show=self.DEBUG
)
# Retrieve list of jobs associated with result sets
rev_placeholders = [x['revision_id'] for x in revision_data]
rev_where_in_clause = [','.join(['%s'] * len(rev_placeholders))]
job_data = self.execute(
proc='jobs.selects.get_jobs_to_cycle',
placeholders=rs_placeholders,
replace=rs_where_in_clause,
debug_show=self.DEBUG
)
job_guid_dict = dict((d['id'], d['job_guid']) for d in job_data)
job_where_in_clause = [','.join(['%s'] * len(job_guid_dict))]
# Associate placeholders and replace data with sql
jobs_targets = []
for proc in self.JOBS_CYCLE_TARGETS:
query_name = proc.split('.')[-1]
if query_name == 'cycle_revision':
jobs_targets.append({
"proc": proc,
"placeholders": rev_placeholders,
"replace": rev_where_in_clause
})
elif query_name == 'cycle_revision_map':
jobs_targets.append({
"proc": proc,
"placeholders": rs_placeholders,
"replace": rs_where_in_clause
})
elif query_name == 'cycle_result_set':
jobs_targets.append({
"proc": proc,
"placeholders": rs_placeholders,
"replace": rs_where_in_clause
})
else:
jobs_targets.append({
"proc": proc,
"placeholders": job_guid_dict.keys(),
"replace": job_where_in_clause
})
# remove data from specified jobs tables that is older than max_timestamp
self._execute_table_deletes(jobs_targets, 'jobs', sleep_time)
return len(result_set_data)
def _get_max_timestamp(self, cycle_interval):
max_date = datetime.now() - cycle_interval
return int(time.mktime(max_date.timetuple()))
def _execute_table_deletes(self, sql_to_execute, data_type, sleep_time):
for sql_obj in sql_to_execute:
if not sql_obj['placeholders']:
continue
sql_obj['debug_show'] = self.DEBUG
# Disable foreign key checks to improve performance
self.execute(
proc='generic.db_control.disable_foreign_key_checks',
debug_show=self.DEBUG)
self.execute(**sql_obj)
self.get_dhub().commit('master_host')
# Re-enable foreign key checks to improve performance
self.execute(
proc='generic.db_control.enable_foreign_key_checks',
debug_show=self.DEBUG)
if sleep_time:
# Allow some time for other queries to get through
time.sleep(sleep_time)
def get_bug_job_map_list(self, offset, limit, conditions=None):
"""
Retrieve a list of bug_job_map entries. The conditions parameter is a
dict containing a set of conditions for each key. e.g.:
{
'job_id': set([('IN', (1, 2))])
}
"""
replace_str, placeholders = self._process_conditions(
conditions, self.INDEXED_COLUMNS['bug_job_map']
)
repl = [replace_str]
proc = "jobs.selects.get_bug_job_map_list"
data = self.execute(
proc=proc,
replace=repl,
placeholders=placeholders,
limit=limit,
offset=offset,
debug_show=self.DEBUG,
)
return data
def get_result_set_ids(self, revision_hashes, where_in_list):
"""Return the a dictionary of revision_hash to id mappings given
a list of revision_hashes and a where_in_list.
revision_hashes = [ revision_hash1, revision_hash2, ... ]
where_in_list = [ %s, %s, %s ... ]
returns:
{
revision_hash1:{id: id1, push_timestamp: pt1},
revision_hash2:{id: id2, push_timestamp: pt2},
...
}
"""
result_set_id_lookup = {}
if revision_hashes:
result_set_id_lookup = self.execute(
proc='jobs.selects.get_result_set_ids',
placeholders=revision_hashes,
replace=[where_in_list],
debug_show=self.DEBUG,
key_column='revision_hash',
return_type='dict')
return result_set_id_lookup
def get_result_set_list_by_ids(self, result_set_ids):
conditions = {'id': set([('IN', tuple(result_set_ids))])}
replace_str, placeholders = self._process_conditions(
conditions, self.INDEXED_COLUMNS['result_set']
)
proc = "jobs.selects.get_result_set_list_by_ids"
result_set_ids = self.execute(
proc=proc,
replace=[replace_str],
placeholders=placeholders,
debug_show=self.DEBUG,
)
aggregate_details = self.get_result_set_details(result_set_ids)
return_list = self._merge_result_set_details(
result_set_ids, aggregate_details, True)
return return_list
def get_result_set_list(
self, offset_id, limit, full=True, conditions=None):
"""
Retrieve a list of ``result_sets`` (also known as ``pushes``).
If ``full`` is set to ``True`` then return revisions, too.
No jobs are included.
Mainly used by the restful api to list the pushes in the UI.
"""
replace_str, placeholders = self._process_conditions(
conditions, self.INDEXED_COLUMNS['result_set']
)
# If a push doesn't have jobs we can just
# message the user; it would save us a very expensive join
# with the jobs table.
# Retrieve the filtered/limited list of result sets
proc = "jobs.selects.get_result_set_list"
result_set_ids = self.execute(
proc=proc,
replace=[replace_str],
placeholders=placeholders,
limit=limit,
debug_show=self.DEBUG,
)
aggregate_details = self.get_result_set_details(result_set_ids)
return_list = self._merge_result_set_details(
result_set_ids, aggregate_details, full)
return return_list
def _merge_result_set_details(self, result_set_ids, aggregate_details, full):
# Construct the return dataset, include all revisions associated
# with each result_set in the revisions attribute
return_list = []
for result in result_set_ids:
detail = aggregate_details[result['id']][0]
list_item = {
"id": result['id'],
"revision_hash": result['revision_hash'],
"push_timestamp": result['push_timestamp'],
"repository_id": detail['repository_id'],
"revision": detail['revision'],
"author": result['author'] or detail['author'],
"revision_count": len(aggregate_details[result['id']])
}
# we only return the first 20 revisions.
if full:
list_item.update({
"comments": detail['comments'],
"revisions": aggregate_details[result['id']][:20]
})
return_list.append(list_item)
return return_list
def get_revision_resultset_lookup(self, revision_list):
"""
Create a list of revision->resultset lookups from a list of revisions.
This will retrieve non-active resultsets as well. Some of the data
ingested has mixed up revisions that show for jobs, but are not in
the right repository in builds4hr/running/pending. So we ingest those
bad resultsets/revisions as non-active so that we don't keep trying
to re-ingest them. Allowing this query to retrieve non ``active``
resultsets means we will avoid re-doing that work by detecting that
we've already ingested it.
But we skip ingesting the job, because the resultset is not active.
"""
replacement = ",".join(["%s"] * len(revision_list))
replacement = " AND revision IN (" + replacement + ") "
proc = "jobs.selects.get_revision_resultset_lookup"
lookups = self.execute(
proc=proc,
placeholders=revision_list + [0, len(revision_list)],
debug_show=self.DEBUG,
replace=[replacement],
return_type="dict",
key_column="revision"
)
return lookups
def get_resultset_revisions_list(self, result_set_id):
"""
Return the revisions for the given resultset
"""
proc = "jobs.selects.get_result_set_details"
lookups = self.execute(
proc=proc,
debug_show=self.DEBUG,
placeholders=[result_set_id],
replace=["%s"],
)
return lookups
def get_result_set_details(self, result_set_ids):
"""
Retrieve all revisions associated with a set of ``result_set``
(also known as ``pushes``) ids.
Mainly used by the restful api to list the pushes and their associated
revisions in the UI
"""
if not result_set_ids:
# No result sets provided
return {}
# Generate a list of result_set_ids
ids = []
id_placeholders = []
for data in result_set_ids:
id_placeholders.append('%s')
ids.append(data['id'])
where_in_clause = ','.join(id_placeholders)
# Retrieve revision details associated with each result_set_id
detail_proc = "jobs.selects.get_result_set_details"
result_set_details = self.execute(
proc=detail_proc,
placeholders=ids,
debug_show=self.DEBUG,
replace=[where_in_clause],
)
# Aggregate the revisions by result_set_id
aggregate_details = {}
for detail in result_set_details:
if detail['result_set_id'] not in aggregate_details:
aggregate_details[detail['result_set_id']] = []
aggregate_details[detail['result_set_id']].append(
{
'revision': detail['revision'],
'author': detail['author'],
'repository_id': detail['repository_id'],
'comments': detail['comments'],
'commit_timestamp': detail['commit_timestamp']
})
return aggregate_details
def get_oauth_consumer_secret(self, key):
"""Consumer secret for oauth"""
ds = self.get_datasource()
secret = ds.get_oauth_consumer_secret(key)
return secret
def store_job_data(self, data, raise_errors=False):
"""
Store JobData instances into jobs db
Example:
[
{
"revision_hash": "24fd64b8251fac5cf60b54a915bffa7e51f636b5",
"job": {
"job_guid": "d19375ce775f0dc166de01daa5d2e8a73a8e8ebf",
"name": "xpcshell",
"desc": "foo",
"job_symbol": "XP",
"group_name": "Shelliness",
"group_symbol": "XPC",
"product_name": "firefox",
"state": "TODO",
"result": 0,
"reason": "scheduler",
"who": "sendchange-unittest",
"submit_timestamp": 1365732271,
"start_timestamp": "20130411165317",
"end_timestamp": "1365733932"
"machine": "tst-linux64-ec2-314",
"build_url": "http://....",
"build_platform": {
"platform": "Ubuntu VM 12.04",
"os_name": "linux",
"architecture": "x86_64",
"vm": true
},
"machine_platform": {
"platform": "Ubuntu VM 12.04",
"os_name": "linux",
"architecture": "x86_64",
"vm": true
},
"option_collection": {
"opt": true
},
"log_references": [
{
"url": "http://ftp.mozilla.org/pub/...",
"name": "unittest"
}
],
artifacts:[{
type:" json | img | ...",
name:"",
log_urls:[
]
blob:""
}],
},
"coalesced": []
},
...
]
"""
# Ensure that we have job data to process
if not data:
return
# remove any existing jobs that already have the same state
data = self._remove_existing_jobs(data)
if not data:
return
# Structures supporting revision_hash SQL
revision_hash_lookup = set()
unique_revision_hashes = []
rh_where_in = []
# Structures supporting job SQL
job_placeholders = []
log_placeholders = []
artifact_placeholders = []
coalesced_job_guid_placeholders = []
retry_job_guids = []
async_error_summary_list = []
# get the tier-2 data signatures for this project.
# if there are none, then just return an empty list
tier_2_signatures = []
try:
tier_2 = ExclusionProfile.objects.get(name="Tier-2")
# tier_2_blob = json.loads(tier_2['flat_exclusion'])
tier_2_signatures = set(tier_2.flat_exclusion[self.project])
except KeyError:
# may be no tier 2 jobs for the current project
# and that's ok.
pass
except ObjectDoesNotExist:
# if this profile doesn't exist, then no second tier jobs
# and that's ok.
pass
for datum in data:
# Make sure we can deserialize the json object
# without raising an exception
try:
job = datum['job']
revision_hash = datum['revision_hash']
coalesced = datum.get('coalesced', [])
# TODO: Need a job structure validation step here. Now that
# everything works in list context we cannot detect what
# object is responsible for what error. If we validate here
# we can capture the error and associate it with the object
# and also skip it before generating any database errors.
except JobDataError as e:
if raise_errors:
raise e
continue
except Exception as e:
if raise_errors:
raise e
continue
try:
# json object can be successfully deserialized
# load reference data
job_guid = self._load_ref_and_job_data_structs(
job,
revision_hash,
revision_hash_lookup,
unique_revision_hashes,
rh_where_in,
job_placeholders,
log_placeholders,
artifact_placeholders,
retry_job_guids,
tier_2_signatures,
async_error_summary_list
)
for coalesced_guid in coalesced:
coalesced_job_guid_placeholders.append(
# coalesced to guid, coalesced guid
[job_guid, coalesced_guid]
)
except Exception as e:
if raise_errors:
raise e
# Store all reference data and retrieve associated ids
id_lookups = self.refdata_model.set_all_reference_data()
job_eta_times = self.get_job_eta_times(
id_lookups['reference_data_signatures']
)
# Store all revision hashes and retrieve result_set_ids
result_set_ids = self.get_result_set_ids(
unique_revision_hashes, rh_where_in
)
job_update_placeholders = []
job_guid_list = []
push_timestamps = {}
for index, job in enumerate(job_placeholders):
# Replace reference data with their associated ids
self._set_data_ids(
index,
job_placeholders,
id_lookups,
job_guid_list,
job_update_placeholders,
result_set_ids,
job_eta_times,
push_timestamps
)
job_id_lookup = self._load_jobs(job_placeholders, job_guid_list)
# For each of these ``retry_job_guids`` the job_id_lookup will
# either contain the retry guid, or the root guid (based on whether we
# inserted, or skipped insertion to do an update). So add in
# whichever is missing.
for retry_guid in retry_job_guids:
retry_guid_root = get_guid_root(retry_guid)
lookup_keys = job_id_lookup.keys()
if retry_guid in lookup_keys:
# this retry was inserted in the db at some point
if retry_guid_root not in lookup_keys:
# the root isn't there because there was, for some reason,
# never a pending/running version of this job
retry_job = job_id_lookup[retry_guid]
job_id_lookup[retry_guid_root] = retry_job
elif retry_guid_root in lookup_keys:
# if job_id_lookup contains the root, then the insert
# will have skipped, so we want to find that job
# when looking for the retry_guid for update later.
retry_job = job_id_lookup[retry_guid_root]
job_id_lookup[retry_guid] = retry_job
# Need to iterate over log references separately since they could
# be a different length. Replace job_guid with id in log url
# placeholders
# need also to retrieve the updated status to distinguish between
# failed and successful jobs
job_results = dict((el[0], el[9]) for el in job_update_placeholders)
self._load_log_urls(log_placeholders, job_id_lookup,
job_results)
with ArtifactsModel(self.project) as artifacts_model:
artifacts_model.load_job_artifacts(artifact_placeholders, job_id_lookup)
# schedule the generation of ``Bug suggestions`` artifacts
# asynchronously now that the jobs have been created
if async_error_summary_list:
populate_error_summary.apply_async(
args=[self.project, async_error_summary_list, job_id_lookup],
routing_key='error_summary'
)
# If there is already a job_id stored with pending/running status
# we need to update the information for the complete job
if job_update_placeholders:
# replace job_guid with job_id
for row in job_update_placeholders:
row[-1] = job_id_lookup[
get_guid_root(row[-1])
]['id']
self.execute(
proc='jobs.updates.update_job_data',
debug_show=self.DEBUG,
placeholders=job_update_placeholders,
executemany=True)
# set the job_coalesced_to_guid column for any coalesced
# job found
if coalesced_job_guid_placeholders:
self.execute(
proc='jobs.updates.update_coalesced_guids',
debug_show=self.DEBUG,
placeholders=coalesced_job_guid_placeholders,
executemany=True)
def _remove_existing_jobs(self, data):
"""
Remove jobs from data where we already have them in the same state.
1. split the incoming jobs into pending, running and complete.
2. fetch the ``job_guids`` from the db that are in the same state as they
are in ``data``.
3. build a new list of jobs in ``new_data`` that are not already in
the db and pass that back. It could end up empty at that point.
"""
states = {
'pending': [],
'running': [],
'completed': [],
}
data_idx = []
new_data = []
placeholders = []
state_clauses = []
for i, datum in enumerate(data):
try:
job = datum['job']
job_guid = str(job['job_guid'])
states[str(job['state'])].append(job_guid)
# index this place in the ``data`` object
data_idx.append(job_guid)
except Exception:
data_idx.append("skipped")
# it will get caught later in ``store_job_data``
# adding the guid as "skipped" will mean it won't be found
# in the returned list of dup guids from the db.
# This will cause the bad job to be re-added
# to ``new_data`` so that the error can be handled
# in ``store_job_data``.
for state, guids in states.items():
if guids:
placeholders.append(state)
placeholders.extend(guids)
state_clauses.append(
"(`state` = %s AND `job_guid` IN ({0}))".format(
",".join(["%s"] * len(guids))
)
)
replacement = ' OR '.join(state_clauses)
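# For example (hypothetical guids), two pending jobs and one completed job
# produce a replacement such as
#   (`state` = %s AND `job_guid` IN (%s,%s)) OR (`state` = %s AND `job_guid` IN (%s))
# with placeholders ['pending', 'guid-1', 'guid-2', 'completed', 'guid-3'].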
if placeholders:
existing_guids = self.execute(
proc='jobs.selects.get_job_guids_in_states',
placeholders=placeholders,
replace=[replacement],
key_column='job_guid',
return_type='set',
debug_show=self.DEBUG,
)
# build a new list of jobs without those we already have loaded
for i, guid in enumerate(data_idx):
if guid not in existing_guids:
new_data.append(data[i])
return new_data
def _load_ref_and_job_data_structs(
self, job, revision_hash, revision_hash_lookup,
unique_revision_hashes, rh_where_in, job_placeholders,
log_placeholders, artifact_placeholders, retry_job_guids,
tier_2_signatures, async_artifact_list
):
"""
Take the raw job object after etl and convert it to job_placeholders.
If the job is a ``retry`` the ``job_guid`` will have a special
suffix on it. But the matching ``pending``/``running`` job will not.
So we append the suffixed ``job_guid`` to ``retry_job_guids``
so that we can update the job_id_lookup later with the non-suffixed
``job_guid`` (root ``job_guid``). Then we can find the right
``pending``/``running`` job and update it with this ``retry`` job.
"""
# Store revision_hash to support SQL construction
# for result_set entry
if revision_hash not in revision_hash_lookup:
unique_revision_hashes.append(revision_hash)
rh_where_in.append('%s')
build_os_name = job.get(
'build_platform', {}).get('os_name', 'unknown')
build_platform = job.get(
'build_platform', {}).get('platform', 'unknown')
build_architecture = job.get(
'build_platform', {}).get('architecture', 'unknown')
build_platform_key = self.refdata_model.add_build_platform(
build_os_name, build_platform, build_architecture
)
machine_os_name = job.get(
'machine_platform', {}).get('os_name', 'unknown')
machine_platform = job.get(
'machine_platform', {}).get('platform', 'unknown')
machine_architecture = job.get(
'machine_platform', {}).get('architecture', 'unknown')
machine_platform_key = self.refdata_model.add_machine_platform(
machine_os_name, machine_platform, machine_architecture
)
option_collection_hash = self.refdata_model.add_option_collection(
job.get('option_collection', [])
)
machine = job.get('machine', 'unknown')
self.refdata_model.add_machine(
machine,
long(job.get("end_timestamp", time.time()))
)
device_name = job.get('device_name', 'unknown')
self.refdata_model.add_device(device_name)
job_type = job.get('name', 'unknown')
job_symbol = job.get('job_symbol', 'unknown')
group_name = job.get('group_name', 'unknown')
group_symbol = job.get('group_symbol', 'unknown')
job_type_key = self.refdata_model.add_job_type(
job_type, job_symbol, group_name, group_symbol
)
product = job.get('product_name', 'unknown')
if len(product.strip()) == 0:
product = 'unknown'
self.refdata_model.add_product(product)
job_guid = job['job_guid']
job_guid = job_guid[0:50]
who = job.get('who') or 'unknown'
who = who[0:50]
reason = job.get('reason') or 'unknown'
reason = reason[0:125]
state = job.get('state') or 'unknown'
state = state[0:25]
if job.get('result', 'unknown') == 'retry':
retry_job_guids.append(job_guid)
build_system_type = job.get('build_system_type', 'buildbot')
# Should be the buildername in the case of buildbot
reference_data_name = job.get('reference_data_name', None)
signature = self.refdata_model.add_reference_data_signature(
reference_data_name, build_system_type, self.project,
[build_system_type, self.project, build_os_name, build_platform, build_architecture,
machine_os_name, machine_platform, machine_architecture,
device_name, group_name, group_symbol, job_type, job_symbol,
option_collection_hash]
)
job_tier = job.get('tier') or 1
# job tier signatures override the setting from the job structure
tier = 2 if signature in tier_2_signatures else job_tier
job_placeholders.append([
job_guid,
signature,
None, # idx:2, job_coalesced_to_guid,
revision_hash, # idx:3, replace with result_set_id
build_platform_key, # idx:4, replace with build_platform_id
machine_platform_key, # idx:5, replace with machine_platform_id
machine, # idx:6, replace with machine_id
device_name, # idx:7, replace with device_id
option_collection_hash, # idx:8
job_type_key, # idx:9, replace with job_type_id
product, # idx:10, replace with product_id
who,
reason,
job.get('result', 'unknown'), # idx:13, this is typically an int
state,
self.get_number(job.get('submit_timestamp')),
self.get_number(job.get('start_timestamp')),
self.get_number(job.get('end_timestamp')),
0, # idx:18, replace with running_avg_sec
tier,
job_guid,
get_guid_root(job_guid) # will be the same except for ``retry`` jobs
])
artifacts = job.get('artifacts', [])
has_text_log_summary = False
if artifacts:
artifacts = ArtifactsModel.serialize_artifact_json_blobs(artifacts)
# the artifacts in this list could be ones that should have
# bug suggestions generated for them. If so, queue them to be
# scheduled for asynchronous generation.
tls_list = error_summary.get_artifacts_that_need_bug_suggestions(
artifacts)
async_artifact_list.extend(tls_list)
# need to add job guid to artifacts, since they likely weren't
# present in the beginning
for artifact in artifacts:
if not all(k in artifact for k in ("name", "type", "blob")):
raise JobDataError(
"Artifact missing properties: {}".format(artifact))
artifact_placeholder = artifact.copy()
artifact_placeholder['job_guid'] = job_guid
artifact_placeholders.append(artifact_placeholder)
has_text_log_summary = any(x for x in artifacts
if x['name'] == 'text_log_summary')
log_refs = job.get('log_references', [])
if log_refs:
for log in log_refs:
name = log.get('name') or 'unknown'
name = name[0:50]
url = log.get('url') or 'unknown'
url = url[0:255]
# this indicates that a summary artifact was submitted with
# this job that corresponds to the buildbot_text log url.
# Therefore, the log does not need parsing. So we should
# ensure that it's marked as already parsed.
if has_text_log_summary and name == 'buildbot_text':
parse_status = 'parsed'
else:
# the parsing status of this log. 'pending' or 'parsed'
parse_status = log.get('parse_status', 'pending')
log_placeholders.append([job_guid, name, url, parse_status])
return job_guid
def get_number(self, s):
try:
return long(s)
except (ValueError, TypeError):
return 0
def _set_data_ids(
self, index, job_placeholders, id_lookups,
job_guid_list, job_update_placeholders,
result_set_ids, job_eta_times, push_timestamps
):
"""
Supplant ref data with ids and create update placeholders
Pending jobs should be updated, rather than created.
``job_placeholders`` are used for creating new jobs.
``job_update_placeholders`` are used for updating existing non-complete
jobs
"""
# Replace reference data with their ids
job_guid = job_placeholders[index][
self.JOB_PH_JOB_GUID]
job_coalesced_to_guid = job_placeholders[index][
self.JOB_PH_COALESCED_TO_GUID]
revision_hash = job_placeholders[index][
self.JOB_PH_RESULT_SET_ID]
build_platform_key = job_placeholders[index][
self.JOB_PH_BUILD_PLATFORM_KEY]
machine_platform_key = job_placeholders[index][
self.JOB_PH_MACHINE_PLATFORM_KEY]
machine_name = job_placeholders[index][
self.JOB_PH_MACHINE_NAME]
device_name = job_placeholders[index][
self.JOB_PH_DEVICE_NAME]
option_collection_hash = job_placeholders[index][
self.JOB_PH_OPTION_COLLECTION_HASH]
job_type_key = job_placeholders[index][self.JOB_PH_TYPE_KEY]
product_type = job_placeholders[index][self.JOB_PH_PRODUCT_TYPE]
who = job_placeholders[index][self.JOB_PH_WHO]
reason = job_placeholders[index][self.JOB_PH_REASON]
result = job_placeholders[index][self.JOB_PH_RESULT]
job_state = job_placeholders[index][self.JOB_PH_STATE]
start_timestamp = job_placeholders[index][self.JOB_PH_START_TIMESTAMP]
end_timestamp = job_placeholders[index][self.JOB_PH_END_TIMESTAMP]
# Load job_placeholders
# replace revision_hash with id
result_set = result_set_ids[revision_hash]
job_placeholders[index][
self.JOB_PH_RESULT_SET_ID] = result_set['id']
push_timestamps[result_set['id']] = result_set['push_timestamp']
# replace build_platform_key with id
build_platform_id = id_lookups['build_platforms'][build_platform_key]['id']
job_placeholders[index][
self.JOB_PH_BUILD_PLATFORM_KEY] = build_platform_id
# replace machine_platform_key with id
machine_platform_id = id_lookups['machine_platforms'][machine_platform_key]['id']
job_placeholders[index][
self.JOB_PH_MACHINE_PLATFORM_KEY] = machine_platform_id
# replace machine with id
job_placeholders[index][
self.JOB_PH_MACHINE_NAME] = id_lookups['machines'][machine_name]['id']
job_placeholders[index][
self.JOB_PH_DEVICE_NAME] = id_lookups['devices'][device_name]['id']
# replace job_type with id
job_type_id = id_lookups['job_types'][job_type_key]['id']
job_placeholders[index][self.JOB_PH_TYPE_KEY] = job_type_id
# replace product_type with id
job_placeholders[index][
self.JOB_PH_PRODUCT_TYPE] = id_lookups['products'][product_type]['id']
job_guid_list.append(job_guid)
# for retry jobs, we may have a different job_guid than the root of job_guid
# because retry jobs append a suffix for uniqueness (since the job_guid
# won't be unique due to them all having the same request_id and request_time.
# But there may be a ``pending`` or ``running`` job that this retry
# should be updating, so make sure to add the root ``job_guid`` as well.
job_guid_root = get_guid_root(job_guid)
if job_guid != job_guid_root:
job_guid_list.append(job_guid_root)
reference_data_signature = job_placeholders[index][1]
running_avg_sec = job_eta_times.get(reference_data_signature, {}).get('running', 0)
job_placeholders[index][self.JOB_PH_RUNNING_AVG] = running_avg_sec
# Load job_update_placeholders
if job_state != 'pending':
job_update_placeholders.append([
job_guid,
job_coalesced_to_guid,
result_set_ids[revision_hash]['id'],
id_lookups['machines'][machine_name]['id'],
option_collection_hash,
id_lookups['job_types'][job_type_key]['id'],
id_lookups['products'][product_type]['id'],
who,
reason,
result,
job_state,
start_timestamp,
end_timestamp,
job_state,
get_guid_root(job_guid)
])
def _load_jobs(self, job_placeholders, job_guid_list):
if not job_placeholders:
return {}
# Store job data
self.execute(
proc='jobs.inserts.create_job_data',
debug_show=self.DEBUG,
placeholders=job_placeholders,
executemany=True)
return self.get_job_ids_by_guid(job_guid_list)
def get_job_eta_times(self, reference_data_signatures):
eta_lookup = {}
if len(reference_data_signatures) == 0:
return eta_lookup
rds_where_in_clause = ','.join(['%s'] * len(reference_data_signatures))
job_eta_data = self.execute(
proc='jobs.selects.get_last_eta_by_signatures',
debug_show=self.DEBUG,
replace=[rds_where_in_clause],
placeholders=reference_data_signatures)
for eta_data in job_eta_data:
signature = eta_data['signature']
state = eta_data['state']
if signature not in eta_lookup:
eta_lookup[signature] = {}
if state not in eta_lookup[signature]:
eta_lookup[signature][state] = {}
eta_lookup[signature][state] = eta_data['avg_sec']
return eta_lookup
def get_job_ids_by_guid(self, job_guid_list):
job_guid_where_in_clause = ",".join(["%s"] * len(job_guid_list))
job_id_lookup = self.execute(
proc='jobs.selects.get_job_ids_by_guids',
debug_show=self.DEBUG,
replace=[job_guid_where_in_clause],
placeholders=job_guid_list,
key_column='job_guid',
return_type='dict')
return job_id_lookup
def _load_log_urls(self, log_placeholders, job_id_lookup,
job_results):
# importing here to avoid an import loop
from treeherder.log_parser.tasks import parse_log, parse_json_log
tasks = []
result_sets = []
if log_placeholders:
for index, log_ref in enumerate(log_placeholders):
job_guid = log_ref[0]
job_id = job_id_lookup[job_guid]['id']
result = job_results[job_guid]
result_set_id = job_id_lookup[job_guid]['result_set_id']
result_sets.append(result_set_id)
# Replace job_guid with id
log_placeholders[index][0] = job_id
task = dict()
# a log can be submitted already parsed. So only schedule
# a parsing task if it's ``pending``
# the submitter is then responsible for submitting the
# text_log_summary artifact
if log_ref[3] == 'pending':
if log_ref[1] == 'mozlog_json':
# don't parse structured logs for passing tests
if result != 'success':
task['routing_key'] = 'parse_log.json'
else:
if result != 'success':
task['routing_key'] = 'parse_log.failures'
else:
task['routing_key'] = 'parse_log.success'
if 'routing_key' in task:
task['job_guid'] = job_guid
task['log_url'] = log_ref[2]
task['result_set_id'] = result_set_id
tasks.append(task)
# Store the log references
self.execute(
proc='jobs.inserts.set_job_log_url',
debug_show=self.DEBUG,
placeholders=log_placeholders,
executemany=True)
# We need to find the job_log_url ids
# just inserted, but there's no unique key.
# Also, the url column is not indexed, so it's
# not a good idea to search based on that.
# Instead, retrieve the logs by job ids and then
# use their url to create a map.
job_ids = [j["id"] for j in job_id_lookup.values()]
job_log_url_list = self.get_job_log_url_list(job_ids)
log_url_lookup = dict([(jlu['url'], jlu)
for jlu in job_log_url_list])
for task in tasks:
parse_log_task = parse_log
if task['routing_key'] == "parse_log.json":
parse_log_task = parse_json_log
parse_log_task.apply_async(
args=[
self.project,
log_url_lookup[task['log_url']],
task['job_guid'],
],
routing_key=task['routing_key']
)
def get_job_log_url_detail(self, job_log_url_id):
obj = self.execute(
proc='jobs.selects.get_job_log_url_detail',
debug_show=self.DEBUG,
placeholders=[job_log_url_id])
if len(obj) == 0:
raise ObjectNotFoundException("job_log_url", id=job_log_url_id)
return obj[0]
def get_job_log_url_list(self, job_ids):
"""
Return a list of logs belonging to the given job_id(s).
"""
if len(job_ids) == 0:
return []
replacement = []
id_placeholders = ["%s"] * len(job_ids)
replacement.append(','.join(id_placeholders))
data = self.execute(
proc="jobs.selects.get_job_log_url_list",
placeholders=job_ids,
replace=replacement,
debug_show=self.DEBUG,
)
return data
def update_job_log_url_status(self, job_log_url_id, parse_status):
self.execute(
proc='jobs.updates.update_job_log_url',
debug_show=self.DEBUG,
placeholders=[parse_status, job_log_url_id])
def get_performance_series_from_signatures(self, signatures, interval_seconds):
repl = [','.join(['%s'] * len(signatures))]
placeholders = signatures
placeholders.append(str(interval_seconds))
data = self.execute(
proc="jobs.selects.get_performance_series_from_signatures",
debug_show=self.DEBUG,
placeholders=placeholders,
replace=repl)
data = [{"series_signature": x["series_signature"],
"blob": json.loads(utils.decompress_if_needed(x["blob"]))} for x in data]
return data
def get_signatures_from_properties(self, props):
props_where_repl = [
' OR '.join(['(`property`=%s AND `value`=%s)'] * len(props)),
' AND '.join(['COALESCE(SUM(`property`=%s AND `value`=%s), 0) > 0'] * len(props))]
# convert to 1 dimensional list
props = [el for x in props.items() for el in x]
props.extend(props)
signatures = self.execute(
proc="jobs.selects.get_signatures_from_properties",
debug_show=self.DEBUG,
placeholders=props,
replace=props_where_repl)
if not signatures:
return {"success": False}
signatures = [x.get("signature") for x in signatures]
signatures_repl = [','.join(['%s'] * len(signatures))]
properties = self.execute(
proc="jobs.selects.get_all_properties_of_signatures",
debug_show=self.DEBUG,
placeholders=signatures,
replace=signatures_repl)
ret = {}
for d in properties:
sig = d["signature"]
ret[sig] = ret[sig] if sig in ret else {}
ret[sig][d["property"]] = d["value"]
return ret
def get_signature_properties(self, signatures):
signatures_repl = [','.join(['%s'] * len(signatures))]
properties = self.execute(
proc="jobs.selects.get_all_properties_of_signatures",
debug_show=self.DEBUG,
placeholders=signatures,
replace=signatures_repl)
sigdict = {}
for property in properties:
signature = property['signature']
if not sigdict.get(signature):
sigdict[signature] = {}
(key, val) = (property['property'], property['value'])
if key in self.PERFORMANCE_SERIES_JSON_KEYS:
val = json.loads(val)
sigdict[signature][key] = val
ret = []
for signature in signatures:
if not sigdict.get(signature):
return ObjectNotFoundException("signature", id=signature)
ret.append(sigdict[signature])
return ret
def set_series_signature(self, signature_hash, signature_props):
signature_property_placeholders = []
for (k, v) in signature_props.iteritems():
if not isinstance(v, basestring):
v = json.dumps(v)
signature_property_placeholders.append([
str(signature_hash), str(k), str(v),
str(signature_hash), str(k), str(v),
])
self.execute(
proc='jobs.inserts.set_series_signature',
debug_show=self.DEBUG,
placeholders=signature_property_placeholders,
executemany=True)
def store_performance_series(
self, t_range, series_type, signature, series_data):
# Use MySQL GETLOCK function to guard against concurrent celery tasks
# overwriting each other's blobs. The lock incorporates the time
# interval and signature combination and is specific to a single
# json blob.
lock_string = "sps_{0}_{1}_{2}".format(
t_range, series_type, signature)
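# For example (hypothetical values), t_range=604800, series_type='talos_data'
# and signature='f2a9...' yield the lock string 'sps_604800_talos_data_f2a9...'.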
lock_timeout = settings.PERFHERDER_UPDATE_SERIES_LOCK_TIMEOUT
# first, wait for lock to become free
started = time.time()
while time.time() < (started + lock_timeout):
is_lock_free = bool(self.execute(
proc='generic.locks.is_free_lock',
debug_show=self.DEBUG,
placeholders=[lock_string])[0]['lock'])
if is_lock_free:
break
time.sleep(0.1)
if not is_lock_free:
logger.error(
'store_performance_series lock_string, '
'{0}, timed out!'.format(lock_string)
)
return
# now, acquire the lock
self.execute(
proc='generic.locks.get_lock',
debug_show=self.DEBUG,
placeholders=[lock_string])
try:
now_timestamp = int(time.time())
# If we don't have this t_range/signature combination create it
series_data_json = json.dumps(series_data)
insert_placeholders = [
t_range, signature,
series_type,
now_timestamp,
zlib.compress(series_data_json),
t_range,
signature,
]
self.execute(
proc='jobs.inserts.set_performance_series',
debug_show=self.DEBUG,
placeholders=insert_placeholders)
# delete any previous instance of the cached copy of the perf
# series summary, since it's now out of date
cache.delete(self.get_performance_series_cache_key(self.project,
t_range))
# Retrieve and update the series
performance_series = self.execute(
proc='jobs.selects.get_performance_series',
debug_show=self.DEBUG,
placeholders=[t_range, signature])
db_series_json = utils.decompress_if_needed(performance_series[0]['blob'])
# If they're equal this was the first time the t_range
# and signature combination was stored, so there's nothing to
# do
if series_data_json != db_series_json:
series = json.loads(db_series_json)
series.extend(series_data)
# expire any entries which are too old
push_timestamp_limit = now_timestamp - int(t_range)
series = filter(
lambda d: d['push_timestamp'] >= push_timestamp_limit,
series
)
if series:
# in case the same data was submitted to be added to the
# db twice (with our setup as of 2015/07, this can happen
# if we parse the same talos log more than once), remove any
# duplicate entries.
# technique from: http://stackoverflow.com/a/9427216
series = [dict(t) for t in set([tuple(sorted(d.items())) for d in
series])]
# sort the series by result set id
series = sorted(
series, key=itemgetter('result_set_id'),
)
update_placeholders = [
now_timestamp,
zlib.compress(json.dumps(series)),
t_range,
signature,
]
self.execute(
proc='jobs.updates.update_performance_series',
debug_show=self.DEBUG,
placeholders=update_placeholders)
except Exception as e:
raise e
finally:
# Make sure we release the lock no matter what errors
# are generated
self.execute(
proc='generic.locks.release_lock',
debug_show=self.DEBUG,
placeholders=[lock_string])
def _get_last_insert_id(self):
"""Return last-inserted ID."""
return self.get_dhub().execute(
proc='generic.selects.get_last_insert_id',
debug_show=self.DEBUG,
return_type='iter',
).get_column_data('id')
def store_result_set_data(self, result_sets):
"""
Build single queries to add new result_sets, revisions, and
revision_map for a list of result_sets.
result_sets = [
{
"revision_hash": "8afdb7debc82a8b6e0d56449dfdf916c77a7bf80",
"push_timestamp": 1378293517,
"author": "some-sheriff@mozilla.com",
"revisions": [
{
"comment": "Bug 911954 - Add forward declaration of JSScript to TraceLogging.h, r=h4writer",
"repository": "test_treeherder",
"author": "John Doe <jdoe@mozilla.com>",
"branch": "default",
"revision": "2c25d2bbbcd6"
},
...
]
},
...
]
returns = {
}
"""
if not result_sets:
return {}
# result_set data structures
revision_hash_placeholders = []
unique_revision_hashes = []
where_in_list = []
# revision data structures
repository_id_lookup = dict()
revision_placeholders = []
all_revisions = []
rev_where_in_list = []
# revision_map structures
revision_to_rhash_lookup = dict()
# TODO: Confirm whether we need to do a lookup in this loop in the
# memcache to reduce query overhead
for result in result_sets:
revision_hash_placeholders.append(
[
result.get('author', 'unknown@somewhere.com'),
result['revision_hash'],
result['push_timestamp'],
result.get('active_status', 'active'),
result['revision_hash']
]
)
where_in_list.append('%s')
unique_revision_hashes.append(result['revision_hash'])
for rev_datum in result['revisions']:
# Retrieve the associated repository id just once
# and provide handling for multiple repositories
if rev_datum['repository'] not in repository_id_lookup:
repository_id = self.refdata_model.get_repository_id(
rev_datum['repository']
)
repository_id_lookup[rev_datum['repository']] = repository_id
# We may not have a commit timestamp in the push data
commit_timestamp = rev_datum.get(
'commit_timestamp', None
)
# We may not have a comment in the push data
comment = rev_datum.get(
'comment', None
)
repository_id = repository_id_lookup[rev_datum['repository']]
revision_placeholders.append(
[rev_datum['revision'],
rev_datum['author'],
comment,
commit_timestamp,
repository_id,
rev_datum['revision'],
repository_id]
)
all_revisions.append(rev_datum['revision'])
rev_where_in_list.append('%s')
revision_to_rhash_lookup[rev_datum['revision']] = result['revision_hash']
# Retrieve a list of revision_hashes that have already been stored
# in the list of unique_revision_hashes. Use it to determine the new
# result_sets found to publish to pulse.
where_in_clause = ','.join(where_in_list)
result_set_ids_before = self.execute(
proc='jobs.selects.get_result_set_ids',
placeholders=unique_revision_hashes,
replace=[where_in_clause],
key_column='revision_hash',
return_type='set',
debug_show=self.DEBUG
)
# Insert new result sets
self.execute(
proc='jobs.inserts.set_result_set',
placeholders=revision_hash_placeholders,
executemany=True,
debug_show=self.DEBUG
)
lastrowid = self.get_dhub().connection['master_host']['cursor'].lastrowid
# Retrieve new and already existing result set ids
result_set_id_lookup = self.execute(
proc='jobs.selects.get_result_set_ids',
placeholders=unique_revision_hashes,
replace=[where_in_clause],
key_column='revision_hash',
return_type='dict',
debug_show=self.DEBUG
)
# identify the newly inserted result sets
result_set_ids_after = set(result_set_id_lookup.keys())
inserted_result_sets = result_set_ids_after.difference(
result_set_ids_before
)
inserted_result_set_ids = []
# If cursor.lastrowid is > 0 rows were inserted on this
# cursor. When new rows are inserted, determine the new
# result_set ids and submit publish to pulse tasks.
if inserted_result_sets and lastrowid > 0:
for revision_hash in inserted_result_sets:
inserted_result_set_ids.append(
result_set_id_lookup[revision_hash]['id']
)
# Insert new revisions
self.execute(
proc='jobs.inserts.set_revision',
placeholders=revision_placeholders,
executemany=True,
debug_show=self.DEBUG
)
# Retrieve new revision ids
rev_where_in_clause = ','.join(rev_where_in_list)
revision_id_lookup = self.execute(
proc='jobs.selects.get_revisions',
placeholders=all_revisions,
replace=[rev_where_in_clause],
key_column='revision',
return_type='dict',
debug_show=self.DEBUG
)
# Build placeholders for revision_map
revision_map_placeholders = []
for revision in revision_id_lookup:
revision_hash = revision_to_rhash_lookup[revision]
revision_id = revision_id_lookup[revision]['id']
result_set_id = result_set_id_lookup[revision_hash]['id']
revision_map_placeholders.append(
[revision_id,
result_set_id,
revision_id,
result_set_id]
)
# Insert new revision_map entries
self.execute(
proc='jobs.inserts.set_revision_map',
placeholders=revision_map_placeholders,
executemany=True,
debug_show=self.DEBUG
)
if len(inserted_result_set_ids) > 0:
# Queue an event to notify pulse of these new resultsets
publish_resultset.apply_async(
args=[self.project, inserted_result_set_ids],
routing_key='publish_to_pulse'
)
return {
'result_set_ids': result_set_id_lookup,
'revision_ids': revision_id_lookup,
'inserted_result_set_ids': inserted_result_set_ids
}
def get_revision_timestamp(self, rev):
"""Get the push timestamp of the resultset for a revision"""
return self.get_revision_resultset_lookup([rev])[rev][
"push_timestamp"
]
def get_exclusion_profile_signatures(self, exclusion_profile):
"""Retrieve the reference data signatures associates to an exclusion profile"""
signatures = []
try:
if exclusion_profile == "default":
profile = ExclusionProfile.objects.get(
is_default=True
)
else:
profile = ExclusionProfile.objects.get(
name=exclusion_profile
)
signatures = profile.flat_exclusion[self.project]
except KeyError:
# this repo/project has no hidden signatures
pass
except ExclusionProfile.DoesNotExist:
            # Either there's no default profile set up or the specified
            # profile is not available
pass
return signatures
def get_resultset_status(self, resultset_id, exclusion_profile="default"):
"""Retrieve an aggregated job count for the given resultset.
If an exclusion profile is provided, the job counted will be filtered accordingly"""
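        # Illustrative return value (hypothetical counts):
        #   {'success': 20, 'testfailed': 2, 'running': 5, 'coalesced': 1}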
replace = []
placeholders = [resultset_id]
if exclusion_profile:
signature_list = self.get_exclusion_profile_signatures(exclusion_profile)
if signature_list:
signatures_replacement = ",".join(["%s"] * len(signature_list))
replace.append(
"AND signature NOT IN ({0})".format(signatures_replacement)
)
placeholders += signature_list
        resultset_status_list = self.execute(
proc='jobs.selects.get_resultset_status',
placeholders=placeholders,
replace=replace,
debug_show=self.DEBUG)
num_coalesced = 0
resultset_status_dict = {}
        for rs in resultset_status_list:
num_coalesced += rs['num_coalesced'] if rs['num_coalesced'] else 0
if rs['state'] == 'completed':
resultset_status_dict[rs['result']] = int(rs['total']) - rs['num_coalesced']
else:
resultset_status_dict[rs['state']] = int(rs['total'])
if num_coalesced:
resultset_status_dict['coalesced'] = num_coalesced
return resultset_status_dict
class JobDataError(ValueError):
pass
class JobDataIntegrityError(IntegrityError):
pass
class JobData(dict):
"""
Encapsulates data access from incoming test data structure.
All missing-data errors raise ``JobDataError`` with a useful
message. Unlike regular nested dictionaries, ``JobData`` keeps track of
context, so errors contain not only the name of the immediately-missing
key, but the full parent-key context as well.
"""
def __init__(self, data, context=None):
"""Initialize ``JobData`` with a data dict and a context list."""
self.context = context or []
super(JobData, self).__init__(data)
@classmethod
def from_json(cls, json_blob):
"""Create ``JobData`` from a JSON string."""
try:
data = json.loads(json_blob)
except ValueError as e:
raise JobDataError("Malformed JSON: {0}".format(e))
return cls(data)
def __getitem__(self, name):
"""Get a data value, raising ``JobDataError`` if missing."""
full_context = list(self.context) + [name]
try:
value = super(JobData, self).__getitem__(name)
except KeyError:
raise JobDataError("Missing data: {0}.".format(
"".join(["['{0}']".format(c) for c in full_context])))
# Provide the same behavior recursively to nested dictionaries.
if isinstance(value, dict):
value = self.__class__(value, full_context)
return value
| vaishalitekale/treeherder | treeherder/model/derived/jobs.py | Python | mpl-2.0 | 83,969 |
import torch
def get_data(params):
batch_size = params["batch_size"]
M, N, K = params["problem"]["size"]
flop = (2.0 * M * N * K)
params["problem"]["flop_estimated"] = flop * params["nb_epoch"] * batch_size
m = params["problem"]["size"][0]
n = params["problem"]["size"][1]
k = params["problem"]["size"][2]
matr_1 = torch.randn(batch_size, m, n)
matr_2 = torch.randn(batch_size, n, k)
return matr_1, matr_2
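# Illustrative sketch of the params layout expected by get_data (the key names
# are taken from the lookups above; the values are made up):
#
#   params = {
#       "batch_size": 32,
#       "nb_epoch": 10,
#       "problem": {"size": [256, 256, 256]},
#   }
#   matr_1, matr_2 = get_data(params)  # two tensors of shape (32, 256, 256)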
| undertherain/benchmarker | benchmarker/kernels/batchmatmul/data.py | Python | mpl-2.0 | 448 |
# Details used to log into Reddit.
reddit_client_id = ""
reddit_client_secret = ""
reddit_user = ""
reddit_pass = ""
# Auth key used to log into Discord.
discord_key = ""
# Command/feature modules.
module_names = (
"default",
)
# Do not change this value!
config_version = 2
| Yalnix/BarryBot | config.py | Python | mpl-2.0 | 290 |
from textwrap import dedent
from bedrock.mozorg.tests import TestCase
from bedrock.sitemaps.models import NO_LOCALE, SitemapURL
class TestSitemapView(TestCase):
def setUp(self):
data = [
{
'path': '/firefox/all/',
'locale': 'de',
'lastmod': '2020-07-01T21:07:08.730133+00:00'
},
{
'path': '/firefox/',
'locale': 'de',
'lastmod': '2020-07-01T21:07:08.730133+00:00'
},
{
'path': '/privacy/',
'locale': 'fr',
},
{
'path': '/firefox/',
'locale': 'fr',
'lastmod': '2020-07-01T21:07:08.730133+00:00'
},
{
'path': '/keymaster/gatekeeper/there.is.only.xul',
'locale': NO_LOCALE,
'lastmod': '2020-07-01T21:07:08.730133+00:00'
},
{
'path': '/locales/',
'locale': NO_LOCALE,
},
]
SitemapURL.objects.bulk_create(SitemapURL(**kw) for kw in data)
def test_index(self):
good_resp = dedent("""\
<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xhtml="http://www.w3.org/1999/xhtml">
<sitemap>
<loc>https://www.mozilla.org/sitemap_none.xml</loc>
</sitemap>
<sitemap>
<loc>https://www.mozilla.org/de/sitemap.xml</loc>
</sitemap>
<sitemap>
<loc>https://www.mozilla.org/fr/sitemap.xml</loc>
</sitemap>
</sitemapindex>""")
resp = self.client.get('/sitemap.xml')
assert resp.content.decode() == good_resp
def test_none(self):
good_resp = dedent("""\
<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xhtml="http://www.w3.org/1999/xhtml">
<url>
<loc>https://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul</loc>
<lastmod>2020-07-01T21:07:08.730133+00:00</lastmod>
</url>
<url>
<loc>https://www.mozilla.org/locales/</loc>
</url>
</urlset>""")
resp = self.client.get('/sitemap_none.xml')
assert resp.content.decode() == good_resp
def test_locales(self):
good_resp = dedent("""\
<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xhtml="http://www.w3.org/1999/xhtml">
<url>
<loc>https://www.mozilla.org/de/firefox/</loc>
<lastmod>2020-07-01T21:07:08.730133+00:00</lastmod>
</url>
<url>
<loc>https://www.mozilla.org/de/firefox/all/</loc>
<lastmod>2020-07-01T21:07:08.730133+00:00</lastmod>
</url>
</urlset>""")
resp = self.client.get('/de/sitemap.xml')
assert resp.content.decode() == good_resp
good_resp = dedent("""\
<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xhtml="http://www.w3.org/1999/xhtml">
<url>
<loc>https://www.mozilla.org/fr/firefox/</loc>
<lastmod>2020-07-01T21:07:08.730133+00:00</lastmod>
</url>
<url>
<loc>https://www.mozilla.org/fr/privacy/</loc>
</url>
</urlset>""")
resp = self.client.get('/fr/sitemap.xml')
assert resp.content.decode() == good_resp
| MichaelKohler/bedrock | bedrock/sitemaps/tests/test_views.py | Python | mpl-2.0 | 3,829 |
#!/usr/bin/env python3
'''ioriodb CLI client to interact with the api from the command line'''
from __future__ import print_function
import time
import json
import argparse
import iorio
def get_arg_parser():
'''build the cli arg parser'''
parser = argparse.ArgumentParser(description='Iorio DB CLI')
parser.add_argument('--verbose', '-v', action='count')
parser.add_argument('-u', '--username', default='admin',
help='username used for authentication')
parser.add_argument('-p', '--password', default='secret',
help='password used for authentication')
parser.add_argument('-t', '--token', default=None,
help='token from an already authenticated user')
parser.add_argument('-H', '--host', default='localhost',
help='host where ioriodb is running')
parser.add_argument('-P', '--port', default=8080, type=int,
help='port where ioriodb is running')
parser.add_argument('-c', '--count', default=1, type=int,
help='how many times to do the action')
parser.add_argument('--human', action='store_true', default=False)
subparsers = parser.add_subparsers()
p_post = subparsers.add_parser('post', help='add an event to a stream')
p_patch = subparsers.add_parser('patch',
help='patch last event from a stream')
p_list_buckets = subparsers.add_parser('list-buckets', help='list buckets')
p_list_streams = subparsers.add_parser('list-streams', help='list streams')
p_get = subparsers.add_parser('get', help='get content from a stream')
p_listen = subparsers.add_parser('listen',
help='listen to new content from streams')
p_stats = subparsers.add_parser('stats', help='get server stats')
p_stats.set_defaults(action='stats')
#p_admin = subparsers.add_parser('admin', help='admin tasks')
p_post.set_defaults(action='post')
p_post.add_argument('bucket', help='bucket name')
p_post.add_argument('stream', help='stream name')
p_post.add_argument('-c', '--content-type', default='application/json',
help='content-type for the request')
p_post.add_argument('data', help='literal JSON data or if starts with @ ' +
'path to a file with JSON data')
p_patch.set_defaults(action='patch')
p_patch.add_argument('bucket', help='bucket name')
p_patch.add_argument('stream', help='stream name')
p_patch.add_argument('-c', '--content-type',
default='application/json-patch+json',
help='content-type for the request')
p_patch.add_argument('data', help='literal JSON data or if starts with @ ' +
'path to a file with JSON data')
p_get.set_defaults(action='get')
p_get.add_argument('bucket', help='bucket name')
p_get.add_argument('stream', help='stream name')
p_get.add_argument('-l', '--limit', default=10, type=int,
help='amount of items to retrieve')
p_get.add_argument('-f', '--from', default=None, type=int, dest='fromsn',
help='sequence number to start from')
p_list_buckets.set_defaults(action='list-buckets')
p_list_streams.set_defaults(action='list-streams')
p_list_streams.add_argument('bucket', help='bucket name')
p_listen.set_defaults(action='listen')
p_listen.add_argument('subscriptions', nargs='+',
help="subscription descriptiors (bucket:stream or bucket:stream:from)")
return parser
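# Illustrative invocations (host, credentials and payloads are made up):
#
#   ./ioriocli.py -u admin -p secret post mybucket mystream '{"temp": 22}'
#   ./ioriocli.py get mybucket mystream --limit 5
#   ./ioriocli.py listen mybucket:mystream:0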
def parse_args():
'''parse arguments and return them'''
parser = get_arg_parser()
args = parser.parse_args()
return args
def parse_data_from_raw(data_raw):
    '''parse data from a literal; if it starts with @, parse content from that file'''
if data_raw.startswith('@'):
return json.load(open(data_raw[1:]))
else:
return json.loads(data_raw)
def do_when_authenticated(args, fun, conn=None):
'''if auth works run fun'''
if conn is None:
conn = iorio.Connection(args.host, args.port)
auth_t1 = time.time()
auth_ok, auth_resp = conn.authenticate(args.username, args.password)
auth_t2 = time.time()
if args.verbose and args.verbose > 1:
print("Auth request time", (auth_t2 - auth_t1) * 1000, "ms")
if auth_ok:
req_t1 = time.time()
response = fun(conn)
req_t2 = time.time()
if args.verbose and args.verbose > 1:
print("Request time", (req_t2 - req_t1) * 1000, "ms")
print(response)
else:
print("Auth Failed")
print(auth_resp)
def post_or_patch(args, name):
'''avoid duplication'''
bucket = args.bucket
stream = args.stream
content_type = args.content_type
data_raw = args.data
data = parse_data_from_raw(data_raw)
def fun(conn):
'''fun that does the work'''
function = getattr(conn, name)
for _ in range(args.count):
result = function(bucket, stream, data, content_type)
return result
do_when_authenticated(args, fun)
def handle_post_event(args):
'''post a new event'''
post_or_patch(args, 'send')
def handle_patch_event(args):
'''patch a new event'''
post_or_patch(args, 'send_patch')
def handle_get_events(args):
'''get events'''
bucket = args.bucket
stream = args.stream
limit = args.limit
fromsn = args.fromsn
def fun(conn):
'''fun that does the work'''
return conn.query(bucket, stream, fromsn, limit)
do_when_authenticated(args, fun)
def handle_list_streams(args):
    '''list the streams of a bucket'''
bucket = args.bucket
def fun(conn):
'''fun that does the work'''
return conn.list_streams(bucket)
do_when_authenticated(args, fun)
def handle_list_buckets(args):
    '''list buckets'''
def fun(conn):
'''fun that does the work'''
return conn.list_buckets()
do_when_authenticated(args, fun)
def diff_keys(dict1, dict2, keys):
    '''calculate the difference between the values of keys in dict2 and dict1'''
result = {}
for key in keys:
val1 = dict1.get(key)
val2 = dict2.get(key)
if isinstance(val1, int) and isinstance(val2, int):
result[key] = val2 - val1
return result
def handle_stats(args):
    '''get server stats'''
def fun(conn):
'''fun that does the work'''
response = conn.stats()
stats = response.body
node_stats = stats['node']
abs1 = node_stats['abs1']
abs2 = node_stats['abs2']
        keys = ['error_logger_queue_len', 'memory_atoms', 'memory_bin',
                'memory_ets', 'memory_procs', 'memory_total', 'process_count',
                'run_queue']
abs_diff = diff_keys(abs1, abs2, keys)
stats['abs_diff'] = abs_diff
return response
do_when_authenticated(args, fun)
def parse_subscription(sub):
'''parse a subscription in notation bucket:stream[:from]'''
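    # e.g. (hypothetical names): 'mybucket:mystream'    -> (True, ['mybucket', 'mystream', None])
    #                            'mybucket:mystream:42' -> (True, ['mybucket', 'mystream', 42])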
parts = sub.split(':')
parts_count = len(parts)
if parts_count == 2:
return True, parts + [None]
elif parts_count == 3:
try:
seqnum = int(parts[2])
return True, [parts[0], parts[1], seqnum]
except ValueError:
return (False, "expected subscription to have format " +
"bucket:stream:from where from is a number, got %s" % sub)
else:
return (False, "expected subscription to have format " +
"bucket:stream[:from], got %s" % sub)
def handle_listen(args):
'''listen to events in subscriptions'''
raw_subs = args.subscriptions
subs = iorio.Subscriptions()
for sub in raw_subs:
ok, result = parse_subscription(sub)
if not ok:
print(result)
return
bucket, stream, count = result
subs.add(bucket, stream, count)
def fun(conn):
'''fun that does the work'''
while True:
current_subs = subs.to_list()
print('listening', ' '.join(current_subs))
response = conn.listen(current_subs)
print(response)
print()
if response.status == 200:
subs.update_seqnums(response.body)
do_when_authenticated(args, fun)
HANDLERS = {
'post': handle_post_event,
'patch': handle_patch_event,
'get': handle_get_events,
'listen': handle_listen,
'list-buckets': handle_list_buckets,
'list-streams': handle_list_streams,
'stats': handle_stats
}
def main():
'''cli entry point'''
args = parse_args()
handler = HANDLERS[args.action]
handler(args)
if __name__ == '__main__':
main()
| javierdallamore/ioriodb | tools/ioriocli.py | Python | mpl-2.0 | 8,730 |
#!/usr/bin/env python
# coding:utf-8
class Ob(object):
def __init__(self, *args, **kwds):
for i in args:
            self.__dict__.update(i)
self.__dict__.update(kwds)
def __getattr__(self, name):
return self.__dict__.get(name, '')
def __setattr__(self, name, value):
if value is not None:
self.__dict__[name] = value
def __delattr__(self, name):
if name in self.__dict__:
del self.__dict__[name]
def __repr__(self):
return self.__dict__.__repr__()
__getitem__ = __getattr__
__delitem__ = __delattr__
__setitem__ = __setattr__
def __len__(self):
return self.__dict__.__len__()
def __iter__(self):
for k, v in self.__dict__.items():
yield k, v
def __contains__(self, name):
return self.__dict__.__contains__(name)
def __eq__(self, other):
return self.__dict__ == other.__dict__
class StripOb(Ob):
def __init__(self, *args, **kwds):
        super(StripOb, self).__init__(*args, **kwds)
d = self.__dict__
for k, v in d.items():
if isinstance(v, str):
if "\n" not in v:
_v = v.strip()
if _v != v:
d[k] = _v
if __name__ == '__main__':
ob1 = Ob(a=1, b=2)
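    # Illustrative sketch (not part of the original demo): Ob exposes the same
    # data through attribute and item access, and missing keys fall back to ''.
    assert ob1.a == 1 and ob1['b'] == 2
    assert ob1.missing == ''
    assert 'a' in ob1 and len(ob1) == 2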
# ob1.xx = None
# print(ob1.__dict__)
# del ob1.a
# print(ob1.__dict__)
# o = Ob(a='张沈鹏')
# print(o)
# for k, v in o:
# print(k, v)
# print(dict)
# print(dict(iter(o)))
| noman798/dcny | lib/f42/f42/ob.py | Python | mpl-2.0 | 1,570 |
import subprocess
def main():
error = True
while error:
error = False
try:
            subprocess.check_call("./asteroids -hostAt=\":10034\" > /dev/null 2>&1", shell=True)
except subprocess.CalledProcessError:
error = True
if __name__ == "__main__":
main() | jonbuckley33/Asteroids | run_asterois.py | Python | mpl-2.0 | 258 |
"""
Test models, managers, and validators.
"""
from __future__ import absolute_import, division, unicode_literals
import six
from completion import models, waffle
from completion.test_utils import CompletionWaffleTestMixin, submit_completions_for_testing
from django.core.exceptions import ValidationError
from django.test import TestCase
from opaque_keys.edx.keys import CourseKey, UsageKey
from six.moves import range, zip
from openedx.core.djangolib.testing.utils import skip_unless_lms
from student.tests.factories import CourseEnrollmentFactory, UserFactory
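# Unit costs for the query-count assertions below: each constant stands for one
# expected query of that kind, and they are summed in assertNumQueries checks,
# e.g. SELECT + UPDATE + 2 * SAVEPOINT + 2 * OTHER.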
SELECT = 1
UPDATE = 1
SAVEPOINT = 1
OTHER = 1
@skip_unless_lms
class PercentValidatorTestCase(TestCase):
"""
Test that validate_percent only allows floats (and ints) between 0.0 and 1.0.
"""
def test_valid_percents(self):
for value in [1.0, 0.0, 1, 0, 0.5, 0.333081348071397813987230871]:
models.validate_percent(value)
def test_invalid_percent(self):
for value in [-0.00000000001, 1.0000000001, 47.1, 1000, None, float('inf'), float('nan')]:
self.assertRaises(ValidationError, models.validate_percent, value)
class CompletionSetUpMixin(CompletionWaffleTestMixin):
"""
Mixin that provides helper to create test BlockCompletion object.
"""
def set_up_completion(self):
self.user = UserFactory()
self.block_key = UsageKey.from_string(u'block-v1:edx+test+run+type@video+block@doggos')
self.completion = models.BlockCompletion.objects.create(
user=self.user,
course_key=self.block_key.course_key,
block_type=self.block_key.block_type,
block_key=self.block_key,
completion=0.5,
)
@skip_unless_lms
class SubmitCompletionTestCase(CompletionSetUpMixin, TestCase):
"""
Test that BlockCompletion.objects.submit_completion has the desired
semantics.
"""
def setUp(self):
super(SubmitCompletionTestCase, self).setUp()
self.override_waffle_switch(True)
self.set_up_completion()
def test_changed_value(self):
with self.assertNumQueries(SELECT + UPDATE + 2 * SAVEPOINT + 2 * OTHER):
# OTHER = user exists, completion exists
completion, isnew = models.BlockCompletion.objects.submit_completion(
user=self.user,
course_key=self.block_key.course_key,
block_key=self.block_key,
completion=0.9,
)
completion.refresh_from_db()
self.assertEqual(completion.completion, 0.9)
self.assertFalse(isnew)
self.assertEqual(models.BlockCompletion.objects.count(), 1)
def test_unchanged_value(self):
with self.assertNumQueries(SELECT + 2 * SAVEPOINT):
completion, isnew = models.BlockCompletion.objects.submit_completion(
user=self.user,
course_key=self.block_key.course_key,
block_key=self.block_key,
completion=0.5,
)
completion.refresh_from_db()
self.assertEqual(completion.completion, 0.5)
self.assertFalse(isnew)
self.assertEqual(models.BlockCompletion.objects.count(), 1)
def test_new_user(self):
newuser = UserFactory()
with self.assertNumQueries(SELECT + UPDATE + 4 * SAVEPOINT):
_, isnew = models.BlockCompletion.objects.submit_completion(
user=newuser,
course_key=self.block_key.course_key,
block_key=self.block_key,
completion=0.0,
)
self.assertTrue(isnew)
self.assertEqual(models.BlockCompletion.objects.count(), 2)
def test_new_block(self):
newblock = UsageKey.from_string(u'block-v1:edx+test+run+type@video+block@puppers')
with self.assertNumQueries(SELECT + UPDATE + 4 * SAVEPOINT):
_, isnew = models.BlockCompletion.objects.submit_completion(
user=self.user,
course_key=newblock.course_key,
block_key=newblock,
completion=1.0,
)
self.assertTrue(isnew)
self.assertEqual(models.BlockCompletion.objects.count(), 2)
def test_invalid_completion(self):
with self.assertRaises(ValidationError):
models.BlockCompletion.objects.submit_completion(
user=self.user,
course_key=self.block_key.course_key,
block_key=self.block_key,
completion=1.2
)
completion = models.BlockCompletion.objects.get(user=self.user, block_key=self.block_key)
self.assertEqual(completion.completion, 0.5)
self.assertEqual(models.BlockCompletion.objects.count(), 1)
@skip_unless_lms
class CompletionDisabledTestCase(CompletionSetUpMixin, TestCase):
"""
Tests that completion API is not called when the feature is disabled.
"""
def setUp(self):
super(CompletionDisabledTestCase, self).setUp()
# insert one completion record...
self.set_up_completion()
# ...then disable the feature.
self.override_waffle_switch(False)
def test_cannot_call_submit_completion(self):
self.assertEqual(models.BlockCompletion.objects.count(), 1)
with self.assertRaises(RuntimeError):
models.BlockCompletion.objects.submit_completion(
user=self.user,
course_key=self.block_key.course_key,
block_key=self.block_key,
completion=0.9,
)
self.assertEqual(models.BlockCompletion.objects.count(), 1)
@skip_unless_lms
class SubmitBatchCompletionTestCase(CompletionWaffleTestMixin, TestCase):
"""
Test that BlockCompletion.objects.submit_batch_completion has the desired
semantics.
"""
def setUp(self):
super(SubmitBatchCompletionTestCase, self).setUp()
self.override_waffle_switch(True)
self.block_key = UsageKey.from_string('block-v1:edx+test+run+type@video+block@doggos')
self.course_key_obj = CourseKey.from_string('course-v1:edx+test+run')
self.user = UserFactory()
CourseEnrollmentFactory.create(user=self.user, course_id=six.text_type(self.course_key_obj))
def test_submit_batch_completion(self):
blocks = [(self.block_key, 1.0)]
models.BlockCompletion.objects.submit_batch_completion(self.user, self.course_key_obj, blocks)
self.assertEqual(models.BlockCompletion.objects.count(), 1)
self.assertEqual(models.BlockCompletion.objects.last().completion, 1.0)
def test_submit_batch_completion_without_waffle(self):
with waffle.waffle().override(waffle.ENABLE_COMPLETION_TRACKING, False):
with self.assertRaises(RuntimeError):
blocks = [(self.block_key, 1.0)]
models.BlockCompletion.objects.submit_batch_completion(self.user, self.course_key_obj, blocks)
def test_submit_batch_completion_with_same_block_new_completion_value(self):
blocks = [(self.block_key, 0.0)]
self.assertEqual(models.BlockCompletion.objects.count(), 0)
models.BlockCompletion.objects.submit_batch_completion(self.user, self.course_key_obj, blocks)
self.assertEqual(models.BlockCompletion.objects.count(), 1)
model = models.BlockCompletion.objects.first()
self.assertEqual(model.completion, 0.0)
blocks = [
(UsageKey.from_string('block-v1:edx+test+run+type@video+block@doggos'), 1.0),
]
models.BlockCompletion.objects.submit_batch_completion(self.user, self.course_key_obj, blocks)
self.assertEqual(models.BlockCompletion.objects.count(), 1)
model = models.BlockCompletion.objects.first()
self.assertEqual(model.completion, 1.0)
@skip_unless_lms
class BatchCompletionMethodTests(CompletionWaffleTestMixin, TestCase):
"""
Tests for the classmethods that retrieve course/block completion data.
"""
def setUp(self):
super(BatchCompletionMethodTests, self).setUp()
self.override_waffle_switch(True)
self.user = UserFactory.create()
self.other_user = UserFactory.create()
self.course_key = CourseKey.from_string("edX/MOOC101/2049_T2")
self.other_course_key = CourseKey.from_string("course-v1:ReedX+Hum110+1904")
self.block_keys = [UsageKey.from_string("i4x://edX/MOOC101/video/{}".format(number)) for number in range(5)]
submit_completions_for_testing(self.user, self.course_key, self.block_keys[:3])
submit_completions_for_testing(self.other_user, self.course_key, self.block_keys[2:])
submit_completions_for_testing(self.user, self.other_course_key, [self.block_keys[4]])
def test_get_course_completions_missing_runs(self):
actual_completions = models.BlockCompletion.get_course_completions(self.user, self.course_key)
expected_block_keys = [key.replace(course_key=self.course_key) for key in self.block_keys[:3]]
expected_completions = dict(list(zip(expected_block_keys, [1.0, 0.8, 0.6])))
self.assertEqual(expected_completions, actual_completions)
def test_get_course_completions_empty_result_set(self):
self.assertEqual(
models.BlockCompletion.get_course_completions(self.other_user, self.other_course_key),
{}
)
def test_get_latest_block_completed(self):
self.assertEqual(
models.BlockCompletion.get_latest_block_completed(self.user, self.course_key).block_key,
self.block_keys[2]
)
def test_get_latest_completed_none_exist(self):
self.assertIsNone(models.BlockCompletion.get_latest_block_completed(self.other_user, self.other_course_key))
| ESOedX/edx-platform | openedx/tests/completion_integration/test_models.py | Python | agpl-3.0 | 9,761 |
# Copyright 2009-2010 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Functional tests for XPI file format"""
__metaclass__ = type
import re
import unittest
from zope.component import getUtility
from lp.app.interfaces.launchpad import ILaunchpadCelebrities
from lp.registry.interfaces.person import IPersonSet
from lp.registry.interfaces.product import IProductSet
from lp.testing.layers import LaunchpadZopelessLayer
from lp.translations.enums import RosettaImportStatus
from lp.translations.interfaces.potemplate import IPOTemplateSet
from lp.translations.utilities.mozilla_xpi_importer import MozillaXpiImporter
from lp.translations.utilities.tests.helpers import (
import_pofile_or_potemplate,
)
from lp.translations.utilities.tests.xpi_helpers import (
access_key_source_comment,
command_key_source_comment,
get_en_US_xpi_file_to_import,
)
def unwrap(text):
"""Remove line breaks and any other wrapping artifacts from text."""
    return re.sub(r'\s+', ' ', text.strip())
class XpiTestCase(unittest.TestCase):
"""XPI file import into Launchpad."""
layer = LaunchpadZopelessLayer
def setUp(self):
# Get the importer.
self.importer = getUtility(IPersonSet).getByName('mark')
# Get the Firefox template.
firefox_product = getUtility(IProductSet).getByName('firefox')
firefox_productseries = firefox_product.getSeries('trunk')
firefox_potemplate_subset = getUtility(IPOTemplateSet).getSubset(
productseries=firefox_productseries)
self.firefox_template = firefox_potemplate_subset.new(
name='firefox',
translation_domain='firefox',
path='en-US.xpi',
owner=self.importer)
self.spanish_firefox = self.firefox_template.newPOFile('es')
self.spanish_firefox.path = 'translations/es.xpi'
def setUpTranslationImportQueueForTemplate(self, subdir):
"""Return an ITranslationImportQueueEntry for testing purposes.
:param subdir: subdirectory in firefox-data to get XPI data from.
"""
# Get the file to import.
en_US_xpi = get_en_US_xpi_file_to_import(subdir)
return import_pofile_or_potemplate(
file_contents=en_US_xpi.read(),
person=self.importer,
potemplate=self.firefox_template)
def setUpTranslationImportQueueForTranslation(self, subdir):
"""Return an ITranslationImportQueueEntry for testing purposes.
:param subdir: subdirectory in firefox-data to get XPI data from.
"""
        # Get the file to import. Given the way the XPI file format works, we
        # can just use the same template file as a translation file.
es_xpi = get_en_US_xpi_file_to_import(subdir)
return import_pofile_or_potemplate(
file_contents=es_xpi.read(),
person=self.importer,
pofile=self.spanish_firefox,
by_maintainer=True)
def _assertXpiMessageInvariant(self, message):
"""Check whether invariant part of all messages are correct."""
# msgid and singular_text are always different except for the keyboard
# shortcuts which are the 'accesskey' and 'commandkey' ones.
self.failIf(
(message.msgid_singular.msgid == message.singular_text and
message.msgid_singular.msgid not in (
u'foozilla.menu.accesskey', u'foozilla.menu.commandkey')),
'msgid and singular_text should be different but both are %s' % (
message.msgid_singular.msgid))
# Plural forms should be None as this format is not able to handle
# them.
self.assertEquals(message.msgid_plural, None)
self.assertEquals(message.plural_text, None)
        # There is no way to know whether a comment comes from a
        # translator or a developer, so commenttext is left empty
        # and all comments are stored as source comments.
self.assertEquals(message.commenttext, u'')
# This format doesn't support any functionality like .po flags.
self.assertEquals(message.flagscomment, u'')
def test_TemplateImport(self):
"""Test XPI template file import."""
# Prepare the import queue to handle a new .xpi import.
entry = self.setUpTranslationImportQueueForTemplate('en-US')
# The status is now IMPORTED:
self.assertEquals(entry.status, RosettaImportStatus.IMPORTED)
# Let's validate the content of the messages.
potmsgsets = list(self.firefox_template.getPOTMsgSets())
messages_msgid_list = []
for message in potmsgsets:
messages_msgid_list.append(message.msgid_singular.msgid)
# Check the common values for all messages.
self._assertXpiMessageInvariant(message)
if message.msgid_singular.msgid == u'foozilla.name':
# It's a normal message that lacks any comment.
self.assertEquals(message.singular_text, u'FooZilla!')
self.assertEquals(
message.filereferences,
u'jar:chrome/en-US.jar!/test1.dtd(foozilla.name)')
self.assertEquals(message.sourcecomment, None)
elif message.msgid_singular.msgid == u'foozilla.play.fire':
# This one is also a normal message that has a comment.
self.assertEquals(
message.singular_text, u'Do you want to play with fire?')
self.assertEquals(
message.filereferences,
u'jar:chrome/en-US.jar!/test1.dtd(foozilla.play.fire)')
self.assertEquals(
message.sourcecomment,
u" Translators, don't play with fire! \n")
elif message.msgid_singular.msgid == u'foozilla.utf8':
# Now, we can see that special UTF-8 chars are extracted
# correctly.
self.assertEquals(
message.singular_text, u'\u0414\u0430\u043d=Day')
self.assertEquals(
message.filereferences,
u'jar:chrome/en-US.jar!/test1.properties:5' +
u'(foozilla.utf8)')
self.assertEquals(message.sourcecomment, None)
elif message.msgid_singular.msgid == u'foozilla.menu.accesskey':
# access key is a special notation that is supposed to be
# translated with a key shortcut.
self.assertEquals(
message.singular_text, u'M')
self.assertEquals(
message.filereferences,
u'jar:chrome/en-US.jar!/subdir/test2.dtd' +
u'(foozilla.menu.accesskey)')
# The comment shows the key used when there is no translation,
# which is noted as the en_US translation.
self.assertEquals(
unwrap(message.sourcecomment),
unwrap(access_key_source_comment))
elif message.msgid_singular.msgid == u'foozilla.menu.commandkey':
# command key is a special notation that is supposed to be
# translated with a key shortcut.
self.assertEquals(
message.singular_text, u'm')
self.assertEquals(
message.filereferences,
u'jar:chrome/en-US.jar!/subdir/test2.dtd' +
u'(foozilla.menu.commandkey)')
# The comment shows the key used when there is no translation,
# which is noted as the en_US translation.
self.assertEquals(
unwrap(message.sourcecomment),
unwrap(command_key_source_comment))
# Check that we got all messages.
self.assertEquals(
[u'foozilla.happytitle', u'foozilla.menu.accesskey',
u'foozilla.menu.commandkey', u'foozilla.menu.title',
u'foozilla.name', u'foozilla.nocomment', u'foozilla.play.fire',
u'foozilla.play.ice', u'foozilla.title', u'foozilla.utf8',
u'foozilla_something'],
sorted(messages_msgid_list))
def test_TwiceTemplateImport(self):
"""Test a template import done twice."""
# Prepare the import queue to handle a new .xpi import.
entry = self.setUpTranslationImportQueueForTemplate('en-US')
# The status is now IMPORTED:
self.assertEquals(entry.status, RosettaImportStatus.IMPORTED)
# Retrieve the number of messages we got in this initial import.
first_import_potmsgsets = self.firefox_template.getPOTMsgSets(
).count()
# Force the entry to be imported again:
entry.setStatus(RosettaImportStatus.APPROVED,
getUtility(ILaunchpadCelebrities).rosetta_experts)
# Now, we tell the PO template to import from the file data it has.
(subject, body) = self.firefox_template.importFromQueue(entry)
# Retrieve the number of messages we got in this second import.
second_import_potmsgsets = self.firefox_template.getPOTMsgSets(
).count()
# Both must match.
self.assertEquals(first_import_potmsgsets, second_import_potmsgsets)
def test_TranslationImport(self):
"""Test XPI translation file import."""
# Prepare the import queue to handle a new .xpi import.
template_entry = self.setUpTranslationImportQueueForTemplate('en-US')
translation_entry = self.setUpTranslationImportQueueForTranslation(
'en-US')
# The status is now IMPORTED:
self.assertEquals(
translation_entry.status, RosettaImportStatus.IMPORTED)
self.assertEquals(template_entry.status, RosettaImportStatus.IMPORTED)
# Let's validate the content of the messages.
potmsgsets = list(self.firefox_template.getPOTMsgSets())
messages = [message.msgid_singular.msgid for message in potmsgsets]
messages.sort()
self.assertEquals(
[u'foozilla.happytitle',
u'foozilla.menu.accesskey',
u'foozilla.menu.commandkey',
u'foozilla.menu.title',
u'foozilla.name',
u'foozilla.nocomment',
u'foozilla.play.fire',
u'foozilla.play.ice',
u'foozilla.title',
u'foozilla.utf8',
u'foozilla_something'],
messages)
potmsgset = self.firefox_template.getPOTMsgSetByMsgIDText(
u'foozilla.name', context='main/test1.dtd')
translation = potmsgset.getCurrentTranslation(
self.firefox_template, self.spanish_firefox.language,
self.firefox_template.translation_side)
# It's a normal message that lacks any comment.
self.assertEquals(potmsgset.singular_text, u'FooZilla!')
# With this first import, upstream and Ubuntu translations must match.
self.assertEquals(
translation.translations,
potmsgset.getOtherTranslation(
self.spanish_firefox.language,
self.firefox_template.translation_side).translations)
potmsgset = self.firefox_template.getPOTMsgSetByMsgIDText(
u'foozilla.menu.accesskey', context='main/subdir/test2.dtd')
# access key is a special notation that is supposed to be
# translated with a key shortcut.
self.assertEquals(potmsgset.singular_text, u'M')
# The comment shows the key used when there is no translation,
# which is noted as the en_US translation.
self.assertEquals(
unwrap(potmsgset.sourcecomment),
unwrap(access_key_source_comment))
# But for the translation import, we get the key directly.
self.assertEquals(
potmsgset.getOtherTranslation(
self.spanish_firefox.language,
self.firefox_template.translation_side).translations,
[u'M'])
potmsgset = self.firefox_template.getPOTMsgSetByMsgIDText(
u'foozilla.menu.commandkey', context='main/subdir/test2.dtd')
# command key is a special notation that is supposed to be
# translated with a key shortcut.
self.assertEquals(
potmsgset.singular_text, u'm')
# The comment shows the key used when there is no translation,
# which is noted as the en_US translation.
self.assertEquals(
unwrap(potmsgset.sourcecomment),
unwrap(command_key_source_comment))
# But for the translation import, we get the key directly.
self.assertEquals(
potmsgset.getOtherTranslation(
self.spanish_firefox.language,
self.firefox_template.translation_side).translations,
[u'm'])
def test_GetLastTranslator(self):
"""Tests whether we extract last translator information correctly."""
translation_entry = self.setUpTranslationImportQueueForTranslation(
'en-US')
importer = MozillaXpiImporter()
translation_file = importer.parse(translation_entry)
# Let's try with the translation file, it has valid Last Translator
# information.
name, email = translation_file.header.getLastTranslator()
self.assertEqual(name, u'Carlos Perell\xf3 Mar\xedn')
self.assertEqual(email, u'carlos@canonical.com')
def test_Contexts(self):
"""Test that message context in XPI file is set to chrome path."""
queue_entry = self.setUpTranslationImportQueueForTranslation(
'clashing_ids')
importer = MozillaXpiImporter()
template = importer.parse(queue_entry)
messages = sorted([
(message.msgid_singular, message.context, message.singular_text)
for message in template.messages])
self.assertEquals(
[
(u'foozilla.clashing.key',
u'mac/extra.dtd',
u'This message is Mac-specific, and comes from DTD.'),
(u'foozilla.clashing.key',
u'mac/extra.properties',
u'This message is Mac-specific, and comes from properties.'),
(u'foozilla.clashing.key',
u'main/main.dtd',
u'This message is in the main DTD.'),
(u'foozilla.clashing.key',
u'main/main.properties',
u'This message is in the main properties file.'),
(u'foozilla.clashing.key',
u'unix/extra.dtd',
u'This message is Unix-specific, and comes from DTD.'),
(u'foozilla.clashing.key',
u'unix/extra.properties',
u'This message is Unix-specific, and comes from properties.'),
(u'foozilla.clashing.key',
u'win/extra.dtd',
u'This message is Windows-specific, and comes from DTD.'),
(u'foozilla.clashing.key',
u'win/extra.properties',
u'This message is Windows-specific, '
'and comes from properties.'),
(u'foozilla.regular.message',
u'main/main.dtd',
u'A non-clashing message.'),
],
messages)
def test_SystemEntityIsIgnored(self):
"""Test handling of SYSTEM entities in DTD files."""
self.setUpTranslationImportQueueForTemplate('system-entity')
msgids = [
(potmsgset.msgid_singular.msgid, potmsgset.singular_text)
for potmsgset in self.firefox_template.getPOTMsgSets()]
self.assertEqual(msgids, [
('firststring', 'First translatable string'),
('secondstring', 'Second translatable string')])
| abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/translations/utilities/tests/test_xpi_import.py | Python | agpl-3.0 | 15,963 |
"""
Unit tests for django-registration.
These tests assume that you've completed all the prerequisites for
getting django-registration running in the default setup, to wit:
1. You have ``registration`` in your ``INSTALLED_APPS`` setting.
2. You have created all of the templates mentioned in this
application's documentation.
3. You have added the setting ``ACCOUNT_ACTIVATION_DAYS`` to your
settings file.
4. You have URL patterns pointing to the registration and activation
views, with the names ``registration_register`` and
``registration_activate``, respectively, and a URL pattern named
'registration_complete'.
"""
import datetime
import sha
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.core import management
from django.core.urlresolvers import reverse
from django.test import TestCase
from commoner.registration import forms
from commoner.registration.models import RegistrationProfile
from commoner.registration import signals
class RegistrationTestCase(TestCase):
"""
Base class for the test cases; this sets up two users -- one
expired, one not -- which are used to exercise various parts
of the application.
"""
def setUp(self):
self.sample_user = RegistrationProfile.objects.create_inactive_user(username='alice',
password='secret',
email='alice@example.com')
self.expired_user = RegistrationProfile.objects.create_inactive_user(username='bob',
password='swordfish',
email='bob@example.com')
self.expired_user.date_joined -= datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS + 1)
self.expired_user.save()
class RegistrationModelTests(RegistrationTestCase):
"""
Tests for the model-oriented functionality of django-registration,
including ``RegistrationProfile`` and its custom manager.
"""
def test_new_user_is_inactive(self):
"""
Test that a newly-created user is inactive.
"""
self.failIf(self.sample_user.is_active)
def test_registration_profile_created(self):
"""
Test that a ``RegistrationProfile`` is created for a new user.
"""
self.assertEqual(RegistrationProfile.objects.count(), 2)
def test_activation_email(self):
"""
Test that user signup sends an activation email.
"""
self.assertEqual(len(mail.outbox), 2)
def test_activation_email_disable(self):
"""
Test that activation email can be disabled.
"""
RegistrationProfile.objects.create_inactive_user(username='noemail',
password='foo',
email='nobody@example.com',
send_email=False)
self.assertEqual(len(mail.outbox), 2)
def test_activation(self):
"""
Test that user activation actually activates the user and
properly resets the activation key, and fails for an
already-active or expired user, or an invalid key.
"""
# Activating a valid user returns the user.
self.failUnlessEqual(RegistrationProfile.objects.activate_user(RegistrationProfile.objects.get(user=self.sample_user).activation_key).pk,
self.sample_user.pk)
# The activated user must now be active.
self.failUnless(User.objects.get(pk=self.sample_user.pk).is_active)
# The activation key must now be reset to the "already activated" constant.
self.failUnlessEqual(RegistrationProfile.objects.get(user=self.sample_user).activation_key,
RegistrationProfile.ACTIVATED)
# Activating an expired user returns False.
self.failIf(RegistrationProfile.objects.activate_user(RegistrationProfile.objects.get(user=self.expired_user).activation_key))
# Activating from a key that isn't a SHA1 hash returns False.
self.failIf(RegistrationProfile.objects.activate_user('foo'))
# Activating from a key that doesn't exist returns False.
self.failIf(RegistrationProfile.objects.activate_user(sha.new('foo').hexdigest()))
def test_account_expiration_condition(self):
"""
Test that ``RegistrationProfile.activation_key_expired()``
returns ``True`` for expired users and for active users, and
``False`` otherwise.
"""
# Unexpired user returns False.
self.failIf(RegistrationProfile.objects.get(user=self.sample_user).activation_key_expired())
# Expired user returns True.
self.failUnless(RegistrationProfile.objects.get(user=self.expired_user).activation_key_expired())
# Activated user returns True.
RegistrationProfile.objects.activate_user(RegistrationProfile.objects.get(user=self.sample_user).activation_key)
self.failUnless(RegistrationProfile.objects.get(user=self.sample_user).activation_key_expired())
def test_expired_user_deletion(self):
"""
Test that
``RegistrationProfile.objects.delete_expired_users()`` deletes
only inactive users whose activation window has expired.
"""
RegistrationProfile.objects.delete_expired_users()
self.assertEqual(RegistrationProfile.objects.count(), 1)
def test_management_command(self):
"""
Test that ``manage.py cleanupregistration`` functions
correctly.
"""
management.call_command('cleanupregistration')
self.assertEqual(RegistrationProfile.objects.count(), 1)
def test_signals(self):
"""
Test that the ``user_registered`` and ``user_activated``
signals are sent, and that they send the ``User`` as an
argument.
"""
def receiver(sender, **kwargs):
self.assert_('user' in kwargs)
self.assertEqual(kwargs['user'].username, u'signal_test')
received_signals.append(kwargs.get('signal'))
received_signals = []
expected_signals = [signals.user_registered, signals.user_activated]
for signal in expected_signals:
signal.connect(receiver)
RegistrationProfile.objects.create_inactive_user(username='signal_test',
password='foo',
email='nobody@example.com',
send_email=False)
RegistrationProfile.objects.activate_user(RegistrationProfile.objects.get(user__username='signal_test').activation_key)
self.assertEqual(received_signals, expected_signals)
class RegistrationFormTests(RegistrationTestCase):
"""
Tests for the forms and custom validation logic included in
django-registration.
"""
fixtures = ['test_codes.json',]
def test_registration_form(self):
"""
Test that ``RegistrationForm`` enforces username constraints
and matching passwords.
"""
invalid_data_dicts = [
# Non-alphanumeric username.
{
'data':
{ 'username': 'foo/bar',
'email': 'foo@example.com',
'password1': 'foo',
'password2': 'foo',
'agree_to_tos': 'on',},
'error':
('username', [u"Enter a valid value."])
},
# Already-existing username.
{
'data':
{ 'username': 'alice',
'email': 'alice@example.com',
'password1': 'secret',
'password2': 'secret',
'agree_to_tos': 'on', },
'error':
('username', [u"This username is already taken. Please choose another."])
},
# Mismatched passwords.
{
'data':
{ 'username': 'foo',
'email': 'foo@example.com',
'password1': 'foo',
'password2': 'bar',
'agree_to_tos': 'on', },
'error':
('__all__', [u"You must type the same password each time"])
},
# Must agree to TOS
{
'data':
{ 'username': 'foo',
'email': 'foo@example.com',
'password1': 'foo',
'password2': 'foo',
'agree_to_tos': False, },
'error':
('agree_to_tos', [u"You must agree to the terms to register"])
},
]
for invalid_dict in invalid_data_dicts:
form = forms.RegistrationForm(data=invalid_dict['data'])
self.failIf(form.is_valid())
self.assertEqual(form.errors[invalid_dict['error'][0]], invalid_dict['error'][1])
form = forms.RegistrationForm(data={ 'username': 'foo',
'email': 'foo@example.com',
'password1': 'foo',
'password2': 'foo',
'agree_to_tos':'on',
'promo_code':'12345678'})
self.failUnless(form.is_valid())
class RegistrationViewTests(RegistrationTestCase):
"""
Tests for the views included in django-registration.
"""
def _test_registration_view(self):
"""
Underscored to prevent running while free accounts are prohibited
Test that the registration view rejects invalid submissions,
and creates a new user and redirects after a valid submission.
"""
# Invalid data fails.
response = self.client.post(reverse('registration_register'),
data={ 'username': 'alice', # Will fail on username uniqueness.
'email': 'foo@example.com',
'password1': 'foo',
'password2': 'foo' })
self.assertEqual(response.status_code, 200)
self.failUnless(response.context[0]['form'])
self.failUnless(response.context[0]['form'].errors)
response = self.client.post(reverse('registration_register'),
data={ 'username': 'foo',
'email': 'foo@example.com',
'password1': 'foo',
'password2': 'foo',
'agree_to_tos':'on'})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'registration/check_inbox.html')
self.assertEqual(RegistrationProfile.objects.count(), 3)
def test_activation_view(self):
"""
Test that the activation view activates the user from a valid
key and fails if the key is invalid or has expired.
"""
# Valid user puts the user account into the context.
response = self.client.get(reverse('registration_activate',
kwargs={ 'activation_key': RegistrationProfile.objects.get(user=self.sample_user).activation_key }))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context[0]['account'].pk, self.sample_user.pk)
# Expired user sets the account to False.
response = self.client.get(reverse('registration_activate',
kwargs={ 'activation_key': RegistrationProfile.objects.get(user=self.expired_user).activation_key }))
self.assertEqual(response.status_code, 404)
# Invalid key gets to the view, but sets account to False.
response = self.client.get(reverse('registration_activate',
kwargs={ 'activation_key': 'foo' }))
# hmmm, need an assertion here
# Nonexistent key sets the account to False.
response = self.client.get(reverse('registration_activate',
kwargs={ 'activation_key': sha.new('foo').hexdigest() }))
self.assertEqual(response.status_code, 404)
| cc-archive/commoner | src/commoner/registration/tests.py | Python | agpl-3.0 | 12,845 |
import os
import time
import mechanize
CKAN = os.environ.get('CKAN', 'http://data.england.nhs.uk/')
class Transaction(object):
def __init__(self):
self.custom_timers = {}
def run(self):
# create a Browser instance
br = mechanize.Browser()
# don't bother with robots.txt
br.set_handle_robots(False)
# add a custom header so CKAN allows our requests
br.addheaders = [('User-agent', 'Mozilla/5.0 Compatible')]
# start the timer
start_timer = time.time()
# submit the request
br.open(CKAN)
# stop the timer
latency = time.time() - start_timer
# store the custom timer
self.custom_timers['Load_Front_Page'] = latency
# think-time
time.sleep(2)
# select first (zero-based) form on page
br.select_form(nr=0)
# set form field
br.form['q'] = 'england'
start_timer = time.time()
br.submit()
assert 'datasets found for' in br.response().read(), 'Search not performed'
# verify responses are valid
assert (br.response().code == 200), 'Bad HTTP Response'
latency = time.time() - start_timer
# store the custom timer
self.custom_timers['Search'] = latency
# think-time
time.sleep(2)
if __name__ == '__main__':
trans = Transaction()
trans.run()
for timer in trans.custom_timers:
print '%s: %.5f secs' % (timer, trans.custom_timers[timer])
| nhsengland/iit-infrastructure | tests/performance_tests/CKAN/test_scripts/ckan_search.py | Python | agpl-3.0 | 1,654 |
from rest_framework import renderers
from django.utils.xmlutils import SimplerXMLGenerator
from django.utils.six.moves import StringIO
from django.utils.encoding import smart_text
from rest_framework.compat import six
from rest_framework import negotiation
import json
"""
@author: Jon Nordling
@date: 06/19/2016
XFormListRenderer is a custom Django REST Framework renderer that,
given a data object, serializes it to XML for the views.
"""
class MediaFileContentNegotiation(negotiation.DefaultContentNegotiation):
def filter_renderers(self, renderers, format):
"""
If there is a '.json' style format suffix, filter the renderers
        so that we only negotiate against those that accept that format.
If there is no renderer available, we use MediaFileRenderer.
"""
renderers = [renderer for renderer in renderers
if renderer.format == format]
if not renderers:
renderers = [MediaFileRenderer()]
return renderers
class MediaFileRenderer(renderers.BaseRenderer):
media_type = '*/*'
format = None
charset = None
render_style = 'binary'
def render(self, data, accepted_media_type=None, renderer_context=None):
return data
class XFormListRenderer(renderers.BaseRenderer):
"""
Renderer which serializes to XML.
"""
media_type = 'text/xml'
format = 'xml'
charset = 'utf-8'
root_node = 'xforms'
element_node = 'xform'
xmlns = "http://openrosa.org/xforms/xformsList"
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Renders *obj* into serialized XML.
"""
if data is None:
return ''
elif isinstance(data, six.string_types):
return data
stream = StringIO()
xml = SimplerXMLGenerator(stream, self.charset)
xml.startDocument()
xml.startElement(self.root_node, {'xmlns': self.xmlns})
self._to_xml(xml, data)
xml.endElement(self.root_node)
xml.endDocument()
return stream.getvalue()
def _to_xml(self, xml, data):
if isinstance(data, (list, tuple)):
for item in data:
xml.startElement(self.element_node, {})
self._to_xml(xml, item)
xml.endElement(self.element_node)
elif isinstance(data, dict):
for key, value in six.iteritems(data):
xml.startElement(key, {})
self._to_xml(xml, value)
xml.endElement(key)
elif data is None:
# Don't output any value
pass
else:
xml.characters(smart_text(data))
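# Illustrative output shape (form data is made up; element order follows dict
# iteration order, so it may vary): rendering
# [{'formID': 'test_form', 'name': 'Test Form'}] with XFormListRenderer gives
# roughly:
#
#   <?xml version="1.0" encoding="utf-8"?>
#   <xforms xmlns="http://openrosa.org/xforms/xformsList">
#     <xform><formID>test_form</formID><name>Test Form</name></xform>
#   </xforms>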
| Cadasta/cadasta-geoodk | xforms/renderers.py | Python | agpl-3.0 | 2,727 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Magento Connector - Pricing',
'version': '2.0.0',
'category': 'Connector',
'depends': ['magentoerpconnect',
],
'author': "MagentoERPconnect Core Editors,Odoo Community Association (OCA)",
'license': 'AGPL-3',
'website': 'http://www.odoo-magento-connector.com',
'description': """
Magento Connector - Pricing
===========================
Extension for **Magento Connector**.
The prices of the products are managed in OpenERP using pricelists and
are pushed to Magento.
""",
'images': [],
'demo': [],
'data': ['magento_model_view.xml',
],
'installable': False,
'application': False,
}
| gurneyalex/connector-magento | __unported__/magentoerpconnect_pricing/__openerp__.py | Python | agpl-3.0 | 1,568 |
import ckan.controllers.package as package
import ckan.lib.dictization.model_dictize as model_dictize
import ckan.model as model
from ckan.common import c
class MapactionPackageController(package.PackageController):
def groups(self, id):
q = model.Session.query(model.Group) \
.filter(model.Group.is_organization == False) \
.filter(model.Group.state == 'active')
groups = q.all()
'''
package = c.get('package')
if package:
groups = set(groups) - set(package.get_groups())
'''
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
'auth_user_obj': c.userobj, 'use_cache': False}
group_list = model_dictize.group_list_dictize(groups, context)
c.event_dropdown = [[group['id'], group['display_name']]
for group in group_list]
return super(MapactionPackageController, self).groups(id) | aptivate/ckanext-mapactiontheme | ckanext/mapactiontheme/controllers/package.py | Python | agpl-3.0 | 1,023 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP module
# Copyright (C) 2010 Micronaet srl (<http://www.micronaet.it>)
#
# Italian OpenERP Community (<http://www.openerp-italia.com>)
#
#############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class AccountInvoiceMultipartner(osv.osv):
''' Add more than one reference partner in account invoice
(only in report document, not in journal entry)
'''
_inherit = 'account.invoice'
# on change function:
def onchange_extra_address(self, cr, uid, ids, extra_address, partner_id,
context=None):
        ''' Set the domain on the partner_ids list when the extra
        address type or the partner changes.
        '''
res = {}
if extra_address == 'contact' and partner_id:
res['domain'] = {'partner_ids': [('parent_id', '=', partner_id)]}
else:
res['domain'] = {'partner_ids': []}
res['value'] = {'partner_ids': False}
return res
_columns = {
'extra_address': fields.selection([
('none', 'None'),
('contact', 'Contact'),
('partner', 'Partner'), ],
'Extra address', select=True, readonly=False, required=True),
'partner_ids': fields.many2many(
'res.partner', 'invoice_partner_rel', 'invoice_id', 'partner_id',
'Extra partner'),
}
_defaults = {
'extra_address': lambda *a: 'none',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Micronaet/micronaet-accounting | account_invoice_multipartner/multipartner.py | Python | agpl-3.0 | 2,506 |
import time
import random
import os
import os.path
import logging
import urlparse
import functools
import lms.lib.comment_client as cc
import django_comment_client.utils as utils
import django_comment_client.settings as cc_settings
from django.core import exceptions
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST, require_GET
from django.views.decorators import csrf
from django.core.files.storage import get_storage_class
from django.utils.translation import ugettext as _
from django.contrib.auth.models import User
from mitxmako.shortcuts import render_to_string
from courseware.courses import get_course_with_access, get_course_by_id
from course_groups.cohorts import get_cohort_id, is_commentable_cohorted
from django_comment_client.utils import JsonResponse, JsonError, extract, add_courseware_context
from django_comment_client.permissions import check_permissions_by_view, cached_has_permission
from django_comment_common.models import Role
from courseware.access import has_access
log = logging.getLogger(__name__)
def permitted(fn):
@functools.wraps(fn)
def wrapper(request, *args, **kwargs):
def fetch_content():
if "thread_id" in kwargs:
content = cc.Thread.find(kwargs["thread_id"]).to_dict()
elif "comment_id" in kwargs:
content = cc.Comment.find(kwargs["comment_id"]).to_dict()
else:
content = None
return content
if check_permissions_by_view(request.user, kwargs['course_id'], fetch_content(), request.view_name):
return fn(request, *args, **kwargs)
else:
return JsonError("unauthorized", status=401)
return wrapper
def ajax_content_response(request, course_id, content, template_name):
context = {
'course_id': course_id,
'content': content,
}
html = render_to_string(template_name, context)
user_info = cc.User.from_django_user(request.user).to_dict()
annotated_content_info = utils.get_annotated_content_info(course_id, content, request.user, user_info)
return JsonResponse({
'html': html,
'content': utils.safe_content(content),
'annotated_content_info': annotated_content_info,
})
@require_POST
@login_required
@permitted
def create_thread(request, course_id, commentable_id):
"""
    Given a course and commentable ID, create the thread
"""
log.debug("Creating new thread in %r, id %r", course_id, commentable_id)
course = get_course_with_access(request.user, course_id, 'load')
post = request.POST
if course.allow_anonymous:
anonymous = post.get('anonymous', 'false').lower() == 'true'
else:
anonymous = False
if course.allow_anonymous_to_peers:
anonymous_to_peers = post.get('anonymous_to_peers', 'false').lower() == 'true'
else:
anonymous_to_peers = False
thread = cc.Thread(**extract(post, ['body', 'title', 'tags']))
thread.update_attributes(**{
'anonymous': anonymous,
'anonymous_to_peers': anonymous_to_peers,
'commentable_id': commentable_id,
'course_id': course_id,
'user_id': request.user.id,
})
user = cc.User.from_django_user(request.user)
#kevinchugh because the new requirement is that all groups will be determined
#by the group id in the request this all goes away
#not anymore, only for admins
# Cohort the thread if the commentable is cohorted.
if is_commentable_cohorted(course_id, commentable_id):
user_group_id = get_cohort_id(user, course_id)
# TODO (vshnayder): once we have more than just cohorts, we'll want to
# change this to a single get_group_for_user_and_commentable function
# that can do different things depending on the commentable_id
if cached_has_permission(request.user, "see_all_cohorts", course_id):
# admins can optionally choose what group to post as
group_id = post.get('group_id', user_group_id)
else:
# regular users always post with their own id.
group_id = user_group_id
if group_id:
thread.update_attributes(group_id=group_id)
thread.save()
#patch for backward compatibility to comments service
    if 'pinned' not in thread.attributes:
thread['pinned'] = False
if post.get('auto_subscribe', 'false').lower() == 'true':
user = cc.User.from_django_user(request.user)
user.follow(thread)
data = thread.to_dict()
add_courseware_context([data], course)
if request.is_ajax():
return ajax_content_response(request, course_id, data, 'discussion/ajax_create_thread.html')
else:
return JsonResponse(utils.safe_content(data))
@require_POST
@login_required
@permitted
def update_thread(request, course_id, thread_id):
"""
    Given a course id and thread id, update an existing thread, used for both static and ajax submissions
"""
thread = cc.Thread.find(thread_id)
thread.update_attributes(**extract(request.POST, ['body', 'title', 'tags']))
thread.save()
if request.is_ajax():
return ajax_content_response(request, course_id, thread.to_dict(), 'discussion/ajax_update_thread.html')
else:
return JsonResponse(utils.safe_content(thread.to_dict()))
def _create_comment(request, course_id, thread_id=None, parent_id=None):
"""
given a course_id, thread_id, and parent_id, create a comment,
called from create_comment to do the actual creation
"""
post = request.POST
comment = cc.Comment(**extract(post, ['body']))
course = get_course_with_access(request.user, course_id, 'load')
if course.allow_anonymous:
anonymous = post.get('anonymous', 'false').lower() == 'true'
else:
anonymous = False
if course.allow_anonymous_to_peers:
anonymous_to_peers = post.get('anonymous_to_peers', 'false').lower() == 'true'
else:
anonymous_to_peers = False
comment.update_attributes(**{
'anonymous': anonymous,
'anonymous_to_peers': anonymous_to_peers,
'user_id': request.user.id,
'course_id': course_id,
'thread_id': thread_id,
'parent_id': parent_id,
})
comment.save()
if post.get('auto_subscribe', 'false').lower() == 'true':
user = cc.User.from_django_user(request.user)
user.follow(comment.thread)
if request.is_ajax():
return ajax_content_response(request, course_id, comment.to_dict(), 'discussion/ajax_create_comment.html')
else:
return JsonResponse(utils.safe_content(comment.to_dict()))
@require_POST
@login_required
@permitted
def create_comment(request, course_id, thread_id):
"""
given a course_id and thread_id, test for comment depth. if not too deep,
call _create_comment to create the actual comment.
"""
if cc_settings.MAX_COMMENT_DEPTH is not None:
if cc_settings.MAX_COMMENT_DEPTH < 0:
return JsonError("Comment level too deep")
return _create_comment(request, course_id, thread_id=thread_id)
@require_POST
@login_required
@permitted
def delete_thread(request, course_id, thread_id):
"""
given a course_id and thread_id, delete this thread
this is ajax only
"""
thread = cc.Thread.find(thread_id)
thread.delete()
return JsonResponse(utils.safe_content(thread.to_dict()))
@require_POST
@login_required
@permitted
def update_comment(request, course_id, comment_id):
"""
given a course_id and comment_id, update the comment with payload attributes
handles static and ajax submissions
"""
comment = cc.Comment.find(comment_id)
comment.update_attributes(**extract(request.POST, ['body']))
comment.save()
if request.is_ajax():
return ajax_content_response(request, course_id, comment.to_dict(), 'discussion/ajax_update_comment.html')
else:
return JsonResponse(utils.safe_content(comment.to_dict()))
@require_POST
@login_required
@permitted
def endorse_comment(request, course_id, comment_id):
"""
given a course_id and comment_id, toggle the endorsement of this comment,
ajax only
"""
comment = cc.Comment.find(comment_id)
comment.endorsed = request.POST.get('endorsed', 'false').lower() == 'true'
comment.save()
return JsonResponse(utils.safe_content(comment.to_dict()))
@require_POST
@login_required
@permitted
def openclose_thread(request, course_id, thread_id):
"""
given a course_id and thread_id, toggle the status of this thread
ajax only
"""
thread = cc.Thread.find(thread_id)
thread.closed = request.POST.get('closed', 'false').lower() == 'true'
thread.save()
thread = thread.to_dict()
return JsonResponse({
'content': utils.safe_content(thread),
'ability': utils.get_ability(course_id, thread, request.user),
})
@require_POST
@login_required
@permitted
def create_sub_comment(request, course_id, comment_id):
"""
given a course_id and comment_id, create a response to a comment
after checking the max depth allowed, if allowed
"""
if cc_settings.MAX_COMMENT_DEPTH is not None:
if cc_settings.MAX_COMMENT_DEPTH <= cc.Comment.find(comment_id).depth:
return JsonError("Comment level too deep")
return _create_comment(request, course_id, parent_id=comment_id)
@require_POST
@login_required
@permitted
def delete_comment(request, course_id, comment_id):
"""
given a course_id and comment_id delete this comment
ajax only
"""
comment = cc.Comment.find(comment_id)
comment.delete()
return JsonResponse(utils.safe_content(comment.to_dict()))
@require_POST
@login_required
@permitted
def vote_for_comment(request, course_id, comment_id, value):
"""
given a course_id and comment_id,
"""
user = cc.User.from_django_user(request.user)
comment = cc.Comment.find(comment_id)
user.vote(comment, value)
return JsonResponse(utils.safe_content(comment.to_dict()))
@require_POST
@login_required
@permitted
def undo_vote_for_comment(request, course_id, comment_id):
"""
given a course id and comment id, remove vote
ajax only
"""
user = cc.User.from_django_user(request.user)
comment = cc.Comment.find(comment_id)
user.unvote(comment)
return JsonResponse(utils.safe_content(comment.to_dict()))
@require_POST
@login_required
@permitted
def vote_for_thread(request, course_id, thread_id, value):
"""
given a course id and thread id vote for this thread
ajax only
"""
user = cc.User.from_django_user(request.user)
thread = cc.Thread.find(thread_id)
user.vote(thread, value)
return JsonResponse(utils.safe_content(thread.to_dict()))
@require_POST
@login_required
@permitted
def flag_abuse_for_thread(request, course_id, thread_id):
"""
given a course_id and thread_id flag this thread for abuse
ajax only
"""
user = cc.User.from_django_user(request.user)
thread = cc.Thread.find(thread_id)
thread.flagAbuse(user, thread)
return JsonResponse(utils.safe_content(thread.to_dict()))
@require_POST
@login_required
@permitted
def un_flag_abuse_for_thread(request, course_id, thread_id):
"""
given a course id and thread id, remove abuse flag for this thread
ajax only
"""
user = cc.User.from_django_user(request.user)
course = get_course_by_id(course_id)
thread = cc.Thread.find(thread_id)
removeAll = cached_has_permission(request.user, 'openclose_thread', course_id) or has_access(request.user, course, 'staff')
thread.unFlagAbuse(user, thread, removeAll)
return JsonResponse(utils.safe_content(thread.to_dict()))
@require_POST
@login_required
@permitted
def flag_abuse_for_comment(request, course_id, comment_id):
"""
given a course and comment id, flag comment for abuse
ajax only
"""
user = cc.User.from_django_user(request.user)
comment = cc.Comment.find(comment_id)
comment.flagAbuse(user, comment)
return JsonResponse(utils.safe_content(comment.to_dict()))
@require_POST
@login_required
@permitted
def un_flag_abuse_for_comment(request, course_id, comment_id):
"""
given a course_id and comment id, unflag comment for abuse
ajax only
"""
user = cc.User.from_django_user(request.user)
course = get_course_by_id(course_id)
removeAll = cached_has_permission(request.user, 'openclose_thread', course_id) or has_access(request.user, course, 'staff')
comment = cc.Comment.find(comment_id)
comment.unFlagAbuse(user, comment, removeAll)
return JsonResponse(utils.safe_content(comment.to_dict()))
@require_POST
@login_required
@permitted
def undo_vote_for_thread(request, course_id, thread_id):
"""
given a course id and thread id, remove users vote for thread
ajax only
"""
user = cc.User.from_django_user(request.user)
thread = cc.Thread.find(thread_id)
user.unvote(thread)
return JsonResponse(utils.safe_content(thread.to_dict()))
@require_POST
@login_required
@permitted
def pin_thread(request, course_id, thread_id):
"""
given a course id and thread id, pin this thread
ajax only
"""
user = cc.User.from_django_user(request.user)
thread = cc.Thread.find(thread_id)
thread.pin(user, thread_id)
return JsonResponse(utils.safe_content(thread.to_dict()))
@require_POST
@login_required
@permitted
def un_pin_thread(request, course_id, thread_id):
"""
given a course id and thread id, remove pin from this thread
ajax only
"""
user = cc.User.from_django_user(request.user)
thread = cc.Thread.find(thread_id)
thread.un_pin(user, thread_id)
return JsonResponse(utils.safe_content(thread.to_dict()))
@require_POST
@login_required
@permitted
def follow_thread(request, course_id, thread_id):
user = cc.User.from_django_user(request.user)
thread = cc.Thread.find(thread_id)
user.follow(thread)
return JsonResponse({})
@require_POST
@login_required
@permitted
def follow_commentable(request, course_id, commentable_id):
"""
given a course_id and commentable id, follow this commentable
ajax only
"""
user = cc.User.from_django_user(request.user)
commentable = cc.Commentable.find(commentable_id)
user.follow(commentable)
return JsonResponse({})
@require_POST
@login_required
@permitted
def follow_user(request, course_id, followed_user_id):
user = cc.User.from_django_user(request.user)
followed_user = cc.User.find(followed_user_id)
user.follow(followed_user)
return JsonResponse({})
@require_POST
@login_required
@permitted
def unfollow_thread(request, course_id, thread_id):
"""
given a course id and thread id, stop following this thread
ajax only
"""
user = cc.User.from_django_user(request.user)
thread = cc.Thread.find(thread_id)
user.unfollow(thread)
return JsonResponse({})
@require_POST
@login_required
@permitted
def unfollow_commentable(request, course_id, commentable_id):
"""
given a course id and commentable id stop following commentable
ajax only
"""
user = cc.User.from_django_user(request.user)
commentable = cc.Commentable.find(commentable_id)
user.unfollow(commentable)
return JsonResponse({})
@require_POST
@login_required
@permitted
def unfollow_user(request, course_id, followed_user_id):
"""
given a course id and user id, stop following this user
ajax only
"""
user = cc.User.from_django_user(request.user)
followed_user = cc.User.find(followed_user_id)
user.unfollow(followed_user)
return JsonResponse({})
@require_POST
@login_required
@permitted
def update_moderator_status(request, course_id, user_id):
"""
    given a course id and user id, add or remove the Moderator role for
    that user and send back the user's profile
"""
is_moderator = request.POST.get('is_moderator', '').lower()
if is_moderator not in ["true", "false"]:
return JsonError("Must provide is_moderator as boolean value")
is_moderator = is_moderator == "true"
user = User.objects.get(id=user_id)
role = Role.objects.get(course_id=course_id, name="Moderator")
if is_moderator:
user.roles.add(role)
else:
user.roles.remove(role)
if request.is_ajax():
course = get_course_with_access(request.user, course_id, 'load')
discussion_user = cc.User(id=user_id, course_id=course_id)
context = {
'course': course,
'course_id': course_id,
'user': request.user,
'django_user': user,
'profiled_user': discussion_user.to_dict(),
}
return JsonResponse({
'html': render_to_string('discussion/ajax_user_profile.html', context)
})
else:
return JsonResponse({})
@require_GET
def search_similar_threads(request, course_id, commentable_id):
"""
given a course id and commentable id, run query given in text get param
of request
"""
text = request.GET.get('text', None)
if text:
query_params = {
'text': text,
'commentable_id': commentable_id,
}
threads = cc.search_similar_threads(course_id, recursive=False, query_params=query_params)
else:
        threads = []
context = {'threads': map(utils.extend_content, threads)}
return JsonResponse({
'html': render_to_string('discussion/_similar_posts.html', context)
})
@require_GET
def tags_autocomplete(request, course_id):
value = request.GET.get('q', None)
results = []
if value:
results = cc.tags_autocomplete(value)
return JsonResponse(results)
@require_POST
@login_required
@csrf.csrf_exempt
def upload(request, course_id): # ajax upload file to a question or answer
"""view that handles file upload via Ajax
"""
# check upload permission
result = ''
error = ''
new_file_name = ''
try:
# TODO authorization
#may raise exceptions.PermissionDenied
#if request.user.is_anonymous():
# msg = _('Sorry, anonymous users cannot upload files')
# raise exceptions.PermissionDenied(msg)
#request.user.assert_can_upload_file()
# check file type
f = request.FILES['file-upload']
file_extension = os.path.splitext(f.name)[1].lower()
if not file_extension in cc_settings.ALLOWED_UPLOAD_FILE_TYPES:
file_types = "', '".join(cc_settings.ALLOWED_UPLOAD_FILE_TYPES)
msg = _("allowed file types are '%(file_types)s'") % \
{'file_types': file_types}
raise exceptions.PermissionDenied(msg)
# generate new file name
new_file_name = str(time.time()).replace('.', str(random.randint(0, 100000))) + file_extension
file_storage = get_storage_class()()
# use default storage to store file
file_storage.save(new_file_name, f)
# check file size
# byte
size = file_storage.size(new_file_name)
if size > cc_settings.MAX_UPLOAD_FILE_SIZE:
file_storage.delete(new_file_name)
msg = _("maximum upload file size is %(file_size)sK") % \
{'file_size': cc_settings.MAX_UPLOAD_FILE_SIZE}
raise exceptions.PermissionDenied(msg)
except exceptions.PermissionDenied, err:
error = unicode(err)
except Exception, err:
print err
logging.critical(unicode(err))
error = _('Error uploading file. Please contact the site administrator. Thank you.')
if error == '':
result = 'Good'
file_url = file_storage.url(new_file_name)
parsed_url = urlparse.urlparse(file_url)
file_url = urlparse.urlunparse(
urlparse.ParseResult(
parsed_url.scheme,
parsed_url.netloc,
parsed_url.path,
'', '', ''
)
)
else:
result = ''
file_url = ''
return JsonResponse({
'result': {
'msg': result,
'error': error,
'file_url': file_url,
}
})
| TsinghuaX/edx-platform | lms/djangoapps/django_comment_client/base/views.py | Python | agpl-3.0 | 20,227 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-04 14:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("events", "0004_create_basic_calendars")]
operations = [
migrations.AlterField(
model_name="event",
name="name",
field=models.CharField(
help_text="Le nom de l'événement", max_length=255, verbose_name="nom"
),
)
]
| lafranceinsoumise/api-django | agir/events/migrations/0005_auto_20170704_1452.py | Python | agpl-3.0 | 521 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-10-05 09:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('explorer', '0006_auto_20181004_1159'),
]
operations = [
migrations.AddField(
model_name='fieldvalue',
name='column_name',
field=models.CharField(blank=True, max_length=80, null=True),
),
]
| muccg/rdrf | rdrf/explorer/migrations/0007_fieldvalue_column_name.py | Python | agpl-3.0 | 482 |
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2015 Trustcode - www.trustcode.com.br #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
from openerp import models, api, fields
class mrp_bom(models.Model):
_inherit = 'mrp.bom'
def _bom_explode(self, cr, uid, bom, product, factor, properties=None,
level=0, routing_id=False, previous_products=None,
master_bom=None, context=None):
res = super(mrp_bom, self)._bom_explode(
cr, uid, bom, product, factor,
properties=properties, level=level,
routing_id=routing_id,
previous_products=previous_products,
master_bom=master_bom, context=context
)
results = res[0] # product_lines
results2 = res[1] # workcenter_lines
indice = 0
for bom_line_id in bom.bom_line_ids:
line = results[indice]
line['largura'] = bom_line_id.largura
line['comprimento'] = bom_line_id.comprimento
line['unidades'] = bom_line_id.unidades
indice += 1
return results, results2
class mrp_bom_line(models.Model):
_inherit = 'mrp.bom.line'
largura = fields.Float(string="Largura", digits=(16, 6))
comprimento = fields.Float(string="Comprimento", digits=(16, 6))
unidades = fields.Float(string="Unidades", digits=(16, 6))
@api.onchange('largura', 'comprimento', 'unidades')
def compute_quantity(self):
self.product_qty = (self.largura or 1) * \
(self.comprimento or 1) * (self.unidades or 1)
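    # Illustrative note (not in the original source): the onchange above
    # computes the consumed quantity as largura * comprimento * unidades,
    # e.g. a 2.0 x 3.0 sheet used 4 times gives product_qty = 24.0; any
    # field left at zero falls back to 1 in the product.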
class mrp_production_product_line(models.Model):
_inherit = 'mrp.production.product.line'
largura = fields.Float(string="Largura", digits=(16, 6))
comprimento = fields.Float(string="Comprimento", digits=(16, 6))
unidades = fields.Float(string="Unidades", digits=(16, 6))
class stock_move(models.Model):
_inherit = 'stock.move'
largura = fields.Float(string="Largura", digits=(16, 6))
comprimento = fields.Float(string="Comprimento", digits=(16, 6))
unidades = fields.Float(string="Unidades", digits=(16, 6))
class mrp_production(models.Model):
_inherit = 'mrp.production'
def _make_production_consume_line(self, cr, uid, line, context=None):
move_id = super(mrp_production, self)\
._make_production_consume_line(
cr, uid, line, context=context)
self.pool['stock.move'].write(cr, uid, move_id,
{'unidades': line.unidades,
'comprimento': line.comprimento,
'largura': line.largura})
return move_id
| Trust-Code/trust-addons | trust_second_unit_of_measure/models/mrp_bom.py | Python | agpl-3.0 | 3,943 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
import lxml
'''
A set of functions related to Data Manager and the <data_manager> node
in Opus Project Configuration files.
'''
def get_tool_nodes(project):
'''
Retrieve a list of all nodes that represent tools in the given project
@param project (OpusProject) project to fetch nodes from
@return a list of nodes representing the tools (list(Element))
'''
tool_nodes = []
tool_group_nodes = get_tool_library_node(project).findall("tool_group")
for tool_group in tool_group_nodes:
tool_nodes.extend(tool_group.findall("tool"))
return tool_nodes
def get_tool_node_by_name(project, tool_name):
'''
    Fetch a node representing a tool based on its name.
@param project (OpusProject) project to fetch node from
@param tool_name (str) name of the tool to fetch
@return the node (Element) or None if the node was not found
'''
for node in get_tool_nodes(project):
if node.get('name') == tool_name:
return node
return None
def get_tool_library_node(project):
'''
Get a reference to the tool library for the given project
@param project (OpusProject) project to operate on
@return the node representing the tool library (Element) or None if the
project does not contain a tool library.
'''
if type(project) == lxml.etree._Element and project.tag == "tool_library": return project
return project.find('data_manager/tool_library')
def get_path_to_tool_modules(project):
'''
Get the path to the tool modules
@param project (OpusProject) project to operate on
@return the text representing the path or None if not found
'''
node = project.find('data_manager/path_to_tool_modules')
if node is not None: return node.text
return None
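# --- Illustrative usage sketch: not part of the original module. ---
# Example of combining the helpers above. 'export_data' is a hypothetical
# tool name, and ``project`` is assumed to be an OpusProject whose XML
# contains a <data_manager>/<tool_library> section.
def example_tool_lookup(project):
    '''
    Return (tool_node, path_to_modules) for a hypothetical 'export_data' tool.
    '''
    tool_node = get_tool_node_by_name(project, 'export_data')
    path_to_modules = get_path_to_tool_modules(project)
    return tool_node, path_to_modules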
| apdjustino/DRCOG_Urbansim | src/opus_gui/data_manager/data_manager_functions.py | Python | agpl-3.0 | 1,946 |
# -*- coding: utf-8 -*-
"""
End-to-end tests for Student's Profile Page.
"""
from contextlib import contextmanager
from datetime import datetime
from bok_choy.web_app_test import WebAppTest
from nose.plugins.attrib import attr
from ...pages.common.logout import LogoutPage
from ...pages.lms.account_settings import AccountSettingsPage
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.learner_profile import LearnerProfilePage
from ...pages.lms.dashboard import DashboardPage
from ..helpers import EventsTestMixin
class LearnerProfileTestMixin(EventsTestMixin):
"""
Mixin with helper methods for testing learner profile pages.
"""
PRIVACY_PUBLIC = u'all_users'
PRIVACY_PRIVATE = u'private'
PUBLIC_PROFILE_FIELDS = ['username', 'country', 'language_proficiencies', 'bio']
PRIVATE_PROFILE_FIELDS = ['username']
PUBLIC_PROFILE_EDITABLE_FIELDS = ['country', 'language_proficiencies', 'bio']
USER_SETTINGS_CHANGED_EVENT_NAME = u"edx.user.settings.changed"
def log_in_as_unique_user(self):
"""
Create a unique user and return the account's username and id.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
auto_auth_page = AutoAuthPage(self.browser, username=username).visit()
user_id = auto_auth_page.get_user_id()
return username, user_id
def set_public_profile_fields_data(self, profile_page):
"""
Fill in the public profile fields of a user.
"""
profile_page.value_for_dropdown_field('language_proficiencies', 'English')
profile_page.value_for_dropdown_field('country', 'United Arab Emirates')
profile_page.set_value_for_textarea_field('bio', 'Nothing Special')
def visit_profile_page(self, username, privacy=None):
"""
Visit a user's profile page and if a privacy is specified and
is different from the displayed value, then set the privacy to that value.
"""
profile_page = LearnerProfilePage(self.browser, username)
# Change the privacy if requested by loading the page and
# changing the drop down
if privacy is not None:
profile_page.visit()
# Change the privacy setting if it is not the desired one already
profile_page.privacy = privacy
# Verify the current setting is as expected
if privacy == self.PRIVACY_PUBLIC:
self.assertEqual(profile_page.privacy, 'all_users')
else:
self.assertEqual(profile_page.privacy, 'private')
if privacy == self.PRIVACY_PUBLIC:
self.set_public_profile_fields_data(profile_page)
# Reset event tracking so that the tests only see events from
# loading the profile page.
self.start_time = datetime.now() # pylint: disable=attribute-defined-outside-init
# Load the page
profile_page.visit()
return profile_page
def set_birth_year(self, birth_year):
"""
Set birth year for the current user to the specified value.
"""
account_settings_page = AccountSettingsPage(self.browser)
account_settings_page.visit()
account_settings_page.wait_for_page()
self.assertEqual(
account_settings_page.value_for_dropdown_field('year_of_birth', str(birth_year)),
str(birth_year)
)
def verify_profile_page_is_public(self, profile_page, is_editable=True):
"""
Verify that the profile page is currently public.
"""
self.assertEqual(profile_page.visible_fields, self.PUBLIC_PROFILE_FIELDS)
if is_editable:
self.assertTrue(profile_page.privacy_field_visible)
self.assertEqual(profile_page.editable_fields, self.PUBLIC_PROFILE_EDITABLE_FIELDS)
else:
self.assertEqual(profile_page.editable_fields, [])
def verify_profile_page_is_private(self, profile_page, is_editable=True):
"""
Verify that the profile page is currently private.
"""
if is_editable:
self.assertTrue(profile_page.privacy_field_visible)
self.assertEqual(profile_page.visible_fields, self.PRIVATE_PROFILE_FIELDS)
def verify_profile_page_view_event(self, requesting_username, profile_user_id, visibility=None):
"""
Verifies that the correct view event was captured for the profile page.
"""
actual_events = self.wait_for_events(
start_time=self.start_time,
event_filter={'event_type': 'edx.user.settings.viewed', 'username': requesting_username},
number_of_matches=1)
self.assert_events_match(
[
{
'username': requesting_username,
'event': {
'user_id': int(profile_user_id),
'page': 'profile',
'visibility': unicode(visibility)
}
}
],
actual_events
)
@contextmanager
def verify_pref_change_event_during(self, username, user_id, setting, **kwargs):
"""Assert that a single setting changed event is emitted for the user_api_userpreference table."""
expected_event = {
'username': username,
'event': {
'setting': setting,
'user_id': int(user_id),
'table': 'user_api_userpreference',
'truncated': []
}
}
expected_event['event'].update(kwargs)
event_filter = {
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'username': username,
}
with self.assert_events_match_during(event_filter=event_filter, expected_events=[expected_event]):
yield
def initialize_different_user(self, privacy=None, birth_year=None):
"""
Initialize the profile page for a different test user
"""
username, user_id = self.log_in_as_unique_user()
# Set the privacy for the new user
if privacy is None:
privacy = self.PRIVACY_PUBLIC
self.visit_profile_page(username, privacy=privacy)
# Set the user's year of birth
if birth_year:
self.set_birth_year(birth_year)
# Log the user out
LogoutPage(self.browser).visit()
return username, user_id
@attr('shard_4')
class OwnLearnerProfilePageTest(LearnerProfileTestMixin, WebAppTest):
"""
Tests that verify a student's own profile page.
"""
def verify_profile_forced_private_message(self, username, birth_year, message=None):
"""
Verify age limit messages for a user.
"""
if birth_year is None:
birth_year = ""
self.set_birth_year(birth_year=birth_year)
profile_page = self.visit_profile_page(username)
self.assertTrue(profile_page.privacy_field_visible)
        if message:
            self.assertTrue(profile_page.age_limit_message_present)
            self.assertIn(message, profile_page.profile_forced_private_message)
        else:
            self.assertFalse(profile_page.age_limit_message_present)
def test_profile_defaults_to_public(self):
"""
Scenario: Verify that a new user's profile defaults to public.
Given that I am a new user.
When I go to my profile page.
Then I see that the profile visibility is set to public.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username)
self.verify_profile_page_is_public(profile_page)
def assert_default_image_has_public_access(self, profile_page):
"""
Assert that profile image has public access.
"""
self.assertTrue(profile_page.profile_has_default_image)
self.assertTrue(profile_page.profile_has_image_with_public_access())
def test_make_profile_public(self):
"""
Scenario: Verify that the user can change their privacy.
Given that I am a registered user
And I visit my private profile page
And I set the profile visibility to public
Then a user preference changed event should be recorded
When I reload the page
Then the profile visibility should be shown as public
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
with self.verify_pref_change_event_during(
username, user_id, 'account_privacy', old=self.PRIVACY_PRIVATE, new=self.PRIVACY_PUBLIC
):
profile_page.privacy = self.PRIVACY_PUBLIC
# Reload the page and verify that the profile is now public
self.browser.refresh()
profile_page.wait_for_page()
self.verify_profile_page_is_public(profile_page)
def test_make_profile_private(self):
"""
Scenario: Verify that the user can change their privacy.
Given that I am a registered user
And I visit my public profile page
And I set the profile visibility to private
Then a user preference changed event should be recorded
When I reload the page
Then the profile visibility should be shown as private
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
with self.verify_pref_change_event_during(
username, user_id, 'account_privacy', old=None, new=self.PRIVACY_PRIVATE
):
profile_page.privacy = self.PRIVACY_PRIVATE
# Reload the page and verify that the profile is now private
self.browser.refresh()
profile_page.wait_for_page()
self.verify_profile_page_is_private(profile_page)
def test_dashboard_learner_profile_link(self):
"""
Scenario: Verify that my profile link is present on dashboard page and we can navigate to correct page.
Given that I am a registered user.
When I go to Dashboard page.
And I click on username dropdown.
Then I see Profile link in the dropdown menu.
When I click on Profile link.
Then I will be navigated to Profile page.
"""
username, user_id = self.log_in_as_unique_user()
dashboard_page = DashboardPage(self.browser)
dashboard_page.visit()
dashboard_page.click_username_dropdown()
self.assertIn('Profile', dashboard_page.username_dropdown_link_text)
dashboard_page.click_my_profile_link()
my_profile_page = LearnerProfilePage(self.browser, username)
my_profile_page.wait_for_page()
def test_fields_on_my_private_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at her own private profile.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to private.
And I reload the page.
Then I should see the profile visibility selector dropdown.
Then I see some of the profile fields are shown.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
self.verify_profile_page_is_private(profile_page)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_fields_on_my_public_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at her own public profile.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to public.
And I reload the page.
Then I should see the profile visibility selector dropdown.
Then I see all the profile fields are shown.
And `location`, `language` and `about me` fields are editable.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.verify_profile_page_is_public(profile_page)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PUBLIC)
def _test_dropdown_field(self, profile_page, field_id, new_value, displayed_value, mode):
"""
Test behaviour of a dropdown field.
"""
profile_page.value_for_dropdown_field(field_id, new_value)
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
self.browser.refresh()
profile_page.wait_for_page()
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
def _test_textarea_field(self, profile_page, field_id, new_value, displayed_value, mode):
"""
Test behaviour of a textarea field.
"""
profile_page.set_value_for_textarea_field(field_id, new_value)
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
self.browser.refresh()
profile_page.wait_for_page()
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
def test_country_field(self):
"""
Test behaviour of `Country` field.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to public and set default values for public fields.
Then I set country value to `Pakistan`.
Then displayed country should be `Pakistan` and country field mode should be `display`
And I reload the page.
Then displayed country should be `Pakistan` and country field mode should be `display`
And I make `country` field editable
Then `country` field mode should be `edit`
And `country` field icon should be visible.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self._test_dropdown_field(profile_page, 'country', 'Pakistan', 'Pakistan', 'display')
profile_page.make_field_editable('country')
self.assertEqual(profile_page.mode_for_field('country'), 'edit')
self.assertTrue(profile_page.field_icon_present('country'))
def test_language_field(self):
"""
Test behaviour of `Language` field.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to public and set default values for public fields.
Then I set language value to `Urdu`.
Then displayed language should be `Urdu` and language field mode should be `display`
And I reload the page.
Then displayed language should be `Urdu` and language field mode should be `display`
Then I set empty value for language.
Then displayed language should be `Add language` and language field mode should be `placeholder`
And I reload the page.
Then displayed language should be `Add language` and language field mode should be `placeholder`
And I make `language` field editable
Then `language` field mode should be `edit`
And `language` field icon should be visible.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self._test_dropdown_field(profile_page, 'language_proficiencies', 'Urdu', 'Urdu', 'display')
self._test_dropdown_field(profile_page, 'language_proficiencies', '', 'Add language', 'placeholder')
profile_page.make_field_editable('language_proficiencies')
self.assertTrue(profile_page.mode_for_field('language_proficiencies'), 'edit')
self.assertTrue(profile_page.field_icon_present('language_proficiencies'))
def test_about_me_field(self):
"""
Test behaviour of `About Me` field.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to public and set default values for public fields.
Then I set about me value to `ThisIsIt`.
Then displayed about me should be `ThisIsIt` and about me field mode should be `display`
And I reload the page.
Then displayed about me should be `ThisIsIt` and about me field mode should be `display`
Then I set empty value for about me.
Then displayed about me should be `Tell other edX learners a little about yourself: where you live,
what your interests are, why you're taking courses on edX, or what you hope to learn.` and about me
field mode should be `placeholder`
And I reload the page.
Then displayed about me should be `Tell other edX learners a little about yourself: where you live,
what your interests are, why you're taking courses on edX, or what you hope to learn.` and about me
field mode should be `placeholder`
And I make `about me` field editable
Then `about me` field mode should be `edit`
"""
placeholder_value = (
"Tell other learners a little about yourself: where you live, what your interests are, "
"why you're taking courses, or what you hope to learn."
)
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self._test_textarea_field(profile_page, 'bio', 'ThisIsIt', 'ThisIsIt', 'display')
self._test_textarea_field(profile_page, 'bio', '', placeholder_value, 'placeholder')
profile_page.make_field_editable('bio')
self.assertTrue(profile_page.mode_for_field('bio'), 'edit')
def test_birth_year_not_set(self):
"""
Verify message if birth year is not set.
Given that I am a registered user.
And birth year is not set for the user.
And I visit my profile page.
Then I should see a message that the profile is private until the year of birth is set.
"""
username, user_id = self.log_in_as_unique_user()
message = "You must specify your birth year before you can share your full profile."
self.verify_profile_forced_private_message(username, birth_year=None, message=message)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_user_is_under_age(self):
"""
Verify message if user is under age.
Given that I am a registered user.
And birth year is set so that age is less than 13.
And I visit my profile page.
Then I should see a message that the profile is private as I am under thirteen.
"""
username, user_id = self.log_in_as_unique_user()
under_age_birth_year = datetime.now().year - 10
self.verify_profile_forced_private_message(
username,
birth_year=under_age_birth_year,
message='You must be over 13 to share a full profile.'
)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_user_can_only_see_default_image_for_private_profile(self):
"""
Scenario: Default profile image behaves correctly for under age user.
Given that I am on my profile page with private access
And I can see default image
When I move my cursor to the image
Then i cannot see the upload/remove image text
And i cannot upload/remove the image.
"""
year_of_birth = datetime.now().year - 5
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
self.verify_profile_forced_private_message(
username,
year_of_birth,
message='You must be over 13 to share a full profile.'
)
self.assertTrue(profile_page.profile_has_default_image)
self.assertFalse(profile_page.profile_has_image_with_private_access())
def test_user_can_see_default_image_for_public_profile(self):
"""
Scenario: Default profile image behaves correctly for public profile.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then i can see the upload/remove image text
And i am able to upload new image
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
def test_user_can_upload_the_profile_image_with_success(self):
"""
Scenario: Upload profile image works correctly.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then i can see the upload/remove image text
When i upload new image via file uploader
Then i can see the changed image
And i can also see the latest image after reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
profile_page.visit()
self.assertTrue(profile_page.image_upload_success)
def test_user_can_see_error_for_exceeding_max_file_size_limit(self):
"""
Scenario: Upload profile image does not work for > 1MB image file.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then i can see the upload/remove image text
When i upload new > 1MB image via file uploader
Then i can see the error message for file size limit
And i can still see the default image after page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='larger_image.jpg')
self.assertEqual(profile_page.profile_image_message, "The file must be smaller than 1 MB in size.")
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_see_error_for_file_size_below_the_min_limit(self):
"""
Scenario: Upload profile image does not work for < 100 Bytes image file.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then i can see the upload/remove image text
When i upload new < 100 Bytes image via file uploader
Then i can see the error message for minimum file size limit
And i can still see the default image after page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='list-icon-visited.png')
self.assertEqual(profile_page.profile_image_message, "The file must be at least 100 bytes in size.")
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_see_error_for_wrong_file_type(self):
"""
Scenario: Upload profile image does not work for wrong file types.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then i can see the upload/remove image text
When i upload new csv file via file uploader
Then i can see the error message for wrong/unsupported file type
And i can still see the default image after page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='generic_csv.csv')
self.assertEqual(
profile_page.profile_image_message,
"The file must be one of the following types: .gif, .png, .jpeg, .jpg."
)
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_remove_profile_image(self):
"""
Scenario: Remove profile image works correctly.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then i can see the upload/remove image text
When i click on the remove image link
Then i can see the default image
And i can still see the default image after page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
self.assertTrue(profile_page.remove_profile_image())
self.assertTrue(profile_page.profile_has_default_image)
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
def test_user_cannot_remove_default_image(self):
"""
Scenario: Remove profile image does not works for default images.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then i can see only the upload image text
And i cannot see the remove image text
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
self.assertFalse(profile_page.remove_link_present)
def test_eventing_after_multiple_uploads(self):
"""
Scenario: An event is fired when a user with a profile image uploads another image
Given that I am on my profile page with public access
And I upload a new image via file uploader
When I upload another image via the file uploader
Then two upload events have been emitted
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg', wait_for_upload_button=False)
@attr('shard_4')
class DifferentUserLearnerProfilePageTest(LearnerProfileTestMixin, WebAppTest):
"""
Tests that verify viewing the profile page of a different user.
"""
def test_different_user_private_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at a different user's private profile.
Given that I am a registered user.
And I visit a different user's private profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then I see some of the profile fields are shown.
"""
different_username, different_user_id = self.initialize_different_user(privacy=self.PRIVACY_PRIVATE)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
self.verify_profile_page_is_private(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PRIVATE)
def test_different_user_under_age(self):
"""
Scenario: Verify that an under age user's profile is private to others.
Given that I am a registered user.
And I visit an under age user's profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then I see that only the private fields are shown.
"""
under_age_birth_year = datetime.now().year - 10
different_username, different_user_id = self.initialize_different_user(
privacy=self.PRIVACY_PUBLIC,
birth_year=under_age_birth_year
)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
self.verify_profile_page_is_private(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PRIVATE)
def test_different_user_public_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at a different user's public profile.
Given that I am a registered user.
And I visit a different user's public profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then all the profile fields are shown.
Then I shouldn't see the profile visibility selector dropdown.
Also `location`, `language` and `about me` fields are not editable.
"""
different_username, different_user_id = self.initialize_different_user(privacy=self.PRIVACY_PUBLIC)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
profile_page.wait_for_public_fields()
self.verify_profile_page_is_public(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PUBLIC)
@attr('a11y')
class LearnerProfileA11yTest(LearnerProfileTestMixin, WebAppTest):
"""
Class to test learner profile accessibility.
"""
def test_editable_learner_profile_a11y(self):
"""
Test the accessibility of the editable version of the profile page
(user viewing her own public profile).
"""
username, _ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username)
profile_page.a11y_audit.config.set_rules({
"ignore": [
'skip-link', # TODO: AC-179
'link-href', # TODO: AC-231
],
})
profile_page.a11y_audit.check_for_accessibility_errors()
profile_page.make_field_editable('language_proficiencies')
profile_page.a11y_audit.check_for_accessibility_errors()
profile_page.make_field_editable('bio')
profile_page.a11y_audit.check_for_accessibility_errors()
def test_read_only_learner_profile_a11y(self):
"""
Test the accessibility of the read-only version of a public profile page
(user viewing someone else's profile page).
"""
# initialize_different_user should cause country, language, and bio to be filled out (since
# privacy is public). It doesn't appear that this is happening, although the method
# works in regular bokchoy tests. Perhaps a problem with phantomjs? So this test is currently
# only looking at a read-only profile page with a username.
different_username, _ = self.initialize_different_user(privacy=self.PRIVACY_PUBLIC)
self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
profile_page.a11y_audit.config.set_rules({
"ignore": [
'skip-link', # TODO: AC-179
'link-href', # TODO: AC-231
],
})
profile_page.a11y_audit.check_for_accessibility_errors()
| alu042/edx-platform | common/test/acceptance/tests/lms/test_learner_profile.py | Python | agpl-3.0 | 34,219 |
from datetime import datetime
from pymongo import DESCENDING
from libs.lib import tee_db
from libs.statisticator import Job
class SuicidesJob(Job):
def __init__(self):
""" Job to get player suicides
Collection name: kill_results
        Structure:
{'player': STR ,
'suicides': INT ,
'gametype': STR,
'last_event_date': DATE ,
}
Primary key : 'player'
"""
Job.__init__(self)
results_db_name = 'results_suicides'
self.results_db = tee_db[results_db_name]
self.dependencies = ('players', 'gametypes')
def get_dependencies(self):
return self.dependencies
def load_results_from_cache(self):
res = self.results_db.find(spec={
'player': self.player_name,
'gametype': self.gametype,
},
limit=1,
            sort=[('date', DESCENDING)],
)
if res.count() > 0:
return res[0]
else:
return None
def get_results(self):
res = self.load_results_from_cache()
if res is None:
return []
else:
return res['suicides']
def save_results_to_cache(self):
# Save new line only when data changes
# Else update only the date
last_data = self.load_results_from_cache()
if last_data is not None and last_data['suicides'] == self.results['suicides']:
last_data['date'] = self.results['date']
self.results = last_data
self.results_db.save(self.results)
def process(self, player_name, gametype):
self.player_name = player_name
self.gametype = gametype
# Change status
self.status = 'processing'
# Get old data
self.results = self.load_results_from_cache()
# Set data if no history
if self.results is None:
self.results = {}
self.results['player'] = self.player_name
self.results['gametype'] = self.gametype
self.results['suicides'] = 0
self.results['last_event_date'] = datetime(1,1,1,0,0,0)
# Get new suicides
if self.gametype:
suicides = tee_db['kill'].find(spec={'$and': [
{'weapon': {'$in': ['-1', '0', '1', '2', '3', '4', '5']}},
{'killer': self.player_name},
{'victim': self.player_name},
{'gametype': self.gametype},
{'round': { "$ne": None}},
{'when': {'$gt': self.results['last_event_date']}},
]},
                sort=[('when', DESCENDING)],
)
else:
suicides = tee_db['kill'].find(spec={'$and': [
{'weapon': {'$in': ['-1', '0', '1', '2', '3', '4', '5']}},
{'killer': self.player_name},
{'victim': self.player_name},
{'round': { "$ne": None}},
{'when': {'$gt': self.results['last_event_date']}},
]},
                sort=[('when', DESCENDING)],
)
# Set new suicides
self.results['suicides'] += suicides.count()
# Set last event date
if suicides.count() > 0:
self.results['last_event_date'] = suicides[0]['when']
self.results['date'] = datetime.now()
# Save to mongo
self.save_results_to_cache()
# Change status
self.status = 'done'
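# Hedged usage sketch (not part of the original module): how a caller might drive
# this job outside the libs.statisticator scheduler. The player and gametype
# values are illustrative assumptions.
#
#     job = SuicidesJob()
#     job.process(player_name='nameless tee', gametype='DM')
#     print job.get_results()   # cumulative suicide count for that player/gametype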
| titilambert/teeawards | old/jobs/suicides/suicides.py | Python | agpl-3.0 | 4,168 |
# -*- coding: utf8 -*-
# This file is part of Mnemosyne.
#
# Copyright (C) 2013 Daniel Lombraña González
#
# Mnemosyne is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mnemosyne is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Mnemosyne. If not, see <http://www.gnu.org/licenses/>.
"""
Package for creating the Flask application.
This exports:
- create_app a function that creates the Flask application
"""
from flask import Flask
from mnemosyne.frontend import frontend
from mnemosyne.model import db
try:
import mnemosyne.settings as settings
except:
print "Settings file is missing"
def create_app(db_name=None, testing=False):
"""
Create the Flask app object after configuring it.
Keyword arguments:
db_name -- Database name
testing -- Enable/Disable testing mode
Return value:
app -- Flask application object
"""
try:
app = Flask(__name__)
app.config.from_object(settings)
except:
print "Settings file is missing, trying with env config..."
app.config.from_envvar('MNEMOSYNE_SETTINGS', silent=False)
if db_name:
app.config['SQLALCHEMY_DATABASE_URI'] = db_name
db.init_app(app)
app.register_blueprint(frontend)
return app
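# Hedged usage sketch (not in the original file): creating and running the app.
# The database URI and debug flag below are illustrative assumptions.
#
#     app = create_app(db_name='sqlite:////tmp/mnemosyne.db', testing=True)
#     app.run(debug=True)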
| PyBossa/mnemosyne | mnemosyne/core.py | Python | agpl-3.0 | 1,728 |
import glob
import os
import shutil
import sys
import tarfile
import traceback
from model import Model
from subprocess import Popen, PIPE
class Apsim75(Model):
def run(self, latidx, lonidx):
try:
apsim_bin = self.config.get('executable')
# The apsim 'executable' is a gzipped tarball that needs to be extracted into the current working directory
tar = tarfile.open(apsim_bin)
tar.extractall()
tar.close()
model_dir = 'Model'
for xml_file in glob.glob('*.xml'):
if os.path.basename(xml_file) == 'Apsim.xml':
continue
old_xml = '%s/%s' % (model_dir, os.path.basename(xml_file))
if os.path.isfile(old_xml):
os.remove(old_xml)
if os.path.islink(xml_file):
link = os.readlink(xml_file)
shutil.copy(link, model_dir)
else:
shutil.copy(xml_file, model_dir)
# Create sim files
p = Popen('source paths.sh ; mono Model/ApsimToSim.exe Generic.apsim', shell=True, executable='/bin/bash', stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
stdout_file = open('RESULT.OUT', 'w')
stdout_file.write(stdout)
if p.returncode != 0:
rc = p.returncode
# Run apsim for each sim file
for sim in glob.glob('*.sim'):
p = Popen('source paths.sh ; Model/ApsimModel.exe %s' % sim, shell=True, executable='/bin/bash', stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
stdout_file.write(stdout)
if p.returncode != 0:
rc = p.returncode
stdout_file.close()
return True
except:
print "[%s]: %s" % (os.path.basename(__file__), traceback.format_exc())
return False
| RDCEP/psims | pysims/models/apsim75.py | Python | agpl-3.0 | 2,004 |
# *** submodules *** #
from matrix import constants, decompositions, errors, approximation, nearest
# *** functions *** #
from matrix.calculate import (is_positive_semidefinite, is_positive_definite, is_invertible,
decompose, solve)
# *** constants *** #
from matrix.constants import (
DECOMPOSITION_TYPES,
LDL_DECOMPOSITION_TYPE, LDL_DECOMPOSITION_COMPRESSED_TYPE, LL_DECOMPOSITION_TYPE,
UNIVERSAL_PERMUTATION_METHODS, SPARSE_ONLY_PERMUTATION_METHODS,
NO_PERMUTATION_METHOD,
DECREASING_DIAGONAL_VALUES_PERMUTATION_METHOD, INCREASING_DIAGONAL_VALUES_PERMUTATION_METHOD,
DECREASING_ABSOLUTE_DIAGONAL_VALUES_PERMUTATION_METHOD,
INCREASING_ABSOLUTE_DIAGONAL_VALUES_PERMUTATION_METHOD)
DECOMPOSITION_TYPES = DECOMPOSITION_TYPES
""" Supported types of decompositions. """
UNIVERSAL_PERMUTATION_METHODS = UNIVERSAL_PERMUTATION_METHODS
""" Supported permutation methods for decompose dense and sparse matrices. """
SPARSE_ONLY_PERMUTATION_METHODS = SPARSE_ONLY_PERMUTATION_METHODS
""" Supported permutation methods only for sparse matrices. """
# *** version *** #
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
# *** logging *** #
import logging
logger = logging.getLogger(__name__)
del logging
# *** deprecated *** #
def __getattr__(name):
deprecated_names = ['decomposition', 'positive_definite_matrix',
'positive_semidefinite_matrix', 'APPROXIMATION_ONLY_PERMUTATION_METHODS']
if name in deprecated_names:
import warnings
warnings.warn(f'"matrix.{name}" is deprecated. Take a look at'
' "matrix.approximation.positive_semidefinite" instead.',
DeprecationWarning, stacklevel=2)
import matrix.approximate
return matrix.approximate.__getattribute__(name)
raise AttributeError(f'Module {__name__} has no attribute {name}.')
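# Hedged usage sketch (not part of the original file): the exact call signatures
# are assumptions inferred from the names imported above, not verified against
# the package documentation.
#
#     import numpy as np
#     import matrix
#     A = np.array([[2.0, 1.0], [1.0, 3.0]])
#     if matrix.is_positive_definite(A):
#         decomposition = matrix.decompose(A)   # decomposition type/permutation kwargs omitted
#         x = matrix.solve(A, np.ones(2))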
| jor-/matrix-decomposition | matrix/__init__.py | Python | agpl-3.0 | 1,947 |
from logging import getLogger
from ckan.lib.base import request, BaseController, abort, json, c
from ckan.plugins import toolkit
import ckan.plugins as p
import json
import pylons.config as config
import subprocess
import shlex
import os, sys
log = getLogger(__name__)
class Resource_api_harvestController(BaseController):
def stop_job(self):
if not request.method == 'GET':
log.info("API harvest - error detected, incorrect method used in call")
abort(405, 'Method not allowed')
if not c.userobj:
log.info("API harvest - error detected, need user login")
abort(403, 'Forbidden, need user login')
if not c.userobj.sysadmin:
log.info("API harvest - error detected, need API-Key")
abort(403, 'Forbidden, need API-Key')
job_name = request.params.get('job_name','')
if job_name == '':
log.info("API harvest - error detected, need job_name param in get parameter")
abort(400, 'Bad Request, need job_name param')
config_file = config.get('ckan.api_basic.config_file','')
if config_file == '':
log.info("API harvest - error detected, config_file parameter not defined in configuration file")
abort(403, 'Forbidden, config_file parameter not defined in configuration file')
ve_route = config.get('ckan.api_basic.virtual_evnvironment_route')
if ve_route == '':
log.info("API harvest - error detected, ve_route parameter not defined in configuration file")
abort(403, 'Forbidden, ve_route parameter not defined in configuration file')
if config.get('ckan.plugins').find(" harvest ") == -1:
log.info("API harvest - error detected, harvest extension must be active in CKAN'")
abort(403, 'Forbidden, harvest extension must be active in CKAN')
log.info("API harvest - Validations ok, stop the job specified")
subprocess.check_output(shlex.split(ve_route + '/bin/paster --plugin=ckanext-harvest harvester job_abort ' + job_name + ' --config=' + config_file), cwd=ve_route + '/src/ckan')
def create_job(self):
if not request.method == 'GET':
log.info("API harvest - error detected, incorrect method used in call")
abort(405, 'Method not allowed')
if not c.userobj:
log.info("API harvest - error detected, need user login")
abort(403, 'Forbidden, need user login')
if not c.userobj.sysadmin:
log.info("API harvest - error detected, need API-Key")
abort(403, 'Forbidden, need API-Key')
job_name = request.params.get('job_name','')
if job_name == '':
log.info("API harvest - error detected, need job_name param in get parameter")
abort(400, 'Bad Request, need job_name param')
config_file = config.get('ckan.api_basic.config_file','')
if config_file == '':
log.info("API harvest - error detected, config_file parameter not defined in configuration file")
abort(403, 'Forbidden, config_file parameter not defined in configuration file')
ve_route = config.get('ckan.api_basic.virtual_evnvironment_route')
if ve_route == '':
log.info("API harvest - error detected, ve_route parameter not defined in configuration file")
abort(403, 'Forbidden, ve_route parameter not defined in configuration file')
if config.get('ckan.plugins').find(" harvest ") == -1:
abort(403, 'Forbidden, harvest extension must be active in CKAN')
log.info("API harvest - Validations ok, create the job specified")
subprocess.check_output(shlex.split(ve_route + '/bin/paster --plugin=ckanext-harvest harvester job ' + job_name + ' --config=' + config_file), cwd=ve_route + '/src/ckan')
def create_job_all(self):
if not request.method == 'GET':
log.info("API harvest - error detected, incorrect method used in call")
abort(405, 'Method not allowed')
if not c.userobj:
log.info("API harvest - error detected, need user login")
abort(403, 'Forbidden, need user login')
if not c.userobj.sysadmin:
log.info("API harvest - error detected, need API-Key")
abort(403, 'Forbidden, need API-Key')
config_file = config.get('ckan.api_basic.config_file','')
if config_file == '':
log.info("API harvest - error detected, config_file parameter not defined in configuration file")
abort(403, 'Forbidden, config_file parameter not defined in configuration file')
ve_route = config.get('ckan.api_basic.virtual_evnvironment_route')
if ve_route == '':
log.info("API harvest - error detected, ve_route parameter not defined in configuration file")
abort(403, 'Forbidden, ve_route parameter not defined in configuration file')
        if config.get('ckan.plugins').find(" harvest ") == -1:
            log.info("API harvest - error detected, harvest extension must be active in CKAN")
            abort(403, 'Forbidden, harvest extension must be active in CKAN')
        log.info("API harvest - Validations ok, create jobs for all harvest sources")
        subprocess.check_output(shlex.split(ve_route + '/bin/paster --plugin=ckanext-harvest harvester job-all --config=' + config_file), cwd=ve_route + '/src/ckan')
def run_job(self):
if not request.method == 'GET':
log.info("API harvest - error detected, incorrect method used in call")
abort(405, 'Method not allowed')
if not c.userobj:
log.info("API harvest - error detected, need user login")
abort(403, 'Forbidden, need user login')
if not c.userobj.sysadmin:
log.info("API harvest - error detected, need API-Key")
abort(403, 'Forbidden, need API-Key')
config_file = config.get('ckan.api_basic.config_file','')
if config_file == '':
log.info("API harvest - error detected, config_file parameter not defined in configuration file")
abort(403, 'Forbidden, config_file parameter not defined in configuration file')
ve_route = config.get('ckan.api_basic.virtual_evnvironment_route')
if ve_route == '':
log.info("API harvest - error detected, ve_route parameter not defined in configuration file")
abort(403, 'Forbidden, ve_route parameter not defined in configuration file')
if config.get('ckan.plugins').find(" harvest ") == -1:
log.info("API harvest - error detected, harvest extension must be active in CKAN")
abort(403, 'Forbidden, harvest extension must be active in CKAN')
log.info("API harvest - Validations ok, execute command run")
subprocess.check_output(shlex.split(ve_route + '/bin/paster --plugin=ckanext-harvest harvester run --config=' + config_file), cwd=ve_route + '/src/ckan')
| odevsp/ckanext-api_harvest | ckanext/api_harvest/controller.py | Python | agpl-3.0 | 6,252 |
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from flask import url_for
class Hateoas(object):
def link(self, rel, title, href):
return "<link rel='%s' title='%s' href='%s'/>" % (rel, title, href)
def create_link(self, item, rel='self'):
title = item.__class__.__name__.lower()
method = ".api_%s" % title
href = url_for(method, id=item.id, _external=True)
return self.link(rel, title, href)
def create_links(self, item):
cls = item.__class__.__name__.lower()
if cls == 'taskrun':
link = self.create_link(item)
links = []
if item.app_id is not None:
links.append(self.create_link(item.app, rel='parent'))
if item.task_id is not None:
links.append(self.create_link(item.task, rel='parent'))
return links, link
elif cls == 'task':
link = self.create_link(item)
links = []
if item.app_id is not None:
links = [self.create_link(item.app, rel='parent')]
return links, link
elif cls == 'category':
return None, self.create_link(item)
elif cls == 'app':
link = self.create_link(item)
links = []
if item.category_id is not None:
links.append(self.create_link(item.category, rel='category'))
return links, link
else:
return False
def remove_links(self, item):
"""Remove HATEOAS link and links from item"""
if item.get('link'):
item.pop('link')
if item.get('links'):
item.pop('links')
return item
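# Hedged usage sketch (not in the original file): typical output shape when
# serializing an API item. `task` stands for any mapped model instance and the
# URL is illustrative.
#
#     hateoas = Hateoas()
#     links, link = hateoas.create_links(task)
#     # link  -> "<link rel='self' title='task' href='http://example.com/api/task/42'/>"
#     # links -> parent links, e.g. the owning app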
| geotagx/geotagx-pybossa-archive | pybossa/hateoas.py | Python | agpl-3.0 | 2,393 |
#!/usr/bin/env jython
import sys
#sys.path.append("/usr/share/java/itextpdf-5.4.1.jar")
sys.path.append("itextpdf-5.4.1.jar")
#sys.path.append("/usr/share/java/itext-2.0.7.jar")
#sys.path.append("/usr/share/java/xercesImpl.jar")
#sys.path.append("/usr/share/java/xml-apis.jar")
from java.io import FileOutputStream
from com.itextpdf.text.pdf import PdfReader,PdfStamper,BaseFont
#from com.lowagie.text.pdf import PdfReader,PdfStamper,BaseFont
#import re
import time
#import xmlrpclib
from SimpleXMLRPCServer import SimpleXMLRPCServer
def pdf_fill(orig_pdf,new_pdf,vals):
#print "pdf_fill",orig_pdf,new_pdf,vals
t0=time.time()
#print orig_pdf
rd=PdfReader(orig_pdf)
#print new_pdf
#print t0
st=PdfStamper(rd,FileOutputStream(new_pdf))
font=BaseFont.createFont("/usr/share/fonts/truetype/thai/Garuda.ttf",BaseFont.IDENTITY_H,BaseFont.EMBEDDED)
form=st.getAcroFields()
for k,v in vals.items():
try:
form.setFieldProperty(k,"textfont",font,None)
form.setField(k,v.decode('utf-8'))
except Exception,e:
raise Exception("Field %s: %s"%(k,str(e)))
st.setFormFlattening(True)
st.close()
t1=time.time()
#print "finished in %.2fs"%(t1-t0)
return True
def pdf_merge(pdf1,pdf2):
#print "pdf_merge",orig_pdf,vals
t0=time.time()
pdf=pdf1
t1=time.time()
#print "finished in %.2fs"%(t1-t0)
return pdf
serv=SimpleXMLRPCServer(("localhost",9999))
serv.register_function(pdf_fill,"pdf_fill")
serv.register_function(pdf_merge,"pdf_merge")
print "waiting for requests..."
serv.serve_forever()
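# Hedged client-side sketch (not part of the original script): how OpenERP-side
# code could call this Jython XML-RPC service. The field names in the values
# dictionary are illustrative assumptions.
#
#     import xmlrpclib
#     serv = xmlrpclib.ServerProxy('http://localhost:9999')
#     serv.pdf_fill('form_template.pdf', '/tmp/filled.pdf',
#                   {'partner_name': 'ABC Ltd', 'amount': '1,000.00'})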
| jeffery9/mixprint_addons | ineco_thai_account/report/jy_serv.py | Python | agpl-3.0 | 1,612 |
# -*- coding: utf-8 -*-
"""
Unit tests for embargo app admin forms.
"""
from __future__ import absolute_import
import six
# Explicitly import the cache from ConfigurationModel so we can reset it after each test
from config_models.models import cache
from django.test import TestCase
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ..forms import IPFilterForm, RestrictedCourseForm
from ..models import IPFilter
class RestrictedCourseFormTest(ModuleStoreTestCase):
"""Test the course form properly validates course IDs"""
def test_save_valid_data(self):
course = CourseFactory.create()
data = {
'course_key': six.text_type(course.id),
'enroll_msg_key': 'default',
'access_msg_key': 'default'
}
form = RestrictedCourseForm(data=data)
self.assertTrue(form.is_valid())
def test_invalid_course_key(self):
# Invalid format for the course key
form = RestrictedCourseForm(data={'course_key': 'not/valid'})
self._assert_course_field_error(form)
def test_course_not_found(self):
course_key = CourseLocator(org='test', course='test', run='test')
form = RestrictedCourseForm(data={'course_key': course_key})
self._assert_course_field_error(form)
def _assert_course_field_error(self, form):
"""
Validation shouldn't work.
"""
self.assertFalse(form.is_valid())
msg = 'COURSE NOT FOUND'
self.assertIn(msg, form._errors['course_key'][0]) # pylint: disable=protected-access
with self.assertRaisesRegexp(
ValueError, "The RestrictedCourse could not be created because the data didn't validate."
):
form.save()
class IPFilterFormTest(TestCase):
"""Test form for adding [black|white]list IP addresses"""
def tearDown(self):
super(IPFilterFormTest, self).tearDown()
# Explicitly clear ConfigurationModel's cache so tests have a clear cache
# and don't interfere with each other
cache.clear()
def test_add_valid_ips(self):
# test adding valid ip addresses
# should be able to do both ipv4 and ipv6
# spacing should not matter
form_data = {
'whitelist': u'127.0.0.1, 2003:dead:beef:4dad:23:46:bb:101, 1.1.0.1/32, 1.0.0.0/24',
'blacklist': u' 18.244.1.5 , 2002:c0a8:101::42, 18.36.22.1, 1.0.0.0/16'
}
form = IPFilterForm(data=form_data)
self.assertTrue(form.is_valid())
form.save()
whitelist = IPFilter.current().whitelist_ips
blacklist = IPFilter.current().blacklist_ips
for addr in u'127.0.0.1, 2003:dead:beef:4dad:23:46:bb:101'.split(','):
self.assertIn(addr.strip(), whitelist)
for addr in u'18.244.1.5, 2002:c0a8:101::42, 18.36.22.1'.split(','):
self.assertIn(addr.strip(), blacklist)
# Network tests
# ips not in whitelist network
for addr in [u'1.1.0.2', u'1.0.1.0']:
self.assertNotIn(addr.strip(), whitelist)
# ips in whitelist network
for addr in [u'1.1.0.1', u'1.0.0.100']:
self.assertIn(addr.strip(), whitelist)
# ips not in blacklist network
for addr in [u'2.0.0.0', u'1.1.0.0']:
self.assertNotIn(addr.strip(), blacklist)
# ips in blacklist network
for addr in [u'1.0.100.0', u'1.0.0.10']:
self.assertIn(addr.strip(), blacklist)
# Test clearing by adding an empty list is OK too
form_data = {
'whitelist': '',
'blacklist': ''
}
form = IPFilterForm(data=form_data)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(IPFilter.current().whitelist), 0)
self.assertEqual(len(IPFilter.current().blacklist), 0)
def test_add_invalid_ips(self):
# test adding invalid ip addresses
form_data = {
'whitelist': u'.0.0.1, :dead:beef:::, 1.0.0.0/55',
'blacklist': u' 18.244.* , 999999:c0a8:101::42, 1.0.0.0/'
}
form = IPFilterForm(data=form_data)
self.assertFalse(form.is_valid())
if six.PY2:
wmsg = "Invalid IP Address(es): [u'.0.0.1', u':dead:beef:::', u'1.0.0.0/55']" \
" Please fix the error(s) and try again."
else:
wmsg = "Invalid IP Address(es): ['.0.0.1', ':dead:beef:::', '1.0.0.0/55']" \
" Please fix the error(s) and try again."
self.assertEquals(wmsg, form._errors['whitelist'][0]) # pylint: disable=protected-access
if six.PY2:
bmsg = "Invalid IP Address(es): [u'18.244.*', u'999999:c0a8:101::42', u'1.0.0.0/']" \
" Please fix the error(s) and try again."
else:
bmsg = "Invalid IP Address(es): ['18.244.*', '999999:c0a8:101::42', '1.0.0.0/']" \
" Please fix the error(s) and try again."
self.assertEquals(bmsg, form._errors['blacklist'][0]) # pylint: disable=protected-access
with self.assertRaisesRegexp(ValueError, "The IPFilter could not be created because the data didn't validate."):
form.save()
| ESOedX/edx-platform | openedx/core/djangoapps/embargo/tests/test_forms.py | Python | agpl-3.0 | 5,361 |
# This file is part of VoltDB.
# Copyright (C) 2008-2014 VoltDB Inc.
#
# This file contains original code and/or modifications of original code.
# Any modifications made by VoltDB Inc. are licensed under the following
# terms and conditions:
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
@VOLT.Command(
    # Uses all defaults except the last, which controls safemode switch availability
bundles = VOLT.ServerBundle('recover',
needs_catalog=False,
supports_live=False,
default_host=True,
safemode_available=True,
supports_daemon=True,
supports_multiple_daemons=True),
description = 'Start the database and recover the previous state.'
)
def recover(runner):
runner.go()
| zheguang/voltdb | lib/python/voltcli/voltdb.d/recover.py | Python | agpl-3.0 | 1,849 |
from django.contrib.gis.geos import GEOSGeometry, GeometryCollection
from arches.app.utils.betterJSONSerializer import JSONSerializer, JSONDeserializer
class GeoUtils(object):
def create_geom_collection_from_geojson(self, geojson):
geoms = []
for feature in geojson['features']:
geoms.append(GEOSGeometry(JSONSerializer().serialize(feature['geometry'])))
return GeometryCollection(geoms)
def get_bounds_from_geojson(self, geojson):
"""
Takes a geojson object with polygon(s) and returns the coordinates of
the extent of the polygons.
"""
geom_collection = self.create_geom_collection_from_geojson(geojson)
bounds = geom_collection.extent
return bounds
def get_centroid(self, geojson):
"""
Takes a geojson object with polygon(s) and returns its center point as geojson.
"""
geom_collection = self.create_geom_collection_from_geojson(geojson)
centroid = geom_collection.centroid.geojson
return JSONDeserializer().deserialize(centroid)
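# Hedged usage sketch (not in the original file): the GeoJSON payload below is a
# minimal illustrative FeatureCollection, not taken from Arches fixtures.
#
#     geojson = {
#         "type": "FeatureCollection",
#         "features": [{
#             "type": "Feature",
#             "geometry": {"type": "Polygon",
#                          "coordinates": [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]},
#             "properties": {},
#         }],
#     }
#     utils = GeoUtils()
#     utils.get_bounds_from_geojson(geojson)   # -> (0.0, 0.0, 1.0, 1.0)
#     utils.get_centroid(geojson)              # -> GeoJSON point near (0.5, 0.5)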
| cvast/arches | arches/app/utils/geo_utils.py | Python | agpl-3.0 | 1,092 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-06-23 15:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mendel', '0008_auto_20160613_1911'),
]
operations = [
migrations.RenameField(
model_name='context',
old_name='keyword',
new_name='keyword_given',
),
migrations.RenameField(
model_name='review',
old_name='keyword',
new_name='keyword_given',
),
migrations.AddField(
model_name='review',
name='keyword_proposed',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='keyword_proposed', to='mendel.Keyword'),
preserve_default=False,
),
migrations.AlterUniqueTogether(
name='review',
unique_together=set([('context', 'keyword_proposed', 'category', 'user', 'status')]),
),
]
| Architizer/mendel | mendel/migrations/0009_auto_20160623_1141.py | Python | agpl-3.0 | 1,091 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import getdate, validate_email_add, today
from frappe.model.naming import make_autoname
from frappe import throw, _
import frappe.permissions
from frappe.model.document import Document
from frappe.model.mapper import get_mapped_doc
from erpnext.utilities.transaction_base import delete_events
class EmployeeUserDisabledError(frappe.ValidationError):
pass
class Employee(Document):
def onload(self):
self.get("__onload").salary_structure_exists = frappe.db.get_value("Salary Structure",
{"employee": self.name, "is_active": "Yes", "docstatus": ["!=", 2]})
def autoname(self):
naming_method = frappe.db.get_value("HR Settings", None, "emp_created_by")
if not naming_method:
throw(_("Please setup Employee Naming System in Human Resource > HR Settings"))
else:
if naming_method == 'Naming Series':
self.name = make_autoname(self.naming_series + '.####')
elif naming_method == 'Employee Number':
self.name = self.employee_number
self.employee = self.name
def validate(self):
from erpnext.controllers.status_updater import validate_status
validate_status(self.status, ["Active", "Left"])
self.employee = self.name
self.validate_date()
self.validate_email()
self.validate_status()
self.validate_employee_leave_approver()
self.validate_reports_to()
if self.user_id:
self.validate_for_enabled_user_id()
self.validate_duplicate_user_id()
else:
existing_user_id = frappe.db.get_value("Employee", self.name, "user_id")
if existing_user_id:
frappe.permissions.remove_user_permission(
"Employee", self.name, existing_user_id)
def on_update(self):
if self.user_id:
self.update_user()
self.update_user_permissions()
def update_user_permissions(self):
frappe.permissions.add_user_permission("Employee", self.name, self.user_id)
frappe.permissions.set_user_permission_if_allowed("Company", self.company, self.user_id)
def update_user(self):
# add employee role if missing
user = frappe.get_doc("User", self.user_id)
user.flags.ignore_permissions = True
if "Employee" not in user.get("user_roles"):
user.add_roles("Employee")
# copy details like Fullname, DOB and Image to User
if self.employee_name and not (user.first_name and user.last_name):
employee_name = self.employee_name.split(" ")
if len(employee_name) >= 3:
user.last_name = " ".join(employee_name[2:])
user.middle_name = employee_name[1]
elif len(employee_name) == 2:
user.last_name = employee_name[1]
user.first_name = employee_name[0]
if self.date_of_birth:
user.birth_date = self.date_of_birth
if self.gender:
user.gender = self.gender
if self.image:
if not user.user_image:
user.user_image = self.image
try:
frappe.get_doc({
"doctype": "File",
"file_name": self.image,
"attached_to_doctype": "User",
"attached_to_name": self.user_id
}).insert()
except frappe.DuplicateEntryError:
# already exists
pass
user.save()
def validate_date(self):
if self.date_of_birth and getdate(self.date_of_birth) > getdate(today()):
throw(_("Date of Birth cannot be greater than today."))
if self.date_of_birth and self.date_of_joining and getdate(self.date_of_birth) >= getdate(self.date_of_joining):
throw(_("Date of Joining must be greater than Date of Birth"))
elif self.date_of_retirement and self.date_of_joining and (getdate(self.date_of_retirement) <= getdate(self.date_of_joining)):
throw(_("Date Of Retirement must be greater than Date of Joining"))
elif self.relieving_date and self.date_of_joining and (getdate(self.relieving_date) <= getdate(self.date_of_joining)):
throw(_("Relieving Date must be greater than Date of Joining"))
elif self.contract_end_date and self.date_of_joining and (getdate(self.contract_end_date) <= getdate(self.date_of_joining)):
throw(_("Contract End Date must be greater than Date of Joining"))
def validate_email(self):
if self.company_email:
validate_email_add(self.company_email, True)
if self.personal_email:
validate_email_add(self.personal_email, True)
def validate_status(self):
if self.status == 'Left' and not self.relieving_date:
throw(_("Please enter relieving date."))
def validate_for_enabled_user_id(self):
if not self.status == 'Active':
return
enabled = frappe.db.sql("""select name from `tabUser` where
name=%s and enabled=1""", self.user_id)
if not enabled:
throw(_("User {0} is disabled").format(
self.user_id), EmployeeUserDisabledError)
def validate_duplicate_user_id(self):
employee = frappe.db.sql_list("""select name from `tabEmployee` where
user_id=%s and status='Active' and name!=%s""", (self.user_id, self.name))
if employee:
throw(_("User {0} is already assigned to Employee {1}").format(
self.user_id, employee[0]), frappe.DuplicateEntryError)
def validate_employee_leave_approver(self):
for l in self.get("leave_approvers")[:]:
if "Leave Approver" not in frappe.get_roles(l.leave_approver):
frappe.get_doc("User", l.leave_approver).add_roles("Leave Approver")
def validate_reports_to(self):
if self.reports_to == self.name:
throw(_("Employee cannot report to himself."))
def on_trash(self):
delete_events(self.doctype, self.name)
@frappe.whitelist()
def get_retirement_date(date_of_birth=None):
import datetime
ret = {}
if date_of_birth:
try:
dt = getdate(date_of_birth) + datetime.timedelta(21915)
ret = {'date_of_retirement': dt.strftime('%Y-%m-%d')}
except ValueError:
# invalid date
ret = {}
return ret
@frappe.whitelist()
def make_salary_structure(source_name, target=None):
target = get_mapped_doc("Employee", source_name, {
"Employee": {
"doctype": "Salary Structure",
"field_map": {
"name": "employee",
}
}
})
target.make_earn_ded_table()
return target
def validate_employee_role(doc, method):
# called via User hook
if "Employee" in [d.role for d in doc.get("user_roles")]:
if not frappe.db.get_value("Employee", {"user_id": doc.name}):
frappe.msgprint(_("Please set User ID field in an Employee record to set Employee Role"))
doc.get("user_roles").remove(doc.get("user_roles", {"role": "Employee"})[0])
def update_user_permissions(doc, method):
# called via User hook
if "Employee" in [d.role for d in doc.get("user_roles")]:
employee = frappe.get_doc("Employee", {"user_id": doc.name})
employee.update_user_permissions()
def send_birthday_reminders():
"""Send Employee birthday reminders if no 'Stop Birthday Reminders' is not set."""
if int(frappe.db.get_single_value("HR Settings", "stop_birthday_reminders") or 0):
return
from frappe.utils.user import get_enabled_system_users
users = None
birthdays = get_employees_who_are_born_today()
if birthdays:
if not users:
users = [u.email_id or u.name for u in get_enabled_system_users()]
for e in birthdays:
frappe.sendmail(recipients=filter(lambda u: u not in (e.company_email, e.personal_email, e.user_id), users),
subject=_("Birthday Reminder for {0}").format(e.employee_name),
message=_("""Today is {0}'s birthday!""").format(e.employee_name),
reply_to=e.company_email or e.personal_email or e.user_id,
bulk=True)
def get_employees_who_are_born_today():
"""Get Employee properties whose birthday is today."""
return frappe.db.sql("""select name, personal_email, company_email, user_id, employee_name
from tabEmployee where day(date_of_birth) = day(%(date)s)
and month(date_of_birth) = month(%(date)s)
and status = 'Active'""", {"date": today()}, as_dict=True)
def get_holiday_list_for_employee(employee, raise_exception=True):
if employee:
holiday_list, company = frappe.db.get_value("Employee", employee, ["holiday_list", "company"])
else:
holiday_list=''
company=frappe.db.get_value("Global Defaults", None, "default_company")
if not holiday_list:
holiday_list = frappe.db.get_value("Company", company, "default_holiday_list")
if not holiday_list and raise_exception:
        frappe.throw(_('Please set a default Holiday List for Employee {0} or Company {1}').format(employee, company))
return holiday_list
| ShashaQin/erpnext | erpnext/hr/doctype/employee/employee.py | Python | agpl-3.0 | 8,327 |
# Copyright 2012-2015 Mattias Fliesberg
#
# This file is part of opmuse.
#
# opmuse is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# opmuse is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with opmuse. If not, see <http://www.gnu.org/licenses/>.
from . import setup_db, teardown_db
from opmuse.security import User, hash_password
class TestSecurity:
def setup_method(self):
setup_db(self)
def teardown_method(self):
teardown_db(self)
def test_login(self):
user = self.session.query(User).filter_by(login="admin").one()
hashed = hash_password("admin", user.salt)
assert hashed == user.password
hashed = hash_password("wrong", user.salt)
assert hashed != user.password
| opmuse/opmuse | opmuse/test/test_security.py | Python | agpl-3.0 | 1,210 |
import datetime
from django.db import models, transaction
class Key(models.Model):
uid = models.CharField(max_length=255, unique=True)
name = models.TextField()
created = models.DateTimeField(default=datetime.datetime.utcnow)
class Meta:
ordering = ('-created',)
get_latest_by = 'created'
def __unicode__(self):
return u"pk=%d uid=%r name=%r" % (
self.pk,
self.uid,
self.name,
)
def save(self, *args, **kwargs):
created = not self.pk
super(Key, self).save(*args, **kwargs)
if created:
from .tasks import update_or_create_key
transaction.on_commit(lambda: update_or_create_key.delay(self.uid))
| lamby/buildinfo.debian.net | bidb/keys/models.py | Python | agpl-3.0 | 744 |
# -*- coding: utf-8 -*-
# Copyright 2017 KMEE
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
class AccountAccountType(models.Model):
_inherit = 'account.account.type'
_order = 'sequence asc'
sequence = fields.Integer(
string=u'Sequence',
)
| thinkopensolutions/l10n-brazil | financial_account/models/inherited_account_account_type.py | Python | agpl-3.0 | 318 |
"""
Views to support bulk email functionalities like opt-out.
"""
import logging
from six import text_type
from django.contrib.auth.models import User
from django.http import Http404
from bulk_email.models import Optout
from courseware.courses import get_course_by_id
from edxmako.shortcuts import render_to_response
from lms.djangoapps.discussion.notification_prefs.views import (
UsernameCipher,
UsernameDecryptionException,
)
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
log = logging.getLogger(__name__)
def opt_out_email_updates(request, token, course_id):
"""
A view that let users opt out of any email updates.
    This is meant to be the target of an opt-out link or button.
The `token` parameter must decrypt to a valid username.
The `course_id` is the string course key of any course.
Raises a 404 if there are any errors parsing the input.
"""
try:
username = UsernameCipher().decrypt(token)
user = User.objects.get(username=username)
course_key = CourseKey.from_string(course_id)
course = get_course_by_id(course_key, depth=0)
except UnicodeDecodeError:
raise Http404("base64url")
except UsernameDecryptionException as exn:
raise Http404(text_type(exn))
except User.DoesNotExist:
raise Http404("username")
except InvalidKeyError:
raise Http404("course")
unsub_check = request.POST.get('unsubscribe', False)
context = {
'course': course,
'unsubscribe': unsub_check
}
if request.method == 'GET':
return render_to_response('bulk_email/confirm_unsubscribe.html', context)
if request.method == 'POST' and unsub_check:
Optout.objects.get_or_create(user=user, course_id=course_key)
log.info(
u"User %s (%s) opted out of receiving emails from course %s",
user.username,
user.email,
course_id,
)
return render_to_response('bulk_email/unsubscribe_success.html', context)
| cpennington/edx-platform | lms/djangoapps/bulk_email/views.py | Python | agpl-3.0 | 2,070 |
"""
This config file extends the test environment configuration
so that we can run the lettuce acceptance tests.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
from .test import *
from .sauce import *
# You need to start the server in debug mode,
# otherwise the browser will not render the pages correctly
DEBUG = True
SITE_NAME = 'localhost:{}'.format(LETTUCE_SERVER_PORT)
# Output Django logs to a file
import logging
logging.basicConfig(filename=TEST_ROOT / "log" / "lms_acceptance.log", level=logging.ERROR)
# set root logger level
logging.getLogger().setLevel(logging.ERROR)
import os
from random import choice
def seed():
return os.getppid()
# Silence noisy logs
LOG_OVERRIDES = [
('track.middleware', logging.CRITICAL),
('codejail.safe_exec', logging.ERROR),
('edx.courseware', logging.ERROR),
('audit', logging.ERROR),
('instructor_task.api_helper', logging.ERROR),
]
for log_name, log_level in LOG_OVERRIDES:
logging.getLogger(log_name).setLevel(log_level)
update_module_store_settings(
MODULESTORE,
doc_store_settings={
'db': 'acceptance_xmodule',
'collection': 'acceptance_modulestore_%s' % seed(),
},
module_store_options={
'fs_root': TEST_ROOT / "data",
},
default_store=os.environ.get('DEFAULT_STORE', 'draft'),
)
CONTENTSTORE = {
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'DOC_STORE_CONFIG': {
'host': 'localhost',
'db': 'acceptance_xcontent_%s' % seed(),
}
}
# Set this up so that 'paver lms --settings=acceptance' and running the
# harvest command both use the same (test) database
# which they can flush without messing up your dev db
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': TEST_ROOT / "db" / "test_edx.db",
'TEST_NAME': TEST_ROOT / "db" / "test_edx.db",
'OPTIONS': {
'timeout': 30,
},
'ATOMIC_REQUESTS': True,
}
}
TRACKING_BACKENDS.update({
'mongo': {
'ENGINE': 'track.backends.mongodb.MongoBackend'
}
})
EVENT_TRACKING_BACKENDS['tracking_logs']['OPTIONS']['backends'].update({
'mongo': {
'ENGINE': 'eventtracking.backends.mongodb.MongoBackend',
'OPTIONS': {
'database': 'track'
}
}
})
BULK_EMAIL_DEFAULT_FROM_EMAIL = "test@test.org"
# Forums are disabled in test.py to speed up unit tests, but we do not have
# per-test control for lettuce acceptance tests.
# If you are writing an acceptance test that needs the discussion service enabled,
# do not write it in lettuce, but instead write it using bok-choy.
# DO NOT CHANGE THIS SETTING HERE.
FEATURES['ENABLE_DISCUSSION_SERVICE'] = False
# Use the auto_auth workflow for creating users and logging them in
FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] = True
# Enable third-party authentication
FEATURES['ENABLE_THIRD_PARTY_AUTH'] = True
THIRD_PARTY_AUTH = {
"Google": {
"SOCIAL_AUTH_GOOGLE_OAUTH2_KEY": "test",
"SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET": "test"
},
"Facebook": {
"SOCIAL_AUTH_FACEBOOK_KEY": "test",
"SOCIAL_AUTH_FACEBOOK_SECRET": "test"
}
}
# Enable fake payment processing page
FEATURES['ENABLE_PAYMENT_FAKE'] = True
# Enable email on the instructor dash
FEATURES['ENABLE_INSTRUCTOR_EMAIL'] = True
FEATURES['REQUIRE_COURSE_EMAIL_AUTH'] = False
FEATURES['ENABLE_SPECIAL_EXAMS'] = True
# Don't actually send any requests to Software Secure for student identity
# verification.
FEATURES['AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'] = True
# HACK
# Setting this flag to false causes imports to not load correctly in the lettuce python files
# We do not yet understand why this occurs. Setting this to true is a stopgap measure
USE_I18N = True
FEATURES['ENABLE_FEEDBACK_SUBMISSION'] = False
# Include the lettuce app for acceptance testing, including the 'harvest' django-admin command
INSTALLED_APPS += ('lettuce.django',)
LETTUCE_APPS = ('courseware', 'instructor')
# Lettuce appears to have a bug that causes it to search
# `instructor_task` when we specify the `instructor` app.
# This causes some pretty cryptic errors as lettuce tries
# to parse files in `instructor_task` as features.
# As a quick workaround, explicitly exclude the `instructor_task` app.
LETTUCE_AVOID_APPS = ('instructor_task',)
LETTUCE_BROWSER = os.environ.get('LETTUCE_BROWSER', 'chrome')
# Where to run: local, saucelabs, or grid
LETTUCE_SELENIUM_CLIENT = os.environ.get('LETTUCE_SELENIUM_CLIENT', 'local')
SELENIUM_GRID = {
'URL': 'http://127.0.0.1:4444/wd/hub',
'BROWSER': LETTUCE_BROWSER,
}
#####################################################################
# See if the developer has any local overrides.
try:
from .private import * # pylint: disable=import-error
except ImportError:
pass
# Because an override for where to run will affect which ports to use,
# set these up after the local overrides.
# Configure XQueue interface to use our stub XQueue server
XQUEUE_INTERFACE = {
"url": "http://127.0.0.1:{0:d}".format(XQUEUE_PORT),
"django_auth": {
"username": "lms",
"password": "***REMOVED***"
},
"basic_auth": ('anant', 'agarwal'),
}
# Point the URL used to test YouTube availability to our stub YouTube server
YOUTUBE['API'] = "http://127.0.0.1:{0}/get_youtube_api/".format(YOUTUBE_PORT)
YOUTUBE['METADATA_URL'] = "http://127.0.0.1:{0}/test_youtube/".format(YOUTUBE_PORT)
YOUTUBE['TEXT_API']['url'] = "127.0.0.1:{0}/test_transcripts_youtube/".format(YOUTUBE_PORT)
if FEATURES.get('ENABLE_COURSEWARE_SEARCH') or \
FEATURES.get('ENABLE_DASHBOARD_SEARCH') or \
FEATURES.get('ENABLE_COURSE_DISCOVERY'):
# Use MockSearchEngine as the search engine for test scenario
SEARCH_ENGINE = "search.tests.mock_search_engine.MockSearchEngine"
# Generate a random UUID so that different runs of acceptance tests don't break each other
import uuid
SECRET_KEY = uuid.uuid4().hex
ANONYMOUS_ID_SECRET_KEY = SECRET_KEY
USERNAME_CIPHER_SECRET_KEY = SECRET_KEY
############################### PIPELINE #######################################
PIPELINE_ENABLED = False
# We want to make sure that any new migrations are run
# see https://groups.google.com/forum/#!msg/django-developers/PWPj3etj3-U/kCl6pMsQYYoJ
MIGRATION_MODULES = {}
| nttks/edx-platform | lms/envs/acceptance.py | Python | agpl-3.0 | 6,447 |
# This file is part of trc.me.
#
# trc.me is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# trc.me is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# trc.me. If not, see <http://www.gnu.org/licenses/>.
#
from django import forms
from trc_me.core.models import Tag
class TagForm(forms.ModelForm):
"""Used when creating tags.
tag.user is set to authenticated user.
"""
class Meta:
model = Tag
exclude = ('user', 'img_width', 'img_height', 'created_at')
| kaapstorm/trc_me | src/trc_me/api/forms.py | Python | agpl-3.0 | 937 |
# -*- coding: utf-8 -*-
# © 2017 Didotech srl (www.didotech.com)
{
"name": "BoM Warning",
"version": "4.0.1.2",
"depends": [
"mrp",
"base",
"product",
"warning"
],
"author": "Didotech srl",
"description": """
    This module aims to track warnings on Bills of Material.
""",
"website": "https://www.didotech.com",
"category": "Manufacture Resource Planning",
"data": [
'views/product_view.xml',
'views/mrp_bom_view.xml'
],
"demo": [],
"active": False,
"installable": True,
}
| iw3hxn/LibrERP | mrp_bom_warning/__openerp__.py | Python | agpl-3.0 | 588 |
# -*- coding: utf-8 -*-
'''
.. module:: genomics.config
:synopsis: library configuration
:noindex:
:copyright: Copyright 2014 by Tiago Antao
:license: GNU Affero, see LICENSE for details
.. moduleauthor:: Tiago Antao <tra@popgen.net>
'''
import os
import configparser as cp
config_file = os.path.expanduser('~/.config/pygenomics/main.conf')
# This can be configured before loading of the main module to read another file
class Config(object):
'''Configuration object
:param config_file: The config file to use
The default config file is defined above and can be changed before doing
import genomics
Configuration parameters are separated by section
**Section main**
* **mr_dir** Directory where temporary map_reduce communication is stored
* **grid** Grid type (Local)
**Section grid.local**
The parameters for grid type Local.
Currently limit (see :py:class:`genomics.parallel.executor.Local`)
'''
def __init__(self, config_file=config_file):
self.config_file = config_file
def load_config(self):
config = cp.ConfigParser()
config.read(self.config_file)
try:
self.mr_dir = config.get('main', 'mr_dir')
self.grid = config.get('main', 'grid')
if self.grid == 'Local':
self.grid_limit = config.get('grid.local', 'limit')
if self.grid_limit.find('.') > -1:
self.grid_limit = float(self.grid_limit)
else:
self.grid_limit = int(self.grid_limit)
except cp.NoSectionError:
self.mr_dir = '/tmp'
self.grid = 'Local'
self.grid_limit = 1.0
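# Hedged example (not part of the original module) of what the INI file read by
# load_config() might contain, based solely on the options referenced above.
#
#     [main]
#     mr_dir = /tmp/pygenomics
#     grid = Local
#
#     [grid.local]
#     limit = 4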
| tiagoantao/pygenomics | genomics/config.py | Python | agpl-3.0 | 1,713 |
# Copyright 2019 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from odoo.tools import convert_file
def import_csv_data(cr, registry):
"""Import CSV data as it is faster than xml and because we can't use
noupdate anymore with csv"""
filenames = ['data/res.better.zip.csv']
for filename in filenames:
convert_file(
cr, 'l10n_ch_zip',
filename, None, mode='init', noupdate=True,
kind='init', report=None,
)
| brain-tec/l10n-switzerland | l10n_ch_zip/hooks.py | Python | agpl-3.0 | 506 |
# -*- coding: utf-8 -*-
#
#
# Author: Nicolas Bessi. Copyright Camptocamp SA
# Contributor: Pedro Manuel Baeza <pedro.baeza@serviciosbaeza.com>
# Ignacio Ibeas <ignacio@acysos.com>
# Alejandro Santana <alejandrosantana@anubia.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, fields, api
class ResPartner(models.Model):
_inherit = 'res.partner'
zip_id = fields.Many2one('res.better.zip', 'City/Location')
@api.one
@api.onchange('zip_id')
def onchange_zip_id(self):
if self.zip_id:
self.zip = self.zip_id.name
self.city = self.zip_id.city
self.state_id = self.zip_id.state_id
self.country_id = self.zip_id.country_id
def _compute_zip_id(self, vals):
""" Ensure the zip_id is filled whenever possible. This is useful in
case segmentation is done on this field.
Try to match a zip_id based on country/zip/city or country/zip.
"""
if 'zip_id' not in vals and (
'city' in vals or
'zip' in vals or
'country_id' in vals):
domain = []
zip_ids = []
if 'country_id' in vals:
country_id = vals['country_id']
else:
country_id = self.country_id.id
if 'zip' in vals:
zipcode = vals['zip']
else:
zipcode = self.zip
if 'city' in vals:
city = vals['city']
else:
city = self.city
if country_id:
domain.append(('country_id', '=', country_id))
if zipcode:
domain.append(('name', '=', zipcode))
if city:
zip_ids = self.env['res.better.zip'].search(domain + [('city', '=ilike', city)])
if not city or not zip_ids:
zip_ids = self.env['res.better.zip'].search(domain)
if zip_ids:
vals['zip_id'] = zip_ids[0].id
@api.model
@api.returns('self', lambda value: value.id)
def create(self, vals):
self._compute_zip_id(vals)
return super(ResPartner, self).create(vals)
@api.one
def write(self, vals):
self._compute_zip_id(vals)
return super(ResPartner, self).write(vals)
def _address_fields(self, cr, uid, context=None):
""" Returns the list of address fields that are synced from the parent
when the `use_parent_address` flag is set. """
return super(ResPartner, self)._address_fields(cr, uid, context=context) + ['zip_id']
| QANSEE/partner-contact | base_location/models/partner.py | Python | agpl-3.0 | 3,295 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.test import TestCase
from ..forms import QuoteForm
class TestQuoteForm(TestCase):
def setUp(self):
pass
def test_validate_emtpy_quote(self):
form = QuoteForm({'message': ''})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': ' '})
self.assertFalse(form.is_valid())
def test_validate_invalid_quote(self):
form = QuoteForm({'message': 'Mensaje invalido'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'mensaje invalido'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'me nsaje invalido'})
self.assertFalse(form.is_valid())
def test_urls_in_quote(self):
form = QuoteForm({'message': 'http://122.33.43.322'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'Me caga http://sabesquemecaga.com'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'http://sabesquemecaga.com'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'http://sabesquemecaga.com/asdfads/'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'Me caga http://www.sabesquemecaga.com'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'Me caga http://www.sabesquemecaga.com/test/12'})
self.assertFalse(form.is_valid())
def test_emails_in_quote(self):
form = QuoteForm({'message': 'Me caga test@test.com'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'Me caga test.this@test.asdfas.com'})
self.assertFalse(form.is_valid())
def test_validate_short_quote(self):
form = QuoteForm({'message': 'Me caga '})
self.assertFalse(form.is_valid())
def test_validate_long_quote(self):
form = QuoteForm({'message': 'Me caga que sea que Este mensaje es demasiado largo y no pase las pruebas de lo que tenemos que probar asdfadfa adsfasdfa. Me caga que sea que Este mensaje es demasiado largo y no pase las pruebas de lo que tenemos que probar.'})
self.assertFalse(form.is_valid())
def test_valid_message(self):
form = QuoteForm({'message': 'Me caga probar esto'})
self.assertTrue(form.is_valid())
| gabrielsaldana/sqmc | sabesqmc/quote/tests/test_forms.py | Python | agpl-3.0 | 2,384 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 NovaPoint Group LLC (<http://www.novapointgroup.com>)
# Copyright (C) 2004-2010 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import re
from openerp.osv import fields, osv
class logistic_company(osv.osv):
_inherit="logistic.company"
def _get_company_code(self, cr, user, context=None):
res = super(logistic_company, self)._get_company_code(cr, user, context=context)
res.append(('fedex', 'FedEx'))
return list(set(res))
_columns = {
'ship_company_code': fields.selection(_get_company_code, 'Logistic Company', method=True, required=True, size=64),
'fedex_account_shipping_id': fields.many2one('fedex.account.shipping', 'FedEx Shipping Account'),
}
def onchange_shipping_number(self, cr, uid, ids, shipping_no, url, context=None):
ret = {}
if url:
b = url[url.rindex('/'): len(url)]
b = b.strip('/')
if re.match("^[0-9]*$", b):
url = url[0:url.rindex('/')]
url += ('/' + shipping_no)
ret['url'] = url
return{'value': ret}
logistic_company()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| ryepdx/shipping_api_fedex | logistic_company.py | Python | agpl-3.0 | 2,090 |
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
from dace.processinstance.gateway import (
ExclusiveGateway, ParallelGateway, InclusiveGateway)
from .core import FlowNodeDefinition, Path
class GatewayDefinition(FlowNodeDefinition):
factory = NotImplemented
class ExclusiveGatewayDefinition(GatewayDefinition):
factory = ExclusiveGateway
def find_startable_paths(self, source_path, source):
for transition in self.outgoing:
if transition.condition(None):
nodedef = self.process[transition.target_id]
initial_path = source_path.clone()
source_transaction = source_path.transaction.__parent__
source_transaction.remove_subtransaction(
source_path.transaction)
source_transaction.start_subtransaction(type='Find',
path=initial_path,
initiator=self)
initial_path.add_transition(transition)
startable_paths = nodedef.find_startable_paths(
initial_path, self)
for startable_path in startable_paths:
yield startable_path
class ParallelGatewayDefinition(GatewayDefinition):
factory = ParallelGateway
def find_startable_paths(self, source_path, source):
global_transaction = source_path.transaction.get_global_transaction()
paths = global_transaction.find_allsubpaths_for(self, 'Find')
test_path = Path()
for path in paths:
test_path.add_transition(path.transitions)
multiple_target = test_path.get_multiple_target
if multiple_target:
for node in multiple_target:
if isinstance(self.process[node.__name__], ExclusiveGatewayDefinition):
return
alllatest_transitions = []
for path in paths:
alllatest_transitions.extend(path.latest)
validated_nodes = set(t.source_id for t in alllatest_transitions)
validated = True
incoming_nodes = (t.source_id for t in self.incoming)
for node in incoming_nodes:
if not node in validated_nodes:
validated = False
break
if validated:
for transition in self.outgoing:
if transition.condition(None):
nodedef = self.process[transition.target_id]
for path in paths:
initial_path = path.clone()
source_transaction = path.transaction.__parent__
source_transaction.remove_subtransaction(
path.transaction)
source_transaction.start_subtransaction(type='Find',
path=initial_path,
initiator=self)
initial_path.add_transition(transition)
startable_paths = nodedef.find_startable_paths(
initial_path, self)
for startable_path in startable_paths:
yield startable_path
class InclusiveGatewayDefinition(GatewayDefinition):
factory = InclusiveGateway
| ecreall/dace | dace/processdefinition/gatewaydef.py | Python | agpl-3.0 | 3,567 |
import json
from urllib.parse import urlencode
from tests import TestCase
from tests.fixtures.factories import ProfessorFactory, CourseFactory
class SearchTestCase(TestCase):
def setUp(self):
super().setUp()
ProfessorFactory(first_name='Mathias')
CourseFactory(title='Math Stuff')
def test_search(self):
rv = self.client.get('/search', headers=self.head_auth, query_string=urlencode({'q': 'mat'}))
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data)
self.assertIn('courses', data)
self.assertIn('professors', data)
self.assertEqual(len(data['courses']), 1)
self.assertEqual(len(data['professors']), 1)
| SCUEvals/scuevals-api | tests/resources/test_search.py | Python | agpl-3.0 | 709 |
# -*- coding: UTF-8 -*-
# Copyright 2014-2017 Luc Saffre
# This file is part of Lino Welfare.
#
# Lino Welfare is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Lino Welfare is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Lino Welfare. If not, see
# <http://www.gnu.org/licenses/>.
"""
Model mixins for `lino_welfare.modlib.aids`.
.. autosummary::
"""
from __future__ import unicode_literals
from builtins import str
import logging
logger = logging.getLogger(__name__)
from django.conf import settings
from django.db import models
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import pgettext_lazy as pgettext
from lino.api import dd, rt
from lino import mixins
from etgen.html import E, tostring
from lino.utils.ranges import encompass
from lino.modlib.checkdata.choicelists import Checker
from lino.modlib.users.mixins import UserAuthored
from lino_xl.lib.contacts.mixins import ContactRelated
from lino_xl.lib.excerpts.mixins import Certifiable
from lino.mixins.periods import rangefmt
from .choicelists import ConfirmationStates
from .roles import AidsStaff
def e2text(v):
return tostring(v)
# if isinstance(v, types.GeneratorType):
# return "".join([e2text(x) for x in v])
# if E.iselement(v):
# return tostring(v)
# return unicode(v)
class SignConfirmation(dd.Action):
"""Sign this database object.
This is available if signer is either empty or equals the
requesting user. Except for system managers who can sign as
somebody else by manually setting the signer field before running
this action.
"""
label = pgettext("aids", "Sign")
show_in_workflow = True
show_in_bbar = False
# icon_name = 'flag_green'
required_states = "requested"
help_text = _("You sign this confirmation, making most "
"fields read-only.")
def get_action_permission(self, ar, obj, state):
user = ar.get_user()
if obj.signer_id and obj.signer != user \
and not user.user_type.has_required_roles([AidsStaff]):
return False
return super(SignConfirmation,
self).get_action_permission(ar, obj, state)
def run_from_ui(self, ar, **kw):
obj = ar.selected_rows[0]
def ok(ar):
if not obj.signer_id:
obj.signer = ar.get_user()
obj.state = ConfirmationStates.confirmed
obj.save()
ar.set_response(refresh=True)
d = dict(text=obj.confirmation_text())
d.update(client=e2text(obj.client.get_full_name(nominative=True)))
msg = _("You confirm that %(client)s %(text)s") % d
ar.confirm(ok, msg, _("Are you sure?"))
class RevokeConfirmation(dd.Action):
label = pgettext("aids", "Revoke")
show_in_workflow = True
show_in_bbar = False
# icon_name = 'flag_green'
required_states = "confirmed"
help_text = _("You revoke your signatore from this confirmation.")
def get_action_permission(self, ar, obj, state):
user = ar.get_user()
if obj.signer != user and not user.user_type.has_required_roles([AidsStaff]):
return False
return super(RevokeConfirmation,
self).get_action_permission(ar, obj, state)
def run_from_ui(self, ar, **kw):
obj = ar.selected_rows[0]
def ok(ar):
# obj.signer = None
obj.state = ConfirmationStates.requested
obj.save()
ar.set_response(refresh=True)
d = dict(text=obj.confirmation_text())
d.update(client=e2text(obj.client.get_full_name(nominative=True)))
msg = _("You revoke your confirmation that %(client)s %(text)s") % d
ar.confirm(ok, msg, _("Are you sure?"))
class Confirmable(mixins.DateRange):
"""Base class for both :class:`Granting` and :class:`Confirmation`.
.. attribute:: signer
The agent who has signed or is expected to sign this item.
.. attribute:: state
The confirmation state of this object. Pointer to
:class:`ConfirmationStates`.
"""
class Meta:
abstract = True
manager_roles_required = dd.login_required()
workflow_state_field = 'state'
signer = dd.ForeignKey(
settings.SITE.user_model,
verbose_name=pgettext("aids", "Signer"),
blank=True, null=True,
related_name="%(app_label)s_%(class)s_set_by_signer",
)
state = ConfirmationStates.field(
default=ConfirmationStates.as_callable('requested'))
sign = SignConfirmation()
revoke = RevokeConfirmation()
@classmethod
def on_analyze(cls, site):
cls.CONFIRMED_FIELDS = dd.fields_list(
cls,
cls.get_confirmable_fields())
super(Confirmable, cls).on_analyze(site)
@classmethod
def get_confirmable_fields(cls):
return ''
@classmethod
def setup_parameters(cls, fields):
fields.update(signer=dd.ForeignKey(
settings.SITE.user_model,
verbose_name=pgettext("aids", "Signer"),
blank=True, null=True))
fields.update(state=ConfirmationStates.field(blank=True))
super(Confirmable, cls).setup_parameters(fields)
@classmethod
def get_simple_parameters(cls):
for p in super(Confirmable, cls).get_simple_parameters():
yield p
yield 'signer'
yield 'state'
def full_clean(self):
super(Confirmable, self).full_clean()
if self.signer is None and self.state == ConfirmationStates.confirmed:
self.state = ConfirmationStates.requested
# raise ValidationError(_("Cannot confirm without signer!"))
def get_row_permission(self, ar, state, ba):
"""A signed confirmation cannot be modified, even not by a privileged
user.
"""
if not super(Confirmable, self).get_row_permission(ar, state, ba):
return False
if self.state == ConfirmationStates.confirmed \
and self.signer is not None \
and self.signer != ar.get_user():
return ba.action.readonly
return True
def disabled_fields(self, ar):
if self.state != ConfirmationStates.requested:
return self.CONFIRMED_FIELDS
return super(Confirmable, self).disabled_fields(ar)
def get_printable_context(self, ar=None, **kw):
kw.update(when=self.get_period_text())
return super(Confirmable, self).get_printable_context(ar, **kw)
def confirmation_text(self):
kw = dict()
kw.update(when=self.get_period_text())
at = self.get_aid_type()
if at:
kw.update(what=str(at))
else:
kw.update(what=str(self))
return _("receives %(what)s %(when)s.") % kw
def confirmation_address(self):
at = self.get_aid_type()
if at and at.address_type:
addr = self.client.get_address_by_type(at)
else:
addr = self.client.get_primary_address()
if addr is not None:
return addr.living_at_text()
def get_excerpt_title(self):
at = self.get_aid_type()
if at:
return at.get_excerpt_title()
return str(self)
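# Hedged editorial sketch (not part of this module): stripped of the Lino action
# machinery, the Sign/Revoke actions above amount to a two-state toggle that also
# records a signer. The class below is purely illustrative.
class _SignatureSketch(object):
    """Minimal stand-in for a Confirmable row; state is 'requested' or 'confirmed'."""
    def __init__(self):
        self.signer = None
        self.state = 'requested'
    def sign(self, user):
        # SignConfirmation: fill in the signer if empty, then confirm.
        if self.signer is None:
            self.signer = user
        self.state = 'confirmed'
    def revoke(self, user, is_staff=False):
        # RevokeConfirmation: only the signer (or an AidsStaff user) may revoke.
        if self.signer == user or is_staff:
            self.state = 'requested'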
@dd.python_2_unicode_compatible
class Confirmation(
Confirmable, UserAuthored, ContactRelated,
mixins.Created, Certifiable):
"""Base class for all aid confirmations.
Subclassed by :class:`SimpleConfirmation
<lino_welfare.modlib.aids.models.SimpleConfirmation>`,
:class:`IncomeConfirmation
<lino_welfare.modlib.aids.models.IncomeConfirmation>` and
:class:`RefundConfirmation
<lino_welfare.modlib.aids.models.RefundConfirmation>`.
"""
class Meta:
abstract = True
allow_cascaded_delete = ['client']
client = dd.ForeignKey(
'pcsw.Client', related_name="%(app_label)s_%(class)s_set_by_client")
granting = dd.ForeignKey('aids.Granting', blank=True, null=True)
remark = dd.RichTextField(
_("Remark"), blank=True, format='html')
language = dd.LanguageField(blank=True)
@classmethod
def setup_parameters(cls, fields):
# fields.update(client=dd.ForeignKey(
# 'pcsw.Client', blank=True, null=True))
# fields.update(
# granting=dd.ForeignKey('aids.Granting', blank=True, null=True))
fields.update(gender=dd.Genders.field(blank=True, null=True))
super(Confirmation, cls).setup_parameters(fields)
@classmethod
def get_simple_parameters(cls):
s = list(super(Confirmation, cls).get_simple_parameters())
s += ['client', 'granting']
return s
def __str__(self):
if self.granting is not None:
return '%s/%s' % (self.granting, self.pk)
return '%s #%s' % (self._meta.verbose_name, self.pk)
def get_date_range_veto(obj):
"""
Return an error message if this confirmation lies outside of
granted period.
"""
pk = dd.plugins.aids.no_date_range_veto_until
if pk and obj.pk and obj.pk <= pk:
return
gp = obj.granting.get_period()
if obj.start_date or obj.end_date:
cp = obj.get_period()
if cp[1] is None: cp = (cp[0], cp[0])
if not encompass(gp, cp):
return _(
"Date range %(p1)s lies outside of granted "
"period %(p2)s.") % dict(
p2=rangefmt(gp), p1=rangefmt(cp))
def full_clean(self):
super(Confirmation, self).full_clean()
if self.granting is None:
return
msg = self.get_date_range_veto()
if msg is not None:
raise ValidationError(msg)
if not self.language:
obj = self.recipient
if obj is None:
self.language = self.client.language
else:
if isinstance(obj, rt.models.contacts.Role):
self.language = obj.person.language
else:
self.language = obj.language
def on_create(self, ar):
if self.granting_id:
self.signer = self.granting.signer
self.client = self.granting.client
if self.granting.aid_type_id:
at = self.granting.aid_type
self.company = at.company
self.contact_person = at.contact_person
self.contact_role = at.contact_role
super(Confirmation, self).on_create(ar)
@classmethod
def get_confirmable_fields(cls):
return 'client signer granting remark start_date end_date'
def get_print_language(self):
return self.language
def get_excerpt_options(self, ar, **kw):
# Set project field when creating an excerpt from Client.
kw.update(project=self.client)
return super(Confirmation, self).get_excerpt_options(ar, **kw)
def get_aid_type(self):
if self.granting_id and self.granting.aid_type_id:
return self.granting.aid_type
return None
def get_granting(self, **aidtype_filter):
if self.granting_id:
return rt.models.aids.Granting.objects.get_by_aidtype(
self.granting.client, self, **aidtype_filter)
def get_urgent_granting(self):
"""Return the one and only one urgent aid granting for the client and
period defined for this confirmation. Return None if there is
no such granting, or if there is more than one such granting.
Used in :xfile:`medical_refund.body.html`.
"""
return self.get_granting(is_urgent=True)
@classmethod
def get_template_group(cls):
# Used by excerpts and printable. The individual confirmation
# models use a common tree of templates.
return 'aids/Confirmation'
def get_body_template(self):
"""Overrides :meth:`lino.core.model.Model.get_body_template`."""
at = self.get_aid_type()
if at is not None:
return at.body_template
dd.update_field(Confirmation, 'start_date', default=dd.today,
verbose_name=_('Period from'))
dd.update_field(Confirmation, 'end_date', default=dd.today,
verbose_name=_('until'))
# dd.update_field(Confirmation, 'user', verbose_name=_('Requested by'))
dd.update_field(Confirmation, 'company',
verbose_name=_("Recipient (Organization)"))
dd.update_field(Confirmation, 'contact_person',
verbose_name=_("Recipient (Person)"))
class ConfirmationChecker(Checker):
model = Confirmation
verbose_name = _("Check for confirmations outside of granted period")
def get_responsible_user(self, obj):
return obj.client.get_primary_coach()
def get_checkdata_problems(self, obj, fix=False):
if obj.granting is None:
msg = _("Confirmation without granting")
yield (False, msg)
return
msg = obj.get_date_range_veto()
if msg is not None:
yield (False, msg)
ConfirmationChecker.activate()
| khchine5/lino-welfare | lino_welfare/modlib/aids/mixins.py | Python | agpl-3.0 | 13,679 |
import unittest
import ccs
import time
####################################################################################################################
# OKCOINCOM #
####################################################################################################################
class Valid(unittest.TestCase):
def setUp(self):
self.stock = ccs.constants.OKCOINCOM
self.base = ccs.constants.BTC
self.quote = ccs.constants.USD
self.trades = ccs.trades(self.stock, self.base, self.quote)
self.m = ccs.okcoincom.public.response
#time.sleep(3)
def testLen(self):
self.assertIsInstance(len(self.trades), int)
def testGetItem(self):
self.assertIsInstance(self.trades[0], self.m.Trade)
def testStock(self):
self.assertEqual(self.trades.stock(), self.stock)
def testMethod(self):
self.assertEqual(self.trades.method(), ccs.constants.TRADES)
def testUsymbol(self):
self.assertEqual(self.trades.usymbol(), self.base + ":" + self.quote)
def testOsymbol(self):
pass
def testData(self):
pass
def testRaw(self):
pass
def testStr(self):
pass
if __name__ == '__main__':
unittest.main()
| Honzin/ccs | tests/testAdapter/testOkcoincom/testTrades.py | Python | agpl-3.0 | 1,366 |
'''
Copyright (C) 2016-2019 Vanessa Sochat.
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
from spython.main import Client
from singularity.logger import bot
from singularity.analysis.reproduce.criteria import *
from singularity.analysis.reproduce.levels import *
from singularity.analysis.reproduce.utils import (
get_image_tar,
extract_content,
delete_image_tar,
extract_guts
)
import datetime
import hashlib
import sys
import os
import io
import re
Client.quiet = True
def get_image_hashes(image_path, version=None, levels=None):
'''get_image_hashes returns the hash for an image across all levels. This is the quickest,
easiest way to define a container's reproducibility on each level.
'''
if levels is None:
levels = get_levels(version=version)
hashes = dict()
# use a cached object for all
file_obj, tar = get_image_tar(image_path)
for level_name, level_filter in levels.items():
hashes[level_name] = get_image_hash(image_path,
level_filter=level_filter,
file_obj=file_obj,
tar=tar)
try:
file_obj.close()
except:
tar.close()
if os.path.exists(file_obj):
os.remove(file_obj)
return hashes
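# Hedged usage sketch (editorial, not part of the module): "container.img" is a
# hypothetical path; get_image_hashes() is called exactly as defined above.
def _example_print_hashes(image_path="container.img"):
    """Print the reproducibility digest of an image at every defined level."""
    for level_name, digest in get_image_hashes(image_path).items():
        print("%s  %s" % (level_name, digest))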
def get_image_hash(image_path,
level=None,
level_filter=None,
include_files=None,
skip_files=None,
version=None,
file_obj=None,
tar=None):
'''get_image_hash will generate a sha1 hash of an image, depending on a level
of reproducibility specified by the user. (see function get_levels for descriptions)
the user can also provide a level_filter manually with level_filter (for custom levels)
:param level: the level of reproducibility to use, which maps to a set regular
expression to match particular files/folders in the image. Choices are in notes.
:param skip_files: an optional list of files to skip
:param include_files: an optional list of files to keep (only if level not defined)
:param version: the version to use. If not defined, default is 2.3
::notes
LEVEL DEFINITIONS
The level definitions come down to including folders/files in the comparison. For files
that Singularity produces on the fly that might be different (timestamps) but equal content
(eg for a replication) we hash the content ("assess_content") instead of the file.
'''
# First get a level dictionary, with description and regexp
if level_filter is not None:
file_filter = level_filter
elif level is None:
file_filter = get_level("RECIPE",
version=version,
include_files=include_files,
skip_files=skip_files)
else:
file_filter = get_level(level,version=version,
skip_files=skip_files,
include_files=include_files)
close = False
if file_obj is None and tar is None:
file_obj, tar = get_image_tar(image_path)
close = True
hasher = hashlib.md5()
for member in tar:
member_name = member.name.replace('.','',1)
# For files, we either assess content, or include the file
if member.isdir() or member.issym():
continue
elif assess_content(member, file_filter):
content = extract_content(image_path, member.name)
hasher.update(content)
elif include_file(member.name, file_filter):
buf = member.tobuf()
hasher.update(buf)
digest = hasher.hexdigest()
# Close up / remove files
if close is True:
try:
file_obj.close()
except:
tar.close()
if os.path.exists(file_obj):
os.remove(file_obj)
return digest
def get_content_hashes(image_path,
level=None,
regexp=None,
include_files=None,
tag_root=True,
level_filter=None,
skip_files=None,
version=None,
include_sizes=True):
'''get_content_hashes is like get_image_hash, but it returns a complete dictionary
of file names (keys) and their respective hashes (values). This function is intended
for more research purposes and was used to generate the levels in the first place.
If include_sizes is True, we include a second data structure with sizes
'''
if level_filter is not None:
file_filter = level_filter
elif level is None:
file_filter = get_level("REPLICATE", version=version,
skip_files=skip_files,
include_files=include_files)
else:
file_filter = get_level(level,version=version,
skip_files=skip_files,
include_files=include_files)
results = extract_guts(image_path=image_path,
file_filter=file_filter,
tag_root=tag_root,
include_sizes=include_sizes)
return results
def get_image_file_hash(image_path):
    '''get_image_file_hash returns an md5 hash computed over the raw bytes of the
    image file itself, read in 4096-byte chunks.
    :param image_path: full path to the singularity image
'''
hasher = hashlib.md5()
with open(image_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hasher.update(chunk)
return hasher.hexdigest()
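# Hedged usage sketch (editorial): comparing two images byte-for-byte through their
# file-level md5 digests; both paths are hypothetical.
def _images_identical(path_a="a.img", path_b="b.img"):
    return get_image_file_hash(path_a) == get_image_file_hash(path_b)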
| vsoch/singularity-python | singularity/analysis/reproduce/hash.py | Python | agpl-3.0 | 6,435 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'designer/getaddons.ui'
#
# Created: Fri Aug 22 00:57:31 2014
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(367, 204)
self.verticalLayout = QtGui.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label = QtGui.QLabel(Dialog)
self.label.setWordWrap(True)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout.addWidget(self.label)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label_2 = QtGui.QLabel(Dialog)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.horizontalLayout.addWidget(self.label_2)
self.code = QtGui.QLineEdit(Dialog)
self.code.setObjectName(_fromUtf8("code"))
self.horizontalLayout.addWidget(self.code)
self.verticalLayout.addLayout(self.horizontalLayout)
self.buttonBox = QtGui.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(Dialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_("Install Add-on"))
self.label.setText(_("To browse add-ons, please click the browse button below.<br><br>When you\'ve found an add-on you like, please paste its code below."))
self.label_2.setText(_("Code:"))
| weihautin/anki | aqt/forms/getaddons.py | Python | agpl-3.0 | 2,703 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
from openfisca_france_data import default_config_files_directory as config_files_directory
from openfisca_france_data.erfs.input_data_builder import ( # analysis:ignore
step_01_pre_processing as pre_processing,
step_02_imputation_loyer as imputation_loyer,
step_03_fip as fip,
step_04_famille as famille,
step_05_foyer as foyer,
step_06_rebuild as rebuild,
step_07_invalides as invalides,
step_08_final as final,
)
from openfisca_france_data.temporary import get_store
from openfisca_survey_manager.surveys import Survey
from openfisca_survey_manager.survey_collections import SurveyCollection
log = logging.getLogger(__name__)
def build(year = None, check = False):
assert year is not None
pre_processing.create_indivim_menagem(year = year)
pre_processing.create_enfants_a_naitre(year = year)
# try:
# imputation_loyer.imputation_loyer(year = year)
# except Exception, e:
# log.info('Do not impute loyer because of the following error: \n {}'.format(e))
# pass
fip.create_fip(year = year)
famille.famille(year = year)
foyer.sif(year = year)
foyer.foyer_all(year = year)
rebuild.create_totals_first_pass(year = year)
rebuild.create_totals_second_pass(year = year)
rebuild.create_final(year = year)
invalides.invalide(year = year)
final.final(year = year, check = check)
temporary_store = get_store(file_name = 'erfs')
data_frame = temporary_store['input_{}'.format(year)]
# Saving the data_frame
openfisca_survey_collection = SurveyCollection(name = "openfisca", config_files_directory = config_files_directory)
output_data_directory = openfisca_survey_collection.config.get('data', 'output_directory')
survey_name = "openfisca_data_{}".format(year)
table = "input"
hdf5_file_path = os.path.join(os.path.dirname(output_data_directory), "{}.h5".format(survey_name))
survey = Survey(
name = survey_name,
hdf5_file_path = hdf5_file_path,
)
survey.insert_table(name = table, data_frame = data_frame)
openfisca_survey_collection.surveys.append(survey)
collections_directory = openfisca_survey_collection.config.get('collections', 'collections_directory')
json_file_path = os.path.join(collections_directory, 'openfisca.json')
openfisca_survey_collection.dump(json_file_path = json_file_path)
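# Hedged usage sketch (editorial): the year value is hypothetical; build() is the
# entry point defined above.
if __name__ == '__main__':
    logging.basicConfig(level = logging.INFO)
    build(year = 2009, check = False)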
| benjello/openfisca-france-data | openfisca_france_data/erfs/input_data_builder/__init__.py | Python | agpl-3.0 | 2,482 |
"""
tests for the models
"""
import json
from datetime import datetime, timedelta
import ddt
from pytz import utc
from student.roles import CourseCcxCoachRole
from student.tests.factories import AdminFactory
from xmodule.modulestore.tests.django_utils import TEST_DATA_SPLIT_MODULESTORE, ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, check_mongo_calls
from ..overrides import override_field_for_ccx
from .factories import CcxFactory
@ddt.ddt
class TestCCX(ModuleStoreTestCase):
"""Unit tests for the CustomCourseForEdX model
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
def setUp(self):
"""common setup for all tests"""
super(TestCCX, self).setUp()
self.course = CourseFactory.create()
self.coach = AdminFactory.create()
role = CourseCcxCoachRole(self.course.id)
role.add_users(self.coach)
self.ccx = CcxFactory(course_id=self.course.id, coach=self.coach)
def set_ccx_override(self, field, value):
"""Create a field override for the test CCX on <field> with <value>"""
override_field_for_ccx(self.ccx, self.course, field, value)
def test_ccx_course_is_correct_course(self):
"""verify that the course property of a ccx returns the right course"""
expected = self.course
actual = self.ccx.course
self.assertEqual(expected, actual)
def test_ccx_course_caching(self):
"""verify that caching the propery works to limit queries"""
with check_mongo_calls(3):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.course # pylint: disable=pointless-statement
with check_mongo_calls(0):
self.ccx.course # pylint: disable=pointless-statement
def test_ccx_start_is_correct(self):
"""verify that the start datetime for a ccx is correctly retrieved
Note that after setting the start field override microseconds are
truncated, so we can't do a direct comparison between before and after.
For this reason we test the difference between and make sure it is less
than one second.
"""
expected = datetime.now(utc)
self.set_ccx_override('start', expected)
actual = self.ccx.start
diff = expected - actual
self.assertLess(abs(diff.total_seconds()), 1)
def test_ccx_start_caching(self):
"""verify that caching the start property works to limit queries"""
now = datetime.now(utc)
self.set_ccx_override('start', now)
with check_mongo_calls(3):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.start # pylint: disable=pointless-statement
with check_mongo_calls(0):
self.ccx.start # pylint: disable=pointless-statement
def test_ccx_due_without_override(self):
"""verify that due returns None when the field has not been set"""
actual = self.ccx.due
self.assertIsNone(actual)
def test_ccx_due_is_correct(self):
"""verify that the due datetime for a ccx is correctly retrieved"""
expected = datetime.now(utc)
self.set_ccx_override('due', expected)
actual = self.ccx.due
diff = expected - actual
self.assertLess(abs(diff.total_seconds()), 1)
def test_ccx_due_caching(self):
"""verify that caching the due property works to limit queries"""
expected = datetime.now(utc)
self.set_ccx_override('due', expected)
with check_mongo_calls(3):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.due # pylint: disable=pointless-statement
with check_mongo_calls(0):
self.ccx.due # pylint: disable=pointless-statement
def test_ccx_has_started(self):
"""verify that a ccx marked as starting yesterday has started"""
now = datetime.now(utc)
delta = timedelta(1)
then = now - delta
self.set_ccx_override('start', then)
self.assertTrue(self.ccx.has_started())
def test_ccx_has_not_started(self):
"""verify that a ccx marked as starting tomorrow has not started"""
now = datetime.now(utc)
delta = timedelta(1)
then = now + delta
self.set_ccx_override('start', then)
self.assertFalse(self.ccx.has_started())
def test_ccx_has_ended(self):
"""verify that a ccx that has a due date in the past has ended"""
now = datetime.now(utc)
delta = timedelta(1)
then = now - delta
self.set_ccx_override('due', then)
self.assertTrue(self.ccx.has_ended())
def test_ccx_has_not_ended(self):
"""verify that a ccx that has a due date in the future has not eneded
"""
now = datetime.now(utc)
delta = timedelta(1)
then = now + delta
self.set_ccx_override('due', then)
self.assertFalse(self.ccx.has_ended())
def test_ccx_without_due_date_has_not_ended(self):
"""verify that a ccx without a due date has not ended"""
self.assertFalse(self.ccx.has_ended())
def test_ccx_max_student_enrollment_correct(self):
"""
Verify the override value for max_student_enrollments_allowed
"""
expected = 200
self.set_ccx_override('max_student_enrollments_allowed', expected)
actual = self.ccx.max_student_enrollments_allowed
self.assertEqual(expected, actual)
def test_structure_json_default_empty(self):
"""
By default structure_json does not contain anything
"""
self.assertEqual(self.ccx.structure_json, None)
self.assertEqual(self.ccx.structure, None)
def test_structure_json(self):
"""
Test a json stored in the structure_json
"""
dummy_struct = [
"block-v1:Organization+CN101+CR-FALL15+type@chapter+block@Unit_4",
"block-v1:Organization+CN101+CR-FALL15+type@chapter+block@Unit_5",
"block-v1:Organization+CN101+CR-FALL15+type@chapter+block@Unit_11"
]
json_struct = json.dumps(dummy_struct)
ccx = CcxFactory(
course_id=self.course.id,
coach=self.coach,
structure_json=json_struct
)
self.assertEqual(ccx.structure_json, json_struct)
self.assertEqual(ccx.structure, dummy_struct)
def test_locator_property(self):
"""
Verify that the locator helper property returns a correct CCXLocator
"""
locator = self.ccx.locator
self.assertEqual(self.ccx.id, long(locator.ccx))
| jolyonb/edx-platform | lms/djangoapps/ccx/tests/test_models.py | Python | agpl-3.0 | 7,054 |
#------------------------------------------------------------------------------
# Copyright (C) 2009 Richard Lincoln
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation; version 2 dated June, 1991.
#
# This software is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#------------------------------------------------------------------------------
# <<< imports
# @generated
from dynamics.dynamics.rotating_machine import RotatingMachine
from google.appengine.ext import db
# >>> imports
class GenSync(RotatingMachine):
""" Synchronous generator model. A single standard synchronous model is defined for the CIM, with several variations indicated by the 'model type' attribute. This model can be used for all types of synchronous machines (salient pole, solid iron rotor, etc.).
"""
# <<< gen_sync.attributes
# @generated
# >>> gen_sync.attributes
# <<< gen_sync.references
# @generated
# >>> gen_sync.references
# <<< gen_sync.operations
# @generated
# >>> gen_sync.operations
# EOF -------------------------------------------------------------------------
| rwl/openpowersystem | dynamics/dynamics/generators/gen_sync.py | Python | agpl-3.0 | 1,631 |
'''
Copyright (C) 2017 The Board of Trustees of the Leland Stanford Junior
University.
Copyright (C) 2016-2017 Vanessa Sochat.
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
from glob import glob
import json
import os
import re
from singularity.logger import bot
###################################################################################
# METRICS #########################################################################
###################################################################################
def information_coefficient(total1,total2,intersect):
    '''a simple Jaccard (information coefficient) score to compare two lists of overlaps/diffs
'''
total = total1 + total2
return 2.0*len(intersect) / total
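# Hedged worked example (editorial): with list sizes total1 = 4 and total2 = 6
# sharing 3 items, information_coefficient(4, 6, [1, 2, 3]) returns
# 2.0 * 3 / (4 + 6) = 0.6.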
def RSA(m1,m2):
'''RSA analysis will compare the similarity of two matrices
'''
from scipy.stats import pearsonr
import scipy.linalg
import numpy
    # Mask the diagonal and upper triangle of each matrix to NaN, then flatten the remaining lower triangle to a vector
vectorm1 = m1.mask(numpy.triu(numpy.ones(m1.shape)).astype(numpy.bool)).values.flatten()
vectorm2 = m2.mask(numpy.triu(numpy.ones(m2.shape)).astype(numpy.bool)).values.flatten()
# Now remove the nans
m1defined = numpy.argwhere(~numpy.isnan(numpy.array(vectorm1,dtype=float)))
m2defined = numpy.argwhere(~numpy.isnan(numpy.array(vectorm2,dtype=float)))
idx = numpy.intersect1d(m1defined,m2defined)
return pearsonr(vectorm1[idx],vectorm2[idx])[0]
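# Hedged usage sketch (editorial, not part of the module): RSA() expects two
# same-shaped pandas DataFrames and correlates only their lower triangles. The
# matrix below is hypothetical; pandas/scipy are assumed installed, as RSA()
# itself already requires.
def _example_rsa():
    import pandas
    m1 = pandas.DataFrame([[1.0, 0.2, 0.3],
                           [0.2, 1.0, 0.4],
                           [0.3, 0.4, 1.0]])
    # Identical matrices give a Pearson correlation of 1.0 over the lower triangles.
    return RSA(m1, m1)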
| singularityware/singularity-python | singularity/analysis/metrics.py | Python | agpl-3.0 | 2,085 |
#!/usr/bin/env python
import os, sys
from polib import pofile
from config import CONFIGURATION
from extract import SOURCE_WARN
from execute import execute
TRANSIFEX_HEADER = 'Translations in this file have been downloaded from %s'
TRANSIFEX_URL = 'https://www.transifex.com/projects/p/edx-studio/'
def push():
execute('tx push -s')
def pull():
for locale in CONFIGURATION.locales:
if locale != CONFIGURATION.source_locale:
#execute('tx pull -l %s' % locale)
execute('tx pull --all')
clean_translated_locales()
def clean_translated_locales():
"""
Strips out the warning from all translated po files
about being an English source file.
"""
for locale in CONFIGURATION.locales:
if locale != CONFIGURATION.source_locale:
clean_locale(locale)
def clean_locale(locale):
"""
Strips out the warning from all of a locale's translated po files
about being an English source file.
Iterates over machine-generated files.
"""
dirname = CONFIGURATION.get_messages_dir(locale)
for filename in ('django-partial.po', 'djangojs.po', 'mako.po'):
clean_file(dirname.joinpath(filename))
def clean_file(file):
"""
Strips out the warning from a translated po file about being an English source file.
Replaces warning with a note about coming from Transifex.
"""
po = pofile(file)
if po.header.find(SOURCE_WARN) != -1:
new_header = get_new_header(po)
new = po.header.replace(SOURCE_WARN, new_header)
po.header = new
po.save()
def get_new_header(po):
team = po.metadata.get('Language-Team', None)
if not team:
return TRANSIFEX_HEADER % TRANSIFEX_URL
else:
return TRANSIFEX_HEADER % team
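# Hedged editorial example: a .po file whose metadata contains
# 'Language-Team: French <fr@li.org>' would get the header
# 'Translations in this file have been downloaded from French <fr@li.org>',
# while a file with no Language-Team falls back to the Transifex project URL above.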
if __name__ == '__main__':
if len(sys.argv)<2:
raise Exception("missing argument: push or pull")
arg = sys.argv[1]
if arg == 'push':
push()
elif arg == 'pull':
pull()
else:
raise Exception("unknown argument: (%s)" % arg)
| praveen-pal/edx-platform | i18n/transifex.py | Python | agpl-3.0 | 2,066 |
#!/usr/bin/env python
# coding: utf-8
#
# StreamBuddy - a video and data streaming service.
# Copyright (c) 2015, Tobias Bleiker & Dumeni Manatschal
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# Source on github:
# https://github.com/tbleiker/StreamBug
#
import multiprocessing
import time
import zmq
from streambug import cmd_interface
from streambug import mplogger
# set up logging
mplogger.setup(debug=True)
log = mplogger.getLogger()
def server_thread(zeromq_context, address, port_pull, port_pub):
server = cmd_interface.Server(zeromq_context, address, port_pull, port_pub)
server.start()
server.join()
def f1_thread(name, role, zeromq_context, address, port_pub, port_pull):
client = cmd_interface.Client(name, role, zeromq_context, address,
port_pub, port_pull)
def test():
return 'test successful'
client.add_command('test', test, 'simple test')
client.start()
# send an update and join client
time.sleep(0.5)
log.info('### Test 1: Send an update.')
client.send_update('This is an update message.')
client.join()
def c1_thread(name, role, zeromq_context, address, port_pub, port_pull):
client = cmd_interface.Client(name, role, zeromq_context, address,
port_pub, port_pull)
def update_func(msg):
log.info('Got update message: {msg}'.format(msg=msg))
client.set_update_func(update_func)
client.start()
time.sleep(2)
log.info('### Test 2: Get server status.')
client.get_server_status()
time.sleep(0.5)
log.info('### Test 3: Request help.')
client.get_help('F1')
time.sleep(0.5)
log.info('### Test 4: Send command test1.')
ret = client.send_cmd('F1', 'test')
log.info('Got: {ret}'.format(ret=ret))
if __name__ == '__main__':
zeromq_context = zmq.Context()
client_c1 = multiprocessing.Process(name='Client-c1', target=c1_thread,
args=('c1', 'commander',
zeromq_context, '0.0.0.0', 7001,
7000))
client_f1 = multiprocessing.Process(name='Client-f1', target=f1_thread,
args=('f1', 'follower', zeromq_context,
'0.0.0.0', 7001, 7000))
server = multiprocessing.Process(name='Server', target=server_thread,
args=(zeromq_context, '0.0.0.0', 7001,
7000))
server.start()
time.sleep(0.5)
log.info('### Starting clients...')
client_f1.start()
time.sleep(0.1)
client_c1.start()
time.sleep(0.1)
client_c1.join()
client_f1.terminate()
server.terminate()
| tbleiker/StreamBug | tests/cmd_interface_02_server_clients.py | Python | agpl-3.0 | 3,447 |
# -*- coding: utf-8 -*-
from django.conf import settings as settings
from django.contrib.auth.decorators import login_required
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.utils import simplejson
from django.utils.translation import ugettext_lazy as _
from mitxmako.shortcuts import render_to_response
from courseware.courses import get_opt_course_with_access
from courseware.access import has_access
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore.django import modulestore
from .models import Revision, Article, Namespace, CreateArticleForm, RevisionFormWithTitle, RevisionForm
import wiki_settings
def wiki_reverse(wiki_page, article=None, course=None, namespace=None, args=[], kwargs={}):
kwargs = dict(kwargs) # TODO: Figure out why if I don't do this kwargs sometimes contains {'article_path'}
if not 'course_id' in kwargs and course:
kwargs['course_id'] = course.id
if not 'article_path' in kwargs and article:
kwargs['article_path'] = article.get_path()
if not 'namespace' in kwargs and namespace:
kwargs['namespace'] = namespace
return reverse(wiki_page, kwargs=kwargs, args=args)
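# Hedged usage sketch (editorial; 'some_article' and 'some_course' are hypothetical):
#   wiki_reverse('wiki_view', article=some_article, course=some_course)
# fills in 'course_id' and 'article_path' from the objects before reversing the URL.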
def update_template_dictionary(dictionary, request=None, course=None, article=None, revision=None):
if article:
dictionary['wiki_article'] = article
dictionary['wiki_title'] = article.title # TODO: What is the title when viewing the article in a course?
if not course and 'namespace' not in dictionary:
dictionary['namespace'] = article.namespace.name
if course:
dictionary['course'] = course
if 'namespace' not in dictionary:
dictionary['namespace'] = "edX"
else:
dictionary['course'] = None
if revision:
dictionary['wiki_article_revision'] = revision
dictionary['wiki_current_revision_deleted'] = not (revision.deleted == 0)
if request:
dictionary.update(csrf(request))
if request and course:
dictionary['staff_access'] = has_access(request.user, course, 'staff')
else:
dictionary['staff_access'] = False
def view(request, article_path, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
(article, err) = get_article(request, article_path, course)
if err:
return err
perm_err = check_permissions(request, article, course, check_read=True, check_deleted=True)
if perm_err:
return perm_err
d = {}
update_template_dictionary(d, request, course, article, article.current_revision)
return render_to_response('simplewiki/simplewiki_view.html', d)
def view_revision(request, revision_number, article_path, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
(article, err) = get_article(request, article_path, course)
if err:
return err
try:
revision = Revision.objects.get(counter=int(revision_number), article=article)
except:
d = {'wiki_err_norevision': revision_number}
update_template_dictionary(d, request, course, article)
return render_to_response('simplewiki/simplewiki_error.html', d)
perm_err = check_permissions(request, article, course, check_read=True, check_deleted=True, revision=revision)
if perm_err:
return perm_err
d = {}
update_template_dictionary(d, request, course, article, revision)
return render_to_response('simplewiki/simplewiki_view.html', d)
def root_redirect(request, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
#TODO: Add a default namespace to settings.
namespace = "edX"
try:
root = Article.get_root(namespace)
return HttpResponseRedirect(reverse('wiki_view', kwargs={'course_id': course_id, 'article_path': root.get_path()}))
except:
# If the root is not found, we probably are loading this class for the first time
# We should make sure the namespace exists so the root article can be created.
Namespace.ensure_namespace(namespace)
err = not_found(request, namespace + '/', course)
return err
def create(request, article_path, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
article_path_components = article_path.split('/')
# Ensure the namespace exists
if not len(article_path_components) >= 1 or len(article_path_components[0]) == 0:
d = {'wiki_err_no_namespace': True}
update_template_dictionary(d, request, course)
return render_to_response('simplewiki/simplewiki_error.html', d)
namespace = None
try:
namespace = Namespace.objects.get(name__exact=article_path_components[0])
    except (Namespace.DoesNotExist, ValueError):
d = {'wiki_err_bad_namespace': True}
update_template_dictionary(d, request, course)
return render_to_response('simplewiki/simplewiki_error.html', d)
# See if the article already exists
article_slug = article_path_components[1] if len(article_path_components) >= 2 else ''
#TODO: Make sure the slug only contains legal characters (which is already done a bit by the url regex)
try:
existing_article = Article.objects.get(namespace=namespace, slug__exact=article_slug)
#It already exists, so we just redirect to view the article
return HttpResponseRedirect(wiki_reverse("wiki_view", existing_article, course))
except Article.DoesNotExist:
#This is good. The article doesn't exist
pass
#TODO: Once we have permissions for namespaces, we should check for create permissions
#check_permissions(request, #namespace#, check_locked=False, check_write=True, check_deleted=True)
if request.method == 'POST':
f = CreateArticleForm(request.POST)
if f.is_valid():
article = Article()
article.slug = article_slug
if not request.user.is_anonymous():
article.created_by = request.user
article.title = f.cleaned_data.get('title')
article.namespace = namespace
a = article.save()
new_revision = f.save(commit=False)
if not request.user.is_anonymous():
new_revision.revision_user = request.user
new_revision.article = article
new_revision.save()
return HttpResponseRedirect(wiki_reverse("wiki_view", article, course))
else:
f = CreateArticleForm(initial={'title': request.GET.get('wiki_article_name', article_slug),
'contents': _('Headline\n===\n\n')})
d = {'wiki_form': f, 'create_article': True, 'namespace': namespace.name}
update_template_dictionary(d, request, course)
return render_to_response('simplewiki/simplewiki_edit.html', d)
def edit(request, article_path, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
(article, err) = get_article(request, article_path, course)
if err:
return err
# Check write permissions
perm_err = check_permissions(request, article, course, check_write=True, check_locked=True, check_deleted=False)
if perm_err:
return perm_err
if wiki_settings.WIKI_ALLOW_TITLE_EDIT:
EditForm = RevisionFormWithTitle
else:
EditForm = RevisionForm
if request.method == 'POST':
f = EditForm(request.POST)
if f.is_valid():
new_revision = f.save(commit=False)
new_revision.article = article
if request.POST.__contains__('delete'):
if (article.current_revision.deleted == 1): # This article has already been deleted. Redirect
return HttpResponseRedirect(wiki_reverse('wiki_view', article, course))
new_revision.contents = ""
new_revision.deleted = 1
elif not new_revision.get_diff():
return HttpResponseRedirect(wiki_reverse('wiki_view', article, course))
if not request.user.is_anonymous():
new_revision.revision_user = request.user
new_revision.save()
if wiki_settings.WIKI_ALLOW_TITLE_EDIT:
new_revision.article.title = f.cleaned_data['title']
new_revision.article.save()
return HttpResponseRedirect(wiki_reverse('wiki_view', article, course))
else:
startContents = article.current_revision.contents if (article.current_revision.deleted == 0) else 'Headline\n===\n\n'
f = EditForm({'contents': startContents, 'title': article.title})
d = {'wiki_form': f}
update_template_dictionary(d, request, course, article)
return render_to_response('simplewiki/simplewiki_edit.html', d)
def history(request, article_path, page=1, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
(article, err) = get_article(request, article_path, course)
if err:
return err
perm_err = check_permissions(request, article, course, check_read=True, check_deleted=False)
if perm_err:
return perm_err
page_size = 10
if page is None:
page = 1
try:
p = int(page)
except ValueError:
p = 1
history = Revision.objects.filter(article__exact=article).order_by('-counter').select_related('previous_revision__counter', 'revision_user', 'wiki_article')
if request.method == 'POST':
if request.POST.__contains__('revision'): # They selected a version, but they can be either deleting or changing the version
perm_err = check_permissions(request, article, course, check_write=True, check_locked=True)
if perm_err:
return perm_err
redirectURL = wiki_reverse('wiki_view', article, course)
try:
r = int(request.POST['revision'])
revision = Revision.objects.get(id=r)
if request.POST.__contains__('change'):
article.current_revision = revision
article.save()
elif request.POST.__contains__('view'):
redirectURL = wiki_reverse('wiki_view_revision', course=course,
kwargs={'revision_number': revision.counter, 'article_path': article.get_path()})
                #The rest of these are admin functions
elif request.POST.__contains__('delete') and request.user.is_superuser:
if (revision.deleted == 0):
revision.adminSetDeleted(2)
elif request.POST.__contains__('restore') and request.user.is_superuser:
if (revision.deleted == 2):
revision.adminSetDeleted(0)
elif request.POST.__contains__('delete_all') and request.user.is_superuser:
Revision.objects.filter(article__exact=article, deleted=0).update(deleted=2)
elif request.POST.__contains__('lock_article'):
article.locked = not article.locked
article.save()
except Exception as e:
print str(e)
pass
finally:
return HttpResponseRedirect(redirectURL)
#
#
# <input type="submit" name="delete" value="Delete revision"/>
# <input type="submit" name="restore" value="Restore revision"/>
# <input type="submit" name="delete_all" value="Delete all revisions">
# %else:
# <input type="submit" name="delete_article" value="Delete all revisions">
#
page_count = (history.count() + (page_size - 1)) / page_size
if p > page_count:
p = 1
beginItem = (p - 1) * page_size
next_page = p + 1 if page_count > p else None
prev_page = p - 1 if p > 1 else None
d = {'wiki_page': p,
'wiki_next_page': next_page,
'wiki_prev_page': prev_page,
'wiki_history': history[beginItem:beginItem + page_size],
'show_delete_revision': request.user.is_superuser}
update_template_dictionary(d, request, course, article)
return render_to_response('simplewiki/simplewiki_history.html', d)
def revision_feed(request, page=1, namespace=None, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
page_size = 10
if page is None:
page = 1
try:
p = int(page)
except ValueError:
p = 1
history = Revision.objects.order_by('-revision_date').select_related('revision_user', 'article', 'previous_revision')
page_count = (history.count() + (page_size - 1)) / page_size
if p > page_count:
p = 1
beginItem = (p - 1) * page_size
next_page = p + 1 if page_count > p else None
prev_page = p - 1 if p > 1 else None
d = {'wiki_page': p,
'wiki_next_page': next_page,
'wiki_prev_page': prev_page,
'wiki_history': history[beginItem:beginItem + page_size],
'show_delete_revision': request.user.is_superuser,
'namespace': namespace}
update_template_dictionary(d, request, course)
return render_to_response('simplewiki/simplewiki_revision_feed.html', d)
def search_articles(request, namespace=None, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
# blampe: We should check for the presence of other popular django search
# apps and use those if possible. Only fall back on this as a last resort.
# Adding some context to results (eg where matches were) would also be nice.
# todo: maybe do some perm checking here
if request.method == 'GET':
querystring = request.GET.get('value', '').strip()
else:
querystring = ""
results = Article.objects.all()
if namespace:
results = results.filter(namespace__name__exact=namespace)
if request.user.is_superuser:
results = results.order_by('current_revision__deleted')
else:
results = results.filter(current_revision__deleted=0)
if querystring:
for queryword in querystring.split():
# Basic negation is as fancy as we get right now
if queryword[0] == '-' and len(queryword) > 1:
results._search = lambda x: results.exclude(x)
queryword = queryword[1:]
else:
results._search = lambda x: results.filter(x)
results = results._search(Q(current_revision__contents__icontains=queryword) | \
Q(title__icontains=queryword))
results = results.select_related('current_revision__deleted', 'namespace')
results = sorted(results, key=lambda article: (article.current_revision.deleted, article.get_path().lower()))
if len(results) == 1 and querystring:
return HttpResponseRedirect(wiki_reverse('wiki_view', article=results[0], course=course))
else:
d = {'wiki_search_results': results,
'wiki_search_query': querystring,
'namespace': namespace}
update_template_dictionary(d, request, course)
return render_to_response('simplewiki/simplewiki_searchresults.html', d)
def search_add_related(request, course_id, slug, namespace):
course = get_opt_course_with_access(request.user, course_id, 'load')
(article, err) = get_article(request, slug, namespace if namespace else course_id)
if err:
return err
perm_err = check_permissions(request, article, course, check_read=True)
if perm_err:
return perm_err
search_string = request.GET.get('query', None)
self_pk = request.GET.get('self', None)
if search_string:
results = []
related = Article.objects.filter(title__istartswith=search_string)
others = article.related.all()
if self_pk:
related = related.exclude(pk=self_pk)
if others:
related = related.exclude(related__in=others)
related = related.order_by('title')[:10]
for item in related:
results.append({'id': str(item.id),
'value': item.title,
'info': item.get_url()})
else:
results = []
json = simplejson.dumps({'results': results})
return HttpResponse(json, mimetype='application/json')
def add_related(request, course_id, slug, namespace):
course = get_opt_course_with_access(request.user, course_id, 'load')
(article, err) = get_article(request, slug, namespace if namespace else course_id)
if err:
return err
perm_err = check_permissions(request, article, course, check_write=True, check_locked=True)
if perm_err:
return perm_err
try:
related_id = request.POST['id']
rel = Article.objects.get(id=related_id)
has_already = article.related.filter(id=related_id).count()
if has_already == 0 and not rel == article:
article.related.add(rel)
article.save()
except:
pass
finally:
return HttpResponseRedirect(reverse('wiki_view', args=(article.get_url(),)))
def remove_related(request, course_id, namespace, slug, related_id):
course = get_opt_course_with_access(request.user, course_id, 'load')
(article, err) = get_article(request, slug, namespace if namespace else course_id)
if err:
return err
perm_err = check_permissions(request, article, course, check_write=True, check_locked=True)
if perm_err:
return perm_err
try:
rel_id = int(related_id)
rel = Article.objects.get(id=rel_id)
article.related.remove(rel)
article.save()
except:
pass
finally:
return HttpResponseRedirect(reverse('wiki_view', args=(article.get_url(),)))
def random_article(request, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
from random import randint
num_arts = Article.objects.count()
article = Article.objects.all()[randint(0, num_arts - 1)]
return HttpResponseRedirect(wiki_reverse('wiki_view', article, course))
def not_found(request, article_path, course):
"""Generate a NOT FOUND message for some URL"""
d = {'wiki_err_notfound': True,
'article_path': article_path,
'namespace': "edX"}
update_template_dictionary(d, request, course)
return render_to_response('simplewiki/simplewiki_error.html', d)
def get_article(request, article_path, course):
err = None
article = None
try:
article = Article.get_article(article_path)
    except (Article.DoesNotExist, ValueError):
err = not_found(request, article_path, course)
return (article, err)
def check_permissions(request, article, course, check_read=False, check_write=False, check_locked=False, check_deleted=False, revision=None):
read_err = check_read and not article.can_read(request.user)
write_err = check_write and not article.can_write(request.user)
locked_err = check_locked and article.locked
if revision is None:
revision = article.current_revision
deleted_err = check_deleted and not (revision.deleted == 0)
if (request.user.is_superuser):
deleted_err = False
locked_err = False
if read_err or write_err or locked_err or deleted_err:
d = {'wiki_article': article,
'wiki_err_noread': read_err,
'wiki_err_nowrite': write_err,
'wiki_err_locked': locked_err,
'wiki_err_deleted': deleted_err, }
update_template_dictionary(d, request, course)
# TODO: Make this a little less jarring by just displaying an error
# on the current page? (no such redirect happens for an anon upload yet)
# benjaoming: I think this is the nicest way of displaying an error, but
# these errors shouldn't occur, but rather be prevented on the other pages.
return render_to_response('simplewiki/simplewiki_error.html', d)
else:
return None
####################
# LOGIN PROTECTION #
####################
if wiki_settings.WIKI_REQUIRE_LOGIN_VIEW:
view = login_required(view)
history = login_required(history)
search_articles = login_required(search_articles)
root_redirect = login_required(root_redirect)
revision_feed = login_required(revision_feed)
random_article = login_required(random_article)
search_add_related = login_required(search_add_related)
not_found = login_required(not_found)
view_revision = login_required(view_revision)
if wiki_settings.WIKI_REQUIRE_LOGIN_EDIT:
create = login_required(create)
edit = login_required(edit)
add_related = login_required(add_related)
remove_related = login_required(remove_related)
if wiki_settings.WIKI_CONTEXT_PREPROCESSORS:
settings.TEMPLATE_CONTEXT_PROCESSORS += wiki_settings.WIKI_CONTEXT_PREPROCESSORS
| elimence/edx-platform | lms/djangoapps/simplewiki/views.py | Python | agpl-3.0 | 21,289 |
"""pdbquery RAPD plugin"""
"""
This file is part of RAPD
Copyright (C) 2017, Cornell University
All rights reserved.
RAPD is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, version 3.
RAPD is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__created__ = "2017-04-20"
__maintainer__ = "Jon Schuermann"
__email__ = "schuerjp@anl.gov"
__status__ = "Development"
# This is an active RAPD plugin
RAPD_PLUGIN = True
# This plugin's type
DATA_TYPE = "MX"
PLUGIN_TYPE = "PDBQUERY"
PLUGIN_SUBTYPE = "EXPERIMENTAL"
# A unique UUID for this handler (uuid.uuid1().hex)
ID = "9a2e"
VERSION = "2.0.0"
# Standard imports
from distutils.spawn import find_executable
import glob
import logging
#from multiprocessing import Process
from multiprocessing import cpu_count
from threading import Thread
import os
from pprint import pprint
import random
import shutil
import sys
import time
import importlib
import random
# RAPD
from bson.objectid import ObjectId
from plugins.subcontractors.rapd_phaser import run_phaser
from plugins.subcontractors.rapd_cctbx import get_pdb_info, get_mtz_info, get_res, get_spacegroup_info
from plugins.get_cif.plugin import check_pdbq
# from plugins.subcontractors.parse import parse_phaser_output, set_phaser_failed
from utils import archive
import utils.credits as rcredits
import utils.exceptions as exceptions
import utils.global_vars as rglobals
from utils.text import json
import utils.xutils as xutils
from utils.processes import local_subprocess, mp_pool, mp_manager
import info
# NE-CAT REST PDB server
PDBQ_SERVER = rglobals.PDBQ_SERVER
# Software dependencies
VERSIONS = {
"gnuplot": (
"gnuplot 4.2",
"gnuplot 5.0",
)
}
#class RapdPlugin(Process):
class RapdPlugin(Thread):
"""
RAPD plugin class
Command format:
{
"command":"pdbquery",
"directories":
{
"work": "" # Where to perform the work
},
"site_parameters": {} # Site data
"preferences": {} # Settings for calculations
"return_address":("127.0.0.1", 50000) # Location of control process
}
"""
# Settings
# Calc ADF for each solution (creates a lot of big map files).
#adf = False
percent = 0.01
# Run rigid-body refinement after MR.
#rigid = False
# Parameters
cell = None
est_res_number = 0
large_cell = False
input_spacegroup = False
input_spacegroup_num = 0
laue = False
dres = 0.0
volume = 0
# Holders for passed-in info
command = None
preferences = {}
# Holders for launched Phaser jobs
cell_output = {}
jobs = {}
# Holders for pdb ids
custom_structures = []
common_contaminants = []
search_results = []
# Holders for results
# Actual Phaser job results
phaser_results = {}
# Results that are sent back
results = {}
# Initial status
status = 1
redis = False
pool = False
batch_queue = False
# Timers for processes
phaser_timer = rglobals.PHASER_TIMEOUT
#def __init__(self, command, processed_results=False, computer_cluster=False, tprint=False, logger=False, verbosity=False):
def __init__(self, site, command, processed_results=False, tprint=False, logger=False, verbosity=False):
"""Initialize the plugin"""
Thread.__init__ (self)
# If the logging instance is passed in...
if logger:
self.logger = logger
else:
# Otherwise get the logger Instance
self.logger = logging.getLogger("RAPDLogger")
self.logger.debug("__init__")
# Keep track of start time
self.start_time = time.time()
# Store tprint for use throughout
if tprint:
self.tprint = tprint
# Dead end if no tprint passed
else:
def func(arg=False, level=False, verbosity=False, color=False):
"""Dummy function"""
pass
self.tprint = func
# Used for sending results back to DB referencing a dataset
self.processed_results = processed_results
# Some logging
self.logger.info(command)
self.verbose = verbosity
# Store passed-in variables
self.site = site
self.command = command
#self.preferences = self.command.get("preferences", {})
self.preferences = info.DEFAULT_PREFERENCES
self.preferences.update(self.command.get("preferences", {}))
# Params
self.working_dir = self.command["directories"].get("work", os.getcwd())
self.test = self.preferences.get("test", False)
#self.test = self.preferences.get("test", True) # Limit number of runs on cluster
#self.sample_type = self.preferences.get("type", "protein")
#self.solvent_content = self.preferences.get("solvent_content", 0.55)
self.clean = self.preferences.get("clean", True)
# self.verbose = self.command["preferences"].get("verbose", False)
self.data_file = xutils.convert_unicode(self.command["input_data"].get("data_file"))
# Used for setting up Redis connection
self.db_settings = self.command["input_data"].get("db_settings")
#self.nproc = self.preferences.get("nproc", 1)
# If no launcher is passed in, use local_subprocess in a multiprocessing.Pool
self.computer_cluster = xutils.load_cluster_adapter(self)
if self.computer_cluster:
self.launcher = self.computer_cluster.process_cluster
self.batch_queue = self.computer_cluster.check_queue(self.command.get('command'))
else:
self.launcher = local_subprocess
self.pool = mp_pool(self.preferences.get("nproc", cpu_count()-1))
self.manager = mp_manager()
# Setup a multiprocessing pool if not using a computer cluster.
#if not self.computer_cluster:
# self.pool = mp_pool(self.nproc)
# Set Python path for subcontractors.rapd_phaser
self.rapd_python = "rapd.python"
if self.site:
if hasattr(self.site, "RAPD_PYTHON_PATH"):
self.rapd_python = self.site.RAPD_PYTHON_PATH
def run(self):
"""Execution path of the plugin"""
self.preprocess()
self.process()
self.postprocess()
def preprocess(self):
"""Set up for plugin action"""
# Get running instance of PDB server
self.repository = check_pdbq(self.tprint, self.logger)
#print self.repository
# Construct the results
self.construct_results()
# self.tprint("preprocess")
self.tprint(arg=0, level="progress")
# Glean some information on the input file
input_spacegroup, self.cell, volume = get_mtz_info(self.data_file)
# Get high resolution limit from MTZ
self.dres = get_res(self.data_file)
# Determine the Laue group from the MTZ
input_spacegroup_num = xutils.convert_spacegroup(input_spacegroup)
self.laue = xutils.get_sub_groups(input_spacegroup_num, "laue")
# Throw some information into the terminal
self.tprint("\nDataset information", color="blue", level=10)
self.tprint(" Data file: %s" % self.data_file, level=10, color="white")
self.tprint(" Spacegroup: %s (%s)" % (input_spacegroup, input_spacegroup_num),
level=10,
color="white")
self.tprint(" Cell: %.2f %.2f %.2f %.2f %.2f %.2f" % tuple(self.cell),
level=10,
color="white")
self.tprint(" Volume: %.1f" % volume, level=10, color="white")
self.tprint(" Resolution: %.1f" % self.dres, level=10, color="white")
# self.tprint(" Subgroups: %s" % self.laue, level=10, color="white")
# Set by number of residues in AU. Ribosome (70s) is 24k.
self.est_res_number = xutils.calc_res_number(input_spacegroup,
se=False,
volume=volume,
#sample_type=self.sample_type,
sample_type=self.preferences.get("type", "protein"),
#solvent_content=self.solvent_content
solvent_content=self.preferences.get("solvent_content", 0.55))
if self.est_res_number > 5000:
self.large_cell = True
self.phaser_timer = self.phaser_timer * 1.5
# Check for dependency problems
self.check_dependencies()
# Connect to Redis (computer cluster sends results via Redis)
if self.preferences.get("run_mode") == "server" or self.computer_cluster:
self.connect_to_redis()
def update_status(self):
"""Update the status of the run."""
step = 90 / len(self.cell_output)
self.status += step
if self.status > 90:
self.status = 90
self.results["process"]["status"] = int(self.status)
def check_dependencies(self):
"""Make sure dependencies are all available"""
# Any of these missing, dead in the water
#TODO reduce external dependencies
#for executable in ("bzip2", "gunzip", "phaser", "phenix.cif_as_pdb", "tar"):
for executable in ("gunzip", "phaser"):
if not find_executable(executable):
self.tprint("Executable for %s is not present, exiting" % executable,
level=30,
color="red")
self.results["process"]["status"] = -1
self.results["error"] = "Executable for %s is not present" % executable
self.write_json()
raise exceptions.MissingExecutableException(executable)
# If no gnuplot turn off printing
# if self.preferences.get("show_plots", True) and (not self.preferences.get("json", False)):
# if not find_executable("gnuplot"):
# self.tprint("\nExecutable for gnuplot is not present, turning off plotting",
# level=30,
# color="red")
# self.preferences["show_plots"] = False
def construct_results(self):
"""Create the self.results dict"""
self.results["command"] = self.command
# Copy over details of this run
#self.results["command"] = self.command.get("command")
self.results["preferences"] = self.preferences
# Describe the process
self.results["process"] = self.command.get("process", {})
# Add process_id
self.results["process"]["process_id"] = self.command.get("process_id")
# Status is now 1 (starting)
self.results["process"]["status"] = self.status
# Process type is plugin
self.results["process"]["type"] = "plugin"
# Give it a result_id
self.results["process"]["result_id"] = str(ObjectId())
# Add link to processed dataset
if self.processed_results:
#self.results["process"]["result_id"] = self.processed_results["process"]["result_id"]
# This links to MongoDB results._id
self.results["process"]["parent_id"] = self.processed_results.get("process", {}).get("result_id", False)
# This links to a session
self.results["process"]["session_id"] = self.processed_results.get("process", {}).get("session_id", False)
# Identify parent type
self.results["process"]["parent"] = self.processed_results.get("plugin", {})
# The repr
self.results["process"]["repr"] = self.processed_results.get("process", {}).get("repr", "Unknown")
# Describe plugin
self.results["plugin"] = {
"data_type": DATA_TYPE,
"type": PLUGIN_TYPE,
"subtype": PLUGIN_SUBTYPE,
"id": ID,
"version": VERSION
}
# Add fields to results
self.results["results"] = {
"custom_structures": [],
"common_contaminants": [],
"search_results": [],
"archive_files": [],
"data_produced": [],
"for_display": []
}
def connect_to_redis(self):
"""Connect to the redis instance"""
redis_database = importlib.import_module('database.redis_adapter')
#redis_database = redis_database.Database(settings=self.db_settings)
#self.redis = redis_database.connect_to_redis()
self.redis = redis_database.Database(settings=self.db_settings,
logger=self.logger)
def send_results(self):
"""Let everyone know we are working on this"""
self.logger.debug("send_results")
if self.preferences.get("run_mode") == "server":
self.logger.debug("Sending back on redis")
#pprint(self.results)
# Transcribe results
json_results = json.dumps(self.results)
# Get redis instance
if not self.redis:
self.connect_to_redis()
# Send results back
self.redis.lpush("RAPD_RESULTS", json_results)
self.redis.publish("RAPD_RESULTS", json_results)
def process(self):
"""Run plugin action"""
if self.command["input_data"].get("pdbs", False):
self.add_custom_pdbs()
if self.preferences.get("search", False):
self.query_pdbq()
if self.preferences.get("contaminants", False):
self.add_contaminants()
self.process_phaser()
self.jobs_monitor()
def add_custom_pdbs(self):
"""Add custom pdb codes to the screen"""
self.logger.debug("add_custom_pdbs")
self.tprint("\nAdding input PDB codes", level=10, color="blue")
# Query the server for information and add to self.cell_output
cif_check = self.repository.check_for_pdbs(self.command["input_data"].get("pdbs"))
self.cell_output.update(cif_check)
self.custom_structures = cif_check.keys()
def query_pdbq(self):
"""
Check if cell is found in PDBQ
Places relevant pdbs into self.cell_output
"""
self.logger.debug("query_pdbq")
self.tprint("\nSearching for similar unit cells in the PDB", level=10, color="blue")
def connect_pdbq(previous_results, permutations, end):
"""Function to query the PDBQ server"""
all_results = previous_results.copy()
# Fields for search parameters
fields = ["a", "b", "c", "alpha", "beta", "gamma"]
for permute_counter in range(end):
search_params = {}
for field_index, field in enumerate(fields):
search_params[field] = \
[self.cell[permutations[permute_counter][field_index]] -
self.cell[permutations[permute_counter][field_index]] *
self.percent/2,
self.cell[permutations[permute_counter][field_index]] +
self.cell[permutations[permute_counter][field_index]] *
self.percent/2]
#print "search_params", search_params
#all_results.update(self.repository.cell_search(search_params))
search_results = self.repository.cell_search(search_params)
all_results.update(search_results)
# Return all results
return all_results
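# Worked example of the search window above (illustrative numbers): with self.percent
# = 0.01 and an observed a edge of 78.0 A, the "a" field is searched over
# [78.0 - 78.0 * 0.005, 78.0 + 78.0 * 0.005] = [77.61, 78.39], i.e. +/- 0.5% of the
# edge; the window widens each time self.percent is incremented in the retry loop below.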
def limit_pdbq_results(pdbq_results, limit):
"""Trim the query results down to at most `limit` entries"""
entries_beyond_limit = pdbq_results.keys()[limit:]
for p in pdbq_results.keys():
if p in entries_beyond_limit:
del pdbq_results[p]
return pdbq_results
permute = False
end = 1
permutations = [(0, 1, 2, 3, 4, 5), (1, 2, 0, 4, 5, 3), (2, 0, 1, 5, 3, 4)]
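# Each tuple above reorders (a, b, c, alpha, beta, gamma); e.g. (1, 2, 0, 4, 5, 3)
# searches with the cell edges cycled to (b, c, a) and the matching angles, so different
# axis-naming conventions for orthorhombic (and pseudo-orthorhombic monoclinic) settings
# still match the same PDB entries.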
# Check for orthorhombic
if self.laue == "16":
permute = True
# Check monoclinic when Beta is near 90.
if self.laue in ("3", "5"):
if 89.0 < self.cell[4] < 91.0:
permute = True
if permute:
end = len(permutations)
# Decide how many search results to keep (depends on cluster availability and cell size)
no_limit = False
if self.computer_cluster:
if self.large_cell:
#limit = 10
limit = int(round(self.preferences.get("pdb_limit", 20) / 2))
elif permute:
#limit = 60
limit = int(round(self.preferences.get("pdb_limit", 20) * 1.5))
else:
no_limit = True
#limit = 40
limit = self.preferences.get("pdb_limit", 20)
else:
#limit = 8
limit = self.preferences.get("pdb_limit", 20)
# Limit the unit cell difference to 25%. Also stops it if errors are received.
pdbq_results = {}
counter = 0
#while counter < 25:
while counter < self.preferences.get("cell_limit", 25):
self.tprint(" Querying server at %s" % PDBQ_SERVER,
level=20,
color="white")
# Connect to and query the PDBQ server
pdbq_results = connect_pdbq(pdbq_results, permutations, end)
# Handle results
if pdbq_results:
for line in pdbq_results.keys():
# Remove anything bigger than 4 letters
if len(line) > 4:
del pdbq_results[line]
# Do not limit number of results if many models come out really close in cell
# dimensions.
if counter in (0, 1):
# Limit output
if no_limit == False:
pdbq_results = limit_pdbq_results(pdbq_results, limit)
else:
pdbq_results = limit_pdbq_results(pdbq_results, limit)
# Not enough results
if len(pdbq_results) < limit:
counter += 1
self.percent += 0.01
self.logger.debug("Not enough PDB results. Going for more...")
else:
break
# There will be results!
if pdbq_results:
# Test mode = keep only the first few PDBs
if self.test:
my_pdbq_results = {
pdbq_results.keys()[0]: pdbq_results[pdbq_results.keys()[0]],
pdbq_results.keys()[1]: pdbq_results[pdbq_results.keys()[1]],
pdbq_results.keys()[2]: pdbq_results[pdbq_results.keys()[2]],
}
pdbq_results = my_pdbq_results
self.search_results = pdbq_results.keys()
self.cell_output.update(pdbq_results)
self.tprint(" %d relevant PDB files found on the PDBQ server" % \
len(pdbq_results.keys()),
level=50,
color="white")
else:
self.logger.debug("Failed to find pdb with similar cell.")
self.tprint("No relevant PDB files found on the PDBQ server",
level=50,
color="red")
def add_contaminants(self):
"""
Add common PDB contaminants to the search list.
Adds files to self.cell_output
"""
self.logger.debug("add_contaminants")
self.tprint("\nAdding common contaminants to PDB screen",
level=10,
color="blue")
# Save these codes in a separate list so they can be separated in the Summary.
common_contaminants = info.CONTAMINANTS.copy()
self.common_contaminants = common_contaminants.keys()
# Remove PDBs from self.common if they were already caught by unit cell dimensions.
for contaminant in self.common_contaminants:
if contaminant in self.cell_output:
del common_contaminants[contaminant]
self.common_contaminants.remove(contaminant)
# Test mode = only one PDB
if self.test:
my_contaminants = {
common_contaminants.keys()[0]: common_contaminants[common_contaminants.keys()[0]]
}
common_contaminants = my_contaminants
self.common_contaminants = common_contaminants.keys()
# Put contaminants in list to be screened
self.tprint(" %d contaminants added to screen" % len(common_contaminants),
level=10,
color="white")
# print common_contaminants
self.cell_output.update(common_contaminants)
def process_phaser(self):
"""Start Phaser for input pdb"""
self.logger.debug("process_phaser")
self.tprint("\nStarting molecular replacement", level=30, color="blue")
self.tprint(" Assembling Phaser runs", level=10, color="white")
def launch_job(inp):
"""Launch the Phaser job"""
#self.logger.debug("process_phaser Launching %s"%inp['name'])
tag = 'Phaser_%d' % random.randint(0, 10000)
if self.computer_cluster:
# Create a unique identifier for Phaser results
inp['tag'] = tag
# Send Redis settings so results can be sent thru redis
#inp['db_settings'] = self.site.CONTROL_DATABASE_SETTINGS
# Don't need result queue since results will be sent via Redis
queue = False
else:
inp['pool'] = self.pool
# Add result queue
queue = self.manager.Queue()
inp['result_queue'] = queue
#if self.pool:
# inp['pool'] = self.pool
#else:
# inp['tag'] = tag
#job, pid, tag = run_phaser(**inp)
job, pid = run_phaser(**inp)
self.jobs[job] = {'name': inp['name'],
'pid' : pid,
'tag' : tag,
'result_queue': queue,
'spacegroup': inp['spacegroup'] # Need for jobs that timeout.
}
# Run through the pdbs
for pdb_code in self.cell_output.keys():
self.tprint(" %s" % pdb_code, level=30, color="white")
l = False
copy = 1
# Create directory for MR
xutils.create_folders(self.working_dir, "Phaser_%s" % pdb_code)
cif_file = pdb_code.lower() + ".cif"
# Get the structure file
if self.test and os.path.exists(cif_file):
cif_path = os.path.join(os.getcwd(), cif_file)
else:
cif_path = self.repository.download_cif(pdb_code, os.path.join(os.getcwd(), cif_file))
if not cif_path:
self.postprocess_invalid_code(pdb_code)
else:
# If mmCIF, checks if file exists or if it is super structure with
# multiple PDB codes, and returns False, otherwise sends back SG.
spacegroup_pdb = xutils.fix_spacegroup(get_spacegroup_info(cif_path))
if not spacegroup_pdb:
del self.cell_output[pdb_code]
continue
# Now check all SG's
spacegroup_num = xutils.convert_spacegroup(spacegroup_pdb)
lg_pdb = xutils.get_sub_groups(spacegroup_num, "laue")
self.tprint(" %s spacegroup: %s (%s)" % (cif_path, spacegroup_pdb, spacegroup_num),
level=10,
color="white")
self.tprint(" subgroups: %s" % str(lg_pdb), level=10, color="white")
# SG from data
data_spacegroup = xutils.convert_spacegroup(self.laue, True)
# self.tprint(" Data spacegroup: %s" % data_spacegroup, level=10, color="white")
# Fewer mols in AU or in common_contaminants.
if pdb_code in self.common_contaminants or float(self.laue) > float(lg_pdb):
# if SM is lower sym, which will cause problems, since PDB is too big.
pdb_info = get_pdb_info(struct_file=cif_path,
data_file=self.data_file,
dres=self.dres,
matthews=True,
chains=True)
# Prune if only one chain present, b/c "all" and "A" will be the same.
if len(pdb_info.keys()) == 2:
for key in pdb_info.keys():
if key != "all":
del pdb_info[key]
copy = pdb_info["all"]["NMol"]
if copy == 0:
copy = 1
# If pdb_info["all"]["res"] == 0.0:
if pdb_info["all"]["SC"] < 0.2:
# Only run on chains that will fit in the AU.
l = [chain for chain in pdb_info.keys() if pdb_info[chain]["res"] != 0.0]
# More mols in AU
elif float(self.laue) < float(lg_pdb):
pdb_info = get_pdb_info(struct_file=cif_path,
data_file=self.data_file,
dres=self.dres,
matthews=True,
chains=False)
copy = pdb_info["all"]["NMol"]
# Same number of mols in AU.
else:
pdb_info = get_pdb_info(struct_file=cif_path,
data_file=self.data_file,
dres=self.dres,
matthews=False,
chains=False)
job_description = {
"work_dir": os.path.abspath(os.path.join(self.working_dir, "Phaser_%s" % pdb_code)), #
"data_file": self.data_file,
"struct_file": cif_path,
"name": pdb_code, #
"spacegroup": data_spacegroup,
"ncopy": copy, #
#"test": self.test,
"cell_analysis": True, #
#"large_cell": self.large_cell,
"resolution": xutils.set_phaser_res(pdb_info["all"]["res"],
self.large_cell,
self.dres),
"launcher": self.launcher, #
"db_settings": self.db_settings, #
"tag": False, #
"batch_queue": self.batch_queue, #
"rapd_python": self.rapd_python}
if not l:
launch_job(job_description)
else:
for chain in l:
new_code = "%s_%s" % (pdb_code, chain)
xutils.folders(self, "Phaser_%s" % new_code)
job_description.update({
"work_dir": os.path.abspath(os.path.join(self.working_dir, "Phaser_%s" % \
new_code)),
"struct_file": pdb_info[chain]["file"],
"name":new_code,
"ncopy":pdb_info[chain]["NMol"],
"resolution":xutils.set_phaser_res(pdb_info[chain]["res"],
self.large_cell,
self.dres)})
launch_job(job_description)
def postprocess_phaser(self, job_name, results):
"""fix Phaser results and pass back"""
self.logger.debug("postprocess_phaser")
#self.logger.debug(results)
# Add description to results
results['description'] = self.cell_output[job_name.split('_')[0]].get('description')
# Copy tar to working dir for commandline results
## Have to determine when it is running in commandline mode##
"""
if results.get("tar", False):
orig = results.get("tar", {"path":False}).get("path")
if orig:
new = os.path.join(self.working_dir, os.path.basename(orig))
# If old file in working dir, remove it and recopy.
if os.path.exists(new):
os.unlink(new)
shutil.copy(orig, new)
results["tar"]["path"] = new
# Copy pdbfile to working dir
if results.get("pdb_file", False):
orig = results.get("pdb_file")
new = os.path.join(self.working_dir, os.path.basename(orig))
# If old file in working dir, remove it and recopy.
if os.path.exists(new):
os.unlink(new)
shutil.copy(orig, new)
results["pdb_file"] = new
"""
# Three result types to run through
types = (
("custom_structures", self.custom_structures),
("common_contaminants", self.common_contaminants),
("search_results", self.search_results)
)
# Run through result types
for result_type, pdb_codes in types:
if pdb_codes.count(job_name.split('_')[0]):
# Add chains of PDB to correct list
if len(job_name.split('_')) > 1:
pdb_codes.append(job_name)
# Update results
#self.results['results'][result_type][job_name] = results
self.results['results'][result_type].append(results)
break
# Save results for command line
self.phaser_results[job_name] = {"results": results}
# Update the status number
self.update_status()
# Move transferring files
self.transfer_files(results)
# Passback new results to RAPD
self.send_results()
def transfer_files(self, results):
"""
Transfer files to a directory that the control can access
"""
self.logger.debug("transfer_files")
#self.logger.debug(results)
if self.command["directories"].get("exchange_dir", False):
# Determine and validate the place to put the data
target_dir = os.path.join(
#self.preferences["exchange_dir"], os.path.split(self.working_dir)[1])
self.command["directories"].get("exchange_dir" ), os.path.split(self.working_dir)[1])
if not os.path.exists(target_dir):
os.makedirs(target_dir)
for result in (results.get("common_contaminants", [])+results.get("search_results", [])):
# If there is a pdb produced -> data_produced
# Copy compressed results files to exchange dir and update path.
l = ["map_1_1", "map_2_1", 'pdb', 'mtz', 'tar']
for f in l:
archive_dict = result.get(f, {})
archive_file = archive_dict.get("path", False)
if archive_file:
# Copy data
target = os.path.join(target_dir, os.path.basename(archive_file))
#shutil.move(archive_file, target)
if f in ("map_1_1", "map_2_1", 'tar'):
shutil.move(archive_file, target)
else:
# Once we know this works we can switch to moving files.
shutil.copyfile(archive_file, target)
# Store new path information
archive_dict["path"] = target
# Add to the results.data_produced array
if f in ('pdb', 'mtz', 'tar'):
self.results["results"]["data_produced"].append(archive_dict)
# Also put PDB path in 'for_display' results
if f in ('pdb', "map_1_1", "map_2_1"):
self.results["results"]["for_display"].append(archive_dict)
"""
archive_dict = result.get("pdb", {})
archive_file = archive_dict.get("path", False)
if archive_file:
# Copy data
target = os.path.join(target_dir, os.path.basename(archive_file))
shutil.copyfile(archive_file, target)
# Store new path information
archive_dict["path"] = target
# Add to the results.data_produced array
self.results["results"]["data_produced"].append(archive_dict)
"""
# # Maps & PDB
# for my_map in ("map_1_1", "map_2_1", "pdb"):
# archive_dict = result.get(my_map, {})
# archive_file = archive_dict.get("path", False)
# if archive_file:
# # Move the file
# target = os.path.join(target_dir, os.path.basename(archive_file))
# shutil.move(archive_file, target)
# # Store information
# archive_dict["path"] = target
# # Add to the results.data_produced array
# self.results["results"]["data_produced"].append(archive_dict)
"""
# Maps & PDB
for my_map in ("map_1_1", "map_2_1", "pdb"):
archive_dict = result.get(my_map, {})
archive_file = archive_dict.get("path", False)
if archive_file:
# Move the file
target = os.path.join(target_dir, os.path.basename(archive_file))
shutil.move(archive_file, target)
# Store information
archive_dict["path"] = target
# Add to the results.archive_files array
self.results["results"]["for_display"].append(archive_dict)
# If there is an archive
archive_dict = result.get("tar", {})
archive_file = archive_dict.get("path", False)
if archive_file:
# Move the file
target = os.path.join(
target_dir, os.path.basename(archive_file))
self.logger.debug("target %s", target)
shutil.move(archive_file, target)
# Store information
archive_dict["path"] = target
# Add to the results.data_produced array
self.results["results"]["data_produced"].append(archive_dict)
"""
"""
# Maps & PDB (IS THIS REQUIRED?? or leftover garbage???)
for my_map in ("map_1_1", "map_2_1", "pdb"):
archive_dict = result.get(my_map, {})
archive_file = archive_dict.get("path", False)
if archive_file:
# Move the file
target = os.path.join(target_dir, os.path.basename(archive_file))
shutil.move(archive_file, target)
# Store information
archive_dict["path"] = target
# Add to the results.archive_files array
self.results["results"]["archive_files"].append(
archive_dict)
"""
def postprocess_invalid_code(self, job_name):
"""Make a proper result for PDB that could not be downloaded"""
results = {"solution": False,
"message": "invalid PDB code",
"description": self.cell_output[job_name].get("description")}
# Three result types to run through
types = (
("custom_structures", self.custom_structures),
("common_contaminants", self.common_contaminants),
("search_results", self.search_results)
)
# Run through result types
for result_type, pdb_codes in types:
if pdb_codes.count(job_name):
self.results['results'][result_type].append(results)
break
# Save results for command line
self.phaser_results[job_name] = {"results": results}
# Update the status number
self.update_status()
# Passback new results to RAPD
self.send_results()
def jobs_monitor(self):
"""Monitor running jobs and finish them when they complete."""
def finish_job(job):
"""Finish the jobs and send to postprocess_phaser"""
info = self.jobs.pop(job)
self.tprint(' Finished Phaser on %s with id: %s'%(info['name'], info['tag']), level=30, color="white")
self.logger.debug('Finished Phaser on %s'%info['name'])
if self.computer_cluster:
results_json = self.redis.get(info['tag'])
#self.logger.debug('results_json_type: %s results_json: %s'%(type(results_json), results_json))
if not results_json:
self.postprocess_phaser(info['name'], {"ID": info['name'],
"solution": False,
"spacegroup": info['spacegroup'],
"message": "Error launching job"})
else:
results = json.loads(results_json)
self.postprocess_phaser(info['name'], results)
self.redis.delete(info['tag'])
"""
# This try/except is for when results aren't in Redis in time.
try:
results = json.loads(results_json)
self.postprocess_phaser(info['name'], results)
self.redis.delete(info['tag'])
except Exception as e:
self.logger.error('Error: '+ str(e))
self.logger.error('results_json: %s'%results_json)
#print 'PROBLEM: %s %s'%(info['name'], info['output_id'])
#print results_json
"""
else:
results = info['result_queue'].get()
# pprint(results.get('stdout', " "))
# pprint(json.loads(results.get('stdout'," ")))
# if results["stderr"]:
# print results["stderr"]
self.postprocess_phaser(info['name'], json.loads(results.get('stdout', " ")))
jobs.remove(job)
# Signal to the pool that no more processes will be added
if self.pool:
self.pool.close()
timed_out = False
timer = 0
jobs = self.jobs.keys()
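# Note on the bookkeeping below (Python 2 semantics): .keys() returns a list copy, so
# finish_job() popping entries out of self.jobs does not change this local `jobs` list;
# finished jobs are removed from it separately via jobs.remove(job).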
# Run loop to see when jobs finish
while len(jobs):
for job in jobs:
if self.pool:
if job.ready():
finish_job(job)
elif job.is_alive() == False:
finish_job(job)
time.sleep(1)
timer += 1
"""
if self.verbose:
if round(timer%1,1) in (0.0,1.0):
print 'Waiting for AutoStat jobs to finish '+str(timer)+' seconds'
"""
if self.phaser_timer:
if timer >= self.phaser_timer:
timed_out = True
break
if timed_out:
if self.verbose:
self.logger.debug('PDBQuery timed out.')
for job in self.jobs.keys():
if self.computer_cluster:
# Kill job on cluster:
self.computer_cluster.kill_job(self.jobs[job].get('pid'))
else:
# terminate the job
job.terminate()
# Get the job info
info = self.jobs.pop(job)
self.logger.debug('Timeout Phaser on %s'%info['name'])
# Send timeout result to postprocess
self.postprocess_phaser(info['name'], {"ID": info['name'],
"solution": False,
"spacegroup": info['spacegroup'],
"message": "Timed out"})
# Delete the Redis key (only set when running on a computer cluster)
if self.redis:
self.redis.delete(info['tag'])
# Join the self.pool if used
if self.pool:
self.pool.join()
if self.verbose and self.logger:
self.logger.debug('PDBQuery.jobs_monitor finished.')
def postprocess(self):
"""Clean up after plugin action"""
self.tprint(arg=90, level="progress")
# Cleanup my mess.
self.clean_up()
# Finished
self.results["process"]["status"] = 100
self.tprint(arg=100, level="progress")
#pprint(self.results)
self.write_json()
# Send Final results
self.send_results()
# print results if run from commandline
if self.tprint:
self.print_results()
# Print credits
self.print_credits()
# Message in logger
self.logger.debug('PDBquery finished')
def clean_up(self):
"""Clean up the working directory"""
self.tprint(" Cleaning up", level=30, color="white")
if self.command["preferences"].get("clean", False):
self.logger.debug("Cleaning up Phaser files and folders")
# Change to work dir
os.chdir(self.working_dir)
# Gather targets and remove
files_to_clean = glob.glob("Phaser_*")
for target in files_to_clean:
shutil.rmtree(target)
def print_results(self):
"""Print the results to the commandline"""
self.tprint("\nResults", level=99, color="blue")
# pprint(self.results["results"])
def get_longest_field(pdb_codes):
"""Calculate the longest description field in a set of results"""
longest_field = 0
for pdb_code in pdb_codes:
if self.cell_output.has_key(pdb_code):
length = len(self.cell_output[pdb_code]["description"])
if length > longest_field:
longest_field = length
return longest_field
def print_header_line(longest_field):
"""Print the table header line"""
self.tprint((" {:4} {:^{width}} {:^14} {:^14} {:^14} {:^14} {}").format(
"PDB",
"Description",
"LL-Gain",
"RF Z-score",
"TF Z-score",
"# Clashes",
"Info",
width=str(longest_field)),
level=99,
color="white")
def print_result_line(pdb_code, my_result, longest_field):
"""Print the result line in the table"""
# print my_result
self.tprint(" {:4} {:^{width}} {:^14} {:^14} {:^14} {:^14} {}".format(
pdb_code,
#self.cell_output[pdb_code]["description"],
my_result.get("description", "-"),
my_result.get("gain", "-"),
my_result.get("rfz", "-"),
my_result.get("tfz", "-"),
my_result.get("clash", "-"),
my_result.get("message", ""),
width=str(longest_field)
),
level=99,
color="white")
for tag, pdb_codes in (("User-input structures", self.custom_structures),
("Common contaminants", self.common_contaminants),
("Cell parameter search structures", self.search_results)):
if pdb_codes:
# Find out the longest description field
longest_field = get_longest_field(pdb_codes)
# Print header for table
self.tprint("\n %s" % tag, level=99, color="white")
print_header_line(longest_field)
# Run through the codes
for pdb_code in pdb_codes:
if self.phaser_results.has_key(pdb_code):
# Get the result in question
my_result = self.phaser_results[pdb_code]["results"]
# Print the result line
print_result_line(pdb_code, my_result, longest_field)
def write_json(self):
"""Print out JSON-formatted result"""
json_string = json.dumps(self.results)
# If running in JSON mode, print to terminal
if self.preferences.get("run_mode") == "json":
print json_string
# Output to terminal?
#if self.preferences.get("json", False):
# print json_string
# Always write a file
os.chdir(self.working_dir)
with open("result.json", "w") as outfile:
outfile.writelines(json_string)
def print_credits(self):
"""Print credits for programs utilized by this plugin"""
self.tprint(rcredits.HEADER,
level=99,
color="blue")
programs = ["CCTBX", "PHENIX", "PHASER"]
info_string = rcredits.get_credits_text(programs, " ")
self.tprint(info_string, level=99, color="white")
| RAPD/RAPD | src/plugins/pdbquery/plugin.py | Python | agpl-3.0 | 47,200 |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.dispatch import Signal
compute_scores_encodings_deadlines = Signal(providing_args=["offer_year_calendar"])
compute_student_score_encoding_deadline = Signal(providing_args=["session_exam_deadline"])
compute_all_scores_encodings_deadlines = Signal(providing_args=["academic_calendar"])
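# Illustrative usage only (receiver and variable names below are assumptions, not part of
# this module): a Django receiver subscribes to one of these signals and the business
# layer fires it with .send():
#
#   from django.dispatch import receiver
#   from base.signals.publisher import compute_scores_encodings_deadlines
#
#   @receiver(compute_scores_encodings_deadlines)
#   def _recompute(sender, offer_year_calendar, **kwargs):
#       pass  # recompute deadlines for the given offer_year_calendar
#
#   compute_scores_encodings_deadlines.send(sender=None, offer_year_calendar=some_calendar)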
| uclouvain/osis_louvain | base/signals/publisher.py | Python | agpl-3.0 | 1,579 |
__author__ = 'sweemeng'
from rest_framework import status
from popit.signals.handlers import *
from popit.models import *
from popit.tests.base_testcase import BasePopitAPITestCase
class PersonLinkAPITestCase(BasePopitAPITestCase):
def test_view_person_link_list_unauthorized(self):
response = self.client.get("/en/persons/ab1a5788e5bae955c048748fa6af0e97/links/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_view_person_link_list_authorized(self):
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.get("/en/persons/ab1a5788e5bae955c048748fa6af0e97/links/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_view_person_link_details_unauthorized(self):
response = self.client.get("/en/persons/ab1a5788e5bae955c048748fa6af0e97/links/a4ffa24a9ef3cbcb8cfaa178c9329367/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_view_person_link_details_authorized(self):
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.get("/en/persons/ab1a5788e5bae955c048748fa6af0e97/links/a4ffa24a9ef3cbcb8cfaa178c9329367/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_view_person_link_details_not_exist_unauthorized(self):
response = self.client.get("/en/persons/ab1a5788e5bae955c048748fa6af0e97/links/not_exist/")
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_view_person_link_details_not_exist_authorized(self):
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.get("/en/persons/ab1a5788e5bae955c048748fa6af0e97/links/not_exist/")
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_create_person_links_unauthorized(self):
data = {
"url": "http://twitter.com/sweemeng",
}
response = self.client.post("/en/persons/ab1a5788e5bae955c048748fa6af0e97/links/", data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_create_person_links_authorized(self):
data = {
"url": "http://twitter.com/sweemeng",
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.post("/en/persons/ab1a5788e5bae955c048748fa6af0e97/links/", data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
person_ = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
url = person_.links.language("en").get(url="http://twitter.com/sweemeng")
self.assertEqual(url.url, "http://twitter.com/sweemeng")
def test_update_person_links_unauthorized(self):
data = {
"note": "just a random repo"
}
response = self.client.put(
"/en/persons/ab1a5788e5bae955c048748fa6af0e97/links/a4ffa24a9ef3cbcb8cfaa178c9329367/",
data
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update_person_links_not_exist_unauthorized(self):
data = {
"note": "just a random repo"
}
response = self.client.put(
"/en/persons/ab1a5788e5bae955c048748fa6af0e97/links/not_exist/",
data
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update_person_links_authorized(self):
data = {
"note": "just a random repo"
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.put(
"/en/persons/ab1a5788e5bae955c048748fa6af0e97/links/a4ffa24a9ef3cbcb8cfaa178c9329367/",
data
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
person = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
url = person.links.language("en").get(id="a4ffa24a9ef3cbcb8cfaa178c9329367")
self.assertEqual(url.note, "just a random repo")
def test_update_person_links_not_exist_authorized(self):
data = {
"note": "just a random repo"
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.put(
"/en/persons/ab1a5788e5bae955c048748fa6af0e97/links/not_exist/",
data
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_delete_person_links_unauthorized(self):
response = self.client.delete("/en/persons/ab1a5788e5bae955c048748fa6af0e97/links/a4ffa24a9ef3cbcb8cfaa178c9329367/")
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_delete_person_links_not_exist_unauthorized(self):
response = self.client.delete("/en/persons/ab1a5788e5bae955c048748fa6af0e97/links/not_exist/")
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_delete_person_links_authorized(self):
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.delete("/en/persons/ab1a5788e5bae955c048748fa6af0e97/links/a4ffa24a9ef3cbcb8cfaa178c9329367/")
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_delete_person_links_not_exist_authorized(self):
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.delete("/en/persons/ab1a5788e5bae955c048748fa6af0e97/links/not_exist/")
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
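# The tests above all follow the same pattern: authorized requests load the fixture
# "admin" token and pass it as a DRF Authorization header via
# self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key); unauthorized write
# requests expect HTTP 401, while reads return 200 (or 404 for missing objects).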
class PersonOtherNameAPITestCase(BasePopitAPITestCase):
def test_view_person_othername_list_unauthorized(self):
response = self.client.get("/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_view_person_othername_list_authorized(self):
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.get("/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_view_person_othername_details_unauthorized(self):
response = self.client.get(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/cf93e73f-91b6-4fad-bf76-0782c80297a8/"
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_view_person_othername_details_not_exist_unauthorized(self):
response = self.client.get(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/not_exist/"
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_view_person_othername_details_authorized(self):
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.get(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/cf93e73f-91b6-4fad-bf76-0782c80297a8/"
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_view_person_othername_details_not_exist_authorized(self):
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.get(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/not_exist/"
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_create_person_othername_unauthorized(self):
data = {
"name": "jane",
"family_name": "jambul",
"given_name": "test person",
"start_date": "1950-01-01",
"end_date": "2010-01-01",
}
response = self.client.post(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/", data
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_create_person_othername_authorized(self):
data = {
"name": "jane",
"family_name": "jambul",
"given_name": "test person",
"start_date": "1950-01-01",
"end_date": "2010-01-01",
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.post(
"/en/persons/ab1a5788e5bae955c048748fa6af0e97/othernames/", data
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
person_ = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
other_name = person_.other_names.language('en').get(name="jane")
self.assertEqual(other_name.given_name, "test person")
def test_update_person_othername_unauthorized(self):
data = {
"family_name": "jambul",
}
person = Person.objects.language('en').get(id='8497ba86-7485-42d2-9596-2ab14520f1f4')
other_name = person.other_names.language('en').get(id="cf93e73f-91b6-4fad-bf76-0782c80297a8")
response = self.client.put(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/cf93e73f-91b6-4fad-bf76-0782c80297a8/",
data
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update_person_othername_not_exist_unauthorized(self):
data = {
"family_name": "jambul",
}
response = self.client.put(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/not_exist/",
data
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update_person_othername_authorized(self):
data = {
"family_name": "jambul",
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.put(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/cf93e73f-91b6-4fad-bf76-0782c80297a8/",
data
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
person = Person.objects.language('en').get(id='8497ba86-7485-42d2-9596-2ab14520f1f4')
other_name = person.other_names.language('en').get(id="cf93e73f-91b6-4fad-bf76-0782c80297a8")
self.assertEqual(other_name.family_name, "jambul")
def test_update_person_othername_not_exist_authorized(self):
data = {
"family_name": "jambul",
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.put(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/not_exist/",
data
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_delete_person_othername_unauthorized(self):
response = self.client.delete(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/cf93e73f-91b6-4fad-bf76-0782c80297a8/"
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_delete_person_othername_not_exist_unauthorized(self):
response = self.client.delete(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/not_exist/"
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_delete_person_othername_authorized(self):
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.delete(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/cf93e73f-91b6-4fad-bf76-0782c80297a8/"
)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_delete_person_othername_not_exist_authorized(self):
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.delete(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/not_exist/"
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
class PersonIdentifierLinkAPITestCase(BasePopitAPITestCase):
def test_get_person_identifier_link_list_unauthorized(self):
# identifier af7c01b5-1c4f-4c08-9174-3de5ff270bdb
# link 9c9a2093-c3eb-4b51-b869-0d3b4ab281fd
# person 8497ba86-7485-42d2-9596-2ab14520f1f4
response = self.client.get(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/identifiers/af7c01b5-1c4f-4c08-9174-3de5ff270bdb/links/"
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
data = response.data["results"][0]
self.assertEqual(data["url"], "http://github.com/sinarproject/")
def test_get_person_identifier_link_list_authorized(self):
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.get(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/identifiers/af7c01b5-1c4f-4c08-9174-3de5ff270bdb/links/"
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
data = response.data["results"][0]
self.assertEqual(data["url"], "http://github.com/sinarproject/")
def test_get_person_identifier_link_detail_unauthorized(self):
response = self.client.get(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/identifiers/af7c01b5-1c4f-4c08-9174-3de5ff270bdb/links/9c9a2093-c3eb-4b51-b869-0d3b4ab281fd/"
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["results"]["url"], "http://github.com/sinarproject/")
def test_get_person_identifier_link_detail_not_exist_unauthorized(self):
response = self.client.get(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/identifiers/af7c01b5-1c4f-4c08-9174-3de5ff270bdb/links/not_exist/"
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_get_person_identifier_link_detail_authorized(self):
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.get(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/identifiers/af7c01b5-1c4f-4c08-9174-3de5ff270bdb/links/9c9a2093-c3eb-4b51-b869-0d3b4ab281fd/"
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["results"]["url"], "http://github.com/sinarproject/")
def test_get_person_identifier_link_detail_not_exist_authorized(self):
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.get(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/identifiers/af7c01b5-1c4f-4c08-9174-3de5ff270bdb/links/not_exist/"
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_create_person_identifier_link_unauthorized(self):
data = {
"url": "http://twitter.com/sinarproject"
}
response = self.client.post(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/identifiers/af7c01b5-1c4f-4c08-9174-3de5ff270bdb/links/",
data
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_create_person_identifier_link_authorized(self):
data = {
"url": "http://twitter.com/sinarproject"
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.post(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/identifiers/af7c01b5-1c4f-4c08-9174-3de5ff270bdb/links/",
data
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
person = Person.objects.language("en").get(id="8497ba86-7485-42d2-9596-2ab14520f1f4")
identifier = person.identifiers.language("en").get(id="af7c01b5-1c4f-4c08-9174-3de5ff270bdb")
link = identifier.links.language("en").get(url="http://twitter.com/sinarproject")
self.assertEqual(link.url, "http://twitter.com/sinarproject")
def test_update_person_identifier_link_unauthorized(self):
data = {
"note":"This is a nested link"
}
response = self.client.put(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/identifiers/af7c01b5-1c4f-4c08-9174-3de5ff270bdb/links/9c9a2093-c3eb-4b51-b869-0d3b4ab281fd/",
data
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update_person_identifier_link_not_exist_unauthorized(self):
data = {
"note":"This is a nested link"
}
response = self.client.put(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/identifiers/af7c01b5-1c4f-4c08-9174-3de5ff270bdb/links/not_exist/",
data
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update_person_identifier_link_authorized(self):
data = {
"note":"This is a nested link"
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.put(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/identifiers/af7c01b5-1c4f-4c08-9174-3de5ff270bdb/links/9c9a2093-c3eb-4b51-b869-0d3b4ab281fd/",
data
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# 9c9a2093-c3eb-4b51-b869-0d3b4ab281fd
person = Person.objects.language("en").get(id="8497ba86-7485-42d2-9596-2ab14520f1f4")
identifier = person.identifiers.language("en").get(id="af7c01b5-1c4f-4c08-9174-3de5ff270bdb")
link = identifier.links.language("en").get(id="9c9a2093-c3eb-4b51-b869-0d3b4ab281fd")
self.assertEqual(link.note, "This is a nested link")
def test_update_person_identifier_link_not_exist_authorized(self):
data = {
"note":"This is a nested link"
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.put(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/identifiers/af7c01b5-1c4f-4c08-9174-3de5ff270bdb/links/not_exist/",
data
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_delete_person_identifier_link_unauthorized(self):
response = self.client.delete(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/identifiers/af7c01b5-1c4f-4c08-9174-3de5ff270bdb/links/9c9a2093-c3eb-4b51-b869-0d3b4ab281fd/"
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_delete_person_identifier_link_not_exist_unauthorized(self):
response = self.client.delete(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/identifiers/af7c01b5-1c4f-4c08-9174-3de5ff270bdb/links/not_exist/"
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_delete_person_identifier_link_authorized(self):
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.delete(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/identifiers/af7c01b5-1c4f-4c08-9174-3de5ff270bdb/links/9c9a2093-c3eb-4b51-b869-0d3b4ab281fd/"
)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_delete_person_identifier_link_not_exist_authorized(self):
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.delete(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/identifiers/af7c01b5-1c4f-4c08-9174-3de5ff270bdb/links/not_exist/"
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
# We are going to reuse the existing serializer.
class PersonOtherNameLinkAPITestCase(BasePopitAPITestCase):
def test_list_person_othername_link(self):
response = self.client.get(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/cf93e73f-91b6-4fad-bf76-0782c80297a8/links/"
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_show_person_othername_link_detail_not_exist(self):
response = self.client.get(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/cf93e73f-91b6-4fad-bf76-0782c80297a8/links/not_exist/"
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_show_person_othername_link_detail(self):
response = self.client.get(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/cf93e73f-91b6-4fad-bf76-0782c80297a8/links/4d8d71c4-20ea-4ed1-ae38-4b7d7550cdf6/"
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_create_person_othername_link_unauthorized(self):
data = {
"url": "http://github.com/sinar"
}
response = self.client.post(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/cf93e73f-91b6-4fad-bf76-0782c80297a8/links/",
data
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_create_person_othername_link_authorized(self):
data = {
"url": "http://github.com/sinar"
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.post(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/cf93e73f-91b6-4fad-bf76-0782c80297a8/links/",
data
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_update_person_othername_link_not_exist_unauthorized(self):
data = {
"note": "Just a link"
}
response = self.client.put(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/cf93e73f-91b6-4fad-bf76-0782c80297a8/links/not_exist/",
data
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update_person_othername_link_not_exist_authorized(self):
data = {
"note": "Just a link"
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.put(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/cf93e73f-91b6-4fad-bf76-0782c80297a8/links/not_exist/",
data
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_update_person_othername_link_unauthorized(self):
data = {
"note": "Just a link"
}
response = self.client.put(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/cf93e73f-91b6-4fad-bf76-0782c80297a8/links/4d8d71c4-20ea-4ed1-ae38-4b7d7550cdf6/",
data
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update_person_othername_link_authorized(self):
data = {
"note": "Just a link"
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.put(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/cf93e73f-91b6-4fad-bf76-0782c80297a8/links/4d8d71c4-20ea-4ed1-ae38-4b7d7550cdf6/",
data
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_delete_person_othername_link_not_exist_unauthorized(self):
response = self.client.delete(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/cf93e73f-91b6-4fad-bf76-0782c80297a8/links/not_exist/"
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_delete_person_othername_link_not_exist_authorized(self):
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.delete(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/cf93e73f-91b6-4fad-bf76-0782c80297a8/links/not_exist/"
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_delete_person_othername_link_unauthorized(self):
response = self.client.delete(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/cf93e73f-91b6-4fad-bf76-0782c80297a8/links/4d8d71c4-20ea-4ed1-ae38-4b7d7550cdf6/"
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_delete_person_othername_link_authorized(self):
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.delete(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/othernames/cf93e73f-91b6-4fad-bf76-0782c80297a8/links/4d8d71c4-20ea-4ed1-ae38-4b7d7550cdf6/"
)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
class PersonContactLinkAPITestCase(BasePopitAPITestCase):
def test_list_person_contact_link(self):
response = self.client.get(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/contact_details/2256ec04-2d1d-4994-b1f1-16d3f5245441/links/"
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_show_person_contact_link_not_exist(self):
response = self.client.get(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/contact_details/2256ec04-2d1d-4994-b1f1-16d3f5245441/links/not_exist/"
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_show_person_contact_link(self):
response = self.client.get(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/contact_details/2256ec04-2d1d-4994-b1f1-16d3f5245441/links/6d0afb46-67d4-4708-87c4-4d51ce99767e/"
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_create_person_contact_link_unauthorized(self):
data = {
"url": "http://github.com/sinar"
}
response = self.client.post(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/contact_details/2256ec04-2d1d-4994-b1f1-16d3f5245441/links/",
data
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_create_person_contact_link_authorized(self):
data = {
"url": "http://github.com/sinar"
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.post(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/contact_details/2256ec04-2d1d-4994-b1f1-16d3f5245441/links/",
data
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_update_person_contact_link_not_exist_unauthorized(self):
data = {
"note": "Just a link"
}
response = self.client.put(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/contact_details/2256ec04-2d1d-4994-b1f1-16d3f5245441/links/not_exist/",
data
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update_person_contact_link_not_exist_authorized(self):
data = {
"note": "Just a link"
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.put(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/contact_details/2256ec04-2d1d-4994-b1f1-16d3f5245441/links/not_exist/",
data
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_update_person_contact_link_unauthorized(self):
data = {
"note": "Just a link"
}
response = self.client.put(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/contact_details/2256ec04-2d1d-4994-b1f1-16d3f5245441/links/6d0afb46-67d4-4708-87c4-4d51ce99767e/",
data
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update_person_contact_link_authorized(self):
data = {
"note": "Just a link"
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.put(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/contact_details/2256ec04-2d1d-4994-b1f1-16d3f5245441/links/6d0afb46-67d4-4708-87c4-4d51ce99767e/",
data
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_delete_person_contact_link_not_exist_unauthorized(self):
response = self.client.delete(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/contact_details/2256ec04-2d1d-4994-b1f1-16d3f5245441/links/not_exist/"
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_delete_person_contact_link_not_exist_authorized(self):
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.delete(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/contact_details/2256ec04-2d1d-4994-b1f1-16d3f5245441/links/not_exist/"
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_delete_person_contact_link_unauthorized(self):
response = self.client.delete(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/contact_details/2256ec04-2d1d-4994-b1f1-16d3f5245441/links/6d0afb46-67d4-4708-87c4-4d51ce99767e/"
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_delete_person_contact_link_authorized(self):
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.delete(
"/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/contact_details/2256ec04-2d1d-4994-b1f1-16d3f5245441/links/6d0afb46-67d4-4708-87c4-4d51ce99767e/"
)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) | Sinar/popit_ng | popit/tests/test_person_misc_api.py | Python | agpl-3.0 | 32,087 |
"""
(Future home of) Tests for program enrollment writing Python API.
Currently, we do not directly unit test the functions in api/writing.py.
This is okay for now because they are all used in
`rest_api.v1.views` and are thus tested through `rest_api.v1.tests.test_views`.
Eventually it would be good to test the Python API functions directly and just use
mocks in the view tests.
This file serves as a placeholder and reminder to do that the next time there
is development on the program_enrollments writing API.
"""
from __future__ import absolute_import, unicode_literals
| ESOedX/edx-platform | lms/djangoapps/program_enrollments/api/tests/test_writing.py | Python | agpl-3.0 | 574 |
import datetime
import os
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from comics.core.managers import ComicManager
class Comic(models.Model):
LANGUAGES = (
('en', 'English'),
('no', 'Norwegian'),
)
# Required fields
name = models.CharField(
max_length=100,
help_text='Name of the comic')
slug = models.SlugField(
max_length=100, unique=True,
verbose_name='Short name',
help_text='For file paths and URLs')
language = models.CharField(
max_length=2, choices=LANGUAGES,
help_text='The language of the comic')
# Optional fields
url = models.URLField(
verbose_name='URL', blank=True,
help_text='URL to the official website')
active = models.BooleanField(
default=True,
        help_text='Whether the comic is still being crawled')
start_date = models.DateField(
blank=True, null=True,
help_text='First published at')
end_date = models.DateField(
blank=True, null=True,
help_text='Last published at, if comic has been cancelled')
rights = models.CharField(
max_length=100, blank=True,
help_text='Author, copyright, and/or licensing information')
# Automatically populated fields
added = models.DateTimeField(
auto_now_add=True,
help_text='Time the comic was added to the site')
objects = ComicManager()
class Meta:
db_table = 'comics_comic'
ordering = ['name']
def __unicode__(self):
return self.slug
def get_absolute_url(self):
return reverse('comic_latest', kwargs={'comic_slug': self.slug})
def get_redirect_url(self):
return reverse('comic_website', kwargs={'comic_slug': self.slug})
def is_new(self):
some_time_ago = timezone.now() - datetime.timedelta(
days=settings.COMICS_NUM_DAYS_COMIC_IS_NEW)
return self.added > some_time_ago
class Release(models.Model):
# Required fields
comic = models.ForeignKey(Comic)
pub_date = models.DateField(verbose_name='publication date', db_index=True)
images = models.ManyToManyField('Image', related_name='releases')
# Automatically populated fields
fetched = models.DateTimeField(auto_now_add=True, db_index=True)
class Meta:
db_table = 'comics_release'
get_latest_by = 'pub_date'
def __unicode__(self):
return u'Release %s/%s' % (self.comic.slug, self.pub_date)
def get_absolute_url(self):
return reverse('comic_day', kwargs={
'comic_slug': self.comic.slug,
'year': self.pub_date.year,
'month': self.pub_date.month,
'day': self.pub_date.day,
})
def get_ordered_images(self):
if not getattr(self, '_ordered_images', []):
self._ordered_images = list(self.images.order_by('id'))
return self._ordered_images
# Let all created dirs and files be writable by the group
os.umask(0002)
image_storage = FileSystemStorage(
location=settings.MEDIA_ROOT, base_url=settings.MEDIA_URL)
def image_file_path(instance, filename):
    # e.g. 'xkcd/f/foo.png': group images by comic slug and the first letter of the filename
    return u'%s/%s/%s' % (instance.comic.slug, filename[0], filename)
class Image(models.Model):
# Required fields
comic = models.ForeignKey(Comic)
file = models.ImageField(
storage=image_storage, upload_to=image_file_path,
height_field='height', width_field='width')
checksum = models.CharField(max_length=64, db_index=True)
# Optional fields
title = models.CharField(max_length=255, blank=True)
text = models.TextField(blank=True)
# Automatically populated fields
fetched = models.DateTimeField(auto_now_add=True)
height = models.IntegerField()
width = models.IntegerField()
class Meta:
db_table = 'comics_image'
def __unicode__(self):
return u'Image %s/%s...' % (self.comic.slug, self.checksum[:8])
| datagutten/comics | comics/core/models.py | Python | agpl-3.0 | 4,089 |
#!/usr/bin/python
# -*- coding:utf-8 -*-
from osv import osv,fields
import time
from datetime import datetime
from dateutil import rrule
class qingjia_calendar(osv.osv):
_name='qingjia.calendar'
_columns={
'start_date':fields.datetime('start_date'),
'end_date':fields.datetime('end_date'),
'calendar_line_ids':fields.one2many('qingjia.calendar.line','qingjia_calendar_id','calendar_line_ids'),
'state':fields.selection([('arrange','arrange'),('not arrange','not arrange')],'state',readonly=True)
}
_defaults={
}
def plan_arrange(self,cr,uid,ids,context=None):
        """Fill calendar_line_ids with one 'holiday' line for every Sunday
        between start_date and end_date."""
        my=self.browse(cr,uid,ids[0])
line_obj=self.pool.get('qingjia.calendar.line')
holidays=[]
datas=[]
start_date=time.strptime(my.start_date,'%Y-%m-%d %H:%M:%S')
end_date=time.strptime(my.end_date,'%Y-%m-%d %H:%M:%S')
dt=datetime(start_date.tm_year,start_date.tm_mon,start_date.tm_mday)
unt=datetime(end_date.tm_year,end_date.tm_mon,end_date.tm_mday)
        # Enumerate every Sunday between the start and end dates
        # (dateutil weekday numbering: MO=0 .. SU=6, so byweekday=[6] is Sunday).
        days=rrule.rrule(rrule.DAILY,dtstart=dt,until=unt,byweekday=[6])
ge=days._iter()
for i in range(days.count()):
date_info=ge.next()
date_list=map(str,(date_info.year,date_info.month,date_info.day))
date='-'.join(date_list)
holidays.append(date)
        # Link an existing arranged holiday line for the date if there is one
        # (one2many command (4, id)); otherwise create a new line ((0, 0, vals)).
        for day in holidays:
line_search=line_obj.search(cr,uid,[('date','=',day),('type','=','holiday'),('state','=','arrange')])
if line_search:
datas.append((4,line_search[0]))
else:
datas.append((0,0,{'date':day,'type':'holiday','state':'arrange','name':'holiday'}))
self.write(cr,uid,ids,{'calendar_line_ids':datas})
return True
qingjia_calendar()
class qingjia_calendar_line(osv.osv):
_name='qingjia.calendar.line'
_columns={
'qingjia_calendar_id':fields.many2one('qingjia.calendar','qingjia_calendar_id'),
'name':fields.char('type',size=64),
'date':fields.datetime('date'),
'type':fields.selection([('work','Work'),('holiday','Holiday')],'type',),
'state':fields.selection([('arrange','arrange'),('not arrange','not arrange')],'state'),
'is_holiday':fields.boolean('is_holiday'),
'note':fields.char('note',size=128),
}
_defaults={'type':'work'}
def onchange_type(self,cr,uid,ids,res,context=None):
if res:
print res,'res'
return {'value':{'name':res}}
qingjia_calendar_line()
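# Illustrative sketch (not part of the original module): the rrule pattern used by
# qingjia_calendar.plan_arrange() above, shown standalone with arbitrary dates.
# byweekday=[6] selects Sundays (dateutil numbers weekdays MO=0 .. SU=6).
if __name__ == '__main__':
    demo_days = rrule.rrule(rrule.DAILY,
                            dtstart=datetime(2014, 1, 1),
                            until=datetime(2014, 1, 31),
                            byweekday=[6])
    for demo_day in demo_days:
        print '-'.join(map(str, (demo_day.year, demo_day.month, demo_day.day)))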
| alangwansui/mtl_ordercenter | openerp/addons/001_qingjia/qingjia_calendar.py | Python | agpl-3.0 | 2,648 |
__author__ = 'sweemeng'
from rest_framework import status
from popit.signals.handlers import *
from popit.models import *
from django.conf import settings
import json
import logging
from popit.tests.base_testcase import BasePopitTestCase
from popit.tests.base_testcase import BasePopitAPITestCase
from popit.serializers.minimized import MinPersonSerializer
# TODO: Test multilingual behavior, to make the expected behavior clear
# TODO: Need new fixtures
class PersonSerializerTestCase(BasePopitTestCase):
def test_fetch_non_empty_field_person_serializer(self):
person = Person.objects.untranslated().get(id='8497ba86-7485-42d2-9596-2ab14520f1f4')
serializer = PersonSerializer(person, language='en')
data = serializer.data
self.assertEqual(data["name"], "John")
def test_fetch_empty_field_person_serializer(self):
person = Person.objects.untranslated().get(id='ab1a5788e5bae955c048748fa6af0e97')
serializer = PersonSerializer(person, language='en')
data = serializer.data
self.assertEqual(data["given_name"], "")
def test_fetch_not_empty_relation_person_serializer(self):
person = Person.objects.untranslated().get(id='8497ba86-7485-42d2-9596-2ab14520f1f4')
serializer = PersonSerializer(person, language='en')
data = serializer.data
self.assertTrue(data["other_names"])
def test_fetch_empty_relation_person_serializer(self):
person = Person.objects.untranslated().get(id='078541c9-9081-4082-b28f-29cbb64440cb')
serializer = PersonSerializer(person, language='en')
data = serializer.data
self.assertFalse(data["other_names"])
def test_create_person_with_all_field_serializer(self):
person_data = {
"name": "joe",
"family_name": "doe",
"given_name": "joe jambul",
"additional_name": "not john doe",
"gender": "unknown",
"summary": "person unit test api",
"honorific_prefix": "Chief",
"honorific_suffix": "of the fake people league",
"biography": "He does not exists!!!!",
"birth_date": "1950-01-01",
"death_data": "2000-01-01",
"email": "joejambul@sinarproject.org",
"contact_details":[
{
"type":"twitter",
"value": "sinarproject",
}
],
"links":[
{
"url":"http://sinarproject.org",
}
],
"identifiers":[
{
"identifier": "9089098098",
"scheme": "rakyat",
}
],
"other_names":[
{
"name":"Jane",
"family_name":"Jambul",
"start_date": "1950-01-01",
"end_date": "2010-01-01",
}
]
}
person_serial = PersonSerializer(data=person_data, language='en')
person_serial.is_valid()
self.assertEqual(person_serial.errors, {})
person_serial.save()
person = Person.objects.language("en").get(name="joe")
self.assertEqual(person.given_name, "joe jambul")
def test_update_person_serializer(self):
person_data = {
"given_name": "jerry jambul",
}
person = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
person_serializer = PersonSerializer(person, data=person_data, partial=True, language='en')
person_serializer.is_valid()
self.assertEqual(person_serializer.errors, {})
person_serializer.save()
person_ = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
self.assertEqual(person_.given_name, "jerry jambul")
def test_create_links_person_serializers(self):
person_data = {
"links": [
{
"url": "http://twitter.com/sweemeng",
}
]
}
person = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
person_serializer = PersonSerializer(person, data=person_data, partial=True, language='en')
person_serializer.is_valid()
self.assertEqual(person_serializer.errors, {})
person_serializer.save()
person_ = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
url = person_.links.language("en").get(url="http://twitter.com/sweemeng")
self.assertEqual(url.url, "http://twitter.com/sweemeng")
def test_update_links_person_serializers(self):
# links id a4ffa24a9ef3cbcb8cfaa178c9329367
person_data = {
"id":"ab1a5788e5bae955c048748fa6af0e97",
"links":[
{
"id": "a4ffa24a9ef3cbcb8cfaa178c9329367",
"note": "just a random repo"
}
]
}
person = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
person_serializer = PersonSerializer(person, data=person_data, partial=True, language="en")
person_serializer.is_valid()
self.assertEqual(person_serializer.errors, {})
person_serializer.save()
person_ = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
url = person_.links.language("en").get(id="a4ffa24a9ef3cbcb8cfaa178c9329367")
self.assertEqual(url.note, "just a random repo")
def test_update_create_nested_links_persons_serializer(self):
person_data = {
"id":"ab1a5788e5bae955c048748fa6af0e97",
"contact_details":[
{
"id": "a66cb422-eec3-4861-bae1-a64ae5dbde61",
"links": [{
"url": "http://facebook.com",
}]
}
],
}
person = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
person_serializer = PersonSerializer(person, data=person_data, partial=True, language='en')
person_serializer.is_valid()
self.assertEqual(person_serializer.errors, {})
person_serializer.save()
person_ = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
        # There should be only one link in that contact
contact = person_.contact_details.language('en').get(id='a66cb422-eec3-4861-bae1-a64ae5dbde61')
links = contact.links.language('en').filter(url="http://sinarproject.org")
self.assertEqual(links[0].url, "http://sinarproject.org")
def test_update_update_nested_links_person_serializer(self):
person_data = {
"id":"8497ba86-7485-42d2-9596-2ab14520f1f4",
"identifiers":[
{
"id": "af7c01b5-1c4f-4c08-9174-3de5ff270bdb",
"links": [{
"id": "9c9a2093-c3eb-4b51-b869-0d3b4ab281fd",
"note": "this is just a test note",
}]
}
],
}
person = Person.objects.language('en').get(id='8497ba86-7485-42d2-9596-2ab14520f1f4')
person_serializer = PersonSerializer(person, data=person_data, partial=True, language='en')
person_serializer.is_valid()
self.assertEqual(person_serializer.errors, {})
person_serializer.save()
person_ = Person.objects.language('en').get(id='8497ba86-7485-42d2-9596-2ab14520f1f4')
identifier = person_.identifiers.language('en').get(id="af7c01b5-1c4f-4c08-9174-3de5ff270bdb")
link = identifier.links.language('en').get(id="9c9a2093-c3eb-4b51-b869-0d3b4ab281fd")
self.assertEqual(link.note, "this is just a test note")
def test_create_identifier_person_serializer(self):
person_data = {
"identifiers": [
{
"scheme": "IC",
"identifier": "129031309",
}
]
}
person = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
person_serializer = PersonSerializer(person, data=person_data, partial=True, language='en')
person_serializer.is_valid()
self.assertEqual(person_serializer.errors, {})
person_serializer.save()
person_ = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
identifier = person_.identifiers.language('en').get(identifier="129031309")
self.assertEqual(identifier.scheme, "IC")
def test_update_identifier_person_serializer(self):
person_data = {
"identifiers": [
{
"id": "34b59cb9-607a-43c7-9d13-dfe258790ebf",
"identifier": "53110322",
}
]
}
person = Person.objects.language('en').get(id='8497ba86-7485-42d2-9596-2ab14520f1f4')
person_serializer = PersonSerializer(person, data=person_data, partial=True, language="en")
person_serializer.is_valid()
self.assertEqual(person_serializer.errors, {})
person_serializer.save()
person_ = Person.objects.language('en').get(id='8497ba86-7485-42d2-9596-2ab14520f1f4')
identifier = person_.identifiers.language('en').get(id="34b59cb9-607a-43c7-9d13-dfe258790ebf")
self.assertEqual(identifier.identifier, '53110322')
def test_create_contact_person_serializer(self):
person_data = {
"contact_details": [
{
"type":"twitter",
"value": "sinarproject",
}
]
}
person = Person.objects.language('en').get(id='8497ba86-7485-42d2-9596-2ab14520f1f4')
person_serializer = PersonSerializer(person, data=person_data, partial=True, language='en')
person_serializer.is_valid()
self.assertEqual(person_serializer.errors, {})
person_serializer.save()
person_ = Person.objects.language('en').get(id='8497ba86-7485-42d2-9596-2ab14520f1f4')
contact = person_.contact_details.language('en').get(type="twitter")
self.assertEqual(contact.value, "sinarproject")
def test_update_contact_person_serializer(self):
person_data = {
"contact_details": [
{
"id": "a66cb422-eec3-4861-bae1-a64ae5dbde61",
"value": "0123421222",
}
]
}
person = Person.objects.untranslated().get(id='ab1a5788e5bae955c048748fa6af0e97')
person_serializer = PersonSerializer(person, data=person_data, partial=True, language="en")
person_serializer.is_valid()
self.assertEqual(person_serializer.errors, {})
person_serializer.save()
person_ = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
contact = person_.contact_details.language('en').get(id="a66cb422-eec3-4861-bae1-a64ae5dbde61")
self.assertEqual(contact.value, "0123421222")
def test_create_other_name_person_serializer(self):
person_data = {
"other_names": [
{
"name": "jane",
"family_name": "jambul",
"given_name": "test person",
"start_date": "1950-01-01",
"end_date": "2010-01-01",
}
]
}
person = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
person_serializer = PersonSerializer(person, data=person_data, partial=True, language='en')
person_serializer.is_valid()
self.assertEqual(person_serializer.errors, {})
person_serializer.save()
person_ = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
other_name = person_.other_names.language('en').get(name="jane")
self.assertEqual(other_name.given_name, "test person")
def test_update_other_person_serializer(self):
person_data = {
"other_names": [
{
"id": "cf93e73f-91b6-4fad-bf76-0782c80297a8",
"family_name": "jambul",
}
]
}
person = Person.objects.language('en').get(id='8497ba86-7485-42d2-9596-2ab14520f1f4')
person_serializer = PersonSerializer(person, data=person_data, partial=True, language='en')
person_serializer.is_valid()
self.assertEqual(person_serializer.errors, {})
person_serializer.save()
person_ = Person.objects.language('en').get(id='8497ba86-7485-42d2-9596-2ab14520f1f4')
other_name = person_.other_names.language('en').get(id="cf93e73f-91b6-4fad-bf76-0782c80297a8")
self.assertEqual(other_name.family_name, "jambul")
def test_create_person_invalid_date_serializer(self):
person_data = {
"name": "joe",
"family_name": "doe",
"given_name": "joe jambul",
"additional_name": "not john doe",
"gender": "unknown",
"summary": "person unit test api",
"honorific_prefix": "Chief",
"honorific_suffix": "of the fake people league",
"biography": "He does not exists!!!!",
"birth_date": "invalid date",
"death_data": "invalid date",
"email": "joejambul@sinarproject.org",
}
person_serial = PersonSerializer(data=person_data, language='en')
person_serial.is_valid()
self.assertNotEqual(person_serial.errors, {})
def test_update_person_translated_serializer(self):
person_data = {
"given_name": "jerry jambul",
}
person = Person.objects.language("ms").get(id='ab1a5788e5bae955c048748fa6af0e97')
person_serializer = PersonSerializer(person, data=person_data, partial=True, language='ms')
person_serializer.is_valid()
self.assertEqual(person_serializer.errors, {})
person_serializer.save()
person_ = Person.objects.language('ms').get(id='ab1a5788e5bae955c048748fa6af0e97')
self.assertEqual(person_.given_name, "jerry jambul")
def test_create_person_translated_serializer(self):
person_data = {
"name": "joe",
"family_name": "doe",
"given_name": "joe jambul",
"additional_name": "bukan john doe",
"gender": "tak tahu",
"summary": "orang ujian",
"honorific_prefix": "Datuk Seri",
"biography": "Dia Tak wujud!!!!",
"email": "joejambul@sinarproject.org",
}
person_serial = PersonSerializer(data=person_data, language='ms')
person_serial.is_valid()
self.assertEqual(person_serial.errors, {})
person_serial.save()
person = Person.objects.language("ms").get(name="joe")
self.assertEqual(person.given_name, "joe jambul")
def test_load_translated_person_membership(self):
person = Person.objects.untranslated().get(id="078541c9-9081-4082-b28f-29cbb64440cb")
person_serializer = PersonSerializer(person, language="ms")
data = person_serializer.data
for membership in data["memberships"]:
self.assertEqual(membership["language_code"], "ms")
def test_load_translated_person_membership_organization(self):
person = Person.objects.untranslated().get(id="078541c9-9081-4082-b28f-29cbb64440cb")
person_serializer = PersonSerializer(person, language="ms")
data = person_serializer.data
for membership in data["memberships"]:
if membership["organization"]:
self.assertEqual(membership["organization"]["language_code"], "ms")
def test_fetch_person_membership_on_behalf_of_expanded(self):
person = Person.objects.untranslated().get(id="2439e472-10dc-4f9c-aa99-efddd9046b4a")
person_serializer = PersonSerializer(person, language="en")
data = person_serializer.data
self.assertEqual(data["memberships"][0]["on_behalf_of"]["id"], "3d62d9ea-0600-4f29-8ce6-f7720fd49aa3")
def test_fetch_person_minimized_serializer(self):
person = Person.objects.untranslated().get(id='8497ba86-7485-42d2-9596-2ab14520f1f4')
person_serializer = MinPersonSerializer(person)
membership_count = person.memberships.count()
self.assertTrue(len(person_serializer.data["memberships"]), membership_count)
def test_update_person_serializer_null_value(self):
person = Person.objects.untranslated().get(id="ab1a5788e5bae955c048748fa6af0e97")
data = {
"biography": None,
}
person_serializer = PersonSerializer(person, data=data, partial=True, language='en')
person_serializer.is_valid()
self.assertEqual(person_serializer.errors, {})
person_serializer.save()
person = Person.objects.language("en").get(id="ab1a5788e5bae955c048748fa6af0e97")
self.assertEqual(person.biography, None)
# The test client is set to send JSON instead of multipart form data; maybe we should set the format explicitly.
class PersonAPITestCase(BasePopitAPITestCase):
def test_view_person_list(self):
response = self.client.get("/en/persons/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue("page" in response.data)
self.assertEqual(response.data["per_page"], settings.REST_FRAMEWORK["PAGE_SIZE"])
self.assertEqual(response.data["num_pages"], 1)
def test_view_person_detail(self):
person = Person.objects.language("en").get(id="8497ba86-7485-42d2-9596-2ab14520f1f4")
response = self.client.get("/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
data = response.data
self.assertEqual(data["result"]["name"], "John")
self.assertTrue("memberships" in response.data["result"])
def test_view_person_detail_not_exist(self):
response = self.client.get("/en/persons/not_exist/")
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_create_person_unauthorized(self):
person_data = {
"name": "joe",
"family_name": "doe",
"given_name": "joe jambul",
"additional_name": "not john doe",
"gender": "unknown",
"summary": "person unit test api",
"honorific_prefix": "Chief",
"honorific_suffix": "of the fake people league",
"biography": "He does not exists!!!!",
"birth_date": "1950-01-01",
"death_data": "2000-01-01",
"email": "joejambul@sinarproject.org",
"contact_details":[
{
"type":"twitter",
"value": "sinarproject",
}
],
"links":[
{
"url":"http://sinarproject.org",
}
],
"identifiers":[
{
"identifier": "9089098098",
"scheme": "rakyat",
}
],
"other_names":[
{
"name":"Jane",
"family_name":"Jambul",
"start_date": "1950-01-01",
"end_date": "2010-01-01",
}
]
}
response = self.client.post("/en/persons/", person_data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_create_person_authorized(self):
person_data = {
"name": "joe",
"family_name": "doe",
"given_name": "joe jambul",
"additional_name": "not john doe",
"gender": "unknown",
"summary": "person unit test api",
"honorific_prefix": "Chief",
"honorific_suffix": "of the fake people league",
"biography": "He does not exists!!!!",
"birth_date": "1950-01-01",
"death_data": "2000-01-01",
"email": "joejambul@sinarproject.org",
"contact_details":[
{
"type":"twitter",
"value": "sinarproject",
}
],
"links":[
{
"url":"http://sinarproject.org",
}
],
"identifiers":[
{
"identifier": "9089098098",
"scheme": "rakyat",
}
],
"other_names":[
{
"name":"Jane",
"family_name":"Jambul",
"start_date": "1950-01-01",
"end_date": "2010-01-01",
}
]
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.post("/en/persons/", person_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
person = Person.objects.language("en").get(name="joe")
self.assertEqual(person.name, "joe")
def test_update_person_unauthorized(self):
person_data = {
"given_name": "jerry jambul",
}
response = self.client.put("/en/persons/ab1a5788e5bae955c048748fa6af0e97/", person_data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update_person_not_exist_unauthorized(self):
person_data = {
"given_name": "jerry jambul",
}
response = self.client.put("/en/persons/not_exist/", person_data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update_person_authorized(self):
person_data = {
"given_name": "jerry jambul",
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.put("/en/persons/ab1a5788e5bae955c048748fa6af0e97/", person_data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
person_ = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
self.assertEqual(person_.given_name, "jerry jambul")
def test_update_person_not_exist_authorized(self):
person_data = {
"given_name": "jerry jambul",
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.put("/en/persons/not_exist/", person_data)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_create_person_links_unauthorized(self):
person_data = {
"links": [
{
"url": "http://twitter.com/sweemeng",
}
]
}
response = self.client.put("/en/persons/ab1a5788e5bae955c048748fa6af0e97/", person_data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_create_person_links_authorized(self):
person_data = {
"links": [
{
"url": "http://twitter.com/sweemeng",
}
]
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.put("/en/persons/ab1a5788e5bae955c048748fa6af0e97/", person_data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
person_ = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
url = person_.links.language("en").get(url="http://twitter.com/sweemeng")
self.assertEqual(url.url, "http://twitter.com/sweemeng")
def test_update_person_links_unauthorized(self):
person_data = {
"id":"ab1a5788e5bae955c048748fa6af0e97",
"links":[
{
"id": "a4ffa24a9ef3cbcb8cfaa178c9329367",
"note": "just a random repo"
}
]
}
response = self.client.put("/en/persons/ab1a5788e5bae955c048748fa6af0e97/", person_data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update_person_links_authorized(self):
person_data = {
"id":"ab1a5788e5bae955c048748fa6af0e97",
"links":[
{
"id": "a4ffa24a9ef3cbcb8cfaa178c9329367",
"note": "just a random repo"
}
]
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.put("/en/persons/ab1a5788e5bae955c048748fa6af0e97/", person_data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
person_ = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
url = person_.links.language("en").get(id="a4ffa24a9ef3cbcb8cfaa178c9329367")
self.assertEqual(url.note, "just a random repo")
def test_create_nested_person_links_unauthorized(self):
person_data = {
"id":"ab1a5788e5bae955c048748fa6af0e97",
"contact_details":[
{
"id": "a66cb422-eec3-4861-bae1-a64ae5dbde61",
"links": [{
"url": "http://facebook.com",
}]
}
],
}
response = self.client.put("/en/persons/ab1a5788e5bae955c048748fa6af0e97/", person_data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_create_nested_person_links_authorized(self):
person_data = {
"id":"ab1a5788e5bae955c048748fa6af0e97",
"contact_details":[
{
"id": "a66cb422-eec3-4861-bae1-a64ae5dbde61",
"links": [{
"url": "http://facebook.com",
}]
}
],
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.put("/en/persons/ab1a5788e5bae955c048748fa6af0e97/", person_data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
person_ = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
        # There should be only one link in that contact
contact = person_.contact_details.language('en').get(id='a66cb422-eec3-4861-bae1-a64ae5dbde61')
links = contact.links.language('en').all()
check = False
for i in links:
if i.url == "http://sinarproject.org":
check = True
self.assertTrue(check, "http://sinarproject.org does not exist")
def test_update_nested_person_links_unauthorized(self):
person_data = {
"id":"8497ba86-7485-42d2-9596-2ab14520f1f4",
"identifiers":[
{
"id": "af7c01b5-1c4f-4c08-9174-3de5ff270bdb",
"links": [{
"id": "9c9a2093-c3eb-4b51-b869-0d3b4ab281fd",
"note": "this is just a test note",
}]
}
],
}
response = self.client.put("/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/", person_data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update_nested_person_links_authorized(self):
person_data = {
"id":"8497ba86-7485-42d2-9596-2ab14520f1f4",
"identifiers":[
{
"id": "af7c01b5-1c4f-4c08-9174-3de5ff270bdb",
"links": [{
"id": "9c9a2093-c3eb-4b51-b869-0d3b4ab281fd",
"note": "this is just a test note",
}]
}
],
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.put("/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/", person_data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
person_ = Person.objects.language('en').get(id='8497ba86-7485-42d2-9596-2ab14520f1f4')
identifier = person_.identifiers.language('en').get(id="af7c01b5-1c4f-4c08-9174-3de5ff270bdb")
link = identifier.links.language('en').get(id="9c9a2093-c3eb-4b51-b869-0d3b4ab281fd")
self.assertEqual(link.note, "this is just a test note")
def test_create_other_names_unauthorized(self):
person_data = {
"other_names": [
{
"name": "jane",
"family_name": "jambul",
"given_name": "test person",
"start_date": "1950-01-01",
"end_date": "2010-01-01",
}
]
}
response = self.client.put("/en/persons/ab1a5788e5bae955c048748fa6af0e97/", person_data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_create_other_names_authorized(self):
person_data = {
"other_names": [
{
"name": "jane",
"family_name": "jambul",
"given_name": "test person",
"start_date": "1950-01-01",
"end_date": "2010-01-01",
}
]
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.put("/en/persons/ab1a5788e5bae955c048748fa6af0e97/", person_data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
person_ = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
other_name = person_.other_names.language('en').get(name="jane")
self.assertEqual(other_name.given_name, "test person")
def test_update_other_names_unauthorized(self):
person_data = {
"other_names": [
{
"id": "cf93e73f-91b6-4fad-bf76-0782c80297a8",
"family_name": "jambul",
}
]
}
response = self.client.put("/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/", person_data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update_other_names_authorized(self):
person_data = {
"other_names": [
{
"id": "cf93e73f-91b6-4fad-bf76-0782c80297a8",
"family_name": "jambul",
}
]
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.put("/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/", person_data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
person_ = Person.objects.language('en').get(id='8497ba86-7485-42d2-9596-2ab14520f1f4')
other_name = person_.other_names.language('en').get(id="cf93e73f-91b6-4fad-bf76-0782c80297a8")
self.assertEqual(other_name.family_name, "jambul")
def test_delete_persons_unauthorized(self):
response = self.client.delete("/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/")
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_delete_person_not_exist_unauthorized(self):
response = self.client.delete("/en/persons/not_exist/")
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_delete_persons_authorized(self):
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.delete("/en/persons/8497ba86-7485-42d2-9596-2ab14520f1f4/")
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_delete_person_not_exist_authorized(self):
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.delete("/en/persons/not_exist/")
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_person_api_summary_more_than_255(self):
raw_data = """
{
"result":
{
"proxy_image": "https://sinar-malaysia.popit.mysociety.org/image-proxy/http%3A%2F%2Fupload.wikimedia.org%2Fwikipedia%2Fcommons%2Fthumb%2F0%2F05%2FAnwar_Ibrahim.jpg%2F398px-Anwar_Ibrahim.jpg",
"image": "http://upload.wikimedia.org/wikipedia/commons/thumb/0/05/Anwar_Ibrahim.jpg/398px-Anwar_Ibrahim.jpg",
"html_url": "https://sinar-malaysia.popit.mysociety.org/persons/53630562f1eab6270da6c8ed",
"url": "https://sinar-malaysia.popit.mysociety.org/api/v0.1/persons/53630562f1eab6270da6c8ed",
"birth_date": "1947-08-10",
"death_date": null,
"id": "53630562f1eab6270da6c8ed",
"name": "Anwar Ibrahim",
"summary": "Dato' Seri Anwar Bin Ibrahim[1] (born 10 August 1947) is a Malaysian politician. He is the Leader of Opposition of Malaysia (Pakatan Rakyat), economic advisor to the state government of Selangor[2] and de facto leader of PKR (KeADILan). He served as the Deputy Prime Minister of Malaysia from 1993 to 1998 and Finance Minister from 1991 to 1998 when he was in UMNO, a major party in ruling Barisan Nasional coaltion.",
"images":
[
{
"proxy_url": "https://sinar-malaysia.popit.mysociety.org/image-proxy/http%3A%2F%2Fupload.wikimedia.org%2Fwikipedia%2Fcommons%2Fthumb%2F0%2F05%2FAnwar_Ibrahim.jpg%2F398px-Anwar_Ibrahim.jpg",
"created": "",
"url": "http://upload.wikimedia.org/wikipedia/commons/thumb/0/05/Anwar_Ibrahim.jpg/398px-Anwar_Ibrahim.jpg",
"id": "536305bef1eab6270da6c8ee"
}
],
"memberships":
[
{
"contact_details": [ ],
"links": [ ],
"images": [ ],
"url": "https://sinar-malaysia.popit.mysociety.org/api/v0.1/memberships/53630b0619ee29270d8a9e5e",
"start_date": null,
"role": "",
"post_id": null,
"person_id": "53630562f1eab6270da6c8ed",
"organization_id": "536309c319ee29270d8a9e26",
"label": null,
"id": "53630b0619ee29270d8a9e5e",
"html_url": "https://sinar-malaysia.popit.mysociety.org/memberships/53630b0619ee29270d8a9e5e",
"end_date": null,
"area_name": null,
"area_id": null
},
{
"contact_details": [ ],
"links": [ ],
"images": [ ],
"id": "53633d8319ee29270d8a9ed5",
"person_id": "53630562f1eab6270da6c8ed",
"end_date": "2013-05-05",
"start_date": "2008-08-26",
"label": null,
"post_id": "53633d1719ee29270d8a9ed4",
"role": "Opposition Leader",
"organization_id": "53633b5a19ee29270d8a9ecf",
"url": "https://sinar-malaysia.popit.mysociety.org/api/v0.1/memberships/53633d8319ee29270d8a9ed5",
"html_url": "https://sinar-malaysia.popit.mysociety.org/memberships/53633d8319ee29270d8a9ed5"
},
{
"contact_details": [ ],
"links": [ ],
"images": [ ],
"end_date": null,
"id": "5535e892aea781383fa79402",
"post_id": "545e4d5b5222837c2c05988b",
"start_date": "2013",
"role": "Parliamentary Candidate",
"organization_id": "545de8665222837c2c0586c0",
"person_id": "53630562f1eab6270da6c8ed",
"url": "https://sinar-malaysia.popit.mysociety.org/api/v0.1/memberships/5535e892aea781383fa79402",
"html_url": "https://sinar-malaysia.popit.mysociety.org/memberships/5535e892aea781383fa79402"
}
],
"links": [ ],
"contact_details": [ ],
"identifiers": [ ],
"other_names":
[
{
"name": "Dato' Seri Anwar Bin Ibrahim",
"note": "With honorifics.",
"id": "55653036561fa5421bb7bd20"
},
{
"name": "Anwar Bin Ibrahim",
"id": "55653036561fa5421bb7bd1f"
}
]
}
}
"""
data = json.loads(raw_data)
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.post("/en/persons/", data["result"])
logging.warn(response.data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_person_api_invalid_date(self):
person_data = {
"name": "joe",
"family_name": "doe",
"given_name": "joe jambul",
"additional_name": "not john doe",
"gender": "unknown",
"summary": "person unit test api",
"honorific_prefix": "Chief",
"honorific_suffix": "of the fake people league",
"biography": "He does not exists!!!!",
"birth_date": "invalid date",
"death_date": "invalid date",
"email": "joejambul@sinarproject.org",
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.post("/en/persons/", person_data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue("errors" in response.data)
def test_update_person_authorized_translated(self):
person_data = {
"given_name": "jerry jambul",
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.put("/ms/persons/ab1a5788e5bae955c048748fa6af0e97/", person_data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
person_ = Person.objects.language('ms').get(id='ab1a5788e5bae955c048748fa6af0e97')
self.assertEqual(person_.given_name, "jerry jambul")
def test_create_person_authorized_translated(self):
person_data = {
"name": "joe",
"family_name": "doe",
"given_name": "joe jambul",
"additional_name": "bukan john doe",
"gender": "tak tahu",
"summary": "orang ujian",
"honorific_prefix": "Datuk Seri",
"biography": "Dia Tak wujud!!!!",
"email": "joejambul@sinarproject.org",
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.post("/ms/persons/", person_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
person = Person.objects.language("ms").get(name="joe")
self.assertEqual(person.name, "joe")
def test_create_person_othername_blank_id_authorized(self):
person_data = {
"name": "joe",
"family_name": "doe",
"given_name": "joe jambul",
"additional_name": "not john doe",
"gender": "unknown",
"summary": "person unit test api",
"honorific_prefix": "Chief",
"honorific_suffix": "of the fake people league",
"biography": "He does not exists!!!!",
"birth_date": "1950-01-01",
"death_data": "2000-01-01",
"email": "joejambul@sinarproject.org",
"other_names":[
{
"id": "",
"name":"Jane",
"family_name":"Jambul",
"start_date": "1950-01-01",
"end_date": "2010-01-01",
}
]
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.post("/en/persons/", person_data)
logging.warn(response.data["result"]["other_names"])
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
other_names = response.data["result"]["other_names"][0]
self.assertNotEqual(other_names["id"], "")
def test_create_person_identifier_blank_id_authorized(self):
person_data = {
"name": "joe",
"family_name": "doe",
"given_name": "joe jambul",
"additional_name": "not john doe",
"gender": "unknown",
"summary": "person unit test api",
"honorific_prefix": "Chief",
"honorific_suffix": "of the fake people league",
"biography": "He does not exists!!!!",
"birth_date": "1950-01-01",
"death_data": "2000-01-01",
"email": "joejambul@sinarproject.org",
"identifiers":[
{
"id": "",
"identifier": "9089098098",
"scheme": "rakyat",
}
],
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.post("/en/persons/", person_data)
logging.warn(response.data["result"]["other_names"])
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
identifiers = response.data["result"]["identifiers"][0]
self.assertNotEqual(identifiers["id"], "")
def test_create_person_contact_details_blank_id_authorized(self):
person_data = {
"name": "joe",
"family_name": "doe",
"given_name": "joe jambul",
"additional_name": "not john doe",
"gender": "unknown",
"summary": "person unit test api",
"honorific_prefix": "Chief",
"honorific_suffix": "of the fake people league",
"biography": "He does not exists!!!!",
"birth_date": "1950-01-01",
"death_data": "2000-01-01",
"email": "joejambul@sinarproject.org",
"contact_details":[
{
"id": "",
"type":"twitter",
"value": "sinarproject",
}
],
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.post("/en/persons/", person_data)
logging.warn(response.data["result"]["other_names"])
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
contact_details = response.data["result"]["contact_details"][0]
self.assertNotEqual(contact_details["id"], "")
def test_create_person_links_blank_id_authorized(self):
person_data = {
"name": "joe",
"family_name": "doe",
"given_name": "joe jambul",
"additional_name": "not john doe",
"gender": "unknown",
"summary": "person unit test api",
"honorific_prefix": "Chief",
"honorific_suffix": "of the fake people league",
"biography": "He does not exists!!!!",
"birth_date": "1950-01-01",
"death_data": "2000-01-01",
"email": "joejambul@sinarproject.org",
"links":[
{
"id": "",
"url":"http://sinarproject.org",
}
],
}
token = Token.objects.get(user__username="admin")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.post("/en/persons/", person_data)
logging.warn(response.data["result"]["other_names"])
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
links = response.data["result"]["links"][0]
self.assertNotEqual(links["id"], "")
def test_create_person_with_all_field_blank_id_serializer(self):
person_data = {
"id": "",
"name": "joe",
"family_name": "doe",
"given_name": "joe jambul",
"additional_name": "not john doe",
"gender": "unknown",
"summary": "person unit test api",
"honorific_prefix": "Chief",
"honorific_suffix": "of the fake people league",
"biography": "He does not exists!!!!",
"birth_date": "1950-01-01",
"death_data": "2000-01-01",
"email": "joejambul@sinarproject.org",
"contact_details":[
{
"type":"twitter",
"value": "sinarproject",
}
],
"links":[
{
"url":"http://sinarproject.org",
}
],
"identifiers":[
{
"identifier": "9089098098",
"scheme": "rakyat",
}
],
"other_names":[
{
"name":"Jane",
"family_name":"Jambul",
"start_date": "1950-01-01",
"end_date": "2010-01-01",
}
]
}
person_serial = PersonSerializer(data=person_data, language='en')
person_serial.is_valid()
self.assertEqual(person_serial.errors, {})
person_serial.save()
person = Person.objects.language("en").get(name="joe")
self.assertEqual(person.given_name, "joe jambul")
def test_create_person_with_all_field_birthdate_deathdate_blank_serializer(self):
person_data = {
"name": "joe",
"family_name": "doe",
"given_name": "joe jambul",
"additional_name": "not john doe",
"gender": "unknown",
"summary": "person unit test api",
"honorific_prefix": "Chief",
"honorific_suffix": "of the fake people league",
"biography": "He does not exists!!!!",
"birth_date": "",
"death_data": "",
"email": "joejambul@sinarproject.org",
"contact_details":[
{
"type":"twitter",
"value": "sinarproject",
}
],
"links":[
{
"url":"http://sinarproject.org",
}
],
"identifiers":[
{
"identifier": "9089098098",
"scheme": "rakyat",
}
],
"other_names":[
{
"name":"Jane",
"family_name":"Jambul",
"start_date": "1950-01-01",
"end_date": "2010-01-01",
}
]
}
person_serial = PersonSerializer(data=person_data, language='en')
person_serial.is_valid()
self.assertEqual(person_serial.errors, {})
person_serial.save()
person = Person.objects.language("en").get(name="joe")
self.assertEqual(person.given_name, "joe jambul")
def test_minify_person_api(self):
response = self.client.get("/en/persons/ab1a5788e5bae955c048748fa6af0e97", {"minify":"True"})
person = Person.objects.get(id="ab1a5788e5bae955c048748fa6af0e97")
membership_count = person.memberships.count()
self.assertEqual(len(response.data["result"]["memberships"]), membership_count)
| Sinar/popit_ng | popit/tests/tests_person_api.py | Python | agpl-3.0 | 50,109 |
import pytest
from gaphas.painter import BoundingBoxPainter
from gaphas.view import GtkView
from gaphor.diagram.painter import ItemPainter
from gaphor.diagram.selection import Selection
from gaphor.diagram.tests.fixtures import diagram, element_factory, event_manager
@pytest.fixture
def view(diagram):
view = GtkView(model=diagram, selection=Selection())
view._qtree.resize((-100, -100, 400, 400))
item_painter = ItemPainter(view.selection)
view.painter = item_painter
view.bounding_box_painter = BoundingBoxPainter(item_painter)
return view
| amolenaar/gaphor | gaphor/diagram/diagramtools/tests/conftest.py | Python | lgpl-2.1 | 570 |
import json
import numpy as np
from glob import glob
inputs = {
'xml_file_path' : "./data/single_wavelength_copy",
'file_set' : {'p38' : glob( "./data/single_wavelength_copy/*.xml")},
'section' : '280_480_TOP_120',
'ligand_order' : ['Bosutinib','Bosutinib Isomer','Erlotinib','Gefitinib','Ponatinib','Lapatinib','Saracatinib','Vandetanib'],
'Lstated' : np.array([20.0e-6,14.0e-6,9.82e-6,6.88e-6,4.82e-6,3.38e-6,2.37e-6,1.66e-6,1.16e-6,0.815e-6,0.571e-6,0.4e-6,0.28e-6,0.196e-6,0.138e-6,0.0964e-6,0.0676e-6,0.0474e-6,0.0320e-6,0.0240e-6,0.0160e-6,0.0120e-6,0.008e-6,0.0], np.float64), # ligand concentration, M
'Pstated' : 0.5e-6 * np.ones([24],np.float64), # protein concentration, M
'assay_volume' : 50e-6, # assay volume, L
'well_area' : 0.1369, # well area, cm^2 for 4ti-0203 [http://4ti.co.uk/files/3113/4217/2464/4ti-0201.pdf]
}
# JSON cannot serialize numpy arrays directly, so convert them to plain lists first
inputs['Lstated'] = inputs['Lstated'].tolist()
inputs['Pstated'] = inputs['Pstated'].tolist()
with open('inputs.json', 'w') as fp:
json.dump(inputs, fp)
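# Illustrative round-trip sketch (not part of the original script): read the JSON
# back and restore the concentration vectors as numpy arrays, as a downstream
# analysis script would.
with open('inputs.json', 'r') as fp:
    inputs_check = json.load(fp)
inputs_check['Lstated'] = np.array(inputs_check['Lstated'], np.float64)
inputs_check['Pstated'] = np.array(inputs_check['Pstated'], np.float64)
assert len(inputs_check['Lstated']) == len(inputs['Lstated'])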
| choderalab/assaytools | examples/direct-fluorescence-assay/inputs_p38_singlet.py | Python | lgpl-2.1 | 1,067 |
#!/usr/bin/python2.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
# $Id$
#
# Copyright (C) 1999-2006 Keith Dart <keith@kdart.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
"""
Linux ioctl macros. Taken from /usr/include/asm/ioctl.h
"""
# ioctl command encoding: 32 bits total, command in lower 16 bits,
# size of the parameter structure in the lower 14 bits of the
# upper 16 bits.
# Encoding the size of the parameter structure in the ioctl request
# is useful for catching programs compiled with old versions
# and to avoid overwriting user space outside the user buffer area.
# The highest 2 bits are reserved for indicating the ``access mode''.
# NOTE: This limits the max parameter size to 16kB -1 !
#
#
# The following is for compatibility across the various Linux
# platforms. The i386 ioctl numbering scheme doesn't really enforce
# a type field. De facto, however, the top 8 bits of the lower 16
# bits are indeed used as a type field, so we might just as well make
# this explicit here. Please be sure to use the decoding macros
# below from now on.
import struct
sizeof = struct.calcsize
_IOC_NRBITS = 8
_IOC_TYPEBITS = 8
_IOC_SIZEBITS = 14
_IOC_DIRBITS = 2
_IOC_NRMASK = ((1 << _IOC_NRBITS)-1)
_IOC_TYPEMASK = ((1 << _IOC_TYPEBITS)-1)
_IOC_SIZEMASK = ((1 << _IOC_SIZEBITS)-1)
_IOC_DIRMASK = ((1 << _IOC_DIRBITS)-1)
_IOC_NRSHIFT = 0
_IOC_TYPESHIFT = (_IOC_NRSHIFT+_IOC_NRBITS)
_IOC_SIZESHIFT = (_IOC_TYPESHIFT+_IOC_TYPEBITS)
_IOC_DIRSHIFT = (_IOC_SIZESHIFT+_IOC_SIZEBITS)
IOCSIZE_MASK = (_IOC_SIZEMASK << _IOC_SIZESHIFT)
IOCSIZE_SHIFT = (_IOC_SIZESHIFT)
###
# direction bits
_IOC_NONE = 0
_IOC_WRITE = 1
_IOC_READ = 2
def _IOC(dir,type,nr,FMT):
return int((((dir) << _IOC_DIRSHIFT) | \
((type) << _IOC_TYPESHIFT) | \
((nr) << _IOC_NRSHIFT) | \
((FMT) << _IOC_SIZESHIFT)) & 0xffffffff )
# used to create numbers
# type is the assigned type from the kernel developers
# nr is the base ioctl number (defined by driver writer)
# FMT is a struct module format string.
def _IO(type,nr): return _IOC(_IOC_NONE,(type),(nr),0)
def _IOR(type,nr,FMT): return _IOC(_IOC_READ,(type),(nr),sizeof(FMT))
def _IOW(type,nr,FMT): return _IOC(_IOC_WRITE,(type),(nr),sizeof(FMT))
def _IOWR(type,nr,FMT): return _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(FMT))
# used to decode ioctl numbers
def _IOC_DIR(nr): return (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
def _IOC_TYPE(nr): return (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
def _IOC_NR(nr): return (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
def _IOC_SIZE(nr): return (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
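# Illustrative sketch (not part of the original module): build and decode a
# hypothetical read-only ioctl request number. The type letter 'T', number 0x13
# and argument format "hhhh" are made up for the example; real request numbers
# for specific devices come from the kernel headers (and some predate this
# encoding scheme).
if __name__ == '__main__':
    _req = _IOR(ord('T'), 0x13, "hhhh")   # read ioctl whose argument is four shorts
    assert _IOC_DIR(_req) == _IOC_READ
    assert _IOC_TYPE(_req) == ord('T')
    assert _IOC_NR(_req) == 0x13
    assert _IOC_SIZE(_req) == sizeof("hhhh")
    print("0x%08x" % _req)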
| xiangke/pycopia | core/pycopia/OS/Linux/IOCTL.py | Python | lgpl-2.1 | 3,107 |
"""autogenerated by genpy from hrl_lib/Pose3DOF.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class Pose3DOF(genpy.Message):
_md5sum = "646ead44a0e6fecf4e14ca116f12b08b"
_type = "hrl_lib/Pose3DOF"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
float64 x
float64 y
float64 theta
float64 dt
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.secs: seconds (stamp_secs) since epoch
# * stamp.nsecs: nanoseconds since stamp_secs
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
"""
__slots__ = ['header','x','y','theta','dt']
_slot_types = ['std_msgs/Header','float64','float64','float64','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,x,y,theta,dt
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Pose3DOF, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.x is None:
self.x = 0.
if self.y is None:
self.y = 0.
if self.theta is None:
self.theta = 0.
if self.dt is None:
self.dt = 0.
else:
self.header = std_msgs.msg.Header()
self.x = 0.
self.y = 0.
self.theta = 0.
self.dt = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_4d.pack(_x.x, _x.y, _x.theta, _x.dt))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 32
(_x.x, _x.y, _x.theta, _x.dt,) = _struct_4d.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_4d.pack(_x.x, _x.y, _x.theta, _x.dt))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 32
(_x.x, _x.y, _x.theta, _x.dt,) = _struct_4d.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_3I = struct.Struct("<3I")
_struct_4d = struct.Struct("<4d")
| HailStorm32/Q.bo_stacks | qbo_stereo_anaglyph/hrl_lib/src/hrl_lib/msg/_Pose3DOF.py | Python | lgpl-2.1 | 5,992 |
#!/usr/bin/python2.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
# $Id$
#
# Copyright (C) 1999-2006 Keith Dart <keith@kdart.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
from gtk import *
from gnome.ui import *
win = GtkWindow()
win.connect('destroy', mainquit)
win.set_title('Canvas test')
canvas = GnomeCanvas()
canvas.set_size(300, 300)
win.add(canvas)
canvas.show()
canvas.root().add('line', points=(10,10, 90,10, 90,90, 10,90),
width_pixels=10, fill_color='blue')
win.show()
mainloop()
| xiangke/pycopia | experimental/pycopia/GUI/canvas.py | Python | lgpl-2.1 | 1,013 |
# -*- Mode: Python; coding: iso-8859-1 -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Stoqdrivers
# Copyright (C) 2005 Async Open Source <http://www.async.com.br>
# All rights reserved
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
# USA.
#
# Author(s): Henrique Romano <henrique@async.com.br>
#
"""
Driver Capability management.
"""
from numbers import Real
from typing import Optional
from stoqdrivers.exceptions import CapabilityError
class Capability:
""" This class is used to represent a driver capability, offering methods
    to validate a value against the capability limits.
"""
def __init__(self, min_len: Optional[int]=None, max_len: Optional[int]=None,
max_size: Optional[Real]=None, min_size: Optional[Real]=None,
digits: Optional[int]=None, decimals: Optional[Real]=None):
""" Creates a new driver capability. A driver capability can be
        represented basically by the max length of a string, the maximum
        number of digits of a value, or its minimum/maximum size. With an instance of
Capability you can check if a value is acceptable by the driver
through the check_value method. The Capability arguments are:
@param min_len: The minimum length of a string
@type min_len: int
@param max_len: The max length of a string
@type max_len: int
        @param max_size: The maximum size for a value
@type max_size: number
@param min_size: The minimum size for a value
@type min_size: number
@param digits: The number of digits that a number can have
@type digits: int
@param decimals: If the max value for the capability is a float,
this parameter specifies the max precision
that the number can have.
@type decimals: number
Note that 'max_len' can't be used together with 'min_size', 'max_size'
and 'digits', in the same way that 'max_size' and 'min_size' can't be
used with 'digits'. The values defined for these parameters are used
also to verify the value type in the 'check_value' method.
"""
if max_len is not None and (max_size is not None
and min_size is not None
and digits is not None
and decimals):
raise ValueError("max_len cannot be used together with max_size, "
"min_size, digits or decimals")
if digits is not None:
if max_size is not None:
raise ValueError("digits can't be used with max_size")
if decimals:
decimal_part = 1 - (1 / 10.0 ** decimals)
else:
decimal_part = 0
self.max_size = ((10.0 ** digits) - 1) + decimal_part
self.min_len = min_len
self.max_len = max_len
self.min_size = min_size or 0
self.max_size = max_size
self.digits = digits
self.decimals = decimals
def check_value(self, value):
if self.max_len:
if not isinstance(value, str):
raise CapabilityError("the value must be a string")
if len(value) > (self.max_len or float('inf')):
raise CapabilityError("the value can't be greater than %d "
"characters" % self.max_len)
elif len(value) < (self.min_len or float('-inf')):
raise CapabilityError("the value can't be less than %d "
"characters" % self.min_len)
return
elif not (self.max_size and self.min_size):
return
if not isinstance(value, (float, int)):
raise CapabilityError("the value must be float, integer or long")
if value > (self.max_size or float('inf')):
raise CapabilityError("the value can't be greater than %r"
% self.max_size)
elif value < (self.min_size or float('-inf')):
raise CapabilityError("the value can't be less than %r"
% self.min_size)
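

# Minimal usage sketch (not part of the original module; the limits and the
# sample values below are made up):
if __name__ == '__main__':
    name_cap = Capability(max_len=10)
    name_cap.check_value("short")               # accepted
    price_cap = Capability(min_size=1, max_size=999.99)
    price_cap.check_value(10.5)                 # accepted
    try:
        price_cap.check_value(1500.0)           # above max_size
    except CapabilityError as exc:
        print("rejected:", exc)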
| stoq/stoqdrivers | stoqdrivers/printers/capabilities.py | Python | lgpl-2.1 | 4,905 |
#!/usr/bin/env python
# -*- coding:utf8 -*-
from setuptools import setup
import picuplib
setup(
name = 'picuplib',
packages = ['picuplib'],
version = picuplib.__version__,
description = 'Picflash upload library',
author = 'Arvedui',
author_email = 'arvedui@posteo.de',
url = 'https://github.com/Arvedui/picuplib',
install_requires=['requests', 'requests-toolbelt'],
classifiers=[
'Development Status :: 4 - Beta',
'Topic :: Software Development :: Libraries',
'Intended Audience :: Developers',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
]
)
| Arvedui/picuplib | setup.py | Python | lgpl-2.1 | 950 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
import pytest
import spack
from llnl.util.filesystem import join_path, touch, working_dir
from spack.spec import Spec
from spack.version import ver
from spack.util.executable import which
pytestmark = pytest.mark.skipif(
not which('svn'), reason='requires subversion to be installed')
@pytest.mark.parametrize("type_of_test", ['default', 'rev0'])
@pytest.mark.parametrize("secure", [True, False])
def test_fetch(
type_of_test,
secure,
mock_svn_repository,
config,
refresh_builtin_mock
):
"""Tries to:
1. Fetch the repo using a fetch strategy constructed with
supplied args (they depend on type_of_test).
2. Check if the test_file is in the checked out repository.
3. Assert that the repository is at the revision supplied.
4. Add and remove some files, then reset the repo, and
ensure it's all there again.
"""
# Retrieve the right test parameters
t = mock_svn_repository.checks[type_of_test]
h = mock_svn_repository.hash
# Construct the package under test
spec = Spec('svn-test')
spec.concretize()
pkg = spack.repo.get(spec)
pkg.versions[ver('svn')] = t.args
# Enter the stage directory and check some properties
with pkg.stage:
try:
spack.insecure = secure
pkg.do_stage()
finally:
spack.insecure = False
with working_dir(pkg.stage.source_path):
assert h() == t.revision
file_path = join_path(pkg.stage.source_path, t.file)
assert os.path.isdir(pkg.stage.source_path)
assert os.path.isfile(file_path)
os.unlink(file_path)
assert not os.path.isfile(file_path)
untracked_file = 'foobarbaz'
touch(untracked_file)
assert os.path.isfile(untracked_file)
pkg.do_restage()
assert not os.path.isfile(untracked_file)
assert os.path.isdir(pkg.stage.source_path)
assert os.path.isfile(file_path)
assert h() == t.revision
| EmreAtes/spack | lib/spack/spack/test/svn_fetch.py | Python | lgpl-2.1 | 3,319 |
'''
Created on January 19, 2015
@author: Guan-yu Willie Chen
'''
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.keys import Keys
import time
#browser = webdriver.Firefox()
#browser = webdriver.Ie()
browser = webdriver.Chrome("chromedriver.exe")
URL = ""
browser.get(URL+"/insurance/gs/sp/spLogin")
# Log in
browser.find_element_by_xpath("//input[@id='login:userName']").send_keys('')
browser.find_element_by_xpath("//input[@id='login:password']").send_keys('' + Keys.RETURN)
# Enter the front end
browser.find_element_by_xpath("//img[@name='Adminstration']").click()
# Open the barcode printing task
browser.get(URL+"insurance/eclaim/qrcodePrint.do")
# Set the starting claim number
claimStartNo = browser.find_element_by_name("claimStartNo").send_keys("CLBR14V000000")
# Select the document name
docId = browser.find_element_by_name("docId")
for n in enumerate(docId.text.split("\n")):
print(n)
select = Select(docId)
select.select_by_index(1)
# Query
browser.find_element_by_xpath("//input[@name='queryBtn']").click()
# Pagination
browser.find_element_by_xpath("//input[@id='gotoPageNo']").send_keys(Keys.BACKSPACE)
browser.find_element_by_xpath("//input[@id='gotoPageNo']").send_keys("3")
browser.find_element_by_xpath("//div[@id='turnpage']/table/tbody/tr/td/input[@value='跳至']").click()
| williechen/DailyApp | 18/py201501/sample/sample.py | Python | lgpl-3.0 | 1,423 |
from scipy.spatial import distance as dist
class Searcher:
def __init__(self, index):
self.index = index
def search(self, queryFeature):
results = {}
for (k, feature) in self.index.items():
d = dist.euclidean(queryFeature, feature)
results[k] = d
results = sorted([(v, k) for (k, v) in results.items()])
        return results
| CoderEnko007/Pokedex | tools/searcher.py | Python | lgpl-3.0 | 337 |
from .spec import BASIC_PROPS_SET, encode_basic_properties
def encode_message(frame, headers, body, frame_size):
"""Encode message headers and body as a sequence of frames."""
for f in frame.encode():
yield f
props, headers = split_headers(headers, BASIC_PROPS_SET)
if headers:
props['headers'] = headers
yield encode_basic_properties(len(body), props)
for chunk in encode_body(body, frame_size):
yield chunk
def split_headers(user_headers, properties_set):
"""Split bitfield properties from named headers."""
props = {}
headers = {}
for key, value in user_headers.iteritems():
if key in properties_set:
props[key] = value
else:
headers[key] = value
return props, headers
def encode_body(body, frame_size):
"""Generate a sequence of chunks for body where each chunk is less than frame_size"""
limit = frame_size - 7 - 1 # spec is broken...
while body:
payload, body = body[:limit], body[limit:]
yield (0x03, payload)
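

# Minimal sketch (not part of the original module; the frame size and body are
# made up): with frame_size=16 the usable payload per chunk is 16 - 7 - 1 = 8
# bytes, so a 20-byte body is emitted as body frames of 8, 8 and 4 bytes.
if __name__ == '__main__':
    chunks = list(encode_body(b'x' * 20, 16))
    assert [len(payload) for _frame_type, payload in chunks] == [8, 8, 4]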
| seatme/nucleon.amqp | nucleon/amqp/encoding.py | Python | lgpl-3.0 | 1,062 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from openturns import coupling_tools
import os
import time
import sys
wanted_lines = '# ooo\nE=@E\nE1=@E\nFE1=@F#oo\nZ=@Z@Z\n# ooo\n'
semi_parsed = '# ooo\nE=2\nE1=2\nFE1=@F#oo\nZ=@Z@Z\n# ooo\n'
parsed = '# ooo\nE=1.6\nE1=1.6\nFE1=5#oo\nZ=66\n# ooo\n'
# how many Mo for perf file
#howbig = 3024
howbig = 256
max_time = divmod(howbig, 5)[0]
def create_template():
template_name = 'template.in'
template_handle = open(template_name, 'wb')
template_handle.write(wanted_lines.encode())
template_handle.close()
return template_name
def create_big_template():
template_name = 'template_big.in'
template_handle = open(template_name, 'wb')
print('create template file of ' + str(howbig) + 'Mo')
template_handle.write(wanted_lines.encode())
for i in range(howbig):
for i in range(1024):
# line of 1024 octets
template_handle.write(b'u'*1024)
template_handle.write(b'\n')
template_handle.write(b'# ooo\n')
template_handle.close()
return template_name
def remove_file(filename, quiet=False):
if quiet:
try:
os.remove(filename)
except:
pass
else:
os.remove(filename)
def check_outfile(filename, wanted_result):
""" wanted_result: a string """
is_ok = True
handle = open(filename)
for wanted_line, result_line in zip(wanted_result.splitlines(True), handle):
if wanted_line != result_line:
print('Aaaaarg, result is not what we wanted (result:' + \
                result_line + ', should be:' + wanted_line + ')')
is_ok = False
handle.close()
if is_ok:
print('check ' + filename + ': ok')
else:
exit(1)
#return is_ok
def check_replace():
print("=== " + sys._getframe().f_code.co_name)
print("= check replace std")
template = create_template()
template_out = template + ".replaced"
coupling_tools.replace(infile=template, outfile=template_out,
tokens=["@E"], values=[2])
check_outfile(template_out, semi_parsed)
remove_file(template_out)
remove_file(template)
print("= check replace more vars")
template = create_template()
coupling_tools.replace(infile=template, outfile=template_out,
tokens=["@E", "@F", "@Z"],
values=[1.6, 5, 6])
check_outfile(template_out, parsed)
remove_file(template_out)
remove_file(template)
print("= check replace inplace")
template = create_template()
coupling_tools.replace(infile=template, outfile=template,
tokens=["@E", "@F", "@Z"], values=[1.6, 5, 6])
check_outfile(template, parsed)
remove_file(template)
print("= check replace inplace with None")
template = create_template()
coupling_tools.replace(infile=template, outfile=None,
tokens=["@E", "@F", "@Z"], values=[1.6, 5, 6])
check_outfile(template, parsed)
remove_file(template)
print("= check replace big template")
start_time = time.time()
template = create_big_template()
sys.stderr.write( "big template created in : " + str(time.time() - start_time) + "s\n" )
template_out = template + ".replaced"
start_time = time.time()
coupling_tools.replace(infile=template, outfile=template_out,
tokens=["@E"], values=[2])
time_to_parse = str(int(time.time() - start_time))
check_outfile(template_out, semi_parsed)
remove_file(template_out)
remove_file(template)
sys.stderr.write( "parsed template in: " + time_to_parse + "s\n" )
# parsed template=3G -> 25s on bx (ssd, core i7@2.5GHz)
if int(time_to_parse) > max_time:
print('time to get token took too long (should be ' + str(max_time)+'s max)')
exit(1)
else:
print('check replace big template: ok')
def create_results(tokens, values=None, big=False):
filename = "results.out"
handle = open(filename, "wb")
if big:
print("create file of " + str(howbig) + "Mo")
for i in range(howbig):
for i in range(1024):
# line of 1024 octets
handle.write(b'u'*1024)
handle.write(b'\n')
handle.write(b'# ooo\n')
if values == None:
handle.write(tokens.encode())
else:
n = 0
for t, v in zip(tokens, values):
handle.write((t + str(v)).encode())
# go to next line sometimes
if n%3 == 0:
handle.write(b'\n')
n += 1
handle.close()
return filename
def check_results(ok_values, values):
if ok_values == values:
print("ok")
else:
print("Error: found: " + str(values) + " should be: " + str(ok_values))
exit(1)
def check_get_line_col():
print("=== " + sys._getframe().f_code.co_name)
content = """01 02 03 04 05 06 07 08 09
11 12 13 14 15 16 17 18 19
21 22 23 24 25 26 27 28 29
31 32 33 34 35 36 37 38 39
"""
result_file = create_results(content)
value = 1
result = coupling_tools.get_line_col(result_file)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
value = 3
result = coupling_tools.get_line_col(result_file, skip_col=2)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
value = 11
result = coupling_tools.get_line_col(result_file, 1)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
value = 16
result = coupling_tools.get_line_col(result_file, 1, 5)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
value = 9
result = coupling_tools.get_line_col(result_file, skip_col=-1)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
value = 17
result = coupling_tools.get_line_col(result_file, 1, -3)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
value = 31
result = coupling_tools.get_line_col(result_file, -1)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
value = 14
result = coupling_tools.get_line_col(result_file, -3, -6)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
value = 3
result = coupling_tools.get_line_col(result_file, seek=6)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
value = 23
result = coupling_tools.get_line_col(result_file, skip_line=1, skip_col=2, seek=30)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
value = 31
result = coupling_tools.get_line_col(result_file, skip_line=-1, seek=-100)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
#coupling_tools.debug = True
value = 21
just_before_line_ret = 80
sys.stderr.write( 'char at pos ' + str(just_before_line_ret) + ':->' + \
content[just_before_line_ret] + '<-\n' )
result = coupling_tools.get_line_col(result_file, skip_line=-1,
seek=-just_before_line_ret)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
value = 21
just_after_line_ret = just_before_line_ret + 1
sys.stderr.write( 'char at pos ' + str(just_after_line_ret) + ':->' + \
content[just_after_line_ret] + '<-\n')
result = coupling_tools.get_line_col(result_file, skip_line=-2,
seek=-just_after_line_ret)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
try:
result = coupling_tools.get_line_col(result_file, skip_line=4)
except:
pass
else:
raise Exception("! should have fail !")
try:
result = coupling_tools.get_line_col(result_file, skip_line=-5)
except:
pass
else:
raise Exception("! should have fail !")
os.remove(result_file)
# test with a last empty line
content = """01 02 03 04 05 06 07 08 09
11 12 13 14 15 16 17 18 19
"""
result_file = create_results(content)
value = 19
result = coupling_tools.get_line_col(result_file, skip_line=-2, skip_col=-1)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
value = 1
result = coupling_tools.get_line_col(result_file, skip_line=-3)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
try:
result = coupling_tools.get_line_col(result_file, skip_line=-1)
except:
pass
else:
raise Exception("! should have fail !")
os.remove(result_file)
print("ok")
def check_get():
print("=== " + sys._getframe().f_code.co_name)
tokens = ["@Y1=", "@Y2="]
values = [5.4, 6.5]
result_file = create_results(tokens, values)
results = coupling_tools.get(filename=result_file, tokens=tokens)
check_results(values, results)
remove(result_file)
#Y = coupling_tools.get(outfile="output.py", tokens=["@Y1=", "@Y2="],
# occurence=[0, 0], col=[0, 3], line=[0, 2])
def check_get_regex():
print("=== " + sys._getframe().f_code.co_name)
tokens = ["@E=", "02=", " 01 = "]
values = [-9.55555E5, 8, 5.4]
result_file = create_results(tokens, values)
results = coupling_tools.get_regex(filename=result_file,
patterns=['@E=(\R)',
'02\s*=\s*(\I)\s*',
'01 =\s*(\R)']
)
check_results(values, results)
remove_file(result_file)
def check_get_regex_perf():
print("=== " + sys._getframe().f_code.co_name)
tokens = ["@E=", "02=", " 01 = "]
values = [-9.55555E5, 8, 5.4]
start_time = time.time()
result_file = create_results(tokens, values, big=True)
sys.stderr.write("big file created in : " + str(time.time() - start_time) + "s\n")
start_time = time.time()
results = coupling_tools.get_regex(filename=result_file,
patterns=['@E=(\R)',
'02\s*=\s*(\I)\s*',
'01 =\s*(\R)']
)
time_to_parse = str(int(time.time() - start_time))
check_results(values, results)
remove_file(result_file)
# get file=3G -> 16s on bx (ssd, core i7@2.5GHz)
sys.stderr.write("get regex in file in: " + time_to_parse + "s\n")
if int(time_to_parse) > max_time:
print('time to get token took too long (should be ' + str(max_time)+'s max)')
exit(1)
else:
print("get regex in file: ok")
def check_get_tokens():
print("=== " + sys._getframe().f_code.co_name)
tokens = ["@E=", " pp", ",virg", " normal="]
values = [-9.55555E6, 56.666, -12345678912.2, 0]
result_file = create_results(tokens, values)
results = coupling_tools.get(filename=result_file,
tokens=tokens)
check_results(values, results)
remove_file(result_file)
def check_get_tokens_skip():
print("=== " + sys._getframe().f_code.co_name)
content = "@E=99 @E=-9.55555E6 pp88 pp 56.666,virg-12345678912.2 normal=0"
values = [-9.55555E6, 56.666, -12345678912.2, 0]
result_file = create_results(content)
results = coupling_tools.get(filename=result_file,
tokens=["@E=", "pp", ",virg",
"normal="],
skip_tokens=[1, -1, 0, 0]
)
check_results(values, results)
remove_file(result_file)
print("=== " + sys._getframe().f_code.co_name + "2")
tokens = '@E=99 @E=7899 pp88 pp pp\n'\
'pp999 pp56.666E-9pp,virg-12345678912.2 uu88 uuuu\n'\
'uu999uu\n'
values = [99, 56.666E-9, -12345678912.2, 999]
result_file = create_results(tokens)
results = coupling_tools.get(filename=result_file,
tokens=["@E=", "pp", ",virg",
"uu"],
skip_tokens=[0, 4, -1, 3]
)
check_results(values, results)
remove_file(result_file)
def check_get_array():
print("=== " + sys._getframe().f_code.co_name)
tokens = '11.0E-9 22.0crap 33.0 44.0 55.0\n'\
'11.1 22.1 33.1 44.1\n'\
'11.2 22.2 33.2 44.2'
values = [11.0E-9, 22.0, 55.0, 11.1, 33.2, 22.2, 33.2]
result_file = create_results(tokens)
results = coupling_tools.get(filename=result_file,
skip_lines=[0, 0, 0, 1, 2, 2, -1],
skip_cols= [0, 1, -1, 0, 2, 1, -2]
)
check_results(values, results)
remove_file(result_file)
def check_get_tokens_line_col():
print("=== " + sys._getframe().f_code.co_name)
tokens = '11.0E-9 22.0crap 33.0 44.0 55.0\n'\
'11.1 22.1 33.1 44.1 middle\n'\
'11.2 22.2 33.2 44.2\n'\
'@E=1111.1E11 666'
values = [1111.1E11, 22.0, 33.1, 666, 33.2, 44.1, 55.0]
result_file = create_results(tokens)
results = coupling_tools.get(filename=result_file,
tokens=['@E=', None, '@E=', '@E=',
'middle', 'middle', 'middle'],
skip_lines=[0, 0, -2, 0, 1, 0, -1],
skip_cols= [0, 1, 2, 1,-2, -1, -1]
)
check_results(values, results)
remove_file(result_file)
def check_get_tokens_skip_line_col():
print("=== " + sys._getframe().f_code.co_name)
tokens = '11.0E-9 22.0crap 33.0 44.0 55.0\n'\
'11.1 22.1 33.1 44.1 middle\n'\
'11.2 22.2 middle 33.2 44.2\n'\
'@E=1111.1E11 666\n'\
'@E=999 8888 @E=95\n'
values = [1111.1E11, 33.2, 666, 8888, 8888, 666]
result_file = create_results(tokens)
results = coupling_tools.get(filename=result_file,
tokens=['@E=', 'middle', 'middle', '@E=', '@E=', '@E='],
skip_tokens=[0, 1, -1, -2, -1, -2],
skip_lines=[0, 0, 1, 0, 0, -1],
skip_cols= [0, 0, 1, 1, -1, -1]
)
check_results(values, results)
remove_file(result_file)
def check_get_tokens_perf():
print("=== " + sys._getframe().f_code.co_name)
tokens = ["@E=", " pp", ",virg", " normal="]
values = [-9.55555E6, 56.666, -12345678912.2, 0]
tokens = ["@E="]
values = [-9.55555E6]
start_time = time.time()
result_file = create_results(tokens, values, big=True)
sys.stderr.write("big file created in : " + str(time.time() - start_time) + "s\n")
start_time = time.time()
results = coupling_tools.get(filename=result_file,
tokens=tokens)
time_to_parse = str(int(time.time() - start_time))
check_results(values, results)
remove_file(result_file)
# get file=3G -> 18s on bx (ssd, core i7@2.5GHz)
sys.stderr.write( 'get token in file in: ' + time_to_parse + 's\n' )
if int(time_to_parse) > max_time:
print('time to get token took too long (should be ' + str(max_time)+'s max)')
exit(1)
else:
print('get tokens: ok')
def check_get_tokens_skip_perf():
print("=== " + sys._getframe().f_code.co_name)
content = "@E=99 @E=-9.55555E6 pp88 pp 56.666,virg-12345678912.2 normal=0"
values = [-9.55555E6, 56.666, -12345678912.2, 0]
content = "@E=99 @E=-9.55555E6 pp88 pp 56.666,virg-12345678912.2 normal=0"
values = [-9.55555E6]
start_time = time.time()
result_file = create_results(content, big=True)
sys.stderr.write("big file created in : " + str(time.time() - start_time) + "s\n")
start_time = time.time()
results = coupling_tools.get(filename=result_file,
tokens=["@E="],
skip_tokens=[-1]
)
time_to_parse = str(int(time.time() - start_time))
check_results(values, results)
remove_file(result_file)
# get file=3G -> 21s on bx (ssd, core i7@2.5GHz)
sys.stderr.write("get token skip in file in: " + time_to_parse + "s\n")
if int(time_to_parse) > max_time:
print('time to get token took too long (should be ' + str(max_time)+'s max)')
exit(1)
else:
print('get tokens skip: ok')
def check_get_line_col_perf():
print("=== " + sys._getframe().f_code.co_name)
tokens = '11.0E-9 22.0 33.0 44.0 55.0\n'\
'11.1 22.1 33.1 44.1\n'\
'11.2 22.2 33.2 44.2'\
'\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'\
'\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'\
'\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'
values = [22.0]
start_time = time.time()
result_file = create_results(tokens, big=True)
sys.stderr.write('big file created in : ' + str(time.time() - start_time) + 's\n')
start_time = time.time()
results = coupling_tools.get(filename=result_file,
skip_lines=[-92],
skip_cols= [1]
)
time_to_parse = str(int(time.time() - start_time))
check_results(values, results)
remove_file(result_file)
# get file=3G -> 15s on bx (ssd, core i7@2.5GHz)
sys.stderr.write('get token skip line col in file in: ' + time_to_parse + 's\n')
if int(time_to_parse) > max_time:
print('time to get token took too long (should be ' + str(max_time)+'s max)')
exit(1)
else:
print('get line col: ok')
def check_execute():
print("=== " + sys._getframe().f_code.co_name)
# ensure previous print is print before following command output
sys.stdout.flush()
if 'win' not in sys.platform:
coupling_tools.execute('/bin/ls /bin/kill')
coupling_tools.execute('echo "hi"', is_shell=True)
coupling_tools.execute('echo "hi"', is_shell=True,
shell_exe='/bin/bash')
ret, stdout = coupling_tools.execute('/bin/ls /bin/kill',
get_stdout=True)
if stdout != b'/bin/kill\n':
raise Exception("coupling_tools.execute error!")
ret, stdout, stderr = coupling_tools.execute('/bin/ls /bin/kill',
get_stdout=True, get_stderr=True)
if stdout != b'/bin/kill\n' and stderr != b'':
raise Exception("coupling_tools.execute error!")
ret, stderr = coupling_tools.execute('/bin/ls /bin/kill 1>&2',
is_shell=True,
get_stderr=True)
if stderr != b'/bin/kill\n':
raise Exception("coupling_tools.execute error!")
else:
coupling_tools.execute('cmd.exe /c echo /bin/kill')
exec_in_wine = os.path.exists('/boot')
if exec_in_wine:
            # the 'echo' command does not work in Python on Wine for an unknown reason
print('hi')
print('hi')
else:
# native windows
coupling_tools.execute('echo hi', is_shell=True)
coupling_tools.execute('echo hi', is_shell=True, hide_win=False)
ret, stdout = coupling_tools.execute('echo hello', is_shell=True,
get_stdout=True)
if ret != 0 or not str(stdout).startswith('hello'):
raise Exception("coupling_tools.execute error!")
print("execute ok")
check_execute()
check_replace()
check_get_regex()
check_get_regex_perf()
check_get_line_col()
check_get_tokens()
check_get_tokens_skip()
check_get_array()
check_get_tokens_line_col()
check_get_tokens_skip_line_col()
check_get_tokens_perf()
check_get_tokens_skip_perf()
check_get_line_col_perf()
exit(0)
| dbarbier/privot | python/test/t_coupling_tools.py | Python | lgpl-3.0 | 21,167 |
# vi: sw=4 ts=4 et:
"""microarray.py - cMonkey microarray related processing
This module captures the microarray-specific scoring component
of cMonkey.
This file is part of cMonkey Python. Please see README and LICENSE for
more information and licensing details.
"""
import numpy as np
import logging
import datamatrix as dm
import util
import scoring
import multiprocessing as mp
def seed_column_members(data_matrix, row_membership, num_clusters,
num_clusters_per_column):
"""Default column membership seeder ('best')
In case of multiple input ratio matrices, we assume that these
matrices have been combined into data_matrix"""
num_rows = data_matrix.num_rows()
num_cols = data_matrix.num_columns()
# create a submatrix for each cluster
column_scores = []
for cluster_num in xrange(1, num_clusters + 1):
current_cluster_rows = []
for row_index in xrange(num_rows):
if row_membership[row_index][0] == cluster_num:
current_cluster_rows.append(data_matrix.row_names[row_index])
submatrix = data_matrix.submatrix_by_name(
row_names=current_cluster_rows)
scores = (-scoring.compute_column_scores_submatrix(submatrix)).values[0]
column_scores.append(scores)
column_members = []
start_time = util.current_millis()
for column_index in xrange(num_cols):
scores_to_order = []
for row_index in xrange(num_clusters):
scores_to_order.append(column_scores[row_index][column_index])
column_members.append(order(scores_to_order)[:num_clusters_per_column])
elapsed = util.current_millis() - start_time
logging.info("seed column members in %f s.", elapsed % 1000.0)
return column_members
def order(alist):
"""a weird R function that gives each item's position in the original list
if you enumerate each item in a sorted list"""
return map(lambda x: alist.index(x) + 1, sorted(alist, reverse=True))
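
# For example (made-up values): order([10, 30, 20]) == [2, 3, 1], i.e. the
# largest value sits at position 2 of the original list, the next largest at
# position 3, and the smallest at position 1.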
def compute_row_scores(membership, matrix, num_clusters,
use_multiprocessing):
"""for each cluster 1, 2, .. num_clusters compute the row scores
for the each row name in the input name matrix"""
start_time = util.current_millis()
cluster_row_scores = __compute_row_scores_for_clusters(
membership, matrix, num_clusters, use_multiprocessing)
# TODO: replace the nan/inf-Values with the quantile-thingy in the R-version
logging.info("__compute_row_scores_for_clusters() in %f s.",
(util.current_millis() - start_time) / 1000.0)
# rearrange result into a DataMatrix, where rows are indexed by gene
# and columns represent clusters
start_time = util.current_millis()
values = np.zeros((matrix.num_rows(), num_clusters))
# note that cluster is 0 based on a matrix
for cluster in xrange(num_clusters):
row_scores = cluster_row_scores[cluster]
values[:, cluster] = row_scores
result = dm.DataMatrix(matrix.num_rows(), num_clusters,
row_names=matrix.row_names,
values=values)
logging.info("made result matrix in %f s.",
(util.current_millis() - start_time) / 1000.0)
result = result.sorted_by_row_name()
result.fix_extreme_values()
return result
ROW_SCORE_MATRIX = None
ROW_SCORE_MEMBERSHIP = None
def __compute_row_scores_for_clusters(membership, matrix, num_clusters,
use_multiprocessing):
"""compute the pure row scores for the specified clusters
    without normalization"""
# note that we set the data into globals before we fork it off
# to save memory and pickling time
global ROW_SCORE_MATRIX, ROW_SCORE_MEMBERSHIP
ROW_SCORE_MATRIX = matrix
ROW_SCORE_MEMBERSHIP = membership
if use_multiprocessing:
pool = mp.Pool()
result = pool.map(compute_row_scores_for_cluster, xrange(1, num_clusters + 1))
pool.close()
pool.join()
else:
result = []
for cluster in range(1, num_clusters + 1):
result.append(compute_row_scores_for_cluster(cluster))
# cleanup
ROW_SCORE_MATRIX = None
ROW_SCORE_MEMBERSHIP = None
return result
def compute_row_scores_for_cluster(cluster):
"""This function computes the row score for a cluster"""
global ROW_SCORE_MATRIX, ROW_SCORE_MEMBERSHIP
membership = ROW_SCORE_MEMBERSHIP
matrix = ROW_SCORE_MATRIX
rnames = membership.rows_for_cluster(cluster)
cnames = membership.columns_for_cluster(cluster)
sm1 = matrix.submatrix_by_name(row_names=rnames, column_names=cnames)
if sm1.num_columns() > 1:
matrix_filtered = matrix.submatrix_by_name(column_names=cnames)
row_scores_for_cluster = __compute_row_scores_for_submatrix(
matrix_filtered, sm1)
return row_scores_for_cluster
else:
return None
def __compute_row_scores_for_submatrix(matrix, submatrix):
"""For a given matrix, compute the row scores. The second submatrix is
used to calculate the column means on and should be derived from
datamatrix filtered by the row names and column names of a specific
cluster.
matrix should be filtered by the columns of a specific cluster in
order for the column means to be applied properly.
The result is a DataMatrix with one row containing all the row scores"""
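    # Restating the expression below: for each row i,
    #   score_i = log(mean_j((matrix[i][j] - column_mean_j)**2) + 1e-99)
    # where column_mean_j is the mean of column j in the cluster submatrix.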
return np.log(
util.row_means(np.square(matrix.values - submatrix.column_means())) + 1e-99)
"""
def __quantile_normalize_scores(cluster_row_scores,
row_names,
membership,
num_clusters):
#quantile normalize the row scores in cluster_row_scores
#that are not NaN or +/-Inf and are in a row cluster membership
values_for_quantile = []
for cluster in xrange(1, num_clusters + 1):
row_scores_for_cluster = cluster_row_scores[cluster - 1]
cluster_rows = membership.rows_for_cluster(cluster)
if row_scores_for_cluster != None:
for row in xrange(len(row_scores_for_cluster)):
score = row_scores_for_cluster[row]
gene_name = row_names[row]
if np.isfinite(score) and (gene_name in cluster_rows):
values_for_quantile.append(score)
return util.quantile(values_for_quantile, 0.95)
"""
class RowScoringFunction(scoring.ScoringFunctionBase):
"""Scoring algorithm for microarray data based on genes"""
def __init__(self, membership, matrix, scaling_func=None,
run_in_iteration=scoring.schedule(1, 2),
config_params=None):
"""Create scoring function instance"""
scoring.ScoringFunctionBase.__init__(self, membership,
matrix, scaling_func,
run_in_iteration,
config_params)
self.run_log = scoring.RunLog("row_scoring")
def name(self):
"""returns the name of this scoring function"""
return "Row"
def do_compute(self, iteration_result, ref_matrix=None):
"""the row scoring function"""
return compute_row_scores(self.membership(),
self.matrix(),
self.num_clusters(),
self.config_params[scoring.KEY_MULTIPROCESSING])
def run_logs(self):
"""return the run logs"""
return [self.run_log]
__all__ = ['compute_row_scores', 'seed_column_members']
| jashworth-isb/cmonkey-python | cmonkey/microarray.py | Python | lgpl-3.0 | 7,657 |
from bacpypes.apdu import SubscribeCOVRequest, SimpleAckPDU, RejectPDU, AbortPDU
from bacpypes.iocb import IOCB
from bacpypes.core import deferred
from bacpypes.pdu import Address
from bacpypes.object import get_object_class, get_datatype
from bacpypes.constructeddata import Array
from bacpypes.primitivedata import Tag, ObjectIdentifier, Unsigned
from BAC0.core.io.Read import cast_datatype_from_tag
"""
Using COV, we build a "context" which is turned into a subscription sent to
the destination.
Once the IOCB completes, the callback attached to it (subscription_acknowledged)
will execute and we'll get the answer.
"""
class SubscriptionContext:
next_proc_id = 1
def __init__(self, address, objectID, confirmed=None, lifetime=None, callback=None):
self.address = address
self.subscriberProcessIdentifier = SubscriptionContext.next_proc_id
SubscriptionContext.next_proc_id += 1
self.monitoredObjectIdentifier = objectID
self.issueConfirmedNotifications = confirmed
self.lifetime = lifetime
self.callback = callback
def cov_notification(self, apdu):
# make a rash assumption that the property value is going to be
# a single application encoded tag
source = apdu.pduSource
object_changed = apdu.monitoredObjectIdentifier
elements = {
"source": source,
"object_changed": object_changed,
"properties": {},
}
for element in apdu.listOfValues:
prop_id = element.propertyIdentifier
datatype = get_datatype(object_changed[0], prop_id)
value = element.value
if not datatype:
value = cast_datatype_from_tag(
element.value, object_changed[0], prop_id
)
else:
# special case for array parts, others are managed by cast_out
if issubclass(datatype, Array) and (
element.propertyArrayIndex is not None
):
if element.propertyArrayIndex == 0:
value = element.value.cast_out(Unsigned)
else:
value = element.value.cast_out(datatype.subtype)
else:
value = element.value.cast_out(datatype)
elements["properties"][prop_id] = value
return elements
class CoV:
"""
Mixin to support COV registration
"""
def send_cov_subscription(self, request):
self._log.debug("Request : {}".format(request))
iocb = IOCB(request)
self._log.debug("IOCB : {}".format(iocb))
iocb.add_callback(self.subscription_acknowledged)
# pass to the BACnet stack
deferred(self.this_application.request_io, iocb)
def subscription_acknowledged(self, iocb):
if iocb.ioResponse:
self._log.info("Subscription success")
if iocb.ioError:
self._log.error("Subscription failed. {}".format(iocb.ioError))
def cov(self, address, objectID, confirmed=True, lifetime=0, callback=None):
address = Address(address)
context = self._build_cov_context(
address, objectID, confirmed=confirmed, lifetime=lifetime, callback=callback
)
request = self._build_cov_request(context)
self.send_cov_subscription(request)
def cancel_cov(self, address, objectID, callback=None):
address = Address(address)
context = self._build_cov_context(
address, objectID, confirmed=None, lifetime=None, callback=callback
)
request = self._build_cov_request(context)
self.send_cov_subscription(request)
def _build_cov_context(
self, address, objectID, confirmed=True, lifetime=None, callback=None
):
context = SubscriptionContext(
address=address,
objectID=objectID,
confirmed=confirmed,
lifetime=lifetime,
callback=callback,
)
self.subscription_contexts[context.subscriberProcessIdentifier] = context
if "context_callback" not in self.subscription_contexts.keys():
self.subscription_contexts["context_callback"] = self.context_callback
return context
def _build_cov_request(self, context):
request = SubscribeCOVRequest(
subscriberProcessIdentifier=context.subscriberProcessIdentifier,
monitoredObjectIdentifier=context.monitoredObjectIdentifier,
)
request.pduDestination = context.address
# optional parameters
if context.issueConfirmedNotifications is not None:
request.issueConfirmedNotifications = context.issueConfirmedNotifications
if context.lifetime is not None:
request.lifetime = context.lifetime
return request
# def context_callback(self, elements, callback=None):
def context_callback(self, elements):
self._log.info("Received COV Notification for {}".format(elements))
# if callback:
# callback()
for device in self.registered_devices:
if str(device.properties.address) == str(elements["source"]):
device[elements["object_changed"]].cov_registered = True
for prop, value in elements["properties"].items():
if prop == "presentValue":
device[elements["object_changed"]]._trend(value)
else:
device[elements["object_changed"]].properties.bacnet_properties[
prop
] = value
break
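

# Hedged usage sketch (not part of the original module). Assuming this mixin is
# part of an already connected BAC0 network object ``bacnet``, and assuming the
# example address, the point ('analogInput', 1) and the tuple form of objectID
# are valid in your setup, a subscription could look like:
#
#     bacnet.cov('192.168.1.10', ('analogInput', 1), confirmed=True, lifetime=300)
#     ...
#     bacnet.cancel_cov('192.168.1.10', ('analogInput', 1))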
| ChristianTremblay/BAC0 | BAC0/core/functions/cov.py | Python | lgpl-3.0 | 5,711 |
# coding:utf-8
from django.conf.urls import patterns, include, url
from kylin_log.views import *
urlpatterns = patterns('',
url(r'^list/(\w+)/$', log_list, name='log_list'),
url(r'^detail/(\w+)/$', log_detail, name='log_detail'),
url(r'^history/$', log_history, name='log_history'),
url(r'^log_kill/', log_kill, name='log_kill'),
url(r'^record/$', log_record, name='log_record'),
)
| cwm-kylin/kylin_ops | kylin_log/urls.py | Python | lgpl-3.0 | 519 |
#!/usr/bin/env python
#
# Copyright 2009 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates out a Closure deps.js file given a list of JavaScript sources.
Paths can be specified as arguments or (more commonly) specifying trees
with the flags (call with --help for descriptions).
Usage: depswriter.py [path/to/js1.js [path/to/js2.js] ...]
"""
import logging
import optparse
import os
import posixpath
import shlex
import sys
import source
import treescan
def MakeDepsFile(source_map):
"""Make a generated deps file.
Args:
source_map: A dict map of the source path to source.Source object.
Returns:
str, A generated deps file source.
"""
# Write in path alphabetical order
paths = source_map.keys()
paths.sort()
lines = []
for path in paths:
js_source = source_map[path]
# We don't need to add entries that don't provide anything.
if js_source.provides:
lines.append(_GetDepsLine(path, js_source))
return ''.join(lines)
def _GetDepsLine(path, js_source):
"""Get a deps.js file string for a source."""
provides = list(js_source.provides)
provides.sort()
requires = list(js_source.requires)
requires.sort()
return 'goog.addDependency(\'%s\', %s, %s);\n' % (path, provides, requires)
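
# For example (hypothetical path): a source that provides goog.dom and requires
# goog.array would yield the line
#   goog.addDependency('dom/dom.js', ['goog.dom'], ['goog.array']);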
def _GetOptionsParser():
"""Get the options parser."""
parser = optparse.OptionParser(__doc__)
parser.add_option('--output_file',
dest='output_file',
action='store',
help=('If specified, write output to this path instead of '
'writing to standard output.'))
parser.add_option('--root',
dest='roots',
default=[],
action='append',
help='A root directory to scan for JS source files. '
'Paths of JS files in generated deps file will be '
'relative to this path. This flag may be specified '
'multiple times.')
parser.add_option('--root_with_prefix',
dest='roots_with_prefix',
default=[],
action='append',
help='A root directory to scan for JS source files, plus '
'a prefix (if either contains a space, surround with '
'quotes). Paths in generated deps file will be relative '
'to the root, but preceded by the prefix. This flag '
'may be specified multiple times.')
parser.add_option('--path_with_depspath',
dest='paths_with_depspath',
default=[],
action='append',
help='A path to a source file and an alternate path to '
'the file in the generated deps file (if either contains '
'a space, surround with whitespace). This flag may be '
'specified multiple times.')
return parser
def _NormalizePathSeparators(path):
"""Replaces OS-specific path separators with POSIX-style slashes.
Args:
path: str, A file path.
Returns:
str, The path with any OS-specific path separators (such as backslash on
Windows) replaced with URL-compatible forward slashes. A no-op on systems
that use POSIX paths.
"""
return path.replace(os.sep, posixpath.sep)
def _GetRelativePathToSourceDict(root, prefix=''):
"""Scans a top root directory for .js sources.
Args:
root: str, Root directory.
prefix: str, Prefix for returned paths.
Returns:
dict, A map of relative paths (with prefix, if given), to source.Source
objects.
"""
# Remember and restore the cwd when we're done. We work from the root so
# that paths are relative from the root.
start_wd = os.getcwd()
os.chdir(root)
path_to_source = {}
for path in treescan.ScanTreeForJsFiles('.'):
prefixed_path = _NormalizePathSeparators(os.path.join(prefix, path))
path_to_source[prefixed_path] = source.Source(source.GetFileContents(path))
os.chdir(start_wd)
return path_to_source
def _GetPair(s):
"""Return a string as a shell-parsed tuple. Two values expected."""
try:
# shlex uses '\' as an escape character, so they must be escaped.
s = s.replace('\\', '\\\\')
first, second = shlex.split(s)
return (first, second)
except:
raise Exception('Unable to parse input line as a pair: %s' % s)
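
# For example (hypothetical input): _GetPair('"/my root" js/') returns
# ('/my root', 'js/'), since shlex honors the surrounding quotes.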
def main():
"""CLI frontend to MakeDepsFile."""
logging.basicConfig(format=(sys.argv[0] + ': %(message)s'),
level=logging.INFO)
options, args = _GetOptionsParser().parse_args()
path_to_source = {}
# Roots without prefixes
for root in options.roots:
path_to_source.update(_GetRelativePathToSourceDict(root))
# Roots with prefixes
for root_and_prefix in options.roots_with_prefix:
root, prefix = _GetPair(root_and_prefix)
path_to_source.update(_GetRelativePathToSourceDict(root, prefix=prefix))
# Source paths
for path in args:
path_to_source[path] = source.Source(source.GetFileContents(path))
# Source paths with alternate deps paths
for path_with_depspath in options.paths_with_depspath:
srcpath, depspath = _GetPair(path_with_depspath)
path_to_source[depspath] = source.Source(source.GetFileContents(srcpath))
# Make our output pipe.
if options.output_file:
out = open(options.output_file, 'w')
else:
out = sys.stdout
out.write('// This file was autogenerated by %s.\n' % sys.argv[0])
out.write('// Please do not edit.\n')
out.write(MakeDepsFile(path_to_source))
if __name__ == '__main__':
main()
| SOCR/HTML5_WebSite | HTML5/BrainPainter/X/lib/closure-library/closure/bin/build/depswriter.py | Python | lgpl-3.0 | 6,203 |
#! /usr/bin/env python
from openturns import *
TESTPREAMBLE()
try :
try :
        # Instantiate one distribution object
meanPoint = NumericalPoint(1)
meanPoint[0] = 1.0
sigma = NumericalPoint(1)
sigma[0] = 1.0
R = CorrelationMatrix(1)
distribution = Normal(meanPoint, sigma, R)
print "Distribution " , repr(distribution)
# We try to set an erroneous covariance matrix (wrong dimension) into distribution
newR = CorrelationMatrix(2)
distribution.setCorrelationMatrix(newR)
# Normally, we should never go here
raise
except :
pass
#except TestFailed, ex :
except :
import sys
print "t_Normal_wrongarg.py", sys.exc_type, sys.exc_value
| dbarbier/privot | python/test/t_Normal_wrongarg.py | Python | lgpl-3.0 | 760 |
"""Hypertext Transfer Protocol (HTTP)
These objects provide support for :rfc:`HTTP <2616>`.
.. seealso:: :mod:`HTTP authentication <bedframe.auth.http>`
"""
__copyright__ = "Copyright (C) 2014 Ivan D Vasin"
__docformat__ = "restructuredtext"
from ._exc import *
from ._services import *
| nisavid/bedframe | bedframe/http/__init__.py | Python | lgpl-3.0 | 292 |
########################################################################
# File name: service.py
# This file is part of: aioxmpp
#
# LICENSE
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
########################################################################
import asyncio
import logging
import aioxmpp.service
import aioxmpp.callbacks as callbacks
import aioxmpp.errors as errors
import aioxmpp.stanza as stanza
import aioxmpp.structs as structs
from . import xso as roster_xso
logger = logging.getLogger(__name__)
_Sentinel = object()
class Item:
"""
    Represent an entry in the roster. These entries are mutable; see the
documentation of :class:`Service` for details on the lifetime of
:class:`Item` instances within a :class:`Service` instance.
.. attribute:: jid
The :class:`~aioxmpp.JID` of the entry. This is always a bare
JID.
.. attribute:: name
The display name of the entry, if any.
.. attribute:: groups
A :class:`set` of names of groups in which the roster entry is.
.. attribute:: subscription
The subscription status of the entry. One of ``"none"``, ``"to"``,
``"from"`` and ``"both"`` (in contrast to :class:`.xso.Item`,
``"remove"`` cannot occur here).
.. attribute:: ask
The ``ask`` attribute of the roster entry.
.. attribute:: approved
The ``approved`` attribute of the roster entry.
The data of a roster entry can conveniently be exported to JSON:
.. automethod:: export_as_json
To mutate the roster entry, some handy methods are provided:
.. automethod:: update_from_json
.. automethod:: update_from_xso_item
To create a roster entry from a :class:`.xso.Item`, use the
:meth:`from_xso_item` class method.
.. automethod:: from_xso_item
.. note::
Do not confuse this with the XSO :class:`.xso.Item`.
"""
def __init__(self, jid, *,
approved=False,
ask=None,
subscription="none",
name=None,
groups=()):
super().__init__()
self.jid = jid
self.subscription = subscription
self.approved = approved
self.ask = ask
self.name = name
self.groups = set(groups)
def update_from_xso_item(self, xso_item):
"""
Update the attributes (except :attr:`jid`) with the values obtained
        from the given `xso_item`.
`xso_item` must be a valid :class:`.xso.Item` instance.
"""
self.subscription = xso_item.subscription
self.approved = xso_item.approved
self.ask = xso_item.ask
self.name = xso_item.name
self.groups = {group.name for group in xso_item.groups}
@classmethod
def from_xso_item(cls, xso_item):
"""
Create a :class:`Item` with the :attr:`jid` set to the
:attr:`.xso.Item.jid` obtained from `xso_item`. Then update that
instance with `xso_item` using :meth:`update_from_xso_item` and return
it.
"""
item = cls(xso_item.jid)
item.update_from_xso_item(xso_item)
return item
def export_as_json(self):
"""
Return a :mod:`json`-compatible dictionary which contains the
attributes of this :class:`Item` except its JID.
"""
result = {
"subscription": self.subscription,
}
if self.name:
result["name"] = self.name
if self.ask is not None:
result["ask"] = self.ask
if self.approved:
result["approved"] = self.approved
if self.groups:
result["groups"] = sorted(self.groups)
return result
def update_from_json(self, data):
"""
Update the attributes of this :class:`Item` using the values obtained
from the dictionary `data`.
The format of `data` should be the same as the format returned by
:meth:`export_as_json`.
"""
self.subscription = data.get("subscription", "none")
self.approved = bool(data.get("approved", False))
self.ask = data.get("ask", None)
self.name = data.get("name", None)
self.groups = set(data.get("groups", []))
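
# Example of the JSON shape produced and consumed by Item (values are made up):
#   {"subscription": "both", "name": "Alice", "groups": ["colleagues", "friends"]}
# Keys holding their defaults (ask=None, approved=False, empty groups, unset name)
# are omitted by export_as_json().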
class RosterClient(aioxmpp.service.Service):
"""
A roster client :class:`aioxmpp.service.Service`.
The interaction with a roster service happens mainly by accessing the
attributes holding the state and using the events to be notified of state
changes:
Attributes for accessing the roster:
.. attribute:: items
A dictionary mapping :class:`~aioxmpp.JID` instances to corresponding
:class:`Item` instances.
.. attribute:: groups
A dictionary which allows group-based access to :class:`Item`
        instances. The dictionary's keys are the names of the groups; the values
are :class:`set` instances, which hold the :class:`Item` instances in
that group.
At no point one can observe empty :class:`set` instances in this
dictionary.
The :class:`Item` instances stay the same, as long as they represent the
identical roster entry on the remote side. That is, if the name or
    subscription state is changed in the server-side roster, the :class:`Item`
instance stays the same, but the attributes are mutated. However, if the
entry is removed from the server roster and re-added later for the same
JID, it will be a different :class:`Item` instance.
Signals:
.. signal:: on_initial_roster_received()
Fires when the initial roster has been received. Note that if roster
versioning is used, the initial roster may not be up-to-date. The server
is allowed to tell the client to re-use its local state and deliver
changes using roster pushes. In that case, the
:meth:`on_initial_roster_received` event fires immediately, so that the
user sees whatever roster has been set up for versioning before the
stream was established; updates pushed by the server are delivered using
the normal events.
The roster data has already been imported at the time the callback is
fired.
Note that the initial roster is diffed against whatever is in the local
store and events are fired just like for normal push updates. Thus, in
general, you won’t need this signal; it might be better to listen for
the events below.
.. signal:: on_entry_added(item)
Fires when an `item` has been added to the roster. The attributes of the
`item` are up-to-date when this callback fires.
When the event fires, the bookkeeping structures are already updated.
This implies that :meth:`on_group_added` is called before
:meth:`on_entry_added` if the entry adds a new group.
.. signal:: on_entry_name_changed(item)
    Fires when a roster update changes the name of the `item`. The new name
is already applied to the `item`.
.. signal:: on_entry_subscription_state_changed(item)
Fires when a roster update changes any of the :attr:`Item.subscription`,
:attr:`Item.ask` or :attr:`Item.approved` attributes. The new values are
already applied to `item`.
The event always fires once per update, even if the update changes
more than one of the above attributes.
.. signal:: on_entry_added_to_group(item, group_name)
Fires when an update adds an `item` to a group. The :attr:`Item.groups`
attribute is already updated (not only with this, but also other group
updates, including removals) when this event is fired.
The event fires for each added group in an update, thus it may fire more
than once per update.
The name of the new group is in `group_name`.
At the time the event fires, the bookkeeping structures for the group
are already updated; this implies that :meth:`on_group_added` fires
*before* :meth:`on_entry_added_to_group` if the entry added a new group.
.. signal:: on_entry_removed_from_group(item, group_name)
Fires when an update removes an `item` from a group. The
:attr:`Item.groups` attribute is already updated (not only with this,
but also other group updates, including additions) when this event is
fired.
The event fires for each removed group in an update, thus it may fire
more than once per update.
The name of the new group is in `group_name`.
At the time the event fires, the bookkeeping structures are already
updated; this implies that :meth:`on_group_removed` fires *before*
:meth:`on_entry_removed_from_group` if the removal of an entry from a
group causes the group to vanish.
.. signal:: on_entry_removed(item)
Fires after an entry has been removed from the roster. The entry is
already removed from all bookkeeping structures, but the values on the
`item` object are the same as right before the removal.
This implies that :meth:`on_group_removed` fires *before*
:meth:`on_entry_removed` if the removal of an entry causes a group to
vanish.
.. signal:: on_group_added(group)
Fires after a new group has been added to the bookkeeping structures.
:param group: Name of the new group.
:type group: :class:`str`
At the time the event fires, the group is empty.
.. versionadded:: 0.9
.. signal:: on_group_removed(group)
Fires after a new group has been removed from the bookkeeping
structures.
:param group: Name of the old group.
:type group: :class:`str`
At the time the event fires, the group is empty.
.. versionadded:: 0.9
Modifying roster contents:
.. automethod:: set_entry
.. automethod:: remove_entry
Managing presence subscriptions:
.. automethod:: approve
.. automethod:: subscribe
.. signal:: on_subscribe(stanza)
Fires when a peer requested a subscription. The whole stanza received is
included as `stanza`.
.. seealso::
To approve a subscription request, use :meth:`approve`.
.. signal:: on_subscribed(stanza)
Fires when a peer has confirmed a previous subscription request. The
``"subscribed"`` stanza is included as `stanza`.
.. signal:: on_unsubscribe(stanza)
Fires when a peer cancelled their subscription for our presence. As per
:rfc:`6121`, the server forwards the ``"unsubscribe"`` presence stanza
(which is included as `stanza` argument) *before* sending the roster
push.
Unless your application is interested in the specific cause of a
subscription state change, it is not necessary to use this signal; the
subscription state change will be covered by
:meth:`on_entry_subscription_state_changed`.
.. signal:: on_unsubscribed(stanza)
Fires when a peer cancelled our subscription. As per :rfc:`6121`, the
server forwards the ``"unsubscribed"`` presence stanza (which is
included as `stanza` argument) *before* sending the roster push.
Unless your application is interested in the specific cause of a
subscription state change, it is not necessary to use this signal; the
subscription state change will be covered by
:meth:`on_entry_subscription_state_changed`.
Import/Export of roster data:
.. automethod:: export_as_json
.. automethod:: import_from_json
To make use of roster versioning, use the above two methods. The general
workflow is to :meth:`export_as_json` the roster after disconnecting and
store it for the next connection attempt. **Before** connecting, the
stored data needs to be loaded using :meth:`import_from_json`. This only
needs to happen after a new :class:`Service` has been created, as roster
services won’t delete roster contents between two connections on the same
:class:`.Client` instance.
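A minimal sketch of that workflow (illustrative only; ``roster`` and
``path`` are placeholders and error handling is omitted)::
    import json
    # after disconnecting:
    with open(path, "w") as f:
        json.dump(roster.export_as_json(), f)
    # before the next connect, on a freshly created service:
    with open(path) as f:
        roster.import_from_json(json.load(f))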
.. versionchanged:: 0.8
This class was formerly known as :class:`aioxmpp.roster.Service`. It
is still available under that name, but the alias will be removed in
1.0.
"""
ORDER_AFTER = [
aioxmpp.dispatcher.SimplePresenceDispatcher,
]
on_initial_roster_received = callbacks.Signal()
on_entry_name_changed = callbacks.Signal()
on_entry_subscription_state_changed = callbacks.Signal()
on_entry_removed = callbacks.Signal()
on_entry_added = callbacks.Signal()
on_entry_added_to_group = callbacks.Signal()
on_entry_removed_from_group = callbacks.Signal()
on_group_added = callbacks.Signal()
on_group_removed = callbacks.Signal()
on_subscribed = callbacks.Signal()
on_subscribe = callbacks.Signal()
on_unsubscribed = callbacks.Signal()
on_unsubscribe = callbacks.Signal()
def __init__(self, client, **kwargs):
super().__init__(client, **kwargs)
self._bse_token = client.before_stream_established.connect(
self._request_initial_roster
)
self.__roster_lock = asyncio.Lock()
self.items = {}
self.groups = {}
self.version = None
def _update_entry(self, xso_item):
try:
stored_item = self.items[xso_item.jid]
except KeyError:
stored_item = Item.from_xso_item(xso_item)
self.items[xso_item.jid] = stored_item
for group in stored_item.groups:
try:
group_members = self.groups[group]
except KeyError:
group_members = self.groups.setdefault(group, set())
self.on_group_added(group)
group_members.add(stored_item)
self.on_entry_added(stored_item)
return
to_call = []
if stored_item.name != xso_item.name:
to_call.append(self.on_entry_name_changed)
if (stored_item.subscription != xso_item.subscription or
stored_item.approved != xso_item.approved or
stored_item.ask != xso_item.ask):
to_call.append(self.on_entry_subscription_state_changed)
old_groups = set(stored_item.groups)
stored_item.update_from_xso_item(xso_item)
new_groups = set(stored_item.groups)
removed_from_groups = old_groups - new_groups
added_to_groups = new_groups - old_groups
for cb in to_call:
cb(stored_item)
for group in added_to_groups:
try:
group_members = self.groups[group]
except KeyError:
group_members = self.groups.setdefault(group, set())
self.on_group_added(group)
group_members.add(stored_item)
self.on_entry_added_to_group(stored_item, group)
for group in removed_from_groups:
groupset = self.groups[group]
groupset.remove(stored_item)
if not groupset:
del self.groups[group]
self.on_group_removed(group)
self.on_entry_removed_from_group(stored_item, group)
@aioxmpp.service.iq_handler(
aioxmpp.structs.IQType.SET,
roster_xso.Query)
async def handle_roster_push(self, iq):
if iq.from_ and iq.from_ != self.client.local_jid.bare():
raise errors.XMPPAuthError(errors.ErrorCondition.FORBIDDEN)
request = iq.payload
async with self.__roster_lock:
for item in request.items:
if item.subscription == "remove":
try:
old_item = self.items.pop(item.jid)
except KeyError:
pass
else:
self._remove_from_groups(old_item, old_item.groups)
self.on_entry_removed(old_item)
else:
self._update_entry(item)
self.version = request.ver
@aioxmpp.dispatcher.presence_handler(
aioxmpp.structs.PresenceType.SUBSCRIBE,
None)
def handle_subscribe(self, stanza):
self.on_subscribe(stanza)
@aioxmpp.dispatcher.presence_handler(
aioxmpp.structs.PresenceType.SUBSCRIBED,
None)
def handle_subscribed(self, stanza):
self.on_subscribed(stanza)
@aioxmpp.dispatcher.presence_handler(
aioxmpp.structs.PresenceType.UNSUBSCRIBED,
None)
def handle_unsubscribed(self, stanza):
self.on_unsubscribed(stanza)
@aioxmpp.dispatcher.presence_handler(
aioxmpp.structs.PresenceType.UNSUBSCRIBE,
None)
def handle_unsubscribe(self, stanza):
self.on_unsubscribe(stanza)
def _remove_from_groups(self, item_to_remove, groups):
for group in groups:
try:
group_members = self.groups[group]
except KeyError:
continue
group_members.remove(item_to_remove)
if not group_members:
del self.groups[group]
self.on_group_removed(group)
async def _request_initial_roster(self):
iq = stanza.IQ(type_=structs.IQType.GET)
iq.payload = roster_xso.Query()
async with self.__roster_lock:
logger.debug("requesting initial roster")
if self.client.stream_features.has_feature(
roster_xso.RosterVersioningFeature):
logger.debug("requesting incremental updates (old ver = %s)",
self.version)
iq.payload.ver = self.version
response = await self.client.send(
iq,
timeout=self.client.negotiation_timeout.total_seconds()
)
if response is None:
logger.debug("roster will be updated incrementally")
self.on_initial_roster_received()
return True
self.version = response.ver
logger.debug("roster update received (new ver = %s)", self.version)
actual_jids = {item.jid for item in response.items}
known_jids = set(self.items.keys())
removed_jids = known_jids - actual_jids
logger.debug("jids dropped: %r", removed_jids)
for removed_jid in removed_jids:
old_item = self.items.pop(removed_jid)
self._remove_from_groups(old_item, old_item.groups)
self.on_entry_removed(old_item)
logger.debug("jids updated: %r", actual_jids - removed_jids)
for item in response.items:
self._update_entry(item)
self.on_initial_roster_received()
return True
def export_as_json(self):
"""
Export the whole roster as currently stored on the client side into a
JSON-compatible dictionary and return that dictionary.
"""
return {
"items": {
str(jid): item.export_as_json()
for jid, item in self.items.items()
},
"ver": self.version
}
def import_from_json(self, data):
"""
Replace the current roster with the :meth:`export_as_json`-compatible
dictionary in `data`.
No events are fired during this activity. After this method completes,
the whole roster contents are replaced with the contents from `data`.
Also, no data is transferred to the server; this method is intended to
be used for roster versioning. See below (in the docs of
:class:`Service`).
"""
self.version = data.get("ver", None)
self.items.clear()
self.groups.clear()
for jid, data in data.get("items", {}).items():
jid = structs.JID.fromstr(jid)
item = Item(jid)
item.update_from_json(data)
self.items[jid] = item
for group in item.groups:
self.groups.setdefault(group, set()).add(item)
async def set_entry(self, jid, *,
name=_Sentinel,
add_to_groups=frozenset(),
remove_from_groups=frozenset(),
timeout=None):
"""
Set properties of a roster entry or add a new roster entry. The roster
entry is identified by its bare `jid`.
If an entry already exists, all values default to those stored in the
existing entry. For example, if no `name` is given, the current name of
the entry is re-used, if any.
If the entry does not exist, it will be created on the server side.
The `remove_from_groups` and `add_to_groups` arguments have to be based
on the locally cached state, as XMPP does not support sending
diffs. `remove_from_groups` takes precedence over `add_to_groups`.
`timeout` is the time in seconds to wait for a confirmation by the
server.
Note that the changes may not be visible in the :attr:`items` and
:attr:`groups` attributes immediately after this coroutine returns.
The :class:`Service` waits for the "official" roster push from the
server before updating the data structures and firing events, to
ensure that the state stays consistent with other clients.
This may raise arbitrary :class:`.errors.XMPPError` exceptions if the
server replies with an error and also any kind of connection error if
the connection gets fatally terminated while waiting for a response.
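A minimal usage sketch (the JID and group name are purely illustrative;
``roster`` stands for the service instance and the top-level
:class:`aioxmpp.JID` alias is assumed)::
    await roster.set_entry(
        aioxmpp.JID.fromstr("contact@example.example"),
        name="Some Contact",
        add_to_groups={"Friends"},
    )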
"""
existing = self.items.get(jid, Item(jid))
post_groups = (existing.groups | add_to_groups) - remove_from_groups
post_name = existing.name
if name is not _Sentinel:
post_name = name
item = roster_xso.Item(
jid=jid,
name=post_name,
groups=[
roster_xso.Group(name=group_name)
for group_name in post_groups
])
await self.client.send(
stanza.IQ(
structs.IQType.SET,
payload=roster_xso.Query(items=[
item
])
),
timeout=timeout
)
async def remove_entry(self, jid, *, timeout=None):
"""
Request removal of the roster entry identified by the given bare
`jid`. If the entry currently has any subscription state, the server
will send the corresponding unsubscribing presence stanzas.
`timeout` is the maximum time in seconds to wait for a reply from the
server.
This may raise arbitrary :class:`.errors.XMPPError` exceptions if the
server replies with an error and also any kind of connection error if
the connection gets fatally terminated while waiting for a response.
"""
await self.client.send(
stanza.IQ(
structs.IQType.SET,
payload=roster_xso.Query(items=[
roster_xso.Item(
jid=jid,
subscription="remove"
)
])
),
timeout=timeout
)
def approve(self, peer_jid):
"""
(Pre-)approve a subscription request from `peer_jid`.
:param peer_jid: The peer to (pre-)approve.
This sends a ``"subscribed"`` presence to the peer; if the peer has
previously asked for a subscription, this will seal the deal and create
the subscription.
If the peer has not requested a subscription (yet), it is marked as
pre-approved by the server. A future subscription request by the peer
will then be confirmed by the server automatically.
.. note::
Pre-approval is an OPTIONAL feature in :rfc:`6121`. It is announced
as a stream feature.
"""
self.client.enqueue(
stanza.Presence(type_=structs.PresenceType.SUBSCRIBED,
to=peer_jid)
)
def subscribe(self, peer_jid):
"""
Request presence subscription with the given `peer_jid`.
This is deliberately not a coroutine; we don’t know whether the peer is
online (usually) and they may defer the confirmation very long, if they
confirm at all. Use :meth:`on_subscribed` to get notified when a peer
accepted a subscription request.
"""
self.client.enqueue(
stanza.Presence(type_=structs.PresenceType.SUBSCRIBE,
to=peer_jid)
)
def unsubscribe(self, peer_jid):
"""
Unsubscribe from the presence of the given `peer_jid`.
"""
self.client.enqueue(
stanza.Presence(type_=structs.PresenceType.UNSUBSCRIBE,
to=peer_jid)
)
| horazont/aioxmpp | aioxmpp/roster/service.py | Python | lgpl-3.0 | 25,618 |
from dolfin import *
from dolfin_adjoint import *
parameters["adjoint"]["cache_factorizations"] = True
mesh = UnitSquareMesh(3, 3)
V = FunctionSpace(mesh, "R", 0)
test = TestFunction(V)
trial = TrialFunction(V)
def main(m):
u = interpolate(Constant(0.1), V, name="Solution")
F = inner(u*u, test)*dx - inner(m, test)*dx
solve(F == 0, u)
F = inner(sin(u)*u*u*trial, test)*dx - inner(u**4, test)*dx
solve(lhs(F) == rhs(F), u)
return u
if __name__ == "__main__":
m = interpolate(Constant(2.13), V, name="Parameter1")
u = main(m)
parameters["adjoint"]["stop_annotating"] = True
J = Functional((inner(u, u))**3*dx + inner(m, m)*dx, name="NormSquared")
Jm = assemble(inner(u, u)**3*dx + inner(m, m)*dx)
controls = [Control(m)]
dJdm = compute_gradient(J, controls, forget=None)
HJm = hessian(J, controls, warn=False)
def Jhat(m):
m = m[0] # the control is a list of length one, so Jhat will have to
# accept a list as well
u = main(m)
return assemble(inner(u, u)**3*dx + inner(m, m)*dx)
direction = [interpolate(Constant(0.1), V)]
minconv = taylor_test(Jhat, controls, Jm, dJdm, HJm=HJm,
perturbation_direction=direction)
assert minconv > 2.9
| live-clones/dolfin-adjoint | tests_dolfin/hessian_identity_list/hessian_identity_list.py | Python | lgpl-3.0 | 1,279 |
__author__ = 'soporte'
from .base import *
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': Path.join(BASE_DIR, 'db.sqlite3'),
}
}
STATIC_URL = '/static/' | xskylarx/skytube-web | CursoDjango/CursoDjango/settings/local.py | Python | lgpl-3.0 | 265 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#*/1 * * * * python /xxx/monitor.py >> /xxx/logs/monitor.log 2>&1 &
import sys
import subprocess
import os.path as op
import socket
def this_abs_path(script_name):
return op.abspath(op.join(op.dirname(__file__), script_name))
def monitor_process(key_word, cmd):
p1 = subprocess.Popen(['ps', '-ef'], stdout=subprocess.PIPE)
p2 = subprocess.Popen(['grep', key_word], stdin=p1.stdout, stdout=subprocess.PIPE)
p3 = subprocess.Popen(['grep', '-v', 'grep'], stdin=p2.stdout, stdout=subprocess.PIPE)
lines = p3.stdout.readlines()
if len(lines) > 0:
return
sys.stderr.write('process[%s] is lost, run [%s]\n' % (key_word, cmd))
subprocess.call(cmd, shell=True)
def monitor_port(protocol, port, cmd):
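# If binding to the local port succeeds, nothing is listening on it, so the
# monitored service is assumed to be down and `cmd` is run to restart it.
# A failed bind means the port is already in use and no action is taken.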
address = ('127.0.0.1', port)
socket_type = socket.SOCK_STREAM if protocol == 'tcp' else socket.SOCK_DGRAM
client = socket.socket(socket.AF_INET, socket_type)
try:
client.bind(address)
except Exception, e:
pass
else:
sys.stderr.write('port[%s-%s] is lost, run [%s]\n' % (protocol, port, cmd))
subprocess.call(cmd, shell=True)
finally:
client.close()
#=============================================================================
def yuanzhaopin():
cmd = '%s start' % this_abs_path('gun.sh')
#monitor_process('\[yuanzhaopin\]', cmd)
monitor_port('tcp', 8635, cmd)
def main():
yuanzhaopin()
if __name__ == '__main__':
main() | JamesLinus/OMMPS | ProcessMonitor.py | Python | lgpl-3.0 | 1,496 |
# -*- coding: utf-8 -*-
from . import stock_return_picking
| rfhk/awo-custom | sale_line_quant_extended/wizard/__init__.py | Python | lgpl-3.0 | 60 |
from skimage import measure
import numpy as np
import struct
import math as m
from PIL import Image
from simplify import simplify
import argparse
parser = argparse.ArgumentParser(description='convert apk heightmaps to floating point tiff')
parser.add_argument('file', type=str, help='the apk heightmap file')
args = parser.parse_args()
hdr=b'\x33\x13\x26\xc3\x33\x13\x26\x43\x02\x00\x20\xc1\x33\x13\xa1\x43'
with open(args.file, mode='rb') as file:
raw = file.read()
print(struct.unpack_from("<4xIII",raw,0x1020))
print(struct.unpack_from("<ffff",raw,0x1030))
t,w,h = struct.unpack_from("<4xIII",raw,0x1020)
e1,e2,e3,e4 = struct.unpack_from("<ffff",raw,0x1030)
dt = np.dtype("half")
dt = dt.newbyteorder('<')
img = np.frombuffer(raw,dtype=dt,offset=0x1040,count=w*h)
print (img.shape)
img = img.reshape((w,h))
imin = np.amin(img)
imax = np.amax(img)
extents = np.array((e1,e2,e3,e4))
np.savez_compressed(args.file, extents = extents, heightmap=img)
fimg = img.astype(np.float32)
fimg.reshape((w*h,1))
pimg = Image.frombytes('F',(w,h), fimg.tostring(),'raw','F;32NF')
pimg.save(args.file + ".tif")
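# hmin/hmax below linearly interpolate between the first two extent values
# (e1, e2) at the minimum/maximum normalized samples, recovering the height
# range that the half-float samples appear to encode.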
hmin = e1 * (1-imin) + e2 * imin
hmax = e1 * (1-imax) + e2 * imax
contours = []
hstep = 2.5
nc = m.ceil((hmax-hmin)/hstep)
for i in range(nc):
hgt = imin + i*hstep/(hmax-hmin)
npc = measure.find_contours(img, hgt)
cs = []
for c in npc:
c = simplify(c,5,True)
cs.append(c)
cs = np.array(cs)
contours.append(cs)
np.savez_compressed(args.file+"-contours", *contours)
# mi,ma = float(np.amin(img)),float(np.amax(img))
# print("contour",mi,ma)
# for i in range(50):
# d = float(mi*(1-i/50)+ma*i/50)
# print("contour",d)
# npc = measure.find_contours(img, d)
# for n,c in enumerate(npc):
# contours = [((x[1]-512)/1024*3499.99975586*2,(x[0]-512)/1024*3499.99975586*2) for x in c]
# if norm(c[-1] - c[0]) < 0.01:
# self.canvas.create_polygon(contours,fill="",outline='red',tag="contour")
# else:
# self.canvas.create_line(contours,fill='green',tag="contour")
# except FileNotFoundError:
# print("file not found!")
# return
# try:
# self.img = Image.open(path)
# except:
# try:
# with open(path, mode='rb') as file:
# raw = file.read()
# self.img = Image.frombytes("F",(1024,1024),raw,"raw","F;16")
# print(self.img.getpixel((4,4)))
# f = 1.0 / 2**8
# self.img = self.img.point(lambda x: x * f)
# print(self.img.getpixel((4,4)))
# self.img = self.img.resize((8192,8192))
# self.img = self.img.filter(ImageFilter.CONTOUR)
# except FileNotFoundError:
# print("file not found!")
# return
# self.ix =2*3499.99975586
# f = self.ix/2049.0
# print (f)
# #self.img = self.img.transform((int(self.ix),int(self.ix)),Image.AFFINE,data=(f,0,0,0,f,0))
# self.img = self.img.resize((int(self.ix),int(self.ix)))
# self.simg = self.img
# self.pimg = ImageTk.PhotoImage(self.img)
# self.imgcid = self.canvas.create_image(-2048, -2048, image=self.pimg, anchor=tk.NW)
| tarnheld/ted-editor | hm/apkhm.py | Python | unlicense | 3,380 |
"""
Covariance estimators using shrinkage.
Shrinkage corresponds to regularising `cov` using a convex combination:
shrunk_cov = (1-shrinkage)*cov + shrinkage*structured_estimate.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD Style.
# avoid division truncation
from __future__ import division
import warnings
import numpy as np
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance
from ..utils import array2d
###############################################################################
# ShrunkCovariance estimator
def shrunk_covariance(emp_cov, shrinkage=0.1):
"""Calculates a covariance matrix shrunk on the diagonal
Parameters
----------
emp_cov: array-like, shape (n_features, n_features)
Covariance matrix to be shrunk
shrinkage: float, 0 <= shrinkage <= 1
coefficient in the convex combination used for the computation
of the shrunk estimate.
Returns
-------
shrunk_cov: array-like
shrunk covariance
Notes
-----
The regularized (shrunk) covariance is given by
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
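For example (illustrative numbers): with emp_cov = [[1, 0], [0, 3]] and
shrinkage = 0.5, mu = (1 + 3) / 2 = 2, so the shrunk estimate is
0.5 * [[1, 0], [0, 3]] + 0.5 * 2 * np.identity(2) = [[1.5, 0], [0, 2.5]].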
"""
emp_cov = array2d(emp_cov)
n_features = emp_cov.shape[0]
mu = np.trace(emp_cov) / n_features
shrunk_cov = (1. - shrinkage) * emp_cov
shrunk_cov.flat[::n_features + 1] += shrinkage * mu
return shrunk_cov
class ShrunkCovariance(EmpiricalCovariance):
"""Covariance estimator with shrinkage
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored
shrinkage: float, 0 <= shrinkage <= 1
coefficient in the convex combination used for the computation
of the shrunk estimate.
Attributes
----------
`covariance_` : array-like, shape (n_features, n_features)
Estimated covariance matrix
`precision_` : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
`shrinkage`: float, 0 <= shrinkage <= 1
coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularized covariance is given by
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
"""
def __init__(self, store_precision=True, assume_centered=False,
shrinkage=0.1):
EmpiricalCovariance.__init__(self, store_precision=store_precision,
assume_centered=assume_centered)
self.shrinkage = shrinkage
def fit(self, X, y=None):
""" Fits the shrunk covariance model
according to the given training data and parameters.
Parameters
----------
X: array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y: not used, present for API consistence purpose.
assume_centered: Boolean
If True, data are not centered before computation.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, data are centered before computation.
Returns
-------
self: object
Returns self.
"""
# Not calling the parent object to fit, to avoid a potential
# matrix inversion when setting the precision
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(X,
assume_centered=self.assume_centered)
covariance = shrunk_covariance(covariance, self.shrinkage)
self._set_covariance(covariance)
return self
###############################################################################
# Ledoit-Wolf estimator
def ledoit_wolf_shrinkage(X, assume_centered=False, block_size=1000):
"""Estimates the shrunk Ledoit-Wolf covariance matrix.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Data from which to compute the Ledoit-Wolf shrunk covariance shrinkage
assume_centered: Boolean
If True, data are not centered before computation.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, data are centered before computation.
block_size: int,
Size of the blocks into which the covariance matrix will be split.
Returns
-------
shrinkage: float
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularized (shrunk) covariance is:
(1 - shrinkage)*cov
+ shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
"""
X = np.asarray(X)
# for only one feature, the result is the same whatever the shrinkage
if len(X.shape) == 2 and X.shape[1] == 1:
return 0.
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. " \
"You may want to reshape your data array")
n_samples = 1
n_features = X.size
else:
n_samples, n_features = X.shape
# optionally center data
if not assume_centered:
X = X - X.mean(0)
# number of blocks to split the covariance matrix into
n_splits = int(n_features / block_size)
X2 = X ** 2
emp_cov_trace = np.sum(X2, axis=0) / n_samples
mu = np.sum(emp_cov_trace) / n_features
beta_ = 0. # sum of the coefficients of <X2.T, X2>
delta_ = 0. # sum of the *squared* coefficients of <X.T, X>
# starting block computation
for i in xrange(n_splits):
for j in xrange(n_splits):
rows = slice(block_size * i, block_size * (i + 1))
cols = slice(block_size * j, block_size * (j + 1))
beta_ += np.sum(np.dot(X2.T[rows], X2[:, cols]))
delta_ += np.sum(np.dot(X.T[rows], X[:, cols]) ** 2)
rows = slice(block_size * i, block_size * (i + 1))
beta_ += np.sum(np.dot(X2.T[rows], X2[:, block_size * n_splits:]))
delta_ += np.sum(
np.dot(X.T[rows], X[:, block_size * n_splits:]) ** 2)
for j in xrange(n_splits):
cols = slice(block_size * j, block_size * (j + 1))
beta_ += np.sum(np.dot(X2.T[block_size * n_splits:], X2[:, cols]))
delta_ += np.sum(
np.dot(X.T[block_size * n_splits:], X[:, cols]) ** 2)
delta_ += np.sum(np.dot(X.T[block_size * n_splits:],
X[:, block_size * n_splits:]) ** 2)
delta_ /= n_samples ** 2
beta_ += np.sum(np.dot(
X2.T[block_size * n_splits:], X2[:, block_size * n_splits:]))
# use delta_ to compute beta
beta = 1. / (n_features * n_samples) * (beta_ / n_samples - delta_)
# delta is the sum of the squared coefficients of (<X.T,X> - mu*Id) / p
delta = delta_ - 2. * mu * emp_cov_trace.sum() + n_features * mu ** 2
delta /= n_features
# get final beta as the min between beta and delta
beta = min(beta, delta)
# finally get shrinkage
shrinkage = beta / delta
return shrinkage
def ledoit_wolf(X, assume_centered=False, block_size=1000):
"""Estimates the shrunk Ledoit-Wolf covariance matrix.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered: Boolean
If True, data are not centered before computation.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, data are centered before computation.
block_size: int,
Size of the blocks into which the covariance matrix will be split.
If n_features > `block_size`, an error will be raised since the
shrunk covariance matrix will be considered as too large regarding
the available memory.
Returns
-------
shrunk_cov: array-like, shape (n_features, n_features)
Shrunk covariance.
shrinkage: float
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularized (shrunk) covariance is:
(1 - shrinkage)*cov
+ shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
"""
X = np.asarray(X)
# for only one feature, the result is the same whatever the shrinkage
if len(X.shape) == 2 and X.shape[1] == 1:
if not assume_centered:
X = X - X.mean()
return np.atleast_2d((X ** 2).mean()), 0.
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. " \
"You may want to reshape your data array")
n_samples = 1
n_features = X.size
else:
n_samples, n_features = X.shape
if n_features > block_size:
raise MemoryError("LW: n_features is too large, " +
"try increasing block_size")
# get Ledoit-Wolf shrinkage
shrinkage = ledoit_wolf_shrinkage(
X, assume_centered=assume_centered, block_size=block_size)
emp_cov = empirical_covariance(X, assume_centered=assume_centered)
mu = np.sum(np.trace(emp_cov)) / n_features
shrunk_cov = (1. - shrinkage) * emp_cov
shrunk_cov.flat[::n_features + 1] += shrinkage * mu
return shrunk_cov, shrinkage
class LedoitWolf(EmpiricalCovariance):
"""LedoitWolf Estimator
Ledoit-Wolf is a particular form of shrinkage, where the shrinkage
coefficient is computed using O. Ledoit and M. Wolf's formula as
described in "A Well-Conditioned Estimator for Large-Dimensional
Covariance Matrices", Ledoit and Wolf, Journal of Multivariate
Analysis, Volume 88, Issue 2, February 2004, pages 365-411.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored
assume_centered: bool
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
block_size: int,
Size of the blocks into which the covariance matrix will be split
during its Ledoit-Wolf estimation.
If n_features > `block_size`, an error will be raised since the
shrunk covariance matrix will be considered as too large regarding
the available memory.
Attributes
----------
`covariance_` : array-like, shape (n_features, n_features)
Estimated covariance matrix
`precision_` : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
`shrinkage_`: float, 0 <= shrinkage <= 1
coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularised covariance is::
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
and shrinkage is given by the Ledoit and Wolf formula (see References)
References
----------
"A Well-Conditioned Estimator for Large-Dimensional Covariance Matrices",
Ledoit and Wolf, Journal of Multivariate Analysis, Volume 88, Issue 2,
February 2004, pages 365-411.
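A minimal usage sketch (random data, purely illustrative; assumes the
estimator is importable from ``sklearn.covariance`` as usual)::
    import numpy as np
    from sklearn.covariance import LedoitWolf
    X = np.random.RandomState(0).normal(size=(60, 5))
    lw = LedoitWolf().fit(X)
    print(lw.covariance_.shape)  # (5, 5) shrunk covariance estimate
    print(lw.shrinkage_)         # estimated shrinkage coefficient in [0, 1]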
"""
def __init__(self, store_precision=True, assume_centered=False,
block_size=1000):
EmpiricalCovariance.__init__(self, store_precision=store_precision,
assume_centered=assume_centered)
self.block_size = block_size
def fit(self, X, y=None):
""" Fits the Ledoit-Wolf shrunk covariance model
according to the given training data and parameters.
Parameters
----------
X: array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y: not used, present for API consistence purpose.
Returns
-------
self: object
Returns self.
"""
# Not calling the parent object to fit, to avoid computing the
# covariance matrix (and potentially the precision)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance, shrinkage = ledoit_wolf(X - self.location_,
assume_centered=True, block_size=self.block_size)
self.shrinkage_ = shrinkage
self._set_covariance(covariance)
return self
###############################################################################
# OAS estimator
def oas(X, assume_centered=False):
"""Estimate covariance with the Oracle Approximating Shrinkage algorithm.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered: boolean
If True, data are not centered before computation.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, data are centered before computation.
Returns
-------
shrunk_cov: array-like, shape (n_features, n_features)
Shrunk covariance
shrinkage: float
coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularised (shrunk) covariance is:
(1 - shrinkage)*cov
+ shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
The formula we used to implement the OAS
does not correspond to the one given in the article. It has been taken
from the MATLAB program available from the author's webpage
(https://tbayes.eecs.umich.edu/yilun/covestimation).
"""
X = np.asarray(X)
# for only one feature, the result is the same whatever the shrinkage
if len(X.shape) == 2 and X.shape[1] == 1:
if not assume_centered:
X = X - X.mean()
return np.atleast_2d((X ** 2).mean()), 0.
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. " \
"You may want to reshape your data array")
n_samples = 1
n_features = X.size
else:
n_samples, n_features = X.shape
emp_cov = empirical_covariance(X, assume_centered=assume_centered)
mu = np.trace(emp_cov) / n_features
# formula from Chen et al.'s **implementation**
alpha = np.mean(emp_cov ** 2)
num = alpha + mu ** 2
den = (n_samples + 1.) * (alpha - (mu ** 2) / n_features)
shrinkage = min(num / den, 1.)
shrunk_cov = (1. - shrinkage) * emp_cov
shrunk_cov.flat[::n_features + 1] += shrinkage * mu
return shrunk_cov, shrinkage
class OAS(EmpiricalCovariance):
"""
Oracle Approximating Shrinkage Estimator
OAS is a particular form of shrinkage described in
"Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
The formula used here does not correspond to the one given in the
article. It has been taken from the Matlab program available from the
authors' webpage (https://tbayes.eecs.umich.edu/yilun/covestimation).
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered: bool
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
`covariance_` : array-like, shape (n_features, n_features)
Estimated covariance matrix
`precision_` : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
`shrinkage_`: float, 0 <= shrinkage <= 1
coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularised covariance is::
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
and shrinkage is given by the OAS formula (see References)
References
----------
"Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
def fit(self, X, y=None):
""" Fits the Oracle Approximating Shrinkage covariance model
according to the given training data and parameters.
Parameters
----------
X: array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y: not used, present for API consistence purpose.
Returns
-------
self: object
Returns self.
"""
# Not calling the parent object to fit, to avoid computing the
# covariance matrix (and potentially the precision)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance, shrinkage = oas(X - self.location_, assume_centered=True)
self.shrinkage_ = shrinkage
self._set_covariance(covariance)
return self
| seckcoder/lang-learn | python/sklearn/sklearn/covariance/shrunk_covariance_.py | Python | unlicense | 17,960 |
#!/usr/bin/env python
def load_velocity(filename):
import os
if not os.path.exists(filename):
return None
from numpy import zeros
from vtk import vtkPolyDataReader, vtkCellDataToPointData
reader = vtkPolyDataReader()
reader.SetFileName(filename)
reader.ReadAllVectorsOn()
reader.Update()
data = reader.GetOutput()
# Extracting triangulation information
triangles = data.GetPolys().GetData()
points = data.GetPoints()
# Mapping data: cell -> point
mapper = vtkCellDataToPointData()
mapper.AddInputData(data)
mapper.Update()
mapped_data = mapper.GetOutput()
# Extracting interpolate point data
udata = mapped_data.GetPointData().GetArray(0)
ntri = triangles.GetNumberOfTuples()/4
npts = points.GetNumberOfPoints()
nvls = udata.GetNumberOfTuples()
tri = zeros((ntri, 3))
x = zeros(npts)
y = zeros(npts)
ux = zeros(nvls)
uy = zeros(nvls)
for i in xrange(0, ntri):
tri[i, 0] = triangles.GetTuple(4*i + 1)[0]
tri[i, 1] = triangles.GetTuple(4*i + 2)[0]
tri[i, 2] = triangles.GetTuple(4*i + 3)[0]
for i in xrange(npts):
pt = points.GetPoint(i)
x[i] = pt[0]
y[i] = pt[1]
for i in xrange(0, nvls):
U = udata.GetTuple(i)
ux[i] = U[0]
uy[i] = U[1]
return (x, y, tri, ux, uy)
def plot(filename):
import os
from matplotlib.pyplot import clf, tricontour, tricontourf, \
gca, savefig, rc, minorticks_on
if not os.path.exists(filename):
return -1
rc('text', usetex=True)
clf()
x, y, tri, ux, uy = load_velocity(filename)
tricontourf(x, y, tri, ux, 16)
tricontour(x, y, tri, ux, 16, linestyles='-',
colors='black', linewidths=0.5)
minorticks_on()
gca().set_aspect('equal')
gca().tick_params(direction='out', which='both')
gca().set_xticklabels([])
gca().set_yticklabels([])
name, _ = os.path.splitext(filename)
name = os.path.basename(name)
savefig('{0}.png'.format(name), dpi=300, bbox_inches='tight')
savefig('{0}.pdf'.format(name), bbox_inches='tight')
if __name__ == '__main__':
import sys
if len(sys.argv) < 2:
print 'usage: {0} [FILENAME]'.format(sys.argv[0])
sys.exit(-1)
sys.exit(plot(sys.argv[1]))
| mrklein/vtk-plot | plot-vtk.py | Python | unlicense | 2,344 |
from django.shortcuts import render, redirect, get_object_or_404
from django.views.generic import TemplateView
from farxiv.forms import *
class Index(TemplateView):
template_name = 'index.html'
def get_context_data(self, **kwargs):
context = super(TemplateView, self).get_context_data(**kwargs)
context['user']=self.request.user
return context
class SubmitFarticle(TemplateView):
template_name = 'submit.html'
def get_context_data(self, **kwargs):
context = super(TemplateView, self).get_context_data(**kwargs)
context['form'] = FarticleForm(instance=self.request.user)
return context
class ViewFarticle(TemplateView):
template_name='view.html'
def get_context_data(self, **kwargs):
context = super(TemplateView, self).get_context_data(**kwargs)
farticle_id = self.kwargs['fid']
farticle = Farticle.objects.get(id = farticle_id)
context['author'] = farticle.author
context['title'] = farticle.title
if farticle.render_type=="quick":
# Does this actually work? Doubt it...
template_name='view.html'
farticle = QuickFarticle.objects.get(id = farticle_id)
context['steps'] = farticle.steps
context['problems'] = farticle.problems
context['suggestions'] = farticle.suggestions
return context
else:
return context
class ViewExampleFarticle(TemplateView):
template_name='farticle.html'
| ghallsimpsons/farxiv | farxiv/views.py | Python | unlicense | 1,516 |
data = [set(open(i).read().split()) for i in ('C:\\Users\\Aliwka\\Desktop\\ДЗ-курсы\\Homework6\\first.txt', 'C:\\Users\\Aliwka\\Desktop\\ДЗ-курсы\\Homework6\\second.txt')]
diff = data[0].difference(data[1])
if diff:
print(diff, 'слова которые есть в первом файле, но нет во втором')
print(data[1],data[0],'слова из обоих файлов')
| Torkvamedo/smx | Homework/lesson 6/second.py | Python | unlicense | 414 |
import sys
sys.path.append('../..')
import codestudio
a = codestudio.load('s1level32')
a.speed = 'fastest'
for count in range(360):
a.move_backward(1)
a.left(1)
a.check()
| skilstak/code-dot-org-python | solutions/stage05-artist1/s1level32b.py | Python | unlicense | 181 |
from setuptools import setup
install_requires = [
'requests',
'beautifulsoup4'
]
setup(
name='urbandefinition',
install_requires=install_requires,
version=0.3,
description='Get Urban Dictionary definitions from the terminal',
author='Asad Dhamani',
author_email='dhamaniasad+code@gmail.com',
url='https://github.com/dhamaniasad/urbandefinition',
license='Unlicense',
py_modules=['urbandefinition'],
entry_points={
'console_scripts': [
'urban = urbandefinition:command_line_runner'
]
}
) | dhamaniasad/urbandefinition | setup.py | Python | unlicense | 568 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# XXX: FabricRunner import depends on config being setup.
import st2tests.config as tests_config
tests_config.parse_args()
import mock
from unittest2 import TestCase
from st2actions.runners.fabric_runner import BaseFabricRunner
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED, LIVEACTION_STATUS_FAILED
from st2common.models.system.action import RemoteScriptAction
from st2common.models.system.action import FabricRemoteScriptAction
class FabricRunner(BaseFabricRunner):
def run(self):
pass
class FabricRunnerTestCase(TestCase):
def test_get_env_vars(self):
runner = FabricRunner('id')
env_vars = {'key1': 'val1', 'key2': 'val2'}
env_vars.update(runner._get_common_action_env_variables())
runner.runner_parameters = {'hosts': 'localhost', 'env': env_vars}
# This is awful, context is just set at some point, no idea when and
# where MOVE IT TO CONSTRUCTOR!11
runner.context = {}
runner.pre_run()
actual_env_vars = runner._get_env_vars()
self.assertEqual(actual_env_vars, env_vars)
class TestFabricRunnerResultStatus(TestCase):
def test_pf_ok_all_success(self):
result = {
'1': {'succeeded': True},
'2': {'succeeded': True},
'3': {'succeeded': True},
}
self.assertEquals(LIVEACTION_STATUS_SUCCEEDED,
FabricRunner._get_result_status(result, True))
def test_pf_ok_some_success(self):
result = {
'1': {'succeeded': False},
'2': {'succeeded': True},
'3': {'succeeded': False},
}
self.assertEquals(LIVEACTION_STATUS_SUCCEEDED,
FabricRunner._get_result_status(result, True))
result = {
'1': {'succeeded': True},
'2': {'succeeded': False},
'3': {'succeeded': False},
}
self.assertEquals(LIVEACTION_STATUS_SUCCEEDED,
FabricRunner._get_result_status(result, True))
result = {
'1': {'succeeded': False},
'2': {'succeeded': False},
'3': {'succeeded': True},
}
self.assertEquals(LIVEACTION_STATUS_SUCCEEDED,
FabricRunner._get_result_status(result, True))
def test_pf_ok_all_fail(self):
result = {
'1': {'succeeded': False},
'2': {'succeeded': False},
'3': {'succeeded': False},
}
self.assertEquals(LIVEACTION_STATUS_FAILED,
FabricRunner._get_result_status(result, True))
def test_pf_not_ok_all_success(self):
result = {
'1': {'succeeded': True},
'2': {'succeeded': True},
'3': {'succeeded': True},
}
self.assertEquals(LIVEACTION_STATUS_SUCCEEDED,
FabricRunner._get_result_status(result, False))
def test_pf_not_ok_some_success(self):
result = {
'1': {'succeeded': False},
'2': {'succeeded': True},
'3': {'succeeded': False},
}
self.assertEquals(LIVEACTION_STATUS_FAILED,
FabricRunner._get_result_status(result, False))
result = {
'1': {'succeeded': True},
'2': {'succeeded': False},
'3': {'succeeded': False},
}
self.assertEquals(LIVEACTION_STATUS_FAILED,
FabricRunner._get_result_status(result, False))
result = {
'1': {'succeeded': False},
'2': {'succeeded': False},
'3': {'succeeded': True},
}
self.assertEquals(LIVEACTION_STATUS_FAILED,
FabricRunner._get_result_status(result, False))
def test_pf_not_ok_all_fail(self):
result = {
'1': {'succeeded': False},
'2': {'succeeded': False},
'3': {'succeeded': False},
}
self.assertEquals(LIVEACTION_STATUS_FAILED,
FabricRunner._get_result_status(result, False))
class RemoteScriptActionTestCase(TestCase):
def test_parameter_formatting(self):
# Only named args
named_args = {'--foo1': 'bar1', '--foo2': 'bar2', '--foo3': True,
'--foo4': False}
action = RemoteScriptAction(name='foo', action_exec_id='dummy',
script_local_path_abs='test.py',
script_local_libs_path_abs='/',
remote_dir='/tmp',
named_args=named_args, positional_args=None)
self.assertEqual(action.command, '/tmp/test.py --foo1=bar1 --foo2=bar2 --foo3')
class FabricRemoteScriptActionTestCase(TestCase):
@mock.patch('st2common.models.system.action.run')
@mock.patch('st2common.models.system.action.put')
@mock.patch('st2common.models.system.action.shell_env')
@mock.patch('st2common.models.system.action.settings')
def test_settings_are_used(self, mock_settings, mock_shell_env, mock_put, mock_run):
# Test that the remote script action uses fabric environment and authentication settings
named_args = {}
action = FabricRemoteScriptAction(name='foo', action_exec_id='dummy',
script_local_path_abs='test.py',
script_local_libs_path_abs='/',
remote_dir='/tmp',
named_args=named_args, positional_args=None)
task = action.get_fabric_task()
self.assertEqual(mock_settings.call_count, 0)
self.assertEqual(mock_shell_env.call_count, 0)
task.run()
self.assertEqual(mock_settings.call_count, 1)
self.assertEqual(mock_shell_env.call_count, 1)
| grengojbo/st2 | st2actions/tests/unit/test_remote_runners.py | Python | apache-2.0 | 6,700 |
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Supported pricing models."""
import enum
from typing import Any
import dataclasses
class InterestRateModelType(enum.Enum):
"""Models for pricing interest rate derivatives.
LOGNORMAL_RATE: Lognormal model for the underlying rate.
NORMAL_RATE: Normal model for the underlying rate
LOGNORMAL_SMILE_CONSISTENT_REPLICATION: Smile consistent replication
(lognormal vols).
NORMAL_SMILE_CONSISTENT_REPLICATION: Smile consistent replication
(normal vols).
HULL_WHITE_ONE_FACTOR: Hull-White single factor model of short rate.
"""
LOGNORMAL_RATE = 1
NORMAL_RATE = 2
LOGNORMAL_SMILE_CONSISTENT_REPLICATION = 3
NORMAL_SMILE_CONSISTENT_REPLICATION = 4
HULL_WHITE_ONE_FACTOR = 5
@dataclasses.dataclass(frozen=True)
class HullWhite1FactorConfig:
mean_reversion: Any
volatility: Any
__all__ = ["InterestRateModelType", "HullWhite1FactorConfig"]
| google/tf-quant-finance | tf_quant_finance/experimental/pricing_platform/framework/core/models.py | Python | apache-2.0 | 1,474 |
#!/usr/bin/env python
# Copyright 2016 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A CLI to import whisper data into Cassandra."""
from __future__ import print_function
import argparse
import datetime
import io
import logging
import multiprocessing
import os
import re
import struct
import sys
import time
from multiprocessing import dummy as multiprocessing_dummy
import progressbar
import scandir
import whisper
from biggraphite import accessor_factory as bg_accessor_factory
from biggraphite import metric as bg_metric
from biggraphite import settings as bg_settings
from biggraphite import utils as bg_utils
from biggraphite.cli import command
_DEV_NULL = open(os.devnull, "w")
_POINT_STRUCT = struct.Struct(whisper.pointFormat)
_WORKER = None
log = logging.getLogger(__name__)
def metric_name_from_wsp(root_dir, prefix, wsp_path):
"""Return the name of a metric given a wsp file path and a root directory.
The path do not have to exist.
Args:
root_dir: A directory that is parent to all metrics.
prefix: Prefix to preprend to metric names.
wsp_path: The name of a file ending with wsp file in root_dir.
Returns:
The metric name.
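Example (purely illustrative paths):
    metric_name_from_wsp("/data/whisper", "bg.", "/data/whisper/foo/bar.wsp")
    returns "bg.foo.bar".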
"""
relpath = os.path.relpath(wsp_path, root_dir)
assert ".." not in relpath, "%s not a child of %s" % (root_dir, wsp_path)
relpath_noext = os.path.splitext(relpath)[0]
return prefix + relpath_noext.replace(os.path.sep, ".")
class _Worker(object):
def __init__(self, opts):
settings = bg_settings.settings_from_args(opts)
bg_utils.set_log_level(settings)
self._accessor = bg_accessor_factory.accessor_from_settings(settings)
self._opts = opts
self.time_start = time.mktime(self._opts.time_start.timetuple())
self.time_end = time.mktime(self._opts.time_end.timetuple())
@staticmethod
def _read_metadata(metric_name, path):
info = whisper.info(path)
if not info:
return None
retentions = bg_metric.Retention(
[
bg_metric.Stage(precision=a["secondsPerPoint"], points=a["points"])
for a in info["archives"]
]
)
aggregator = bg_metric.Aggregator.from_carbon_name(info["aggregationMethod"])
return bg_metric.MetricMetadata.create(
aggregator=aggregator,
retention=retentions,
carbon_xfilesfactor=info["xFilesFactor"],
)
def _read_points(self, path):
"""Return a list of (timestamp, value)."""
info = whisper.info(path)
res = []
if not info:
return []
archives = info["archives"]
with io.open(path, "rb") as f:
buf = f.read()
stage0 = True
for archive in archives:
offset = archive["offset"]
stage = bg_metric.Stage(
precision=archive["secondsPerPoint"],
points=archive["points"],
stage0=stage0,
)
stage0 = False
if stage in self._opts.ignored_stages:
continue
for _ in range(archive["points"]):
timestamp, value = _POINT_STRUCT.unpack_from(buf, offset)
offset += whisper.pointSize
if timestamp == 0:
continue
elif timestamp >= self.time_start and timestamp <= self.time_end:
res.append((timestamp, value, 1, stage))
return res
def import_whisper(self, path):
if not self._accessor.is_connected:
self._accessor.connect()
name = metric_name_from_wsp(self._opts.root_directory, self._opts.prefix, path)
metadata = self._read_metadata(name, path)
log.debug("%s: %s" % (name, metadata.as_string_dict()))
if not metadata:
return 0
metric = bg_metric.make_metric_with_defaults(name, metadata)
if not self._opts.no_metadata:
self._accessor.create_metric(metric)
ret = 0
if not self._opts.no_data:
points = self._read_points(path)
self._accessor.insert_downsampled_points(metric, points)
ret = len(points)
return ret
def _setup_process(opts):
global _WORKER
_WORKER = _Worker(opts)
def _import_whisper(*args, **kwargs):
assert _WORKER is not None, "_setup_process was never called"
try:
return _WORKER.import_whisper(*args, **kwargs)
except Exception as e:
log.exception(e)
return 0
def _parse_opts(args):
parser = argparse.ArgumentParser(
description="Import whisper files into BigGraphite."
)
parser.add_argument(
"root_directory",
metavar="WHISPER_DIR",
help="directory in which to find whisper files",
)
parser.add_argument(
"--filter",
type=str,
default=r".*\.wsp",
help="Only import metrics matching this filter",
)
parser.add_argument(
"--prefix",
metavar="WHISPER_PREFIX",
default="",
help="prefix to prepend to metric names",
)
parser.add_argument(
"--quiet",
action="store_const",
default=False,
const=True,
help="Show no output unless there are problems.",
)
parser.add_argument(
"--process",
metavar="N",
type=int,
help="number of concurrent process",
default=multiprocessing.cpu_count(),
)
parser.add_argument(
"--no-data", action="store_true", help="Do not import data, only metadata."
)
parser.add_argument(
"--no-metadata", action="store_true", help="Do not import metadata, only data."
)
parser.add_argument(
"--ignored_stages",
nargs="*",
help="Do not import data for these stages.",
default=[],
)
parser.add_argument(
"--time-start",
action=command.ParseDateTimeArg,
help="Read points written later than this time.",
default=datetime.datetime.fromtimestamp(0),
required=False,
)
parser.add_argument(
"--time-end",
action=command.ParseDateTimeArg,
help="Read points written earlier than this time.",
default=datetime.datetime.now(),
required=False,
)
bg_settings.add_argparse_arguments(parser)
opts = parser.parse_args(args)
opts.ignored_stages = [bg_metric.Stage.from_string(s) for s in opts.ignored_stages]
return opts
# TODO: put that in a thread.
class _Walker:
def __init__(self, root_directory, regexp):
self.count = 0
self.root_directory = root_directory
self.regexp = re.compile(regexp)
def paths(self, root=None):
root = root or self.root_directory
for entry in scandir.scandir(root):
if entry.is_dir():
for filename in self.paths(entry.path):
yield filename
elif self.regexp.match(os.path.join(root, entry.name)):
self.count += 1
yield os.path.join(root, entry.name)
def main(args=None):
"""Entry point for the module."""
if not args:
args = sys.argv[1:]
opts = _parse_opts(args)
pool_factory = multiprocessing.Pool
if opts.process == 1:
pool_factory = multiprocessing_dummy.Pool
pool = pool_factory(opts.process, initializer=_setup_process, initargs=(opts,))
out_fd = sys.stderr
if opts.quiet:
out_fd = _DEV_NULL
if "__pypy__" not in sys.builtin_module_names:
print("Running without PyPy, this is about 20 times slower", file=out_fd)
out_fd.flush()
walker = _Walker(opts.root_directory, opts.filter)
paths = walker.paths()
total_points = 0
max_value = progressbar.UnknownLength
with progressbar.ProgressBar(
max_value=max_value, fd=out_fd, redirect_stderr=True
) as pbar:
try:
res = pool.imap_unordered(_import_whisper, paths)
for n_path, n_points in enumerate(res):
total_points += n_points
pbar.update(n_path)
except KeyboardInterrupt:
pool.terminate()
pool.close()
pool.join()
print(
"Uploaded",
walker.count,
"metrics containing",
total_points,
"points",
file=out_fd,
)
if __name__ == "__main__":
main()
| criteo/biggraphite | biggraphite/cli/import_whisper.py | Python | apache-2.0 | 8,945 |
##
# Copyright (c) 2011-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
"""
This module implements a reactor wrapper which will cause all traffic on
connections set up using that reactor to be logged.
"""
__all__ = ['loggedReactor']
from weakref import ref
from StringIO import StringIO
from collections import namedtuple
from zope.interface import providedBy
from twisted.python.components import proxyForInterface
from twisted.internet.interfaces import IReactorTCP
from twisted.protocols.policies import WrappingFactory, TrafficLoggingProtocol
logstate = namedtuple('logstate', 'active finished')
def loggedReactor(reactor):
"""
Construct and return a wrapper around the given C{reactor} which provides
all of the same interfaces, but which will log all traffic over outgoing
TCP connections it establishes.
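A minimal usage sketch (C{factory} stands for any client factory; the
returned wrapper exposes C{getLogFiles} only when the wrapped reactor
provides L{IReactorTCP}, as the default reactor does)::
    from twisted.internet import reactor
    logged = loggedReactor(reactor)
    logged.connectTCP("example.com", 80, factory)
    active_logs, finished_logs = logged.getLogFiles()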
"""
bases = []
for iface in providedBy(reactor):
if iface is IReactorTCP:
bases.append(_TCPTrafficLoggingReactor)
else:
bases.append(proxyForInterface(iface, '_reactor'))
if bases:
return type('(Logged Reactor)', tuple(bases), {})(reactor)
return reactor
class _TCPTrafficLoggingReactor(proxyForInterface(IReactorTCP, '_reactor')):
"""
A mixin for a reactor wrapper which defines C{connectTCP} so as to cause
traffic to be logged.
"""
_factories = None
@property
def factories(self):
if self._factories is None:
self._factories = []
return self._factories
def getLogFiles(self):
active = []
finished = []
for factoryref in self.factories:
factory = factoryref()
active.extend(factory.logs)
finished.extend(factory.finishedLogs)
return logstate(active, finished)
def connectTCP(self, host, port, factory, *args, **kwargs):
wrapper = _TrafficLoggingFactory(factory)
self.factories.append(ref(wrapper, self.factories.remove))
return self._reactor.connectTCP(
host, port, wrapper, *args, **kwargs)
class _TrafficLoggingFactory(WrappingFactory):
"""
A wrapping factory which applies L{TrafficLoggingProtocolWrapper}.
"""
LOGFILE_LIMIT = 20
protocol = TrafficLoggingProtocol
noisy = False
def __init__(self, wrappedFactory):
WrappingFactory.__init__(self, wrappedFactory)
self.logs = []
self.finishedLogs = []
def unregisterProtocol(self, protocol):
WrappingFactory.unregisterProtocol(self, protocol)
self.logs.remove(protocol.logfile)
self.finishedLogs.append(protocol.logfile)
del self.finishedLogs[:-self.LOGFILE_LIMIT]
def buildProtocol(self, addr):
logfile = StringIO()
self.logs.append(logfile)
return self.protocol(
self, self.wrappedFactory.buildProtocol(addr), logfile, None, 0)
| red-hood/calendarserver | contrib/performance/loadtest/trafficlogger.py | Python | apache-2.0 | 3,436 |