Search is not available for this dataset
text stringlengths 75 104k |
|---|
def set_node(self, dst, src):
"""
Basically copy one node to another.
usefull to transmit a node from a terminal
rule as result of the current rule.
example::
R = [
In : node #set(_, node)
]
here the node return by the rule In is
also the node return by the rule R
"""
# Copy the payload: plain values are stored on ``dst.value``; Node
# instances are copied through ``dst.set``.
if not isinstance(src, Node):
dst.value = src
else:
dst.set(src)
# Cache bookkeeping is keyed on object identity.
idsrc = id(src)
iddst = id(dst)
# NOTE(review): these prints look like leftover debug output -- confirm
# whether they should be removed or routed through a logger.
if iddst not in self.id_cache:
print("DST: %s" % repr(dst))
print("RULE_NODES %s" % repr(self.rule_nodes))
print("IDCACHE %s" % repr(self.id_cache))
# Mirror the source's tag-cache entry onto the destination's key.
# NOTE(review): the ``self.id_cache[iddst]`` lookup assumes iddst is
# cached; the debug branch above suggests this can fail -- confirm.
if idsrc in self.id_cache:
k = self.id_cache[idsrc]
k2 = self.id_cache[iddst]
if k in self.rule_nodes:
self.tag_cache[k2] = self.tag_cache[k]
return True |
def set_node_as_int(self, dst, src):
    """
    Set a node to a value captured from another node
    example::
        R = [
            In : node #setcapture(_, node)
        ]
    """
    # Store the converted value of ``src`` on the destination node and
    # report success to the rule engine.
    dst.value = self.value(src)
    return True
def get_subnode(self, dst, ast, expr):
    """
    get the value of subnode
    example::
        R = [
            __scope__:big getsomethingbig:>big
            #get(_, big, '.val') // copy big.val into _
        ]
    """
    # SECURITY NOTE: ``expr`` is evaluated with eval() against the local
    # ``ast`` binding; it must only ever come from trusted grammar
    # definitions, never from untrusted input.
    dst.value = eval('ast' + expr)
    return True
def default_serializer(o):
    """Default serializer for json.

    :param o: object that ``json.dumps`` could not serialize natively.
    :returns: an ISO string for dates/times, a UTC timestamp for
        datetimes, or ``None`` for unsupported types (serialized as
        JSON ``null``).
    """
    # BUGFIX: datetime.datetime is a subclass of datetime.date, so the
    # datetime check must come first; otherwise datetimes were serialized
    # via isoformat() and dt2utc_timestamp() was never reached.
    defs = (
        ((datetime.datetime, ),
         lambda x: dt2utc_timestamp(x), ),
        ((datetime.date, datetime.time),
         lambda x: x.isoformat(), ),
    )
    for types, fun in defs:
        if isinstance(o, types):
            return fun(o)
def _get_depositions(user=None, type=None):
"""Get list of depositions (as iterator).
This is redefined Deposition.get_depositions classmethod without order-by
for better performance.
"""
from invenio.modules.workflows.models import BibWorkflowObject, Workflow
from invenio.modules.deposit.models import InvalidDepositionType
from flask import current_app
from invenio.ext.sqlalchemy import db
from invenio.modules.deposit.models import Deposition
# Base filter: only 'webdeposit' workflows.
params = [
Workflow.module_name == 'webdeposit',
]
# Restrict to the given user, or to any non-anonymous (id_user != 0) owner.
if user:
params.append(BibWorkflowObject.id_user == user.get_id())
else:
params.append(BibWorkflowObject.id_user != 0)
if type:
params.append(Workflow.name == type.get_identifier())
# Eager-load the joined workflow to avoid a query per object.
objects = BibWorkflowObject.query.join("workflow").options(
db.contains_eager('workflow')).filter(*params)
# Wrap a workflow object into a Deposition; returns None for objects with
# an invalid deposition type or a type other than the requested one.
def _create_obj(o):
try:
obj = Deposition(o)
except InvalidDepositionType as err:
current_app.logger.exception(err)
return None
if type is None or obj.type == type:
return obj
return None
# Generator: build depositions lazily and drop the unconvertible ones.
def mapper_filter(objs):
for o in objs:
o = _create_obj(o)
if o is not None:
yield o
return mapper_filter(objects) |
def get(query, from_date, limit=0, **kwargs):
    """Get deposits."""
    deposits = _get_depositions()
    # Counting all depositions is expensive, so report a placeholder total
    # unless a limit caps the iteration.
    total = 1
    if limit > 0:
        deposits = islice(deposits, limit)
        total = limit
    return total, deposits
def dump(deposition, from_date, with_json=True, latest_only=False, **kwargs):
    """Dump the deposition object as dictionary."""
    # Round-trip through JSON so non-primitive values are normalised by
    # default_serializer before the extra '_p' metadata is attached.
    serialized = json.dumps(deposition.__getstate__(),
                            default=default_serializer)
    result = json.loads(serialized)
    result['_p'] = {
        'id': deposition.id,
        'created': dt2utc_timestamp(deposition.created),
        'modified': dt2utc_timestamp(deposition.modified),
        'user_id': deposition.user_id,
        'state': deposition.state,
        'has_sip': deposition.has_sip(),
        'submitted': deposition.submitted,
    }
    return result
def _get_recids_invenio12(from_date):
    """Get BibDocs for Invenio 1."""
    from invenio.dbquery import run_sql
    # Records whose attached BibDocs changed since from_date.
    rows = run_sql(
        'select id_bibrec from '
        'bibrec_bibdoc as r join bibdoc as d on r.id_bibdoc=d.id '
        'where d.modification_date >=%s',
        (from_date, ), run_on_slave=True)
    return (row[0] for row in rows)
def _get_recids_invenio2(from_date):
    """Get BibDocs for Invenio 2."""
    from invenio.legacy.dbquery import run_sql
    # Same query as the Invenio 1 variant, but from the legacy module path.
    rows = run_sql(
        'select id_bibrec from '
        'bibrec_bibdoc as r join bibdoc as d on r.id_bibdoc=d.id '
        'where d.modification_date >=%s',
        (from_date, ), run_on_slave=True)
    return (row[0] for row in rows)
def _import_bibdoc():
"""Import BibDocFile."""
# Invenio 1.x exposes bibdocfile at the top level; Invenio 2.x moved it
# under invenio.legacy, hence the ImportError fallback.
try:
from invenio.bibdocfile import BibRecDocs, BibDoc
except ImportError:
from invenio.legacy.bibdocfile.api import BibRecDocs, BibDoc
return BibRecDocs, BibDoc |
def dump_bibdoc(recid, from_date, **kwargs):
"""Dump all BibDoc metadata.
:param docid: BibDoc ID
:param from_date: Dump only BibDoc revisions newer than this date.
:returns: List of version of the BibDoc formatted as a dict
"""
# NOTE(review): the docstring documents ``docid`` but the parameter is
# actually ``recid`` (a record id).
BibRecDocs, BibDoc = _import_bibdoc()
bibdocfile_dump = []
# ``from_date`` arrives as a 'YYYY-MM-DD HH:MM:SS' string.
date = datetime.datetime.strptime(from_date, '%Y-%m-%d %H:%M:%S')
# Walk every file of every version of every BibDoc attached to the record.
for bibdoc in BibRecDocs(recid).list_bibdocs():
for version in bibdoc.list_versions():
bibdoc_version = bibdoc.list_version_files(version)
for f in bibdoc_version:
if f.is_icon() or f.md < date:
# Don't care about icons
# Don't care about files not modified since from_date
continue
# Flatten the file into a plain dict; get_copyright/get_license only
# exist on some BibDocFile versions, hence the hasattr guards.
bibdocfile_dump.append(dict(
bibdocid=f.get_bibdocid(),
checksum=f.get_checksum(),
comment=f.get_comment(),
copyright=(
f.get_copyright() if hasattr(f, 'get_copyright')
else None),
creation_date=datetime_toutc(f.cd).isoformat(),
description=f.get_description(),
encoding=f.encoding,
etag=f.etag,
flags=f.flags,
format=f.get_format(),
full_name=f.get_full_name(),
full_path=f.get_full_path(),
hidden=f.hidden,
license=(
f.get_license()if hasattr(f, 'get_license') else None),
modification_date=datetime_toutc(f.md).isoformat(),
name=f.get_name(),
mime=f.mime,
path=f.get_path(),
recid=f.get_recid(),
recids_doctype=f.recids_doctypes,
size=f.get_size(),
status=f.get_status(),
subformat=f.get_subformat(),
superformat=f.get_superformat(),
type=f.get_type(),
url=f.get_url(),
version=f.get_version(),
))
return bibdocfile_dump |
def get_check():
    """Get bibdocs to check."""
    try:
        from invenio.dbquery import run_sql
    except ImportError:
        from invenio.legacy.dbquery import run_sql
    # Return the total count plus the full list of BibDoc ids.
    total = run_sql('select count(id) from bibdoc', run_on_slave=True)[0][0]
    ids = [row[0] for row in
           run_sql('select id from bibdoc', run_on_slave=True)]
    return total, ids
def check(id_):
    """Check bibdocs."""
    _, BibDoc = _import_bibdoc()
    try:
        # Listing all files forces a full integrity walk of the BibDoc.
        BibDoc(id_).list_all_files()
    except Exception:
        click.secho("BibDoc {0} failed check.".format(id_), fg='red')
def dump(obj, from_date, with_json=True, latest_only=False, **kwargs):
    """Dump the oauth2server tokens."""
    # 'expires' goes through dt2iso_or_empty -- presumably an ISO string,
    # or an empty value when unset.
    return {
        'id': obj.id,
        'client_id': obj.client_id,
        'user_id': obj.user_id,
        'token_type': obj.token_type,
        'access_token': obj.access_token,
        'refresh_token': obj.refresh_token,
        'expires': dt2iso_or_empty(obj.expires),
        '_scopes': obj._scopes,
        'is_personal': obj.is_personal,
        'is_internal': obj.is_internal,
    }
def get(*args, **kwargs):
    """Get UserEXT objects."""
    try:
        from invenio.modules.accounts.models import UserEXT
    except ImportError:
        from invenio_accounts.models import UserEXT
    query = UserEXT.query
    return query.count(), query.all()
def dump(u, from_date, with_json=True, latest_only=False, **kwargs):
    """Dump the UserEXt objects as a list of dictionaries.

    :param u: UserEXT to be dumped.
    :type u: `invenio_accounts.models.UserEXT [Invenio2.x]`
    :returns: User serialized to dictionary.
    :rtype: dict
    """
    return {'id': u.id, 'method': u.method, 'id_user': u.id_user}
def get(*args, **kwargs):
    """Get communities."""
    from invenio.modules.communities.models import FeaturedCommunity
    query = FeaturedCommunity.query
    return query.count(), query.all()
def dump(fc, from_date, with_json=True, latest_only=False, **kwargs):
    """Dump the community object as dictionary.

    :param fc: Community featuring to be dumped.
    :type fc: `invenio_communities.models.FeaturedCommunity [Invenio2.x]`
    :returns: Community serialized to dictionary.
    :rtype: dict
    """
    return {
        'id': fc.id,
        'id_community': fc.id_community,
        'start_date': fc.start_date.isoformat(),
    }
def _get_modified_recids_invenio12(from_date):
    """Get record ids for Invenio 1."""
    from invenio.search_engine import search_pattern
    from invenio.dbquery import run_sql
    rows = run_sql(
        'select id from bibrec where modification_date >= %s',
        (from_date, ), run_on_slave=True)
    # Also hand back search_pattern so the caller can filter by query.
    return {row[0] for row in rows}, search_pattern
def _get_modified_recids_invenio2(from_date):
    """Get record ids for Invenio 2."""
    from invenio.legacy.search_engine import search_pattern
    from invenio.modules.records.models import Record
    # ``from_date`` arrives as a 'YYYY-MM-DD HH:MM:SS' string.
    date = datetime.datetime.strptime(from_date, '%Y-%m-%d %H:%M:%S')
    modified = Record.query.filter(
        Record.modification_date >= date).values(Record.id)
    return {row[0] for row in modified}, search_pattern
def _get_collection_restrictions(collection):
    """Get all restrictions for a given collection, users and fireroles."""
    try:
        from invenio.dbquery import run_sql
        from invenio.access_control_firerole import compile_role_definition
    except ImportError:
        from invenio.modules.access.firerole import compile_role_definition
        from invenio.legacy.dbquery import run_sql
    # One row per (role, user) pair granting 'viewrestrcoll' on the
    # collection: the role's firerole source plus the user's email.
    rows = run_sql(
        'SELECT r.firerole_def_src, email '
        'FROM accROLE as r '
        'JOIN accROLE_accACTION_accARGUMENT ON r.id=id_accROLE '
        'JOIN accARGUMENT AS a ON a.id=id_accARGUMENT '
        'JOIN user_accROLE AS u ON r.id=u.id_accROLE '
        'JOIN user ON user.id=u.id_user '
        'WHERE a.keyword="collection" AND '
        'a.value=%s AND '
        'id_accACTION=(select id from accACTION where name="viewrestrcoll")',
        (collection, ), run_on_slave=True
    )
    fireroles = set()
    users = set()
    for firerole_src, email in rows:
        fireroles.add(compile_role_definition(firerole_src))
        users.add(email)
    return {'fireroles': list(fireroles), 'users': users}
def get_record_revisions(recid, from_date):
"""Get record revisions."""
# Invenio 1.x vs 2.x module locations.
try:
from invenio.dbquery import run_sql
except ImportError:
from invenio.legacy.dbquery import run_sql
# Returns (job_date, marcxml) tuples, oldest first; the MARCXML column is
# zlib-compressed (callers decompress it).
return run_sql(
'SELECT job_date, marcxml '
'FROM hstRECORD WHERE id_bibrec = %s AND job_date >= %s '
'ORDER BY job_date ASC', (recid, from_date),
run_on_slave=True) |
def get_record_collections(recid):
    """Get all collections the record belong to."""
    try:
        from invenio.search_engine import (
            get_all_collections_of_a_record,
            get_restricted_collections_for_recid)
    except ImportError:
        from invenio.legacy.search_engine import (
            get_all_collections_of_a_record,
            get_restricted_collections_for_recid)
    collections = {}
    collections['all'] = get_all_collections_of_a_record(
        recid, recreate_cache_if_needed=False)
    # Attach the restriction details for every restricted collection.
    collections['restricted'] = {
        name: _get_collection_restrictions(name)
        for name in get_restricted_collections_for_recid(
            recid, recreate_cache_if_needed=False)}
    return collections
def dump_record_json(marcxml):
    """Dump JSON of record."""
    # Prefer the Invenio 2.x Record API; fall back to the legacy bibfield
    # API when it is not importable.
    try:
        from invenio.modules.records.api import Record
        return Record.create(marcxml, 'marc').dumps(clean=True)
    except ImportError:
        from invenio.bibfield import create_record
        return create_record(marcxml, master_format='marc').dumps()
def get(query, from_date, **kwargs):
    """Get recids matching query and with changes."""
    recids, search_pattern = get_modified_recids(from_date)
    # Include records whose attached documents changed as well.
    recids |= get_modified_bibdoc_recids(from_date)
    if query:
        recids &= set(search_pattern(p=query.encode('utf-8')))
    return len(recids), recids
def dump(recid,
from_date,
with_json=False,
latest_only=False,
with_collections=False,
**kwargs):
"""Dump MARCXML and JSON representation of a record.
:param recid: Record identifier
:param from_date: Dump only revisions from this date onwards.
:param with_json: If ``True`` use old ``Record.create`` to generate the
JSON representation of the record.
:param latest_only: Dump only the last revision of the record metadata.
:param with_collections: If ``True`` dump the list of collections that the
record belongs to.
:returns: List of versions of the record.
"""
# Grab latest only
# NOTE(review): if there are no revisions since from_date, [-1] raises
# IndexError -- confirm callers guarantee at least one revision.
if latest_only:
revision_iter = [get_record_revisions(recid, from_date)[-1]]
else:
revision_iter = get_record_revisions(recid, from_date)
# Dump revisions
record_dump = dict(
record=[],
files=[],
recid=recid,
collections=get_record_collections(recid)
if with_collections else None, )
# hstRECORD stores the MARCXML zlib-compressed.
for revision_date, revision_marcxml in revision_iter:
marcxml = zlib.decompress(revision_marcxml)
record_dump['record'].append(
dict(
modification_datetime=datetime_toutc(revision_date)
.isoformat(),
marcxml=marcxml,
json=dump_record_json(marcxml) if with_json else None, ))
# Attach the BibDoc file metadata modified since from_date.
record_dump['files'] = dump_bibdoc(recid, from_date)
return record_dump |
def dump(ra, from_date, with_json=True, latest_only=False, **kwargs):
    """Dump the remote accounts as a list of dictionaries.

    :param ra: Remote account to be dumped.
    :type ra: `invenio_oauthclient.models.RemoteAccount [Invenio2.x]`
    :returns: Remote accounts serialized to dictionary.
    :rtype: dict
    """
    return {
        'id': ra.id,
        'user_id': ra.user_id,
        'client_id': ra.client_id,
        'extra_data': ra.extra_data,
    }
def load_common(model_cls, data):
    """Helper function for loading JSON data verbatim into model."""
    # Instantiate the model from the dump's keyword fields and persist it
    # immediately.
    instance = model_cls(**data)
    db.session.add(instance)
    db.session.commit()
def collect_things_entry_points():
    """Collect entry points."""
    # Map entry-point name -> loaded object for the migrator's things group.
    return {
        entry_point.name: entry_point.load()
        for entry_point in iter_entry_points(group='invenio_migrator.things')
    }
def init_app_context():
"""Initialize app context for Invenio 2.x."""
try:
from invenio.base.factory import create_app
app = create_app()
# Push a request context and run request preprocessors so code that
# expects a live Flask request works during the dump.
app.test_request_context('/').push()
app.preprocess_request()
except ImportError:
# Not an Invenio 2.x installation; nothing to initialize.
pass |
def memoize(func):
    """Cache for heavy function calls."""
    results = {}

    @wraps(func)
    def wrapper(*args, **kwargs):
        # Key on the textual form of the call signature; good enough for
        # the hashable argument types used in this module.
        key = '{0}{1}'.format(args, kwargs)
        try:
            return results[key]
        except KeyError:
            results[key] = func(*args, **kwargs)
            return results[key]
    return wrapper
def _get_run_sql():
"""Import ``run_sql``."""
# Invenio 1.x top-level module, with Invenio 2.x legacy fallback.
try:
from invenio.dbquery import run_sql
except ImportError:
from invenio.legacy.dbquery import run_sql
return run_sql |
def get_connected_roles(action_id):
"""Get roles connected to an action."""
try:
from invenio.access_control_admin import compile_role_definition
except ImportError:
from invenio.modules.access.firerole import compile_role_definition
run_sql = _get_run_sql()
roles = {}
# One row per (role, argument, user) combination; rows are folded into
# per-role dicts below. Columns:
# r = (role id, name, description, firerole src, keyword, value, email).
res = run_sql(
'select r.id, r.name, r.description, r.firerole_def_src, '
'a.keyword, a.value, email from accROLE as r '
'join accROLE_accACTION_accARGUMENT on r.id=id_accROLE '
'join accARGUMENT as a on a.id=id_accARGUMENT '
'join user_accROLE as u on r.id=u.id_accROLE '
'join user on user.id=u.id_user '
'where id_accACTION=%s', (action_id, )
)
for r in res:
# setdefault keeps one dict per role id across its many rows.
role = roles.setdefault(
r[0], {
'id': r[0],
'name': r[1],
'description': r[2],
'firerole_def': r[3],
'compiled_firerole_def': compile_role_definition(r[3]),
'users': set(),
'parameters': {}
}
)
# Accumulate argument values per keyword and the member emails.
param = role['parameters'].setdefault(r[4], set())
param.add(r[5])
role['users'].add(r[6])
return six.itervalues(roles) |
def get(query, *args, **kwargs):
    """Get action definitions to dump.

    :param query: comma-separated list of SQL ``LIKE`` patterns matching
        ``accACTION`` names.
    :returns: tuple of (number of actions, list of action dicts).
    """
    run_sql = _get_run_sql()
    actions = []
    for action in query.split(','):
        rows = run_sql(
            'select id, name, description, allowedkeywords, optional '
            'from accACTION where name like %s', (action, ),
            run_on_slave=True)
        for row in rows:
            # BUGFIX: the SELECT yields
            # (id, name, description, allowedkeywords, optional); the
            # original read allowedkeywords from row[2] (the description)
            # and optional from row[3] (the allowedkeywords).
            actions.append(dict(id=row[0],
                                name=row[1],
                                allowedkeywords=row[3],
                                optional=row[4]))
    return len(actions), actions
def get(*args, **kwargs):
    """Get users."""
    from invenio.modules.oauthclient.models import RemoteToken
    query = RemoteToken.query
    return query.count(), query.all()
def dump(rt, from_date, with_json=True, latest_only=False, **kwargs):
    """Dump the remote tokens as a list of dictionaries.

    :param rt: Remote token to be dumped.
    :type rt: `invenio_oauthclient.models.RemoteToken [Invenio2.x]`
    :returns: Remote tokens serialized to dictionary.
    :rtype: dict
    """
    return {
        'id_remote_account': rt.id_remote_account,
        'token_type': rt.token_type,
        'access_token': rt.access_token,
        'secret': rt.secret,
    }
def load_token(data):
"""Load the oauth2server token from data dump."""
from invenio_oauth2server.models import Token
# 'expires' is dumped as an ISO string; convert it back to a datetime
# (or None) before inserting the row.
data['expires'] = iso2dt_or_none(data['expires'])
load_common(Token, data) |
def import_record(data, source_type=None, latest_only=False):
    """Migrate a record from a migration dump.

    :param data: Dictionary for representing a single record and files.
    :param source_type: Determines if the MARCXML or the JSON dump is used.
        Default: ``marcxml``.
    :param latest_only: Determine is only the latest revision should be
        loaded.
    """
    source_type = source_type or 'marcxml'
    assert source_type in ['marcxml', 'json']
    record_dump = current_migrator.records_dump_cls(
        data,
        source_type=source_type,
        pid_fetchers=current_migrator.records_pid_fetchers,
    )
    # Roll back on any failure so a partial record is never committed.
    try:
        current_migrator.records_dumploader_cls.create(record_dump)
        db.session.commit()
    except Exception:
        db.session.rollback()
        raise
def config_imp_or_default(app, config_var_imp, default):
    """Import config var import path or use default value."""
    import_path = app.config.get(config_var_imp)
    if not import_path:
        return default
    # The config value is a dotted import path; resolve it to the object.
    return import_string(import_path)
def init_app(self, app):
    """Flask application initialization."""
    self.init_config(app.config)
    # Register the extension state on the app and expose the dumps CLI.
    migrator_state = _InvenioMigratorState(app)
    app.extensions['invenio-migrator'] = migrator_state
    app.cli.add_command(dumps)
    return migrator_state
def get(*args, **kwargs):
    """Get users."""
    from invenio.modules.oauth2server.models import Client
    query = Client.query
    return query.count(), query.all()
def dump(obj, from_date, with_json=True, latest_only=False, **kwargs):
    """Dump the oauth2server Client."""
    return {
        'name': obj.name,
        'description': obj.description,
        'website': obj.website,
        'user_id': obj.user_id,
        'client_id': obj.client_id,
        'client_secret': obj.client_secret,
        'is_confidential': obj.is_confidential,
        'is_internal': obj.is_internal,
        '_redirect_uris': obj._redirect_uris,
        '_default_scopes': obj._default_scopes,
    }
def _get_users_invenio12(*args, **kwargs):
"""Get user accounts Invenio 1."""
from invenio.dbquery import run_sql, deserialize_via_marshal
User = namedtuple('User', [
'id', 'email', 'password', 'password_salt', 'note', 'full_name',
'settings', 'nickname', 'last_login'
])
# SELECT columns: 0=id, 1=email, 2=password, 3=note, 4=settings,
# 5=nickname, 6=last_login.
users = run_sql(
'SELECT id, email, password, note, settings, nickname, last_login'
' FROM user',
run_on_slave=True)
# NOTE(review): password_salt is set to the email (user[1]) -- presumably
# Invenio 1 salts password hashes with the email; confirm.
# NOTE(review): full_name is taken from user[5], the nickname column in
# the SELECT -- verify this mapping is intended.
return len(users), [
User(
id=user[0],
email=user[1],
password=user[2].decode('latin1'),
password_salt=user[1],
note=user[3],
full_name=user[5],
settings=deserialize_via_marshal(user[4]) if user[4] else {},
# we don't have proper nicknames on Invenio v1
nickname='id_{0}'.format(user[0]),
last_login=user[6]) for user in users
] |
def _get_users_invenio2(*args, **kwargs):
    """Get user accounts from Invenio 2."""
    from invenio.modules.accounts.models import User
    query = User.query
    return query.count(), query.all()
def get(*args, **kwargs):
"""Get users."""
# Prefer the Invenio 1.x code path; fall back to the Invenio 2.x models
# when the legacy modules are not importable.
try:
return _get_users_invenio12(*args, **kwargs)
except ImportError:
return _get_users_invenio2(*args, **kwargs) |
def dump(u, *args, **kwargs):
    """Dump the users as a list of dictionaries.

    :param u: User to be dumped.
    :type u: `invenio.modules.accounts.models.User [Invenio2.x]` or
        namedtuple.
    :returns: User serialized to dictionary.
    :rtype: dict
    """
    # Invenio 1 dumps carry full_name directly; Invenio 2 models expose
    # given_names/family_name instead.
    if hasattr(u, 'full_name'):
        full_name = u.full_name
    else:
        full_name = '{0} {1}'.format(u.given_names, u.family_name)
    return dict(
        id=u.id,
        email=u.email,
        password=u.password,
        password_salt=u.password_salt,
        note=u.note,
        full_name=full_name,
        settings=u.settings,
        nickname=u.nickname,
        last_login=dt2iso_or_empty(u.last_login))
def load_deposit(data):
    """Load the raw JSON dump of the Deposition.

    Uses Record API in order to bypass all Deposit-specific initialization,
    which are to be done after the final stage of deposit migration.

    :param data: Dictionary containing deposition data.
    :type data: dict
    """
    from invenio_db import db
    record, pid = create_record_and_pid(data)
    record = create_files_and_sip(record, pid)
    db.session.commit()
def create_record_and_pid(data):
    """Create the deposit record metadata and persistent identifier.

    :param data: Raw JSON dump of the deposit.
    :type data: dict
    :returns: A deposit object and its pid
    :rtype: (`invenio_records.api.Record`,
             `invenio_pidstore.models.PersistentIdentifier`)
    """
    from invenio_records.api import Record
    from invenio_pidstore.models import PersistentIdentifier, PIDStatus, \
        RecordIdentifier
    deposit = Record.create(data=data)
    # Restore the original creation timestamp (stored naive on the model).
    creation_date = arrow.get(data['_p']['created']).datetime
    deposit.model.created = creation_date.replace(tzinfo=None)
    depid = deposit['_p']['id']
    pid = PersistentIdentifier.create(
        pid_type='depid',
        pid_value=str(depid),
        object_type='rec',
        object_uuid=str(deposit.id),
        status=PIDStatus.REGISTERED,
    )
    # Reserve the numeric identifier so autoincrement never reuses it.
    if RecordIdentifier.query.get(int(depid)) is None:
        RecordIdentifier.insert(int(depid))
    deposit.commit()
    return deposit, pid
def create_files_and_sip(deposit, dep_pid):
"""Create deposit Bucket, Files and SIPs."""
# Workflow: (1) create a bucket and link it to the deposit record,
# (2) reserve any pre-reserved recid/DOI found in the drafts,
# (3) register a FileInstance/ObjectVersion per dumped file,
# (4) re-create the SIPs and attach the files to the first SIP.
# NOTE(review): logger.exception is used in several places outside an
# except block; logger.error/warning may be intended.
from invenio_pidstore.errors import PIDDoesNotExistError
from invenio_pidstore.models import PersistentIdentifier, PIDStatus, \
RecordIdentifier
from invenio_sipstore.errors import SIPUserDoesNotExist
from invenio_sipstore.models import SIP, RecordSIP, SIPFile
from invenio_files_rest.models import Bucket, FileInstance, ObjectVersion
from invenio_records_files.models import RecordsBuckets
from invenio_db import db
buc = Bucket.create()
recbuc = RecordsBuckets(record_id=deposit.id, bucket_id=buc.id)
db.session.add(recbuc)
# Ensure the record carries the deposit/file bookkeeping keys.
deposit.setdefault('_deposit', dict())
deposit.setdefault('_buckets', dict(deposit=str(buc.id)))
deposit.setdefault('_files', list())
files = deposit.get('files', [])
sips = deposit.get('sips', [])
# Look for prereserved DOI (and recid)
if 'drafts' in deposit:
drafts = list(deposit['drafts'].items())
if len(drafts) != 1:
logger.exception('Deposit {dep_pid} has multiple drafts'.format(
dep_pid=dep_pid))
if len(drafts) == 1:
draft_type, draft = drafts[0]
draft_v = draft['values']
if 'prereserve_doi' in draft_v:
pre_recid = str(draft_v['prereserve_doi']['recid'])
pre_doi = str(draft_v['prereserve_doi']['doi'])
# If pre-reserve info available, try to reserve 'recid'
try:
pid = PersistentIdentifier.get(pid_type='recid',
pid_value=str(pre_recid))
except PIDDoesNotExistError:
# Reserve recid
pid = PersistentIdentifier.create(
pid_type='recid',
pid_value=str(pre_recid),
object_type='rec',
status=PIDStatus.RESERVED
)
# If pre-reserve info available, try to reserve 'doi'
try:
pid = PersistentIdentifier.get(pid_type='doi',
pid_value=str(pre_doi))
except PIDDoesNotExistError:
# Reserve DOI
pid = PersistentIdentifier.create(
pid_type='doi',
pid_value=str(pre_doi),
object_type='rec',
status=PIDStatus.RESERVED
)
if RecordIdentifier.query.get(int(pre_recid)) is None:
RecordIdentifier.insert(int(pre_recid))
# Store the path -> FileInstance mappings for SIPFile creation later
dep_file_instances = list()
for file_ in files:
size = file_['size']
key = file_['name']
# Warning: Assumes all checksums are MD5!
checksum = 'md5:{0}'.format(file_['checksum'])
fi = FileInstance.create()
fi.set_uri(file_['path'], size, checksum)
ov = ObjectVersion.create(buc, key, _file_id=fi.id)
# Derive a bare extension (without the leading dot) for '_files'.
ext = splitext(ov.key)[1].lower()
if ext.startswith('.'):
ext = ext[1:]
file_meta = dict(
bucket=str(ov.bucket.id),
key=ov.key,
checksum=ov.file.checksum,
size=ov.file.size,
version_id=str(ov.version_id),
type=ext,
)
deposit['_files'].append(file_meta)
dep_file_instances.append((file_['path'], fi))
# Get a recid from SIP information
recid = None
if sips:
recids = [int(sip['metadata']['recid']) for sip in sips]
if len(set(recids)) > 1:
# NOTE(review): this error message looks copy-pasted ('does not
# exists' does not fit a multiple-recids condition).
logger.error('Multiple recids ({recids}) found in deposit {depid}'
' does not exists.'.format(recids=recids,
depid=dep_pid.pid_value))
raise DepositMultipleRecids(dep_pid.pid_value, list(set(recids)))
elif recids: # If only one recid
recid = recids[0]
# Re-create each dumped SIP; fall back to an anonymous SIP when the
# original submitter no longer exists.
for idx, sip in enumerate(sips):
agent = None
user_id = None
if sip['agents']:
agent = dict(
ip_address=empty_str_if_none(
sip['agents'][0].get('ip_address', "")),
email=empty_str_if_none(
sip['agents'][0].get('email_address', "")),
)
user_id = sip['agents'][0]['user_id']
if user_id == 0:
user_id = None
content = sip['package']
sip_format = 'marcxml'
try:
sip = SIP.create(sip_format,
content,
user_id=user_id,
agent=agent)
except SIPUserDoesNotExist:
logger.exception('User ID {user_id} referred in deposit {depid} '
'does not exists.'.format(
user_id=user_id, depid=dep_pid.pid_value))
sip = SIP.create(sip_format,
content,
agent=agent)
# Attach recid to SIP
if recid:
try:
pid = PersistentIdentifier.get(pid_type='recid',
pid_value=str(recid))
record_sip = RecordSIP(sip_id=sip.id, pid_id=pid.id)
db.session.add(record_sip)
except PIDDoesNotExistError:
logger.exception('Record {recid} referred in '
'Deposit {depid} does not exists.'.format(
recid=recid, depid=dep_pid.pid_value))
if deposit['_p']['submitted'] is True:
logger.exception('Pair {recid}/{depid} was submitted,'
' (should it be unpublished?).'.format(
recid=recid, depid=dep_pid.pid_value))
else:
msg = 'Pair {recid}/{depid} was not submitted.'.format(
recid=recid, depid=dep_pid.pid_value)
logger.exception(msg)
# Reserve recid
pid = PersistentIdentifier.create(
pid_type='recid',
pid_value=str(recid),
object_type='rec',
status=PIDStatus.RESERVED
)
if RecordIdentifier.query.get(int(recid)) is None:
RecordIdentifier.insert(int(recid))
# Only the first SIP carries the file attachments.
if idx == 0:
for fp, fi in dep_file_instances:
sipf = SIPFile(sip_id=sip.id, filepath=fp, file_id=fi.id)
db.session.add(sipf)
deposit.commit()
return deposit |
def dump(c, from_date, with_json=True, latest_only=False, **kwargs):
    """Dump the community object as dictionary.

    :param c: Community to be dumped.
    :type c: `invenio.modules.communities.models.Community`
    :returns: Community serialized to dictionary.
    :rtype: dict
    """
    return {
        'id': c.id,
        'id_user': c.id_user,
        'title': c.title,
        'description': c.description,
        'page': c.page,
        'curation_policy': c.curation_policy,
        'last_record_accepted': dt2iso_or_empty(c.last_record_accepted),
        'logo_ext': c.logo_ext,
        'logo_url': c.logo_url,
        'ranking': c.ranking,
        'fixed_points': c.fixed_points,
        'created': c.created.isoformat(),
        'last_modified': c.last_modified.isoformat(),
        'id_collection_provisional': c.id_collection_provisional,
        'id_oairepository': c.id_oairepository,
        'id_collection': c.id_collection,
    }
def _loadrecord(record_dump, source_type, eager=False):
    """Load a single record into the database.

    :param record_dump: Record dump.
    :type record_dump: dict
    :param source_type: 'json' or 'marcxml'
    :param eager: If ``True`` execute the task synchronously.
    """
    if eager:
        # Synchronous execution; propagate any failure to the caller.
        import_record.s(record_dump, source_type=source_type).apply(
            throw=True)
        return
    post_task = current_migrator.records_post_task
    if post_task:
        # Chain the configured post-processing task after the import.
        chain(
            import_record.s(record_dump, source_type=source_type),
            post_task.s(),
        )()
    else:
        import_record.delay(record_dump, source_type=source_type)
def loadrecords(sources, source_type, recid):
    """Load records migration dump."""
    if recid is None:
        # Bulk mode: queue every record from every dump file.
        for idx, source in enumerate(sources, 1):
            click.echo('Loading dump {0} of {1} ({2})'.format(
                idx, len(sources), source.name))
            data = json.load(source)
            with click.progressbar(data) as records:
                for item in records:
                    _loadrecord(item, source_type)
        return
    # Targeted mode: search the dumps for one recid and load it eagerly.
    for source in sources:
        for item in json.load(source):
            if str(item['recid']) == str(recid):
                _loadrecord(item, source_type, eager=True)
                click.echo("Record '{recid}' loaded.".format(recid=recid))
                return
    click.echo("Record '{recid}' not found.".format(recid=recid))
def inspectrecords(sources, recid, entity=None):
    """Inspect records in a migration dump.

    :param sources: open dump files (JSON arrays of record dumps).
    :param recid: record id to inspect; falsy lists all ids instead.
    :param entity: one of None/'files'/'json'/'marcxml' selecting what to
        print for the matching record.
    """
    for idx, source in enumerate(sources, 1):
        click.echo('Loading dump {0} of {1} ({2})'.format(idx, len(sources),
                                                          source.name))
        data = json.load(source)
        # Just print record identifiers if none are selected.
        if not recid:
            click.secho('Record identifiers', fg='green')
            total = 0
            for r in (d['recid'] for d in data):
                click.echo(r)
                total += 1
            click.echo('{0} records found in dump.'.format(total))
            return
        data = list(filter(lambda d: d['recid'] == recid, data))
        if not data:
            click.secho("Record not found.", fg='yellow')
            return
        for record in data:
            if entity is None:
                click.echo(json.dumps(record, indent=2))
            if entity == 'files':
                click.secho('Files', fg='green')
                click.echo(
                    json.dumps(record['files'], indent=2))
            if entity == 'json':
                click.secho('Records (JSON)', fg='green')
                for revision in record['record']:
                    click.secho('Revision {0}'.format(
                        revision['modification_datetime']), fg='yellow')
                    click.echo(json.dumps(revision['json'], indent=2))
            if entity == 'marcxml':
                click.secho('Records (MARCXML)', fg='green')
                for revision in record['record']:
                    # BUGFIX: label each revision with its timestamp (as the
                    # 'json' branch does) and print the MARCXML payload,
                    # instead of using the payload as the label and echoing
                    # the whole revision dict.
                    click.secho('Revision {0}'.format(
                        revision['modification_datetime']), fg='yellow')
                    click.echo(revision['marcxml'])
def loadcommon(sources, load_task, asynchronous=True, predicate=None,
task_args=None, task_kwargs=None):
"""Common helper function for load simple objects.
.. note::
Keyword arguments ``task_args`` and ``task_kwargs`` are passed to the
``load_task`` function as ``*task_args`` and ``**task_kwargs``.
.. note::
The `predicate` argument is used as a predicate function to load only
a *single* item from across all dumps (this CLI function will return
after loading the item). This is primarily used for debugging of
the *dirty* data within the dump. The `predicate` should be a function
with a signature ``f(dict) -> bool``, i.e. taking a single parameter
(an item from the dump) and return ``True`` if the item
should be loaded. See the ``loaddeposit`` for a concrete example.
:param sources: JSON source files with dumps
:type sources: list of str (filepaths)
:param load_task: Shared task which loads the dump.
:type load_task: function
:param asynchronous: Flag for serial or asynchronous execution of the task.
:type asynchronous: bool
:param predicate: Predicate for selecting only a single item from the dump.
:type predicate: function
:param task_args: positional arguments passed to the task.
:type task_args: tuple
:param task_kwargs: named arguments passed to the task.
:type task_kwargs: dict
"""
# resolve the defaults for task_args and task_kwargs
task_args = tuple() if task_args is None else task_args
task_kwargs = dict() if task_kwargs is None else task_kwargs
click.echo('Loading dumps started.')
# Each source file holds a JSON array of dumped objects; ``load_task``
# is a Celery-style task (.s() builds a signature).
for idx, source in enumerate(sources, 1):
click.echo('Opening dump file {0} of {1} ({2})'.format(
idx, len(sources), source.name))
data = json.load(source)
with click.progressbar(data) as data_bar:
for d in data_bar:
# Load a single item from the dump
if predicate is not None:
if predicate(d):
load_task.s(d, *task_args, **task_kwargs).apply(
throw=True)
click.echo("Loaded a single record.")
return
# Load dumps normally
else:
if asynchronous:
load_task.s(d, *task_args, **task_kwargs).apply_async()
else:
load_task.s(d, *task_args, **task_kwargs).apply(
throw=True) |
def loadcommunities(sources, logos_dir):
"""Load communities."""
from invenio_migrator.tasks.communities import load_community
# logos_dir is forwarded to the task as an extra positional argument.
loadcommon(sources, load_community, task_args=(logos_dir, )) |
def loadusers(sources):
"""Load users."""
from .tasks.users import load_user
# Cannot be executed asynchronously due to duplicate emails and usernames
# which can create a racing condition.
loadcommon(sources, load_user, asynchronous=False) |
def loaddeposit(sources, depid):
    """Load deposit.

    Usage:
    invenio dumps loaddeposit ~/data/deposit_dump_*.json
    invenio dumps loaddeposit -d 12345 ~/data/deposit_dump_*.json
    """
    from .tasks.deposit import load_deposit
    if depid is None:
        loadcommon(sources, load_deposit)
    else:
        # Pick out the single deposit with the requested id and load it
        # synchronously so failures surface immediately.
        loadcommon(sources, load_deposit,
                   predicate=lambda dep: int(dep["_p"]["id"]) == depid,
                   asynchronous=False)
def get_profiler_statistics(sort="cum_time", count=20, strip_dirs=True):
    """Return profiler statistics.

    :param str sort: dictionary key to sort by
    :param int|None count: the number of results to return, None returns
        all results.
    :param bool strip_dirs: if True strip the directory, otherwise return
        the full path
    """
    json_stats = []
    pstats = yappi.convert2pstats(yappi.get_func_stats())
    if strip_dirs:
        pstats.strip_dirs()
    # BUGFIX: dict.iteritems() does not exist on Python 3; items() behaves
    # equivalently here and also works on Python 2.
    for func, func_stat in pstats.stats.items():
        path, line, func_name = func
        cc, num_calls, total_time, cum_time, callers = func_stat
        json_stats.append({
            "path": path,
            "line": line,
            "func_name": func_name,
            "num_calls": num_calls,
            "total_time": total_time,
            "total_time_per_call": total_time / num_calls if total_time else 0,
            "cum_time": cum_time,
            "cum_time_per_call": cum_time / num_calls if cum_time else 0,
        })
    # count=None slices to the full list.
    return sorted(json_stats, key=itemgetter(sort), reverse=True)[:count]
def main(port=8888):
    """Run as sample test server."""
    import tornado.ioloop
    # Serve only the profiler's routes.
    handlers = [] + TornadoProfiler().get_routes()
    app = tornado.web.Application(handlers)
    app.listen(port)
    tornado.ioloop.IOLoop.current().start()
def get(self):
    """Return current profiler statistics.

    Query arguments: ``sort`` (stat key to sort by), ``count`` (max
    number of rows, non-positive means "all") and ``strip_dirs``.
    Responds 400 on invalid arguments, 404 when no stats exist yet.
    """
    sort = self.get_argument('sort', 'cum_time')
    count = self.get_argument('count', 20)
    strip_dirs = self.get_argument('strip_dirs', True)
    error = ''
    sorts = ('num_calls', 'cum_time', 'total_time',
             'cum_time_per_call', 'total_time_per_call')
    if sort not in sorts:
        error += "Invalid `sort` '%s', must be in %s." % (sort, sorts)
    try:
        count = int(count)
    except (ValueError, TypeError):
        error += "Can't cast `count` '%s' to int." % count
    else:
        # Only normalize after a successful cast; comparing the leftover
        # string against 0 would raise TypeError on Python 3.
        if count <= 0:
            count = None
    strip_dirs = str(strip_dirs).lower() not in ('false', 'no', 'none',
                                                 'null', '0', '')
    if error:
        self.write({'error': error})
        self.set_status(400)
        self.finish()
        return
    try:
        statistics = get_profiler_statistics(sort, count, strip_dirs)
        self.write({'statistics': statistics})
        self.set_status(200)
    except TypeError:
        logger.exception('Error while retrieving profiler statistics')
        self.write({'error': 'No stats available. Start and stop the profiler before trying to retrieve stats.'})
        self.set_status(404)
    self.finish()
def post(self):
    """Start a new profiler, unless one is already running."""
    if not is_profiler_running():
        start_profiling()
    # 201 is returned whether profiling was started now or earlier.
    self.set_status(201)
    self.finish()
def post(self):
    """Dump current profiler statistics into a file.

    The target file name comes from the ``filename`` argument and
    defaults to ``dump.prof``.
    """
    target = self.get_argument('filename', 'dump.prof')
    CProfileWrapper.profiler.dump_stats(target)
    self.finish()
def get(self):
    """Return current profiler statistics as formatted text.

    Stats are also echoed to stdout via ``print_stats`` before being
    rendered into the response body, sorted by cumulative time.
    """
    CProfileWrapper.profiler.print_stats()
    buf = StringIO.StringIO()
    stats = pstats.Stats(CProfileWrapper.profiler, stream=buf)
    stats.sort_stats('cumulative').print_stats()
    self.set_status(200)
    self.write(buf.getvalue())
    self.finish()
def delete(self):
    """Clear profiler statistics."""
    # Rebuild the stats snapshot from the profiler's current data.
    CProfileWrapper.profiler.create_stats()
    # NOTE(review): ``enable`` is not a standard Tornado handler method --
    # presumably this resumes profiling via a mixin/base class, but it
    # looks like it may have been meant to be
    # ``CProfileWrapper.profiler.enable()``; confirm.
    self.enable()
    self.set_status(204)
    self.finish()
def post(self):
    """Start a new profiler."""
    # Replace any previous profiler instance with a fresh one.
    profiler = cProfile.Profile()
    CProfileWrapper.profiler = profiler
    profiler.enable()
    self.running = True
    self.set_status(201)
    self.finish()
def delete(self):
    """Stop the profiler without discarding the collected data."""
    CProfileWrapper.profiler.disable()
    self.running = False
    self.set_status(204)
    self.finish()
def get(self):
    """Report whether the profiler is currently running."""
    payload = {"running": self.running}
    self.write(payload)
    self.set_status(200)
    self.finish()
def disable_timestamp(method):
    """Decorator suspending automatic timestamp updates around *method*.

    Runs the wrapped callable inside the ``correct_date`` context
    manager and returns its result unchanged.
    """
    @wraps(method)
    def wrapper(*args, **kwargs):
        with correct_date():
            return method(*args, **kwargs)
    return wrapper
def load_user(data):
    """Load user from data dump.

    NOTE: This task takes into account the possible duplication of emails
    and usernames, hence it should be called synchronously.
    In such case of collision it will raise UserEmailExistsError or
    UserUsernameExistsError, if email or username are already existing in
    the database. Caller of this task should take care to resolve those
    collisions beforehand or after catching an exception.

    :param data: Dictionary containing user data.
    :type data: dict
    :raises UserEmailExistsError: if the email is already registered.
    :raises UserUsernameExistsError: if the nickname is already taken.
    """
    from invenio_accounts.models import User
    from invenio_userprofiles.api import UserProfile
    email = data['email'].strip()
    # Abort early on duplicate email -- emails must be unique.
    if User.query.filter_by(email=email).count() > 0:
        raise UserEmailExistsError(
            "User email '{email}' already exists.".format(email=email))
    last_login = None
    if data['last_login']:
        last_login = arrow.get(data['last_login']).datetime
    # Legacy note '1' marks a confirmed account; the original confirmation
    # time is not in the dump, so "now" is used as an approximation.
    confirmed_at = None
    if data['note'] == '1':
        confirmed_at = datetime.utcnow()
    salt = data['password_salt']
    checksum = data['password']
    if not checksum:
        new_password = None
    # Test if password hash is in Modular Crypt Format
    elif checksum.startswith('$'):
        new_password = checksum
    else:
        # Legacy AES-encrypted password: wrap it in an MCF-style string so
        # the invenio-accounts hashing backend can recognize the scheme.
        new_password = str.join('$', ['', u'invenio-aes', salt, checksum])
    with db.session.begin_nested():
        obj = User(
            id=data['id'],
            password=new_password,
            email=email,
            confirmed_at=confirmed_at,
            last_login_at=last_login,
            active=(data['note'] != '0'),  # note '0' marks a blocked account
        )
        db.session.add(obj)
    nickname = data['nickname'].strip()
    overwritten_username = ('username' in data and 'displayname' in data)
    # NOTE: 'username' and 'displayname' will exist in data dump only
    # if it was inserted there after dumping. It normally should not come from
    # Invenio 1.x or 2.x data dumper script. In such case, those values will
    # have precedence over the 'nickname' field.
    if nickname or overwritten_username:
        p = UserProfile(user=obj)
        p.full_name = data.get('full_name', '').strip()
        if overwritten_username:
            p._username = data['username'].lower()
            p._displayname = data['displayname']
        elif nickname:
            if UserProfile.query.filter(
                    UserProfile._username == nickname.lower()).count() > 0:
                raise UserUsernameExistsError(
                    "Username '{username}' already exists.".format(
                        username=nickname))
            try:
                p.username = nickname
            except ValueError:
                # Nickname fails current validation rules; keep it anyway by
                # writing the underlying columns directly.
                current_app.logger.warn(
                    u'Invalid username {0} for user_id {1}'.format(
                        nickname, data['id']))
                p._username = nickname.lower()
                p._displayname = nickname
        db.session.add(p)
    db.session.commit()
def calc_translations_parallel(images):
    """Calculate image translations in parallel.

    Parameters
    ----------
    images : ImageCollection
        Images as instance of ImageCollection.

    Returns
    -------
    2d array, (ty, tx)
        ty and tx is translation to previous image in respectively
        x or y direction.
    """
    worker = Parallel(n_jobs=_CPUS)
    translations = worker(delayed(images.translation)(img) for img in images)
    # Parallel runs in separate processes, so the computed translations
    # must be copied back onto the local Image objects afterwards.
    for idx, t in enumerate(translations):
        images[idx].translation = t
    return np.array(translations)
def stitch(images):
    """Stitch regular spaced images.

    Parameters
    ----------
    images : ImageCollection or list of tuple(path, row, column)
        Each image-tuple should contain path, row and column. Row 0,
        column 0 is top left image.

        Example:

        >>> images = [('1.png', 0, 0), ('2.png', 0, 1)]

    Returns
    -------
    tuple (stitched, offset)
        Stitched image and registered offset (y, x).
    """
    # isinstance instead of exact type comparison: subclasses are fine too.
    if not isinstance(images, ImageCollection):
        images = ImageCollection(images)
    calc_translations_parallel(images)
    _translation_warn(images)
    yoffset, xoffset = images.median_translation()
    if xoffset != yoffset:
        warn('yoffset != xoffset: %s != %s' % (yoffset, xoffset))
    # assume all images have the same shape
    y, x = imread(images[0].path).shape
    height = y*len(images.rows) + yoffset*(len(images.rows)-1)
    width = x*len(images.cols) + xoffset*(len(images.cols)-1)
    # last dimension is number of images on top of each other
    # ``np.int`` was removed in NumPy 1.20; the builtin ``int`` is the
    # documented replacement and yields the same dtype.
    merged = np.zeros((height, width, 2), dtype=int)
    for image in images:
        r, c = image.row, image.col
        mask = _merge_slice(r, c, y, x, yoffset, xoffset)
        # last dim is used for averaging the seam
        img = _add_ones_dim(imread(image.path))
        merged[mask] += img
    # average seam, possible improvement: use gradient
    # Floor division: in-place true division (``/=``) on an integer array
    # raises TypeError on Python 3 / modern NumPy; ``//=`` keeps the
    # original Python 2 integer-division semantics.
    merged[..., 0] //= merged[..., 1]
    return merged[..., 0].astype(np.uint8), (yoffset, xoffset)
def _add_ones_dim(arr):
"Adds a dimensions with ones to array."
arr = arr[..., np.newaxis]
return np.concatenate((arr, np.ones_like(arr)), axis=-1) |
def create(cls, dump):
    """Create record based on dump.

    Handles three cases: dumps without record data (only a reserved
    recid PID is ensured), dumps matching an existing record (revisions
    are replayed on top of it) and brand new records.
    """
    # If 'record' is not present, just create the PID
    if not dump.data.get('record'):
        try:
            PersistentIdentifier.get(pid_type='recid',
                                     pid_value=dump.recid)
        except PIDDoesNotExistError:
            # Reserve the recid so it cannot be handed out to others.
            PersistentIdentifier.create(
                'recid', dump.recid,
                status=PIDStatus.RESERVED
            )
            db.session.commit()
        return None
    dump.prepare_revisions()
    dump.prepare_pids()
    dump.prepare_files()
    # Create or update?
    existing_files = []
    if dump.record:
        # Record already exists: replay dumped revisions on top of it and
        # only register PIDs that are not yet known.
        existing_files = dump.record.get('_files', [])
        record = cls.update_record(revisions=dump.revisions,
                                   created=dump.created,
                                   record=dump.record)
        pids = dump.missing_pids
    else:
        record = cls.create_record(dump)
        pids = dump.pids
    if pids:
        cls.create_pids(record.id, pids)
    if dump.files:
        cls.create_files(record, dump.files, existing_files)
    # Update files.
    if dump.is_deleted(record):
        cls.delete_record(record)
    return record
def create_record(cls, dump):
    """Create a new record from dump.

    Creates the record from the latest dumped revision, registers its
    recid, then replays the remaining revisions via ``update_record``.
    """
    # Reserve record identifier, create record and recid pid in one
    # operation.
    timestamp, data = dump.latest
    record = Record.create(data)
    # Model timestamps are stored naive -- presumably UTC; TODO confirm.
    record.model.created = dump.created.replace(tzinfo=None)
    record.model.updated = timestamp.replace(tzinfo=None)
    RecordIdentifier.insert(dump.recid)
    PersistentIdentifier.create(
        pid_type='recid',
        pid_value=str(dump.recid),
        object_type='rec',
        object_uuid=str(record.id),
        status=PIDStatus.REGISTERED
    )
    db.session.commit()
    return cls.update_record(revisions=dump.rest, record=record,
                             created=dump.created)
def update_record(cls, revisions, created, record):
    """Update an existing record.

    Replays each ``(timestamp, json)`` revision onto the record model,
    committing once per revision so each one is persisted separately.
    """
    for timestamp, revision in revisions:
        record.model.json = revision
        record.model.created = created.replace(tzinfo=None)
        record.model.updated = timestamp.replace(tzinfo=None)
        db.session.commit()
    # Return a fresh Record wrapper around the final model state.
    return Record(record.model.json, model=record.model)
def create_pids(cls, record_uuid, pids):
    """Create persistent identifiers.

    Registers every fetched pid against the given record UUID.
    """
    for p in pids:
        PersistentIdentifier.create(
            pid_type=p.pid_type,
            pid_value=p.pid_value,
            pid_provider=p.provider.pid_provider if p.provider else None,
            object_type='rec',
            object_uuid=record_uuid,
            status=PIDStatus.REGISTERED,
        )
    db.session.commit()
def delete_record(cls, record):
    """Delete a record and its persistent identifiers."""
    record.delete()
    # Soft-delete every PID pointing at this record.
    PersistentIdentifier.query.filter_by(
        object_type='rec', object_uuid=record.id,
    ).update({PersistentIdentifier.status: PIDStatus.DELETED})
    cls.delete_buckets(record)
    db.session.commit()
def create_files(cls, record, files, existing_files):
    """Create files.

    This method is currently limited to a single bucket per record.
    """
    default_bucket = None
    # Look for bucket id in existing files.
    for f in existing_files:
        if 'bucket' in f:
            default_bucket = f['bucket']
            break
    # Create a bucket in default location if none is found.
    if default_bucket is None:
        b = Bucket.create()
        BucketTag.create(b, 'record', str(record.id))
        default_bucket = str(b.id)
        db.session.commit()
    else:
        b = Bucket.get(default_bucket)
    # Rebuild the '_files' metadata from scratch.
    record['_files'] = []
    for key, meta in files.items():
        obj = cls.create_file(b, key, meta)
        # Strip the leading dot from the extension, e.g. '.pdf' -> 'pdf'.
        ext = splitext(obj.key)[1].lower()
        if ext.startswith('.'):
            ext = ext[1:]
        record['_files'].append(dict(
            bucket=str(obj.bucket.id),
            key=obj.key,
            version_id=str(obj.version_id),
            size=obj.file.size,
            checksum=obj.file.checksum,
            type=ext,
        ))
    db.session.add(
        RecordsBuckets(record_id=record.id, bucket_id=b.id)
    )
    record.commit()
    db.session.commit()
    return [b]
def create_file(self, bucket, key, file_versions):
    """Create a single file with all versions.

    ``file_versions`` is expected oldest-first; the last ObjectVersion
    created becomes the head and is returned.
    """
    objs = []
    for file_ver in file_versions:
        f = FileInstance.create().set_uri(
            file_ver['full_path'],
            file_ver['size'],
            'md5:{0}'.format(file_ver['checksum']),
        )
        obj = ObjectVersion.create(bucket, key).set_file(f)
        # Creation timestamps are stored naive (tzinfo dropped).
        obj.created = arrow.get(
            file_ver['creation_date']).datetime.replace(tzinfo=None)
        objs.append(obj)
    # Set head version
    db.session.commit()
    return objs[-1]
def delete_buckets(cls, record):
    """Mark every bucket referenced from the record's files as deleted.

    File entries without a ``bucket`` reference are skipped; the old
    code added ``None`` to the set and then called ``Bucket.get(None)``.
    """
    files = record.get('_files', [])
    buckets = set()
    for f in files:
        b_id = f.get('bucket')
        if b_id is not None:
            buckets.add(b_id)
    for b_id in buckets:
        b = Bucket.get(b_id)
        b.deleted = True
def missing_pids(self):
    """Return the subset of ``self.pids`` not yet registered.

    A pid counts as missing when looking it up raises
    ``PIDDoesNotExistError``.
    """
    def _is_missing(p):
        try:
            PersistentIdentifier.get(p.pid_type, p.pid_value)
        except PIDDoesNotExistError:
            return True
        return False

    return [p for p in self.pids if _is_missing(p)]
def prepare_revisions(self):
    """Build ``self.revisions`` from the dumped record data.

    When ``latest_only`` is set only the first dumped revision is
    converted; otherwise every revision is.
    """
    records = self.data['record']
    selected = [records[0]] if self.latest_only else records
    self.revisions = [self._prepare_revision(rev) for rev in selected]
def prepare_files(self):
    """Group the dumped files by name and sort each group by version.

    Populates ``self.files`` with a mapping from full file name to the
    list of its versions in ascending version order.
    """
    grouped = {}
    for entry in self.data['files']:
        grouped.setdefault(entry['full_name'], []).append(entry)
    # Sort versions
    for versions in grouped.values():
        versions.sort(key=lambda e: e['version'])
    self.files = grouped
def prepare_pids(self):
    """Collect persistent identifiers from the latest revision.

    Runs every configured fetcher against the newest revision's data
    and keeps only the truthy results in ``self.pids``.
    """
    latest_data = self.revisions[-1][1]
    self.pids = [pid
                 for pid in (f(None, latest_data)
                             for f in self.pid_fetchers)
                 if pid]
def is_deleted(self, record=None):
    """Return True if the given (or latest) record is marked deleted.

    A record counts as deleted when ``'deleted'`` appears in its
    ``collections`` list.
    """
    if not record:
        record = self.revisions[-1][1]
    return 'deleted' in record.get('collections', [])
def load_community(data, logos_dir):
    """Load community from data dump.

    :param data: Dictionary containing community data.
    :type data: dict
    :param logos_dir: Path to a local directory with community logos.
    :type logos_dir: str
    """
    from invenio_communities.models import Community
    from invenio_communities.utils import save_and_validate_logo
    # Normalize the dumped logo extension before building the file name.
    logo_ext_washed = logo_ext_wash(data['logo_ext'])
    c = Community(
        id=data['id'],
        id_user=data['id_user'],
        title=data['title'],
        description=data['description'],
        page=data['page'],
        curation_policy=data['curation_policy'],
        last_record_accepted=iso2dt_or_none(data['last_record_accepted']),
        logo_ext=logo_ext_washed,
        ranking=data['ranking'],
        fixed_points=data['fixed_points'],
        created=iso2dt(data['created']),
        updated=iso2dt(data['last_modified']),
    )
    logo_path = join(logos_dir, "{0}.{1}".format(c.id, logo_ext_washed))
    db.session.add(c)
    # Restore the logo only when it exists in the local dump directory.
    if isfile(logo_path):
        with open(logo_path, 'rb') as fp:
            save_and_validate_logo(fp, logo_path, c.id)
    db.session.commit()
def load_featured(data):
    """Load community featuring from data dump.

    :param data: Dictionary containing community featuring data.
    :type data: dict
    """
    from invenio_communities.models import FeaturedCommunity
    featured = FeaturedCommunity(
        id=data['id'],
        id_community=data['id_community'],
        start_date=iso2dt(data['start_date']),
    )
    db.session.add(featured)
    db.session.commit()
def dump(thing, query, from_date, file_prefix, chunk_size, limit, thing_flags):
    """Dump data from Invenio legacy.

    Queries the selected *thing* and writes the dumps as chunked JSON
    array files named ``<prefix>_<n>.json``.
    """
    init_app_context()
    file_prefix = file_prefix if file_prefix else '{0}_dump'.format(thing)
    kwargs = dict((f.strip('-').replace('-', '_'), True) for f in thing_flags)
    try:
        thing_func = collect_things_entry_points()[thing]
    except KeyError:
        # ``raise`` was missing: instantiating click.Abort without raising
        # it silently continued with an undefined ``thing_func``.
        raise click.Abort(
            '{0} is not in the list of available things to migrate: '
            '{1}'.format(thing, collect_things_entry_points()))
    click.echo("Querying {0}...".format(thing))
    count, items = thing_func.get(query, from_date, limit=limit, **kwargs)
    click.echo("Dumping {0}...".format(thing))
    with click.progressbar(length=count) as bar:
        for i, chunk_ids in enumerate(grouper(items, chunk_size)):
            with open('{0}_{1}.json'.format(file_prefix, i), 'w') as fp:
                fp.write("[\n")
                for _id in chunk_ids:
                    try:
                        json.dump(
                            thing_func.dump(_id, from_date, **kwargs),
                            fp,
                            default=set_serializer
                        )
                        fp.write(",")
                    except Exception as e:
                        # str(e) instead of ``e.message`` (Python 2 only).
                        click.secho("Failed dump {0} {1} ({2})".format(
                            thing, _id, e), fg='red')
                    # click's update() advances by the given step count, so
                    # pass 1 per item; the old cumulative counter overshot.
                    bar.update(1)
                # Strip trailing comma.
                fp.seek(fp.tell()-1)
                fp.write("\n]")
def check(thing):
    """Check data in Invenio legacy."""
    init_app_context()
    try:
        thing_func = collect_things_entry_points()[thing]
    except KeyError:
        # ``raise`` was missing: click.Abort must be raised to stop the
        # command; instantiating it had no effect.
        raise click.Abort(
            '{0} is not in the list of available things to migrate: '
            '{1}'.format(thing, collect_things_entry_points()))
    click.echo("Querying {0}...".format(thing))
    count, items = thing_func.get_check()
    click.echo("Checking {0}...".format(thing))
    with click.progressbar(length=count) as bar:
        for _id in items:
            thing_func.check(_id)
            # Advance one step per item (click's update() is relative).
            bar.update(1)
def registerEventHandlers(self):
    """
    Registers event handlers used by this widget, e.g. mouse click/motion and window resize.

    This will allow the widget to redraw itself upon resizing of the window in case the position needs to be adjusted.
    """
    handlers = (
        ("on_mouse_press", self.on_mouse_press),
        ("on_mouse_release", self.on_mouse_release),
        ("on_mouse_drag", self.on_mouse_drag),
        ("on_mouse_motion", self.on_mouse_motion),
        ("on_resize", self.on_resize),
    )
    for event, handler in handlers:
        self.peng.registerEventHandler(event, handler)
def pos(self):
    """
    2-tuple property giving the widget's position.

    ``self._pos`` may be a static list/tuple or a callable taking
    ``(submenu_width, submenu_height, widget_width, widget_height)``;
    a callable given at initialization is re-evaluated on every access,
    while assigning to this property replaces it permanently.

    The parent submenu's position is added as an offset, and the result
    is wrapped in a :py:class:`_WatchingList` so assignments to single
    coordinates trigger a redraw.
    """
    raw = self._pos
    if isinstance(raw, (list, tuple)):
        base = raw
    elif callable(raw):
        sw, sh = self.submenu.size[:]
        base = raw(sw, sh, *self.size)
    else:
        raise TypeError("Invalid position type")
    off_x, off_y = self.submenu.pos
    shifted = base[0] + off_x, base[1] + off_y
    return _WatchingList(shifted, self._wlredraw_pos)
def size(self):
    """
    Similar to :py:attr:`pos` but for the size instead.

    ``self._size`` may be a static list/tuple or a callable taking the
    submenu size. A component of ``-1`` is replaced by the widget's
    minimum size in that dimension, and negative sizes are clamped to
    zero to prevent rendering crashes.
    """
    if isinstance(self._size, list) or isinstance(self._size, tuple):
        s = self._size
    elif callable(self._size):
        w, h = self.submenu.size[:]
        s = self._size(w, h)
    else:
        raise TypeError("Invalid size type")
    # list() instead of s[:]: slicing a tuple yields a tuple, and the
    # item assignments below would then raise a TypeError.
    s = list(s)
    if s[0] == -1:
        s[0] = self.getMinSize()[0]
    if s[1] == -1:
        s[1] = self.getMinSize()[1]
    # Prevents crashes with negative size
    s = [max(s[0], 0), max(s[1], 0)]
    return _WatchingList(s, self._wlredraw_size)
def clickable(self):
    """
    Property used for determining if the widget should be clickable by the user.

    True only when the widget is enabled and its submenu (or the chain
    of parent containers) is the one currently active on screen. The
    widget may be disabled via this property or the :py:attr:`enabled`
    attribute.
    """
    if isinstance(self.submenu, Container):
        return self.submenu.clickable and self.enabled
    menu = self.submenu.menu
    return (self.submenu.name == menu.activeSubMenu
            and menu.name == self.window.activeMenu
            and self.enabled)
def delete(self):
    """
    Deletes resources of this widget that require manual cleanup.

    Currently removes all actions, event handlers and the background.
    The background itself should automatically remove all vertex lists to avoid visual artifacts.

    Note that this method is currently experimental, as it seems to have a memory leak.
    """
    # TODO: fix memory leak upon widget deletion
    # Break the bg<->widget reference cycle before dropping the background.
    del self.bg.widget
    del self.bg
    #self.clickable=False
    del self._pos
    del self._size
    self.actions = {}
    # Scan all registered event handlers for entries referring to this
    # widget and remove them.
    for e_type,e_handlers in self.peng.eventHandlers.items():
        # NOTE(review): ``True or`` short-circuits, so the ``e_type in eh``
        # filter (``eh`` is undefined here) is dead code and every event
        # type is scanned.
        if True or e_type in eh:
            to_del = []
            for e_handler in e_handlers:
                # Weird workaround due to implementation details of WeakMethod
                if isinstance(e_handler,weakref.ref):
                    # Dereference the weakref to compare against this widget.
                    if super(weakref.WeakMethod,e_handler).__call__() is self:
                        to_del.append(e_handler)
                elif e_handler is self:
                    to_del.append(e_handler)
            for d in to_del:
                try:
                    #print("Deleting handler %s of type %s"%(d,e_type))
                    del e_handlers[e_handlers.index(d)]
                except Exception:
                    #print("Could not delete handler %s, memory leak may occur"%d)
                    import traceback;traceback.print_exc()
def on_redraw(self):
    """
    Renders the background (initializing it lazily) and then the widget itself.

    Subclasses overriding this should call ``super()`` to keep the
    background drawing intact, or rendering may glitch out.
    """
    bg = self.bg
    if bg is not None:
        if not bg.initialized:
            bg.init_bg()
            bg.initialized = True
        bg.redraw_bg()
    super(Widget, self).on_redraw()
def calcSphereCoordinates(pos, radius, rot):
    """
    Calculates the Cartesian coordinates from spherical coordinates.

    ``pos`` is a simple offset to offset the result with, ``radius`` is
    the radius of the input and ``rot`` is a 2-tuple of
    ``(azimuth, polar)`` angles, both in degrees, matching the direction
    conventions used in most of the game: the azimuth ranges from 0 to
    360 degrees with 0 pointing directly along the x-axis, the polar
    angle from -90 (straight down) to 90 (straight up).
    """
    # Shift the polar angle by 90 degrees so that 0 maps onto the
    # horizontal plane before applying the standard spherical formulas
    # (this offset is essential for correct results).
    polar, azimuth = rot
    phi = math.radians(polar + 90)
    theta = math.radians(azimuth)
    sin_phi = math.sin(phi)
    x = pos[0] + radius * sin_phi * math.cos(theta)
    y = pos[1] + radius * sin_phi * math.sin(theta)
    z = pos[2] + radius * math.cos(phi)
    return x, y, z
def v_magnitude(v):
    """
    Simple vector helper function returning the length of a vector.

    ``v`` may be any vector, with any number of dimensions
    """
    return math.sqrt(sum(component * component for component in v))
def v_normalize(v):
    """
    Normalizes the given vector.

    The vector given may have any number of dimensions.
    """
    length = v_magnitude(v)
    return [component / length for component in v]
def transformTexCoords(self, data, texcoords, dims=2):
    """
    Transforms the given texture coordinates using the internal texture coordinates.

    Input (u, v) pairs in [0, 1] are linearly rescaled to fit between
    this object's internal ``tex_coords`` corners; the output is
    flattened 3D coordinates with the third component always zero.
    Values outside [0, 1] may cause unexpected visual glitches, and the
    input length should be divisible by the dimensionality.
    """
    assert dims == 2  # TODO
    base = self.tex_coords
    u0, v0 = base[0], base[1]
    u1, v1 = base[6], base[7]
    du, dv = u1 - u0, v1 - v0
    result = []
    pairs = iter(texcoords)
    # zip(it, it) walks consecutive (u, v) pairs of the flat input.
    for u, v in zip(pairs, pairs):
        result.append(u0 + du * u)
        result.append(v0 + dv * v)
        result.append(0)
    return result
def ensureBones(self, data):
    """
    Helper method ensuring per-entity bone data has been properly initialized.

    Should be called at the start of every method accessing per-entity
    data. ``data`` is the entity to check in dictionary form; after the
    call ``data["_bones"][self.name]`` holds the bone's rotation (a copy
    of the start rotation) and length.
    """
    bones = data.setdefault("_bones", {})
    if self.name not in bones:
        bones[self.name] = {
            "rot": self.start_rot[:],
            "length": self.blength,
        }
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.