| Column | Type | Stats |
|---|---|---|
| id | int64 | 0 to 458k |
| file_name | string | lengths 4 to 119 |
| file_path | string | lengths 14 to 227 |
| content | string | lengths 24 to 9.96M |
| size | int64 | 24 to 9.96M |
| language | string | 1 distinct value |
| extension | string | 14 distinct values |
| total_lines | int64 | 1 to 219k |
| avg_line_length | float64 | 2.52 to 4.63M |
| max_line_length | int64 | 5 to 9.91M |
| alphanum_fraction | float64 | 0 to 1 |
| repo_name | string | lengths 7 to 101 |
| repo_stars | int64 | 100 to 139k |
| repo_forks | int64 | 0 to 26.4k |
| repo_open_issues | int64 | 0 to 2.27k |
| repo_license | string | 12 distinct values |
| repo_extraction_date | string | 433 distinct values |
| 10,700 | file_system.py | zatosource_zato/code/zato-common/src/zato/common/util/file_system.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

# stdlib
import os
import re
import string
from datetime import datetime, timedelta
from pathlib import Path
from tempfile import gettempdir
from time import sleep
from uuid import uuid4

# ################################################################################################################################

if 0:
    from zato.common.typing_ import callable_, strlist

# ################################################################################################################################

_re_fs_safe_name = '[{}]'.format(string.punctuation + string.whitespace)

# ################################################################################################################################

def fs_safe_name(value:'str') -> 'str':
    return re.sub(_re_fs_safe_name, '_', value)

# ################################################################################################################################

def fs_safe_now(_utcnow:'callable_'=datetime.utcnow) -> 'str':
    """ Returns a UTC timestamp with any characters unsafe for filesystem names removed.
    """
    return fs_safe_name(_utcnow().isoformat())

# ################################################################################################################################

def wait_for_file(path:'str', max_wait:'int'=5) -> 'None':
    found = False
    now = datetime.utcnow()
    until = now + timedelta(seconds=max_wait)

    while now < until:
        found = os.path.exists(path)
        if found:
            break
        else:
            sleep(0.05)
            now = datetime.utcnow()

# ################################################################################################################################

def get_tmp_path(prefix:'str'='', body:'str'='', suffix:'str'='') -> 'str':
    tmp_dir = gettempdir()
    prefix = prefix or 'zato'
    body = body or uuid4().hex
    suffix = suffix or uuid4().hex
    file_name = f'{prefix}-{body}-{suffix}'
    tmp_path = os.path.join(tmp_dir, file_name)
    return tmp_path

# ################################################################################################################################

def resolve_path(path:'str', base_dir:'str'='') -> 'str':

    # Local aliases
    has_env = '$' in path
    has_home = '~' in path
    is_relative = not os.path.isabs(path)

    # We can return the path as is if there is nothing to resolve
    if not (has_env or has_home or is_relative):
        return path

    # Expand the path to the user's directory first ..
    if has_home:
        path = os.path.expanduser(path)

    # .. we can expand environment variables too ..
    if has_env:
        path = os.path.expandvars(path)

    # .. if what we have is not an absolute path, it means that we need to turn it into one ..
    # .. while keeping in mind that it is relative to the base directory that we have on input ..
    if not os.path.isabs(path):
        path = os.path.join(base_dir, path)
        path = os.path.abspath(path)

    # .. now, we can return the result to our caller ..
    return path

# ################################################################################################################################

def touch(path:'str') -> 'None':
    Path(path).touch()

# ################################################################################################################################

def touch_multiple(path_list:'strlist') -> 'None':
    for path in path_list:
        touch(path)

# ################################################################################################################################

| 3,811 | Python | .py | 78 | 44.346154 | 130 | 0.421195 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
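A short usage sketch for the helpers above, assuming the zato-common package is importable; the sample inputs and the printed outputs are illustrative only:

```python
from zato.common.util.file_system import fs_safe_name, get_tmp_path, resolve_path

# Punctuation and whitespace both become underscores ..
print(fs_safe_name('my channel: v2'))  # -> my_channel__v2

# .. get_tmp_path builds a unique file name under the system temp directory ..
print(get_tmp_path(prefix='demo'))  # e.g. /tmp/demo-<hex>-<hex>

# .. and a relative path is resolved against an explicit base directory.
print(resolve_path('conf/server.conf', base_dir='/opt/zato'))  # -> /opt/zato/conf/server.conf
```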
| 10,701 | __init__.py | zatosource_zato/code/zato-common/src/zato/common/util/cloud/__init__.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

| 154 | Python | .py | 5 | 29.4 | 64 | 0.687075 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,702 | google.py | zatosource_zato/code/zato-common/src/zato/common/util/cloud/google.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

# ################################################################################################################################
# ################################################################################################################################

if 0:
    from zato.common.model.google import GoogleAPIDescription
    from zato.common.typing_ import list_

# ################################################################################################################################
# ################################################################################################################################

def get_api_list() -> 'list_[GoogleAPIDescription]':
    """ Returns a list of all the APIs that the Google client library supports.
    """

    # stdlib
    import os
    from operator import itemgetter

    # google-api-client
    from googleapiclient.discovery_cache import DISCOVERY_DOC_DIR as root_dir

    # Zato
    from zato.common.model.google import GoogleAPIDescription
    from zato.common.typing_ import cast_
    from zato.common.util.open_ import open_r
    from zato.common.util.json_ import JSONParser

    # Response to produce
    out = []

    # A reusable parser
    parser = JSONParser()

    # A cache to make it easier to find out which APIs
    # have only non-GA versions.
    version_cache = {}

    # .. build a full path to the schema ..
    full_path = os.path.join(root_dir, 'index.json')

    with open_r(full_path) as f:

        # .. read the contents and parse it ..
        index = f.read().encode('utf8')

        # .. parse it ..
        doc = parser.parse(index)

        # .. and extract only the required keys ..
        for item in doc['items']:

            api_id = item.at_pointer('/id')
            api_name = item.at_pointer('/name')
            api_title = item.at_pointer('/title')
            api_version = item.at_pointer('/version')

            desc = GoogleAPIDescription()
            desc.id = api_id
            desc.name = api_name
            desc.title = api_title
            desc.version = api_version
            desc.title_full = '{} ({})'.format(desc.title, desc.version)

            api_version_entry = version_cache.setdefault(desc.name, []) # type: list
            api_version_entry.append(desc)

            out.append(desc)

    # Go through each API in the cache ..
    for desc_list in version_cache.values():

        # .. if the list of descriptions contains more than one,
        # .. we need to remove all the non-GA ones from the output list ..
        if len(desc_list) > 1:
            for item in desc_list:
                item = cast_('GoogleAPIDescription', item)
                if ('alpha' in item.version) or ('beta' in item.version):
                    out.remove(item)

    out.sort(key=itemgetter('title'))

    return out

# ################################################################################################################################
# ################################################################################################################################

| 3,266 | Python | .py | 67 | 41.343284 | 130 | 0.466919 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
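The interesting part of get_api_list is the two-pass filtering: every version of every API is first collected into version_cache, and any API that ships more than one version then has its alpha/beta entries dropped from the output. A self-contained sketch of that idea, using plain dicts instead of GoogleAPIDescription (the function name and sample data are illustrative, not from the Zato codebase):

```python
from operator import itemgetter

def keep_ga_versions(items):
    """ Collects every (name, version) pair, then drops alpha/beta
    versions of any API that also ships other versions.
    """
    out = []
    version_cache = {}

    # First pass: record every version of every API
    for item in items:
        version_cache.setdefault(item['name'], []).append(item)
        out.append(item)

    # Second pass: drop non-GA versions of multi-version APIs
    for desc_list in version_cache.values():
        if len(desc_list) > 1:
            for item in desc_list:
                if 'alpha' in item['version'] or 'beta' in item['version']:
                    out.remove(item)

    out.sort(key=itemgetter('title'))
    return out

apis = [
    {'name': 'compute', 'version': 'v1', 'title': 'Compute'},
    {'name': 'compute', 'version': 'beta', 'title': 'Compute'},
    {'name': 'dns', 'version': 'v1beta2', 'title': 'DNS'},
]

# compute/beta is removed because compute also has v1;
# dns/v1beta2 stays because it is the only dns version.
print(keep_ga_versions(apis))
```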
| 10,703 | confluence.py | zatosource_zato/code/zato-common/src/zato/common/util/cloud/atlassian/confluence.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

# stdlib
from dataclasses import dataclass

# Zato
from zato.server.service import Model
from zato.common.typing_ import list_, list_field
from zato.common.util.file_system import fs_safe_now

# ################################################################################################################################
# ################################################################################################################################

if 0:
    from zato.common.typing_ import any_

# ################################################################################################################################
# ################################################################################################################################

class Config:
    ParamName: str = 'Parameters'
    MacroName: str = 'details'
    DataLayout: str = 'default'
    SchemaVersion: str = '1'
    KeyStyle: str = 'width: 340.0px;'
    ValueStyle: str = 'width: 340.0px;'
    UserLinkPattern: str = '<ac:link><ri:user ri:account-id="{account_id}" /></ac:link>'

    RowPattern: str = """
    <tr>
        <th>
            <p>{key}</p>
        </th>
        <td>
            <p>{value}</p>
        </td>
    </tr>
    """.rstrip()

    MacroTemplate: str = """
<ac:structured-macro
    ac:name="{macro_name}"
    ac:schema-version="{schema_version}"
    data-layout="default"
    ac:local-id="structured-macro-local-id-{local_id}"
    ac:macro-id="macro-id-{local_id}">
    <ac:parameter ac:name="id">{param_name}</ac:parameter>
    <ac:rich-text-body>
        <table data-layout="default" ac:local-id="table-local-id-{local_id}">
            <colgroup>
                <col style="{key_style}" />
                <col style="{value_style}" />
            </colgroup>
            <tbody>{rows}
            </tbody>
        </table>
    </ac:rich-text-body>
</ac:structured-macro>
""".strip()

# ################################################################################################################################
# ################################################################################################################################

@dataclass
class Row(Model):
    key: str
    value: str

# ################################################################################################################################
# ################################################################################################################################

class PageProperties:
    """ Allows one to create a table with Confluence's Page Properties macro.
    """
    param_name: str = Config.ParamName
    key_style: str = Config.KeyStyle
    value_style: str = Config.ValueStyle
    macro_name: str = Config.MacroName
    row_pattern: str = Config.RowPattern
    macro_template: str = Config.MacroTemplate
    schema_version: str = Config.SchemaVersion
    user_link_pattern: str = Config.UserLinkPattern
    rows: list_[Row] = list_field()

# ################################################################################################################################

    def __init__(self, param_name:'str'='', local_id:'str'='') -> 'None':
        self.param_name = param_name or Config.ParamName
        self.local_id = local_id or 'zato-{}'.format(fs_safe_now())
        self.rows = []

# ################################################################################################################################

    def get_user_link(self, account_id:'str') -> 'str':
        value = self.user_link_pattern.format(account_id=account_id)
        return value

# ################################################################################################################################

    def append(self, key:'any_', value:'any_') -> 'Row':
        row = Row(key=key, value=value)
        self.rows.append(row)
        return row

# ################################################################################################################################

    def get_result(self):

        # To turn all rows into HTML data
        html_rows = []

        # .. convert each one ..
        for row in self.rows:
            html_row = self.row_pattern.format(key=row.key, value=row.value)
            html_rows.append(html_row)

        # .. and build a string representing all the rows.
        rows = '\n'.join(html_rows)

        ctx = {
            'rows': rows,
            'param_name': self.param_name,
            'local_id': self.local_id,
            'key_style': self.key_style,
            'value_style': self.value_style,
            'macro_name': self.macro_name,
            'schema_version': self.schema_version,
        }

        result = self.macro_template.format(**ctx)
        return result

# ################################################################################################################################
# ################################################################################################################################

| 5,273 | Python | .py | 112 | 41.125 | 130 | 0.381481 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
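A minimal usage sketch for PageProperties, assuming zato-common and zato-server are importable; the account id and row values are made up:

```python
from zato.common.util.cloud.atlassian.confluence import PageProperties

props = PageProperties(param_name='Parameters')

# Each append adds one key/value <tr> row ..
props.append('Owner', props.get_user_link('5d1234abcd'))  # account id is illustrative
props.append('Status', 'Active')

# .. and get_result renders the complete <ac:structured-macro> element
# in Confluence storage format, ready for the page content API.
xml = props.get_result()
print(xml)
```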
| 10,704 | __init__.py | zatosource_zato/code/zato-common/src/zato/common/util/cloud/atlassian/__init__.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

| 154 | Python | .py | 5 | 29.4 | 64 | 0.687075 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,705 | __init__.py | zatosource_zato/code/zato-common/src/zato/common/util/sql/__init__.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

# stdlib
from itertools import chain
from logging import DEBUG, getLogger

# Bunch
from bunch import bunchify

# gevent
from gevent import sleep

# SQLAlchemy
from sqlalchemy.exc import InternalError as SAInternalError, OperationalError as SAOperationalError

# Zato
from zato.common.api import GENERIC, SEARCH
from zato.common.json_internal import dumps, loads
from zato.common.odb.model import Base, SecurityBase
from zato.common.util.search import SearchResults

# ################################################################################################################################
# ################################################################################################################################

if 0:
    from bunch import Bunch
    from zato.common.typing_ import any_

# ################################################################################################################################
# ################################################################################################################################

logger_zato = getLogger('zato')
logger_pubsub = getLogger('zato_pubsub')

has_debug = logger_zato.isEnabledFor(DEBUG) or logger_pubsub.isEnabledFor(DEBUG)

# ################################################################################################################################
# ################################################################################################################################

_default_page_size = SEARCH.ZATO.DEFAULTS.PAGE_SIZE
_max_page_size = _default_page_size * 5

# All exceptions that can be raised when deadlocks occur
_DeadlockException = (SAInternalError, SAOperationalError)

# In MySQL, 1213 = 'Deadlock found when trying to get lock; try restarting transaction'
# but the underlying PyMySQL library returns only a string rather than an integer code.
_deadlock_code = 'Deadlock found when trying to get lock'

_zato_opaque_skip_attrs = {'needs_details', 'paginate', 'cur_page', 'query'}

# ################################################################################################################################

def search(search_func, config, filter_by, session=None, cluster_id=None, *args, **kwargs):
    """ Adds search criteria to an SQLAlchemy query based on current search configuration.
    """
    try:
        cur_page = int(config.get('cur_page', 1))
    except (ValueError, TypeError):
        cur_page = 1

    try:
        page_size = min(int(config.get('page_size', _default_page_size)), _max_page_size)
    except (ValueError, TypeError):
        page_size = _default_page_size

    # We need to subtract 1 because externally our API exposes human-readable numbers,
    # i.e. starting from 1, not 0, but internally the database needs 0-based slices.
    if cur_page > 0:
        cur_page -= 1

    kwargs = {
        'cur_page': cur_page,
        'page_size': page_size,
        'filter_by': filter_by,
        'where': kwargs.get('where'),
        'filter_op': kwargs.get('filter_op'),
        'data_filter': kwargs.get('data_filter'),
    }

    query = config.get('query')
    if query:
        query = query.strip().split()
        if query:
            kwargs['query'] = query

    result = search_func(session, cluster_id, *args, **kwargs)

    # Fills out all the search-related information
    result.set_data(cur_page, page_size)

    return result

# ################################################################################################################################

def sql_op_with_deadlock_retry(cid, name, func, *args, **kwargs):
    cid = cid or None
    attempts = 0

    while True:
        attempts += 1

        if has_debug:
            logger_zato.info('In sql_op_with_deadlock_retry, %s %s %s %s %r %r', attempts, cid, name, func, args, kwargs)

        try:
            # Call the SQL function that will possibly result in a deadlock
            func(*args, **kwargs)

            if has_debug:
                logger_zato.info('In sql_op_with_deadlock_retry, returning True')

            # This will return only if there is no exception in calling the SQL function
            return True

        # Catch deadlocks - it may happen because both this function and delivery tasks update the same tables
        except _DeadlockException as e:

            if has_debug:
                logger_zato.warning('Caught _DeadlockException `%s` `%s`', cid, e)

            if _deadlock_code not in e.args[0]:
                raise
            else:
                if attempts % 50 == 0:
                    msg = 'Still in deadlock for `{}` after %d attempts cid:%s args:%s'.format(name)
                    logger_zato.warning(msg, attempts, cid, args)
                    logger_pubsub.warning(msg, attempts, cid, args)

                # Sleep for a while until the next attempt
                sleep(0.005)

                # Push the counter
                attempts += 1

# ################################################################################################################################

def sql_query_with_retry(query:'any_', query_name:'str', *args:'any_') -> 'None':
    """ Keeps repeating a given SQL query until it succeeds.
    """
    idx = 0
    is_ok = False

    while not is_ok:
        idx += 1

        if has_debug:
            logger_zato.info(f'{query_name} -> is_ok.{idx}:`{is_ok}`')

        is_ok = query(*args)

        if has_debug:
            logger_zato.info(f'{query_name} -> is_ok.{idx}:`{is_ok}`')

# ################################################################################################################################
# ################################################################################################################################

class ElemsWithOpaqueMaker:
    def __init__(self, elems):
        self.elems = elems

# ################################################################################################################################

    @staticmethod
    def get_opaque_data(elem):
        return elem.get(GENERIC.ATTR_NAME)

    has_opaque_data = get_opaque_data

# ################################################################################################################################

    @staticmethod
    def _set_opaque(elem, drop_opaque=False):
        opaque = ElemsWithOpaqueMaker.get_opaque_data(elem)
        opaque = loads(opaque) if opaque else {}
        opaque = opaque or {}
        elem.update(opaque)

        if drop_opaque:
            del elem[GENERIC.ATTR_NAME]

# ################################################################################################################################

    @staticmethod
    def process_config_dict(config, drop_opaque=False):
        ElemsWithOpaqueMaker._set_opaque(config, drop_opaque)

# ################################################################################################################################

    def _process_elems(self, out, elems, _skip_class=(Base, list)):
        for elem in elems:
            if hasattr(elem, '_sa_class_manager'):
                data = {}
                for (name, _) in elem._sa_class_manager._all_sqla_attributes():
                    value = getattr(elem, name)
                    if name.startswith('__'):
                        continue
                    if isinstance(value, _skip_class):
                        continue
                    data[name] = value
            else:
                data = elem._asdict()
            elem = bunchify(data)
            ElemsWithOpaqueMaker._set_opaque(elem)
            out.append(elem)
        return out

# ################################################################################################################################

    def _elems_with_opaque_search(self):
        """ Resolves all opaque elements in search results.
        """
        search_result = self.elems[0]
        new_result = self._process_elems([], search_result.result)
        search_result.result = new_result
        return self.elems

# ################################################################################################################################

    def get(self):
        if isinstance(self.elems, tuple) and isinstance(self.elems[0], SearchResults):
            return self._elems_with_opaque_search()
        else:
            return self._process_elems([], self.elems)

# ################################################################################################################################
# ################################################################################################################################

def elems_with_opaque(elems):
    """ Turns a list of SQLAlchemy elements into a list of Bunch instances,
    each possibly with its opaque elements already extracted to the level of each Bunch.
    """
    return ElemsWithOpaqueMaker(elems).get()

# ################################################################################################################################

def parse_instance_opaque_attr(instance:'any_') -> 'Bunch':
    opaque = getattr(instance, GENERIC.ATTR_NAME)
    opaque = loads(opaque) if opaque else None
    if not opaque:
        return {}
    ElemsWithOpaqueMaker.process_config_dict(opaque)
    return bunchify(opaque)

# ################################################################################################################################

def get_dict_with_opaque(instance, to_bunch=False):
    opaque = parse_instance_opaque_attr(instance)
    out = instance._asdict() if hasattr(instance, '_asdict') else instance.asdict()
    for k, v in opaque.items():
        out[k] = v
    return bunchify(out) if to_bunch else out

# ################################################################################################################################

def set_instance_opaque_attrs(instance, input, skip=None, only=None, _zato_skip=_zato_opaque_skip_attrs):
    """ Given an SQLAlchemy object instance and incoming SimpleIO-based input,
    populates all opaque values of that instance.
    """
    only = only or []
    instance_opaque_attrs = None
    instance_attrs = set(instance.asdict())
    input_attrs = set(input)

    if only:
        input_attrs = {elem for elem in input_attrs if elem in only}
        instance_attrs = {elem for elem in instance_attrs if elem not in only}

    # Any extra input attributes will be treated as opaque ones
    input_opaque_attrs = input_attrs - instance_attrs

    # Skip attributes related to pagination
    for name in chain(skip or [], _zato_skip):
        input_opaque_attrs.discard(name)

    # Prepare generic attributes for instance
    if GENERIC.ATTR_NAME in instance_attrs:
        instance_opaque_attrs = getattr(instance, GENERIC.ATTR_NAME)
        if instance_opaque_attrs:
            instance_opaque_attrs = loads(instance_opaque_attrs)
            instance_opaque_attrs = instance_opaque_attrs or {}
            if isinstance(instance_opaque_attrs, str):
                instance_opaque_attrs = loads(instance_opaque_attrs)
        else:
            instance_opaque_attrs = {}

        for name in input_opaque_attrs:
            value = input[name]
            if isinstance(value, bytes):
                value = value.decode('utf8')
            instance_opaque_attrs[name] = value

    # Set generic attributes for instance
    if instance_opaque_attrs is not None:
        setattr(instance, GENERIC.ATTR_NAME, dumps(instance_opaque_attrs))

# ################################################################################################################################

def get_security_by_id(session, security_id):
    return session.query(SecurityBase).\
        filter(SecurityBase.id==security_id).\
        one()

# ################################################################################################################################

def get_instance_by_id(session, model_class, id):
    return session.query(model_class).\
        filter(model_class.id==id).\
        one()

# ################################################################################################################################

def get_instance_by_name(session, model_class, type_, name):
    return session.query(model_class).\
        filter(model_class.type_==type_).\
        filter(model_class.name==name).\
        one()

# ################################################################################################################################

| 12,763 | Python | .py | 244 | 44.905738 | 130 | 0.468152 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
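A minimal sketch of how sql_op_with_deadlock_retry is meant to be driven, assuming an SQLAlchemy session object already exists; the table, function and cid values are illustrative, not from the Zato codebase:

```python
from sqlalchemy import text

from zato.common.util.sql import sql_op_with_deadlock_retry

def update_status(session, object_id, status):
    # A hypothetical UPDATE that may deadlock when delivery tasks
    # update the same rows concurrently.
    session.execute(
        text('update my_table set status = :status where id = :id'),
        {'status': status, 'id': object_id},
    )
    session.commit()

# Runs update_status(session, 42, 'done'), sleeping 5 ms and retrying
# whenever MySQL reports deadlock 1213, and re-raising any other
# InternalError or OperationalError unchanged.
sql_op_with_deadlock_retry('cid-123', 'update-status', update_status, session, 42, 'done')
```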
| 10,706 | retry.py | zatosource_zato/code/zato-common/src/zato/common/util/sql/retry.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

# stdlib
from logging import DEBUG, getLogger

# gevent
from gevent import sleep

# SQLAlchemy
from sqlalchemy.exc import InternalError as SAInternalError, OperationalError as SAOperationalError

# ################################################################################################################################
# ################################################################################################################################

if 0:
    from zato.common.typing_ import any_, callable_

# ################################################################################################################################
# ################################################################################################################################

logger_zato = getLogger('zato')
logger_pubsub = getLogger('zato_pubsub')

has_debug = logger_zato.isEnabledFor(DEBUG) or logger_pubsub.isEnabledFor(DEBUG)

# ################################################################################################################################
# ################################################################################################################################

# All exceptions that can be raised when deadlocks occur
_DeadlockException = (SAInternalError, SAOperationalError)

# In MySQL, 1213 = 'Deadlock found when trying to get lock; try restarting transaction'
# but the underlying PyMySQL library returns only a string rather than an integer code.
_deadlock_code = 'Deadlock found when trying to get lock'

# ################################################################################################################################
# ################################################################################################################################

def sql_op_with_deadlock_retry(
    cid,      # type: str
    name,     # type: str
    func,     # type: callable_
    *args,    # type: any_
    **kwargs  # type: any_
) -> 'any_':

    cid = cid or 'default-no-cid'
    attempts = 0

    while True:
        attempts += 1

        if has_debug:
            logger_zato.info('In sql_op_with_deadlock_retry, %s %s %s %s %r %r',
                attempts, cid, name, func, args, kwargs)

        try:
            # Call the SQL function that will possibly result in a deadlock
            func(*args, **kwargs)

            if has_debug:
                logger_zato.info('In sql_op_with_deadlock_retry, returning True')

            # This will return only if there is no exception in calling the SQL function
            return True

        # Catch deadlocks - it may happen because both this function and delivery tasks update the same tables
        except _DeadlockException as e:

            if has_debug:
                logger_zato.warning('Caught _DeadlockException `%s` `%s`', cid, e)

            if _deadlock_code not in e.args[0]:
                raise
            else:
                if attempts % 50 == 0:
                    msg = 'Still in deadlock for `{}` after %d attempts cid:%s args:%s'.format(name)
                    logger_zato.warning(msg, attempts, cid, args)
                    logger_pubsub.warning(msg, attempts, cid, args)

                # Sleep for a while until the next attempt
                sleep(0.005)

                # Push the counter
                attempts += 1

# ################################################################################################################################

def sql_query_with_retry(query:'any_', query_name:'str', *args:'any_') -> 'None':
    """ Keeps repeating a given SQL query until it succeeds.
    """
    idx = 0
    is_ok = False

    while not is_ok:
        idx += 1

        if has_debug:
            logger_zato.info(f'{query_name} -> is_ok.{idx}:`{is_ok}`')

        is_ok = query(*args)

        if has_debug:
            logger_zato.info(f'{query_name} -> is_ok.{idx}:`{is_ok}`')

# ################################################################################################################################
# ################################################################################################################################

| 4,361 | Python | .py | 80 | 47.3 | 130 | 0.411959 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,707 | connector.py | zatosource_zato/code/zato-common/src/zato/common/model/connector.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

# Zato
from zato.common.typing_ import boolnone, dataclass, intnone, strnone

# ################################################################################################################################
# ################################################################################################################################

@dataclass
class ConnectorConfig:
    id: int
    name: str
    port: intnone
    address: strnone
    is_active: boolnone
    pool_size: intnone
    def_name: strnone
    old_name: strnone
    password: strnone
    service_name: strnone

# ################################################################################################################################
# ################################################################################################################################

| 997 | Python | .py | 23 | 40.347826 | 130 | 0.330579 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,708 | keysight_.py | zatosource_zato/code/zato-common/src/zato/common/model/keysight_.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

# ################################################################################################################################
# ################################################################################################################################

class KeysightVisionConfigObject:
    def __init__(self):
        self._config_attrs = []
        self.id = -1 # type: int
        self.name = '' # type: str
        self.is_active = True # type: bool
        self.host = '' # type: str
        self.username = '' # type: str
        self.sec_tls_ca_cert_id = -1 # type: int

# ################################################################################################################################
# ################################################################################################################################

| 1,036 | Python | .py | 18 | 52.944444 | 130 | 0.267522 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,709 | hl7.py | zatosource_zato/code/zato-common/src/zato/common/model/hl7.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

# ################################################################################################################################
# ################################################################################################################################

class HL7MLLPConfigObject:
    def __init__(self):
        self._config_attrs = []
        self.id = None # type: int
        self.name = None # type: str
        self.is_active = None # type: bool
        self.sec_type = None # type: str
        self.security_id = None # type: str
        self.is_audit_log_sent_active = None # type: bool
        self.is_audit_log_received_active = None # type: bool
        self.max_len_messages_sent = None # type: int
        self.max_len_messages_received = None # type: int
        self.max_bytes_per_message_sent = None # type: int
        self.max_bytes_per_message_received = None # type: int

# ################################################################################################################################
# ################################################################################################################################

class HL7FHIRConfigObject:
    def __init__(self):
        self._config_attrs = []
        self.id = None # type: int
        self.name = None # type: str
        self.is_active = None # type: bool
        self.address = None # type: str
        self.username = None # type: str
        self.auth_type = None # type: str
        self.pool_size = None # type: int

# ################################################################################################################################
# ################################################################################################################################

| 1,999 | Python | .py | 35 | 51.114286 | 130 | 0.349004 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,710 | microsoft_365.py | zatosource_zato/code/zato-common/src/zato/common/model/microsoft_365.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

# ################################################################################################################################
# ################################################################################################################################

class Microsoft365ConfigObject:
    def __init__(self):
        self._config_attrs = []
        self.id = -1 # type: int
        self.name = '' # type: str
        self.is_active = True # type: bool
        self.tenant_id = '' # type: str
        self.client_id = '' # type: str
        self.secret_value = '' # type: str
        self.auth_redirect_url = '' # type: str

# ################################################################################################################################
# ################################################################################################################################

| 1,076 | Python | .py | 19 | 51.789474 | 130 | 0.280418 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,711 | file_transfer.py | zatosource_zato/code/zato-common/src/zato/common/model/file_transfer.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

# ################################################################################################################################
# ################################################################################################################################

class FileTransferChannel:
    def __init__(self):
        self._config_attrs = []
        self.id = None # type: int
        self.name = None # type: str
        self.is_active = None # type: bool
        self.is_hot_deploy = None # type: bool
        self.source_type = None # type: str
        self.pickup_from = '' # type: str
        self.parse_with = '' # type: str
        self.ftp_source_id = None # type: int
        self.line_by_line = None # type: bool
        self.file_patterns = '' # type: str
        self.service_list = None # type: list
        self.topic_list = None # type: list
        self.outconn_rest_list = None # type: list
        self.read_on_pickup = None # type: bool
        self.sftp_source_id = None # type: int
        self.parse_on_pickup = None # type: bool
        self.ftp_source_name = '' # type: str
        self.sftp_source_name = '' # type: str
        self.service_list_json = None # type: str
        self.topic_list_json = None # type: str
        self.outconn_rest_list_json = None # type: str
        self.scheduler_job_id = None # type: int
        self.move_processed_to = '' # type: str
        self.delete_after_pickup = None # type: bool

# ################################################################################################################################

    def to_dict(self):
        out = {}
        for name in self._config_attrs:
            value = getattr(self, name)
            out[name] = value
        return out

# ################################################################################################################################

    @staticmethod
    def from_dict(config):
        # type: (dict) -> FileTransferChannel
        out = FileTransferChannel()
        for k, v in config.items():
            out._config_attrs.append(k)
            setattr(out, k, v)
        return out

# ################################################################################################################################
# ################################################################################################################################

| 2,618 | Python | .py | 52 | 42.903846 | 130 | 0.388083 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
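FileTransferChannel.from_dict records each incoming key in _config_attrs, which is exactly the list to_dict later iterates over, so the two methods round-trip cleanly. A short sketch, assuming zato-common is importable; the config values are made up:

```python
from zato.common.model.file_transfer import FileTransferChannel

config = {
    'id': 1,
    'name': 'my.channel',     # illustrative values only
    'is_active': True,
    'file_patterns': '*.csv',
}

channel = FileTransferChannel.from_dict(config)

# to_dict returns exactly the keys that from_dict consumed
assert channel.to_dict() == config
```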
| 10,712 | api.py | zatosource_zato/code/zato-common/src/zato/common/model/api.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

| 148 | Python | .py | 5 | 28.2 | 64 | 0.687943 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,713 | salesforce.py | zatosource_zato/code/zato-common/src/zato/common/model/salesforce.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

# ################################################################################################################################
# ################################################################################################################################

class SalesforceConfigObject:
    def __init__(self):
        self._config_attrs = []
        self.id = None # type: int
        self.name = None # type: str
        self.is_active = None # type: bool
        self.api_version = None # type: str
        self.username = None # type: str

# ################################################################################################################################
# ################################################################################################################################

| 980 | Python | .py | 17 | 53.294118 | 130 | 0.273486 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,714 | security.py | zatosource_zato/code/zato-common/src/zato/common/model/security.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

# Zato
from zato.common.typing_ import dataclass
from zato.server.service import Model

# ################################################################################################################################
# ################################################################################################################################

if 0:
    from zato.common.typing_ import datetime, dtnone, intnone, stranydict, timedelta

# ################################################################################################################################
# ################################################################################################################################

@dataclass(init=False)
class BearerTokenConfig(Model):
    sec_def_name:'str'
    username:'str'
    password:'str'
    scopes:'str'
    grant_type:'str'
    extra_fields:'stranydict'
    auth_server_url:'str'
    client_id_field:'str'
    client_secret_field:'str'

# ################################################################################################################################
# ################################################################################################################################

@dataclass(init=False)
class BearerTokenInfo(Model):
    creation_time: 'datetime'
    sec_def_name: 'str'
    token:'str'
    token_type:'str'
    expires_in:'timedelta | None'
    expires_in_sec:'intnone'
    expiration_time:'dtnone'
    scopes:'str' = ''
    username:'str' = ''

# ################################################################################################################################
# ################################################################################################################################

@dataclass(init=False)
class BearerTokenInfoResult(Model):
    info: 'BearerTokenInfo'
    is_cache_hit: 'bool'
    cache_expiry: 'float'
    cache_hits: 'int' = 0

# ################################################################################################################################
# ################################################################################################################################

| 2,364 | Python | .py | 48 | 46.083333 | 130 | 0.305122 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
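The three expiry fields of BearerTokenInfo appear to be related: expires_in is a timedelta, expires_in_sec its integer number of seconds, and expiration_time the creation time shifted by expires_in. A sketch of populating an instance attribute by attribute, the same way GoogleAPIDescription is populated in google.py above; the field relationships are an assumption based on the names and all values are made up:

```python
from datetime import datetime, timedelta

from zato.common.model.security import BearerTokenInfo

info = BearerTokenInfo()
info.sec_def_name = 'my.oauth.def' # Made-up definition name
info.token = 'abc123'              # Made-up token
info.token_type = 'Bearer'
info.creation_time = datetime.utcnow()

# Assumed relationship between the three expiry fields
info.expires_in = timedelta(hours=1)
info.expires_in_sec = int(info.expires_in.total_seconds()) # 3600
info.expiration_time = info.creation_time + info.expires_in
```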
| 10,715 | sso.py | zatosource_zato/code/zato-common/src/zato/common/model/sso.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

# Zato
from zato.common.typing_ import dataclass
from zato.server.service import Model

# ################################################################################################################################
# ################################################################################################################################

@dataclass(init=False)
class ExpiryHookInput(Model):
    current_app: 'str'
    username: 'str'
    default_expiry: 'int'

# ################################################################################################################################
# ################################################################################################################################

| 891 | Python | .py | 17 | 50.352941 | 130 | 0.293779 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,716 | __init__.py | zatosource_zato/code/zato-common/src/zato/common/model/__init__.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

| 148 | Python | .py | 5 | 28.2 | 64 | 0.687943 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,717 | scheduler.py | zatosource_zato/code/zato-common/src/zato/common/model/scheduler.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

# stdlib
from typing import Callable as callable_, Optional as optional

# Zato
from zato.common.typing_ import dataclass, from_dict
from zato.common.model.connector import ConnectorConfig

# ################################################################################################################################
# ################################################################################################################################

@dataclass
class WSXConnectorConfig(ConnectorConfig):
    path: optional[str]
    needs_auth: optional[bool]
    sec_name: optional[str]
    sec_type: optional[str]
    data_format: optional[str]
    token_ttl: optional[int]
    new_token_wait_time: int
    max_len_messages_sent: optional[int]
    max_len_messages_received: optional[int]
    hook_service: optional[callable_]
    auth_func: optional[callable_]
    vault_conn_default_auth_method: optional[str]
    on_message_callback: optional[callable_]
    parallel_server: optional[object] = None
    pings_missed_threshold: optional[int] = 5
    is_audit_log_sent_active: optional[bool] = False
    is_audit_log_received_active: optional[bool] = False

    @staticmethod
    def from_dict(config_dict):
        # type: (dict) -> WSXConnectorConfig
        return from_dict(WSXConnectorConfig, config_dict)

# ################################################################################################################################
# ################################################################################################################################

| 1,728 | Python | .py | 37 | 43 | 130 | 0.508021 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,718 | google.py | zatosource_zato/code/zato-common/src/zato/common/model/google.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

# Zato
from zato.common.typing_ import dataclass
from zato.server.service import Model

# ################################################################################################################################
# ################################################################################################################################

@dataclass(init=False)
class GoogleAPIDescription(Model):
    id: 'str'
    name: 'str'
    title: 'str'
    version: 'str'
    title_full: 'str'

# ################################################################################################################################
# ################################################################################################################################

| 909 | Python | .py | 19 | 45.473684 | 130 | 0.291855 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,719 | kvdb.py | zatosource_zato/code/zato-common/src/zato/common/model/kvdb.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

# ################################################################################################################################
# ################################################################################################################################

class KVDB:
    def __init__(self):
        self.id = None
        self.is_active = True
        self.name = ''
        self.host = ''
        self.port = None # type: int
        self.db = 0
        self.use_redis_sentinels = False
        self.redis_sentinels = ''
        self.redis_sentinels_master = ''

# ################################################################################################################################
# ################################################################################################################################

| 983 | Python | .py | 20 | 44.1 | 130 | 0.264092 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,720 | wsx.py | zatosource_zato/code/zato-common/src/zato/common/model/wsx.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

# Zato
from zato.common.api import WEB_SOCKET
from zato.common.typing_ import anynone, boolnone, callnone, dataclass, from_dict, intnone, strnone
from zato.common.model.connector import ConnectorConfig

# ################################################################################################################################
# ################################################################################################################################

@dataclass
class WSXConnectorConfig(ConnectorConfig):
    host: strnone
    port: intnone
    needs_tls: boolnone
    path: strnone
    needs_auth: boolnone
    sec_name: strnone
    sec_type: strnone
    data_format: str
    token_ttl: int
    new_token_wait_time: int
    max_len_messages_sent: intnone
    max_len_messages_received: intnone
    hook_service: callnone
    auth_func: callnone
    vault_conn_default_auth_method: strnone
    on_message_callback: callnone
    parallel_server: anynone
    pings_missed_threshold: intnone = WEB_SOCKET.DEFAULT.PINGS_MISSED_THRESHOLD
    ping_interval: intnone = WEB_SOCKET.DEFAULT.PING_INTERVAL
    is_audit_log_sent_active: bool = False
    is_audit_log_received_active: boolnone = False
    extra_properties: strnone = ''

    @staticmethod
    def from_dict(config_dict):
        # type: (dict) -> WSXConnectorConfig
        return from_dict(WSXConnectorConfig, config_dict)

# ################################################################################################################################
# ################################################################################################################################

| 1,805 | Python | .py | 41 | 40.121951 | 130 | 0.519636 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,721 | audit_log.py | zatosource_zato/code/zato-common/src/zato/common/model/audit_log.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

# ################################################################################################################################
# ################################################################################################################################

class AuditLogEvent:
    def __init__(self):
        self._config_attrs = []
        self.server_name = ''
        self.server_pid = ''
        self.type_ = ''
        self.object_id = ''
        self.conn_id = ''
        self.direction = ''
        self.data = ''
        self.timestamp = None # type: str
        self.timestamp_utc = None
        self.msg_id = ''
        self.in_reply_to = ''

# ################################################################################################################################
# ################################################################################################################################

| 1,073 | Python | .py | 23 | 41.086957 | 130 | 0.272727 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,722 | groups.py | zatosource_zato/code/zato-common/src/zato/common/model/groups.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

# ################################################################################################################################
# ################################################################################################################################

if 0:
    from zato.common.typing_ import strlist

# ################################################################################################################################
# ################################################################################################################################

class GroupObject:
    def __init__(self):
        self._config_attrs = []
        self.id = '' # type: str
        self.is_active = True # type: bool
        self.type = '' # type: str
        self.name = '' # type: str
        self.name_slug = '' # type: str
        self.is_active = False # type: bool
        self.members = [] # type: strlist

# ################################################################################################################################
# ################################################################################################################################

| 1,405 | Python | .py | 23 | 56.652174 | 130 | 0.226909 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,723 | atlassian_.py | zatosource_zato/code/zato-common/src/zato/common/model/atlassian_.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

# ################################################################################################################################
# ################################################################################################################################

class AtlassianConfigObject:
    def __init__(self):
        self._config_attrs = []
        self.id = -1 # type: int
        self.name = '' # type: str
        self.is_active = True # type: bool
        self.api_version = '' # type: str
        self.username = '' # type: str

# ################################################################################################################################
# ################################################################################################################################

| 982 | Python | .py | 17 | 53.411765 | 130 | 0.25625 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,724 | amqp_.py | zatosource_zato/code/zato-common/src/zato/common/model/amqp_.py |

# -*- coding: utf-8 -*-

"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

# dacite
from dacite.core import from_dict

# Zato
from zato.common.typing_ import callnone, dataclass, intnone, stranydict, strnone
from zato.common.model.connector import ConnectorConfig

# ################################################################################################################################
# ################################################################################################################################

@dataclass
class AMQPConnectorConfig(ConnectorConfig):
    host: str
    queue: strnone
    ack_mode: strnone
    conn_url: strnone
    username: str
    vhost: str
    frame_max: int
    prefetch_count: intnone
    get_conn_class_func: callnone
    consumer_tag_prefix: strnone

    @staticmethod
    def from_dict(config_dict:'stranydict') -> 'AMQPConnectorConfig':
        return from_dict(AMQPConnectorConfig, config_dict)

# ################################################################################################################################
# ################################################################################################################################

| 1,298 | Python | .py | 29 | 41.551724 | 130 | 0.438541 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
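AMQPConnectorConfig delegates to dacite's from_dict, so a plain dict whose keys match the dataclass fields (including those inherited from ConnectorConfig) is enough to build a typed config object. A sketch, assuming zato-common and dacite are installed; every value below is made up:

```python
from zato.common.model.amqp_ import AMQPConnectorConfig

config = AMQPConnectorConfig.from_dict({

    # Fields inherited from ConnectorConfig ..
    'id': 1,
    'name': 'my.amqp.conn', # Illustrative values only
    'port': 5672,
    'address': None,
    'is_active': True,
    'pool_size': 10,
    'def_name': None,
    'old_name': None,
    'password': None,
    'service_name': None,

    # .. and the AMQP-specific ones.
    'host': 'localhost',
    'queue': 'my.queue',
    'ack_mode': 'ack',
    'conn_url': None,
    'username': 'guest',
    'vhost': '/',
    'frame_max': 131072,
    'prefetch_count': 10,
    'get_conn_class_func': None,
    'consumer_tag_prefix': None,
})

print(config.host, config.port) # localhost 5672
```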
| 10,725 | _pyproject.toml.ini | zatosource_zato/code/_pyproject.toml.ini |

[tool.pyright]

pythonVersion = "3.8"
typeCheckingMode = "basic"

#
# This is level 1
#
reportOptionalOperand = "error"
reportPrivateUsage = "none"
reportAssertAlwaysTrue = "error"
reportUnusedImport = "error"
reportUnusedFunction = "error"
reportUnusedCallResult = "error"
reportUnusedVariable = "none" # This leads to spurious errors
reportInvalidTypeVarUse = "error"
reportOverlappingOverload = "error"
reportPropertyTypeMismatch = "error"
reportUntypedClassDecorator = "error"
reportUnnecessaryCast = "error"
reportUnnecessaryComparison = "error"
reportUnnecessaryIsInstance = "error"
reportUnsupportedDunderAll = "error"
reportUntypedFunctionDecorator = "error"
reportWildcardImportFromLibrary = "error"
reportImplicitStringConcatenation = "error"
reportSelfClsParameterName = "none"

#
# This is level 1+
#
reportTypeCommentUsage = "none"
reportUnknownLambdaType = "none"
reportUnknownMemberType = "none"
reportMissingTypeArgument = "none"
reportUnknownVariableType = "none"
reportUnknownParameterType = "none"

#
# This is level 2
#
reportMissingImports = "none"
reportGeneralTypeIssues = "none"
reportOptionalMemberAccess = "error"
reportMissingModuleSource = "none"

#
# This is level 3
#
reportMissingParameterType = "none"

#
# This is level 4, needed before reportUnknownMemberType
#
# reportUnknownArgumentType = "error"

#
# This is level 5
#
# reportUnknownLambdaType = "error"
# reportUnknownMemberType = "error"
# reportUnknownParameterType = "error"
# reportUnknownVariableType = "error"

extraPaths = [
    "./lib/python3.8/site-packages",
    "./zato-agent/src",
    "./zato-cli/src",
    "./zato-common/src",
    "./zato-cy/src",
    "./zato-distlock/src",
    "./zato-hl7/src",
    "./zato-scheduler/src",
    "./zato-server/src",
    "./zato-sso/src",
    "./zato-testing/src",
    "./zato-web-admin/src",
    "./zato-zmq/src",
]

| 1,931 | Python | .pyp | 71 | 25.366197 | 66 | 0.739342 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,726 | mode-python.js | zatosource_zato/code/zato-web-admin/src/zato/admin/static/ace-builds/src/mode-python.js |

define("ace/mode/python_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"], function(require, exports, module) {
"use strict";

var oop = require("../lib/oop");
var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;

var PythonHighlightRules = function() {

    var keywords = (
        "and|as|assert|break|class|continue|def|del|elif|else|except|exec|" +
        "finally|for|from|global|if|import|in|is|lambda|not|or|pass|print|" +
        "raise|return|try|while|with|yield|async|await|nonlocal"
    );

    var builtinConstants = (
        "True|False|None|NotImplemented|Ellipsis|__debug__"
    );

    var builtinFunctions = (
        "abs|divmod|input|open|staticmethod|all|enumerate|int|ord|str|any|" +
        "eval|isinstance|pow|sum|basestring|execfile|issubclass|print|super|" +
        "binfile|bin|iter|property|tuple|bool|filter|len|range|type|bytearray|" +
        "float|list|raw_input|unichr|callable|format|locals|reduce|unicode|" +
        "chr|frozenset|long|reload|vars|classmethod|getattr|map|repr|xrange|" +
        "cmp|globals|max|reversed|zip|compile|hasattr|memoryview|round|" +
        "__import__|complex|hash|min|apply|delattr|help|next|setattr|set|" +
        "buffer|dict|hex|object|slice|coerce|dir|id|oct|sorted|intern|" +
        "ascii|breakpoint|bytes"
    );

    var keywordMapper = this.createKeywordMapper({
        "invalid.deprecated": "debugger",
        "support.function": builtinFunctions,
        "variable.language": "self|cls",
        "constant.language": builtinConstants,
        "keyword": keywords
    }, "identifier");

    var strPre = "[uU]?";
    var strRawPre = "[rR]";
    var strFormatPre = "[fF]";
    var strRawFormatPre = "(?:[rR][fF]|[fF][rR])";
    var decimalInteger = "(?:(?:[1-9]\\d*)|(?:0))";
    var octInteger = "(?:0[oO]?[0-7]+)";
    var hexInteger = "(?:0[xX][\\dA-Fa-f]+)";
    var binInteger = "(?:0[bB][01]+)";
    var integer = "(?:" + decimalInteger + "|" + octInteger + "|" + hexInteger + "|" + binInteger + ")";
    var exponent = "(?:[eE][+-]?\\d+)";
    var fraction = "(?:\\.\\d+)";
    var intPart = "(?:\\d+)";
    var pointFloat = "(?:(?:" + intPart + "?" + fraction + ")|(?:" + intPart + "\\.))";
    var exponentFloat = "(?:(?:" + pointFloat + "|" + intPart + ")" + exponent + ")";
    var floatNumber = "(?:" + exponentFloat + "|" + pointFloat + ")";
    var stringEscape = "\\\\(x[0-9A-Fa-f]{2}|[0-7]{3}|[\\\\abfnrtv'\"]|U[0-9A-Fa-f]{8}|u[0-9A-Fa-f]{4})";

    this.$rules = {
        "start" : [ {
            token : "comment",
            regex : "#.*$"
        }, {
            token : "string", // multi line """ string start
            regex : strPre + '"{3}',
            next : "qqstring3"
        }, {
            token : "string", // " string
            regex : strPre + '"(?=.)',
            next : "qqstring"
        }, {
            token : "string", // multi line ''' string start
            regex : strPre + "'{3}",
            next : "qstring3"
        }, {
            token : "string", // ' string
            regex : strPre + "'(?=.)",
            next : "qstring"
        }, {
            token: "string",
            regex: strRawPre + '"{3}',
            next: "rawqqstring3"
        }, {
            token: "string",
            regex: strRawPre + '"(?=.)',
            next: "rawqqstring"
        }, {
            token: "string",
            regex: strRawPre + "'{3}",
            next: "rawqstring3"
        }, {
            token: "string",
            regex: strRawPre + "'(?=.)",
            next: "rawqstring"
        }, {
            token: "string",
            regex: strFormatPre + '"{3}',
            next: "fqqstring3"
        }, {
            token: "string",
            regex: strFormatPre + '"(?=.)',
            next: "fqqstring"
        }, {
            token: "string",
            regex: strFormatPre + "'{3}",
            next: "fqstring3"
        }, {
            token: "string",
            regex: strFormatPre + "'(?=.)",
            next: "fqstring"
        }, {
            token: "string",
            regex: strRawFormatPre + '"{3}',
            next: "rfqqstring3"
        }, {
            token: "string",
            regex: strRawFormatPre + '"(?=.)',
            next: "rfqqstring"
        }, {
            token: "string",
            regex: strRawFormatPre + "'{3}",
            next: "rfqstring3"
        }, {
            token: "string",
            regex: strRawFormatPre + "'(?=.)",
            next: "rfqstring"
        }, {
            token: "keyword.operator",
            regex: "\\+|\\-|\\*|\\*\\*|\\/|\\/\\/|%|@|<<|>>|&|\\||\\^|~|<|>|<=|=>|==|!=|<>|="
        }, {
            token: "punctuation",
            regex: ",|:|;|\\->|\\+=|\\-=|\\*=|\\/=|\\/\\/=|%=|@=|&=|\\|=|^=|>>=|<<=|\\*\\*="
        }, {
            token: "paren.lparen",
            regex: "[\\[\\(\\{]"
        }, {
            token: "paren.rparen",
            regex: "[\\]\\)\\}]"
        }, {
            token: ["keyword", "text", "entity.name.function"],
            regex: "(def|class)(\\s+)([\\u00BF-\\u1FFF\\u2C00-\\uD7FF\\w]+)"
        }, {
            token: "text",
            regex: "\\s+"
        }, {
            include: "constants"
        }],
        "qqstring3": [{
            token: "constant.language.escape",
            regex: stringEscape
        }, {
            token: "string", // multi line """ string end
            regex: '"{3}',
            next: "start"
        }, {
            defaultToken: "string"
        }],
        "qstring3": [{
            token: "constant.language.escape",
            regex: stringEscape
        }, {
            token: "string", // multi line ''' string end
            regex: "'{3}",
            next: "start"
        }, {
            defaultToken: "string"
        }],
        "qqstring": [{
            token: "constant.language.escape",
            regex: stringEscape
        }, {
            token: "string",
            regex: "\\\\$",
            next: "qqstring"
        }, {
            token: "string",
            regex: '"|$',
            next: "start"
        }, {
            defaultToken: "string"
        }],
        "qstring": [{
            token: "constant.language.escape",
            regex: stringEscape
        }, {
            token: "string",
            regex: "\\\\$",
            next: "qstring"
        }, {
            token: "string",
regex: "'|$",
next: "start"
}, {
defaultToken: "string"
}],
"rawqqstring3": [{
token: "string", // multi line """ string end
regex: '"{3}',
next: "start"
}, {
defaultToken: "string"
}],
"rawqstring3": [{
token: "string", // multi line ''' string end
regex: "'{3}",
next: "start"
}, {
defaultToken: "string"
}],
"rawqqstring": [{
token: "string",
regex: "\\\\$",
next: "rawqqstring"
}, {
token: "string",
regex: '"|$',
next: "start"
}, {
defaultToken: "string"
}],
"rawqstring": [{
token: "string",
regex: "\\\\$",
next: "rawqstring"
}, {
token: "string",
regex: "'|$",
next: "start"
}, {
defaultToken: "string"
}],
"fqqstring3": [{
token: "constant.language.escape",
regex: stringEscape
}, {
token: "string", // multi line """ string end
regex: '"{3}',
next: "start"
}, {
token: "paren.lparen",
regex: "{",
push: "fqstringParRules"
}, {
defaultToken: "string"
}],
"fqstring3": [{
token: "constant.language.escape",
regex: stringEscape
}, {
token: "string", // multi line ''' string end
regex: "'{3}",
next: "start"
}, {
token: "paren.lparen",
regex: "{",
push: "fqstringParRules"
}, {
defaultToken: "string"
}],
"fqqstring": [{
token: "constant.language.escape",
regex: stringEscape
}, {
token: "string",
regex: "\\\\$",
next: "fqqstring"
}, {
token: "string",
regex: '"|$',
next: "start"
}, {
token: "paren.lparen",
regex: "{",
push: "fqstringParRules"
}, {
defaultToken: "string"
}],
"fqstring": [{
token: "constant.language.escape",
regex: stringEscape
}, {
token: "string",
regex: "'|$",
next: "start"
}, {
token: "paren.lparen",
regex: "{",
push: "fqstringParRules"
}, {
defaultToken: "string"
}],
"rfqqstring3": [{
token: "string", // multi line """ string end
regex: '"{3}',
next: "start"
}, {
token: "paren.lparen",
regex: "{",
push: "fqstringParRules"
}, {
defaultToken: "string"
}],
"rfqstring3": [{
token: "string", // multi line ''' string end
regex: "'{3}",
next: "start"
}, {
token: "paren.lparen",
regex: "{",
push: "fqstringParRules"
}, {
defaultToken: "string"
}],
"rfqqstring": [{
token: "string",
regex: "\\\\$",
next: "rfqqstring"
}, {
token: "string",
regex: '"|$',
next: "start"
}, {
token: "paren.lparen",
regex: "{",
push: "fqstringParRules"
}, {
defaultToken: "string"
}],
"rfqstring": [{
token: "string",
regex: "'|$",
next: "start"
}, {
token: "paren.lparen",
regex: "{",
push: "fqstringParRules"
}, {
defaultToken: "string"
}],
"fqstringParRules": [{//TODO: nested {}
token: "paren.lparen",
regex: "[\\[\\(]"
}, {
token: "paren.rparen",
regex: "[\\]\\)]"
}, {
token: "string",
regex: "\\s+"
}, {
token: "string",
regex: "'[^']*'"
}, {
token: "string",
regex: '"[^"]*"'
}, {
token: "function.support",
regex: "(!s|!r|!a)"
}, {
include: "constants"
},{
token: 'paren.rparen',
regex: "}",
next: 'pop'
},{
token: 'paren.lparen',
regex: "{",
push: "fqstringParRules"
}],
"constants": [{
token: "constant.numeric", // imaginary
regex: "(?:" + floatNumber + "|\\d+)[jJ]\\b"
}, {
token: "constant.numeric", // float
regex: floatNumber
}, {
token: "constant.numeric", // long integer
regex: integer + "[lL]\\b"
}, {
token: "constant.numeric", // integer
regex: integer + "\\b"
}, {
token: ["punctuation", "function.support"],// method
regex: "(\\.)([a-zA-Z_]+)\\b"
}, {
token: keywordMapper,
regex: "[a-zA-Z_$][a-zA-Z0-9_$]*\\b"
}]
};
this.normalizeRules();
};
oop.inherits(PythonHighlightRules, TextHighlightRules);
exports.PythonHighlightRules = PythonHighlightRules;
});
define("ace/mode/folding/pythonic",["require","exports","module","ace/lib/oop","ace/mode/folding/fold_mode"], function(require, exports, module) {
"use strict";
var oop = require("../../lib/oop");
var BaseFoldMode = require("./fold_mode").FoldMode;
var FoldMode = exports.FoldMode = function(markers) {
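// Added, descriptive note: fold when a line ends with an opening "[" or "{",
// or with one of the block-introducing markers passed in by the mode (the
// Python mode below passes "\\:"), optionally followed by whitespace and a
// trailing "#" comment.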
this.foldingStartMarker = new RegExp("([\\[{])(?:\\s*)$|(" + markers + ")(?:\\s*)(?:#.*)?$");
};
oop.inherits(FoldMode, BaseFoldMode);
(function() {
this.getFoldWidgetRange = function(session, foldStyle, row) {
var line = session.getLine(row);
var match = line.match(this.foldingStartMarker);
if (match) {
if (match[1])
return this.openingBracketBlock(session, match[1], row, match.index);
if (match[2])
return this.indentationBlock(session, row, match.index + match[2].length);
return this.indentationBlock(session, row);
}
};
}).call(FoldMode.prototype);
});
define("ace/mode/python",["require","exports","module","ace/lib/oop","ace/mode/text","ace/mode/python_highlight_rules","ace/mode/folding/pythonic","ace/range"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var TextMode = require("./text").Mode;
var PythonHighlightRules = require("./python_highlight_rules").PythonHighlightRules;
var PythonFoldMode = require("./folding/pythonic").FoldMode;
var Range = require("../range").Range;
var Mode = function() {
this.HighlightRules = PythonHighlightRules;
this.foldingRules = new PythonFoldMode("\\:");
this.$behaviour = this.$defaultBehaviour;
};
oop.inherits(Mode, TextMode);
(function() {
this.lineCommentStart = "#";
this.getNextLineIndent = function(state, line, tab) {
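// Added, descriptive note: keep the current indent after comment lines;
// otherwise indent one level further when the line opens a bracket or
// ends with ":".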
var indent = this.$getIndent(line);
var tokenizedLine = this.getTokenizer().getLineTokens(line, state);
var tokens = tokenizedLine.tokens;
if (tokens.length && tokens[tokens.length-1].type == "comment") {
return indent;
}
if (state == "start") {
var match = line.match(/^.*[\{\(\[:]\s*$/);
if (match) {
indent += tab;
}
}
return indent;
};
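// Added, descriptive note: lines ending with one of these keywords make
// the following line outdent by one tab stop - see checkOutdent and
// autoOutdent below.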
var outdents = {
"pass": 1,
"return": 1,
"raise": 1,
"break": 1,
"continue": 1
};
this.checkOutdent = function(state, line, input) {
if (input !== "\r\n" && input !== "\r" && input !== "\n")
return false;
var tokens = this.getTokenizer().getLineTokens(line.trim(), state).tokens;
if (!tokens)
return false;
do {
var last = tokens.pop();
} while (last && (last.type == "comment" || (last.type == "text" && last.value.match(/^\s+$/))));
if (!last)
return false;
return (last.type == "keyword" && outdents[last.value]);
};
this.autoOutdent = function(state, doc, row) {
row += 1;
var indent = this.$getIndent(doc.getLine(row));
var tab = doc.getTabString();
if (indent.slice(-tab.length) == tab)
doc.remove(new Range(row, indent.length-tab.length, row, indent.length));
};
this.$id = "ace/mode/python";
this.snippetFileId = "ace/snippets/python";
}).call(Mode.prototype);
exports.Mode = Mode;
}); (function() {
window.require(["ace/mode/python"], function(m) {
if (typeof module == "object" && typeof exports == "object" && module) {
module.exports = m;
}
});
})();
| 15,694 | Python | .pyt | 472 | 22.834746 | 197 | 0.457915 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) | 10,727 | test_attach.py | zatosource_zato/code/zato-cy/test/zato/cy/simpleio_/test_attach.py |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2020, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# Zato
from zato.common.test import BaseSIOTestCase
from zato.server.service import Service
# Zato - Cython
from zato.simpleio import CySimpleIO
# ################################################################################################################################
# ################################################################################################################################
class AttachSIOTestCase(BaseSIOTestCase):
def test_attach_sio(self):
class MyService(Service):
class SimpleIO:
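# Added, descriptive note: a leading dash marks an element as optional and
# the dash itself is stripped from the element's name, as the assertions
# below confirm.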
input = 'aaa', 'bbb', 'ccc', '-ddd', '-eee'
output = 'qqq', 'www', '-eee', '-fff'
CySimpleIO.attach_sio(None, self.get_server_config(), MyService)
self.assertEqual(MyService._sio.definition._input_required.get_elem_names(), ['aaa', 'bbb', 'ccc'])
self.assertEqual(MyService._sio.definition._input_optional.get_elem_names(), ['ddd', 'eee'])
self.assertEqual(MyService._sio.definition._output_required.get_elem_names(), ['qqq', 'www'])
self.assertEqual(MyService._sio.definition._output_optional.get_elem_names(), ['eee', 'fff'])
self.assertTrue(MyService._sio.definition.has_input_required)
self.assertTrue(MyService._sio.definition.has_input_optional)
self.assertTrue(MyService._sio.definition.has_output_required)
self.assertTrue(MyService._sio.definition.has_output_optional)
# ################################################################################################################################
# ################################################################################################################################
| 1,935 | Python | .tac | 30 | 58.766667 | 130 | 0.503966 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) | 10,728 | openstack.py | zatosource_zato/code/zato-web-admin/src/zato/admin/web/forms/security/openstack.py |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# Django
from django import forms
class CreateForm(forms.Form):
id = forms.CharField(widget=forms.HiddenInput())
name = forms.CharField(widget=forms.TextInput(attrs={'class':'required', 'style':'width:90%'}))
is_active = forms.BooleanField(required=False, widget=forms.CheckboxInput(attrs={'checked':'checked'}))
username = forms.CharField(widget=forms.TextInput(attrs={'class':'required', 'style':'width:90%'}))
class EditForm(CreateForm):
is_active = forms.BooleanField(required=False, widget=forms.CheckboxInput())
| 779 | Python | .tac | 15 | 49.2 | 107 | 0.741425 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) | 10,729 | test_dataclass_open_api.py | zatosource_zato/code/zato-server/test/zato/apispec/test_dataclass_open_api.py |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2021, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
from copy import deepcopy
from unittest import main
# Zato
from zato.common.test.apispec_ import run_common_apispec_assertions, service_name, sio_config
from zato.common.api import APISPEC, URL_TYPE
from zato.common.marshal_.simpleio import DataClassSimpleIO
from zato.common.test import BaseSIOTestCase
from zato.server.apispec.spec.core import Generator
from zato.server.apispec.spec.openapi import OpenAPIGenerator
from zato.server.service.internal.helpers import MyDataclassService
# ################################################################################################################################
if 0:
from zato.common.typing_ import any_
# ################################################################################################################################
class _MatchTestCompiled:
group_names = ['phone_number']
# ################################################################################################################################
# ################################################################################################################################
class DataClassOpenAPITestCase(BaseSIOTestCase):
def test_dataclass_generate_open_api(self):
MyClass = deepcopy(MyDataclassService)
DataClassSimpleIO.attach_sio(None, self.get_server_config(), MyClass)
service_store_services = {
'my.impl.name': {
'name': service_name,
'service_class': MyClass,
}
}
include = ['*']
exclude = []
query = ''
tags = ['public']
generator = Generator(service_store_services, sio_config, include, exclude, query, tags, needs_sio_desc=False)
initial_info = generator.get_info() # type: any_
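# Added, descriptive note: the list below simulates a single REST channel
# exposing the service; the OpenAPI generator reads URL paths and path
# parameters from entries like this one.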
channel_data = [{
'service_name': service_name,
'transport': URL_TYPE.PLAIN_HTTP,
'url_path': '/test/{phone_number}',
'match_target_compiled': _MatchTestCompiled()
}]
needs_api_invoke = True
needs_rest_channels = True
api_invoke_path = APISPEC.GENERIC_INVOKE_PATH
open_api_generator = OpenAPIGenerator(initial_info, channel_data, needs_api_invoke, needs_rest_channels, api_invoke_path)
result = open_api_generator.generate()
run_common_apispec_assertions(self, result)
# ################################################################################################################################
# ################################################################################################################################
if __name__ == '__main__':
_ = main()
# ################################################################################################################################
| 2,986 | Python | .tac | 57 | 46.368421 | 130 | 0.462676 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) | 10,730 | test_invoke_dataclasses.py | zatosource_zato/code/zato-server/test/zato/rest/test_invoke_dataclasses.py |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2021, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
from unittest import main
# Zato
from zato.common.test.rest_client import RESTClientTestCase
# ################################################################################################################################
# ################################################################################################################################
class InvocationTestCase(RESTClientTestCase):
needs_bunch = False
needs_current_app = False
payload_only_messages = False
# ################################################################################################################################
def setUp(self) -> None:
super().setUp()
self.rest_client.init()
# ################################################################################################################################
def test_invoke_helpers_api_spec_user(self) -> 'None':
# Test data
username = 'my.username'
# Prepare our request ..
request = {
'username': username
}
# .. invoke the helper service ..
response = self.get('/zato/api/invoke/helpers.api-spec.user', request)
# .. and check the response.
user = response['user']
parent_user = response['parent_user']
previous_user = response['previous_user']
self.assertListEqual(user, [
{'user_id': 222, 'username': 'username.222', 'display_name': 'display_name.222.' + username},
{'user_id': 111, 'username': 'username.111', 'display_name': 'display_name.111.' + username}
])
self.assertListEqual(parent_user, [])
self.assertListEqual(previous_user, [])
# ################################################################################################################################
def test_invoke_helpers_api_account_list_with_user_id(self) -> 'None':
# Test data
user_id = 999
account_id = 5555
# Prepare our request ..
request = {
'user_id': user_id,
'account_id': account_id,
}
# .. invoke the helper service ..
response = self.get('/zato/api/invoke/helpers.api-spec.account-list', request)
# .. and check the response.
user_account_list = response['user_account_list']
account1 = user_account_list[0]
account2 = user_account_list[1]
self.assertDictEqual(account1, {
'user': {'user_id': 222, 'username': 'username.222', 'display_name': 'display_name.222.999'},
'account_id': 7575,
'account_type': 2222
})
self.assertDictEqual(account2, {
'user': {'user_id': 111, 'username': 'username.111', 'display_name': 'display_name.111.999'},
'account_id': 6565,
'account_type': 1111
})
# ################################################################################################################################
# ################################################################################################################################
if __name__ == '__main__':
_ = main()
# ################################################################################################################################
# ################################################################################################################################
| 3,614 | Python | .tac | 71 | 43.535211 | 130 | 0.391465 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) | 10,731 | test_json_to_dataclass.py | zatosource_zato/code/zato-common/test/zato/common/marshall_/test_json_to_dataclass.py |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
from unittest import main, TestCase
# Zato
from zato.common.ext.dataclasses import dataclass, field
from zato.common.marshal_.api import MarshalAPI, Model
from zato.common.test import rand_int, rand_string
from zato.common.test.marshall_ import CreateUserRequest, Role, TestService, User
from zato.common.typing_ import cast_, list_field, dictlist, strlistnone
# ################################################################################################################################
# ################################################################################################################################
if 0:
from zato.common.marshal_.api import ModelCtx
from zato.server.service import Service
ModelCtx = ModelCtx
Service = Service
# ################################################################################################################################
# ################################################################################################################################
class JSONToDataclassTestCase(TestCase):
def test_unmarshall(self):
request_id = rand_int()
user_name = rand_string()
locality = rand_string()
role_type1 = 111
role_type2 = 222
role_name1 = 'role.name.111'
role_name2 = 'role.name.222'
data = {
'request_id': request_id,
'user': {
'user_name': user_name,
'address': {
'locality': locality,
}
},
'role_list': [
{'type': role_type1, 'name': role_name1},
{'type': role_type2, 'name': role_name2},
]
}
service = cast_('Service', None)
api = MarshalAPI()
result = api.from_dict(service, data, CreateUserRequest) # type: CreateUserRequest
self.assertIs(type(result), CreateUserRequest)
self.assertIsInstance(result.user, User)
self.assertEqual(result.request_id, request_id)
self.assertEqual(result.user.user_name, user_name)
self.assertIsInstance(result.role_list, list)
self.assertEqual(len(result.role_list), 2)
role1 = result.role_list[0] # type: Role
role2 = result.role_list[1] # type: Role
self.assertIsInstance(role1, Role)
self.assertIsInstance(role2, Role)
self.assertEqual(role1.type, role_type1)
self.assertEqual(role1.name, role_name1)
self.assertEqual(role2.type, role_type2)
self.assertEqual(role2.name, role_name2)
# ################################################################################################################################
def test_unmarshall_optional_list_of_strings_given_on_input(self):
elem1 = rand_string()
elem2 = rand_string()
elem3 = rand_string()
my_list = [elem1, elem2, elem3]
@dataclass
class MyRequest(Model):
my_list: strlistnone = list_field()
request1 = {
'my_list': my_list
}
service = None
api = MarshalAPI()
result = api.from_dict(cast_('Service', service), request1, MyRequest) # type: MyRequest
self.assertListEqual(my_list, cast_('list', result.my_list))
# ################################################################################################################################
def test_unmarshall_optional_list_of_strings_not_given_on_input(self):
@dataclass
class MyRequest(Model):
my_list: strlistnone = list_field()
request1 = {}
service = None
api = MarshalAPI()
result = api.from_dict(cast_('Service', service), request1, MyRequest) # type: MyRequest
self.assertListEqual([], cast_('list', result.my_list))
# ################################################################################################################################
def test_unmarshall_default(self):
request_id = rand_int()
user_name = rand_string()
locality = rand_string()
@dataclass
class CreateAdminRequest(CreateUserRequest):
admin_type: str = field(default='MyDefaultValue') # type: ignore
data = {
'request_id': request_id,
'user': {
'user_name': user_name,
'address': {
'locality': locality,
}
},
'role_list': [],
}
service = cast_('Service', None)
api = MarshalAPI()
result = api.from_dict(service, data, CreateAdminRequest) # type: CreateAdminRequest
self.assertIs(type(result), CreateAdminRequest)
self.assertIsInstance(result.user, User)
self.assertEqual(result.request_id, request_id)
self.assertEqual(result.admin_type, CreateAdminRequest.admin_type)
self.assertEqual(result.user.user_name, user_name)
# ################################################################################################################################
def test_unmarshall_and_run_after_created(self):
request_id = 123456789
user_name = 'my.user.name'
locality = 'my.locality'
@dataclass
class MyRequestWithAfterCreated(CreateUserRequest):
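# Added, descriptive note: after_created is a Model hook invoked by
# MarshalAPI.from_dict once the instance is built; the checks below
# double as this test's assertions.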
def after_created(self, ctx:'ModelCtx') -> 'None':
if not isinstance(ctx.service, TestService):
raise ValueError('Expected for service class to be {} instead of {}'.format(
TestService, type(ctx.service)))
if not isinstance(ctx.data, dict): # type: ignore
raise ValueError('Expected for ctx.data to be a dict instead of {}'.format(type(ctx.data)))
request_id = ctx.data['request_id']
user_name = ctx.data['user']['user_name']
if request_id != 123456789:
raise ValueError('Value of request_id should be 123456789 instead of `{}`'.format(request_id))
if user_name != 'my.user.name':
raise ValueError('Value of user_name should be "my.user.name" instead of `{}`'.format(user_name))
if locality != 'my.locality':
raise ValueError('Value of locality should be "my.locality" instead of `{}`'.format(locality))
data = {
'request_id': request_id,
'user': {
'user_name': user_name,
'address': {
'locality': locality,
}
},
'role_list': [],
}
service = cast_('Service', TestService())
api = MarshalAPI()
api.from_dict(service, data, MyRequestWithAfterCreated)
# ################################################################################################################################
def test_unmarshall_input_is_a_dataclass(self):
@dataclass(init=False)
class MyModel(Model):
my_field: str
expected_value = 'abc'
data = {'my_field': expected_value}
service = cast_('Service', None)
api = MarshalAPI()
result = api.from_dict(service, data, MyModel) # type: MyModel
self.assertEqual(result.my_field, expected_value)
# ################################################################################################################################
def test_unmarshall_input_is_a_dictlist(self):
@dataclass(init=False)
class MyModel(Model):
my_field: dictlist
expected_value = [{
'abc':111,
'zxc':222
}]
data = {'my_field': expected_value}
service = cast_('Service', None)
api = MarshalAPI()
result = api.from_dict(service, data, MyModel) # type: MyModel
self.assertEqual(result.my_field, expected_value)
# ################################################################################################################################
# ################################################################################################################################
if __name__ == '__main__':
_ = main()
# ################################################################################################################################
# ################################################################################################################################
| 8,727 | Python | .tac | 178 | 39.308989 | 130 | 0.463607 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) | 10,732 | test_attach.py | zatosource_zato/code/zato-common/test/zato/common/marshall_/test_attach.py |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2021, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
from unittest import main
# Zato
from zato.common.ext.dataclasses import dataclass
from zato.common.marshal_.api import Model
from zato.common.marshal_.simpleio import DataClassSimpleIO
from zato.common.test import BaseSIOTestCase
from zato.server.service import Service
# ################################################################################################################################
# ################################################################################################################################
@dataclass(init=False, repr=False)
class User(Model):
user_name: str
# ################################################################################################################################
# ################################################################################################################################
@dataclass(init=True, repr=False)
class MyRequest(Model):
request_id: int
user: User
# ################################################################################################################################
# ################################################################################################################################
class SIOAttachTestCase(BaseSIOTestCase):
def test_attach_sio(self):
class MyService(Service):
class SimpleIO:
input = MyRequest
DataClassSimpleIO.attach_sio(None, self.get_server_config(), MyService)
self.assertIsInstance(MyService._sio, DataClassSimpleIO)
# ################################################################################################################################
# ################################################################################################################################
if __name__ == '__main__':
_ = main()
# ################################################################################################################################
# ################################################################################################################################
| 2,280 | Python | .tac | 39 | 55.205128 | 130 | 0.305169 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) | 10,733 | _dataclasses.py | zatosource_zato/code/zato-common/src/zato/common/ext/_dataclasses.py |
"""
This module is a vendor copy of the dataclasses package from https://pypi.org/project/dataclasses/
The original license is:
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# flake8: noqa
import re
import sys
import copy
import types
import inspect
import keyword
__all__ = ['dataclass',
'field',
'Field',
'FrozenInstanceError',
'InitVar',
'MISSING',
# Helper functions.
'fields',
'asdict',
'astuple',
'make_dataclass',
'replace',
'is_dataclass',
]
# Conditions for adding methods. The boxes indicate what action the
# dataclass decorator takes. For all of these tables, when I talk
# about init=, repr=, eq=, order=, unsafe_hash=, or frozen=, I'm
# referring to the arguments to the @dataclass decorator. When
# checking if a dunder method already exists, I mean check for an
# entry in the class's __dict__. I never check to see if an attribute
# is defined in a base class.
# Key:
# +=========+=========================================+
# + Value | Meaning |
# +=========+=========================================+
# | <blank> | No action: no method is added. |
# +---------+-----------------------------------------+
# | add | Generated method is added. |
# +---------+-----------------------------------------+
# | raise | TypeError is raised. |
# +---------+-----------------------------------------+
# | None | Attribute is set to None. |
# +=========+=========================================+
# __init__
#
# +--- init= parameter
# |
# v | | |
# | no | yes | <--- class has __init__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __repr__
#
# +--- repr= parameter
# |
# v | | |
# | no | yes | <--- class has __repr__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __setattr__
# __delattr__
#
# +--- frozen= parameter
# |
# v | | |
# | no | yes | <--- class has __setattr__ or __delattr__ in __dict__?
# +=======+=======+=======+
# | False | | | <- the default
# +-------+-------+-------+
# | True | add | raise |
# +=======+=======+=======+
# Raise because not adding these methods would break the "frozen-ness"
# of the class.
# __eq__
#
# +--- eq= parameter
# |
# v | | |
# | no | yes | <--- class has __eq__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __lt__
# __le__
# __gt__
# __ge__
#
# +--- order= parameter
# |
# v | | |
# | no | yes | <--- class has any comparison method in __dict__?
# +=======+=======+=======+
# | False | | | <- the default
# +-------+-------+-------+
# | True | add | raise |
# +=======+=======+=======+
# Raise because to allow this case would interfere with using
# functools.total_ordering.
# __hash__
# +------------------- unsafe_hash= parameter
# | +----------- eq= parameter
# | | +--- frozen= parameter
# | | |
# v v v | | |
# | no | yes | <--- class has explicitly defined __hash__
# +=======+=======+=======+========+========+
# | False | False | False | | | No __eq__, use the base class __hash__
# +-------+-------+-------+--------+--------+
# | False | False | True | | | No __eq__, use the base class __hash__
# +-------+-------+-------+--------+--------+
# | False | True | False | None | | <-- the default, not hashable
# +-------+-------+-------+--------+--------+
# | False | True | True | add | | Frozen, so hashable, allows override
# +-------+-------+-------+--------+--------+
# | True | False | False | add | raise | Has no __eq__, but hashable
# +-------+-------+-------+--------+--------+
# | True | False | True | add | raise | Has no __eq__, but hashable
# +-------+-------+-------+--------+--------+
# | True | True | False | add | raise | Not frozen, but hashable
# +-------+-------+-------+--------+--------+
# | True | True | True | add | raise | Frozen, so hashable
# +=======+=======+=======+========+========+
# For boxes that are blank, __hash__ is untouched and therefore
# inherited from the base class. If the base is object, then
# id-based hashing is used.
#
# Note that a class may already have __hash__=None if it specified an
# __eq__ method in the class body (not one that was created by
# @dataclass).
#
# See _hash_action (below) for a coded version of this table.
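# An added, commented illustration of the default row in the __hash__
# table above:
#
#   @dataclass          # eq=True, frozen=False, the defaults
#   class C:
#       x: int
#
# C gets a generated __eq__ and its __hash__ is set to None, so
# instances are not hashable. With @dataclass(frozen=True), a
# field-based __hash__ is added instead.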
# Raised when an attempt is made to modify a frozen class.
class FrozenInstanceError(AttributeError): pass
# A sentinel object for default values to signal that a default
# factory will be used. This is given a nice repr() which will appear
# in the function signature of dataclasses' constructors.
class _HAS_DEFAULT_FACTORY_CLASS:
def __repr__(self):
return '<factory>'
_HAS_DEFAULT_FACTORY = _HAS_DEFAULT_FACTORY_CLASS()
# A sentinel object to detect if a parameter is supplied or not. Use
# a class to give it a better repr.
class _MISSING_TYPE:
pass
MISSING = _MISSING_TYPE()
# Since most per-field metadata will be unused, create an empty
# read-only proxy that can be shared among all fields.
_EMPTY_METADATA = types.MappingProxyType({})
# Markers for the various kinds of fields and pseudo-fields.
class _FIELD_BASE:
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
_FIELD = _FIELD_BASE('_FIELD')
_FIELD_CLASSVAR = _FIELD_BASE('_FIELD_CLASSVAR')
_FIELD_INITVAR = _FIELD_BASE('_FIELD_INITVAR')
# The name of an attribute on the class where we store the Field
# objects. Also used to check if a class is a Data Class.
_FIELDS = '__dataclass_fields__'
# The name of an attribute on the class that stores the parameters to
# @dataclass.
_PARAMS = '__dataclass_params__'
# The name of the function that, if it exists, is called at the end of
# __init__.
_POST_INIT_NAME = '__post_init__'
# String regex that string annotations for ClassVar or InitVar must match.
# Allows "identifier.identifier[" or "identifier[".
# https://bugs.python.org/issue33453 for details.
_MODULE_IDENTIFIER_RE = re.compile(r'^(?:\s*(\w+)\s*\.)?\s*(\w+)')
class _InitVarMeta(type):
def __getitem__(self, params):
return self
class InitVar(metaclass=_InitVarMeta):
pass
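# An added, commented illustration: an InitVar is accepted by the
# generated __init__ and forwarded to __post_init__, but it does not
# become a field on the instance:
#
#   @dataclass
#   class C:
#       x: int
#       scale: InitVar[int] = 1
#
#       def __post_init__(self, scale):
#           self.x = self.x * scale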
# Instances of Field are only ever created from within this module,
# and only from the field() function, although Field instances are
# exposed externally as (conceptually) read-only objects.
#
# name and type are filled in after the fact, not in __init__.
# They're not known at the time this class is instantiated, but it's
# convenient if they're available later.
#
# When cls._FIELDS is filled in with a list of Field objects, the name
# and type fields will have been populated.
class Field:
__slots__ = ('name',
'type',
'default',
'default_factory',
'repr',
'hash',
'init',
'compare',
'metadata',
'_field_type', # Private: not to be used by user code.
)
def __init__(self, default, default_factory, init, repr, hash, compare,
metadata):
self.name = None
self.type = None
self.default = default
self.default_factory = default_factory
self.init = init
self.repr = repr
self.hash = hash
self.compare = compare
self.metadata = (_EMPTY_METADATA
if metadata is None or len(metadata) == 0 else
types.MappingProxyType(metadata))
self._field_type = None
def __repr__(self):
return ('Field('
f'name={self.name!r},'
f'type={self.type!r},'
f'default={self.default!r},'
f'default_factory={self.default_factory!r},'
f'init={self.init!r},'
f'repr={self.repr!r},'
f'hash={self.hash!r},'
f'compare={self.compare!r},'
f'metadata={self.metadata!r},'
f'_field_type={self._field_type}'
')')
# This is used to support the PEP 487 __set_name__ protocol in the
# case where we're using a field that contains a descriptor as a
# default value. For details on __set_name__, see
# https://www.python.org/dev/peps/pep-0487/#implementation-details.
#
# Note that in _process_class, this Field object is overwritten
# with the default value, so the end result is a descriptor that
# had __set_name__ called on it at the right time.
def __set_name__(self, owner, name):
func = getattr(type(self.default), '__set_name__', None)
if func:
# There is a __set_name__ method on the descriptor, call
# it.
func(self.default, owner, name)
class _DataclassParams:
__slots__ = ('init',
'repr',
'eq',
'order',
'unsafe_hash',
'frozen',
)
def __init__(self, init, repr, eq, order, unsafe_hash, frozen):
self.init = init
self.repr = repr
self.eq = eq
self.order = order
self.unsafe_hash = unsafe_hash
self.frozen = frozen
def __repr__(self):
return ('_DataclassParams('
f'init={self.init!r},'
f'repr={self.repr!r},'
f'eq={self.eq!r},'
f'order={self.order!r},'
f'unsafe_hash={self.unsafe_hash!r},'
f'frozen={self.frozen!r}'
')')
# This function is used instead of exposing Field creation directly,
# so that a type checker can be told (via overloads) that this is a
# function whose type depends on its parameters.
def field(*, default=MISSING, default_factory=MISSING, init=True, repr=True,
hash=None, compare=True, metadata=None):
"""Return an object to identify dataclass fields.
default is the default value of the field. default_factory is a
0-argument function called to initialize a field's value. If init
is True, the field will be a parameter to the class's __init__()
function. If repr is True, the field will be included in the
object's repr(). If hash is True, the field will be included in
the object's hash(). If compare is True, the field will be used
in comparison functions. metadata, if specified, must be a
mapping which is stored but not otherwise examined by dataclass.
It is an error to specify both default and default_factory.
"""
if default is not MISSING and default_factory is not MISSING:
raise ValueError('cannot specify both default and default_factory')
return Field(default, default_factory, init, repr, hash, compare,
metadata)
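# An added, commented illustration of typical field() usage:
#
#   @dataclass
#   class C:
#       items: list = field(default_factory=list) # a fresh list per instance
#       uid: int = field(default=0, compare=False) # ignored by __eq__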
def _tuple_str(obj_name, fields):
# Return a string representing each field of obj_name as a tuple
# member. So, if fields is ['x', 'y'] and obj_name is "self",
# return "(self.x,self.y)".
# Special case for the 0-tuple.
if not fields:
return '()'
# Note the trailing comma, needed if this turns out to be a 1-tuple.
return f'({",".join([f"{obj_name}.{f.name}" for f in fields])},)'
def _create_fn(name, args, body, *, globals=None, locals=None,
return_type=MISSING):
# Note that we mutate locals when exec() is called. Caller
# beware! The only callers are internal to this module, so no
# worries about external callers.
if locals is None:
locals = {}
return_annotation = ''
if return_type is not MISSING:
locals['_return_type'] = return_type
return_annotation = '->_return_type'
args = ','.join(args)
body = '\n'.join(f' {b}' for b in body)
# Compute the text of the entire function.
txt = f'def {name}({args}){return_annotation}:\n{body}'
exec(txt, globals, locals)
return locals[name]
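# An added, commented illustration: _create_fn('__hash__', ('self',),
# ['return 0']) exec's the text "def __hash__(self):\n return 0" and
# returns the resulting function object from locals.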
def _field_assign(frozen, name, value, self_name):
# If we're a frozen class, then assign to our fields in __init__
# via object.__setattr__. Otherwise, just use a simple
# assignment.
#
# self_name is what "self" is called in this function: don't
# hard-code "self", since that might be a field name.
if frozen:
return f'object.__setattr__({self_name},{name!r},{value})'
return f'{self_name}.{name}={value}'
def _field_init(f, frozen, globals, self_name):
# Return the text of the line in the body of __init__ that will
# initialize this field.
default_name = f'_dflt_{f.name}'
if f.default_factory is not MISSING:
if f.init:
# This field has a default factory. If a parameter is
# given, use it. If not, call the factory.
globals[default_name] = f.default_factory
value = (f'{default_name}() '
f'if {f.name} is _HAS_DEFAULT_FACTORY '
f'else {f.name}')
else:
# This is a field that's not in the __init__ params, but
# has a default factory function. It needs to be
# initialized here by calling the factory function,
# because there's no other way to initialize it.
# For a field initialized with a default=defaultvalue, the
# class dict just has the default value
# (cls.fieldname=defaultvalue). But that won't work for a
# default factory, the factory must be called in __init__
# and we must assign that to self.fieldname. We can't
# fall back to the class dict's value, both because it's
# not set, and because it might be different per-class
# (which, after all, is why we have a factory function!).
globals[default_name] = f.default_factory
value = f'{default_name}()'
else:
# No default factory.
if f.init:
if f.default is MISSING:
# There's no default, just do an assignment.
value = f.name
elif f.default is not MISSING:
globals[default_name] = f.default
value = f.name
else:
# This field does not need initialization. Signify that
# to the caller by returning None.
return None
# Only test this now, so that we can create variables for the
# default. However, return None to signify that we're not going
# to actually do the assignment statement for InitVars.
if f._field_type is _FIELD_INITVAR:
return None
# Now, actually generate the field assignment.
return _field_assign(frozen, f.name, value, self_name)
def _init_param(f):
# Return the __init__ parameter string for this field. For
# example, the equivalent of 'x:int=3' (except instead of 'int',
# reference a variable set to int, and instead of '3', reference a
# variable set to 3).
if f.default is MISSING and f.default_factory is MISSING:
# There's no default, and no default_factory, just output the
# variable name and type.
default = ''
elif f.default is not MISSING:
# There's a default, this will be the name that's used to look
# it up.
default = f'=_dflt_{f.name}'
elif f.default_factory is not MISSING:
# There's a factory function. Set a marker.
default = '=_HAS_DEFAULT_FACTORY'
return f'{f.name}:_type_{f.name}{default}'
def _init_fn(fields, frozen, has_post_init, self_name):
# fields contains both real fields and InitVar pseudo-fields.
# Make sure we don't have fields without defaults following fields
# with defaults. This actually would be caught when exec-ing the
# function source code, but catching it here gives a better error
# message, and future-proofs us in case we build up the function
# using ast.
seen_default = False
for f in fields:
# Only consider fields in the __init__ call.
if f.init:
if not (f.default is MISSING and f.default_factory is MISSING):
seen_default = True
elif seen_default:
raise TypeError(f'non-default argument {f.name!r} '
'follows default argument')
globals = {'MISSING': MISSING,
'_HAS_DEFAULT_FACTORY': _HAS_DEFAULT_FACTORY}
body_lines = []
for f in fields:
line = _field_init(f, frozen, globals, self_name)
# line is None means that this field doesn't require
# initialization (it's a pseudo-field). Just skip it.
if line:
body_lines.append(line)
# Does this class have a post-init function?
if has_post_init:
params_str = ','.join(f.name for f in fields
if f._field_type is _FIELD_INITVAR)
body_lines.append(f'{self_name}.{_POST_INIT_NAME}({params_str})')
# If no body lines, use 'pass'.
if not body_lines:
body_lines = ['pass']
locals = {f'_type_{f.name}': f.type for f in fields}
return _create_fn('__init__',
[self_name] + [_init_param(f) for f in fields if f.init],
body_lines,
locals=locals,
globals=globals,
return_type=None)
def _repr_fn(fields):
return _create_fn('__repr__',
('self',),
['return self.__class__.__qualname__ + f"(' +
', '.join([f"{f.name}={{self.{f.name}!r}}"
for f in fields]) +
')"'])
def _frozen_get_del_attr(cls, fields):
# XXX: globals is modified on the first call to _create_fn, then
# the modified version is used in the second call. Is this okay?
globals = {'cls': cls,
'FrozenInstanceError': FrozenInstanceError}
if fields:
fields_str = '(' + ','.join(repr(f.name) for f in fields) + ',)'
else:
# Special case for the zero-length tuple.
fields_str = '()'
return (_create_fn('__setattr__',
('self', 'name', 'value'),
(f'if type(self) is cls or name in {fields_str}:',
' raise FrozenInstanceError(f"cannot assign to field {name!r}")',
f'super(cls, self).__setattr__(name, value)'),
globals=globals),
_create_fn('__delattr__',
('self', 'name'),
(f'if type(self) is cls or name in {fields_str}:',
' raise FrozenInstanceError(f"cannot delete field {name!r}")',
f'super(cls, self).__delattr__(name)'),
globals=globals),
)
def _cmp_fn(name, op, self_tuple, other_tuple):
# Create a comparison function. If the fields in the object are
# named 'x' and 'y', then self_tuple is the string
# '(self.x,self.y)' and other_tuple is the string
# '(other.x,other.y)'.
return _create_fn(name,
('self', 'other'),
[ 'if other.__class__ is self.__class__:',
f' return {self_tuple}{op}{other_tuple}',
'return NotImplemented'])
def _hash_fn(fields):
self_tuple = _tuple_str('self', fields)
return _create_fn('__hash__',
('self',),
[f'return hash({self_tuple})'])
def _is_classvar(a_type, typing):
# This test uses a typing internal class, but it's the best way to
# test if this is a ClassVar.
return type(a_type) is typing._ClassVar
def _is_initvar(a_type, dataclasses):
# The module we're checking against is the module we're
# currently in (dataclasses.py).
return a_type is dataclasses.InitVar
def _is_type(annotation, cls, a_module, a_type, is_type_predicate):
# Given a type annotation string, does it refer to a_type in
# a_module? For example, when checking that annotation denotes a
# ClassVar, then a_module is typing, and a_type is
# typing.ClassVar.
# It's possible to look up a_module given a_type, but it involves
# looking in sys.modules (again!), and seems like a waste since
# the caller already knows a_module.
# - annotation is a string type annotation
# - cls is the class that this annotation was found in
# - a_module is the module we want to match
# - a_type is the type in that module we want to match
# - is_type_predicate is a function called with (obj, a_module)
# that determines if obj is of the desired type.
# Since this test does not do a local namespace lookup (and
# instead only a module (global) lookup), there are some things it
# gets wrong.
# With string annotations, cv0 will be detected as a ClassVar:
# CV = ClassVar
# @dataclass
# class C0:
# cv0: CV
# But in this example cv1 will not be detected as a ClassVar:
# @dataclass
# class C1:
# CV = ClassVar
# cv1: CV
# In C1, the code in this function (_is_type) will look up "CV" in
# the module and not find it, so it will not consider cv1 as a
# ClassVar. This is a fairly obscure corner case, and the best
# way to fix it would be to eval() the string "CV" with the
# correct global and local namespaces. However that would involve
# a eval() penalty for every single field of every dataclass
# that's defined. It was judged not worth it.
match = _MODULE_IDENTIFIER_RE.match(annotation)
if match:
ns = None
module_name = match.group(1)
if not module_name:
# No module name, assume the class's module did
# "from dataclasses import InitVar".
ns = sys.modules.get(cls.__module__).__dict__
else:
# Look up module_name in the class's module.
module = sys.modules.get(cls.__module__)
if module and module.__dict__.get(module_name) is a_module:
ns = sys.modules.get(a_type.__module__).__dict__
if ns and is_type_predicate(ns.get(match.group(2)), a_module):
return True
return False
def _get_field(cls, a_name, a_type):
# Return a Field object for this field name and type. ClassVars
# and InitVars are also returned, but marked as such (see
# f._field_type).
# If the default value isn't derived from Field, then it's only a
# normal default value. Convert it to a Field().
default = getattr(cls, a_name, MISSING)
if isinstance(default, Field):
f = default
else:
if isinstance(default, types.MemberDescriptorType):
# This is a field in __slots__, so it has no default value.
default = MISSING
f = field(default=default)
# Only at this point do we know the name and the type. Set them.
f.name = a_name
f.type = a_type
# Assume it's a normal field until proven otherwise. We're next
# going to decide if it's a ClassVar or InitVar, everything else
# is just a normal field.
f._field_type = _FIELD
# In addition to checking for actual types here, also check for
# string annotations. get_type_hints() won't always work for us
# (see https://github.com/python/typing/issues/508 for example),
    # plus it's expensive and would require an eval for every string
# annotation. So, make a best effort to see if this is a ClassVar
# or InitVar using regex's and checking that the thing referenced
# is actually of the correct type.
# For the complete discussion, see https://bugs.python.org/issue33453
# If typing has not been imported, then it's impossible for any
# annotation to be a ClassVar. So, only look for ClassVar if
# typing has been imported by any module (not necessarily cls's
# module).
typing = sys.modules.get('typing')
if typing:
if (_is_classvar(a_type, typing)
or (isinstance(f.type, str)
and _is_type(f.type, cls, typing, typing.ClassVar,
_is_classvar))):
f._field_type = _FIELD_CLASSVAR
# If the type is InitVar, or if it's a matching string annotation,
# then it's an InitVar.
if f._field_type is _FIELD:
# The module we're checking against is the module we're
# currently in (dataclasses.py).
dataclasses = sys.modules[__name__]
if (_is_initvar(a_type, dataclasses)
or (isinstance(f.type, str)
and _is_type(f.type, cls, dataclasses, dataclasses.InitVar,
_is_initvar))):
f._field_type = _FIELD_INITVAR
# Validations for individual fields. This is delayed until now,
# instead of in the Field() constructor, since only here do we
# know the field name, which allows for better error reporting.
# Special restrictions for ClassVar and InitVar.
if f._field_type in (_FIELD_CLASSVAR, _FIELD_INITVAR):
if f.default_factory is not MISSING:
raise TypeError(f'field {f.name} cannot have a '
'default factory')
# Should I check for other field settings? default_factory
# seems the most serious to check for. Maybe add others. For
# example, how about init=False (or really,
# init=<not-the-default-init-value>)? It makes no sense for
# ClassVar and InitVar to specify init=<anything>.
# For real fields, disallow mutable defaults for known types.
if f._field_type is _FIELD and isinstance(f.default, (list, dict, set)):
raise ValueError(f'mutable default {type(f.default)} for field '
f'{f.name} is not allowed: use default_factory')
return f
def _set_new_attribute(cls, name, value):
# Never overwrites an existing attribute. Returns True if the
# attribute already exists.
if name in cls.__dict__:
return True
setattr(cls, name, value)
return False
# Decide if/how we're going to create a hash function. Key is
# (unsafe_hash, eq, frozen, does-hash-exist). Value is the action to
# take. The common case is to do nothing, so instead of providing a
# function that is a no-op, use None to signify that.
def _hash_set_none(cls, fields):
return None
def _hash_add(cls, fields):
flds = [f for f in fields if (f.compare if f.hash is None else f.hash)]
return _hash_fn(flds)
def _hash_exception(cls, fields):
# Raise an exception.
raise TypeError(f'Cannot overwrite attribute __hash__ '
f'in class {cls.__name__}')
#
# +-------------------------------------- unsafe_hash?
# | +------------------------------- eq?
# | | +------------------------ frozen?
# | | | +---------------- has-explicit-hash?
# | | | |
# | | | | +------- action
# | | | | |
# v v v v v
_hash_action = {(False, False, False, False): None,
(False, False, False, True ): None,
(False, False, True, False): None,
(False, False, True, True ): None,
(False, True, False, False): _hash_set_none,
(False, True, False, True ): None,
(False, True, True, False): _hash_add,
(False, True, True, True ): None,
(True, False, False, False): _hash_add,
(True, False, False, True ): _hash_exception,
(True, False, True, False): _hash_add,
(True, False, True, True ): _hash_exception,
(True, True, False, False): _hash_add,
(True, True, False, True ): _hash_exception,
(True, True, True, False): _hash_add,
(True, True, True, True ): _hash_exception,
}
# See https://bugs.python.org/issue32929#msg312829 for an if-statement
# version of this table.
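# Editor's note -- an illustrative lookup into the table above. With the
# defaults @dataclass uses (unsafe_hash=False, eq=True, frozen=False) and no
# explicit __hash__ on the class, the key (False, True, False, False) selects
# _hash_set_none, so the generated class gets __hash__ = None and becomes
# unhashable -- the same behaviour as defining __eq__ by hand.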
def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen):
# Now that dicts retain insertion order, there's no reason to use
# an ordered dict. I am leveraging that ordering here, because
# derived class fields overwrite base class fields, but the order
# is defined by the base class, which is found first.
fields = {}
setattr(cls, _PARAMS, _DataclassParams(init, repr, eq, order,
unsafe_hash, frozen))
# Find our base classes in reverse MRO order, and exclude
# ourselves. In reversed order so that more derived classes
# override earlier field definitions in base classes. As long as
# we're iterating over them, see if any are frozen.
any_frozen_base = False
has_dataclass_bases = False
for b in cls.__mro__[-1:0:-1]:
# Only process classes that have been processed by our
# decorator. That is, they have a _FIELDS attribute.
base_fields = getattr(b, _FIELDS, None)
if base_fields:
has_dataclass_bases = True
for f in base_fields.values():
fields[f.name] = f
if getattr(b, _PARAMS).frozen:
any_frozen_base = True
# Annotations that are defined in this class (not in base
# classes). If __annotations__ isn't present, then this class
# adds no new annotations. We use this to compute fields that are
# added by this class.
#
# Fields are found from cls_annotations, which is guaranteed to be
# ordered. Default values are from class attributes, if a field
# has a default. If the default value is a Field(), then it
# contains additional info beyond (and possibly including) the
# actual default value. Pseudo-fields ClassVars and InitVars are
# included, despite the fact that they're not real fields. That's
# dealt with later.
cls_annotations = cls.__dict__.get('__annotations__', {})
# Now find fields in our class. While doing so, validate some
# things, and set the default values (as class attributes) where
# we can.
cls_fields = [_get_field(cls, name, type)
for name, type in cls_annotations.items()]
for f in cls_fields:
fields[f.name] = f
# If the class attribute (which is the default value for this
# field) exists and is of type 'Field', replace it with the
# real default. This is so that normal class introspection
# sees a real default value, not a Field.
if isinstance(getattr(cls, f.name, None), Field):
if f.default is MISSING:
# If there's no default, delete the class attribute.
# This happens if we specify field(repr=False), for
# example (that is, we specified a field object, but
# no default value). Also if we're using a default
# factory. The class attribute should not be set at
# all in the post-processed class.
delattr(cls, f.name)
else:
setattr(cls, f.name, f.default)
# Do we have any Field members that don't also have annotations?
for name, value in cls.__dict__.items():
        if isinstance(value, Field) and name not in cls_annotations:
raise TypeError(f'{name!r} is a field but has no type annotation')
# Check rules that apply if we are derived from any dataclasses.
if has_dataclass_bases:
# Raise an exception if any of our bases are frozen, but we're not.
if any_frozen_base and not frozen:
raise TypeError('cannot inherit non-frozen dataclass from a '
'frozen one')
# Raise an exception if we're frozen, but none of our bases are.
if not any_frozen_base and frozen:
raise TypeError('cannot inherit frozen dataclass from a '
'non-frozen one')
# Remember all of the fields on our class (including bases). This
# also marks this class as being a dataclass.
setattr(cls, _FIELDS, fields)
# Was this class defined with an explicit __hash__? Note that if
# __eq__ is defined in this class, then python will automatically
# set __hash__ to None. This is a heuristic, as it's possible
    # that such a __hash__ == None was not auto-generated, but it's
# close enough.
class_hash = cls.__dict__.get('__hash__', MISSING)
has_explicit_hash = not (class_hash is MISSING or
(class_hash is None and '__eq__' in cls.__dict__))
# If we're generating ordering methods, we must be generating the
# eq methods.
if order and not eq:
raise ValueError('eq must be true if order is true')
if init:
# Does this class have a post-init function?
has_post_init = hasattr(cls, _POST_INIT_NAME)
# Include InitVars and regular fields (so, not ClassVars).
flds = [f for f in fields.values()
if f._field_type in (_FIELD, _FIELD_INITVAR)]
_set_new_attribute(cls, '__init__',
_init_fn(flds,
frozen,
has_post_init,
# The name to use for the "self"
# param in __init__. Use "self"
# if possible.
'__dataclass_self__' if 'self' in fields
else 'self',
))
# Get the fields as a list, and include only real fields. This is
# used in all of the following methods.
field_list = [f for f in fields.values() if f._field_type is _FIELD]
if repr:
flds = [f for f in field_list if f.repr]
_set_new_attribute(cls, '__repr__', _repr_fn(flds))
if eq:
        # Create __eq__ method. There's no need for a __ne__ method,
# since python will call __eq__ and negate it.
flds = [f for f in field_list if f.compare]
self_tuple = _tuple_str('self', flds)
other_tuple = _tuple_str('other', flds)
_set_new_attribute(cls, '__eq__',
_cmp_fn('__eq__', '==',
self_tuple, other_tuple))
if order:
# Create and set the ordering methods.
flds = [f for f in field_list if f.compare]
self_tuple = _tuple_str('self', flds)
other_tuple = _tuple_str('other', flds)
for name, op in [('__lt__', '<'),
('__le__', '<='),
('__gt__', '>'),
('__ge__', '>='),
]:
if _set_new_attribute(cls, name,
_cmp_fn(name, op, self_tuple, other_tuple)):
raise TypeError(f'Cannot overwrite attribute {name} '
f'in class {cls.__name__}. Consider using '
'functools.total_ordering')
if frozen:
for fn in _frozen_get_del_attr(cls, field_list):
if _set_new_attribute(cls, fn.__name__, fn):
raise TypeError(f'Cannot overwrite attribute {fn.__name__} '
f'in class {cls.__name__}')
# Decide if/how we're going to create a hash function.
hash_action = _hash_action[bool(unsafe_hash),
bool(eq),
bool(frozen),
has_explicit_hash]
if hash_action:
# No need to call _set_new_attribute here, since by the time
# we're here the overwriting is unconditional.
cls.__hash__ = hash_action(cls, field_list)
if not getattr(cls, '__doc__'):
# Create a class doc-string.
cls.__doc__ = (cls.__name__ +
str(inspect.signature(cls)).replace(' -> None', ''))
return cls
# _cls should never be specified by keyword, so start it with an
# underscore. The presence of _cls is used to detect if this
# decorator is being called with parameters or not.
def dataclass(_cls=None, *, init=True, repr=True, eq=True, order=False,
unsafe_hash=False, frozen=False):
"""Returns the same class as was passed in, with dunder methods
added based on the fields defined in the class.
Examines PEP 526 __annotations__ to determine fields.
If init is true, an __init__() method is added to the class. If
repr is true, a __repr__() method is added. If order is true, rich
comparison dunder methods are added. If unsafe_hash is true, a
__hash__() method function is added. If frozen is true, fields may
not be assigned to after instance creation.
"""
def wrap(cls):
return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen)
# See if we're being called as @dataclass or @dataclass().
if _cls is None:
# We're called with parens.
return wrap
# We're called as @dataclass without parens.
return wrap(_cls)
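# Editor's note -- an illustrative usage sketch; the class and field names
# below are hypothetical:
#
#     @dataclass
#     class Point:
#         x: int
#         y: int = 0
#
#     p = Point(1)
#     repr(p)        # 'Point(x=1, y=0)'  -- generated __repr__
#     p == Point(1)  # True               -- generated __eq__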
def fields(class_or_instance):
"""Return a tuple describing the fields of this dataclass.
Accepts a dataclass or an instance of one. Tuple elements are of
type Field.
"""
# Might it be worth caching this, per class?
try:
fields = getattr(class_or_instance, _FIELDS)
except AttributeError:
raise TypeError('must be called with a dataclass type or instance')
# Exclude pseudo-fields. Note that fields is sorted by insertion
# order, so the order of the tuple is as the fields were defined.
return tuple(f for f in fields.values() if f._field_type is _FIELD)
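# Editor's note -- an illustrative sketch, reusing the hypothetical Point
# class from the note above. fields() accepts both the class and an instance:
#
#     [f.name for f in fields(Point)]     # ['x', 'y']
#     [f.name for f in fields(Point(1))]  # ['x', 'y']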
def _is_dataclass_instance(obj):
"""Returns True if obj is an instance of a dataclass."""
return not isinstance(obj, type) and hasattr(obj, _FIELDS)
def is_dataclass(obj):
"""Returns True if obj is a dataclass or an instance of a
dataclass."""
return hasattr(obj, _FIELDS)
def asdict(obj, *, dict_factory=dict):
"""Return the fields of a dataclass instance as a new dictionary mapping
field names to field values.
Example usage:
@dataclass
class C:
x: int
y: int
c = C(1, 2)
assert asdict(c) == {'x': 1, 'y': 2}
If given, 'dict_factory' will be used instead of built-in dict.
The function applies recursively to field values that are
dataclass instances. This will also look into built-in containers:
tuples, lists, and dicts.
"""
if not _is_dataclass_instance(obj):
raise TypeError("asdict() should be called on dataclass instances")
return _asdict_inner(obj, dict_factory)
def _asdict_inner(obj, dict_factory):
if _is_dataclass_instance(obj):
result = []
for f in fields(obj):
value = _asdict_inner(getattr(obj, f.name), dict_factory)
result.append((f.name, value))
return dict_factory(result)
elif isinstance(obj, (list, tuple)):
return type(obj)(_asdict_inner(v, dict_factory) for v in obj)
elif isinstance(obj, dict):
return type(obj)((_asdict_inner(k, dict_factory), _asdict_inner(v, dict_factory))
for k, v in obj.items())
else:
return copy.deepcopy(obj)
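# Editor's note -- an illustrative sketch of the recursion above, with
# hypothetical classes. Nested dataclasses inside containers are converted too:
#
#     @dataclass
#     class Inner:
#         z: int
#
#     @dataclass
#     class Outer:
#         items: list
#
#     asdict(Outer([Inner(1), Inner(2)]))  # {'items': [{'z': 1}, {'z': 2}]}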
def astuple(obj, *, tuple_factory=tuple):
"""Return the fields of a dataclass instance as a new tuple of field values.
Example usage::
@dataclass
class C:
x: int
y: int
c = C(1, 2)
assert astuple(c) == (1, 2)
If given, 'tuple_factory' will be used instead of built-in tuple.
The function applies recursively to field values that are
dataclass instances. This will also look into built-in containers:
tuples, lists, and dicts.
"""
if not _is_dataclass_instance(obj):
raise TypeError("astuple() should be called on dataclass instances")
return _astuple_inner(obj, tuple_factory)
def _astuple_inner(obj, tuple_factory):
if _is_dataclass_instance(obj):
result = []
for f in fields(obj):
value = _astuple_inner(getattr(obj, f.name), tuple_factory)
result.append(value)
return tuple_factory(result)
elif isinstance(obj, (list, tuple)):
return type(obj)(_astuple_inner(v, tuple_factory) for v in obj)
elif isinstance(obj, dict):
return type(obj)((_astuple_inner(k, tuple_factory), _astuple_inner(v, tuple_factory))
for k, v in obj.items())
else:
return copy.deepcopy(obj)
def make_dataclass(cls_name, fields, *, bases=(), namespace=None, init=True,
repr=True, eq=True, order=False, unsafe_hash=False,
frozen=False):
"""Return a new dynamically created dataclass.
The dataclass name will be 'cls_name'. 'fields' is an iterable
of either (name), (name, type) or (name, type, Field) objects. If type is
omitted, use the string 'typing.Any'. Field objects are created by
the equivalent of calling 'field(name, type [, Field-info])'.
C = make_dataclass('C', ['x', ('y', int), ('z', int, field(init=False))], bases=(Base,))
is equivalent to:
@dataclass
class C(Base):
x: 'typing.Any'
y: int
z: int = field(init=False)
For the bases and namespace parameters, see the builtin type() function.
The parameters init, repr, eq, order, unsafe_hash, and frozen are passed to
dataclass().
"""
if namespace is None:
namespace = {}
else:
# Copy namespace since we're going to mutate it.
namespace = namespace.copy()
# While we're looking through the field names, validate that they
# are identifiers, are not keywords, and not duplicates.
seen = set()
anns = {}
for item in fields:
if isinstance(item, str):
name = item
tp = 'typing.Any'
elif len(item) == 2:
name, tp, = item
elif len(item) == 3:
name, tp, spec = item
namespace[name] = spec
else:
raise TypeError(f'Invalid field: {item!r}')
if not isinstance(name, str) or not name.isidentifier():
            raise TypeError(f'Field names must be valid identifiers: {name!r}')
if keyword.iskeyword(name):
raise TypeError(f'Field names must not be keywords: {name!r}')
if name in seen:
raise TypeError(f'Field name duplicated: {name!r}')
seen.add(name)
anns[name] = tp
namespace['__annotations__'] = anns
# We use `types.new_class()` instead of simply `type()` to allow dynamic creation
    # of generic dataclasses.
cls = types.new_class(cls_name, bases, {}, lambda ns: ns.update(namespace))
return dataclass(cls, init=init, repr=repr, eq=eq, order=order,
unsafe_hash=unsafe_hash, frozen=frozen)
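# Editor's note -- an illustrative sketch of dynamic creation, a variant of
# the docstring example above without the hypothetical base class:
#
#     C = make_dataclass('C', ['x', ('y', int, field(default=5))])
#     C(1)  # C(x=1, y=5)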
def replace(obj, **changes):
"""Return a new object replacing specified fields with new values.
This is especially useful for frozen classes. Example usage:
@dataclass(frozen=True)
class C:
x: int
y: int
c = C(1, 2)
c1 = replace(c, x=3)
assert c1.x == 3 and c1.y == 2
"""
# We're going to mutate 'changes', but that's okay because it's a
# new dict, even if called with 'replace(obj, **my_changes)'.
if not _is_dataclass_instance(obj):
raise TypeError("replace() should be called on dataclass instances")
# It's an error to have init=False fields in 'changes'.
# If a field is not in 'changes', read its value from the provided obj.
for f in getattr(obj, _FIELDS).values():
# Only consider normal fields or InitVars.
if f._field_type is _FIELD_CLASSVAR:
continue
if not f.init:
# Error if this field is specified in changes.
if f.name in changes:
raise ValueError(f'field {f.name} is declared with '
'init=False, it cannot be specified with '
'replace()')
continue
if f.name not in changes:
if f._field_type is _FIELD_INITVAR:
raise ValueError(f"InitVar {f.name!r} "
'must be specified with replace()')
changes[f.name] = getattr(obj, f.name)
# Create the new object, which calls __init__() and
# __post_init__() (if defined), using all of the init fields we've
# added and/or left in 'changes'. If there are values supplied in
# changes that aren't fields, this will correctly raise a
# TypeError.
return obj.__class__(**changes)
| 57,009 | Python | .tac | 1,170 | 40.498291 | 98 | 0.609624 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,734 | dataclasses.py | zatosource_zato/code/zato-common/src/zato/common/ext/dataclasses.py |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2021, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# flake8: noqa
# Be explicit about which import error we want to catch
try:
import dataclasses
# Python 3.6
except ImportError:
from zato.common.ext._dataclasses import _FIELDS, _PARAMS
from zato.common.ext._dataclasses import * # noqa
# Python 3.6+
else:
from dataclasses import _FIELDS, _PARAMS
from dataclasses import *
| 501 | Python | .tac | 17 | 26.941176 | 64 | 0.732218 | zatosource/zato | 1,096 | 239 | 0 | AGPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,735 | setup.py | novoid_Memacs/setup.py |
from setuptools import setup, find_packages
# building extra requirements with "all" option to install everything at once
extras_require = {
"gps": ["gpxpy", "geocoder"],
"rss": ["feedparser"],
"ical": ["icalendar"],
"lastfm": ["pylast"],
"battery": ["batinfo"],
"twitter": ["python-dateutil", "twython"],
}
extras_require["all"] = {r for v in extras_require.values() for r in v}
setup(
name="memacs",
version="2021.01.28.1",
description="Visualize your (digital) life in Emacs Org mode "
"by converting data to Org mode format",
author="Karl Voit",
author_email="tools@Karl-Voit.at",
url="https://github.com/novoid/memacs",
download_url="https://github.com/novoid/memacs/zipball/master",
keywords=["quantified self", "emacs", "org-mode", "org mode"],
packages=find_packages(), # Required
classifiers=[
"Programming Language :: Python :: 3 :: Only",
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: GNU General Public License (GPL)",
"Operating System :: OS Independent",
],
# package_data={},
install_requires=["orgformat", "emoji"],
extras_require=extras_require,
entry_points={
'console_scripts': [
"memacs_arbtt=bin.memacs_arbtt:main",
"memacs_battery=bin.memacs_battery:main [battery]",
"memacs_chrome=bin.memacs_chrome:main",
"memacs_csv=bin.memacs_csv:main",
"memacs_example=bin.memacs_example:main",
"memacs_filenametimestamps=bin.memacs_filenametimestamps:main",
"memacs_firefox=bin.memacs_firefox:main",
"memacs_git=bin.memacs_git:main",
"memacs_gpx=bin.memacs_gpx:main [gps]",
"memacs_ical=bin.memacs_ical:main [ical]",
"memacs_imap=bin.memacs_imap:main",
"memacs_kodi=bin.memacs_kodi:main",
"memacs_lastfm=bin.memacs_lastfm:main [lastfm]",
"memacs_mumail=bin.memacs_mumail:main",
"memacs_phonecalls=bin.memacs_phonecalls:main",
"memacs_phonecalls_superbackup=bin.memacs_phonecalls_superbackup:main",
"memacs_photos=bin.memacs_photos:main",
"memacs_rss=bin.memacs_rss:main [rss]",
"memacs_simplephonelogs=bin.memacs_simplephonelogs:main",
"memacs_sms=bin.memacs_sms:main",
"memacs_sms_superbackup=bin.memacs_sms_superbackup:main",
"memacs_svn=bin.memacs_svn:main",
"memacs_twitter=bin.memacs_twitter:main [twitter]",
"memacs_whatsapp=bin.memacs_whatsapp:main",
],
},
long_description="""This Python framework converts data from various sources to Org mode format
which may then included in Org mode agenda (calendar). This way, you get a 360-degree-view of your
digital life.
Each Memacs module converts a different input format into Org mode files.
- Target group: users of Emacs Org mode who are able to use command line tools
- Hosted and documented on github: https://github.com/novoid/memacs
"""
)
| 3,174 | Python | .py | 69 | 38.26087 | 99 | 0.650435 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,736 | memacs_sms.py | novoid_Memacs/bin/memacs_sms.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2014-12-13 13:39:15 vk>
from memacs.sms import SmsMemacs
PROG_VERSION_NUMBER = "0.2"
PROG_VERSION_DATE = "2014-12-13"
PROG_SHORT_DESCRIPTION = "Memacs for sms"
PROG_TAG = "sms"
PROG_DESCRIPTION = """
This Memacs module will parse output of sms xml backup files
> A sample xml file can be found in the documentation file memacs_sms.org.
Then an Org-mode file is generated.
"""
COPYRIGHT_YEAR = "2011-2014"
COPYRIGHT_AUTHORS = """Karl Voit <tools@Karl-Voit.at>,
Armin Wieser <armin.wieser@gmail.com>"""
def main():
global memacs
memacs = SmsMemacs(
prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_description=PROG_DESCRIPTION,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS
)
memacs.handle_main()
if __name__ == "__main__":
main()
| 985 | Python | .py | 30 | 29.033333 | 70 | 0.702218 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,737 | memacs_firefox.py | novoid_Memacs/bin/memacs_firefox.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2013-04-04 16:18:07 vk>
from memacs.firefox import Firefox
PROG_VERSION_NUMBER = "0.0"
PROG_VERSION_DATE = "2018-07-14"
PROG_SHORT_DESCRIPTION = "Memacs for firefox url history "
PROG_TAG = "firefox"
PROG_DESCRIPTION = """
This class will parse the Firefox history file (places.sqlite) and
produce an org file with all your visited sites
"""
# set CONFIG_PARSER_NAME only, when you want to have a config file
# otherwise you can comment it out
# CONFIG_PARSER_NAME="memacs-example"
COPYRIGHT_YEAR = "2018"
COPYRIGHT_AUTHORS = """Raimon Grau <raimonster@gmail.com>"""
def main():
global memacs
memacs = Firefox(
prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_description=PROG_DESCRIPTION,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS
# use_config_parser_name=CONFIG_PARSER_NAME
)
memacs.handle_main()
if __name__ == "__main__":
main()
| 1,099 | Python | .py | 32 | 30.53125 | 66 | 0.709708 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,738 | memacs_git.py | novoid_Memacs/bin/memacs_git.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2013-04-04 16:18:40 vk>
from memacs.git import GitMemacs
PROG_VERSION_NUMBER = "0.1"
PROG_VERSION_DATE = "2011-12-20"
PROG_SHORT_DESCRIPTION = "Memacs for git files "
PROG_TAG = "git"
PROG_DESCRIPTION = """
This class will parse files from git rev-list output.
Use the following command to generate an input file:
$ git rev-list --all --pretty=raw > /path/to/input file
Then an Org-mode file is generated that contains all commit messages.
If an output file is specified, only commits not already present are appended.
"""
COPYRIGHT_YEAR = "2011-2013"
COPYRIGHT_AUTHORS = """Karl Voit <tools@Karl-Voit.at>,
Armin Wieser <armin.wieser@gmail.com>"""
def main():
global memacs
memacs = GitMemacs(
prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_description=PROG_DESCRIPTION,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS
)
memacs.handle_main()
if __name__ == "__main__":
main()
| 1,115 | Python | .py | 32 | 31.1875 | 67 | 0.715084 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,739 | memacs_svn.py | novoid_Memacs/bin/memacs_svn.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2013-04-04 16:20:01 vk>
from memacs.svn import SvnMemacs
PROG_VERSION_NUMBER = "0.1"
PROG_VERSION_DATE = "2011-12-27"
PROG_SHORT_DESCRIPTION = "Memacs for svn"
PROG_TAG = "svn"
PROG_DESCRIPTION = """
This Memacs module will parse output of svn log --xml
sample xml:
<?xml version="1.0"?>
<log>
<logentry
revision="13">
<author>bob</author>
<date>2011-11-05T18:18:22.936127Z</date>
<msg>Bugfix.</msg>
</logentry>
</log>
Then an Org-mode file is generated that contains information
about the log messages, author, and revision
"""
COPYRIGHT_YEAR = "2011-2013"
COPYRIGHT_AUTHORS = """Karl Voit <tools@Karl-Voit.at>,
Armin Wieser <armin.wieser@gmail.com>"""
def main():
global memacs
memacs = SvnMemacs(
prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_description=PROG_DESCRIPTION,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS
)
memacs.handle_main()
if __name__ == "__main__":
main()
| 1,178 | Python | .py | 40 | 25.45 | 60 | 0.682301 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,740 | memacs_lastfm.py | novoid_Memacs/bin/memacs_lastfm.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from memacs.lastfm import LastFM
CONFIG_PARSER_NAME="memacs-lastfm"
PROG_VERSION_NUMBER = "0.1"
PROG_VERSION_DATE = "2017-02-24"
PROG_SHORT_DESCRIPTION = "Memacs for lastfm"
PROG_TAG = "lastfm"
COPYRIGHT_YEAR = "2017"
COPYRIGHT_AUTHORS = """Manuel Koell <mankoell@gmail.com>"""
def main():
global memacs
memacs = LastFM(
prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS,
use_config_parser_name=CONFIG_PARSER_NAME
)
memacs.handle_main()
if __name__ == "__main__":
main()
| 757 | Python | .py | 24 | 27 | 59 | 0.687845 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,741 | memacs_mumail.py | novoid_Memacs/bin/memacs_mumail.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2015-04-30 17:12:02 vs>
from memacs.mu import MuMail
PROG_VERSION_NUMBER = "0.1"
PROG_VERSION_DATE = "2015-03-08"
PROG_SHORT_DESCRIPTION = "Memacs for Mu Mails"
PROG_TAG = "emails:mumail"
PROG_DESCRIPTION = """This memacs module will connect mu mail database,
fetch all mails and writes them to an orgfile.
"""
CONFIG_PARSER_NAME = ""
COPYRIGHT_YEAR = "2011-2015"
COPYRIGHT_AUTHORS = """Karl Voit <tools@Karl-Voit.at>,
Stephanus Volke <post@stephanus-volke.de>"""
def main():
global memacs
memacs = MuMail(
prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_description=PROG_DESCRIPTION,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS,
use_config_parser_name=CONFIG_PARSER_NAME
)
memacs.handle_main()
if __name__ == "__main__":
main()
| 998 | Python | .py | 30 | 29.266667 | 71 | 0.697505 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,742 | memacs_sms_superbackup.py | novoid_Memacs/bin/memacs_sms_superbackup.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2013-09-12 09:11 igb>
from memacs.sms_superbackup import SmsSuperBackupMemacs
PROG_VERSION_NUMBER = "0.1"
PROG_VERSION_DATE = "2012-03-07"
PROG_SHORT_DESCRIPTION = "Memacs for sms"
PROG_TAG = "sms"
PROG_DESCRIPTION = """
This Memacs module will parse output of sms xml backup files
> A sample xml file can be found in the documentation file memacs_sms.org.
Then an Org-mode file is generated.
"""
COPYRIGHT_YEAR = "2011-2013"
COPYRIGHT_AUTHORS = """Karl Voit <tools@Karl-Voit.at>,
Armin Wieser <armin.wieser@gmail.com>
Ian Barton <ian@manor-farm.org>"""
def main():
global memacs
memacs = SmsSuperBackupMemacs(
prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_description=PROG_DESCRIPTION,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS
)
memacs.handle_main()
if __name__ == "__main__":
main()
| 1,049 | Python | .py | 31 | 30.129032 | 70 | 0.713861 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,743 | memacs_arbtt.py | novoid_Memacs/bin/memacs_arbtt.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Time-stamp: <2017-02-07 19:25 manu>
from memacs.arbtt import Arbtt
PROG_VERSION_NUMBER = "0.1"
PROG_VERSION_DATE = "2017-02-07"
PROG_SHORT_DESCRIPTION = "Memacs for arbtt"
PROG_TAG = "arbtt"
PROG_DESCRIPTION = """
This Memacs module will parse arbtt stats ....
"""
COPYRIGHT_YEAR = "2017"
COPYRIGHT_AUTHORS = """Manuel Koell <mankoell@gmail.com>"""
def main():
global memacs
memacs = Arbtt(
prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_description=PROG_DESCRIPTION,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS
)
memacs.handle_main()
if __name__ == "__main__":
main()
| 819 | Python | .py | 27 | 26.222222 | 59 | 0.681122 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,744 | memacs_filenametimestamps.py | novoid_Memacs/bin/memacs_filenametimestamps.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2019-10-03 11:44:44 vk>
from memacs.filenametimestamps import FileNameTimeStamps
PROG_VERSION_NUMBER = "1.0"
PROG_VERSION_DATE = "2019-10-03"
PROG_SHORT_DESCRIPTION = "Memacs for file name time stamp"
PROG_TAG = "filedatestamps"
PROG_DESCRIPTION = """This script parses a text file containing absolute paths
to files with ISO datestamps and timestamps in their file names:
Examples: "2010-03-29T20.12 Divegraph.tiff"
"2010-12-31T23.59_Cookie_recipies.pdf"
"2011-08-29T08.23.59_test.pdf"
Emacs tmp-files like file~ are automatically ignored
Then an Org-mode file is generated that contains links to the files.
For files containing only date information, e.g. "2013-03-08_foo.txt", the
time will be extracted from the filesystem when both dates match. To
turn off this feature see argument "--skip-file-time-extraction"
"""
COPYRIGHT_YEAR = "2011 and higher"
COPYRIGHT_AUTHORS = """Karl Voit <tools@Karl-Voit.at>,
Armin Wieser <armin.wieser@gmail.com>"""
def main():
global memacs
memacs = FileNameTimeStamps(prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_description=PROG_DESCRIPTION,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS)
memacs.handle_main()
if __name__ == "__main__":
main()
| 1,616 | Python | .py | 34 | 39.470588 | 78 | 0.669211 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,745 | memacs_csv.py | novoid_Memacs/bin/memacs_csv.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2022-07-14 15:47:29 vk>
from memacs.csv import Csv
PROG_VERSION_NUMBER = "0.2"
PROG_VERSION_DATE = "2022-07-14"
PROG_SHORT_DESCRIPTION = "Memacs for csv files"
PROG_TAG = "csv"
PROG_DESCRIPTION = """
This Memacs module will parse csv files
"""
# set CONFIG_PARSER_NAME only, when you want to have a config file
# otherwise you can comment it out
# CONFIG_PARSER_NAME="memacs-example"
COPYRIGHT_YEAR = "2012 and higher"
COPYRIGHT_AUTHORS = """Karl Voit <tools@Karl-Voit.at>,
Armin Wieser <armin.wieser@gmail.com>"""
def main():
global memacs
memacs = Csv(
prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_description=PROG_DESCRIPTION,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS
# use_config_parser_name=CONFIG_PARSER_NAME
)
memacs.handle_main()
if __name__ == "__main__":
main()
| 1,048 | Python | .py | 32 | 28.90625 | 66 | 0.698712 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,746 | memacs_gpx.py | novoid_Memacs/bin/memacs_gpx.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from memacs.gpx import GPX
PROG_VERSION_NUMBER = "0.1"
PROG_VERSION_DATE = "2017-03-02"
PROG_SHORT_DESCRIPTION = "Memacs for GPX files"
PROG_TAG = "gps"
COPYRIGHT_YEAR = "2017"
COPYRIGHT_AUTHORS = """Manuel Koell <mankoell@gmail.com>"""
def main():
global memacs
memacs = GPX(prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS)
memacs.handle_main()
if __name__ == "__main__":
main()
| 691 | Python | .py | 20 | 28.15 | 63 | 0.64006 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,747 | memacs_twitter.py | novoid_Memacs/bin/memacs_twitter.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2013-04-04 16:18:07 vk>
from memacs.twitter import Twitter
PROG_VERSION_NUMBER = "0.1"
PROG_VERSION_DATE = "2013-09-01"
PROG_SHORT_DESCRIPTION = "Memacs for Twitter "
PROG_TAG = "mytag"
PROG_DESCRIPTION = """
This Memacs module will process your Twitter timeline ....
sample config:
[memacs-twitter] <-- "memacs-twitter" has to be CONFIG_PARSER_NAME
APP_KEY =
APP_SECRET =
OAUTH_TOKEN =
OAUTH_TOKEN_SECRET =
screen_name =
count =
"""
# set CONFIG_PARSER_NAME only, when you want to have a config file
# otherwise you can comment it out
CONFIG_PARSER_NAME="memacs-twitter"
COPYRIGHT_YEAR = "2013"
COPYRIGHT_AUTHORS = """Ian Barton <ian@manor-farm.org>"""
def main():
global memacs
memacs = Twitter(
prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_description=PROG_DESCRIPTION,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS,
use_config_parser_name=CONFIG_PARSER_NAME
)
memacs.handle_main()
if __name__ == "__main__":
main()
| 1,203 | Python | .py | 39 | 27.435897 | 76 | 0.701906 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,748 | memacs_battery.py | novoid_Memacs/bin/memacs_battery.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from memacs.battery import Battery
PROG_VERSION_NUMBER = "0.1"
PROG_VERSION_DATE = "2017-02-24"
PROG_SHORT_DESCRIPTION = "Memacs for battery"
PROG_TAG = "battery"
COPYRIGHT_YEAR = "2017"
COPYRIGHT_AUTHORS = """Manuel Koell <mankoell@gmail.com>"""
def main():
memacs = Battery(
prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS
)
memacs.handle_main()
if __name__ == "__main__":
main()
| 657 | Python | .py | 21 | 26.857143 | 59 | 0.681529 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,749 | memacs_photos.py | novoid_Memacs/bin/memacs_photos.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2013-04-04 16:19:39 vk>
from memacs.photos import PhotosMemacs
PROG_VERSION_NUMBER = "0.1"
PROG_VERSION_DATE = "2012-03-10"
PROG_SHORT_DESCRIPTION = "Memacs for photos (exif)"
PROG_TAG = "photos"
PROG_DESCRIPTION = """
This memacs module will walk through a given folder looking for photos.
If a photo is found, it will get a timestamp from the exif information.
Then an Org-mode file is generated.
"""
COPYRIGHT_YEAR = "2012-2013"
COPYRIGHT_AUTHORS = """Karl Voit <tools@Karl-Voit.at>,
Armin Wieser <armin.wieser@gmail.com>"""
def main():
global memacs
memacs = PhotosMemacs(
prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_description=PROG_DESCRIPTION,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS
)
memacs.handle_main()
if __name__ == "__main__":
main()
| 1,020 | Python | .py | 30 | 30.2 | 72 | 0.706721 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,750 | memacs_kodi.py | novoid_Memacs/bin/memacs_kodi.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from memacs.kodi import Kodi
PROG_VERSION_NUMBER = "0.1"
PROG_VERSION_DATE = "2018-10-22"
PROG_SHORT_DESCRIPTION = "Memacs for Kodi "
PROG_TAG = "kodi"
PROG_DESCRIPTION = """
This class will parse logs from the Kodi Mediacenter
"""
COPYRIGHT_YEAR = "2018"
COPYRIGHT_AUTHORS = """Max Beutelspacher <max.beutelspacher@mailbox.org>"""
def main():
global memacs
memacs = Kodi(
prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_description=PROG_DESCRIPTION,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS
# use_config_parser_name=CONFIG_PARSER_NAME
)
memacs.handle_main()
if __name__ == "__main__":
main()
| 857 | Python | .py | 27 | 27.37037 | 75 | 0.685298 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,751 | memacs_whatsapp.py | novoid_Memacs/bin/memacs_whatsapp.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from memacs.whatsapp import WhatsApp
PROG_VERSION_NUMBER = "0.1"
PROG_VERSION_DATE = "2017-02-28"
PROG_SHORT_DESCRIPTION = "Memacs for whatsapp"
PROG_TAG = "whatsapp"
COPYRIGHT_YEAR = "2017"
COPYRIGHT_AUTHORS = """Manuel Koell <mankoell@gmail.com>"""
def main():
memacs = WhatsApp(
prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS
)
memacs.handle_main()
if __name__ == "__main__":
main()
| 665 | Python | .py | 21 | 27.095238 | 59 | 0.679749 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,752 | memacs_phonecalls.py | novoid_Memacs/bin/memacs_phonecalls.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2013-04-04 16:19:18 vk>
from memacs.phonecalls import PhonecallsMemacs
PROG_VERSION_NUMBER = "0.1"
PROG_VERSION_DATE = "2012-03-08"
PROG_SHORT_DESCRIPTION = "Memacs for phonecalls"
PROG_TAG = "phonecalls"
PROG_DESCRIPTION = """
This Memacs module will parse output of phonecalls xml backup files
sample xml file:
<?xml version='1.0' encoding='UTF-8' standalone='yes' ?>
<calls count="8">
<call number="+43691234123" duration="59" date="1312563906092" type="1" />
<call number="06612341234" duration="22" date="1312541215834" type="2" />
<call number="-1" duration="382" date="1312530691081" type="1" />
<call number="+4312341234" duration="289" date="1312482327195" type="1" />
<call number="+4366412341234" duration="70" date="1312476334059" type="1" />
<call number="+4366234123" duration="0" date="1312473751975" type="2" />
<call number="+436612341234" duration="0" date="1312471300072" type="3" />
<call number="+433123412" duration="60" date="1312468562489" type="2" />
</calls>
Then an Org-mode file is generated.
"""
COPYRIGHT_YEAR = "2011-2013"
COPYRIGHT_AUTHORS = """Karl Voit <tools@Karl-Voit.at>,
Armin Wieser <armin.wieser@gmail.com>"""
def main():
global memacs
memacs = PhonecallsMemacs(
prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_description=PROG_DESCRIPTION,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS
)
memacs.handle_main()
if __name__ == "__main__":
main()
| 1,661 | Python | .py | 41 | 37.073171 | 78 | 0.701613 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,753 | memacs_simplephonelogs.py | novoid_Memacs/bin/memacs_simplephonelogs.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2017-04-13 22:23:42 vk>
from memacs.simplephonelogs import SimplePhoneLogsMemacs
PROG_VERSION_NUMBER = "0.2"
PROG_VERSION_DATE = "2017-04-13"
PROG_SHORT_DESCRIPTION = "Memacs for simple phone logs"
PROG_TAG = "phonelog"
PROG_DESCRIPTION = """
This Memacs module will parse simple log files which were written
for example by Tasker.
sample log file: (DATE # TIME # WHAT # BATTERYSTATE # UPTIMESECONDS)
2012-11-20 # 11.56 # boot # 89 # 6692
2012-11-20 # 11.56 # boot # 89 # 6694
2012-11-20 # 19.59 # shutdown # 72 # 35682
2012-11-20 # 21.32 # boot # 71 # 117
2012-11-20 # 23.52 # shutdown # 63 # 8524
2012-11-21 # 07.23 # boot # 100 # 115
2012-11-21 # 07.52 # wifi-home # 95 # 1879
2012-11-21 # 08.17 # wifi-home-end # 92 # 3378
2012-11-21 # 13.06 # boot # 77 # 124
2012-11-21 # 21.08 # wifi-home # 50 # 29033
2012-11-21 # 21.15 # charging-start # 49 # 29253
2012-11-21 # 21.45 # charging-end # 95 # 32758
2012-11-22 # 00.12 # shutdown # 83 # 40089
2012-11-29 # 08.47 # boot # 100 # 114
2012-11-29 # 08.48 # wifi-home # 100 # 118
2012-11-29 # 09.41 # wifi-home-end # 98 # 3317
2012-11-29 # 14.46 # wifi-office # 81 # 21633
2012-11-29 # 16.15 # wifi-home # 76 # 26955
2012-11-29 # 17.04 # wifi-home-end # 74 # 29912
2012-11-29 # 23.31 # shutdown # 48 # 53146
Then an Org-mode file is generated accordingly.
"""
COPYRIGHT_YEAR = "2013"
COPYRIGHT_AUTHORS = """Karl Voit <tools@Karl-Voit.at>"""
def main():
global memacs
memacs = SimplePhoneLogsMemacs(
prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_description=PROG_DESCRIPTION,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS
)
memacs.handle_main()
if __name__ == "__main__":
main()
| 1,912 | Python | .py | 50 | 35.56 | 68 | 0.666127 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,754 | memacs_imap.py | novoid_Memacs/bin/memacs_imap.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2013-04-04 16:19:02 vk>
from memacs.imap import ImapMemacs
PROG_VERSION_NUMBER = "0.1"
PROG_VERSION_DATE = "2011-12-30"
PROG_SHORT_DESCRIPTION = "Memacs for imap emails"
PROG_TAG = "emails:imap"
PROG_DESCRIPTION = """The memacs module will connect to an IMAP Server,
fetch all mails of given folder (-f or --folder-name <folder>),
parses the mails and writes them to an orgfile.
This module uses configfiles (-c, --config-file <path>)
sample-config:
[memacs-imap]
host = imap.gmail.com
port = 993
user = foo@gmail.com
password = bar
"""
CONFIG_PARSER_NAME = "memacs-imap"
COPYRIGHT_YEAR = "2011-2013"
COPYRIGHT_AUTHORS = """Karl Voit <tools@Karl-Voit.at>,
Armin Wieser <armin.wieser@gmail.com>"""
def main():
global memacs
memacs = ImapMemacs(
prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_description=PROG_DESCRIPTION,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS,
use_config_parser_name=CONFIG_PARSER_NAME
)
memacs.handle_main()
if __name__ == "__main__":
main()
| 1,238 | Python | .py | 38 | 29.131579 | 71 | 0.708648 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,755 | memacs_chrome.py | novoid_Memacs/bin/memacs_chrome.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2013-10-03 15:18:07 br>
from memacs.chrome import Chrome
PROG_VERSION_NUMBER = "0.0"
PROG_VERSION_DATE = "2018-10-02"
PROG_SHORT_DESCRIPTION = "Memacs for chrome url history "
PROG_TAG = "chrome"
PROG_DESCRIPTION = """
This class will parse the Chrome history file (History) and
produce an org file with all your visited sites
"""
# set CONFIG_PARSER_NAME only, when you want to have a config file
# otherwise you can comment it out
# CONFIG_PARSER_NAME="memacs-example"
COPYRIGHT_YEAR = "2018"
COPYRIGHT_AUTHORS = """Bala Ramadurai <bala@balaramadurai.net>"""
def main():
global memacs
memacs = Chrome(
prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_description=PROG_DESCRIPTION,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS
)
memacs.handle_main()
if __name__ == "__main__":
main()
| 1,040 | Python | .py | 31 | 29.903226 | 66 | 0.70987 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,756 | memacs_ical.py | novoid_Memacs/bin/memacs_ical.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2013-04-04 16:18:50 vk>
from memacs.ical import CalendarMemacs
PROG_VERSION_NUMBER = "0.1"
PROG_VERSION_DATE = "2011-12-28"
PROG_SHORT_DESCRIPTION = "Memacs for ical Calendars"
PROG_TAG = "calendar"
PROG_DESCRIPTION = """This script parses a *.ics file and generates
entries for VEVENTs.
* Others like VALARM are not implemented for now.
"""
COPYRIGHT_YEAR = "2011-2013"
COPYRIGHT_AUTHORS = """Karl Voit <tools@Karl-Voit.at>,
Armin Wieser <armin.wieser@gmail.com>"""
def main():
global memacs
memacs = CalendarMemacs(prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_description=PROG_DESCRIPTION,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS
)
memacs.handle_main()
if __name__ == "__main__":
main()
| 1,091 | Python | .py | 28 | 30.178571 | 74 | 0.614002 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,757 | memacs_rss.py | novoid_Memacs/bin/memacs_rss.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2013-04-04 16:19:47 vk>
from memacs.rss import RssMemacs
PROG_VERSION_NUMBER = "0.1"
PROG_VERSION_DATE = "2011-12-27"
PROG_SHORT_DESCRIPTION = "Memacs for rss feeds"
PROG_TAG = "rss"
PROG_DESCRIPTION = """
This Memacs module will parse rss files.
RSS can be read from a file (-f FILE) or a url (-u URL).
The items are automatically appended to the org file.
Attention: RSS 2.0 is required
Sample Org-entries
: ** <2009-09-06 Sun 18:45> [[http://www.wikipedia.org/][link]]: Example entry
: Here is some text containing an interesting description.
: :PROPERTIES:
: :LINK: [[http://www.wikipedia.org/]]
: :GUID: rss guid
: :SUMMARY: Here is some text containing an interesting description.
: :ID: unique string per item
: :END:
"""
COPYRIGHT_YEAR = "2011-2013"
COPYRIGHT_AUTHORS = """Karl Voit <tools@Karl-Voit.at>,
Armin Wieser <armin.wieser@gmail.com>"""
def main():
global memacs
memacs = RssMemacs(
prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_description=PROG_DESCRIPTION,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS
)
memacs.handle_main()
if __name__ == "__main__":
main()
| 1,367 | Python | .py | 40 | 31 | 78 | 0.692249 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,758 | memacs_phonecalls_superbackup.py | novoid_Memacs/bin/memacs_phonecalls_superbackup.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2013-09-12 09:11 igb>
from memacs.phonecalls_superbackup import PhonecallsSuperBackupMemacs
PROG_VERSION_NUMBER = "0.1"
PROG_VERSION_DATE = "2012-03-08"
PROG_SHORT_DESCRIPTION = "Memacs for phonecalls"
PROG_TAG = "phonecalls"
PROG_DESCRIPTION = """
This Memacs module will parse output of phonecalls xml backup files
sample xml file:
<?xml version='1.0' encoding='UTF-8' standalone='yes' ?>
<calls count="8">
<call number="+43691234123" duration="59" date="1312563906092" type="1" />
<call number="06612341234" duration="22" date="1312541215834" type="2" />
<call number="-1" duration="382" date="1312530691081" type="1" />
<call number="+4312341234" duration="289" date="1312482327195" type="1" />
<call number="+4366412341234" duration="70" date="1312476334059" type="1" />
<call number="+4366234123" duration="0" date="1312473751975" type="2" />
<call number="+436612341234" duration="0" date="1312471300072" type="3" />
<call number="+433123412" duration="60" date="1312468562489" type="2" />
</calls>
Then an Org-mode file is generated.
"""
COPYRIGHT_YEAR = "2011-2013"
COPYRIGHT_AUTHORS = """Karl Voit <tools@Karl-Voit.at>,
Armin Wieser <armin.wieser@gmail.com>
Ian Barton <ian@manor-farm.org>"""
def main():
global memacs
memacs = PhonecallsSuperBackupMemacs(
prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_description=PROG_DESCRIPTION,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS
)
memacs.handle_main()
if __name__ == "__main__":
main()
| 1,725 | Python | .py | 42 | 37.690476 | 78 | 0.708657 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,759 | memacs_example.py | novoid_Memacs/bin/memacs_example.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2013-04-04 16:18:07 vk>
from memacs.example import Foo
PROG_VERSION_NUMBER = "0.0"
PROG_VERSION_DATE = "2011-12-18"
PROG_SHORT_DESCRIPTION = "Memacs for ... "
PROG_TAG = "mytag"
PROG_DESCRIPTION = """
This class will do ....
Then an Org-mode file is generated that contains ....
If your module needs a config file, please give information about usage:
sample config:
[memacs-example] <-- "memacs-example" has to be CONFIG_PARSER_NAME
foo = 0
bar = 1
"""
# set CONFIG_PARSER_NAME only, when you want to have a config file
# otherwise you can comment it out
# CONFIG_PARSER_NAME="memacs-example"
COPYRIGHT_YEAR = "2011-2013"
COPYRIGHT_AUTHORS = """Karl Voit <tools@Karl-Voit.at>,
Armin Wieser <armin.wieser@gmail.com>"""
def main():
global memacs
memacs = Foo(
prog_version=PROG_VERSION_NUMBER,
prog_version_date=PROG_VERSION_DATE,
prog_description=PROG_DESCRIPTION,
prog_short_description=PROG_SHORT_DESCRIPTION,
prog_tag=PROG_TAG,
copyright_year=COPYRIGHT_YEAR,
copyright_authors=COPYRIGHT_AUTHORS
# use_config_parser_name=CONFIG_PARSER_NAME
)
memacs.handle_main()
if __name__ == "__main__":
main()
| 1,265 | Python | .py | 38 | 29.815789 | 76 | 0.697617 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,760 | filenametimestamps.py | novoid_Memacs/memacs/filenametimestamps.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2022-05-15 16:18:39 vk>
import codecs
import logging
import os
import re
import sys
import time
from orgformat import OrgFormat, TimestampParseException
from memacs.lib.memacs import Memacs
from memacs.lib.orgproperty import OrgProperties
# Note: here, the day of the month is optional to allow "2019-10
# foo.txt" as valid ISO datestamp which will be changed to
# "<2019-10-01..." later on.
DATETIME_PATTERN = '([12]\d{3})-([01]\d)(-([0123]\d))?([- _T]([012]\d)[-_.]([012345]\d)([-_.]([012345]\d))?)?'
DATETIME_REGEX = re.compile('^' + DATETIME_PATTERN + '(--?' + DATETIME_PATTERN + ')?')
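# Editor's note -- illustrative matches for the pattern above; the file names
# are taken from this module's own description:
#     DATETIME_REGEX.match('2019-10 foo.txt')                   # year-month only
#     DATETIME_REGEX.match('2010-03-29T20.12 Divegraph.tiff')   # date + HH.MM
#     DATETIME_REGEX.match('2011-08-29T08.23.59_test.pdf')      # date + HH.MM.SS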
class FileNameTimeStamps(Memacs):
def _parser_add_arguments(self):
Memacs._parser_add_arguments(self)
self._parser.add_argument("-f", "--folder",
dest="filenametimestamps_folder",
action="append", nargs='*',
help="path to a folder to search for " +
"filenametimestamps, " +
"multiple folders can be specified: " +
"-f /path1 -f /path2")
self._parser.add_argument("-x", "--exclude", dest="exclude_folder", action="append", nargs='*',
help="path to excluding folder, for more excludes " +
"use this: -x /path/exclude -x /path/exclude")
self._parser.add_argument("--filelist", dest="filelist",
help="file containing a list of files to process. " +
"either use \"--folder\" or the \"--filelist\" argument, not both.")
self._parser.add_argument("--ignore-non-existing-items",
dest="ignore_nonexisting", action="store_true",
help="ignores non-existing files or folders within filelist")
self._parser.add_argument("-l", "--follow-links",
dest="follow_links", action="store_true",
help="follow symbolics links," +
" default False")
self._parser.add_argument("--skip-file-time-extraction",
dest="skip_filetime_extraction",
action="store_true",
help="by default, if there is an ISO datestamp without time, the mtime " +
"is used for time extraction, when the ISO days " +
"are matching. If you set this option, this extraction of the file time " +
"is omitted.")
self._parser.add_argument("--force-file-date-extraction",
dest="force_filedate_extraction",
action="store_true", help="force extraction of the file date and time" +
"even when there is an ISO datestamp in the filename.")
self._parser.add_argument("--skip-files-with-no-or-wrong-timestamp",
dest="skip_notimestamp_files",
action="store_true",
help="by default, files with a missing or a wrong time-stamp " +
"(2019-12-33) will be linked without Org mode time-stamp. " +
"If you set this option, these files will not be part of the " +
"output at all.")
self._parser.add_argument("--omit-drawers",
dest="omit_drawers", action="store_true",
help="do not generate drawers that contain " +
"ID properties. Can't be used with \"--append\".")
def _parser_parse_args(self):
Memacs._parser_parse_args(self)
if self._args.filenametimestamps_folder and self._args.filelist:
self._parser.error("You gave both \"--filelist\" and \"--folder\" argument. Please use either or.\n")
if self._args.omit_drawers and self._args.append:
self._parser.error("You gave both \"--append\" and \"--omit-drawers\" argument. Please use either or.\n")
if not self._args.filelist and not self._args.filenametimestamps_folder:
self._parser.error("no filenametimestamps_folder specified")
if self._args.filelist:
if not os.path.isfile(self._args.filelist):
self._parser.error("Check the filelist argument: " +
"[" + str(self._args.filelist) + "] is not an existing file")
if self._args.filenametimestamps_folder:
            for f in [item for sublist in self._args.filenametimestamps_folder for item in sublist]:
if not os.path.isdir(f):
self._parser.error("Check the folderlist argument: " +
"[" + str(f) + "] and probably more aren't folders")
def __ignore_dir(self, ignore_dir):
"""
        @param ignore_dir: directory to check
        @return: true  - if ignore_dir should be ignored
                 false - otherwise
"""
## [item for ... ] -> flatten out list of lists to a single list
if self._args.exclude_folder and \
ignore_dir in [item for sublist in self._args.exclude_folder for item in sublist]:
logging.info("ignoring ignore_dir: " + ignore_dir)
return True
else:
return False
def __handle_folder(self, folder):
"""
walks through a folder
"""
for rootdir, dirs, files in os.walk(folder,
followlinks=self._args.follow_links):
if not self.__ignore_dir(rootdir):
for file in files:
self.__handle_file(file, rootdir)
def __write_file(self, file, link, timestamp):
"""
write entry to org file (omit replacement of spaces in file names)
"""
output = OrgFormat.link(link="file:" + link, description=file, replacespaces=False)
properties = None
if not self._args.omit_drawers:
            # we need additional data for hashing because more than
            # one file can have the same timestamp
properties = OrgProperties(data_for_hashing=output)
self._writer.write_org_subitem(timestamp=timestamp,
output=output,
properties=properties)
def __check_datestamp_correctness(self, datestamp):
"""
        Checks a datestamp 'YYYY-MM-DD' if its components are a valid date.
"""
if len(datestamp) != 10:
return False
try:
year = int(datestamp[:4])
month = int(datestamp[5:7])
day = int(datestamp[8:10])
except ValueError:
logging.debug('__check_datestamp_correctness(' + str(datestamp) + ') does not follow YYYY-MM-DD with integers as components for year, month, day.')
return False
if year < 1900 or \
year > 2100 or \
month < 1 or \
month > 12 or \
day < 1 or \
day > 31:
logging.debug('__check_datestamp_correctness(' + str(datestamp) + ') NEGATIVE')
return False
else:
try:
                OrgFormat.strdate(datestamp)  # probe only: raises ValueError on invalid dates
except ValueError:
return False
return True
def __check_timestamp_correctness(self, timestamp):
"""
        Checks a timestamp 'HH:MM' (no seconds) if its components are a valid time.
"""
if len(timestamp) != 5 or timestamp[2:3] != ':':
return False
try:
hour = int(timestamp[:2])
minute = int(timestamp[-2:])
except ValueError:
            logging.debug('__check_timestamp_correctness(' + str(timestamp) + ') does not follow HH:MM with integers as components for hour and minute (and leading zeros).')
return False
if hour < 0 or \
hour > 23 or \
minute < 0 or \
minute > 59:
logging.debug('__check_timestamp_correctness(' + str(timestamp) + ') NEGATIVE')
return False
else:
return True
def __extract_days_and_times(self, match):
"""Takes a RegEx match group of corresponding DATETIME_REGEX and
derives booleans that indicate the existance of months,
days, hours and minutes. Further more, it extracts ISO
days ('YYYY-MM-DD') for one and an optional a second day
and their corresponding time-stamps lacking their optional
seconds ('HH:MM').
"""
# DATETIME_REGEX.match('2019-10-03T01.02.03--2019-10-04T23.59.59') results in:
# 1 ('2019',
# 2 '10',
# (3) '-03',
# 4 '03',
# (5) 'T01.02.03',
# 6 '01',
# 7 '02',
# (8) '.03',
# 9 '03',
# (10) '--2019-10-04T23.59.59',
# 11 '2019',
# 12 '10',
# (13) '-04',
# 14 '04',
# (15) 'T23.59.59',
# 16 '23',
# 17 '59',
# (18) '.59',
# 19 '59')
has_1ym = match.group(1) and match.group(2)
has_1ymd = has_1ym and match.group(4)
has_1ymdhm = has_1ymd and match.group(6) and match.group(7)
has_1ymdhms = has_1ymdhm and match.group(9)
has_2ym = match.group(11) and match.group(12)
has_2ymd = has_2ym and match.group(14)
        has_2ymdhm = has_2ymd and match.group(16) and match.group(17)
has_2ymdhms = has_2ymdhm and match.group(19)
# initialize return values with None - their default if not found
day1 = None
day2 = None
time1 = None
time2 = None
# this method does not make any sense when the first day
# is not found. Please check for a positive match before.
assert(has_1ym)
# Note: assumption is that the match.group entries do
# contain leading zeros already.
if has_1ymd:
day1 = match.group(1) + '-' + match.group(2) + '-' + match.group(4)
elif has_1ym:
            # if the day of month is missing, assume day 1; this allows
            # '2019-10' as date-stamp, which becomes '2019-10-01'
day1 = match.group(1) + '-' + match.group(2) + '-01'
has_1ymd = True # overwrite value from data with value including the added day
if has_1ymdhms or has_1ymdhm:
time1 = match.group(6) + ':' + match.group(7)
if has_2ymd:
day2 = match.group(11) + '-' + match.group(12) + '-' + match.group(14)
elif has_2ym:
# see comment above about missing day of month
day2 = match.group(11) + '-' + match.group(12) + '-01'
if has_2ymdhms or has_2ymdhm:
time2 = match.group(16) + ':' + match.group(17)
return has_1ymd, has_1ymdhm, has_2ymd, has_2ymdhm, day1, time1, day2, time2
def __check_if_days_in_timestamps_are_same(self, file_datetime, filename_datestamp):
"""handles timestamp differences for timestamps containing only day
information (and not times). filename_datestamp is like
'YYYY-MM-DD'."""
file_year = file_datetime.tm_year
file_month = file_datetime.tm_mon
file_day = file_datetime.tm_mday
try:
filename_year = int(filename_datestamp[:4])
filename_month = int(filename_datestamp[5:7])
filename_day = int(filename_datestamp[8:10])
except ValueError:
logging.debug('__check_if_days_in_timestamps_are_same(..., ' + str(filename_datestamp) + ') does not follow YYYY-MM-DD with integers as components for year, month, day.')
return False
if file_year != filename_year or \
file_month != filename_month or \
file_day != filename_day:
return False
logging.debug('__check_if_days_in_timestamps_are_same: days match!')
return True
def __handle_file(self, file, rootdir):
"""
handles a file (except ending with a tilde)
"""
# don't handle emacs tmp files (file~)
        if file.endswith('~'):
return
link = os.path.join(rootdir, file)
logging.debug('__handle_file: ' + '#' * 50)
logging.debug('__handle_file: ' + link)
orgdate = False # set to default value
if self._args.force_filedate_extraction:
# in this case, skip any clever
# extraction mechanism to extract
# date/time from file name and use
# the mtime instead:
logging.debug('__handle_file: force_filedate_extraction: using datetime from mtime of file')
file_datetime = time.localtime(os.path.getmtime(link))
orgdate = OrgFormat.date(file_datetime, show_time=True)
self.__write_file(file, link, orgdate)
return
# very basic checks for correctness (e.g., month=20, hour=70)
# are part of these RegEx (and do not have to be checked
# below)
filename_timestamp_match = DATETIME_REGEX.match(file)
        logging.debug('__handle_file: filename_timestamp_match? ' + str(filename_timestamp_match is not None))
if filename_timestamp_match:
# day1/2 are like 'YYYY-MM-DD' time1/2 like 'HH:MM':
has_1ymd, has_1ymdhm, has_2ymd, has_2ymdhm, \
day1, time1, day2, time2 = self.__extract_days_and_times(filename_timestamp_match)
# Note: following things are available for formatting:
# self._args.inactive_timestamps -> Bool
# OrgFormat.strdate('YYYY-MM-DD', inactive=False) -> <YYYY-MM-DD Sun>
# OrgFormat.strdate('YYYY-MM-DD HH:MM', inactive=False, show_time=True) -> <YYYY-MM-DD Sun HH:MM>
assert(has_1ymd)
try:
if has_1ymdhm:
if self.__check_datestamp_correctness(day1):
if self.__check_timestamp_correctness(time1):
orgdate = OrgFormat.strdate(day1 + ' ' + time1, inactive=self._args.inactive_timestamps, show_time=True)
else:
logging.warning('File "' + file + '" has an invalid timestamp (' + str(time1) + '). Skipping this faulty time-stamp.')
orgdate = OrgFormat.strdate(day1, inactive=self._args.inactive_timestamps)
else:
logging.warning('File "' + file + '" has an invalid datestamp (' + str(day1) + ').')
# omit optional second day if first has an issue:
has_2ymd = False
has_2ymdhm = False
orgdate = False
elif has_1ymd: # missing time-stamp for day1
if self.__check_datestamp_correctness(day1):
if not self._args.skip_filetime_extraction:
# we've got only a day but we're able to determine
# time from file mtime, if same as ISO day in file
# name:
logging.debug('__handle_file: try to get file time from mtime if days match between mtime and filename ISO ...')
file_datetime = time.localtime(os.path.getmtime(link))
if self.__check_if_days_in_timestamps_are_same(file_datetime, day1):
orgdate = OrgFormat.date(file_datetime, inactive=self._args.inactive_timestamps, show_time=True)
else:
logging.debug('__handle_file: day of mtime and filename ISO differs, using filename ISO day')
orgdate = OrgFormat.strdate(day1, inactive=self._args.inactive_timestamps)
else:
# we've got only a day and determining mtime
# is not planned, so use the day as date-stamp
orgdate = OrgFormat.strdate(day1, inactive=self._args.inactive_timestamps)
else:
logging.warning('File "' + file + '" has an invalid datestamp (' + str(day1) + ').')
orgdate = False
else:
logging.warning('File "' + file + '" has an invalid datestamp (' + str(day1) + '). Skipping this faulty date.')
# omit optional second day if first has an issue:
has_2ymd = False
has_2ymdhm = False
# there is a time range:
if has_2ymdhm:
assert(day2)
if self.__check_datestamp_correctness(day2):
if self.__check_timestamp_correctness(time2):
orgdate += '--' + OrgFormat.strdate(day2 + ' ' + time2, inactive=self._args.inactive_timestamps, show_time=True)
else:
logging.warning('File "' + file + '" has an invalid timestamp (' + str(time2) + '). Skipping this faulty time-stamp.')
orgdate += '--' + OrgFormat.strdate(day2, inactive=self._args.inactive_timestamps)
else:
logging.warning('File "' + file + '" has an invalid datestamp (' + str(day2) + '). Skipping this faulty date.')
elif has_2ymd:
assert(day2)
if self.__check_datestamp_correctness(day2):
orgdate += '--' + OrgFormat.strdate(day2, inactive=self._args.inactive_timestamps)
else:
logging.warning('File "' + file + '" has an invalid datestamp (' + str(day2) + '). Skipping this faulty date.')
except TimestampParseException:
logging.error('File "' + str(file) + '" has in invalid date- or timestamp. OrgFormat of one of day1: "' +
str(day1) + '" time1: "' + str(time1) + '" day2: "' +
str(day2) + '" time2: "' + str(time2) + '" ' +
'failed with TimestampParseException. Skipping this faulty date.')
orgdate = False
else:
logging.debug('__handle_file: no date- nor timestamp')
orgdate = False
if not orgdate and self._args.skip_notimestamp_files:
            logging.debug('__handle_file: file had no or a wrong time-stamp and you decided to skip such files.')
return
self.__write_file(file, link, orgdate)
logging.debug('__handle_file: using orgdate: ' + str(orgdate))
return
def _main(self):
if self._args.filenametimestamps_folder:
for folder in [item for sublist in self._args.filenametimestamps_folder for item in sublist]:
self.__handle_folder(folder)
elif self._args.filelist:
for rawitem in codecs.open(self._args.filelist, "r", "utf-8"):
item = rawitem.strip()
if not os.path.exists(item):
if self._args.ignore_nonexisting:
logging.debug("File or folder does not exist: [%s] (add due to set ignore-nonexisting argument)", item)
self.__handle_file(os.path.basename(item), os.path.dirname(item))
else:
logging.warning("File or folder does not exist: [%s]", item)
else:
self.__handle_file(os.path.basename(item), os.path.dirname(item))
else:
logging.error("\nERROR: You did not provide \"--filelist\" nor \"--folder\" argument. Please use one of them.\n")
sys.exit(3)
| 20,271
|
Python
|
.py
| 368
| 39.608696
| 182
| 0.536166
|
novoid/Memacs
| 1,003
| 67
| 17
|
GPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,761
|
arbtt.py
|
novoid_Memacs/memacs/arbtt.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2017-02-07 19:25 manu>
import calendar
import io
import logging
import os.path
import shutil
import subprocess
import sys
import time
from memacs.lib.memacs import Memacs
from memacs.lib.orgproperty import OrgProperties
from memacs.lib.reader import UnicodeCsvReader
ARBTT_STATS = 'arbtt-stats'
ARBTT_FORMAT = '%m/%d/%y %H:%M:%S'
class Arbtt(Memacs):
def _parser_add_arguments(self):
"""
overwritten method of class Memacs
add additional arguments
"""
Memacs._parser_add_arguments(self)
self._parser.add_argument(
"--logfile",
dest="log",
action="store",
help="use this file instead of ~/.arbtt/capture.log")
self._parser.add_argument(
"--categorizefile",
dest="cfg",
action="store",
help="use this file instead of ~/.arbtt/categorize.cfg")
self._parser.add_argument(
"--also-inactive",
dest="inactive",
action="store_true",
help="include inactive samples")
self._parser.add_argument(
"--intervals",
dest="intervals",
action="append", required=True,
help="list intervals of tag or category " + \
"(the latter has to end with a colon)"
)
self._parser.add_argument(
"--csv",
dest="csv",
action="store",
help="csv file"
)
def _parser_parse_args(self):
"""
overwritten method of class Memacs
all additional arguments are parsed in here
"""
Memacs._parser_parse_args(self)
if self._args.log:
if not os.path.isfile(self._args.log):
self._parser.error("logfile does not exist")
if self._args.cfg:
if not os.path.isfile(self._args.cfg):
self._parser.error("config file does not exist")
if self._args.csv:
if not os.path.isfile(self._args.csv):
self._parser.error("csv file does not exist")
        if self._args.csv and (self._args.log or self._args.inactive):
            self._parser.error("You gave both \"--csv\" and \"--logfile\" or \"--also-inactive\" arguments. " + \
                               "Please use one or the other.")
if not self._args.intervals:
self._parser.error("No intervals tag or category specified")
def get_sec(self, t):
"""
get H:M:S as seconds
@param t: hms string
@return: seconds
"""
h, m, s = [int(i) for i in t.split(':')]
return h*3600 + m*60 + s
def get_timestamp(self, dt):
"""
1. parse datetime string
2. utc to local time
@param dt: datetime string
@return: datetime in org format
"""
dt_tuple = time.strptime(dt, ARBTT_FORMAT)
dt_local = time.localtime(calendar.timegm(dt_tuple))
return time.strftime('<%Y-%m-%d %a %H:%M:%S>', dt_local)
def get_timerange(self, begin, end):
"""
return a date+time range (including seconds)
@param begin: start date (string)
@param end: end date (string)
@return: datetime range in org format
"""
return "%s--%s" % (self.get_timestamp(begin),
self.get_timestamp(end))
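    # A sketch of the expected conversion (assumes the local timezone is UTC,
    # with ARBTT_FORMAT = '%m/%d/%y %H:%M:%S'):
    #   get_timestamp('02/07/17 19:25:00')
    #     -> '<2017-02-07 Tue 19:25:00>'
    #   get_timerange('02/07/17 19:25:00', '02/07/17 20:00:00')
    #     -> '<2017-02-07 Tue 19:25:00>--<2017-02-07 Tue 20:00:00>'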
def __parse_sample(self, target, row):
"""
parse a row of csv and write entry
@param target: tag or category
@param row: list of columns
"""
tag, begin, end, duration = row
timestamp = self.get_timerange(begin, end)
duration = self.get_sec(duration)
properties = OrgProperties(data_for_hashing=timestamp)
properties.add('DURATION', duration)
tags = []
# remove colon from output
if target.endswith(':'):
target = target[:-1]
tags.append(target)
elif ':' in target:
target = target.split(':')[0]
output = target.capitalize()
tags.append(tag)
self._writer.write_org_subitem(
timestamp=timestamp,
output=output,
tags=tags,
properties=properties
)
def __handle_intervals(self, target):
"""
handles an interval of data records
@param target: tag or category
"""
command = [
ARBTT_STATS,
'--output-format=csv',
'--intervals=%s' % target
]
if self._args.log:
command.append('--logfile=%s' % self._args.log)
if self._args.cfg:
command.append('--categorizefile=%s' % self._args.cfg)
if self._args.inactive:
command.append('--also-inactive')
        if not self._args.csv:
            stats = subprocess.check_output(command)
            f = io.StringIO(stats.decode('utf-8'))
        else:
            # read a previously dumped csv file instead of calling arbtt-stats
            f = open(self._args.csv)
# skip header columns
reader = UnicodeCsvReader(f, delimiter=',')
next(reader)
for row in reader:
self.__parse_sample(target, row)
def _main(self):
"""
        gets automatically called from Memacs class
"""
# check if arbtt is installed
        if shutil.which(ARBTT_STATS) is None:
logging.error(ARBTT_STATS + ': command not found')
sys.exit(1)
for target in self._args.intervals:
self.__handle_intervals(target)
| 5,663
|
Python
|
.py
| 161
| 25.341615
| 111
| 0.55926
|
novoid/Memacs
| 1,003
| 67
| 17
|
GPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,762
|
ical.py
|
novoid_Memacs/memacs/ical.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2019-11-05 16:01:05 vk>
import datetime
import logging
import os
import pytz
import sys
import time
from orgformat import OrgFormat
from memacs.lib.memacs import Memacs
from memacs.lib.orgproperty import OrgProperties
from memacs.lib.reader import CommonReader
try:
from icalendar import Calendar
except ImportError as e:
print("please install python package \"icalendar\"")
print(e)
sys.exit(3)
class CalendarMemacs(Memacs):
def _parser_add_arguments(self):
self._parser.add_argument("-c", "--calendar-url", dest="calendar_url",
help="url to calendar")
self._parser.add_argument("-cf", "--calendar-file",
dest="calendar_file",
help="path to calendar")
self._parser.add_argument(
"-x", "--exclude", dest="excludelist",
help="path to one or more folders seperated with \"|\"," + \
"i.e.:\"/path/to/folder1|/path/to/folder2|..\"")
def _parser_parse_args(self):
Memacs._parser_parse_args(self)
if not self._args.calendar_url and not self._args.calendar_file:
self._parser.error("specify a calendar url or calendar file")
if self._args.calendar_url and self._args.calendar_file:
self._parser.error(
"only set a url or path to a calendar not both.")
if self._args.calendar_file \
and not os.path.exists(self._args.calendar_file):
self._parser.error("calendar path not exists")
def __handle_vcalendar(self, component):
"""
handles a VCALENDAR Component
Sets fallback timezone for parsing of "floating events"
(events which don't specify a timezone), if a `X-WR-TIMEZONE`
line is provided in the calendar.
@param component: icalendar component
"""
# Set timezone
timezone = component.get('x-wr-timezone')
if timezone:
self.fallback_tz = pytz.timezone(timezone)
def __handle_rrule(self, component):
"""
        Handles the calendar's rrule (used for recurring events)
        returns an org string for the recurring date
"""
freq = self.__vtext_to_unicode(component.get('freq'))
if freq == "MINUTELY":
raise NotImplemented
elif freq == "HOURLY":
raise NotImplemented
elif freq == "DAILY":
return "+1d"
elif freq == "WEEKLY":
return "+1w"
elif freq == "YEARLY":
return "+1y"
else:
return ""
def __vtext_to_unicode(self, vtext, nonetype=None):
"""
        @return: unicode string if vtext is set,
                 nonetype otherwise
"""
if vtext:
return str(vtext)
else:
return nonetype
def __parse_ical_dt(self, component):
"""
Parse an iCalendar DATE or DATE-TIME component.
@return datetime.date (possibly datetime.datetime)
@param component: the iCalendar component to parse
"""
# Lean on `icalendar` to handle timezones, especially around
# `VTIMEZONE` specifications, which can be complex.
if isinstance(component.dt, datetime.date) and not isinstance(component.dt, datetime.datetime):
# DATE
return component.dt
elif isinstance(component.dt, datetime.datetime) and component.dt.tzinfo is not None:
# DATE-TIME w/ TZ - could be UTC, VTIMEZONE, or inline IANA-style
return component.dt.astimezone()
elif self.fallback_tz:
# Floating DATE-TIME w/ fallback TZ
dt_str = component.to_ical().decode('utf-8')
if len(dt_str) == 15: # YYYYMMDDTHHMMSS
return self.fallback_tz.localize(datetime.datetime.strptime(dt_str, '%Y%m%dT%H%M%S')).astimezone()
else:
raise ValueError("Invalid date format: " + dt_str)
else:
# Floating DATE-TIME
return component.dt
def __get_org_datetime_range(self, dtstart, dtend):
"""
@return string (range in Org format)
"""
assert isinstance(dtstart, datetime.date)
assert isinstance(dtend, datetime.date)
dates_only = not isinstance(dtend, datetime.datetime)
# Per the author of RFC5545, a one-day event should have the
# same start and end date, but the general practice in the
# wild (including Google Calendar) is to have a one-day event
# end the following day.
if dates_only:
dtend -= datetime.timedelta(days=1)
dtstart = dtstart.timetuple()
dtend = dtend.timetuple()
if dates_only and dtstart == dtend:
return OrgFormat.date(dtstart)
else:
return OrgFormat.daterange_autodetect_time(dtstart, dtend)
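    # Worked example for the all-day convention above (dates are made up):
    #   DTSTART;VALUE=DATE:20191105 with DTEND;VALUE=DATE:20191106 is a
    #   one-day event; after subtracting one day both dates are equal and a
    #   single stamp like <2019-11-05 Tue> is returned. With date-times, a
    #   range such as <2019-11-05 Tue 16:00>--<2019-11-05 Tue 17:00> is
    #   produced by OrgFormat.daterange_autodetect_time().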
def __handle_vevent(self, component):
"""
        handles a VEVENT component and writes the event
@param component: icalendar component
"""
logging.debug(component)
summary = self.__vtext_to_unicode(component.get('summary'),
nonetype="")
location = self.__vtext_to_unicode(component.get('location'))
description = self.__vtext_to_unicode(component.get('description'))
dtstart = self.__parse_ical_dt(component.get('DTSTART'))
## notice: end date/time is optional; no end date results in end date 9999-12-31
        if 'DTEND' in component:
dtend = self.__parse_ical_dt(component.get('DTEND'))
orgdate = self.__get_org_datetime_range(dtstart, dtend)
else:
have_time = isinstance(dtstart, datetime.datetime)
orgdate = OrgFormat.date(dtstart.timetuple(), show_time=have_time) + "--<9999-12-31 Fri>"
logging.debug(orgdate + " " + summary)
# format: 20091207T180000Z
# not used: Datestamp created
# dtstamp = self.__vtext_to_unicode(component.get('dtstamp'))
# handle repeating events
        # not implemented because an org-mode datetime range cannot be repeated
# component.get('rrule')
org_properties = OrgProperties(data_for_hashing=component.get('UID'))
        if location is not None:
            org_properties.add("LOCATION", location)
        if description is not None:
            org_properties.add("DESCRIPTION", description)
self._writer.write_org_subitem(output=summary,
properties=org_properties,
timestamp=orgdate)
def _main(self):
# getting data
if self._args.calendar_file:
data = CommonReader.get_data_from_file(self._args.calendar_file,
encoding=None)
elif self._args.calendar_url:
data = CommonReader.get_data_from_url(self._args.calendar_url)
self.fallback_tz = None
# read and go through calendar
cal = Calendar.from_ical(data)
for component in cal.walk():
if component.name == "VCALENDAR":
self.__handle_vcalendar(component)
elif component.name == "VEVENT":
self.__handle_vevent(component)
else:
logging.debug("Not handling component: " + component.name)
| 7,473
|
Python
|
.py
| 173
| 32.763006
| 114
| 0.603776
|
novoid/Memacs
| 1,003
| 67
| 17
|
GPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,763
|
sms_superbackup.py
|
novoid_Memacs/memacs/sms_superbackup.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2019-11-06 15:27:45 vk>
import logging
import os
import sys
import time
import xml.sax
from orgformat import OrgFormat
from xml.sax._exceptions import SAXParseException
from memacs.lib.memacs import Memacs
from memacs.lib.orgproperty import OrgProperties
from memacs.lib.reader import CommonReader
class SmsSaxHandler(xml.sax.handler.ContentHandler):
"""
    Sax handler for sms backup xml files produced by SuperBackup.
See documentation memacs_sms.org for an example.
"""
def __init__(self, writer, ignore_incoming, ignore_outgoing):
"""
Ctor
@param writer: orgwriter
        @param ignore_incoming: ignore incoming smses
        @param ignore_outgoing: ignore outgoing smses
        """
self._writer = writer
self._ignore_incoming = ignore_incoming
self._ignore_outgoing = ignore_outgoing
def startElement(self, name, attrs):
"""
at every <sms> tag write to orgfile
"""
logging.debug("Handler @startElement name=%s,attrs=%s", name, attrs)
if name == "sms":
#sms_subject = attrs['subject']
sms_date = int(attrs['date']) / 1000 # unix epoch
sms_body = attrs['body']
sms_address = attrs['address']
sms_time = attrs['time']
sms_service_center = attrs['service_center']
sms_type_incoming = int(attrs['type']) == 1
contact_name = attrs['name']
skip = False
            if sms_type_incoming:
output = "SMS from "
if self._ignore_incoming:
skip = True
else:
output = "SMS to "
if self._ignore_outgoing:
skip = True
if not skip:
name_string = ""
if contact_name:
name_string = '[[contact:' + contact_name + '][' + contact_name + ']]'
else:
name_string = "Unknown"
output += name_string + ": "
#if sms_subject != "null":
# in case of MMS we have a subject
# output += sms_subject
# notes = sms_body
#else:
# output += sms_body
# notes = ""
notes = sms_body
timestamp = OrgFormat.date(time.gmtime(sms_date), show_time=True)
data_for_hashing = output + timestamp + notes
properties = OrgProperties(data_for_hashing=data_for_hashing)
properties.add("NUMBER", sms_address)
properties.add("NAME", contact_name)
properties.add("SMS_SERVICE_CENTER", sms_service_center)
properties.add("TIME", sms_time)
self._writer.write_org_subitem(output=output,
timestamp=timestamp,
note=notes,
properties=properties)
class SmsSuperBackupMemacs(Memacs):
def _parser_add_arguments(self):
"""
overwritten method of class Memacs
add additional arguments
"""
Memacs._parser_add_arguments(self)
self._parser.add_argument(
"-f", "--file", dest="smsxmlfile",
action="store", required=True,
help="path to sms xml backup file")
self._parser.add_argument(
"--ignore-incoming", dest="ignore_incoming",
action="store_true",
help="ignore incoming smses")
self._parser.add_argument(
"--ignore-outgoing", dest="ignore_outgoing",
action="store_true",
help="ignore outgoing smses")
def _parser_parse_args(self):
"""
overwritten method of class Memacs
all additional arguments are parsed in here
"""
Memacs._parser_parse_args(self)
        if not (os.path.exists(self._args.smsxmlfile) and
                os.access(self._args.smsxmlfile, os.R_OK)):
self._parser.error("input file not found or not readable")
def _main(self):
"""
        gets automatically called from Memacs class
read the lines from sms backup xml file,
parse and write them to org file
"""
data = CommonReader.get_data_from_file(self._args.smsxmlfile)
try:
xml.sax.parseString(data.encode('utf-8'),
SmsSaxHandler(self._writer,
self._args.ignore_incoming,
self._args.ignore_outgoing))
except SAXParseException:
logging.error("No correct XML given")
sys.exit(1)
| 4,854
|
Python
|
.py
| 119
| 28.033613
| 90
| 0.538674
|
novoid/Memacs
| 1,003
| 67
| 17
|
GPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,764
|
twitter.py
|
novoid_Memacs/memacs/twitter.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2019-11-06 15:28:28 vk>
import logging
import sys
from dateutil import parser
from orgformat import OrgFormat
from twython import Twython, TwythonError
from memacs.lib.memacs import Memacs
from memacs.lib.orgproperty import OrgProperties
class Twitter(Memacs):
def _main(self):
APP_KEY = self._get_config_option("APP_KEY")
APP_SECRET = self._get_config_option("APP_SECRET")
OAUTH_TOKEN = self._get_config_option("OAUTH_TOKEN")
OAUTH_TOKEN_SECRET = self._get_config_option("OAUTH_TOKEN_SECRET")
screen_name = self._get_config_option("screen_name")
count = self._get_config_option("count")
twitter = Twython(
APP_KEY,
APP_SECRET,
OAUTH_TOKEN,
OAUTH_TOKEN_SECRET
)
try:
home_timeline = twitter.get_home_timeline(screenname=screen_name, count=count)
except TwythonError as e:
logging.error(e)
sys.exit(1)
for tweet in home_timeline:
            # strptime doesn't support timezone info, so we are using dateutil.
date_object = parser.parse(tweet['created_at'])
timestamp = OrgFormat.date(date_object, show_time=True)
            try:
                # Data is already Unicode, so don't try to re-encode it.
                output = tweet['text']
            except Exception:
                logging.error(sys.exc_info()[0])
                print("Error: ", sys.exc_info()[0])
                continue
data_for_hashing = output + timestamp + output
properties = OrgProperties(data_for_hashing=data_for_hashing)
properties.add("name", tweet['user']['name'])
properties.add("twitter_id", tweet['id'])
properties.add("contributors", tweet['contributors'])
properties.add("truncated", tweet['truncated'])
properties.add("in_reply_to_status_id", tweet['in_reply_to_status_id'])
properties.add("favorite_count", tweet['favorite_count'])
properties.add("source", tweet['source'])
properties.add("retweeted", tweet['retweeted'])
properties.add("coordinates", tweet['coordinates'])
properties.add("entities", tweet['entities'])
self._writer.write_org_subitem(timestamp=timestamp,
                                           output=output,
                                           properties=properties)
| 2,496
|
Python
|
.py
| 54
| 34.87037
| 90
| 0.598763
|
novoid/Memacs
| 1,003
| 67
| 17
|
GPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,765
|
battery.py
|
novoid_Memacs/memacs/battery.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import datetime
import logging
import sys
import batinfo
from orgformat import OrgFormat
from memacs.lib.memacs import Memacs
from memacs.lib.orgproperty import OrgProperties
ROOT = '/sys/class/power_supply'
class Battery(Memacs):
def _parser_add_arguments(self):
"""
overwritten method of class Memacs
add additional arguments
"""
Memacs._parser_add_arguments(self)
self._parser.add_argument(
"-b", "--battery", dest="name",
action="store", default="BAT0",
help="select battery to read stats from")
self._parser.add_argument(
"-p", "--path", dest="path",
action="store", default=ROOT,
help=argparse.SUPPRESS)
self._parser.add_argument(
"--output-format", dest="output_format",
action="store", default="{battery.name}",
help="format string to use for the output"
)
def _parser_parse_args(self):
"""
overwritten method of class Memacs
all additional arguments are parsed in here
"""
Memacs._parser_parse_args(self)
def _handle_battery(self, bat):
"""
handle single battery, e.g. BAT0
"""
# calculate watt usage
consumption = float(bat.current_now / 1000000.0 *
bat.voltage_now / 1000000.0)
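        # sysfs typically reports current_now in µA and voltage_now in µV,
        # hence the two 1e6 divisors: e.g. 1500000 µA * 12000000 µV
        # -> 1.5 A * 12.0 V = 18.0 W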
timestamp = OrgFormat.date(datetime.datetime.now(), show_time=True)
output = self._args.output_format.format(battery=bat)
properties = OrgProperties(data_for_hashing=timestamp)
properties.add("CYCLE_COUNT", bat.cycle_count)
properties.add("CAPACITY", '%s%%' % bat.capacity)
properties.add("STATUS", bat.status.lower())
if consumption:
properties.add("CONSUMPTION", '%.1f W' % consumption)
self._writer.write_org_subitem(timestamp=timestamp,
output=output,
properties=properties)
def _main(self):
"""
        gets automatically called from Memacs class
"""
try:
batteries = batinfo.Batteries(self._args.path)
for bat in batteries.stat:
if self._args.name in bat.name:
self._handle_battery(bat)
        except OSError as e:
            logging.error("no battery present: %s", e)
sys.exit(1)
| 2,511
|
Python
|
.py
| 67
| 27.462687
| 75
| 0.589182
|
novoid/Memacs
| 1,003
| 67
| 17
|
GPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,766
|
firefox.py
|
novoid_Memacs/memacs/firefox.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2019-11-06 15:23:20 vk>
import datetime
import os
import sqlite3
from orgformat import OrgFormat
from memacs.lib.memacs import Memacs
from memacs.lib.orgproperty import OrgProperties
class Firefox(Memacs):
def _parser_add_arguments(self):
"""
overwritten method of class Memacs
add additional arguments
"""
Memacs._parser_add_arguments(self)
self._parser.add_argument(
"-f", "--file", dest="historystore",
action="store", type=open , required=True,
help="""path to places.sqlite file. usually in
/home/rgrau/.mozilla/firefox/__SOMETHING__.default/places.sqlite """)
self._parser.add_argument(
"--output-format", dest="output_format",
action="store", default="[[{url}][{title}]]",
help="format string to use for the headline")
self._parser.add_argument(
"--omit-drawer", dest="omit_drawer",
action="store_true", required=False,
help="""Use a minimal output format that omits the PROPERTIES drawer.""")
def _parser_parse_args(self):
"""
overwritten method of class Memacs
all additional arguments are parsed in here
"""
Memacs._parser_parse_args(self)
def _handle_url(self, params):
timestamp = datetime.datetime.fromtimestamp(int(params['timestamp']/1000000))
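        # places.sqlite stores last_visit_date as microseconds since the
        # Unix epoch, hence the division by 1000000 above; e.g.
        # 1573050465000000 µs -> 1573050465 s (a date in November 2019).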
if not self._args.omit_drawer:
properties = OrgProperties()
if (params['title'] == "") :
params['title'] = params['url']
properties.add('URL', params['url'])
properties.add('VISIT_COUNT', params['visit_count'])
output = OrgFormat.link(params['url'], params['title'])
        try:
            # the format string may reference missing keys; fall back to the plain link
            output = self._args.output_format.format(**params)
        except Exception:
            pass
if self._args.omit_drawer:
self._writer.write_org_subitem(
timestamp=OrgFormat.date(timestamp, show_time=True),
output=output, properties=None)
else:
self._writer.write_org_subitem(
timestamp=OrgFormat.date(timestamp, show_time=True),
output=output, properties=properties)
def _main(self):
"""
        gets automatically called from Memacs class
"""
conn = sqlite3.connect(os.path.abspath(self._args.historystore.name))
query = conn.execute("""
select url, title, visit_count,
-- datetime(last_visit_date/1000000, 'unixepoch')
last_visit_date
from moz_places
where last_visit_date IS NOT NULL
order by last_visit_date """)
for row in query:
self._handle_url({
'url' : row[0],
'title' : row[1],
'visit_count' : row[2],
'timestamp' : row[3],
})
| 3,015
|
Python
|
.py
| 75
| 30.16
| 85
| 0.58165
|
novoid/Memacs
| 1,003
| 67
| 17
|
GPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,767
|
imap.py
|
novoid_Memacs/memacs/imap.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2012-09-06 19:54:04 armin>
import imaplib
import logging
import sys
from memacs.lib.mailparser import MailParser
from memacs.lib.memacs import Memacs
class ImapMemacs(Memacs):
def _parser_add_arguments(self):
"""
overwritten method of class Memacs
add additional arguments
"""
Memacs._parser_add_arguments(self)
self._parser.add_argument(
"-l", "--list-folders",
dest="list_folders",
action="store_true",
help="show possible folders of connection")
self._parser.add_argument(
"-f", "--folder_name",
dest="folder_name",
help="name of folder to get emails from, " + \
"when you don't know name call --list-folders")
def _parser_parse_args(self):
"""
overwritten method of class Memacs
all additional arguments are parsed in here
"""
Memacs._parser_parse_args(self)
if not self._args.list_folders and not self._args.folder_name:
self._parser.error("please specify a folder " + \
"use --list to find a folder")
def __fetch_mails_and_write(self, server, message_ids, folder_name):
"""
        Fetches all headers, lets MailParser parse each mail,
        and writes the result to the output file
        @param server: imaplib IMAP4_SSL object
@param message_ids: list of ids to fetch
@param folder_name: folder name of connection
"""
num = ",".join(message_ids)
logging.debug(num)
typ, data = server.uid("fetch",
num,
"(BODY.PEEK[HEADER.FIELDS " + \
"(Date Subject " + \
"From To Cc Reply-To Message-ID)])")
if typ == "OK":
i = 0
            # we have to go in steps of 2 because every second entry is just a ")"
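            # a sketch of the shape of `data` for two mails (exact bytes
            # vary by server):
            #   [(b'1 (BODY[HEADER.FIELDS (...)] {123}', b'Date: ...\r\nSubject: ...'),
            #    b')',
            #    (b'2 (BODY[HEADER.FIELDS (...)] {456}', b'Date: ...'),
            #    b')']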
for i in range(0, len(data), 2):
message = data[i][1]
timestamp, output, note, properties = \
MailParser.parse_message(message)
                # just for debugging in orgfile
# properties.add("NUM",data[i][0][:5])
self._writer.write_org_subitem(timestamp,
output,
note,
properties)
else:
logging.error("Could not fetch mails typ - %s", typ)
            server.logout()
sys.exit(1)
def __handle_folder(self, server, folder_name):
"""
Selects the folder, gets all ids, and calls
self.__fetch_mails_and_write(...)
        @param server: imaplib IMAP4_SSL object
@param folder_name: folder to select
"""
logging.debug("folder: %s", folder_name)
# selecting the folder
typ, data = server.select(folder_name)
if typ != "OK":
logging.error("could not select folder %s", folder_name)
server.logout()
sys.exit(1)
# getting all
typ, data = server.uid('search', None, 'ALL')
if typ == "OK":
            message_ids = data[0].decode('utf-8').split()
            logging.debug("message_ids:%s", ",".join(message_ids))
# if number_entries is set we have to adapt messages_ids
if self._args.number_entries:
if len(message_ids) > self._args.number_entries:
message_ids = message_ids[-self._args.number_entries:]
self.__fetch_mails_and_write(server, message_ids, folder_name)
else:
logging.error("Could not select folder %s - typ:%s",
folder_name, typ)
server.logout()
sys.exit(1)
def __list_folders(self, server):
"""
lists all folders and writes them to
logging.info
@param server: imaplib IMAP4_SSL object
"""
typ, folder_list = server.list()
if typ == "OK":
logging.info("Folders:")
            for f in folder_list:
                f = f.decode('utf-8', 'replace')
                logging.info(f[f.find("\"/\" \"") + 4:])
else:
logging.error("list folders was not ok: %s", typ)
server.logout()
sys.exit(1)
def __login_server(self, server, username, password):
"""
logs in to server, if failure then exit
@param server: imaplib IMAP4_SSL object
@param username
@param password
"""
try:
typ, dat = server.login(username, password)
if typ != "OK":
logging.warning("Could not log in")
server.logout()
sys.exit(1)
except Exception as e:
if "Invalid credentials" in e[0]:
logging.error("Invalid credentials cannot login")
server.logout()
sys.exit(1)
else:
logging.warning("Could not log in")
server.logout()
sys.exit(1)
def _main(self):
"""
        gets automatically called from Memacs class
"""
username = self._get_config_option("user")
password = self._get_config_option("password")
host = self._get_config_option("host")
port = self._get_config_option("port")
try:
server = imaplib.IMAP4_SSL(host, int(port))
        except Exception as e:
            logging.warning("could not connect to server %s: %s", host, e)
sys.exit(1)
self.__login_server(server, username, password)
        if self._args.list_folders:
self.__list_folders(server)
else:
self.__handle_folder(server, self._args.folder_name)
server.logout()
| 5,927
|
Python
|
.py
| 151
| 26.953642
| 74
| 0.527415
|
novoid/Memacs
| 1,003
| 67
| 17
|
GPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,768
|
csv.py
|
novoid_Memacs/memacs/csv.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2022-07-14 16:04:14 vk>
import argparse
import datetime
import json
import logging
import sys
import time
from orgformat import OrgFormat
from memacs.lib.memacs import Memacs
from memacs.lib.orgproperty import OrgProperties
from memacs.lib.reader import UnicodeDictReader
class Csv(Memacs):
def _parser_add_arguments(self):
"""
overwritten method of class Memacs
add additional arguments
"""
Memacs._parser_add_arguments(self)
self._parser.add_argument(
"-f", "--file", dest="csvfile", required=True,
action="store", help="input csv file", type=argparse.FileType('rb'))
self._parser.add_argument(
"-d", "--delimiter", dest="delimiter", default=";",
action="store", help='delimiter, default ";"')
self._parser.add_argument(
"-e", "--encoding", dest="encoding",
action="store", default="utf-8", help="default encoding utf-8, " +
"see http://docs.python.org/library/codecs.html#standard-encodings" +
"for possible encodings")
self._parser.add_argument(
"-n", "--fieldnames", dest="fieldnames", default=None,
action="store", help="header field names of the columns",
type=str.lower)
self._parser.add_argument(
"-p", "--properties", dest="properties", default='',
action="store", help="fields to use for properties",
type=str.lower)
self._parser.add_argument(
"--timestamp-field", dest="timestamp_field", required=True,
action="store", help="field name of the timestamp",
type=str.lower)
self._parser.add_argument(
"--timestamp-format", dest="timestamp_format",
action="store", help='format of the timestamp, i.e. ' +
'"%%d.%%m.%%Y %%H:%%M:%%S" for "14.02.2012 10:22:37" ' +
            'see http://docs.python.org/library/time.html#time.strftime ' +
'for possible formats. Default is the current local format, ' +
'so please do specify format in order to be unambiguous.')
self._parser.add_argument(
"--output-format", dest="output_format", required=True,
action="store", help="format string to use for the output")
self._parser.add_argument(
"--skip-header", dest="skip_header",
action="store_true", help="skip first line of the csv file")
def _parser_parse_args(self):
"""
overwritten method of class Memacs
all additional arguments are parsed in here
"""
Memacs._parser_parse_args(self)
if self._args.fieldnames:
self._args.fieldnames = [name.strip() for name in self._args.fieldnames.split(',')]
def _handle_row(self, row):
"""
handle a single row
"""
try:
# assume unix timestamp
if not self._args.timestamp_format:
timestamp = datetime.datetime.fromtimestamp(int(row[self._args.timestamp_field]))
else:
timestamp = time.strptime(row[self._args.timestamp_field], self._args.timestamp_format)
# show time with the timestamp format, but only
# if it contains at least hours and minutes
if not self._args.timestamp_format or \
any(x in self._args.timestamp_format for x in ['%H', '%M']):
timestamp = OrgFormat.date(timestamp, show_time=True)
else:
timestamp = OrgFormat.date(timestamp)
except ValueError as e:
logging.error("timestamp-format does not match: %s", e)
sys.exit(1)
except IndexError as e:
logging.error("did you specify the right delimiter?", e)
sys.exit(1)
properties = OrgProperties(data_for_hashing=json.dumps(row))
output = self._args.output_format.format(**row)
if self._args.properties:
for prop in self._args.properties.split(','):
properties.add(prop.upper().strip(), row[prop])
self._writer.write_org_subitem(timestamp=timestamp,
output=output,
properties=properties)
def _main(self):
"""
        gets automatically called from Memacs class
"""
with self._args.csvfile as f:
try:
reader = UnicodeDictReader(f,
self._args.delimiter,
self._args.encoding,
self._args.fieldnames)
if self._args.skip_header:
next(reader)
for row in reader:
self._handle_row(row)
logging.debug(row)
except TypeError as e:
logging.error("not enough fieldnames or wrong delimiter given")
logging.debug("Error: %s" % e)
sys.exit(1)
except UnicodeDecodeError as e:
logging.error("could not decode file in utf-8, please specify input encoding")
sys.exit(1)
| 5,308
|
Python
|
.py
| 117
| 33.111111
| 103
| 0.569794
|
novoid/Memacs
| 1,003
| 67
| 17
|
GPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,769
|
example.py
|
novoid_Memacs/memacs/example.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2019-11-06 15:22:08 vk>
import logging
import time
from orgformat import OrgFormat
from memacs.lib.memacs import Memacs
from memacs.lib.orgproperty import OrgProperties
class Foo(Memacs):
def _parser_add_arguments(self):
"""
overwritten method of class Memacs
add additional arguments
"""
Memacs._parser_add_arguments(self)
#self._parser.add_argument(
# "-e", "--example", dest="example",
# action="store_true",
# help="path to a folder to search for filenametimestamps, " +
# "multiple folders can be specified: -f /path1 -f /path2")
#self._parser.add_argument(
# "-i", "--int", dest="example_int",
# action="store_true",
# help="example2",
# type=int)
def _parser_parse_args(self):
"""
overwritten method of class Memacs
all additional arguments are parsed in here
"""
Memacs._parser_parse_args(self)
# if self._args.example == ...:
# self._parser.error("could not parse foo")
def _main(self):
"""
        gets automatically called from Memacs class
"""
# do all the stuff
# if you need something from config:
# attention: foo will be unicode
# foo = self._get_config_option("foo")
logging.info("foo started")
# how to handle config files ?
# sample config file:
# ---------8<-----------
# [memacs-example]
# foo = 0
# bar = 1
# --------->8-----------
# to read it out, just do following:
# foo = self._get_config_option("foo")
# bar = self._get_config_option("bar")
# use logging.debug() for debug messages
# use logging.error() for error messages
# use logging.info() instead of print for informing user
#
        # on a fatal error:
# use logging.error() and sys.exit(1)
timestamp = OrgFormat.date(time.gmtime(0), show_time=True)
        # note: OrgFormat.date() expects a time.struct_time object as input
# Orgproperties
# Option 1: no properties given, specify argument for hashing data
properties = OrgProperties("hashing data :ALKJ!@# should be unique")
# Option 2: add properties which are all-together unique
# properties.add("Category","fun")
# properties.add("from","me@example.com")
# properties.add("body","foo")
self._writer.write_org_subitem(timestamp=timestamp,
output="foo",
properties=properties)
# writes following:
#** <1970-01-01 Thu 00:00> foo
# :PROPERTIES:
# :ID: da39a3ee5e6b4b0d3255bfef95601890afd80709
# :END:
notes = "bar notes\nfoo notes"
p = OrgProperties(data_for_hashing="read comment below")
        # if a hash is not unique only with its :PROPERTIES:, then
        # pass additional information (e.g. the output) in the
        # data_for_hashing string, which then makes the hash really unique
#
# if you *really*, *really* have already a unique id,
# then you can call following method:
# p.set_id("unique id here")
p.add("DESCRIPTION", "foooo")
p.add("foo-property", "asdf")
tags = ["tag1", "tag2"]
self._writer.write_org_subitem(timestamp=timestamp,
output="bar",
note=notes,
properties=p,
tags=tags)
# writes following:
#** <1970-01-01 Thu 00:00> bar :tag1:tag2:
# bar notes
# foo notes
# :PROPERTIES:
# :DESCRIPTION: foooo
# :FOO-PROPERTY: asdf
# :ID: 97521347348df02dab8bf86fbb6817c0af333a3f
# :END:
| 4,038
|
Python
|
.py
| 101
| 29.732673
| 76
| 0.548939
|
novoid/Memacs
| 1,003
| 67
| 17
|
GPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,770
|
gpx.py
|
novoid_Memacs/memacs/gpx.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import os
import time
import geocoder
import gpxpy
from orgformat import OrgFormat
from memacs.lib.memacs import Memacs
from memacs.lib.orgproperty import OrgProperties
class GPX(Memacs):
def _parser_add_arguments(self):
"""
overwritten method of class Memacs
add additional arguments
"""
Memacs._parser_add_arguments(self)
self._parser.add_argument(
"-f", "--folder", dest="source",
action="store", required=True,
help="path to gpx file or folder")
self._parser.add_argument(
"-p", "--provider", dest="provider",
action="store", default="google",
help="geocode provider, default google")
self._parser.add_argument(
"-u", "--url", dest="url",
action="store", help="url to nominatim server (osm only)")
self._parser.add_argument(
"--output-format", dest="output_format",
action="store", default="{address}",
help="format string to use for the headline")
def _parser_parse_args(self):
"""
overwritten method of class Memacs
all additional arguments are parsed in here
"""
Memacs._parser_parse_args(self)
if not os.path.exists(self._args.source):
self._parser.error("source file or folder does not exist")
if self._args.url and not self._args.url.startswith("http"):
self._parser.error("invalid url given")
def reverse_geocode(self, lat, lng):
"""get address for latitude/longitude"""
if 'google' in self._args.provider:
geocode = geocoder.google([lat, lng], method='reverse')
elif 'osm' in self._args.provider:
if not self._args.url:
geocode = geocoder.osm([lat, lng], method='reverse')
time.sleep(1) # Nominatim Usage Policy
else:
if 'localhost' in self._args.url:
geocode = geocoder.osm([lat, lng], method='reverse', url='http://localhost/nominatim/search')
else:
geocode = geocoder.osm([lat, lng], method='reverse', url=self._args.url)
else:
self._parser.error("invalid provider given")
raise ValueError('invalid provider given')
if not geocode.ok:
logging.error("geocoding failed or api limit exceeded")
raise RuntimeError('geocoding failed or api limit exceeded')
else:
logging.debug(geocode.json)
return geocode.json
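    # geocode.json is the geocoder library's normalized dict; for reverse
    # lookups it typically carries keys such as 'address', 'lat' and 'lng',
    # which is why the default --output-format above is '{address}'.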
def write_point(self, p):
"""write a point (including geocoding)"""
timestamp = OrgFormat.date(p.time, show_time=True)
geocode = self.reverse_geocode(p.latitude, p.longitude)
output = self._args.output_format.format(**geocode)
tags = []
properties = OrgProperties(data_for_hashing=timestamp)
if p.latitude:
properties.add('LATITUDE', p.latitude)
if p.longitude:
properties.add('LONGITUDE', p.longitude)
if p.source:
tags.append(p.source.lower())
if timestamp:
self._writer.write_org_subitem(timestamp=timestamp,
output=output,
properties=properties,
tags=tags)
def handle_file(self, f):
"""iterate through a file"""
        with open(f) as data:  # ensure the file handle is closed after parsing
            gpx = gpxpy.parse(data)
for track in gpx.tracks:
for segment in track.segments:
for point in segment.points:
self.write_point(point)
logging.debug(point)
def _main(self):
"""
        gets automatically called from Memacs class
"""
if os.path.isfile(self._args.source):
self.handle_file(self._args.source)
else:
for root, dirs, files in os.walk(self._args.source):
for f in files:
if f.endswith('.gpx'):
self.handle_file(os.path.join(root, f))
| 4,233
|
Python
|
.py
| 102
| 29.892157
| 113
| 0.570766
|
novoid/Memacs
| 1,003
| 67
| 17
|
GPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,771
|
lastfm.py
|
novoid_Memacs/memacs/lastfm.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import logging
import sys
import pylast
from orgformat import OrgFormat
from memacs.lib.memacs import Memacs
from memacs.lib.orgproperty import OrgProperties
class LastFM(Memacs):
def _parser_add_arguments(self):
"""
overwritten method of class Memacs
add additional arguments
"""
Memacs._parser_add_arguments(self)
self._parser.add_argument(
'--output-format', dest='output_format',
action='store', default='{title}',
            help='format string to use for the output'
)
def _parser_parse_args(self):
"""
overwritten method of class Memacs
all additional arguments are parsed in here
"""
Memacs._parser_parse_args(self)
def _handle_recent_tracks(self, tracks):
"""parse recent tracks"""
logging.debug(tracks)
for t in tracks:
timestamp = datetime.datetime.fromtimestamp(int(t.timestamp))
output = self._args.output_format.format(title=t.track.title,
artist=t.track.artist,
album=t.album)
properties = OrgProperties(data_for_hashing=t.timestamp)
properties.add('ARTIST', t.track.artist)
properties.add('ALBUM', t.album)
self._writer.write_org_subitem(timestamp=OrgFormat.date(timestamp, show_time=True),
output=output,
properties=properties)
def _main(self):
"""
        gets automatically called from Memacs class
"""
options = {
'api_secret': self._get_config_option('api_secret'),
'api_key': self._get_config_option('api_key'),
'password_hash': pylast.md5(self._get_config_option('password')),
'username': self._get_config_option('username')
}
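        # The options above come from the Memacs config file; following the
        # convention shown in example.py, a hypothetical section could be:
        #   [memacs-lastfm]
        #   network = lastfm
        #   username = example_user
        #   password = secret
        #   api_key = ...
        #   api_secret = ...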
try:
if 'lastfm' in self._get_config_option('network'):
network = pylast.LastFMNetwork(**options)
if 'librefm' in self._get_config_option('network'):
network = pylast.LibreFMNetwork(**options)
user = network.get_user(options['username'])
self._handle_recent_tracks(user.get_recent_tracks(limit=100))
except pylast.WSError as e:
            logging.error('an issue with the network web service occurred: %s', e)
sys.exit(1)
| 2,673
|
Python
|
.py
| 63
| 30.52381
| 95
| 0.579822
|
novoid/Memacs
| 1,003
| 67
| 17
|
GPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,772
|
git.py
|
novoid_Memacs/memacs/git.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2019-11-06 15:23:39 vk>
import logging
import os
import sys
import time
from orgformat import OrgFormat
from memacs.lib.memacs import Memacs
from memacs.lib.orgproperty import OrgProperties
class Commit(object):
"""
class for representing one commit
"""
def __init__(self):
"""
Ctor
"""
self.__empty = True
self.__subject = ""
self.__body = ""
self.__timestamp = ""
self.__author = ""
self.__properties = OrgProperties()
def __set_author_timestamp(self, line):
"""
extracts the date + time from line:
author Forename Lastname <mail> 1234567890 +0000
@param line
"""
self.__empty = False
date_info = line[-16:] # 1234567890 +0000
seconds_since_epoch = float(date_info[:10])
#timezone_info = date_info[11:]
self.__timestamp = OrgFormat.date(
time.localtime(seconds_since_epoch), show_time=True)
self.__author = line[7:line.find("<")].strip()
def add_header(self, line):
"""
adds line to the header
if line contains "author" this method
calls self.__set_author_timestamp(line)
for setting right author + datetime created
every line will be added as property
i.e:
commit <hashtag>
would then be following property:
:COMMIT: <hashtag>
@param line:
"""
self.__empty = False
if line != "":
whitespace = line.find(" ")
tag = line[:whitespace].upper()
value = line[whitespace:]
self.__properties.add(tag, value)
if tag == "AUTHOR":
self.__set_author_timestamp(line)
def add_body(self, line):
"""
adds a line to the body
if line starts with Signed-off-by,
also a property of that line is added
"""
line = line.strip()
if line != "":
if line[:14] == "Signed-off-by:":
self.__properties.add("SIGNED-OFF-BY", line[15:])
elif self.__subject == "":
self.__subject = line
else:
self.__body += line + "\n"
def is_empty(self):
"""
@return: True - empty commit
False - not empty commit
"""
return self.__empty
def get_output(self):
"""
@return tuple: output,properties,body for Orgwriter.write_sub_item()
"""
output = self.__author + ": " + self.__subject
return output, self.__properties, self.__body, self.__author, \
self.__timestamp
class GitMemacs(Memacs):
def _parser_add_arguments(self):
"""
overwritten method of class Memacs
add additional arguments
"""
Memacs._parser_add_arguments(self)
self._parser.add_argument(
"-f", "--file", dest="gitrevfile",
action="store",
help="path to a an file which contains output from " + \
" following git command: git rev-list --all --pretty=raw")
self._parser.add_argument(
"-g", "--grep-user", dest="grepuser",
action="store",
help="if you wanna parse only commit from a specific person. " + \
"format:<Forname Lastname> of user to grep")
self._parser.add_argument(
"-e", "--encoding", dest="encoding",
action="store",
help="default encoding utf-8, see " + \
"http://docs.python.org/library/codecs.html#standard-encodings" + \
"for possible encodings")
def _parser_parse_args(self):
"""
overwritten method of class Memacs
all additional arguments are parsed in here
"""
Memacs._parser_parse_args(self)
        if self._args.gitrevfile and not \
           (os.path.exists(self._args.gitrevfile) and
            os.access(self._args.gitrevfile, os.R_OK)):
self._parser.error("input file not found or not readable")
if not self._args.encoding:
self._args.encoding = "utf-8"
def get_line_from_stream(self, input_stream):
try:
return input_stream.readline()
except UnicodeError as e:
logging.error("Can't decode to encoding %s, " + \
"use argument -e or --encoding see help",
self._args.encoding)
sys.exit(1)
def _main(self):
"""
        gets automatically called from Memacs class
        read the lines from git-rev-list file, parse and write them to org file
"""
# read file
if self._args.gitrevfile:
logging.debug("using as %s input_stream",
self._args.gitrevfile)
input_stream = open(self._args.gitrevfile)
else:
logging.debug("using sys.stdin as input_stream")
input_stream = sys.stdin
# now go through the file
# Logic (see example commit below)
        # first we are in a header and not in a body
# every newline toggles output
# if we are in body then add the body to commit class
# if we are in header then add the header to commit class
#
# commit 6fb35035c5fa7ead66901073413a42742a323e89
# tree 7027c628031b3ad07ad5401991f5a12aead8237a
# parent 05ba138e6aa1481db2c815ddd2acb52d3597852f
# author Armin Wieser <armin.wieser@example.com> 1324422878 +0100
# committer Armin Wieser <armin.wieser@example.com> 1324422878 +0100
#
# PEP8
# Signed-off-by: Armin Wieser <armin.wieser@gmail.com>
was_in_body = False
commit = Commit()
commits = []
line = self.get_line_from_stream(input_stream)
while line:
line = line.rstrip() # removing \n
logging.debug("got line: %s", line)
if line.strip() == "" or len(line) != len(line.lstrip()):
commit.add_body(line)
was_in_body = True
else:
if was_in_body:
commits.append(commit)
commit = Commit()
commit.add_header(line)
was_in_body = False
line = self.get_line_from_stream(input_stream)
# adding last commit
if not commit.is_empty():
commits.append(commit)
logging.debug("got %d commits", len(commits))
if len(commits) == 0:
logging.error("Is there an error? Because i found no commits.")
# time to write all commits to org-file
for commit in commits:
output, properties, note, author, timestamp = commit.get_output()
            if not self._args.grepuser or \
(self._args.grepuser and self._args.grepuser == author):
# only write to stream if
# * grepuser is not set or
# * grepuser is set and we got an entry with the right author
self._writer.write_org_subitem(output=output,
timestamp=timestamp,
properties=properties,
note=note)
if self._args.gitrevfile:
input_stream.close()
| 7,503
|
Python
|
.py
| 193
| 27.80829
| 80
| 0.551093
|
novoid/Memacs
| 1,003
| 67
| 17
|
GPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,773
|
photos.py
|
novoid_Memacs/memacs/photos.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2019-11-06 15:26:47 vk>
import imghdr
import logging
import os
import time
from PIL import Image
from PIL.ExifTags import TAGS
from orgformat import OrgFormat
from memacs.lib.memacs import Memacs
from memacs.lib.orgproperty import OrgProperties
def get_exif_datetime(filename):
"""
Get datetime of exif information of a file
"""
try:
exif_data_decoded = {}
image = Image.open(filename)
if hasattr(image, '_getexif'):
exif_info = image._getexif()
            if exif_info is not None:
for tag, value in list(exif_info.items()):
decoded_tag = TAGS.get(tag, tag)
exif_data_decoded[decoded_tag] = value
if "DateTime" in list(exif_data_decoded.keys()):
return exif_data_decoded["DateTime"]
if "DateTimeOriginal" in list(exif_data_decoded.keys()):
return exif_data_decoded["DateTimeOriginal"]
except IOError as e:
logging.warning("IOError at %s:", filename, e)
return None
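# EXIF encodes DateTime as e.g. '2019:11:06 15:26:47' (note the colons in
# the date part), which is why __handle_file() below parses it with
# time.strptime(datetime, "%Y:%m:%d %H:%M:%S").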
class PhotosMemacs(Memacs):
def _parser_add_arguments(self):
"""
overwritten method of class Memacs
add additional arguments
"""
Memacs._parser_add_arguments(self)
self._parser.add_argument(
"-f", "--folder", dest="photo_folder",
action="store", required=True,
help="path to search for photos")
self._parser.add_argument("-l", "--follow-links",
dest="follow_links", action="store_true",
help="follow symbolics links," + \
" default False")
def _parser_parse_args(self):
"""
overwritten method of class Memacs
all additional arguments are parsed in here
"""
Memacs._parser_parse_args(self)
if not os.path.exists(self._args.photo_folder):
self._parser.error("photo folder does not exist")
def __handle_file(self, photo_file, filename):
"""
        check if the file is an image, try to get its EXIF data and
        write it to the org file
"""
logging.debug("handling file %s", filename)
# check if file is an image:
        if imghdr.what(filename) is not None:
            datetime = get_exif_datetime(filename)
            if datetime is None:
logging.debug("skipping: %s has no EXIF information", filename)
else:
try:
datetime = time.strptime(datetime, "%Y:%m:%d %H:%M:%S")
timestamp = OrgFormat.date(datetime, show_time=True)
output = OrgFormat.link(filename, photo_file)
properties = OrgProperties(photo_file + timestamp)
self._writer.write_org_subitem(timestamp=timestamp,
output=output,
properties=properties)
except ValueError as e:
logging.warning("skipping: Could not parse " + \
"timestamp for %s : %s", filename, e)
def _main(self):
"""
        gets automatically called from Memacs class
walk through given folder and handle each file
"""
for rootdir, dirs, files in os.walk(self._args.photo_folder,
followlinks=self._args.follow_links):
for photo_file in files:
                filename = os.path.join(rootdir, photo_file)
self.__handle_file(photo_file, filename)
| 3,688 | Python | .py | 88 | 29.659091 | 79 | 0.559251 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,774 | chrome.py | novoid_Memacs/memacs/chrome.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2019-11-06 15:21:42 vk>
import datetime
import os
import sqlite3
from orgformat import OrgFormat
from memacs.lib.memacs import Memacs
from memacs.lib.orgproperty import OrgProperties
class Chrome(Memacs):
def _parser_add_arguments(self):
"""
overwritten method of class Memacs
add additional arguments
"""
Memacs._parser_add_arguments(self)
self._parser.add_argument(
"-f", "--file", dest="historystore",
action="store", type=open, required=True,
help="""path to Google Chrome History sqlite file. usually in
/home/bala/.config/google-chrome/Default/History """)
self._parser.add_argument(
"--output-format", dest="output_format",
action="store", default="[[{url}][{title}]]",
help="format string to use for the headline")
self._parser.add_argument(
"--omit-drawer", dest="omit_drawer",
action="store_true", required=False,
help="""Use a minimal output format that omits the PROPERTIES drawer.""")
def _parser_parse_args(self):
"""
overwritten method of class Memacs
all additional arguments are parsed in here
"""
Memacs._parser_parse_args(self)
def _handle_url(self, params):
epoch = datetime.datetime(1970, 1, 1)-datetime.datetime(1601, 1, 1)
url_time = params['timestamp']/1000000-epoch.total_seconds()
        if url_time > 0:
timestamp = datetime.datetime.fromtimestamp(int(url_time))
else:
timestamp = datetime.datetime(1970, 1, 1)
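        # Worked example (assumed values): Chrome stores last_visit_time in
        # microseconds since 1601-01-01 ("WebKit time"); the offset between the
        # two epochs is 11644473600 s. A value of 13212345600000000 therefore
        # maps to 13212345600000000/1e6 - 11644473600 = 1567872000 s, i.e.
        # 2019-09-07 16:00:00 UTC.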
        if params['title'] == "":
            params['title'] = params['url']
        # build the PROPERTIES drawer only when it is not omitted; with the
        # original ordering, --omit-drawer raised a NameError on properties.add
        if not self._args.omit_drawer:
            properties = OrgProperties()
            properties.add('URL', params['url'])
            properties.add('VISIT_COUNT', params['visit_count'])
output = OrgFormat.link(params['url'], params['title'])
try:
            # str has no .decode() in Python 3; the format string is already text
            output = self._args.output_format.format(**params)
except Exception:
pass
if self._args.omit_drawer:
self._writer.write_org_subitem(
timestamp=OrgFormat.date(timestamp, show_time=True),
output=output, properties=None)
else:
self._writer.write_org_subitem(
timestamp=OrgFormat.date(timestamp, show_time=True),
output=output, properties=properties)
def _main(self):
"""
        gets automatically called from Memacs class
"""
conn = sqlite3.connect(os.path.abspath(self._args.historystore.name))
query = conn.execute("""
select url, title, visit_count,
last_visit_time
from urls
where last_visit_time IS NOT NULL
order by last_visit_time """)
for row in query:
self._handle_url({
'url' : row[0],
'title' : row[1],
'visit_count' : row[2],
'timestamp' : row[3],
})
| 3,168 | Python | .py | 79 | 30.101266 | 85 | 0.581107 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,775 | mu.py | novoid_Memacs/memacs/mu.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import locale
import logging
import re
import subprocess
import sys
from datetime import datetime
from orgformat import OrgFormat
from memacs.lib.memacs import Memacs
from memacs.lib.orgproperty import OrgProperties
# Sets this script's locale to be the same as system locale
locale.setlocale(locale.LC_TIME, '')
class MuMail(Memacs):
def _parser_add_arguments(self):
"""
overwritten method of class Memacs
add additional arguments
"""
Memacs._parser_add_arguments(self)
self._parser.add_argument(
"-q", "--query",
dest="query",
help="mu search query")
self._parser.add_argument(
"-m", "--me",
dest="sender",
help="space seperated list of mail addresses that belongs to you")
self._parser.add_argument(
"-d", "--delegation",
dest="todo",
action='store_true',
help="adds NEXT or WAITING state to flagged messages")
def _parser_parse_args(self):
"""
overwritten method of class Memacs
all additional arguments are parsed in here
"""
Memacs._parser_parse_args(self)
        self._query = ""
        if self._args.sender:
            self._args.sender = self._args.sender.strip()
            self._sender = self._args.sender.split(" ")
        else:
            raise ValueError('You have to specify at least one email address')
if self._args.query:
self._query = self._args.query
if self._args.todo:
self._todo = True
else:
self._todo = False
def __parse_Plain(self,plain_mails):
messages = plain_mails.decode('utf-8')
return messages.splitlines()
def __getTimestamp(self, time, onlyDate=False):
"""
        converts a mail date string into an Org-readable timestamp,
        e.g. "Do  6 Nov 21:22:17 2014"
"""
time = time.strip()
mail_date = datetime.strptime(time,"%c")
if onlyDate is False:
return OrgFormat.date(mail_date, show_time=True)
return OrgFormat.date(mail_date)
def __create_mail_link(self, sender):
"""
        creates a well-formatted Org mail link from the message's 'from' field.
"""
        rex = re.compile(r'([\w\s.,-]*?)[\s<"]*([\w.-]+@[\w.-]+)', re.UNICODE)
m = rex.search(sender)
if m:
name = m.group(1).strip()
mail = m.group(2).strip()
if name is not "":
return ("[[mailto:" + mail + "][" + name + "]]",name,mail)
else:
return ("[[mailto:" + mail + "][" + mail + "]]",name,mail)
return ("Unknown","Unknown","Unknown")
def _main(self):
"""
        gets automatically called from Memacs class
fetches all mails out of mu database
"""
        # build the 'mu find' command; the -q/--query argument is passed through
        # (prefixing 'mu find' here is an assumption about the intended invocation)
        command = "mu find " + self._query
        command = command + " --fields=t:#:d:#:f:#:g:#:s:#:i --format=plain"
        try:
            plain_mails = subprocess.check_output(command, shell=True)
        except subprocess.CalledProcessError as e:
            logging.error("mu command failed: %s", e)
            sys.exit(1)
        messages = self.__parse_Plain(plain_mails)
properties = OrgProperties()
for message in messages:
(an,datum,von,flags,betreff,msgid) = message.split(":#:")
betreff = betreff.replace("[","<")
betreff = betreff.replace("]",">")
properties.add('TO',an)
if von != "":
(sender,vname,vmail) = self.__create_mail_link(von)
(an,aname,amail) = self.__create_mail_link(an)
timestamp = self.__getTimestamp(datum)
properties.add_data_for_hashing(timestamp + "_" + msgid)
properties.add("FROM",sender)
notes = ""
if any(match in vmail for match in self._sender):
output = output = "".join(["T: ",an,": [[mu4e:msgid:",msgid,"][",betreff,"]]"])
pre = 'WAITING '
else:
output = "".join(["F: ",sender,": [[mu4e:msgid:",msgid,"][",betreff,"]]"])
pre = 'NEXT '
if (flags.find('F') >= 0 and self._todo):
date = self.__getTimestamp(datum,True)
notes = "SCHEDULED: "+date
timestamp = ""
output = pre+output
self._writer.write_org_subitem(timestamp, output, notes, properties)
| 4,554 | Python | .py | 115 | 28.86087 | 99 | 0.53928 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,776 | phonecalls_superbackup.py | novoid_Memacs/memacs/phonecalls_superbackup.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2019-11-06 15:26:30 vk>
import datetime
import logging
import os
import sys
import time
import xml.sax
from orgformat import OrgFormat
from xml.sax._exceptions import SAXParseException
from memacs.lib.memacs import Memacs
from memacs.lib.orgproperty import OrgProperties
from memacs.lib.reader import CommonReader
#import pdb
logging.basicConfig(filename='debug.log', level=logging.DEBUG)
class PhonecallsSaxHandler(xml.sax.handler.ContentHandler):
"""
Sax handler for following xml's:
<?xml version="1.0" encoding="UTF-8"?>
<alllogs count="500">
<log number="01270811333" time="3 Sep 2013 10:03:26" date="1378199006383" type="1" name="" new="1" dur="30" />
<log number="01270588896" time="1 Sep 2013 19:41:05" date="1378060865117" type="2" name="Nick Powell" new="1" dur="143" />
<log number="07989385391" time="1 Sep 2013 13:41:23" date="1378039283149" type="1" name="Anne Barton" new="1" dur="19" />
<log number="+447943549963" time="1 Sep 2013 13:26:31" date="1378038391562" type="2" name="John M Barton" new="1" dur="0" />
<log number="+447943549963" time="1 Sep 2013 13:11:46" date="1378037506896" type="2" name="John M Barton" new="1" dur="0" />
</alllogs>"""
def __init__(self,
writer,
ignore_incoming,
ignore_outgoing,
ignore_missed,
ignore_voicemail,
ignore_rejected,
ignore_refused,
minimum_duration
):
"""
Ctor
@param writer: orgwriter
@param ignore_incoming: ignore incoming phonecalls
@param ignore_outgoing: ignore outgoing phonecalls
@param ignore_missed: ignore missed phonecalls
@param ignore_voicemail: ignore voicemail phonecalls
@param ignore_rejected: ignore rejected phonecalls
@param ignore_refused: ignore refused phonecalls
@param minimum_duration: ignore phonecalls less than that time
"""
self._writer = writer
self._ignore_incoming = ignore_incoming
self._ignore_outgoing = ignore_outgoing
self._ignore_missed = ignore_missed
self._ignore_voicemail = ignore_voicemail
self._ignore_rejected = ignore_rejected
self._ignore_refused = ignore_refused
self._minimum_duration = minimum_duration
def startElement(self, name, attrs):
"""
at every <log> write to orgfile
"""
logging.debug("Handler @startElement name=%s,attrs=%s", name, attrs)
if name == "log":
call_number = attrs['number']
call_duration = int(attrs['dur'])
call_date = int(attrs['date']) / 1000 # unix epoch
call_type = int(attrs['type'])
call_incoming = call_type == 1
call_outgoing = call_type == 2
call_missed = call_type == 3
call_voicemail = call_type == 4
call_rejected = call_type == 5
call_refused = call_type == 6
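            # Call type codes as found in SuperBackup exports (assumed):
            # 1=incoming, 2=outgoing, 3=missed, 4=voicemail, 5=rejected, 6=refused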
call_name = attrs['name']
output = "Phonecall "
skip = False
if call_incoming:
output += "from "
if self._ignore_incoming:
skip = True
elif call_outgoing:
output += "to "
if self._ignore_outgoing:
skip = True
elif call_missed:
output += "missed "
if self._ignore_missed:
skip = True
elif call_voicemail:
output += "voicemail "
if self._ignore_voicemail:
skip = True
elif call_rejected:
output += "rejected "
if self._ignore_rejected:
skip = True
elif call_refused:
output += "refused "
if self._ignore_refused:
skip = True
else:
raise Exception("Invalid Phonecall Type: %d", call_type)
call_number_string = ""
if call_number != "-1":
call_number_string = call_number
else:
call_number_string = "Unknown Number"
name_string = ""
if call_name != "(Unknown)":
name_string = '[[contact:' + call_name + '][' + call_name + ']]'
else:
name_string = "Unknown"
output += name_string
if call_duration < self._minimum_duration:
skip = True
timestamp = OrgFormat.date(time.gmtime(call_date), show_time=True)
end_datetimestamp = datetime.datetime.utcfromtimestamp(call_date + call_duration)
logging.debug("timestamp[%s] duration[%s] end[%s]" %
(str(timestamp), str(call_duration), str(end_datetimestamp)))
end_timestamp_string = OrgFormat.date(end_datetimestamp, show_time=True)
logging.debug("end_time [%s]" % end_timestamp_string)
data_for_hashing = output + timestamp
properties = OrgProperties(data_for_hashing=data_for_hashing)
properties.add("NUMBER", call_number_string)
properties.add("DURATION", call_duration)
properties.add("NAME", call_name)
if not skip:
self._writer.write_org_subitem(output=output,
timestamp=timestamp + '-' + end_timestamp_string,
properties=properties
)
class PhonecallsSuperBackupMemacs(Memacs):
def _parser_add_arguments(self):
"""
overwritten method of class Memacs
add additional arguments
"""
Memacs._parser_add_arguments(self)
self._parser.add_argument(
"-f", "--file", dest="smsxmlfile",
action="store", required=True,
help="path to sms xml backup file")
self._parser.add_argument(
"--ignore-incoming", dest="ignore_incoming",
action="store_true",
help="ignore incoming phonecalls")
self._parser.add_argument(
"--ignore-outgoing", dest="ignore_outgoing",
action="store_true",
help="ignore outgoing phonecalls")
self._parser.add_argument(
"--ignore-missed", dest="ignore_missed",
action="store_true",
help="ignore outgoing phonecalls")
self._parser.add_argument(
"--ignore-voicemail", dest="ignore_voicemail",
action="store_true",
help="ignore voicemail phonecalls")
self._parser.add_argument(
"--ignore-rejected", dest="ignore_rejected",
action="store_true",
help="ignore rejected phonecalls")
self._parser.add_argument(
"--ignore-refused", dest="ignore_refused",
action="store_true",
help="ignore refused phonecalls")
self._parser.add_argument(
"--minimum-duration", dest="minimum_duration",
action="store", type=int,
help="[sec] show only calls with duration >= this argument")
def _parser_parse_args(self):
"""
overwritten method of class Memacs
all additional arguments are parsed in here
"""
Memacs._parser_parse_args(self)
        if not (os.path.exists(self._args.smsxmlfile) and \
                os.access(self._args.smsxmlfile, os.R_OK)):
self._parser.error("input file not found or not readable")
def _main(self):
"""
gets called automatically from Memacs class.
read the lines from phonecalls backup xml file,
parse and write them to org file
"""
data = CommonReader.get_data_from_file(self._args.smsxmlfile)
try:
xml.sax.parseString(data.encode('utf-8'),
PhonecallsSaxHandler(self._writer,
self._args.ignore_incoming,
self._args.ignore_outgoing,
self._args.ignore_missed,
self._args.ignore_voicemail,
self._args.ignore_rejected,
self._args.ignore_refused,
self._args.minimum_duration or 0,
))
except SAXParseException:
logging.error("No correct XML given")
sys.exit(1)
| 8,897 | Python | .py | 199 | 31.035176 | 136 | 0.544782 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,777 | simplephonelogs.py | novoid_Memacs/memacs/simplephonelogs.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2018-08-25 15:07:57 vk>
import datetime
import logging
import os
import re
import time
from orgformat import OrgFormat
from memacs.lib.memacs import Memacs
from memacs.lib.orgproperty import OrgProperties
from memacs.lib.reader import CommonReader
class SimplePhoneLogsMemacs(Memacs):
_REGEX_SEPARATOR = " *?# *?"
    ## match for example: "2012-11-20 # 19.59 # shutdown # 72 # 35682"
    ## groups: 0=date, 1=hours, 2=minutes, 3=name, 4=battery, 5=uptime
    LOGFILEENTRY_REGEX = re.compile(r"([12]\d\d\d-[012345]\d-[012345]\d)" +
                                    _REGEX_SEPARATOR +
                                    r"([ 012]\d)[:.]([012345]\d)" +
                                    _REGEX_SEPARATOR +
                                    r"(.+)" +
                                    _REGEX_SEPARATOR +
                                    r"(\d+)" +
                                    _REGEX_SEPARATOR +
                                    r"(\d+)$", flags=re.U)
RE_ID_DATESTAMP = 0
RE_ID_HOURS = 1
RE_ID_MINUTES = 2
RE_ID_NAME = 3
RE_ID_BATT = 4
RE_ID_UPTIME = 5
def _parser_add_arguments(self):
"""
overwritten method of class Memacs
add additional arguments
"""
Memacs._parser_add_arguments(self)
self._parser.add_argument(
"-f", "--file", dest="phonelogfile",
action="store", required=True,
help="path to phone log file")
def _parser_parse_args(self):
"""
overwritten method of class Memacs
all additional arguments are parsed in here
"""
Memacs._parser_parse_args(self)
        if not (os.path.exists(self._args.phonelogfile) and
                os.access(self._args.phonelogfile, os.R_OK)):
self._parser.error("input file not found or not readable")
def _generateOrgentry(self, e_time, e_name, e_batt, e_uptime,
e_last_opposite_occurrence, e_last_occurrence,
prev_office_sum, prev_office_first_begin, office_lunchbreak,
battery_percentage_when_booting):
"""
takes the data from the parameters and generates an Org-mode entry.
@param e_time: time-stamp of the entry
@param e_name: entry name/description
@param e_batt: battery level
@param e_uptime: uptime in seconds
@param e_last_opposite_occurrence: time-stamp of previous opposite occurrence (if not False)
@param e_last_occurrence: time-stamp of previous occurrence
@param prev_office_sum: holds the sum of all previous working duration today
@param prev_office_first_begin: holds the first time-stamp of wifi-office for today
@param office_lunchbreak: array of begin- and end-time-stamp of lunch-break (if any)
@param battery_percentage_when_booting: battery level of previous boot (only set if no charge event was in-between)
"""
assert e_time.__class__ == datetime.datetime
assert e_name.__class__ == str
assert e_batt.__class__ == str
assert e_uptime.__class__ == str
assert (e_last_opposite_occurrence.__class__ == datetime.datetime or not e_last_opposite_occurrence)
assert (e_last_occurrence.__class__ == datetime.datetime or not e_last_occurrence)
assert (not battery_percentage_when_booting or battery_percentage_when_booting.__class__ == int)
last_info = ''
in_between_hms = ''
in_between_s = ''
ignore_occurrence = False
# convert parameters to be writable:
office_sum = prev_office_sum
office_first_begin = prev_office_first_begin
if e_last_opposite_occurrence:
in_between_s = (e_time - e_last_opposite_occurrence).seconds + \
(e_time - e_last_opposite_occurrence).days * 3600 * 24
in_between_hms = str(OrgFormat.hms_from_sec(in_between_s))
if e_name == 'boot':
last_info = ' (off for '
elif e_name == 'shutdown':
last_info = ' (on for '
elif e_name.endswith('-end'):
last_info = ' (' + e_name[0:-4].replace('wifi-', '') + ' for '
else:
last_info = ' (not ' + e_name.replace('wifi-', '') + ' for '
# handle special case: office hours
additional_paren_string = ""
if e_name == 'wifi-office-end':
office_total = None
# calculate office_sum and office_total
if not office_sum:
office_sum = (e_time - e_last_opposite_occurrence).seconds
office_total = office_sum
else:
assert(office_first_begin)
assert(office_sum)
office_sum = office_sum + (e_time - e_last_opposite_occurrence).seconds
office_total = int(time.mktime(e_time.timetuple()) - time.mktime(office_first_begin.timetuple()))
assert(type(office_total) == int)
assert(type(office_sum) == int)
assert(type(in_between_s) == int)
# come up with the additional office-hours string:
additional_paren_string = '; today ' + OrgFormat.hms_from_sec(office_sum) + \
'; today total ' + OrgFormat.hms_from_sec(office_total)
if additional_paren_string:
last_info += str(OrgFormat.dhms_from_sec(in_between_s)) + additional_paren_string + ')'
else:
last_info += str(OrgFormat.dhms_from_sec(in_between_s)) + ')'
elif e_last_occurrence:
in_between_s = (e_time - e_last_occurrence).seconds + \
(e_time - e_last_occurrence).days * 3600 * 24
in_between_hms = str(OrgFormat.hms_from_sec(in_between_s))
# handle special case: office hours
if e_name == 'wifi-office':
if not office_sum or not office_first_begin:
# new day
office_first_begin = e_time
else:
# check if we've found a lunch-break (first wifi-office between 11:30-13:00 where not office for > 17min)
if e_time.time() > datetime.time(11, 30) and e_time.time() < datetime.time(13, 00) and e_last_opposite_occurrence:
if e_last_opposite_occurrence.date() == e_time.date() and in_between_s > (17 * 60) and in_between_s < (80 * 60):
office_lunchbreak = [e_last_opposite_occurrence.time(), e_time.time()]
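                            # Worked example (assumed times): 'wifi-office-end' at 11:45
                            # followed by 'wifi-office' at 12:15 on the same day gives
                            # in_between_s = 1800 s (> 17 min and < 80 min), so the pair
                            # [11:45, 12:15] is recorded as the lunch break.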
# handle special case: boot without previous shutdown = crash
if (e_name == 'boot') and \
(e_last_occurrence and e_last_opposite_occurrence) and \
(e_last_occurrence > e_last_opposite_occurrence):
# last boot is more recent than last shutdown -> crash has happened
last_info = ' after crash'
in_between_hms = ''
in_between_s = ''
ignore_occurrence = True
properties = OrgProperties()
if in_between_s == 0: # omit in-between content of property when it is zero
in_between_s = ''
in_between_hms = ''
properties.add("IN-BETWEEN", in_between_hms)
properties.add("IN-BETWEEN-S", str(in_between_s))
properties.add("BATT-LEVEL", e_batt)
properties.add("UPTIME", OrgFormat.hms_from_sec(int(e_uptime)))
properties.add("UPTIME-S", e_uptime)
if e_name == 'wifi-office-end' and office_lunchbreak:
properties.add("OFFICE-SUMMARY",
e_last_opposite_occurrence.strftime('| %Y-%m-%d | %a ') +
prev_office_first_begin.strftime('| %H:%M ') +
office_lunchbreak[0].strftime('| %H:%M ') +
office_lunchbreak[1].strftime('| %H:%M ') +
e_time.strftime('| %H:%M | | |'))
elif e_name == 'wifi-office-end' and not office_lunchbreak:
properties.add("OFFICE-SUMMARY",
e_last_opposite_occurrence.strftime('| %Y-%m-%d | %a ') +
prev_office_first_begin.strftime('| %H:%M | 11:30 | 12:00 ') +
e_time.strftime('| %H:%M | | |'))
elif e_name == 'shutdown':
if battery_percentage_when_booting:
batt_diff_from_boot_to_shutdown = battery_percentage_when_booting - int(e_batt)
if batt_diff_from_boot_to_shutdown >= 20:
# hypothetical run-time (in hours; derived from boot to shutdown) of the device for 100% battery capacity
# Note: battery_percentage_when_booting is set to False when a "charge-start"-event is recognized between boot and shutdown
# Note: only calculated when at least 20 percent difference of battery level between boot and shutdown
runtime_extrapolation = 100 * int(e_uptime) // batt_diff_from_boot_to_shutdown // 3600
properties.add("HOURS_RUNTIME_EXTRAPOLATION", runtime_extrapolation)
self._writer.write_org_subitem(timestamp=e_time.strftime('<%Y-%m-%d %a %H:%M>'),
output=e_name + last_info,
properties=properties)
return '** ' + e_time.strftime('<%Y-%m-%d %a %H:%M>') + ' ' + e_name + last_info + \
'\n:PROPERTIES:\n:IN-BETWEEN: ' + in_between_hms + \
'\n:IN-BETWEEN-S: ' + str(in_between_s) + \
'\n:BATT-LEVEL: ' + e_batt + \
'\n:UPTIME: ' + str(OrgFormat.hms_from_sec(int(e_uptime))) + \
'\n:UPTIME-S: ' + str(e_uptime) + '\n:END:\n', \
ignore_occurrence, office_sum, office_first_begin, office_lunchbreak
def _determine_opposite_eventname(self, e_name):
"""
Takes a look at the event and returns the name of the opposite event description.
Opposite of 'boot' is 'shutdown' (and vice versa).
Opposite of 'foo' is 'foo-end' (and vice versa).
@param e_name: string of an event name/description
"""
assert (e_name.__class__ == str)
if e_name == 'boot':
return 'shutdown'
elif e_name == 'shutdown':
return 'boot'
elif e_name.endswith('-end'):
return e_name[0:-4]
else:
return e_name + '-end'
def _parse_data(self, data):
"""parses the phone log data"""
last_occurrences = {} # holds the previous occurrences of each event
office_day = None # holds the current day (in order to recognize day change)
office_first_begin = None # holds the time-stamp of the first appearance of wifi-office
office_sum = None # holds the sum of periods of all office-durations for this day
office_lunchbreak = [] # array of begin and end time of lunch break
battery_percentage_when_booting = False # percentage of battery status of previous boot (only set if no charging event happened)
for line in data.split('\n'):
if not line:
continue
logging.debug("line: %s", line)
components = re.match(self.LOGFILEENTRY_REGEX, line)
if components:
logging.debug("line matches")
else:
logging.debug("line does not match! (skipping this line)")
continue
# extracting the components to easy to use variables:
datestamp = components.groups()[self.RE_ID_DATESTAMP].strip()
hours = int(components.groups()[self.RE_ID_HOURS].strip())
minutes = int(components.groups()[self.RE_ID_MINUTES].strip())
e_name = str(components.groups()[self.RE_ID_NAME].strip())
opposite_e_name = self._determine_opposite_eventname(e_name)
e_batt = components.groups()[self.RE_ID_BATT].strip()
e_uptime = components.groups()[self.RE_ID_UPTIME].strip()
# generating a datestamp object from the time information:
e_time = datetime.datetime(int(datestamp.split('-')[0]),
int(datestamp.split('-')[1]),
int(datestamp.split('-')[2]),
hours, minutes)
if e_name == 'boot':
battery_percentage_when_booting = int(e_batt)
elif e_name == 'charging-start':
# set to False when a charging event is detected between boot and shutdown (which would render H_RUNTIME_EXTRAPOLATION useless)
battery_percentage_when_booting = False
elif e_name == 'shutdown' and opposite_e_name not in last_occurrences:
# set to False when there is no boot in-between two shutdown events
battery_percentage_when_booting = False
# resetting office_day
if e_name == 'wifi-office':
if not office_day:
office_sum = None
office_day = datestamp
office_lunchbreak = []
elif office_day != datestamp:
office_sum = None
office_day = datestamp
office_lunchbreak = []
if opposite_e_name in last_occurrences:
e_last_opposite_occurrence = last_occurrences[opposite_e_name]
else:
# no previous occurrence of the opposite event type
e_last_opposite_occurrence = False
if e_name in last_occurrences:
last_time = last_occurrences[e_name]
else:
last_time = False
result, ignore_occurrence, office_sum, office_first_begin, office_lunchbreak = \
self._generateOrgentry(e_time, e_name, e_batt,
e_uptime,
e_last_opposite_occurrence,
last_time,
office_sum, office_first_begin, office_lunchbreak,
battery_percentage_when_booting)
## update last_occurrences-dict
if not ignore_occurrence:
last_occurrences[e_name] = e_time
def _main(self):
"""
gets called automatically from Memacs class.
        read the lines from the simple phone log file,
        parse and write them to the org file
"""
self._parse_data(CommonReader.get_data_from_file(self._args.phonelogfile))
# Local Variables:
# mode: flyspell
# eval: (ispell-change-dictionary "en_US")
# End:
| 14,962 | Python | .py | 275 | 40.116364 | 143 | 0.555016 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,778 | svn.py | novoid_Memacs/memacs/svn.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2019-11-06 15:28:04 vk>
import logging
import os
import sys
import xml.sax
from orgformat import OrgFormat
from xml.sax._exceptions import SAXParseException
from memacs.lib.memacs import Memacs
from memacs.lib.orgproperty import OrgProperties
from memacs.lib.reader import CommonReader
class SvnSaxHandler(xml.sax.handler.ContentHandler):
"""
Sax handler for following xml's:
<?xml version="1.0"?>
<log>
<logentry
revision="13">
<author>bob</author>
<date>2011-11-05T18:18:22.936127Z</date>
<msg>Bugfix.</msg>
</logentry>
</log>
"""
def __init__(self, writer, grepauthor):
"""
Ctor
@param writer: orgwriter
"""
self.__reset()
self._writer = writer
self.__grepauthor = grepauthor
def __reset(self):
"""
resets all variables
"""
self.__author = ""
self.__date = ""
self.__msg = ""
self.__rev = -1
self.__on_node_name = "" # used to store on which element we are
self.__id_prefix = "rev-"
def __write(self):
"""
write attributes to writer (make an org_sub_item)
"""
logging.debug("msg:%s", self.__msg)
self.__msg = self.__msg.splitlines()
subject = ""
notes = ""
# idea: look for the first -nonempty- message
if len(self.__msg) > 0:
start_notes = 0
for i in range(len(self.__msg)):
if self.__msg[i].strip() != "":
subject = self.__msg[i].strip()
start_notes = i + 1
break
if len(self.__msg) > start_notes:
for n in self.__msg[start_notes:]:
if n != "":
notes += n + "\n"
output = "%s (r%d): %s" % (self.__author, self.__rev, subject)
properties = OrgProperties(data_for_hashing=self.__author + subject)
timestamp = OrgFormat.date(
OrgFormat.parse_basic_iso_datetime(self.__date), show_time=True)
properties.add("REVISION", self.__rev)
        if self.__grepauthor is None or \
(self.__author.strip() == self.__grepauthor.strip()):
self._writer.write_org_subitem(output=output,
timestamp=timestamp,
note=notes,
properties=properties)
def characters(self, content):
"""
handles xml tags:
- <author/>
- <date/>
- <msg/>
and set those attributes
"""
logging.debug("Handler @characters @%s , content=%s",
self.__on_node_name, content)
if self.__on_node_name == "author":
self.__author += content
elif self.__on_node_name == "date":
self.__date += content
elif self.__on_node_name == "msg":
self.__msg += content
def startElement(self, name, attrs):
"""
at every <tag> remember the tagname
* sets the revision when in tag "logentry"
"""
logging.debug("Handler @startElement name=%s,attrs=%s", name, attrs)
if name == "logentry":
self.__rev = int(attrs['revision'])
self.__on_node_name = name
def endElement(self, name):
"""
at every </tag> clear the remembered tagname
        if we are at </logentry> then we can write an entry to the stream
"""
logging.debug("Handler @endElement name=%s", name)
self.__on_node_name = ""
if name == "logentry":
self.__write()
self.__reset()
class SvnMemacs(Memacs):
def _parser_add_arguments(self):
"""
overwritten method of class Memacs
add additional arguments
"""
Memacs._parser_add_arguments(self)
self._parser.add_argument(
"-f", "--file", dest="svnlogxmlfile",
action="store",
help="path to a an file which contains output from " + \
" following svn command: svn log --xml")
self._parser.add_argument(
"-g", "--grep-author", dest="grepauthor",
action="store",
help="if you wanna parse only messages from a specific person. " + \
"format:<author> of author to grep")
def _parser_parse_args(self):
"""
overwritten method of class Memacs
all additional arguments are parsed in here
"""
Memacs._parser_parse_args(self)
        if self._args.svnlogxmlfile and not \
                (os.path.exists(self._args.svnlogxmlfile) and \
                 os.access(self._args.svnlogxmlfile, os.R_OK)):
self._parser.error("input file not found or not readable")
def _main(self):
"""
        gets automatically called from Memacs class
        read the lines from the svn xml file, parse and write them to the org file
"""
# read file
if self._args.svnlogxmlfile:
logging.debug("using as %s input_stream", self._args.svnlogxmlfile)
data = CommonReader.get_data_from_file(self._args.svnlogxmlfile)
else:
logging.info("Using stdin as input_stream")
data = CommonReader.get_data_from_stdin()
try:
xml.sax.parseString(data.encode('utf-8'),
SvnSaxHandler(self._writer,
self._args.grepauthor))
except SAXParseException:
logging.error("No correct XML given")
sys.exit(1)
| 5,726 | Python | .py | 155 | 26.470968 | 79 | 0.540072 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,779 | sms.py | novoid_Memacs/memacs/sms.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2024-01-18 14:13:58 (klaus)>
import codecs ## Unicode conversion
import html.parser ## un-escaping HTML entities like emojis
import logging
import os
import sys
import tempfile ## create temporary files
import time
import xml.sax
from orgformat import OrgFormat
from xml.sax._exceptions import SAXParseException
from memacs.lib.contactparser import parse_org_contact_file
from memacs.lib.contactparser import parse_bbdb_file
from memacs.lib.memacs import Memacs
from memacs.lib.orgproperty import OrgProperties
from memacs.lib.reader import CommonReader
class SmsSaxHandler(xml.sax.handler.ContentHandler):
"""
Sax handler for sms backup xml files.
See documentation memacs_sms.org for an example.
"""
## from https://github.com/wooorm/emoji-emotion/blob/master/Support.md
EMOJIS = {
'\ud83d\udc7f':'imp',
'\ud83d\ude3e':'pouting_cat',
'\ud83d\ude21':'rage',
'\ud83d\ude20':'angry',
'\ud83d\ude27':'anguished',
'\ud83d\ude2d':'sob',
'\ud83d\ude31':'scream',
'\ud83d\ude40':'scream_cat',
'\ud83d\ude08':'smiling_imp',
'\ud83d\ude1f':'worried',
'\ud83d\ude3f':'crying_cat_face',
'\ud83d\ude15':'confused',
'\ud83d\ude16':'confounded',
'\ud83d\ude30':'cold_sweat',
'\ud83d\ude22':'cry',
'\ud83d\ude1e':'disappointed',
'\ud83d\ude33':'flushed',
'\ud83d\ude28':'fearful',
'\ud83d\ude2c':'grimacing',
'\ud83d\ude2e':'open_mouth',
'\ud83d\ude23':'persevere',
'\ud83d\ude2b':'tired_face',
'\ud83d\ude12':'unamused',
'\ud83d\ude29':'weary',
'\ud83d\ude35':'dizzy_face',
'\ud83d\ude25':'disappointed_relieved',
'\ud83d\ude26':'frowning',
'\ud83d\ude01':'grin',
'\ud83d\ude2f':'hushed',
'\ud83d\ude37':'mask',
'\ud83d\ude14':'pensive',
'\ud83d\ude13':'sweat',
'\ud83d\ude1c':'stuck_out_tongue_winking_eye',
'\ud83d\ude11':'expressionless',
'\ud83d\ude36':'no_mouth',
'\ud83d\ude10':'neutral_face',
'\ud83d\ude34':'sleeping',
'\ud83d\ude1d':'stuck_out_tongue_closed_eyes',
'\ud83d\ude2a':'sleepy',
'\ud83d\ude06':'laughing; satisfied',
'\ud83d\ude0e':'sunglasses',
'\ud83d\ude1b':'stuck_out_tongue',
'\ud83d\ude32':'astonished',
'\ud83d\ude0a':'blush',
'\ud83d\ude00':'grinning',
'\ud83d\ude3d':'kissing_cat',
'\ud83d\ude19':'kissing_smiling_eyes',
'\ud83d\ude17':'kissing',
'\ud83d\ude1a':'kissing_closed_eyes',
'\u263a\ufe0f':'relaxed',
'\ud83d\ude0c':'relieved',
'\ud83d\ude04':'smile',
'\ud83d\ude3c':'smirk_cat',
'\ud83d\ude38':'smile_cat',
'\ud83d\ude03':'smiley',
'\ud83d\ude3a':'smiley_cat',
'\ud83d\ude05':'sweat_smile',
'\ud83d\ude0f':'smirk',
'\ud83d\ude3b':'heart_eyes_cat',
'\ud83d\ude0d':'heart_eyes',
'\ud83d\ude07':'innocent',
'\ud83d\ude02':'joy',
'\ud83d\ude39':'joy_cat',
'\ud83d\ude18':'kissing_heart',
'\ud83d\ude09':'wink',
'\ud83d\ude0b':'yum',
'\ud83d\ude24':'triumph'}
EMOJI_ENCLOSING_CHARACTER = "~" ## character which encloses emojis found ~wink~
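    ## Example (assumed): an SMS body containing U+1F609 (winking face) ends up
    ## as "~wink~" in the Org output after the replacement loop in startElement.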
def __init__(self, writer, ignore_incoming, ignore_outgoing, numberdict):
"""
Ctor
@param writer: orgwriter
@param ignore_incoming: ignore incoming smses
"""
self._writer = writer
self._ignore_incoming = ignore_incoming
self._ignore_outgoing = ignore_outgoing
self._numberdict = numberdict
def startElement(self, name, attrs):
"""
at every <sms> tag write to orgfile
"""
logging.debug("Handler @startElement name=%s,attrs=%s", name, attrs)
if name == "sms":
sms_subject = attrs.get('subject','')
sms_date = int(attrs['date']) / 1000 # unix epoch
sms_body = attrs['body']
sms_address = attrs['address'].strip().replace('-','').replace('/','').replace(' ','').replace('+','00')
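            # Example (assumed): an address of '+43 664/1234-567' normalizes
            # to '00436641234567' after the replacements above.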
sms_type_incoming = int(attrs['type']) == 1
contact_name = False
if 'contact_name' in attrs:
## NOTE: older version of backup app did not insert contact_name into XML
contact_name = attrs['contact_name']
else:
if self._numberdict:
if sms_address in list(self._numberdict.keys()):
contact_name = self._numberdict[sms_address]
skip = False
            if sms_type_incoming:
output = "SMS from "
if self._ignore_incoming:
skip = True
else:
output = "SMS to "
if self._ignore_outgoing:
skip = True
if not skip:
name_string = ""
if contact_name:
name_string = '[[contact:' + contact_name + '][' + contact_name + ']]'
else:
name_string = "Unknown"
output += name_string + ": "
## reverse encoding hack from just before:
sms_body = html.unescape(sms_body.replace('EnCoDiNgHaCk42', '&#'))
for emoji in list(self.EMOJIS.keys()):
                    ## FIXXME: this is a horrible dumb brute-force algorithm.
                    ## In case of bad performance, this can be optimized dramatically
                    sms_body = sms_body.replace(emoji, self.EMOJI_ENCLOSING_CHARACTER + \
                                self.EMOJIS[emoji] + self.EMOJI_ENCLOSING_CHARACTER).replace('\n', '⏎')
if sms_subject != "null":
# in case of MMS we have a subject
output += sms_subject
notes = sms_body
else:
output += sms_body
notes = ""
timestamp = OrgFormat.date(time.gmtime(sms_date), show_time=True)
data_for_hashing = output + timestamp + notes
properties = OrgProperties(data_for_hashing=data_for_hashing)
properties.add("NUMBER", sms_address)
properties.add("NAME", contact_name)
self._writer.write_org_subitem(output=output,
timestamp=timestamp,
note=notes,
properties=properties)
class SmsMemacs(Memacs):
_numberdict = False
def _parser_add_arguments(self):
"""
overwritten method of class Memacs
add additional arguments
"""
Memacs._parser_add_arguments(self)
self._parser.add_argument(
"-f", "--file", dest="smsxmlfile",
action="store", required=True,
help="path to sms xml backup file")
self._parser.add_argument(
"--ignore-incoming", dest="ignore_incoming",
action="store_true",
help="ignore incoming smses")
self._parser.add_argument(
"--ignore-outgoing", dest="ignore_outgoing",
action="store_true",
help="ignore outgoing smses")
self._parser.add_argument(
"--orgcontactsfile", dest="orgcontactsfile",
action="store", required=False,
help="path to Org-contacts file for phone number lookup. Phone numbers have to match.")
self._parser.add_argument(
"--bbdbfile", dest="bbdbfile",
action="store", required=False,
help="path to bbdb file for phone number lookup. Phone numbers have to match.")
def _parser_parse_args(self):
"""
overwritten method of class Memacs
all additional arguments are parsed in here
"""
Memacs._parser_parse_args(self)
        if not (os.path.exists(self._args.smsxmlfile) and \
                os.access(self._args.smsxmlfile, os.R_OK)):
self._parser.error("input file not found or not readable")
if self._args.orgcontactsfile:
            if not (os.path.exists(self._args.orgcontactsfile) and \
                    os.access(self._args.orgcontactsfile, os.R_OK)):
self._parser.error("Org-contacts file not found or not readable")
self._numberdict = parse_org_contact_file(self._args.orgcontactsfile)
elif self._args.bbdbfile:
            if not (os.path.exists(self._args.bbdbfile) and \
                    os.access(self._args.bbdbfile, os.R_OK)):
self._parser.error("bbdb file not found or not readable")
self._numberdict = parse_bbdb_file(self._args.bbdbfile)
def _main(self):
"""
        gets automatically called from Memacs class
        read the lines from the sms backup xml file,
        parse and write them to the org file
"""
## replace HTML entities "&#" in original file to prevent XML parser from worrying:
temp_xml_file = tempfile.mkstemp()[1]
line_number = 0
logging.debug("tempfile [%s]", str(temp_xml_file))
with codecs.open(temp_xml_file, 'w', encoding='utf-8') as outputhandle:
            for line in codecs.open(self._args.smsxmlfile, 'r', encoding='utf-8'):
                line_number += 1
                try:
                    ## NOTE: this is a dirty hack to prevent the XML parser from complaining about
                    ## encoding issues of UTF-8 encoded emojis. Will be reverted when parsing sms_body
outputhandle.write(line.replace('&#', 'EnCoDiNgHaCk42') + '\n')
except IOError as e:
print("tempfile line " + str(line_number) + " [" + str(temp_xml_file) + "]")
print("I/O error({0}): {1}".format(e.errno, e.strerror))
except ValueError as e:
print("tempfile line " + str(line_number) + " [" + str(temp_xml_file) + "]")
print("Value error: {0}".format(e))
#print "line [%s]" % str(line)
except:
print("tempfile line " + str(line_number) + " [" + str(temp_xml_file) + "]")
print("Unexpected error:", sys.exc_info()[0])
raise
data = CommonReader.get_data_from_file(temp_xml_file)
try:
xml.sax.parseString(data.encode('utf-8'),
SmsSaxHandler(self._writer,
self._args.ignore_incoming,
self._args.ignore_outgoing,
self._numberdict))
except SAXParseException:
logging.error("No correct XML given")
sys.exit(1)
else:
os.remove(temp_xml_file)
| 11,154 | Python | .py | 248 | 32.604839 | 121 | 0.547948 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,780 | whatsapp.py | novoid_Memacs/memacs/whatsapp.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2019-11-06 15:29:12 vk>
import datetime
import json
import logging
import os
import re
import sqlite3
import emoji
from orgformat import OrgFormat
from memacs.lib.contactparser import parse_org_contact_file
from memacs.lib.memacs import Memacs
from memacs.lib.orgproperty import OrgProperties
class WhatsApp(Memacs):
def _parser_add_arguments(self):
"""
overwritten method of class Memacs
add additional arguments
"""
Memacs._parser_add_arguments(self)
self._parser.add_argument(
"-f", "--file", dest="msgstore",
action="store", type=open, required=True,
help="path to decrypted msgstore.db file")
self._parser.add_argument(
"--ignore-incoming", dest="ignore_incoming",
action="store_true", help="ignore received messages")
self._parser.add_argument(
"--ignore-outgoing", dest="ignore_outgoing",
action="store_true", help="ignore sent messages")
self._parser.add_argument(
"--ignore-groups", dest="ignore_groups",
action="store_true",help="ignore group messages")
self._parser.add_argument(
"--output-format", dest="output_format",
action="store", default="{verb} [[{handler}:{name}][{name}]]: {text}",
help="format string to use for the headline")
self._parser.add_argument(
"--set-handler", dest="handler",
action="store", default="tel",
help="set link handler")
self._parser.add_argument(
"--demojize", dest="demojize",
action="store_true", help="replace emoji with the appropriate :shortcode:")
self._parser.add_argument(
"--skip-emoji", dest="skip_emoji",
action="store_true", help="skip all emoji")
self._parser.add_argument(
"--orgcontactsfile", dest="orgcontactsfile",
action="store", required=False,
help="path to Org-contacts file for phone number lookup. Phone numbers have to match.")
def _parser_parse_args(self):
"""
overwritten method of class Memacs
all additional arguments are parsed in here
"""
Memacs._parser_parse_args(self)
if self._args.orgcontactsfile:
            if not (os.path.exists(self._args.orgcontactsfile) and \
                    os.access(self._args.orgcontactsfile, os.R_OK)):
self._parser.error("Org-contacts file not found or not readable")
self._numberdict = parse_org_contact_file(self._args.orgcontactsfile)
else:
self._numberdict = {}
def _is_ignored(self, msg):
"""check for ignored message type"""
        if msg['type'] == 'INCOMING' and self._args.ignore_incoming:
            return True
        if msg['type'] == 'OUTGOING' and self._args.ignore_outgoing:
            return True
group_message_regex = r'-[0-9]{10}'
if self._args.ignore_groups and re.findall(group_message_regex, msg['number']):
return True
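        # Group JIDs (assumed) look like '<creator>-<timestamp>@g.us'; after the
        # normalization in _handle_message, the '-<10 digits>' suffix is what the
        # regex above detects.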
def _handle_message(self, msg):
"""parse a single message row"""
msg['number'] = '00' + msg['number'].split('@')[0]
msg['name'] = self._numberdict.get(msg['number'],msg['number'])
msg['verb'] = 'to' if msg['type'] else 'from'
msg['type'] = 'OUTGOING' if msg['type'] else 'INCOMING'
msg['handler'] = self._args.handler
if msg['text']:
if self._args.demojize:
msg['text'] = emoji.demojize(msg['text'])
if self._args.skip_emoji:
msg['text'] = re.sub(emoji.get_emoji_regexp(), '', msg['text'])
timestamp = datetime.datetime.fromtimestamp(msg['timestamp'] / 1000)
properties = OrgProperties(data_for_hashing=json.dumps(msg))
properties.add('NUMBER', msg['number'])
properties.add('TYPE', msg['type'])
output = self._args.output_format.format(**msg)
if msg['text'] and not self._is_ignored(msg):
self._writer.write_org_subitem(timestamp=OrgFormat.date(timestamp, show_time=True),
output=output, properties=properties)
def _main(self):
"""
        gets automatically called from Memacs class
"""
conn = sqlite3.connect(os.path.abspath(self._args.msgstore.name))
query = conn.execute('SELECT * FROM messages')
for row in query:
self._handle_message({
'timestamp': row[7],
'number': row[1],
'type': row[2],
'text': row[6]
})
logging.debug(row)
| 4,774 | Python | .py | 108 | 33.944444 | 99 | 0.591921 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,781 | phonecalls.py | novoid_Memacs/memacs/phonecalls.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2019-11-06 15:26:05 vk>
import datetime
import logging
import os
import sys
import time
import xml.sax
from orgformat import OrgFormat
from xml.sax._exceptions import SAXParseException
from memacs.lib.memacs import Memacs
from memacs.lib.orgproperty import OrgProperties
from memacs.lib.reader import CommonReader
#import pdb
class PhonecallsSaxHandler(xml.sax.handler.ContentHandler):
"""
Sax handler for following xml's:
2013-04-10: update: contact_name is also recognized
<?xml version='1.0' encoding='UTF-8' standalone='yes' ?>
<calls count="8">
<call number="+43691234123" duration="59" date="13193906092" type="1" />
<call number="06612341234" duration="22" date="131254215834" type="2" />
<call number="-1" duration="382" date="1312530691081" type="1" />
<call number="+4312341234" duration="289" date="13124327195" type="1" />
<call number="+4366412341234" duration="70" date="136334059" type="1" />
<call number="+4366234123" duration="0" date="1312473751975" type="2" />
<call number="+436612341234" duration="0" date="12471300072" type="3" />
<call number="+433123412" duration="60" date="1312468562489" type="2" />
</calls>"""
def __init__(self,
writer,
ignore_incoming,
ignore_outgoing,
ignore_missed,
ignore_cancelled,
minimum_duration
):
"""
Ctor
@param writer: orgwriter
@param ignore_incoming: ignore incoming phonecalls
@param ignore_outgoing: ignore outgoing phonecalls
@param ignore_missed: ignore missed phonecalls
@param ignore_cancelled: ignore cancelled phonecalls
@param minimum_duration: ignore phonecalls less than that time
"""
self._writer = writer
self._ignore_incoming = ignore_incoming
self._ignore_outgoing = ignore_outgoing
self._ignore_missed = ignore_missed
self._ignore_cancelled = ignore_cancelled
self._minimum_duration = minimum_duration
def startElement(self, name, attrs):
"""
at every <call> write to orgfile
"""
logging.debug("Handler @startElement name=%s,attrs=%s", name, attrs)
if name == "call":
call_number = attrs['number']
call_duration = int(attrs['duration'])
call_date = int(attrs['date']) / 1000 # unix epoch
call_type = int(attrs['type'])
call_incoming = call_type == 1
call_outgoing = call_type == 2
call_missed = call_type == 3
call_cancelled = call_type == 5
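            # Call type codes in this backup format (assumed):
            # 1=incoming, 2=outgoing, 3=missed, 5=cancelled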
call_name = call_number
if 'contact_name' in attrs:
## NOTE: older version of backup app did not insert contact_name into XML
call_name = attrs['contact_name']
output = "Phonecall "
skip = False
if call_incoming:
output += "from "
if self._ignore_incoming:
skip = True
elif call_outgoing:
output += "to "
if self._ignore_outgoing:
skip = True
elif call_missed:
output += "missed "
if self._ignore_missed:
skip = True
elif call_cancelled:
output += "cancelled "
if self._ignore_cancelled:
skip = True
else:
raise Exception("Invalid Phonecall Type: %d", call_type)
call_number_string = ""
if call_number != "-1":
call_number_string = call_number
else:
call_number_string = "Unknown Number"
name_string = ""
if call_name != "(Unknown)":
name_string = '[[contact:' + call_name + '][' + call_name + ']]'
else:
name_string = "Unknown"
output += name_string
if call_duration < self._minimum_duration:
skip = True
timestamp = OrgFormat.date(time.gmtime(call_date), show_time=True)
end_datetimestamp = datetime.datetime.utcfromtimestamp(call_date + call_duration)
logging.debug("timestamp[%s] duration[%s] end[%s]" %
(str(timestamp), str(call_duration), str(end_datetimestamp)))
end_timestamp_string = OrgFormat.date(end_datetimestamp, show_time=True)
logging.debug("end_time [%s]" % end_timestamp_string)
data_for_hashing = output + timestamp
properties = OrgProperties(data_for_hashing=data_for_hashing)
properties.add("NUMBER", call_number_string)
properties.add("DURATION", call_duration)
properties.add("NAME", call_name)
if not skip:
self._writer.write_org_subitem(output=output,
timestamp=timestamp + '-' + end_timestamp_string,
properties=properties
)
class PhonecallsMemacs(Memacs):
def _parser_add_arguments(self):
"""
overwritten method of class Memacs
add additional arguments
"""
Memacs._parser_add_arguments(self)
self._parser.add_argument(
"-f", "--file", dest="smsxmlfile",
action="store", required=True,
help="path to sms xml backup file")
self._parser.add_argument(
"--ignore-incoming", dest="ignore_incoming",
action="store_true",
help="ignore incoming phonecalls")
self._parser.add_argument(
"--ignore-outgoing", dest="ignore_outgoing",
action="store_true",
help="ignore outgoing phonecalls")
self._parser.add_argument(
"--ignore-missed", dest="ignore_missed",
action="store_true",
help="ignore outgoing phonecalls")
self._parser.add_argument(
"--ignore-cancelled", dest="ignore_cancelled",
action="store_true",
help="ignore cancelled phonecalls")
self._parser.add_argument(
"--minimum-duration", dest="minimum_duration",
action="store", type=int,
help="[sec] show only calls with duration >= this argument")
def _parser_parse_args(self):
"""
overwritten method of class Memacs
all additional arguments are parsed in here
"""
Memacs._parser_parse_args(self)
        if not (os.path.exists(self._args.smsxmlfile) and \
                os.access(self._args.smsxmlfile, os.R_OK)):
self._parser.error("input file not found or not readable")
def _main(self):
"""
gets called automatically from Memacs class.
read the lines from phonecalls backup xml file,
parse and write them to org file
"""
data = CommonReader.get_data_from_file(self._args.smsxmlfile)
try:
xml.sax.parseString(data.encode('utf-8'),
PhonecallsSaxHandler(self._writer,
self._args.ignore_incoming,
self._args.ignore_outgoing,
self._args.ignore_missed,
self._args.ignore_cancelled,
self._args.minimum_duration or 0,
))
except SAXParseException:
logging.error("No correct XML given")
sys.exit(1)
| 7,892 | Python | .py | 179 | 31.044693 | 96 | 0.55318 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,782 | rss.py | novoid_Memacs/memacs/rss.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2019-11-06 15:27:12 vk>
import calendar
import logging
import os
import re
import sys
import time
import feedparser
from orgformat import OrgFormat
from memacs.lib.memacs import Memacs
from memacs.lib.orgproperty import OrgProperties
from memacs.lib.reader import CommonReader
class RssMemacs(Memacs):
def _parser_add_arguments(self):
"""
overwritten method of class Memacs
add additional arguments
"""
Memacs._parser_add_arguments(self)
self._parser.add_argument(
"-u", "--url", dest="url",
action="store",
help="url to a rss file")
self._parser.add_argument(
"-f", "--file", dest="file",
action="store",
help="path to rss file")
def _parser_parse_args(self):
"""
overwritten method of class Memacs
all additional arguments are parsed in here
"""
Memacs._parser_parse_args(self)
if self._args.url and self._args.file:
self._parser.error("you cannot set both url and file")
if not self._args.url and not self._args.file:
self._parser.error("please specify a file or url")
        if self._args.file:
            if not os.path.exists(self._args.file):
                self._parser.error("file %s not found" % self._args.file)
            if not os.access(self._args.file, os.R_OK):
                self._parser.error("file %s not readable" % self._args.file)
def __get_item_data(self, item):
"""
gets information out of <item>..</item>
@return: output, note, properties, tags
variables for orgwriter.append_org_subitem
"""
try:
# logging.debug(item)
properties = OrgProperties()
guid = item['id']
if not guid:
logging.error("got no id")
unformatted_link = item['link']
short_link = OrgFormat.link(unformatted_link, "link")
# if we found a url in title
# then append the url in front of subject
if re.search("http[s]?://", item['title']) is not None:
output = short_link + ": " + item['title']
else:
output = OrgFormat.link(unformatted_link, item['title'])
note = item['description']
# converting updated_parsed UTC --> LOCALTIME
# Karl 2018-09-22 this might be changed due to:
# DeprecationWarning: To avoid breaking existing software
# while fixing issue 310, a temporary mapping has been
# created from `updated_parsed` to `published_parsed` if
# `updated_parsed` doesn't exist. This fallback will be
# removed in a future version of feedparser.
timestamp = OrgFormat.date(
time.localtime(calendar.timegm(item['updated_parsed'])), show_time=True)
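            # Example (assumed): an item updated 2018-09-22T10:00:00Z becomes
            # <2018-09-22 Sat 12:00> when the local timezone is CEST (UTC+2).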
properties.add("guid", guid)
except KeyError:
logging.error("input is not a RSS 2.0")
sys.exit(1)
tags = []
# Karl 2018-09-22 this might be changed due to:
# DeprecationWarning: To avoid breaking existing software
# while fixing issue 310, a temporary mapping has been created
# from `updated_parsed` to `published_parsed` if
# `updated_parsed` doesn't exist. This fallback will be
# removed in a future version of feedparser.
dont_parse = ['title', 'description', 'updated', 'summary',
'updated_parsed', 'link', 'links']
for i in item:
logging.debug(i)
if i not in dont_parse:
                if isinstance(i, str) and \
                   isinstance(item[i], str) and item[i] != "":
if i == "id":
i = "guid"
properties.add(i, item[i])
else:
if i == "tags":
for tag in item[i]:
logging.debug("found tag: %s", tag['term'])
tags.append(tag['term'])
return output, note, properties, tags, timestamp
def _main(self):
"""
        gets automatically called from Memacs class
"""
# getting data
if self._args.file:
data = CommonReader.get_data_from_file(self._args.file)
elif self._args.url:
data = CommonReader.get_data_from_url(self._args.url)
rss = feedparser.parse(data)
logging.info("title: %s", rss['feed']['title'])
logging.info("there are: %d entries", len(rss.entries))
for item in rss.entries:
logging.debug(item)
output, note, properties, tags, timestamp = \
self.__get_item_data(item)
self._writer.write_org_subitem(output=output,
timestamp=timestamp,
note=note,
properties=properties,
tags=tags)
| 5,192 | Python | .py | 122 | 30.327869 | 88 | 0.549058 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,783 | kodi.py | novoid_Memacs/memacs/kodi.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import json
import logging
import sys
import time
from itertools import tee, islice, chain
from orgformat import OrgFormat
from memacs.lib.orgproperty import OrgProperties
from memacs.lib.reader import UnicodeDictReader
from .csv import Csv
# stolen from https://stackoverflow.com/questions/1011938/python-previous-and-next-values-inside-a-loop/1012089#1012089
def previous_current_next(some_iterable):
prevs, items, nexts = tee(some_iterable, 3)
prevs = chain([None], prevs)
nexts = chain(islice(nexts, 1, None), [None])
return zip(prevs, items, nexts)
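# Example: previous_current_next([1, 2, 3]) yields
# (None, 1, 2), (1, 2, 3), (2, 3, None)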
class Kodi(Csv):
def _parser_add_arguments(self):
"""
overwritten method of class Memacs
add additional arguments
"""
super()._parser_add_arguments()
self._parser.add_argument(
'--action-field',
dest="action_field",
required=True,
action='store',
help='field name of the action (start/paused,stopped)',
type=str.lower)
self._parser.add_argument(
'--identification-fields',
dest="identification_fields",
required=True,
action='store',
help='field names to uniquely identify one track e.g. title,artist',
type=str.lower)
self._parser.add_argument(
'--minimal-pause-duration',
dest='minimal_pause_duration',
required=False,
action='store',
default=0,
help=
'minimal duration in seconds of a pause to be logged as a pause instead of being ignored',
type=int,
)
self._parser.add_argument(
'--start-actions',
dest='start_actions',
required=False,
action='store',
default='started,resumed',
help=
            'comma separated action commands when track is started (default started,resumed)'
)
self._parser.add_argument(
'--stop-actions',
dest='stop_actions',
required=False,
action='store',
default='stopped,paused',
help=
            'comma separated action commands when track is stopped/paused (default stopped,paused)'
)
def _parser_parse_args(self):
"""
overwritten method of class Memacs
all additional arguments are parsed in here
"""
super()._parser_parse_args()
self._args.stop_actions = [
name.strip() for name in self._args.stop_actions.split(',')
]
self._args.start_actions = [
name.strip() for name in self._args.start_actions.split(',')
]
if self._args.identification_fields:
self._args.identification_fields = [
name.strip()
for name in self._args.identification_fields.split(',')
]
def read_timestamp(self, row):
if not self._args.timestamp_format:
timestamp = datetime.datetime.fromtimestamp(
int(row[self._args.timestamp_field]))
else:
timestamp = time.strptime(row[self._args.timestamp_field],
self._args.timestamp_format)
return timestamp
def format_timestamp(self, timestamp):
# show time with the timestamp format, but only
# if it contains at least hours and minutes
show_time = not self._args.timestamp_format or \
any(x in self._args.timestamp_format for x in ['%H', '%M'])
timestamp = OrgFormat.date(timestamp,show_time=show_time)
return timestamp
    def read_properties(self, row):
        properties = OrgProperties(data_for_hashing=json.dumps(row))
        if self._args.properties:
            for prop in self._args.properties.split(','):
                properties.add(prop.upper().strip(), row[prop])
        return properties
def write_one_track(self, row, start_time, stop_time):
properties = self.read_properties(row)
output = self._args.output_format.format(**row)
self._writer.write_org_subitem(
timestamp=self.format_timestamp(start_time) + '--' +
self.format_timestamp(stop_time),
output=output,
properties=properties)
def tracks_are_identical(self, row1, row2):
for field in self._args.identification_fields:
if row1[field] != row2[field]:
return False
return True
def track_is_paused(self, row, next_row):
return next_row and self.tracks_are_identical(row, next_row) and (
self.read_timestamp(next_row) - self.read_timestamp(row)
).total_seconds() < self._args.minimal_pause_duration
def read_log(self, reader):
"""goes through rows and searches for start/stop actions"""
start_time, stop_time = None, None
for prev_row, row, next_row in previous_current_next(reader):
timestamp = self.read_timestamp(row)
action = row[self._args.action_field]
if action in self._args.start_actions:
if not start_time:
start_time = timestamp
elif prev_row and not self.track_is_paused(prev_row, row):
self.write_one_track(prev_row, start_time, timestamp)
start_time = timestamp
elif action in self._args.stop_actions and start_time:
if not self.track_is_paused(row, next_row):
stop_time = timestamp
else:
stop_time = None
if start_time and stop_time:
if self.tracks_are_identical(row, prev_row):
self.write_one_track(row, start_time, stop_time)
start_time, stop_time = None, None
def _main(self):
"""
        gets called automatically from the Memacs class
"""
with self._args.csvfile as f:
try:
reader = UnicodeDictReader(f, self._args.delimiter,
self._args.encoding,
self._args.fieldnames)
if self._args.skip_header:
next(reader)
self.read_log(reader)
except TypeError as e:
logging.error("not enough fieldnames or wrong delimiter given")
logging.debug("Error: %s" % e)
sys.exit(1)
            except UnicodeDecodeError:
                logging.error(
                    "could not decode file as utf-8, please specify the input encoding"
                )
                sys.exit(1)
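# A minimal usage sketch (an illustrative assumption, not part of the original
# file): the file name and the column names "time", "action", "title" and
# "artist" are hypothetical and would have to match the actual Kodi export.
#
#   memacs = Kodi(argv=[
#       '-f', 'kodi-log.csv',
#       '--timestamp-field', 'time',
#       '--action-field', 'action',
#       '--identification-fields', 'title,artist',
#       '--output-format', '{title} by {artist}',
#   ])
#   memacs.handle_main()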
| 6,858 | Python | .py | 165 | 29.927273 | 119 | 0.578655 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,784 | argparser.py | novoid_Memacs/memacs/lib/argparser.py |
# -*- coding: utf-8 -*-
# Time-stamp: <2014-01-28 16:17:20 vk>
import os
import re
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
class MemacsArgumentParser(ArgumentParser):
"""
Inherited from Argumentparser
MemacsArgumentParser handles default arguments which are needed for every
Memacs module and gives a nicer output for help message.
"""
def __init__(self,
prog_version,
prog_version_date,
prog_description,
copyright_year,
copyright_authors,
use_config_parser_name=""
):
self.__version = "%(prog)s v" + prog_version + " from " + \
prog_version_date
# format copyright authors:
# indent from second author
copyright_authors = copyright_authors.splitlines()
for i in range(len(copyright_authors)):
copyright_authors[i] = " " + copyright_authors[i]
copyright_authors = "\n".join(map(str, copyright_authors))
epilog = ":copyright: (c) " + copyright_year + " by \n" + \
copyright_authors + \
"\n:license: GPL v2 or any later version\n" + \
":bugreports: https://github.com/novoid/Memacs\n" + \
":version: " + prog_version + " from " + prog_version_date + "\n"
self.__use_config_parser_name = use_config_parser_name
ArgumentParser.__init__(self,
description=prog_description,
add_help=True,
epilog=epilog,
formatter_class=RawDescriptionHelpFormatter
)
self.__add_arguments()
def __add_arguments(self):
"""
        Adds all standard arguments of a Memacs module
"""
self.add_argument('--version',
action='version',
version=self.__version)
self.add_argument("-v", "--verbose",
dest="verbose",
action="store_true",
help="enable verbose mode")
self.add_argument("-s", "--suppress-messages",
dest="suppressmessages",
action="store_true",
help="do not show any log message " + \
"- helpful when -o not set")
self.add_argument("-o", "--output",
dest="outputfile",
help="Org-mode file that will be generated " + \
" (see above). If no output file is given, " + \
"result gets printed to stdout",
metavar="FILE")
self.add_argument("-a", "--append",
dest="append",
help="""when set and outputfile exists, then
                          only new entries are appended.
criterion: :ID: property""",
action="store_true")
self.add_argument("-t", "--tag",
dest="tag",
help="overriding tag: :Memacs:<tag>: (on top entry)")
self.add_argument("--autotagfile",
dest="autotagfile",
help="file containing autotag information, see " + \
"doc file FAQs_and_Best_Practices.org",
metavar="FILE")
self.add_argument("--number-entries",
dest="number_entries",
help="how many entries should be written?",
type=int)
self.add_argument("--columns-header",
dest="columns_header",
help="if you want to add an #+COLUMNS header, please specify " + \
"its content as STRING",
metavar="STRING")
self.add_argument("--custom-header",
dest="custom_header",
help="if you want to add an arbitrary header line, please specify " + \
"its content as STRING",
metavar="STRING")
self.add_argument("--add-to-time-stamps",
dest="timestamp_delta",
help="if data is off by, e.g., two hours, you can specify \"+2\" " + \
"or \"-2\" here to correct it with plus/minus two hours",
metavar="STRING")
self.add_argument("--inactive-time-stamps",
dest="inactive_timestamps",
help="""inactive time-stamps are written to the output file
instead of active time-stamps. Helps to move modules with many entries
to the inactive layer of the agenda.""",
action="store_true")
# ---------------------
# Config parser
# ---------------------
if self.__use_config_parser_name:
self.add_argument("-c", "--config",
dest="configfile",
help="path to config file",
metavar="FILE")
def parse_args(self, args=None, namespace=None):
"""
overwriting ArgParser's parse_args and
do checking default argument outputfile
"""
args = ArgumentParser.parse_args(self, args=args, namespace=namespace)
if args.outputfile:
if not os.path.exists(os.path.dirname(args.outputfile)):
self.error("Output file path(%s) does not exist!" %
args.outputfile)
if not os.access(os.path.dirname(args.outputfile), os.W_OK):
self.error("Output file %s is not writeable!" %
args.outputfile)
else:
if args.append:
self.error("cannot set append when no outputfile specified")
if args.suppressmessages == True and args.verbose == True:
self.error("cannot set both verbose and suppress-messages")
if args.autotagfile:
if not os.path.exists(os.path.dirname(args.autotagfile)):
self.error("Autotag file path(%s) doest not exist!" %
args.autotagfile)
if not os.access(args.autotagfile, os.R_OK):
self.error("Autotag file (%s) is not readable!" %
args.autotagfile)
        if args.timestamp_delta:
            timestamp_components = re.match(r'[+-]\d\d?', args.timestamp_delta)
            if not timestamp_components:
                self.error("format of \"--add-to-time-stamps\" is not recognized. Should be similar " + \
                           "to, e.g., \"+1\" or \"-3\".")
# ---------------------
# Config parser
# ---------------------
if self.__use_config_parser_name:
xdg = os.getenv('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
configdir = os.path.join(xdg, 'memacs')
            if os.path.isdir(configdir):
                for filename in os.listdir(configdir):
                    if self.__use_config_parser_name == os.path.splitext(filename)[0]:
                        args.configfile = os.path.join(configdir, filename)
                        break
if args.configfile:
if not os.path.exists(args.configfile):
self.error("Config file (%s) does not exist" %
args.configfile)
if not os.access(args.configfile, os.R_OK):
self.error("Config file (%s) is not readable!" %
args.configfile)
else:
self.error("please specify a config file")
return args
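# A minimal usage sketch (hedged; the metadata values below are made up):
if __name__ == '__main__':
    demo_parser = MemacsArgumentParser(
        prog_version="0.1",
        prog_version_date="2011-12-19",
        prog_description="demo description",
        copyright_year="2011",
        copyright_authors="Jane Doe <jane@example.org>")
    demo_args = demo_parser.parse_args(['-v'])
    print(demo_args.verbose)  # -> True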
| 8,041 | Python | .py | 161 | 32.57764 | 105 | 0.489936 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,785 | memacs.py | novoid_Memacs/memacs/lib/memacs.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2014-01-28 16:19:18 vk>
import logging
import sys
import traceback
from configparser import ConfigParser
from .argparser import MemacsArgumentParser
from .loggingsettings import handle_logging
from .orgwriter import OrgOutputWriter
class Memacs(object):
"""
Memacs class
With this class it is easier to make a Memacs module
because it handles common things like
* default arguments + parsing
- orgoutputfile
- version
- verbose
- suppress-messages
* set logging information
- write error logs to error.org if
orgfile is specified
use handle_main() to start Memacs
Testing:
* use test_get_all() for getting whole org output
* use test_get_entries() for getting only org entries
"""
def __init__(self,
prog_version="no version specified",
prog_version_date="no date specified",
prog_description="no description specified",
prog_short_description="no short-description specified",
prog_tag="no tag specified",
copyright_year="",
copyright_authors="",
use_config_parser_name="",
argv=sys.argv[1:]):
"""
Ctor
Please set Memacs information like version, description, ...
        set argv when you want to test the class
"""
self.__prog_version = prog_version
self.__prog_version_date = prog_version_date
self.__prog_description = prog_description
self.__prog_short_description = prog_short_description
self.__prog_tag = prog_tag
self.__writer_append = False
self.__copyright_year = copyright_year
self.__copyright_authors = copyright_authors
self.__use_config_parser_name = use_config_parser_name
self.__config_parser = None
self.__argv = argv
def __init(self, test=False):
"""
        we use this method to initialize because exceptions could be
        thrown here; in __init__() we could not catch them
        see handle_main() to understand
@param test: used in test_get_all
"""
self._parser = MemacsArgumentParser(
prog_version=self.__prog_version,
prog_version_date=self.__prog_version_date,
prog_description=self.__prog_description,
copyright_year=self.__copyright_year,
copyright_authors=self.__copyright_authors,
use_config_parser_name=self.__use_config_parser_name)
        # adding additional arguments from our subclass
self._parser_add_arguments()
# parse all arguments
self._parser_parse_args()
# set logging configuration
handle_logging(self._args.__dict__,
self._args.verbose,
self._args.suppressmessages,
self._args.outputfile,
)
        # for testing purposes it's good to see which args are specified
logging.debug("args specified:")
logging.debug(self._args)
        # if a tag is specified as an argument, take that tag
if self._args.tag:
tag = self._args.tag
else:
tag = self.__prog_tag
#
if self.__use_config_parser_name != "":
self.__config_parser = ConfigParser()
self.__config_parser.read(self._args.configfile)
logging.debug("cfg: %s",
self.__config_parser.items(
self.__use_config_parser_name))
# handling autotagging
autotag_dict = self.__handle_autotagfile()
## collect additional header lines:
additional_headerlines = False
if self._args.columns_header:
additional_headerlines = '#+COLUMNS: ' + self._args.columns_header
if self._args.custom_header:
additional_headerlines = self._args.custom_header
# set up orgoutputwriter
self._writer = OrgOutputWriter(
file_name=self._args.outputfile,
short_description=self.__prog_short_description,
tag=tag,
test=test,
append=self._args.append,
autotag_dict=autotag_dict,
number_entries=self._args.number_entries,
additional_headerlines = additional_headerlines,
timestamp_delta=self._args.timestamp_delta,
inactive_timestamps=self._args.inactive_timestamps)
def _get_config_option(self, option):
"""
@return: value of the option of configfile
"""
if self.__config_parser:
ret = self.__config_parser.get(self.__use_config_parser_name,
option)
return ret
else:
raise Exception("no config parser specified, cannot get option")
def _main(self):
"""
does nothing in this (super) class
this method should be overwritten by subclass
"""
pass
def _parser_add_arguments(self):
"""
does nothing in this (super) class,
In subclass we add arguments to the parser
"""
pass
def _parser_parse_args(self):
"""
Let's parse the default arguments
In subclass we have to do additional
parsing on (the additional) arguments
"""
self._args = self._parser.parse_args(self.__argv)
def __get_writer_data(self):
"""
@return org_file_data (only when on testing)
"""
return self._writer.get_test_result()
def handle_main(self):
"""
this should be called instead of main()
With this method we can catch exceptions
        and log them as errors
        logging.error makes an org-agenda entry too if an
        outputfile was specified :)
"""
try:
self.__init()
self._main()
self._writer.close()
except KeyboardInterrupt:
logging.info("Received KeyboardInterrupt")
        except SystemExit as e:
            # if we get a sys.exit(), do exit!
            sys.exit(e)
        except:
            error_lines = traceback.format_exc().splitlines()
            logging.error("\n ".join(map(str, error_lines)))
            raise  # re-raise the exception
def test_get_all(self):
"""
Use this for Testing
        @return: whole org-file
"""
self.__init(test=True)
self._main()
self._writer.close()
return self.__get_writer_data()
def test_get_entries(self):
"""
Use this for Testing
        @return: org-file without header + footer (only entries)
"""
data = self.test_get_all()
ret_data = []
for d in data.splitlines():
if d[:2] != "* " and d[:1] != "#":
ret_data.append(d)
return ret_data
def __handle_autotagfile(self):
"""
read out the autotag file and generate a dict
@return - return autotag_dict
"""
autotag_dict = {}
if self._args.autotagfile:
cfgp = ConfigParser()
cfgp.read(self._args.autotagfile)
if "autotag" not in cfgp.sections():
logging.error("autotag file contains no section [autotag]")
sys.exit(1)
for item in cfgp.items("autotag"):
tag = item[0]
values = item[1].split(",")
values = [x.strip() for x in values]
autotag_dict[tag] = values
return autotag_dict
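# A minimal subclassing sketch (hedged; the module name and argument values
# are illustrative). A Memacs module typically overrides
# _parser_add_arguments(), _parser_parse_args() and _main(), and uses
# handle_main() as its entry point:
#
#   from .orgproperty import OrgProperties
#
#   class MyModule(Memacs):
#       def _main(self):
#           self._writer.write_org_subitem(
#               timestamp=False,
#               output="hello from MyModule",
#               properties=OrgProperties("data for hashing"))
#
#   MyModule(prog_tag="mymodule", argv=['-s']).handle_main()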
| 7,797 | Python | .py | 207 | 27.188406 | 78 | 0.575874 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,786 | contactparser.py | novoid_Memacs/memacs/lib/contactparser.py |
import logging
import re
from bbdb.database import BBDB
def sanitize_phonenumber(phonenumber):
"""
Convert phonenumber to digits only
"""
sanitized_phonenumber = phonenumber.strip().replace('-','').replace('/','').replace(' ','').replace('+','00')
return sanitized_phonenumber
def parse_org_contact_file(orgfile):
"""
Parses the given Org-mode file for contact entries.
    The return format is as follows:
    numbers = {'004369912345678':'First2 Last1', '0316987654':'First2 Last2', ...}
    @param orgfile: file name of an Org-mode file to parse
    @return: dict mapping phone numbers to contact names
"""
linenr = 0
## defining distinct parsing status states:
headersearch = 21
propertysearch = 42
inproperty = 73
status = headersearch
contacts = {}
current_name = ''
    HEADER_REGEX = re.compile(r'^(\*+)\s+([^\s:]([^:]*)[^\s:])(\s+(:[^\s]+:)+)?')
    PHONE = r'\s+([\+\d\-/ ]{7,})$'
    PHONE_REGEX = re.compile(r':(PHONE|oldPHONE|MOBILE|oldMOBILE|HOMEPHONE|oldHOMEPHONE|WORKPHONE|oldWORKPHONE):' + PHONE)
for rawline in open(orgfile, 'r'):
line = rawline.strip() ## trailing and leading spaces are stupid
linenr += 1
header_components = re.match(HEADER_REGEX, line)
if header_components:
## in case of new header, make new currententry because previous one was not a contact header with a property
current_name = header_components.group(2)
status = propertysearch
continue
if status == headersearch:
## if there is something to do, it was done above when a new heading is found
continue
if status == propertysearch:
if line == ':PROPERTIES:':
status = inproperty
continue
elif status == inproperty:
phone_components = re.match(PHONE_REGEX, line)
if phone_components:
phonenumber = sanitize_phonenumber(phone_components.group(2))
contacts[phonenumber] = current_name
elif line == ':END:':
status = headersearch
continue
else:
## I must have mixed up status numbers or similar - should never be reached.
logging.error("Oops. Internal parser error: status \"%s\" unknown. The programmer is an idiot. Current contact entry might get lost due to recovering from that shock. (line number %s)" % (str(status), str(linenr)))
status = headersearch
continue
logging.info("found %s suitable contacts while parsing \"%s\"" % (str(len(contacts)), orgfile))
return contacts
def parse_bbdb_file(bbdbfile):
"""
Parses the given bbdb file for contact entries.
    The return format is as follows:
    numbers = {'004369912345678':'First2 Last1', '0316987654':'First2 Last2', ...}
    @param bbdbfile: file name of a BBDB file to parse
    @return: dict mapping phone numbers to contact names
"""
contacts = {}
bb = BBDB.fromfile(bbdbfile)
bd = bb.model_dump()
records = bd["records"]
for record in records:
if len(record["phone"]) > 0:
current_name = f"{record['firstname']} {record['lastname']}"
for number in record["phone"]:
phonenumber = sanitize_phonenumber(record["phone"][number])
contacts[phonenumber] = current_name
logging.info("found %s suitable contacts while parsing \"%s\"" % (str(len(contacts)), bbdbfile))
return contacts
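# A hedged usage sketch ("contacts.org" is a hypothetical file name):
#
#   contacts = parse_org_contact_file('contacts.org')
#   contacts.get('004369912345678')   # -> e.g. 'First2 Last1'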
| 3,597 | Python | .py | 79 | 37.278481 | 226 | 0.632846 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,787 | loggingsettings.py | novoid_Memacs/memacs/lib/loggingsettings.py |
# -*- coding: utf-8 -*-
# Time-stamp: <2012-05-30 18:19:27 armin>
import logging
import os
import sys
def handle_logging(args,
verbose=False,
suppressmessages=False,
org_file=""):
"""
    Handle/format logging regarding the boolean parameter verbose
    @param args: dict of the parsed arguments (included in error log entries)
    @param verbose: enable debug-level logging
    @param suppressmessages: log errors only
    @param org_file: if set, errors are additionally logged to error.org
                     in the same directory
"""
if suppressmessages == True:
logging.basicConfig(level=logging.ERROR)
elif verbose:
FORMAT = "%(levelname)-8s %(asctime)-15s %(message)s"
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
else:
FORMAT = "%(message)s"
logging.basicConfig(level=logging.INFO, format=FORMAT)
if org_file:
if not os.path.exists(os.path.dirname(org_file)):
org_file = None
else:
org_error_file = os.path.dirname(org_file) + os.sep + \
"error.org"
memacs_module_filename = os.path.basename(sys.argv[0])
# add file logger
console = logging.FileHandler(org_error_file, 'a', 'utf-8', 0)
console.setLevel(logging.ERROR)
formatter = logging.Formatter(
'** %(asctime)s ' + memacs_module_filename + \
' had an %(levelname)s \n %(message)s \n' + \
' Arguments: ' + str(args) + '\n',
datefmt="<%Y-%m-%d %a %H:%M:%S +1d>")
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
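# A minimal sketch of how a caller might wire this up (hedged; the argument
# dict is illustrative):
if __name__ == '__main__':
    handle_logging({'verbose': True}, verbose=True)
    logging.debug("now visible at DEBUG level")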
| 1,503 | Python | .py | 38 | 29.578947 | 74 | 0.568789 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,788 | orgwriter.py | novoid_Memacs/memacs/lib/orgwriter.py |
# -*- coding: utf-8 -*-
# Time-stamp: <2019-11-06 15:32:26 vk>
import codecs
import logging
import os
import re
import sys
import time
from orgformat import OrgFormat
from .orgproperty import OrgProperties
from .reader import CommonReader
class OrgOutputWriter(object):
"""
OrgOutputWriter is used especially for writing
org-mode entries
most notable function:
- write_org_subitem (see its comment)
"""
__handler = None
__test = False
def __init__(self,
short_description,
tag,
file_name=None,
test=False,
append=False,
autotag_dict={},
number_entries=None,
additional_headerlines=None,
timestamp_delta=None,
inactive_timestamps=False):
"""
@param file_name:
"""
self.__test = test
self.__test_data = ""
self.__append = append
self.__time = time.time()
self.__short_description = short_description
self.__tag = tag
self.__file_name = file_name
self.__existing_ids = []
self.__autotag_dict = autotag_dict
self.__number_entries = number_entries
self.__entries_count = 0
self.__lower_autotag_dict()
self.__additional_header_lines = additional_headerlines
self.__timestamp_delta = timestamp_delta
self.__inactive_timestamps = inactive_timestamps
if self.__timestamp_delta is not None:
logging.debug("orgwriter: timestamp_delta found: %s" , timestamp_delta)
if file_name:
if append and os.path.exists(file_name):
self.__handler = codecs.open(file_name, 'a', "utf-8")
self.__compute_existing_id_list()
else:
self.__handler = codecs.open(file_name, 'w', "utf-8")
self.__write_header()
else:
self.__write_header()
def get_test_result(self):
return self.__test_data
def write(self, output):
"""
Write "<output>"
"""
if self.__handler:
self.__handler.write(str(output))
else:
if self.__test:
self.__test_data += output
else:
                # keep end=' ' here (otherwise a newline would be printed)
print(output, end=' ')
def writeln(self, output=""):
"""
Write "<output>\n"
"""
self.write(str(output) + "\n")
def __write_header(self):
"""
Writes the header of the file
__init__() does call this function
"""
self.write_commentln("-*- coding: utf-8 mode: org -*-")
self.write_commentln(
"This file was generated by " + sys.argv[0] + \
". Any modification will be overwritten upon next invocation.")
self.write_commentln(
"To add this file to your list of org-agenda files, open the stub file" + \
" (file.org) not this file (file.org_archive) within emacs" + \
" and do following: M-x org-agenda-file-to-front")
if self.__additional_header_lines:
for line in self.__additional_header_lines.split('\n'):
self.writeln(line)
self.write_org_item(
self.__short_description + " :Memacs:" + self.__tag + ":")
def __write_footer(self):
"""
Writes the footer of the file including calling python script and time
Don't call this function - call instead function close(),
close() does call this function
"""
self.writeln("* successfully parsed " +\
str(self.__entries_count) + \
" entries by " + \
sys.argv[0] + " at " + \
OrgFormat.date(time.localtime(), show_time=True, inactive=True) + \
" in ~" + self.__time.strip() + ".")
def write_comment(self, output):
"""
Write output as comment: "## <output>"
"""
self.write("## " + output)
def write_commentln(self, output):
"""
Write output line as comment: "## <output>\n"
"""
self.write_comment(output + "\n")
def write_org_item(self, output):
"""
Writes an org item line.
i.e: * <output>\n
"""
self.writeln("* " + output)
def __write_org_subitem(self,
timestamp,
output,
note="",
properties=OrgProperties(),
tags=[]):
"""
internally called by write_org_subitem and __append_org_subitem
If properties is None, write only the header line
"""
output_tags = ""
if tags != []:
output_tags = "\t:" + ":".join(map(str, tags)) + ":"
output = output.lstrip()
if timestamp:
timestamp = timestamp.strip()
self.writeln("** " + timestamp + " " + output + output_tags)
else:
self.writeln("** " + output + output_tags)
if properties:
self.writeln(str(properties))
if self.__test:
self.write(properties.get_multiline_properties())
else:
self.writeln(properties.get_multiline_properties())
if note != "":
for n in note.splitlines():
self.writeln(" " + n)
def write_org_subitem(self,
timestamp,
output,
note="",
properties=OrgProperties(),
tags=None):
"""
Writes an org item line.
i.e:** <timestamp> <output> :<tags>:\n
:PROPERTIES:
<properties>
:ID: -generated id-
:END:
        if an argument -a or --append is given,
        then a decision regarding the :ID: is made whether the item has to be
        written to file
        @param timestamp: str/unicode or False (for no time-stamp)
        @param output: str/unicode
@param note: str/unicode
@param tags: list of tags
@param properties: OrgProperties object
"""
        assert not timestamp or isinstance(timestamp, str)
        assert isinstance(tags, list) or tags is None
        if self.__append:
            ## for the append mode, the ID within properties is mandatory:
            assert properties.__class__ == OrgProperties
        else:
            ## when properties is None, the whole drawer is omitted:
            assert properties.__class__ == OrgProperties or not properties
        assert isinstance(output, str)
        assert isinstance(note, str)
# count the entries we have written, if above our limit do not write
if self.__number_entries and \
self.__entries_count == self.__number_entries:
return
else:
self.__entries_count += 1
        if tags is None:
            tags = []
if self.__autotag_dict != {}:
self.__get_autotags(tags, output)
## fix time-stamps (if user wants to)
if timestamp and self.__timestamp_delta:
timestamp = OrgFormat.apply_timedelta_to_org_timestamp(
timestamp, float(self.__timestamp_delta)
)
## a bit of a hack to get inactive time-stamps:
## FIXXME: use OrgFormat method to generate inactive time-stamps in the first place and remove asserts
if timestamp and self.__inactive_timestamps and timestamp[0] == '<' and timestamp[-1] == '>':
# FIXXME: very stupid overwriting of active time-stamp
# with inactive one when inactive option is used and
# somehow an active time-stamp is delivered somehow:
timestamp = '[' + timestamp[1:-1] + ']'
if self.__append:
self.__append_org_subitem(timestamp,
output,
note,
properties,
tags)
else:
self.__write_org_subitem(timestamp,
output,
note,
properties,
tags)
def __append_org_subitem(self,
timestamp,
output,
note="",
properties=OrgProperties(),
tags=[]):
"""
Checks if subitem exists in orgfile (:ID: <id> is same),
if not, it will be appended
"""
identifier = properties.get_id()
        if identifier is None:
            raise Exception("id :ID: Property not set!")
if self.__id_exists(identifier):
# do nothing, id exists ...
logging.debug("NOT appending")
else:
# id does not exist so we can append
logging.debug("appending")
self.__write_org_subitem(timestamp, output, note, properties, tags)
def __compute_existing_id_list(self):
"""
Reads the outputfile, looks for :ID: properties and stores them in
self.__existing_ids
"""
assert self.__existing_ids == []
data = CommonReader.get_data_from_file(self.__file_name)
for found_id in re.findall(":ID:(.*)\n.*:END:", data):
found_id = found_id.strip()
if found_id != "":
self.__existing_ids.append(found_id)
logging.debug("found id :ID: %s", found_id)
logging.debug("there are already %d entries", len(self.__existing_ids))
def __id_exists(self, searchid):
"""
@return: if searchid already exists in output file
"""
return str(searchid).strip() in self.__existing_ids
    def close(self):
        """
        Writes the footer and closes the file
        """
        # elapsed time, formatted with one decimal place
        self.__time = "%.1fs " % (time.time() - self.__time)
if not self.__append:
self.__write_footer()
if self.__handler != None:
self.__handler.close()
def __lower_autotag_dict(self):
"""
lowers all values of dict
"""
for tag in self.__autotag_dict.keys():
values = []
for value in self.__autotag_dict[tag]:
values.append(value.lower())
self.__autotag_dict[tag] = values
def __get_autotags(self, tags, string):
"""
Searches for tags in a given wordlist.
Append them to tags
@param tags: list to append the matched tags
@param string: string to look for matching values
"""
string = string.lower()
for autotag_tag in self.__autotag_dict.keys():
for matching_word in self.__autotag_dict[autotag_tag]:
if matching_word in string:
if autotag_tag not in tags:
tags.append(autotag_tag)
continue
| 11,460 | Python | .py | 290 | 27.172414 | 110 | 0.516993 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,789 | orgproperty.py | novoid_Memacs/memacs/lib/orgproperty.py |
# -*- coding: utf-8 -*-
# Time-stamp: <2012-09-06 22:07:05 armin>
import hashlib
from collections import OrderedDict
class OrgProperties(object):
"""
Class for handling Memacs's org-drawer:
:PROPERTIES:
...
:<tag>: value
...
:ID: - id is generated from all above tags/values
:END:
"""
def __init__(self, data_for_hashing=""):
"""
Ctor
        @param data_for_hashing: if no special properties are set,
        you can supply data here that is used only for hash generation
"""
self.__properties = OrderedDict()
self.__properties_multiline = {}
self.__data_for_hashing = data_for_hashing
self.__id = None
def add(self, tag, value):
"""
Add an OrgProperty(tag,value) to the properties
@param tag: property tag
@param value: property value
"""
tag = str(tag).strip().upper()
value = str(value).strip()
if tag == "ID":
raise Exception("you should not specify an :ID: property " + \
"it will be generated automatically")
value_multiline = value.splitlines()
if len(value_multiline) > 1:
# we do have multiline value
multiline_value = [" " + v for v in value_multiline]
self.__properties_multiline[tag] = multiline_value
value = " ".join(value_multiline)
self.__properties[tag] = str(value)
def set_id(self, value):
"""
        set the id here; then it's not generated / hashed
"""
self.__id = value
    def delete(self, key):
        """
        delete a pair out of properties
        @param key: index
        """
        # ignore keys that are missing in either dict
        self.__properties.pop(key, None)
        self.__properties_multiline.pop(key, None)
def __get_property_max_tag_width(self):
width = 10 # :PROPERTIES: has width 10
for key in list(self.__properties.keys()):
if width < len(key):
width = len(key)
return width
    def __format_tag(self, tag):
        num_whitespaces = self.__get_property_max_tag_width() - len(tag)
        whitespaces = " " * num_whitespaces
        return " :" + tag + ": " + whitespaces
def __str__(self):
"""
        for representing properties in unicode with org formatting
"""
if self.__properties == {} and \
self.__data_for_hashing == "" and \
self.__id == None:
raise Exception("No data for hashing specified, and no " + \
"property was given. Cannot generate unique ID.")
ret = " :PROPERTIES:\n"
for tag, value in self.__properties.items():
ret += self.__format_tag(tag) + value + "\n"
ret += self.__format_tag("ID") + self.get_id() + "\n"
ret += " :END:"
return ret
def get_id(self):
"""
generates the hash string for all properties
@return: sha1(properties)
"""
if self.__id != None:
return self.__id
to_hash = "".join(map(str, list(self.__properties.values())))
to_hash += "".join(map(str, list(self.__properties.keys())))
to_hash += self.__data_for_hashing
return hashlib.sha1(to_hash.encode('utf-8')).hexdigest()
def get_value(self, key):
"""
@param: key of property
@return: returns the value of a given key
"""
return self.__properties[key]
def add_data_for_hashing(self, data_for_hashing):
"""
add additional data for hashing
useful when no possibility to set in Ctor
"""
self.__data_for_hashing += data_for_hashing
def get_value_delete_but_add_for_hashing(self, key):
"""
see method name ;)
"""
ret = self.get_value(key)
self.delete(key)
self.add_data_for_hashing(ret)
return ret
def get_multiline_properties(self):
ret = ""
for key in list(self.__properties_multiline.keys()):
ret += "\n " + key + ":\n"
ret += "\n".join(self.__properties_multiline[key])
ret += "\n"
return ret
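# A minimal usage sketch (hedged; the tag and value are illustrative):
if __name__ == '__main__':
    demo = OrgProperties(data_for_hashing="some unique data")
    demo.add("CURRENCY", "EUR")
    print(demo)           # renders the :PROPERTIES: drawer incl. the generated :ID:
    print(demo.get_id())  # sha1 over tags, values and the data for hashing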
| 4,309 | Python | .py | 122 | 26.155738 | 77 | 0.543839 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,790 | mailparser.py | novoid_Memacs/memacs/lib/mailparser.py |
# -*- coding: utf-8 -*-
# Time-stamp: <2019-11-06 15:24:33 vk>
import logging
import time
from email import message_from_string
from email.header import decode_header
from email.utils import parsedate
from orgformat import OrgFormat
from .orgproperty import OrgProperties
class MailParser(object):
@staticmethod
def get_value_or_empty_str(headers, key, remove_newline=False):
"""
@param return: headers[key] if exist else ""
"""
ret = ''
if key in headers:
ret = headers[key]
if remove_newline:
ret = ret.replace("\n", "")
arr = []
for item in decode_header(ret):
value, charset = item
if charset:
arr.append(value.decode(charset))
else:
arr.append(value)
return ' '.join(arr)
@staticmethod
def parse_message(message, add_body=False):
"""
parses whole mail from string
@param message: mail message
@param add_body: if specified, body is added
@return values for OrgWriter.write_org_subitem
"""
msg = message_from_string(message)
# Read only these fields
use_headers = ["To",
"Date",
"From",
"Subject",
"Reply-To",
"Newsgroups",
"Cc",
]
# These fields are added, if found to :PROPERTIES: drawer
not_properties = ["Date",
"Subject",
"From"
]
properties = OrgProperties()
headers = {}
logging.debug("Message items:")
logging.debug(list(msg.items()))
msg_id = None
# fill headers and properties
for key, value in list(msg.items()):
value = value.replace("\r", "")
if key in use_headers:
headers[key] = value
if key not in not_properties:
properties.add(key, MailParser.get_value_or_empty_str(headers, key, True))
if key.upper() == "MESSAGE-ID":
msg_id = value
notes = ""
# look for payload
# if more than one payload, use text/plain payload
if add_body:
payload = msg.get_payload()
if payload.__class__ == list:
# default use payload[0]
payload_msg = payload[0].get_payload()
                for payload_id in range(len(payload)):
for param in payload[payload_id].get_params():
if param[0] == 'text/plain':
payload_msg = payload[payload_id].get_payload()
break
if payload_msg != payload[0].get_payload():
break
notes = payload_msg
else:
notes = payload
notes = notes.replace("\r", "")
output_from = MailParser.get_value_or_empty_str(headers, "From")
if output_from != "":
output_from = OrgFormat.mailto_link(output_from)
subject = MailParser.get_value_or_empty_str(headers, "Subject", True)
dt = MailParser.get_value_or_empty_str(headers, "Date", False)
timestamp = ""
if dt != "":
try:
time_tuple = time.localtime(time.mktime(parsedate(dt)))
timestamp = OrgFormat.date(time_tuple, show_time=True)
            except TypeError:
                logging.error("could not parse datetime from msg %s", dt)
        properties.add_data_for_hashing(timestamp + "_" + str(msg_id))
if "Newsgroups" in headers:
ng_list = []
for ng in headers["Newsgroups"].split(","):
ng_list.append(OrgFormat.newsgroup_link(ng))
output_ng = ", ".join(map(str, ng_list))
output = output_from + "@" + output_ng + ": " + subject
else:
output = output_from + ": " + subject
return timestamp, output, notes, properties
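# A minimal usage sketch (hedged; the message text is illustrative):
#
#   raw = ("Date: Wed, 28 Dec 2011 14:02:00 +0100\n"
#          "From: Alice Ally <alice@ally.com>\n"
#          "Subject: demo\n"
#          "Message-ID: <demo@example.org>\n"
#          "\n"
#          "demo body\n")
#   timestamp, output, notes, properties = \
#       MailParser.parse_message(raw, add_body=True)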
| 4,186 | Python | .py | 106 | 26.537736 | 94 | 0.514053 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,791 | reader.py | novoid_Memacs/memacs/lib/reader.py |
# -*- coding: utf-8 -*-
# Time-stamp: <2012-05-24 19:08:10 armin>
import codecs
import csv
import logging
import sys
from collections import OrderedDict
from urllib.error import HTTPError
from urllib.error import URLError
from urllib.request import urlopen
class CommonReader:
"""
Class for reading
* files
* url's
"""
@staticmethod
def get_data_from_file(path, encoding='utf-8'):
"""
reads a file
        @param path: path to file
        @param encoding: file encoding
@return: returns data
"""
try:
input_file = codecs.open(path, 'rb', encoding=encoding)
data = input_file.read()
input_file.close()
return data
except IOError as e:
logging.error("Error at opening file: %s:%s", path, e)
sys.exit(1)
@staticmethod
def get_reader_from_file(path):
"""
gets a stream of a file
@param path: file
@return: stream of file
"""
        try:
            return codecs.open(path, encoding='utf-8')
        except IOError as e:
            logging.error("Error at opening file: %s:%s", path, e)
            sys.exit(1)
@staticmethod
def get_data_from_url(url):
"""
reads from a url
@param url: url to read
@return: returns data
"""
try:
req = urlopen(url, None, 10)
return req.read()
except HTTPError as e:
logging.error("HTTPError: %s", e)
sys.exit(1)
except URLError as e:
logging.error("URLError: %s", e)
sys.exit(1)
except ValueError as e:
logging.error("ValueError: %s", e)
sys.exit(1)
except Exception as e:
logging.error("Exception: %s", e)
sys.exit(1)
@staticmethod
def get_data_from_stdin():
"""
reads from stdin
@return: data from stdin
"""
input_stream = codecs.getreader('utf-8')(sys.stdin)
data = input_stream.read()
input_stream.close()
return data
@staticmethod
def get_reader_from_stdin():
"""
get a utf-8 stream reader for stdin
@return: stdin-stream
"""
return codecs.getreader('utf-8')(sys.stdin)
class UTF8Recoder:
"""
from http://docs.python.org/library/csv.html
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def __next__(self):
return next(self.reader)
class UnicodeCsvReader:
"""
from http://docs.python.org/library/csv.html
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, delimiter=";", encoding="utf-8", **kwds):
# f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, delimiter=delimiter, **kwds)
def __next__(self):
row = next(self.reader)
return [str(s) for s in row]
def __iter__(self):
return self
class UnicodeDictReader:
"""
from http://stackoverflow.com/questions/19740385/dictreader-and-unicodeerror
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, delimiter=";", encoding="utf-8", fieldnames=None, **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.DictReader(f, delimiter=delimiter, fieldnames=fieldnames, **kwds)
def __next__(self):
row = next(self.reader)
return OrderedDict((k.lower(), row[k]) for k in self.reader.fieldnames)
def __iter__(self):
return self
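# A minimal usage sketch (hedged; "data.csv" and its columns are
# hypothetical). Note that UnicodeDictReader lower-cases all field names:
#
#   with open('data.csv', 'rb') as f:
#       for row in UnicodeDictReader(f, delimiter=';', encoding='utf-8'):
#           print(row['date'], row['text'])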
| 3,827 | Python | .py | 122 | 23.680328 | 91 | 0.589339 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,792 | orgproperty_test.py | novoid_Memacs/memacs/lib/tests/orgproperty_test.py |
# -*- coding: utf-8 -*-
# Time-stamp: <2019-11-06 15:24:56 vk>
import time
import unittest
from orgformat import OrgFormat
from memacs.lib.orgproperty import OrgProperties
class TestOrgProperties(unittest.TestCase):
def test_properties_default_ctor(self):
p = OrgProperties("hashing data 1235")
properties = str(p).splitlines()
self.assertEqual(properties[0], " :PROPERTIES:")
self.assertEqual(properties[1],
" :ID: 063fad7f77461ed6a818b6b79306d641e9c90a83")
self.assertEqual(properties[2], " :END:")
def test_properties_with_own_created(self):
p = OrgProperties()
p.add("CREATED",
OrgFormat.date(time.gmtime(0), show_time=True))
properties = str(p).splitlines()
self.assertEqual(properties[0], " :PROPERTIES:")
self.assertEqual(properties[1], " :CREATED: <1970-01-0" + \
"1 Thu 00:00>")
self.assertEqual(properties[2], " :ID: fede47e9" + \
"f49e1b7f5c6599a6d607e9719ca98625")
self.assertEqual(properties[3], " :END:")
| 1,140 | Python | .py | 25 | 37.08 | 72 | 0.620596 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,793 | orgwriter_test.py | novoid_Memacs/memacs/lib/tests/orgwriter_test.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-stamp: <2019-11-06 15:25:23 vk>
import codecs
import shutil
import tempfile
import time
import unittest
from orgformat import OrgFormat
from memacs.lib.orgproperty import OrgProperties
from memacs.lib.orgwriter import OrgOutputWriter
class TestOutputWriter(unittest.TestCase):
def setUp(self):
self.TMPFOLDER = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.TMPFOLDER)
def test_output_to_file(self):
"""
Simple Test
"""
test_filename = self.TMPFOLDER + "testfile.org"
properties = OrgProperties("data_for_hashing")
# writing test output
writer = OrgOutputWriter("short descript", "test-tag", test_filename)
writer.write("## abc\n")
writer.writeln("## abc")
writer.write_comment("abc\n")
writer.write_commentln("abc")
writer.write_org_item("begin")
timestamp = OrgFormat.date(time.gmtime(0), show_time=True)
writer.write_org_subitem(timestamp=timestamp,
output="sub",
properties=properties)
writer.write_org_subitem(timestamp=timestamp,
output="sub",
tags=["foo", "bar"],
properties=properties)
writer.write_org_subitem(timestamp=False,
output="no timestamp",
tags=["bar", "baz"],
properties=properties)
writer.close()
# read and check the file_handler
with codecs.open(test_filename, "r", "utf-8") as file_handler:
data = file_handler.readlines()
self.assertEqual(
data[3],
"* short descript :Memacs:test-tag:\n")
self.assertEqual(
data[4],
"## abc\n")
self.assertEqual(
data[5],
"## abc\n")
self.assertEqual(
data[6],
"## abc\n")
self.assertEqual(
data[7],
"## abc\n")
self.assertEqual(
data[8],
"* begin\n")
self.assertEqual(
data[9],
"** <1970-01-01 Thu 00:00> sub\n")
self.assertEqual(
data[10],
" :PROPERTIES:\n")
self.assertEqual(
data[11],
" :ID: 9cc53a63e13e18437401513316185f6f3b7ed703\n")
self.assertEqual(
data[12],
" :END:\n")
self.assertEqual(
data[13],
"\n")
self.assertEqual(
data[14],
"** <1970-01-01 Thu 00:00> sub\t:foo:bar:\n")
self.assertEqual(
data[15],
" :PROPERTIES:\n")
self.assertEqual(
data[16],
" :ID: 9cc53a63e13e18437401513316185f6f3b7ed703\n")
self.assertEqual(
data[17],
" :END:\n")
self.assertEqual(
data[18],
"\n")
self.assertEqual(
data[19],
"** no timestamp\t:bar:baz:\n")
self.assertEqual(
data[20],
" :PROPERTIES:\n")
self.assertEqual(
data[21],
" :ID: 9cc53a63e13e18437401513316185f6f3b7ed703\n")
self.assertEqual(
data[22],
" :END:\n")
def test_utf8(self):
test_filename = self.TMPFOLDER + "testutf8.org"
# writing test output
writer = OrgOutputWriter("short-des", "tag", test_filename)
writer.write("☁☂☃☄★☆☇☈☉☊☋☌☍☎☏☐☑☒☓☔☕☖☗♞♟♠♡♢♣♤♥♦♧♨♩♪♫♬♭♮♯♰♱♲♳♴♵\n")
writer.close()
# read and check the file_handler
file_handler = codecs.open(test_filename, "r", "utf-8")
input_handler = file_handler.readlines()
file_handler.close()
self.assertEqual(input_handler[4],
"☁☂☃☄★☆☇☈☉☊☋☌☍☎☏☐☑☒☓☔☕☖☗♞♟♠♡♢♣♤♥♦♧♨♩♪♫♬♭♮♯♰♱♲♳♴♵\n",
"utf-8 failure")
def test_autotag(self):
test_filename = self.TMPFOLDER + "testautotag.org"
autotag_dict = {}
autotag_dict["programming"] = ["programming", "python", "java"]
autotag_dict["TUG"] = ["tugraz", "university"]
output = "Programming for my bachelor thesis at University"
# writing test output
writer = OrgOutputWriter(short_description="short-des",
tag="tag",
file_name=test_filename,
autotag_dict=autotag_dict)
timestamp = OrgFormat.date(time.gmtime(0), show_time=True)
properties = OrgProperties("data_for_hashing")
writer.write_org_subitem(timestamp=timestamp,
output=output,
properties=properties)
writer.close()
# read and check the file_handler
file_handler = codecs.open(test_filename, "r", "utf-8")
input_handler = file_handler.readlines()
file_handler.close()
self.assertTrue(input_handler[4].startswith("** <1970-01-01 Thu 00:00> Programming for my " +
"bachelor thesis at University\t:"))
self.assertTrue(input_handler[4].endswith("programming:TUG:\n") or
input_handler[4].endswith("TUG:programming:\n"))
if __name__ == '__main__':
unittest.main()
| 5,733 | Python | .py | 145 | 26.117241 | 101 | 0.514605 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,794 | reader_test.py | novoid_Memacs/memacs/lib/tests/reader_test.py |
# -*- coding: utf-8 -*-
# Time-stamp: <2011-12-30 12:16:47 armin>
import unittest
from memacs.lib.reader import CommonReader
class TestReader(unittest.TestCase):
def test_file_no_path(self):
try:
CommonReader.get_data_from_file("")
self.assertTrue(False, "false path failed")
except SystemExit:
pass
| 361 | Python | .py | 11 | 26.272727 | 55 | 0.652174 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,795 | argparser_test.py | novoid_Memacs/memacs/lib/tests/argparser_test.py |
# -*- coding: utf-8 -*-
# Time-stamp: <2011-12-30 12:16:47 armin>
import os
import shutil
import tempfile
import unittest
from memacs.lib.argparser import MemacsArgumentParser
class TestArgParser(unittest.TestCase):
def setUp(self):
self.prog_version = "0.1"
self.prog_version_date = "2011-12-19"
self.description = "descriptionbla"
self.copyright_year = "2011"
self.copyright_authors = "Armin Wieser <armin.wieser@gmail.com>"
self.parser = MemacsArgumentParser(
prog_version=self.prog_version,
prog_description=self.description,
prog_version_date=self.prog_version_date,
copyright_authors=self.copyright_authors,
copyright_year=self.copyright_year)
self.TMPFOLDER = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.TMPFOLDER)
def test_verbose(self):
"""
testing MemacsArgumentParser's argument verbose
"""
args = self.parser.parse_args('-v'.split())
args2 = self.parser.parse_args('--verbose'.split())
self.assertEqual(args, args2, "-v and --verbose do different things")
self.assertEqual(args.outputfile, None,
"verbose - args.outputfile should be None")
self.assertEqual(args.suppressmessages, False,
"verbose - args.suppressmessages should be False")
self.assertEqual(args.verbose, True,
"verbose - args.verbose should be True")
def test_suppress(self):
"""
testing MemacsArgumentParser's suppress-messages
"""
args = self.parser.parse_args('-s'.split())
args2 = self.parser.parse_args('--suppress-messages'.split())
self.assertEqual(args, args2,
"-s and --suppress-messages do different things")
self.assertEqual(args.outputfile, None,
"suppressmessages - args.outputfile should be None")
self.assertEqual(
args.suppressmessages, True,
"suppressmessages - args.suppressmessages should be True")
self.assertEqual(args.verbose, False,
"suppressmessages - args.verbose should be False")
def test_outputfile(self):
#args = self.parser.parse_args('-o'.split())
outputfile_path = self.TMPFOLDER + "outputfile"
outputfile_argument = "-o " + outputfile_path
outputfile_argument2 = "--output " + outputfile_path
args = self.parser.parse_args(outputfile_argument.split())
args2 = self.parser.parse_args(outputfile_argument2.split())
self.assertEqual(args, args2, "-o and --output do different things")
def test_nonexistingoutputdir(self):
outputfile_path = self.TMPFOLDER + "NONEXIST" + os.sep + "outputfile"
outputfile_argument = "-o " + outputfile_path
try:
self.parser.parse_args(outputfile_argument.split())
            self.assertTrue(False,
                            "parsing was correct although the outputfile directory does not exist")
except SystemExit:
pass
def test_verbose_suppress_both(self):
try:
self.parser.parse_args('-s -v'.split())
            self.assertTrue(
                False,
                "parsing was correct although " + \
                "both suppress and verbose were specified")
except SystemExit:
pass
| 3,464 | Python | .py | 77 | 34.376623 | 79 | 0.620516 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,796 | mailparser_test.py | novoid_Memacs/memacs/lib/tests/mailparser_test.py |
# -*- coding: utf-8 -*-
# Time-stamp: <2011-12-30 12:16:47 armin>
import unittest
from memacs.lib.mailparser import MailParser
class TestMailParser(unittest.TestCase):
def test_parse_mail_without_body(self):
message = """Date: Wed, 28 Dec 2011 14:02:00 +0100
From: Alice Ally <alice@ally.com>
To: Bob Bobby <Bob@bobby.com>
Subject: Bob sends a mesage
Message-ID: f2c1165a321d0e0@foo.com
X-Scanned-By: MIMEDefang 2.71 on 129.27.10.2
Hi!
Hope you can read my message
kind reagards,
Bob
"""
timestamp, output, notes, properties = \
MailParser.parse_message(message)
self.assertEqual(timestamp, "<2011-12-28 Wed 14:02>")
self.assertEqual(output, "[[mailto:alice@ally.com]" + \
"[Alice Ally]]: Bob sends a mesage")
self.assertEqual(notes, "")
self.assertEqual(
properties.get_value('TO'), 'Bob Bobby <Bob@bobby.com>'
)
for key in ('FROM', 'SUBJECT', 'DATE', 'MESSAGE-ID', 'X-SCANNED-BY'):
with self.assertRaises(KeyError):
properties.get_value(key)
def test_parse_mail_with_body(self):
message = """Date: Wed, 28 Dec 2011 14:02:00 +0100
From: Alice Ally <alice@ally.com>
To: Bob Bobby <Bob@bobby.com>
Subject: Bob sends a mesage
Message-ID: f2c1165a321d0e0@foo.com
X-Scanned-By: MIMEDefang 2.71 on 129.27.10.2
Hi!
Hope you can read my message
kind reagards,
Bob"""
timestamp, output, notes, properties = \
MailParser.parse_message(message,
True)
self.assertEqual(timestamp, "<2011-12-28 Wed 14:02>")
self.assertEqual(output, "[[mailto:alice@ally.com]" + \
"[Alice Ally]]: Bob sends a mesage")
self.assertEqual(notes, "Hi!\n\nHope you can read my message\n" + \
"\nkind reagards,\nBob")
self.assertEqual(
properties.get_value('TO'), 'Bob Bobby <Bob@bobby.com>'
)
for key in ('FROM', 'SUBJECT', 'DATE', 'MESSAGE-ID', 'X-SCANNED-BY'):
with self.assertRaises(KeyError):
properties.get_value(key)
def test_parse_ng_with_body(self):
message = """Path: news.tugraz.at!not-for-mail
From: Alice Ally <alice@ally.com>
Newsgroups: tu-graz.betriebssysteme.linux
Subject: I love Memacs
Date: Thu, 17 Nov 2011 22:02:06 +0100
Message-ID: <2011-11-17T21-58-27@ally.com>
Reply-To: news@ally.com
Content-Type: text/plain; charset=utf-8
i just want to say that i love Memacs
"""
timestamp, output, notes, properties = \
MailParser.parse_message(message,
True)
self.assertEqual(timestamp, "<2011-11-17 Thu 22:02>")
self.assertEqual(output,
"[[mailto:alice@ally.com][Alice Ally]]@[[news:tu-" + \
"graz.betriebssysteme.linux]" + \
"[tu-graz.betriebssysteme.linux]]: I love Memacs")
self.assertEqual(notes, "i just want to say that i love Memacs\n")
self.assertEqual(
properties.get_value('NEWSGROUPS'), 'tu-graz.betriebssysteme.linux'
)
self.assertEqual(
properties.get_value('REPLY-TO'), 'news@ally.com'
)
for key in ('FROM', 'SUBJECT', 'DATE', 'MESSAGE-ID', 'CONTENT-TYPE'):
with self.assertRaises(KeyError):
properties.get_value(key)
| 3,450 | Python | .py | 83 | 32.855422 | 79 | 0.610746 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,797 | phonecalls_test.py | novoid_Memacs/memacs/tests/phonecalls_test.py |
# -*- coding: utf-8 -*-
# Time-stamp: <2012-09-06 22:02:48 armin>
import os
import re
import time
import unittest
import xml.etree.ElementTree as ET
from memacs.phonecalls import PhonecallsMemacs
class TestPhonecallsMemacs(unittest.TestCase):
def setUp(self):
self._test_file = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'data', 'calls.xml'
)
argv = "-s -f " + self._test_file
memacs = PhonecallsMemacs(argv=argv.split())
self.data = memacs.test_get_entries()
self._calls = ET.parse(self._test_file)
def test_from_file(self):
data = self.data
for i in range(8):
self._assertPhoneLog(i, data[i*7:(i+1)*7])
def _assertPhoneLog(self, index, call_data):
call = self._calls.findall('call')[index]
duration = call_data[3].split()[-1]
number = call_data[4].split()[-1]
start, _ = re.findall('<(.*?)>', call_data[0])
date = time.gmtime(int(call.get('date'))/1000.)
self.assertEqual(time.strftime('%Y-%m-%d %a %H:%M', date), start)
self.assertEqual(duration, call.get('duration'))
self.assertEqual(number, call.get('number'))
| 1,205 | Python | .py | 30 | 33.4 | 75 | 0.619211 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,798 | sms_test.py | novoid_Memacs/memacs/tests/sms_test.py |
# -*- coding: utf-8 -*-
# Time-stamp: <2012-03-09 15:36:52 armin>
import os
import re
import time
import unittest
import xml.etree.ElementTree as ET
from memacs.sms import SmsMemacs
class TestSmsMemacs(unittest.TestCase):
def setUp(self):
self._test_file = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'data', 'smsxml.txt'
)
argv = "-s -f " + self._test_file
memacs = SmsMemacs(argv=argv.split())
self.data = memacs.test_get_entries()
self._smses = ET.parse(self._test_file)
def test_from_file(self):
data = self.data
for i in range(4):
self._assertSMSLog(i, data[i*6:(i+1)*6])
def _assertSMSLog(self, index, sms_data):
sms = self._smses.findall('sms')[index]
name = sms_data[3].split()[-1]
name = 'Unknown' if name == 'False' else name
to_from = 'SMS to' if sms.get('type') == '2' else 'SMS from'
number = sms_data[2].split()[-1]
timestamp, = re.findall('<(.*?)>', sms_data[0])
date = time.gmtime(int(sms.get('date'))/1000.)
self.assertIn(name, sms_data[0])
self.assertIn(to_from, sms_data[0])
self.assertEqual(time.strftime('%Y-%m-%d %a %H:%M', date), timestamp)
self.assertEqual(number, sms.get('address').replace('+','00'))
| 1,339 | Python | .py | 33 | 33.575758 | 77 | 0.597222 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |
| 10,799 | csv_test.py | novoid_Memacs/memacs/tests/csv_test.py |
# -*- coding: utf-8 -*-
# Time-stamp: <2011-10-28 15:13:31 aw>
import os
import unittest
from memacs.csv import Csv
class TestCsv(unittest.TestCase):
def test_example1(self):
example1 = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'data', 'example1.csv'
)
argv = []
argv.append("-f")
argv.append(example1)
argv.append("--fieldnames")
argv.append("date,text,value,currency,")
argv.append("--timestamp-field")
argv.append("date")
argv.append("--timestamp-format")
argv.append("%d.%m.%Y")
argv.append("--output-format")
argv.append("{text}")
argv.append("--properties")
argv.append("currency,value")
memacs = Csv(argv=argv)
data = memacs.test_get_entries()
self.assertEqual(data[0], "** <2012-02-23 Thu> Amazon")
self.assertEqual(data[1], " :PROPERTIES:")
self.assertEqual(data[2], " :CURRENCY: EUR")
self.assertEqual(data[3], " :VALUE: 100,00")
self.assertEqual(data[4], " :ID: a9cfa86bd9d89b72f35bfca0bb75131be0ca86b1")
self.assertEqual(data[5], " :END:")
def test_example2(self):
example1 = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'data', 'example2.csv'
)
argv = []
argv.append("--delimiter")
argv.append("|")
argv.append("-f")
argv.append(example1)
argv.append("--timestamp-field")
argv.append("date")
argv.append("--output-format")
argv.append("{text}")
memacs = Csv(argv=argv)
data = memacs.test_get_entries()
self.assertEqual(data[0], "** <2012-02-23 Thu 14:40> Alibaba")
self.assertEqual(data[1], " :PROPERTIES:")
self.assertEqual(data[2], " :ID: b1a598a360d4766cf94822cdcd96629f0b3936ef")
self.assertEqual(data[3], " :END:")
| 1,965 | Python | .py | 50 | 31.02 | 93 | 0.576984 | novoid/Memacs | 1,003 | 67 | 17 | GPL-3.0 | 9/5/2024, 5:10:54 PM (Europe/Amsterdam) |