code                      string
signature                 string
docstring                 string
loss_without_docstring    float64
loss_with_docstring       float64
factor                    float64
anime_data = requests.get(self.apiusers, params={'u': name, 'status': 'all', 'type': 'anime'}, headers=self.header)
if anime_data.status_code != 200:
    raise ConnectionError(
        "Anime Data Request failed. Please Open a bug on https://github.com/ccubed/Pymoe and include the following data.\nStatus Code: {}\n\nText:{}".format(
            anime_data.status_code, anime_data.text))
manga_data = requests.get(self.apiusers, params={'u': name, 'status': 'all', 'type': 'manga'}, headers=self.header)
if manga_data.status_code != 200:
    raise ConnectionError(
        "Manga Data Request failed. Please Open a bug on https://github.com/ccubed/Pymoe and include the following data.\nStatus Code: {}\n\nText:{}".format(
            manga_data.status_code, manga_data.text))
root = ET.fromstring(anime_data.text)
uid = root.find('myinfo').find('user_id').text
uname = root.find('myinfo').find('user_name').text
anime_object_list = self.parse_anime_data(anime_data.text)
manga_object_list = self.parse_manga_data(manga_data.text)
return User(uid=uid,
            name=uname,
            anime_list=NT_USER_ANIME(
                watching=[x for x in anime_object_list['data'] if x.status.user == "Currently Watching"],
                completed=[x for x in anime_object_list['data'] if x.status.user == "Completed"],
                held=[x for x in anime_object_list['data'] if x.status.user == "On Hold"],
                dropped=[x for x in anime_object_list['data'] if x.status.user == "Dropped"],
                planned=[x for x in anime_object_list['data'] if x.status.user == "Plan to Watch"]
            ),
            anime_days=anime_object_list['days'],
            manga_list=NT_USER_MANGA(
                reading=[x for x in manga_object_list['data'] if x.status.user == "Currently Reading"],
                completed=[x for x in manga_object_list['data'] if x.status.user == "Completed"],
                held=[x for x in manga_object_list['data'] if x.status.user == "On Hold"],
                dropped=[x for x in manga_object_list['data'] if x.status.user == "Dropped"],
                planned=[x for x in manga_object_list['data'] if x.status.user == "Plan to Read"]
            ),
            manga_days=manga_object_list['days'])
def user(self, name)
Get a user's anime list and details. This returns an encapsulated data type.

:param str name: The username to query
:rtype: :class:`Pymoe.Mal.Objects.User`
:return: A :class:`Pymoe.Mal.Objects.User` Object
1.921674
1.857266
1.034679
finvars = self.clientvars
if username and password:
    finvars['username'] = username
    finvars['password'] = password
    self.loggedin = True
ret = self.send_command('login', ujson.dumps(finvars))
if not isinstance(ret, str):  # should just be 'Ok'
    if self.loggedin:
        self.loggedin = False
        raise UserLoginFailed(ret['msg'])
    else:
        raise GeneralLoginError(ret['msg'])
def login(self, username, password)
This handles login logic instead of stuffing all that in the __init__.

:param username: The username to log in as, or None
:param password: The password for that user, or None
:return: Nothing
:raises: :class:`Pymoe.errors.UserLoginFailed` - Didn't respond with Ok
:raises: :class:`Pymoe.errors.GeneralLoginError` - For some reason, we were already logged in, tried to log in again, and it failed. This probably isn't bad.
4.849787
4.050808
1.197239
if args:
    if isinstance(args, str):
        final_command = command + ' ' + args + '\x04'
    else:
        # We just let ujson propagate the error here if it can't parse the arguments
        final_command = command + ' ' + ujson.dumps(args) + '\x04'
else:
    final_command = command + '\x04'
self.sslwrap.sendall(final_command.encode('utf-8'))
return self._recv_data()
def send_command(self, command, args=None)
Send a command to VNDB and then get the result.

:param command: What command are we sending
:param args: What are the json args for this command
:return: Server's response
:rtype: Dictionary (See D11 docs on VNDB)
3.884121
4.090724
0.949495
temp = "" while True: self.data_buffer = self.sslwrap.recv(1024) if '\x04' in self.data_buffer.decode('utf-8', 'ignore'): temp += self.data_buffer.decode('utf-8', 'ignore') break else: temp += self.data_buffer.decode('utf-8', 'ignore') self.data_buffer = bytes(1024) temp = temp.replace('\x04', '') if 'Ok' in temp: # Because login return temp else: return ujson.loads(temp.split(' ', 1)[1])
def _recv_data(self)
Receives data until we reach the \x04 terminator, then returns it. :return: The data received
3.402061
3.29022
1.033992
filters = self.__format_filters(filters)
r = requests.get(self.apiurl + "/users/{}/library-entries".format(uid),
                 headers=self.header, params=filters)
if r.status_code != 200:
    raise ServerError
jsd = r.json()
if jsd['meta']['count']:
    return SearchWrapper(jsd['data'],
                         jsd['links']['next'] if 'next' in jsd['links'] else None,
                         self.header)
else:
    return None
def get(self, uid, filters=None)
Get a user's list of library entries. While individual entries on this list don't show what type of entry they are, you can use the filters provided by the Kitsu API to select only the ones you want.

:param uid: str: User ID to get library entries for
:param filters: dict: Dictionary of filters for the library
:return: Results or ServerError
:rtype: SearchWrapper or Exception
3.743741
3.170064
1.180967
final_dict = {
    "data": {
        "type": "libraryEntries",
        "attributes": data,
        "relationships": {
            "user": {
                "data": {
                    "id": user_id,
                    "type": "users"
                }
            },
            "media": {
                "data": {
                    "id": media_id,
                    "type": item_type
                }
            }
        }
    }
}
final_headers = self.header
final_headers['Authorization'] = "Bearer {}".format(token)
r = requests.post(self.apiurl + "/library-entries", json=final_dict, headers=final_headers)
if r.status_code != 201:
    raise ConnectionError(r.text)
jsd = r.json()
return jsd['data']['id']
def create(self, user_id, media_id, item_type, token, data)
Create a library entry for a user. data should be just the attributes; it at least needs a status and progress.

:param user_id str: User ID that this Library Entry is for
:param media_id str: ID for the media this entry relates to
:param item_type str: anime, drama, or manga, depending on the media
:param token str: OAuth token for user
:param data dict: Dictionary of attributes for the entry
:return: New Entry ID or ServerError
:rtype: Str or Exception
2.317474
2.20882
1.049191
final_dict = {"data": {"id": eid, "type": "libraryEntries", "attributes": data}} final_headers = self.header final_headers['Authorization'] = "Bearer {}".format(token) r = requests.patch(self.apiurl + "/library-entries/{}".format(eid), json=final_dict, headers=final_headers) if r.status_code != 200: raise ConnectionError(r.text) return True
def update(self, eid, data, token)
Update a given Library Entry. :param eid str: Entry ID :param data dict: Attributes :param token str: OAuth token :return: True or ServerError :rtype: Bool or Exception
3.177513
3.069685
1.035127
final_headers = self.header
final_headers['Authorization'] = "Bearer {}".format(token)
r = requests.delete(self.apiurl + "/library-entries/{}".format(eid), headers=final_headers)
if r.status_code != 204:
    print(r.status_code)
    raise ConnectionError(r.text)
return True
def delete(self, eid, token)
Delete a library entry. :param eid str: Entry ID :param token str: OAuth Token :return: True or ServerError :rtype: Bool or Exception
3.212956
3.040892
1.056584
if filters is not None:
    # Iterate over a copy of the keys: the dict is mutated inside the loop,
    # which would otherwise raise a RuntimeError on Python 3
    for k in list(filters):
        if 'filter[' not in k:
            filters['filter[{}]'.format(k)] = filters.pop(k)
return filters
def __format_filters(filters)
Format filters for the API query (to filter[<filter-name>]) :param filters: dict, can be None; filters for the query :return: the formatted filters, or None
3.771646
3.585158
1.052017
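The key rewriting above is easy to misread when the dict is mutated mid-loop, so here is a minimal standalone sketch of the same transformation (the sample filters are illustrative, not from the Kitsu docs):

def format_filters(filters):
    if filters is not None:
        for k in list(filters):  # iterate over a copy; the dict is mutated below
            if 'filter[' not in k:
                filters['filter[{}]'.format(k)] = filters.pop(k)
    return filters

print(format_filters({'kind': 'anime', 'filter[status]': 'current'}))
# {'filter[status]': 'current', 'filter[kind]': 'anime'}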
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
    result = func(self, *args, **kwargs)
    if result is not None:
        if isinstance(result, dict):
            return _record(result)
        return (_record(i) for i in result)
    return result
return wrapper
def convert_to_record(func)
Wrap a MongoDB record into a dict record whose missing keys default to None
2.372919
2.274977
1.043052
if kwargs.get('wrapper'):
    return cls._wrapper_to_one_str(value)
return _es.to_dict_str(value)
def to_one_str(cls, value, *args, **kwargs)
Convert a single record's values to str
10.013731
8.458839
1.183819
if callback and callable(callback):
    if isinstance(values, dict):
        return callback(_es.to_str(values))
    return [callback(_es.to_str(i)) for i in values]
return _es.to_str(values)
def to_str(cls, values, callback=None)
Convert many records' values to str
3.47574
3.305987
1.051347
if not cls._instance.get(name):
    model_name = name.split('.')
    ins_name = '.'.join(
        ['models', model_name[0], 'model', model_name[1]])
    cls._instance[name] = cls.import_model(ins_name)()
return cls._instance[name]
def instance(cls, name)
Instantiate a model class according to its import path. :arg name: class import path like `user.User` :return: model instance
3.827464
3.768211
1.015725
try:
    package_space = getattr(cls, 'package_space')
except AttributeError:
    raise ValueError('package_space does not exist')
else:
    return import_object(ins_name, package_space)
def import_model(cls, ins_name)
Import model class in models package
4.819266
4.620376
1.043046
from turbo import log

app_config.app_name = app_name
app_config.app_setting = app_setting
app_config.project_name = os.path.basename(get_base_dir(mainfile, 2))
app_config.web_application_setting.update(web_application_setting)
if app_setting.get('session_config'):
    app_config.session_config.update(app_setting['session_config'])
log.getLogger(**app_setting.log)
_install_app(package_space)
def register_app(app_name, app_setting, web_application_setting, mainfile, package_space)
insert current project root path into sys.path
3.639224
3.580769
1.016325
if name is None and kwargs is None:
    app_config.urls.append((url, handler))
    return

if name is None:
    app_config.urls.append((url, handler, kwargs))
    return

app_config.urls.append((url, handler, kwargs, name))
def register_url(url, handler, name=None, kwargs=None)
insert url into the tornado application handlers group

:arg str url: url
:arg object handler: url mapping handler
:arg str name: reverse url name
:arg dict kwargs: tornado handler initialize args
2.461398
2.629173
0.936187
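A hedged usage sketch for register_url; the handler class, URL pattern, and kwargs dict are illustrative, not taken from the turbo docs:

# Hypothetical handler and route, for illustration only
register_url(r'/users/(\w+)', UserHandler, name='user-detail',
             kwargs={'db_name': 'main'})
# appends (r'/users/(\w+)', UserHandler, {'db_name': 'main'}, 'user-detail')
# to app_config.urls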
super(BaseHandler, self).write_error(status_code, **kwargs)
def write_error(self, status_code, **kwargs)
Override to implement custom error pages. http://tornado.readthedocs.org/en/stable/_modules/tornado/web.html#RequestHandler.write_error
3.496161
2.553522
1.369152
'''
Filter all request parameters according to the request-method config;
if a value is invalid, set it to None.
'''
method = self.request.method.lower()
arguments = self.request.arguments
files = self.request.files

rpd = {}  # request parameter dict

def filter_parameter(key, tp, default=None):
    if tp not in self._types:
        raise ValueError(
            '%s parameter expected types %s' % (key, self._types))

    if not isinstance(tp, file_types):
        if key not in arguments:
            rpd[key] = default
            return

    if tp in [ObjectId, int, float, bool]:
        rpd[key] = getattr(self, 'to_%s' % getattr(
            tp, '__name__').lower())(self.get_argument(key))
        return

    if tp == basestring_type or issubclass(tp, basestring_type):
        rpd[key] = self.get_argument(key, strip=False)
        return

    if tp == list:
        rpd[key] = self.get_arguments(key)
        return

    if tp == file:
        if key not in files:
            rpd[key] = []
            return
        rpd[key] = self.request.files[key]

required_params = getattr(self, '_required_params', None)
if isinstance(required_params, list):
    for key, tp, default in required_params:
        filter_parameter(key, tp, default)

# extract method required params
method_required_params = getattr(
    self, '_%s_required_params' % method, None)
if isinstance(method_required_params, list):
    for key, tp, default in method_required_params:
        filter_parameter(key, tp, default)

params = getattr(self, '_%s_params' % method, None)
if params is None:
    return rpd

# need arguments
try:
    for key, tp in params.get('need', []):
        if tp == list:
            filter_parameter(key, tp, [])
        else:
            filter_parameter(key, tp)
except ValueError as e:
    app_log.error(
        '%s request need arguments parse error: %s' % (method, e))
    raise ValueError(e)
except Exception as e:
    app_log.error(
        '%s request need arguments parse error: %s' % (method, e))
    raise e

# option arguments
for key, tp, default in params.get('option', []):
    filter_parameter(key, tp, default)

return rpd
def parameter(self)
Filter all request parameters according to the request-method config; if a value is invalid, set it to None.
2.854196
2.48118
1.150338
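parameter() drives its filtering off per-class attributes; the sketch below shows how a handler might declare them, inferred from the names the method looks up (_required_params, _<method>_required_params, _<method>_params). The handler name and field specs are illustrative assumptions, not from the turbo docs:

class UserHandler(BaseHandler):
    # (key, type, default), checked for every HTTP method
    _required_params = [('token', basestring_type, None)]
    # checked only for GET requests
    _get_required_params = [('page', int, 1)]
    # GET-specific config: 'need' keys raise ValueError when unparsable,
    # 'option' keys fall back to their default
    _get_params = {
        'need': [('uid', ObjectId)],
        'option': [('tags', list, [])],
    }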
if self._data is not None:
    resp['res'] = self.to_str(self._data)
return self.wo_json(resp)
def wo_resp(self, resp)
Can be overridden for other response styles
7.94909
8.136333
0.976987
check = kwargs.pop('check', True)
if isinstance(doc_or_docs, dict):
    if check is True:
        doc_or_docs = self._valid_record(doc_or_docs)
    result = self.__collect.insert_one(doc_or_docs, **kwargs)
    return result.inserted_id
else:
    if check is True:
        for d in doc_or_docs:
            d = self._valid_record(d)
    result = self.__collect.insert_many(doc_or_docs, **kwargs)
    return result.inserted_ids
def insert(self, doc_or_docs, **kwargs)
Insert method
2.064728
2.054557
1.004951
check = kwargs.pop('check', True)
if check:
    self._valid_record(to_save)
if '_id' in to_save:
    self.__collect.replace_one(
        {'_id': to_save['_id']}, to_save, **kwargs)
    return to_save['_id']
else:
    result = self.__collect.insert_one(to_save, **kwargs)
    return result.inserted_id
def save(self, to_save, **kwargs)
save method
2.755434
2.659482
1.036079
self._valide_update_document(document)
if multi:
    return self.__collect.update_many(filter_, document, **kwargs)
else:
    return self.__collect.update_one(filter_, document, **kwargs)
def update(self, filter_, document, multi=False, **kwargs)
update method
3.655502
3.409288
1.072219
if isinstance(filter_, dict) and filter_ == {}:
    raise ValueError('not allowed remove all documents')
if filter_ is None:
    raise ValueError('not allowed remove all documents')
if kwargs.pop('multi', False) is True:
    return self.__collect.delete_many(filter_, **kwargs)
else:
    return self.__collect.delete_one(filter_, **kwargs)
def remove(self, filter_=None, **kwargs)
collection remove method. Warning: if you want to remove all documents, you must override the _remove_all method, to make sure you understand the consequences of what you do.
3.77482
3.552986
1.062436
check = kwargs.pop('check', True)
if check is True:
    self._valid_record(doc_or_docs)
return self.__collect.insert_one(doc_or_docs, **kwargs)
def insert_one(self, doc_or_docs, **kwargs)
Insert method
4.829314
4.657589
1.03687
check = kwargs.pop('check', True)
if check is True:
    for i in doc_or_docs:
        i = self._valid_record(i)
return self.__collect.insert_many(doc_or_docs, **kwargs)
def insert_many(self, doc_or_docs, **kwargs)
Insert method
4.576922
4.381082
1.044701
wrapper = kwargs.pop('wrapper', False)
if wrapper is True:
    return self._wrapper_find_one(filter_, *args, **kwargs)
return self.__collect.find_one(filter_, *args, **kwargs)
def find_one(self, filter_=None, *args, **kwargs)
find_one method
3.7846
3.442989
1.099219
wrapper = kwargs.pop('wrapper', False)
if wrapper is True:
    return self._wrapper_find(*args, **kwargs)
return self.__collect.find(*args, **kwargs)
def find(self, *args, **kwargs)
collection find method
5.02228
4.01554
1.250711
return self.__collect.find_one(filter_, *args, **kwargs)
def _wrapper_find_one(self, filter_=None, *args, **kwargs)
Convert the record to a dict that raises no KeyError for missing keys
7.004286
5.289303
1.324236
self._valide_update_document(document)
return self.__collect.update_one(filter_, document, **kwargs)
def update_one(self, filter_, document, **kwargs)
update method
7.932036
6.95652
1.140231
if isinstance(_id, (list, tuple)):
    return list(self.__collect.find(
        {'_id': {'$in': [self._to_primary_key(i) for i in _id]}},
        projection))

document_id = self._to_primary_key(_id)
if document_id is None:
    return None
return self.__collect.find_one({'_id': document_id}, projection)
def find_by_id(self, _id, projection=None)
find record by _id
2.908435
2.738884
1.061905
if field:
    attrs = {'name': name, 'field': field}
else:
    attrs = {'name': name, 'field': {'_id': ObjectId()}}
return type(str(name), (cls,), attrs)()
def create_model(cls, name, field=None)
Dynamically create a new model. :arg field: table field; if field is None or {}, this model cannot use the create method
4.069178
4.38544
0.927884
result = []
for index, v in enumerate(value):
    if isinstance(v, dict):
        result.append(to_dict_str(v, encode))
        continue
    if isinstance(v, list):
        result.append(to_list_str(v, encode))
        continue
    if encode:
        result.append(encode(v))
    else:
        result.append(default_encode(v))
return result
def to_list_str(value, encode=None)
recursively convert list content into string :arg list value: The list that needs to be converted. :arg function encode: Function used to encode objects.
2.10306
2.105217
0.998975
value = copy.deepcopy(origin_value)
for k, v in value.items():
    if isinstance(v, dict):
        value[k] = to_dict_str(v, encode)
        continue
    if isinstance(v, list):
        value[k] = to_list_str(v, encode)
        continue
    if encode:
        value[k] = encode(v)
    else:
        value[k] = default_encode(v)
return value
def to_dict_str(origin_value, encode=None)
recursively convert dict content into string
1.823784
1.709877
1.066617
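A small round-trip sketch of the two converters above; the exact timestamp text depends on format_time, so the output shown is indicative only:

from datetime import datetime

doc = {'ts': datetime(2020, 1, 1, 12, 0), 'tags': ['a', {'n': 1}]}
print(to_dict_str(doc))
# e.g. {'ts': '2020-01-01 12:00:00', 'tags': ['a', {'n': 1}]}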
if isinstance(v, ObjectId):
    return unicode_type(v)
if isinstance(v, datetime):
    return format_time(v)
if isinstance(v, date):
    return format_time(v)
return v
def default_encode(v)
convert ObjectId, datetime, date into string
3.314065
2.421808
1.368426
if isinstance(v, basestring_type):
    return v
if isinstance(v, dict):
    return to_dict_str(v, encode)
if isinstance(v, Iterable):
    return to_list_str(v, encode)
if encode:
    return encode(v)
else:
    return default_encode(v)
def to_str(v, encode=None)
convert any list, dict, iterable, or primitive object to string
2.50736
2.399973
1.044745
if objid is None:
    return objid
try:
    objid = ObjectId(objid)
except:
    util_log.error('%s is invalid objectid' % objid)
    return None
return objid
def to_objectid(objid)
Convert a string object into an ObjectId
3.362229
3.169514
1.060803
root_path = os.path.abspath(currfile)
for i in range(0, dir_level_num):
    root_path = os.path.dirname(root_path)
return root_path
def get_base_dir(currfile, dir_level_num=3)
Find the base directory dir_level_num levels up from currfile
2.071819
1.910974
1.084169
if os.path.isdir(currfile):
    root_path = currfile
else:
    root_path = get_base_dir(currfile, dir_level_num)
sys.path.append(root_path)
def join_sys_path(currfile, dir_level_num=3)
Find the base directory, then load it into sys.path
2.829408
2.580451
1.096478
as_list = []
length = len(name)
for index, i in enumerate(name):
    if index != 0 and index != length - 1 and i.isupper():
        as_list.append('_%s' % i.lower())
    else:
        as_list.append(i.lower())
return ''.join(as_list)
def camel_to_underscore(name)
convert CamelCase style to under_score_case
2.305755
2.234543
1.031869
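Usage sketch: the first and last characters are lowered but never prefixed with an underscore, and runs of capitals are split letter by letter:

assert camel_to_underscore('CamelCase') == 'camel_case'
assert camel_to_underscore('HTTPServer') == 'h_t_t_p_server'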
if isinstance(value, _BASESTRING_TYPES):
    return value
if not isinstance(value, bytes):
    raise TypeError(
        "Expected bytes, unicode, or None; got %r" % type(value)
    )
return value.decode("utf-8")
def to_basestring(value)
Converts a string argument to a subclass of basestring. In python2, byte and unicode strings are mostly interchangeable, so functions that deal with a user-supplied argument in combination with ascii string constants can use either and should return the type the user supplied. In python3, the two types are not interchangeable, so this method is needed to convert byte strings to unicode.
2.762311
2.958695
0.933625
'''
URL parameter encoding
'''
try:
    _fo = lambda k, v: '{name}={value}'.format(
        name=k, value=to_basestring(quote(v)))
except:
    _fo = lambda k, v: '%s=%s' % (k, to_basestring(quote(v)))

_en = utf8

return '&'.join([_fo(k, _en(v)) for k, v in kw.items() if not is_empty(v)])
def encode_http_params(**kw)
URL parameter encoding
4.346441
3.883446
1.119223
if level not in [logging.NOTSET, logging.DEBUG, logging.INFO,
                 logging.WARNING, logging.ERROR, logging.CRITICAL]:
    level = logging.DEBUG

for h in logger.handlers:
    if isinstance(h, logging.handlers.RotatingFileHandler):
        if h.level == level:
            return

fh = logging.handlers.RotatingFileHandler(
    log_path, maxBytes=log_size, backupCount=log_count)
fh.setLevel(level)
fh.setFormatter(_formatter)
logger.addHandler(fh)
def _init_file_logger(logger, level, log_path, log_size, log_count)
Ensure one logger only has one RotatingFileHandler per level
1.785699
1.713691
1.042019
self.store.cleanup(self._config.timeout)
self._load()
def _processor(self)
Application processor to set up the session for every request
36.006733
29.100946
1.237305
self.session_id = self._session_object.get_session_id()

# protection against session_id tampering
if self.session_id and not self._valid_session_id(self.session_id):
    self.session_id = None

if self.session_id:
    d = self.store[self.session_id]
    if isinstance(d, dict) and d:
        self.update(d)

if not self.session_id:
    self.session_id = self._session_object.generate_session_id()

if not self._data:
    if self._initializer and isinstance(self._initializer, dict):
        self.update(deepcopy(self._initializer))

self._session_object.set_session_id(self.session_id)
def _load(self)
Load the session from the store, by the id from cookie
3.036437
2.796117
1.085948
secret_key = self._config.secret_key
while True:
    rand = os.urandom(16)
    now = time.time()
    session_id = sha1(utf8("%s%s%s%s" % (
        rand, now, self.handler.request.remote_ip, secret_key)))
    session_id = session_id.hexdigest()
    if session_id not in self.store:
        break
return session_id
def generate_session_id(self)
Generate a random id for session
3.553758
3.340331
1.063894
pickled = pickle.dumps(session_data)
return to_basestring(encodebytes(pickled))
def encode(self, session_data)
encodes session dict as a string
8.754368
7.406938
1.181915
pickled = decodebytes(utf8(session_data))
return pickle.loads(pickled)
def decode(self, session_data)
decodes the data to get back the session dict
10.563972
10.095302
1.046425
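encode/decode are inverses (pickle, then base64 via encodebytes); a round-trip sketch, where `store` is a hypothetical object exposing these two methods:

session = {'user_id': 42, 'roles': ['admin']}
blob = store.encode(session)        # str, safe to put in a text-backed store
assert store.decode(blob) == session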
from flask.globals import _app_ctx_stack

app = _app_ctx_stack.top.app
options = {key: app.config.get(key, DEFAULTS[key]) for key in DEFAULTS.keys()}
base_context = {"app": app}
if options["KONCH_FLASK_IMPORTS"]:
    base_context.update(get_flask_imports())
context = dict(base_context)
if options["KONCH_FLASK_SHELL_CONTEXT"]:
    flask_context = app.make_shell_context()
    context.update(flask_context)
context.update(options["KONCH_CONTEXT"])

def context_formatter(ctx):
    formatted_base = ", ".join(sorted(base_context.keys()))
    ret = "\n{FLASK}\n{base_context}\n".format(
        FLASK=click.style("Flask:", bold=True), base_context=formatted_base
    )
    if options["KONCH_FLASK_SHELL_CONTEXT"]:
        variables = ", ".join(sorted(flask_context.keys()))
        ret += "\n{ADDITIONAL}\n{variables}\n".format(
            ADDITIONAL=click.style(
                "Flask shell context (see shell_context_processor()):", bold=True
            ),
            variables=variables,
        )
    if options["KONCH_CONTEXT"]:
        variables = ", ".join(sorted(options["KONCH_CONTEXT"].keys()))
        ret += "\n{ADDITIONAL}\n{variables}".format(
            ADDITIONAL=click.style(
                "Additional variables (see KONCH_CONTEXT):", bold=True
            ),
            variables=variables,
        )
    return ret

context_format = options["KONCH_CONTEXT_FORMAT"] or context_formatter

konch.start(
    context=context,
    shell=options["KONCH_SHELL"],
    banner=options["KONCH_BANNER"],
    prompt=options["KONCH_PROMPT"],
    output=options["KONCH_OUTPUT"],
    ptpy_vi_mode=options["KONCH_PTPY_VI_MODE"],
    context_format=context_format,
    ipy_extensions=options["KONCH_IPY_EXTENSIONS"],
    ipy_autoreload=options["KONCH_IPY_AUTORELOAD"],
    ipy_colors=options["KONCH_IPY_COLORS"],
    ipy_highlighting_style=options["KONCH_IPY_HIGHLIGHTING_STYLE"],
)
def cli()
An improved shell command, based on konch.
2.502561
2.439491
1.025854
replacement = self._tw.tasks.get(uuid=self._uuid)
self.__class__ = replacement.__class__
self.__dict__ = replacement.__dict__
def replace(self)
Performs conversion to the regular Task object, referenced by the stored UUID.
9.570759
5.371941
1.78162
replacement = self._tw.tasks.filter(' '.join(self._uuids))
self.__class__ = replacement.__class__
self.__dict__ = replacement.__dict__
def replace(self)
Performs conversion to the regular TaskQuerySet object, referenced by the stored UUIDs.
14.166913
8.032487
1.763702
args = [task['uuid'], 'modify'] if task.saved else ['add']
args.extend(self._get_modified_task_fields_as_args(task))
output = self.execute_command(args)

# Parse out the new ID, if the task is being added for the first time
if not task.saved:
    id_lines = [l for l in output if l.startswith('Created task ')]

    # Complain loudly if it seems that more tasks were created
    # Should not happen.
    # Expected output: Created task 1.
    #                  Created task 1 (recurrence template).
    if len(id_lines) != 1 or len(id_lines[0].split(' ')) not in (3, 5):
        raise TaskWarriorException(
            'Unexpected output when creating '
            'task: %s' % '\n'.join(id_lines),
        )

    # Circumvent the ID storage, since ID is considered read-only
    identifier = id_lines[0].split(' ')[2].rstrip('.')

    # Identifier can be either ID or UUID for completed tasks
    try:
        task._data['id'] = int(identifier)
    except ValueError:
        task._data['uuid'] = identifier

# Refreshing is very important here: not only is the modification time
# updated, but arbitrary attributes may have changed due to hooks
# altering the data before saving
task.refresh(after_save=True)
def save_task(self, task)
Save a task into the TaskWarrior database using an add/modify call
7.143446
6.684964
1.068584
# None value should not be converted by normalizer
if value is None:
    return None

normalize_func = getattr(self, 'normalize_{0}'.format(key),
                         lambda x: x)
return normalize_func(value)
def _normalize(self, key, value)
Use normalize_<key> methods to normalize user input. Any user input will be normalized at the moment it is used as a filter, or entered as the value of a Task attribute.
5.054036
4.4962
1.124068
if (
    isinstance(value, datetime.date)
    and not isinstance(value, datetime.datetime)
):
    # Convert to local midnight
    value_full = datetime.datetime.combine(value, datetime.time.min)
    localized = local_zone.localize(value_full)
elif isinstance(value, datetime.datetime):
    if value.tzinfo is None:
        # Convert to localized datetime object
        localized = local_zone.localize(value)
    else:
        # If the value is already localized, there is no need to change
        # time zone at this point. Also None is a valid value too.
        localized = value
elif isinstance(value, six.string_types):
    localized = self.backend.convert_datetime_string(value)
else:
    raise ValueError("Provided value could not be converted to "
                     "datetime, its type is not supported: {}"
                     .format(type(value)))
return localized
def datetime_normalizer(self, value)
Normalizes a date/datetime value (considered to come from user input) to a localized datetime value. The following conversions happen:

naive date -> localized datetime with the same date, and time=midnight
naive datetime -> localized datetime with the same value
localized datetime -> localized datetime (no conversion)
3.590432
3.540824
1.01401
self._data.update(dict((key, self._deserialize(key, value))
                       for key, value in data.items()))

# In certain situations, we want to treat missing keys as removals
if remove_missing:
    for key in set(self._data.keys()) - set(data.keys()):
        self._data[key] = None

if update_original:
    self._original_data = copy.deepcopy(self._data)
def _update_data(self, data, update_original=False, remove_missing=False)
Low-level update of the internal _data dict. Data coming in as updates should already be serialized. If update_original is True, the original_data dict is updated as well.
2.98901
2.774018
1.077502
# We need to remove spaces for TW-1504, use custom separators
data_tuples = ((key, self._serialize(key, value))
               for key, value in six.iteritems(self._data))

# Empty string denotes empty serialized value, we do not want
# to pass that to TaskWarrior. (Use != rather than `is not`:
# identity comparison with a literal is a bug.)
data_tuples = filter(lambda t: t[1] != '', data_tuples)
data = dict(data_tuples)
return json.dumps(data, separators=(',', ':'))
def export_data(self)
Exports current data contained in the Task as JSON
7.103306
6.68617
1.062388
# Detect the hook type if not given directly
name = os.path.basename(sys.argv[0])
modify = name.startswith('on-modify') if modify is None else modify

# Create the TaskWarrior instance if none passed
if backend is None:
    backends = importlib.import_module('tasklib.backends')
    hook_parent_dir = os.path.dirname(os.path.dirname(sys.argv[0]))
    backend = backends.TaskWarrior(data_location=hook_parent_dir)

# TaskWarrior instance is set to None
task = cls(backend)

# Load the data from the input
task._load_data(json.loads(input_file.readline().strip()))

# If this is an on-modify event, we are provided with an additional
# line of input, which provides the updated data
if modify:
    task._update_data(json.loads(input_file.readline().strip()),
                      remove_missing=True)

return task
def from_input(cls, input_file=sys.stdin, modify=None, backend=None)
Creates a Task object directly from stdin, by reading one line. If modify=True, two lines are used: the first line is interpreted as the original state of the Task object, and the second line as its new, modified value. This is consistent with TaskWarrior's hook system.

An object created by this method should not be saved, deleted or refreshed, as it could create an infinite loop. For this reason, the TaskWarrior instance is set to None.

The input_file argument can be used to specify the input file, but defaults to sys.stdin.
5.535194
4.667557
1.185887
clone = self._clone()
for f in args:
    clone.filter_obj.add_filter(f)
for key, value in kwargs.items():
    clone.filter_obj.add_filter_param(key, value)
return clone
def filter(self, *args, **kwargs)
Returns a new TaskQuerySet with the given filters added.
3.16306
2.680195
1.180161
clone = self.filter(**kwargs)
num = len(clone)
if num == 1:
    return clone._result_cache[0]
if not num:
    raise Task.DoesNotExist(
        'Task matching query does not exist. '
        'Lookup parameters were {0}'.format(kwargs),
    )
raise ValueError(
    'get() returned more than one Task -- it returned {0}! '
    'Lookup parameters were {1}'.format(num, kwargs),
)
def get(self, **kwargs)
Performs the query and returns a single object matching the given keyword arguments.
4.187829
4.274471
0.97973
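A hedged usage sketch with tasklib's public entry point; the filter values are illustrative:

from tasklib import TaskWarrior

tw = TaskWarrior()
pending = tw.tasks.filter(status='pending')  # new TaskQuerySet
task = tw.tasks.get(id=1)                    # exactly one match, else it raises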
'''
Returns True if *filename* is a subpath of any of the paths in *pathlist*.
'''
filename = os.path.abspath(filename)
for path_name in pathlist:
    path_name = os.path.abspath(path_name)
    if is_subpath(filename, path_name):
        return True
return False
def is_local(filename, pathlist)
Returns True if *filename* is a subpath of any of the paths in *pathlist*.
2.808312
2.06803
1.357965
'''
Returns True if *path* points to the same or a subpath of *parent*.
'''
try:
    relpath = os.path.relpath(path, parent)
except ValueError:
    return False  # happens on Windows if drive letters don't match
return relpath == os.curdir or not relpath.startswith(os.pardir)
def is_subpath(path, parent)
Returns True if *path* points to the same or a subpath of *parent*.
4.14597
3.320453
1.248616
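Quick checks for the two path helpers above, using POSIX-style paths:

assert is_subpath('/usr/local/lib', '/usr/local')
assert is_subpath('/usr/local', '/usr/local')      # a path is a subpath of itself
assert not is_subpath('/usr', '/usr/local')
assert is_local('/opt/app/mod.py', ['/srv', '/opt/app'])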
'''
Evaluates a `.pth` file (including support for `import` statements),
and appends the result to the list *dest*. If *dest* is #None, it
will fall back to `sys.path`.

If *imports* is specified, it must be a list. `import` statements
will not be executed but instead appended to that list in tuples of
(*filename*, *line*, *stmt*).

Returns a tuple of (*dest*, *imports*).
'''
if dest is None:
    dest = sys.path
if not os.path.isfile(filename):
    return
with open(filename, 'r') as fp:
    for index, line in enumerate(fp):
        if line.startswith('import'):
            if imports is None:
                exec_pth_import(filename, index + 1, line)
            else:
                imports.append((filename, index + 1, line))
        else:
            index = line.find('#')
            if index > 0:
                line = line[:index]
            line = line.strip()
            if not os.path.isabs(line):
                line = os.path.join(os.path.dirname(filename), line)
            line = os.path.normpath(line)
            if line and line not in dest:
                dest.insert(0, line)
return dest
def eval_pth(filename, sitedir, dest=None, imports=None)
Evaluates a `.pth` file (including support for `import` statements), and appends the result to the list *dest*. If *dest* is #None, it will fall back to `sys.path`.

If *imports* is specified, it must be a list. `import` statements will not be executed but instead appended to that list in tuples of (*filename*, *line*, *stmt*).

Returns a tuple of (*dest*, *imports*).
3.270146
1.650367
1.981466
'''
Better implementation of #pkgutil.extend_path() which adds support
for zipped Python eggs. The original #pkgutil.extend_path() gets
mocked by this function inside the #localimport context.
'''

def zip_isfile(z, name):
    name = name.rstrip('/')  # the original discarded this result (a no-op)
    return name in z.namelist()

pname = os.path.join(*name.split('.'))
zname = '/'.join(name.split('.'))
init_py = '__init__' + os.extsep + 'py'
init_pyc = '__init__' + os.extsep + 'pyc'
init_pyo = '__init__' + os.extsep + 'pyo'

mod_path = list(pth)
for path in sys.path:
    if zipfile.is_zipfile(path):
        try:
            egg = zipfile.ZipFile(path, 'r')
            addpath = (
                zip_isfile(egg, zname + '/__init__.py')
                or zip_isfile(egg, zname + '/__init__.pyc')
                or zip_isfile(egg, zname + '/__init__.pyo'))
            fpath = os.path.join(path, path, zname)
            if addpath and fpath not in mod_path:
                mod_path.append(fpath)
        except (zipfile.BadZipfile, zipfile.LargeZipFile):
            pass  # xxx: Show a warning at least?
    else:
        path = os.path.join(path, pname)
        if os.path.isdir(path) and path not in mod_path:
            addpath = (
                os.path.isfile(os.path.join(path, init_py))
                or os.path.isfile(os.path.join(path, init_pyc))
                or os.path.isfile(os.path.join(path, init_pyo)))
            if addpath and path not in mod_path:
                mod_path.append(path)

return [os.path.normpath(x) for x in mod_path]
def extend_path(pth, name)
Better implementation of #pkgutil.extend_path() which adds support for zipped Python eggs. The original #pkgutil.extend_path() gets mocked by this function inside the #localimport context.
2.81461
2.116743
1.329689
'''
Mock for #pkg_resources.declare_namespace() which calls
#pkgutil.extend_path() afterwards as the original implementation
doesn't seem to properly find all available namespace paths.
'''
self.state['declare_namespace'](package_name)
mod = sys.modules[package_name]
mod.__path__ = pkgutil.extend_path(mod.__path__, package_name)
def _declare_namespace(self, package_name)
Mock for #pkg_resources.declare_namespace() which calls #pkgutil.extend_path() afterwards as the original implementation doesn't seem to properly find all available namespace paths.
8.990061
2.722528
3.3021
'''
Not fragments:
    ip_frag(packet) == 0
    not ip_frag(packet)
First packet of fragments:
    ip_frag(packet) == IP_FRAG_ANY
Not first packet of fragments:
    ip_frag(packet) & IP_FRAG_LATER
All fragments:
    ip_frag(packet) & IP_FRAG_ANY
'''
return (((packet.frag_off & IP_OFFMASK) and IP_FRAG_LATER)
        | ((packet.frag_off & (IP_OFFMASK | IP_MF)) and IP_FRAG_ANY))
def ip_frag(packet)
Not fragments:
    ip_frag(packet) == 0
    not ip_frag(packet)
First packet of fragments:
    ip_frag(packet) == IP_FRAG_ANY
Not first packet of fragments:
    ip_frag(packet) & IP_FRAG_LATER
All fragments:
    ip_frag(packet) & IP_FRAG_ANY
5.292429
2.332422
2.26907
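A self-contained sketch of the flag arithmetic. IP_OFFMASK and IP_MF are the standard IPv4 header constants; the IP_FRAG_ANY/IP_FRAG_LATER values here are illustrative assumptions chosen so the documented identities hold:

IP_OFFMASK = 0x1fff      # fragment offset mask (standard IPv4)
IP_MF = 0x2000           # "more fragments" flag (standard IPv4)
IP_FRAG_ANY = 1          # assumed value, for illustration
IP_FRAG_LATER = 2        # assumed value, for illustration

class Pkt:
    def __init__(self, frag_off):
        self.frag_off = frag_off

def ip_frag(packet):     # local copy of the function above, for the asserts
    return (((packet.frag_off & IP_OFFMASK) and IP_FRAG_LATER)
            | ((packet.frag_off & (IP_OFFMASK | IP_MF)) and IP_FRAG_ANY))

assert ip_frag(Pkt(0)) == 0                   # not fragmented
assert ip_frag(Pkt(IP_MF)) == IP_FRAG_ANY     # first fragment (MF set, offset 0)
assert ip_frag(Pkt(5)) & IP_FRAG_LATER        # later fragment (nonzero offset)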
besti, bestj, bestsize = _cdifflib.find_longest_match(
    self, alo, ahi, blo, bhi)
return _Match(besti, bestj, bestsize)
def find_longest_match(self, alo, ahi, blo, bhi)
Find longest matching block in a[alo:ahi] and b[blo:bhi]. Wrapper for the C implementation of this function.
3.962722
3.709584
1.068239
if a is self.a:
    return
self.a = a
if not isinstance(self.a, list):
    self.a = list(self.a)
# Types must be hashable to work in the c layer. This will raise if
# list items are *not* hashable.
[hash(x) for x in self.a]
def set_seq1(self, a)
Same as SequenceMatcher.set_seq1, but checks for non-list inputs.
6.06543
5.811673
1.043663
if b is self.b and hasattr(self, 'isbjunk'):
    return
self.b = b
if not isinstance(self.a, list):
    self.a = list(self.a)
if not isinstance(self.b, list):
    self.b = list(self.b)
# Types must be hashable to work in the c layer. These checks will
# raise the correct error if they are *not* hashable.
[hash(x) for x in self.a]
[hash(x) for x in self.b]
self.matching_blocks = self.opcodes = None
self.fullbcount = None
junk, popular = _cdifflib.chain_b(self)
assert hasattr(junk, '__contains__')
assert hasattr(popular, '__contains__')
self.isbjunk = junk.__contains__
self.isbpopular = popular.__contains__
def set_seq2(self, b)
Same as SequenceMatcher.set_seq2, but uses the c chainb implementation.
5.705693
4.597469
1.241051
if self.matching_blocks is not None:
    return self.matching_blocks
matching_blocks = _cdifflib.matching_blocks(self)
matching_blocks.append((len(self.a), len(self.b), 0))
self.matching_blocks = matching_blocks
return map(_Match._make, self.matching_blocks)
def get_matching_blocks(self)
Same as SequenceMatcher.get_matching_blocks, but calls through to a faster loop for find_longest_match. The rest is the same.
4.393399
3.506181
1.253044
if hasattr(parser, 'tostream'):
    return parser.tostream(obj, stream, skipprepack)
else:
    data = parser.tobytes(obj, skipprepack)
    cls = type(parser)
    if cls not in _deprecated_parsers:
        _deprecated_parsers.add(cls)
        warnings.warn("Parser %r does not have 'tostream' interfaces" % (cls,),
                      UserWarning)
    return stream.write(data)
def _tostream(parser, obj, stream, skipprepack = False)
Compatibility shim for old parsers
3.336933
3.149354
1.059561
_dict = OrderedDict if ordered else dict
if isinstance(dumped_val, dict):
    # Use _dict so the `ordered` flag is honored (the original always
    # built an OrderedDict here, leaving _dict unused)
    return _dict((k, _to_str(v, encoding)) for k, v in dumped_val.items())
elif isinstance(dumped_val, (list, tuple)):
    return [_to_str(v, encoding) for v in dumped_val]
elif isinstance(dumped_val, bytes):
    try:
        # Honor the encoding parameter (the original hardcoded 'utf-8')
        d = dumped_val.decode(encoding)
    except Exception:
        d = repr(dumped_val)
    return d
else:
    return dumped_val
def _to_str(dumped_val, encoding='utf-8', ordered=True)
Convert bytes in a dump value to str, allowing JSON encoding
1.836298
1.827566
1.004778
'''
Convert a parsed NamedStruct (probably with additional NamedStructs
as fields) into a JSON-friendly format, with only Python primitives
(dictionaries, lists, bytes, integers etc.). You may then use
json.dumps, or pprint, to further process the result.

:param val: parsed result, may contain NamedStruct
:param humanread: if True (default), convert raw data into readable format
    with type-defined formatters. For example, enumerators are converted
    into names, IP addresses are converted into dotted formats, etc.
:param dumpextra: if True, dump "extra" data in the '_extra' field.
    False (default) to ignore them.
:param typeinfo: add struct type information to the dump result. May be
    one of the following values:
        DUMPTYPE_FLAT ('flat'): add a field '_type' for the type information (default)
        DUMPTYPE_KEY ('key'): convert the value to a dictionary like: {'<struct_type>': value}
        DUMPTYPE_NONE ('none'): do not add type information
:param tostr: if True, convert all bytes to str
:param encoding: if tostr is True, first try to decode bytes in `encoding`.
    If that fails, use `repr()` instead.
:returns: the "dump" format of val, suitable for JSON-encoding or printing.
'''
dumped = _dump(val, humanread, dumpextra, typeinfo, ordered)
if tostr:
    dumped = _to_str(dumped, encoding, ordered)
return dumped
def dump(val, humanread = True, dumpextra = False, typeinfo = DUMPTYPE_FLAT, ordered=True, tostr=False, encoding='utf-8')
Convert a parsed NamedStruct (probably with additional NamedStructs as fields) into a JSON-friendly format, with only Python primitives (dictionaries, lists, bytes, integers etc.). You may then use json.dumps, or pprint, to further process the result.

:param val: parsed result, may contain NamedStruct
:param humanread: if True (default), convert raw data into readable format with type-defined formatters. For example, enumerators are converted into names, IP addresses are converted into dotted formats, etc.
:param dumpextra: if True, dump "extra" data in the '_extra' field. False (default) to ignore them.
:param typeinfo: add struct type information to the dump result. May be one of: DUMPTYPE_FLAT ('flat') add a field '_type' for the type information (default); DUMPTYPE_KEY ('key') convert the value to a dictionary like {'<struct_type>': value}; DUMPTYPE_NONE ('none') do not add type information
:param tostr: if True, convert all bytes to str
:param encoding: if tostr is True, first try to decode bytes in `encoding`. If that fails, use `repr()` instead.
:returns: the "dump" format of val, suitable for JSON-encoding or printing.
8.590819
1.220571
7.038361
'''
Factory to generate a function which gets the size from a specified
field, with a limit. Often used in the nstruct "size" parameter.
To retrieve the size without a limit, simply use a lambda expression:
lambda x: x.header.length

:param limit: the maximum size limit; if the acquired value is larger
    than the limit, BadLenError is raised to protect against serious
    results like memory overflow or dead loop.
:param properties: the name of the specified fields. Specify more than
    one string to form a property path, like:
    sizefromlen(256, 'header', 'length') -> s.header.length
:returns: a function which takes a NamedStruct as a parameter, and
    returns the length value from the specified property path.
'''
def func(namedstruct):
    v = namedstruct._target
    for p in properties:
        v = getattr(v, p)
    if v > limit:
        raise BadLenError('Struct length exceeds limit ' + str(limit))
    return v
return func
def sizefromlen(limit, *properties)
Factory to generate a function which gets the size from a specified field, with a limit. Often used in the nstruct "size" parameter. To retrieve the size without a limit, simply use a lambda expression: lambda x: x.header.length

:param limit: the maximum size limit; if the acquired value is larger than the limit, BadLenError is raised to protect against serious results like memory overflow or dead loop.
:param properties: the name of the specified fields. Specify more than one string to form a property path, like: sizefromlen(256, 'header', 'length') -> s.header.length
:returns: a function which takes a NamedStruct as a parameter, and returns the length value from the specified property path.
10.25404
1.584987
6.46948
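A minimal sketch of the returned closure's behavior, faking the NamedStruct with throwaway stubs (only the _target attribute and the property path matter here):

class _Stub(object):
    pass

ns = _Stub()
ns._target = _Stub()
ns._target.header = _Stub()
ns._target.header.length = 24

get_size = sizefromlen(256, 'header', 'length')
assert get_size(ns) == 24
# with ns._target.header.length = 1024, get_size(ns) would raise
# BadLenError, guarding against corrupted length fields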
'''
The reverse of sizefromlen: store the struct size (len(struct)) to
the specified property path. The size includes padding. To store the
size without padding, use packrealsize() instead. Often used in the
nstruct "prepack" parameter.

:param properties: specified field name, same as sizefromlen.
:returns: a function which takes a NamedStruct as a parameter, and
    packs the size to the specified field.
'''
def func(namedstruct):
    v = namedstruct._target
    for p in properties[:-1]:
        v = getattr(v, p)
    setattr(v, properties[-1], len(namedstruct))
return func
def packsize(*properties)
The reverse of sizefromlen: store the struct size (len(struct)) to the specified property path. The size includes padding. To store the size without padding, use packrealsize() instead. Often used in the nstruct "prepack" parameter.

:param properties: specified field name, same as sizefromlen.
:returns: a function which takes a NamedStruct as a parameter, and packs the size to the specified field.
11.825698
1.793233
6.594625
'''
The reverse of sizefromlen: pack the struct's real size
(struct._realsize()) to the specified property path. Unlike packsize,
the size without padding is stored. Often used in the nstruct
"prepack" parameter.

:param properties: specified field name, same as sizefromlen.
:returns: a function which takes a NamedStruct as a parameter, and
    packs the size to the specified field.
'''
def func(namedstruct):
    v = namedstruct._target
    for p in properties[:-1]:
        v = getattr(v, p)
    setattr(v, properties[-1], namedstruct._realsize())
return func
def packrealsize(*properties)
The reverse of sizefromlen: pack the struct's real size (struct._realsize()) to the specified property path. Unlike packsize, the size without padding is stored. Often used in the nstruct "prepack" parameter.

:param properties: specified field name, same as sizefromlen.
:returns: a function which takes a NamedStruct as a parameter, and packs the size to the specified field.
12.015131
1.823526
6.588954
'''
Store a specified value to specified property path. Often used in
nstruct "init" parameter.

:param value: a fixed value
:param properties: specified field name, same as sizefromlen.
:returns: a function which takes a NamedStruct as parameter, and
    store the value to property path.
'''
def func(namedstruct):
    v = namedstruct._target
    for p in properties[:-1]:
        v = getattr(v, p)
    setattr(v, properties[-1], value)
return func
def packvalue(value, *properties)
Store a specified value to specified property path. Often used in nstruct "init" parameter.

:param value: a fixed value
:param properties: specified field name, same as sizefromlen.
:returns: a function which takes a NamedStruct as parameter, and store the value to property path.
10.673854
1.947281
5.481414
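The same stub trick shows the pack* family writing through a property path; again only the attributes the closure touches are faked:

class _Stub(object):
    pass

ns = _Stub()
ns._target = _Stub()
ns._target.header = _Stub()
ns._target.header.version = 0

packvalue(4, 'header', 'version')(ns)
assert ns._target.header.version == 4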
'''
Store an evaluated value to the specified property path. Often used
in the nstruct "prepack" parameter.

:param func: a function which takes a NamedStruct as a parameter and
    returns a value, often a lambda expression
:param properties: specified field name, same as sizefromlen.
:returns: a function which takes a NamedStruct as a parameter, and
    stores the return value of func to the property path.
'''
def func2(namedstruct):
    v = namedstruct._target
    for p in properties[:-1]:
        v = getattr(v, p)
    setattr(v, properties[-1], func(namedstruct))
return func2
def packexpr(func, *properties)
Store an evaluated value to the specified property path. Often used in the nstruct "prepack" parameter.

:param func: a function which takes a NamedStruct as a parameter and returns a value, often a lambda expression
:param properties: specified field name, same as sizefromlen.
:returns: a function which takes a NamedStruct as a parameter, and stores the return value of func to the property path.
8.903116
1.760375
5.057511
'''
Create indices for all the embedded structs. For parser internal use.
'''
try:
    _set(self, '_embedded_indices',
         dict((k, (self, v))
              for k, v in getattr(self._parser.typedef, 'inline_names', {}).items()))
except AttributeError:
    _set(self, '_embedded_indices', {})
def _create_embedded_indices(self)
Create indices for all the embedded structs. For parser internal use.
9.018075
4.727477
1.907587
'''
Unpack a struct from bytes. For parser internal use.
'''
#self._logger.log(logging.DEBUG, 'unpacking %r', self)
current = self
while current is not None:
    data = current._parser.unpack(data, current)
    last = current
    current = getattr(current, '_sub', None)
_set(last, '_extra', data)
def _unpack(self, data)
Unpack a struct from bytes. For parser internal use.
7.231639
4.842178
1.493468
'''
Pack current struct into stream. For parser internal use.

:param stream: a buffered stream (File or BytesIO)
:return: packed bytes length
'''
#self._logger.log(logging.DEBUG, 'packing %r', self)
total_size = 0
current = self
while current is not None:
    total_size += current._parser.packto(current, stream)
    last = current
    current = getattr(current, '_sub', None)
if hasattr(last, '_extra'):
    _extra = last._extra
    total_size += stream.write(_extra)
return total_size
def _packto(self, stream)
Pack current struct into stream. For parser internal use. :param stream: a buffered stream (File or BytesIO) :return: packed bytes length
5.71876
3.148876
1.816128
'''
Prepack stage. For parser internal use.
'''
current = self
while current is not None:
    current._parser.prepack(current, skip_self=True)
    current = getattr(current, '_sub', None)
current = self
while current is not None:
    current._parser.prepack(current, skip_sub=True)
    current = getattr(current, '_sub', None)
def _prepack(self)
Prepack stage. For parser internal use.
4.419309
3.014354
1.466088
'''
Convert the struct to bytes. This is the standard way to convert
a NamedStruct to bytes.

:param skipprepack: if True, the prepack stage is skipped. For parser internal use.
:returns: converted bytes
'''
stream = BytesIO()
self._tostream(stream, skipprepack)
return stream.getvalue()
def _tobytes(self, skipprepack = False)
Convert the struct to bytes. This is the standard way to convert a NamedStruct to bytes. :param skipprepack: if True, the prepack stage is skipped. For parser internal use. :returns: converted bytes
6.375315
2.068252
3.082465
'''
Convert the struct into a bytes stream. This is the standard way
to convert a NamedStruct to bytes.

:param stream: a buffered stream to receive the packed bytes
:param skipprepack: if True, the prepack stage is skipped. For parser internal use.
:returns: total appended size
'''
if not skipprepack:
    self._prepack()
datasize = self._packto(stream)
paddingSize = self._parser.paddingsize2(datasize)
if paddingSize > datasize:
    stream.write(b'\x00' * (paddingSize - datasize))
return paddingSize
def _tostream(self, stream, skipprepack= False)
Convert the struct into a bytes stream. This is the standard way to convert a NamedStruct to bytes.

:param stream: a buffered stream to receive the packed bytes
:param skipprepack: if True, the prepack stage is skipped. For parser internal use.
:returns: total appended size
7.377673
2.507702
2.942006
'''
Get the struct size without padding (or the "real size")

:returns: the "real size" in bytes
'''
current = self
size = 0
while current is not None:
    size += current._parser.sizeof(current)
    last = current
    current = getattr(current, '_sub', None)
size += len(getattr(last, '_extra', b''))
return size
def _realsize(self)
Get the struct size without padding (or the "real size") :returns: the "real size" in bytes
8.066007
5.004175
1.611856
'''
Create sub-classed struct from extra data, with specified parser.
For parser internal use.

:param parser: parser of subclass
'''
_set(self, '_sub', parser._create(
    memoryview(getattr(self, '_extra', b'')), self._target))
try:
    object.__delattr__(self, '_extra')
except:
    pass
def _subclass(self, parser)
Create sub-classed struct from extra data, with specified parser. For parser internal use. :param parser: parser of subclass
16.109766
5.012446
3.213953
'''
Append a subclass (extension) after the base class. For parser internal use.
'''
current = self
while hasattr(current, '_sub'):
    current = current._sub
_set(current, '_sub', newsub)
try:
    object.__delattr__(self, '_extra')
except:
    pass
def _extend(self, newsub)
Append a subclass (extension) after the base class. For parser internal use.
9.839278
4.00427
2.457197
'''
Return current type of this struct

:returns: a typedef object (e.g. nstruct)
'''
current = self
lastname = getattr(current._parser, 'typedef', None)
while hasattr(current, '_sub'):
    current = current._sub
    tn = getattr(current._parser, 'typedef', None)
    if tn is not None:
        lastname = tn
return lastname
def _gettype(self)
Return current type of this struct :returns: a typedef object (e.g. nstruct)
8.68038
4.403373
1.971302
'''
Set the _extra field in the struct, which stands for the additional
("extra") data after the defined fields.
'''
current = self
while hasattr(current, '_sub'):
    current = current._sub
_set(current, '_extra', extradata)
def _setextra(self, extradata)
Set the _extra field in the struct, which stands for the additional ("extra") data after the defined fields.
11.976466
3.598624
3.328068
'''
Get the extra data of this struct.
'''
current = self
while hasattr(current, '_sub'):
    current = current._sub
return getattr(current, '_extra', None)
def _getextra(self)
Get the extra data of this struct.
7.331161
4.785143
1.532067
'''
Replace the embedded struct with a newly-created struct of another
type (usually based on the original type). The attributes of the
old struct are NOT preserved.

:param name: either the original type, or the name of the original
    type. It is always the type used in type definitions, even if
    it has already been replaced once or more.
:param newtype: the new type to replace with
'''
if hasattr(name, 'readablename'):
    name = name.readablename
t, i = self._target._embedded_indices[name]
t._seqs[i] = newtype.parser().new(self._target)
def _replace_embedded_type(self, name, newtype)
Replace the embedded struct with a newly-created struct of another type (usually based on the original type). The attributes of the old struct are NOT preserved.

:param name: either the original type, or the name of the original type. It is always the type used in type definitions, even if it has already been replaced once or more.
:param newtype: the new type to replace with
12.770152
3.147257
4.057549
'''
Return an embedded struct object to calculate the size, or use
_tobytes(True) to convert just the embedded parts.

:param name: either the original type, or the name of the original
    type. It is always the type used in type definitions, even if
    it is already replaced once or more.
:returns: an embedded struct
'''
if hasattr(name, 'readablename'):
    name = name.readablename
t, i = self._target._embedded_indices[name]
return t._seqs[i]
def _get_embedded(self, name)
Return an embedded struct object to calculate the size or use _tobytes(True) to convert just the embedded parts. :param name: either the original type, or the name of the original type. It is always the type used in type definitions, even if it is already replaced once or more. :returns: an embedded struct
18.076178
3.073982
5.880378
'''
Register a type with the specified name. After registration, a
NamedStruct with this type (and any sub-types) can be successfully
pickled and transferred.
'''
NamedStruct._pickleNames[typedef] = name
NamedStruct._pickleTypes[name] = typedef
def _registerPickleType(name, typedef)
Register a type with the specified name. After registration, a NamedStruct with this type (and any sub-types) can be successfully pickled and transferred.
11.589989
2.583532
4.486102
'''
Create indices for all the embedded structs. For parser internal use.
'''
try:
    self._target._embedded_indices.update(
        ((k, (self, v))
         for k, v in getattr(self._parser.typedef, 'inline_names', {}).items()))
except AttributeError:
    pass
def _create_embedded_indices(self)
Create indices for all the embedded structs. For parser internal use.
14.057521
6.329939
2.220799
'''
Try to parse the struct from bytes sequence. The bytes sequence is
taken from a streaming source.

:param buffer: bytes sequence to be parsed from.
:param inlineparent: if specified, this struct is embedded in another struct.
:returns: None if the buffer does not have enough data for this struct
    (e.g. incomplete read from socket); (struct, size) otherwise, where
    struct is the parsed result (usually a NamedStruct object) and size
    is the used bytes length, so you can start another parse from
    buffer[size:].
'''
if self.base is not None:
    return self.base.parse(buffer, inlineparent)
r = self._parse(buffer, inlineparent)
if r is None:
    return None
(s, size) = r
self.subclass(s)
return (s, (size + self.padding - 1) // self.padding * self.padding)
def parse(self, buffer, inlineparent = None)
Try to parse the struct from bytes sequence. The bytes sequence is taken from a streaming source.

:param buffer: bytes sequence to be parsed from.
:param inlineparent: if specified, this struct is embedded in another struct.
:returns: None if the buffer does not have enough data for this struct (e.g. incomplete read from socket); (struct, size) otherwise, where struct is the parsed result (usually a NamedStruct object) and size is the used bytes length, so you can start another parse from buffer[size:].
6.517561
1.777129
3.667466
'''
Sub-class a NamedStruct into correct sub types.

:param namedstruct: a NamedStruct of this type.
'''
cp = self
cs = namedstruct
while True:
    if hasattr(cs, '_sub'):
        cs = cs._sub
        cp = cs._parser
        continue
    subp = None
    clsfr = getattr(cp, 'classifier', None)
    if clsfr is not None:
        clsvalue = clsfr(namedstruct)
        subp = cp.subindices.get(clsvalue)
    if subp is None:
        for sc in cp.subclasses:
            if sc.isinstance(namedstruct):
                subp = sc
                break
    if subp is None:
        break
    cs._subclass(subp)
    cs = cs._sub
    cp = subp
def subclass(self, namedstruct)
Sub-class a NamedStruct into correct sub types. :param namedstruct: a NamedStruct of this type.
4.83591
3.61969
1.336001
'''
Create an empty struct of this type. "initfunc" is called on the
created struct to initialize it.

:param inlineparent: if specified, this struct is embedded into
    another struct "inlineparent"
:returns: a created struct (usually a NamedStruct object)
'''
if self.base is not None:
    s = self.base.new(inlineparent)
    s._extend(self._new(s._target))
else:
    s = self._new(inlineparent)
if self.initfunc is not None:
    self.initfunc(s)
return s
def new(self, inlineparent = None)
Create an empty struct of this type. "initfunc" is called on the created struct to initialize it. :param inlineparent: if specified, this struct is embedded into another struct "inlineparent" :returns: a created struct (usually a NamedStruct object)
5.280643
2.308886
2.287096
'''
Create a struct and use all bytes of data. Different from parse(),
this takes all data, storing unused bytes in the "extra" data of the
struct. Some types, like variable-length arrays, may parse differently
with create() and parse().

:param data: bytes of a packed struct.
:param inlineparent: if specified, this struct is embedded in
    another struct "inlineparent"
:returns: a created NamedStruct object.
'''
if self.base is not None:
    return self.base.create(data, inlineparent)
c = self._create(data, inlineparent)
self.subclass(c)
return c
def create(self, data, inlineparent = None)
Create a struct and use all bytes of data. Different from parse(), this takes all data, storing unused bytes in the "extra" data of the struct. Some types, like variable-length arrays, may parse differently with create() and parse().

:param data: bytes of a packed struct.
:param inlineparent: if specified, this struct is embedded in another struct "inlineparent"
:returns: a created NamedStruct object.
11.072611
1.80557
6.132475
'''
Return the size of the padded struct (including the "real" size and
the padding bytes)

:param namedstruct: a NamedStruct object of this type.
:returns: size including both data and padding.
'''
if self.base is not None:
    return self.base.paddingsize(namedstruct)
realsize = namedstruct._realsize()
return (realsize + self.padding - 1) // self.padding * self.padding
def paddingsize(self, namedstruct)
Return the size of the padded struct (including the "real" size and the padding bytes) :param namedstruct: a NamedStruct object of this type. :returns: size including both data and padding.
5.072608
2.161255
2.347066
'''
Return a padded size from realsize, for NamedStruct internal use.
'''
if self.base is not None:
    return self.base.paddingsize2(realsize)
return (realsize + self.padding - 1) // self.padding * self.padding
def paddingsize2(self, realsize)
Return a padded size from realsize, for NamedStruct internal use.
5.59419
2.326631
2.404417
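parse(), paddingsize() and paddingsize2() all round with the same ceiling-to-a-multiple arithmetic; a standalone check of the formula:

def pad_to(realsize, padding):
    # round realsize up to the next multiple of padding
    return (realsize + padding - 1) // padding * padding

assert pad_to(5, 8) == 8
assert pad_to(8, 8) == 8
assert pad_to(9, 8) == 16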
'''
Convert a NamedStruct to packed bytes, append the bytes to the stream

:param namedstruct: a NamedStruct object of this type to pack.
:param skipprepack: if True, the prepack stage is skipped.
:param stream: a buffered stream
:return: appended bytes size
'''
return namedstruct._tostream(stream, skipprepack)
def tostream(self, namedstruct, stream, skipprepack = False)
Convert a NamedStruct to packed bytes, append the bytes to the stream :param namedstruct: a NamedStruct object of this type to pack. :param skipprepack: if True, the prepack stage is skipped. :param stream: a buffered stream :return: appended bytes size
6.641211
1.821321
3.646369
'''
Run prepack
'''
if not skip_self and self.prepackfunc is not None:
    self.prepackfunc(namedstruct)
def prepack(self, namedstruct, skip_self=False, skip_sub=False)
Run prepack
5.048692
4.473372
1.12861