def colAdd(self, name="", desc="", unit="", comment="", coltype=0, data=None, pos=None):
    """
    column types:
        0: Y
        1: Disregard
        2: Y Error
        3: X
        4: Label
        5: Z
        6: X Error
    """
    if data is None:
        data = []  # avoid a shared mutable default argument
    if pos is None:
        pos = len(self.colNames)
    self.colNames.insert(pos, name)
    self.colDesc.insert(pos, desc)
    self.colUnits.insert(pos, unit)
    self.colComments.insert(pos, comment)
    self.colTypes.insert(pos, coltype)
    self.colData.insert(pos, data)
    return
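A hypothetical usage sketch of the column-type codes (the `sheet` object here is an assumption for illustration, not part of the source):

# Add an X column (coltype 3) and a Y column (coltype 0) to a sheet-like object.
sheet.colAdd(name="time", desc="Time", unit="s", coltype=3, data=[0, 1, 2])
sheet.colAdd(name="volts", desc="Voltage", unit="V", coltype=0, data=[5, 6, 7])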
def colDelete(self, colI=-1):
    """delete a column at a single index. Negative numbers count from the end."""
    # print("DELETING COLUMN: [%d] %s" % (colI, self.colDesc[colI]))
    self.colNames.pop(colI)
    self.colDesc.pop(colI)
    self.colUnits.pop(colI)
    self.colComments.pop(colI)
    self.colTypes.pop(colI)
    self.colData.pop(colI)
    return
def onex(self):
    """delete all X columns except the first one."""
    xCols = [i for i in range(self.nCols) if self.colTypes[i] == 3]
    if len(xCols) > 1:
        for colI in xCols[1:][::-1]:
            self.colDelete(colI)
def alignXY(self):
    """aligns XY pairs (or XYYY etc) by X value."""
    # figure out what data we have and will align to
    xVals = []
    xCols = [x for x in range(self.nCols) if self.colTypes[x] == 3]
    yCols = [x for x in range(self.nCols) if self.colTypes[x] == 0]
    xCols, yCols = np.array(xCols), np.array(yCols)
    for xCol in xCols:
        xVals.extend(self.colData[xCol])
    # xVals = list(np.round(set(xVals), 5))
    xVals = list(sorted(list(set(xVals))))

    # prepare our new aligned dataset
    newData = np.empty(len(xVals) * self.nCols)
    newData[:] = np.nan
    newData = newData.reshape(len(xVals), self.nCols)
    oldData = np.round(self.data, 5)

    # do the alignment
    for xCol in xCols:
        columnsToShift = [xCol]
        for col in range(xCol + 1, self.nCols):
            if self.colTypes[col] == 0:
                columnsToShift.append(col)
            else:
                break
        # determine how to move each row
        for row in range(len(oldData)):
            oldXvalue = oldData[row, xCol]
            if oldXvalue in xVals:
                newRow = xVals.index(oldXvalue)
                newData[newRow, columnsToShift] = oldData[row, columnsToShift]

    # commit changes
    newData[:, 0] = xVals
    self.data = newData
    self.onex()
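A standalone toy sketch of the alignment idea (not the method itself): two XY pairs on different X grids get merged onto the union of X values, with np.nan filling rows where a pair has no sample.

import numpy as np

x1, y1 = [1, 2, 3], [10, 20, 30]
x2, y2 = [2, 3, 4], [200, 300, 400]
xVals = sorted(set(x1) | set(x2))            # [1, 2, 3, 4]

aligned = np.full((len(xVals), 2), np.nan)
for xs, ys, col in ((x1, y1, 0), (x2, y2, 1)):
    for x, y in zip(xs, ys):
        aligned[xVals.index(x), col] = y
print(aligned)
# [[ 10.  nan]
#  [ 20. 200.]
#  [ 30. 300.]
#  [ nan 400.]]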
def wiggle(self, noiseLevel=.1):
    """Slightly changes value of every cell in the worksheet. Used for testing."""
    noise = (np.random.rand(*self.data.shape)) - .5
    self.data = self.data + noise * noiseLevel
def pull(self, bookName=None, sheetName=None):
    """pull data into this OR.SHEET from a real book/sheet in Origin"""
    # tons of validation
    if bookName is None and self.bookName:
        bookName = self.bookName
    if sheetName is None and self.sheetName:
        sheetName = self.sheetName
    if bookName is None:
        bookName = OR.activeBook()
    if bookName and sheetName is None:
        sheetName = OR.activeSheet()
    if not bookName or not sheetName:
        print("can't figure out where to pull from! [%s]%s" % (bookName, sheetName))
        return

    # finally doing the thing
    poSheet = OR.getSheet(bookName, sheetName)
    self.bookName = bookName
    self.sheetName = sheetName
    self.desc = poSheet.GetLongName()
    self.colNames = [poCol.GetName() for poCol in poSheet.Columns()]
    self.colDesc = [poCol.GetLongName() for poCol in poSheet.Columns()]
    self.colUnits = [poCol.GetUnits() for poCol in poSheet.Columns()]
    self.colComments = [poCol.GetComments() for poCol in poSheet.Columns()]
    self.colTypes = [poCol.GetType() for poCol in poSheet.Columns()]
    self.colData = [poCol.GetData() for poCol in poSheet.Columns()]
def push(self, bookName=None, sheetName=None, overwrite=False):
    """push this OR.SHEET into a real book/sheet in Origin"""
    # tons of validation
    if bookName:
        self.bookName = bookName
    if sheetName:
        self.sheetName = sheetName
    if self.sheetName not in OR.sheetNames(bookName):
        print("can't find [%s]%s!" % (bookName, sheetName))
        return

    # clear out our sheet by deleting EVERY column
    poSheet = OR.getSheet(bookName, sheetName)  # CPyWorksheetPageI
    if not poSheet:
        print("WARNING: didn't get posheet", poSheet, bookName, sheetName)
    for poCol in [x for x in poSheet if x.IsValid()]:
        poCol.Destroy()

    # create columns and assign properties to each
    for i in range(len(self.colNames)):
        poSheet.InsertCol(i, self.colNames[i])
        poSheet.Columns(i).SetName(self.colNames[i])
        poSheet.Columns(i).SetLongName(self.colDesc[i])
        poSheet.Columns(i).SetUnits(self.colUnits[i])
        poSheet.Columns(i).SetComments(self.colComments[i])
        poSheet.Columns(i).SetType(self.colTypes[i])
        poSheet.Columns(i).SetData(self.colData[i])
@property
def nRows(self):
    """returns maximum number of rows based on the longest colData"""
    if self.nCols:
        return max([len(x) for x in self.colData])
    else:
        return 0
@property
def data(self):
    """return all of colData as a 2D numpy array."""
    data = np.empty((self.nRows, self.nCols), dtype=float)
    data[:] = np.nan  # make everything nan by default
    for colNum, colData in enumerate(self.colData):
        validIs = np.where([np.isreal(v) for v in colData])[0]
        validData = np.ones(len(colData)) * np.nan
        validData[validIs] = np.array(colData)[validIs]
        data[:len(colData), colNum] = validData  # only fill cells that have data
    return data
@data.setter
def data(self, data):
    """Given a 2D numpy array, fill colData with it."""
    assert type(data) is np.ndarray
    assert data.shape[1] == self.nCols
    for i in range(self.nCols):
        self.colData[i] = data[:, i].tolist()
def focusout(self, event):
    """Change style on focus out events."""
    bc = self.style.lookup("TEntry", "bordercolor", ("!focus",))
    dc = self.style.lookup("TEntry", "darkcolor", ("!focus",))
    lc = self.style.lookup("TEntry", "lightcolor", ("!focus",))
    self.style.configure("%s.spinbox.TFrame" % self.frame,
                         bordercolor=bc, darkcolor=dc, lightcolor=lc)
def focusin(self, event):
    """Change style on focus in events."""
    self.old_value = self.get()
    bc = self.style.lookup("TEntry", "bordercolor", ("focus",))
    dc = self.style.lookup("TEntry", "darkcolor", ("focus",))
    lc = self.style.lookup("TEntry", "lightcolor", ("focus",))
    self.style.configure("%s.spinbox.TFrame" % self.frame,
                         bordercolor=bc, darkcolor=dc, lightcolor=lc)
def open(self):
    """
    Obtains the lvm handle. Usually you would never need to use this method
    unless you are trying to do operations using the ctypes function wrappers
    in conversion.py

    *Raises:*

    * HandleError
    """
    if not self.handle:
        try:
            path = self.system_dir
        except AttributeError:
            path = ''
        self.__handle = lvm_init(path)
        if not bool(self.__handle):
            raise HandleError("Failed to initialize LVM handle.")
def close(self):
    """
    Closes the lvm handle. Usually you would never need to use this method
    unless you are trying to do operations using the ctypes function wrappers
    in conversion.py

    *Raises:*

    * HandleError
    """
    if self.handle:
        q = lvm_quit(self.handle)
        if q != 0:
            raise HandleError("Failed to close LVM handle.")
        self.__handle = None
def get_vg(self, name, mode="r"):
    """
    Returns an instance of VolumeGroup. The name parameter should be an
    existing volume group. By default, all volume groups are open in
    "read" mode::

        from lvm2py import *

        lvm = LVM()
        vg = lvm.get_vg("myvg")

    To open a volume group with write permissions set the mode parameter
    to "w"::

        from lvm2py import *

        lvm = LVM()
        vg = lvm.get_vg("myvg", "w")

    *Args:*

    * name (str): An existing volume group name.
    * mode (str): "r" or "w" for read/write respectively. Default is "r".

    *Raises:*

    * HandleError
    """
    vg = VolumeGroup(self, name=name, mode=mode)
    return vg
def create_vg(self, name, devices):
    """
    Returns a new instance of VolumeGroup with the given name and added
    physical volumes (devices)::

        from lvm2py import *

        lvm = LVM()
        vg = lvm.create_vg("myvg", ["/dev/sdb1", "/dev/sdb2"])

    *Args:*

    * name (str): A volume group name.
    * devices (list): A list of device paths.

    *Raises:*

    * HandleError, CommitError, ValueError
    """
    self.open()
    vgh = lvm_vg_create(self.handle, name)
    if not bool(vgh):
        self.close()
        raise HandleError("Failed to create VG.")
    for device in devices:
        if not os.path.exists(device):
            self._destroy_vg(vgh)
            raise ValueError("%s does not exist." % device)
        ext = lvm_vg_extend(vgh, device)
        if ext != 0:
            self._destroy_vg(vgh)
            raise CommitError("Failed to extend Volume Group.")
        try:
            self._commit_vg(vgh)
        except CommitError:
            self._destroy_vg(vgh)
            raise CommitError("Failed to add %s to VolumeGroup." % device)
    self._close_vg(vgh)
    vg = VolumeGroup(self, name)
    return vg
def remove_vg(self, vg):
    """
    Removes a volume group::

        from lvm2py import *

        lvm = LVM()
        vg = lvm.get_vg("myvg", "w")
        lvm.remove_vg(vg)

    *Args:*

    * vg (obj): A VolumeGroup instance.

    *Raises:*

    * HandleError, CommitError

    .. note::

        The VolumeGroup instance must be in write mode, otherwise
        CommitError is raised.
    """
    vg.open()
    rm = lvm_vg_remove(vg.handle)
    if rm != 0:
        vg.close()
        raise CommitError("Failed to remove VG.")
    com = lvm_vg_write(vg.handle)
    if com != 0:
        vg.close()
        raise CommitError("Failed to commit changes to disk.")
    vg.close()
def vgscan(self):
    """
    Probes the system for volume groups and returns a list of VolumeGroup
    instances::

        from lvm2py import *

        lvm = LVM()
        vgs = lvm.vgscan()

    *Raises:*

    * HandleError
    """
    vg_list = []
    self.open()
    names = lvm_list_vg_names(self.handle)
    if not bool(names):
        return vg_list
    vgnames = []
    vg = dm_list_first(names)
    while vg:
        c = cast(vg, POINTER(lvm_str_list))
        vgnames.append(c.contents.str)
        if dm_list_end(names, vg):
            # end of linked list
            break
        vg = dm_list_next(names, vg)
    self.close()
    for name in vgnames:
        vginst = self.get_vg(name)
        vg_list.append(vginst)
    return vg_list
@classmethod
def from_file(cls, path):
    """
    Create a text from a file.

    Args:
        path (str): The file path.
    """
    with open(path, 'r', errors='replace') as f:
        return cls(f.read())
def load_stopwords(self, path):
    """
    Load a set of stopwords.

    Args:
        path (str): The stopwords file path.
    """
    if path:
        with open(path) as f:
            self.stopwords = set(f.read().splitlines())
    else:
        self.stopwords = set(
            pkgutil
            .get_data('textplot', 'data/stopwords.txt')
            .decode('utf8')
            .splitlines()
        )
def tokenize(self):
    """
    Tokenize the text.
    """
    self.tokens = []
    self.terms = OrderedDict()

    # Generate tokens.
    for token in utils.tokenize(self.text):
        # Ignore stopwords.
        if token['unstemmed'] in self.stopwords:
            self.tokens.append(None)
        else:
            # Token:
            self.tokens.append(token)
            # Term:
            offsets = self.terms.setdefault(token['stemmed'], [])
            offsets.append(token['offset'])
def term_counts(self):
    """
    Returns:
        OrderedDict: An ordered dictionary of term counts.
    """
    counts = OrderedDict()
    for term in self.terms:
        counts[term] = len(self.terms[term])
    return utils.sort_dict(counts)
def term_count_buckets(self):
    """
    Returns:
        dict: A dictionary that maps occurrence counts to the terms that
        appear that many times in the text.
    """
    buckets = {}
    for term, count in self.term_counts().items():
        if count in buckets:
            buckets[count].append(term)
        else:
            buckets[count] = [term]
    return buckets
def most_frequent_terms(self, depth):
    """
    Get the X most frequent terms in the text, and then probe down to get
    any other terms that have the same count as the last term.

    Args:
        depth (int): The number of terms.

    Returns:
        set: The set of frequent terms.
    """
    counts = self.term_counts()

    # Get the top X terms and the instance count of the last word.
    top_terms = set(list(counts.keys())[:depth])
    end_count = list(counts.values())[:depth][-1]

    # Merge in all other words that appear that number of times, so that
    # we don't truncate the last bucket - eg, half of the words that
    # appear 5 times, but not the other half.
    bucket = self.term_count_buckets()[end_count]
    return top_terms.union(set(bucket))
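A quick standalone check of the "don't truncate the last bucket" step, using hypothetical counts already sorted from most to least frequent:

from collections import OrderedDict

counts = OrderedDict([('a', 9), ('b', 5), ('c', 5), ('d', 5), ('e', 2)])
depth = 2

top_terms = set(list(counts.keys())[:depth])        # {'a', 'b'}
end_count = list(counts.values())[:depth][-1]       # 5

# every term tied with the last one gets pulled in too
bucket = [t for t, c in counts.items() if c == end_count]  # ['b', 'c', 'd']
print(top_terms.union(bucket))                      # {'a', 'b', 'c', 'd'}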
def unstem(self, term):
    """
    Given a stemmed term, get the most common unstemmed variant.

    Args:
        term (str): A stemmed term.

    Returns:
        str: The unstemmed token.
    """
    originals = []
    for i in self.terms[term]:
        originals.append(self.tokens[i]['unstemmed'])
    mode = Counter(originals).most_common(1)
    return mode[0][0]
def kde(self, term, bandwidth=2000, samples=1000, kernel='gaussian'):
    """
    Estimate the kernel density of the instances of term in the text.

    Args:
        term (str): A stemmed term.
        bandwidth (int): The kernel bandwidth.
        samples (int): The number of evenly-spaced sample points.
        kernel (str): The kernel function.

    Returns:
        np.array: The density estimate.
    """
    # Get the offsets of the term instances.
    terms = np.array(self.terms[term])[:, np.newaxis]

    # Fit the density estimator on the terms.
    kde = KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(terms)

    # Score an evenly-spaced array of samples.
    x_axis = np.linspace(0, len(self.tokens), samples)[:, np.newaxis]
    scores = kde.score_samples(x_axis)

    # Scale the scores to integrate to 1.
    return np.exp(scores) * (len(self.tokens) / samples)
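A minimal sketch of the scaling step, with made-up offsets: score_samples returns log-densities over the token axis; exponentiating and multiplying by (token count / samples) makes the sampled curve sum to roughly 1.

import numpy as np
from sklearn.neighbors import KernelDensity

offsets = np.array([100, 150, 900])[:, np.newaxis]  # hypothetical term offsets
token_count, samples = 1000, 200

kde = KernelDensity(kernel='gaussian', bandwidth=50).fit(offsets)
x_axis = np.linspace(0, token_count, samples)[:, np.newaxis]
curve = np.exp(kde.score_samples(x_axis)) * (token_count / samples)
print(curve.sum())  # approximately 1.0 (small edge effects at the boundaries)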
def score_intersect(self, term1, term2, **kwargs):
    """
    Compute the geometric area of the overlap between the kernel density
    estimates of two terms.

    Args:
        term1 (str)
        term2 (str)

    Returns: float
    """
    t1_kde = self.kde(term1, **kwargs)
    t2_kde = self.kde(term2, **kwargs)

    # Integrate the overlap.
    overlap = np.minimum(t1_kde, t2_kde)
    return np.trapz(overlap)
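The overlap integral on two tiny hand-made curves (both assumed sampled on the same axis): pointwise minimum, then trapezoidal integration.

import numpy as np

t1 = np.array([0.0, 0.2, 0.5, 0.2, 0.0])
t2 = np.array([0.0, 0.1, 0.3, 0.4, 0.1])
print(np.trapz(np.minimum(t1, t2)))  # area of the shared region under both curves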
def score_cosine(self, term1, term2, **kwargs):
    """
    Compute a weighting score based on the cosine distance between the
    kernel density estimates of two terms.

    Args:
        term1 (str)
        term2 (str)

    Returns: float
    """
    t1_kde = self.kde(term1, **kwargs)
    t2_kde = self.kde(term2, **kwargs)
    return 1 - distance.cosine(t1_kde, t2_kde)
def score_braycurtis(self, term1, term2, **kwargs):
    """
    Compute a weighting score based on the Bray-Curtis distance between
    the kernel density estimates of two terms.

    Args:
        term1 (str)
        term2 (str)

    Returns: float
    """
    t1_kde = self.kde(term1, **kwargs)
    t2_kde = self.kde(term2, **kwargs)
    return 1 - distance.braycurtis(t1_kde, t2_kde)
def plot_term_kdes(self, words, **kwargs):
    """
    Plot kernel density estimates for multiple words.

    Args:
        words (list): A list of unstemmed terms.
    """
    stem = PorterStemmer().stem
    for word in words:
        kde = self.kde(stem(word), **kwargs)
        plt.plot(kde)
    plt.show()
def generate(self, field_name, field):
    """Tries to look up a matching formfield generator (lowercase
    field-classname) and raises a NotImplementedError if no generator
    can be found.
    """
    if hasattr(self, 'generate_%s' % field.__class__.__name__.lower()):
        generator = getattr(
            self, 'generate_%s' % field.__class__.__name__.lower())
        return generator(
            field_name, field,
            (field.verbose_name or field_name).capitalize())
    else:
        raise NotImplementedError(
            '%s is not supported by MongoForm' % field.__class__.__name__)
def _fixIndex(self, index, truncate=False):
    """
    @param truncate: If true, negative indices which go past the beginning
    of the list will be evaluated as zero. For example::

        >>> L = List([1,2,3,4,5])
        >>> len(L)
        5
        >>> L._fixIndex(-9, truncate=True)
        0
    """
    assert not isinstance(index, slice), 'slices are not supported (yet)'
    if index < 0:
        index += self.length
    if index < 0:
        if not truncate:
            raise IndexError('stored List index out of range')
        else:
            index = 0
    return index
def overlapping(startAttribute,  # X
                endAttribute,    # Y
                startValue,      # A
                endValue,        # B
                ):
    """
    Return an L{axiom.iaxiom.IComparison} (an object that can be passed as the
    'comparison' argument to Store.query/.sum/.count) which will constrain a
    query against 2 attributes for ranges which overlap with the given
    arguments.

    For a database with Items of class O which represent values in this
    configuration::

         X                   Y
        (a)                 (b)
         |-------------------|
        (c)      (d)
         |--------|
        (e)      (f)
         |--------|
        (g) (h)
         |---|
        (i)    (j)
         |------|
        (k)                                   (l)
         |-------------------------------------|
        (a)                           (l)
         |-----------------------------|
        (c)                      (b)
         |------------------------|
        (c)  (a)
         |----|
        (b)       (l)
         |---------|

    The query::

        myStore.query(
            O,
            findOverlapping(O.X, O.Y,
                            a, b))

    Will return a generator of Items of class O which represent segments a-b,
    c-d, e-f, k-l, a-l, c-b, c-a and b-l, but NOT segments g-h or i-j.

    (NOTE: If you want to pass attributes of different classes for
    startAttribute and endAttribute, read the implementation of this method
    to discover the additional join clauses required. This may be eliminated
    some day so for now, consider this method undefined over multiple
    classes.)

    In the database where this query is run, for an item N, all values of
    N.startAttribute must be less than N.endAttribute. startValue must be
    less than endValue.
    """
    assert startValue <= endValue

    return OR(
        AND(startAttribute >= startValue,
            startAttribute <= endValue),
        AND(endAttribute >= startValue,
            endAttribute <= endValue),
        AND(startAttribute <= startValue,
            endAttribute >= endValue)
        )
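A standalone check that the three OR'd clauses agree with the usual two-term overlap test for closed intervals (X <= B and Y >= A), under the function's own preconditions x <= y and a <= b:

def overlaps_3clause(x, y, a, b):
    # start in range, OR end in range, OR segment spans the whole range
    return (a <= x <= b) or (a <= y <= b) or (x <= a and y >= b)

def overlaps_2term(x, y, a, b):
    return x <= b and y >= a

a, b = 5, 9
for x, y in [(0, 5), (4, 8), (6, 7), (9, 12), (2, 11), (0, 4), (10, 12)]:
    assert overlaps_3clause(x, y, a, b) == overlaps_2term(x, y, a, b)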
def _tupleCompare(tuple1, ineq, tuple2,
                  eq=lambda a, b: (a == b),
                  ander=AND,
                  orer=OR):
    """
    Compare two 'in-database tuples'. Useful when sorting by a compound key
    and slicing into the middle of that query.
    """
    orholder = []
    for limit in range(len(tuple1)):
        eqconstraint = [
            eq(elem1, elem2) for elem1, elem2
            in list(zip(tuple1, tuple2))[:limit]]
        ineqconstraint = ineq(tuple1[limit], tuple2[limit])
        orholder.append(ander(*(eqconstraint + [ineqconstraint])))
    return orer(*orholder)
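A plain-boolean sketch of the expansion for 2-tuples: (a, b) > (x, y) becomes a > x OR (a == x AND b > y), the same shape _tupleCompare builds with AND/OR comparison objects instead of booleans.

def tuple_gt(t1, t2):
    clauses = []
    for limit in range(len(t1)):
        eqs = all(t1[i] == t2[i] for i in range(limit))  # earlier elements tie
        clauses.append(eqs and t1[limit] > t2[limit])    # this element decides
    return any(clauses)

assert tuple_gt((3, 1), (2, 9))      # first element decides
assert tuple_gt((2, 5), (2, 3))      # tie on first, second decides
assert not tuple_gt((2, 3), (2, 3))  # equal tuples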
def truncate_rationale(rationale, max_length=MAX_RATIONALE_SIZE_IN_EVENT):
    """
    Truncates the rationale for analytics event emission if necessary

    Args:
        rationale (string): the string value of the rationale
        max_length (int): the max length for truncation

    Returns:
        truncated_value (string): the possibly truncated version of the rationale
        was_truncated (bool): returns true if the rationale is truncated
    """
    if isinstance(rationale, basestring) and max_length is not None \
            and len(rationale) > max_length:
        return rationale[0:max_length], True
    else:
        return rationale, False
def validate_options(options):
    """
    Validate the options that the course author set up and return errors
    in a dict if there are any
    """
    errors = []
    if int(options['rationale_size']['min']) < 1:
        errors.append(_('Minimum Characters'))
    if int(options['rationale_size']['max']) < 0 \
            or int(options['rationale_size']['max']) > MAX_RATIONALE_SIZE:
        errors.append(_('Maximum Characters'))
    if not any(error in [_('Minimum Characters'), _('Maximum Characters')]
               for error in errors) \
            and int(options['rationale_size']['max']) <= int(options['rationale_size']['min']):
        errors += [_('Minimum Characters'), _('Maximum Characters')]
    try:
        if options['algo']['num_responses'] != '#' \
                and int(options['algo']['num_responses']) < 0:
            errors.append(_('Number of Responses'))
    except ValueError:
        errors.append(_('Not an Integer'))

    if not errors:
        return None
    else:
        return {'options_error': _('Invalid Option(s): ') + ', '.join(errors)}
def get_student_item_dict(self, anonymous_user_id=None):
    """Create a student_item_dict from our surrounding context.

    See also: submissions.api for details.

    Args:
        anonymous_user_id(str): A unique anonymous_user_id for (user, course) pair.

    Returns:
        (dict): The student item associated with this XBlock instance. This
            includes the student id, item id, and course id.
    """
    item_id = self._serialize_opaque_key(self.scope_ids.usage_id)

    # This is not the real way course_ids should work, but this is a
    # temporary expediency for LMS integration
    if hasattr(self, "xmodule_runtime"):
        course_id = self.get_course_id()  # pylint:disable=E1101
        if anonymous_user_id:
            student_id = anonymous_user_id
        else:
            student_id = self.xmodule_runtime.anonymous_student_id  # pylint:disable=E1101
    else:
        course_id = "edX/Enchantment_101/April_1"
        if self.scope_ids.user_id is None:
            student_id = ''
        else:
            student_id = unicode(self.scope_ids.user_id)

    student_item_dict = dict(
        student_id=student_id,
        item_id=item_id,
        course_id=course_id,
        item_type='ubcpi'
    )
    return student_item_dict
def get_answers_for_student(student_item):
    """
    Retrieve answers from backend for a student and question

    Args:
        student_item (dict): The location of the problem this submission is
            associated with, as defined by a course, student, and item.

    Returns:
        Answers: answers for the student
    """
    submissions = sub_api.get_submissions(student_item)
    if not submissions:
        return Answers()

    latest_submission = submissions[0]
    latest_answer_item = latest_submission.get('answer', {})
    return Answers(latest_answer_item.get(ANSWER_LIST_KEY, []))
def add_answer_for_student(student_item, vote, rationale):
    """
    Add an answer for a student to the backend

    Args:
        student_item (dict): The location of the problem this submission is
            associated with, as defined by a course, student, and item.
        vote (int): the option that the student voted for
        rationale (str): the reason why the student voted for the option
    """
    answers = get_answers_for_student(student_item)
    answers.add_answer(vote, rationale)

    sub_api.create_submission(student_item, {
        ANSWER_LIST_KEY: answers.get_answers_as_list()
    })
def _safe_get(self, revision, key):
    """
    Get answer data (vote or rationale) by revision

    Args:
        revision (int): the revision number for the student answer, which
            can be 0 (original) or 1 (revised)
        key (str): key for retrieving answer data, which can be VOTE_KEY
            or RATIONALE_KEY

    Returns:
        the answer data, or None if the revision doesn't exist
    """
    if self.has_revision(revision):
        return self.raw_answers[revision].get(key)
    else:
        return None
def add_answer(self, vote, rationale):
    """
    Add an answer

    Args:
        vote (int): the option that the student voted for
        rationale (str): the reason why the student voted for the option
    """
    self.raw_answers.append({
        VOTE_KEY: vote,
        RATIONALE_KEY: rationale,
    })
import os
import sys
import traceback

def exceptionToString(e, silent=False):
    """when you "except Exception as e", give me the e and I'll give you a string."""
    exc_type, exc_obj, exc_tb = sys.exc_info()
    s = ("\n" + "=" * 50 + "\n")
    s += "EXCEPTION THROWN UNEXPECTEDLY\n"
    s += " FILE: %s\n" % os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
    s += " LINE: %s\n" % exc_tb.tb_lineno
    s += " TYPE: %s\n" % exc_type
    s += '-' * 50 + '\n'
    s += traceback.format_exc()
    s = s.strip() + '\n' + "=" * 50 + "\n"
    if not silent:
        print(s)
    return s
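A brief usage sketch: the report is built from sys.exc_info(), so the function has to be called from inside the except block.

try:
    1 / 0
except Exception as e:
    report = exceptionToString(e, silent=True)
    assert "ZeroDivisionError" in report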
@classmethod
def createNew(cls, store, pathSegments):
    """
    Create a new SubStore, allocating a new file space for it.
    """
    if isinstance(pathSegments, basestring):
        raise ValueError(
            'Received %r instead of a sequence' % (pathSegments,))
    if store.dbdir is None:
        self = cls(store=store, storepath=None)
    else:
        storepath = store.newDirectory(*pathSegments)
        self = cls(store=store, storepath=storepath)
    self.open()
    self.close()
    return self
def createStore(self, debug, journalMode=None):
    """
    Create the actual Store this Substore represents.
    """
    if self.storepath is None:
        self.store._memorySubstores.append(self)  # don't fall out of cache
        if self.store.filesdir is None:
            filesdir = None
        else:
            filesdir = (self.store.filesdir.child("_substore_files")
                        .child(str(self.storeID))
                        .path)
        return Store(parent=self.store,
                     filesdir=filesdir,
                     idInParent=self.storeID,
                     debug=debug,
                     journalMode=journalMode)
    else:
        return Store(self.storepath.path,
                     parent=self.store,
                     idInParent=self.storeID,
                     debug=debug,
                     journalMode=journalMode)
def upgradeCatalog1to2(oldCatalog):
    """
    Create _TagName instances which version 2 of Catalog automatically
    creates for use in determining the tagNames result, but which version 1
    of Catalog did not create.
    """
    newCatalog = oldCatalog.upgradeVersion('tag_catalog', 1, 2,
                                           tagCount=oldCatalog.tagCount)
    tags = newCatalog.store.query(Tag, Tag.catalog == newCatalog)
    tagNames = tags.getColumn("name").distinct()
    for t in tagNames:
        _TagName(store=newCatalog.store, catalog=newCatalog, name=t)
    return newCatalog
def tagNames(self):
    """
    Return an iterator of unicode strings - the unique tag names which have
    been applied to objects in this catalog.
    """
    return self.store.query(
        _TagName, _TagName.catalog == self).getColumn("name")
def tagsOf(self, obj):
    """
    Return an iterator of unicode strings - the tag names which apply to
    the given object.
    """
    return self.store.query(
        Tag,
        AND(Tag.catalog == self,
            Tag.object == obj)).getColumn("name")
def loaded(self, oself, dbval):
    """
    This method is invoked when the item is loaded from the database, and
    when a transaction is reverted which restores this attribute's value.

    @param oself: an instance of an item which has this attribute.

    @param dbval: the underlying database value which was retrieved.
    """
    setattr(oself, self.dbunderlying, dbval)
    delattr(oself, self.underlying)
def _convertPyval(self, oself, pyval):
    """
    Convert a Python value to a value suitable for inserting into the
    database.

    @param oself: The object on which this descriptor is an attribute.

    @param pyval: The value to be converted.

    @return: A value legal for this column in the database.
    """
    # convert to dbval later, I guess?
    if pyval is None and not self.allowNone:
        raise TypeError("attribute [%s.%s = %s()] must not be None" % (
            self.classname, self.attrname, self.__class__.__name__))
    return self.infilter(pyval, oself, oself.store)
def _queryContainer(self, store):
    """
    Generate and cache the subselect SQL and its arguments. Return the
    subselect SQL.
    """
    if self._subselectSQL is None:
        sql, args = self.container._sqlAndArgs(
            'SELECT', self.container._queryTarget)
        self._subselectSQL, self._subselectArgs = sql, args
    return self._subselectSQL
def _sequenceContainer(self, store):
    """
    Smash whatever we got into a list and save the result in case we are
    executed multiple times. This keeps us from tripping up over generators
    and the like.
    """
    if self._sequence is None:
        self._sequence = list(self.container)
        self._clause = ', '.join(['?'] * len(self._sequence))
    return self._clause
def _sequenceArgs(self, store):
    """
    Filter each element of the data using the attribute type being tested
    for containment and hand back the resulting list.
    """
    self._sequenceContainer(store)  # Force _sequence to be valid
    return [self.attribute.infilter(pyval, None, store)
            for pyval in self._sequence]
def prepareInsert(self, oself, store):
    """
    Prepare for insertion into the database by making the dbunderlying
    attribute of the item a relative pathname with respect to the store
    rather than an absolute pathname.
    """
    if self.relative:
        fspath = self.__get__(oself)
        oself.__dirty__[self.attrname] = self, self.infilter(fspath, oself, store)
def restore(self, time=None):
    """
    Undeletes the object. Returns True if undeleted, False if it was
    already not deleted
    """
    if self.deleted:
        time = time if time else self.deleted_at
        if time == self.deleted_at:
            self.deleted = False
            self.save()
            return True
        else:
            return False
    return False
def full_restore(self, using=None):
    """
    Restores itself, as well as objects that might have been deleted
    along with it if cascade is the deletion strategy
    """
    using = using or router.db_for_write(self.__class__, instance=self)
    restore_counter = Counter()
    if self.deleted:
        time = self.deleted_at
    else:
        return restore_counter

    self.collector = models.deletion.Collector(using=using)
    self.collector.collect([self])

    for qs in self.collector.fast_deletes:
        # TODO make sure the queryset delete has been made a soft delete
        for qs_instance in qs:
            restore_counter.update([qs_instance._meta.model_name])
            qs_instance.restore(time=time)

    for model, instances in self.collector.data.items():
        for instance in instances:
            restore_counter.update([instance._meta.model_name])
            instance.restore(time=time)

    return sum(restore_counter.values()), dict(restore_counter)
def connect_to_ec2(region='us-east-1', access_key=None, secret_key=None):
    """
    Connect to AWS ec2

    :type region: str
    :param region: AWS region to connect to
    :type access_key: str
    :param access_key: AWS access key id
    :type secret_key: str
    :param secret_key: AWS secret access key
    :returns: boto.ec2.connection.EC2Connection -- EC2 connection
    """
    if access_key:
        # Connect using supplied credentials
        logger.info('Connecting to AWS EC2 in {}'.format(region))
        connection = ec2.connect_to_region(
            region,
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key)
    else:
        # Fetch instance metadata
        metadata = get_instance_metadata(timeout=1, num_retries=1)
        if metadata:
            try:
                region = metadata['placement']['availability-zone'][:-1]
            except KeyError:
                pass

        # Connect using env vars or boto credentials
        logger.info('Connecting to AWS EC2 in {}'.format(region))
        connection = ec2.connect_to_region(region)

    if not connection:
        logger.error('An error occurred when connecting to EC2')
        sys.exit(1)

    return connection
def parse(self, output):
    """
    Find stems for a given text.
    """
    output = self._get_lines_with_stems(output)
    words = self._make_unique(output)
    return self._parse_for_simple_stems(words)
def validlocations(configuration=None):
    # type: (Optional[Configuration]) -> List[Dict]
    """
    Read valid locations from HDX

    Args:
        configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.

    Returns:
        List[Dict]: A list of valid locations
    """
    if Locations._validlocations is None:
        if configuration is None:
            configuration = Configuration.read()
        Locations._validlocations = configuration.call_remoteckan(
            'group_list', {'all_fields': True})
    return Locations._validlocations
def get_location_from_HDX_code(code, locations=None, configuration=None):
    # type: (str, Optional[List[Dict]], Optional[Configuration]) -> Optional[str]
    """Get location from HDX location code

    Args:
        code (str): code for which to get location name
        locations (Optional[List[Dict]]): Valid locations list. Defaults to list downloaded from HDX.
        configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.

    Returns:
        Optional[str]: location name
    """
    if locations is None:
        locations = Locations.validlocations(configuration)
    for locdict in locations:
        if code.upper() == locdict['name'].upper():
            return locdict['title']
    return None
def get_HDX_code_from_location(location, locations=None, configuration=None):
    # type: (str, Optional[List[Dict]], Optional[Configuration]) -> Optional[str]
    """Get HDX code for location

    Args:
        location (str): Location for which to get HDX code
        locations (Optional[List[Dict]]): Valid locations list. Defaults to list downloaded from HDX.
        configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.

    Returns:
        Optional[str]: HDX code or None
    """
    if locations is None:
        locations = Locations.validlocations(configuration)
    locationupper = location.upper()
    for locdict in locations:
        locationcode = locdict['name'].upper()
        if locationupper == locationcode:
            return locationcode

    for locdict in locations:
        if locationupper == locdict['title'].upper():
            return locdict['name'].upper()
    return None
def get_HDX_code_from_location_partial(location, locations=None, configuration=None):
    # type: (str, Optional[List[Dict]], Optional[Configuration]) -> Tuple[Optional[str], bool]
    """Get HDX code for location

    Args:
        location (str): Location for which to get HDX code
        locations (Optional[List[Dict]]): Valid locations list. Defaults to list downloaded from HDX.
        configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.

    Returns:
        Tuple[Optional[str], bool]: HDX code and if the match is exact or (None, False) for no match
    """
    hdx_code = Locations.get_HDX_code_from_location(location, locations, configuration)

    if hdx_code is not None:
        return hdx_code, True

    if locations is None:
        locations = Locations.validlocations(configuration)
    locationupper = location.upper()
    for locdict in locations:
        locationname = locdict['title'].upper()
        if locationupper in locationname or locationname in locationupper:
            return locdict['name'].upper(), False

    return None, False
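A standalone sketch of the fallback step: after the exact name/title lookup fails, a case-insensitive substring match in either direction yields a non-exact result. The sample row below is an assumed shape, not real HDX data.

locations = [{'name': 'afg', 'title': 'Afghanistan'}]  # hypothetical row

query = 'AFGHAN'.upper()
for locdict in locations:
    title = locdict['title'].upper()
    if query in title or title in query:
        print(locdict['name'].upper(), False)  # ('AFG', False): partial match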
def Main():
    """The main program function.

    Returns:
        bool: True if successful or False if not.
    """
    argument_parser = argparse.ArgumentParser(
        description='Validates dtFabric format definitions.')

    argument_parser.add_argument(
        'source', nargs='?', action='store', metavar='PATH', default=None,
        help=(
            'path of the file or directory containing the dtFabric format '
            'definitions.'))

    options = argument_parser.parse_args()

    if not options.source:
        print('Source value is missing.')
        print('')
        argument_parser.print_help()
        print('')
        return False

    if not os.path.exists(options.source):
        print('No such file: {0:s}'.format(options.source))
        print('')
        return False

    logging.basicConfig(
        level=logging.INFO, format='[%(levelname)s] %(message)s')

    source_is_directory = os.path.isdir(options.source)

    validator = DefinitionsValidator()

    if source_is_directory:
        source_description = os.path.join(options.source, '*.yaml')
    else:
        source_description = options.source

    print('Validating dtFabric definitions in: {0:s}'.format(source_description))
    if source_is_directory:
        result = validator.CheckDirectory(options.source)
    else:
        result = validator.CheckFile(options.source)

    if not result:
        print('FAILURE')
    else:
        print('SUCCESS')

    return result
def CheckDirectory(self, path, extension='yaml'):
    """Validates definition files in a directory.

    Args:
        path (str): path of the definition file.
        extension (Optional[str]): extension of the filenames to read.

    Returns:
        bool: True if the directory contains valid definitions.
    """
    result = True

    if extension:
        glob_spec = os.path.join(path, '*.{0:s}'.format(extension))
    else:
        glob_spec = os.path.join(path, '*')

    for definition_file in sorted(glob.glob(glob_spec)):
        if not self.CheckFile(definition_file):
            result = False

    return result
def CheckFile(self, path):
    """Validates the definition in a file.

    Args:
        path (str): path of the definition file.

    Returns:
        bool: True if the file contains valid definitions.
    """
    print('Checking: {0:s}'.format(path))

    definitions_registry = registry.DataTypeDefinitionsRegistry()
    definitions_reader = reader.YAMLDataTypeDefinitionsFileReader()

    result = False
    try:
        definitions_reader.ReadFile(definitions_registry, path)
        result = True

    except KeyError as exception:
        logging.warning((
            'Unable to register data type definition in file: {0:s} with '
            'error: {1:s}').format(path, exception))

    except errors.FormatError as exception:
        logging.warning(
            'Unable to validate file: {0:s} with error: {1:s}'.format(
                path, exception))

    return result
def styleattribute(self, element):
    """
    returns css.CSSStyleDeclaration of inline styles, for html: @style
    """
    css_text = element.get('style')
    if css_text:
        return cssutils.css.CSSStyleDeclaration(cssText=css_text)
    else:
        return None
def get_configuration(filename):
    """
    Read configuration file

    :type filename: str
    :param filename: Path to the configuration file
    """
    logger.debug('Reading configuration from {}'.format(filename))
    conf = SafeConfigParser()
    if not conf.read(filename):
        # conf.read returns the list of files actually parsed
        logger.error('Configuration file {} not found'.format(filename))
        sys.exit(1)

    if not conf.has_section('general'):
        logger.error('Missing [general] section in the configuration file')
        sys.exit(1)

    try:
        config = {
            'access-key-id': conf.get('general', 'access-key-id'),
            'secret-access-key': conf.get('general', 'secret-access-key'),
            'region': conf.get('general', 'region'),
        }
    except NoOptionError as err:
        logger.error('Error in config file: {}'.format(err))
        sys.exit(1)

    return config
def inline_css(html_message, encoding='unicode'):
    """
    Inlines all CSS in an HTML string

    Given an HTML document with CSS declared in the HEAD, inlines it into the
    applicable elements. Used primarily in the preparation of styled emails.

    Arguments:
    html_message -- a string of HTML, including CSS
    """
    document = etree.HTML(html_message)
    converter = Conversion()
    converter.perform(document, html_message, '', encoding=encoding)
    return converter.convertedHTML
def _CheckByteStreamSize(self, byte_stream, byte_offset, data_type_size):
    """Checks if the byte stream is large enough for the data type.

    Args:
        byte_stream (bytes): byte stream.
        byte_offset (int): offset into the byte stream where to start.
        data_type_size (int): data type size.

    Raises:
        ByteStreamTooSmallError: if the byte stream is too small.
        MappingError: if the size of the byte stream cannot be determined.
    """
    try:
        byte_stream_size = len(byte_stream)

    except Exception as exception:
        raise errors.MappingError(exception)

    if byte_stream_size - byte_offset < data_type_size:
        raise errors.ByteStreamTooSmallError(
            'Byte stream too small requested: {0:d} available: {1:d}'.format(
                data_type_size, byte_stream_size))
def _GetByteStreamOperation(self):
    """Retrieves the byte stream operation.

    Returns:
        ByteStreamOperation: byte stream operation or None if unable to
            determine.
    """
    byte_order_string = self.GetStructByteOrderString()
    format_string = self.GetStructFormatString()  # pylint: disable=assignment-from-none
    if not format_string:
        return None

    format_string = ''.join([byte_order_string, format_string])
    return byte_operations.StructOperation(format_string)
def GetStructByteOrderString(self):
    """Retrieves the Python struct byte order string.

    Returns:
        str: byte order string as used by Python struct or None if the
            byte order string cannot be determined.
    """
    if not self._data_type_definition:
        return None
    return self._BYTE_ORDER_STRINGS.get(
        self._data_type_definition.byte_order, None)
def FoldByteStream(self, mapped_value, **unused_kwargs):
    """Folds the data type into a byte stream.

    Args:
        mapped_value (object): mapped value.

    Returns:
        bytes: byte stream.

    Raises:
        FoldingError: if the data type definition cannot be folded into
            the byte stream.
    """
    try:
        value = self.FoldValue(mapped_value)
        return self._operation.WriteTo(tuple([value]))

    except Exception as exception:
        error_string = (
            'Unable to write: {0:s} to byte stream with error: {1!s}').format(
                self._data_type_definition.name, exception)
        raise errors.FoldingError(error_string)
def MapByteStream(
        self, byte_stream, byte_offset=0, context=None, **unused_kwargs):
    """Maps the data type on a byte stream.

    Args:
        byte_stream (bytes): byte stream.
        byte_offset (Optional[int]): offset into the byte stream where to start.
        context (Optional[DataTypeMapContext]): data type map context.

    Returns:
        object: mapped value.

    Raises:
        MappingError: if the data type definition cannot be mapped on
            the byte stream.
    """
    data_type_size = self._data_type_definition.GetByteSize()
    self._CheckByteStreamSize(byte_stream, byte_offset, data_type_size)

    try:
        struct_tuple = self._operation.ReadFrom(byte_stream[byte_offset:])
        mapped_value = self.MapValue(*struct_tuple)

    except Exception as exception:
        error_string = (
            'Unable to read: {0:s} from byte stream at offset: {1:d} '
            'with error: {2!s}').format(
                self._data_type_definition.name, byte_offset, exception)
        raise errors.MappingError(error_string)

    if context:
        context.byte_size = data_type_size

    return mapped_value
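Under the hood this boils down to Python's struct module. A standalone sketch for a 4-byte little-endian unsigned integer: the byte-order prefix '<' plus the format character 'I', unpacked at an offset into the stream.

import struct

byte_stream = b'\x00\x01\x78\x56\x34\x12'
byte_offset = 2

(value,) = struct.unpack('<I', byte_stream[byte_offset:byte_offset + 4])
print(hex(value))  # 0x12345678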
def FoldValue(self, value):
    """Folds the data type into a value.

    Args:
        value (object): value.

    Returns:
        object: folded value.

    Raises:
        ValueError: if the data type definition cannot be folded into
            the value.
    """
    if value is False and self._data_type_definition.false_value is not None:
        return self._data_type_definition.false_value

    if value is True and self._data_type_definition.true_value is not None:
        return self._data_type_definition.true_value

    raise ValueError('No matching True and False values')
def GetStructFormatString(self):
    """Retrieves the Python struct format string.

    Returns:
        str: format string as used by Python struct or None if format
            string cannot be determined.
    """
    if self._data_type_definition.format == definitions.FORMAT_UNSIGNED:
        return self._FORMAT_STRINGS_UNSIGNED.get(
            self._data_type_definition.size, None)

    return self._FORMAT_STRINGS_SIGNED.get(
        self._data_type_definition.size, None)
def FoldByteStream(self, mapped_value, **unused_kwargs):
    """Folds the data type into a byte stream.

    Args:
        mapped_value (object): mapped value.

    Returns:
        bytes: byte stream.

    Raises:
        FoldingError: if the data type definition cannot be folded into
            the byte stream.
    """
    value = None

    try:
        if self._byte_order == definitions.BYTE_ORDER_BIG_ENDIAN:
            value = mapped_value.bytes
        elif self._byte_order == definitions.BYTE_ORDER_LITTLE_ENDIAN:
            value = mapped_value.bytes_le

    except Exception as exception:
        error_string = (
            'Unable to write: {0:s} to byte stream with error: {1!s}').format(
                self._data_type_definition.name, exception)
        raise errors.FoldingError(error_string)

    return value
def MapByteStream(
        self, byte_stream, byte_offset=0, context=None, **unused_kwargs):
    """Maps the data type on a byte stream.

    Args:
        byte_stream (bytes): byte stream.
        byte_offset (Optional[int]): offset into the byte stream where to start.
        context (Optional[DataTypeMapContext]): data type map context.

    Returns:
        uuid.UUID: mapped value.

    Raises:
        MappingError: if the data type definition cannot be mapped on
            the byte stream.
    """
    data_type_size = self._data_type_definition.GetByteSize()
    self._CheckByteStreamSize(byte_stream, byte_offset, data_type_size)

    try:
        if self._byte_order == definitions.BYTE_ORDER_BIG_ENDIAN:
            mapped_value = uuid.UUID(
                bytes=byte_stream[byte_offset:byte_offset + 16])
        elif self._byte_order == definitions.BYTE_ORDER_LITTLE_ENDIAN:
            mapped_value = uuid.UUID(
                bytes_le=byte_stream[byte_offset:byte_offset + 16])

    except Exception as exception:
        error_string = (
            'Unable to read: {0:s} from byte stream at offset: {1:d} '
            'with error: {2!s}').format(
                self._data_type_definition.name, byte_offset, exception)
        raise errors.MappingError(error_string)

    if context:
        context.byte_size = data_type_size

    return mapped_value
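The byte-order distinction matters because the same 16 bytes decode differently: bytes= treats the first three UUID fields as big-endian, bytes_le= as little-endian.

import uuid

data = bytes(range(16))
print(uuid.UUID(bytes=data))     # 00010203-0405-0607-0809-0a0b0c0d0e0f
print(uuid.UUID(bytes_le=data))  # 03020100-0504-0706-0809-0a0b0c0d0e0f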
def _CalculateElementsDataSize(self, context):
    """Calculates the elements data size.

    Args:
        context (Optional[DataTypeMapContext]): data type map context, used
            to determine the size hint.

    Returns:
        int: the elements data size or None if not available.
    """
    elements_data_size = None

    if self._HasElementsDataSize():
        elements_data_size = self._EvaluateElementsDataSize(context)

    elif self._HasNumberOfElements():
        element_byte_size = self._element_data_type_definition.GetByteSize()
        if element_byte_size is not None:
            number_of_elements = self._EvaluateNumberOfElements(context)
            elements_data_size = number_of_elements * element_byte_size

    return elements_data_size
def _EvaluateElementsDataSize(self, context):
    """Evaluates elements data size.

    Args:
        context (DataTypeMapContext): data type map context.

    Returns:
        int: elements data size.

    Raises:
        MappingError: if the elements data size cannot be determined.
    """
    elements_data_size = None

    if self._data_type_definition.elements_data_size:
        elements_data_size = self._data_type_definition.elements_data_size

    elif self._data_type_definition.elements_data_size_expression:
        expression = self._data_type_definition.elements_data_size_expression
        namespace = {}
        if context and context.values:
            namespace.update(context.values)
        # Make sure __builtins__ contains an empty dictionary.
        namespace['__builtins__'] = {}

        try:
            elements_data_size = eval(expression, namespace)  # pylint: disable=eval-used
        except Exception as exception:
            raise errors.MappingError(
                'Unable to determine elements data size with error: {0!s}'.format(
                    exception))

    if elements_data_size is None or elements_data_size < 0:
        raise errors.MappingError(
            'Invalid elements data size: {0!s}'.format(elements_data_size))

    return elements_data_size
def _EvaluateNumberOfElements(self, context):
    """Evaluates number of elements.

    Args:
        context (DataTypeMapContext): data type map context.

    Returns:
        int: number of elements.

    Raises:
        MappingError: if the number of elements cannot be determined.
    """
    number_of_elements = None

    if self._data_type_definition.number_of_elements:
        number_of_elements = self._data_type_definition.number_of_elements

    elif self._data_type_definition.number_of_elements_expression:
        expression = self._data_type_definition.number_of_elements_expression
        namespace = {}
        if context and context.values:
            namespace.update(context.values)
        # Make sure __builtins__ contains an empty dictionary.
        namespace['__builtins__'] = {}

        try:
            number_of_elements = eval(expression, namespace)  # pylint: disable=eval-used
        except Exception as exception:
            raise errors.MappingError(
                'Unable to determine number of elements with error: {0!s}'.format(
                    exception))

    if number_of_elements is None or number_of_elements < 0:
        raise errors.MappingError(
            'Invalid number of elements: {0!s}'.format(number_of_elements))

    return number_of_elements
def _GetElementDataTypeDefinition(self, data_type_definition): """Retrieves the element data type definition. Args: data_type_definition (DataTypeDefinition): data type definition. Returns: DataTypeDefinition: element data type definition. Raises: FormatError: if the element data type cannot be determined from the data type definition. """ if not data_type_definition: raise errors.FormatError('Missing data type definition') element_data_type_definition = getattr( data_type_definition, 'element_data_type_definition', None) if not element_data_type_definition: raise errors.FormatError( 'Invalid data type definition missing element') return element_data_type_definition
Retrieves the element data type definition. Args: data_type_definition (DataTypeDefinition): data type definition. Returns: DataTypeDefinition: element data type definition. Raises: FormatError: if the element data type cannot be determined from the data type definition.
entailment
def GetSizeHint(self, context=None, **unused_kwargs): """Retrieves a hint about the size. Args: context (Optional[DataTypeMapContext]): data type map context, used to determine the size hint. Returns: int: hint of the number of bytes needed from the byte stream or None. """ context_state = getattr(context, 'state', {}) elements_data_size = self.GetByteSize() if elements_data_size: return elements_data_size try: elements_data_size = self._CalculateElementsDataSize(context) except errors.MappingError: pass if elements_data_size is None and self._HasElementsTerminator(): size_hints = context_state.get('size_hints', {}) size_hint = size_hints.get(self._data_type_definition.name, None) elements_data_size = 0 if size_hint: elements_data_size = size_hint.byte_size if not size_hint or not size_hint.is_complete: elements_data_size += self._element_data_type_definition.GetByteSize() return elements_data_size
Retrieves a hint about the size. Args: context (Optional[DataTypeMapContext]): data type map context, used to determine the size hint. Returns: int: hint of the number of bytes needed from the byte stream or None.
entailment
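For a terminator-delimited sequence the hint above grows by one element size per incomplete attempt, so a caller can fetch that many more bytes and retry. A worked sketch with hypothetical sizes:

element_byte_size = 4
hint_byte_size, is_complete = 12, False  # state left by a failed attempt

elements_data_size = hint_byte_size
if not is_complete:
  # Ask for one more element's worth of bytes on the next attempt.
  elements_data_size += element_byte_size

assert elements_data_size == 16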
def _CompositeMapByteStream( self, byte_stream, byte_offset=0, context=None, **unused_kwargs): """Maps a sequence of composite data types on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. context (Optional[DataTypeMapContext]): data type map context. Returns: tuple[object, ...]: mapped values. Raises: ByteStreamTooSmallError: if the byte stream is too small. MappingError: if the data type definition cannot be mapped on the byte stream. """ elements_data_size = None elements_terminator = None number_of_elements = None if self._HasElementsDataSize(): elements_data_size = self._EvaluateElementsDataSize(context) element_byte_size = self._element_data_type_definition.GetByteSize() if element_byte_size is not None: number_of_elements, _ = divmod(elements_data_size, element_byte_size) else: elements_terminator = ( self._element_data_type_definition.elements_terminator) elif self._HasElementsTerminator(): elements_terminator = self._data_type_definition.elements_terminator elif self._HasNumberOfElements(): number_of_elements = self._EvaluateNumberOfElements(context) if elements_terminator is None and number_of_elements is None: raise errors.MappingError( 'Unable to determine element terminator or number of elements') context_state = getattr(context, 'state', {}) elements_data_offset = context_state.get('elements_data_offset', 0) element_index = context_state.get('element_index', 0) element_value = None mapped_values = context_state.get('mapped_values', []) size_hints = context_state.get('size_hints', {}) subcontext = context_state.get('context', None) if not subcontext: subcontext = DataTypeMapContext() try: while byte_stream[byte_offset:]: if (number_of_elements is not None and element_index == number_of_elements): break if (elements_data_size is not None and elements_data_offset >= elements_data_size): break element_value = self._element_data_type_map.MapByteStream( byte_stream, byte_offset=byte_offset, context=subcontext) byte_offset += subcontext.byte_size elements_data_offset += subcontext.byte_size element_index += 1 mapped_values.append(element_value) if (elements_terminator is not None and element_value == elements_terminator): break except errors.ByteStreamTooSmallError as exception: context_state['context'] = subcontext context_state['elements_data_offset'] = elements_data_offset context_state['element_index'] = element_index context_state['mapped_values'] = mapped_values raise errors.ByteStreamTooSmallError(exception) except Exception as exception: raise errors.MappingError(exception) if number_of_elements is not None and element_index != number_of_elements: context_state['context'] = subcontext context_state['elements_data_offset'] = elements_data_offset context_state['element_index'] = element_index context_state['mapped_values'] = mapped_values error_string = ( 'Unable to read: {0:s} from byte stream at offset: {1:d} ' 'with error: missing element: {2:d}').format( self._data_type_definition.name, byte_offset, element_index - 1) raise errors.ByteStreamTooSmallError(error_string) if (elements_terminator is not None and element_value != elements_terminator and ( elements_data_size is None or elements_data_offset < elements_data_size)): byte_stream_size = len(byte_stream) size_hints[self._data_type_definition.name] = DataTypeMapSizeHint( byte_stream_size - byte_offset) context_state['context'] = subcontext context_state['elements_data_offset'] = elements_data_offset context_state['element_index'] = element_index context_state['mapped_values'] = mapped_values context_state['size_hints'] = size_hints error_string = ( 'Unable to read: {0:s} from byte stream at offset: {1:d} ' 'with error: unable to find elements terminator').format( self._data_type_definition.name, byte_offset) raise errors.ByteStreamTooSmallError(error_string) if context: context.byte_size = elements_data_offset context.state = {} return tuple(mapped_values)
Maps a sequence of composite data types on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. context (Optional[DataTypeMapContext]): data type map context. Returns: tuple[object, ...]: mapped values. Raises: ByteStreamTooSmallError: if the byte stream is too small. MappingError: if the data type definition cannot be mapped on the byte stream.
entailment
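The parked context state above is what makes the mapping resumable: a short read raises ByteStreamTooSmallError with progress saved, and a retry with a longer stream continues from the saved element index. A self-contained sketch of that pattern, with fixed-size byte slices standing in for the element data type map:

class ByteStreamTooSmallError(Exception):
  pass

def map_elements(byte_stream, element_size, number_of_elements, state):
  byte_offset = state.get('element_index', 0) * element_size
  mapped_values = state.get('mapped_values', [])
  while len(mapped_values) < number_of_elements:
    element = byte_stream[byte_offset:byte_offset + element_size]
    if len(element) < element_size:
      # Park progress so a retry can resume instead of starting over.
      state['element_index'] = len(mapped_values)
      state['mapped_values'] = mapped_values
      raise ByteStreamTooSmallError()
    mapped_values.append(element)
    byte_offset += element_size
  return tuple(mapped_values)

state = {}
try:
  map_elements(b'\x01\x02\x03', 2, 2, state)  # one byte short
except ByteStreamTooSmallError:
  pass
assert map_elements(b'\x01\x02\x03\x04', 2, 2, state) == (
    b'\x01\x02', b'\x03\x04')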
def _LinearFoldByteStream(self, mapped_value, **unused_kwargs): """Folds the data type into a byte stream. Args: mapped_value (object): mapped value. Returns: bytes: byte stream. Raises: FoldingError: if the data type definition cannot be folded into the byte stream. """ try: return self._operation.WriteTo(mapped_value) except Exception as exception: error_string = ( 'Unable to write: {0:s} to byte stream with error: {1!s}').format( self._data_type_definition.name, exception) raise errors.FoldingError(error_string)
Folds the data type into a byte stream. Args: mapped_value (object): mapped value. Returns: bytes: byte stream. Raises: FoldingError: if the data type definition cannot be folded into the byte stream.
entailment
def _LinearMapByteStream( self, byte_stream, byte_offset=0, context=None, **unused_kwargs): """Maps a data type sequence on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. context (Optional[DataTypeMapContext]): data type map context. Returns: tuple[object, ...]: mapped values. Raises: MappingError: if the data type definition cannot be mapped on the byte stream. """ elements_data_size = self._data_type_definition.GetByteSize() self._CheckByteStreamSize(byte_stream, byte_offset, elements_data_size) try: struct_tuple = self._operation.ReadFrom(byte_stream[byte_offset:]) mapped_values = map(self._element_data_type_map.MapValue, struct_tuple) except Exception as exception: error_string = ( 'Unable to read: {0:s} from byte stream at offset: {1:d} ' 'with error: {2!s}').format( self._data_type_definition.name, byte_offset, exception) raise errors.MappingError(error_string) if context: context.byte_size = elements_data_size return tuple(mapped_values)
Maps a data type sequence on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. context (Optional[DataTypeMapContext]): data type map context. Returns: tuple[object, ...]: mapped values. Raises: MappingError: if the data type definition cannot be mapped on the byte stream.
entailment
def GetStructFormatString(self): """Retrieves the Python struct format string. Returns: str: format string as used by Python struct or None if format string cannot be determined. """ if not self._element_data_type_map: return None number_of_elements = None if self._data_type_definition.elements_data_size: element_byte_size = self._element_data_type_definition.GetByteSize() if element_byte_size is None: return None number_of_elements, _ = divmod( self._data_type_definition.elements_data_size, element_byte_size) elif self._data_type_definition.number_of_elements: number_of_elements = self._data_type_definition.number_of_elements format_string = self._element_data_type_map.GetStructFormatString() if not number_of_elements or not format_string: return None return '{0:d}{1:s}'.format(number_of_elements, format_string)
Retrieves the Python struct format string. Returns: str: format string as used by Python struct or None if format string cannot be determined.
entailment
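A fixed-length sequence of fixed-size elements collapses to a single struct format string, with the element count prepended to the element format. A sketch assuming a 4-element uint32 sequence (the element format 'I' is an assumption, not taken from a definition):

import struct

number_of_elements, element_format = 4, 'I'
format_string = '{0:d}{1:s}'.format(number_of_elements, element_format)
assert format_string == '4I'

values = struct.unpack('<' + format_string, bytes(range(16)))
assert len(values) == number_of_elements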
def FoldByteStream(self, mapped_value, context=None, **unused_kwargs): """Folds the data type into a byte stream. Args: mapped_value (object): mapped value. context (Optional[DataTypeMapContext]): data type map context. Returns: bytes: byte stream. Raises: FoldingError: if the data type definition cannot be folded into the byte stream. """ elements_data_size = self._CalculateElementsDataSize(context) if elements_data_size is not None: if elements_data_size != len(mapped_value): raise errors.FoldingError( 'Mismatch between elements data size and mapped value size') elif not self._HasElementsTerminator(): raise errors.FoldingError('Unable to determine elements data size') else: elements_terminator = self._data_type_definition.elements_terminator elements_terminator_size = len(elements_terminator) if mapped_value[-elements_terminator_size:] != elements_terminator: mapped_value = b''.join([mapped_value, elements_terminator]) return mapped_value
Folds the data type into a byte stream. Args: mapped_value (object): mapped value. context (Optional[DataTypeMapContext]): data type map context. Returns: bytes: byte stream. Raises: FoldingError: if the data type definition cannot be folded into the byte stream.
entailment
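When folding a terminator-delimited stream, the terminator is appended only when the value does not already end with it. A sketch with a hypothetical single-byte terminator:

elements_terminator = b'\x00'
terminator_size = len(elements_terminator)

for mapped_value in (b'abc', b'abc\x00'):
  if mapped_value[-terminator_size:] != elements_terminator:
    mapped_value = b''.join([mapped_value, elements_terminator])
  assert mapped_value == b'abc\x00'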
def MapByteStream( self, byte_stream, byte_offset=0, context=None, **unused_kwargs): """Maps the data type on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. context (Optional[DataTypeMapContext]): data type map context. Returns: tuple[object, ...]: mapped values. Raises: MappingError: if the data type definition cannot be mapped on the byte stream. """ context_state = getattr(context, 'state', {}) size_hints = context_state.get('size_hints', {}) elements_data_size = self._CalculateElementsDataSize(context) if elements_data_size is not None: self._CheckByteStreamSize(byte_stream, byte_offset, elements_data_size) elif not self._HasElementsTerminator(): raise errors.MappingError( 'Unable to determine elements data size and missing elements ' 'terminator') else: byte_stream_size = len(byte_stream) element_byte_size = self._element_data_type_definition.GetByteSize() elements_data_offset = byte_offset next_elements_data_offset = elements_data_offset + element_byte_size elements_terminator = self._data_type_definition.elements_terminator element_value = byte_stream[ elements_data_offset:next_elements_data_offset] while byte_stream[elements_data_offset:]: elements_data_offset = next_elements_data_offset if element_value == elements_terminator: elements_data_size = elements_data_offset - byte_offset break next_elements_data_offset += element_byte_size element_value = byte_stream[ elements_data_offset:next_elements_data_offset] if element_value != elements_terminator: size_hints[self._data_type_definition.name] = DataTypeMapSizeHint( byte_stream_size - byte_offset) context_state['size_hints'] = size_hints error_string = ( 'Unable to read: {0:s} from byte stream at offset: {1:d} ' 'with error: unable to find elements terminator').format( self._data_type_definition.name, byte_offset) raise errors.ByteStreamTooSmallError(error_string) if context: context.byte_size = elements_data_size size_hints[self._data_type_definition.name] = DataTypeMapSizeHint( elements_data_size, is_complete=True) context_state['size_hints'] = size_hints return byte_stream[byte_offset:byte_offset + elements_data_size]
Maps the data type on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. context (Optional[DataTypeMapContext]): data type map context. Returns: tuple[object, ...]: mapped values. Raises: MappingError: if the data type definition cannot be mapped on the byte stream.
entailment
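The scan above walks the stream one element at a time until the terminator element is seen; the mapped bytes include the terminator and exclude trailing data. A standalone sketch for 2-byte elements terminated by b'\x00\x00' (the stream content is hypothetical):

byte_stream = b'd\x00t\x00f\x00\x00\x00trailing'
element_byte_size, elements_terminator = 2, b'\x00\x00'

byte_offset = 0
elements_data_offset = byte_offset
next_offset = elements_data_offset + element_byte_size
element_value = byte_stream[elements_data_offset:next_offset]
elements_data_size = None
while byte_stream[elements_data_offset:]:
  elements_data_offset = next_offset
  if element_value == elements_terminator:
    elements_data_size = elements_data_offset - byte_offset
    break
  next_offset += element_byte_size
  element_value = byte_stream[elements_data_offset:next_offset]

assert byte_stream[:elements_data_size] == b'd\x00t\x00f\x00\x00\x00'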
def MapByteStream(self, byte_stream, byte_offset=0, **unused_kwargs): """Maps the data type on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. Returns: object: mapped value. Raises: MappingError: if the data type definition cannot be mapped on the byte stream. """ return byte_stream[byte_offset:byte_offset + self.byte_size]
Maps the data type on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. Returns: object: mapped value. Raises: MappingError: if the data type definition cannot be mapped on the byte stream.
entailment
def FoldByteStream(self, mapped_value, **kwargs): """Folds the data type into a byte stream. Args: mapped_value (object): mapped value. Returns: bytes: byte stream. Raises: FoldingError: if the data type definition cannot be folded into the byte stream. """ try: byte_stream = mapped_value.encode(self._data_type_definition.encoding) except Exception as exception: error_string = ( 'Unable to write: {0:s} to byte stream with error: {1!s}').format( self._data_type_definition.name, exception) raise errors.FoldingError(error_string) return super(StringMap, self).FoldByteStream(byte_stream, **kwargs)
Folds the data type into a byte stream. Args: mapped_value (object): mapped value. Returns: bytes: byte stream. Raises: FoldingError: if the data type definition cannot be folded into the byte stream.
entailment
def MapByteStream(self, byte_stream, byte_offset=0, **kwargs): """Maps the data type on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. Returns: str: mapped values. Raises: MappingError: if the data type definition cannot be mapped on the byte stream. """ byte_stream = super(StringMap, self).MapByteStream( byte_stream, byte_offset=byte_offset, **kwargs) if self._HasElementsTerminator(): # Remove the elements terminator and any trailing data from # the byte stream. elements_terminator = self._data_type_definition.elements_terminator elements_terminator_size = len(elements_terminator) byte_offset = 0 byte_stream_size = len(byte_stream) while byte_offset < byte_stream_size: end_offset = byte_offset + elements_terminator_size if byte_stream[byte_offset:end_offset] == elements_terminator: break byte_offset += elements_terminator_size byte_stream = byte_stream[:byte_offset] try: return byte_stream.decode(self._data_type_definition.encoding) except Exception as exception: error_string = ( 'Unable to read: {0:s} from byte stream at offset: {1:d} ' 'with error: {2!s}').format( self._data_type_definition.name, byte_offset, exception) raise errors.MappingError(error_string)
Maps the data type on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. Returns: str: mapped values. Raises: MappingError: if the data type definition cannot be mapped on the byte stream.
entailment
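The trim loop above steps through the mapped bytes in terminator-sized strides and cuts at the first terminator, so the decode sees only the string payload. A standalone sketch assuming a UTF-16-LE string definition (the stream content is hypothetical):

byte_stream = b'd\x00t\x00f\x00\x00\x00\xff\xfe'  # 'dtf' + terminator + junk
elements_terminator = b'\x00\x00'
terminator_size = len(elements_terminator)

byte_offset = 0
while byte_offset < len(byte_stream):
  if byte_stream[byte_offset:byte_offset + terminator_size] == (
      elements_terminator):
    break
  byte_offset += terminator_size

assert byte_stream[:byte_offset].decode('utf-16-le') == 'dtf'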
def _CheckCompositeMap(self, data_type_definition): """Determines if the data type definition needs a composite map. Args: data_type_definition (DataTypeDefinition): structure data type definition. Returns: bool: True if a composite map is needed, False otherwise. Raises: FormatError: if a composite map is needed cannot be determined from the data type definition. """ if not data_type_definition: raise errors.FormatError('Missing data type definition') members = getattr(data_type_definition, 'members', None) if not members: raise errors.FormatError('Invalid data type definition missing members') is_composite_map = False last_member_byte_order = data_type_definition.byte_order for member_definition in members: if member_definition.IsComposite(): is_composite_map = True break # TODO: check for padding type # TODO: determine if padding type can be defined as linear if (last_member_byte_order != definitions.BYTE_ORDER_NATIVE and member_definition.byte_order != definitions.BYTE_ORDER_NATIVE and last_member_byte_order != member_definition.byte_order): is_composite_map = True break last_member_byte_order = member_definition.byte_order return is_composite_map
Determines if the data type definition needs a composite map. Args: data_type_definition (DataTypeDefinition): structure data type definition. Returns: bool: True if a composite map is needed, False otherwise. Raises: FormatError: if a composite map is needed cannot be determined from the data type definition.
entailment
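The byte-order part of that check means two adjacent members with explicit but conflicting byte orders cannot share one struct format string. A simplified sketch of the rule (the real check also seeds the last byte order from the structure definition; member orders here are hypothetical):

BYTE_ORDER_NATIVE = '='  # stand-in for definitions.BYTE_ORDER_NATIVE

member_byte_orders = ['<', '>']  # little-endian member, then big-endian

is_composite_map = False
last_byte_order = BYTE_ORDER_NATIVE
for byte_order in member_byte_orders:
  if (last_byte_order != BYTE_ORDER_NATIVE and
      byte_order != BYTE_ORDER_NATIVE and
      last_byte_order != byte_order):
    # Two explicit, conflicting byte orders cannot share one struct call.
    is_composite_map = True
    break
  last_byte_order = byte_order

assert is_composite_map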
def _CompositeFoldByteStream( self, mapped_value, context=None, **unused_kwargs): """Folds the data type into a byte stream. Args: mapped_value (object): mapped value. context (Optional[DataTypeMapContext]): data type map context. Returns: bytes: byte stream. Raises: FoldingError: if the data type definition cannot be folded into the byte stream. """ context_state = getattr(context, 'state', {}) attribute_index = context_state.get('attribute_index', 0) subcontext = context_state.get('context', None) if not subcontext: subcontext = DataTypeMapContext(values={ type(mapped_value).__name__: mapped_value}) data_attributes = [] for attribute_index in range(attribute_index, self._number_of_attributes): attribute_name = self._attribute_names[attribute_index] data_type_map = self._data_type_maps[attribute_index] member_value = getattr(mapped_value, attribute_name, None) if data_type_map is None or member_value is None: continue member_data = data_type_map.FoldByteStream( member_value, context=subcontext) if member_data is None: return None data_attributes.append(member_data) if context: context.state = {} return b''.join(data_attributes)
Folds the data type into a byte stream. Args: mapped_value (object): mapped value. context (Optional[DataTypeMapContext]): data type map context. Returns: bytes: byte stream. Raises: FoldingError: if the data type definition cannot be folded into the byte stream.
entailment
def _CompositeMapByteStream( self, byte_stream, byte_offset=0, context=None, **unused_kwargs): """Maps a sequence of composite data types on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. context (Optional[DataTypeMapContext]): data type map context. Returns: object: mapped value. Raises: MappingError: if the data type definition cannot be mapped on the byte stream. """ context_state = getattr(context, 'state', {}) attribute_index = context_state.get('attribute_index', 0) mapped_values = context_state.get('mapped_values', None) subcontext = context_state.get('context', None) if not mapped_values: mapped_values = self._structure_values_class() if not subcontext: subcontext = DataTypeMapContext(values={ type(mapped_values).__name__: mapped_values}) members_data_size = 0 for attribute_index in range(attribute_index, self._number_of_attributes): attribute_name = self._attribute_names[attribute_index] data_type_map = self._data_type_maps[attribute_index] member_definition = self._data_type_definition.members[attribute_index] condition = getattr(member_definition, 'condition', None) if condition: namespace = dict(subcontext.values) # Make sure __builtins__ contains an empty dictionary. namespace['__builtins__'] = {} try: condition_result = eval(condition, namespace) # pylint: disable=eval-used except Exception as exception: raise errors.MappingError( 'Unable to evaluate condition with error: {0!s}'.format( exception)) if not isinstance(condition_result, bool): raise errors.MappingError( 'Condition does not result in a boolean value') if not condition_result: continue if isinstance(member_definition, data_types.PaddingDefinition): _, byte_size = divmod( members_data_size, member_definition.alignment_size) if byte_size > 0: byte_size = member_definition.alignment_size - byte_size data_type_map.byte_size = byte_size try: value = data_type_map.MapByteStream( byte_stream, byte_offset=byte_offset, context=subcontext) setattr(mapped_values, attribute_name, value) except errors.ByteStreamTooSmallError as exception: context_state['attribute_index'] = attribute_index context_state['context'] = subcontext context_state['mapped_values'] = mapped_values raise errors.ByteStreamTooSmallError(exception) except Exception as exception: raise errors.MappingError(exception) supported_values = getattr(member_definition, 'values', None) if supported_values and value not in supported_values: raise errors.MappingError( 'Value: {0!s} not in supported values: {1:s}'.format( value, ', '.join([ '{0!s}'.format(value) for value in supported_values]))) byte_offset += subcontext.byte_size members_data_size += subcontext.byte_size if attribute_index != (self._number_of_attributes - 1): context_state['attribute_index'] = attribute_index context_state['context'] = subcontext context_state['mapped_values'] = mapped_values error_string = ( 'Unable to read: {0:s} from byte stream at offset: {1:d} ' 'with error: missing attribute: {2:d}').format( self._data_type_definition.name, byte_offset, attribute_index) raise errors.ByteStreamTooSmallError(error_string) if context: context.byte_size = members_data_size context.state = {} return mapped_values
Maps a sequence of composite data types on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. context (Optional[DataTypeMapContext]): data type map context. Returns: object: mapped value. Raises: MappingError: if the data type definition cannot be mapped on the byte stream.
entailment
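A member condition is evaluated against the values mapped so far, with built-ins stripped, and must yield a boolean; a False result skips the member. A minimal sketch of that evaluation (the structure name, member, and condition are hypothetical):

class _Values(object):
  version = 2

subcontext_values = {'my_structure': _Values()}
condition = 'my_structure.version >= 2'

namespace = dict(subcontext_values)
namespace['__builtins__'] = {}  # deny access to built-in functions

condition_result = eval(condition, namespace)  # pylint: disable=eval-used
assert isinstance(condition_result, bool)
assert condition_result  # the member is mapped; False would skip it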
def _GetAttributeNames(self, data_type_definition): """Determines the attribute (or field) names of the members. Args: data_type_definition (DataTypeDefinition): data type definition. Returns: list[str]: attribute names. Raises: FormatError: if the attribute names cannot be determined from the data type definition. """ if not data_type_definition: raise errors.FormatError('Missing data type definition') attribute_names = [] for member_definition in data_type_definition.members: attribute_names.append(member_definition.name) return attribute_names
Determines the attribute (or field) names of the members. Args: data_type_definition (DataTypeDefinition): data type definition. Returns: list[str]: attribute names. Raises: FormatError: if the attribute names cannot be determined from the data type definition.
entailment
def _GetMemberDataTypeMaps(self, data_type_definition, data_type_map_cache): """Retrieves the member data type maps. Args: data_type_definition (DataTypeDefinition): data type definition. data_type_map_cache (dict[str, DataTypeMap]): cached data type maps. Returns: list[DataTypeMap]: member data type maps. Raises: FormatError: if the data type maps cannot be determined from the data type definition. """ if not data_type_definition: raise errors.FormatError('Missing data type definition') members = getattr(data_type_definition, 'members', None) if not members: raise errors.FormatError('Invalid data type definition missing members') data_type_maps = [] members_data_size = 0 for member_definition in members: if isinstance(member_definition, data_types.MemberDataTypeDefinition): member_definition = member_definition.member_data_type_definition if (data_type_definition.byte_order != definitions.BYTE_ORDER_NATIVE and member_definition.byte_order == definitions.BYTE_ORDER_NATIVE): # Make a copy of the data type definition where byte-order can be # safely changed. member_definition = copy.copy(member_definition) member_definition.name = '_{0:s}_{1:s}'.format( data_type_definition.name, member_definition.name) member_definition.byte_order = data_type_definition.byte_order if member_definition.name not in data_type_map_cache: data_type_map = DataTypeMapFactory.CreateDataTypeMapByType( member_definition) data_type_map_cache[member_definition.name] = data_type_map data_type_map = data_type_map_cache[member_definition.name] if members_data_size is not None: if not isinstance(member_definition, data_types.PaddingDefinition): byte_size = member_definition.GetByteSize() else: _, byte_size = divmod( members_data_size, member_definition.alignment_size) if byte_size > 0: byte_size = member_definition.alignment_size - byte_size data_type_map.byte_size = byte_size if byte_size is None: members_data_size = None else: members_data_size += byte_size data_type_maps.append(data_type_map) return data_type_maps
Retrieves the member data type maps. Args: data_type_definition (DataTypeDefinition): data type definition. data_type_map_cache (dict[str, DataTypeMap]): cached data type maps. Returns: list[DataTypeMap]: member data type maps. Raises: FormatError: if the data type maps cannot be determined from the data type definition.
entailment
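The padding sizing in that record is a remainder-to-boundary computation. A worked sketch: with 6 bytes of members mapped so far and an alignment size of 4, the padding member consumes 2 bytes:

members_data_size, alignment_size = 6, 4

_, byte_size = divmod(members_data_size, alignment_size)
if byte_size > 0:
  # 6 bytes mapped so far; 2 more reach the next 4-byte boundary.
  byte_size = alignment_size - byte_size

assert byte_size == 2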
def _LinearFoldByteStream(self, mapped_value, **unused_kwargs): """Folds the data type into a byte stream. Args: mapped_value (object): mapped value. Returns: bytes: byte stream. Raises: FoldingError: if the data type definition cannot be folded into the byte stream. """ try: attribute_values = [ getattr(mapped_value, attribute_name, None) for attribute_name in self._attribute_names] attribute_values = [ value for value in attribute_values if value is not None] return self._operation.WriteTo(tuple(attribute_values)) except Exception as exception: error_string = ( 'Unable to write: {0:s} to byte stream with error: {1!s}').format( self._data_type_definition.name, exception) raise errors.FoldingError(error_string)
Folds the data type into a byte stream. Args: mapped_value (object): mapped value. Returns: bytes: byte stream. Raises: FoldingError: if the data type definition cannot be folded into the byte stream.
entailment
def _LinearMapByteStream( self, byte_stream, byte_offset=0, context=None, **unused_kwargs): """Maps a data type sequence on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. context (Optional[DataTypeMapContext]): data type map context. Returns: object: mapped value. Raises: MappingError: if the data type definition cannot be mapped on the byte stream. """ members_data_size = self._data_type_definition.GetByteSize() self._CheckByteStreamSize(byte_stream, byte_offset, members_data_size) try: struct_tuple = self._operation.ReadFrom(byte_stream[byte_offset:]) struct_values = [] for attribute_index, value in enumerate(struct_tuple): data_type_map = self._data_type_maps[attribute_index] member_definition = self._data_type_definition.members[attribute_index] value = data_type_map.MapValue(value) supported_values = getattr(member_definition, 'values', None) if supported_values and value not in supported_values: raise errors.MappingError( 'Value: {0!s} not in supported values: {1:s}'.format( value, ', '.join([ '{0!s}'.format(value) for value in supported_values]))) struct_values.append(value) mapped_value = self._structure_values_class(*struct_values) except Exception as exception: error_string = ( 'Unable to read: {0:s} from byte stream at offset: {1:d} ' 'with error: {2!s}').format( self._data_type_definition.name, byte_offset, exception) raise errors.MappingError(error_string) if context: context.byte_size = members_data_size return mapped_value
Maps a data type sequence on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. context (Optional[DataTypeMapContext]): data type map context. Returns: object: mapped value. Raises: MappingError: if the data type definition cannot be mapped on the byte stream.
entailment
def GetSizeHint(self, context=None, **unused_kwargs): """Retrieves a hint about the size. Args: context (Optional[DataTypeMapContext]): data type map context, used to determine the size hint. Returns: int: hint of the number of bytes needed from the byte stream or None. """ context_state = getattr(context, 'state', {}) subcontext = context_state.get('context', None) if not subcontext: mapped_values = context_state.get('mapped_values', None) subcontext = DataTypeMapContext(values={ type(mapped_values).__name__: mapped_values}) size_hint = 0 for data_type_map in self._data_type_maps: data_type_size = data_type_map.GetSizeHint(context=subcontext) if data_type_size is None: break size_hint += data_type_size return size_hint
Retrieves a hint about the size. Args: context (Optional[DataTypeMapContext]): data type map context, used to determine the size hint. Returns: int: hint of the number of bytes needed from the byte stream or None.
entailment
def GetStructFormatString(self): """Retrieves the Python struct format string. Returns: str: format string as used by Python struct or None if format string cannot be determined. """ if self._format_string is None and self._data_type_maps: format_strings = [] for member_data_type_map in self._data_type_maps: if member_data_type_map is None: return None member_format_string = member_data_type_map.GetStructFormatString() if member_format_string is None: return None format_strings.append(member_format_string) self._format_string = ''.join(format_strings) return self._format_string
Retrieves the Python struct format string. Returns: str: format string as used by Python struct or None if format string cannot be determined.
entailment
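For a structure the member format strings are simply concatenated, and the result is usable as one struct call. A sketch assuming a uint32 member followed by a uint16 member (the member formats are hypothetical):

import struct

member_format_strings = ['I', 'H']
format_string = ''.join(member_format_strings)

assert format_string == 'IH'
assert struct.calcsize('<' + format_string) == 6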
def FoldByteStream(self, mapped_value, **unused_kwargs): # pylint: disable=redundant-returns-doc """Folds the data type into a byte stream. Args: mapped_value (object): mapped value. Returns: bytes: byte stream. Raises: FoldingError: if the data type definition cannot be folded into the byte stream. """ raise errors.FoldingError( 'Unable to fold {0:s} data type into byte stream'.format( self._data_type_definition.TYPE_INDICATOR))
Folds the data type into a byte stream. Args: mapped_value (object): mapped value. Returns: bytes: byte stream. Raises: FoldingError: if the data type definition cannot be folded into the byte stream.
entailment