Dataset schema (column name: type, length statistics):

repository_name: string, length 7 to 55
func_path_in_repository: string, length 4 to 223
func_name: string, length 1 to 134
whole_func_string: string, length 75 to 104k
language: string, 1 class
func_code_string: string, length 75 to 104k
func_code_tokens: list, length 19 to 28.4k
func_documentation_string: string, length 1 to 46.9k
func_documentation_tokens: list, length 1 to 1.97k
split_name: string, 1 class
func_code_url: string, length 87 to 315
timknip/pycsg
csg/geom.py
Plane.splitPolygon
def splitPolygon(self, polygon, coplanarFront, coplanarBack, front, back):
    """
    Split `polygon` by this plane if needed, then put the polygon or polygon
    fragments in the appropriate lists. Coplanar polygons go into either
    `coplanarFront` or `coplanarBack` depending on their orientation with
    respect to this plane. Polygons in front or in back of this plane go into
    either `front` or `back`.
    """
    COPLANAR = 0  # all the vertices are within EPSILON distance from plane
    FRONT = 1     # all the vertices are in front of the plane
    BACK = 2      # all the vertices are at the back of the plane
    SPANNING = 3  # some vertices are in front, some in the back

    # Classify each point as well as the entire polygon into one of the above
    # four classes.
    polygonType = 0
    vertexLocs = []
    numVertices = len(polygon.vertices)
    for i in range(numVertices):
        t = self.normal.dot(polygon.vertices[i].pos) - self.w
        loc = -1
        if t < -Plane.EPSILON:
            loc = BACK
        elif t > Plane.EPSILON:
            loc = FRONT
        else:
            loc = COPLANAR
        polygonType |= loc
        vertexLocs.append(loc)

    # Put the polygon in the correct list, splitting it when necessary.
    if polygonType == COPLANAR:
        normalDotPlaneNormal = self.normal.dot(polygon.plane.normal)
        if normalDotPlaneNormal > 0:
            coplanarFront.append(polygon)
        else:
            coplanarBack.append(polygon)
    elif polygonType == FRONT:
        front.append(polygon)
    elif polygonType == BACK:
        back.append(polygon)
    elif polygonType == SPANNING:
        f = []
        b = []
        for i in range(numVertices):
            j = (i + 1) % numVertices
            ti = vertexLocs[i]
            tj = vertexLocs[j]
            vi = polygon.vertices[i]
            vj = polygon.vertices[j]
            if ti != BACK:
                f.append(vi)
            if ti != FRONT:
                if ti != BACK:
                    b.append(vi.clone())
                else:
                    b.append(vi)
            if (ti | tj) == SPANNING:
                # interpolation weight at the intersection point
                t = (self.w - self.normal.dot(vi.pos)) / self.normal.dot(vj.pos.minus(vi.pos))
                # intersection point on the plane
                v = vi.interpolate(vj, t)
                f.append(v)
                b.append(v.clone())
        if len(f) >= 3:
            front.append(Polygon(f, polygon.shared))
        if len(b) >= 3:
            back.append(Polygon(b, polygon.shared))
python
Split `polygon` by this plane if needed, then put the polygon or polygon fragments in the appropriate lists. Coplanar polygons go into either `coplanarFront` or `coplanarBack` depending on their orientation with respect to this plane. Polygons in front or in back of this plane go into either `front` or `back`
train
https://github.com/timknip/pycsg/blob/b8f9710fd15c38dcc275d56a2108f604af38dcc8/csg/geom.py#L192-L260
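For orientation, here is a minimal driving sketch. It is not from the source: the Vector, Vertex(pos, normal), Polygon(vertices) and Plane(normal, w) constructors below are assumed from csg/geom.py (they mirror the csg.js API this library ports) and the coordinates are made up.

from csg.geom import Vector, Vertex, Polygon, Plane

normal = Vector(0., 0., 1.)
square = Polygon([Vertex(Vector(0., 0., 0.), normal),
                  Vertex(Vector(1., 0., 0.), normal),
                  Vertex(Vector(1., 1., 0.), normal),
                  Vertex(Vector(0., 1., 0.), normal)])

plane = Plane(Vector(1., 0., 0.), 0.5)   # the plane x = 0.5

coplanarFront, coplanarBack, front, back = [], [], [], []
plane.splitPolygon(square, coplanarFront, coplanarBack, front, back)
# the square spans x = 0.5, so one fragment lands in `front` and one in `back`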
timknip/pycsg
csg/geom.py
BSPNode.invert
def invert(self):
    """
    Convert solid space to empty space and empty space to solid space.
    """
    for poly in self.polygons:
        poly.flip()
    self.plane.flip()
    if self.front:
        self.front.invert()
    if self.back:
        self.back.invert()
    temp = self.front
    self.front = self.back
    self.back = temp
python
Convert solid space to empty space and empty space to solid space.
train
https://github.com/timknip/pycsg/blob/b8f9710fd15c38dcc275d56a2108f604af38dcc8/csg/geom.py#L323-L336
timknip/pycsg
csg/geom.py
BSPNode.clipPolygons
def clipPolygons(self, polygons):
    """
    Recursively remove all polygons in `polygons` that are inside this BSP
    tree.
    """
    if not self.plane:
        return polygons[:]

    front = []
    back = []
    for poly in polygons:
        self.plane.splitPolygon(poly, front, back, front, back)

    if self.front:
        front = self.front.clipPolygons(front)

    if self.back:
        back = self.back.clipPolygons(back)
    else:
        back = []

    front.extend(back)
    return front
python
Recursively remove all polygons in `polygons` that are inside this BSP tree.
train
https://github.com/timknip/pycsg/blob/b8f9710fd15c38dcc275d56a2108f604af38dcc8/csg/geom.py#L338-L360
timknip/pycsg
csg/geom.py
BSPNode.clipTo
def clipTo(self, bsp):
    """
    Remove all polygons in this BSP tree that are inside the other BSP tree
    `bsp`.
    """
    self.polygons = bsp.clipPolygons(self.polygons)
    if self.front:
        self.front.clipTo(bsp)
    if self.back:
        self.back.clipTo(bsp)
python
Remove all polygons in this BSP tree that are inside the other BSP tree `bsp`.
train
https://github.com/timknip/pycsg/blob/b8f9710fd15c38dcc275d56a2108f604af38dcc8/csg/geom.py#L362-L371
timknip/pycsg
csg/geom.py
BSPNode.allPolygons
def allPolygons(self):
    """
    Return a list of all polygons in this BSP tree.
    """
    polygons = self.polygons[:]
    if self.front:
        polygons.extend(self.front.allPolygons())
    if self.back:
        polygons.extend(self.back.allPolygons())
    return polygons
python
Return a list of all polygons in this BSP tree.
train
https://github.com/timknip/pycsg/blob/b8f9710fd15c38dcc275d56a2108f604af38dcc8/csg/geom.py#L373-L382
timknip/pycsg
csg/geom.py
BSPNode.build
def build(self, polygons):
    """
    Build a BSP tree out of `polygons`. When called on an existing tree, the
    new polygons are filtered down to the bottom of the tree and become new
    nodes there. Each set of polygons is partitioned using the first polygon
    (no heuristic is used to pick a good split).
    """
    if len(polygons) == 0:
        return
    if not self.plane:
        self.plane = polygons[0].plane.clone()
    # add polygon to this node
    self.polygons.append(polygons[0])
    front = []
    back = []
    # split all other polygons using the first polygon's plane
    for poly in polygons[1:]:
        # coplanar front and back polygons go into self.polygons
        self.plane.splitPolygon(poly, self.polygons, self.polygons,
                                front, back)
    # recursively build the BSP tree
    if len(front) > 0:
        if not self.front:
            self.front = BSPNode()
        self.front.build(front)
    if len(back) > 0:
        if not self.back:
            self.back = BSPNode()
        self.back.build(back)
python
Build a BSP tree out of `polygons`. When called on an existing tree, the new polygons are filtered down to the bottom of the tree and become new nodes there. Each set of polygons is partitioned using the first polygon (no heuristic is used to pick a good split).
train
https://github.com/timknip/pycsg/blob/b8f9710fd15c38dcc275d56a2108f604af38dcc8/csg/geom.py#L384-L412
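Taken together, build, invert, clipPolygons and clipTo are the primitives behind CSG boolean operations. Below is a sketch of the classic csg.js union recipe expressed with them; it assumes BSPNode(polygons) builds a tree over the given list (as in csg.js), which this excerpt does not show.

def union_polygons(polys_a, polys_b):
    # sketch of the csg.js union recipe; BSPNode(polygons) is assumed
    a = BSPNode(polys_a)
    b = BSPNode(polys_b)
    a.clipTo(b)               # drop parts of A inside B
    b.clipTo(a)               # drop parts of B inside A
    b.invert()                # flip B to clip away faces coplanar with A...
    b.clipTo(a)
    b.invert()                # ...then restore its orientation
    a.build(b.allPolygons())  # merge what is left of B into A's tree
    return a.allPolygons()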
evonove/django-money-rates
djmoney_rates/utils.py
get_rate
def get_rate(currency):
    """Returns the rate from the default currency to `currency`."""
    source = get_rate_source()
    try:
        return Rate.objects.get(source=source, currency=currency).value
    except Rate.DoesNotExist:
        raise CurrencyConversionException(
            "Rate for %s in %s does not exist. "
            "Please run python manage.py update_rates" % (
                currency, source.name))
python
Returns the rate from the default currency to `currency`.
train
https://github.com/evonove/django-money-rates/blob/ac1f7636b9a38d3e153eb833019342c4d88634c2/djmoney_rates/utils.py#L12-L21
evonove/django-money-rates
djmoney_rates/utils.py
get_rate_source
def get_rate_source():
    """Get the default Rate Source and return it."""
    backend = money_rates_settings.DEFAULT_BACKEND()
    try:
        return RateSource.objects.get(name=backend.get_source_name())
    except RateSource.DoesNotExist:
        raise CurrencyConversionException(
            "Rate for %s source does not exist. "
            "Please run python manage.py update_rates" % backend.get_source_name())
python
Get the default Rate Source and return it.
train
https://github.com/evonove/django-money-rates/blob/ac1f7636b9a38d3e153eb833019342c4d88634c2/djmoney_rates/utils.py#L24-L32
evonove/django-money-rates
djmoney_rates/utils.py
base_convert_money
def base_convert_money(amount, currency_from, currency_to):
    """
    Convert 'amount' from 'currency_from' to 'currency_to'
    """
    source = get_rate_source()

    # Get rate for currency_from.
    if source.base_currency != currency_from:
        rate_from = get_rate(currency_from)
    else:
        # If currency from is the same as base currency its rate is 1.
        rate_from = Decimal(1)

    # Get rate for currency_to.
    rate_to = get_rate(currency_to)

    if isinstance(amount, float):
        amount = Decimal(amount).quantize(Decimal('.000001'))

    # After finishing the operation, quantize down final amount to two points.
    return ((amount / rate_from) * rate_to).quantize(Decimal("1.00"))
python
Convert 'amount' from 'currency_from' to 'currency_to'
train
https://github.com/evonove/django-money-rates/blob/ac1f7636b9a38d3e153eb833019342c4d88634c2/djmoney_rates/utils.py#L35-L55
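A worked example of the arithmetic with made-up rates quoted against a EUR base (each rate is units of that currency per one EUR): dividing by rate_from converts into the base currency, multiplying by rate_to converts out of it.

from decimal import Decimal

rate_from = Decimal("1.10")   # hypothetical: 1 EUR = 1.10 USD
rate_to = Decimal("0.85")     # hypothetical: 1 EUR = 0.85 GBP

amount = Decimal("100")       # 100 USD
in_base = amount / rate_from  # ~90.909091 EUR
result = (in_base * rate_to).quantize(Decimal("1.00"))
print(result)                 # 77.27, i.e. 77.27 GBP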
evonove/django-money-rates
djmoney_rates/utils.py
convert_money
def convert_money(amount, currency_from, currency_to):
    """
    Convert 'amount' from 'currency_from' to 'currency_to' and return a
    Money instance of the converted amount.
    """
    new_amount = base_convert_money(amount, currency_from, currency_to)
    return moneyed.Money(new_amount, currency_to)
python
Convert 'amount' from 'currency_from' to 'currency_to' and return a Money instance of the converted amount.
train
https://github.com/evonove/django-money-rates/blob/ac1f7636b9a38d3e153eb833019342c4d88634c2/djmoney_rates/utils.py#L58-L64
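A minimal usage sketch, assuming a backend is configured and rates have already been loaded via manage.py update_rates:

from djmoney_rates.utils import convert_money

money = convert_money(100, "USD", "GBP")   # returns a moneyed.Money
print(money)                               # e.g. 77.27 GBP with the rates above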
Blazemeter/apiritif
apiritif/utilities.py
format_date
def format_date(format_string=None, datetime_obj=None):
    """
    Format a datetime object with a Java SimpleDateFormat-like string.

    If datetime_obj is not given - use current datetime.
    If format_string is not given - return the number of milliseconds since epoch.

    :param format_string:
    :param datetime_obj:
    :return:
    :rtype: string
    """
    datetime_obj = datetime_obj or datetime.now()
    if format_string is None:
        # note: "%s" (seconds since epoch) is a platform-specific strftime extension
        seconds = int(datetime_obj.strftime("%s"))
        milliseconds = datetime_obj.microsecond // 1000
        return str(seconds * 1000 + milliseconds)
    else:
        formatter = SimpleDateFormat(format_string)
        return formatter.format_datetime(datetime_obj)
python
Format a datetime object with a Java SimpleDateFormat-like string. If datetime_obj is not given - use current datetime. If format_string is not given - return the number of milliseconds since epoch. :param format_string: :param datetime_obj: :return: :rtype: string
train
https://github.com/Blazemeter/apiritif/blob/27b48a68425949998c2254e5e1e0226882d9eee8/apiritif/utilities.py#L93-L112
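Illustrative calls (the pattern letters follow Java's SimpleDateFormat conventions, e.g. yyyy for year and MM for month; the import path mirrors the file path above and is an assumption):

from datetime import datetime
from apiritif.utilities import format_date

print(format_date())                                     # e.g. '1565971200123' (ms since epoch)
print(format_date("yyyy-MM-dd", datetime(2019, 8, 16)))  # expected: '2019-08-16'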
crackinglandia/pype32
pype32/utils.py
allZero
def allZero(buffer):
    """
    Tries to determine if a buffer is empty.

    @type buffer: str
    @param buffer: Buffer to test if it is empty.

    @rtype: bool
    @return: C{True} if the given buffer is empty, i.e. full of zeros,
    C{False} otherwise.
    """
    allZero = True
    for byte in buffer:
        if byte != "\x00":
            allZero = False
            break
    return allZero
python
Tries to determine if a buffer is empty. @type buffer: str @param buffer: Buffer to test if it is empty. @rtype: bool @return: C{True} if the given buffer is empty, i.e. full of zeros, C{False} otherwise.
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/utils.py#L65-L81
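The loop is equivalent to an all() over the bytes of a Python 2 str; a one-line restatement for comparison (not from the source):

def all_zero(buffer):
    # equivalent one-liner for a Python 2 str buffer
    return all(byte == "\x00" for byte in buffer)

assert all_zero("\x00" * 4)
assert not all_zero("\x00\x01\x00")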
crackinglandia/pype32
pype32/utils.py
WriteData.writeByte
def writeByte(self, byte):
    """
    Writes a byte into the L{WriteData} stream object.

    @type byte: int
    @param byte: Byte value to write into the stream.
    """
    self.data.write(pack("B" if not self.signed else "b", byte))
python
Writes a byte into the L{WriteData} stream object. @type byte: int @param byte: Byte value to write into the stream.
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/utils.py#L106-L113
crackinglandia/pype32
pype32/utils.py
WriteData.writeWord
def writeWord(self, word):
    """
    Writes a word value into the L{WriteData} stream object.

    @type word: int
    @param word: Word value to write into the stream.
    """
    self.data.write(pack(self.endianness + ("H" if not self.signed else "h"), word))
python
Writes a word value into the L{WriteData} stream object. @type word: int @param word: Word value to write into the stream.
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/utils.py#L115-L122
crackinglandia/pype32
pype32/utils.py
WriteData.writeDword
def writeDword(self, dword):
    """
    Writes a dword value into the L{WriteData} stream object.

    @type dword: int
    @param dword: Dword value to write into the stream.
    """
    self.data.write(pack(self.endianness + ("L" if not self.signed else "l"), dword))
python
Writes a dword value into the L{WriteData} stream object. @type dword: int @param dword: Dword value to write into the stream.
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/utils.py#L124-L131
crackinglandia/pype32
pype32/utils.py
WriteData.writeQword
def writeQword(self, qword):
    """
    Writes a qword value into the L{WriteData} stream object.

    @type qword: int
    @param qword: Qword value to write into the stream.
    """
    self.data.write(pack(self.endianness + ("Q" if not self.signed else "q"), qword))
python
Writes a qword value into the L{WriteData} stream object. @type qword: int @param qword: Qword value to write into the stream.
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/utils.py#L133-L140
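The four writers differ only in the struct format character (B/H/L/Q, lower-cased when self.signed is set) and, beyond writeByte, the endianness prefix. A standalone sketch of the same pattern with the struct module (the WriteData constructor is not shown in this excerpt, so this mirrors only the writers' internals):

import struct
from io import BytesIO

data = BytesIO()
endianness = "<"   # little-endian, as a PE parser typically uses
signed = False

data.write(struct.pack("B" if not signed else "b", 0x4D))                        # byte
data.write(struct.pack(endianness + ("H" if not signed else "h"), 0x5A4D))       # word
data.write(struct.pack(endianness + ("L" if not signed else "l"), 0xDEADBEEF))   # dword
data.write(struct.pack(endianness + ("Q" if not signed else "q"), 2 ** 40))      # qword
print(repr(data.getvalue()))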
crackinglandia/pype32
pype32/utils.py
WriteData.setOffset
def setOffset(self, value):
    """
    Sets the offset of the L{WriteData} stream object in which the data is written.

    @type value: int
    @param value: Integer value that represents the offset we want to start
    writing at in the L{WriteData} stream.

    @raise WrongOffsetValueException: The value is beyond the total length of the data.
    """
    if value >= len(self.data.getvalue()):
        # measure the underlying buffer; len() is not defined for the stream object itself
        raise excep.WrongOffsetValueException(
            "Wrong offset value. Must be less than %d" % len(self.data.getvalue()))
    self.data.seek(value)
python
Sets the offset of the L{WriteData} stream object in which the data is written. @type value: int @param value: Integer value that represents the offset we want to start writing at in the L{WriteData} stream. @raise WrongOffsetValueException: The value is beyond the total length of the data.
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/utils.py#L151-L162
crackinglandia/pype32
pype32/utils.py
WriteData.skipBytes
def skipBytes(self, nroBytes):
    """
    Skips the specified number of bytes from the current position of the
    L{WriteData} stream.

    @type nroBytes: int
    @param nroBytes: The number of bytes to skip.
    """
    self.data.seek(nroBytes + self.data.tell())
python
Skips the specified number of bytes from the current position of the L{WriteData} stream. @type nroBytes: int @param nroBytes: The number of bytes to skip.
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/utils.py#L164-L171
crackinglandia/pype32
pype32/utils.py
ReadData.readDword
def readDword(self):
    """
    Reads a dword value from the L{ReadData} stream object.

    @rtype: int
    @return: The dword value read from the L{ReadData} stream.
    """
    dword = unpack(self.endianness + ('L' if not self.signed else 'l'), self.readAt(self.offset, 4))[0]
    self.offset += 4
    return dword
python
Reads a dword value from the L{ReadData} stream object. @rtype: int @return: The dword value read from the L{ReadData} stream.
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/utils.py#L209-L218
crackinglandia/pype32
pype32/utils.py
ReadData.readWord
def readWord(self):
    """
    Reads a word value from the L{ReadData} stream object.

    @rtype: int
    @return: The word value read from the L{ReadData} stream.
    """
    word = unpack(self.endianness + ('H' if not self.signed else 'h'), self.readAt(self.offset, 2))[0]
    self.offset += 2
    return word
python
Reads a word value from the L{ReadData} stream object. @rtype: int @return: The word value read from the L{ReadData} stream.
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/utils.py#L220-L229
crackinglandia/pype32
pype32/utils.py
ReadData.readByte
def readByte(self):
    """
    Reads a byte value from the L{ReadData} stream object.

    @rtype: int
    @return: The byte value read from the L{ReadData} stream.
    """
    byte = unpack('B' if not self.signed else 'b', self.readAt(self.offset, 1))[0]
    self.offset += 1
    return byte
python
Reads a byte value from the L{ReadData} stream object. @rtype: int @return: The byte value read from the L{ReadData} stream.
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/utils.py#L231-L240
crackinglandia/pype32
pype32/utils.py
ReadData.readQword
def readQword(self):
    """
    Reads a qword value from the L{ReadData} stream object.

    @rtype: int
    @return: The qword value read from the L{ReadData} stream.
    """
    # 'q' is the signed 8-byte format character; the original's 'b' was a typo
    qword = unpack(self.endianness + ('Q' if not self.signed else 'q'), self.readAt(self.offset, 8))[0]
    self.offset += 8
    return qword
python
Reads a qword value from the L{ReadData} stream object. @rtype: int @return: The qword value read from the L{ReadData} stream.
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/utils.py#L242-L251
crackinglandia/pype32
pype32/utils.py
ReadData.readString
def readString(self):
    """
    Reads an ASCII string from the L{ReadData} stream object.

    @rtype: str
    @return: An ASCII string read from the stream.
    """
    resultStr = ""
    while self.data[self.offset] != "\x00":
        resultStr += self.data[self.offset]
        self.offset += 1
    return resultStr
python
Reads an ASCII string from the L{ReadData} stream object. @rtype: str @return: An ASCII string read from the stream.
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/utils.py#L253-L264
crackinglandia/pype32
pype32/utils.py
ReadData.readAlignedString
def readAlignedString(self, align=4):
    """
    Reads an ASCII string aligned to the next align-bytes boundary.

    @type align: int
    @param align: (Optional) The boundary the ASCII string should be aligned to.

    @rtype: str
    @return: A 4-bytes aligned (default) ASCII string.
    """
    s = self.readString()
    r = align - len(s) % align
    while r:
        s += self.data[self.offset]
        self.offset += 1
        r -= 1
    return s.rstrip("\x00")
python
Reads an ASCII string aligned to the next align-bytes boundary. @type align: int @param align: (Optional) The boundary the ASCII string should be aligned to. @rtype: str @return: A 4-bytes aligned (default) ASCII string.
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/utils.py#L266-L282
crackinglandia/pype32
pype32/utils.py
ReadData.read
def read(self, nroBytes):
    """
    Reads data from the L{ReadData} stream object.

    @type nroBytes: int
    @param nroBytes: The number of bytes to read.

    @rtype: str
    @return: A string containing the data read from the L{ReadData} stream object.

    @raise DataLengthException: The number of bytes to read is greater than
    the number remaining in the L{ReadData} stream.
    """
    if nroBytes > self.length - self.offset:
        if self.log:
            print "Warning: Trying to read: %d bytes - only %d bytes left" % (nroBytes, self.length - self.offset)
        nroBytes = self.length - self.offset

    resultStr = self.data[self.offset:self.offset + nroBytes]
    self.offset += nroBytes
    return resultStr
python
Reads data from the L{ReadData} stream object. @type nroBytes: int @param nroBytes: The number of bytes to read. @rtype: str @return: A string containing the data read from the L{ReadData} stream object. @raise DataLengthException: The number of bytes to read is greater than the number remaining in the L{ReadData} stream.
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/utils.py#L284-L303
crackinglandia/pype32
pype32/utils.py
ReadData.readAt
def readAt(self, offset, size):
    """
    Reads as many bytes as indicated by the size parameter at the specific offset.

    @type offset: int
    @param offset: Offset of the value to be read.

    @type size: int
    @param size: This parameter indicates how many bytes are going to be read
    from a given offset.

    @rtype: str
    @return: A packed string containing the read data.
    """
    if offset > self.length:
        if self.log:
            # the original referenced an undefined name (nroBytes); report size instead
            print "Warning: Trying to read: %d bytes - only %d bytes left" % (size, self.length - self.offset)
        offset = self.length - self.offset
    tmpOff = self.tell()
    self.setOffset(offset)
    r = self.read(size)
    self.setOffset(tmpOff)
    return r
python
Reads as many bytes as indicated by the size parameter at the specific offset. @type offset: int @param offset: Offset of the value to be read. @type size: int @param size: This parameter indicates how many bytes are going to be read from a given offset. @rtype: str @return: A packed string containing the read data.
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/utils.py#L327-L348
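The readers invert the writers: unpack a fixed-width slice at self.offset, then advance it. A standalone mirror of readWord's and readAt's internals with the struct module (not using the class itself):

import struct

buf = b"MZ\x90\x00\x03\x00\x00\x00"   # first bytes of a DOS header
offset = 0

# readWord's internals: unpack two bytes at the offset, then advance it
word = struct.unpack("<H", buf[offset:offset + 2])[0]
offset += 2
print(hex(word))   # 0x5a4d, the 'MZ' magic

# readAt's internals: peek at an absolute offset without moving the cursor
dword = struct.unpack("<L", buf[4:8])[0]
print(dword)       # 3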
ymyzk/kawasemi
kawasemi/kawasemi.py
Kawasemi.send
def send(self, message, channel_name=None, fail_silently=False,
         options=None):
    # type: (Text, Optional[str], bool, Optional[SendOptions]) -> None
    """Send a notification to channels

    :param message: A message
    """
    if channel_name is None:
        channels = self.settings["CHANNELS"]
    else:
        try:
            channels = {
                "__selected__": self.settings["CHANNELS"][channel_name]
            }
        except KeyError:
            # use %-formatting (the original passed channel_name as a second
            # Exception argument instead of interpolating it)
            raise Exception("channel does not exist: %s" % channel_name)

    for _, config in channels.items():
        if "_backend" not in config:
            raise ImproperlyConfigured(
                "Specify the backend class in the channel configuration")

        backend = self._load_backend(config["_backend"])  # type: Any

        config = deepcopy(config)
        del config["_backend"]
        channel = backend(**config)
        channel.send(message, fail_silently=fail_silently, options=options)
python
Send a notification to channels :param message: A message
train
https://github.com/ymyzk/kawasemi/blob/a9f62f38ee7c10bf4cd353285db1f477d3d372d1/kawasemi/kawasemi.py#L35-L61
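A configuration/usage sketch matching the shape this method expects: a CHANNELS mapping whose entries carry a _backend dotted path plus backend-specific keyword arguments. It assumes the Kawasemi constructor takes the settings mapping; the Slack backend path and its url kwarg are assumptions for illustration, not taken from this excerpt.

from kawasemi import Kawasemi

config = {
    "CHANNELS": {
        "slack": {
            # "_backend" is required; the remaining keys become backend kwargs
            "_backend": "kawasemi.backends.slack.SlackChannel",  # assumed path
            "url": "https://hooks.slack.com/services/...",       # assumed kwarg
        }
    }
}

kawasemi = Kawasemi(config)
kawasemi.send("Deployment finished", channel_name="slack")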
Blazemeter/apiritif
apiritif/http.py
HTTPTarget.request
def request(self, method, path,
            params=None, headers=None, cookies=None, data=None, json=None,
            allow_redirects=None, timeout=None):
    """
    Prepares and sends an HTTP request. Returns the HTTPResponse object.

    :param method: str
    :param path: str
    :return: response
    :rtype: HTTPResponse
    """
    headers = headers or {}
    timeout = timeout if timeout is not None else self._timeout
    allow_redirects = allow_redirects if allow_redirects is not None else self._allow_redirects

    if self._keep_alive and self.__session is None:
        self.__session = requests.Session()

    if self.__session is not None and not self._use_cookies:
        self.__session.cookies.clear()

    address = self._bake_address(path)
    req_headers = copy.deepcopy(self._additional_headers)
    req_headers.update(headers)

    # pass the merged headers (the original passed the bare `headers`,
    # silently dropping self._additional_headers)
    response = http.request(method, address, session=self.__session,
                            params=params, headers=req_headers, cookies=cookies,
                            data=data, json=json,
                            allow_redirects=allow_redirects, timeout=timeout)
    if self._auto_assert_ok:
        response.assert_ok()
    return response
python
Prepares and sends an HTTP request. Returns the HTTPResponse object. :param method: str :param path: str :return: response :rtype: HTTPResponse
train
https://github.com/Blazemeter/apiritif/blob/27b48a68425949998c2254e5e1e0226882d9eee8/apiritif/http.py#L392-L421
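A usage sketch; the http.target() factory below is an assumption about apiritif's public API (only HTTPTarget.request itself appears in this excerpt), while assert_ok() is confirmed by the code above.

from apiritif import http

target = http.target("https://httpbin.org")   # assumed factory returning an HTTPTarget
response = target.request("GET", "/get", params={"show_env": "1"})
response.assert_ok()   # explicit check; request() also does this when _auto_assert_ok is set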
zsethna/OLGA
olga/load_model.py
load_genomic_CDR3_anchor_pos_and_functionality
def load_genomic_CDR3_anchor_pos_and_functionality(anchor_pos_file_name):
    """Read anchor position and functionality from file.

    Parameters
    ----------
    anchor_pos_file_name : str
        File name for the functionality and position of a conserved residue
        that defines the CDR3 region for each V or J germline sequence.

    Returns
    -------
    anchor_pos_and_functionality : dict
        Residue anchor position and functionality for each gene/allele.
    """
    anchor_pos_and_functionality = {}
    anchor_pos_file = open(anchor_pos_file_name, 'r')

    first_line = True
    for line in anchor_pos_file:
        if first_line:
            first_line = False
            continue
        split_line = line.split(',')
        split_line = [x.strip() for x in split_line]
        anchor_pos_and_functionality[split_line[0]] = [int(split_line[1]), split_line[2].strip().strip('()')]
    anchor_pos_file.close()  # close the handle (the original left it open)

    return anchor_pos_and_functionality
python
Read anchor position and functionality from file.

Parameters
----------
anchor_pos_file_name : str
    File name for the functionality and position of a conserved residue
    that defines the CDR3 region for each V or J germline sequence.

Returns
-------
anchor_pos_and_functionality : dict
    Residue anchor position and functionality for each gene/allele.
[ "Read", "anchor", "position", "and", "functionality", "from", "file", "." ]
train
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/load_model.py#L468-L497
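A short sketch of driving this loader. The CSV layout below is inferred from the parsing logic (one header line is skipped, then `gene,anchor_index,function` rows); the file name and gene name are placeholders:

# hypothetical anchor file 'V_gene_CDR3_anchors.csv':
#   gene,anchor_index,function
#   TRBV2*01,263,F
anchors = load_genomic_CDR3_anchor_pos_and_functionality('V_gene_CDR3_anchors.csv')
anchors['TRBV2*01']  # -> [263, 'F']: conserved-residue position and functionality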
zsethna/OLGA
olga/load_model.py
read_igor_V_gene_parameters
def read_igor_V_gene_parameters(params_file_name):
    """Load raw genV from file.

    genV is a list of genomic V information. Each element is a list of three
    elements. The first is the name of the V allele, the second is the
    genomic sequence trimmed to the CDR3 region for productive sequences, and
    the last is the full germline sequence. For this 'raw genV' the middle
    element is an empty string to be filled in later.

    Parameters
    ----------
    params_file_name : str
        File name for an IGoR parameter file.

    Returns
    -------
    genV : list
        List of genomic V information.

    """
    params_file = open(params_file_name, 'r')

    V_gene_info = {}
    in_V_gene_sec = False
    for line in params_file:
        if line.startswith('#GeneChoice;V_gene;'):
            in_V_gene_sec = True
        elif in_V_gene_sec:
            if line[0] == '%':
                split_line = line[1:].split(';')
                V_gene_info[split_line[0]] = [split_line[1], int(split_line[2])]
            else:
                break
    params_file.close()

    genV = [[]]*len(V_gene_info.keys())

    for V_gene in V_gene_info.keys():
        genV[V_gene_info[V_gene][1]] = [V_gene, '', V_gene_info[V_gene][0]]

    return genV
python
def read_igor_V_gene_parameters(params_file_name):
    """Load raw genV from file.

    genV is a list of genomic V information. Each element is a list of three
    elements. The first is the name of the V allele, the second is the
    genomic sequence trimmed to the CDR3 region for productive sequences, and
    the last is the full germline sequence. For this 'raw genV' the middle
    element is an empty string to be filled in later.

    Parameters
    ----------
    params_file_name : str
        File name for an IGoR parameter file.

    Returns
    -------
    genV : list
        List of genomic V information.

    """
    params_file = open(params_file_name, 'r')

    V_gene_info = {}
    in_V_gene_sec = False
    for line in params_file:
        if line.startswith('#GeneChoice;V_gene;'):
            in_V_gene_sec = True
        elif in_V_gene_sec:
            if line[0] == '%':
                split_line = line[1:].split(';')
                V_gene_info[split_line[0]] = [split_line[1], int(split_line[2])]
            else:
                break
    params_file.close()

    genV = [[]]*len(V_gene_info.keys())

    for V_gene in V_gene_info.keys():
        genV[V_gene_info[V_gene][1]] = [V_gene, '', V_gene_info[V_gene][0]]

    return genV
[ "def", "read_igor_V_gene_parameters", "(", "params_file_name", ")", ":", "params_file", "=", "open", "(", "params_file_name", ",", "'r'", ")", "V_gene_info", "=", "{", "}", "in_V_gene_sec", "=", "False", "for", "line", "in", "params_file", ":", "if", "line", ".", "startswith", "(", "'#GeneChoice;V_gene;'", ")", ":", "in_V_gene_sec", "=", "True", "elif", "in_V_gene_sec", ":", "if", "line", "[", "0", "]", "==", "'%'", ":", "split_line", "=", "line", "[", "1", ":", "]", ".", "split", "(", "';'", ")", "V_gene_info", "[", "split_line", "[", "0", "]", "]", "=", "[", "split_line", "[", "1", "]", ",", "int", "(", "split_line", "[", "2", "]", ")", "]", "else", ":", "break", "params_file", ".", "close", "(", ")", "genV", "=", "[", "[", "]", "]", "*", "len", "(", "V_gene_info", ".", "keys", "(", ")", ")", "for", "V_gene", "in", "V_gene_info", ".", "keys", "(", ")", ":", "genV", "[", "V_gene_info", "[", "V_gene", "]", "[", "1", "]", "]", "=", "[", "V_gene", ",", "''", ",", "V_gene_info", "[", "V_gene", "]", "[", "0", "]", "]", "return", "genV" ]
Load raw genV from file.

genV is a list of genomic V information. Each element is a list of three
elements. The first is the name of the V allele, the second is the genomic
sequence trimmed to the CDR3 region for productive sequences, and the last is
the full germline sequence. For this 'raw genV' the middle element is an
empty string to be filled in later.

Parameters
----------
params_file_name : str
    File name for an IGoR parameter file.

Returns
-------
genV : list
    List of genomic V information.
[ "Load", "raw", "genV", "from", "file", ".", "genV", "is", "a", "list", "of", "genomic", "V", "information", ".", "Each", "element", "is", "a", "list", "of", "three", "elements", ".", "The", "first", "is", "the", "name", "of", "the", "V", "allele", "the", "second", "is", "the", "genomic", "sequence", "trimmed", "to", "the", "CDR3", "region", "for", "productive", "sequences", "and", "the", "last", "is", "the", "full", "germline", "sequence", ".", "For", "this", "raw", "genV", "the", "middle", "element", "is", "an", "empty", "string", "to", "be", "filled", "in", "later", "." ]
train
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/load_model.py#L499-L540
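A sketch of the parameter-file section this reader consumes, inferred from the parsing logic (`%name;germline_seq;index` lines under a `#GeneChoice;V_gene;` header). The header fields, allele names, sequences, and file name are illustrative:

# hypothetical excerpt of 'model_params.txt':
#   #GeneChoice;V_gene;Undefined_side;7;v_choice
#   %TRBV2*01;GCTGCCGTG;0
#   %TRBV3-1*01;GCTACCAGC;1
genV = read_igor_V_gene_parameters('model_params.txt')
genV[0]  # -> ['TRBV2*01', '', 'GCTGCCGTG']; the middle slot is filled in later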
zsethna/OLGA
olga/load_model.py
read_igor_D_gene_parameters
def read_igor_D_gene_parameters(params_file_name):
    """Load genD from file.

    genD is a list of genomic D information. Each element is a list of the
    name of the D allele and the germline sequence.

    Parameters
    ----------
    params_file_name : str
        File name for an IGoR parameter file.

    Returns
    -------
    genD : list
        List of genomic D information.

    """
    params_file = open(params_file_name, 'r')

    D_gene_info = {}
    in_D_gene_sec = False
    for line in params_file:
        if line.startswith('#GeneChoice;D_gene;'):
            in_D_gene_sec = True
        elif in_D_gene_sec:
            if line[0] == '%':
                split_line = line[1:].split(';')
                D_gene_info[split_line[0]] = [split_line[1], int(split_line[2])]
            else:
                break
    params_file.close()

    genD = [[]]*len(D_gene_info.keys())

    for D_gene in D_gene_info.keys():
        genD[D_gene_info[D_gene][1]] = [D_gene, D_gene_info[D_gene][0]]

    return genD
python
def read_igor_D_gene_parameters(params_file_name):
    """Load genD from file.

    genD is a list of genomic D information. Each element is a list of the
    name of the D allele and the germline sequence.

    Parameters
    ----------
    params_file_name : str
        File name for an IGoR parameter file.

    Returns
    -------
    genD : list
        List of genomic D information.

    """
    params_file = open(params_file_name, 'r')

    D_gene_info = {}
    in_D_gene_sec = False
    for line in params_file:
        if line.startswith('#GeneChoice;D_gene;'):
            in_D_gene_sec = True
        elif in_D_gene_sec:
            if line[0] == '%':
                split_line = line[1:].split(';')
                D_gene_info[split_line[0]] = [split_line[1], int(split_line[2])]
            else:
                break
    params_file.close()

    genD = [[]]*len(D_gene_info.keys())

    for D_gene in D_gene_info.keys():
        genD[D_gene_info[D_gene][1]] = [D_gene, D_gene_info[D_gene][0]]

    return genD
[ "def", "read_igor_D_gene_parameters", "(", "params_file_name", ")", ":", "params_file", "=", "open", "(", "params_file_name", ",", "'r'", ")", "D_gene_info", "=", "{", "}", "in_D_gene_sec", "=", "False", "for", "line", "in", "params_file", ":", "if", "line", ".", "startswith", "(", "'#GeneChoice;D_gene;'", ")", ":", "in_D_gene_sec", "=", "True", "elif", "in_D_gene_sec", ":", "if", "line", "[", "0", "]", "==", "'%'", ":", "split_line", "=", "line", "[", "1", ":", "]", ".", "split", "(", "';'", ")", "D_gene_info", "[", "split_line", "[", "0", "]", "]", "=", "[", "split_line", "[", "1", "]", ",", "int", "(", "split_line", "[", "2", "]", ")", "]", "else", ":", "break", "params_file", ".", "close", "(", ")", "genD", "=", "[", "[", "]", "]", "*", "len", "(", "D_gene_info", ".", "keys", "(", ")", ")", "for", "D_gene", "in", "D_gene_info", ".", "keys", "(", ")", ":", "genD", "[", "D_gene_info", "[", "D_gene", "]", "[", "1", "]", "]", "=", "[", "D_gene", ",", "D_gene_info", "[", "D_gene", "]", "[", "0", "]", "]", "return", "genD" ]
Load genD from file.

genD is a list of genomic D information. Each element is a list of the name
of the D allele and the germline sequence.

Parameters
----------
params_file_name : str
    File name for an IGoR parameter file.

Returns
-------
genD : list
    List of genomic D information.
[ "Load", "genD", "from", "file", ".", "genD", "is", "a", "list", "of", "genomic", "D", "information", ".", "Each", "element", "is", "a", "list", "of", "the", "name", "of", "the", "D", "allele", "and", "the", "germline", "sequence", "." ]
train
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/load_model.py#L542-L580
zsethna/OLGA
olga/load_model.py
read_igor_J_gene_parameters
def read_igor_J_gene_parameters(params_file_name):
    """Load raw genJ from file.

    genJ is a list of genomic J information. Each element is a list of three
    elements. The first is the name of the J allele, the second is the
    genomic sequence trimmed to the CDR3 region for productive sequences, and
    the last is the full germline sequence. For this 'raw genJ' the middle
    element is an empty string to be filled in later.

    Parameters
    ----------
    params_file_name : str
        File name for an IGoR parameter file.

    Returns
    -------
    genJ : list
        List of genomic J information.

    """
    params_file = open(params_file_name, 'r')

    J_gene_info = {}
    in_J_gene_sec = False
    for line in params_file:
        if line.startswith('#GeneChoice;J_gene;'):
            in_J_gene_sec = True
        elif in_J_gene_sec:
            if line[0] == '%':
                split_line = line[1:].split(';')
                J_gene_info[split_line[0]] = [split_line[1], int(split_line[2])]
            else:
                break
    params_file.close()

    genJ = [[]]*len(J_gene_info.keys())

    for J_gene in J_gene_info.keys():
        genJ[J_gene_info[J_gene][1]] = [J_gene, '', J_gene_info[J_gene][0]]

    return genJ
python
def read_igor_J_gene_parameters(params_file_name):
    """Load raw genJ from file.

    genJ is a list of genomic J information. Each element is a list of three
    elements. The first is the name of the J allele, the second is the
    genomic sequence trimmed to the CDR3 region for productive sequences, and
    the last is the full germline sequence. For this 'raw genJ' the middle
    element is an empty string to be filled in later.

    Parameters
    ----------
    params_file_name : str
        File name for an IGoR parameter file.

    Returns
    -------
    genJ : list
        List of genomic J information.

    """
    params_file = open(params_file_name, 'r')

    J_gene_info = {}
    in_J_gene_sec = False
    for line in params_file:
        if line.startswith('#GeneChoice;J_gene;'):
            in_J_gene_sec = True
        elif in_J_gene_sec:
            if line[0] == '%':
                split_line = line[1:].split(';')
                J_gene_info[split_line[0]] = [split_line[1], int(split_line[2])]
            else:
                break
    params_file.close()

    genJ = [[]]*len(J_gene_info.keys())

    for J_gene in J_gene_info.keys():
        genJ[J_gene_info[J_gene][1]] = [J_gene, '', J_gene_info[J_gene][0]]

    return genJ
[ "def", "read_igor_J_gene_parameters", "(", "params_file_name", ")", ":", "params_file", "=", "open", "(", "params_file_name", ",", "'r'", ")", "J_gene_info", "=", "{", "}", "in_J_gene_sec", "=", "False", "for", "line", "in", "params_file", ":", "if", "line", ".", "startswith", "(", "'#GeneChoice;J_gene;'", ")", ":", "in_J_gene_sec", "=", "True", "elif", "in_J_gene_sec", ":", "if", "line", "[", "0", "]", "==", "'%'", ":", "split_line", "=", "line", "[", "1", ":", "]", ".", "split", "(", "';'", ")", "J_gene_info", "[", "split_line", "[", "0", "]", "]", "=", "[", "split_line", "[", "1", "]", ",", "int", "(", "split_line", "[", "2", "]", ")", "]", "else", ":", "break", "params_file", ".", "close", "(", ")", "genJ", "=", "[", "[", "]", "]", "*", "len", "(", "J_gene_info", ".", "keys", "(", ")", ")", "for", "J_gene", "in", "J_gene_info", ".", "keys", "(", ")", ":", "genJ", "[", "J_gene_info", "[", "J_gene", "]", "[", "1", "]", "]", "=", "[", "J_gene", ",", "''", ",", "J_gene_info", "[", "J_gene", "]", "[", "0", "]", "]", "return", "genJ" ]
Load raw genJ from file.

genJ is a list of genomic J information. Each element is a list of three
elements. The first is the name of the J allele, the second is the genomic
sequence trimmed to the CDR3 region for productive sequences, and the last is
the full germline sequence. For this 'raw genJ' the middle element is an
empty string to be filled in later.

Parameters
----------
params_file_name : str
    File name for an IGoR parameter file.

Returns
-------
genJ : list
    List of genomic J information.
[ "Load", "raw", "genJ", "from", "file", ".", "genJ", "is", "a", "list", "of", "genomic", "J", "information", ".", "Each", "element", "is", "a", "list", "of", "three", "elements", ".", "The", "first", "is", "the", "name", "of", "the", "J", "allele", "the", "second", "is", "the", "genomic", "sequence", "trimmed", "to", "the", "CDR3", "region", "for", "productive", "sequences", "and", "the", "last", "is", "the", "full", "germline", "sequence", ".", "For", "this", "raw", "genJ", "the", "middle", "element", "is", "an", "empty", "string", "to", "be", "filled", "in", "later", "." ]
train
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/load_model.py#L582-L623
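The V, D, and J readers all scan the same parameter file; a combined sketch using only the three functions above (the file name is a placeholder):

params_file = 'model_params.txt'  # hypothetical IGoR parameter file
genV = read_igor_V_gene_parameters(params_file)  # entries: [name, '', germline]
genD = read_igor_D_gene_parameters(params_file)  # entries: [name, germline]
genJ = read_igor_J_gene_parameters(params_file)  # entries: [name, '', germline]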
zsethna/OLGA
olga/load_model.py
read_igor_marginals_txt
def read_igor_marginals_txt(marginals_file_name, dim_names=False):
    """Load raw IGoR model marginals.

    Parameters
    ----------
    marginals_file_name : str
        File name for an IGoR model marginals file.

    Returns
    -------
    model_dict : dict
        Dictionary with model marginals.
    dimension_names_dict : dict
        Dictionary that defines IGoR model dependencies.

    """
    with open(marginals_file_name, 'r') as file:
        # Model parameters are stored inside a dictionary of ndarrays
        model_dict = {}
        dimension_names_dict = {}
        element_name = ""
        first = True
        first_dim_line = False
        element_marginal_array = []
        indices_array = []

        for line in file:
            strip_line = line.rstrip('\n')  # Remove end of line character
            if strip_line[0] == '@':
                first_dim_line = True
                if not first:
                    # Add the previous element to the dictionary
                    model_dict[element_name] = element_marginal_array
                else:
                    first = False
                element_name = strip_line[1:]

            if strip_line[0] == '$':
                # Define array dimensions
                coma_index = strip_line.find(',')
                dimensions = []

                # Get rid of $Dim[
                previous_coma_index = 4
                while coma_index != -1:
                    dimensions.append(int(strip_line[previous_coma_index + 1:coma_index]))
                    previous_coma_index = coma_index
                    coma_index = strip_line.find(',', coma_index + 1)

                # Add the last dimension and get rid of the closing bracket
                dimensions.append(int(strip_line[previous_coma_index + 1:-1]))

                element_marginal_array = np.ndarray(shape=dimensions)

            if strip_line[0] == '#':
                if first_dim_line:
                    dimensions_names = []
                    if len(dimensions) > 1:
                        comma_index = strip_line.find(',')
                        opening_bracket_index = strip_line.find('[')
                        while opening_bracket_index != -1:
                            dimensions_names.append(strip_line[opening_bracket_index + 1:comma_index])
                            opening_bracket_index = strip_line.find('[', comma_index)
                            comma_index = strip_line.find(',', opening_bracket_index)
                    first_dim_line = False
                    dimensions_names.append(element_name)
                    dimension_names_dict[element_name] = dimensions_names

                # Update indices
                indices_array = []
                if len(dimensions) > 1:
                    comma_index = strip_line.find(',')
                    closing_brack_index = strip_line.find(']')
                    while closing_brack_index != -1:
                        indices_array.append(int(strip_line[comma_index + 1:closing_brack_index]))
                        opening_bracket_index = strip_line.find('[', closing_brack_index)
                        comma_index = strip_line.find(',', opening_bracket_index)
                        closing_brack_index = strip_line.find(']', closing_brack_index + 1)

            if strip_line[0] == '%':
                # Read doubles
                coma_index = strip_line.find(',')
                marginals_values = []

                # Get rid of the %
                previous_coma_index = 0
                while coma_index != -1:
                    marginals_values.append(float(strip_line[previous_coma_index + 1:coma_index]))
                    previous_coma_index = coma_index
                    coma_index = strip_line.find(',', coma_index + 1)

                # Add the last value
                marginals_values.append(float(strip_line[previous_coma_index + 1:]))
                if len(marginals_values) != dimensions[-1]:
                    print("problem")
                element_marginal_array[tuple(indices_array)] = marginals_values
        model_dict[element_name] = element_marginal_array

    return [model_dict, dimension_names_dict]
python
def read_igor_marginals_txt(marginals_file_name, dim_names=False):
    """Load raw IGoR model marginals.

    Parameters
    ----------
    marginals_file_name : str
        File name for an IGoR model marginals file.

    Returns
    -------
    model_dict : dict
        Dictionary with model marginals.
    dimension_names_dict : dict
        Dictionary that defines IGoR model dependencies.

    """
    with open(marginals_file_name, 'r') as file:
        # Model parameters are stored inside a dictionary of ndarrays
        model_dict = {}
        dimension_names_dict = {}
        element_name = ""
        first = True
        first_dim_line = False
        element_marginal_array = []
        indices_array = []

        for line in file:
            strip_line = line.rstrip('\n')  # Remove end of line character
            if strip_line[0] == '@':
                first_dim_line = True
                if not first:
                    # Add the previous element to the dictionary
                    model_dict[element_name] = element_marginal_array
                else:
                    first = False
                element_name = strip_line[1:]

            if strip_line[0] == '$':
                # Define array dimensions
                coma_index = strip_line.find(',')
                dimensions = []

                # Get rid of $Dim[
                previous_coma_index = 4
                while coma_index != -1:
                    dimensions.append(int(strip_line[previous_coma_index + 1:coma_index]))
                    previous_coma_index = coma_index
                    coma_index = strip_line.find(',', coma_index + 1)

                # Add the last dimension and get rid of the closing bracket
                dimensions.append(int(strip_line[previous_coma_index + 1:-1]))

                element_marginal_array = np.ndarray(shape=dimensions)

            if strip_line[0] == '#':
                if first_dim_line:
                    dimensions_names = []
                    if len(dimensions) > 1:
                        comma_index = strip_line.find(',')
                        opening_bracket_index = strip_line.find('[')
                        while opening_bracket_index != -1:
                            dimensions_names.append(strip_line[opening_bracket_index + 1:comma_index])
                            opening_bracket_index = strip_line.find('[', comma_index)
                            comma_index = strip_line.find(',', opening_bracket_index)
                    first_dim_line = False
                    dimensions_names.append(element_name)
                    dimension_names_dict[element_name] = dimensions_names

                # Update indices
                indices_array = []
                if len(dimensions) > 1:
                    comma_index = strip_line.find(',')
                    closing_brack_index = strip_line.find(']')
                    while closing_brack_index != -1:
                        indices_array.append(int(strip_line[comma_index + 1:closing_brack_index]))
                        opening_bracket_index = strip_line.find('[', closing_brack_index)
                        comma_index = strip_line.find(',', opening_bracket_index)
                        closing_brack_index = strip_line.find(']', closing_brack_index + 1)

            if strip_line[0] == '%':
                # Read doubles
                coma_index = strip_line.find(',')
                marginals_values = []

                # Get rid of the %
                previous_coma_index = 0
                while coma_index != -1:
                    marginals_values.append(float(strip_line[previous_coma_index + 1:coma_index]))
                    previous_coma_index = coma_index
                    coma_index = strip_line.find(',', coma_index + 1)

                # Add the last value
                marginals_values.append(float(strip_line[previous_coma_index + 1:]))
                if len(marginals_values) != dimensions[-1]:
                    print("problem")
                element_marginal_array[tuple(indices_array)] = marginals_values
        model_dict[element_name] = element_marginal_array

    return [model_dict, dimension_names_dict]
[ "def", "read_igor_marginals_txt", "(", "marginals_file_name", ",", "dim_names", "=", "False", ")", ":", "with", "open", "(", "marginals_file_name", ",", "'r'", ")", "as", "file", ":", "#Model parameters are stored inside a dictionary of ndarrays", "model_dict", "=", "{", "}", "dimension_names_dict", "=", "{", "}", "element_name", "=", "\"\"", "first", "=", "True", "first_dim_line", "=", "False", "element_marginal_array", "=", "[", "]", "indices_array", "=", "[", "]", "for", "line", "in", "file", ":", "strip_line", "=", "line", ".", "rstrip", "(", "'\\n'", ")", "#Remove end of line character", "if", "strip_line", "[", "0", "]", "==", "'@'", ":", "first_dim_line", "=", "True", "if", "not", "(", "first", ")", ":", "#Add the previous to the dictionnary", "model_dict", "[", "element_name", "]", "=", "element_marginal_array", "else", ":", "first", "=", "False", "element_name", "=", "strip_line", "[", "1", ":", "]", "if", "strip_line", "[", "0", "]", "==", "'$'", ":", "#define array dimensions", "coma_index", "=", "strip_line", ".", "find", "(", "','", ")", "dimensions", "=", "[", "]", "#Get rid of $Dim[", "previous_coma_index", "=", "4", "while", "coma_index", "!=", "-", "1", ":", "dimensions", ".", "append", "(", "int", "(", "strip_line", "[", "previous_coma_index", "+", "1", ":", "coma_index", "]", ")", ")", "previous_coma_index", "=", "coma_index", "coma_index", "=", "strip_line", ".", "find", "(", "','", ",", "coma_index", "+", "1", ")", "#Add last dimension and get rid of the closing bracket ", "dimensions", ".", "append", "(", "int", "(", "strip_line", "[", "previous_coma_index", "+", "1", ":", "-", "1", "]", ")", ")", "element_marginal_array", "=", "np", ".", "ndarray", "(", "shape", "=", "dimensions", ")", "if", "strip_line", "[", "0", "]", "==", "'#'", ":", "if", "first_dim_line", ":", "dimensions_names", "=", "[", "]", "if", "len", "(", "dimensions", ")", ">", "1", ":", "comma_index", "=", "strip_line", ".", "find", "(", "','", ")", "opening_bracket_index", "=", "strip_line", ".", "find", "(", "'['", ")", "while", "opening_bracket_index", "!=", "-", "1", ":", "dimensions_names", ".", "append", "(", "strip_line", "[", "opening_bracket_index", "+", "1", ":", "comma_index", "]", ")", "opening_bracket_index", "=", "strip_line", ".", "find", "(", "'['", ",", "comma_index", ")", "comma_index", "=", "strip_line", ".", "find", "(", "','", ",", "opening_bracket_index", ")", "first_dim_line", "=", "False", "dimensions_names", ".", "append", "(", "element_name", ")", "dimension_names_dict", "[", "element_name", "]", "=", "dimensions_names", "#update indices", "indices_array", "=", "[", "]", "if", "len", "(", "dimensions", ")", ">", "1", ":", "comma_index", "=", "strip_line", ".", "find", "(", "','", ")", "closing_brack_index", "=", "strip_line", ".", "find", "(", "']'", ")", "while", "closing_brack_index", "!=", "-", "1", ":", "indices_array", ".", "append", "(", "int", "(", "strip_line", "[", "comma_index", "+", "1", ":", "closing_brack_index", "]", ")", ")", "opening_bracket_index", "=", "strip_line", ".", "find", "(", "'['", ",", "closing_brack_index", ")", "comma_index", "=", "strip_line", ".", "find", "(", "','", ",", "opening_bracket_index", ")", "closing_brack_index", "=", "strip_line", ".", "find", "(", "']'", ",", "closing_brack_index", "+", "1", ")", "if", "strip_line", "[", "0", "]", "==", "'%'", ":", "#read doubles", "coma_index", "=", "strip_line", ".", "find", "(", "','", ")", "marginals_values", "=", "[", "]", "#Get rid of the %", "previous_coma_index", "=", "0", "while", 
"coma_index", "!=", "-", "1", ":", "marginals_values", ".", "append", "(", "float", "(", "strip_line", "[", "previous_coma_index", "+", "1", ":", "coma_index", "]", ")", ")", "previous_coma_index", "=", "coma_index", "coma_index", "=", "strip_line", ".", "find", "(", "','", ",", "coma_index", "+", "1", ")", "#Add last dimension and get rid of the closing bracket ", "marginals_values", ".", "append", "(", "float", "(", "strip_line", "[", "previous_coma_index", "+", "1", ":", "]", ")", ")", "if", "len", "(", "marginals_values", ")", "!=", "dimensions", "[", "-", "1", "]", ":", "print", "\"problem\"", "element_marginal_array", "[", "tuple", "(", "indices_array", ")", "]", "=", "marginals_values", "model_dict", "[", "element_name", "]", "=", "element_marginal_array", "return", "[", "model_dict", ",", "dimension_names_dict", "]" ]
Load raw IGoR model marginals.

Parameters
----------
marginals_file_name : str
    File name for an IGoR model marginals file.

Returns
-------
model_dict : dict
    Dictionary with model marginals.
dimension_names_dict : dict
    Dictionary that defines IGoR model dependencies.
[ "Load", "raw", "IGoR", "model", "marginals", ".", "Parameters", "----------", "marginals_file_name", ":", "str", "File", "name", "for", "a", "IGOR", "model", "marginals", "file", "." ]
train
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/load_model.py#L790-L892
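A sketch of inspecting the returned pair. The event names ('v_choice', 'd_gene') follow the IGoR conventions referenced elsewhere in this module; the file name is a placeholder:

model_dict, dimension_names_dict = read_igor_marginals_txt('model_marginals.txt')
model_dict['v_choice']           # 1-D ndarray: P(V) over V alleles
dimension_names_dict['d_gene']   # e.g. ['j_choice', 'd_gene'] for a factorized (T cell) model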
zsethna/OLGA
olga/load_model.py
GenomicData.anchor_and_curate_genV_and_genJ
def anchor_and_curate_genV_and_genJ(self, V_anchor_pos_file, J_anchor_pos_file):
    """Trim V and J germline sequences to the CDR3 region.

    Unproductive sequences have an empty string '' for the CDR3 region
    sequence.

    Edits the attributes genV and genJ.

    Parameters
    ----------
    V_anchor_pos_file : str
        File name for the conserved residue (C) locations and functionality
        of each V genomic sequence.
    J_anchor_pos_file : str
        File name for the conserved residue (F/W) locations and functionality
        of each J genomic sequence.

    """
    V_anchor_pos = load_genomic_CDR3_anchor_pos_and_functionality(V_anchor_pos_file)
    J_anchor_pos = load_genomic_CDR3_anchor_pos_and_functionality(J_anchor_pos_file)

    for V in self.genV:
        try:
            if V_anchor_pos[V[0]][0] > 0 and V_anchor_pos[V[0]][1] == 'F':  # Check for functionality
                V[1] = V[2][V_anchor_pos[V[0]][0]:]
            else:
                V[1] = ''
        except KeyError:
            V[1] = ''

    for J in self.genJ:
        try:
            if J_anchor_pos[J[0]][0] > 0 and J_anchor_pos[J[0]][1] == 'F':  # Check for functionality
                J[1] = J[2][:J_anchor_pos[J[0]][0] + 3]
            else:
                J[1] = ''
        except KeyError:
            J[1] = ''
python
def anchor_and_curate_genV_and_genJ(self, V_anchor_pos_file, J_anchor_pos_file):
    """Trim V and J germline sequences to the CDR3 region.

    Unproductive sequences have an empty string '' for the CDR3 region
    sequence.

    Edits the attributes genV and genJ.

    Parameters
    ----------
    V_anchor_pos_file : str
        File name for the conserved residue (C) locations and functionality
        of each V genomic sequence.
    J_anchor_pos_file : str
        File name for the conserved residue (F/W) locations and functionality
        of each J genomic sequence.

    """
    V_anchor_pos = load_genomic_CDR3_anchor_pos_and_functionality(V_anchor_pos_file)
    J_anchor_pos = load_genomic_CDR3_anchor_pos_and_functionality(J_anchor_pos_file)

    for V in self.genV:
        try:
            if V_anchor_pos[V[0]][0] > 0 and V_anchor_pos[V[0]][1] == 'F':  # Check for functionality
                V[1] = V[2][V_anchor_pos[V[0]][0]:]
            else:
                V[1] = ''
        except KeyError:
            V[1] = ''

    for J in self.genJ:
        try:
            if J_anchor_pos[J[0]][0] > 0 and J_anchor_pos[J[0]][1] == 'F':  # Check for functionality
                J[1] = J[2][:J_anchor_pos[J[0]][0] + 3]
            else:
                J[1] = ''
        except KeyError:
            J[1] = ''
[ "def", "anchor_and_curate_genV_and_genJ", "(", "self", ",", "V_anchor_pos_file", ",", "J_anchor_pos_file", ")", ":", "V_anchor_pos", "=", "load_genomic_CDR3_anchor_pos_and_functionality", "(", "V_anchor_pos_file", ")", "J_anchor_pos", "=", "load_genomic_CDR3_anchor_pos_and_functionality", "(", "J_anchor_pos_file", ")", "for", "V", "in", "self", ".", "genV", ":", "try", ":", "if", "V_anchor_pos", "[", "V", "[", "0", "]", "]", "[", "0", "]", ">", "0", "and", "V_anchor_pos", "[", "V", "[", "0", "]", "]", "[", "1", "]", "==", "'F'", ":", "#Check for functionality", "V", "[", "1", "]", "=", "V", "[", "2", "]", "[", "V_anchor_pos", "[", "V", "[", "0", "]", "]", "[", "0", "]", ":", "]", "else", ":", "V", "[", "1", "]", "=", "''", "except", "KeyError", ":", "V", "[", "1", "]", "=", "''", "for", "J", "in", "self", ".", "genJ", ":", "try", ":", "if", "J_anchor_pos", "[", "J", "[", "0", "]", "]", "[", "0", "]", ">", "0", "and", "J_anchor_pos", "[", "J", "[", "0", "]", "]", "[", "1", "]", "==", "'F'", ":", "#Check for functionality", "J", "[", "1", "]", "=", "J", "[", "2", "]", "[", ":", "J_anchor_pos", "[", "J", "[", "0", "]", "]", "[", "0", "]", "+", "3", "]", "else", ":", "J", "[", "1", "]", "=", "''", "except", "KeyError", ":", "J", "[", "1", "]", "=", "''" ]
Trim V and J germline sequences to the CDR3 region.

Unproductive sequences have an empty string '' for the CDR3 region sequence.

Edits the attributes genV and genJ.

Parameters
----------
V_anchor_pos_file : str
    File name for the conserved residue (C) locations and functionality of
    each V genomic sequence.
J_anchor_pos_file : str
    File name for the conserved residue (F/W) locations and functionality of
    each J genomic sequence.
[ "Trim", "V", "and", "J", "germline", "sequences", "to", "the", "CDR3", "region", ".", "Unproductive", "sequences", "have", "an", "empty", "string", "for", "the", "CDR3", "region", "sequence", ".", "Edits", "the", "attributes", "genV", "and", "genJ", "Parameters", "----------", "V_anchor_pos_file_name", ":", "str", "File", "name", "for", "the", "conserved", "residue", "(", "C", ")", "locations", "and", "functionality", "of", "each", "V", "genomic", "sequence", ".", "J_anchor_pos_file_name", ":", "str", "File", "name", "for", "the", "conserved", "residue", "(", "F", "/", "W", ")", "locations", "and", "functionality", "of", "each", "J", "genomic", "sequence", "." ]
train
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/load_model.py#L129-L167
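The curation reduces to two slices per gene; a toy illustration with made-up sequences and anchor positions:

V_germline = 'AAACCCTGTGCC'        # full V germline sequence
v_anchor = 6                       # index of the conserved C codon ('TGT')
V_germline[v_anchor:]              # -> 'TGTGCC': the CDR3 part starts at the conserved C

J_germline = 'CAGTTTGGGCAG'        # full J germline sequence
j_anchor = 3                       # index of the conserved F/W codon ('TTT')
J_germline[:j_anchor + 3]          # -> 'CAGTTT': the CDR3 part ends with the conserved F/W codon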
zsethna/OLGA
olga/load_model.py
GenomicData.generate_cutV_genomic_CDR3_segs
def generate_cutV_genomic_CDR3_segs(self):
    """Add palindromic inserted nucleotides to germline V sequences.

    The maximum number of palindromic insertions is appended to the germline
    V segments so that delV can index directly for the number of nucleotides
    to delete from a segment.

    Sets the attribute cutV_genomic_CDR3_segs.

    """
    max_palindrome = self.max_delV_palindrome

    self.cutV_genomic_CDR3_segs = []
    for CDR3_V_seg in [x[1] for x in self.genV]:
        if len(CDR3_V_seg) < max_palindrome:
            self.cutV_genomic_CDR3_segs += [cutR_seq(CDR3_V_seg, 0, len(CDR3_V_seg))]
        else:
            self.cutV_genomic_CDR3_segs += [cutR_seq(CDR3_V_seg, 0, max_palindrome)]
python
def generate_cutV_genomic_CDR3_segs(self):
    """Add palindromic inserted nucleotides to germline V sequences.

    The maximum number of palindromic insertions is appended to the germline
    V segments so that delV can index directly for the number of nucleotides
    to delete from a segment.

    Sets the attribute cutV_genomic_CDR3_segs.

    """
    max_palindrome = self.max_delV_palindrome

    self.cutV_genomic_CDR3_segs = []
    for CDR3_V_seg in [x[1] for x in self.genV]:
        if len(CDR3_V_seg) < max_palindrome:
            self.cutV_genomic_CDR3_segs += [cutR_seq(CDR3_V_seg, 0, len(CDR3_V_seg))]
        else:
            self.cutV_genomic_CDR3_segs += [cutR_seq(CDR3_V_seg, 0, max_palindrome)]
[ "def", "generate_cutV_genomic_CDR3_segs", "(", "self", ")", ":", "max_palindrome", "=", "self", ".", "max_delV_palindrome", "self", ".", "cutV_genomic_CDR3_segs", "=", "[", "]", "for", "CDR3_V_seg", "in", "[", "x", "[", "1", "]", "for", "x", "in", "self", ".", "genV", "]", ":", "if", "len", "(", "CDR3_V_seg", ")", "<", "max_palindrome", ":", "self", ".", "cutV_genomic_CDR3_segs", "+=", "[", "cutR_seq", "(", "CDR3_V_seg", ",", "0", ",", "len", "(", "CDR3_V_seg", ")", ")", "]", "else", ":", "self", ".", "cutV_genomic_CDR3_segs", "+=", "[", "cutR_seq", "(", "CDR3_V_seg", ",", "0", ",", "max_palindrome", ")", "]" ]
Add palindromic inserted nucleotides to germline V sequences.

The maximum number of palindromic insertions is appended to the germline V
segments so that delV can index directly for the number of nucleotides to
delete from a segment.

Sets the attribute cutV_genomic_CDR3_segs.
[ "Add", "palindromic", "inserted", "nucleotides", "to", "germline", "V", "sequences", ".", "The", "maximum", "number", "of", "palindromic", "insertions", "are", "appended", "to", "the", "germline", "V", "segments", "so", "that", "delV", "can", "index", "directly", "for", "number", "of", "nucleotides", "to", "delete", "from", "a", "segment", ".", "Sets", "the", "attribute", "cutV_genomic_CDR3_segs", "." ]
train
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/load_model.py#L169-L187
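The palindromic padding can be pictured with a toy stand-in for cutR_seq. This is an assumption about the helper's behavior (appending the reverse complement of the segment's right end); the real implementation lives in OLGA's utils module:

COMP = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}

def toy_cutR_seq(seq, cutR, max_palindrome):
    # cutR nucleotides are cut from the right end; cutting fewer than
    # max_palindrome means palindromic (P) nucleotides are appended instead
    n_pal = max_palindrome - cutR
    if n_pal <= 0:
        return seq[:len(seq) + n_pal]  # plain deletion, no palindrome
    return seq + ''.join(COMP[nt] for nt in reversed(seq[-n_pal:]))

toy_cutR_seq('TGTGCC', 0, 2)  # -> 'TGTGCCGG': 'GG' pairs palindromically with the terminal 'CC'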
zsethna/OLGA
olga/load_model.py
GenomicData.generate_cutJ_genomic_CDR3_segs
def generate_cutJ_genomic_CDR3_segs(self):
    """Add palindromic inserted nucleotides to germline J sequences.

    The maximum number of palindromic insertions is appended to the germline
    J segments so that delJ can index directly for the number of nucleotides
    to delete from a segment.

    Sets the attribute cutJ_genomic_CDR3_segs.

    """
    max_palindrome = self.max_delJ_palindrome

    self.cutJ_genomic_CDR3_segs = []
    for CDR3_J_seg in [x[1] for x in self.genJ]:
        if len(CDR3_J_seg) < max_palindrome:
            self.cutJ_genomic_CDR3_segs += [cutL_seq(CDR3_J_seg, 0, len(CDR3_J_seg))]
        else:
            self.cutJ_genomic_CDR3_segs += [cutL_seq(CDR3_J_seg, 0, max_palindrome)]
python
def generate_cutJ_genomic_CDR3_segs(self):
    """Add palindromic inserted nucleotides to germline J sequences.

    The maximum number of palindromic insertions is appended to the germline
    J segments so that delJ can index directly for the number of nucleotides
    to delete from a segment.

    Sets the attribute cutJ_genomic_CDR3_segs.

    """
    max_palindrome = self.max_delJ_palindrome

    self.cutJ_genomic_CDR3_segs = []
    for CDR3_J_seg in [x[1] for x in self.genJ]:
        if len(CDR3_J_seg) < max_palindrome:
            self.cutJ_genomic_CDR3_segs += [cutL_seq(CDR3_J_seg, 0, len(CDR3_J_seg))]
        else:
            self.cutJ_genomic_CDR3_segs += [cutL_seq(CDR3_J_seg, 0, max_palindrome)]
[ "def", "generate_cutJ_genomic_CDR3_segs", "(", "self", ")", ":", "max_palindrome", "=", "self", ".", "max_delJ_palindrome", "self", ".", "cutJ_genomic_CDR3_segs", "=", "[", "]", "for", "CDR3_J_seg", "in", "[", "x", "[", "1", "]", "for", "x", "in", "self", ".", "genJ", "]", ":", "if", "len", "(", "CDR3_J_seg", ")", "<", "max_palindrome", ":", "self", ".", "cutJ_genomic_CDR3_segs", "+=", "[", "cutL_seq", "(", "CDR3_J_seg", ",", "0", ",", "len", "(", "CDR3_J_seg", ")", ")", "]", "else", ":", "self", ".", "cutJ_genomic_CDR3_segs", "+=", "[", "cutL_seq", "(", "CDR3_J_seg", ",", "0", ",", "max_palindrome", ")", "]" ]
Add palindromic inserted nucleotides to germline J sequences.

The maximum number of palindromic insertions is appended to the germline J
segments so that delJ can index directly for the number of nucleotides to
delete from a segment.

Sets the attribute cutJ_genomic_CDR3_segs.
[ "Add", "palindromic", "inserted", "nucleotides", "to", "germline", "J", "sequences", ".", "The", "maximum", "number", "of", "palindromic", "insertions", "are", "appended", "to", "the", "germline", "J", "segments", "so", "that", "delJ", "can", "index", "directly", "for", "number", "of", "nucleotides", "to", "delete", "from", "a", "segment", ".", "Sets", "the", "attribute", "cutJ_genomic_CDR3_segs", "." ]
train
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/load_model.py#L189-L206
zsethna/OLGA
olga/load_model.py
GenomicDataVDJ.load_igor_genomic_data
def load_igor_genomic_data(self, params_file_name, V_anchor_pos_file, J_anchor_pos_file):
    """Set attributes by loading in genomic data from an IGoR parameter file.

    Sets attributes genV, max_delV_palindrome, cutV_genomic_CDR3_segs, genD,
    max_delDl_palindrome, max_delDr_palindrome, cutD_genomic_CDR3_segs, genJ,
    max_delJ_palindrome, and cutJ_genomic_CDR3_segs.

    Parameters
    ----------
    params_file_name : str
        File name for an IGoR parameter file.
    V_anchor_pos_file : str
        File name for the conserved residue (C) locations and functionality
        of each V genomic sequence.
    J_anchor_pos_file : str
        File name for the conserved residue (F/W) locations and functionality
        of each J genomic sequence.

    """
    self.genV = read_igor_V_gene_parameters(params_file_name)
    self.genD = read_igor_D_gene_parameters(params_file_name)
    self.genJ = read_igor_J_gene_parameters(params_file_name)

    self.anchor_and_curate_genV_and_genJ(V_anchor_pos_file, J_anchor_pos_file)

    # Need palindrome info before generating the cut_genomic_CDR3_segs
    self.read_VDJ_palindrome_parameters(params_file_name)

    self.generate_cutV_genomic_CDR3_segs()
    self.generate_cutD_genomic_CDR3_segs()
    self.generate_cutJ_genomic_CDR3_segs()
python
def load_igor_genomic_data(self, params_file_name, V_anchor_pos_file, J_anchor_pos_file):
    """Set attributes by loading in genomic data from an IGoR parameter file.

    Sets attributes genV, max_delV_palindrome, cutV_genomic_CDR3_segs, genD,
    max_delDl_palindrome, max_delDr_palindrome, cutD_genomic_CDR3_segs, genJ,
    max_delJ_palindrome, and cutJ_genomic_CDR3_segs.

    Parameters
    ----------
    params_file_name : str
        File name for an IGoR parameter file.
    V_anchor_pos_file : str
        File name for the conserved residue (C) locations and functionality
        of each V genomic sequence.
    J_anchor_pos_file : str
        File name for the conserved residue (F/W) locations and functionality
        of each J genomic sequence.

    """
    self.genV = read_igor_V_gene_parameters(params_file_name)
    self.genD = read_igor_D_gene_parameters(params_file_name)
    self.genJ = read_igor_J_gene_parameters(params_file_name)

    self.anchor_and_curate_genV_and_genJ(V_anchor_pos_file, J_anchor_pos_file)

    # Need palindrome info before generating the cut_genomic_CDR3_segs
    self.read_VDJ_palindrome_parameters(params_file_name)

    self.generate_cutV_genomic_CDR3_segs()
    self.generate_cutD_genomic_CDR3_segs()
    self.generate_cutJ_genomic_CDR3_segs()
[ "def", "load_igor_genomic_data", "(", "self", ",", "params_file_name", ",", "V_anchor_pos_file", ",", "J_anchor_pos_file", ")", ":", "self", ".", "genV", "=", "read_igor_V_gene_parameters", "(", "params_file_name", ")", "self", ".", "genD", "=", "read_igor_D_gene_parameters", "(", "params_file_name", ")", "self", ".", "genJ", "=", "read_igor_J_gene_parameters", "(", "params_file_name", ")", "self", ".", "anchor_and_curate_genV_and_genJ", "(", "V_anchor_pos_file", ",", "J_anchor_pos_file", ")", "self", ".", "read_VDJ_palindrome_parameters", "(", "params_file_name", ")", "#Need palindrome info before generating cut_genomic_CDR3_segs", "self", ".", "generate_cutV_genomic_CDR3_segs", "(", ")", "self", ".", "generate_cutD_genomic_CDR3_segs", "(", ")", "self", ".", "generate_cutJ_genomic_CDR3_segs", "(", ")" ]
Set attributes by loading in genomic data from an IGoR parameter file.

Sets attributes genV, max_delV_palindrome, cutV_genomic_CDR3_segs, genD,
max_delDl_palindrome, max_delDr_palindrome, cutD_genomic_CDR3_segs, genJ,
max_delJ_palindrome, and cutJ_genomic_CDR3_segs.

Parameters
----------
params_file_name : str
    File name for an IGoR parameter file.
V_anchor_pos_file : str
    File name for the conserved residue (C) locations and functionality of
    each V genomic sequence.
J_anchor_pos_file : str
    File name for the conserved residue (F/W) locations and functionality of
    each J genomic sequence.
[ "Set", "attributes", "by", "loading", "in", "genomic", "data", "from", "IGoR", "parameter", "file", ".", "Sets", "attributes", "genV", "max_delV_palindrome", "cutV_genomic_CDR3_segs", "genD", "max_delDl_palindrome", "max_delDr_palindrome", "cutD_genomic_CDR3_segs", "genJ", "max_delJ_palindrome", "and", "cutJ_genomic_CDR3_segs", ".", "Parameters", "----------", "params_file_name", ":", "str", "File", "name", "for", "a", "IGOR", "parameter", "file", ".", "V_anchor_pos_file_name", ":", "str", "File", "name", "for", "the", "conserved", "residue", "(", "C", ")", "locations", "and", "functionality", "of", "each", "V", "genomic", "sequence", ".", "J_anchor_pos_file_name", ":", "str", "File", "name", "for", "the", "conserved", "residue", "(", "F", "/", "W", ")", "locations", "and", "functionality", "of", "each", "J", "genomic", "sequence", "." ]
train
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/load_model.py#L258-L289
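End-to-end usage for a VDJ locus. The file names follow the layout of OLGA's bundled default models, but treat them as placeholders:

genomic_data = GenomicDataVDJ()
genomic_data.load_igor_genomic_data('model_params.txt',
                                    'V_gene_CDR3_anchors.csv',
                                    'J_gene_CDR3_anchors.csv')
# after loading, there is one padded CDR3 segment per allele:
len(genomic_data.cutV_genomic_CDR3_segs) == len(genomic_data.genV)  # -> True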
zsethna/OLGA
olga/load_model.py
GenomicDataVDJ.generate_cutD_genomic_CDR3_segs
def generate_cutD_genomic_CDR3_segs(self):
    """Add palindromic inserted nucleotides to germline D sequences.

    The maximum number of palindromic insertions is appended to both ends of
    the germline D segments so that delDl and delDr can index directly for
    the number of nucleotides to delete from a segment.

    Sets the attribute cutD_genomic_CDR3_segs.

    """
    max_palindrome_L = self.max_delDl_palindrome
    max_palindrome_R = self.max_delDr_palindrome

    self.cutD_genomic_CDR3_segs = []
    for CDR3_D_seg in [x[1] for x in self.genD]:
        if len(CDR3_D_seg) < min(max_palindrome_L, max_palindrome_R):
            self.cutD_genomic_CDR3_segs += [cutR_seq(cutL_seq(CDR3_D_seg, 0, len(CDR3_D_seg)), 0, len(CDR3_D_seg))]
        else:
            self.cutD_genomic_CDR3_segs += [cutR_seq(cutL_seq(CDR3_D_seg, 0, max_palindrome_L), 0, max_palindrome_R)]
python
def generate_cutD_genomic_CDR3_segs(self):
    """Add palindromic inserted nucleotides to germline D sequences.

    The maximum number of palindromic insertions is appended to both ends of
    the germline D segments so that delDl and delDr can index directly for
    the number of nucleotides to delete from a segment.

    Sets the attribute cutD_genomic_CDR3_segs.

    """
    max_palindrome_L = self.max_delDl_palindrome
    max_palindrome_R = self.max_delDr_palindrome

    self.cutD_genomic_CDR3_segs = []
    for CDR3_D_seg in [x[1] for x in self.genD]:
        if len(CDR3_D_seg) < min(max_palindrome_L, max_palindrome_R):
            self.cutD_genomic_CDR3_segs += [cutR_seq(cutL_seq(CDR3_D_seg, 0, len(CDR3_D_seg)), 0, len(CDR3_D_seg))]
        else:
            self.cutD_genomic_CDR3_segs += [cutR_seq(cutL_seq(CDR3_D_seg, 0, max_palindrome_L), 0, max_palindrome_R)]
[ "def", "generate_cutD_genomic_CDR3_segs", "(", "self", ")", ":", "max_palindrome_L", "=", "self", ".", "max_delDl_palindrome", "max_palindrome_R", "=", "self", ".", "max_delDr_palindrome", "self", ".", "cutD_genomic_CDR3_segs", "=", "[", "]", "for", "CDR3_D_seg", "in", "[", "x", "[", "1", "]", "for", "x", "in", "self", ".", "genD", "]", ":", "if", "len", "(", "CDR3_D_seg", ")", "<", "min", "(", "max_palindrome_L", ",", "max_palindrome_R", ")", ":", "self", ".", "cutD_genomic_CDR3_segs", "+=", "[", "cutR_seq", "(", "cutL_seq", "(", "CDR3_D_seg", ",", "0", ",", "len", "(", "CDR3_D_seg", ")", ")", ",", "0", ",", "len", "(", "CDR3_D_seg", ")", ")", "]", "else", ":", "self", ".", "cutD_genomic_CDR3_segs", "+=", "[", "cutR_seq", "(", "cutL_seq", "(", "CDR3_D_seg", ",", "0", ",", "max_palindrome_L", ")", ",", "0", ",", "max_palindrome_R", ")", "]" ]
Add palindromic inserted nucleotides to germline D sequences.

The maximum number of palindromic insertions is appended to both ends of the
germline D segments so that delDl and delDr can index directly for the number
of nucleotides to delete from a segment.

Sets the attribute cutD_genomic_CDR3_segs.
[ "Add", "palindromic", "inserted", "nucleotides", "to", "germline", "V", "sequences", ".", "The", "maximum", "number", "of", "palindromic", "insertions", "are", "appended", "to", "the", "germline", "D", "segments", "so", "that", "delDl", "and", "delDr", "can", "index", "directly", "for", "number", "of", "nucleotides", "to", "delete", "from", "a", "segment", ".", "Sets", "the", "attribute", "cutV_genomic_CDR3_segs", "." ]
train
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/load_model.py#L291-L309
zsethna/OLGA
olga/load_model.py
GenomicDataVDJ.read_VDJ_palindrome_parameters
def read_VDJ_palindrome_parameters(self, params_file_name):
    """Read V, D, and J palindrome parameters from file.

    Sets the attributes max_delV_palindrome, max_delDl_palindrome,
    max_delDr_palindrome, and max_delJ_palindrome.

    Parameters
    ----------
    params_file_name : str
        File name for an IGoR parameter file of a VDJ generative model.

    """
    params_file = open(params_file_name, 'r')

    in_delV = False
    in_delDl = False
    in_delDr = False
    in_delJ = False

    for line in params_file:
        if line.startswith('#Deletion;V_gene;'):
            in_delV = True
            in_delDl = False
            in_delDr = False
            in_delJ = False
        elif line.startswith('#Deletion;D_gene;Three_prime;'):
            in_delV = False
            in_delDl = False
            in_delDr = True
            in_delJ = False
        elif line.startswith('#Deletion;D_gene;Five_prime;'):
            in_delV = False
            in_delDl = True
            in_delDr = False
            in_delJ = False
        elif line.startswith('#Deletion;J_gene;'):
            in_delV = False
            in_delDl = False
            in_delDr = False
            in_delJ = True
        elif any([in_delV, in_delDl, in_delDr, in_delJ]) and line.startswith('%'):
            if int(line.split(';')[-1]) == 0:
                if in_delV:
                    self.max_delV_palindrome = np.abs(int(line.lstrip('%').split(';')[0]))
                elif in_delDl:
                    self.max_delDl_palindrome = np.abs(int(line.lstrip('%').split(';')[0]))
                elif in_delDr:
                    self.max_delDr_palindrome = np.abs(int(line.lstrip('%').split(';')[0]))
                elif in_delJ:
                    self.max_delJ_palindrome = np.abs(int(line.lstrip('%').split(';')[0]))
        else:
            in_delV = False
            in_delDl = False
            in_delDr = False
            in_delJ = False
    params_file.close()
python
def read_VDJ_palindrome_parameters(self, params_file_name):
    """Read V, D, and J palindrome parameters from file.

    Sets the attributes max_delV_palindrome, max_delDl_palindrome,
    max_delDr_palindrome, and max_delJ_palindrome.

    Parameters
    ----------
    params_file_name : str
        File name for an IGoR parameter file of a VDJ generative model.

    """
    params_file = open(params_file_name, 'r')

    in_delV = False
    in_delDl = False
    in_delDr = False
    in_delJ = False

    for line in params_file:
        if line.startswith('#Deletion;V_gene;'):
            in_delV = True
            in_delDl = False
            in_delDr = False
            in_delJ = False
        elif line.startswith('#Deletion;D_gene;Three_prime;'):
            in_delV = False
            in_delDl = False
            in_delDr = True
            in_delJ = False
        elif line.startswith('#Deletion;D_gene;Five_prime;'):
            in_delV = False
            in_delDl = True
            in_delDr = False
            in_delJ = False
        elif line.startswith('#Deletion;J_gene;'):
            in_delV = False
            in_delDl = False
            in_delDr = False
            in_delJ = True
        elif any([in_delV, in_delDl, in_delDr, in_delJ]) and line.startswith('%'):
            if int(line.split(';')[-1]) == 0:
                if in_delV:
                    self.max_delV_palindrome = np.abs(int(line.lstrip('%').split(';')[0]))
                elif in_delDl:
                    self.max_delDl_palindrome = np.abs(int(line.lstrip('%').split(';')[0]))
                elif in_delDr:
                    self.max_delDr_palindrome = np.abs(int(line.lstrip('%').split(';')[0]))
                elif in_delJ:
                    self.max_delJ_palindrome = np.abs(int(line.lstrip('%').split(';')[0]))
        else:
            in_delV = False
            in_delDl = False
            in_delDr = False
            in_delJ = False
    params_file.close()
[ "def", "read_VDJ_palindrome_parameters", "(", "self", ",", "params_file_name", ")", ":", "params_file", "=", "open", "(", "params_file_name", ",", "'r'", ")", "in_delV", "=", "False", "in_delDl", "=", "False", "in_delDr", "=", "False", "in_delJ", "=", "False", "for", "line", "in", "params_file", ":", "if", "line", ".", "startswith", "(", "'#Deletion;V_gene;'", ")", ":", "in_delV", "=", "True", "in_delDl", "=", "False", "in_delDr", "=", "False", "in_delJ", "=", "False", "elif", "line", ".", "startswith", "(", "'#Deletion;D_gene;Three_prime;'", ")", ":", "in_delV", "=", "False", "in_delDl", "=", "False", "in_delDr", "=", "True", "in_delJ", "=", "False", "elif", "line", ".", "startswith", "(", "'#Deletion;D_gene;Five_prime;'", ")", ":", "in_delV", "=", "False", "in_delDl", "=", "True", "in_delDr", "=", "False", "in_delJ", "=", "False", "elif", "line", ".", "startswith", "(", "'#Deletion;J_gene;'", ")", ":", "in_delV", "=", "False", "in_delDl", "=", "False", "in_delDr", "=", "False", "in_delJ", "=", "True", "elif", "any", "(", "[", "in_delV", ",", "in_delDl", ",", "in_delDr", ",", "in_delJ", "]", ")", "and", "line", ".", "startswith", "(", "'%'", ")", ":", "if", "int", "(", "line", ".", "split", "(", "';'", ")", "[", "-", "1", "]", ")", "==", "0", ":", "if", "in_delV", ":", "self", ".", "max_delV_palindrome", "=", "np", ".", "abs", "(", "int", "(", "line", ".", "lstrip", "(", "'%'", ")", ".", "split", "(", "';'", ")", "[", "0", "]", ")", ")", "elif", "in_delDl", ":", "self", ".", "max_delDl_palindrome", "=", "np", ".", "abs", "(", "int", "(", "line", ".", "lstrip", "(", "'%'", ")", ".", "split", "(", "';'", ")", "[", "0", "]", ")", ")", "elif", "in_delDr", ":", "self", ".", "max_delDr_palindrome", "=", "np", ".", "abs", "(", "int", "(", "line", ".", "lstrip", "(", "'%'", ")", ".", "split", "(", "';'", ")", "[", "0", "]", ")", ")", "elif", "in_delJ", ":", "self", ".", "max_delJ_palindrome", "=", "np", ".", "abs", "(", "int", "(", "line", ".", "lstrip", "(", "'%'", ")", ".", "split", "(", "';'", ")", "[", "0", "]", ")", ")", "else", ":", "in_delV", "=", "False", "in_delDl", "=", "False", "in_delDr", "=", "False", "in_delJ", "=", "False" ]
Read V, D, and J palindrome parameters from file.

Sets the attributes max_delV_palindrome, max_delDl_palindrome,
max_delDr_palindrome, and max_delJ_palindrome.

Parameters
----------
params_file_name : str
    File name for an IGoR parameter file of a VDJ generative model.
[ "Read", "V", "D", "and", "J", "palindrome", "parameters", "from", "file", ".", "Sets", "the", "attributes", "max_delV_palindrome", "max_delDl_palindrome", "max_delDr_palindrome", "and", "max_delJ_palindrome", ".", "Parameters", "----------", "params_file_name", ":", "str", "File", "name", "for", "an", "IGoR", "parameter", "file", "of", "a", "VDJ", "generative", "model", "." ]
train
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/load_model.py#L311-L368
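A sketch of the deletion section this scanner matches, inferred from the parsing logic (`%value;index` lines; the code assumes the entry at index 0 carries the most negative deletion, i.e. the maximum palindrome length). The header fields and values are illustrative:

# hypothetical excerpt of 'model_params.txt':
#   #Deletion;V_gene;Three_prime;5;v_3_del
#   %-4;0
#   %-3;1
#   %-2;2
# -> max_delV_palindrome = 4  (np.abs of the value whose trailing index is 0)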
zsethna/OLGA
olga/load_model.py
GenomicDataVJ.load_igor_genomic_data
def load_igor_genomic_data(self, params_file_name, V_anchor_pos_file, J_anchor_pos_file):
    """Set attributes by loading in genomic data from an IGoR parameter file.

    Sets attributes genV, genJ, max_delV_palindrome, max_delJ_palindrome,
    cutV_genomic_CDR3_segs, and cutJ_genomic_CDR3_segs.

    Parameters
    ----------
    params_file_name : str
        File name for an IGoR parameter file.
    V_anchor_pos_file : str
        File name for the conserved residue (C) locations and functionality
        of each V genomic sequence.
    J_anchor_pos_file : str
        File name for the conserved residue (F/W) locations and functionality
        of each J genomic sequence.

    """
    self.genV = read_igor_V_gene_parameters(params_file_name)
    self.genJ = read_igor_J_gene_parameters(params_file_name)

    self.anchor_and_curate_genV_and_genJ(V_anchor_pos_file, J_anchor_pos_file)

    self.read_igor_VJ_palindrome_parameters(params_file_name)

    self.generate_cutV_genomic_CDR3_segs()
    self.generate_cutJ_genomic_CDR3_segs()
python
def load_igor_genomic_data(self, params_file_name, V_anchor_pos_file, J_anchor_pos_file):
    """Set attributes by loading in genomic data from an IGoR parameter file.

    Sets attributes genV, genJ, max_delV_palindrome, max_delJ_palindrome,
    cutV_genomic_CDR3_segs, and cutJ_genomic_CDR3_segs.

    Parameters
    ----------
    params_file_name : str
        File name for an IGoR parameter file.
    V_anchor_pos_file : str
        File name for the conserved residue (C) locations and functionality
        of each V genomic sequence.
    J_anchor_pos_file : str
        File name for the conserved residue (F/W) locations and functionality
        of each J genomic sequence.

    """
    self.genV = read_igor_V_gene_parameters(params_file_name)
    self.genJ = read_igor_J_gene_parameters(params_file_name)

    self.anchor_and_curate_genV_and_genJ(V_anchor_pos_file, J_anchor_pos_file)

    self.read_igor_VJ_palindrome_parameters(params_file_name)

    self.generate_cutV_genomic_CDR3_segs()
    self.generate_cutJ_genomic_CDR3_segs()
[ "def", "load_igor_genomic_data", "(", "self", ",", "params_file_name", ",", "V_anchor_pos_file", ",", "J_anchor_pos_file", ")", ":", "self", ".", "genV", "=", "read_igor_V_gene_parameters", "(", "params_file_name", ")", "self", ".", "genJ", "=", "read_igor_J_gene_parameters", "(", "params_file_name", ")", "self", ".", "anchor_and_curate_genV_and_genJ", "(", "V_anchor_pos_file", ",", "J_anchor_pos_file", ")", "self", ".", "read_igor_VJ_palindrome_parameters", "(", "params_file_name", ")", "self", ".", "generate_cutV_genomic_CDR3_segs", "(", ")", "self", ".", "generate_cutJ_genomic_CDR3_segs", "(", ")" ]
Set attributes by loading in genomic data from an IGoR parameter file.

Sets attributes genV, genJ, max_delV_palindrome, max_delJ_palindrome,
cutV_genomic_CDR3_segs, and cutJ_genomic_CDR3_segs.

Parameters
----------
params_file_name : str
    File name for an IGoR parameter file.
V_anchor_pos_file : str
    File name for the conserved residue (C) locations and functionality of
    each V genomic sequence.
J_anchor_pos_file : str
    File name for the conserved residue (F/W) locations and functionality of
    each J genomic sequence.
[ "Set", "attributes", "by", "loading", "in", "genomic", "data", "from", "IGoR", "parameter", "file", ".", "Sets", "attributes", "genV", "genJ", "max_delV_palindrome", "max_delJ_palindrome", "cutV_genomic_CDR3_segs", "and", "cutJ_genomic_CDR3_segs", ".", "Parameters", "----------", "params_file_name", ":", "str", "File", "name", "for", "a", "IGOR", "parameter", "file", ".", "V_anchor_pos_file_name", ":", "str", "File", "name", "for", "the", "conserved", "residue", "(", "C", ")", "locations", "and", "functionality", "of", "each", "V", "genomic", "sequence", ".", "J_anchor_pos_file_name", ":", "str", "File", "name", "for", "the", "conserved", "residue", "(", "F", "/", "W", ")", "locations", "and", "functionality", "of", "each", "J", "genomic", "sequence", "." ]
train
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/load_model.py#L401-L428
zsethna/OLGA
olga/load_model.py
GenomicDataVJ.read_igor_VJ_palindrome_parameters
def read_igor_VJ_palindrome_parameters(self, params_file_name):
    """Read V and J palindrome parameters from file.

    Sets the attributes max_delV_palindrome and max_delJ_palindrome.

    Parameters
    ----------
    params_file_name : str
        File name for an IGoR parameter file of a VJ generative model.

    """
    params_file = open(params_file_name, 'r')

    in_delV = False
    in_delJ = False

    for line in params_file:
        if line.startswith('#Deletion;V_gene;'):
            in_delV = True
            in_delJ = False
        elif line.startswith('#Deletion;J_gene;'):
            in_delV = False
            in_delJ = True
        elif any([in_delV, in_delJ]) and line.startswith('%'):
            if int(line.split(';')[-1]) == 0:
                if in_delV:
                    self.max_delV_palindrome = np.abs(int(line.lstrip('%').split(';')[0]))
                elif in_delJ:
                    self.max_delJ_palindrome = np.abs(int(line.lstrip('%').split(';')[0]))
        else:
            in_delV = False
            in_delJ = False
    params_file.close()
python
def read_igor_VJ_palindrome_parameters(self, params_file_name):
    """Read V and J palindrome parameters from file.

    Sets the attributes max_delV_palindrome and max_delJ_palindrome.

    Parameters
    ----------
    params_file_name : str
        File name for an IGoR parameter file of a VJ generative model.

    """
    params_file = open(params_file_name, 'r')

    in_delV = False
    in_delJ = False

    for line in params_file:
        if line.startswith('#Deletion;V_gene;'):
            in_delV = True
            in_delJ = False
        elif line.startswith('#Deletion;J_gene;'):
            in_delV = False
            in_delJ = True
        elif any([in_delV, in_delJ]) and line.startswith('%'):
            if int(line.split(';')[-1]) == 0:
                if in_delV:
                    self.max_delV_palindrome = np.abs(int(line.lstrip('%').split(';')[0]))
                elif in_delJ:
                    self.max_delJ_palindrome = np.abs(int(line.lstrip('%').split(';')[0]))
        else:
            in_delV = False
            in_delJ = False
    params_file.close()
[ "def", "read_igor_VJ_palindrome_parameters", "(", "self", ",", "params_file_name", ")", ":", "params_file", "=", "open", "(", "params_file_name", ",", "'r'", ")", "in_delV", "=", "False", "in_delJ", "=", "False", "for", "line", "in", "params_file", ":", "if", "line", ".", "startswith", "(", "'#Deletion;V_gene;'", ")", ":", "in_delV", "=", "True", "in_delJ", "=", "False", "elif", "line", ".", "startswith", "(", "'#Deletion;J_gene;'", ")", ":", "in_delV", "=", "False", "in_delJ", "=", "True", "elif", "any", "(", "[", "in_delV", ",", "in_delJ", "]", ")", "and", "line", ".", "startswith", "(", "'%'", ")", ":", "if", "int", "(", "line", ".", "split", "(", "';'", ")", "[", "-", "1", "]", ")", "==", "0", ":", "if", "in_delV", ":", "self", ".", "max_delV_palindrome", "=", "np", ".", "abs", "(", "int", "(", "line", ".", "lstrip", "(", "'%'", ")", ".", "split", "(", "';'", ")", "[", "0", "]", ")", ")", "elif", "in_delJ", ":", "self", ".", "max_delJ_palindrome", "=", "np", ".", "abs", "(", "int", "(", "line", ".", "lstrip", "(", "'%'", ")", ".", "split", "(", "';'", ")", "[", "0", "]", ")", ")", "else", ":", "in_delV", "=", "False", "in_delJ", "=", "False" ]
Read V and J palindrome parameters from file.

Sets the attributes max_delV_palindrome and max_delJ_palindrome.

Parameters
----------
params_file_name : str
    File name for an IGoR parameter file of a VJ generative model.
[ "Read", "V", "and", "J", "palindrome", "parameters", "from", "file", ".", "Sets", "the", "attributes", "max_delV_palindrome", "and", "max_delJ_palindrome", ".", "Parameters", "----------", "params_file_name", ":", "str", "File", "name", "for", "an", "IGoR", "parameter", "file", "of", "a", "VJ", "generative", "model", "." ]
train
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/load_model.py#L431-L464
zsethna/OLGA
olga/load_model.py
GenerativeModelVDJ.load_and_process_igor_model
def load_and_process_igor_model(self, marginals_file_name):
    """Set attributes by reading a generative model from an IGoR marginals file.

    Sets attributes PV, PdelV_given_V, PDJ, PdelJ_given_J,
    PdelDldelDr_given_D, PinsVD, PinsDJ, Rvd, and Rdj.

    Parameters
    ----------
    marginals_file_name : str
        File name for an IGoR model marginals file.

    """
    raw_model = read_igor_marginals_txt(marginals_file_name)

    self.PV = raw_model[0]['v_choice']
    self.PinsVD = raw_model[0]['vd_ins']
    self.PinsDJ = raw_model[0]['dj_ins']
    self.PdelV_given_V = raw_model[0]['v_3_del'].T
    self.PdelJ_given_J = raw_model[0]['j_5_del'].T

    # While this class assumes P(V, D, J) factorizes into P(V)*P(D, J), the
    # B cell model infers allowing for the full correlation. Most of the
    # correlation information is due to chromosomal correlation of alleles
    # (i.e. what chromosome each allele is found on). While this information
    # can be interesting for inference purposes, it is discarded here as
    # generally these models may be used for CDR3s from individuals the
    # models weren't inferred from (and thus the chromosomal correlations
    # are incorrect). This also equates the T and B cell models. To
    # reintroduce the chromosomal correlations, use V and J usage masks
    # after inferring the allele identities on each chromosome.

    if raw_model[1]['d_gene'] == ['j_choice', 'd_gene']:
        # Factorized P(V, D, J) = P(V)*P(D, J) --- correct for T cell models
        self.PDJ = np.multiply(raw_model[0]['d_gene'].T, raw_model[0]['j_choice'])
    elif raw_model[1]['d_gene'] == ['v_choice', 'j_choice', 'd_gene']:
        # Full P(V, D, J) for B cells --- need to compute the marginal P(D, J)
        PVJ = np.multiply(raw_model[0]['j_choice'].T, raw_model[0]['v_choice']).T
        PVDJ = np.zeros([raw_model[0]['d_gene'].shape[0], raw_model[0]['d_gene'].shape[2], raw_model[0]['d_gene'].shape[1]])
        for v_in in range(raw_model[0]['d_gene'].shape[0]):
            for j_in in range(raw_model[0]['d_gene'].shape[1]):
                PVDJ[v_in, :, j_in] = PVJ[v_in, j_in]*raw_model[0]['d_gene'][v_in, j_in, :]
        self.PDJ = np.sum(PVDJ, 0)
    else:
        print('Unrecognized model structure -- need to construct P(D, J)')
        return 0

    self.PdelDldelDr_given_D = np.transpose(np.multiply(np.transpose(raw_model[0]['d_3_del'], (2, 0, 1)), raw_model[0]['d_5_del']), (2, 0, 1))

    Rvd_raw = raw_model[0]['vd_dinucl'].reshape((4, 4)).T
    self.Rvd = np.multiply(Rvd_raw, 1/np.sum(Rvd_raw, axis=0))
    Rdj_raw = raw_model[0]['dj_dinucl'].reshape((4, 4)).T
    self.Rdj = np.multiply(Rdj_raw, 1/np.sum(Rdj_raw, axis=0))
python
def load_and_process_igor_model(self, marginals_file_name):
    """Set attributes by reading a generative model from an IGoR marginals file.

    Sets attributes PV, PdelV_given_V, PDJ, PdelJ_given_J,
    PdelDldelDr_given_D, PinsVD, PinsDJ, Rvd, and Rdj.

    Parameters
    ----------
    marginals_file_name : str
        File name for an IGoR model marginals file.

    """
    raw_model = read_igor_marginals_txt(marginals_file_name)

    self.PV = raw_model[0]['v_choice']
    self.PinsVD = raw_model[0]['vd_ins']
    self.PinsDJ = raw_model[0]['dj_ins']
    self.PdelV_given_V = raw_model[0]['v_3_del'].T
    self.PdelJ_given_J = raw_model[0]['j_5_del'].T

    #While this class assumes P(V, D, J) factorizes into P(V)*P(D, J), the B cell model
    #infers allowing for the full correlation. Most of the correlation information is due to
    #chromosomal correlation of alleles (i.e. what chromosome each allele is found on).
    #While this information can be interesting for inference purposes, it is discarded here
    #as generally these models may be used for CDR3s from individuals the models weren't inferred
    #from (and thus the chromosomal correlations are incorrect). This also equates the T and B cell
    #models. To reintroduce the chromosomal correlations use V and J usage masks after inferring the
    #allele identities on each chromosome.
    if raw_model[1]['d_gene'] == ['j_choice', 'd_gene']:
        #Factorized P(V, D, J) = P(V)*P(D, J) --- correct for T cell models
        self.PDJ = np.multiply(raw_model[0]['d_gene'].T, raw_model[0]['j_choice'])
    elif raw_model[1]['d_gene'] == ['v_choice', 'j_choice', 'd_gene']:
        #Full P(V, D, J) for B cells --- need to compute the marginal P(D, J)
        PVJ = np.multiply(raw_model[0]['j_choice'].T, raw_model[0]['v_choice']).T
        PVDJ = np.zeros([raw_model[0]['d_gene'].shape[0], raw_model[0]['d_gene'].shape[2], raw_model[0]['d_gene'].shape[1]])
        for v_in in range(raw_model[0]['d_gene'].shape[0]):
            for j_in in range(raw_model[0]['d_gene'].shape[1]):
                PVDJ[v_in, :, j_in] = PVJ[v_in, j_in]*raw_model[0]['d_gene'][v_in, j_in, :]
        self.PDJ = np.sum(PVDJ, 0)
    else:
        print('Unrecognized model structure -- need to construct P(D, J)')
        return 0

    self.PdelDldelDr_given_D = np.transpose(np.multiply(np.transpose(raw_model[0]['d_3_del'], (2, 0, 1)), raw_model[0]['d_5_del']), (2, 0, 1))

    Rvd_raw = raw_model[0]['vd_dinucl'].reshape((4, 4)).T
    self.Rvd = np.multiply(Rvd_raw, 1/np.sum(Rvd_raw, axis = 0))
    Rdj_raw = raw_model[0]['dj_dinucl'].reshape((4, 4)).T
    self.Rdj = np.multiply(Rdj_raw, 1/np.sum(Rdj_raw, axis = 0))
[ "def", "load_and_process_igor_model", "(", "self", ",", "marginals_file_name", ")", ":", "raw_model", "=", "read_igor_marginals_txt", "(", "marginals_file_name", ")", "self", ".", "PV", "=", "raw_model", "[", "0", "]", "[", "'v_choice'", "]", "self", ".", "PinsVD", "=", "raw_model", "[", "0", "]", "[", "'vd_ins'", "]", "self", ".", "PinsDJ", "=", "raw_model", "[", "0", "]", "[", "'dj_ins'", "]", "self", ".", "PdelV_given_V", "=", "raw_model", "[", "0", "]", "[", "'v_3_del'", "]", ".", "T", "self", ".", "PdelJ_given_J", "=", "raw_model", "[", "0", "]", "[", "'j_5_del'", "]", ".", "T", "#While this class assumes P(V, D, J) factorizes into P(V)*P(D, J), the B cell model", "#infers allowing for the full correlation. Most of the correlation information is due to", "#chromosomal correlation of alleles (i.e. what chromosome each allele is found on).", "#While this information can be interesting for inference purposes, it is discarded here", "#as generally these models may be use for CDR3s from individuals the models weren't inferred", "#from (and thus the chromosomal correlations are incorrect). This also equates the T and B cell", "#models. To reintroduce the chromosomal correlations use V and J usage masks after inferring the ", "#allele identities on each chromosome.", "if", "raw_model", "[", "1", "]", "[", "'d_gene'", "]", "==", "[", "'j_choice'", ",", "'d_gene'", "]", ":", "#Factorized P(V, D, J) = P(V)*P(D, J) --- correct for T cell models", "self", ".", "PDJ", "=", "np", ".", "multiply", "(", "raw_model", "[", "0", "]", "[", "'d_gene'", "]", ".", "T", ",", "raw_model", "[", "0", "]", "[", "'j_choice'", "]", ")", "elif", "raw_model", "[", "1", "]", "[", "'d_gene'", "]", "==", "[", "'v_choice'", ",", "'j_choice'", ",", "'d_gene'", "]", ":", "#Full P(V, D, J) for B cells --- need to compute the marginal P(D, J)", "PVJ", "=", "np", ".", "multiply", "(", "raw_model", "[", "0", "]", "[", "'j_choice'", "]", ".", "T", ",", "raw_model", "[", "0", "]", "[", "'v_choice'", "]", ")", ".", "T", "PVDJ", "=", "np", ".", "zeros", "(", "[", "raw_model", "[", "0", "]", "[", "'d_gene'", "]", ".", "shape", "[", "0", "]", ",", "raw_model", "[", "0", "]", "[", "'d_gene'", "]", ".", "shape", "[", "2", "]", ",", "raw_model", "[", "0", "]", "[", "'d_gene'", "]", ".", "shape", "[", "1", "]", "]", ")", "for", "v_in", "in", "range", "(", "raw_model", "[", "0", "]", "[", "'d_gene'", "]", ".", "shape", "[", "0", "]", ")", ":", "for", "j_in", "in", "range", "(", "raw_model", "[", "0", "]", "[", "'d_gene'", "]", ".", "shape", "[", "1", "]", ")", ":", "PVDJ", "[", "v_in", ",", ":", ",", "j_in", "]", "=", "PVJ", "[", "v_in", ",", "j_in", "]", "*", "raw_model", "[", "0", "]", "[", "'d_gene'", "]", "[", "v_in", ",", "j_in", ",", ":", "]", "self", ".", "PDJ", "=", "np", ".", "sum", "(", "PVDJ", ",", "0", ")", "else", ":", "print", "'Unrecognized model structure -- need to construct P(D, J)'", "return", "0", "self", ".", "PdelDldelDr_given_D", "=", "np", ".", "transpose", "(", "np", ".", "multiply", "(", "np", ".", "transpose", "(", "raw_model", "[", "0", "]", "[", "'d_3_del'", "]", ",", "(", "2", ",", "0", ",", "1", ")", ")", ",", "raw_model", "[", "0", "]", "[", "'d_5_del'", "]", ")", ",", "(", "2", ",", "0", ",", "1", ")", ")", "Rvd_raw", "=", "raw_model", "[", "0", "]", "[", "'vd_dinucl'", "]", ".", "reshape", "(", "(", "4", ",", "4", ")", ")", ".", "T", "self", ".", "Rvd", "=", "np", ".", "multiply", "(", "Rvd_raw", ",", "1", "/", "np", ".", "sum", "(", "Rvd_raw", ",", "axis", "=", "0", ")", ")", "Rdj_raw", "=", 
"raw_model", "[", "0", "]", "[", "'dj_dinucl'", "]", ".", "reshape", "(", "(", "4", ",", "4", ")", ")", ".", "T", "self", ".", "Rdj", "=", "np", ".", "multiply", "(", "Rdj_raw", ",", "1", "/", "np", ".", "sum", "(", "Rdj_raw", ",", "axis", "=", "0", ")", ")" ]
Set attributes by reading a generative model from an IGoR marginals file. Sets attributes PV, PdelV_given_V, PDJ, PdelJ_given_J, PdelDldelDr_given_D, PinsVD, PinsDJ, Rvd, and Rdj. Parameters ---------- marginals_file_name : str File name for an IGoR model marginals file.
[ "Set", "attributes", "by", "reading", "a", "generative", "model", "from", "IGoR", "marginal", "file", ".", "Sets", "attributes", "PV", "PdelV_given_V", "PDJ", "PdelJ_given_J", "PdelDldelDr_given_D", "PinsVD", "PinsDJ", "Rvd", "and", "Rdj", ".", "Parameters", "----------", "marginals_file_name", ":", "str", "File", "name", "for", "a", "IGoR", "model", "marginals", "file", "." ]
train
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/load_model.py#L681-L731
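The comment block in the function above is the heart of the method: for B cell models the marginals carry the full P(V, D, J), and the double loop collapses it to P(D, J) = sum over v of P(V=v, J) * P(D | V=v, J). The following toy NumPy check, with invented shapes (nV, nJ, nD) and random Dirichlet draws standing in for the real marginals, verifies that the loop form used above agrees with a one-line einsum and yields a proper distribution:

import numpy as np

nV, nJ, nD = 3, 2, 4
PVJ = np.random.dirichlet(np.ones(nV * nJ)).reshape(nV, nJ)    # joint P(V, J)
PD_given_VJ = np.random.dirichlet(np.ones(nD), size=(nV, nJ))  # P(D | V, J), shape (nV, nJ, nD)

# Loop form, mirroring the function above (PVDJ has shape (nV, nD, nJ)).
PVDJ = np.zeros((nV, nD, nJ))
for v in range(nV):
    for j in range(nJ):
        PVDJ[v, :, j] = PVJ[v, j] * PD_given_VJ[v, j, :]
PDJ_loop = PVDJ.sum(axis=0)

# Equivalent vectorized form.
PDJ_vec = np.einsum('vj,vjd->dj', PVJ, PD_given_VJ)

assert np.allclose(PDJ_loop, PDJ_vec)
assert np.isclose(PDJ_loop.sum(), 1.0)  # the marginal P(D, J) sums to one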
zsethna/OLGA
olga/load_model.py
GenerativeModelVJ.load_and_process_igor_model
def load_and_process_igor_model(self, marginals_file_name):
    """Set attributes by reading a generative model from an IGoR marginals file.

    Sets attributes PVJ, PdelV_given_V, PdelJ_given_J, PinsVJ, and Rvj.

    Parameters
    ----------
    marginals_file_name : str
        File name for an IGoR model marginals file.

    """
    raw_model = read_igor_marginals_txt(marginals_file_name)

    self.PinsVJ = raw_model[0]['vj_ins']
    self.PdelV_given_V = raw_model[0]['v_3_del'].T
    self.PdelJ_given_J = raw_model[0]['j_5_del'].T
    self.PVJ = np.multiply(raw_model[0]['j_choice'].T, raw_model[0]['v_choice']).T

    Rvj_raw = raw_model[0]['vj_dinucl'].reshape((4, 4)).T
    self.Rvj = np.multiply(Rvj_raw, 1/np.sum(Rvj_raw, axis = 0))
python
def load_and_process_igor_model(self, marginals_file_name):
    """Set attributes by reading a generative model from an IGoR marginals file.

    Sets attributes PVJ, PdelV_given_V, PdelJ_given_J, PinsVJ, and Rvj.

    Parameters
    ----------
    marginals_file_name : str
        File name for an IGoR model marginals file.

    """
    raw_model = read_igor_marginals_txt(marginals_file_name)

    self.PinsVJ = raw_model[0]['vj_ins']
    self.PdelV_given_V = raw_model[0]['v_3_del'].T
    self.PdelJ_given_J = raw_model[0]['j_5_del'].T
    self.PVJ = np.multiply(raw_model[0]['j_choice'].T, raw_model[0]['v_choice']).T

    Rvj_raw = raw_model[0]['vj_dinucl'].reshape((4, 4)).T
    self.Rvj = np.multiply(Rvj_raw, 1/np.sum(Rvj_raw, axis = 0))
[ "def", "load_and_process_igor_model", "(", "self", ",", "marginals_file_name", ")", ":", "raw_model", "=", "read_igor_marginals_txt", "(", "marginals_file_name", ")", "self", ".", "PinsVJ", "=", "raw_model", "[", "0", "]", "[", "'vj_ins'", "]", "self", ".", "PdelV_given_V", "=", "raw_model", "[", "0", "]", "[", "'v_3_del'", "]", ".", "T", "self", ".", "PdelJ_given_J", "=", "raw_model", "[", "0", "]", "[", "'j_5_del'", "]", ".", "T", "self", ".", "PVJ", "=", "np", ".", "multiply", "(", "raw_model", "[", "0", "]", "[", "'j_choice'", "]", ".", "T", ",", "raw_model", "[", "0", "]", "[", "'v_choice'", "]", ")", ".", "T", "Rvj_raw", "=", "raw_model", "[", "0", "]", "[", "'vj_dinucl'", "]", ".", "reshape", "(", "(", "4", ",", "4", ")", ")", ".", "T", "self", ".", "Rvj", "=", "np", ".", "multiply", "(", "Rvj_raw", ",", "1", "/", "np", ".", "sum", "(", "Rvj_raw", ",", "axis", "=", "0", ")", ")" ]
Set attributes by reading a generative model from an IGoR marginals file. Sets attributes PVJ, PdelV_given_V, PdelJ_given_J, PinsVJ, and Rvj. Parameters ---------- marginals_file_name : str File name for an IGoR model marginals file.
[ "Set", "attributes", "by", "reading", "a", "generative", "model", "from", "IGoR", "marginal", "file", ".", "Sets", "attributes", "PVJ", "PdelV_given_V", "PdelJ_given_J", "PinsVJ", "and", "Rvj", ".", "Parameters", "----------", "marginals_file_name", ":", "str", "File", "name", "for", "a", "IGoR", "model", "marginals", "file", "." ]
train
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/load_model.py#L768-L787
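Both loaders finish by turning the flat 16-entry dinucleotide marginal into a 4x4 transition matrix whose columns each sum to one. A small sketch with an invented weight vector (standing in for raw_model[0]['vj_dinucl']) shows the reshape((4, 4)).T plus column-normalization step in isolation:

import numpy as np

# Invented stand-in for the 16 dinucleotide weights read from the marginals file.
flat = np.arange(1.0, 17.0)

R_raw = flat.reshape((4, 4)).T
R = R_raw / R_raw.sum(axis=0)  # same effect as np.multiply(R_raw, 1/np.sum(R_raw, axis=0))

# After the transpose, each column conditions on the previous nucleotide,
# so normalizing along axis 0 makes every column a probability distribution.
assert np.allclose(R.sum(axis=0), 1.0)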
crackinglandia/pype32
pype32/directories.py
ImageBoundForwarderRefEntry.parse
def parse(readDataInstance): """ Returns a new L{ImageBoundForwarderRefEntry} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with the corresponding data to generate a new L{ImageBoundForwarderRefEntry} object. @rtype: L{ImageBoundForwarderRefEntry} @return: A new L{ImageBoundForwarderRefEntry} object. """ boundForwarderEntry = ImageBoundForwarderRefEntry() boundForwarderEntry.timeDateStamp.value = readDataInstance.readDword() boundForwarderEntry.offsetModuleName.value = readDataInstance.readWord() boundForwarderEntry.reserved.value = readDataInstance.readWord() return boundForwarderEntry
python
def parse(readDataInstance): """ Returns a new L{ImageBoundForwarderRefEntry} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with the corresponding data to generate a new L{ImageBoundForwarderRefEntry} object. @rtype: L{ImageBoundForwarderRefEntry} @return: A new L{ImageBoundForwarderRefEntry} object. """ boundForwarderEntry = ImageBoundForwarderRefEntry() boundForwarderEntry.timeDateStamp.value = readDataInstance.readDword() boundForwarderEntry.offsetModuleName.value = readDataInstance.readWord() boundForwarderEntry.reserved.value = readDataInstance.readWord() return boundForwarderEntry
[ "def", "parse", "(", "readDataInstance", ")", ":", "boundForwarderEntry", "=", "ImageBoundForwarderRefEntry", "(", ")", "boundForwarderEntry", ".", "timeDateStamp", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "boundForwarderEntry", ".", "offsetModuleName", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "boundForwarderEntry", ".", "reserved", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "return", "boundForwarderEntry" ]
Returns a new L{ImageBoundForwarderRefEntry} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with the corresponding data to generate a new L{ImageBoundForwarderRefEntry} object. @rtype: L{ImageBoundForwarderRefEntry} @return: A new L{ImageBoundForwarderRefEntry} object.
[ "Returns", "a", "new", "L", "{", "ImageBoundForwarderRefEntry", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L75-L89
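For readers without pype32's ReadData helper, the same 8-byte record can be decoded with the standard struct module. The layout read above (a little-endian DWORD TimeDateStamp followed by WORD OffsetModuleName and WORD Reserved) matches IMAGE_BOUND_FORWARDER_REF from winnt.h; the helper name below is invented for illustration and is not part of pype32.

import struct

def parse_bound_forwarder_ref(data, offset=0):
    """Decode one IMAGE_BOUND_FORWARDER_REF (8 bytes, little-endian)."""
    time_date_stamp, offset_module_name, reserved = struct.unpack_from('<IHH', data, offset)
    return {'timeDateStamp': time_date_stamp,
            'offsetModuleName': offset_module_name,
            'reserved': reserved}

blob = struct.pack('<IHH', 0x12345678, 1, 0)
print(parse_bound_forwarder_ref(blob))  # timeDateStamp == 0x12345678, offsetModuleName == 1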
crackinglandia/pype32
pype32/directories.py
ImageBoundForwarderRef.parse
def parse(readDataInstance, numberOfEntries): """ Returns a L{ImageBoundForwarderRef} array where every element is a L{ImageBoundForwarderRefEntry} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with the corresponding data to generate a new L{ImageBoundForwarderRef} object. @type numberOfEntries: int @param numberOfEntries: The number of C{IMAGE_BOUND_FORWARDER_REF} entries in the array. @rtype: L{ImageBoundForwarderRef} @return: A new L{ImageBoundForwarderRef} object. @raise DataLengthException: If the L{ReadData} instance has less data than C{NumberOfEntries} * sizeof L{ImageBoundForwarderRefEntry}. """ imageBoundForwarderRefsList = ImageBoundForwarderRef() dLength = len(readDataInstance) entryLength = ImageBoundForwarderRefEntry().sizeof() toRead = numberOfEntries * entryLength if dLength >= toRead: for i in range(numberOfEntries): entryData = readDataInstance.read(entryLength) rd = utils.ReadData(entryData) imageBoundForwarderRefsList.append(ImageBoundForwarderRefEntry.parse(rd)) else: raise excep.DataLengthException("Not enough bytes to read.") return imageBoundForwarderRefsList
python
def parse(readDataInstance, numberOfEntries): """ Returns a L{ImageBoundForwarderRef} array where every element is a L{ImageBoundForwarderRefEntry} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with the corresponding data to generate a new L{ImageBoundForwarderRef} object. @type numberOfEntries: int @param numberOfEntries: The number of C{IMAGE_BOUND_FORWARDER_REF} entries in the array. @rtype: L{ImageBoundForwarderRef} @return: A new L{ImageBoundForwarderRef} object. @raise DataLengthException: If the L{ReadData} instance has less data than C{NumberOfEntries} * sizeof L{ImageBoundForwarderRefEntry}. """ imageBoundForwarderRefsList = ImageBoundForwarderRef() dLength = len(readDataInstance) entryLength = ImageBoundForwarderRefEntry().sizeof() toRead = numberOfEntries * entryLength if dLength >= toRead: for i in range(numberOfEntries): entryData = readDataInstance.read(entryLength) rd = utils.ReadData(entryData) imageBoundForwarderRefsList.append(ImageBoundForwarderRefEntry.parse(rd)) else: raise excep.DataLengthException("Not enough bytes to read.") return imageBoundForwarderRefsList
[ "def", "parse", "(", "readDataInstance", ",", "numberOfEntries", ")", ":", "imageBoundForwarderRefsList", "=", "ImageBoundForwarderRef", "(", ")", "dLength", "=", "len", "(", "readDataInstance", ")", "entryLength", "=", "ImageBoundForwarderRefEntry", "(", ")", ".", "sizeof", "(", ")", "toRead", "=", "numberOfEntries", "*", "entryLength", "if", "dLength", ">=", "toRead", ":", "for", "i", "in", "range", "(", "numberOfEntries", ")", ":", "entryData", "=", "readDataInstance", ".", "read", "(", "entryLength", ")", "rd", "=", "utils", ".", "ReadData", "(", "entryData", ")", "imageBoundForwarderRefsList", ".", "append", "(", "ImageBoundForwarderRefEntry", ".", "parse", "(", "rd", ")", ")", "else", ":", "raise", "excep", ".", "DataLengthException", "(", "\"Not enough bytes to read.\"", ")", "return", "imageBoundForwarderRefsList" ]
Returns a L{ImageBoundForwarderRef} array where every element is a L{ImageBoundForwarderRefEntry} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with the corresponding data to generate a new L{ImageBoundForwarderRef} object. @type numberOfEntries: int @param numberOfEntries: The number of C{IMAGE_BOUND_FORWARDER_REF} entries in the array. @rtype: L{ImageBoundForwarderRef} @return: A new L{ImageBoundForwarderRef} object. @raise DataLengthException: If the L{ReadData} instance has less data than C{NumberOfEntries} * sizeof L{ImageBoundForwarderRefEntry}.
[ "Returns", "a", "L", "{", "ImageBoundForwarderRef", "}", "array", "where", "every", "element", "is", "a", "L", "{", "ImageBoundForwarderRefEntry", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L107-L135
crackinglandia/pype32
pype32/directories.py
ImageBoundImportDescriptor.parse
def parse(readDataInstance):
    """
    Returns a new L{ImageBoundImportDescriptor} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object containing the data to create a new L{ImageBoundImportDescriptor} object.

    @rtype: L{ImageBoundImportDescriptor}
    @return: A new L{ImageBoundImportDescriptor} object.
    """
    ibd = ImageBoundImportDescriptor()

    entryData = readDataInstance.read(consts.SIZEOF_IMAGE_BOUND_IMPORT_ENTRY32)
    readDataInstance.offset = 0

    while not utils.allZero(entryData):
        prevOffset = readDataInstance.offset
        boundEntry = ImageBoundImportDescriptorEntry.parse(readDataInstance)

        # if the parsed entry has numberOfModuleForwarderRefs we must adjust the value in the readDataInstance.offset field
        # in order to point after the last ImageBoundForwarderRefEntry.
        if boundEntry.numberOfModuleForwarderRefs.value:
            readDataInstance.offset = prevOffset + (consts.SIZEOF_IMAGE_BOUND_FORWARDER_REF_ENTRY32 * boundEntry.numberOfModuleForwarderRefs.value)
        else:
            readDataInstance.offset = prevOffset

        ibd.append(boundEntry)
        entryData = readDataInstance.read(consts.SIZEOF_IMAGE_BOUND_IMPORT_ENTRY32)

    return ibd
python
def parse(readDataInstance):
    """
    Returns a new L{ImageBoundImportDescriptor} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object containing the data to create a new L{ImageBoundImportDescriptor} object.

    @rtype: L{ImageBoundImportDescriptor}
    @return: A new L{ImageBoundImportDescriptor} object.
    """
    ibd = ImageBoundImportDescriptor()

    entryData = readDataInstance.read(consts.SIZEOF_IMAGE_BOUND_IMPORT_ENTRY32)
    readDataInstance.offset = 0

    while not utils.allZero(entryData):
        prevOffset = readDataInstance.offset
        boundEntry = ImageBoundImportDescriptorEntry.parse(readDataInstance)

        # if the parsed entry has numberOfModuleForwarderRefs we must adjust the value in the readDataInstance.offset field
        # in order to point after the last ImageBoundForwarderRefEntry.
        if boundEntry.numberOfModuleForwarderRefs.value:
            readDataInstance.offset = prevOffset + (consts.SIZEOF_IMAGE_BOUND_FORWARDER_REF_ENTRY32 * boundEntry.numberOfModuleForwarderRefs.value)
        else:
            readDataInstance.offset = prevOffset

        ibd.append(boundEntry)
        entryData = readDataInstance.read(consts.SIZEOF_IMAGE_BOUND_IMPORT_ENTRY32)

    return ibd
[ "def", "parse", "(", "readDataInstance", ")", ":", "ibd", "=", "ImageBoundImportDescriptor", "(", ")", "entryData", "=", "readDataInstance", ".", "read", "(", "consts", ".", "SIZEOF_IMAGE_BOUND_IMPORT_ENTRY32", ")", "readDataInstance", ".", "offset", "=", "0", "while", "not", "utils", ".", "allZero", "(", "entryData", ")", ":", "prevOffset", "=", "readDataInstance", ".", "offset", "boundEntry", "=", "ImageBoundImportDescriptorEntry", ".", "parse", "(", "readDataInstance", ")", "# if the parsed entry has numberOfModuleForwarderRefs we must adjust the value in the readDataInstance.offset field", "# in order to point after the last ImageBoundForwarderRefEntry.", "if", "boundEntry", ".", "numberOfModuleForwarderRefs", ".", "value", ":", "readDataInstance", ".", "offset", "=", "prevOffset", "+", "(", "consts", ".", "SIZEOF_IMAGE_BOUND_FORWARDER_REF_ENTRY32", "*", "boundEntry", ".", "numberOfModuleForwarderRefs", ".", "value", ")", "else", ":", "readDataInstance", ".", "offset", "=", "prevOffset", "ibd", ".", "append", "(", "boundEntry", ")", "entryData", "=", "readDataInstance", ".", "read", "(", "consts", ".", "SIZEOF_IMAGE_BOUND_IMPORT_ENTRY32", ")", "return", "ibd" ]
Returns a new L{ImageBoundImportDescriptor} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object containing the data to create a new L{ImageBoundImportDescriptor} object. @rtype: L{ImageBoundImportDescriptor} @return: A new L{ImageBoundImportDescriptor} object.
[ "Returns", "a", "new", "L", "{", "ImageBoundImportDescriptor", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L153-L182
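The method above walks a zero-terminated table: each IMAGE_BOUND_IMPORT_DESCRIPTOR is 8 bytes, an entry may be followed immediately by numberOfModuleForwarderRefs 8-byte IMAGE_BOUND_FORWARDER_REF records, and the table ends at an all-zero descriptor. The sketch below is an independent illustration of that layout using plain struct and offset arithmetic rather than pype32's ReadData bookkeeping; the helper name is invented, and it is not a drop-in replacement for the method above.

import struct

ENTRY_SIZE = 8  # sizeof(IMAGE_BOUND_IMPORT_DESCRIPTOR) == sizeof(IMAGE_BOUND_FORWARDER_REF)

def walk_bound_imports(data):
    """Yield (timeDateStamp, offsetModuleName, nForwarderRefs) until the all-zero terminator."""
    offset = 0
    while True:
        stamp, name_off, n_refs = struct.unpack_from('<IHH', data, offset)
        if stamp == 0 and name_off == 0 and n_refs == 0:
            break
        yield stamp, name_off, n_refs
        # Skip the descriptor itself plus its trailing forwarder-ref records.
        offset += ENTRY_SIZE * (1 + n_refs)

# Two descriptors (the second with one forwarder ref), then the terminator.
blob = (struct.pack('<IHH', 1, 16, 0) +
        struct.pack('<IHH', 2, 24, 1) +
        struct.pack('<IHH', 9, 32, 0) +   # forwarder ref of the second entry
        bytes(8))                         # all-zero terminator
print(list(walk_bound_imports(blob)))  # -> [(1, 16, 0), (2, 24, 1)]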
crackinglandia/pype32
pype32/directories.py
ImageBoundImportDescriptorEntry.parse
def parse(readDataInstance):
    """
    Returns a new L{ImageBoundImportDescriptorEntry} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object containing data to create a new L{ImageBoundImportDescriptorEntry}.

    @rtype: L{ImageBoundImportDescriptorEntry}
    @return: A new L{ImageBoundImportDescriptorEntry} object.
    """
    boundEntry = ImageBoundImportDescriptorEntry()

    boundEntry.timeDateStamp.value = readDataInstance.readDword()
    boundEntry.offsetModuleName.value = readDataInstance.readWord()
    boundEntry.numberOfModuleForwarderRefs.value = readDataInstance.readWord()

    numberOfForwarderRefsEntries = boundEntry.numberOfModuleForwarderRefs.value
    if numberOfForwarderRefsEntries:
        bytesToRead = numberOfForwarderRefsEntries * ImageBoundForwarderRefEntry().sizeof()
        rd = utils.ReadData(readDataInstance.read(bytesToRead))
        boundEntry.forwarderRefsList = ImageBoundForwarderRef.parse(rd, numberOfForwarderRefsEntries)

    return boundEntry
python
def parse(readDataInstance):
    """
    Returns a new L{ImageBoundImportDescriptorEntry} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object containing data to create a new L{ImageBoundImportDescriptorEntry}.

    @rtype: L{ImageBoundImportDescriptorEntry}
    @return: A new L{ImageBoundImportDescriptorEntry} object.
    """
    boundEntry = ImageBoundImportDescriptorEntry()

    boundEntry.timeDateStamp.value = readDataInstance.readDword()
    boundEntry.offsetModuleName.value = readDataInstance.readWord()
    boundEntry.numberOfModuleForwarderRefs.value = readDataInstance.readWord()

    numberOfForwarderRefsEntries = boundEntry.numberOfModuleForwarderRefs.value
    if numberOfForwarderRefsEntries:
        bytesToRead = numberOfForwarderRefsEntries * ImageBoundForwarderRefEntry().sizeof()
        rd = utils.ReadData(readDataInstance.read(bytesToRead))
        boundEntry.forwarderRefsList = ImageBoundForwarderRef.parse(rd, numberOfForwarderRefsEntries)

    return boundEntry
[ "def", "parse", "(", "readDataInstance", ")", ":", "boundEntry", "=", "ImageBoundImportDescriptorEntry", "(", ")", "boundEntry", ".", "timeDateStamp", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "boundEntry", ".", "offsetModuleName", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "boundEntry", ".", "numberOfModuleForwarderRefs", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "numberOfForwarderRefsEntries", "=", "boundEntry", ".", "numberOfModuleForwarderRefs", ".", "value", "if", "numberOfForwarderRefsEntries", ":", "bytesToRead", "=", "numberOfForwarderRefsEntries", "*", "ImageBoundForwarderRefEntry", "(", ")", ".", "sizeof", "(", ")", "rd", "=", "utils", ".", "ReadData", "(", "readDataInstance", ".", "read", "(", "bytesToRead", ")", ")", "boundEntry", ".", "forwarderRefsList", "=", "ImageBoundForwarderRef", ".", "parse", "(", "rd", ",", "numberOfForwarderRefsEntries", ")", "return", "boundEntry" ]
Returns a new L{ImageBoundImportDescriptorEntry} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object containing data to create a new L{ImageBoundImportDescriptorEntry}. @rtype: L{ImageBoundImportDescriptorEntry} @return: A new L{ImageBoundImportDescriptorEntry} object.
[ "Returns", "a", "new", "L", "{", "ImageBoundImportDescriptorEntry", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L215-L236
crackinglandia/pype32
pype32/directories.py
TLSDirectory.parse
def parse(readDataInstance):
    """
    Returns a new L{TLSDirectory} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object containing data to create a new L{TLSDirectory} object.

    @rtype: L{TLSDirectory}
    @return: A new L{TLSDirectory} object.
    """
    tlsDir = TLSDirectory()

    tlsDir.startAddressOfRawData.value = readDataInstance.readDword()
    tlsDir.endAddressOfRawData.value = readDataInstance.readDword()
    tlsDir.addressOfIndex.value = readDataInstance.readDword()
    tlsDir.addressOfCallbacks.value = readDataInstance.readDword()
    tlsDir.sizeOfZeroFill.value = readDataInstance.readDword()
    tlsDir.characteristics.value = readDataInstance.readDword()

    return tlsDir
python
def parse(readDataInstance):
    """
    Returns a new L{TLSDirectory} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object containing data to create a new L{TLSDirectory} object.

    @rtype: L{TLSDirectory}
    @return: A new L{TLSDirectory} object.
    """
    tlsDir = TLSDirectory()

    tlsDir.startAddressOfRawData.value = readDataInstance.readDword()
    tlsDir.endAddressOfRawData.value = readDataInstance.readDword()
    tlsDir.addressOfIndex.value = readDataInstance.readDword()
    tlsDir.addressOfCallbacks.value = readDataInstance.readDword()
    tlsDir.sizeOfZeroFill.value = readDataInstance.readDword()
    tlsDir.characteristics.value = readDataInstance.readDword()

    return tlsDir
[ "def", "parse", "(", "readDataInstance", ")", ":", "tlsDir", "=", "TLSDirectory", "(", ")", "tlsDir", ".", "startAddressOfRawData", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "tlsDir", ".", "endAddressOfRawData", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "tlsDir", ".", "addressOfIndex", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "tlsDir", ".", "addressOfCallbacks", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "tlsDir", ".", "sizeOfZeroFill", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "tlsDir", ".", "characteristics", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "return", "tlsDir" ]
Returns a new L{TLSDirectory} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object containing data to create a new L{TLSDirectory} object. @rtype: L{TLSDirectory} @return: A new L{TLSDirectory} object.
[ "Returns", "a", "new", "L", "{", "TLSDirectory", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L266-L284
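The field order read above corresponds to the 32-bit IMAGE_TLS_DIRECTORY: six consecutive little-endian DWORDs. A minimal struct-based equivalent (helper and namedtuple names are invented for illustration) looks like this:

import struct
from collections import namedtuple

TLSDirectory32 = namedtuple('TLSDirectory32',
    'startAddressOfRawData endAddressOfRawData addressOfIndex '
    'addressOfCallbacks sizeOfZeroFill characteristics')

def parse_tls_directory32(data, offset=0):
    """Decode IMAGE_TLS_DIRECTORY32: six little-endian DWORDs (24 bytes)."""
    return TLSDirectory32(*struct.unpack_from('<6I', data, offset))

blob = struct.pack('<6I', 0x401000, 0x401100, 0x403000, 0x403010, 0, 0)
print(hex(parse_tls_directory32(blob).addressOfCallbacks))  # -> 0x403010

The 64-bit variant parsed two records below differs only in widening the first four fields to QWORDs, i.e. a '<4Q2I' format under the same assumptions.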
crackinglandia/pype32
pype32/directories.py
TLSDirectory64.parse
def parse(readDataInstance): """ Returns a new L{TLSDirectory64} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object containing data to create a new L{TLSDirectory64} object. @rtype: L{TLSDirectory64} @return: A new L{TLSDirectory64} object. """ tlsDir = TLSDirectory64() tlsDir.startAddressOfRawData.value = readDataInstance.readQword() tlsDir.endAddressOfRawData.value = readDataInstance.readQword() tlsDir.addressOfIndex.value = readDataInstance.readQword() tlsDir.addressOfCallbacks.value = readDataInstance.readQword() tlsDir.sizeOfZeroFill.value = readDataInstance.readDword() tlsDir.characteristics.value = readDataInstance.readDword() return tlsDir
python
def parse(readDataInstance): """ Returns a new L{TLSDirectory64} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object containing data to create a new L{TLSDirectory64} object. @rtype: L{TLSDirectory64} @return: A new L{TLSDirectory64} object. """ tlsDir = TLSDirectory64() tlsDir.startAddressOfRawData.value = readDataInstance.readQword() tlsDir.endAddressOfRawData.value = readDataInstance.readQword() tlsDir.addressOfIndex.value = readDataInstance.readQword() tlsDir.addressOfCallbacks.value = readDataInstance.readQword() tlsDir.sizeOfZeroFill.value = readDataInstance.readDword() tlsDir.characteristics.value = readDataInstance.readDword() return tlsDir
[ "def", "parse", "(", "readDataInstance", ")", ":", "tlsDir", "=", "TLSDirectory64", "(", ")", "tlsDir", ".", "startAddressOfRawData", ".", "value", "=", "readDataInstance", ".", "readQword", "(", ")", "tlsDir", ".", "endAddressOfRawData", ".", "value", "=", "readDataInstance", ".", "readQword", "(", ")", "tlsDir", ".", "addressOfIndex", ".", "value", "=", "readDataInstance", ".", "readQword", "(", ")", "tlsDir", ".", "addressOfCallbacks", ".", "value", "=", "readDataInstance", ".", "readQword", "(", ")", "tlsDir", ".", "sizeOfZeroFill", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "tlsDir", ".", "characteristics", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "return", "tlsDir" ]
Returns a new L{TLSDirectory64} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object containing data to create a new L{TLSDirectory64} object. @rtype: L{TLSDirectory64} @return: A new L{TLSDirectory64} object.
[ "Returns", "a", "new", "L", "{", "TLSDirectory64", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L312-L330
crackinglandia/pype32
pype32/directories.py
ImageLoadConfigDirectory64.parse
def parse(readDataInstance):
    """
    Returns a new L{ImageLoadConfigDirectory64} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object containing data to create a new L{ImageLoadConfigDirectory64} object.

    @rtype: L{ImageLoadConfigDirectory64}
    @return: A new L{ImageLoadConfigDirectory64} object.
    """
    configDir = ImageLoadConfigDirectory64()

    configDir.size.value = readDataInstance.readDword()
    configDir.timeDateStamp.value = readDataInstance.readDword()
    configDir.majorVersion.value = readDataInstance.readWord()
    configDir.minorVersion.value = readDataInstance.readWord()
    configDir.globalFlagsClear.value = readDataInstance.readDword()
    configDir.globalFlagsSet.value = readDataInstance.readDword()
    configDir.criticalSectionDefaultTimeout.value = readDataInstance.readDword()
    configDir.deCommitFreeBlockThreshold.value = readDataInstance.readQword()
    configDir.deCommitTotalFreeThreshold.value = readDataInstance.readQword()
    configDir.lockPrefixTable.value = readDataInstance.readQword()
    configDir.maximumAllocationSize.value = readDataInstance.readQword()
    configDir.virtualMemoryThreshold.value = readDataInstance.readQword()
    configDir.processAffinityMask.value = readDataInstance.readQword()
    configDir.processHeapFlags.value = readDataInstance.readDword()
    configDir.cdsVersion.value = readDataInstance.readWord()
    configDir.reserved1.value = readDataInstance.readWord()
    configDir.editList.value = readDataInstance.readQword()
    configDir.securityCookie.value = readDataInstance.readQword()
    configDir.SEHandlerTable.value = readDataInstance.readQword()
    configDir.SEHandlerCount.value = readDataInstance.readQword()

    # Fields for Control Flow Guard
    configDir.GuardCFCheckFunctionPointer.value = readDataInstance.readQword() # VA
    configDir.Reserved2.value = readDataInstance.readQword()
    configDir.GuardCFFunctionTable.value = readDataInstance.readQword() # VA
    configDir.GuardCFFunctionCount.value = readDataInstance.readQword()
    configDir.GuardFlags.value = readDataInstance.readDword() # GuardFlags is a DWORD in winnt.h, not a QWORD

    return configDir
python
def parse(readDataInstance):
    """
    Returns a new L{ImageLoadConfigDirectory64} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object containing data to create a new L{ImageLoadConfigDirectory64} object.

    @rtype: L{ImageLoadConfigDirectory64}
    @return: A new L{ImageLoadConfigDirectory64} object.
    """
    configDir = ImageLoadConfigDirectory64()

    configDir.size.value = readDataInstance.readDword()
    configDir.timeDateStamp.value = readDataInstance.readDword()
    configDir.majorVersion.value = readDataInstance.readWord()
    configDir.minorVersion.value = readDataInstance.readWord()
    configDir.globalFlagsClear.value = readDataInstance.readDword()
    configDir.globalFlagsSet.value = readDataInstance.readDword()
    configDir.criticalSectionDefaultTimeout.value = readDataInstance.readDword()
    configDir.deCommitFreeBlockThreshold.value = readDataInstance.readQword()
    configDir.deCommitTotalFreeThreshold.value = readDataInstance.readQword()
    configDir.lockPrefixTable.value = readDataInstance.readQword()
    configDir.maximumAllocationSize.value = readDataInstance.readQword()
    configDir.virtualMemoryThreshold.value = readDataInstance.readQword()
    configDir.processAffinityMask.value = readDataInstance.readQword()
    configDir.processHeapFlags.value = readDataInstance.readDword()
    configDir.cdsVersion.value = readDataInstance.readWord()
    configDir.reserved1.value = readDataInstance.readWord()
    configDir.editList.value = readDataInstance.readQword()
    configDir.securityCookie.value = readDataInstance.readQword()
    configDir.SEHandlerTable.value = readDataInstance.readQword()
    configDir.SEHandlerCount.value = readDataInstance.readQword()

    # Fields for Control Flow Guard
    configDir.GuardCFCheckFunctionPointer.value = readDataInstance.readQword() # VA
    configDir.Reserved2.value = readDataInstance.readQword()
    configDir.GuardCFFunctionTable.value = readDataInstance.readQword() # VA
    configDir.GuardCFFunctionCount.value = readDataInstance.readQword()
    configDir.GuardFlags.value = readDataInstance.readDword() # GuardFlags is a DWORD in winnt.h, not a QWORD

    return configDir
[ "def", "parse", "(", "readDataInstance", ")", ":", "configDir", "=", "ImageLoadConfigDirectory64", "(", ")", "configDir", ".", "size", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "configDir", ".", "timeDateStamp", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "configDir", ".", "majorVersion", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "configDir", ".", "minorVersion", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "configDir", ".", "globalFlagsClear", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "configDir", ".", "globalFlagsSet", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "configDir", ".", "criticalSectionDefaultTimeout", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "configDir", ".", "deCommitFreeBlockThreshold", ".", "value", "=", "readDataInstance", ".", "readQword", "(", ")", "configDir", ".", "deCommitTotalFreeThreshold", ".", "value", "=", "readDataInstance", ".", "readQword", "(", ")", "configDir", ".", "lockPrefixTable", ".", "value", "=", "readDataInstance", ".", "readQword", "(", ")", "configDir", ".", "maximumAllocationSize", ".", "value", "=", "readDataInstance", ".", "readQword", "(", ")", "configDir", ".", "virtualMemoryThreshold", ".", "value", "=", "readDataInstance", ".", "readQword", "(", ")", "configDir", ".", "processAffinityMask", ".", "value", "=", "readDataInstance", ".", "readQword", "(", ")", "configDir", ".", "processHeapFlags", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "configDir", ".", "cdsVersion", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "configDir", ".", "reserved1", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "configDir", ".", "editList", ".", "value", "=", "readDataInstance", ".", "readQword", "(", ")", "configDir", ".", "securityCookie", ".", "value", "=", "readDataInstance", ".", "readQword", "(", ")", "configDir", ".", "SEHandlerTable", ".", "value", "=", "readDataInstance", ".", "readQword", "(", ")", "configDir", ".", "SEHandlerCount", ".", "value", "=", "readDataInstance", ".", "readQword", "(", ")", "# Fields for Control Flow Guard", "configDir", ".", "GuardCFCheckFunctionPointer", ".", "value", "=", "readDataInstance", ".", "readQword", "(", ")", "# VA", "configDir", ".", "Reserved2", ".", "value", "=", "readDataInstance", ".", "readQword", "(", ")", "configDir", ".", "GuardCFFunctionTable", ".", "value", "=", "readDataInstance", ".", "readQword", "(", ")", "# VA", "configDir", ".", "GuardCFFunctionCount", ".", "value", "=", "readDataInstance", ".", "readQword", "(", ")", "configDir", ".", "GuardFlags", ".", "value", "=", "readDataInstance", ".", "readQword", "(", ")", "return", "configDir" ]
Returns a new L{ImageLoadConfigDirectory64} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object containing data to create a new L{ImageLoadConfigDirectory64} object. @rtype: L{ImageLoadConfigDirectory64} @return: A new L{ImageLoadConfigDirectory64} object.
[ "Returns", "a", "new", "L", "{", "ImageLoadConfigDirectory64", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L473-L512
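Per my reading of winnt.h, the field the method stores as Reserved2 is GuardCFDispatchFunctionPointer, and GuardFlags is a 32-bit field (hence the readDword above). A hedged struct sketch of just the Control Flow Guard tail of IMAGE_LOAD_CONFIG_DIRECTORY64, with an invented helper name and made-up test values:

import struct

def parse_cfg_tail64(data, offset=0):
    """Decode the CFG tail of IMAGE_LOAD_CONFIG_DIRECTORY64: four little-endian
    ULONGLONG pointers/counters followed by a DWORD of guard flags (36 bytes)."""
    (check_fn_ptr, dispatch_fn_ptr, fn_table, fn_count,
     guard_flags) = struct.unpack_from('<4QI', data, offset)
    return {'GuardCFCheckFunctionPointer': check_fn_ptr,
            'GuardCFDispatchFunctionPointer': dispatch_fn_ptr,  # 'Reserved2' above
            'GuardCFFunctionTable': fn_table,
            'GuardCFFunctionCount': fn_count,
            'GuardFlags': guard_flags}

blob = struct.pack('<4QI', 0x140001000, 0, 0x140002000, 3, 0x10017500)
print(parse_cfg_tail64(blob)['GuardFlags'] == 0x10017500)  # -> True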
crackinglandia/pype32
pype32/directories.py
ImageBaseRelocationEntry.parse
def parse(readDataInstance):
    """
    Returns a new L{ImageBaseRelocationEntry} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object with data to parse as a L{ImageBaseRelocationEntry} object.

    @rtype: L{ImageBaseRelocationEntry}
    @return: A new L{ImageBaseRelocationEntry} object.
    """
    reloc = ImageBaseRelocationEntry()

    reloc.virtualAddress.value = readDataInstance.readDword()
    reloc.sizeOfBlock.value = readDataInstance.readDword()
    toRead = (reloc.sizeOfBlock.value - 8) // len(datatypes.WORD(0))  # integer division: each entry is one WORD
    reloc.items = datatypes.Array.parse(readDataInstance, datatypes.TYPE_WORD, toRead)

    return reloc
python
def parse(readDataInstance):
    """
    Returns a new L{ImageBaseRelocationEntry} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object with data to parse as a L{ImageBaseRelocationEntry} object.

    @rtype: L{ImageBaseRelocationEntry}
    @return: A new L{ImageBaseRelocationEntry} object.
    """
    reloc = ImageBaseRelocationEntry()

    reloc.virtualAddress.value = readDataInstance.readDword()
    reloc.sizeOfBlock.value = readDataInstance.readDword()
    toRead = (reloc.sizeOfBlock.value - 8) // len(datatypes.WORD(0))  # integer division: each entry is one WORD
    reloc.items = datatypes.Array.parse(readDataInstance, datatypes.TYPE_WORD, toRead)

    return reloc
[ "def", "parse", "(", "readDataInstance", ")", ":", "reloc", "=", "ImageBaseRelocationEntry", "(", ")", "reloc", ".", "virtualAddress", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "reloc", ".", "sizeOfBlock", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "toRead", "=", "(", "reloc", ".", "sizeOfBlock", ".", "value", "-", "8", ")", "/", "len", "(", "datatypes", ".", "WORD", "(", "0", ")", ")", "reloc", ".", "items", "=", "datatypes", ".", "Array", ".", "parse", "(", "readDataInstance", ",", "datatypes", ".", "TYPE_WORD", ",", "toRead", ")", "return", "reloc" ]
Returns a new L{ImageBaseRelocationEntry} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to parse as a L{ImageBaseRelocationEntry} object. @rtype: L{ImageBaseRelocationEntry} @return: A new L{ImageBaseRelocationEntry} object.
[ "Returns", "a", "new", "L", "{", "ImageBaseRelocationEntry", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L537-L552
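The entry-count arithmetic above ((sizeOfBlock - 8) divided by the WORD size, now written with // so the integer division is explicit under both Python 2 and 3) reflects the base-relocation block format: an 8-byte header followed by WORD entries, each packing a 4-bit relocation type and a 12-bit page offset. The method above leaves that per-entry decode to callers; here is a self-contained sketch of it (helper name invented, toy values made up):

import struct

IMAGE_REL_BASED_HIGHLOW = 3

def parse_reloc_block(data, offset=0):
    """Decode one base-relocation block: an 8-byte header followed by
    (sizeOfBlock - 8) // 2 WORD entries of (type << 12 | page_offset)."""
    page_rva, size_of_block = struct.unpack_from('<II', data, offset)
    n = (size_of_block - 8) // 2
    words = struct.unpack_from('<%dH' % n, data, offset + 8)
    return page_rva, [(w >> 12, w & 0x0FFF) for w in words]

entries = [(IMAGE_REL_BASED_HIGHLOW << 12) | 0x010, (IMAGE_REL_BASED_HIGHLOW << 12) | 0x1F8]
blob = struct.pack('<II2H', 0x1000, 8 + 2 * len(entries), *entries)
print(parse_reloc_block(blob))  # -> (4096, [(3, 16), (3, 504)])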
crackinglandia/pype32
pype32/directories.py
ImageDebugDirectory.parse
def parse(readDataInstance): """ Returns a new L{ImageDebugDirectory} object. @type readDataInstance: L{ReadData} @param readDataInstance: A new L{ReadData} object with data to be parsed as a L{ImageDebugDirectory} object. @rtype: L{ImageDebugDirectory} @return: A new L{ImageDebugDirectory} object. """ dbgDir = ImageDebugDirectory() dbgDir.characteristics.value = readDataInstance.readDword() dbgDir.timeDateStamp.value = readDataInstance.readDword() dbgDir.majorVersion.value = readDataInstance.readWord() dbgDir.minorVersion.value = readDataInstance.readWord() dbgDir.type.value = readDataInstance.readDword() dbgDir.sizeOfData.value = readDataInstance.readDword() dbgDir.addressOfData.value = readDataInstance.readDword() dbgDir.pointerToRawData.value = readDataInstance.readDword() return dbgDir
python
def parse(readDataInstance): """ Returns a new L{ImageDebugDirectory} object. @type readDataInstance: L{ReadData} @param readDataInstance: A new L{ReadData} object with data to be parsed as a L{ImageDebugDirectory} object. @rtype: L{ImageDebugDirectory} @return: A new L{ImageDebugDirectory} object. """ dbgDir = ImageDebugDirectory() dbgDir.characteristics.value = readDataInstance.readDword() dbgDir.timeDateStamp.value = readDataInstance.readDword() dbgDir.majorVersion.value = readDataInstance.readWord() dbgDir.minorVersion.value = readDataInstance.readWord() dbgDir.type.value = readDataInstance.readDword() dbgDir.sizeOfData.value = readDataInstance.readDword() dbgDir.addressOfData.value = readDataInstance.readDword() dbgDir.pointerToRawData.value = readDataInstance.readDword() return dbgDir
[ "def", "parse", "(", "readDataInstance", ")", ":", "dbgDir", "=", "ImageDebugDirectory", "(", ")", "dbgDir", ".", "characteristics", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "dbgDir", ".", "timeDateStamp", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "dbgDir", ".", "majorVersion", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "dbgDir", ".", "minorVersion", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "dbgDir", ".", "type", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "dbgDir", ".", "sizeOfData", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "dbgDir", ".", "addressOfData", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "dbgDir", ".", "pointerToRawData", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "return", "dbgDir" ]
Returns a new L{ImageDebugDirectory} object. @type readDataInstance: L{ReadData} @param readDataInstance: A new L{ReadData} object with data to be parsed as a L{ImageDebugDirectory} object. @rtype: L{ImageDebugDirectory} @return: A new L{ImageDebugDirectory} object.
[ "Returns", "a", "new", "L", "{", "ImageDebugDirectory", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L587-L608
crackinglandia/pype32
pype32/directories.py
ImageDebugDirectories.parse
def parse(readDataInstance, nDebugEntries): """ Returns a new L{ImageDebugDirectories} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{ImageDebugDirectories} object. @type nDebugEntries: int @param nDebugEntries: Number of L{ImageDebugDirectory} objects in the C{readDataInstance} object. @rtype: L{ImageDebugDirectories} @return: A new L{ImageDebugDirectories} object. @raise DataLengthException: If not enough data to read in the C{readDataInstance} object. """ dbgEntries = ImageDebugDirectories() dataLength = len(readDataInstance) toRead = nDebugEntries * consts.SIZEOF_IMAGE_DEBUG_ENTRY32 if dataLength >= toRead: for i in range(nDebugEntries): dbgEntry = ImageDebugDirectory.parse(readDataInstance) dbgEntries.append(dbgEntry) else: raise excep.DataLengthException("Not enough bytes to read.") return dbgEntries
python
def parse(readDataInstance, nDebugEntries): """ Returns a new L{ImageDebugDirectories} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{ImageDebugDirectories} object. @type nDebugEntries: int @param nDebugEntries: Number of L{ImageDebugDirectory} objects in the C{readDataInstance} object. @rtype: L{ImageDebugDirectories} @return: A new L{ImageDebugDirectories} object. @raise DataLengthException: If not enough data to read in the C{readDataInstance} object. """ dbgEntries = ImageDebugDirectories() dataLength = len(readDataInstance) toRead = nDebugEntries * consts.SIZEOF_IMAGE_DEBUG_ENTRY32 if dataLength >= toRead: for i in range(nDebugEntries): dbgEntry = ImageDebugDirectory.parse(readDataInstance) dbgEntries.append(dbgEntry) else: raise excep.DataLengthException("Not enough bytes to read.") return dbgEntries
[ "def", "parse", "(", "readDataInstance", ",", "nDebugEntries", ")", ":", "dbgEntries", "=", "ImageDebugDirectories", "(", ")", "dataLength", "=", "len", "(", "readDataInstance", ")", "toRead", "=", "nDebugEntries", "*", "consts", ".", "SIZEOF_IMAGE_DEBUG_ENTRY32", "if", "dataLength", ">=", "toRead", ":", "for", "i", "in", "range", "(", "nDebugEntries", ")", ":", "dbgEntry", "=", "ImageDebugDirectory", ".", "parse", "(", "readDataInstance", ")", "dbgEntries", ".", "append", "(", "dbgEntry", ")", "else", ":", "raise", "excep", ".", "DataLengthException", "(", "\"Not enough bytes to read.\"", ")", "return", "dbgEntries" ]
Returns a new L{ImageDebugDirectories} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{ImageDebugDirectories} object. @type nDebugEntries: int @param nDebugEntries: Number of L{ImageDebugDirectory} objects in the C{readDataInstance} object. @rtype: L{ImageDebugDirectories} @return: A new L{ImageDebugDirectories} object. @raise DataLengthException: If not enough data to read in the C{readDataInstance} object.
[ "Returns", "a", "new", "L", "{", "ImageDebugDirectories", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L629-L655
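The two debug-directory records above follow a pattern shared by several parsers in this module: check that the buffer holds at least n * entry_size bytes, then decode n fixed-size records in sequence. A struct-based sketch of the same pattern for IMAGE_DEBUG_DIRECTORY (28 bytes: two DWORDs, two WORDs, four DWORDs), with an invented helper name and a ValueError standing in for pype32's DataLengthException:

import struct

DEBUG_ENTRY_FMT = '<IIHHIIII'   # IMAGE_DEBUG_DIRECTORY, 28 bytes
DEBUG_ENTRY_SIZE = struct.calcsize(DEBUG_ENTRY_FMT)

def parse_debug_directories(data, n_entries):
    """Decode n_entries IMAGE_DEBUG_DIRECTORY records after a length pre-check."""
    if len(data) < n_entries * DEBUG_ENTRY_SIZE:
        raise ValueError('Not enough bytes to read.')
    names = ('characteristics', 'timeDateStamp', 'majorVersion', 'minorVersion',
             'type', 'sizeOfData', 'addressOfData', 'pointerToRawData')
    return [dict(zip(names, struct.unpack_from(DEBUG_ENTRY_FMT, data, i * DEBUG_ENTRY_SIZE)))
            for i in range(n_entries)]

blob = struct.pack(DEBUG_ENTRY_FMT, 0, 0x5C0FFEE5, 0, 0, 2, 0x54, 0x2000, 0x1800)
print(parse_debug_directories(blob, 1)[0]['type'])  # -> 2 (IMAGE_DEBUG_TYPE_CODEVIEW)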
crackinglandia/pype32
pype32/directories.py
ImageImportDescriptorEntry.parse
def parse(readDataInstance): """ Returns a new L{ImageImportDescriptorEntry} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{ImageImportDescriptorEntry}. @rtype: L{ImageImportDescriptorEntry} @return: A new L{ImageImportDescriptorEntry} object. """ iid = ImageImportDescriptorEntry() iid.originalFirstThunk.value = readDataInstance.readDword() iid.timeDateStamp.value = readDataInstance.readDword() iid.forwarderChain.value = readDataInstance.readDword() iid.name.value = readDataInstance.readDword() iid.firstThunk.value = readDataInstance.readDword() return iid
python
def parse(readDataInstance): """ Returns a new L{ImageImportDescriptorEntry} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{ImageImportDescriptorEntry}. @rtype: L{ImageImportDescriptorEntry} @return: A new L{ImageImportDescriptorEntry} object. """ iid = ImageImportDescriptorEntry() iid.originalFirstThunk.value = readDataInstance.readDword() iid.timeDateStamp.value = readDataInstance.readDword() iid.forwarderChain.value = readDataInstance.readDword() iid.name.value = readDataInstance.readDword() iid.firstThunk.value = readDataInstance.readDword() return iid
[ "def", "parse", "(", "readDataInstance", ")", ":", "iid", "=", "ImageImportDescriptorEntry", "(", ")", "iid", ".", "originalFirstThunk", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "iid", ".", "timeDateStamp", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "iid", ".", "forwarderChain", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "iid", ".", "name", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "iid", ".", "firstThunk", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "return", "iid" ]
Returns a new L{ImageImportDescriptorEntry} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{ImageImportDescriptorEntry}. @rtype: L{ImageImportDescriptorEntry} @return: A new L{ImageImportDescriptorEntry} object.
[ "Returns", "a", "new", "L", "{", "ImageImportDescriptorEntry", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L702-L718
crackinglandia/pype32
pype32/directories.py
ImageImportDescriptor.parse
def parse(readDataInstance, nEntries): """ Returns a new L{ImageImportDescriptor} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{ImageImportDescriptor} object. @type nEntries: int @param nEntries: The number of L{ImageImportDescriptorEntry} objects in the C{readDataInstance} object. @rtype: L{ImageImportDescriptor} @return: A new L{ImageImportDescriptor} object. @raise DataLengthException: If not enough data to read. """ importEntries = ImageImportDescriptor() dataLength = len(readDataInstance) toRead = nEntries * consts.SIZEOF_IMAGE_IMPORT_ENTRY32 if dataLength >= toRead: for i in range(nEntries): importEntry = ImageImportDescriptorEntry.parse(readDataInstance) importEntries.append(importEntry) else: raise excep.DataLengthException("Not enough bytes to read.") return importEntries
python
def parse(readDataInstance, nEntries): """ Returns a new L{ImageImportDescriptor} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{ImageImportDescriptor} object. @type nEntries: int @param nEntries: The number of L{ImageImportDescriptorEntry} objects in the C{readDataInstance} object. @rtype: L{ImageImportDescriptor} @return: A new L{ImageImportDescriptor} object. @raise DataLengthException: If not enough data to read. """ importEntries = ImageImportDescriptor() dataLength = len(readDataInstance) toRead = nEntries * consts.SIZEOF_IMAGE_IMPORT_ENTRY32 if dataLength >= toRead: for i in range(nEntries): importEntry = ImageImportDescriptorEntry.parse(readDataInstance) importEntries.append(importEntry) else: raise excep.DataLengthException("Not enough bytes to read.") return importEntries
[ "def", "parse", "(", "readDataInstance", ",", "nEntries", ")", ":", "importEntries", "=", "ImageImportDescriptor", "(", ")", "dataLength", "=", "len", "(", "readDataInstance", ")", "toRead", "=", "nEntries", "*", "consts", ".", "SIZEOF_IMAGE_IMPORT_ENTRY32", "if", "dataLength", ">=", "toRead", ":", "for", "i", "in", "range", "(", "nEntries", ")", ":", "importEntry", "=", "ImageImportDescriptorEntry", ".", "parse", "(", "readDataInstance", ")", "importEntries", ".", "append", "(", "importEntry", ")", "else", ":", "raise", "excep", ".", "DataLengthException", "(", "\"Not enough bytes to read.\"", ")", "return", "importEntries" ]
Returns a new L{ImageImportDescriptor} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{ImageImportDescriptor} object. @type nEntries: int @param nEntries: The number of L{ImageImportDescriptorEntry} objects in the C{readDataInstance} object. @rtype: L{ImageImportDescriptor} @return: A new L{ImageImportDescriptor} object. @raise DataLengthException: If not enough data to read.
[ "Returns", "a", "new", "L", "{", "ImageImportDescriptor", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L743-L769
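An IMAGE_IMPORT_DESCRIPTOR is five consecutive DWORDs, so the array parser above reads 20 bytes per entry. A compact struct equivalent (helper name invented, toy RVAs made up) is sketched below; conventionally originalFirstThunk points at the import lookup table and firstThunk at the import address table that the loader overwrites at bind time.

import struct

IMPORT_DESC_FMT = '<5I'   # IMAGE_IMPORT_DESCRIPTOR: 5 DWORDs, 20 bytes

def parse_import_descriptors(data, n_entries):
    """Decode n_entries IMAGE_IMPORT_DESCRIPTOR records after a length pre-check."""
    names = ('originalFirstThunk', 'timeDateStamp', 'forwarderChain', 'name', 'firstThunk')
    size = struct.calcsize(IMPORT_DESC_FMT)
    if len(data) < n_entries * size:
        raise ValueError('Not enough bytes to read.')
    return [dict(zip(names, struct.unpack_from(IMPORT_DESC_FMT, data, i * size)))
            for i in range(n_entries)]

blob = struct.pack('<5I', 0x5000, 0, 0, 0x5100, 0x5200)
desc = parse_import_descriptors(blob, 1)[0]
print(hex(desc['name']), hex(desc['firstThunk']))  # -> 0x5100 0x5200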
crackinglandia/pype32
pype32/directories.py
ExportTableEntry.parse
def parse(readDataInstance): """ Returns a new L{ExportTableEntry} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{ExportTableEntry} object. @rtype: L{ExportTableEntry} @return: A new L{ExportTableEntry} object. """ exportEntry = ExportTableEntry() exportEntry.functionRva.value = readDataInstance.readDword() exportEntry.nameOrdinal.value = readDataInstance.readWord() exportEntry.nameRva.value = readDataInstance.readDword() exportEntry.name.value = readDataInstance.readString() return exportEntry
python
def parse(readDataInstance): """ Returns a new L{ExportTableEntry} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{ExportTableEntry} object. @rtype: L{ExportTableEntry} @return: A new L{ExportTableEntry} object. """ exportEntry = ExportTableEntry() exportEntry.functionRva.value = readDataInstance.readDword() exportEntry.nameOrdinal.value = readDataInstance.readWord() exportEntry.nameRva.value = readDataInstance.readDword() exportEntry.name.value = readDataInstance.readString() return exportEntry
[ "def", "parse", "(", "readDataInstance", ")", ":", "exportEntry", "=", "ExportTableEntry", "(", ")", "exportEntry", ".", "functionRva", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "exportEntry", ".", "nameOrdinal", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "exportEntry", ".", "nameRva", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "exportEntry", ".", "name", ".", "value", "=", "readDataInstance", ".", "readString", "(", ")", "return", "exportEntry" ]
Returns a new L{ExportTableEntry} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{ExportTableEntry} object. @rtype: L{ExportTableEntry} @return: A new L{ExportTableEntry} object.
[ "Returns", "a", "new", "L", "{", "ExportTableEntry", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L850-L866
crackinglandia/pype32
pype32/directories.py
ImageExportTable.parse
def parse(readDataInstance): """ Returns a new L{ImageExportTable} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{ImageExportTable} object. @rtype: L{ImageExportTable} @return: A new L{ImageExportTable} object. """ et = ImageExportTable() et.characteristics.value = readDataInstance.readDword() et.timeDateStamp.value = readDataInstance.readDword() et.majorVersion.value = readDataInstance.readWord() et.minorVersion.value = readDataInstance.readWord() et.name.value = readDataInstance.readDword() et.base.value = readDataInstance.readDword() et.numberOfFunctions.value = readDataInstance.readDword() et.numberOfNames.value = readDataInstance.readDword() et.addressOfFunctions.value = readDataInstance.readDword() et.addressOfNames.value = readDataInstance.readDword() et.addressOfNameOrdinals.value = readDataInstance.readDword() return et
python
def parse(readDataInstance): """ Returns a new L{ImageExportTable} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{ImageExportTable} object. @rtype: L{ImageExportTable} @return: A new L{ImageExportTable} object. """ et = ImageExportTable() et.characteristics.value = readDataInstance.readDword() et.timeDateStamp.value = readDataInstance.readDword() et.majorVersion.value = readDataInstance.readWord() et.minorVersion.value = readDataInstance.readWord() et.name.value = readDataInstance.readDword() et.base.value = readDataInstance.readDword() et.numberOfFunctions.value = readDataInstance.readDword() et.numberOfNames.value = readDataInstance.readDword() et.addressOfFunctions.value = readDataInstance.readDword() et.addressOfNames.value = readDataInstance.readDword() et.addressOfNameOrdinals.value = readDataInstance.readDword() return et
[ "def", "parse", "(", "readDataInstance", ")", ":", "et", "=", "ImageExportTable", "(", ")", "et", ".", "characteristics", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "et", ".", "timeDateStamp", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "et", ".", "majorVersion", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "et", ".", "minorVersion", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "et", ".", "name", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "et", ".", "base", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "et", ".", "numberOfFunctions", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "et", ".", "numberOfNames", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "et", ".", "addressOfFunctions", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "et", ".", "addressOfNames", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "et", ".", "addressOfNameOrdinals", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "return", "et" ]
Returns a new L{ImageExportTable} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{ImageExportTable} object. @rtype: L{ImageExportTable} @return: A new L{ImageExportTable} object.
[ "Returns", "a", "new", "L", "{", "ImageExportTable", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L902-L925
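The eleven fields read above match the IMAGE_EXPORT_DIRECTORY layout: two DWORDs, two WORDs, then seven DWORDs (40 bytes). A namedtuple-based sketch under those assumptions (helper and tuple names invented, toy RVAs made up):

import struct
from collections import namedtuple

ExportDirectory = namedtuple('ExportDirectory',
    'characteristics timeDateStamp majorVersion minorVersion name base '
    'numberOfFunctions numberOfNames addressOfFunctions addressOfNames '
    'addressOfNameOrdinals')

EXPORT_DIR_FMT = '<2I2H7I'   # IMAGE_EXPORT_DIRECTORY, 40 bytes

def parse_export_directory(data, offset=0):
    return ExportDirectory(*struct.unpack_from(EXPORT_DIR_FMT, data, offset))

blob = struct.pack(EXPORT_DIR_FMT, 0, 0, 0, 0, 0x3000, 1, 4, 3, 0x3028, 0x3038, 0x3044)
ed = parse_export_directory(blob)
print(ed.numberOfFunctions, ed.numberOfNames)  # -> 4 3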
crackinglandia/pype32
pype32/directories.py
NETDirectory.parse
def parse(readDataInstance): """ Returns a new L{NETDirectory} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NETDirectory} object. @rtype: L{NETDirectory} @return: A new L{NETDirectory} object. """ nd = NETDirectory() nd.directory = NetDirectory.parse(readDataInstance) nd.netMetaDataHeader = NetMetaDataHeader.parse(readDataInstance) nd.netMetaDataStreams = NetMetaDataStreams.parse(readDataInstance) return nd
python
def parse(readDataInstance):
        """
        Returns a new L{NETDirectory} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NETDirectory} object.

        @rtype: L{NETDirectory}
        @return: A new L{NETDirectory} object.
        """
        nd = NETDirectory()

        nd.directory = NetDirectory.parse(readDataInstance)
        nd.netMetaDataHeader = NetMetaDataHeader.parse(readDataInstance)
        nd.netMetaDataStreams = NetMetaDataStreams.parse(readDataInstance)

        return nd
[ "def", "parse", "(", "readDataInstance", ")", ":", "nd", "=", "NETDirectory", "(", ")", "nd", ".", "directory", "=", "NetDirectory", ".", "parse", "(", "readDataInstance", ")", "nd", ".", "netMetaDataHeader", "=", "NetMetaDataHeader", ".", "parse", "(", "readDataInstance", ")", "nd", ".", "netMetaDataStreams", "=", "NetMetaDataStreams", ".", "parse", "(", "readDataInstance", ")", "return", "nd" ]
Returns a new L{NETDirectory} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NETDirectory} object.

        @rtype: L{NETDirectory}
        @return: A new L{NETDirectory} object.
[ "Returns", "a", "new", "L", "{", "NETDirectory", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L945-L960
crackinglandia/pype32
pype32/directories.py
NetDirectory.parse
def parse(readDataInstance):
        """
        Returns a new L{NetDirectory} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetDirectory} object.

        @rtype: L{NetDirectory}
        @return: A new L{NetDirectory} object.
        """
        nd = NetDirectory()

        nd.cb.value = readDataInstance.readDword()
        nd.majorRuntimeVersion.value = readDataInstance.readWord()
        nd.minorRuntimeVersion.value = readDataInstance.readWord()

        nd.metaData.rva.value = readDataInstance.readDword()
        nd.metaData.size.value = readDataInstance.readDword()
        nd.metaData.name.value = "MetaData"

        nd.flags.value = readDataInstance.readDword()
        nd.entryPointToken.value = readDataInstance.readDword()

        nd.resources.rva.value = readDataInstance.readDword()
        nd.resources.size.value = readDataInstance.readDword()
        nd.resources.name.value = "Resources"

        nd.strongNameSignature.rva.value = readDataInstance.readDword()
        nd.strongNameSignature.size.value = readDataInstance.readDword()
        nd.strongNameSignature.name.value = "StrongNameSignature"

        nd.codeManagerTable.rva.value = readDataInstance.readDword()
        nd.codeManagerTable.size.value = readDataInstance.readDword()
        nd.codeManagerTable.name.value = "CodeManagerTable"

        nd.vTableFixups.rva.value = readDataInstance.readDword()
        nd.vTableFixups.size.value = readDataInstance.readDword()
        nd.vTableFixups.name.value = "VTableFixups"

        nd.exportAddressTableJumps.rva.value = readDataInstance.readDword()
        nd.exportAddressTableJumps.size.value = readDataInstance.readDword()
        nd.exportAddressTableJumps.name.value = "ExportAddressTableJumps"

        nd.managedNativeHeader.rva.value = readDataInstance.readDword()
        nd.managedNativeHeader.size.value = readDataInstance.readDword()
        nd.managedNativeHeader.name.value = "ManagedNativeHeader"

        return nd
python
def parse(readDataInstance):
        """
        Returns a new L{NetDirectory} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetDirectory} object.

        @rtype: L{NetDirectory}
        @return: A new L{NetDirectory} object.
        """
        nd = NetDirectory()

        nd.cb.value = readDataInstance.readDword()
        nd.majorRuntimeVersion.value = readDataInstance.readWord()
        nd.minorRuntimeVersion.value = readDataInstance.readWord()

        nd.metaData.rva.value = readDataInstance.readDword()
        nd.metaData.size.value = readDataInstance.readDword()
        nd.metaData.name.value = "MetaData"

        nd.flags.value = readDataInstance.readDword()
        nd.entryPointToken.value = readDataInstance.readDword()

        nd.resources.rva.value = readDataInstance.readDword()
        nd.resources.size.value = readDataInstance.readDword()
        nd.resources.name.value = "Resources"

        nd.strongNameSignature.rva.value = readDataInstance.readDword()
        nd.strongNameSignature.size.value = readDataInstance.readDword()
        nd.strongNameSignature.name.value = "StrongNameSignature"

        nd.codeManagerTable.rva.value = readDataInstance.readDword()
        nd.codeManagerTable.size.value = readDataInstance.readDword()
        nd.codeManagerTable.name.value = "CodeManagerTable"

        nd.vTableFixups.rva.value = readDataInstance.readDword()
        nd.vTableFixups.size.value = readDataInstance.readDword()
        nd.vTableFixups.name.value = "VTableFixups"

        nd.exportAddressTableJumps.rva.value = readDataInstance.readDword()
        nd.exportAddressTableJumps.size.value = readDataInstance.readDword()
        nd.exportAddressTableJumps.name.value = "ExportAddressTableJumps"

        nd.managedNativeHeader.rva.value = readDataInstance.readDword()
        nd.managedNativeHeader.size.value = readDataInstance.readDword()
        nd.managedNativeHeader.name.value = "ManagedNativeHeader"

        return nd
[ "def", "parse", "(", "readDataInstance", ")", ":", "nd", "=", "NetDirectory", "(", ")", "nd", ".", "cb", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "nd", ".", "majorRuntimeVersion", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "nd", ".", "minorRuntimeVersion", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "nd", ".", "metaData", ".", "rva", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "nd", ".", "metaData", ".", "size", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "nd", ".", "metaData", ".", "name", ".", "value", "=", "\"MetaData\"", "nd", ".", "flags", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "nd", ".", "entryPointToken", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "nd", ".", "resources", ".", "rva", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "nd", ".", "resources", ".", "size", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "nd", ".", "resources", ".", "name", ".", "value", "=", "\"Resources\"", "nd", ".", "strongNameSignature", ".", "rva", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "nd", ".", "strongNameSignature", ".", "size", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "nd", ".", "strongNameSignature", ".", "name", ".", "value", "=", "\"StrongNameSignature\"", "nd", ".", "codeManagerTable", ".", "rva", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "nd", ".", "codeManagerTable", ".", "size", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "nd", ".", "codeManagerTable", ".", "name", ".", "value", "=", "\"CodeManagerTable\"", "nd", ".", "vTableFixups", ".", "rva", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "nd", ".", "vTableFixups", ".", "size", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "nd", ".", "vTableFixups", ".", "name", ".", "value", "=", "\"VTableFixups\"", "nd", ".", "exportAddressTableJumps", ".", "rva", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "nd", ".", "exportAddressTableJumps", ".", "size", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "nd", ".", "exportAddressTableJumps", ".", "name", ".", "value", "=", "\"ExportAddressTableJumps\"", "nd", ".", "managedNativeHeader", ".", "rva", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "nd", ".", "managedNativeHeader", ".", "size", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "nd", ".", "managedNativeHeader", ".", "name", ".", "value", "=", "\"ManagedNativeHeader\"", "return", "nd" ]
Returns a new L{NetDirectory} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetDirectory} object.

        @rtype: L{NetDirectory}
        @return: A new L{NetDirectory} object.
[ "Returns", "a", "new", "L", "{", "NetDirectory", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L1001-L1048
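Editor's note: NetDirectory.parse is essentially the IMAGE_COR20_HEADER layout — a size/version prefix followed by named (rva, size) pairs, with flags and entryPointToken wedged between the MetaData and Resources entries. A small illustrative sketch of the recurring pair read, using plain struct rather than the pype32 ReadData API; the buffer and offsets are made up.

import struct

def read_rva_size(buf, off):
    # One data-directory entry: two little-endian DWORDs (rva, then size).
    rva, size = struct.unpack_from("<II", buf, off)
    return rva, size, off + 8

buf = struct.pack("<II", 0x2008, 0x4C)   # made-up MetaData directory entry
rva, size, off = read_rva_size(buf, 0)
assert (rva, size, off) == (0x2008, 0x4C, 8)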
crackinglandia/pype32
pype32/directories.py
NetMetaDataHeader.parse
def parse(readDataInstance):
        """
        Returns a new L{NetMetaDataHeader} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetMetaDataHeader} object.

        @rtype: L{NetMetaDataHeader}
        @return: A new L{NetMetaDataHeader} object.
        """
        nmh = NetMetaDataHeader()

        nmh.signature.value = readDataInstance.readDword()
        nmh.majorVersion.value = readDataInstance.readWord()
        nmh.minorVersion.value = readDataInstance.readWord()
        nmh.reserved.value = readDataInstance.readDword()
        nmh.versionLength.value = readDataInstance.readDword()
        nmh.versionString.value = readDataInstance.readAlignedString()
        nmh.flags.value = readDataInstance.readWord()
        nmh.numberOfStreams.value = readDataInstance.readWord()

        return nmh
python
def parse(readDataInstance):
        """
        Returns a new L{NetMetaDataHeader} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetMetaDataHeader} object.

        @rtype: L{NetMetaDataHeader}
        @return: A new L{NetMetaDataHeader} object.
        """
        nmh = NetMetaDataHeader()

        nmh.signature.value = readDataInstance.readDword()
        nmh.majorVersion.value = readDataInstance.readWord()
        nmh.minorVersion.value = readDataInstance.readWord()
        nmh.reserved.value = readDataInstance.readDword()
        nmh.versionLength.value = readDataInstance.readDword()
        nmh.versionString.value = readDataInstance.readAlignedString()
        nmh.flags.value = readDataInstance.readWord()
        nmh.numberOfStreams.value = readDataInstance.readWord()

        return nmh
[ "def", "parse", "(", "readDataInstance", ")", ":", "nmh", "=", "NetMetaDataHeader", "(", ")", "nmh", ".", "signature", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "nmh", ".", "majorVersion", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "nmh", ".", "minorVersion", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "nmh", ".", "reserved", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "nmh", ".", "versionLength", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "nmh", ".", "versionString", ".", "value", "=", "readDataInstance", ".", "readAlignedString", "(", ")", "nmh", ".", "flags", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "nmh", ".", "numberOfStreams", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "return", "nmh" ]
Returns a new L{NetMetaDataHeader} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetMetaDataHeader} object.

        @rtype: L{NetMetaDataHeader}
        @return: A new L{NetMetaDataHeader} object.
[ "Returns", "a", "new", "L", "{", "NetMetaDataHeader", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L1071-L1091
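Editor's note: the only variable-length field in this header is versionString, read via readAlignedString. Assuming the usual CLI metadata-root convention (ECMA-335 II.24.2.1) that pype32 appears to follow, the version-string buffer is padded out to a 4-byte boundary; a sketch of that rounding arithmetic only — the exact semantics of readAlignedString are not shown in this record.

def aligned_length(n, alignment=4):
    # Round n up to the next multiple of `alignment`.
    return (n + alignment - 1) & ~(alignment - 1)

assert aligned_length(11) == 12
assert aligned_length(12) == 12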
crackinglandia/pype32
pype32/directories.py
NetMetaDataStreamEntry.parse
def parse(readDataInstance):
        """
        Returns a new L{NetMetaDataStreamEntry} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetMetaDataStreamEntry}.

        @rtype: L{NetMetaDataStreamEntry}
        @return: A new L{NetMetaDataStreamEntry} object.
        """
        n = NetMetaDataStreamEntry()

        n.offset.value = readDataInstance.readDword()
        n.size.value = readDataInstance.readDword()
        n.name.value = readDataInstance.readAlignedString()

        return n
python
def parse(readDataInstance):
        """
        Returns a new L{NetMetaDataStreamEntry} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetMetaDataStreamEntry}.

        @rtype: L{NetMetaDataStreamEntry}
        @return: A new L{NetMetaDataStreamEntry} object.
        """
        n = NetMetaDataStreamEntry()

        n.offset.value = readDataInstance.readDword()
        n.size.value = readDataInstance.readDword()
        n.name.value = readDataInstance.readAlignedString()

        return n
[ "def", "parse", "(", "readDataInstance", ")", ":", "n", "=", "NetMetaDataStreamEntry", "(", ")", "n", ".", "offset", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "n", ".", "size", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "n", ".", "name", ".", "value", "=", "readDataInstance", ".", "readAlignedString", "(", ")", "return", "n" ]
Returns a new L{NetMetaDataStreamEntry} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetMetaDataStreamEntry}.

        @rtype: L{NetMetaDataStreamEntry}
        @return: A new L{NetMetaDataStreamEntry} object.
[ "Returns", "a", "new", "L", "{", "NetMetaDataStreamEntry", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L1113-L1127
crackinglandia/pype32
pype32/directories.py
NetMetaDataStreams.parse
def parse(readDataInstance, nStreams):
        """
        Returns a new L{NetMetaDataStreams} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetMetaDataStreams} object.

        @type nStreams: int
        @param nStreams: The number of L{NetMetaDataStreamEntry} objects in the C{readDataInstance} object.

        @rtype: L{NetMetaDataStreams}
        @return: A new L{NetMetaDataStreams} object.
        """
        streams = NetMetaDataStreams()

        for i in range(nStreams):
            streamEntry = NetMetaDataStreamEntry()

            streamEntry.offset.value = readDataInstance.readDword()
            streamEntry.size.value = readDataInstance.readDword()
            streamEntry.name.value = readDataInstance.readAlignedString()

            #streams.append(streamEntry)
            streams.update({ i: streamEntry, streamEntry.name.value: streamEntry })

        return streams
python
def parse(readDataInstance, nStreams):
        """
        Returns a new L{NetMetaDataStreams} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetMetaDataStreams} object.

        @type nStreams: int
        @param nStreams: The number of L{NetMetaDataStreamEntry} objects in the C{readDataInstance} object.

        @rtype: L{NetMetaDataStreams}
        @return: A new L{NetMetaDataStreams} object.
        """
        streams = NetMetaDataStreams()

        for i in range(nStreams):
            streamEntry = NetMetaDataStreamEntry()

            streamEntry.offset.value = readDataInstance.readDword()
            streamEntry.size.value = readDataInstance.readDword()
            streamEntry.name.value = readDataInstance.readAlignedString()

            #streams.append(streamEntry)
            streams.update({ i: streamEntry, streamEntry.name.value: streamEntry })

        return streams
[ "def", "parse", "(", "readDataInstance", ",", "nStreams", ")", ":", "streams", "=", "NetMetaDataStreams", "(", ")", "for", "i", "in", "range", "(", "nStreams", ")", ":", "streamEntry", "=", "NetMetaDataStreamEntry", "(", ")", "streamEntry", ".", "offset", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "streamEntry", ".", "size", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "streamEntry", ".", "name", ".", "value", "=", "readDataInstance", ".", "readAlignedString", "(", ")", "#streams.append(streamEntry)", "streams", ".", "update", "(", "{", "i", ":", "streamEntry", ",", "streamEntry", ".", "name", ".", "value", ":", "streamEntry", "}", ")", "return", "streams" ]
Returns a new L{NetMetaDataStreams} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetMetaDataStreams} object.

        @type nStreams: int
        @param nStreams: The number of L{NetMetaDataStreamEntry} objects in the C{readDataInstance} object.

        @rtype: L{NetMetaDataStreams}
        @return: A new L{NetMetaDataStreams} object.
[ "Returns", "a", "new", "L", "{", "NetMetaDataStreams", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L1148-L1173
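Editor's note: the update() call above registers each entry under two keys, so callers can index the streams container either by position or by stream name. The pattern in miniature, with a made-up stream header:

streams = {}
entry = {"offset": 0x6C, "size": 0x1A2B, "name": "#~"}   # hypothetical values
streams.update({0: entry, entry["name"]: entry})

assert streams[0] is streams["#~"]   # one object, two keys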
crackinglandia/pype32
pype32/directories.py
NetMetaDataTableHeader.parse
def parse(readDataInstance):
        """
        Returns a new L{NetMetaDataTableHeader} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetMetaDataTableHeader} object.

        @rtype: L{NetMetaDataTableHeader}
        @return: A new L{NetMetaDataTableHeader} object.
        """
        th = NetMetaDataTableHeader()

        th.reserved_1.value = readDataInstance.readDword()
        th.majorVersion.value = readDataInstance.readByte()
        th.minorVersion.value = readDataInstance.readByte()
        th.heapOffsetSizes.value = readDataInstance.readByte()
        th.reserved_2.value = readDataInstance.readByte()
        th.maskValid.value = readDataInstance.readQword()
        th.maskSorted.value = readDataInstance.readQword()

        return th
python
def parse(readDataInstance):
        """
        Returns a new L{NetMetaDataTableHeader} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetMetaDataTableHeader} object.

        @rtype: L{NetMetaDataTableHeader}
        @return: A new L{NetMetaDataTableHeader} object.
        """
        th = NetMetaDataTableHeader()

        th.reserved_1.value = readDataInstance.readDword()
        th.majorVersion.value = readDataInstance.readByte()
        th.minorVersion.value = readDataInstance.readByte()
        th.heapOffsetSizes.value = readDataInstance.readByte()
        th.reserved_2.value = readDataInstance.readByte()
        th.maskValid.value = readDataInstance.readQword()
        th.maskSorted.value = readDataInstance.readQword()

        return th
[ "def", "parse", "(", "readDataInstance", ")", ":", "th", "=", "NetMetaDataTableHeader", "(", ")", "th", ".", "reserved_1", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "th", ".", "majorVersion", ".", "value", "=", "readDataInstance", ".", "readByte", "(", ")", "th", ".", "minorVersion", ".", "value", "=", "readDataInstance", ".", "readByte", "(", ")", "th", ".", "heapOffsetSizes", ".", "value", "=", "readDataInstance", ".", "readByte", "(", ")", "th", ".", "reserved_2", ".", "value", "=", "readDataInstance", ".", "readByte", "(", ")", "th", ".", "maskValid", ".", "value", "=", "readDataInstance", ".", "readQword", "(", ")", "th", ".", "maskSorted", ".", "value", "=", "readDataInstance", ".", "readQword", "(", ")", "return", "th" ]
Returns a new L{NetMetaDataTableHeader} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetMetaDataTableHeader} object.

        @rtype: L{NetMetaDataTableHeader}
        @return: A new L{NetMetaDataTableHeader} object.
[ "Returns", "a", "new", "L", "{", "NetMetaDataTableHeader", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L1195-L1215
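Editor's note: heapOffsetSizes is the HeapSizes byte from ECMA-335 II.24.2.6 — bits 0, 1 and 2 widen indexes into the #Strings, #GUID and #Blob heaps from 2 to 4 bytes. A small decoder, independent of pype32:

def heap_index_sizes(heapOffsetSizes):
    # HeapSizes bit flags per ECMA-335 II.24.2.6.
    return {
        "#Strings": 4 if heapOffsetSizes & 0x01 else 2,
        "#GUID":    4 if heapOffsetSizes & 0x02 else 2,
        "#Blob":    4 if heapOffsetSizes & 0x04 else 2,
    }

assert heap_index_sizes(0x00) == {"#Strings": 2, "#GUID": 2, "#Blob": 2}
assert heap_index_sizes(0x07) == {"#Strings": 4, "#GUID": 4, "#Blob": 4}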
crackinglandia/pype32
pype32/directories.py
NetMetaDataTables.parse
def parse(readDataInstance, netMetaDataStreams):
        """
        Returns a new L{NetMetaDataTables} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetMetaDataTables} object.

        @rtype: L{NetMetaDataTables}
        @return: A new L{NetMetaDataTables} object.
        """
        dt = NetMetaDataTables()

        dt.netMetaDataTableHeader = NetMetaDataTableHeader.parse(readDataInstance)
        dt.tables = {}

        metadataTableDefinitions = dotnet.MetadataTableDefinitions(dt, netMetaDataStreams)

        for i in xrange(64):
            dt.tables[i] = { "rows": 0 }
            if dt.netMetaDataTableHeader.maskValid.value >> i & 1:
                dt.tables[i]["rows"] = readDataInstance.readDword()
            if i in dotnet.MetadataTableNames:
                dt.tables[dotnet.MetadataTableNames[i]] = dt.tables[i]

        for i in xrange(64):
            dt.tables[i]["data"] = []
            for j in range(dt.tables[i]["rows"]):
                row = None
                if i in metadataTableDefinitions:
                    row = readDataInstance.readFields(metadataTableDefinitions[i])
                dt.tables[i]["data"].append(row)

        for i in xrange(64):
            if i in dotnet.MetadataTableNames:
                dt.tables[dotnet.MetadataTableNames[i]] = dt.tables[i]["data"]
            dt.tables[i] = dt.tables[i]["data"]

        return dt
python
def parse(readDataInstance, netMetaDataStreams):
        """
        Returns a new L{NetMetaDataTables} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetMetaDataTables} object.

        @rtype: L{NetMetaDataTables}
        @return: A new L{NetMetaDataTables} object.
        """
        dt = NetMetaDataTables()

        dt.netMetaDataTableHeader = NetMetaDataTableHeader.parse(readDataInstance)
        dt.tables = {}

        metadataTableDefinitions = dotnet.MetadataTableDefinitions(dt, netMetaDataStreams)

        for i in xrange(64):
            dt.tables[i] = { "rows": 0 }
            if dt.netMetaDataTableHeader.maskValid.value >> i & 1:
                dt.tables[i]["rows"] = readDataInstance.readDword()
            if i in dotnet.MetadataTableNames:
                dt.tables[dotnet.MetadataTableNames[i]] = dt.tables[i]

        for i in xrange(64):
            dt.tables[i]["data"] = []
            for j in range(dt.tables[i]["rows"]):
                row = None
                if i in metadataTableDefinitions:
                    row = readDataInstance.readFields(metadataTableDefinitions[i])
                dt.tables[i]["data"].append(row)

        for i in xrange(64):
            if i in dotnet.MetadataTableNames:
                dt.tables[dotnet.MetadataTableNames[i]] = dt.tables[i]["data"]
            dt.tables[i] = dt.tables[i]["data"]

        return dt
[ "def", "parse", "(", "readDataInstance", ",", "netMetaDataStreams", ")", ":", "dt", "=", "NetMetaDataTables", "(", ")", "dt", ".", "netMetaDataTableHeader", "=", "NetMetaDataTableHeader", ".", "parse", "(", "readDataInstance", ")", "dt", ".", "tables", "=", "{", "}", "metadataTableDefinitions", "=", "dotnet", ".", "MetadataTableDefinitions", "(", "dt", ",", "netMetaDataStreams", ")", "for", "i", "in", "xrange", "(", "64", ")", ":", "dt", ".", "tables", "[", "i", "]", "=", "{", "\"rows\"", ":", "0", "}", "if", "dt", ".", "netMetaDataTableHeader", ".", "maskValid", ".", "value", ">>", "i", "&", "1", ":", "dt", ".", "tables", "[", "i", "]", "[", "\"rows\"", "]", "=", "readDataInstance", ".", "readDword", "(", ")", "if", "i", "in", "dotnet", ".", "MetadataTableNames", ":", "dt", ".", "tables", "[", "dotnet", ".", "MetadataTableNames", "[", "i", "]", "]", "=", "dt", ".", "tables", "[", "i", "]", "for", "i", "in", "xrange", "(", "64", ")", ":", "dt", ".", "tables", "[", "i", "]", "[", "\"data\"", "]", "=", "[", "]", "for", "j", "in", "range", "(", "dt", ".", "tables", "[", "i", "]", "[", "\"rows\"", "]", ")", ":", "row", "=", "None", "if", "i", "in", "metadataTableDefinitions", ":", "row", "=", "readDataInstance", ".", "readFields", "(", "metadataTableDefinitions", "[", "i", "]", ")", "dt", ".", "tables", "[", "i", "]", "[", "\"data\"", "]", ".", "append", "(", "row", ")", "for", "i", "in", "xrange", "(", "64", ")", ":", "if", "i", "in", "dotnet", ".", "MetadataTableNames", ":", "dt", ".", "tables", "[", "dotnet", ".", "MetadataTableNames", "[", "i", "]", "]", "=", "dt", ".", "tables", "[", "i", "]", "[", "\"data\"", "]", "dt", ".", "tables", "[", "i", "]", "=", "dt", ".", "tables", "[", "i", "]", "[", "\"data\"", "]", "return", "dt" ]
Returns a new L{NetMetaDataTables} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetMetaDataTables} object.

        @rtype: L{NetMetaDataTables}
        @return: A new L{NetMetaDataTables} object.
[ "Returns", "a", "new", "L", "{", "NetMetaDataTables", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L1237-L1273
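Editor's note: the first loop above reads one row-count DWORD per set bit of maskValid, in ascending table order; tables whose bit is clear get zero rows. The bit-walk in isolation — table indices and counts here are made up:

maskValid = (1 << 0x00) | (1 << 0x02)     # pretend only tables 0x00 and 0x02 are present
pending_counts = iter([1, 42])            # row counts as they would appear in the file

rows = {}
for i in range(64):
    rows[i] = next(pending_counts) if (maskValid >> i) & 1 else 0

assert rows[0x02] == 42 and rows[0x01] == 0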
crackinglandia/pype32
pype32/directories.py
NetResources.parse
def parse(readDataInstance):
        """
        Returns a new L{NetResources} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetResources} object.

        @rtype: L{NetResources}
        @return: A new L{NetResources} object.
        """
        r = NetResources()

        r.signature = readDataInstance.readDword()
        if r.signature != 0xbeefcace:
            return r

        r.readerCount = readDataInstance.readDword()
        r.readerTypeLength = readDataInstance.readDword()
        r.readerType = utils.ReadData(readDataInstance.read(r.readerTypeLength)).readDotNetBlob()
        r.version = readDataInstance.readDword()
        r.resourceCount = readDataInstance.readDword()
        r.resourceTypeCount = readDataInstance.readDword()

        r.resourceTypes = []
        for i in xrange(r.resourceTypeCount):
            r.resourceTypes.append(readDataInstance.readDotNetBlob())

        # aligned to 8 bytes
        readDataInstance.skipBytes(8 - readDataInstance.tell() & 0x7)

        r.resourceHashes = []
        for i in xrange(r.resourceCount):
            r.resourceHashes.append(readDataInstance.readDword())

        r.resourceNameOffsets = []
        for i in xrange(r.resourceCount):
            r.resourceNameOffsets.append(readDataInstance.readDword())

        r.dataSectionOffset = readDataInstance.readDword()

        r.resourceNames = []
        r.resourceOffsets = []
        base = readDataInstance.tell()
        for i in xrange(r.resourceCount):
            readDataInstance.setOffset(base + r.resourceNameOffsets[i])
            r.resourceNames.append(readDataInstance.readDotNetUnicodeString())
            r.resourceOffsets.append(readDataInstance.readDword())

        r.info = {}
        for i in xrange(r.resourceCount):
            readDataInstance.setOffset(r.dataSectionOffset + r.resourceOffsets[i])
            r.info[i] = readDataInstance.read(len(readDataInstance))
            r.info[r.resourceNames[i]] = r.info[i]

        return r
python
def parse(readDataInstance):
        """
        Returns a new L{NetResources} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetResources} object.

        @rtype: L{NetResources}
        @return: A new L{NetResources} object.
        """
        r = NetResources()

        r.signature = readDataInstance.readDword()
        if r.signature != 0xbeefcace:
            return r

        r.readerCount = readDataInstance.readDword()
        r.readerTypeLength = readDataInstance.readDword()
        r.readerType = utils.ReadData(readDataInstance.read(r.readerTypeLength)).readDotNetBlob()
        r.version = readDataInstance.readDword()
        r.resourceCount = readDataInstance.readDword()
        r.resourceTypeCount = readDataInstance.readDword()

        r.resourceTypes = []
        for i in xrange(r.resourceTypeCount):
            r.resourceTypes.append(readDataInstance.readDotNetBlob())

        # aligned to 8 bytes
        readDataInstance.skipBytes(8 - readDataInstance.tell() & 0x7)

        r.resourceHashes = []
        for i in xrange(r.resourceCount):
            r.resourceHashes.append(readDataInstance.readDword())

        r.resourceNameOffsets = []
        for i in xrange(r.resourceCount):
            r.resourceNameOffsets.append(readDataInstance.readDword())

        r.dataSectionOffset = readDataInstance.readDword()

        r.resourceNames = []
        r.resourceOffsets = []
        base = readDataInstance.tell()
        for i in xrange(r.resourceCount):
            readDataInstance.setOffset(base + r.resourceNameOffsets[i])
            r.resourceNames.append(readDataInstance.readDotNetUnicodeString())
            r.resourceOffsets.append(readDataInstance.readDword())

        r.info = {}
        for i in xrange(r.resourceCount):
            readDataInstance.setOffset(r.dataSectionOffset + r.resourceOffsets[i])
            r.info[i] = readDataInstance.read(len(readDataInstance))
            r.info[r.resourceNames[i]] = r.info[i]

        return r
[ "def", "parse", "(", "readDataInstance", ")", ":", "r", "=", "NetResources", "(", ")", "r", ".", "signature", "=", "readDataInstance", ".", "readDword", "(", ")", "if", "r", ".", "signature", "!=", "0xbeefcace", ":", "return", "r", "r", ".", "readerCount", "=", "readDataInstance", ".", "readDword", "(", ")", "r", ".", "readerTypeLength", "=", "readDataInstance", ".", "readDword", "(", ")", "r", ".", "readerType", "=", "utils", ".", "ReadData", "(", "readDataInstance", ".", "read", "(", "r", ".", "readerTypeLength", ")", ")", ".", "readDotNetBlob", "(", ")", "r", ".", "version", "=", "readDataInstance", ".", "readDword", "(", ")", "r", ".", "resourceCount", "=", "readDataInstance", ".", "readDword", "(", ")", "r", ".", "resourceTypeCount", "=", "readDataInstance", ".", "readDword", "(", ")", "r", ".", "resourceTypes", "=", "[", "]", "for", "i", "in", "xrange", "(", "r", ".", "resourceTypeCount", ")", ":", "r", ".", "resourceTypes", ".", "append", "(", "readDataInstance", ".", "readDotNetBlob", "(", ")", ")", "# aligned to 8 bytes", "readDataInstance", ".", "skipBytes", "(", "8", "-", "readDataInstance", ".", "tell", "(", ")", "&", "0x7", ")", "r", ".", "resourceHashes", "=", "[", "]", "for", "i", "in", "xrange", "(", "r", ".", "resourceCount", ")", ":", "r", ".", "resourceHashes", ".", "append", "(", "readDataInstance", ".", "readDword", "(", ")", ")", "r", ".", "resourceNameOffsets", "=", "[", "]", "for", "i", "in", "xrange", "(", "r", ".", "resourceCount", ")", ":", "r", ".", "resourceNameOffsets", ".", "append", "(", "readDataInstance", ".", "readDword", "(", ")", ")", "r", ".", "dataSectionOffset", "=", "readDataInstance", ".", "readDword", "(", ")", "r", ".", "resourceNames", "=", "[", "]", "r", ".", "resourceOffsets", "=", "[", "]", "base", "=", "readDataInstance", ".", "tell", "(", ")", "for", "i", "in", "xrange", "(", "r", ".", "resourceCount", ")", ":", "readDataInstance", ".", "setOffset", "(", "base", "+", "r", ".", "resourceNameOffsets", "[", "i", "]", ")", "r", ".", "resourceNames", ".", "append", "(", "readDataInstance", ".", "readDotNetUnicodeString", "(", ")", ")", "r", ".", "resourceOffsets", ".", "append", "(", "readDataInstance", ".", "readDword", "(", ")", ")", "r", ".", "info", "=", "{", "}", "for", "i", "in", "xrange", "(", "r", ".", "resourceCount", ")", ":", "readDataInstance", ".", "setOffset", "(", "r", ".", "dataSectionOffset", "+", "r", ".", "resourceOffsets", "[", "i", "]", ")", "r", ".", "info", "[", "i", "]", "=", "readDataInstance", ".", "read", "(", "len", "(", "readDataInstance", ")", ")", "r", ".", "info", "[", "r", ".", "resourceNames", "[", "i", "]", "]", "=", "r", ".", "info", "[", "i", "]", "return", "r" ]
Returns a new L{NetResources} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetResources} object.

        @rtype: L{NetResources}
        @return: A new L{NetResources} object.
[ "Returns", "a", "new", "L", "{", "NetResources", "}", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L1312-L1366
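Editor's note: one easily misread line above is skipBytes(8 - readDataInstance.tell() & 0x7). Python's binary minus binds tighter than &, so this evaluates as (8 - tell()) & 0x7 — the number of bytes needed to reach the next 8-byte boundary, and zero when already aligned. Checking that claim over a few offsets:

for tell in (0, 1, 3, 8, 13, 16):
    pad = 8 - tell & 0x7          # same operator precedence as in NetResources.parse
    assert 0 <= pad < 8
    assert (tell + pad) % 8 == 0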
blockcypher/bcwallet
bcwallet/bc_utils.py
verify_and_fill_address_paths_from_bip32key
def verify_and_fill_address_paths_from_bip32key(address_paths, master_key, network):
    '''
    Take address paths and verifies their accuracy client-side.

    Also fills in all the available metadata (WIF, public key, etc)
    '''
    assert network, network
    wallet_obj = Wallet.deserialize(master_key, network=network)

    address_paths_cleaned = []
    for address_path in address_paths:
        path = address_path['path']
        input_address = address_path['address']

        child_wallet = wallet_obj.get_child_for_path(path)
        if child_wallet.to_address() != input_address:
            err_msg = 'Client Side Verification Fail for %s on %s:\n%s != %s' % (
                    path,
                    master_key,
                    child_wallet.to_address(),
                    input_address,
                    )
            raise Exception(err_msg)

        pubkeyhex = child_wallet.get_public_key_hex(compressed=True)
        server_pubkeyhex = address_path.get('public')
        if server_pubkeyhex and server_pubkeyhex != pubkeyhex:
            err_msg = 'Client Side Verification Fail for %s on %s:\n%s != %s' % (
                    path,
                    master_key,
                    pubkeyhex,
                    server_pubkeyhex,
                    )
            raise Exception(err_msg)

        address_path_cleaned = {
                'pub_address': input_address,
                'path': path,
                'pubkeyhex': pubkeyhex,
                }

        if child_wallet.private_key:
            privkeyhex = child_wallet.get_private_key_hex()
            address_path_cleaned['wif'] = child_wallet.export_to_wif()
            address_path_cleaned['privkeyhex'] = privkeyhex

        address_paths_cleaned.append(address_path_cleaned)

    return address_paths_cleaned
python
def verify_and_fill_address_paths_from_bip32key(address_paths, master_key, network):
    '''
    Take address paths and verifies their accuracy client-side.

    Also fills in all the available metadata (WIF, public key, etc)
    '''
    assert network, network
    wallet_obj = Wallet.deserialize(master_key, network=network)

    address_paths_cleaned = []
    for address_path in address_paths:
        path = address_path['path']
        input_address = address_path['address']

        child_wallet = wallet_obj.get_child_for_path(path)
        if child_wallet.to_address() != input_address:
            err_msg = 'Client Side Verification Fail for %s on %s:\n%s != %s' % (
                    path,
                    master_key,
                    child_wallet.to_address(),
                    input_address,
                    )
            raise Exception(err_msg)

        pubkeyhex = child_wallet.get_public_key_hex(compressed=True)
        server_pubkeyhex = address_path.get('public')
        if server_pubkeyhex and server_pubkeyhex != pubkeyhex:
            err_msg = 'Client Side Verification Fail for %s on %s:\n%s != %s' % (
                    path,
                    master_key,
                    pubkeyhex,
                    server_pubkeyhex,
                    )
            raise Exception(err_msg)

        address_path_cleaned = {
                'pub_address': input_address,
                'path': path,
                'pubkeyhex': pubkeyhex,
                }

        if child_wallet.private_key:
            privkeyhex = child_wallet.get_private_key_hex()
            address_path_cleaned['wif'] = child_wallet.export_to_wif()
            address_path_cleaned['privkeyhex'] = privkeyhex

        address_paths_cleaned.append(address_path_cleaned)

    return address_paths_cleaned
[ "def", "verify_and_fill_address_paths_from_bip32key", "(", "address_paths", ",", "master_key", ",", "network", ")", ":", "assert", "network", ",", "network", "wallet_obj", "=", "Wallet", ".", "deserialize", "(", "master_key", ",", "network", "=", "network", ")", "address_paths_cleaned", "=", "[", "]", "for", "address_path", "in", "address_paths", ":", "path", "=", "address_path", "[", "'path'", "]", "input_address", "=", "address_path", "[", "'address'", "]", "child_wallet", "=", "wallet_obj", ".", "get_child_for_path", "(", "path", ")", "if", "child_wallet", ".", "to_address", "(", ")", "!=", "input_address", ":", "err_msg", "=", "'Client Side Verification Fail for %s on %s:\\n%s != %s'", "%", "(", "path", ",", "master_key", ",", "child_wallet", ".", "to_address", "(", ")", ",", "input_address", ",", ")", "raise", "Exception", "(", "err_msg", ")", "pubkeyhex", "=", "child_wallet", ".", "get_public_key_hex", "(", "compressed", "=", "True", ")", "server_pubkeyhex", "=", "address_path", ".", "get", "(", "'public'", ")", "if", "server_pubkeyhex", "and", "server_pubkeyhex", "!=", "pubkeyhex", ":", "err_msg", "=", "'Client Side Verification Fail for %s on %s:\\n%s != %s'", "%", "(", "path", ",", "master_key", ",", "pubkeyhex", ",", "server_pubkeyhex", ",", ")", "raise", "Exception", "(", "err_msg", ")", "address_path_cleaned", "=", "{", "'pub_address'", ":", "input_address", ",", "'path'", ":", "path", ",", "'pubkeyhex'", ":", "pubkeyhex", ",", "}", "if", "child_wallet", ".", "private_key", ":", "privkeyhex", "=", "child_wallet", ".", "get_private_key_hex", "(", ")", "address_path_cleaned", "[", "'wif'", "]", "=", "child_wallet", ".", "export_to_wif", "(", ")", "address_path_cleaned", "[", "'privkeyhex'", "]", "=", "privkeyhex", "address_paths_cleaned", ".", "append", "(", "address_path_cleaned", ")", "return", "address_paths_cleaned" ]
Take address paths and verifies their accuracy client-side.

    Also fills in all the available metadata (WIF, public key, etc)
[ "Take", "address", "paths", "and", "verifies", "their", "accuracy", "client", "-", "side", "." ]
train
https://github.com/blockcypher/bcwallet/blob/4c623a8e19705332c9398c3fea9d5dc3c1dafb9b/bcwallet/bc_utils.py#L33-L84
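Editor's note: the verification pattern above — re-derive each child from the BIP32 key locally and refuse to trust server-supplied data on any mismatch — is worth seeing in isolation. A minimal sketch assuming the bitmerchant-style Wallet API the function relies on (deserialize / get_child_for_path / to_address); the import path is assumed, and any key, path and address you feed it are placeholders, not real data.

from bitmerchant.wallet import Wallet  # assumed import path for the Wallet used above

def check_one(master_key, path, expected_address, network):
    # Re-derive the child locally and fail loudly on any mismatch.
    child = Wallet.deserialize(master_key, network=network).get_child_for_path(path)
    if child.to_address() != expected_address:
        raise Exception('Client Side Verification Fail for %s' % path)
    return child.get_public_key_hex(compressed=True)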
tansey/gfl
pygfl/density.py
GraphFusedDensity.solution_path
def solution_path(self):
        '''Follows the solution path of the generalized lasso to find the best lambda value.'''
        lambda_grid = np.exp(np.linspace(np.log(self.max_lambda), np.log(self.min_lambda), self.lambda_bins))
        aic_trace = np.zeros((len(self.bins),lambda_grid.shape[0])) # The AIC score for each lambda value
        aicc_trace = np.zeros((len(self.bins),lambda_grid.shape[0])) # The AICc score for each lambda value (correcting for finite sample size)
        bic_trace = np.zeros((len(self.bins),lambda_grid.shape[0])) # The BIC score for each lambda value
        dof_trace = np.zeros((len(self.bins),lambda_grid.shape[0])) # The degrees of freedom of each final solution
        log_likelihood_trace = np.zeros((len(self.bins),lambda_grid.shape[0]))
        bic_best_idx = [None for _ in self.bins]
        aic_best_idx = [None for _ in self.bins]
        aicc_best_idx = [None for _ in self.bins]
        bic_best_betas = [None for _ in self.bins]
        aic_best_betas = [None for _ in self.bins]
        aicc_best_betas = [None for _ in self.bins]

        if self.k == 0 and self.trails is not None:
            betas = [np.zeros(self.num_nodes, dtype='double') for _ in self.bins]
            zs = [np.zeros(self.breakpoints[-1], dtype='double') for _ in self.bins]
            us = [np.zeros(self.breakpoints[-1], dtype='double') for _ in self.bins]
        else:
            betas = [np.zeros(self.num_nodes, dtype='double') for _ in self.bins]
            us = [np.zeros(self.Dk.shape[0], dtype='double') for _ in self.bins]

        for i, _lambda in enumerate(lambda_grid):
            if self.verbose:
                print('\n#{0} Lambda = {1}'.format(i, _lambda))

            # Run the graph fused lasso over each bin with the current lambda value
            initial_values = (betas, zs, us) if self.k == 0 and self.trails is not None else (betas, us)
            self.run(_lambda, initial_values=initial_values)

            if self.verbose > 1:
                print('\tCalculating degrees of freedom and information criteria')

            for b, beta in enumerate(betas):
                if self.bins_allowed is not None and b not in self.bins_allowed:
                    continue

                # Count the number of free parameters in the grid (dof)
                # TODO: this is not really the true DoF, since a change in a higher node multiplies
                # the DoF in the lower nodes
                # dof_trace[b,i] = len(self.calc_plateaus(beta))
                dof_vals = self.Dk_minus_one.dot(beta) if self.k > 0 else beta
                plateaus = calc_plateaus(dof_vals, self.edges, rel_tol=0.01) if (self.k % 2) == 0 else nearly_unique(dof_vals, rel_tol=0.03)
                #plateaus = calc_plateaus(dof_vals, self.edges, rel_tol=1e-5) if (self.k % 2) == 0 else nearly_unique(dof_vals, rel_tol=1e-5)
                dof_trace[b,i] = max(1,len(plateaus)) #* (k+1)

                # Get the negative log-likelihood
                log_likelihood_trace[b,i] = self.data_log_likelihood(self.bins[b][-1], self.bins[b][-2], beta)

                # Calculate AIC = 2k - 2ln(L)
                aic_trace[b,i] = 2. * dof_trace[b,i] - 2. * log_likelihood_trace[b,i]

                # Calculate AICc = AIC + 2k * (k+1) / (n - k - 1)
                aicc_trace[b,i] = aic_trace[b,i] + 2 * dof_trace[b,i] * (dof_trace[b,i]+1) / (self.num_nodes - dof_trace[b,i] - 1.)

                # Calculate BIC = -2ln(L) + k * (ln(n) - ln(2pi))
                bic_trace[b,i] = -2 * log_likelihood_trace[b,i] + dof_trace[b,i] * (np.log(self.num_nodes) - np.log(2 * np.pi))

                # Track the best model thus far
                if aic_best_idx[b] is None or aic_trace[b,i] < aic_trace[b,aic_best_idx[b]]:
                    aic_best_idx[b] = i
                    aic_best_betas[b] = np.array(beta)

                # Track the best model thus far
                if aicc_best_idx[b] is None or aicc_trace[b,i] < aicc_trace[b,aicc_best_idx[b]]:
                    aicc_best_idx[b] = i
                    aicc_best_betas[b] = np.array(beta)

                # Track the best model thus far
                if bic_best_idx[b] is None or bic_trace[b,i] < bic_trace[b,bic_best_idx[b]]:
                    bic_best_idx[b] = i
                    bic_best_betas[b] = np.array(beta)

                if self.verbose and self.bins_allowed is not None:
                    print('\tBin {0} Log-Likelihood: {1} DoF: {2} AIC: {3} AICc: {4} BIC: {5}'.format(b, log_likelihood_trace[b,i], dof_trace[b,i], aic_trace[b,i], aicc_trace[b,i], bic_trace[b,i]))

            if self.verbose and self.bins_allowed is None:
                print('Overall Log-Likelihood: {0} DoF: {1} AIC: {2} AICc: {3} BIC: {4}'.format(log_likelihood_trace[:,i].sum(), dof_trace[:,i].sum(), aic_trace[:,i].sum(), aicc_trace[:,i].sum(), bic_trace[:,i].sum()))

        if self.verbose:
            print('')
            print('Best settings per bin:')
            for b, (aic_idx, aicc_idx, bic_idx) in enumerate(zip(aic_best_idx, aicc_best_idx, bic_best_idx)):
                if self.bins_allowed is not None and b not in self.bins_allowed:
                    continue
                left, mid, right, trials, successes = self.bins[b]
                print('\tBin #{0} ([{1}, {2}], split={3}) lambda: AIC={4:.2f} AICC={5:.2f} BIC={6:.2f} DoF: AIC={7:.0f} AICC={8:.0f} BIC={9:.0f}'.format(
                        b, left, right, mid,
                        lambda_grid[aic_idx], lambda_grid[aicc_idx], lambda_grid[bic_idx],
                        dof_trace[b,aic_idx], dof_trace[b,aicc_idx], dof_trace[b,bic_idx]))
            print('')

        if self.bins_allowed is None:
            if self.verbose:
                print('Creating densities from betas...')
            bic_density = self.density_from_betas(bic_best_betas)
            aic_density = self.density_from_betas(aic_best_betas)
            aicc_density = self.density_from_betas(aicc_best_betas)
            self.map_density = bic_density
        else:
            aic_density, aicc_density, bic_density = None, None, None

        self.map_betas = bic_best_betas

        return {'aic': aic_trace,
                'aicc': aicc_trace,
                'bic': bic_trace,
                'dof': dof_trace,
                'loglikelihood': log_likelihood_trace,
                'lambdas': lambda_grid,
                'aic_betas': aic_best_betas,
                'aicc_betas': aicc_best_betas,
                'bic_betas': bic_best_betas,
                'aic_best_idx': aic_best_idx,
                'aicc_best_idx': aicc_best_idx,
                'bic_best_idx': bic_best_idx,
                'aic_densities': aic_density.reshape(self.data_shape),
                'aicc_densities': aicc_density.reshape(self.data_shape),
                'bic_densities': bic_density.reshape(self.data_shape)}
python
def solution_path(self):
        '''Follows the solution path of the generalized lasso to find the best lambda value.'''
        lambda_grid = np.exp(np.linspace(np.log(self.max_lambda), np.log(self.min_lambda), self.lambda_bins))
        aic_trace = np.zeros((len(self.bins),lambda_grid.shape[0])) # The AIC score for each lambda value
        aicc_trace = np.zeros((len(self.bins),lambda_grid.shape[0])) # The AICc score for each lambda value (correcting for finite sample size)
        bic_trace = np.zeros((len(self.bins),lambda_grid.shape[0])) # The BIC score for each lambda value
        dof_trace = np.zeros((len(self.bins),lambda_grid.shape[0])) # The degrees of freedom of each final solution
        log_likelihood_trace = np.zeros((len(self.bins),lambda_grid.shape[0]))
        bic_best_idx = [None for _ in self.bins]
        aic_best_idx = [None for _ in self.bins]
        aicc_best_idx = [None for _ in self.bins]
        bic_best_betas = [None for _ in self.bins]
        aic_best_betas = [None for _ in self.bins]
        aicc_best_betas = [None for _ in self.bins]

        if self.k == 0 and self.trails is not None:
            betas = [np.zeros(self.num_nodes, dtype='double') for _ in self.bins]
            zs = [np.zeros(self.breakpoints[-1], dtype='double') for _ in self.bins]
            us = [np.zeros(self.breakpoints[-1], dtype='double') for _ in self.bins]
        else:
            betas = [np.zeros(self.num_nodes, dtype='double') for _ in self.bins]
            us = [np.zeros(self.Dk.shape[0], dtype='double') for _ in self.bins]

        for i, _lambda in enumerate(lambda_grid):
            if self.verbose:
                print('\n#{0} Lambda = {1}'.format(i, _lambda))

            # Run the graph fused lasso over each bin with the current lambda value
            initial_values = (betas, zs, us) if self.k == 0 and self.trails is not None else (betas, us)
            self.run(_lambda, initial_values=initial_values)

            if self.verbose > 1:
                print('\tCalculating degrees of freedom and information criteria')

            for b, beta in enumerate(betas):
                if self.bins_allowed is not None and b not in self.bins_allowed:
                    continue

                # Count the number of free parameters in the grid (dof)
                # TODO: this is not really the true DoF, since a change in a higher node multiplies
                # the DoF in the lower nodes
                # dof_trace[b,i] = len(self.calc_plateaus(beta))
                dof_vals = self.Dk_minus_one.dot(beta) if self.k > 0 else beta
                plateaus = calc_plateaus(dof_vals, self.edges, rel_tol=0.01) if (self.k % 2) == 0 else nearly_unique(dof_vals, rel_tol=0.03)
                #plateaus = calc_plateaus(dof_vals, self.edges, rel_tol=1e-5) if (self.k % 2) == 0 else nearly_unique(dof_vals, rel_tol=1e-5)
                dof_trace[b,i] = max(1,len(plateaus)) #* (k+1)

                # Get the negative log-likelihood
                log_likelihood_trace[b,i] = self.data_log_likelihood(self.bins[b][-1], self.bins[b][-2], beta)

                # Calculate AIC = 2k - 2ln(L)
                aic_trace[b,i] = 2. * dof_trace[b,i] - 2. * log_likelihood_trace[b,i]

                # Calculate AICc = AIC + 2k * (k+1) / (n - k - 1)
                aicc_trace[b,i] = aic_trace[b,i] + 2 * dof_trace[b,i] * (dof_trace[b,i]+1) / (self.num_nodes - dof_trace[b,i] - 1.)

                # Calculate BIC = -2ln(L) + k * (ln(n) - ln(2pi))
                bic_trace[b,i] = -2 * log_likelihood_trace[b,i] + dof_trace[b,i] * (np.log(self.num_nodes) - np.log(2 * np.pi))

                # Track the best model thus far
                if aic_best_idx[b] is None or aic_trace[b,i] < aic_trace[b,aic_best_idx[b]]:
                    aic_best_idx[b] = i
                    aic_best_betas[b] = np.array(beta)

                # Track the best model thus far
                if aicc_best_idx[b] is None or aicc_trace[b,i] < aicc_trace[b,aicc_best_idx[b]]:
                    aicc_best_idx[b] = i
                    aicc_best_betas[b] = np.array(beta)

                # Track the best model thus far
                if bic_best_idx[b] is None or bic_trace[b,i] < bic_trace[b,bic_best_idx[b]]:
                    bic_best_idx[b] = i
                    bic_best_betas[b] = np.array(beta)

                if self.verbose and self.bins_allowed is not None:
                    print('\tBin {0} Log-Likelihood: {1} DoF: {2} AIC: {3} AICc: {4} BIC: {5}'.format(b, log_likelihood_trace[b,i], dof_trace[b,i], aic_trace[b,i], aicc_trace[b,i], bic_trace[b,i]))

            if self.verbose and self.bins_allowed is None:
                print('Overall Log-Likelihood: {0} DoF: {1} AIC: {2} AICc: {3} BIC: {4}'.format(log_likelihood_trace[:,i].sum(), dof_trace[:,i].sum(), aic_trace[:,i].sum(), aicc_trace[:,i].sum(), bic_trace[:,i].sum()))

        if self.verbose:
            print('')
            print('Best settings per bin:')
            for b, (aic_idx, aicc_idx, bic_idx) in enumerate(zip(aic_best_idx, aicc_best_idx, bic_best_idx)):
                if self.bins_allowed is not None and b not in self.bins_allowed:
                    continue
                left, mid, right, trials, successes = self.bins[b]
                print('\tBin #{0} ([{1}, {2}], split={3}) lambda: AIC={4:.2f} AICC={5:.2f} BIC={6:.2f} DoF: AIC={7:.0f} AICC={8:.0f} BIC={9:.0f}'.format(
                        b, left, right, mid,
                        lambda_grid[aic_idx], lambda_grid[aicc_idx], lambda_grid[bic_idx],
                        dof_trace[b,aic_idx], dof_trace[b,aicc_idx], dof_trace[b,bic_idx]))
            print('')

        if self.bins_allowed is None:
            if self.verbose:
                print('Creating densities from betas...')
            bic_density = self.density_from_betas(bic_best_betas)
            aic_density = self.density_from_betas(aic_best_betas)
            aicc_density = self.density_from_betas(aicc_best_betas)
            self.map_density = bic_density
        else:
            aic_density, aicc_density, bic_density = None, None, None

        self.map_betas = bic_best_betas

        return {'aic': aic_trace,
                'aicc': aicc_trace,
                'bic': bic_trace,
                'dof': dof_trace,
                'loglikelihood': log_likelihood_trace,
                'lambdas': lambda_grid,
                'aic_betas': aic_best_betas,
                'aicc_betas': aicc_best_betas,
                'bic_betas': bic_best_betas,
                'aic_best_idx': aic_best_idx,
                'aicc_best_idx': aicc_best_idx,
                'bic_best_idx': bic_best_idx,
                'aic_densities': aic_density.reshape(self.data_shape),
                'aicc_densities': aicc_density.reshape(self.data_shape),
                'bic_densities': bic_density.reshape(self.data_shape)}
[ "def", "solution_path", "(", "self", ")", ":", "lambda_grid", "=", "np", ".", "exp", "(", "np", ".", "linspace", "(", "np", ".", "log", "(", "self", ".", "max_lambda", ")", ",", "np", ".", "log", "(", "self", ".", "min_lambda", ")", ",", "self", ".", "lambda_bins", ")", ")", "aic_trace", "=", "np", ".", "zeros", "(", "(", "len", "(", "self", ".", "bins", ")", ",", "lambda_grid", ".", "shape", "[", "0", "]", ")", ")", "# The AIC score for each lambda value", "aicc_trace", "=", "np", ".", "zeros", "(", "(", "len", "(", "self", ".", "bins", ")", ",", "lambda_grid", ".", "shape", "[", "0", "]", ")", ")", "# The AICc score for each lambda value (correcting for finite sample size)", "bic_trace", "=", "np", ".", "zeros", "(", "(", "len", "(", "self", ".", "bins", ")", ",", "lambda_grid", ".", "shape", "[", "0", "]", ")", ")", "# The BIC score for each lambda value", "dof_trace", "=", "np", ".", "zeros", "(", "(", "len", "(", "self", ".", "bins", ")", ",", "lambda_grid", ".", "shape", "[", "0", "]", ")", ")", "# The degrees of freedom of each final solution", "log_likelihood_trace", "=", "np", ".", "zeros", "(", "(", "len", "(", "self", ".", "bins", ")", ",", "lambda_grid", ".", "shape", "[", "0", "]", ")", ")", "bic_best_idx", "=", "[", "None", "for", "_", "in", "self", ".", "bins", "]", "aic_best_idx", "=", "[", "None", "for", "_", "in", "self", ".", "bins", "]", "aicc_best_idx", "=", "[", "None", "for", "_", "in", "self", ".", "bins", "]", "bic_best_betas", "=", "[", "None", "for", "_", "in", "self", ".", "bins", "]", "aic_best_betas", "=", "[", "None", "for", "_", "in", "self", ".", "bins", "]", "aicc_best_betas", "=", "[", "None", "for", "_", "in", "self", ".", "bins", "]", "if", "self", ".", "k", "==", "0", "and", "self", ".", "trails", "is", "not", "None", ":", "betas", "=", "[", "np", ".", "zeros", "(", "self", ".", "num_nodes", ",", "dtype", "=", "'double'", ")", "for", "_", "in", "self", ".", "bins", "]", "zs", "=", "[", "np", ".", "zeros", "(", "self", ".", "breakpoints", "[", "-", "1", "]", ",", "dtype", "=", "'double'", ")", "for", "_", "in", "self", ".", "bins", "]", "us", "=", "[", "np", ".", "zeros", "(", "self", ".", "breakpoints", "[", "-", "1", "]", ",", "dtype", "=", "'double'", ")", "for", "_", "in", "self", ".", "bins", "]", "else", ":", "betas", "=", "[", "np", ".", "zeros", "(", "self", ".", "num_nodes", ",", "dtype", "=", "'double'", ")", "for", "_", "in", "self", ".", "bins", "]", "us", "=", "[", "np", ".", "zeros", "(", "self", ".", "Dk", ".", "shape", "[", "0", "]", ",", "dtype", "=", "'double'", ")", "for", "_", "in", "self", ".", "bins", "]", "for", "i", ",", "_lambda", "in", "enumerate", "(", "lambda_grid", ")", ":", "if", "self", ".", "verbose", ":", "print", "(", "'\\n#{0} Lambda = {1}'", ".", "format", "(", "i", ",", "_lambda", ")", ")", "# Run the graph fused lasso over each bin with the current lambda value", "initial_values", "=", "(", "betas", ",", "zs", ",", "us", ")", "if", "self", ".", "k", "==", "0", "and", "self", ".", "trails", "is", "not", "None", "else", "(", "betas", ",", "us", ")", "self", ".", "run", "(", "_lambda", ",", "initial_values", "=", "initial_values", ")", "if", "self", ".", "verbose", ">", "1", ":", "print", "(", "'\\tCalculating degrees of freedom and information criteria'", ")", "for", "b", ",", "beta", "in", "enumerate", "(", "betas", ")", ":", "if", "self", ".", "bins_allowed", "is", "not", "None", "and", "b", "not", "in", "self", ".", "bins_allowed", ":", "continue", "# Count the number of free parameters in the grid (dof)", "# 
TODO: this is not really the true DoF, since a change in a higher node multiplies", "# the DoF in the lower nodes", "# dof_trace[b,i] = len(self.calc_plateaus(beta))", "dof_vals", "=", "self", ".", "Dk_minus_one", ".", "dot", "(", "beta", ")", "if", "self", ".", "k", ">", "0", "else", "beta", "plateaus", "=", "calc_plateaus", "(", "dof_vals", ",", "self", ".", "edges", ",", "rel_tol", "=", "0.01", ")", "if", "(", "self", ".", "k", "%", "2", ")", "==", "0", "else", "nearly_unique", "(", "dof_vals", ",", "rel_tol", "=", "0.03", ")", "#plateaus = calc_plateaus(dof_vals, self.edges, rel_tol=1e-5) if (self.k % 2) == 0 else nearly_unique(dof_vals, rel_tol=1e-5)", "dof_trace", "[", "b", ",", "i", "]", "=", "max", "(", "1", ",", "len", "(", "plateaus", ")", ")", "#* (k+1)", "# Get the negative log-likelihood", "log_likelihood_trace", "[", "b", ",", "i", "]", "=", "self", ".", "data_log_likelihood", "(", "self", ".", "bins", "[", "b", "]", "[", "-", "1", "]", ",", "self", ".", "bins", "[", "b", "]", "[", "-", "2", "]", ",", "beta", ")", "# Calculate AIC = 2k - 2ln(L)", "aic_trace", "[", "b", ",", "i", "]", "=", "2.", "*", "dof_trace", "[", "b", ",", "i", "]", "-", "2.", "*", "log_likelihood_trace", "[", "b", ",", "i", "]", "# Calculate AICc = AIC + 2k * (k+1) / (n - k - 1)", "aicc_trace", "[", "b", ",", "i", "]", "=", "aic_trace", "[", "b", ",", "i", "]", "+", "2", "*", "dof_trace", "[", "b", ",", "i", "]", "*", "(", "dof_trace", "[", "b", ",", "i", "]", "+", "1", ")", "/", "(", "self", ".", "num_nodes", "-", "dof_trace", "[", "b", ",", "i", "]", "-", "1.", ")", "# Calculate BIC = -2ln(L) + k * (ln(n) - ln(2pi))", "bic_trace", "[", "b", ",", "i", "]", "=", "-", "2", "*", "log_likelihood_trace", "[", "b", ",", "i", "]", "+", "dof_trace", "[", "b", ",", "i", "]", "*", "(", "np", ".", "log", "(", "self", ".", "num_nodes", ")", "-", "np", ".", "log", "(", "2", "*", "np", ".", "pi", ")", ")", "# Track the best model thus far", "if", "aic_best_idx", "[", "b", "]", "is", "None", "or", "aic_trace", "[", "b", ",", "i", "]", "<", "aic_trace", "[", "b", ",", "aic_best_idx", "[", "b", "]", "]", ":", "aic_best_idx", "[", "b", "]", "=", "i", "aic_best_betas", "[", "b", "]", "=", "np", ".", "array", "(", "beta", ")", "# Track the best model thus far", "if", "aicc_best_idx", "[", "b", "]", "is", "None", "or", "aicc_trace", "[", "b", ",", "i", "]", "<", "aicc_trace", "[", "b", ",", "aicc_best_idx", "[", "b", "]", "]", ":", "aicc_best_idx", "[", "b", "]", "=", "i", "aicc_best_betas", "[", "b", "]", "=", "np", ".", "array", "(", "beta", ")", "# Track the best model thus far", "if", "bic_best_idx", "[", "b", "]", "is", "None", "or", "bic_trace", "[", "b", ",", "i", "]", "<", "bic_trace", "[", "b", ",", "bic_best_idx", "[", "b", "]", "]", ":", "bic_best_idx", "[", "b", "]", "=", "i", "bic_best_betas", "[", "b", "]", "=", "np", ".", "array", "(", "beta", ")", "if", "self", ".", "verbose", "and", "self", ".", "bins_allowed", "is", "not", "None", ":", "print", "(", "'\\tBin {0} Log-Likelihood: {1} DoF: {2} AIC: {3} AICc: {4} BIC: {5}'", ".", "format", "(", "b", ",", "log_likelihood_trace", "[", "b", ",", "i", "]", ",", "dof_trace", "[", "b", ",", "i", "]", ",", "aic_trace", "[", "b", ",", "i", "]", ",", "aicc_trace", "[", "b", ",", "i", "]", ",", "bic_trace", "[", "b", ",", "i", "]", ")", ")", "if", "self", ".", "verbose", "and", "self", ".", "bins_allowed", "is", "None", ":", "print", "(", "'Overall Log-Likelihood: {0} DoF: {1} AIC: {2} AICc: {3} BIC: {4}'", ".", "format", "(", "log_likelihood_trace", "[", ":", ",", "i", 
"]", ".", "sum", "(", ")", ",", "dof_trace", "[", ":", ",", "i", "]", ".", "sum", "(", ")", ",", "aic_trace", "[", ":", ",", "i", "]", ".", "sum", "(", ")", ",", "aicc_trace", "[", ":", ",", "i", "]", ".", "sum", "(", ")", ",", "bic_trace", "[", ":", ",", "i", "]", ".", "sum", "(", ")", ")", ")", "if", "self", ".", "verbose", ":", "print", "(", "''", ")", "print", "(", "'Best settings per bin:'", ")", "for", "b", ",", "(", "aic_idx", ",", "aicc_idx", ",", "bic_idx", ")", "in", "enumerate", "(", "zip", "(", "aic_best_idx", ",", "aicc_best_idx", ",", "bic_best_idx", ")", ")", ":", "if", "self", ".", "bins_allowed", "is", "not", "None", "and", "b", "not", "in", "self", ".", "bins_allowed", ":", "continue", "left", ",", "mid", ",", "right", ",", "trials", ",", "successes", "=", "self", ".", "bins", "[", "b", "]", "print", "(", "'\\tBin #{0} ([{1}, {2}], split={3}) lambda: AIC={4:.2f} AICC={5:.2f} BIC={6:.2f} DoF: AIC={7:.0f} AICC={8:.0f} BIC={9:.0f}'", ".", "format", "(", "b", ",", "left", ",", "right", ",", "mid", ",", "lambda_grid", "[", "aic_idx", "]", ",", "lambda_grid", "[", "aicc_idx", "]", ",", "lambda_grid", "[", "bic_idx", "]", ",", "dof_trace", "[", "b", ",", "aic_idx", "]", ",", "dof_trace", "[", "b", ",", "aicc_idx", "]", ",", "dof_trace", "[", "b", ",", "bic_idx", "]", ")", ")", "print", "(", "''", ")", "if", "self", ".", "bins_allowed", "is", "None", ":", "if", "self", ".", "verbose", ":", "print", "(", "'Creating densities from betas...'", ")", "bic_density", "=", "self", ".", "density_from_betas", "(", "bic_best_betas", ")", "aic_density", "=", "self", ".", "density_from_betas", "(", "aic_best_betas", ")", "aicc_density", "=", "self", ".", "density_from_betas", "(", "aicc_best_betas", ")", "self", ".", "map_density", "=", "bic_density", "else", ":", "aic_density", ",", "aicc_density", ",", "bic_density", "=", "None", ",", "None", ",", "None", "self", ".", "map_betas", "=", "bic_best_betas", "return", "{", "'aic'", ":", "aic_trace", ",", "'aicc'", ":", "aicc_trace", ",", "'bic'", ":", "bic_trace", ",", "'dof'", ":", "dof_trace", ",", "'loglikelihood'", ":", "log_likelihood_trace", ",", "'lambdas'", ":", "lambda_grid", ",", "'aic_betas'", ":", "aic_best_betas", ",", "'aicc_betas'", ":", "aicc_best_betas", ",", "'bic_betas'", ":", "bic_best_betas", ",", "'aic_best_idx'", ":", "aic_best_idx", ",", "'aicc_best_idx'", ":", "aicc_best_idx", ",", "'bic_best_idx'", ":", "bic_best_idx", ",", "'aic_densities'", ":", "aic_density", ".", "reshape", "(", "self", ".", "data_shape", ")", ",", "'aicc_densities'", ":", "aicc_density", ".", "reshape", "(", "self", ".", "data_shape", ")", ",", "'bic_densities'", ":", "bic_density", ".", "reshape", "(", "self", ".", "data_shape", ")", "}" ]
Follows the solution path of the generalized lasso to find the best lambda value.
[ "Follows", "the", "solution", "path", "of", "the", "generalized", "lasso", "to", "find", "the", "best", "lambda", "value", "." ]
train
https://github.com/tansey/gfl/blob/ae0f078bab57aba9e827ed6162f247ff9dc2aa19/pygfl/density.py#L123-L240
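Editor's note: the scoring inside solution_path's main loop reduces to three standard information criteria evaluated at each lambda. Pulled out here as standalone functions of the log-likelihood ll, degrees of freedom k and sample size n; the BIC variant deliberately matches the code's ln(n) - ln(2*pi) form rather than the more common k*ln(n) penalty.

import numpy as np

def aic(ll, k):
    # AIC = 2k - 2 ln(L)
    return 2.0 * k - 2.0 * ll

def aicc(ll, k, n):
    # Finite-sample correction; note it blows up as k approaches n - 1.
    return aic(ll, k) + 2.0 * k * (k + 1) / (n - k - 1.0)

def bic(ll, k, n):
    # BIC as written in solution_path: -2 ln(L) + k * (ln(n) - ln(2*pi))
    return -2.0 * ll + k * (np.log(n) - np.log(2 * np.pi))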
tansey/gfl
pygfl/density.py
GraphFusedDensity.run
def run(self, lam, initial_values=None):
        '''Run the graph-fused logit lasso with a fixed lambda penalty.'''
        if initial_values is not None:
            if self.k == 0 and self.trails is not None:
                betas, zs, us = initial_values
            else:
                betas, us = initial_values
        else:
            if self.k == 0 and self.trails is not None:
                betas = [np.zeros(self.num_nodes, dtype='double') for _ in self.bins]
                zs = [np.zeros(self.breakpoints[-1], dtype='double') for _ in self.bins]
                us = [np.zeros(self.breakpoints[-1], dtype='double') for _ in self.bins]
            else:
                betas = [np.zeros(self.num_nodes, dtype='double') for _ in self.bins]
                us = [np.zeros(self.Dk.shape[0], dtype='double') for _ in self.bins]

        for j, (left, mid, right, trials, successes) in enumerate(self.bins):
            if self.bins_allowed is not None and j not in self.bins_allowed:
                continue

            if self.verbose > 2:
                print('\tBin #{0} [{1},{2},{3}]'.format(j, left, mid, right))

            # if self.verbose > 3:
            #     print 'Trials:\n{0}'.format(pretty_str(trials))
            #     print ''
            #     print 'Successes:\n{0}'.format(pretty_str(successes))

            beta = betas[j]
            u = us[j]

            if self.k == 0 and self.trails is not None:
                z = zs[j]
                # Run the graph-fused lasso algorithm
                self.graphfl(len(beta), trials, successes, self.ntrails, self.trails, self.breakpoints, lam, self.alpha, self.inflate, self.max_steps, self.converge, beta, z, u)
            else:
                # Run the graph trend filtering algorithm
                self.graphtf(len(beta), trials, successes, lam, self.Dk.shape[0], self.Dk.shape[1], self.Dk.nnz, self.Dk.row.astype('int32'), self.Dk.col.astype('int32'), self.Dk.data.astype('double'), self.max_steps, self.converge, beta, u)

            beta = np.clip(beta, 1e-12, 1-1e-12) # numerical stability
            betas[j] = -np.log(1./beta - 1.) # convert back to natural parameter form

        return (betas, zs, us) if self.k == 0 and self.trails is not None else (betas, us)
python
def run(self, lam, initial_values=None):
    '''Run the graph-fused logit lasso with a fixed lambda penalty.'''
    if initial_values is not None:
        if self.k == 0 and self.trails is not None:
            betas, zs, us = initial_values
        else:
            betas, us = initial_values
    else:
        if self.k == 0 and self.trails is not None:
            betas = [np.zeros(self.num_nodes, dtype='double') for _ in self.bins]
            zs = [np.zeros(self.breakpoints[-1], dtype='double') for _ in self.bins]
            us = [np.zeros(self.breakpoints[-1], dtype='double') for _ in self.bins]
        else:
            betas = [np.zeros(self.num_nodes, dtype='double') for _ in self.bins]
            us = [np.zeros(self.Dk.shape[0], dtype='double') for _ in self.bins]
    for j, (left, mid, right, trials, successes) in enumerate(self.bins):
        if self.bins_allowed is not None and j not in self.bins_allowed:
            continue
        if self.verbose > 2:
            print('\tBin #{0} [{1},{2},{3}]'.format(j, left, mid, right))
        # if self.verbose > 3:
        #     print 'Trials:\n{0}'.format(pretty_str(trials))
        #     print ''
        #     print 'Successes:\n{0}'.format(pretty_str(successes))
        beta = betas[j]
        u = us[j]
        if self.k == 0 and self.trails is not None:
            z = zs[j]
            # Run the graph-fused lasso algorithm
            self.graphfl(len(beta), trials, successes, self.ntrails, self.trails, self.breakpoints, lam, self.alpha, self.inflate, self.max_steps, self.converge, beta, z, u)
        else:
            # Run the graph trend filtering algorithm
            self.graphtf(len(beta), trials, successes, lam, self.Dk.shape[0], self.Dk.shape[1], self.Dk.nnz, self.Dk.row.astype('int32'), self.Dk.col.astype('int32'), self.Dk.data.astype('double'), self.max_steps, self.converge, beta, u)
        beta = np.clip(beta, 1e-12, 1-1e-12)  # numerical stability
        betas[j] = -np.log(1./beta - 1.)  # convert back to natural parameter form
    return (betas, zs, us) if self.k == 0 and self.trails is not None else (betas, us)
[ "def", "run", "(", "self", ",", "lam", ",", "initial_values", "=", "None", ")", ":", "if", "initial_values", "is", "not", "None", ":", "if", "self", ".", "k", "==", "0", "and", "self", ".", "trails", "is", "not", "None", ":", "betas", ",", "zs", ",", "us", "=", "initial_values", "else", ":", "betas", ",", "us", "=", "initial_values", "else", ":", "if", "self", ".", "k", "==", "0", "and", "self", ".", "trails", "is", "not", "None", ":", "betas", "=", "[", "np", ".", "zeros", "(", "self", ".", "num_nodes", ",", "dtype", "=", "'double'", ")", "for", "_", "in", "self", ".", "bins", "]", "zs", "=", "[", "np", ".", "zeros", "(", "self", ".", "breakpoints", "[", "-", "1", "]", ",", "dtype", "=", "'double'", ")", "for", "_", "in", "self", ".", "bins", "]", "us", "=", "[", "np", ".", "zeros", "(", "self", ".", "breakpoints", "[", "-", "1", "]", ",", "dtype", "=", "'double'", ")", "for", "_", "in", "self", ".", "bins", "]", "else", ":", "betas", "=", "[", "np", ".", "zeros", "(", "self", ".", "num_nodes", ",", "dtype", "=", "'double'", ")", "for", "_", "in", "self", ".", "bins", "]", "us", "=", "[", "np", ".", "zeros", "(", "self", ".", "Dk", ".", "shape", "[", "0", "]", ",", "dtype", "=", "'double'", ")", "for", "_", "in", "self", ".", "bins", "]", "for", "j", ",", "(", "left", ",", "mid", ",", "right", ",", "trials", ",", "successes", ")", "in", "enumerate", "(", "self", ".", "bins", ")", ":", "if", "self", ".", "bins_allowed", "is", "not", "None", "and", "j", "not", "in", "self", ".", "bins_allowed", ":", "continue", "if", "self", ".", "verbose", ">", "2", ":", "print", "(", "'\\tBin #{0} [{1},{2},{3}]'", ".", "format", "(", "j", ",", "left", ",", "mid", ",", "right", ")", ")", "# if self.verbose > 3:", "# print 'Trials:\\n{0}'.format(pretty_str(trials))", "# print ''", "# print 'Successes:\\n{0}'.format(pretty_str(successes))", "beta", "=", "betas", "[", "j", "]", "u", "=", "us", "[", "j", "]", "if", "self", ".", "k", "==", "0", "and", "self", ".", "trails", "is", "not", "None", ":", "z", "=", "zs", "[", "j", "]", "# Run the graph-fused lasso algorithm", "self", ".", "graphfl", "(", "len", "(", "beta", ")", ",", "trials", ",", "successes", ",", "self", ".", "ntrails", ",", "self", ".", "trails", ",", "self", ".", "breakpoints", ",", "lam", ",", "self", ".", "alpha", ",", "self", ".", "inflate", ",", "self", ".", "max_steps", ",", "self", ".", "converge", ",", "beta", ",", "z", ",", "u", ")", "else", ":", "# Run the graph trend filtering algorithm", "self", ".", "graphtf", "(", "len", "(", "beta", ")", ",", "trials", ",", "successes", ",", "lam", ",", "self", ".", "Dk", ".", "shape", "[", "0", "]", ",", "self", ".", "Dk", ".", "shape", "[", "1", "]", ",", "self", ".", "Dk", ".", "nnz", ",", "self", ".", "Dk", ".", "row", ".", "astype", "(", "'int32'", ")", ",", "self", ".", "Dk", ".", "col", ".", "astype", "(", "'int32'", ")", ",", "self", ".", "Dk", ".", "data", ".", "astype", "(", "'double'", ")", ",", "self", ".", "max_steps", ",", "self", ".", "converge", ",", "beta", ",", "u", ")", "beta", "=", "np", ".", "clip", "(", "beta", ",", "1e-12", ",", "1", "-", "1e-12", ")", "# numerical stability", "betas", "[", "j", "]", "=", "-", "np", ".", "log", "(", "1.", "/", "beta", "-", "1.", ")", "# convert back to natural parameter form", "return", "(", "betas", ",", "zs", ",", "us", ")", "if", "self", ".", "k", "==", "0", "and", "self", ".", "trails", "is", "not", "None", "else", "(", "betas", ",", "us", ")" ]
Run the graph-fused logit lasso with a fixed lambda penalty.
[ "Run", "the", "graph", "-", "fused", "logit", "lasso", "with", "a", "fixed", "lambda", "penalty", "." ]
train
https://github.com/tansey/gfl/blob/ae0f078bab57aba9e827ed6162f247ff9dc2aa19/pygfl/density.py#L343-L391
tansey/gfl
pygfl/density.py
GraphFusedDensity.data_log_likelihood
def data_log_likelihood(self, successes, trials, beta):
    '''Calculates the log-likelihood of a Polya tree bin given the beta values.'''
    return binom.logpmf(successes, trials, 1.0 / (1 + np.exp(-beta))).sum()
python
def data_log_likelihood(self, successes, trials, beta):
    '''Calculates the log-likelihood of a Polya tree bin given the beta values.'''
    return binom.logpmf(successes, trials, 1.0 / (1 + np.exp(-beta))).sum()
[ "def", "data_log_likelihood", "(", "self", ",", "successes", ",", "trials", ",", "beta", ")", ":", "return", "binom", ".", "logpmf", "(", "successes", ",", "trials", ",", "1.0", "/", "(", "1", "+", "np", ".", "exp", "(", "-", "beta", ")", ")", ")", ".", "sum", "(", ")" ]
Calculates the log-likelihood of a Polya tree bin given the beta values.
[ "Calculates", "the", "log", "-", "likelihood", "of", "a", "Polya", "tree", "bin", "given", "the", "beta", "values", "." ]
train
https://github.com/tansey/gfl/blob/ae0f078bab57aba9e827ed6162f247ff9dc2aa19/pygfl/density.py#L393-L395
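The last two lines of run() above clip the solver output away from {0, 1} and apply the logit to return natural parameters. A minimal sketch of just that transform, in plain numpy and not tied to pygfl:

import numpy as np

p = np.array([0.0, 0.2, 0.5, 0.97, 1.0])  # toy probabilities from a solver
p = np.clip(p, 1e-12, 1 - 1e-12)           # numerical stability near 0 and 1
beta = -np.log(1. / p - 1.)                # logit: log(p / (1 - p))
print(beta)
print(1. / (1. + np.exp(-beta)))           # sigmoid recovers the clipped probabilities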
Blazemeter/apiritif
apiritif/loadgen.py
spawn_worker
def spawn_worker(params):
    """
    This method has to be module level function

    :type params: Params
    """
    setup_logging(params)
    log.info("Adding worker: idx=%s\tconcurrency=%s\tresults=%s", params.worker_index, params.concurrency, params.report)
    worker = Worker(params)
    worker.start()
    worker.join()
python
def spawn_worker(params):
    """
    This method has to be module level function

    :type params: Params
    """
    setup_logging(params)
    log.info("Adding worker: idx=%s\tconcurrency=%s\tresults=%s", params.worker_index, params.concurrency, params.report)
    worker = Worker(params)
    worker.start()
    worker.join()
[ "def", "spawn_worker", "(", "params", ")", ":", "setup_logging", "(", "params", ")", "log", ".", "info", "(", "\"Adding worker: idx=%s\\tconcurrency=%s\\tresults=%s\"", ",", "params", ".", "worker_index", ",", "params", ".", "concurrency", ",", "params", ".", "report", ")", "worker", "=", "Worker", "(", "params", ")", "worker", ".", "start", "(", ")", "worker", ".", "join", "(", ")" ]
This method has to be module level function :type params: Params
[ "This", "method", "has", "to", "be", "module", "level", "function" ]
train
https://github.com/Blazemeter/apiritif/blob/27b48a68425949998c2254e5e1e0226882d9eee8/apiritif/loadgen.py#L48-L59
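spawn_worker's docstring stresses that it must live at module level; that is the usual multiprocessing pickling constraint, since worker targets are pickled by qualified name and lambdas or bound methods fail. A hedged, self-contained sketch of the pattern (the params payload here is a hypothetical dict, not apiritif's Params class):

import multiprocessing

def spawn_worker(params):  # must be importable at module scope to be picklable
    print('worker got: %s' % (params,))

if __name__ == '__main__':
    pool = multiprocessing.Pool(2)
    pool.map(spawn_worker, [{'worker_index': 0}, {'worker_index': 1}])
    pool.close()
    pool.join()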
Blazemeter/apiritif
apiritif/loadgen.py
Worker.run_nose
def run_nose(self, params):
    """
    :type params: Params
    """
    thread.set_index(params.thread_index)
    log.debug("[%s] Starting nose iterations: %s", params.worker_index, params)
    assert isinstance(params.tests, list)
    # argv.extend(['--with-apiritif', '--nocapture', '--exe', '--nologcapture'])

    end_time = self.params.ramp_up + self.params.hold_for
    end_time += time.time() if end_time else 0
    time.sleep(params.delay)

    plugin = ApiritifPlugin(self._writer)
    self._writer.concurrency += 1

    config = Config(env=os.environ, files=all_config_files(), plugins=DefaultPluginManager())
    config.plugins.addPlugins(extraplugins=[plugin])
    config.testNames = params.tests
    config.verbosity = 3 if params.verbose else 0
    if params.verbose:
        config.stream = open(os.devnull, "w")  # FIXME: use "with", allow writing to file/log

    iteration = 0
    try:
        while True:
            log.debug("Starting iteration:: index=%d,start_time=%.3f", iteration, time.time())
            thread.set_iteration(iteration)
            ApiritifTestProgram(config=config)
            log.debug("Finishing iteration:: index=%d,end_time=%.3f", iteration, time.time())
            iteration += 1

            # reasons to stop
            if plugin.stop_reason:
                log.debug("[%s] finished prematurely: %s", params.worker_index, plugin.stop_reason)
            elif iteration >= params.iterations:
                log.debug("[%s] iteration limit reached: %s", params.worker_index, params.iterations)
            elif 0 < end_time <= time.time():
                log.debug("[%s] duration limit reached: %s", params.worker_index, params.hold_for)
            else:
                continue  # no stop condition was met, so keep iterating

            break
    finally:
        self._writer.concurrency -= 1

        if params.verbose:
            config.stream.close()
python
def run_nose(self, params):
    """
    :type params: Params
    """
    thread.set_index(params.thread_index)
    log.debug("[%s] Starting nose iterations: %s", params.worker_index, params)
    assert isinstance(params.tests, list)
    # argv.extend(['--with-apiritif', '--nocapture', '--exe', '--nologcapture'])

    end_time = self.params.ramp_up + self.params.hold_for
    end_time += time.time() if end_time else 0
    time.sleep(params.delay)

    plugin = ApiritifPlugin(self._writer)
    self._writer.concurrency += 1

    config = Config(env=os.environ, files=all_config_files(), plugins=DefaultPluginManager())
    config.plugins.addPlugins(extraplugins=[plugin])
    config.testNames = params.tests
    config.verbosity = 3 if params.verbose else 0
    if params.verbose:
        config.stream = open(os.devnull, "w")  # FIXME: use "with", allow writing to file/log

    iteration = 0
    try:
        while True:
            log.debug("Starting iteration:: index=%d,start_time=%.3f", iteration, time.time())
            thread.set_iteration(iteration)
            ApiritifTestProgram(config=config)
            log.debug("Finishing iteration:: index=%d,end_time=%.3f", iteration, time.time())
            iteration += 1

            # reasons to stop
            if plugin.stop_reason:
                log.debug("[%s] finished prematurely: %s", params.worker_index, plugin.stop_reason)
            elif iteration >= params.iterations:
                log.debug("[%s] iteration limit reached: %s", params.worker_index, params.iterations)
            elif 0 < end_time <= time.time():
                log.debug("[%s] duration limit reached: %s", params.worker_index, params.hold_for)
            else:
                continue  # no stop condition was met, so keep iterating

            break
    finally:
        self._writer.concurrency -= 1

        if params.verbose:
            config.stream.close()
[ "def", "run_nose", "(", "self", ",", "params", ")", ":", "thread", ".", "set_index", "(", "params", ".", "thread_index", ")", "log", ".", "debug", "(", "\"[%s] Starting nose iterations: %s\"", ",", "params", ".", "worker_index", ",", "params", ")", "assert", "isinstance", "(", "params", ".", "tests", ",", "list", ")", "# argv.extend(['--with-apiritif', '--nocapture', '--exe', '--nologcapture'])", "end_time", "=", "self", ".", "params", ".", "ramp_up", "+", "self", ".", "params", ".", "hold_for", "end_time", "+=", "time", ".", "time", "(", ")", "if", "end_time", "else", "0", "time", ".", "sleep", "(", "params", ".", "delay", ")", "plugin", "=", "ApiritifPlugin", "(", "self", ".", "_writer", ")", "self", ".", "_writer", ".", "concurrency", "+=", "1", "config", "=", "Config", "(", "env", "=", "os", ".", "environ", ",", "files", "=", "all_config_files", "(", ")", ",", "plugins", "=", "DefaultPluginManager", "(", ")", ")", "config", ".", "plugins", ".", "addPlugins", "(", "extraplugins", "=", "[", "plugin", "]", ")", "config", ".", "testNames", "=", "params", ".", "tests", "config", ".", "verbosity", "=", "3", "if", "params", ".", "verbose", "else", "0", "if", "params", ".", "verbose", ":", "config", ".", "stream", "=", "open", "(", "os", ".", "devnull", ",", "\"w\"", ")", "# FIXME: use \"with\", allow writing to file/log", "iteration", "=", "0", "try", ":", "while", "True", ":", "log", ".", "debug", "(", "\"Starting iteration:: index=%d,start_time=%.3f\"", ",", "iteration", ",", "time", ".", "time", "(", ")", ")", "thread", ".", "set_iteration", "(", "iteration", ")", "ApiritifTestProgram", "(", "config", "=", "config", ")", "log", ".", "debug", "(", "\"Finishing iteration:: index=%d,end_time=%.3f\"", ",", "iteration", ",", "time", ".", "time", "(", ")", ")", "iteration", "+=", "1", "# reasons to stop", "if", "plugin", ".", "stop_reason", ":", "log", ".", "debug", "(", "\"[%s] finished prematurely: %s\"", ",", "params", ".", "worker_index", ",", "plugin", ".", "stop_reason", ")", "elif", "iteration", ">=", "params", ".", "iterations", ":", "log", ".", "debug", "(", "\"[%s] iteration limit reached: %s\"", ",", "params", ".", "worker_index", ",", "params", ".", "iterations", ")", "elif", "0", "<", "end_time", "<=", "time", ".", "time", "(", ")", ":", "log", ".", "debug", "(", "\"[%s] duration limit reached: %s\"", ",", "params", ".", "worker_index", ",", "params", ".", "hold_for", ")", "else", ":", "continue", "# continue if no one is faced", "break", "finally", ":", "self", ".", "_writer", ".", "concurrency", "-=", "1", "if", "params", ".", "verbose", ":", "config", ".", "stream", ".", "close", "(", ")" ]
:type params: Params
[ ":", "type", "params", ":", "Params" ]
train
https://github.com/Blazemeter/apiritif/blob/27b48a68425949998c2254e5e1e0226882d9eee8/apiritif/loadgen.py#L161-L209
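run_nose's while loop checks three stop conditions after each pass: a premature stop from the plugin, an iteration limit, and a wall-clock deadline derived from ramp_up + hold_for. A stripped-down sketch of that pattern with all names illustrative (no nose or apiritif machinery):

import time

def iteration_loop(iterations_limit, hold_for):
    # end_time of 0 means "no deadline", mirroring the 0 < end_time check above
    end_time = time.time() + hold_for if hold_for else 0
    iteration = 0
    while True:
        iteration += 1  # one full test-suite pass would run here
        if iterations_limit and iteration >= iterations_limit:
            return 'iteration limit reached'
        if 0 < end_time <= time.time():
            return 'duration limit reached'

print(iteration_loop(iterations_limit=5, hold_for=0))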
Blazemeter/apiritif
apiritif/loadgen.py
JTLSampleWriter._write_single_sample
def _write_single_sample(self, sample):
    """
    :type sample: Sample
    """
    bytes = sample.extras.get("responseHeadersSize", 0) + 2 + sample.extras.get("responseBodySize", 0)

    message = sample.error_msg
    if not message:
        message = sample.extras.get("responseMessage")
    if not message:
        # use a distinct loop variable so the outer `sample` is not shadowed before writerow
        for subsample in sample.subsamples:
            if subsample.error_msg:
                message = subsample.error_msg
                break
            elif subsample.extras.get("responseMessage"):
                message = subsample.extras.get("responseMessage")
                break

    self.writer.writerow({
        "timeStamp": int(1000 * sample.start_time),
        "elapsed": int(1000 * sample.duration),
        "Latency": 0,  # TODO
        "label": sample.test_case,
        "bytes": bytes,
        "responseCode": sample.extras.get("responseCode"),
        "responseMessage": message,
        "allThreads": self.concurrency,  # TODO: there will be a problem aggregating concurrency for rare samples
        "success": "true" if sample.status == "PASSED" else "false",
    })

    self.out_stream.flush()
python
def _write_single_sample(self, sample):
    """
    :type sample: Sample
    """
    bytes = sample.extras.get("responseHeadersSize", 0) + 2 + sample.extras.get("responseBodySize", 0)

    message = sample.error_msg
    if not message:
        message = sample.extras.get("responseMessage")
    if not message:
        # use a distinct loop variable so the outer `sample` is not shadowed before writerow
        for subsample in sample.subsamples:
            if subsample.error_msg:
                message = subsample.error_msg
                break
            elif subsample.extras.get("responseMessage"):
                message = subsample.extras.get("responseMessage")
                break

    self.writer.writerow({
        "timeStamp": int(1000 * sample.start_time),
        "elapsed": int(1000 * sample.duration),
        "Latency": 0,  # TODO
        "label": sample.test_case,
        "bytes": bytes,
        "responseCode": sample.extras.get("responseCode"),
        "responseMessage": message,
        "allThreads": self.concurrency,  # TODO: there will be a problem aggregating concurrency for rare samples
        "success": "true" if sample.status == "PASSED" else "false",
    })

    self.out_stream.flush()
[ "def", "_write_single_sample", "(", "self", ",", "sample", ")", ":", "bytes", "=", "sample", ".", "extras", ".", "get", "(", "\"responseHeadersSize\"", ",", "0", ")", "+", "2", "+", "sample", ".", "extras", ".", "get", "(", "\"responseBodySize\"", ",", "0", ")", "message", "=", "sample", ".", "error_msg", "if", "not", "message", ":", "message", "=", "sample", ".", "extras", ".", "get", "(", "\"responseMessage\"", ")", "if", "not", "message", ":", "for", "sample", "in", "sample", ".", "subsamples", ":", "if", "sample", ".", "error_msg", ":", "message", "=", "sample", ".", "error_msg", "break", "elif", "sample", ".", "extras", ".", "get", "(", "\"responseMessage\"", ")", ":", "message", "=", "sample", ".", "extras", ".", "get", "(", "\"responseMessage\"", ")", "break", "self", ".", "writer", ".", "writerow", "(", "{", "\"timeStamp\"", ":", "int", "(", "1000", "*", "sample", ".", "start_time", ")", ",", "\"elapsed\"", ":", "int", "(", "1000", "*", "sample", ".", "duration", ")", ",", "\"Latency\"", ":", "0", ",", "# TODO", "\"label\"", ":", "sample", ".", "test_case", ",", "\"bytes\"", ":", "bytes", ",", "\"responseCode\"", ":", "sample", ".", "extras", ".", "get", "(", "\"responseCode\"", ")", ",", "\"responseMessage\"", ":", "message", ",", "\"allThreads\"", ":", "self", ".", "concurrency", ",", "# TODO: there will be a problem aggregating concurrency for rare samples", "\"success\"", ":", "\"true\"", "if", "sample", ".", "status", "==", "\"PASSED\"", "else", "\"false\"", ",", "}", ")", "self", ".", "out_stream", ".", "flush", "(", ")" ]
:type sample: Sample
[ ":", "type", "sample", ":", "Sample" ]
train
https://github.com/Blazemeter/apiritif/blob/27b48a68425949998c2254e5e1e0226882d9eee8/apiritif/loadgen.py#L346-L376
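_write_single_sample emits one JMeter-style JTL row per sample, converting start time and duration to milliseconds. A hedged sketch of that row format using csv.DictWriter with a reduced field set; the sample dict below is a stand-in for apiritif's Sample object:

import csv
import sys

fields = ["timeStamp", "elapsed", "label", "responseCode", "responseMessage", "success"]
writer = csv.DictWriter(sys.stdout, fieldnames=fields)
writer.writeheader()

sample = {"start_time": 1700000000.25, "duration": 0.132, "test_case": "test_index",
          "responseCode": "200", "responseMessage": "OK", "status": "PASSED"}
writer.writerow({
    "timeStamp": int(1000 * sample["start_time"]),  # JTL timestamps are in milliseconds
    "elapsed": int(1000 * sample["duration"]),
    "label": sample["test_case"],
    "responseCode": sample["responseCode"],
    "responseMessage": sample["responseMessage"],
    "success": "true" if sample["status"] == "PASSED" else "false",
})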
Blazemeter/apiritif
apiritif/loadgen.py
ApiritifPlugin.addError
def addError(self, test, error):
    """
    when a test raises an uncaught exception
    :param test:
    :param error:
    :return:
    """
    # test_dict will be None if startTest wasn't called (i.e. exception in setUp/setUpClass)
    # status=BROKEN
    if self.current_sample is not None:
        assertion_name = error[0].__name__
        error_msg = str(error[1]).split('\n')[0]
        error_trace = self._get_trace(error)

        self.current_sample.add_assertion(assertion_name)
        self.current_sample.set_assertion_failed(assertion_name, error_msg, error_trace)
python
def addError(self, test, error):
    """
    when a test raises an uncaught exception
    :param test:
    :param error:
    :return:
    """
    # test_dict will be None if startTest wasn't called (i.e. exception in setUp/setUpClass)
    # status=BROKEN
    if self.current_sample is not None:
        assertion_name = error[0].__name__
        error_msg = str(error[1]).split('\n')[0]
        error_trace = self._get_trace(error)

        self.current_sample.add_assertion(assertion_name)
        self.current_sample.set_assertion_failed(assertion_name, error_msg, error_trace)
[ "def", "addError", "(", "self", ",", "test", ",", "error", ")", ":", "# test_dict will be None if startTest wasn't called (i.e. exception in setUp/setUpClass)", "# status=BROKEN", "if", "self", ".", "current_sample", "is", "not", "None", ":", "assertion_name", "=", "error", "[", "0", "]", ".", "__name__", "error_msg", "=", "str", "(", "error", "[", "1", "]", ")", ".", "split", "(", "'\\n'", ")", "[", "0", "]", "error_trace", "=", "self", ".", "_get_trace", "(", "error", ")", "self", ".", "current_sample", ".", "add_assertion", "(", "assertion_name", ")", "self", ".", "current_sample", ".", "set_assertion_failed", "(", "assertion_name", ",", "error_msg", ",", "error_trace", ")" ]
when a test raises an uncaught exception :param test: :param error: :return:
[ "when", "a", "test", "raises", "an", "uncaught", "exception", ":", "param", "test", ":", ":", "param", "error", ":", ":", "return", ":" ]
train
https://github.com/Blazemeter/apiritif/blob/27b48a68425949998c2254e5e1e0226882d9eee8/apiritif/loadgen.py#L490-L504
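addError receives the standard (type, value, traceback) tuple, so error[0].__name__ is the exception class name and str(error[1]) its message. A small self-contained sketch of that unpacking using sys.exc_info():

import sys

try:
    raise ValueError('bad input\nwith extra detail')
except ValueError:
    error = sys.exc_info()                    # (type, value, traceback)
    assertion_name = error[0].__name__        # 'ValueError'
    error_msg = str(error[1]).split('\n')[0]  # first line only: 'bad input'
    print(assertion_name, '->', error_msg)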
crackinglandia/pype32
pype32/datadirs.py
Directory.parse
def parse(readDataInstance):
    """
    Returns a L{Directory}-like object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: L{ReadData} object to read from.

    @rtype: L{Directory}
    @return: L{Directory} object.
    """
    d = Directory()
    d.rva.value = readDataInstance.readDword()
    d.size.value = readDataInstance.readDword()
    return d
python
def parse(readDataInstance):
    """
    Returns a L{Directory}-like object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: L{ReadData} object to read from.

    @rtype: L{Directory}
    @return: L{Directory} object.
    """
    d = Directory()
    d.rva.value = readDataInstance.readDword()
    d.size.value = readDataInstance.readDword()
    return d
[ "def", "parse", "(", "readDataInstance", ")", ":", "d", "=", "Directory", "(", ")", "d", ".", "rva", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "d", ".", "size", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "return", "d" ]
Returns a L{Directory}-like object. @type readDataInstance: L{ReadData} @param readDataInstance: L{ReadData} object to read from. @rtype: L{Directory} @return: L{Directory} object.
[ "Returns", "a", "L", "{", "Directory", "}", "-", "like", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/datadirs.py#L79-L92
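Directory.parse reads two consecutive dwords, rva then size. An illustrative struct-based equivalent, independent of pype32's ReadData class (the 8-byte blob below is made up):

import struct

# An IMAGE_DATA_DIRECTORY entry is two little-endian 32-bit values:
# VirtualAddress (rva) followed by Size.
raw = b'\x00\x10\x00\x00\x28\x01\x00\x00'  # example 8-byte directory entry
rva, size = struct.unpack('<II', raw)
print(hex(rva), size)                      # 0x1000 296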
crackinglandia/pype32
pype32/datadirs.py
DataDirectory.parse
def parse(readDataInstance):
    """Returns a L{DataDirectory}-like object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: L{ReadData} object to read from.

    @rtype: L{DataDirectory}
    @return: The L{DataDirectory} object containing L{consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES} L{Directory} objects.

    @raise DirectoryEntriesLengthException: The L{ReadData} instance has an incorrect number of L{Directory} objects.
    """
    if len(readDataInstance) == consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES * 8:
        newDataDirectory = DataDirectory()
        for i in range(consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES):
            newDataDirectory[i].name.value = dirs[i]
            newDataDirectory[i].rva.value = readDataInstance.readDword()
            newDataDirectory[i].size.value = readDataInstance.readDword()
    else:
        raise excep.DirectoryEntriesLengthException("The IMAGE_NUMBEROF_DIRECTORY_ENTRIES does not match with the length of the passed argument.")
    return newDataDirectory
python
def parse(readDataInstance):
    """Returns a L{DataDirectory}-like object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: L{ReadData} object to read from.

    @rtype: L{DataDirectory}
    @return: The L{DataDirectory} object containing L{consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES} L{Directory} objects.

    @raise DirectoryEntriesLengthException: The L{ReadData} instance has an incorrect number of L{Directory} objects.
    """
    if len(readDataInstance) == consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES * 8:
        newDataDirectory = DataDirectory()
        for i in range(consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES):
            newDataDirectory[i].name.value = dirs[i]
            newDataDirectory[i].rva.value = readDataInstance.readDword()
            newDataDirectory[i].size.value = readDataInstance.readDword()
    else:
        raise excep.DirectoryEntriesLengthException("The IMAGE_NUMBEROF_DIRECTORY_ENTRIES does not match with the length of the passed argument.")
    return newDataDirectory
[ "def", "parse", "(", "readDataInstance", ")", ":", "if", "len", "(", "readDataInstance", ")", "==", "consts", ".", "IMAGE_NUMBEROF_DIRECTORY_ENTRIES", "*", "8", ":", "newDataDirectory", "=", "DataDirectory", "(", ")", "for", "i", "in", "range", "(", "consts", ".", "IMAGE_NUMBEROF_DIRECTORY_ENTRIES", ")", ":", "newDataDirectory", "[", "i", "]", ".", "name", ".", "value", "=", "dirs", "[", "i", "]", "newDataDirectory", "[", "i", "]", ".", "rva", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "newDataDirectory", "[", "i", "]", ".", "size", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "else", ":", "raise", "excep", ".", "DirectoryEntriesLengthException", "(", "\"The IMAGE_NUMBEROF_DIRECTORY_ENTRIES does not match with the length of the passed argument.\"", ")", "return", "newDataDirectory" ]
Returns a L{DataDirectory}-like object. @type readDataInstance: L{ReadData} @param readDataInstance: L{ReadData} object to read from. @rtype: L{DataDirectory} @return: The L{DataDirectory} object containing L{consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES} L{Directory} objects. @raise DirectoryEntriesLengthException: The L{ReadData} instance has an incorrect number of L{Directory} objects.
[ "Returns", "a", "L", "{", "DataDirectory", "}", "-", "like", "object", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/datadirs.py#L121-L140
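DataDirectory.parse first checks that exactly 16 * 8 bytes are available, then loops over the entries. A sketch of the same length check and (rva, size) unpacking with struct, using an all-zero placeholder blob rather than a real PE header:

import struct

IMAGE_NUMBEROF_DIRECTORY_ENTRIES = 16
raw = bytes(IMAGE_NUMBEROF_DIRECTORY_ENTRIES * 8)   # 128-byte all-zero placeholder
if len(raw) != IMAGE_NUMBEROF_DIRECTORY_ENTRIES * 8:
    raise ValueError('unexpected data directory length')
entries = [struct.unpack_from('<II', raw, i * 8) for i in range(IMAGE_NUMBEROF_DIRECTORY_ENTRIES)]
print(len(entries), entries[0])                     # 16 (0, 0)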
zsethna/OLGA
olga/compute_pgen.py
main
def main():
    """Compute Pgens from a file and output to another file."""

    parser = OptionParser(conflict_handler="resolve")

    parser.add_option('--humanTRA', '--human_T_alpha', action='store_true', dest='humanTRA', default=False, help='use default human TRA model (T cell alpha chain)')
    parser.add_option('--humanTRB', '--human_T_beta', action='store_true', dest='humanTRB', default=False, help='use default human TRB model (T cell beta chain)')
    parser.add_option('--mouseTRB', '--mouse_T_beta', action='store_true', dest='mouseTRB', default=False, help='use default mouse TRB model (T cell beta chain)')
    parser.add_option('--humanIGH', '--human_B_heavy', action='store_true', dest='humanIGH', default=False, help='use default human IGH model (B cell heavy chain)')
    parser.add_option('--set_custom_model_VDJ', dest='vdj_model_folder', metavar='PATH/TO/FOLDER/', help='specify PATH/TO/FOLDER/ for a custom VDJ generative model')
    parser.add_option('--set_custom_model_VJ', dest='vj_model_folder', metavar='PATH/TO/FOLDER/', help='specify PATH/TO/FOLDER/ for a custom VJ generative model')
    parser.add_option('-i', '--infile', dest='infile_name', metavar='PATH/TO/FILE', help='read in CDR3 sequences (and optionally V/J masks) from PATH/TO/FILE')
    parser.add_option('-o', '--outfile', dest='outfile_name', metavar='PATH/TO/FILE', help='write CDR3 sequences and pgens to PATH/TO/FILE')
    parser.add_option('--seq_in', '--seq_index', type='int', metavar='INDEX', dest='seq_in_index', default=0, help='specifies sequences to be read in are in column INDEX. Default is index 0 (the first column).')
    parser.add_option('--v_in', '--v_mask_index', type='int', metavar='INDEX', dest='V_mask_index', help='specifies V_masks are found in column INDEX in the input file. Default is no V mask.')
    parser.add_option('--j_in', '--j_mask_index', type='int', metavar='INDEX', dest='J_mask_index', help='specifies J_masks are found in column INDEX in the input file. Default is no J mask.')
    parser.add_option('--v_mask', type='string', dest='V_mask', help='specify V usage to condition Pgen on for seqs read in as arguments.')
    parser.add_option('--j_mask', type='string', dest='J_mask', help='specify J usage to condition Pgen on for seqs read in as arguments.')
    parser.add_option('-m', '--max_number_of_seqs', type='int', metavar='N', dest='max_number_of_seqs', help='compute Pgens for at most N sequences.')
    parser.add_option('--lines_to_skip', type='int', metavar='N', dest='lines_to_skip', default=0, help='skip the first N lines of the file. Default is 0.')
    parser.add_option('-a', '--alphabet_filename', dest='alphabet_filename', metavar='PATH/TO/FILE', help="specify PATH/TO/FILE defining a custom 'amino acid' alphabet. Default is no custom alphabet.")
    parser.add_option('--seq_type_out', type='choice', metavar='SEQ_TYPE', dest='seq_type_out', choices=['all', 'ntseq', 'nucleotide', 'aaseq', 'amino_acid'], help="if read in sequences are ntseqs, declare what type of sequence to compute pgen for. Default is all. Choices: 'all', 'ntseq', 'nucleotide', 'aaseq', 'amino_acid'")
    parser.add_option('--skip_off', '--skip_empty_off', action='store_true', dest='skip_empty', default=True, help='stop skipping empty or blank sequences/lines (if for example you want to keep line index fidelity between the infile and outfile).')
    parser.add_option('--display_off', action='store_false', dest='display_seqs', default=True, help='turn the sequence display off (only applies in write-to-file mode). Default is on.')
    parser.add_option('--num_lines_for_display', type='int', metavar='N', default=50, dest='num_lines_for_display', help='N lines of the output file are displayed when sequence display is on. Also used to determine the number of sequences to average over for speed and time estimates.')
    parser.add_option('--time_updates_off', action='store_false', dest='time_updates', default=True, help='turn time updates off (only applies when sequence display is disabled).')
    parser.add_option('--seqs_per_time_update', type='float', metavar='N', default=100, dest='seqs_per_time_update', help='specify the number of sequences between time updates. Default is 1e5.')
    parser.add_option('-d', '--delimiter', type='choice', dest='delimiter', choices=['tab', 'space', ',', ';', ':'], help="declare infile delimiter. Default is tab for .tsv input files, comma for .csv files, and any whitespace for all others. Choices: 'tab', 'space', ',', ';', ':'")
    parser.add_option('--raw_delimiter', type='str', dest='delimiter', help="declare infile delimiter as a raw string.")
    parser.add_option('--delimiter_out', type='choice', dest='delimiter_out', choices=['tab', 'space', ',', ';', ':'], help="declare outfile delimiter. Default is tab for .tsv output files, comma for .csv files, and the infile delimiter for all others. Choices: 'tab', 'space', ',', ';', ':'")
    parser.add_option('--raw_delimiter_out', type='str', dest='delimiter_out', help="declare the outfile delimiter as a raw string.")
    parser.add_option('--gene_mask_delimiter', type='choice', dest='gene_mask_delimiter', choices=['tab', 'space', ',', ';', ':'], help="declare gene mask delimiter. Default comma unless infile delimiter is comma, then default is a semicolon. Choices: 'tab', 'space', ',', ';', ':'")
    parser.add_option('--raw_gene_mask_delimiter', type='str', dest='gene_mask_delimiter', help="declare delimiter of gene masks as a raw string.")
    parser.add_option('--comment_delimiter', type='str', dest='comment_delimiter', help="character or string to indicate comment or header lines to skip.")

    (options, args) = parser.parse_args()

    #Check that the model is specified properly
    main_folder = os.path.dirname(__file__)

    default_models = {}
    default_models['humanTRA'] = [os.path.join(main_folder, 'default_models', 'human_T_alpha'), 'VJ']
    default_models['humanTRB'] = [os.path.join(main_folder, 'default_models', 'human_T_beta'), 'VDJ']
    default_models['mouseTRB'] = [os.path.join(main_folder, 'default_models', 'mouse_T_beta'), 'VDJ']
    default_models['humanIGH'] = [os.path.join(main_folder, 'default_models', 'human_B_heavy'), 'VDJ']

    num_models_specified = sum([1 for x in default_models.keys() + ['vj_model_folder', 'vdj_model_folder'] if getattr(options, x)])

    if num_models_specified == 1: #exactly one model specified
        try:
            d_model = [x for x in default_models.keys() if getattr(options, x)][0]
            model_folder = default_models[d_model][0]
            recomb_type = default_models[d_model][1]
        except IndexError:
            if options.vdj_model_folder: #custom VDJ model specified
                model_folder = options.vdj_model_folder
                recomb_type = 'VDJ'
            elif options.vj_model_folder: #custom VJ model specified
                model_folder = options.vj_model_folder
                recomb_type = 'VJ'
    elif num_models_specified == 0:
        print 'Need to indicate generative model.'
        print 'Exiting...'
        return -1
    elif num_models_specified > 1:
        print 'Only specify one model'
        print 'Exiting...'
        return -1

    #Check that all model and genomic files exist in the indicated model folder
    if not os.path.isdir(model_folder):
        print 'Check pathing... cannot find the model folder: ' + model_folder
        print 'Exiting...'
        return -1

    params_file_name = os.path.join(model_folder, 'model_params.txt')
    marginals_file_name = os.path.join(model_folder, 'model_marginals.txt')
    V_anchor_pos_file = os.path.join(model_folder, 'V_gene_CDR3_anchors.csv')
    J_anchor_pos_file = os.path.join(model_folder, 'J_gene_CDR3_anchors.csv')

    for x in [params_file_name, marginals_file_name, V_anchor_pos_file, J_anchor_pos_file]:
        if not os.path.isfile(x):
            print 'Cannot find: ' + x
            print 'Please check the files (and naming conventions) in the model folder ' + model_folder
            print 'Exiting...'
            return -1

    alphabet_filename = options.alphabet_filename #used if a custom alphabet is to be specified
    if alphabet_filename is not None:
        if not os.path.isfile(alphabet_filename):
            print 'Cannot find custom alphabet file: ' + alphabet_filename
            print 'Exiting...'
            return -1

    #Load up model based on recomb_type
    #VDJ recomb case --- used for TCRB and IGH
    if recomb_type == 'VDJ':
        genomic_data = load_model.GenomicDataVDJ()
        genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file)
        generative_model = load_model.GenerativeModelVDJ()
        generative_model.load_and_process_igor_model(marginals_file_name)
        pgen_model = generation_probability.GenerationProbabilityVDJ(generative_model, genomic_data, alphabet_filename)
    #VJ recomb case --- used for TCRA and light chain
    elif recomb_type == 'VJ':
        genomic_data = load_model.GenomicDataVJ()
        genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file)
        generative_model = load_model.GenerativeModelVJ()
        generative_model.load_and_process_igor_model(marginals_file_name)
        pgen_model = generation_probability.GenerationProbabilityVJ(generative_model, genomic_data, alphabet_filename)

    aa_alphabet = ''.join(pgen_model.codons_dict.keys())

    if options.infile_name is not None:
        infile_name = options.infile_name
        if not os.path.isfile(infile_name):
            print 'Cannot find input file: ' + infile_name
            print 'Exiting...'
            return -1

    if options.outfile_name is not None:
        outfile_name = options.outfile_name
        if os.path.isfile(outfile_name):
            if not raw_input(outfile_name + ' already exists. Overwrite (y/n)? ').strip().lower() in ['y', 'yes']:
                print 'Exiting...'
                return -1

    #Parse delimiter
    delimiter = options.delimiter
    if delimiter is None: #Default case
        if options.infile_name is None:
            delimiter = '\t'
        elif infile_name.endswith('.tsv'): #parse TAB separated value file
            delimiter = '\t'
        elif infile_name.endswith('.csv'): #parse COMMA separated value file
            delimiter = ','
    else:
        try:
            delimiter = {'tab': '\t', 'space': ' ', ',': ',', ';': ';', ':': ':'}[delimiter]
        except KeyError:
            pass #Other string passed as the delimiter.

    #Parse delimiter_out
    delimiter_out = options.delimiter_out
    if delimiter_out is None: #Default case
        if delimiter is None:
            delimiter_out = '\t'
        else:
            delimiter_out = delimiter
        if options.outfile_name is None:
            pass
        elif outfile_name.endswith('.tsv'): #output TAB separated value file
            delimiter_out = '\t'
        elif outfile_name.endswith('.csv'): #output COMMA separated value file
            delimiter_out = ','
    else:
        try:
            delimiter_out = {'tab': '\t', 'space': ' ', ',': ',', ';': ';', ':': ':'}[delimiter_out]
        except KeyError:
            pass #Other string passed as the delimiter.

    #Parse gene_delimiter
    gene_mask_delimiter = options.gene_mask_delimiter
    if gene_mask_delimiter is None: #Default case
        gene_mask_delimiter = ','
        if delimiter == ',':
            gene_mask_delimiter = ';'
    else:
        try:
            gene_mask_delimiter = {'tab': '\t', 'space': ' ', ',': ',', ';': ';', ':': ':'}[gene_mask_delimiter]
        except KeyError:
            pass #Other string passed as the delimiter.

    #More options
    time_updates = options.time_updates
    display_seqs = options.display_seqs
    num_lines_for_display = options.num_lines_for_display
    seq_in_index = options.seq_in_index #where in the line the sequence is after line.split(delimiter)
    lines_to_skip = options.lines_to_skip #one method of skipping header
    comment_delimiter = options.comment_delimiter #another method of skipping header
    seqs_per_time_update = options.seqs_per_time_update
    max_number_of_seqs = options.max_number_of_seqs
    V_mask_index = options.V_mask_index #Default is not conditioning on V identity
    J_mask_index = options.J_mask_index #Default is not conditioning on J identity
    skip_empty = options.skip_empty

    seq_type_out = options.seq_type_out #type of pgens to be computed. Can be ntseq, aaseq, or both
    if seq_type_out is not None:
        seq_type_out = {'all': None, 'ntseq': 'ntseq', 'nucleotide': 'ntseq', 'aaseq': 'aaseq', 'amino_acid': 'aaseq'}[seq_type_out]

    if options.infile_name is None: #No infile specified -- args should be the input seqs
        print_warnings = True
        seqs = args
        seq_types = [determine_seq_type(seq, aa_alphabet) for seq in seqs]
        unrecognized_seqs = [seq for i, seq in enumerate(seqs) if seq_types[i] is None]
        if len(unrecognized_seqs) > 0 and print_warnings:
            print 'The following sequences/arguments were not recognized: ' + ', '.join(unrecognized_seqs)
        seqs = [seq for i, seq in enumerate(seqs) if seq_types[i] is not None]
        seq_types = [seq_type for seq_type in seq_types if seq_type is not None]

        #Format V and J masks -- uniform for all argument input sequences
        try:
            V_mask = options.V_mask.split(',')
            unrecognized_v_genes = [v for v in V_mask if v not in pgen_model.V_mask_mapping.keys()]
            V_mask = [v for v in V_mask if v in pgen_model.V_mask_mapping.keys()]
            if len(unrecognized_v_genes) > 0:
                print 'These V genes/alleles are not recognized: ' + ', '.join(unrecognized_v_genes)
            if len(V_mask) == 0:
                print 'No recognized V genes/alleles in the provided V_mask. Continuing without conditioning on V usage.'
                V_mask = None
        except AttributeError:
            V_mask = options.V_mask #Default is None, i.e. not conditioning on V identity

        try:
            J_mask = options.J_mask.split(',')
            unrecognized_j_genes = [j for j in J_mask if j not in pgen_model.J_mask_mapping.keys()]
            J_mask = [j for j in J_mask if j in pgen_model.J_mask_mapping.keys()]
            if len(unrecognized_j_genes) > 0:
                print 'These J genes/alleles are not recognized: ' + ', '.join(unrecognized_j_genes)
            if len(J_mask) == 0:
                print 'No recognized J genes/alleles in the provided J_mask. Continuing without conditioning on J usage.'
                J_mask = None
        except AttributeError:
            J_mask = options.J_mask #Default is None, i.e. not conditioning on J identity

        print ''
        start_time = time.time()
        for seq, seq_type in zip(seqs, seq_types):
            if seq_type == 'aaseq':
                c_pgen = pgen_model.compute_aa_CDR3_pgen(seq, V_mask, J_mask, print_warnings)
                print 'Pgen of the amino acid sequence ' + seq + ': ' + str(c_pgen)
                print ''
            elif seq_type == 'regex':
                c_pgen = pgen_model.compute_regex_CDR3_template_pgen(seq, V_mask, J_mask, print_warnings)
                print 'Pgen of the regular expression sequence ' + seq + ': ' + str(c_pgen)
                print ''
            elif seq_type == 'ntseq':
                if seq_type_out is None or seq_type_out == 'ntseq':
                    c_pgen_nt = pgen_model.compute_nt_CDR3_pgen(seq, V_mask, J_mask, print_warnings)
                    print 'Pgen of the nucleotide sequence ' + seq + ': ' + str(c_pgen_nt)
                if seq_type_out is None or seq_type_out == 'aaseq':
                    c_pgen_aa = pgen_model.compute_aa_CDR3_pgen(nt2aa(seq), V_mask, J_mask, print_warnings)
                    print 'Pgen of the amino acid sequence nt2aa(' + seq + ') = ' + nt2aa(seq) + ': ' + str(c_pgen_aa)
                print ''

        c_time = time.time() - start_time
        if c_time > 86400: #more than a day
            c_time_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(c_time)/86400, (int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
        elif c_time > 3600: #more than an hr
            c_time_str = '%d hours, %d minutes, and %.2f seconds.'%((int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
        elif c_time > 60: #more than a min
            c_time_str = '%d minutes and %.2f seconds.'%((int(c_time)/60)%60, c_time%60)
        else:
            c_time_str = '%.2f seconds.'%(c_time)
        print 'Completed pgen computation in: ' + c_time_str

    else: #Read sequences in from file
        print_warnings = False #Most cases of reading in from file should have warnings disabled
        seqs = []
        seq_types = []
        V_usage_masks = []
        J_usage_masks = []

        infile = open(infile_name, 'r')

        for i, line in enumerate(infile):
            if comment_delimiter is not None: #Default case -- no comments/header delimiter
                if line.startswith(comment_delimiter): #allow comments
                    continue
            if i < lines_to_skip:
                continue

            if delimiter is None: #Default delimiter is any whitespace
                split_line = line.split()
            else:
                split_line = line.split(delimiter)

            #Find the seq
            try:
                seq = split_line[seq_in_index].strip()
                if len(seq.strip()) == 0:
                    if skip_empty:
                        continue
                    else:
                        seqs.append(seq) #keep the blank seq as a placeholder
                        seq_types.append('aaseq')
                else:
                    seqs.append(seq)
                    seq_types.append(determine_seq_type(seq, aa_alphabet))
            except IndexError: #no index match for seq
                if skip_empty and len(line.strip()) == 0:
                    continue
                print 'seq_in_index is out of range'
                print 'Exiting...'
                infile.close()
                return -1

            #Find and format V_usage_mask
            if V_mask_index is None:
                V_usage_masks.append(None) #default mask
            else:
                try:
                    V_usage_mask = split_line[V_mask_index].strip().split(gene_mask_delimiter)
                    #check that all V gene/allele names are recognized
                    if all([v in pgen_model.V_mask_mapping for v in V_usage_mask]):
                        V_usage_masks.append(V_usage_mask)
                    else:
                        print str(V_usage_mask) + " is not a usable V_usage_mask composed exclusively of recognized V gene/allele names"
                        print 'Unrecognized V gene/allele names: ' + ', '.join([v for v in V_usage_mask if not v in pgen_model.V_mask_mapping.keys()])
                        print 'Exiting...'
                        infile.close()
                        return -1
                except IndexError: #no index match for V_mask_index
                    print 'V_mask_index is out of range'
                    print 'Exiting...'
                    infile.close()
                    return -1

            #Find and format J_usage_mask
            if J_mask_index is None:
                J_usage_masks.append(None) #default mask
            else:
                try:
                    J_usage_mask = split_line[J_mask_index].strip().split(gene_mask_delimiter)
                    #check that all J gene/allele names are recognized
                    if all([j in pgen_model.J_mask_mapping for j in J_usage_mask]):
                        J_usage_masks.append(J_usage_mask)
                    else:
                        print str(J_usage_mask) + " is not a usable J_usage_mask composed exclusively of recognized J gene/allele names"
                        print 'Unrecognized J gene/allele names: ' + ', '.join([j for j in J_usage_mask if not j in pgen_model.J_mask_mapping.keys()])
                        print 'Exiting...'
                        infile.close()
                        return -1
                except IndexError: #no index match for J_mask_index
                    print 'J_mask_index is out of range'
                    print 'Exiting...'
                    infile.close()
                    return -1

            if max_number_of_seqs is not None:
                if len(seqs) >= max_number_of_seqs:
                    break

        unrecognized_seqs = [seq for i, seq in enumerate(seqs) if seq_types[i] is None]
        if len(unrecognized_seqs) > 0 and len(unrecognized_seqs) < len(seqs):
            if print_warnings or options.outfile_name is not None:
                print 'Some strings read in were not parsed as sequences -- they will be omitted.'
                print 'Examples of improperly read strings: '
                for unrecognized_seq in unrecognized_seqs[:10]:
                    print unrecognized_seq
            seqs = [seq for i, seq in enumerate(seqs) if seq_types[i] is not None]
            V_usage_masks = [V_usage_mask for i, V_usage_mask in enumerate(V_usage_masks) if seq_types[i] is not None]
            seq_types = [seq_type for seq_type in seq_types if seq_type is not None]
        elif len(unrecognized_seqs) > 0 and len(unrecognized_seqs) == len(seqs):
            print 'None of the read in strings were parsed as sequences. Check input file.'
            print 'Examples of improperly read strings:'
            for unrecognized_seq in unrecognized_seqs[:10]:
                print unrecognized_seq
            print 'Exiting...'
            return -1

        infile.close()

        if options.outfile_name is not None: #OUTFILE SPECIFIED, allow printed info/display
            print 'Successfully read in and formatted ' + str(len(seqs)) + ' sequences and any V or J usages.'
            if display_seqs:
                sys.stdout.write('\r'+'Continuing to Pgen computation in 3... ')
                sys.stdout.flush()
                time.sleep(0.4)
                sys.stdout.write('\r'+'Continuing to Pgen computation in 2... ')
                sys.stdout.flush()
                time.sleep(0.4)
                sys.stdout.write('\r'+'Continuing to Pgen computation in 1... ')
                sys.stdout.flush()
                time.sleep(0.4)
            else:
                print 'Continuing to Pgen computation.'
                print_warnings = True #Display is off, can print warnings

            if display_seqs:
                lines_for_display = []
                times_for_speed_calc = [time.time()]

            outfile = open(outfile_name, 'w')

            start_time = time.time()
            for i, seq in enumerate(seqs):
                if seq_types[i] == 'aaseq':
                    #Compute Pgen and print out
                    c_pgen_line = seq + delimiter_out + str(pgen_model.compute_aa_CDR3_pgen(seq, V_usage_masks[i], J_usage_masks[i], print_warnings))
                if seq_types[i] == 'regex':
                    #Compute Pgen and print out
                    c_pgen_line = seq + delimiter_out + str(pgen_model.compute_regex_CDR3_template_pgen(seq, V_usage_masks[i], J_usage_masks[i], print_warnings))
                elif seq_types[i] == 'ntseq':
                    ntseq = seq
                    if len(ntseq) % 3 == 0: #inframe sequence
                        aaseq = nt2aa(ntseq)
                        #Compute Pgen and print out based on recomb_type and seq_type_out
                        if seq_type_out is None:
                            c_pgen_line = ntseq + delimiter_out + str(pgen_model.compute_nt_CDR3_pgen(ntseq, V_usage_masks[i], J_usage_masks[i], print_warnings)) + delimiter_out + aaseq + delimiter_out + str(pgen_model.compute_aa_CDR3_pgen(aaseq, V_usage_masks[i], J_usage_masks[i], print_warnings))
                        elif seq_type_out == 'ntseq':
                            c_pgen_line = ntseq + delimiter_out + str(pgen_model.compute_nt_CDR3_pgen(ntseq, V_usage_masks[i], J_usage_masks[i], print_warnings))
                        elif seq_type_out == 'aaseq':
                            c_pgen_line = aaseq + delimiter_out + str(pgen_model.compute_aa_CDR3_pgen(aaseq, V_usage_masks[i], J_usage_masks[i], print_warnings))
                    else: #out of frame sequence -- Pgens are 0 and use 'out_of_frame' for aaseq
                        if seq_type_out is None:
                            c_pgen_line = ntseq + delimiter_out + '0' + delimiter_out + 'out_of_frame' + delimiter_out + '0'
                        elif seq_type_out == 'ntseq':
                            c_pgen_line = ntseq + delimiter_out + '0'
                        elif seq_type_out == 'aaseq':
                            c_pgen_line = 'out_of_frame' + delimiter_out + '0'

                outfile.write(c_pgen_line + '\n')

                #Print time update
                if display_seqs:
                    cc_time = time.time()
                    c_time = cc_time - start_time
                    times_for_speed_calc = [cc_time] + times_for_speed_calc[:num_lines_for_display]
                    c_avg_speed = (len(times_for_speed_calc)-1)/float(times_for_speed_calc[0] - times_for_speed_calc[-1])
                    #eta = ((len(seqs) - (i+1))/float(i+1))*c_time
                    eta = (len(seqs) - (i+1))/c_avg_speed
                    lines_for_display = [c_pgen_line] + lines_for_display[:num_lines_for_display]

                    c_time_str = '%s hours, %s minutes, and %s seconds.'%(repr(int(c_time)/3600).rjust(3), repr((int(c_time)/60)%60).rjust(2), repr(int(c_time)%60).rjust(2))
                    eta_str = '%s hours, %s minutes, and %s seconds.'%(repr(int(eta)/3600).rjust(3), repr((int(eta)/60)%60).rjust(2), repr(int(eta)%60).rjust(2))
                    time_str = 'Time to compute Pgen on %s seqs: %s \nEst. time for remaining %s seqs: %s'%(repr(i+1).rjust(9), c_time_str, repr(len(seqs) - (i + 1)).rjust(9), eta_str)
                    speed_str = 'Current Pgen computation speed: %s seqs/min'%(repr(round((len(times_for_speed_calc)-1)*60/float(times_for_speed_calc[0] - times_for_speed_calc[-1]), 2)).rjust(8))
                    display_str = '\n'.join(lines_for_display[::-1]) + '\n' + '-'*80 + '\n' + time_str + '\n' + speed_str + '\n' + '-'*80
                    print '\033[2J' + display_str
                elif (i+1)%seqs_per_time_update == 0 and time_updates:
                    c_time = time.time() - start_time
                    eta = ((len(seqs) - (i+1))/float(i+1))*c_time
                    if c_time > 86400: #more than a day
                        c_time_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(c_time)/86400, (int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
                    elif c_time > 3600: #more than an hr
                        c_time_str = '%d hours, %d minutes, and %.2f seconds.'%((int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
                    elif c_time > 60: #more than a min
                        c_time_str = '%d minutes and %.2f seconds.'%((int(c_time)/60)%60, c_time%60)
                    else:
                        c_time_str = '%.2f seconds.'%(c_time)
                    if eta > 86400: #more than a day
                        eta_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(eta)/86400, (int(eta)/3600)%24, (int(eta)/60)%60, eta%60)
                    elif eta > 3600: #more than an hr
                        eta_str = '%d hours, %d minutes, and %.2f seconds.'%((int(eta)/3600)%24, (int(eta)/60)%60, eta%60)
                    elif eta > 60: #more than a min
                        eta_str = '%d minutes and %.2f seconds.'%((int(eta)/60)%60, eta%60)
                    else:
                        eta_str = '%.2f seconds.'%(eta)
                    print 'Pgen computed for %d sequences in: %s Estimated time remaining: %s'%(i+1, c_time_str, eta_str)

            c_time = time.time() - start_time
            if c_time > 86400: #more than a day
                c_time_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(c_time)/86400, (int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
            elif c_time > 3600: #more than an hr
                c_time_str = '%d hours, %d minutes, and %.2f seconds.'%((int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
            elif c_time > 60: #more than a min
                c_time_str = '%d minutes and %.2f seconds.'%((int(c_time)/60)%60, c_time%60)
            else:
                c_time_str = '%.2f seconds.'%(c_time)
            print 'Completed Pgen computation for %d sequences: in %s'%(len(seqs), c_time_str)

            outfile.close()

        else: #NO OUTFILE -- print directly to stdout
            start_time = time.time()
            for i, seq in enumerate(seqs):
                if seq_types[i] == 'aaseq':
                    #Compute Pgen and print out
                    c_pgen_line = seq + delimiter_out + str(pgen_model.compute_aa_CDR3_pgen(seq, V_usage_masks[i], J_usage_masks[i], print_warnings))
                if seq_types[i] == 'regex':
                    #Compute Pgen and print out
                    c_pgen_line = seq + delimiter_out + str(pgen_model.compute_regex_CDR3_template_pgen(seq, V_usage_masks[i], J_usage_masks[i], print_warnings))
                elif seq_types[i] == 'ntseq':
                    ntseq = seq
                    if len(ntseq) % 3 == 0: #inframe sequence
                        aaseq = nt2aa(ntseq)
                        #Compute Pgen and print out based on recomb_type and seq_type_out
                        if seq_type_out is None:
                            c_pgen_line = ntseq + delimiter_out + str(pgen_model.compute_nt_CDR3_pgen(ntseq, V_usage_masks[i], J_usage_masks[i], print_warnings)) + delimiter_out + aaseq + delimiter_out + str(pgen_model.compute_aa_CDR3_pgen(aaseq, V_usage_masks[i], J_usage_masks[i], print_warnings))
                        elif seq_type_out == 'ntseq':
                            c_pgen_line = ntseq + delimiter_out + str(pgen_model.compute_nt_CDR3_pgen(ntseq, V_usage_masks[i], J_usage_masks[i], print_warnings))
                        elif seq_type_out == 'aaseq':
                            c_pgen_line = aaseq + delimiter_out + str(pgen_model.compute_aa_CDR3_pgen(aaseq, V_usage_masks[i], J_usage_masks[i], print_warnings))
                    else: #out of frame sequence -- Pgens are 0 and use 'out_of_frame' for aaseq
                        if seq_type_out is None:
                            c_pgen_line = ntseq + delimiter_out + '0' + delimiter_out + 'out_of_frame' + delimiter_out + '0'
                        elif seq_type_out == 'ntseq':
                            c_pgen_line = ntseq + delimiter_out + '0'
                        elif seq_type_out == 'aaseq':
                            c_pgen_line = 'out_of_frame' + delimiter_out + '0'

                print c_pgen_line
python
def main(): """Compute Pgens from a file and output to another file.""" parser = OptionParser(conflict_handler="resolve") parser.add_option('--humanTRA', '--human_T_alpha', action='store_true', dest='humanTRA', default=False, help='use default human TRA model (T cell alpha chain)') parser.add_option('--humanTRB', '--human_T_beta', action='store_true', dest='humanTRB', default=False, help='use default human TRB model (T cell beta chain)') parser.add_option('--mouseTRB', '--mouse_T_beta', action='store_true', dest='mouseTRB', default=False, help='use default mouse TRB model (T cell beta chain)') parser.add_option('--humanIGH', '--human_B_heavy', action='store_true', dest='humanIGH', default=False, help='use default human IGH model (B cell heavy chain)') parser.add_option('--set_custom_model_VDJ', dest='vdj_model_folder', metavar='PATH/TO/FOLDER/', help='specify PATH/TO/FOLDER/ for a custom VDJ generative model') parser.add_option('--set_custom_model_VJ', dest='vj_model_folder', metavar='PATH/TO/FOLDER/', help='specify PATH/TO/FOLDER/ for a custom VJ generative model') parser.add_option('-i', '--infile', dest = 'infile_name',metavar='PATH/TO/FILE', help='read in CDR3 sequences (and optionally V/J masks) from PATH/TO/FILE') parser.add_option('-o', '--outfile', dest = 'outfile_name', metavar='PATH/TO/FILE', help='write CDR3 sequences and pgens to PATH/TO/FILE') parser.add_option('--seq_in', '--seq_index', type='int', metavar='INDEX', dest='seq_in_index', default = 0, help='specifies sequences to be read in are in column INDEX. Default is index 0 (the first column).') parser.add_option('--v_in', '--v_mask_index', type='int', metavar='INDEX', dest='V_mask_index', help='specifies V_masks are found in column INDEX in the input file. Default is no V mask.') parser.add_option('--j_in', '--j_mask_index', type='int', metavar='INDEX', dest='J_mask_index', help='specifies J_masks are found in column INDEX in the input file. Default is no J mask.') parser.add_option('--v_mask', type='string', dest='V_mask', help='specify V usage to condition Pgen on for seqs read in as arguments.') parser.add_option('--j_mask', type='string', dest='J_mask', help='specify J usage to condition Pgen on for seqs read in as arguments.') parser.add_option('-m', '--max_number_of_seqs', type='int',metavar='N', dest='max_number_of_seqs', help='compute Pgens for at most N sequences.') parser.add_option('--lines_to_skip', type='int',metavar='N', dest='lines_to_skip', default = 0, help='skip the first N lines of the file. Default is 0.') parser.add_option('-a', '--alphabet_filename', dest='alphabet_filename', metavar='PATH/TO/FILE', help="specify PATH/TO/FILE defining a custom 'amino acid' alphabet. Default is no custom alphabet.") parser.add_option('--seq_type_out', type='choice',metavar='SEQ_TYPE', dest='seq_type_out', choices=['all', 'ntseq', 'nucleotide', 'aaseq', 'amino_acid'], help="if read in sequences are ntseqs, declare what type of sequence to compute pgen for. Default is all. Choices: 'all', 'ntseq', 'nucleotide', 'aaseq', 'amino_acid'") parser.add_option('--skip_off','--skip_empty_off', action='store_true', dest = 'skip_empty', default=True, help='stop skipping empty or blank sequences/lines (if for example you want to keep line index fidelity between the infile and outfile).') parser.add_option('--display_off', action='store_false', dest='display_seqs', default=True, help='turn the sequence display off (only applies in write-to-file mode). 
Default is on.')
    parser.add_option('--num_lines_for_display', type='int', metavar='N', default=50, dest='num_lines_for_display', help='N lines of the output file are displayed when sequence display is on. Also used to determine the number of sequences to average over for speed and time estimates.')
    parser.add_option('--time_updates_off', action='store_false', dest='time_updates', default=True, help='turn time updates off (only applies when sequence display is disabled).')
    parser.add_option('--seqs_per_time_update', type='float', metavar='N', default=100, dest='seqs_per_time_update', help='specify the number of sequences between time updates. Default is 100.')
    parser.add_option('-d', '--delimiter', type='choice', dest='delimiter', choices=['tab', 'space', ',', ';', ':'], help="declare infile delimiter. Default is tab for .tsv input files, comma for .csv files, and any whitespace for all others. Choices: 'tab', 'space', ',', ';', ':'")
    parser.add_option('--raw_delimiter', type='str', dest='delimiter', help="declare infile delimiter as a raw string.")
    parser.add_option('--delimiter_out', type='choice', dest='delimiter_out', choices=['tab', 'space', ',', ';', ':'], help="declare outfile delimiter. Default is tab for .tsv output files, comma for .csv files, and the infile delimiter for all others. Choices: 'tab', 'space', ',', ';', ':'")
    parser.add_option('--raw_delimiter_out', type='str', dest='delimiter_out', help="declare outfile delimiter as a raw string.")
    parser.add_option('--gene_mask_delimiter', type='choice', dest='gene_mask_delimiter', choices=['tab', 'space', ',', ';', ':'], help="declare gene mask delimiter. Default is comma, unless the infile delimiter is comma, in which case the default is a semicolon. Choices: 'tab', 'space', ',', ';', ':'")
    parser.add_option('--raw_gene_mask_delimiter', type='str', dest='gene_mask_delimiter', help="declare gene mask delimiter as a raw string.")
    parser.add_option('--comment_delimiter', type='str', dest='comment_delimiter', help="character or string to indicate comment or header lines to skip.")

    (options, args) = parser.parse_args()

    #Check that the model is specified properly
    main_folder = os.path.dirname(__file__)

    default_models = {}
    default_models['humanTRA'] = [os.path.join(main_folder, 'default_models', 'human_T_alpha'), 'VJ']
    default_models['humanTRB'] = [os.path.join(main_folder, 'default_models', 'human_T_beta'), 'VDJ']
    default_models['mouseTRB'] = [os.path.join(main_folder, 'default_models', 'mouse_T_beta'), 'VDJ']
    default_models['humanIGH'] = [os.path.join(main_folder, 'default_models', 'human_B_heavy'), 'VDJ']

    num_models_specified = sum([1 for x in default_models.keys() + ['vj_model_folder', 'vdj_model_folder'] if getattr(options, x)])

    if num_models_specified == 1: #exactly one model specified
        try:
            d_model = [x for x in default_models.keys() if getattr(options, x)][0]
            model_folder = default_models[d_model][0]
            recomb_type = default_models[d_model][1]
        except IndexError:
            if options.vdj_model_folder: #custom VDJ model specified
                model_folder = options.vdj_model_folder
                recomb_type = 'VDJ'
            elif options.vj_model_folder: #custom VJ model specified
                model_folder = options.vj_model_folder
                recomb_type = 'VJ'
    elif num_models_specified == 0:
        print 'Need to indicate generative model.'
        print 'Exiting...'
        return -1
    elif num_models_specified > 1:
        print 'Only specify one model.'
        print 'Exiting...'
        return -1

    #Check that all model and genomic files exist in the indicated model folder
    if not os.path.isdir(model_folder):
        print 'Check pathing... cannot find the model folder: ' + model_folder
        print 'Exiting...'
        return -1

    params_file_name = os.path.join(model_folder, 'model_params.txt')
    marginals_file_name = os.path.join(model_folder, 'model_marginals.txt')
    V_anchor_pos_file = os.path.join(model_folder, 'V_gene_CDR3_anchors.csv')
    J_anchor_pos_file = os.path.join(model_folder, 'J_gene_CDR3_anchors.csv')

    for x in [params_file_name, marginals_file_name, V_anchor_pos_file, J_anchor_pos_file]:
        if not os.path.isfile(x):
            print 'Cannot find: ' + x
            print 'Please check the files (and naming conventions) in the model folder ' + model_folder
            print 'Exiting...'
            return -1

    alphabet_filename = options.alphabet_filename #used if a custom alphabet is to be specified
    if alphabet_filename is not None:
        if not os.path.isfile(alphabet_filename):
            print 'Cannot find custom alphabet file: ' + alphabet_filename
            print 'Exiting...'
            return -1

    #Load up model based on recomb_type
    #VDJ recomb case --- used for TCRB and IGH
    if recomb_type == 'VDJ':
        genomic_data = load_model.GenomicDataVDJ()
        genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file)
        generative_model = load_model.GenerativeModelVDJ()
        generative_model.load_and_process_igor_model(marginals_file_name)
        pgen_model = generation_probability.GenerationProbabilityVDJ(generative_model, genomic_data, alphabet_filename)
    #VJ recomb case --- used for TCRA and light chain
    elif recomb_type == 'VJ':
        genomic_data = load_model.GenomicDataVJ()
        genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file)
        generative_model = load_model.GenerativeModelVJ()
        generative_model.load_and_process_igor_model(marginals_file_name)
        pgen_model = generation_probability.GenerationProbabilityVJ(generative_model, genomic_data, alphabet_filename)

    aa_alphabet = ''.join(pgen_model.codons_dict.keys())

    if options.infile_name is not None:
        infile_name = options.infile_name
        if not os.path.isfile(infile_name):
            print 'Cannot find input file: ' + infile_name
            print 'Exiting...'
            return -1

    if options.outfile_name is not None:
        outfile_name = options.outfile_name
        if os.path.isfile(outfile_name):
            if not raw_input(outfile_name + ' already exists. Overwrite (y/n)? ').strip().lower() in ['y', 'yes']:
                print 'Exiting...'
                return -1

    #Parse delimiter
    delimiter = options.delimiter
    if delimiter is None: #Default case
        if options.infile_name is None:
            delimiter = '\t'
        elif infile_name.endswith('.tsv'): #parse TAB separated value file
            delimiter = '\t'
        elif infile_name.endswith('.csv'): #parse COMMA separated value file
            delimiter = ','
    else:
        try:
            delimiter = {'tab': '\t', 'space': ' ', ',': ',', ';': ';', ':': ':'}[delimiter]
        except KeyError:
            pass #Other string passed as the delimiter.

    #Parse delimiter_out
    delimiter_out = options.delimiter_out
    if delimiter_out is None: #Default case
        if delimiter is None:
            delimiter_out = '\t'
        else:
            delimiter_out = delimiter
        if options.outfile_name is None:
            pass
        elif outfile_name.endswith('.tsv'): #output TAB separated value file
            delimiter_out = '\t'
        elif outfile_name.endswith('.csv'): #output COMMA separated value file
            delimiter_out = ','
    else:
        try:
            delimiter_out = {'tab': '\t', 'space': ' ', ',': ',', ';': ';', ':': ':'}[delimiter_out]
        except KeyError:
            pass #Other string passed as the delimiter.

    #Parse gene_mask_delimiter
    gene_mask_delimiter = options.gene_mask_delimiter
    if gene_mask_delimiter is None: #Default case
        gene_mask_delimiter = ','
        if delimiter == ',':
            gene_mask_delimiter = ';'
    else:
        try:
            gene_mask_delimiter = {'tab': '\t', 'space': ' ', ',': ',', ';': ';', ':': ':'}[gene_mask_delimiter]
        except KeyError:
            pass #Other string passed as the delimiter.

    #More options
    time_updates = options.time_updates
    display_seqs = options.display_seqs
    num_lines_for_display = options.num_lines_for_display
    seq_in_index = options.seq_in_index #where in the line the sequence is after line.split(delimiter)
    lines_to_skip = options.lines_to_skip #one method of skipping header
    comment_delimiter = options.comment_delimiter #another method of skipping header
    seqs_per_time_update = options.seqs_per_time_update
    max_number_of_seqs = options.max_number_of_seqs
    V_mask_index = options.V_mask_index #Default is not conditioning on V identity
    J_mask_index = options.J_mask_index #Default is not conditioning on J identity
    skip_empty = options.skip_empty

    seq_type_out = options.seq_type_out #type of pgens to be computed. Can be ntseq, aaseq, or both
    if seq_type_out is not None:
        seq_type_out = {'all': None, 'ntseq': 'ntseq', 'nucleotide': 'ntseq', 'aaseq': 'aaseq', 'amino_acid': 'aaseq'}[seq_type_out]

    if options.infile_name is None: #No infile specified -- args should be the input seqs
        print_warnings = True
        seqs = args
        seq_types = [determine_seq_type(seq, aa_alphabet) for seq in seqs]
        unrecognized_seqs = [seq for i, seq in enumerate(seqs) if seq_types[i] is None]
        if len(unrecognized_seqs) > 0 and print_warnings:
            print 'The following sequences/arguments were not recognized: ' + ', '.join(unrecognized_seqs)
        seqs = [seq for i, seq in enumerate(seqs) if seq_types[i] is not None]
        seq_types = [seq_type for seq_type in seq_types if seq_type is not None]

        #Format V and J masks -- uniform for all argument input sequences
        try:
            V_mask = options.V_mask.split(',')
            unrecognized_v_genes = [v for v in V_mask if v not in pgen_model.V_mask_mapping.keys()]
            V_mask = [v for v in V_mask if v in pgen_model.V_mask_mapping.keys()]
            if len(unrecognized_v_genes) > 0:
                print 'These V genes/alleles are not recognized: ' + ', '.join(unrecognized_v_genes)
            if len(V_mask) == 0:
                print 'No recognized V genes/alleles in the provided V_mask. Continuing without conditioning on V usage.'
                V_mask = None
        except AttributeError:
            V_mask = options.V_mask #Default is None, i.e. not conditioning on V identity

        try:
            J_mask = options.J_mask.split(',')
            unrecognized_j_genes = [j for j in J_mask if j not in pgen_model.J_mask_mapping.keys()]
            J_mask = [j for j in J_mask if j in pgen_model.J_mask_mapping.keys()]
            if len(unrecognized_j_genes) > 0:
                print 'These J genes/alleles are not recognized: ' + ', '.join(unrecognized_j_genes)
            if len(J_mask) == 0:
                print 'No recognized J genes/alleles in the provided J_mask. Continuing without conditioning on J usage.'
                J_mask = None
        except AttributeError:
            J_mask = options.J_mask #Default is None, i.e. not conditioning on J identity

        print ''

        start_time = time.time()
        for seq, seq_type in zip(seqs, seq_types):
            if seq_type == 'aaseq':
                c_pgen = pgen_model.compute_aa_CDR3_pgen(seq, V_mask, J_mask, print_warnings)
                print 'Pgen of the amino acid sequence ' + seq + ': ' + str(c_pgen)
                print ''
            elif seq_type == 'regex':
                c_pgen = pgen_model.compute_regex_CDR3_template_pgen(seq, V_mask, J_mask, print_warnings)
                print 'Pgen of the regular expression sequence ' + seq + ': ' + str(c_pgen)
                print ''
            elif seq_type == 'ntseq':
                if seq_type_out is None or seq_type_out == 'ntseq':
                    c_pgen_nt = pgen_model.compute_nt_CDR3_pgen(seq, V_mask, J_mask, print_warnings)
                    print 'Pgen of the nucleotide sequence ' + seq + ': ' + str(c_pgen_nt)
                if seq_type_out is None or seq_type_out == 'aaseq':
                    c_pgen_aa = pgen_model.compute_aa_CDR3_pgen(nt2aa(seq), V_mask, J_mask, print_warnings)
                    print 'Pgen of the amino acid sequence nt2aa(' + seq + ') = ' + nt2aa(seq) + ': ' + str(c_pgen_aa)
                print ''

        c_time = time.time() - start_time
        if c_time > 86400: #more than a day
            c_time_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(c_time)/86400, (int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
        elif c_time > 3600: #more than an hr
            c_time_str = '%d hours, %d minutes, and %.2f seconds.'%((int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
        elif c_time > 60: #more than a min
            c_time_str = '%d minutes and %.2f seconds.'%((int(c_time)/60)%60, c_time%60)
        else:
            c_time_str = '%.2f seconds.'%(c_time)

        print 'Completed pgen computation in: ' + c_time_str

    else: #Read sequences in from file
        print_warnings = False #Most cases of reading in from file should have warnings disabled
        seqs = []
        seq_types = []
        V_usage_masks = []
        J_usage_masks = []

        infile = open(infile_name, 'r')

        for i, line in enumerate(infile):
            if comment_delimiter is not None: #skip comment/header lines when a comment delimiter is specified
                if line.startswith(comment_delimiter): #allow comments
                    continue
            if i < lines_to_skip:
                continue

            if delimiter is None: #Default delimiter is any whitespace
                split_line = line.split()
            else:
                split_line = line.split(delimiter)

            #Find the seq
            try:
                seq = split_line[seq_in_index].strip()
                if len(seq.strip()) == 0:
                    if skip_empty:
                        continue
                    else:
                        seqs.append(seq) #keep the blank seq as a placeholder
                        seq_types.append('aaseq')
                else:
                    seqs.append(seq)
                    seq_types.append(determine_seq_type(seq, aa_alphabet))
            except IndexError: #no index match for seq
                if skip_empty and len(line.strip()) == 0:
                    continue
                print 'seq_in_index is out of range'
                print 'Exiting...'
                infile.close()
                return -1

            #Find and format V_usage_mask
            if V_mask_index is None:
                V_usage_masks.append(None) #default mask
            else:
                try:
                    V_usage_mask = split_line[V_mask_index].strip().split(gene_mask_delimiter)
                    #check that all V gene/allele names are recognized
                    if all([v in pgen_model.V_mask_mapping for v in V_usage_mask]):
                        V_usage_masks.append(V_usage_mask)
                    else:
                        print str(V_usage_mask) + " is not a usable V_usage_mask composed exclusively of recognized V gene/allele names"
                        print 'Unrecognized V gene/allele names: ' + ', '.join([v for v in V_usage_mask if not v in pgen_model.V_mask_mapping.keys()])
                        print 'Exiting...'
                        infile.close()
                        return -1
                except IndexError: #no index match for V_mask_index
                    print 'V_mask_index is out of range'
                    print 'Exiting...'
                    infile.close()
                    return -1

            #Find and format J_usage_mask
            if J_mask_index is None:
                J_usage_masks.append(None) #default mask
            else:
                try:
                    J_usage_mask = split_line[J_mask_index].strip().split(gene_mask_delimiter)
                    #check that all J gene/allele names are recognized
                    if all([j in pgen_model.J_mask_mapping for j in J_usage_mask]):
                        J_usage_masks.append(J_usage_mask)
                    else:
                        print str(J_usage_mask) + " is not a usable J_usage_mask composed exclusively of recognized J gene/allele names"
                        print 'Unrecognized J gene/allele names: ' + ', '.join([j for j in J_usage_mask if not j in pgen_model.J_mask_mapping.keys()])
                        print 'Exiting...'
                        infile.close()
                        return -1
                except IndexError: #no index match for J_mask_index
                    print 'J_mask_index is out of range'
                    print 'Exiting...'
                    infile.close()
                    return -1

            if max_number_of_seqs is not None:
                if len(seqs) >= max_number_of_seqs:
                    break

        unrecognized_seqs = [seq for i, seq in enumerate(seqs) if seq_types[i] is None]
        if len(unrecognized_seqs) > 0 and len(unrecognized_seqs) < len(seqs):
            if print_warnings or options.outfile_name is not None:
                print 'Some strings read in were not parsed as sequences -- they will be omitted.'
                print 'Examples of improperly read strings: '
                for unrecognized_seq in unrecognized_seqs[:10]:
                    print unrecognized_seq
            seqs = [seq for i, seq in enumerate(seqs) if seq_types[i] is not None]
            V_usage_masks = [V_usage_mask for i, V_usage_mask in enumerate(V_usage_masks) if seq_types[i] is not None]
            J_usage_masks = [J_usage_mask for i, J_usage_mask in enumerate(J_usage_masks) if seq_types[i] is not None] #also filter J masks so they stay aligned with seqs
            seq_types = [seq_type for seq_type in seq_types if seq_type is not None]
        elif len(unrecognized_seqs) > 0 and len(unrecognized_seqs) == len(seqs):
            print 'None of the read in strings were parsed as sequences. Check input file.'
            print 'Examples of improperly read strings:'
            for unrecognized_seq in unrecognized_seqs[:10]:
                print unrecognized_seq
            print 'Exiting...'
            return -1

        infile.close()

        if options.outfile_name is not None: #OUTFILE SPECIFIED, allow printed info/display
            print 'Successfully read in and formatted ' + str(len(seqs)) + ' sequences and any V or J usages.'
            if display_seqs:
                sys.stdout.write('\r' + 'Continuing to Pgen computation in 3... ')
                sys.stdout.flush()
                time.sleep(0.4)
                sys.stdout.write('\r' + 'Continuing to Pgen computation in 2... ')
                sys.stdout.flush()
                time.sleep(0.4)
                sys.stdout.write('\r' + 'Continuing to Pgen computation in 1... ')
                sys.stdout.flush()
                time.sleep(0.4)
            else:
                print 'Continuing to Pgen computation.'
                print_warnings = True #Display is off, can print warnings

            if display_seqs:
                lines_for_display = []
                times_for_speed_calc = [time.time()]

            outfile = open(outfile_name, 'w')

            start_time = time.time()
            for i, seq in enumerate(seqs):
                if seq_types[i] == 'aaseq':
                    #Compute Pgen and print out
                    c_pgen_line = seq + delimiter_out + str(pgen_model.compute_aa_CDR3_pgen(seq, V_usage_masks[i], J_usage_masks[i], print_warnings))
                elif seq_types[i] == 'regex':
                    #Compute Pgen and print out
                    c_pgen_line = seq + delimiter_out + str(pgen_model.compute_regex_CDR3_template_pgen(seq, V_usage_masks[i], J_usage_masks[i], print_warnings))
                elif seq_types[i] == 'ntseq':
                    ntseq = seq
                    if len(ntseq) % 3 == 0: #inframe sequence
                        aaseq = nt2aa(ntseq)
                        #Compute Pgen and print out based on recomb_type and seq_type_out
                        if seq_type_out is None:
                            c_pgen_line = ntseq + delimiter_out + str(pgen_model.compute_nt_CDR3_pgen(ntseq, V_usage_masks[i], J_usage_masks[i], print_warnings)) + delimiter_out + aaseq + delimiter_out + str(pgen_model.compute_aa_CDR3_pgen(aaseq, V_usage_masks[i], J_usage_masks[i], print_warnings))
                        elif seq_type_out == 'ntseq':
                            c_pgen_line = ntseq + delimiter_out + str(pgen_model.compute_nt_CDR3_pgen(ntseq, V_usage_masks[i], J_usage_masks[i], print_warnings))
                        elif seq_type_out == 'aaseq':
                            c_pgen_line = aaseq + delimiter_out + str(pgen_model.compute_aa_CDR3_pgen(aaseq, V_usage_masks[i], J_usage_masks[i], print_warnings))
                    else: #out of frame sequence -- Pgens are 0 and use 'out_of_frame' for aaseq
                        if seq_type_out is None:
                            c_pgen_line = ntseq + delimiter_out + '0' + delimiter_out + 'out_of_frame' + delimiter_out + '0'
                        elif seq_type_out == 'ntseq':
                            c_pgen_line = ntseq + delimiter_out + '0'
                        elif seq_type_out == 'aaseq':
                            c_pgen_line = 'out_of_frame' + delimiter_out + '0'

                outfile.write(c_pgen_line + '\n')

                #Print time update
                if display_seqs:
                    cc_time = time.time()
                    c_time = cc_time - start_time
                    times_for_speed_calc = [cc_time] + times_for_speed_calc[:num_lines_for_display]
                    c_avg_speed = (len(times_for_speed_calc) - 1)/float(times_for_speed_calc[0] - times_for_speed_calc[-1])
                    #eta = ((len(seqs) - (i+1))/float(i+1))*c_time
                    eta = (len(seqs) - (i+1))/c_avg_speed
                    lines_for_display = [c_pgen_line] + lines_for_display[:num_lines_for_display]
                    c_time_str = '%s hours, %s minutes, and %s seconds.'%(repr(int(c_time)/3600).rjust(3), repr((int(c_time)/60)%60).rjust(2), repr(int(c_time)%60).rjust(2))
                    eta_str = '%s hours, %s minutes, and %s seconds.'%(repr(int(eta)/3600).rjust(3), repr((int(eta)/60)%60).rjust(2), repr(int(eta)%60).rjust(2))
                    time_str = 'Time to compute Pgen on %s seqs: %s \nEst. time for remaining %s seqs: %s'%(repr(i+1).rjust(9), c_time_str, repr(len(seqs) - (i + 1)).rjust(9), eta_str)
                    speed_str = 'Current Pgen computation speed: %s seqs/min'%(repr(round((len(times_for_speed_calc)-1)*60/float(times_for_speed_calc[0] - times_for_speed_calc[-1]), 2)).rjust(8))
                    display_str = '\n'.join(lines_for_display[::-1]) + '\n' + '-'*80 + '\n' + time_str + '\n' + speed_str + '\n' + '-'*80
                    print '\033[2J' + display_str
                elif (i+1)%seqs_per_time_update == 0 and time_updates:
                    c_time = time.time() - start_time
                    eta = ((len(seqs) - (i+1))/float(i+1))*c_time
                    if c_time > 86400: #more than a day
                        c_time_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(c_time)/86400, (int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
                    elif c_time > 3600: #more than an hr
                        c_time_str = '%d hours, %d minutes, and %.2f seconds.'%((int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
                    elif c_time > 60: #more than a min
                        c_time_str = '%d minutes and %.2f seconds.'%((int(c_time)/60)%60, c_time%60)
                    else:
                        c_time_str = '%.2f seconds.'%(c_time)
                    if eta > 86400: #more than a day
                        eta_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(eta)/86400, (int(eta)/3600)%24, (int(eta)/60)%60, eta%60)
                    elif eta > 3600: #more than an hr
                        eta_str = '%d hours, %d minutes, and %.2f seconds.'%((int(eta)/3600)%24, (int(eta)/60)%60, eta%60)
                    elif eta > 60: #more than a min
                        eta_str = '%d minutes and %.2f seconds.'%((int(eta)/60)%60, eta%60)
                    else:
                        eta_str = '%.2f seconds.'%(eta)
                    print 'Pgen computed for %d sequences in: %s Estimated time remaining: %s'%(i+1, c_time_str, eta_str)

            c_time = time.time() - start_time
            if c_time > 86400: #more than a day
                c_time_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(c_time)/86400, (int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
            elif c_time > 3600: #more than an hr
                c_time_str = '%d hours, %d minutes, and %.2f seconds.'%((int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
            elif c_time > 60: #more than a min
                c_time_str = '%d minutes and %.2f seconds.'%((int(c_time)/60)%60, c_time%60)
            else:
                c_time_str = '%.2f seconds.'%(c_time)
            print 'Completed Pgen computation for %d sequences in: %s'%(len(seqs), c_time_str)

            outfile.close()

        else: #NO OUTFILE -- print directly to stdout
            start_time = time.time()
            for i, seq in enumerate(seqs):
                if seq_types[i] == 'aaseq':
                    #Compute Pgen and print out
                    c_pgen_line = seq + delimiter_out + str(pgen_model.compute_aa_CDR3_pgen(seq, V_usage_masks[i], J_usage_masks[i], print_warnings))
                elif seq_types[i] == 'regex':
                    #Compute Pgen and print out
                    c_pgen_line = seq + delimiter_out + str(pgen_model.compute_regex_CDR3_template_pgen(seq, V_usage_masks[i], J_usage_masks[i], print_warnings))
                elif seq_types[i] == 'ntseq':
                    ntseq = seq
                    if len(ntseq) % 3 == 0: #inframe sequence
                        aaseq = nt2aa(ntseq)
                        #Compute Pgen and print out based on recomb_type and seq_type_out
                        if seq_type_out is None:
                            c_pgen_line = ntseq + delimiter_out + str(pgen_model.compute_nt_CDR3_pgen(ntseq, V_usage_masks[i], J_usage_masks[i], print_warnings)) + delimiter_out + aaseq + delimiter_out + str(pgen_model.compute_aa_CDR3_pgen(aaseq, V_usage_masks[i], J_usage_masks[i], print_warnings))
                        elif seq_type_out == 'ntseq':
                            c_pgen_line = ntseq + delimiter_out + str(pgen_model.compute_nt_CDR3_pgen(ntseq, V_usage_masks[i], J_usage_masks[i], print_warnings))
                        elif seq_type_out == 'aaseq':
                            c_pgen_line = aaseq + delimiter_out + str(pgen_model.compute_aa_CDR3_pgen(aaseq, V_usage_masks[i], J_usage_masks[i], print_warnings))
                    else: #out of frame sequence -- Pgens are 0 and use 'out_of_frame' for aaseq
                        if seq_type_out is None:
                            c_pgen_line = ntseq + delimiter_out + '0' + delimiter_out + 'out_of_frame' + delimiter_out + '0'
                        elif seq_type_out == 'ntseq':
                            c_pgen_line = ntseq + delimiter_out + '0'
                        elif seq_type_out == 'aaseq':
                            c_pgen_line = 'out_of_frame' + delimiter_out + '0'

                print c_pgen_line
[ "def", "main", "(", ")", ":", "parser", "=", "OptionParser", "(", "conflict_handler", "=", "\"resolve\"", ")", "parser", ".", "add_option", "(", "'--humanTRA'", ",", "'--human_T_alpha'", ",", "action", "=", "'store_true'", ",", "dest", "=", "'humanTRA'", ",", "default", "=", "False", ",", "help", "=", "'use default human TRA model (T cell alpha chain)'", ")", "parser", ".", "add_option", "(", "'--humanTRB'", ",", "'--human_T_beta'", ",", "action", "=", "'store_true'", ",", "dest", "=", "'humanTRB'", ",", "default", "=", "False", ",", "help", "=", "'use default human TRB model (T cell beta chain)'", ")", "parser", ".", "add_option", "(", "'--mouseTRB'", ",", "'--mouse_T_beta'", ",", "action", "=", "'store_true'", ",", "dest", "=", "'mouseTRB'", ",", "default", "=", "False", ",", "help", "=", "'use default mouse TRB model (T cell beta chain)'", ")", "parser", ".", "add_option", "(", "'--humanIGH'", ",", "'--human_B_heavy'", ",", "action", "=", "'store_true'", ",", "dest", "=", "'humanIGH'", ",", "default", "=", "False", ",", "help", "=", "'use default human IGH model (B cell heavy chain)'", ")", "parser", ".", "add_option", "(", "'--set_custom_model_VDJ'", ",", "dest", "=", "'vdj_model_folder'", ",", "metavar", "=", "'PATH/TO/FOLDER/'", ",", "help", "=", "'specify PATH/TO/FOLDER/ for a custom VDJ generative model'", ")", "parser", ".", "add_option", "(", "'--set_custom_model_VJ'", ",", "dest", "=", "'vj_model_folder'", ",", "metavar", "=", "'PATH/TO/FOLDER/'", ",", "help", "=", "'specify PATH/TO/FOLDER/ for a custom VJ generative model'", ")", "parser", ".", "add_option", "(", "'-i'", ",", "'--infile'", ",", "dest", "=", "'infile_name'", ",", "metavar", "=", "'PATH/TO/FILE'", ",", "help", "=", "'read in CDR3 sequences (and optionally V/J masks) from PATH/TO/FILE'", ")", "parser", ".", "add_option", "(", "'-o'", ",", "'--outfile'", ",", "dest", "=", "'outfile_name'", ",", "metavar", "=", "'PATH/TO/FILE'", ",", "help", "=", "'write CDR3 sequences and pgens to PATH/TO/FILE'", ")", "parser", ".", "add_option", "(", "'--seq_in'", ",", "'--seq_index'", ",", "type", "=", "'int'", ",", "metavar", "=", "'INDEX'", ",", "dest", "=", "'seq_in_index'", ",", "default", "=", "0", ",", "help", "=", "'specifies sequences to be read in are in column INDEX. Default is index 0 (the first column).'", ")", "parser", ".", "add_option", "(", "'--v_in'", ",", "'--v_mask_index'", ",", "type", "=", "'int'", ",", "metavar", "=", "'INDEX'", ",", "dest", "=", "'V_mask_index'", ",", "help", "=", "'specifies V_masks are found in column INDEX in the input file. Default is no V mask.'", ")", "parser", ".", "add_option", "(", "'--j_in'", ",", "'--j_mask_index'", ",", "type", "=", "'int'", ",", "metavar", "=", "'INDEX'", ",", "dest", "=", "'J_mask_index'", ",", "help", "=", "'specifies J_masks are found in column INDEX in the input file. 
Default is no J mask.'", ")", "parser", ".", "add_option", "(", "'--v_mask'", ",", "type", "=", "'string'", ",", "dest", "=", "'V_mask'", ",", "help", "=", "'specify V usage to condition Pgen on for seqs read in as arguments.'", ")", "parser", ".", "add_option", "(", "'--j_mask'", ",", "type", "=", "'string'", ",", "dest", "=", "'J_mask'", ",", "help", "=", "'specify J usage to condition Pgen on for seqs read in as arguments.'", ")", "parser", ".", "add_option", "(", "'-m'", ",", "'--max_number_of_seqs'", ",", "type", "=", "'int'", ",", "metavar", "=", "'N'", ",", "dest", "=", "'max_number_of_seqs'", ",", "help", "=", "'compute Pgens for at most N sequences.'", ")", "parser", ".", "add_option", "(", "'--lines_to_skip'", ",", "type", "=", "'int'", ",", "metavar", "=", "'N'", ",", "dest", "=", "'lines_to_skip'", ",", "default", "=", "0", ",", "help", "=", "'skip the first N lines of the file. Default is 0.'", ")", "parser", ".", "add_option", "(", "'-a'", ",", "'--alphabet_filename'", ",", "dest", "=", "'alphabet_filename'", ",", "metavar", "=", "'PATH/TO/FILE'", ",", "help", "=", "\"specify PATH/TO/FILE defining a custom 'amino acid' alphabet. Default is no custom alphabet.\"", ")", "parser", ".", "add_option", "(", "'--seq_type_out'", ",", "type", "=", "'choice'", ",", "metavar", "=", "'SEQ_TYPE'", ",", "dest", "=", "'seq_type_out'", ",", "choices", "=", "[", "'all'", ",", "'ntseq'", ",", "'nucleotide'", ",", "'aaseq'", ",", "'amino_acid'", "]", ",", "help", "=", "\"if read in sequences are ntseqs, declare what type of sequence to compute pgen for. Default is all. Choices: 'all', 'ntseq', 'nucleotide', 'aaseq', 'amino_acid'\"", ")", "parser", ".", "add_option", "(", "'--skip_off'", ",", "'--skip_empty_off'", ",", "action", "=", "'store_true'", ",", "dest", "=", "'skip_empty'", ",", "default", "=", "True", ",", "help", "=", "'stop skipping empty or blank sequences/lines (if for example you want to keep line index fidelity between the infile and outfile).'", ")", "parser", ".", "add_option", "(", "'--display_off'", ",", "action", "=", "'store_false'", ",", "dest", "=", "'display_seqs'", ",", "default", "=", "True", ",", "help", "=", "'turn the sequence display off (only applies in write-to-file mode). Default is on.'", ")", "parser", ".", "add_option", "(", "'--num_lines_for_display'", ",", "type", "=", "'int'", ",", "metavar", "=", "'N'", ",", "default", "=", "50", ",", "dest", "=", "'num_lines_for_display'", ",", "help", "=", "'N lines of the output file are displayed when sequence display is on. Also used to determine the number of sequences to average over for speed and time estimates.'", ")", "parser", ".", "add_option", "(", "'--time_updates_off'", ",", "action", "=", "'store_false'", ",", "dest", "=", "'time_updates'", ",", "default", "=", "True", ",", "help", "=", "'turn time updates off (only applies when sequence display is disabled).'", ")", "parser", ".", "add_option", "(", "'--seqs_per_time_update'", ",", "type", "=", "'float'", ",", "metavar", "=", "'N'", ",", "default", "=", "100", ",", "dest", "=", "'seqs_per_time_update'", ",", "help", "=", "'specify the number of sequences between time updates. Default is 1e5.'", ")", "parser", ".", "add_option", "(", "'-d'", ",", "'--delimiter'", ",", "type", "=", "'choice'", ",", "dest", "=", "'delimiter'", ",", "choices", "=", "[", "'tab'", ",", "'space'", ",", "','", ",", "';'", ",", "':'", "]", ",", "help", "=", "\"declare infile delimiter. Default is tab for .tsv input files, comma for .csv files, and any whitespace for all others. 
Choices: 'tab', 'space', ',', ';', ':'\"", ")", "parser", ".", "add_option", "(", "'--raw_delimiter'", ",", "type", "=", "'str'", ",", "dest", "=", "'delimiter'", ",", "help", "=", "\"declare infile delimiter as a raw string.\"", ")", "parser", ".", "add_option", "(", "'--delimiter_out'", ",", "type", "=", "'choice'", ",", "dest", "=", "'delimiter_out'", ",", "choices", "=", "[", "'tab'", ",", "'space'", ",", "','", ",", "';'", ",", "':'", "]", ",", "help", "=", "\"declare outfile delimiter. Default is tab for .tsv output files, comma for .csv files, and the infile delimiter for all others. Choices: 'tab', 'space', ',', ';', ':'\"", ")", "parser", ".", "add_option", "(", "'--raw_delimiter_out'", ",", "type", "=", "'str'", ",", "dest", "=", "'delimiter_out'", ",", "help", "=", "\"declare for the delimiter outfile as a raw string.\"", ")", "parser", ".", "add_option", "(", "'--gene_mask_delimiter'", ",", "type", "=", "'choice'", ",", "dest", "=", "'gene_mask_delimiter'", ",", "choices", "=", "[", "'tab'", ",", "'space'", ",", "','", ",", "';'", ",", "':'", "]", ",", "help", "=", "\"declare gene mask delimiter. Default comma unless infile delimiter is comma, then default is a semicolon. Choices: 'tab', 'space', ',', ';', ':'\"", ")", "parser", ".", "add_option", "(", "'--raw_gene_mask_delimiter'", ",", "type", "=", "'str'", ",", "dest", "=", "'gene_mask_delimiter'", ",", "help", "=", "\"declare delimiter of gene masks as a raw string.\"", ")", "parser", ".", "add_option", "(", "'--comment_delimiter'", ",", "type", "=", "'str'", ",", "dest", "=", "'comment_delimiter'", ",", "help", "=", "\"character or string to indicate comment or header lines to skip.\"", ")", "(", "options", ",", "args", ")", "=", "parser", ".", "parse_args", "(", ")", "#Check that the model is specified properly", "main_folder", "=", "os", ".", "path", ".", "dirname", "(", "__file__", ")", "default_models", "=", "{", "}", "default_models", "[", "'humanTRA'", "]", "=", "[", "os", ".", "path", ".", "join", "(", "main_folder", ",", "'default_models'", ",", "'human_T_alpha'", ")", ",", "'VJ'", "]", "default_models", "[", "'humanTRB'", "]", "=", "[", "os", ".", "path", ".", "join", "(", "main_folder", ",", "'default_models'", ",", "'human_T_beta'", ")", ",", "'VDJ'", "]", "default_models", "[", "'mouseTRB'", "]", "=", "[", "os", ".", "path", ".", "join", "(", "main_folder", ",", "'default_models'", ",", "'mouse_T_beta'", ")", ",", "'VDJ'", "]", "default_models", "[", "'humanIGH'", "]", "=", "[", "os", ".", "path", ".", "join", "(", "main_folder", ",", "'default_models'", ",", "'human_B_heavy'", ")", ",", "'VDJ'", "]", "num_models_specified", "=", "sum", "(", "[", "1", "for", "x", "in", "default_models", ".", "keys", "(", ")", "+", "[", "'vj_model_folder'", ",", "'vdj_model_folder'", "]", "if", "getattr", "(", "options", ",", "x", ")", "]", ")", "if", "num_models_specified", "==", "1", ":", "#exactly one model specified", "try", ":", "d_model", "=", "[", "x", "for", "x", "in", "default_models", ".", "keys", "(", ")", "if", "getattr", "(", "options", ",", "x", ")", "]", "[", "0", "]", "model_folder", "=", "default_models", "[", "d_model", "]", "[", "0", "]", "recomb_type", "=", "default_models", "[", "d_model", "]", "[", "1", "]", "except", "IndexError", ":", "if", "options", ".", "vdj_model_folder", ":", "#custom VDJ model specified", "model_folder", "=", "options", ".", "vdj_model_folder", "recomb_type", "=", "'VDJ'", "elif", "options", ".", "vj_model_folder", ":", "#custom VJ model specified", "model_folder", "=", "options", 
".", "vj_model_folder", "recomb_type", "=", "'VJ'", "elif", "num_models_specified", "==", "0", ":", "print", "'Need to indicate generative model.'", "print", "'Exiting...'", "return", "-", "1", "elif", "num_models_specified", ">", "1", ":", "print", "'Only specify one model'", "print", "'Exiting...'", "return", "-", "1", "#Check that all model and genomic files exist in the indicated model folder", "if", "not", "os", ".", "path", ".", "isdir", "(", "model_folder", ")", ":", "print", "'Check pathing... cannot find the model folder: '", "+", "model_folder", "print", "'Exiting...'", "return", "-", "1", "params_file_name", "=", "os", ".", "path", ".", "join", "(", "model_folder", ",", "'model_params.txt'", ")", "marginals_file_name", "=", "os", ".", "path", ".", "join", "(", "model_folder", ",", "'model_marginals.txt'", ")", "V_anchor_pos_file", "=", "os", ".", "path", ".", "join", "(", "model_folder", ",", "'V_gene_CDR3_anchors.csv'", ")", "J_anchor_pos_file", "=", "os", ".", "path", ".", "join", "(", "model_folder", ",", "'J_gene_CDR3_anchors.csv'", ")", "for", "x", "in", "[", "params_file_name", ",", "marginals_file_name", ",", "V_anchor_pos_file", ",", "J_anchor_pos_file", "]", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "x", ")", ":", "print", "'Cannot find: '", "+", "x", "print", "'Please check the files (and naming conventions) in the model folder '", "+", "model_folder", "print", "'Exiting...'", "return", "-", "1", "alphabet_filename", "=", "options", ".", "alphabet_filename", "#used if a custom alphabet is to be specified", "if", "alphabet_filename", "is", "not", "None", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "alphabet_filename", ")", ":", "print", "'Cannot find custom alphabet file: '", "+", "infile_name", "print", "'Exiting...'", "return", "-", "1", "#Load up model based on recomb_type", "#VDJ recomb case --- used for TCRB and IGH", "if", "recomb_type", "==", "'VDJ'", ":", "genomic_data", "=", "load_model", ".", "GenomicDataVDJ", "(", ")", "genomic_data", ".", "load_igor_genomic_data", "(", "params_file_name", ",", "V_anchor_pos_file", ",", "J_anchor_pos_file", ")", "generative_model", "=", "load_model", ".", "GenerativeModelVDJ", "(", ")", "generative_model", ".", "load_and_process_igor_model", "(", "marginals_file_name", ")", "pgen_model", "=", "generation_probability", ".", "GenerationProbabilityVDJ", "(", "generative_model", ",", "genomic_data", ",", "alphabet_filename", ")", "#VJ recomb case --- used for TCRA and light chain", "elif", "recomb_type", "==", "'VJ'", ":", "genomic_data", "=", "load_model", ".", "GenomicDataVJ", "(", ")", "genomic_data", ".", "load_igor_genomic_data", "(", "params_file_name", ",", "V_anchor_pos_file", ",", "J_anchor_pos_file", ")", "generative_model", "=", "load_model", ".", "GenerativeModelVJ", "(", ")", "generative_model", ".", "load_and_process_igor_model", "(", "marginals_file_name", ")", "pgen_model", "=", "generation_probability", ".", "GenerationProbabilityVJ", "(", "generative_model", ",", "genomic_data", ",", "alphabet_filename", ")", "aa_alphabet", "=", "''", ".", "join", "(", "pgen_model", ".", "codons_dict", ".", "keys", "(", ")", ")", "if", "options", ".", "infile_name", "is", "not", "None", ":", "infile_name", "=", "options", ".", "infile_name", "if", "not", "os", ".", "path", ".", "isfile", "(", "infile_name", ")", ":", "print", "'Cannot find input file: '", "+", "infile_name", "print", "'Exiting...'", "return", "-", "1", "if", "options", ".", "outfile_name", "is", "not", "None", ":", "outfile_name", 
"=", "options", ".", "outfile_name", "if", "os", ".", "path", ".", "isfile", "(", "outfile_name", ")", ":", "if", "not", "raw_input", "(", "outfile_name", "+", "' already exists. Overwrite (y/n)? '", ")", ".", "strip", "(", ")", ".", "lower", "(", ")", "in", "[", "'y'", ",", "'yes'", "]", ":", "print", "'Exiting...'", "return", "-", "1", "#Parse delimiter", "delimiter", "=", "options", ".", "delimiter", "if", "delimiter", "is", "None", ":", "#Default case", "if", "options", ".", "infile_name", "is", "None", ":", "delimiter", "=", "'\\t'", "elif", "infile_name", ".", "endswith", "(", "'.tsv'", ")", ":", "#parse TAB separated value file", "delimiter", "=", "'\\t'", "elif", "infile_name", ".", "endswith", "(", "'.csv'", ")", ":", "#parse COMMA separated value file", "delimiter", "=", "','", "else", ":", "try", ":", "delimiter", "=", "{", "'tab'", ":", "'\\t'", ",", "'space'", ":", "' '", ",", "','", ":", "','", ",", "';'", ":", "';'", ",", "':'", ":", "':'", "}", "[", "delimiter", "]", "except", "KeyError", ":", "pass", "#Other string passed as the delimiter.", "#Parse delimiter_out", "delimiter_out", "=", "options", ".", "delimiter_out", "if", "delimiter_out", "is", "None", ":", "#Default case", "if", "delimiter", "is", "None", ":", "delimiter_out", "=", "'\\t'", "else", ":", "delimiter_out", "=", "delimiter", "if", "options", ".", "outfile_name", "is", "None", ":", "pass", "elif", "outfile_name", ".", "endswith", "(", "'.tsv'", ")", ":", "#output TAB separated value file", "delimiter_out", "=", "'\\t'", "elif", "outfile_name", ".", "endswith", "(", "'.csv'", ")", ":", "#output COMMA separated value file", "delimiter_out", "=", "','", "else", ":", "try", ":", "delimiter_out", "=", "{", "'tab'", ":", "'\\t'", ",", "'space'", ":", "' '", ",", "','", ":", "','", ",", "';'", ":", "';'", ",", "':'", ":", "':'", "}", "[", "delimiter_out", "]", "except", "KeyError", ":", "pass", "#Other string passed as the delimiter.", "#Parse gene_delimiter", "gene_mask_delimiter", "=", "options", ".", "gene_mask_delimiter", "if", "gene_mask_delimiter", "is", "None", ":", "#Default case", "gene_mask_delimiter", "=", "','", "if", "delimiter", "==", "','", ":", "gene_mask_delimiter", "=", "';'", "else", ":", "try", ":", "gene_mask_delimiter", "=", "{", "'tab'", ":", "'\\t'", ",", "'space'", ":", "' '", ",", "','", ":", "','", ",", "';'", ":", "';'", ",", "':'", ":", "':'", "}", "[", "gene_mask_delimiter", "]", "except", "KeyError", ":", "pass", "#Other string passed as the delimiter.", "#More options", "time_updates", "=", "options", ".", "time_updates", "display_seqs", "=", "options", ".", "display_seqs", "num_lines_for_display", "=", "options", ".", "num_lines_for_display", "seq_in_index", "=", "options", ".", "seq_in_index", "#where in the line the sequence is after line.split(delimiter)", "lines_to_skip", "=", "options", ".", "lines_to_skip", "#one method of skipping header", "comment_delimiter", "=", "options", ".", "comment_delimiter", "#another method of skipping header", "seqs_per_time_update", "=", "options", ".", "seqs_per_time_update", "max_number_of_seqs", "=", "options", ".", "max_number_of_seqs", "V_mask_index", "=", "options", ".", "V_mask_index", "#Default is not conditioning on V identity", "J_mask_index", "=", "options", ".", "J_mask_index", "#Default is not conditioning on J identity", "skip_empty", "=", "options", ".", "skip_empty", "seq_type_out", "=", "options", ".", "seq_type_out", "#type of pgens to be computed. 
Can be ntseq, aaseq, or both", "if", "seq_type_out", "is", "not", "None", ":", "seq_type_out", "=", "{", "'all'", ":", "None", ",", "'ntseq'", ":", "'ntseq'", ",", "'nucleotide'", ":", "'ntseq'", ",", "'aaseq'", ":", "'aaseq'", ",", "'amino_acid'", ":", "'aaseq'", "}", "[", "seq_type_out", "]", "if", "options", ".", "infile_name", "is", "None", ":", "#No infile specified -- args should be the input seqs", "print_warnings", "=", "True", "seqs", "=", "args", "seq_types", "=", "[", "determine_seq_type", "(", "seq", ",", "aa_alphabet", ")", "for", "seq", "in", "seqs", "]", "unrecognized_seqs", "=", "[", "seq", "for", "i", ",", "seq", "in", "enumerate", "(", "seqs", ")", "if", "seq_types", "[", "i", "]", "is", "None", "]", "if", "len", "(", "unrecognized_seqs", ")", ">", "0", "and", "print_warnings", ":", "print", "'The following sequences/arguments were not recognized: '", "+", "', '", ".", "join", "(", "unrecognized_seqs", ")", "seqs", "=", "[", "seq", "for", "i", ",", "seq", "in", "enumerate", "(", "seqs", ")", "if", "seq_types", "[", "i", "]", "is", "not", "None", "]", "seq_types", "=", "[", "seq_type", "for", "seq_type", "in", "seq_types", "if", "seq_type", "is", "not", "None", "]", "#Format V and J masks -- uniform for all argument input sequences", "try", ":", "V_mask", "=", "options", ".", "V_mask", ".", "split", "(", "','", ")", "unrecognized_v_genes", "=", "[", "v", "for", "v", "in", "V_mask", "if", "v", "not", "in", "pgen_model", ".", "V_mask_mapping", ".", "keys", "(", ")", "]", "V_mask", "=", "[", "v", "for", "v", "in", "V_mask", "if", "v", "in", "pgen_model", ".", "V_mask_mapping", ".", "keys", "(", ")", "]", "if", "len", "(", "unrecognized_v_genes", ")", ">", "0", ":", "print", "'These V genes/alleles are not recognized: '", "+", "', '", ".", "join", "(", "unrecognized_v_genes", ")", "if", "len", "(", "V_mask", ")", "==", "0", ":", "print", "'No recognized V genes/alleles in the provided V_mask. Continuing without conditioning on V usage.'", "V_mask", "=", "None", "except", "AttributeError", ":", "V_mask", "=", "options", ".", "V_mask", "#Default is None, i.e. not conditioning on V identity", "try", ":", "J_mask", "=", "options", ".", "J_mask", ".", "split", "(", "','", ")", "unrecognized_j_genes", "=", "[", "j", "for", "j", "in", "J_mask", "if", "j", "not", "in", "pgen_model", ".", "J_mask_mapping", ".", "keys", "(", ")", "]", "J_mask", "=", "[", "j", "for", "j", "in", "J_mask", "if", "j", "in", "pgen_model", ".", "J_mask_mapping", ".", "keys", "(", ")", "]", "if", "len", "(", "unrecognized_j_genes", ")", ">", "0", ":", "print", "'These J genes/alleles are not recognized: '", "+", "', '", ".", "join", "(", "unrecognized_j_genes", ")", "if", "len", "(", "J_mask", ")", "==", "0", ":", "print", "'No recognized J genes/alleles in the provided J_mask. Continuing without conditioning on J usage.'", "J_mask", "=", "None", "except", "AttributeError", ":", "J_mask", "=", "options", ".", "J_mask", "#Default is None, i.e. 
not conditioning on J identity", "print", "''", "start_time", "=", "time", ".", "time", "(", ")", "for", "seq", ",", "seq_type", "in", "zip", "(", "seqs", ",", "seq_types", ")", ":", "if", "seq_type", "==", "'aaseq'", ":", "c_pgen", "=", "pgen_model", ".", "compute_aa_CDR3_pgen", "(", "seq", ",", "V_mask", ",", "J_mask", ",", "print_warnings", ")", "print", "'Pgen of the amino acid sequence '", "+", "seq", "+", "': '", "+", "str", "(", "c_pgen", ")", "print", "''", "elif", "seq_type", "==", "'regex'", ":", "c_pgen", "=", "pgen_model", ".", "compute_regex_CDR3_template_pgen", "(", "seq", ",", "V_mask", ",", "J_mask", ",", "print_warnings", ")", "print", "'Pgen of the regular expression sequence '", "+", "seq", "+", "': '", "+", "str", "(", "c_pgen", ")", "print", "''", "elif", "seq_type", "==", "'ntseq'", ":", "if", "seq_type_out", "is", "None", "or", "seq_type_out", "==", "'ntseq'", ":", "c_pgen_nt", "=", "pgen_model", ".", "compute_nt_CDR3_pgen", "(", "seq", ",", "V_mask", ",", "J_mask", ",", "print_warnings", ")", "print", "'Pgen of the nucleotide sequence '", "+", "seq", "+", "': '", "+", "str", "(", "c_pgen_nt", ")", "if", "seq_type_out", "is", "None", "or", "seq_type_out", "==", "'aaseq'", ":", "c_pgen_aa", "=", "pgen_model", ".", "compute_aa_CDR3_pgen", "(", "nt2aa", "(", "seq", ")", ",", "V_mask", ",", "J_mask", ",", "print_warnings", ")", "print", "'Pgen of the amino acid sequence nt2aa('", "+", "seq", "+", "') = '", "+", "nt2aa", "(", "seq", ")", "+", "': '", "+", "str", "(", "c_pgen_aa", ")", "print", "''", "c_time", "=", "time", ".", "time", "(", ")", "-", "start_time", "if", "c_time", ">", "86400", ":", "#more than a day", "c_time_str", "=", "'%d days, %d hours, %d minutes, and %.2f seconds.'", "%", "(", "int", "(", "c_time", ")", "/", "86400", ",", "(", "int", "(", "c_time", ")", "/", "3600", ")", "%", "24", ",", "(", "int", "(", "c_time", ")", "/", "60", ")", "%", "60", ",", "c_time", "%", "60", ")", "elif", "c_time", ">", "3600", ":", "#more than an hr", "c_time_str", "=", "'%d hours, %d minutes, and %.2f seconds.'", "%", "(", "(", "int", "(", "c_time", ")", "/", "3600", ")", "%", "24", ",", "(", "int", "(", "c_time", ")", "/", "60", ")", "%", "60", ",", "c_time", "%", "60", ")", "elif", "c_time", ">", "60", ":", "#more than a min", "c_time_str", "=", "'%d minutes and %.2f seconds.'", "%", "(", "(", "int", "(", "c_time", ")", "/", "60", ")", "%", "60", ",", "c_time", "%", "60", ")", "else", ":", "c_time_str", "=", "'%.2f seconds.'", "%", "(", "c_time", ")", "print", "'Completed pgen computation in: '", "+", "c_time_str", "else", ":", "#Read sequences in from file", "print_warnings", "=", "False", "#Most cases of reading in from file should have warnings disabled", "seqs", "=", "[", "]", "seq_types", "=", "[", "]", "V_usage_masks", "=", "[", "]", "J_usage_masks", "=", "[", "]", "infile", "=", "open", "(", "infile_name", ",", "'r'", ")", "for", "i", ",", "line", "in", "enumerate", "(", "infile", ")", ":", "if", "comment_delimiter", "is", "not", "None", ":", "#Default case -- no comments/header delimiter", "if", "line", ".", "startswith", "(", "comment_delimiter", ")", ":", "#allow comments", "continue", "if", "i", "<", "lines_to_skip", ":", "continue", "if", "delimiter", "is", "None", ":", "#Default delimiter is any whitespace", "split_line", "=", "line", ".", "split", "(", ")", "else", ":", "split_line", "=", "line", ".", "split", "(", "delimiter", ")", "#Find the seq", "try", ":", "seq", "=", "split_line", "[", "seq_in_index", "]", ".", "strip", "(", ")", "if", "len", "(", 
"seq", ".", "strip", "(", ")", ")", "==", "0", ":", "if", "skip_empty", ":", "continue", "else", ":", "seqs", ".", "append", "(", "seq", ")", "#keep the blank seq as a placeholder", "seq_types", ".", "append", "(", "'aaseq'", ")", "else", ":", "seqs", ".", "append", "(", "seq", ")", "seq_types", ".", "append", "(", "determine_seq_type", "(", "seq", ",", "aa_alphabet", ")", ")", "except", "IndexError", ":", "#no index match for seq", "if", "skip_empty", "and", "len", "(", "line", ".", "strip", "(", ")", ")", "==", "0", ":", "continue", "print", "'seq_in_index is out of range'", "print", "'Exiting...'", "infile", ".", "close", "(", ")", "return", "-", "1", "#Find and format V_usage_mask", "if", "V_mask_index", "is", "None", ":", "V_usage_masks", ".", "append", "(", "None", ")", "#default mask", "else", ":", "try", ":", "V_usage_mask", "=", "split_line", "[", "V_mask_index", "]", ".", "strip", "(", ")", ".", "split", "(", "gene_mask_delimiter", ")", "#check that all V gene/allele names are recognized", "if", "all", "(", "[", "v", "in", "pgen_model", ".", "V_mask_mapping", "for", "v", "in", "V_usage_mask", "]", ")", ":", "V_usage_masks", ".", "append", "(", "V_usage_mask", ")", "else", ":", "print", "str", "(", "V_usage_mask", ")", "+", "\" is not a usable V_usage_mask composed exclusively of recognized V gene/allele names\"", "print", "'Unrecognized V gene/allele names: '", "+", "', '", ".", "join", "(", "[", "v", "for", "v", "in", "V_usage_mask", "if", "not", "v", "in", "pgen_model", ".", "V_mask_mapping", ".", "keys", "(", ")", "]", ")", "print", "'Exiting...'", "infile", ".", "close", "(", ")", "return", "-", "1", "except", "IndexError", ":", "#no index match for V_mask_index", "print", "'V_mask_index is out of range'", "print", "'Exiting...'", "infile", ".", "close", "(", ")", "return", "-", "1", "#Find and format J_usage_mask", "if", "J_mask_index", "is", "None", ":", "J_usage_masks", ".", "append", "(", "None", ")", "#default mask", "else", ":", "try", ":", "J_usage_mask", "=", "split_line", "[", "J_mask_index", "]", ".", "strip", "(", ")", ".", "split", "(", "gene_mask_delimiter", ")", "#check that all V gene/allele names are recognized", "if", "all", "(", "[", "j", "in", "pgen_model", ".", "J_mask_mapping", "for", "j", "in", "J_usage_mask", "]", ")", ":", "J_usage_masks", ".", "append", "(", "J_usage_mask", ")", "else", ":", "print", "str", "(", "J_usage_mask", ")", "+", "\" is not a usable J_usage_mask composed exclusively of recognized J gene/allele names\"", "print", "'Unrecognized J gene/allele names: '", "+", "', '", ".", "join", "(", "[", "j", "for", "j", "in", "J_usage_mask", "if", "not", "j", "in", "pgen_model", ".", "J_mask_mapping", ".", "keys", "(", ")", "]", ")", "print", "'Exiting...'", "infile", ".", "close", "(", ")", "return", "-", "1", "except", "IndexError", ":", "#no index match for J_mask_index", "print", "'J_mask_index is out of range'", "print", "'Exiting...'", "infile", ".", "close", "(", ")", "return", "-", "1", "if", "max_number_of_seqs", "is", "not", "None", ":", "if", "len", "(", "seqs", ")", ">=", "max_number_of_seqs", ":", "break", "unrecognized_seqs", "=", "[", "seq", "for", "i", ",", "seq", "in", "enumerate", "(", "seqs", ")", "if", "seq_types", "[", "i", "]", "is", "None", "]", "if", "len", "(", "unrecognized_seqs", ")", ">", "0", "and", "len", "(", "unrecognized_seqs", ")", "<", "len", "(", "seqs", ")", ":", "if", "print_warnings", "or", "options", ".", "outfile_name", "is", "not", "None", ":", "print", "'Some strings read in were not parsed as 
sequences -- they will be omitted.'", "print", "'Examples of improperly read strings: '", "for", "unrecognized_seq", "in", "unrecognized_seqs", "[", ":", "10", "]", ":", "print", "unrecognized_seq", "seqs", "=", "[", "seq", "for", "i", ",", "seq", "in", "enumerate", "(", "seqs", ")", "if", "seq_types", "[", "i", "]", "is", "not", "None", "]", "V_usage_masks", "=", "[", "V_usage_mask", "for", "i", ",", "V_usage_mask", "in", "enumerate", "(", "V_usage_masks", ")", "if", "seq_types", "[", "i", "]", "is", "not", "None", "]", "seq_types", "=", "[", "seq_type", "for", "seq_type", "in", "seq_types", "if", "seq_type", "is", "not", "None", "]", "elif", "len", "(", "unrecognized_seqs", ")", ">", "0", "and", "len", "(", "unrecognized_seqs", ")", "==", "len", "(", "seqs", ")", ":", "print", "'None of the read in strings were parsed as sequences. Check input file.'", "print", "'Examples of improperly read strings:'", "for", "unrecognized_seq", "in", "unrecognized_seqs", "[", ":", "10", "]", ":", "print", "unrecognized_seq", "print", "'Exiting...'", "return", "-", "1", "infile", ".", "close", "(", ")", "if", "options", ".", "outfile_name", "is", "not", "None", ":", "#OUTFILE SPECIFIED, allow printed info/display", "print", "'Successfully read in and formatted '", "+", "str", "(", "len", "(", "seqs", ")", ")", "+", "' sequences and any V or J usages.'", "if", "display_seqs", ":", "sys", ".", "stdout", ".", "write", "(", "'\\r'", "+", "'Continuing to Pgen computation in 3... '", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "time", ".", "sleep", "(", "0.4", ")", "sys", ".", "stdout", ".", "write", "(", "'\\r'", "+", "'Continuing to Pgen computation in 2... '", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "time", ".", "sleep", "(", "0.4", ")", "sys", ".", "stdout", ".", "write", "(", "'\\r'", "+", "'Continuing to Pgen computation in 1... 
'", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "time", ".", "sleep", "(", "0.4", ")", "else", ":", "print", "'Continuing to Pgen computation.'", "print_warnings", "=", "True", "#Display is off, can print warnings", "if", "display_seqs", ":", "lines_for_display", "=", "[", "]", "times_for_speed_calc", "=", "[", "time", ".", "time", "(", ")", "]", "outfile", "=", "open", "(", "outfile_name", ",", "'w'", ")", "start_time", "=", "time", ".", "time", "(", ")", "for", "i", ",", "seq", "in", "enumerate", "(", "seqs", ")", ":", "if", "seq_types", "[", "i", "]", "==", "'aaseq'", ":", "#Compute Pgen and print out", "c_pgen_line", "=", "seq", "+", "delimiter_out", "+", "str", "(", "pgen_model", ".", "compute_aa_CDR3_pgen", "(", "seq", ",", "V_usage_masks", "[", "i", "]", ",", "J_usage_masks", "[", "i", "]", ",", "print_warnings", ")", ")", "if", "seq_types", "[", "i", "]", "==", "'regex'", ":", "#Compute Pgen and print out", "c_pgen_line", "=", "seq", "+", "delimiter_out", "+", "str", "(", "pgen_model", ".", "compute_regex_CDR3_template_pgen", "(", "seq", ",", "V_usage_masks", "[", "i", "]", ",", "J_usage_masks", "[", "i", "]", ",", "print_warnings", ")", ")", "elif", "seq_types", "[", "i", "]", "==", "'ntseq'", ":", "ntseq", "=", "seq", "if", "len", "(", "ntseq", ")", "%", "3", "==", "0", ":", "#inframe sequence", "aaseq", "=", "nt2aa", "(", "ntseq", ")", "#Compute Pgen and print out based on recomb_type and seq_type_out", "if", "seq_type_out", "is", "None", ":", "c_pgen_line", "=", "ntseq", "+", "delimiter_out", "+", "str", "(", "pgen_model", ".", "compute_nt_CDR3_pgen", "(", "ntseq", ",", "V_usage_masks", "[", "i", "]", ",", "J_usage_masks", "[", "i", "]", ",", "print_warnings", ")", ")", "+", "delimiter_out", "+", "aaseq", "+", "delimiter_out", "+", "str", "(", "pgen_model", ".", "compute_aa_CDR3_pgen", "(", "aaseq", ",", "V_usage_masks", "[", "i", "]", ",", "J_usage_masks", "[", "i", "]", ",", "print_warnings", ")", ")", "elif", "seq_type_out", "==", "'ntseq'", ":", "c_pgen_line", "=", "ntseq", "+", "delimiter_out", "+", "str", "(", "pgen_model", ".", "compute_nt_CDR3_pgen", "(", "ntseq", ",", "V_usage_masks", "[", "i", "]", ",", "J_usage_masks", "[", "i", "]", ",", "print_warnings", ")", ")", "elif", "seq_type_out", "==", "'aaseq'", ":", "c_pgen_line", "=", "aaseq", "+", "delimiter_out", "+", "str", "(", "pgen_model", ".", "compute_aa_CDR3_pgen", "(", "aaseq", ",", "V_usage_masks", "[", "i", "]", ",", "J_usage_masks", "[", "i", "]", ",", "print_warnings", ")", ")", "else", ":", "#out of frame sequence -- Pgens are 0 and use 'out_of_frame' for aaseq", "if", "seq_type_out", "is", "None", ":", "c_pgen_line", "=", "ntseq", "+", "delimiter_out", "+", "'0'", "+", "delimiter_out", "+", "'out_of_frame'", "+", "delimiter_out", "+", "'0'", "elif", "seq_type_out", "==", "'ntseq'", ":", "c_pgen_line", "=", "ntseq", "+", "delimiter_out", "+", "'0'", "elif", "seq_type_out", "==", "'aaseq'", ":", "c_pgen_line", "=", "'out_of_frame'", "+", "delimiter_out", "+", "'0'", "outfile", ".", "write", "(", "c_pgen_line", "+", "'\\n'", ")", "#Print time update", "if", "display_seqs", ":", "cc_time", "=", "time", ".", "time", "(", ")", "c_time", "=", "cc_time", "-", "start_time", "times_for_speed_calc", "=", "[", "cc_time", "]", "+", "times_for_speed_calc", "[", ":", "num_lines_for_display", "]", "c_avg_speed", "=", "(", "len", "(", "times_for_speed_calc", ")", "-", "1", ")", "/", "float", "(", "times_for_speed_calc", "[", "0", "]", "-", "times_for_speed_calc", "[", "-", "1", "]", ")", "#eta = ((len(seqs) - 
(i+1))/float(i+1))*c_time", "eta", "=", "(", "len", "(", "seqs", ")", "-", "(", "i", "+", "1", ")", ")", "/", "c_avg_speed", "lines_for_display", "=", "[", "c_pgen_line", "]", "+", "lines_for_display", "[", ":", "num_lines_for_display", "]", "c_time_str", "=", "'%s hours, %s minutes, and %s seconds.'", "%", "(", "repr", "(", "int", "(", "c_time", ")", "/", "3600", ")", ".", "rjust", "(", "3", ")", ",", "repr", "(", "(", "int", "(", "c_time", ")", "/", "60", ")", "%", "60", ")", ".", "rjust", "(", "2", ")", ",", "repr", "(", "int", "(", "c_time", ")", "%", "60", ")", ".", "rjust", "(", "2", ")", ")", "eta_str", "=", "'%s hours, %s minutes, and %s seconds.'", "%", "(", "repr", "(", "int", "(", "eta", ")", "/", "3600", ")", ".", "rjust", "(", "3", ")", ",", "repr", "(", "(", "int", "(", "eta", ")", "/", "60", ")", "%", "60", ")", ".", "rjust", "(", "2", ")", ",", "repr", "(", "int", "(", "eta", ")", "%", "60", ")", ".", "rjust", "(", "2", ")", ")", "time_str", "=", "'Time to compute Pgen on %s seqs: %s \\nEst. time for remaining %s seqs: %s'", "%", "(", "repr", "(", "i", "+", "1", ")", ".", "rjust", "(", "9", ")", ",", "c_time_str", ",", "repr", "(", "len", "(", "seqs", ")", "-", "(", "i", "+", "1", ")", ")", ".", "rjust", "(", "9", ")", ",", "eta_str", ")", "speed_str", "=", "'Current Pgen computation speed: %s seqs/min'", "%", "(", "repr", "(", "round", "(", "(", "len", "(", "times_for_speed_calc", ")", "-", "1", ")", "*", "60", "/", "float", "(", "times_for_speed_calc", "[", "0", "]", "-", "times_for_speed_calc", "[", "-", "1", "]", ")", ",", "2", ")", ")", ".", "rjust", "(", "8", ")", ")", "display_str", "=", "'\\n'", ".", "join", "(", "lines_for_display", "[", ":", ":", "-", "1", "]", ")", "+", "'\\n'", "+", "'-'", "*", "80", "+", "'\\n'", "+", "time_str", "+", "'\\n'", "+", "speed_str", "+", "'\\n'", "+", "'-'", "*", "80", "print", "'\\033[2J'", "+", "display_str", "elif", "(", "i", "+", "1", ")", "%", "seqs_per_time_update", "==", "0", "and", "time_updates", ":", "c_time", "=", "time", ".", "time", "(", ")", "-", "start_time", "eta", "=", "(", "(", "len", "(", "seqs", ")", "-", "(", "i", "+", "1", ")", ")", "/", "float", "(", "i", "+", "1", ")", ")", "*", "c_time", "if", "c_time", ">", "86400", ":", "#more than a day", "c_time_str", "=", "'%d days, %d hours, %d minutes, and %.2f seconds.'", "%", "(", "int", "(", "c_time", ")", "/", "86400", ",", "(", "int", "(", "c_time", ")", "/", "3600", ")", "%", "24", ",", "(", "int", "(", "c_time", ")", "/", "60", ")", "%", "60", ",", "c_time", "%", "60", ")", "elif", "c_time", ">", "3600", ":", "#more than an hr", "c_time_str", "=", "'%d hours, %d minutes, and %.2f seconds.'", "%", "(", "(", "int", "(", "c_time", ")", "/", "3600", ")", "%", "24", ",", "(", "int", "(", "c_time", ")", "/", "60", ")", "%", "60", ",", "c_time", "%", "60", ")", "elif", "c_time", ">", "60", ":", "#more than a min", "c_time_str", "=", "'%d minutes and %.2f seconds.'", "%", "(", "(", "int", "(", "c_time", ")", "/", "60", ")", "%", "60", ",", "c_time", "%", "60", ")", "else", ":", "c_time_str", "=", "'%.2f seconds.'", "%", "(", "c_time", ")", "if", "eta", ">", "86400", ":", "#more than a day", "eta_str", "=", "'%d days, %d hours, %d minutes, and %.2f seconds.'", "%", "(", "int", "(", "eta", ")", "/", "86400", ",", "(", "int", "(", "eta", ")", "/", "3600", ")", "%", "24", ",", "(", "int", "(", "eta", ")", "/", "60", ")", "%", "60", ",", "eta", "%", "60", ")", "elif", "eta", ">", "3600", ":", "#more than an hr", "eta_str", "=", "'%d hours, %d minutes, and %.2f seconds.'", 
"%", "(", "(", "int", "(", "eta", ")", "/", "3600", ")", "%", "24", ",", "(", "int", "(", "eta", ")", "/", "60", ")", "%", "60", ",", "eta", "%", "60", ")", "elif", "eta", ">", "60", ":", "#more than a min", "eta_str", "=", "'%d minutes and %.2f seconds.'", "%", "(", "(", "int", "(", "eta", ")", "/", "60", ")", "%", "60", ",", "eta", "%", "60", ")", "else", ":", "eta_str", "=", "'%.2f seconds.'", "%", "(", "eta", ")", "print", "'Pgen computed for %d sequences in: %s Estimated time remaining: %s'", "%", "(", "i", "+", "1", ",", "c_time_str", ",", "eta_str", ")", "c_time", "=", "time", ".", "time", "(", ")", "-", "start_time", "if", "c_time", ">", "86400", ":", "#more than a day", "c_time_str", "=", "'%d days, %d hours, %d minutes, and %.2f seconds.'", "%", "(", "int", "(", "c_time", ")", "/", "86400", ",", "(", "int", "(", "c_time", ")", "/", "3600", ")", "%", "24", ",", "(", "int", "(", "c_time", ")", "/", "60", ")", "%", "60", ",", "c_time", "%", "60", ")", "elif", "c_time", ">", "3600", ":", "#more than an hr", "c_time_str", "=", "'%d hours, %d minutes, and %.2f seconds.'", "%", "(", "(", "int", "(", "c_time", ")", "/", "3600", ")", "%", "24", ",", "(", "int", "(", "c_time", ")", "/", "60", ")", "%", "60", ",", "c_time", "%", "60", ")", "elif", "c_time", ">", "60", ":", "#more than a min", "c_time_str", "=", "'%d minutes and %.2f seconds.'", "%", "(", "(", "int", "(", "c_time", ")", "/", "60", ")", "%", "60", ",", "c_time", "%", "60", ")", "else", ":", "c_time_str", "=", "'%.2f seconds.'", "%", "(", "c_time", ")", "print", "'Completed Pgen computation for %d sequences: in %s'", "%", "(", "len", "(", "seqs", ")", ",", "c_time_str", ")", "outfile", ".", "close", "(", ")", "else", ":", "#NO OUTFILE -- print directly to stdout", "start_time", "=", "time", ".", "time", "(", ")", "for", "i", ",", "seq", "in", "enumerate", "(", "seqs", ")", ":", "if", "seq_types", "[", "i", "]", "==", "'aaseq'", ":", "#Compute Pgen and print out", "c_pgen_line", "=", "seq", "+", "delimiter_out", "+", "str", "(", "pgen_model", ".", "compute_aa_CDR3_pgen", "(", "seq", ",", "V_usage_masks", "[", "i", "]", ",", "J_usage_masks", "[", "i", "]", ",", "print_warnings", ")", ")", "if", "seq_types", "[", "i", "]", "==", "'regex'", ":", "#Compute Pgen and print out", "c_pgen_line", "=", "seq", "+", "delimiter_out", "+", "str", "(", "pgen_model", ".", "compute_regex_CDR3_template_pgen", "(", "seq", ",", "V_usage_masks", "[", "i", "]", ",", "J_usage_masks", "[", "i", "]", ",", "print_warnings", ")", ")", "elif", "seq_types", "[", "i", "]", "==", "'ntseq'", ":", "ntseq", "=", "seq", "if", "len", "(", "ntseq", ")", "%", "3", "==", "0", ":", "#inframe sequence", "aaseq", "=", "nt2aa", "(", "ntseq", ")", "#Compute Pgen and print out based on recomb_type and seq_type_out", "if", "seq_type_out", "is", "None", ":", "c_pgen_line", "=", "ntseq", "+", "delimiter_out", "+", "str", "(", "pgen_model", ".", "compute_nt_CDR3_pgen", "(", "ntseq", ",", "V_usage_masks", "[", "i", "]", ",", "J_usage_masks", "[", "i", "]", ",", "print_warnings", ")", ")", "+", "delimiter_out", "+", "aaseq", "+", "delimiter_out", "+", "str", "(", "pgen_model", ".", "compute_aa_CDR3_pgen", "(", "aaseq", ",", "V_usage_masks", "[", "i", "]", ",", "J_usage_masks", "[", "i", "]", ",", "print_warnings", ")", ")", "elif", "seq_type_out", "==", "'ntseq'", ":", "c_pgen_line", "=", "ntseq", "+", "delimiter_out", "+", "str", "(", "pgen_model", ".", "compute_nt_CDR3_pgen", "(", "ntseq", ",", "V_usage_masks", "[", "i", "]", ",", "J_usage_masks", "[", "i", "]", ",", 
"print_warnings", ")", ")", "elif", "seq_type_out", "==", "'aaseq'", ":", "c_pgen_line", "=", "aaseq", "+", "delimiter_out", "+", "str", "(", "pgen_model", ".", "compute_aa_CDR3_pgen", "(", "aaseq", ",", "V_usage_masks", "[", "i", "]", ",", "J_usage_masks", "[", "i", "]", ",", "print_warnings", ")", ")", "else", ":", "#out of frame sequence -- Pgens are 0 and use 'out_of_frame' for aaseq", "if", "seq_type_out", "is", "None", ":", "c_pgen_line", "=", "ntseq", "+", "delimiter_out", "+", "'0'", "+", "delimiter_out", "+", "'out_of_frame'", "+", "delimiter_out", "+", "'0'", "elif", "seq_type_out", "==", "'ntseq'", ":", "c_pgen_line", "=", "ntseq", "+", "delimiter_out", "+", "'0'", "elif", "seq_type_out", "==", "'aaseq'", ":", "c_pgen_line", "=", "'out_of_frame'", "+", "delimiter_out", "+", "'0'", "print", "c_pgen_line" ]
Compute Pgens from a file and output to another file.
[ "Compute", "Pgens", "from", "a", "file", "and", "output", "to", "another", "file", "." ]
train
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/compute_pgen.py#L270-L780
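The record above documents the command-line entry point of olga/compute_pgen.py. A minimal sketch of driving the same Pgen computation directly from Python, using only the loading calls that appear in main() above; the olga.* import paths and the model-folder resolution are assumptions about the installed package layout, and the CDR3 sequence is an arbitrary placeholder:

import os
import olga.load_model as load_model
import olga.generation_probability as generation_probability

# Default human TRB (VDJ) model files, mirroring the default_models layout in main();
# resolving the folder relative to the installed package is an assumption.
model_folder = os.path.join(os.path.dirname(load_model.__file__), 'default_models', 'human_T_beta')
params_file_name = os.path.join(model_folder, 'model_params.txt')
marginals_file_name = os.path.join(model_folder, 'model_marginals.txt')
V_anchor_pos_file = os.path.join(model_folder, 'V_gene_CDR3_anchors.csv')
J_anchor_pos_file = os.path.join(model_folder, 'J_gene_CDR3_anchors.csv')

# Same loading sequence as the recomb_type == 'VDJ' branch of main().
genomic_data = load_model.GenomicDataVDJ()
genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file)
generative_model = load_model.GenerativeModelVDJ()
generative_model.load_and_process_igor_model(marginals_file_name)
pgen_model = generation_probability.GenerationProbabilityVDJ(generative_model, genomic_data, None)

# Pgen of one placeholder amino acid CDR3, not conditioned on V/J usage.
print pgen_model.compute_aa_CDR3_pgen('CASSLGRDGGHEQYF', None, None, True)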
tansey/gfl
pygfl/utils.py
create_plateaus
def create_plateaus(data, edges, plateau_size, plateau_vals, plateaus=None):
    '''Creates plateaus of constant value in the data.'''
    nodes = set(edges.keys())
    if plateaus is None:
        plateaus = []
        for i in range(len(plateau_vals)):
            if len(nodes) == 0:
                break
            node = np.random.choice(list(nodes))
            nodes.remove(node)
            plateau = [node]
            available = set(edges[node]) & nodes
            while len(nodes) > 0 and len(available) > 0 and len(plateau) < plateau_size:
                node = np.random.choice(list(available))
                plateau.append(node)
                available |= nodes & set(edges[node])
                available.remove(node)
            nodes -= set(plateau)
            plateaus.append(set(plateau))
    for p, v in zip(plateaus, plateau_vals):
        data[np.array(list(p), dtype=int)] = v
    return plateaus
python
def create_plateaus(data, edges, plateau_size, plateau_vals, plateaus=None):
    '''Creates plateaus of constant value in the data.'''
    nodes = set(edges.keys())
    if plateaus is None:
        plateaus = []
        for i in range(len(plateau_vals)):
            if len(nodes) == 0:
                break
            node = np.random.choice(list(nodes))
            nodes.remove(node)
            plateau = [node]
            available = set(edges[node]) & nodes
            while len(nodes) > 0 and len(available) > 0 and len(plateau) < plateau_size:
                node = np.random.choice(list(available))
                plateau.append(node)
                available |= nodes & set(edges[node])
                available.remove(node)
            nodes -= set(plateau)
            plateaus.append(set(plateau))
    for p, v in zip(plateaus, plateau_vals):
        data[np.array(list(p), dtype=int)] = v
    return plateaus
[ "def", "create_plateaus", "(", "data", ",", "edges", ",", "plateau_size", ",", "plateau_vals", ",", "plateaus", "=", "None", ")", ":", "nodes", "=", "set", "(", "edges", ".", "keys", "(", ")", ")", "if", "plateaus", "is", "None", ":", "plateaus", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "plateau_vals", ")", ")", ":", "if", "len", "(", "nodes", ")", "==", "0", ":", "break", "node", "=", "np", ".", "random", ".", "choice", "(", "list", "(", "nodes", ")", ")", "nodes", ".", "remove", "(", "node", ")", "plateau", "=", "[", "node", "]", "available", "=", "set", "(", "edges", "[", "node", "]", ")", "&", "nodes", "while", "len", "(", "nodes", ")", ">", "0", "and", "len", "(", "available", ")", ">", "0", "and", "len", "(", "plateau", ")", "<", "plateau_size", ":", "node", "=", "np", ".", "random", ".", "choice", "(", "list", "(", "available", ")", ")", "plateau", ".", "append", "(", "node", ")", "available", "|=", "nodes", "&", "set", "(", "edges", "[", "node", "]", ")", "available", ".", "remove", "(", "node", ")", "nodes", "-=", "set", "(", "plateau", ")", "plateaus", ".", "append", "(", "set", "(", "plateau", ")", ")", "for", "p", ",", "v", "in", "zip", "(", "plateaus", ",", "plateau_vals", ")", ":", "data", "[", "np", ".", "array", "(", "list", "(", "p", ")", ",", "dtype", "=", "int", ")", "]", "=", "v", "return", "plateaus" ]
Creates plateaus of constant value in the data.
[ "Creates", "plateaus", "of", "constant", "value", "in", "the", "data", "." ]
train
https://github.com/tansey/gfl/blob/ae0f078bab57aba9e827ed6162f247ff9dc2aa19/pygfl/utils.py#L29-L50
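A minimal usage sketch (not drawn from the record; assumes pygfl and NumPy are installed and that edges maps each node to a list of neighbors): seed two plateaus of up to 3 nodes each on a 10-node chain graph, mutating data in place.

import numpy as np
from collections import defaultdict
from pygfl.utils import create_plateaus

edges = defaultdict(list)
for i in range(9):                      # 10-node chain: 0-1-2-...-9
    edges[i].append(i + 1)
    edges[i + 1].append(i)

data = np.zeros(10)
plateaus = create_plateaus(data, edges, plateau_size=3, plateau_vals=[1.0, -1.0])
print(data)       # two random runs of 1.0 and -1.0, zeros elsewhere
print(plateaus)   # the node-index sets that were assigned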
tansey/gfl
pygfl/utils.py
pretty_str
def pretty_str(p, decimal_places=2, print_zero=True, label_columns=False):
    '''Pretty-print a matrix or vector.'''
    if len(p.shape) == 1:
        return vector_str(p, decimal_places, print_zero)
    if len(p.shape) == 2:
        return matrix_str(p, decimal_places, print_zero, label_columns)
    raise Exception('Invalid array with shape {0}'.format(p.shape))
python
def pretty_str(p, decimal_places=2, print_zero=True, label_columns=False):
    '''Pretty-print a matrix or vector.'''
    if len(p.shape) == 1:
        return vector_str(p, decimal_places, print_zero)
    if len(p.shape) == 2:
        return matrix_str(p, decimal_places, print_zero, label_columns)
    raise Exception('Invalid array with shape {0}'.format(p.shape))
[ "def", "pretty_str", "(", "p", ",", "decimal_places", "=", "2", ",", "print_zero", "=", "True", ",", "label_columns", "=", "False", ")", ":", "if", "len", "(", "p", ".", "shape", ")", "==", "1", ":", "return", "vector_str", "(", "p", ",", "decimal_places", ",", "print_zero", ")", "if", "len", "(", "p", ".", "shape", ")", "==", "2", ":", "return", "matrix_str", "(", "p", ",", "decimal_places", ",", "print_zero", ",", "label_columns", ")", "raise", "Exception", "(", "'Invalid array with shape {0}'", ".", "format", "(", "p", ".", "shape", ")", ")" ]
Pretty-print a matrix or vector.
[ "Pretty", "-", "print", "a", "matrix", "or", "vector", "." ]
train
https://github.com/tansey/gfl/blob/ae0f078bab57aba9e827ed6162f247ff9dc2aa19/pygfl/utils.py#L148-L154
tansey/gfl
pygfl/utils.py
matrix_str
def matrix_str(p, decimal_places=2, print_zero=True, label_columns=False):
    '''Pretty-print the matrix.'''
    return '[{0}]'.format("\n ".join([(str(i) if label_columns else '') + vector_str(a, decimal_places, print_zero) for i, a in enumerate(p)]))
python
def matrix_str(p, decimal_places=2, print_zero=True, label_columns=False):
    '''Pretty-print the matrix.'''
    return '[{0}]'.format("\n ".join([(str(i) if label_columns else '') + vector_str(a, decimal_places, print_zero) for i, a in enumerate(p)]))
[ "def", "matrix_str", "(", "p", ",", "decimal_places", "=", "2", ",", "print_zero", "=", "True", ",", "label_columns", "=", "False", ")", ":", "return", "'[{0}]'", ".", "format", "(", "\"\\n \"", ".", "join", "(", "[", "(", "str", "(", "i", ")", "if", "label_columns", "else", "''", ")", "+", "vector_str", "(", "a", ",", "decimal_places", ",", "print_zero", ")", "for", "i", ",", "a", "in", "enumerate", "(", "p", ")", "]", ")", ")" ]
Pretty-print the matrix.
[ "Pretty", "-", "print", "the", "matrix", "." ]
train
https://github.com/tansey/gfl/blob/ae0f078bab57aba9e827ed6162f247ff9dc2aa19/pygfl/utils.py#L156-L158
tansey/gfl
pygfl/utils.py
vector_str
def vector_str(p, decimal_places=2, print_zero=True):
    '''Pretty-print the vector values.'''
    style = '{0:.' + str(decimal_places) + 'f}'
    return '[{0}]'.format(", ".join([' ' if not print_zero and a == 0 else style.format(a) for a in p]))
python
def vector_str(p, decimal_places=2, print_zero=True):
    '''Pretty-print the vector values.'''
    style = '{0:.' + str(decimal_places) + 'f}'
    return '[{0}]'.format(", ".join([' ' if not print_zero and a == 0 else style.format(a) for a in p]))
[ "def", "vector_str", "(", "p", ",", "decimal_places", "=", "2", ",", "print_zero", "=", "True", ")", ":", "style", "=", "'{0:.'", "+", "str", "(", "decimal_places", ")", "+", "'f}'", "return", "'[{0}]'", ".", "format", "(", "\", \"", ".", "join", "(", "[", "' '", "if", "not", "print_zero", "and", "a", "==", "0", "else", "style", ".", "format", "(", "a", ")", "for", "a", "in", "p", "]", ")", ")" ]
Pretty-print the vector values.
[ "Pretty", "-", "print", "the", "vector", "values", "." ]
train
https://github.com/tansey/gfl/blob/ae0f078bab57aba9e827ed6162f247ff9dc2aa19/pygfl/utils.py#L160-L163
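A quick sketch of the three printers above (assumes pygfl.utils is importable; the rendered strings are illustrative):

import numpy as np
from pygfl.utils import pretty_str

print(pretty_str(np.array([1.234, 0.0, 5.6789]), decimal_places=1, print_zero=False))
# e.g. [1.2,  , 5.7] -- zeros render as blanks when print_zero=False
print(pretty_str(np.eye(2), label_columns=True))
# rows joined by newlines, each prefixed with its row index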
tansey/gfl
pygfl/utils.py
calc_plateaus
def calc_plateaus(beta, edges, rel_tol=1e-4, verbose=0):
    '''Calculate the plateaus (degrees of freedom) of a graph of beta values in linear time.'''
    if not isinstance(edges, dict):
        raise Exception('Edges must be a map from each node to a list of neighbors.')
    to_check = deque(range(len(beta)))
    check_map = np.zeros(beta.shape, dtype=bool)
    check_map[np.isnan(beta)] = True
    plateaus = []

    if verbose:
        print('\tCalculating plateaus...')

    if verbose > 1:
        print('\tIndices to check {0} {1}'.format(len(to_check), check_map.shape))

    # Loop until every beta index has been checked
    while to_check:
        if verbose > 1:
            print('\t\tPlateau #{0}'.format(len(plateaus) + 1))

        # Get the next unchecked point on the grid
        idx = to_check.popleft()

        # If we already have checked this one, just pop it off
        while to_check and check_map[idx]:
            try:
                idx = to_check.popleft()
            except:
                break

        # Edge case -- If we went through all the indices without reaching an unchecked one.
        if check_map[idx]:
            break

        # Create the plateau and calculate the inclusion conditions
        cur_plateau = set([idx])
        cur_unchecked = deque([idx])
        val = beta[idx]
        min_member = val - rel_tol
        max_member = val + rel_tol

        # Check every possible boundary of the plateau
        while cur_unchecked:
            idx = cur_unchecked.popleft()

            # neighbors to check
            local_check = []

            # Generic graph case, get all neighbors of this node
            local_check.extend(edges[idx])

            # Check the index's unchecked neighbors
            for local_idx in local_check:
                if not check_map[local_idx] \
                        and beta[local_idx] >= min_member \
                        and beta[local_idx] <= max_member:
                    # Label this index as being checked so it's not re-checked unnecessarily
                    check_map[local_idx] = True

                    # Add it to the plateau and the list of local unchecked locations
                    cur_unchecked.append(local_idx)
                    cur_plateau.add(local_idx)

        # Track each plateau's indices
        plateaus.append((val, cur_plateau))

    # Returns the list of plateaus and their values
    return plateaus
python
def calc_plateaus(beta, edges, rel_tol=1e-4, verbose=0):
    '''Calculate the plateaus (degrees of freedom) of a graph of beta values in linear time.'''
    if not isinstance(edges, dict):
        raise Exception('Edges must be a map from each node to a list of neighbors.')
    to_check = deque(range(len(beta)))
    check_map = np.zeros(beta.shape, dtype=bool)
    check_map[np.isnan(beta)] = True
    plateaus = []

    if verbose:
        print('\tCalculating plateaus...')

    if verbose > 1:
        print('\tIndices to check {0} {1}'.format(len(to_check), check_map.shape))

    # Loop until every beta index has been checked
    while to_check:
        if verbose > 1:
            print('\t\tPlateau #{0}'.format(len(plateaus) + 1))

        # Get the next unchecked point on the grid
        idx = to_check.popleft()

        # If we already have checked this one, just pop it off
        while to_check and check_map[idx]:
            try:
                idx = to_check.popleft()
            except:
                break

        # Edge case -- If we went through all the indices without reaching an unchecked one.
        if check_map[idx]:
            break

        # Create the plateau and calculate the inclusion conditions
        cur_plateau = set([idx])
        cur_unchecked = deque([idx])
        val = beta[idx]
        min_member = val - rel_tol
        max_member = val + rel_tol

        # Check every possible boundary of the plateau
        while cur_unchecked:
            idx = cur_unchecked.popleft()

            # neighbors to check
            local_check = []

            # Generic graph case, get all neighbors of this node
            local_check.extend(edges[idx])

            # Check the index's unchecked neighbors
            for local_idx in local_check:
                if not check_map[local_idx] \
                        and beta[local_idx] >= min_member \
                        and beta[local_idx] <= max_member:
                    # Label this index as being checked so it's not re-checked unnecessarily
                    check_map[local_idx] = True

                    # Add it to the plateau and the list of local unchecked locations
                    cur_unchecked.append(local_idx)
                    cur_plateau.add(local_idx)

        # Track each plateau's indices
        plateaus.append((val, cur_plateau))

    # Returns the list of plateaus and their values
    return plateaus
[ "def", "calc_plateaus", "(", "beta", ",", "edges", ",", "rel_tol", "=", "1e-4", ",", "verbose", "=", "0", ")", ":", "if", "not", "isinstance", "(", "edges", ",", "dict", ")", ":", "raise", "Exception", "(", "'Edges must be a map from each node to a list of neighbors.'", ")", "to_check", "=", "deque", "(", "range", "(", "len", "(", "beta", ")", ")", ")", "check_map", "=", "np", ".", "zeros", "(", "beta", ".", "shape", ",", "dtype", "=", "bool", ")", "check_map", "[", "np", ".", "isnan", "(", "beta", ")", "]", "=", "True", "plateaus", "=", "[", "]", "if", "verbose", ":", "print", "(", "'\\tCalculating plateaus...'", ")", "if", "verbose", ">", "1", ":", "print", "(", "'\\tIndices to check {0} {1}'", ".", "format", "(", "len", "(", "to_check", ")", ",", "check_map", ".", "shape", ")", ")", "# Loop until every beta index has been checked", "while", "to_check", ":", "if", "verbose", ">", "1", ":", "print", "(", "'\\t\\tPlateau #{0}'", ".", "format", "(", "len", "(", "plateaus", ")", "+", "1", ")", ")", "# Get the next unchecked point on the grid", "idx", "=", "to_check", ".", "popleft", "(", ")", "# If we already have checked this one, just pop it off", "while", "to_check", "and", "check_map", "[", "idx", "]", ":", "try", ":", "idx", "=", "to_check", ".", "popleft", "(", ")", "except", ":", "break", "# Edge case -- If we went through all the indices without reaching an unchecked one.", "if", "check_map", "[", "idx", "]", ":", "break", "# Create the plateau and calculate the inclusion conditions", "cur_plateau", "=", "set", "(", "[", "idx", "]", ")", "cur_unchecked", "=", "deque", "(", "[", "idx", "]", ")", "val", "=", "beta", "[", "idx", "]", "min_member", "=", "val", "-", "rel_tol", "max_member", "=", "val", "+", "rel_tol", "# Check every possible boundary of the plateau", "while", "cur_unchecked", ":", "idx", "=", "cur_unchecked", ".", "popleft", "(", ")", "# neighbors to check", "local_check", "=", "[", "]", "# Generic graph case, get all neighbors of this node", "local_check", ".", "extend", "(", "edges", "[", "idx", "]", ")", "# Check the index's unchecked neighbors", "for", "local_idx", "in", "local_check", ":", "if", "not", "check_map", "[", "local_idx", "]", "and", "beta", "[", "local_idx", "]", ">=", "min_member", "and", "beta", "[", "local_idx", "]", "<=", "max_member", ":", "# Label this index as being checked so it's not re-checked unnecessarily", "check_map", "[", "local_idx", "]", "=", "True", "# Add it to the plateau and the list of local unchecked locations", "cur_unchecked", ".", "append", "(", "local_idx", ")", "cur_plateau", ".", "add", "(", "local_idx", ")", "# Track each plateau's indices", "plateaus", ".", "append", "(", "(", "val", ",", "cur_plateau", ")", ")", "# Returns the list of plateaus and their values", "return", "plateaus" ]
Calculate the plateaus (degrees of freedom) of a graph of beta values in linear time.
[ "Calculate", "the", "plateaus", "(", "degrees", "of", "freedom", ")", "of", "a", "graph", "of", "beta", "values", "in", "linear", "time", "." ]
train
https://github.com/tansey/gfl/blob/ae0f078bab57aba9e827ed6162f247ff9dc2aa19/pygfl/utils.py#L176-L243
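A small check of calc_plateaus on a 5-node chain (sketch; assumes pygfl is installed). beta has two constant runs, so two plateaus should come back:

import numpy as np
from pygfl.utils import calc_plateaus

beta = np.array([0.5, 0.5, 0.5, 2.0, 2.0])
edges = {0: [1], 1: [0, 2], 2: [1, 3], 3: [2, 4], 4: [3]}
for val, idxs in calc_plateaus(beta, edges):
    print(val, sorted(idxs))
# expected: 0.5 [0, 1, 2] and 2.0 [3, 4]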
tansey/gfl
pygfl/utils.py
nearly_unique
def nearly_unique(arr, rel_tol=1e-4, verbose=0):
    '''Heuristic method to return the uniques within some precision in a numpy array'''
    results = np.array([arr[0]])
    for x in arr:
        if np.abs(results - x).min() > rel_tol:
            results = np.append(results, x)
    return results
python
def nearly_unique(arr, rel_tol=1e-4, verbose=0):
    '''Heuristic method to return the uniques within some precision in a numpy array'''
    results = np.array([arr[0]])
    for x in arr:
        if np.abs(results - x).min() > rel_tol:
            results = np.append(results, x)
    return results
[ "def", "nearly_unique", "(", "arr", ",", "rel_tol", "=", "1e-4", ",", "verbose", "=", "0", ")", ":", "results", "=", "np", ".", "array", "(", "[", "arr", "[", "0", "]", "]", ")", "for", "x", "in", "arr", ":", "if", "np", ".", "abs", "(", "results", "-", "x", ")", ".", "min", "(", ")", ">", "rel_tol", ":", "results", "=", "np", ".", "append", "(", "results", ",", "x", ")", "return", "results" ]
Heuristic method to return the uniques within some precision in a numpy array
[ "Heuristic", "method", "to", "return", "the", "uniques", "within", "some", "precision", "in", "a", "numpy", "array" ]
train
https://github.com/tansey/gfl/blob/ae0f078bab57aba9e827ed6162f247ff9dc2aa19/pygfl/utils.py#L245-L251
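Sketch (assumes NumPy and pygfl): values closer than rel_tol=1e-4 collapse to one representative.

import numpy as np
from pygfl.utils import nearly_unique

print(nearly_unique(np.array([1.0, 1.00001, 2.0, 2.00002])))  # -> [1. 2.]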
tansey/gfl
pygfl/utils.py
hypercube_edges
def hypercube_edges(dims, use_map=False):
    '''Create edge lists for an arbitrary hypercube. TODO: this is probably not the fastest way.'''
    edges = []
    nodes = np.arange(np.product(dims)).reshape(dims)
    for i, d in enumerate(dims):
        for j in range(d - 1):
            for n1, n2 in zip(np.take(nodes, [j], axis=i).flatten(), np.take(nodes, [j + 1], axis=i).flatten()):
                edges.append((n1, n2))
    if use_map:
        return edge_map_from_edge_list(edges)
    return edges
python
def hypercube_edges(dims, use_map=False):
    '''Create edge lists for an arbitrary hypercube. TODO: this is probably not the fastest way.'''
    edges = []
    nodes = np.arange(np.product(dims)).reshape(dims)
    for i, d in enumerate(dims):
        for j in range(d - 1):
            for n1, n2 in zip(np.take(nodes, [j], axis=i).flatten(), np.take(nodes, [j + 1], axis=i).flatten()):
                edges.append((n1, n2))
    if use_map:
        return edge_map_from_edge_list(edges)
    return edges
[ "def", "hypercube_edges", "(", "dims", ",", "use_map", "=", "False", ")", ":", "edges", "=", "[", "]", "nodes", "=", "np", ".", "arange", "(", "np", ".", "product", "(", "dims", ")", ")", ".", "reshape", "(", "dims", ")", "for", "i", ",", "d", "in", "enumerate", "(", "dims", ")", ":", "for", "j", "in", "range", "(", "d", "-", "1", ")", ":", "for", "n1", ",", "n2", "in", "zip", "(", "np", ".", "take", "(", "nodes", ",", "[", "j", "]", ",", "axis", "=", "i", ")", ".", "flatten", "(", ")", ",", "np", ".", "take", "(", "nodes", ",", "[", "j", "+", "1", "]", ",", "axis", "=", "i", ")", ".", "flatten", "(", ")", ")", ":", "edges", ".", "append", "(", "(", "n1", ",", "n2", ")", ")", "if", "use_map", ":", "return", "edge_map_from_edge_list", "(", "edges", ")", "return", "edges" ]
Create edge lists for an arbitrary hypercube. TODO: this is probably not the fastest way.
[ "Create", "edge", "lists", "for", "an", "arbitrary", "hypercube", ".", "TODO", ":", "this", "is", "probably", "not", "the", "fastest", "way", "." ]
train
https://github.com/tansey/gfl/blob/ae0f078bab57aba9e827ed6162f247ff9dc2aa19/pygfl/utils.py#L299-L309
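Sketch for a 2x3 grid (assumes pygfl and an older NumPy -- np.product was removed in NumPy 2.0, where np.prod would be needed):

from pygfl.utils import hypercube_edges

print(hypercube_edges([2, 3]))
# [(0, 3), (1, 4), (2, 5), (0, 1), (3, 4), (1, 2), (4, 5)]
print(hypercube_edges([2, 3], use_map=True))  # same edges as a node -> neighbors map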
tansey/gfl
pygfl/utils.py
get_delta
def get_delta(D, k):
    '''Calculate the k-th order trend filtering matrix given the oriented edge incidence matrix and the value of k.'''
    if k < 0:
        raise Exception('k must be at least 0th order.')
    result = D
    for i in range(k):
        result = D.T.dot(result) if i % 2 == 0 else D.dot(result)
    return result
python
def get_delta(D, k):
    '''Calculate the k-th order trend filtering matrix given the oriented edge incidence matrix and the value of k.'''
    if k < 0:
        raise Exception('k must be at least 0th order.')
    result = D
    for i in range(k):
        result = D.T.dot(result) if i % 2 == 0 else D.dot(result)
    return result
[ "def", "get_delta", "(", "D", ",", "k", ")", ":", "if", "k", "<", "0", ":", "raise", "Exception", "(", "'k must be at least 0th order.'", ")", "result", "=", "D", "for", "i", "in", "range", "(", "k", ")", ":", "result", "=", "D", ".", "T", ".", "dot", "(", "result", ")", "if", "i", "%", "2", "==", "0", "else", "D", ".", "dot", "(", "result", ")", "return", "result" ]
Calculate the k-th order trend filtering matrix given the oriented edge incidence matrix and the value of k.
[ "Calculate", "the", "k", "-", "th", "order", "trend", "filtering", "matrix", "given", "the", "oriented", "edge", "incidence", "matrix", "and", "the", "value", "of", "k", "." ]
train
https://github.com/tansey/gfl/blob/ae0f078bab57aba9e827ed6162f247ff9dc2aa19/pygfl/utils.py#L390-L398
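Sketch (assumes pygfl and NumPy): with a chain-graph incidence matrix D, k=1 applies D^T once, giving the graph Laplacian.

import numpy as np
from pygfl.utils import get_delta

D = np.array([[-1., 1., 0.],
              [0., -1., 1.]])     # oriented incidence of the chain 0-1-2
print(get_delta(D, 0))            # k=0: D itself
print(get_delta(D, 1))            # k=1: D.T @ D, the 3x3 chain Laplacian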
tansey/gfl
pygfl/utils.py
decompose_delta
def decompose_delta(deltak):
    '''Decomposes the k-th order trend filtering matrix into a c-compatible set of arrays.'''
    if not isspmatrix_coo(deltak):
        deltak = coo_matrix(deltak)
    dk_rows = deltak.shape[0]
    dk_rowbreaks = np.cumsum(deltak.getnnz(1), dtype="int32")
    dk_cols = deltak.col.astype('int32')
    dk_vals = deltak.data.astype('double')
    return dk_rows, dk_rowbreaks, dk_cols, dk_vals
python
def decompose_delta(deltak):
    '''Decomposes the k-th order trend filtering matrix into a c-compatible set of arrays.'''
    if not isspmatrix_coo(deltak):
        deltak = coo_matrix(deltak)
    dk_rows = deltak.shape[0]
    dk_rowbreaks = np.cumsum(deltak.getnnz(1), dtype="int32")
    dk_cols = deltak.col.astype('int32')
    dk_vals = deltak.data.astype('double')
    return dk_rows, dk_rowbreaks, dk_cols, dk_vals
[ "def", "decompose_delta", "(", "deltak", ")", ":", "if", "not", "isspmatrix_coo", "(", "deltak", ")", ":", "deltak", "=", "coo_matrix", "(", "deltak", ")", "dk_rows", "=", "deltak", ".", "shape", "[", "0", "]", "dk_rowbreaks", "=", "np", ".", "cumsum", "(", "deltak", ".", "getnnz", "(", "1", ")", ",", "dtype", "=", "\"int32\"", ")", "dk_cols", "=", "deltak", ".", "col", ".", "astype", "(", "'int32'", ")", "dk_vals", "=", "deltak", ".", "data", ".", "astype", "(", "'double'", ")", "return", "dk_rows", ",", "dk_rowbreaks", ",", "dk_cols", ",", "dk_vals" ]
Decomposes the k-th order trend filtering matrix into a c-compatible set of arrays.
[ "Decomposes", "the", "k", "-", "th", "order", "trend", "filtering", "matrix", "into", "a", "c", "-", "compatible", "set", "of", "arrays", "." ]
train
https://github.com/tansey/gfl/blob/ae0f078bab57aba9e827ed6162f247ff9dc2aa19/pygfl/utils.py#L400-L409
tansey/gfl
pygfl/utils.py
matrix_from_edges
def matrix_from_edges(edges):
    '''Returns a sparse penalty matrix (D) from a list of edge pairs. Each edge can have an optional weight associated with it.'''
    max_col = 0
    cols = []
    rows = []
    vals = []
    if type(edges) is defaultdict:
        edge_list = []
        for i, neighbors in edges.items():
            for j in neighbors:
                if i <= j:
                    edge_list.append((i, j))
        edges = edge_list
    for i, edge in enumerate(edges):
        s, t = edge[0], edge[1]
        weight = 1 if len(edge) == 2 else edge[2]
        cols.append(min(s, t))
        cols.append(max(s, t))
        rows.append(i)
        rows.append(i)
        vals.append(weight)
        vals.append(-weight)
        if cols[-1] > max_col:
            max_col = cols[-1]
    return coo_matrix((vals, (rows, cols)), shape=(rows[-1] + 1, max_col + 1))
python
def matrix_from_edges(edges):
    '''Returns a sparse penalty matrix (D) from a list of edge pairs. Each edge can have an optional weight associated with it.'''
    max_col = 0
    cols = []
    rows = []
    vals = []
    if type(edges) is defaultdict:
        edge_list = []
        for i, neighbors in edges.items():
            for j in neighbors:
                if i <= j:
                    edge_list.append((i, j))
        edges = edge_list
    for i, edge in enumerate(edges):
        s, t = edge[0], edge[1]
        weight = 1 if len(edge) == 2 else edge[2]
        cols.append(min(s, t))
        cols.append(max(s, t))
        rows.append(i)
        rows.append(i)
        vals.append(weight)
        vals.append(-weight)
        if cols[-1] > max_col:
            max_col = cols[-1]
    return coo_matrix((vals, (rows, cols)), shape=(rows[-1] + 1, max_col + 1))
[ "def", "matrix_from_edges", "(", "edges", ")", ":", "max_col", "=", "0", "cols", "=", "[", "]", "rows", "=", "[", "]", "vals", "=", "[", "]", "if", "type", "(", "edges", ")", "is", "defaultdict", ":", "edge_list", "=", "[", "]", "for", "i", ",", "neighbors", "in", "edges", ".", "items", "(", ")", ":", "for", "j", "in", "neighbors", ":", "if", "i", "<=", "j", ":", "edge_list", ".", "append", "(", "(", "i", ",", "j", ")", ")", "edges", "=", "edge_list", "for", "i", ",", "edge", "in", "enumerate", "(", "edges", ")", ":", "s", ",", "t", "=", "edge", "[", "0", "]", ",", "edge", "[", "1", "]", "weight", "=", "1", "if", "len", "(", "edge", ")", "==", "2", "else", "edge", "[", "2", "]", "cols", ".", "append", "(", "min", "(", "s", ",", "t", ")", ")", "cols", ".", "append", "(", "max", "(", "s", ",", "t", ")", ")", "rows", ".", "append", "(", "i", ")", "rows", ".", "append", "(", "i", ")", "vals", ".", "append", "(", "weight", ")", "vals", ".", "append", "(", "-", "weight", ")", "if", "cols", "[", "-", "1", "]", ">", "max_col", ":", "max_col", "=", "cols", "[", "-", "1", "]", "return", "coo_matrix", "(", "(", "vals", ",", "(", "rows", ",", "cols", ")", ")", ",", "shape", "=", "(", "rows", "[", "-", "1", "]", "+", "1", ",", "max_col", "+", "1", ")", ")" ]
Returns a sparse penalty matrix (D) from a list of edge pairs. Each edge can have an optional weight associated with it.
[ "Returns", "a", "sparse", "penalty", "matrix", "(", "D", ")", "from", "a", "list", "of", "edge", "pairs", ".", "Each", "edge", "can", "have", "an", "optional", "weight", "associated", "with", "it", "." ]
train
https://github.com/tansey/gfl/blob/ae0f078bab57aba9e827ed6162f247ff9dc2aa19/pygfl/utils.py#L411-L436
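The three pieces above compose (hedged sketch; assumes pygfl and SciPy): build D from edges, raise it to a 2nd-order operator, then flatten for the C solver.

from pygfl.utils import matrix_from_edges, get_delta, decompose_delta

D = matrix_from_edges([(0, 1), (1, 2), (2, 3)])   # sparse 3x4 incidence matrix
delta2 = get_delta(D, 2)                          # 2nd-order trend-filtering operator
rows, rowbreaks, cols, vals = decompose_delta(delta2)
print(rows, rowbreaks, cols, vals)                # flat, C-compatible arrays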
tansey/gfl
pygfl/utils.py
ks_distance
def ks_distance(a, b):
    '''Get the Kolmogorov-Smirnov (KS) distance between two densities a and b.'''
    if len(a.shape) == 1:
        return np.max(np.abs(a.cumsum() - b.cumsum()))
    return np.max(np.abs(a.cumsum(axis=1) - b.cumsum(axis=1)), axis=1)
python
def ks_distance(a, b):
    '''Get the Kolmogorov-Smirnov (KS) distance between two densities a and b.'''
    if len(a.shape) == 1:
        return np.max(np.abs(a.cumsum() - b.cumsum()))
    return np.max(np.abs(a.cumsum(axis=1) - b.cumsum(axis=1)), axis=1)
[ "def", "ks_distance", "(", "a", ",", "b", ")", ":", "if", "len", "(", "a", ".", "shape", ")", "==", "1", ":", "return", "np", ".", "max", "(", "np", ".", "abs", "(", "a", ".", "cumsum", "(", ")", "-", "b", ".", "cumsum", "(", ")", ")", ")", "return", "np", ".", "max", "(", "np", ".", "abs", "(", "a", ".", "cumsum", "(", "axis", "=", "1", ")", "-", "b", ".", "cumsum", "(", "axis", "=", "1", ")", ")", ",", "axis", "=", "1", ")" ]
Get the Kolmogorov-Smirnov (KS) distance between two densities a and b.
[ "Get", "the", "Kolmogorov", "-", "Smirnov", "(", "KS", ")", "distance", "between", "two", "densities", "a", "and", "b", "." ]
train
https://github.com/tansey/gfl/blob/ae0f078bab57aba9e827ed6162f247ff9dc2aa19/pygfl/utils.py#L438-L442
tansey/gfl
pygfl/utils.py
tv_distance
def tv_distance(a, b):
    '''Get the Total Variation (TV) distance between two densities a and b.'''
    if len(a.shape) == 1:
        return np.sum(np.abs(a - b))
    return np.sum(np.abs(a - b), axis=1)
python
def tv_distance(a, b):
    '''Get the Total Variation (TV) distance between two densities a and b.'''
    if len(a.shape) == 1:
        return np.sum(np.abs(a - b))
    return np.sum(np.abs(a - b), axis=1)
[ "def", "tv_distance", "(", "a", ",", "b", ")", ":", "if", "len", "(", "a", ".", "shape", ")", "==", "1", ":", "return", "np", ".", "sum", "(", "np", ".", "abs", "(", "a", "-", "b", ")", ")", "return", "np", ".", "sum", "(", "np", ".", "abs", "(", "a", "-", "b", ")", ",", "axis", "=", "1", ")" ]
Get the Total Variation (TV) distance between two densities a and b.
[ "Get", "the", "Total", "Variation", "(", "TV", ")", "distance", "between", "two", "densities", "a", "and", "b", "." ]
train
https://github.com/tansey/gfl/blob/ae0f078bab57aba9e827ed6162f247ff9dc2aa19/pygfl/utils.py#L444-L448
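Sketch covering both distances above (assumes NumPy and pygfl; note that tv_distance as defined here omits the conventional 1/2 factor):

import numpy as np
from pygfl.utils import ks_distance, tv_distance

a = np.array([0.2, 0.3, 0.5])
b = np.array([0.25, 0.25, 0.5])
print(ks_distance(a, b))  # ~0.05, the max gap between the CDFs
print(tv_distance(a, b))  # ~0.1, the sum of absolute differences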
doanguyen/lasotuvi
lasotuvi/Lich_HND.py
jdFromDate
def jdFromDate(dd, mm, yy):
    '''def jdFromDate(dd, mm, yy): Compute the (integral) Julian day number of day dd/mm/yyyy, i.e., the number of days between 1/1/4713 BC (Julian calendar) and dd/mm/yyyy.'''
    a = int((14 - mm) / 12.)
    y = yy + 4800 - a
    m = mm + 12 * a - 3
    jd = dd + int((153 * m + 2) / 5.) \
        + 365 * y + int(y / 4.) - int(y / 100.) \
        + int(y / 400.) - 32045
    if (jd < 2299161):
        jd = dd + int((153 * m + 2) / 5.) \
            + 365 * y + int(y / 4.) - 32083
    return jd
python
def jdFromDate(dd, mm, yy):
    '''def jdFromDate(dd, mm, yy): Compute the (integral) Julian day number of day dd/mm/yyyy, i.e., the number of days between 1/1/4713 BC (Julian calendar) and dd/mm/yyyy.'''
    a = int((14 - mm) / 12.)
    y = yy + 4800 - a
    m = mm + 12 * a - 3
    jd = dd + int((153 * m + 2) / 5.) \
        + 365 * y + int(y / 4.) - int(y / 100.) \
        + int(y / 400.) - 32045
    if (jd < 2299161):
        jd = dd + int((153 * m + 2) / 5.) \
            + 365 * y + int(y / 4.) - 32083
    return jd
[ "def", "jdFromDate", "(", "dd", ",", "mm", ",", "yy", ")", ":", "a", "=", "int", "(", "(", "14", "-", "mm", ")", "/", "12.", ")", "y", "=", "yy", "+", "4800", "-", "a", "m", "=", "mm", "+", "12", "*", "a", "-", "3", "jd", "=", "dd", "+", "int", "(", "(", "153", "*", "m", "+", "2", ")", "/", "5.", ")", "+", "365", "*", "y", "+", "int", "(", "y", "/", "4.", ")", "-", "int", "(", "y", "/", "100.", ")", "+", "int", "(", "y", "/", "400.", ")", "-", "32045", "if", "(", "jd", "<", "2299161", ")", ":", "jd", "=", "dd", "+", "int", "(", "(", "153", "*", "m", "+", "2", ")", "/", "5.", ")", "+", "365", "*", "y", "+", "int", "(", "y", "/", "4.", ")", "-", "32083", "return", "jd" ]
def jdFromDate(dd, mm, yy): Compute the (integral) Julian day number of day dd/mm/yyyy, i.e., the number of days between 1/1/4713 BC (Julian calendar) and dd/mm/yyyy.
[ "def", "jdFromDate", "(", "dd", "mm", "yy", ")", ":", "Compute", "the", "(", "integral", ")", "Julian", "day", "number", "of", "day", "dd", "/", "mm", "/", "yyyy", "i", ".", "e", ".", "the", "number", "of", "days", "between", "1", "/", "1", "/", "4713", "BC", "(", "Julian", "calendar", ")", "and", "dd", "/", "mm", "/", "yyyy", "." ]
train
https://github.com/doanguyen/lasotuvi/blob/98383a3056f0a0633d6937d364c37eb788661c0d/lasotuvi/Lich_HND.py#L10-L23
doanguyen/lasotuvi
lasotuvi/Lich_HND.py
jdToDate
def jdToDate(jd):
    '''def jdToDate(jd): Convert a Julian day number to day/month/year. jd is an integer.'''
    if (jd > 2299160):  # After 5/10/1582, Gregorian calendar
        a = jd + 32044
        b = int((4 * a + 3) / 146097.)
        c = a - int((b * 146097) / 4.)
    else:
        b = 0
        c = jd + 32082
    d = int((4 * c + 3) / 1461.)
    e = c - int((1461 * d) / 4.)
    m = int((5 * e + 2) / 153.)
    day = e - int((153 * m + 2) / 5.) + 1
    month = m + 3 - 12 * int(m / 10.)
    year = b * 100 + d - 4800 + int(m / 10.)
    return [day, month, year]
python
def jdToDate(jd):
    '''def jdToDate(jd): Convert a Julian day number to day/month/year. jd is an integer.'''
    if (jd > 2299160):  # After 5/10/1582, Gregorian calendar
        a = jd + 32044
        b = int((4 * a + 3) / 146097.)
        c = a - int((b * 146097) / 4.)
    else:
        b = 0
        c = jd + 32082
    d = int((4 * c + 3) / 1461.)
    e = c - int((1461 * d) / 4.)
    m = int((5 * e + 2) / 153.)
    day = e - int((153 * m + 2) / 5.) + 1
    month = m + 3 - 12 * int(m / 10.)
    year = b * 100 + d - 4800 + int(m / 10.)
    return [day, month, year]
[ "def", "jdToDate", "(", "jd", ")", ":", "if", "(", "jd", ">", "2299160", ")", ":", "# After 5/10/1582, Gregorian calendar", "a", "=", "jd", "+", "32044", "b", "=", "int", "(", "(", "4", "*", "a", "+", "3", ")", "/", "146097.", ")", "c", "=", "a", "-", "int", "(", "(", "b", "*", "146097", ")", "/", "4.", ")", "else", ":", "b", "=", "0", "c", "=", "jd", "+", "32082", "d", "=", "int", "(", "(", "4", "*", "c", "+", "3", ")", "/", "1461.", ")", "e", "=", "c", "-", "int", "(", "(", "1461", "*", "d", ")", "/", "4.", ")", "m", "=", "int", "(", "(", "5", "*", "e", "+", "2", ")", "/", "153.", ")", "day", "=", "e", "-", "int", "(", "(", "153", "*", "m", "+", "2", ")", "/", "5.", ")", "+", "1", "month", "=", "m", "+", "3", "-", "12", "*", "int", "(", "m", "/", "10.", ")", "year", "=", "b", "*", "100", "+", "d", "-", "4800", "+", "int", "(", "m", "/", "10.", ")", "return", "[", "day", ",", "month", ",", "year", "]" ]
def jdToDate(jd): Convert a Julian day number to day/month/year. jd is an integer.
[ "def", "jdToDate", "(", "jd", ")", ":", "Convert", "a", "Julian", "day", "number", "to", "day", "/", "month", "/", "year", ".", "jd", "is", "an", "integer", "." ]
train
https://github.com/doanguyen/lasotuvi/blob/98383a3056f0a0633d6937d364c37eb788661c0d/lasotuvi/Lich_HND.py#L26-L43
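The two converters above round-trip (sketch; assumes lasotuvi is importable):

from lasotuvi.Lich_HND import jdFromDate, jdToDate

jd = jdFromDate(1, 1, 2000)
print(jd)            # 2451545, the Julian day number of 1/1/2000
print(jdToDate(jd))  # [1, 1, 2000]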
doanguyen/lasotuvi
lasotuvi/Lich_HND.py
NewMoon
def NewMoon(k):
    '''def NewMoon(k): Compute the time of the k-th new moon after the new moon of 1/1/1900 13:52 UTC (measured as the number of days since 1/1/4713 BC noon UTC, e.g., 2451545.125 is 1/1/2000 15:00 UTC). Returns a floating number, e.g., 2415079.9758617813 for k=2 or 2414961.935157746 for k=-2.'''
    # Time in Julian centuries from 1900 January 0.5
    T = k / 1236.85
    T2 = T * T
    T3 = T2 * T
    dr = math.pi / 180.
    Jd1 = 2415020.75933 + 29.53058868 * k \
        + 0.0001178 * T2 - 0.000000155 * T3
    Jd1 = Jd1 + 0.00033 * math.sin(
        (166.56 + 132.87 * T - 0.009173 * T2) * dr)  # Mean new moon
    M = 359.2242 + 29.10535608 * k \
        - 0.0000333 * T2 - 0.00000347 * T3  # Sun's mean anomaly
    Mpr = 306.0253 + 385.81691806 * k \
        + 0.0107306 * T2 + 0.00001236 * T3  # Moon's mean anomaly
    F = 21.2964 + 390.67050646 * k - 0.0016528 * T2 \
        - 0.00000239 * T3  # Moon's argument of latitude
    C1 = (0.1734 - 0.000393 * T) * math.sin(M * dr) \
        + 0.0021 * math.sin(2 * dr * M)
    C1 = C1 - 0.4068 * math.sin(Mpr * dr) \
        + 0.0161 * math.sin(dr * 2 * Mpr)
    C1 = C1 - 0.0004 * math.sin(dr * 3 * Mpr)
    C1 = C1 + 0.0104 * math.sin(dr * 2 * F) \
        - 0.0051 * math.sin(dr * (M + Mpr))
    C1 = C1 - 0.0074 * math.sin(dr * (M - Mpr)) \
        + 0.0004 * math.sin(dr * (2 * F + M))
    C1 = C1 - 0.0004 * math.sin(dr * (2 * F - M)) \
        - 0.0006 * math.sin(dr * (2 * F + Mpr))
    C1 = C1 + 0.0010 * math.sin(dr * (2 * F - Mpr)) \
        + 0.0005 * math.sin(dr * (2 * Mpr + M))
    if (T < -11):
        deltat = 0.001 + 0.000839 * T + 0.0002261 * T2 \
            - 0.00000845 * T3 - 0.000000081 * T * T3
    else:
        deltat = -0.000278 + 0.000265 * T + 0.000262 * T2
    JdNew = Jd1 + C1 - deltat
    return JdNew
python
def NewMoon(k):
    '''def NewMoon(k): Compute the time of the k-th new moon after the new moon of 1/1/1900 13:52 UTC (measured as the number of days since 1/1/4713 BC noon UTC, e.g., 2451545.125 is 1/1/2000 15:00 UTC). Returns a floating number, e.g., 2415079.9758617813 for k=2 or 2414961.935157746 for k=-2.'''
    # Time in Julian centuries from 1900 January 0.5
    T = k / 1236.85
    T2 = T * T
    T3 = T2 * T
    dr = math.pi / 180.
    Jd1 = 2415020.75933 + 29.53058868 * k \
        + 0.0001178 * T2 - 0.000000155 * T3
    Jd1 = Jd1 + 0.00033 * math.sin(
        (166.56 + 132.87 * T - 0.009173 * T2) * dr)  # Mean new moon
    M = 359.2242 + 29.10535608 * k \
        - 0.0000333 * T2 - 0.00000347 * T3  # Sun's mean anomaly
    Mpr = 306.0253 + 385.81691806 * k \
        + 0.0107306 * T2 + 0.00001236 * T3  # Moon's mean anomaly
    F = 21.2964 + 390.67050646 * k - 0.0016528 * T2 \
        - 0.00000239 * T3  # Moon's argument of latitude
    C1 = (0.1734 - 0.000393 * T) * math.sin(M * dr) \
        + 0.0021 * math.sin(2 * dr * M)
    C1 = C1 - 0.4068 * math.sin(Mpr * dr) \
        + 0.0161 * math.sin(dr * 2 * Mpr)
    C1 = C1 - 0.0004 * math.sin(dr * 3 * Mpr)
    C1 = C1 + 0.0104 * math.sin(dr * 2 * F) \
        - 0.0051 * math.sin(dr * (M + Mpr))
    C1 = C1 - 0.0074 * math.sin(dr * (M - Mpr)) \
        + 0.0004 * math.sin(dr * (2 * F + M))
    C1 = C1 - 0.0004 * math.sin(dr * (2 * F - M)) \
        - 0.0006 * math.sin(dr * (2 * F + Mpr))
    C1 = C1 + 0.0010 * math.sin(dr * (2 * F - Mpr)) \
        + 0.0005 * math.sin(dr * (2 * Mpr + M))
    if (T < -11):
        deltat = 0.001 + 0.000839 * T + 0.0002261 * T2 \
            - 0.00000845 * T3 - 0.000000081 * T * T3
    else:
        deltat = -0.000278 + 0.000265 * T + 0.000262 * T2
    JdNew = Jd1 + C1 - deltat
    return JdNew
[ "def", "NewMoon", "(", "k", ")", ":", "# Time in Julian centuries from 1900 January 0.5", "T", "=", "k", "/", "1236.85", "T2", "=", "T", "*", "T", "T3", "=", "T2", "*", "T", "dr", "=", "math", ".", "pi", "/", "180.", "Jd1", "=", "2415020.75933", "+", "29.53058868", "*", "k", "+", "0.0001178", "*", "T2", "-", "0.000000155", "*", "T3", "Jd1", "=", "Jd1", "+", "0.00033", "*", "math", ".", "sin", "(", "(", "166.56", "+", "132.87", "*", "T", "-", "0.009173", "*", "T2", ")", "*", "dr", ")", "# Mean new moon", "M", "=", "359.2242", "+", "29.10535608", "*", "k", "-", "0.0000333", "*", "T2", "-", "0.00000347", "*", "T3", "# Sun's mean anomaly", "Mpr", "=", "306.0253", "+", "385.81691806", "*", "k", "+", "0.0107306", "*", "T2", "+", "0.00001236", "*", "T3", "# Moon's mean anomaly", "F", "=", "21.2964", "+", "390.67050646", "*", "k", "-", "0.0016528", "*", "T2", "-", "0.00000239", "*", "T3", "# Moon's argument of latitude", "C1", "=", "(", "0.1734", "-", "0.000393", "*", "T", ")", "*", "math", ".", "sin", "(", "M", "*", "dr", ")", "+", "0.0021", "*", "math", ".", "sin", "(", "2", "*", "dr", "*", "M", ")", "C1", "=", "C1", "-", "0.4068", "*", "math", ".", "sin", "(", "Mpr", "*", "dr", ")", "+", "0.0161", "*", "math", ".", "sin", "(", "dr", "*", "2", "*", "Mpr", ")", "C1", "=", "C1", "-", "0.0004", "*", "math", ".", "sin", "(", "dr", "*", "3", "*", "Mpr", ")", "C1", "=", "C1", "+", "0.0104", "*", "math", ".", "sin", "(", "dr", "*", "2", "*", "F", ")", "-", "0.0051", "*", "math", ".", "sin", "(", "dr", "*", "(", "M", "+", "Mpr", ")", ")", "C1", "=", "C1", "-", "0.0074", "*", "math", ".", "sin", "(", "dr", "*", "(", "M", "-", "Mpr", ")", ")", "+", "0.0004", "*", "math", ".", "sin", "(", "dr", "*", "(", "2", "*", "F", "+", "M", ")", ")", "C1", "=", "C1", "-", "0.0004", "*", "math", ".", "sin", "(", "dr", "*", "(", "2", "*", "F", "-", "M", ")", ")", "-", "0.0006", "*", "math", ".", "sin", "(", "dr", "*", "(", "2", "*", "F", "+", "Mpr", ")", ")", "C1", "=", "C1", "+", "0.0010", "*", "math", ".", "sin", "(", "dr", "*", "(", "2", "*", "F", "-", "Mpr", ")", ")", "+", "0.0005", "*", "math", ".", "sin", "(", "dr", "*", "(", "2", "*", "Mpr", "+", "M", ")", ")", "if", "(", "T", "<", "-", "11", ")", ":", "deltat", "=", "0.001", "+", "0.000839", "*", "T", "+", "0.0002261", "*", "T2", "-", "0.00000845", "*", "T3", "-", "0.000000081", "*", "T", "*", "T3", "else", ":", "deltat", "=", "-", "0.000278", "+", "0.000265", "*", "T", "+", "0.000262", "*", "T2", "JdNew", "=", "Jd1", "+", "C1", "-", "deltat", "return", "JdNew" ]
def NewMoon(k): Compute the time of the k-th new moon after the new moon of 1/1/1900 13:52 UTC (measured as the number of days since 1/1/4713 BC noon UTC, e.g., 2451545.125 is 1/1/2000 15:00 UTC). Returns a floating number, e.g., 2415079.9758617813 for k=2 or 2414961.935157746 for k=-2.
[ "def", "NewMoon", "(", "k", ")", ":", "Compute", "the", "time", "of", "the", "k", "-", "th", "new", "moon", "after", "the", "new", "moon", "of", "1", "/", "1", "/", "1900", "13", ":", "52", "UCT", "(", "measured", "as", "the", "number", "of", "days", "since", "1", "/", "1", "/", "4713", "BC", "noon", "UCT", "e", ".", "g", ".", "2451545", ".", "125", "is", "1", "/", "1", "/", "2000", "15", ":", "00", "UTC", ".", "Returns", "a", "floating", "number", "e", ".", "g", ".", "2415079", ".", "9758617813", "for", "k", "=", "2", "or", "2414961", ".", "935157746", "for", "k", "=", "-", "2", "." ]
train
https://github.com/doanguyen/lasotuvi/blob/98383a3056f0a0633d6937d364c37eb788661c0d/lasotuvi/Lich_HND.py#L46-L90
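Sketch reproducing the docstring's own examples (assumes lasotuvi is importable):

from lasotuvi.Lich_HND import NewMoon

print(NewMoon(2))    # ~2415079.9758617813
print(NewMoon(-2))   # ~2414961.935157746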
doanguyen/lasotuvi
lasotuvi/Lich_HND.py
SunLongitude
def SunLongitude(jdn):
    '''def SunLongitude(jdn): Compute the longitude of the sun at any time. Parameter: floating number jdn, the number of days since 1/1/4713 BC noon.'''
    T = (jdn - 2451545.0) / 36525.  # Time in Julian centuries
    # from 2000-01-01 12:00:00 GMT
    T2 = T * T
    dr = math.pi / 180.  # degree to radian
    M = 357.52910 + 35999.05030 * T \
        - 0.0001559 * T2 - 0.00000048 * T * T2  # mean anomaly, degree
    L0 = 280.46645 + 36000.76983 * T + 0.0003032 * T2  # mean longitude, degree
    DL = (1.914600 - 0.004817 * T - 0.000014 * T2) \
        * math.sin(dr * M)
    DL += (0.019993 - 0.000101 * T) * math.sin(dr * 2 * M) \
        + 0.000290 * math.sin(dr * 3 * M)
    L = L0 + DL  # true longitude, degree
    L = L * dr
    L = L - math.pi * 2 * (int(L / (math.pi * 2)))  # Normalize to (0, 2*math.pi)
    return L
python
def SunLongitude(jdn):
    '''def SunLongitude(jdn): Compute the longitude of the sun at any time. Parameter: floating number jdn, the number of days since 1/1/4713 BC noon.'''
    T = (jdn - 2451545.0) / 36525.  # Time in Julian centuries
    # from 2000-01-01 12:00:00 GMT
    T2 = T * T
    dr = math.pi / 180.  # degree to radian
    M = 357.52910 + 35999.05030 * T \
        - 0.0001559 * T2 - 0.00000048 * T * T2  # mean anomaly, degree
    L0 = 280.46645 + 36000.76983 * T + 0.0003032 * T2  # mean longitude, degree
    DL = (1.914600 - 0.004817 * T - 0.000014 * T2) \
        * math.sin(dr * M)
    DL += (0.019993 - 0.000101 * T) * math.sin(dr * 2 * M) \
        + 0.000290 * math.sin(dr * 3 * M)
    L = L0 + DL  # true longitude, degree
    L = L * dr
    L = L - math.pi * 2 * (int(L / (math.pi * 2)))  # Normalize to (0, 2*math.pi)
    return L
[ "def", "SunLongitude", "(", "jdn", ")", ":", "T", "=", "(", "jdn", "-", "2451545.0", ")", "/", "36525.", "# Time in Julian centuries", "# from 2000-01-01 12:00:00 GMT", "T2", "=", "T", "*", "T", "dr", "=", "math", ".", "pi", "/", "180.", "# degree to radian", "M", "=", "357.52910", "+", "35999.05030", "*", "T", "-", "0.0001559", "*", "T2", "-", "0.00000048", "*", "T", "*", "T2", "# mean anomaly, degree", "L0", "=", "280.46645", "+", "36000.76983", "*", "T", "+", "0.0003032", "*", "T2", "# mean longitude, degree", "DL", "=", "(", "1.914600", "-", "0.004817", "*", "T", "-", "0.000014", "*", "T2", ")", "*", "math", ".", "sin", "(", "dr", "*", "M", ")", "DL", "+=", "(", "0.019993", "-", "0.000101", "*", "T", ")", "*", "math", ".", "sin", "(", "dr", "*", "2", "*", "M", ")", "+", "0.000290", "*", "math", ".", "sin", "(", "dr", "*", "3", "*", "M", ")", "L", "=", "L0", "+", "DL", "# true longitude, degree", "L", "=", "L", "*", "dr", "L", "=", "L", "-", "math", ".", "pi", "*", "2", "*", "(", "float", "(", "L", "/", "(", "math", ".", "pi", "*", "2", ")", ")", ")", "# Normalize to (0, 2*math.pi)", "return", "L" ]
def SunLongitude(jdn): Compute the longitude of the sun at any time. Parameter: floating number jdn, the number of days since 1/1/4713 BC noon.
[ "def", "SunLongitude", "(", "jdn", ")", ":", "Compute", "the", "longitude", "of", "the", "sun", "at", "any", "time", ".", "Parameter", ":", "floating", "number", "jdn", "the", "number", "of", "days", "since", "1", "/", "1", "/", "4713", "BC", "noon", "." ]
train
https://github.com/doanguyen/lasotuvi/blob/98383a3056f0a0633d6937d364c37eb788661c0d/lasotuvi/Lich_HND.py#L93-L115
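Sketch (assumes lasotuvi; relies on the integer normalization above): near the March equinox the returned longitude should sit barely past 0.

import math
from lasotuvi.Lich_HND import SunLongitude, jdFromDate

L = SunLongitude(jdFromDate(21, 3, 2000))
print(math.degrees(L))  # roughly 1.2 degrees, just past the vernal point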
doanguyen/lasotuvi
lasotuvi/Lich_HND.py
getLunarMonth11
def getLunarMonth11(yy, timeZone):
    '''def getLunarMonth11(yy, timeZone): Find the day that starts the lunar month 11 of the given year for the given time zone.'''
    # off = jdFromDate(31, 12, yy) \
    # - 2415021.076998695
    off = jdFromDate(31, 12, yy) - 2415021.
    k = int(off / 29.530588853)
    nm = getNewMoonDay(k, timeZone)
    sunLong = getSunLongitude(nm, timeZone)  # sun longitude at local midnight
    if (sunLong >= 9):
        nm = getNewMoonDay(k - 1, timeZone)
    return nm
python
def getLunarMonth11(yy, timeZone):
    '''def getLunarMonth11(yy, timeZone): Find the day that starts the lunar month 11 of the given year for the given time zone.'''
    # off = jdFromDate(31, 12, yy) \
    # - 2415021.076998695
    off = jdFromDate(31, 12, yy) - 2415021.
    k = int(off / 29.530588853)
    nm = getNewMoonDay(k, timeZone)
    sunLong = getSunLongitude(nm, timeZone)  # sun longitude at local midnight
    if (sunLong >= 9):
        nm = getNewMoonDay(k - 1, timeZone)
    return nm
[ "def", "getLunarMonth11", "(", "yy", ",", "timeZone", ")", ":", "# off = jdFromDate(31, 12, yy) \\", "# - 2415021.076998695", "off", "=", "jdFromDate", "(", "31", ",", "12", ",", "yy", ")", "-", "2415021.", "k", "=", "int", "(", "off", "/", "29.530588853", ")", "nm", "=", "getNewMoonDay", "(", "k", ",", "timeZone", ")", "sunLong", "=", "getSunLongitude", "(", "nm", ",", "timeZone", ")", "# sun longitude at local midnight", "if", "(", "sunLong", ">=", "9", ")", ":", "nm", "=", "getNewMoonDay", "(", "k", "-", "1", ",", "timeZone", ")", "return", "nm" ]
def getLunarMonth11(yy, timeZone): Find the day that starts the lunar month 11 of the given year for the given time zone.
[ "def", "getLunarMonth11", "(", "yy", "timeZone", ")", ":", "Find", "the", "day", "that", "starts", "the", "luner", "month", "11of", "the", "given", "year", "for", "the", "given", "time", "zone", "." ]
train
https://github.com/doanguyen/lasotuvi/blob/98383a3056f0a0633d6937d364c37eb788661c0d/lasotuvi/Lich_HND.py#L153-L165
doanguyen/lasotuvi
lasotuvi/Lich_HND.py
getLeapMonthOffset
def getLeapMonthOffset(a11, timeZone):
    '''def getLeapMonthOffset(a11, timeZone): Find the index of the leap month after the month starting on the day a11.'''
    k = int((a11 - 2415021.076998695) / 29.530588853 + 0.5)
    last = 0
    i = 1  # start with month following lunar month 11
    arc = getSunLongitude(getNewMoonDay(k + i, timeZone), timeZone)
    while True:
        last = arc
        i += 1
        arc = getSunLongitude(getNewMoonDay(k + i, timeZone), timeZone)
        if not (arc != last and i < 14):
            break
    return i - 1
python
def getLeapMonthOffset(a11, timeZone):
    '''def getLeapMonthOffset(a11, timeZone): Find the index of the leap month after the month starting on the day a11.'''
    k = int((a11 - 2415021.076998695) / 29.530588853 + 0.5)
    last = 0
    i = 1  # start with month following lunar month 11
    arc = getSunLongitude(getNewMoonDay(k + i, timeZone), timeZone)
    while True:
        last = arc
        i += 1
        arc = getSunLongitude(getNewMoonDay(k + i, timeZone), timeZone)
        if not (arc != last and i < 14):
            break
    return i - 1
[ "def", "getLeapMonthOffset", "(", "a11", ",", "timeZone", ")", ":", "k", "=", "int", "(", "(", "a11", "-", "2415021.076998695", ")", "/", "29.530588853", "+", "0.5", ")", "last", "=", "0", "i", "=", "1", "# start with month following lunar month 11", "arc", "=", "getSunLongitude", "(", "getNewMoonDay", "(", "k", "+", "i", ",", "timeZone", ")", ",", "timeZone", ")", "while", "True", ":", "last", "=", "arc", "i", "+=", "1", "arc", "=", "getSunLongitude", "(", "getNewMoonDay", "(", "k", "+", "i", ",", "timeZone", ")", ",", "timeZone", ")", "if", "not", "(", "arc", "!=", "last", "and", "i", "<", "14", ")", ":", "break", "return", "i", "-", "1" ]
def getLeapMonthOffset(a11, timeZone): Find the index of the leap month after the month starting on the day a11.
[ "def", "getLeapMonthOffset", "(", "a11", "timeZone", ")", ":", "Find", "the", "index", "of", "the", "leap", "month", "after", "the", "month", "starting", "on", "the", "day", "a11", "." ]
train
https://github.com/doanguyen/lasotuvi/blob/98383a3056f0a0633d6937d364c37eb788661c0d/lasotuvi/Lich_HND.py#L168-L184
doanguyen/lasotuvi
lasotuvi/Lich_HND.py
S2L
def S2L(dd, mm, yy, timeZone=7):
    '''def S2L(dd, mm, yy, timeZone = 7): Convert solar date dd/mm/yyyy to the corresponding lunar date.'''
    dayNumber = jdFromDate(dd, mm, yy)
    k = int((dayNumber - 2415021.076998695) / 29.530588853)
    monthStart = getNewMoonDay(k + 1, timeZone)
    if (monthStart > dayNumber):
        monthStart = getNewMoonDay(k, timeZone)
    # alert(dayNumber + " -> " + monthStart)
    a11 = getLunarMonth11(yy, timeZone)
    b11 = a11
    if (a11 >= monthStart):
        lunarYear = yy
        a11 = getLunarMonth11(yy - 1, timeZone)
    else:
        lunarYear = yy + 1
        b11 = getLunarMonth11(yy + 1, timeZone)
    lunarDay = dayNumber - monthStart + 1
    diff = int((monthStart - a11) / 29.)
    lunarLeap = 0
    lunarMonth = diff + 11
    if (b11 - a11 > 365):
        leapMonthDiff = \
            getLeapMonthOffset(a11, timeZone)
        if (diff >= leapMonthDiff):
            lunarMonth = diff + 10
            if (diff == leapMonthDiff):
                lunarLeap = 1
    if (lunarMonth > 12):
        lunarMonth = lunarMonth - 12
    if (lunarMonth >= 11 and diff < 4):
        lunarYear -= 1
    # print [lunarDay, lunarMonth, lunarYear, lunarLeap]
    return \
        [lunarDay, lunarMonth, lunarYear, lunarLeap]
python
def S2L(dd, mm, yy, timeZone=7):
    '''def S2L(dd, mm, yy, timeZone = 7): Convert solar date dd/mm/yyyy to the corresponding lunar date.'''
    dayNumber = jdFromDate(dd, mm, yy)
    k = int((dayNumber - 2415021.076998695) / 29.530588853)
    monthStart = getNewMoonDay(k + 1, timeZone)
    if (monthStart > dayNumber):
        monthStart = getNewMoonDay(k, timeZone)
    # alert(dayNumber + " -> " + monthStart)
    a11 = getLunarMonth11(yy, timeZone)
    b11 = a11
    if (a11 >= monthStart):
        lunarYear = yy
        a11 = getLunarMonth11(yy - 1, timeZone)
    else:
        lunarYear = yy + 1
        b11 = getLunarMonth11(yy + 1, timeZone)
    lunarDay = dayNumber - monthStart + 1
    diff = int((monthStart - a11) / 29.)
    lunarLeap = 0
    lunarMonth = diff + 11
    if (b11 - a11 > 365):
        leapMonthDiff = \
            getLeapMonthOffset(a11, timeZone)
        if (diff >= leapMonthDiff):
            lunarMonth = diff + 10
            if (diff == leapMonthDiff):
                lunarLeap = 1
    if (lunarMonth > 12):
        lunarMonth = lunarMonth - 12
    if (lunarMonth >= 11 and diff < 4):
        lunarYear -= 1
    # print [lunarDay, lunarMonth, lunarYear, lunarLeap]
    return \
        [lunarDay, lunarMonth, lunarYear, lunarLeap]
[ "def", "S2L", "(", "dd", ",", "mm", ",", "yy", ",", "timeZone", "=", "7", ")", ":", "dayNumber", "=", "jdFromDate", "(", "dd", ",", "mm", ",", "yy", ")", "k", "=", "int", "(", "(", "dayNumber", "-", "2415021.076998695", ")", "/", "29.530588853", ")", "monthStart", "=", "getNewMoonDay", "(", "k", "+", "1", ",", "timeZone", ")", "if", "(", "monthStart", ">", "dayNumber", ")", ":", "monthStart", "=", "getNewMoonDay", "(", "k", ",", "timeZone", ")", "# alert(dayNumber + \" -> \" + monthStart)", "a11", "=", "getLunarMonth11", "(", "yy", ",", "timeZone", ")", "b11", "=", "a11", "if", "(", "a11", ">=", "monthStart", ")", ":", "lunarYear", "=", "yy", "a11", "=", "getLunarMonth11", "(", "yy", "-", "1", ",", "timeZone", ")", "else", ":", "lunarYear", "=", "yy", "+", "1", "b11", "=", "getLunarMonth11", "(", "yy", "+", "1", ",", "timeZone", ")", "lunarDay", "=", "dayNumber", "-", "monthStart", "+", "1", "diff", "=", "int", "(", "(", "monthStart", "-", "a11", ")", "/", "29.", ")", "lunarLeap", "=", "0", "lunarMonth", "=", "diff", "+", "11", "if", "(", "b11", "-", "a11", ">", "365", ")", ":", "leapMonthDiff", "=", "getLeapMonthOffset", "(", "a11", ",", "timeZone", ")", "if", "(", "diff", ">=", "leapMonthDiff", ")", ":", "lunarMonth", "=", "diff", "+", "10", "if", "(", "diff", "==", "leapMonthDiff", ")", ":", "lunarLeap", "=", "1", "if", "(", "lunarMonth", ">", "12", ")", ":", "lunarMonth", "=", "lunarMonth", "-", "12", "if", "(", "lunarMonth", ">=", "11", "and", "diff", "<", "4", ")", ":", "lunarYear", "-=", "1", "# print [lunarDay, lunarMonth, lunarYear, lunarLeap]", "return", "[", "lunarDay", ",", "lunarMonth", ",", "lunarYear", ",", "lunarLeap", "]" ]
def S2L(dd, mm, yy, timeZone = 7): Convert solar date dd/mm/yyyy to the corresponding lunar date.
[ "def", "S2L", "(", "dd", "mm", "yy", "timeZone", "=", "7", ")", ":", "Convert", "solar", "date", "dd", "/", "mm", "/", "yyyy", "to", "the", "corresponding", "lunar", "date", "." ]
train
https://github.com/doanguyen/lasotuvi/blob/98383a3056f0a0633d6937d364c37eb788661c0d/lasotuvi/Lich_HND.py#L187-L223
doanguyen/lasotuvi
lasotuvi/Lich_HND.py
L2S
def L2S(lunarD, lunarM, lunarY, lunarLeap, tZ=7):
    '''def L2S(lunarD, lunarM, lunarY, lunarLeap, tZ = 7): Convert a lunar date to the corresponding solar date.'''
    if (lunarM < 11):
        a11 = getLunarMonth11(lunarY - 1, tZ)
        b11 = getLunarMonth11(lunarY, tZ)
    else:
        a11 = getLunarMonth11(lunarY, tZ)
        b11 = getLunarMonth11(lunarY + 1, tZ)
    k = int(0.5 + (a11 - 2415021.076998695) / 29.530588853)
    off = lunarM - 11
    if (off < 0):
        off += 12
    if (b11 - a11 > 365):
        leapOff = getLeapMonthOffset(a11, tZ)
        leapM = leapOff - 2
        if (leapM < 0):
            leapM += 12
        if (lunarLeap != 0 and lunarM != leapM):
            return [0, 0, 0]
        elif (lunarLeap != 0 or off >= leapOff):
            off += 1
    monthStart = getNewMoonDay(k + off, tZ)
    return jdToDate(monthStart + lunarD - 1)
python
def L2S(lunarD, lunarM, lunarY, lunarLeap, tZ=7):
    '''def L2S(lunarD, lunarM, lunarY, lunarLeap, tZ = 7): Convert a lunar date to the corresponding solar date.'''
    if (lunarM < 11):
        a11 = getLunarMonth11(lunarY - 1, tZ)
        b11 = getLunarMonth11(lunarY, tZ)
    else:
        a11 = getLunarMonth11(lunarY, tZ)
        b11 = getLunarMonth11(lunarY + 1, tZ)
    k = int(0.5 + (a11 - 2415021.076998695) / 29.530588853)
    off = lunarM - 11
    if (off < 0):
        off += 12
    if (b11 - a11 > 365):
        leapOff = getLeapMonthOffset(a11, tZ)
        leapM = leapOff - 2
        if (leapM < 0):
            leapM += 12
        if (lunarLeap != 0 and lunarM != leapM):
            return [0, 0, 0]
        elif (lunarLeap != 0 or off >= leapOff):
            off += 1
    monthStart = getNewMoonDay(k + off, tZ)
    return jdToDate(monthStart + lunarD - 1)
[ "def", "L2S", "(", "lunarD", ",", "lunarM", ",", "lunarY", ",", "lunarLeap", ",", "tZ", "=", "7", ")", ":", "if", "(", "lunarM", "<", "11", ")", ":", "a11", "=", "getLunarMonth11", "(", "lunarY", "-", "1", ",", "tZ", ")", "b11", "=", "getLunarMonth11", "(", "lunarY", ",", "tZ", ")", "else", ":", "a11", "=", "getLunarMonth11", "(", "lunarY", ",", "tZ", ")", "b11", "=", "getLunarMonth11", "(", "lunarY", "+", "1", ",", "tZ", ")", "k", "=", "int", "(", "0.5", "+", "(", "a11", "-", "2415021.076998695", ")", "/", "29.530588853", ")", "off", "=", "lunarM", "-", "11", "if", "(", "off", "<", "0", ")", ":", "off", "+=", "12", "if", "(", "b11", "-", "a11", ">", "365", ")", ":", "leapOff", "=", "getLeapMonthOffset", "(", "a11", ",", "tZ", ")", "leapM", "=", "leapOff", "-", "2", "if", "(", "leapM", "<", "0", ")", ":", "leapM", "+=", "12", "if", "(", "lunarLeap", "!=", "0", "and", "lunarM", "!=", "leapM", ")", ":", "return", "[", "0", ",", "0", ",", "0", "]", "elif", "(", "lunarLeap", "!=", "0", "or", "off", ">=", "leapOff", ")", ":", "off", "+=", "1", "monthStart", "=", "getNewMoonDay", "(", "k", "+", "off", ",", "tZ", ")", "return", "jdToDate", "(", "monthStart", "+", "lunarD", "-", "1", ")" ]
def L2S(lunarD, lunarM, lunarY, lunarLeap, tZ = 7): Convert a lunar date to the corresponding solar date.
[ "def", "L2S", "(", "lunarD", "lunarM", "lunarY", "lunarLeap", "tZ", "=", "7", ")", ":", "Convert", "a", "lunar", "date", "to", "the", "corresponding", "solar", "date", "." ]
train
https://github.com/doanguyen/lasotuvi/blob/98383a3056f0a0633d6937d364c37eb788661c0d/lasotuvi/Lich_HND.py#L226-L250
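S2L and L2S should round-trip (sketch; assumes lasotuvi and its getNewMoonDay/getSunLongitude helpers; the exact lunar values are not asserted):

from lasotuvi.Lich_HND import S2L, L2S

lunar = S2L(2, 9, 2019)   # solar 2/9/2019, default UTC+7
print(lunar)              # [lunarDay, lunarMonth, lunarYear, leap flag]
print(L2S(*lunar))        # should come back as [2, 9, 2019]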
crackinglandia/pype32
pype32/pype32.py
PE.hasMZSignature
def hasMZSignature(self, rd):
    """
    Check for MZ signature.

    @type rd: L{ReadData}
    @param rd: A L{ReadData} object.

    @rtype: bool
    @return: True if the given L{ReadData} stream has the MZ signature. Otherwise, False.
    """
    rd.setOffset(0)
    sign = rd.read(2)
    if sign == "MZ":
        return True
    return False
python
def hasMZSignature(self, rd):
    """
    Check for MZ signature.

    @type rd: L{ReadData}
    @param rd: A L{ReadData} object.

    @rtype: bool
    @return: True if the given L{ReadData} stream has the MZ signature. Otherwise, False.
    """
    rd.setOffset(0)
    sign = rd.read(2)
    if sign == "MZ":
        return True
    return False
[ "def", "hasMZSignature", "(", "self", ",", "rd", ")", ":", "rd", ".", "setOffset", "(", "0", ")", "sign", "=", "rd", ".", "read", "(", "2", ")", "if", "sign", "==", "\"MZ\"", ":", "return", "True", "return", "False" ]
Check for MZ signature.

@type rd: L{ReadData}
@param rd: A L{ReadData} object.

@rtype: bool
@return: True if the given L{ReadData} stream has the MZ signature. Otherwise, False.
[ "Check", "for", "MZ", "signature", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L134-L148
crackinglandia/pype32
pype32/pype32.py
PE.hasPESignature
def hasPESignature(self, rd):
    """
    Check for PE signature.

    @type rd: L{ReadData}
    @param rd: A L{ReadData} object.

    @rtype: bool
    @return: True if the given L{ReadData} stream has the PE signature. Otherwise, False.
    """
    rd.setOffset(0)
    e_lfanew_offset = unpack("<L", rd.readAt(0x3c, 4))[0]
    sign = rd.readAt(e_lfanew_offset, 2)
    if sign == "PE":
        return True
    return False
python
def hasPESignature(self, rd):
    """
    Check for PE signature.

    @type rd: L{ReadData}
    @param rd: A L{ReadData} object.

    @rtype: bool
    @return: True if the given L{ReadData} stream has the PE signature. Otherwise, False.
    """
    rd.setOffset(0)
    e_lfanew_offset = unpack("<L", rd.readAt(0x3c, 4))[0]
    sign = rd.readAt(e_lfanew_offset, 2)
    if sign == "PE":
        return True
    return False
[ "def", "hasPESignature", "(", "self", ",", "rd", ")", ":", "rd", ".", "setOffset", "(", "0", ")", "e_lfanew_offset", "=", "unpack", "(", "\"<L\"", ",", "rd", ".", "readAt", "(", "0x3c", ",", "4", ")", ")", "[", "0", "]", "sign", "=", "rd", ".", "readAt", "(", "e_lfanew_offset", ",", "2", ")", "if", "sign", "==", "\"PE\"", ":", "return", "True", "return", "False" ]
Check for PE signature.

@type rd: L{ReadData}
@param rd: A L{ReadData} object.

@rtype: bool
@return: True if the given L{ReadData} stream has the PE signature. Otherwise, False.
[ "Check", "for", "PE", "signature", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L150-L165
crackinglandia/pype32
pype32/pype32.py
PE.validate
def validate(self):
    """
    Performs validations over some fields of the PE structure to determine if the loaded file has a valid PE format.

    @raise PEException: If an invalid value is found in the PE instance.
    """
    # Ange Albertini (@angie4771) can kill me for this! :)
    if self.dosHeader.e_magic.value != consts.MZ_SIGNATURE:
        raise excep.PEException("Invalid MZ signature. Found %d instead of %d." % (self.dosHeader.e_magic.value, consts.MZ_SIGNATURE))
    if self.dosHeader.e_lfanew.value > len(self):
        raise excep.PEException("Invalid e_lfanew value. Probably not a PE file.")
    if self.ntHeaders.signature.value != consts.PE_SIGNATURE:
        raise excep.PEException("Invalid PE signature. Found %d instead of %d." % (self.ntHeaders.signature.value, consts.PE_SIGNATURE))
    if self.ntHeaders.optionalHeader.numberOfRvaAndSizes.value > 0x10:
        print excep.PEWarning("Suspicious value for NumberOfRvaAndSizes: %d." % self.ntHeaders.optionalHeader.numberOfRvaAndSizes.value)
python
def validate(self):
    """
    Performs validations over some fields of the PE structure to determine if the loaded file has a valid PE format.

    @raise PEException: If an invalid value is found in the PE instance.
    """
    # Ange Albertini (@angie4771) can kill me for this! :)
    if self.dosHeader.e_magic.value != consts.MZ_SIGNATURE:
        raise excep.PEException("Invalid MZ signature. Found %d instead of %d." % (self.dosHeader.e_magic.value, consts.MZ_SIGNATURE))
    if self.dosHeader.e_lfanew.value > len(self):
        raise excep.PEException("Invalid e_lfanew value. Probably not a PE file.")
    if self.ntHeaders.signature.value != consts.PE_SIGNATURE:
        raise excep.PEException("Invalid PE signature. Found %d instead of %d." % (self.ntHeaders.signature.value, consts.PE_SIGNATURE))
    if self.ntHeaders.optionalHeader.numberOfRvaAndSizes.value > 0x10:
        print excep.PEWarning("Suspicious value for NumberOfRvaAndSizes: %d." % self.ntHeaders.optionalHeader.numberOfRvaAndSizes.value)
[ "def", "validate", "(", "self", ")", ":", "# Ange Albertini (@angie4771) can kill me for this! :)", "if", "self", ".", "dosHeader", ".", "e_magic", ".", "value", "!=", "consts", ".", "MZ_SIGNATURE", ":", "raise", "excep", ".", "PEException", "(", "\"Invalid MZ signature. Found %d instead of %d.\"", "%", "(", "self", ".", "dosHeader", ".", "magic", ".", "value", ",", "consts", ".", "MZ_SIGNATURE", ")", ")", "if", "self", ".", "dosHeader", ".", "e_lfanew", ".", "value", ">", "len", "(", "self", ")", ":", "raise", "excep", ".", "PEException", "(", "\"Invalid e_lfanew value. Probably not a PE file.\"", ")", "if", "self", ".", "ntHeaders", ".", "signature", ".", "value", "!=", "consts", ".", "PE_SIGNATURE", ":", "raise", "excep", ".", "PEException", "(", "\"Invalid PE signature. Found %d instead of %d.\"", "%", "(", "self", ".", "ntHeaders", ".", "optionaHeader", ".", "signature", ".", "value", ",", "consts", ".", "PE_SIGNATURE", ")", ")", "if", "self", ".", "ntHeaders", ".", "optionalHeader", ".", "numberOfRvaAndSizes", ".", "value", ">", "0x10", ":", "print", "excep", ".", "PEWarning", "(", "\"Suspicious value for NumberOfRvaAndSizes: %d.\"", "%", "self", ".", "ntHeaders", ".", "optionaHeader", ".", "numberOfRvaAndSizes", ".", "value", ")" ]
Performs validations over some fields of the PE structure to determine if the loaded file has a valid PE format.

@raise PEException: If an invalid value is found in the PE instance.
[ "Performs", "validations", "over", "some", "fields", "of", "the", "PE", "structure", "to", "determine", "if", "the", "loaded", "file", "has", "a", "valid", "PE", "format", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L167-L184
crackinglandia/pype32
pype32/pype32.py
PE.readFile
def readFile(self, pathToFile):
    """
    Returns data from a file.

    @type pathToFile: str
    @param pathToFile: Path to the file.

    @rtype: str
    @return: The data from file.
    """
    fd = open(pathToFile, "rb")
    data = fd.read()
    fd.close()
    return data
python
def readFile(self, pathToFile):
    """
    Returns data from a file.

    @type pathToFile: str
    @param pathToFile: Path to the file.

    @rtype: str
    @return: The data from file.
    """
    fd = open(pathToFile, "rb")
    data = fd.read()
    fd.close()
    return data
[ "def", "readFile", "(", "self", ",", "pathToFile", ")", ":", "fd", "=", "open", "(", "pathToFile", ",", "\"rb\"", ")", "data", "=", "fd", ".", "read", "(", ")", "fd", ".", "close", "(", ")", "return", "data" ]
Returns data from a file.

@type pathToFile: str
@param pathToFile: Path to the file.

@rtype: str
@return: The data from file.
[ "Returns", "data", "from", "a", "file", "." ]
train
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L186-L199
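A hedged, untested sketch tying the four PE methods above together (Python 2, matching pype32's print syntax; the PE constructor argument and the ReadData location under pype32.utils are assumptions, not confirmed by these records):

import pype32
from pype32 import utils

pe = pype32.PE("sample.exe")             # parse a PE from disk (path argument assumed)
data = pe.readFile("sample.exe")         # raw bytes via the method above
rd = utils.ReadData(data)                # wrap in a seekable reader (assumed location)
print pe.hasMZSignature(rd), pe.hasPESignature(rd)   # expect: True True
pe.validate()                            # raises PEException on malformed headers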