| id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (stringclasses 1: python) | code (string, 75-19.8k chars) | code_tokens (list) | docstring (string, 3-17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87-242 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
238,600
|
zkbt/the-friendly-stars
|
thefriendlystars/constellations/constellation.py
|
Constellation.from_coordinates
|
def from_coordinates(cls, ra=None, dec=None,
distance=None,
pm_ra_cosdec=None, pm_dec=None,
radial_velocity=None,
obstime=2000.0*u.year,
id=None, mag=None,
**kwargs):
'''
Initialize a constellation object.
Parameters
----------
ra, dec, distance, pm_ra_cosdec, pm_dec, radial_velocity
These must be able to initialize a SkyCoord.
id : list, array
Identifications for the entries.
mag : list, array
Magnitudes for the entries.
**kwargs
All arguments and keyword arguments are passed along
to SkyCoord. They can be coordinates in the first place,
or, for example, ra and dec with units, or any other
inputs that can initialize a SkyCoord.
'''
# make sure we can initialize some coordinates
# coordinates = coord.SkyCoord(ra=ra, dec=dec, distance=distance, pm_ra_cosdec=pm_ra_cosdec, pm_dec=pm_dec, radial_velocity=radial_velocity)
N = len(np.atleast_1d(ra))
if id is None:
id = ['{}'.format(i) for i in range(N)]
if mag is None:
mag = np.zeros(N)
standardized = Table(data=[id, mag], names=['object-id', 'filter-mag'])
for k in cls.coordinate_keys:
if locals()[k] is not None:
standardized[k] = locals()[k]
return cls(standardized)
|
python
|
def from_coordinates(cls, ra=None, dec=None,
distance=None,
pm_ra_cosdec=None, pm_dec=None,
radial_velocity=None,
obstime=2000.0*u.year,
id=None, mag=None,
**kwargs):
'''
Initialize a constellation object.
Parameters
----------
ra, dec, distance, pm_ra_cosdec, pm_dec, radial_velocity
These must be able to initialize a SkyCoord.
id : list, array
Identifications for the entries.
mag : list, array
Magnitudes for the entries.
**kwargs
All arguments and keyword arguments are passed along
to SkyCoord. They can be coordinates in the first place,
or, for example, ra and dec with units, or any other
inputs that can initialize a SkyCoord.
'''
# make sure we can initialize some coordinates
# coordinates = coord.SkyCoord(ra=ra, dec=dec, distance=distance, pm_ra_cosdec=pm_ra_cosdec, pm_dec=pm_dec, radial_velocity=radial_velocity)
N = len(np.atleast_1d(ra))
if id is None:
id = ['{}'.format(i) for i in range(N)]
if mag is None:
mag = np.zeros(N)
standardized = Table(data=[id, mag], names=['object-id', 'filter-mag'])
for k in cls.coordinate_keys:
if locals()[k] is not None:
standardized[k] = locals()[k]
return cls(standardized)
|
[
"def",
"from_coordinates",
"(",
"cls",
",",
"ra",
"=",
"None",
",",
"dec",
"=",
"None",
",",
"distance",
"=",
"None",
",",
"pm_ra_cosdec",
"=",
"None",
",",
"pm_dec",
"=",
"None",
",",
"radial_velocity",
"=",
"None",
",",
"obstime",
"=",
"2000.0",
"*",
"u",
".",
"year",
",",
"id",
"=",
"None",
",",
"mag",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# make sure we can initialzie some coordinates",
"# coordinates = coord.SkyCoord(ra=ra, dec=dec, distance=distance, pm_ra_cosdec=pm_ra_cosdec, pm_dec=pm_dec, radial_velocity=radial_velocity)",
"N",
"=",
"len",
"(",
"np",
".",
"atleast_1d",
"(",
"ra",
")",
")",
"if",
"id",
"is",
"None",
":",
"id",
"=",
"[",
"'{}'",
".",
"format",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"N",
")",
"]",
"if",
"mag",
"is",
"None",
":",
"mag",
"=",
"np",
".",
"zeros",
"(",
"N",
")",
"standardized",
"=",
"Table",
"(",
"data",
"=",
"[",
"id",
",",
"mag",
"]",
",",
"names",
"=",
"[",
"'object-id'",
",",
"'filter-mag'",
"]",
")",
"for",
"k",
"in",
"cls",
".",
"coordinate_keys",
":",
"if",
"locals",
"(",
")",
"[",
"k",
"]",
"is",
"not",
"None",
":",
"standardized",
"[",
"k",
"]",
"=",
"locals",
"(",
")",
"[",
"k",
"]",
"return",
"cls",
"(",
"standardized",
")"
] |
Initialize a constellation object.
Parameters
----------
ra, dec, distance, pm_ra_cosdec, pm_dec, radial_velocity
These must be able to initialize a SkyCoord.
id : list, array
Identifications for the entries.
mag : list, array
Magnitudes for the entries.
**kwargs
All arguments and keyword arguments are passed along
to SkyCoord. They can be coordinates in the first place,
or, for example, ra and dec with units, or any other
inputs that can initialize a SkyCoord.
|
[
"Iniitalize",
"a",
"constellation",
"object",
"."
] |
50d3f979e79e63c66629065c75595696dc79802e
|
https://github.com/zkbt/the-friendly-stars/blob/50d3f979e79e63c66629065c75595696dc79802e/thefriendlystars/constellations/constellation.py#L73-L115
|
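A minimal usage sketch for from_coordinates above, assuming astropy is importable and that the method is exposed as a classmethod (as its cls signature suggests); the star values are hypothetical:

import astropy.units as u
from thefriendlystars.constellations.constellation import Constellation

# two made-up stars; ra/dec carry units so a SkyCoord can be built from them
c = Constellation.from_coordinates(ra=[10.7, 56.8] * u.deg,
                                   dec=[41.3, 24.1] * u.deg,
                                   id=['alpha', 'beta'],
                                   mag=[3.4, 4.2])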
238,601
|
zkbt/the-friendly-stars
|
thefriendlystars/constellations/constellation.py
|
Constellation.to_text
|
def to_text(self, filename=None, overwrite=True):
'''
Write this catalog out to a text file.
'''
table = self.standardized
#table = hstack([self.identifiers,
# self._coordinate_table(),
# self.magnitudes,
# self.errors])
if filename == None:
filename = '{}.txt'.format(self.name)
self.speak('saving to {}'.format(filename))
table.write(filename, format='ascii.ecsv', overwrite=overwrite)
|
python
|
def to_text(self, filename=None, overwrite=True):
'''
Write this catalog out to a text file.
'''
table = self.standardized
#table = hstack([self.identifiers,
# self._coordinate_table(),
# self.magnitudes,
# self.errors])
if filename == None:
filename = '{}.txt'.format(self.name)
self.speak('saving to {}'.format(filename))
table.write(filename, format='ascii.ecsv', overwrite=overwrite)
|
[
"def",
"to_text",
"(",
"self",
",",
"filename",
"=",
"None",
",",
"overwrite",
"=",
"True",
")",
":",
"table",
"=",
"self",
".",
"standardized",
"#table = hstack([self.identifiers,",
"# self._coordinate_table(),",
"# self.magnitudes,",
"# self.errors])",
"if",
"filename",
"==",
"None",
":",
"filename",
"=",
"'{}.txt'",
".",
"format",
"(",
"self",
".",
"name",
")",
"self",
".",
"speak",
"(",
"'saving to {}'",
".",
"format",
"(",
"filename",
")",
")",
"table",
".",
"write",
"(",
"filename",
",",
"format",
"=",
"'ascii.ecsv'",
",",
"overwrite",
"=",
"overwrite",
")"
] |
Write this catalog out to a text file.
|
[
"Write",
"this",
"catalog",
"out",
"to",
"a",
"text",
"file",
"."
] |
50d3f979e79e63c66629065c75595696dc79802e
|
https://github.com/zkbt/the-friendly-stars/blob/50d3f979e79e63c66629065c75595696dc79802e/thefriendlystars/constellations/constellation.py#L166-L180
|
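Because to_text writes ascii.ecsv, the file round-trips through astropy's Table reader; a hedged sketch with a hypothetical filename:

from astropy.table import Table

c.to_text('my-stars.txt')  # writes the standardized table as ECSV
table = Table.read('my-stars.txt', format='ascii.ecsv')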
238,602
|
zkbt/the-friendly-stars
|
thefriendlystars/constellations/constellation.py
|
Constellation.plot
|
def plot(self, sizescale=10, color=None, alpha=0.5, label=None, edgecolor='none', **kw):
'''
Plot the ra and dec of the coordinates,
at a given epoch, scaled by their magnitude.
(This does *not* create a new empty figure.)
Parameters
----------
sizescale : (optional) float
The marker size for scatter for a star at the magnitudelimit.
color : (optional) any valid color
The color to plot (but there is a default for this catalog.)
**kw : dict
Additional keywords will be passed on to plt.scatter.
Returns
-------
plotted : outputs from the plots
'''
# calculate the sizes of the stars (logarithmic with brightness?)
size = np.maximum(sizescale*(1 + self.magnitudelimit - self.magnitude), 1)
# make a scatter plot of the RA + Dec
scatter = plt.scatter(self.ra, self.dec,
s=size,
color=color or self.color,
label=label or '{} ({:.1f})'.format(self.name, self.epoch),
alpha=alpha,
edgecolor=edgecolor,
**kw)
return scatter
|
python
|
def plot(self, sizescale=10, color=None, alpha=0.5, label=None, edgecolor='none', **kw):
'''
Plot the ra and dec of the coordinates,
at a given epoch, scaled by their magnitude.
(This does *not* create a new empty figure.)
Parameters
----------
sizescale : (optional) float
The marker size for scatter for a star at the magnitudelimit.
color : (optional) any valid color
The color to plot (but there is a default for this catalog.)
**kw : dict
Additional keywords will be passed on to plt.scatter.
Returns
-------
plotted : outputs from the plots
'''
# calculate the sizes of the stars (logarithmic with brightness?)
size = np.maximum(sizescale*(1 + self.magnitudelimit - self.magnitude), 1)
# make a scatter plot of the RA + Dec
scatter = plt.scatter(self.ra, self.dec,
s=size,
color=color or self.color,
label=label or '{} ({:.1f})'.format(self.name, self.epoch),
alpha=alpha,
edgecolor=edgecolor,
**kw)
return scatter
|
[
"def",
"plot",
"(",
"self",
",",
"sizescale",
"=",
"10",
",",
"color",
"=",
"None",
",",
"alpha",
"=",
"0.5",
",",
"label",
"=",
"None",
",",
"edgecolor",
"=",
"'none'",
",",
"*",
"*",
"kw",
")",
":",
"# calculate the sizes of the stars (logarithmic with brightness?)",
"size",
"=",
"np",
".",
"maximum",
"(",
"sizescale",
"*",
"(",
"1",
"+",
"self",
".",
"magnitudelimit",
"-",
"self",
".",
"magnitude",
")",
",",
"1",
")",
"# make a scatter plot of the RA + Dec",
"scatter",
"=",
"plt",
".",
"scatter",
"(",
"self",
".",
"ra",
",",
"self",
".",
"dec",
",",
"s",
"=",
"size",
",",
"color",
"=",
"color",
"or",
"self",
".",
"color",
",",
"label",
"=",
"label",
"or",
"'{} ({:.1f})'",
".",
"format",
"(",
"self",
".",
"name",
",",
"self",
".",
"epoch",
")",
",",
"alpha",
"=",
"alpha",
",",
"edgecolor",
"=",
"edgecolor",
",",
"*",
"*",
"kw",
")",
"return",
"scatter"
] |
Plot the ra and dec of the coordinates,
at a given epoch, scaled by their magnitude.
(This does *not* create a new empty figure.)
Parameters
----------
sizescale : (optional) float
The marker size for scatter for a star at the magnitudelimit.
color : (optional) any valid color
The color to plot (but there is a default for this catalog.)
**kw : dict
Additional keywords will be passed on to plt.scatter.
Returns
-------
plotted : outputs from the plots
|
[
"Plot",
"the",
"ra",
"and",
"dec",
"of",
"the",
"coordinates",
"at",
"a",
"given",
"epoch",
"scaled",
"by",
"their",
"magnitude",
"."
] |
50d3f979e79e63c66629065c75595696dc79802e
|
https://github.com/zkbt/the-friendly-stars/blob/50d3f979e79e63c66629065c75595696dc79802e/thefriendlystars/constellations/constellation.py#L281-L314
|
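A hedged usage sketch for plot: since it draws into the current axes, a figure is created first (c is a populated Constellation from the sketches above):

import matplotlib.pyplot as plt

plt.figure()                     # plot() does not create its own figure
c.plot(sizescale=20, alpha=0.8)  # brighter stars get larger markers
plt.legend()
plt.show()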
238,603
|
zkbt/the-friendly-stars
|
thefriendlystars/constellations/constellation.py
|
Constellation.animate
|
def animate(self, filename='constellation.mp4', epochs=[1900,2100], dt=5, dpi=300, fps=10, **kw):
'''
Animate a finder chart.
'''
scatter = self.finder(**kw)
plt.tight_layout()
figure = plt.gcf()
if '.gif' in filename:
try:
writer = ani.writers['pillow'](fps=fps)
except (RuntimeError, KeyError):
writer = ani.writers['imagemagick'](fps=fps)
except:
raise RuntimeError('This python seems unable to make an animated gif.')
else:
try:
writer = ani.writers['ffmpeg'](fps=fps)
except (RuntimeError,KeyError):
raise RuntimeError('This computer seems unable to ffmpeg.')
with writer.saving(figure, filename, dpi or figure.get_dpi()):
for epoch in tqdm(np.arange(epochs[0], epochs[1]+dt, dt)):
# update the illustration to a new time
coords = self.atEpoch(epoch)
scatter.set_offsets(list(zip(coords.ra.value, coords.dec.value)))
plt.title('{} in {:.1f}'.format(self.name, epoch))
writer.grab_frame()
|
python
|
def animate(self, filename='constellation.mp4', epochs=[1900,2100], dt=5, dpi=300, fps=10, **kw):
'''
Animate a finder chart.
'''
scatter = self.finder(**kw)
plt.tight_layout()
figure = plt.gcf()
if '.gif' in filename:
try:
writer = ani.writers['pillow'](fps=fps)
except (RuntimeError, KeyError):
writer = ani.writers['imagemagick'](fps=fps)
except:
raise RuntimeError('This python seems unable to make an animated gif.')
else:
try:
writer = ani.writers['ffmpeg'](fps=fps)
except (RuntimeError,KeyError):
raise RuntimeError('This computer seems unable to ffmpeg.')
with writer.saving(figure, filename, dpi or figure.get_dpi()):
for epoch in tqdm(np.arange(epochs[0], epochs[1]+dt, dt)):
# update the illustration to a new time
coords = self.atEpoch(epoch)
scatter.set_offsets(list(zip(coords.ra.value, coords.dec.value)))
plt.title('{} in {:.1f}'.format(self.name, epoch))
writer.grab_frame()
|
[
"def",
"animate",
"(",
"self",
",",
"filename",
"=",
"'constellation.mp4'",
",",
"epochs",
"=",
"[",
"1900",
",",
"2100",
"]",
",",
"dt",
"=",
"5",
",",
"dpi",
"=",
"300",
",",
"fps",
"=",
"10",
",",
"*",
"*",
"kw",
")",
":",
"scatter",
"=",
"self",
".",
"finder",
"(",
"*",
"*",
"kw",
")",
"plt",
".",
"tight_layout",
"(",
")",
"figure",
"=",
"plt",
".",
"gcf",
"(",
")",
"if",
"'.gif'",
"in",
"filename",
":",
"try",
":",
"writer",
"=",
"ani",
".",
"writers",
"[",
"'pillow'",
"]",
"(",
"fps",
"=",
"fps",
")",
"except",
"(",
"RuntimeError",
",",
"KeyError",
")",
":",
"writer",
"=",
"ani",
".",
"writers",
"[",
"'imagemagick'",
"]",
"(",
"fps",
"=",
"fps",
")",
"except",
":",
"raise",
"RuntimeError",
"(",
"'This python seems unable to make an animated gif.'",
")",
"else",
":",
"try",
":",
"writer",
"=",
"ani",
".",
"writers",
"[",
"'ffmpeg'",
"]",
"(",
"fps",
"=",
"fps",
")",
"except",
"(",
"RuntimeError",
",",
"KeyError",
")",
":",
"raise",
"RuntimeError",
"(",
"'This computer seems unable to ffmpeg.'",
")",
"with",
"writer",
".",
"saving",
"(",
"figure",
",",
"filename",
",",
"dpi",
"or",
"figure",
".",
"get_dpi",
"(",
")",
")",
":",
"for",
"epoch",
"in",
"tqdm",
"(",
"np",
".",
"arange",
"(",
"epochs",
"[",
"0",
"]",
",",
"epochs",
"[",
"1",
"]",
"+",
"dt",
",",
"dt",
")",
")",
":",
"# update the illustration to a new time",
"coords",
"=",
"self",
".",
"atEpoch",
"(",
"epoch",
")",
"scatter",
".",
"set_offsets",
"(",
"list",
"(",
"zip",
"(",
"coords",
".",
"ra",
".",
"value",
",",
"coords",
".",
"dec",
".",
"value",
")",
")",
")",
"plt",
".",
"title",
"(",
"'{} in {:.1f}'",
".",
"format",
"(",
"self",
".",
"name",
",",
"epoch",
")",
")",
"writer",
".",
"grab_frame",
"(",
")"
] |
Animate a finder chart.
|
[
"Animate",
"a",
"finder",
"chart",
"."
] |
50d3f979e79e63c66629065c75595696dc79802e
|
https://github.com/zkbt/the-friendly-stars/blob/50d3f979e79e63c66629065c75595696dc79802e/thefriendlystars/constellations/constellation.py#L352-L383
|
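A hedged usage sketch for animate: a '.gif' filename is routed through the pillow/imagemagick writers, any other name through ffmpeg:

# one frame every 10 years from 1950 to 2050, at 10 frames per second
c.animate('proper-motion.gif', epochs=[1950, 2050], dt=10, fps=10)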
238,604
|
mensi/gittornado
|
gittornado/__init__.py
|
BaseHandler.get_gitdir
|
def get_gitdir(self):
"""Determine the git repository for this request"""
if self.gitlookup is None:
raise tornado.web.HTTPError(500, 'no git lookup configured')
gitdir = self.gitlookup(self.request)
if gitdir is None:
raise tornado.web.HTTPError(404, 'unable to find repository')
logger.debug("Accessing git at: %s", gitdir)
return gitdir
|
python
|
def get_gitdir(self):
"""Determine the git repository for this request"""
if self.gitlookup is None:
raise tornado.web.HTTPError(500, 'no git lookup configured')
gitdir = self.gitlookup(self.request)
if gitdir is None:
raise tornado.web.HTTPError(404, 'unable to find repository')
logger.debug("Accessing git at: %s", gitdir)
return gitdir
|
[
"def",
"get_gitdir",
"(",
"self",
")",
":",
"if",
"self",
".",
"gitlookup",
"is",
"None",
":",
"raise",
"tornado",
".",
"web",
".",
"HTTPError",
"(",
"500",
",",
"'no git lookup configured'",
")",
"gitdir",
"=",
"self",
".",
"gitlookup",
"(",
"self",
".",
"request",
")",
"if",
"gitdir",
"is",
"None",
":",
"raise",
"tornado",
".",
"web",
".",
"HTTPError",
"(",
"404",
",",
"'unable to find repository'",
")",
"logger",
".",
"debug",
"(",
"\"Accessing git at: %s\"",
",",
"gitdir",
")",
"return",
"gitdir"
] |
Determine the git repository for this request
|
[
"Determine",
"the",
"git",
"repository",
"for",
"this",
"request"
] |
adf86b5537064337c806cce0e71eacaabc8bb610
|
https://github.com/mensi/gittornado/blob/adf86b5537064337c806cce0e71eacaabc8bb610/gittornado/__init__.py#L59-L69
|
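A sketch of one way the gitlookup callable might be supplied; the handler only requires that it accept the Tornado request and return a path or None (the directory layout here is hypothetical):

import os

def gitlookup(request):
    # map /<name>/... to a bare repository under /srv/git; None yields a 404
    name = request.path.strip('/').split('/')[0]
    gitdir = os.path.join('/srv/git', name)
    return gitdir if os.path.isdir(gitdir) else None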
238,605
|
reefab/foobot_async
|
foobot_async/__init__.py
|
FoobotClient.get_last_data
|
def get_last_data(self, uuid, period=0, average_by=0):
"""
Get the data from one device for period till now.
:param uuid: Id of the device
:type uuid: str
:param period: Number of seconds between start time of search and now
:type period: integer
:param average_by: number of seconds to average data over.
0 or 300 for no average. Use 3600 (average hourly) or a multiple for
long range requests (e.g. more than 1 day)
:type average_by: integer
:returns: list of datapoints
:raises: ClientError, AuthFailure, BadFormat, ForbiddenAccess,
TooManyRequests, InternalError
.. note::
Use period = 0 and average_by = 0 to get the very last data point.
If you only need one average for a period, the average_by needs to
be bigger than the period (e.g., for a 10-minute average: period = 600,
average_by = 601)
.. seealso:: :func:`parse_data` for return data syntax
"""
return self.parse_data((yield from self._get(
LAST_DATA_URL.format(uuid= uuid,
period= trunc(period),
average_by= trunc(average_by)))))
|
python
|
def get_last_data(self, uuid, period=0, average_by=0):
"""
Get the data from one device for period till now.
:param uuid: Id of the device
:type uuid: str
:param period: Number of seconds between start time of search and now
:type period: integer
:param average_by: number of seconds to average data over.
0 or 300 for no average. Use 3600 (average hourly) or a multiple for
long range requests (e.g. more than 1 day)
:type average_by: integer
:returns: list of datapoints
:raises: ClientError, AuthFailure, BadFormat, ForbiddenAccess,
TooManyRequests, InternalError
.. note::
Use period = 0 and average_by = 0 to get the very last data point.
If you only need one average for a period, the average_by needs to
be bigger than the period (e.g., for a 10-minute average: period = 600,
average_by = 601)
.. seealso:: :func:`parse_data` for return data syntax
"""
return self.parse_data((yield from self._get(
LAST_DATA_URL.format(uuid= uuid,
period= trunc(period),
average_by= trunc(average_by)))))
|
[
"def",
"get_last_data",
"(",
"self",
",",
"uuid",
",",
"period",
"=",
"0",
",",
"average_by",
"=",
"0",
")",
":",
"return",
"self",
".",
"parse_data",
"(",
"(",
"yield",
"from",
"self",
".",
"_get",
"(",
"LAST_DATA_URL",
".",
"format",
"(",
"uuid",
"=",
"uuid",
",",
"period",
"=",
"trunc",
"(",
"period",
")",
",",
"average_by",
"=",
"trunc",
"(",
"average_by",
")",
")",
")",
")",
")"
] |
Get the data from one device for period till now.
:param uuid: Id of the device
:type uuid: str
:param period: Number of seconds between start time of search and now
:type period: integer
:param average_by: number of seconds to average data over.
0 or 300 for no average. Use 3600 (average hourly) or a multiple for
long range requests (e.g. more than 1 day)
:type average_by: integer
:returns: list of datapoints
:raises: ClientError, AuthFailure, BadFormat, ForbiddenAccess,
TooManyRequests, InternalError
.. note::
Use period = 0 and average_by = 0 to get the very last data point.
If you only need one average for a period, the average_by needs to
be bigger than the period (e.g., for a 10-minute average: period = 600,
average_by = 601)
.. seealso:: :func:`parse_data` for return data syntax
|
[
"Get",
"the",
"data",
"from",
"one",
"device",
"for",
"period",
"till",
"now",
"."
] |
73dd27e8f3b99df284c326dccc2372e9368d808d
|
https://github.com/reefab/foobot_async/blob/73dd27e8f3b99df284c326dccc2372e9368d808d/foobot_async/__init__.py#L60-L87
|
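A hedged usage sketch following the note in the docstring; client and uuid are assumed to exist, and the yield from style suggests the calls are driven by an asyncio event loop, e.g.:

import asyncio

loop = asyncio.get_event_loop()
# the very last datapoint: period=0, average_by=0
last = loop.run_until_complete(client.get_last_data(uuid, 0, 0))
# a single 10-minute average: average_by just bigger than period
avg = loop.run_until_complete(client.get_last_data(uuid, 600, 601))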
238,606
|
reefab/foobot_async
|
foobot_async/__init__.py
|
FoobotClient.get_historical_data
|
def get_historical_data(self, uuid, start, end, average_by=0):
"""
Get the data from one device for a specified time range.
.. note::
Can fetch a maximum of 42 days of data.
To speed up query processing, you can use an averaging factor that is a
multiple of 1H in seconds (e.g. 3600), together with o'clock start and end times
:param uuid: Id of the device
:type uuid: str
:param start: start of the range
:type start: datetime
:param end: end of the range
:type end: datetime
:param average_by: number of seconds to average data over.
0 or 300 for no average. Use 3600 (average hourly) or a multiple for
long range requests (e.g. more than 1 day)
:type average_by: integer
:returns: list of datapoints
:raises: ClientError, AuthFailure, BadFormat, ForbiddenAccess,
TooManyRequests, InternalError
.. seealso:: :func:`parse_data` for return data syntax
"""
return self.parse_data((yield from self._get(
HISTORICAL_DATA_URL.format(uuid= uuid,
start = trunc(start.replace(tzinfo=timezone.utc).timestamp()),
end = trunc(end.replace(tzinfo=timezone.utc).timestamp()),
average_by= trunc(average_by)))))
|
python
|
def get_historical_data(self, uuid, start, end, average_by=0):
"""
Get the data from one device for a specified time range.
.. note::
Can fetch a maximum of 42 days of data.
To speed up query processing, you can use an averaging factor that is a
multiple of 1H in seconds (e.g. 3600), together with o'clock start and end times
:param uuid: Id of the device
:type uuid: str
:param start: start of the range
:type start: datetime
:param end: end of the range
:type end: datetime
:param average_by: number of seconds to average data over.
0 or 300 for no average. Use 3600 (average hourly) or a multiple for
long range requests (e.g. more than 1 day)
:type average_by: integer
:returns: list of datapoints
:raises: ClientError, AuthFailure, BadFormat, ForbiddenAccess,
TooManyRequests, InternalError
.. seealso:: :func:`parse_data` for return data syntax
"""
return self.parse_data((yield from self._get(
HISTORICAL_DATA_URL.format(uuid= uuid,
start = trunc(start.replace(tzinfo=timezone.utc).timestamp()),
end = trunc(end.replace(tzinfo=timezone.utc).timestamp()),
average_by= trunc(average_by)))))
|
[
"def",
"get_historical_data",
"(",
"self",
",",
"uuid",
",",
"start",
",",
"end",
",",
"average_by",
"=",
"0",
")",
":",
"return",
"self",
".",
"parse_data",
"(",
"(",
"yield",
"from",
"self",
".",
"_get",
"(",
"HISTORICAL_DATA_URL",
".",
"format",
"(",
"uuid",
"=",
"uuid",
",",
"start",
"=",
"trunc",
"(",
"start",
".",
"replace",
"(",
"tzinfo",
"=",
"timezone",
".",
"utc",
")",
".",
"timestamp",
"(",
")",
")",
",",
"end",
"=",
"trunc",
"(",
"end",
".",
"replace",
"(",
"tzinfo",
"=",
"timezone",
".",
"utc",
")",
".",
"timestamp",
"(",
")",
")",
",",
"average_by",
"=",
"trunc",
"(",
"average_by",
")",
")",
")",
")",
")"
] |
Get the data from one device for a specified time range.
.. note::
Can fetch a maximum of 42 days of data.
To speed up query processing, you can use an averaging factor that is a
multiple of 1H in seconds (e.g. 3600), together with o'clock start and end times
:param uuid: Id of the device
:type uuid: str
:param start: start of the range
:type start: datetime
:param end: end of the range
:type end: datetime
:param average_by: number of seconds to average data over.
0 or 300 for no average. Use 3600 (average hourly) or a multiple for
long range requests (e.g. more than 1 day)
:type average_by: integer
:returns: list of datapoints
:raises: ClientError, AuthFailure, BadFormat, ForbiddenAccess,
TooManyRequests, InternalError
.. seealso:: :func:`parse_data` for return data syntax
|
[
"Get",
"the",
"data",
"from",
"one",
"device",
"for",
"a",
"specified",
"time",
"range",
"."
] |
73dd27e8f3b99df284c326dccc2372e9368d808d
|
https://github.com/reefab/foobot_async/blob/73dd27e8f3b99df284c326dccc2372e9368d808d/foobot_async/__init__.py#L90-L119
|
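Continuing the sketch above: the code stamps start and end with tzinfo=timezone.utc, so naive datetimes are interpreted as UTC (the dates are hypothetical):

from datetime import datetime

# one day of hourly-averaged data
day = loop.run_until_complete(client.get_historical_data(
    uuid, datetime(2019, 2, 1), datetime(2019, 2, 2), average_by=3600))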
238,607
|
reefab/foobot_async
|
foobot_async/__init__.py
|
FoobotClient.parse_data
|
def parse_data(self, response):
"""
Convert the weird list format used for datapoints to a more usable
dictionary one
:param response: dictionary from API json response
:type response: dict
:returns: list of datapoints
.. note::
Datapoint content:
* time: UTC timestamp, unit: seconds
* pm: Particulate Matter, unit: ugm3
* tmp: temperature, unit: C
* hum: humidity, unit: %
* co2: Carbon Dioxide, unit: ppm
* voc: Volatile Organic Compounds, unit: ppb
* allpollu: `foobot index <https://help.foobot.io/hc/en-us/articles/204814371-What-does-central-number-mean->`_, unit: %
"""
parsed = []
try:
items = response['sensors']
for datapoint in response['datapoints']:
line = {}
for index, data in enumerate(datapoint):
line[items[index]] = data
parsed.append(line)
return parsed
except (KeyError, IndexError, TypeError):
raise FoobotClient.InvalidData()
|
python
|
def parse_data(self, response):
"""
Convert the weird list format used for datapoints to a more usable
dictionary one
:param response: dictionary from API json response
:type response: dict
:returns: list of datapoints
.. note::
Datapoint content:
* time: UTC timestamp, unit: seconds
* pm: Particulate Matter, unit: ugm3
* tmp: temperature, unit: C
* hum: humidity, unit: %
* co2: Carbon Dioxide, unit: ppm
* voc: Volatile Organic Compounds, unit: ppb
* allpollu: `foobot index <https://help.foobot.io/hc/en-us/articles/204814371-What-does-central-number-mean->`_, unit: %
"""
parsed = []
try:
items = response['sensors']
for datapoint in response['datapoints']:
line = {}
for index, data in enumerate(datapoint):
line[items[index]] = data
parsed.append(line)
return parsed
except (KeyError, IndexError, TypeError):
raise FoobotClient.InvalidData()
|
[
"def",
"parse_data",
"(",
"self",
",",
"response",
")",
":",
"parsed",
"=",
"[",
"]",
"try",
":",
"items",
"=",
"response",
"[",
"'sensors'",
"]",
"for",
"datapoint",
"in",
"response",
"[",
"'datapoints'",
"]",
":",
"line",
"=",
"{",
"}",
"for",
"index",
",",
"data",
"in",
"enumerate",
"(",
"datapoint",
")",
":",
"line",
"[",
"items",
"[",
"index",
"]",
"]",
"=",
"data",
"parsed",
".",
"append",
"(",
"line",
")",
"return",
"parsed",
"except",
"(",
"KeyError",
",",
"IndexError",
",",
"TypeError",
")",
":",
"raise",
"FoobotClient",
".",
"InvalidData",
"(",
")"
] |
Convert the weird list format used for datapoints to a more usable
dictionary one
:param response: dictionary from API json response
:type response: dict
:returns: list of datapoints
.. note::
Datapoint content:
* time: UTC timestamp, unit: seconds
* pm: Particulate Matter, unit: ugm3
* tmp: temperature, unit: C
* hum: humidity, unit: %
* co2: Carbon Dioxide, unit: ppm
* voc: Volatile Organic Compounds, unit: ppb
* allpollu: `foobot index <https://help.foobot.io/hc/en-us/articles/204814371-What-does-central-number-mean->`_, unit: %
|
[
"Convert",
"the",
"weird",
"list",
"format",
"used",
"for",
"datapoints",
"to",
"a",
"more",
"usable",
"dictionnary",
"one"
] |
73dd27e8f3b99df284c326dccc2372e9368d808d
|
https://github.com/reefab/foobot_async/blob/73dd27e8f3b99df284c326dccc2372e9368d808d/foobot_async/__init__.py#L121-L150
|
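An illustration of the reshaping parse_data performs, with made-up sensor values:

response = {'sensors': ['time', 'pm', 'tmp'],
            'datapoints': [[1518131274, 8.9, 21.5]]}
# client.parse_data(response) would return:
# [{'time': 1518131274, 'pm': 8.9, 'tmp': 21.5}]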
238,608
|
PSU-OIT-ARC/django-perms
|
permissions/registry.py
|
PermissionsRegistry.metaclass
|
def metaclass(self):
"""Get a metaclass configured to use this registry."""
if '_metaclass' not in self.__dict__:
self._metaclass = type('PermissionsMeta', (PermissionsMeta,), {'registry': self})
return self._metaclass
|
python
|
def metaclass(self):
"""Get a metaclass configured to use this registry."""
if '_metaclass' not in self.__dict__:
self._metaclass = type('PermissionsMeta', (PermissionsMeta,), {'registry': self})
return self._metaclass
|
[
"def",
"metaclass",
"(",
"self",
")",
":",
"if",
"'_metaclass'",
"not",
"in",
"self",
".",
"__dict__",
":",
"self",
".",
"_metaclass",
"=",
"type",
"(",
"'PermissionsMeta'",
",",
"(",
"PermissionsMeta",
",",
")",
",",
"{",
"'registry'",
":",
"self",
"}",
")",
"return",
"self",
".",
"_metaclass"
] |
Get a metaclass configured to use this registry.
|
[
"Get",
"a",
"metaclass",
"configured",
"to",
"use",
"this",
"registry",
"."
] |
083610582ed83b83ad07121cb658f6fb1aca04ee
|
https://github.com/PSU-OIT-ARC/django-perms/blob/083610582ed83b83ad07121cb658f6fb1aca04ee/permissions/registry.py#L179-L183
|
238,609
|
PSU-OIT-ARC/django-perms
|
permissions/registry.py
|
PermissionsRegistry.register
|
def register(self, perm_func=None, model=None, allow_staff=None, allow_superuser=None,
allow_anonymous=None, unauthenticated_handler=None, request_types=None, name=None,
replace=False, _return_entry=False):
"""Register permission function & return the original function.
This is typically used as a decorator::
permissions = PermissionsRegistry()
@permissions.register
def can_do_something(user):
...
For internal use only: you can pass ``_return_entry=True`` to
have the registry :class:`.Entry` returned instead of
``perm_func``.
"""
allow_staff = _default(allow_staff, self._allow_staff)
allow_superuser = _default(allow_superuser, self._allow_superuser)
allow_anonymous = _default(allow_anonymous, self._allow_anonymous)
unauthenticated_handler = _default(unauthenticated_handler, self._unauthenticated_handler)
request_types = _default(request_types, self._request_types)
if perm_func is None:
return (
lambda perm_func_:
self.register(
perm_func_, model, allow_staff, allow_superuser, allow_anonymous,
unauthenticated_handler, request_types, name, replace, _return_entry)
)
name = _default(name, perm_func.__name__)
if name == 'register':
raise PermissionsError('register cannot be used as a permission name')
elif name in self._registry and not replace:
raise DuplicatePermissionError(name)
view_decorator = self._make_view_decorator(
name, perm_func, model, allow_staff, allow_superuser, allow_anonymous,
unauthenticated_handler, request_types)
entry = Entry(
name, perm_func, view_decorator, model, allow_staff, allow_superuser, allow_anonymous,
unauthenticated_handler, request_types, set())
self._registry[name] = entry
@wraps(perm_func)
def wrapped_func(user, instance=NO_VALUE):
if user is None:
return False
if not allow_anonymous and user.is_anonymous():
return False
test = lambda: perm_func(user) if instance is NO_VALUE else perm_func(user, instance)
return (
allow_staff and user.is_staff or
allow_superuser and user.is_superuser or
test()
)
register.filter(name, wrapped_func)
log.debug('Registered permission: {0}'.format(name))
return entry if _return_entry else wrapped_func
|
python
|
def register(self, perm_func=None, model=None, allow_staff=None, allow_superuser=None,
allow_anonymous=None, unauthenticated_handler=None, request_types=None, name=None,
replace=False, _return_entry=False):
"""Register permission function & return the original function.
This is typically used as a decorator::
permissions = PermissionsRegistry()
@permissions.register
def can_do_something(user):
...
For internal use only: you can pass ``_return_entry=True`` to
have the registry :class:`.Entry` returned instead of
``perm_func``.
"""
allow_staff = _default(allow_staff, self._allow_staff)
allow_superuser = _default(allow_superuser, self._allow_superuser)
allow_anonymous = _default(allow_anonymous, self._allow_anonymous)
unauthenticated_handler = _default(unauthenticated_handler, self._unauthenticated_handler)
request_types = _default(request_types, self._request_types)
if perm_func is None:
return (
lambda perm_func_:
self.register(
perm_func_, model, allow_staff, allow_superuser, allow_anonymous,
unauthenticated_handler, request_types, name, replace, _return_entry)
)
name = _default(name, perm_func.__name__)
if name == 'register':
raise PermissionsError('register cannot be used as a permission name')
elif name in self._registry and not replace:
raise DuplicatePermissionError(name)
view_decorator = self._make_view_decorator(
name, perm_func, model, allow_staff, allow_superuser, allow_anonymous,
unauthenticated_handler, request_types)
entry = Entry(
name, perm_func, view_decorator, model, allow_staff, allow_superuser, allow_anonymous,
unauthenticated_handler, request_types, set())
self._registry[name] = entry
@wraps(perm_func)
def wrapped_func(user, instance=NO_VALUE):
if user is None:
return False
if not allow_anonymous and user.is_anonymous():
return False
test = lambda: perm_func(user) if instance is NO_VALUE else perm_func(user, instance)
return (
allow_staff and user.is_staff or
allow_superuser and user.is_superuser or
test()
)
register.filter(name, wrapped_func)
log.debug('Registered permission: {0}'.format(name))
return entry if _return_entry else wrapped_func
|
[
"def",
"register",
"(",
"self",
",",
"perm_func",
"=",
"None",
",",
"model",
"=",
"None",
",",
"allow_staff",
"=",
"None",
",",
"allow_superuser",
"=",
"None",
",",
"allow_anonymous",
"=",
"None",
",",
"unauthenticated_handler",
"=",
"None",
",",
"request_types",
"=",
"None",
",",
"name",
"=",
"None",
",",
"replace",
"=",
"False",
",",
"_return_entry",
"=",
"False",
")",
":",
"allow_staff",
"=",
"_default",
"(",
"allow_staff",
",",
"self",
".",
"_allow_staff",
")",
"allow_superuser",
"=",
"_default",
"(",
"allow_superuser",
",",
"self",
".",
"_allow_superuser",
")",
"allow_anonymous",
"=",
"_default",
"(",
"allow_anonymous",
",",
"self",
".",
"_allow_anonymous",
")",
"unauthenticated_handler",
"=",
"_default",
"(",
"unauthenticated_handler",
",",
"self",
".",
"_unauthenticated_handler",
")",
"request_types",
"=",
"_default",
"(",
"request_types",
",",
"self",
".",
"_request_types",
")",
"if",
"perm_func",
"is",
"None",
":",
"return",
"(",
"lambda",
"perm_func_",
":",
"self",
".",
"register",
"(",
"perm_func_",
",",
"model",
",",
"allow_staff",
",",
"allow_superuser",
",",
"allow_anonymous",
",",
"unauthenticated_handler",
",",
"request_types",
",",
"name",
",",
"replace",
",",
"_return_entry",
")",
")",
"name",
"=",
"_default",
"(",
"name",
",",
"perm_func",
".",
"__name__",
")",
"if",
"name",
"==",
"'register'",
":",
"raise",
"PermissionsError",
"(",
"'register cannot be used as a permission name'",
")",
"elif",
"name",
"in",
"self",
".",
"_registry",
"and",
"not",
"replace",
":",
"raise",
"DuplicatePermissionError",
"(",
"name",
")",
"view_decorator",
"=",
"self",
".",
"_make_view_decorator",
"(",
"name",
",",
"perm_func",
",",
"model",
",",
"allow_staff",
",",
"allow_superuser",
",",
"allow_anonymous",
",",
"unauthenticated_handler",
",",
"request_types",
")",
"entry",
"=",
"Entry",
"(",
"name",
",",
"perm_func",
",",
"view_decorator",
",",
"model",
",",
"allow_staff",
",",
"allow_superuser",
",",
"allow_anonymous",
",",
"unauthenticated_handler",
",",
"request_types",
",",
"set",
"(",
")",
")",
"self",
".",
"_registry",
"[",
"name",
"]",
"=",
"entry",
"@",
"wraps",
"(",
"perm_func",
")",
"def",
"wrapped_func",
"(",
"user",
",",
"instance",
"=",
"NO_VALUE",
")",
":",
"if",
"user",
"is",
"None",
":",
"return",
"False",
"if",
"not",
"allow_anonymous",
"and",
"user",
".",
"is_anonymous",
"(",
")",
":",
"return",
"False",
"test",
"=",
"lambda",
":",
"perm_func",
"(",
"user",
")",
"if",
"instance",
"is",
"NO_VALUE",
"else",
"perm_func",
"(",
"user",
",",
"instance",
")",
"return",
"(",
"allow_staff",
"and",
"user",
".",
"is_staff",
"or",
"allow_superuser",
"and",
"user",
".",
"is_superuser",
"or",
"test",
"(",
")",
")",
"register",
".",
"filter",
"(",
"name",
",",
"wrapped_func",
")",
"log",
".",
"debug",
"(",
"'Registered permission: {0}'",
".",
"format",
"(",
"name",
")",
")",
"return",
"entry",
"if",
"_return_entry",
"else",
"wrapped_func"
] |
Register permission function & return the original function.
This is typically used as a decorator::
permissions = PermissionsRegistry()
@permissions.register
def can_do_something(user):
...
For internal use only: you can pass ``_return_entry=True`` to
have the registry :class:`.Entry` returned instead of
``perm_func``.
|
[
"Register",
"permission",
"function",
"&",
"return",
"the",
"original",
"function",
"."
] |
083610582ed83b83ad07121cb658f6fb1aca04ee
|
https://github.com/PSU-OIT-ARC/django-perms/blob/083610582ed83b83ad07121cb658f6fb1aca04ee/permissions/registry.py#L185-L246
|
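A hedged sketch of the decorator-with-arguments path through register; Widget and the permission body are hypothetical:

permissions = PermissionsRegistry()

@permissions.register(model=Widget, allow_staff=True)
def can_edit_widget(user, widget):
    return widget.owner_id == user.id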
238,610
|
PSU-OIT-ARC/django-perms
|
permissions/registry.py
|
PermissionsRegistry.require
|
def require(self, perm_name, **kwargs):
"""Use as a decorator on a view to require a permission.
Optional args:
- ``field`` The name of the model field to use for lookup
(this is only relevant when requiring a permission that
was registered with ``model=SomeModelClass``)
Examples::
@registry.require('can_do_stuff')
def view(request):
...
@registry.require('can_do_stuff_with_model', field='alt_id')
def view_model(request, model_id):
...
"""
view_decorator = self._get_entry(perm_name).view_decorator
return view_decorator(**kwargs) if kwargs else view_decorator
|
python
|
def require(self, perm_name, **kwargs):
"""Use as a decorator on a view to require a permission.
Optional args:
- ``field`` The name of the model field to use for lookup
(this is only relevant when requiring a permission that
was registered with ``model=SomeModelClass``)
Examples::
@registry.require('can_do_stuff')
def view(request):
...
@registry.require('can_do_stuff_with_model', field='alt_id')
def view_model(request, model_id):
...
"""
view_decorator = self._get_entry(perm_name).view_decorator
return view_decorator(**kwargs) if kwargs else view_decorator
|
[
"def",
"require",
"(",
"self",
",",
"perm_name",
",",
"*",
"*",
"kwargs",
")",
":",
"view_decorator",
"=",
"self",
".",
"_get_entry",
"(",
"perm_name",
")",
".",
"view_decorator",
"return",
"view_decorator",
"(",
"*",
"*",
"kwargs",
")",
"if",
"kwargs",
"else",
"view_decorator"
] |
Use as a decorator on a view to require a permission.
Optional args:
- ``field`` The name of the model field to use for lookup
(this is only relevant when requiring a permission that
was registered with ``model=SomeModelClass``)
Examples::
@registry.require('can_do_stuff')
def view(request):
...
@registry.require('can_do_stuff_with_model', field='alt_id')
def view_model(request, model_id):
...
|
[
"Use",
"as",
"a",
"decorator",
"on",
"a",
"view",
"to",
"require",
"a",
"permission",
"."
] |
083610582ed83b83ad07121cb658f6fb1aca04ee
|
https://github.com/PSU-OIT-ARC/django-perms/blob/083610582ed83b83ad07121cb658f6fb1aca04ee/permissions/registry.py#L250-L271
|
238,611
|
PSU-OIT-ARC/django-perms
|
permissions/registry.py
|
PermissionsRegistry.entry_for_view
|
def entry_for_view(self, view, perm_name):
"""Get registry entry for permission if ``view`` requires it.
In other words, if ``view`` requires the permission specified by
``perm_name``, return the :class:`Entry` associated with the
permission. If ``view`` doesn't require the permission, return
``None`` instead.
"""
view_name = self._get_view_name(view)
entry = self._get_entry(perm_name)
if view_name in entry.views:
return entry
return None
|
python
|
def entry_for_view(self, view, perm_name):
"""Get registry entry for permission if ``view`` requires it.
In other words, if ``view`` requires the permission specified by
``perm_name``, return the :class:`Entry` associated with the
permission. If ``view`` doesn't require the permission, return
``None`` instead.
"""
view_name = self._get_view_name(view)
entry = self._get_entry(perm_name)
if view_name in entry.views:
return entry
return None
|
[
"def",
"entry_for_view",
"(",
"self",
",",
"view",
",",
"perm_name",
")",
":",
"view_name",
"=",
"self",
".",
"_get_view_name",
"(",
"view",
")",
"entry",
"=",
"self",
".",
"_get_entry",
"(",
"perm_name",
")",
"if",
"view_name",
"in",
"entry",
".",
"views",
":",
"return",
"entry",
"return",
"None"
] |
Get registry entry for permission if ``view`` requires it.
In other words, if ``view`` requires the permission specified by
``perm_name``, return the :class:`Entry` associated with the
permission. If ``view`` doesn't require the permission, return
``None`` instead.
|
[
"Get",
"registry",
"entry",
"for",
"permission",
"if",
"view",
"requires",
"it",
"."
] |
083610582ed83b83ad07121cb658f6fb1aca04ee
|
https://github.com/PSU-OIT-ARC/django-perms/blob/083610582ed83b83ad07121cb658f6fb1aca04ee/permissions/registry.py#L412-L425
|
238,612
|
praekelt/panya-music
|
music/importer.py
|
TrackImporter.get_importer
|
def get_importer(self):
"""
Resolve importer from TRACK_IMPORTER_CLASS setting.
"""
try:
importer_path = settings.TRACK_IMPORTER_CLASS
except AttributeError:
raise ImproperlyConfigured('No TRACK_IMPORTER_CLASS setting found.')
try:
dot = importer_path.rindex('.')
except ValueError:
raise ImproperlyConfigured('%s isn\'t a Track Importer module.' % importer_path)
module, classname = importer_path[:dot], importer_path[dot+1:]
try:
mod = import_module(module)
except ImportError, e:
raise ImproperlyConfigured('Could not import Track Importer %s: "%s".' % (module, e))
try:
importer_class = getattr(mod, classname)
except AttributeError:
raise ImproperlyConfigured('Track Importer module "%s" does not define a "%s" class.' % (module, classname))
importer_instance = importer_class()
if not hasattr(importer_instance, 'run'):
raise ImproperlyConfigured('Track Importer class "%s" does not define a run method. Implement the method to return a list of Track objects.' % classname)
return importer_instance
|
python
|
def get_importer(self):
"""
Resolve importer from TRACK_IMPORTER_CLASS setting.
"""
try:
importer_path = settings.TRACK_IMPORTER_CLASS
except AttributeError:
raise ImproperlyConfigured('No TRACK_IMPORTER_CLASS setting found.')
try:
dot = importer_path.rindex('.')
except ValueError:
raise ImproperlyConfigured('%s isn\'t a Track Importer module.' % importer_path)
module, classname = importer_path[:dot], importer_path[dot+1:]
try:
mod = import_module(module)
except ImportError, e:
raise ImproperlyConfigured('Could not import Track Importer %s: "%s".' % (module, e))
try:
importer_class = getattr(mod, classname)
except AttributeError:
raise ImproperlyConfigured('Track Importer module "%s" does not define a "%s" class.' % (module, classname))
importer_instance = importer_class()
if not hasattr(importer_instance, 'run'):
raise ImproperlyConfigured('Track Importer class "%s" does not define a run method. Implement the method to return a list of Track objects.' % classname)
return importer_instance
|
[
"def",
"get_importer",
"(",
"self",
")",
":",
"try",
":",
"importer_path",
"=",
"settings",
".",
"TRACK_IMPORTER_CLASS",
"except",
"AttributeError",
":",
"raise",
"ImproperlyConfigured",
"(",
"'No TRACK_IMPORTER_CLASS setting found.'",
")",
"try",
":",
"dot",
"=",
"importer_path",
".",
"rindex",
"(",
"'.'",
")",
"except",
"ValueError",
":",
"raise",
"ImproperlyConfigured",
"(",
"'%s isn\\'t a Track Importer module.'",
"%",
"importer_path",
")",
"module",
",",
"classname",
"=",
"importer_path",
"[",
":",
"dot",
"]",
",",
"importer_path",
"[",
"dot",
"+",
"1",
":",
"]",
"try",
":",
"mod",
"=",
"import_module",
"(",
"module",
")",
"except",
"ImportError",
",",
"e",
":",
"raise",
"ImproperlyConfigured",
"(",
"'Could not import Track Importer %s: \"%s\".'",
"%",
"(",
"module",
",",
"e",
")",
")",
"try",
":",
"importer_class",
"=",
"getattr",
"(",
"mod",
",",
"classname",
")",
"except",
"AttributeError",
":",
"raise",
"ImproperlyConfigured",
"(",
"'Track Importer module \"%s\" does not define a \"%s\" class.'",
"%",
"(",
"module",
",",
"classname",
")",
")",
"importer_instance",
"=",
"importer_class",
"(",
")",
"if",
"not",
"hasattr",
"(",
"importer_instance",
",",
"'run'",
")",
":",
"raise",
"ImproperlyConfigured",
"(",
"'Track Importer class \"%s\" does not define a run method. Implement the method to return a list of Track objects.'",
"%",
"classname",
")",
"return",
"importer_instance"
] |
Resolve importer from TRACK_IMPORTER_CLASS setting.
|
[
"Resolve",
"importer",
"from",
"TRACK_IMPORTER_CLASS",
"setting",
"."
] |
9300b1866bc33178e721b6de4771ba866bfc4b11
|
https://github.com/praekelt/panya-music/blob/9300b1866bc33178e721b6de4771ba866bfc4b11/music/importer.py#L10-L36
|
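A hedged sketch of a setting and importer that would satisfy get_importer; every name below is hypothetical, and the only contract is a run() method returning track objects:

# settings.py
TRACK_IMPORTER_CLASS = 'myapp.importers.RadioFeedImporter'

# myapp/importers.py
class RadioFeedImporter(object):
    def run(self):
        # return raw track objects (title, artist, start_time, length, ...)
        return []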
238,613
|
praekelt/panya-music
|
music/importer.py
|
TrackImporter.lookup_track
|
def lookup_track(self, track):
"""
Looks up the Django Track object for a provided raw importer track object.
"""
tracks = Track.objects.filter(title__iexact=track.title)
for track_obj in tracks:
for contributor in track_obj.get_primary_contributors(permitted=False):
if contributor.title == track.artist:
return track_obj
return None
|
python
|
def lookup_track(self, track):
"""
Looks up the Django Track object for a provided raw importer track object.
"""
tracks = Track.objects.filter(title__iexact=track.title)
for track_obj in tracks:
for contributor in track_obj.get_primary_contributors(permitted=False):
if contributor.title == track.artist:
return track_obj
return None
|
[
"def",
"lookup_track",
"(",
"self",
",",
"track",
")",
":",
"tracks",
"=",
"Track",
".",
"objects",
".",
"filter",
"(",
"title__iexact",
"=",
"track",
".",
"title",
")",
"for",
"track_obj",
"in",
"tracks",
":",
"for",
"contributor",
"in",
"track_obj",
".",
"get_primary_contributors",
"(",
"permitted",
"=",
"False",
")",
":",
"if",
"contributor",
".",
"title",
"==",
"track",
".",
"artist",
":",
"return",
"track_obj",
"return",
"None"
] |
Looks up the Django Track object for a provided raw importer track object.
|
[
"Looks",
"up",
"Django",
"Track",
"object",
"for",
"provided",
"raw",
"importing",
"track",
"object",
"."
] |
9300b1866bc33178e721b6de4771ba866bfc4b11
|
https://github.com/praekelt/panya-music/blob/9300b1866bc33178e721b6de4771ba866bfc4b11/music/importer.py#L38-L47
|
238,614
|
praekelt/panya-music
|
music/importer.py
|
TrackImporter.run
|
def run(self):
"""
Run import.
"""
latest_track = Track.objects.all().order_by('-last_played')
latest_track = latest_track[0] if latest_track else None
importer = self.get_importer()
tracks = importer.run()
# Create/update Django Track objects for importer tracks.
for track in tracks:
# Only create/update if tracks with start times greater than what already exists are imported.
if not latest_track or not latest_track.last_played \
or track.start_time > latest_track.last_played:
obj = self.lookup_track(track)
# Don't update importing track that is regarded as the latest. This prevents start times from constantly incrementing.
if latest_track and obj == latest_track:
print "[%s-%s]: Start time not updated as it is the latest track." % (track.title, track.artist)
continue
# If no existing track object could be resolved, create it.
if not obj:
print "[%s-%s]: Created." % (track.title, track.artist)
obj = Track.objects.create(title=track.title)
obj.length = track.length
roles = MusicCreditOption.objects.all().order_by('role_priority')
role = roles[0].role_priority if roles else 1
obj.create_credit(track.artist, role)
else:
print "[%s-%s]: Not created as it already exists." % (track.title, track.artist)
# Update last played time to start time.
obj.last_played = track.start_time
obj.save()
print "[%s-%s]: Start time updated to %s." % (track.title, track.artist, track.start_time)
else:
print "[%s-%s]: Not created as it has a past start time of %s (latest %s). " % (track.title, track.artist, track.start_time, latest_track.last_played)
|
python
|
def run(self):
"""
Run import.
"""
latest_track = Track.objects.all().order_by('-last_played')
latest_track = latest_track[0] if latest_track else None
importer = self.get_importer()
tracks = importer.run()
# Create/update Django Track objects for importer tracks.
for track in tracks:
# Only create/update if tracks with start times greater than what already exists are imported.
if not latest_track or not latest_track.last_played \
or track.start_time > latest_track.last_played:
obj = self.lookup_track(track)
# Don't update importing track that is regarded as the latest. This prevents start times from constantly incrementing.
if latest_track and obj == latest_track:
print "[%s-%s]: Start time not updated as it is the latest track." % (track.title, track.artist)
continue
# If no existing track object could be resolved, create it.
if not obj:
print "[%s-%s]: Created." % (track.title, track.artist)
obj = Track.objects.create(title=track.title)
obj.length = track.length
roles = MusicCreditOption.objects.all().order_by('role_priority')
role = roles[0].role_priority if roles else 1
obj.create_credit(track.artist, role)
else:
print "[%s-%s]: Not created as it already exists." % (track.title, track.artist)
# Update last played time to start time.
obj.last_played = track.start_time
obj.save()
print "[%s-%s]: Start time updated to %s." % (track.title, track.artist, track.start_time)
else:
print "[%s-%s]: Not created as it has a past start time of %s (latest %s). " % (track.title, track.artist, track.start_time, latest_track.last_played)
|
[
"def",
"run",
"(",
"self",
")",
":",
"latest_track",
"=",
"Track",
".",
"objects",
".",
"all",
"(",
")",
".",
"order_by",
"(",
"'-last_played'",
")",
"latest_track",
"=",
"latest_track",
"[",
"0",
"]",
"if",
"latest_track",
"else",
"None",
"importer",
"=",
"self",
".",
"get_importer",
"(",
")",
"tracks",
"=",
"importer",
".",
"run",
"(",
")",
"# Create/update Django Track objects for importer tracks.",
"for",
"track",
"in",
"tracks",
":",
"# Only create/update if tracks with start times greater than what already exists are imported. ",
"if",
"not",
"latest_track",
"or",
"not",
"latest_track",
".",
"last_played",
"or",
"track",
".",
"start_time",
">",
"latest_track",
".",
"last_played",
":",
"obj",
"=",
"self",
".",
"lookup_track",
"(",
"track",
")",
"# Don't update importing track that is regarded as the latest. This prevents start times from constantly incrementing.",
"if",
"latest_track",
"and",
"obj",
"==",
"latest_track",
":",
"print",
"\"[%s-%s]: Start time not updated as it is the latest track.\"",
"%",
"(",
"track",
".",
"title",
",",
"track",
".",
"artist",
")",
"continue",
"# If no existing track object could be resolved, create it.",
"if",
"not",
"obj",
":",
"print",
"\"[%s-%s]: Created.\"",
"%",
"(",
"track",
".",
"title",
",",
"track",
".",
"artist",
")",
"obj",
"=",
"Track",
".",
"objects",
".",
"create",
"(",
"title",
"=",
"track",
".",
"title",
")",
"obj",
".",
"length",
"=",
"track",
".",
"length",
"roles",
"=",
"MusicCreditOption",
".",
"objects",
".",
"all",
"(",
")",
".",
"order_by",
"(",
"'role_priority'",
")",
"role",
"=",
"roles",
"[",
"0",
"]",
".",
"role_priority",
"if",
"roles",
"else",
"1",
"obj",
".",
"create_credit",
"(",
"track",
".",
"artist",
",",
"role",
")",
"else",
":",
"print",
"\"[%s-%s]: Not created as it already exists.\"",
"%",
"(",
"track",
".",
"title",
",",
"track",
".",
"artist",
")",
"# Update last played time to start time.",
"obj",
".",
"last_played",
"=",
"track",
".",
"start_time",
"obj",
".",
"save",
"(",
")",
"print",
"\"[%s-%s]: Start time updated to %s.\"",
"%",
"(",
"track",
".",
"title",
",",
"track",
".",
"artist",
",",
"track",
".",
"start_time",
")",
"else",
":",
"print",
"\"[%s-%s]: Not created as it has a past start time of %s (latest %s). \"",
"%",
"(",
"track",
".",
"title",
",",
"track",
".",
"artist",
",",
"track",
".",
"start_time",
",",
"latest_track",
".",
"last_played",
")"
] |
Run import.
|
[
"Run",
"import",
"."
] |
9300b1866bc33178e721b6de4771ba866bfc4b11
|
https://github.com/praekelt/panya-music/blob/9300b1866bc33178e721b6de4771ba866bfc4b11/music/importer.py#L49-L86
|
238,615
|
pip-services3-python/pip-services3-commons-python
|
pip_services3_commons/reflect/TypeMatcher.py
|
TypeMatcher.match_value_by_name
|
def match_value_by_name(expected_type, actual_value):
"""
Matches expected type to a type of a value.
:param expected_type: an expected type name to match.
:param actual_value: a value to match its type to the expected one.
:return: true if the types match and false if they don't.
"""
if expected_type == None:
return True
if actual_value == None:
raise Exception("Actual value cannot be null")
return TypeMatcher.match_type_by_name(expected_type, type(actual_value))
|
python
|
def match_value_by_name(expected_type, actual_value):
"""
Matches expected type to a type of a value.
:param expected_type: an expected type name to match.
:param actual_value: a value to match its type to the expected one.
:return: true if the types match and false if they don't.
"""
if expected_type == None:
return True
if actual_value == None:
raise Exception("Actual value cannot be null")
return TypeMatcher.match_type_by_name(expected_type, type(actual_value))
|
[
"def",
"match_value_by_name",
"(",
"expected_type",
",",
"actual_value",
")",
":",
"if",
"expected_type",
"==",
"None",
":",
"return",
"True",
"if",
"actual_value",
"==",
"None",
":",
"raise",
"Exception",
"(",
"\"Actual value cannot be null\"",
")",
"return",
"TypeMatcher",
".",
"match_type_by_name",
"(",
"expected_type",
",",
"type",
"(",
"actual_value",
")",
")"
] |
Matches expected type to a type of a value.
:param expected_type: an expected type name to match.
:param actual_value: a value to match its type to the expected one.
:return: true if the types match and false if they don't.
|
[
"Matches",
"expected",
"type",
"to",
"a",
"type",
"of",
"a",
"value",
"."
] |
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
|
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/reflect/TypeMatcher.py#L68-L83
|
238,616
|
pip-services3-python/pip-services3-commons-python
|
pip_services3_commons/reflect/TypeMatcher.py
|
TypeMatcher.match_type_by_name
|
def match_type_by_name(expected_type, actual_type):
"""
Matches expected type to an actual type.
:param expected_type: an expected type name to match.
:param actual_type: an actual type to match defined by type code.
:return: true if the types match and false if they don't.
"""
if expected_type == None:
return True
if actual_type == None:
raise Exception("Actual type cannot be null")
expected_type = expected_type.lower()
if actual_type.__name__.lower() == expected_type:
return True
elif expected_type == "object":
return True
elif expected_type == "int" or expected_type == "integer":
return issubclass(actual_type, int) #or issubclass(actual_type, long)
elif expected_type == "long":
return issubclass(actual_type, int)
elif expected_type == "float" or expected_type == "double":
return issubclass(actual_type, float)
elif expected_type == "string":
return issubclass(actual_type, str) #or issubclass(actual_type, unicode)
elif expected_type == "bool" or expected_type == "boolean":
return issubclass(actual_type, bool)
elif expected_type == "date" or expected_type == "datetime":
return issubclass(actual_type, datetime.datetime) or issubclass(actual_type, datetime.date)
elif expected_type == "timespan" or expected_type == "duration":
return issubclass(actual_type, int) or issubclass(actual_type, float)
elif expected_type == "enum":
return issubclass(actual_type, str) or issubclass(actual_type, int)
elif expected_type == "map" or expected_type == "dict" or expected_type == "dictionary":
return issubclass(actual_type, dict)
elif expected_type == "array" or expected_type == "list":
return issubclass(actual_type, list) or issubclass(actual_type, tuple) or issubclass(actual_type, set)
elif expected_type.endswith("[]"):
# Todo: Check subtype
return issubclass(actual_type, list) or issubclass(actual_type, tuple) or issubclass(actual_type, set)
else:
return False
|
python
|
def match_type_by_name(expected_type, actual_type):
"""
Matches expected type to an actual type.
:param expected_type: an expected type name to match.
:param actual_type: an actual type to match defined by type code.
:return: true if the types match and false if they don't.
"""
if expected_type == None:
return True
if actual_type == None:
raise Exception("Actual type cannot be null")
expected_type = expected_type.lower()
if actual_type.__name__.lower() == expected_type:
return True
elif expected_type == "object":
return True
elif expected_type == "int" or expected_type == "integer":
return issubclass(actual_type, int) #or issubclass(actual_type, long)
elif expected_type == "long":
return issubclass(actual_type, int)
elif expected_type == "float" or expected_type == "double":
return issubclass(actual_type, float)
elif expected_type == "string":
return issubclass(actual_type, str) #or issubclass(actual_type, unicode)
elif expected_type == "bool" or expected_type == "boolean":
return issubclass(actual_type, bool)
elif expected_type == "date" or expected_type == "datetime":
return issubclass(actual_type, datetime.datetime) or issubclass(actual_type, datetime.date)
elif expected_type == "timespan" or expected_type == "duration":
return issubclass(actual_type, int) or issubclass(actual_type, float)
elif expected_type == "enum":
return issubclass(actual_type, str) or issubclass(actual_type, int)
elif expected_type == "map" or expected_type == "dict" or expected_type == "dictionary":
return issubclass(actual_type, dict)
elif expected_type == "array" or expected_type == "list":
return issubclass(actual_type, list) or issubclass(actual_type, tuple) or issubclass(actual_type, set)
elif expected_type.endswith("[]"):
# Todo: Check subtype
return issubclass(actual_type, list) or issubclass(actual_type, tuple) or issubclass(actual_type, set)
else:
return False
|
[
"def",
"match_type_by_name",
"(",
"expected_type",
",",
"actual_type",
")",
":",
"if",
"expected_type",
"==",
"None",
":",
"return",
"True",
"if",
"actual_type",
"==",
"None",
":",
"raise",
"Exception",
"(",
"\"Actual type cannot be null\"",
")",
"expected_type",
"=",
"expected_type",
".",
"lower",
"(",
")",
"if",
"actual_type",
".",
"__name__",
".",
"lower",
"(",
")",
"==",
"expected_type",
":",
"return",
"True",
"elif",
"expected_type",
"==",
"\"object\"",
":",
"return",
"True",
"elif",
"expected_type",
"==",
"\"int\"",
"or",
"expected_type",
"==",
"\"integer\"",
":",
"return",
"issubclass",
"(",
"actual_type",
",",
"int",
")",
"#or issubclass(actual_type, long)",
"elif",
"expected_type",
"==",
"\"long\"",
":",
"return",
"issubclass",
"(",
"actual_type",
",",
"int",
")",
"elif",
"expected_type",
"==",
"\"float\"",
"or",
"expected_type",
"==",
"\"double\"",
":",
"return",
"issubclass",
"(",
"actual_type",
",",
"float",
")",
"elif",
"expected_type",
"==",
"\"string\"",
":",
"return",
"issubclass",
"(",
"actual_type",
",",
"str",
")",
"#or issubclass(actual_type, unicode)",
"elif",
"expected_type",
"==",
"\"bool\"",
"or",
"expected_type",
"==",
"\"boolean\"",
":",
"return",
"issubclass",
"(",
"actual_type",
",",
"bool",
")",
"elif",
"expected_type",
"==",
"\"date\"",
"or",
"expected_type",
"==",
"\"datetime\"",
":",
"return",
"issubclass",
"(",
"actual_type",
",",
"datetime",
".",
"datetime",
")",
"or",
"issubclass",
"(",
"actual_type",
".",
"datetime",
".",
"date",
")",
"elif",
"expected_type",
"==",
"\"timespan\"",
"or",
"expected_type",
"==",
"\"duration\"",
":",
"return",
"issubclass",
"(",
"actual_type",
",",
"int",
")",
"or",
"issubclass",
"(",
"actual_type",
",",
"float",
")",
"elif",
"expected_type",
"==",
"\"enum\"",
":",
"return",
"issubclass",
"(",
"actual_type",
",",
"str",
")",
"or",
"issubclass",
"(",
"actual_type",
",",
"int",
")",
"elif",
"expected_type",
"==",
"\"map\"",
"or",
"expected_type",
"==",
"\"dict\"",
"or",
"expected_type",
"==",
"\"dictionary\"",
":",
"return",
"issubclass",
"(",
"actual_type",
",",
"dict",
")",
"elif",
"expected_type",
"==",
"\"array\"",
"or",
"expected_type",
"==",
"\"list\"",
":",
"return",
"issubclass",
"(",
"actual_type",
",",
"list",
")",
"or",
"issubclass",
"(",
"actual_type",
",",
"tuple",
")",
"or",
"issubclass",
"(",
"actual_type",
",",
"set",
")",
"elif",
"expected_type",
".",
"endswith",
"(",
"\"[]\"",
")",
":",
"# Todo: Check subtype",
"return",
"issubclass",
"(",
"actual_type",
",",
"list",
")",
"or",
"issubclass",
"(",
"actual_type",
",",
"tuple",
")",
"or",
"issubclass",
"(",
"actual_type",
",",
"set",
")",
"else",
":",
"return",
"False"
] |
Matches expected type to an actual type.
:param expected_type: an expected type name to match.
:param actual_type: an actual type to match defined by type code.
:return: true if the types match and false if they don't.
|
[
"Matches",
"expected",
"type",
"to",
"an",
"actual",
"type",
"."
] |
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
|
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/reflect/TypeMatcher.py#L87-L132
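A minimal usage sketch for match_type_by_name as defined above, assuming the function is in scope; expected-type names are lower()-ed before comparison, None matches anything, and a "[]" suffix matches any sequence type:

import datetime

# All calls assume match_type_by_name from the listing above is in scope.
assert match_type_by_name(None, str)                      # no expectation: always True
assert match_type_by_name("Integer", int)                 # names compared case-insensitively
assert match_type_by_name("datetime", datetime.datetime)  # matches by __name__ first
assert match_type_by_name("string[]", list)               # "[]" suffix: list/tuple/set
assert not match_type_by_name("map", list)                # a list is not a dict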
|
238,617
|
tarvitz/django-unity-asset-server-http-client
|
duashttp/views/api.py
|
AssetVersionViewSetBase.blob
|
def blob(self, request, pk=None):
"""
        fetch large object from pg and give it back to user via HTTP 1.1
request
:param request: django request instance
:param pk: requested resource primary key
:rtype: django.http.HttpResponse
:return: file with its filename stored in database
"""
obj = self.get_object_or_none()
if obj:
blob = obj.get_blob_data()
content_type = 'octet/stream'
response = HttpResponse(blob, content_type=content_type,
status=status.HTTP_200_OK)
response['Content-Disposition'] = (
'attachment; filename="%s"' % obj.name
)
return response
return HttpResponse('404', status=status.HTTP_404_NOT_FOUND,
content_type='application/json')
|
python
|
def blob(self, request, pk=None):
"""
        fetch large object from pg and give it back to user via HTTP 1.1
request
:param request: django request instance
:param pk: requested resource primary key
:rtype: django.http.HttpResponse
:return: file with its filename stored in database
"""
obj = self.get_object_or_none()
if obj:
blob = obj.get_blob_data()
content_type = 'octet/stream'
response = HttpResponse(blob, content_type=content_type,
status=status.HTTP_200_OK)
response['Content-Disposition'] = (
'attachment; filename="%s"' % obj.name
)
return response
return HttpResponse('404', status=status.HTTP_404_NOT_FOUND,
content_type='application/json')
|
[
"def",
"blob",
"(",
"self",
",",
"request",
",",
"pk",
"=",
"None",
")",
":",
"obj",
"=",
"self",
".",
"get_object_or_none",
"(",
")",
"if",
"obj",
":",
"blob",
"=",
"obj",
".",
"get_blob_data",
"(",
")",
"content_type",
"=",
"'octet/stream'",
"response",
"=",
"HttpResponse",
"(",
"blob",
",",
"content_type",
"=",
"content_type",
",",
"status",
"=",
"status",
".",
"HTTP_200_OK",
")",
"response",
"[",
"'Content-Disposition'",
"]",
"=",
"(",
"'attachment; filename=\"%s\"'",
"%",
"obj",
".",
"name",
")",
"return",
"response",
"return",
"HttpResponse",
"(",
"'404'",
",",
"status",
"=",
"status",
".",
"HTTP_404_NOT_FOUND",
",",
"content_type",
"=",
"'application/json'",
")"
] |
fetch large object from pg and give it back to user via HTTP 1.1
request
:param request: django request instance
:param pk: requested resource primary key
:rtype: django.http.HttpResponse
:return: file with its filename stored in database
|
[
"fetch",
"large",
"object",
"from",
"pg",
"and",
"gives",
"it",
"back",
"to",
"user",
"via",
"HTTP",
"1",
".",
"1",
"request"
] |
2b7e3b1116b5e98b31c2e267bdd66a77e0579ad1
|
https://github.com/tarvitz/django-unity-asset-server-http-client/blob/2b7e3b1116b5e98b31c2e267bdd66a77e0579ad1/duashttp/views/api.py#L27-L49
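The same Content-Disposition download pattern in a plain Django view, as a sketch; load_blob is a hypothetical helper standing in for get_object_or_none() plus get_blob_data(), and application/octet-stream is the conventional spelling of the MIME type:

from django.http import HttpResponse

def download(request, pk):
    blob = load_blob(pk)  # hypothetical: returns (bytes, filename) or None
    if blob is None:
        return HttpResponse('404', status=404, content_type='application/json')
    data, filename = blob
    response = HttpResponse(data, content_type='application/octet-stream')
    response['Content-Disposition'] = 'attachment; filename="%s"' % filename
    return response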
|
238,618
|
rosenbrockc/ci
|
pyci/config.py
|
RepositorySettings._get_github
|
def _get_github(self):
"""Creates an instance of github.Github to interact with the repos via the
API interface in pygithub.
"""
from github import Github
vms("Querying github with user '{}'.".format(self.username))
g = Github(self.username, self.apikey)
self._user = g.get_user()
if self._user is None:
raise ValueError("Can't authenticate to github with '{}'.".format(self.username))
#The github user authenticating always has to be specified; however the user
#may not be able to see the repo, even if it has access to it. We may need
#to check the organization repos.
if self.organization is not None:
self._org = g.get_organization(self.organization)
vms("Found github organization '{}'.".format(self._org.name), 2)
#Next we need to find this repository in the lists available to both
#the user *and* the organization. If they specified an organization, then we
#should check that first/exclusively.
for repo in self._org.get_repos():
if repo.full_name.lower() == self.name.lower():
self._repo = repo
vms("Found organization repository '{}'.".format(self._repo.full_name), 2)
break
else:
for repo in self._user.get_repos():
if repo.full_name.lower() == self.name.lower():
self._repo = repo
vms("Found user repository '{}'.".format(self._repo.full_name), 2)
break
|
python
|
def _get_github(self):
"""Creates an instance of github.Github to interact with the repos via the
API interface in pygithub.
"""
from github import Github
vms("Querying github with user '{}'.".format(self.username))
g = Github(self.username, self.apikey)
self._user = g.get_user()
if self._user is None:
raise ValueError("Can't authenticate to github with '{}'.".format(self.username))
#The github user authenticating always has to be specified; however the user
#may not be able to see the repo, even if it has access to it. We may need
#to check the organization repos.
if self.organization is not None:
self._org = g.get_organization(self.organization)
vms("Found github organization '{}'.".format(self._org.name), 2)
#Next we need to find this repository in the lists available to both
#the user *and* the organization. If they specified an organization, then we
#should check that first/exclusively.
for repo in self._org.get_repos():
if repo.full_name.lower() == self.name.lower():
self._repo = repo
vms("Found organization repository '{}'.".format(self._repo.full_name), 2)
break
else:
for repo in self._user.get_repos():
if repo.full_name.lower() == self.name.lower():
self._repo = repo
vms("Found user repository '{}'.".format(self._repo.full_name), 2)
break
|
[
"def",
"_get_github",
"(",
"self",
")",
":",
"from",
"github",
"import",
"Github",
"vms",
"(",
"\"Querying github with user '{}'.\"",
".",
"format",
"(",
"self",
".",
"username",
")",
")",
"g",
"=",
"Github",
"(",
"self",
".",
"username",
",",
"self",
".",
"apikey",
")",
"self",
".",
"_user",
"=",
"g",
".",
"get_user",
"(",
")",
"if",
"self",
".",
"_user",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Can't authenticate to github with '{}'.\"",
".",
"format",
"(",
"self",
".",
"username",
")",
")",
"#The github user authenticating always has to be specified; however the user",
"#may not be able to see the repo, even if it has access to it. We may need",
"#to check the organization repos.",
"if",
"self",
".",
"organization",
"is",
"not",
"None",
":",
"self",
".",
"_org",
"=",
"g",
".",
"get_organization",
"(",
"self",
".",
"organization",
")",
"vms",
"(",
"\"Found github organization '{}'.\"",
".",
"format",
"(",
"self",
".",
"_org",
".",
"name",
")",
",",
"2",
")",
"#Next we need to find this repository in the lists available to both",
"#the user *and* the organization. If they specified an organization, then we",
"#should check that first/exclusively.",
"for",
"repo",
"in",
"self",
".",
"_org",
".",
"get_repos",
"(",
")",
":",
"if",
"repo",
".",
"full_name",
".",
"lower",
"(",
")",
"==",
"self",
".",
"name",
".",
"lower",
"(",
")",
":",
"self",
".",
"_repo",
"=",
"repo",
"vms",
"(",
"\"Found organization repository '{}'.\"",
".",
"format",
"(",
"self",
".",
"_repo",
".",
"full_name",
")",
",",
"2",
")",
"break",
"else",
":",
"for",
"repo",
"in",
"self",
".",
"_user",
".",
"get_repos",
"(",
")",
":",
"if",
"repo",
".",
"full_name",
".",
"lower",
"(",
")",
"==",
"self",
".",
"name",
".",
"lower",
"(",
")",
":",
"self",
".",
"_repo",
"=",
"repo",
"vms",
"(",
"\"Found user repository '{}'.\"",
".",
"format",
"(",
"self",
".",
"_repo",
".",
"full_name",
")",
",",
"2",
")",
"break"
] |
Creates an instance of github.Github to interact with the repos via the
API interface in pygithub.
|
[
"Creates",
"an",
"instance",
"of",
"github",
".",
"Github",
"to",
"interact",
"with",
"the",
"repos",
"via",
"the",
"API",
"interface",
"in",
"pygithub",
"."
] |
4d5a60291424a83124d1d962d17fb4c7718cde2b
|
https://github.com/rosenbrockc/ci/blob/4d5a60291424a83124d1d962d17fb4c7718cde2b/pyci/config.py#L86-L116
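A condensed PyGithub sketch of the same lookup, assuming username, apikey, and a target full_name are already defined; like the method above, it compares repository full names case-insensitively:

from github import Github

g = Github(username, apikey)  # a personal access token also works as the first argument
repo = next((r for r in g.get_user().get_repos()
             if r.full_name.lower() == full_name.lower()), None)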
|
238,619
|
rosenbrockc/ci
|
pyci/config.py
|
RepositorySettings._parse_xml
|
def _parse_xml(self):
"""Extracts the XML settings into class instances that can operate on
the settings to perform the testing functions.
"""
import xml.etree.ElementTree as ET
from os import path
#This dict has the keys of XML tags that are required in order for the
#CI server to run the repo. When each one is parsed, we change its value
#to True and then check that they are all true at the end.
required = {"testing": False, "wiki": False}
#Make sure the file exists and then import it as XML and read the values out.
if path.isfile(self.filepath):
tree = ET.parse(self.filepath)
vms("Parsing XML tree from {}.".format(self.filepath), 2)
root = tree.getroot()
if root.tag != "cirepo":
raise ValueError("The root tag in a continuous integration settings XML "
"file should be a <cirepo> tag.")
self._parse_repo(root)
for child in root:
if child.tag == "cron":
if self.server is not None:
self.server.cron.settings[self.name] = CronSettings(child)
if child.tag == "testing":
self.testing = TestingSettings(child)
if child.tag == "static":
self.static = StaticSettings(child)
if child.tag == "wiki":
self.wiki["user"] = get_attrib(child, "user", "wiki")
self.wiki["password"] = get_attrib(child, "password", "wiki")
self.wiki["basepage"] = get_attrib(child, "basepage", "wiki")
if child.tag in required:
required[child.tag] = True
if not all(required.values()):
tags = ', '.join(["<{}>".format(t) for t in required])
raise ValueError("{} are required tags in the repo's XML settings file.".format(tags))
|
python
|
def _parse_xml(self):
"""Extracts the XML settings into class instances that can operate on
the settings to perform the testing functions.
"""
import xml.etree.ElementTree as ET
from os import path
#This dict has the keys of XML tags that are required in order for the
#CI server to run the repo. When each one is parsed, we change its value
#to True and then check that they are all true at the end.
required = {"testing": False, "wiki": False}
#Make sure the file exists and then import it as XML and read the values out.
if path.isfile(self.filepath):
tree = ET.parse(self.filepath)
vms("Parsing XML tree from {}.".format(self.filepath), 2)
root = tree.getroot()
if root.tag != "cirepo":
raise ValueError("The root tag in a continuous integration settings XML "
"file should be a <cirepo> tag.")
self._parse_repo(root)
for child in root:
if child.tag == "cron":
if self.server is not None:
self.server.cron.settings[self.name] = CronSettings(child)
if child.tag == "testing":
self.testing = TestingSettings(child)
if child.tag == "static":
self.static = StaticSettings(child)
if child.tag == "wiki":
self.wiki["user"] = get_attrib(child, "user", "wiki")
self.wiki["password"] = get_attrib(child, "password", "wiki")
self.wiki["basepage"] = get_attrib(child, "basepage", "wiki")
if child.tag in required:
required[child.tag] = True
if not all(required.values()):
tags = ', '.join(["<{}>".format(t) for t in required])
raise ValueError("{} are required tags in the repo's XML settings file.".format(tags))
|
[
"def",
"_parse_xml",
"(",
"self",
")",
":",
"import",
"xml",
".",
"etree",
".",
"ElementTree",
"as",
"ET",
"from",
"os",
"import",
"path",
"#This dict has the keys of XML tags that are required in order for the",
"#CI server to run the repo. When each one is parsed, we change its value",
"#to True and then check that they are all true at the end.",
"required",
"=",
"{",
"\"testing\"",
":",
"False",
",",
"\"wiki\"",
":",
"False",
"}",
"#Make sure the file exists and then import it as XML and read the values out.",
"if",
"path",
".",
"isfile",
"(",
"self",
".",
"filepath",
")",
":",
"tree",
"=",
"ET",
".",
"parse",
"(",
"self",
".",
"filepath",
")",
"vms",
"(",
"\"Parsing XML tree from {}.\"",
".",
"format",
"(",
"self",
".",
"filepath",
")",
",",
"2",
")",
"root",
"=",
"tree",
".",
"getroot",
"(",
")",
"if",
"root",
".",
"tag",
"!=",
"\"cirepo\"",
":",
"raise",
"ValueError",
"(",
"\"The root tag in a continuous integration settings XML \"",
"\"file should be a <cirepo> tag.\"",
")",
"self",
".",
"_parse_repo",
"(",
"root",
")",
"for",
"child",
"in",
"root",
":",
"if",
"child",
".",
"tag",
"==",
"\"cron\"",
":",
"if",
"self",
".",
"server",
"is",
"not",
"None",
":",
"self",
".",
"server",
".",
"cron",
".",
"settings",
"[",
"self",
".",
"name",
"]",
"=",
"CronSettings",
"(",
"child",
")",
"if",
"child",
".",
"tag",
"==",
"\"testing\"",
":",
"self",
".",
"testing",
"=",
"TestingSettings",
"(",
"child",
")",
"if",
"child",
".",
"tag",
"==",
"\"static\"",
":",
"self",
".",
"static",
"=",
"StaticSettings",
"(",
"child",
")",
"if",
"child",
".",
"tag",
"==",
"\"wiki\"",
":",
"self",
".",
"wiki",
"[",
"\"user\"",
"]",
"=",
"get_attrib",
"(",
"child",
",",
"\"user\"",
",",
"\"wiki\"",
")",
"self",
".",
"wiki",
"[",
"\"password\"",
"]",
"=",
"get_attrib",
"(",
"child",
",",
"\"password\"",
",",
"\"wiki\"",
")",
"self",
".",
"wiki",
"[",
"\"basepage\"",
"]",
"=",
"get_attrib",
"(",
"child",
",",
"\"basepage\"",
",",
"\"wiki\"",
")",
"if",
"child",
".",
"tag",
"in",
"required",
":",
"required",
"[",
"child",
".",
"tag",
"]",
"=",
"True",
"if",
"not",
"all",
"(",
"required",
".",
"values",
"(",
")",
")",
":",
"tags",
"=",
"', '",
".",
"join",
"(",
"[",
"\"<{}>\"",
".",
"format",
"(",
"t",
")",
"for",
"t",
"in",
"required",
"]",
")",
"raise",
"ValueError",
"(",
"\"{} are required tags in the repo's XML settings file.\"",
".",
"format",
"(",
"tags",
")",
")"
] |
Extracts the XML settings into class instances that can operate on
the settings to perform the testing functions.
|
[
"Extracts",
"the",
"XML",
"settings",
"into",
"class",
"instances",
"that",
"can",
"operate",
"on",
"the",
"settings",
"to",
"perform",
"the",
"testing",
"functions",
"."
] |
4d5a60291424a83124d1d962d17fb4c7718cde2b
|
https://github.com/rosenbrockc/ci/blob/4d5a60291424a83124d1d962d17fb4c7718cde2b/pyci/config.py#L129-L166
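A minimal sketch of the <cirepo> document shape this parser accepts; tag names come from the code above, attribute values are illustrative, and the repository-level attributes handled by _parse_repo(root) are omitted:

import xml.etree.ElementTree as ET

doc = """<cirepo>
  <cron frequency="10" emails="ci@example.com"/>
  <testing/>
  <wiki user="bot" password="secret" basepage="CI"/>
</cirepo>"""
root = ET.fromstring(doc)
assert root.tag == "cirepo"
assert {c.tag for c in root} >= {"testing", "wiki"}  # the two required tags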
|
238,620
|
rosenbrockc/ci
|
pyci/config.py
|
CronSettings._parse_xml
|
def _parse_xml(self, xml):
"""Extracts the attributes from the XMLElement instance."""
from re import split
vms("Parsing <cron> XML child tag.", 2)
self.frequency = get_attrib(xml, "frequency", default=5, cast=int)
self.emails = split(",\s*", get_attrib(xml, "emails", default=""))
self.notify = split(",\s*", get_attrib(xml, "notify", default=""))
|
python
|
def _parse_xml(self, xml):
"""Extracts the attributes from the XMLElement instance."""
from re import split
vms("Parsing <cron> XML child tag.", 2)
self.frequency = get_attrib(xml, "frequency", default=5, cast=int)
self.emails = split(",\s*", get_attrib(xml, "emails", default=""))
self.notify = split(",\s*", get_attrib(xml, "notify", default=""))
|
[
"def",
"_parse_xml",
"(",
"self",
",",
"xml",
")",
":",
"from",
"re",
"import",
"split",
"vms",
"(",
"\"Parsing <cron> XML child tag.\"",
",",
"2",
")",
"self",
".",
"frequency",
"=",
"get_attrib",
"(",
"xml",
",",
"\"frequency\"",
",",
"default",
"=",
"5",
",",
"cast",
"=",
"int",
")",
"self",
".",
"emails",
"=",
"split",
"(",
"\",\\s*\"",
",",
"get_attrib",
"(",
"xml",
",",
"\"emails\"",
",",
"default",
"=",
"\"\"",
")",
")",
"self",
".",
"notify",
"=",
"split",
"(",
"\",\\s*\"",
",",
"get_attrib",
"(",
"xml",
",",
"\"notify\"",
",",
"default",
"=",
"\"\"",
")",
")"
] |
Extracts the attributes from the XMLElement instance.
|
[
"Extracts",
"the",
"attributes",
"from",
"the",
"XMLElement",
"instance",
"."
] |
4d5a60291424a83124d1d962d17fb4c7718cde2b
|
https://github.com/rosenbrockc/ci/blob/4d5a60291424a83124d1d962d17fb4c7718cde2b/pyci/config.py#L197-L203
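The comma splitting above tolerates whitespace after each comma; note that splitting the empty-string default yields a list containing one empty string, not an empty list:

from re import split

assert split(r",\s*", "a@x.org, b@y.org,c@z.org") == ['a@x.org', 'b@y.org', 'c@z.org']
assert split(r",\s*", "") == ['']  # the default "" does not produce []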
|
238,621
|
rosenbrockc/ci
|
pyci/config.py
|
StaticSettings._parse_xml
|
def _parse_xml(self, xml):
"""Extracts objects representing and interacting with the settings in the
xml tag.
"""
vms("Parsing <static> XML child tag.", 2)
for child in xml:
if "path" in child.attrib and "target" in child.attrib:
if child.tag == "file":
self.files.append({"source": child.attrib["path"],
"target": child.attrib["target"]})
elif child.tag == "folder":
self.folders.append({"source": child.attrib["path"],
"target": child.attrib["target"]})
|
python
|
def _parse_xml(self, xml):
"""Extracts objects representing and interacting with the settings in the
xml tag.
"""
vms("Parsing <static> XML child tag.", 2)
for child in xml:
if "path" in child.attrib and "target" in child.attrib:
if child.tag == "file":
self.files.append({"source": child.attrib["path"],
"target": child.attrib["target"]})
elif child.tag == "folder":
self.folders.append({"source": child.attrib["path"],
"target": child.attrib["target"]})
|
[
"def",
"_parse_xml",
"(",
"self",
",",
"xml",
")",
":",
"vms",
"(",
"\"Parsing <static> XML child tag.\"",
",",
"2",
")",
"for",
"child",
"in",
"xml",
":",
"if",
"\"path\"",
"in",
"child",
".",
"attrib",
"and",
"\"target\"",
"in",
"child",
".",
"attrib",
":",
"if",
"child",
".",
"tag",
"==",
"\"file\"",
":",
"self",
".",
"files",
".",
"append",
"(",
"{",
"\"source\"",
":",
"child",
".",
"attrib",
"[",
"\"path\"",
"]",
",",
"\"target\"",
":",
"child",
".",
"attrib",
"[",
"\"target\"",
"]",
"}",
")",
"elif",
"child",
".",
"tag",
"==",
"\"folder\"",
":",
"self",
".",
"folders",
".",
"append",
"(",
"{",
"\"source\"",
":",
"child",
".",
"attrib",
"[",
"\"path\"",
"]",
",",
"\"target\"",
":",
"child",
".",
"attrib",
"[",
"\"target\"",
"]",
"}",
")"
] |
Extracts objects representing and interacting with the settings in the
xml tag.
|
[
"Extracts",
"objects",
"representing",
"and",
"interacting",
"with",
"the",
"settings",
"in",
"the",
"xml",
"tag",
"."
] |
4d5a60291424a83124d1d962d17fb4c7718cde2b
|
https://github.com/rosenbrockc/ci/blob/4d5a60291424a83124d1d962d17fb4c7718cde2b/pyci/config.py#L230-L242
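A sketch of the <static> children the parser consumes; entries missing either the path or the target attribute are silently skipped, which the example mirrors:

import xml.etree.ElementTree as ET

node = ET.fromstring('<static>'
                     '<file path="~/conf.yml" target="ci/conf.yml"/>'
                     '<folder path="~/data" target="ci/data"/>'
                     '<file path="broken.yml"/>'   # skipped: no target
                     '</static>')
files = [c.attrib for c in node
         if c.tag == "file" and "path" in c.attrib and "target" in c.attrib]
assert len(files) == 1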
|
238,622
|
rosenbrockc/ci
|
pyci/config.py
|
StaticSettings.copy
|
def copy(self, repodir):
"""Copies the static files and folders specified in these settings into the
locally-cloned repository directory.
:arg repodir: the full path to the directory with the locally-cloned version
of the pull request being unit tested.
"""
#Instead of using the built-in shell copy, we make shell calls to rsync.
#This allows us to copy only changes across between runs of pull-requests.
from os import system, path
vms("Running static file copy locally.", 2)
for file in self.files:
fullpath = path.expanduser(file["source"])
if path.isfile(fullpath):
vms("Running 'rsync' for {}.".format(fullpath), 3)
system("rsync -t -u {} {}".format(fullpath, get_repo_relpath(repodir, file["target"])))
for folder in self.folders:
fullpath = path.expanduser(folder["source"])
if path.isdir(fullpath):
vms("Running 'rsync' for {}.".format(fullpath), 3)
system("rsync -t -u -r {} {}".format(path.join(fullpath, ""),
path.join(get_repo_relpath(repodir, folder["target"]), "")))
|
python
|
def copy(self, repodir):
"""Copies the static files and folders specified in these settings into the
locally-cloned repository directory.
:arg repodir: the full path to the directory with the locally-cloned version
of the pull request being unit tested.
"""
#Instead of using the built-in shell copy, we make shell calls to rsync.
#This allows us to copy only changes across between runs of pull-requests.
from os import system, path
vms("Running static file copy locally.", 2)
for file in self.files:
fullpath = path.expanduser(file["source"])
if path.isfile(fullpath):
vms("Running 'rsync' for {}.".format(fullpath), 3)
system("rsync -t -u {} {}".format(fullpath, get_repo_relpath(repodir, file["target"])))
for folder in self.folders:
fullpath = path.expanduser(folder["source"])
if path.isdir(fullpath):
vms("Running 'rsync' for {}.".format(fullpath), 3)
system("rsync -t -u -r {} {}".format(path.join(fullpath, ""),
path.join(get_repo_relpath(repodir, folder["target"]), "")))
|
[
"def",
"copy",
"(",
"self",
",",
"repodir",
")",
":",
"#Instead of using the built-in shell copy, we make shell calls to rsync.",
"#This allows us to copy only changes across between runs of pull-requests.",
"from",
"os",
"import",
"system",
",",
"path",
"vms",
"(",
"\"Running static file copy locally.\"",
",",
"2",
")",
"for",
"file",
"in",
"self",
".",
"files",
":",
"fullpath",
"=",
"path",
".",
"expanduser",
"(",
"file",
"[",
"\"source\"",
"]",
")",
"if",
"path",
".",
"isfile",
"(",
"fullpath",
")",
":",
"vms",
"(",
"\"Running 'rsync' for {}.\"",
".",
"format",
"(",
"fullpath",
")",
",",
"3",
")",
"system",
"(",
"\"rsync -t -u {} {}\"",
".",
"format",
"(",
"fullpath",
",",
"get_repo_relpath",
"(",
"repodir",
",",
"file",
"[",
"\"target\"",
"]",
")",
")",
")",
"for",
"folder",
"in",
"self",
".",
"folders",
":",
"fullpath",
"=",
"path",
".",
"expanduser",
"(",
"folder",
"[",
"\"source\"",
"]",
")",
"if",
"path",
".",
"isdir",
"(",
"fullpath",
")",
":",
"vms",
"(",
"\"Running 'rsync' for {}.\"",
".",
"format",
"(",
"fullpath",
")",
",",
"3",
")",
"system",
"(",
"\"rsync -t -u -r {} {}\"",
".",
"format",
"(",
"path",
".",
"join",
"(",
"fullpath",
",",
"\"\"",
")",
",",
"path",
".",
"join",
"(",
"get_repo_relpath",
"(",
"repodir",
",",
"folder",
"[",
"\"target\"",
"]",
")",
",",
"\"\"",
")",
")",
")"
] |
Copies the static files and folders specified in these settings into the
locally-cloned repository directory.
:arg repodir: the full path to the directory with the locally-cloned version
of the pull request being unit tested.
|
[
"Copies",
"the",
"static",
"files",
"and",
"folders",
"specified",
"in",
"these",
"settings",
"into",
"the",
"locally",
"-",
"cloned",
"repository",
"directory",
"."
] |
4d5a60291424a83124d1d962d17fb4c7718cde2b
|
https://github.com/rosenbrockc/ci/blob/4d5a60291424a83124d1d962d17fb4c7718cde2b/pyci/config.py#L244-L266
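The path.join(fullpath, "") above appends a trailing separator, which tells rsync to copy a folder's contents rather than the folder itself; -t preserves modification times and -u skips destination files that are newer, so repeated runs transfer only changes. A subprocess-based sketch with illustrative paths:

import subprocess

src, dest = "/home/ci/data", "/tmp/repo/data"  # illustrative paths
subprocess.check_call(["rsync", "-t", "-u", src, dest])                    # one file
subprocess.check_call(["rsync", "-t", "-u", "-r", src + "/", dest + "/"])  # folder contents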
|
238,623
|
rosenbrockc/ci
|
pyci/config.py
|
GlobalSettings.serial
|
def serial(self):
"""Returns true if the CI server should run in serial mode.
"""
serial = self.property_get("SERIAL", False)
if isinstance(serial, str):
return serial.lower() == "true"
else:
return serial
|
python
|
def serial(self):
"""Returns true if the CI server should run in serial mode.
"""
serial = self.property_get("SERIAL", False)
if isinstance(serial, str):
return serial.lower() == "true"
else:
return serial
|
[
"def",
"serial",
"(",
"self",
")",
":",
"serial",
"=",
"self",
".",
"property_get",
"(",
"\"SERIAL\"",
",",
"False",
")",
"if",
"isinstance",
"(",
"serial",
",",
"str",
")",
":",
"return",
"serial",
".",
"lower",
"(",
")",
"==",
"\"true\"",
"else",
":",
"return",
"serial"
] |
Returns true if the CI server should run in serial mode.
|
[
"Returns",
"true",
"if",
"the",
"CI",
"server",
"should",
"run",
"in",
"serial",
"mode",
"."
] |
4d5a60291424a83124d1d962d17fb4c7718cde2b
|
https://github.com/rosenbrockc/ci/blob/4d5a60291424a83124d1d962d17fb4c7718cde2b/pyci/config.py#L429-L436
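The property may come back from configuration as a string, so the method above maps "true" (any case) to True; as a standalone sketch:

def as_bool(value):
    # Mirrors GlobalSettings.serial: strings compare case-insensitively to "true".
    return value.lower() == "true" if isinstance(value, str) else value

assert as_bool("TRUE") is True
assert as_bool("yes") is False   # anything other than "true" is False
assert as_bool(False) is False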
|
238,624
|
rosenbrockc/ci
|
pyci/config.py
|
GlobalSettings.var_replace
|
def var_replace(self, text):
"""Replaces all instances of @VAR with their values in the specified text.
"""
result = text
for var in self._vardict:
result = result.replace("@{}".format(var), self._vardict[var])
return result
|
python
|
def var_replace(self, text):
"""Replaces all instances of @VAR with their values in the specified text.
"""
result = text
for var in self._vardict:
result = result.replace("@{}".format(var), self._vardict[var])
return result
|
[
"def",
"var_replace",
"(",
"self",
",",
"text",
")",
":",
"result",
"=",
"text",
"for",
"var",
"in",
"self",
".",
"_vardict",
":",
"result",
"=",
"result",
".",
"replace",
"(",
"\"@{}\"",
".",
"format",
"(",
"var",
")",
",",
"self",
".",
"_vardict",
"[",
"var",
"]",
")",
"return",
"result"
] |
Replaces all instances of @VAR with their values in the specified text.
|
[
"Replaces",
"all",
"instances",
"of"
] |
4d5a60291424a83124d1d962d17fb4c7718cde2b
|
https://github.com/rosenbrockc/ci/blob/4d5a60291424a83124d1d962d17fb4c7718cde2b/pyci/config.py#L454-L460
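A sketch of the @VAR substitution above. Because replacement follows dict iteration order, a variable name that is a prefix of another (say @HOME and @HOMEDIR) can clobber the longer one, so distinct prefixes are safest:

vardict = {"USER": "ci", "ROOT": "/var/ci"}
text = "run as @USER under @ROOT"
for var in vardict:
    text = text.replace("@{}".format(var), vardict[var])
assert text == "run as ci under /var/ci"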
|
238,625
|
siemens/django-dingos
|
dingos/models.py
|
get_or_create_iobject
|
def get_or_create_iobject(identifier_uid,
identifier_namespace_uri,
iobject_type_name,
iobject_type_namespace_uri,
iobject_type_revision_name,
iobject_family_name,
iobject_family_revision_name="",
identifier_namespace_name="",
timestamp=None,
create_timestamp=None,
overwrite=False,
dingos_class_map=dingos_class_map):
"""
Get or create an information object.
"""
# create or retrieve the iobject type and revision
# create or retrieve identifier
if not timestamp:
raise StandardError("You must supply a timestamp.")
id_namespace, created = dingos_class_map['IdentifierNameSpace'].objects.get_or_create(uri=identifier_namespace_uri)
if created and identifier_namespace_name:
id_namespace.name = identifier_namespace_name
id_namespace.save()
identifier, created = dingos_class_map['Identifier'].objects.get_or_create(uid=identifier_uid,
namespace=id_namespace,
defaults={'latest': None})
iobject_type_namespace, created = dingos_class_map['DataTypeNameSpace'].objects.get_or_create(uri=iobject_type_namespace_uri)
iobject_family, created = dingos_class_map['InfoObjectFamily'].objects.get_or_create(name=iobject_family_name)
iobject_family_revision, created = dingos_class_map['Revision'].objects.get_or_create(
name=iobject_family_revision_name)
# create or retrieve the iobject type
iobject_type, created = dingos_class_map['InfoObjectType'].objects.get_or_create(name=iobject_type_name,
iobject_family=iobject_family,
namespace=iobject_type_namespace)
iobject_type_revision, created = dingos_class_map['Revision'].objects.get_or_create(name=iobject_type_revision_name)
if not create_timestamp:
create_timestamp = timezone.now()
#if not timestamp:
# timestamp = create_timestamp
# iobject = overwrite
# created = False
iobject, created = dingos_class_map["InfoObject"].objects.get_or_create(identifier=identifier,
timestamp=timestamp,
defaults={'iobject_family': iobject_family,
'iobject_family_revision': iobject_family_revision,
'iobject_type': iobject_type,
'iobject_type_revision': iobject_type_revision,
'create_timestamp': create_timestamp})
if created:
iobject.set_name()
iobject.save()
identifier.latest = iobject
identifier.save()
elif overwrite:
iobject.timestamp = timestamp
iobject.create_timestamp = create_timestamp
iobject.iobject_family = iobject_family
iobject.iobject_family_revision = iobject_family_revision
iobject.iobject_type = iobject_type
iobject.iobject_type_revision = iobject_type_revision
iobject.set_name()
iobject.save()
logger.debug(
"Created iobject id with %s , ts %s (created was %s) and overwrite as %s" % (iobject.identifier, timestamp, created, overwrite))
return iobject, created
|
python
|
def get_or_create_iobject(identifier_uid,
identifier_namespace_uri,
iobject_type_name,
iobject_type_namespace_uri,
iobject_type_revision_name,
iobject_family_name,
iobject_family_revision_name="",
identifier_namespace_name="",
timestamp=None,
create_timestamp=None,
overwrite=False,
dingos_class_map=dingos_class_map):
"""
Get or create an information object.
"""
# create or retrieve the iobject type and revision
# create or retrieve identifier
if not timestamp:
raise StandardError("You must supply a timestamp.")
id_namespace, created = dingos_class_map['IdentifierNameSpace'].objects.get_or_create(uri=identifier_namespace_uri)
if created and identifier_namespace_name:
id_namespace.name = identifier_namespace_name
id_namespace.save()
identifier, created = dingos_class_map['Identifier'].objects.get_or_create(uid=identifier_uid,
namespace=id_namespace,
defaults={'latest': None})
iobject_type_namespace, created = dingos_class_map['DataTypeNameSpace'].objects.get_or_create(uri=iobject_type_namespace_uri)
iobject_family, created = dingos_class_map['InfoObjectFamily'].objects.get_or_create(name=iobject_family_name)
iobject_family_revision, created = dingos_class_map['Revision'].objects.get_or_create(
name=iobject_family_revision_name)
# create or retrieve the iobject type
iobject_type, created = dingos_class_map['InfoObjectType'].objects.get_or_create(name=iobject_type_name,
iobject_family=iobject_family,
namespace=iobject_type_namespace)
iobject_type_revision, created = dingos_class_map['Revision'].objects.get_or_create(name=iobject_type_revision_name)
if not create_timestamp:
create_timestamp = timezone.now()
#if not timestamp:
# timestamp = create_timestamp
# iobject = overwrite
# created = False
iobject, created = dingos_class_map["InfoObject"].objects.get_or_create(identifier=identifier,
timestamp=timestamp,
defaults={'iobject_family': iobject_family,
'iobject_family_revision': iobject_family_revision,
'iobject_type': iobject_type,
'iobject_type_revision': iobject_type_revision,
'create_timestamp': create_timestamp})
if created:
iobject.set_name()
iobject.save()
identifier.latest = iobject
identifier.save()
elif overwrite:
iobject.timestamp = timestamp
iobject.create_timestamp = create_timestamp
iobject.iobject_family = iobject_family
iobject.iobject_family_revision = iobject_family_revision
iobject.iobject_type = iobject_type
iobject.iobject_type_revision = iobject_type_revision
iobject.set_name()
iobject.save()
logger.debug(
"Created iobject id with %s , ts %s (created was %s) and overwrite as %s" % (iobject.identifier, timestamp, created, overwrite))
return iobject, created
|
[
"def",
"get_or_create_iobject",
"(",
"identifier_uid",
",",
"identifier_namespace_uri",
",",
"iobject_type_name",
",",
"iobject_type_namespace_uri",
",",
"iobject_type_revision_name",
",",
"iobject_family_name",
",",
"iobject_family_revision_name",
"=",
"\"\"",
",",
"identifier_namespace_name",
"=",
"\"\"",
",",
"timestamp",
"=",
"None",
",",
"create_timestamp",
"=",
"None",
",",
"overwrite",
"=",
"False",
",",
"dingos_class_map",
"=",
"dingos_class_map",
")",
":",
"# create or retrieve the iobject type and revision",
"# create or retrieve identifier",
"if",
"not",
"timestamp",
":",
"raise",
"StandardError",
"(",
"\"You must supply a timestamp.\"",
")",
"id_namespace",
",",
"created",
"=",
"dingos_class_map",
"[",
"'IdentifierNameSpace'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"uri",
"=",
"identifier_namespace_uri",
")",
"if",
"created",
"and",
"identifier_namespace_name",
":",
"id_namespace",
".",
"name",
"=",
"identifier_namespace_name",
"id_namespace",
".",
"save",
"(",
")",
"identifier",
",",
"created",
"=",
"dingos_class_map",
"[",
"'Identifier'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"uid",
"=",
"identifier_uid",
",",
"namespace",
"=",
"id_namespace",
",",
"defaults",
"=",
"{",
"'latest'",
":",
"None",
"}",
")",
"iobject_type_namespace",
",",
"created",
"=",
"dingos_class_map",
"[",
"'DataTypeNameSpace'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"uri",
"=",
"iobject_type_namespace_uri",
")",
"iobject_family",
",",
"created",
"=",
"dingos_class_map",
"[",
"'InfoObjectFamily'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"name",
"=",
"iobject_family_name",
")",
"iobject_family_revision",
",",
"created",
"=",
"dingos_class_map",
"[",
"'Revision'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"name",
"=",
"iobject_family_revision_name",
")",
"# create or retrieve the iobject type",
"iobject_type",
",",
"created",
"=",
"dingos_class_map",
"[",
"'InfoObjectType'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"name",
"=",
"iobject_type_name",
",",
"iobject_family",
"=",
"iobject_family",
",",
"namespace",
"=",
"iobject_type_namespace",
")",
"iobject_type_revision",
",",
"created",
"=",
"dingos_class_map",
"[",
"'Revision'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"name",
"=",
"iobject_type_revision_name",
")",
"if",
"not",
"create_timestamp",
":",
"create_timestamp",
"=",
"timezone",
".",
"now",
"(",
")",
"#if not timestamp:",
"# timestamp = create_timestamp",
"# iobject = overwrite",
"# created = False",
"iobject",
",",
"created",
"=",
"dingos_class_map",
"[",
"\"InfoObject\"",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"identifier",
"=",
"identifier",
",",
"timestamp",
"=",
"timestamp",
",",
"defaults",
"=",
"{",
"'iobject_family'",
":",
"iobject_family",
",",
"'iobject_family_revision'",
":",
"iobject_family_revision",
",",
"'iobject_type'",
":",
"iobject_type",
",",
"'iobject_type_revision'",
":",
"iobject_type_revision",
",",
"'create_timestamp'",
":",
"create_timestamp",
"}",
")",
"if",
"created",
":",
"iobject",
".",
"set_name",
"(",
")",
"iobject",
".",
"save",
"(",
")",
"identifier",
".",
"latest",
"=",
"iobject",
"identifier",
".",
"save",
"(",
")",
"elif",
"overwrite",
":",
"iobject",
".",
"timestamp",
"=",
"timestamp",
"iobject",
".",
"create_timestamp",
"=",
"create_timestamp",
"iobject",
".",
"iobject_family",
"=",
"iobject_family",
"iobject",
".",
"iobject_family_revision",
"=",
"iobject_family_revision",
"iobject",
".",
"iobject_type",
"=",
"iobject_type",
"iobject",
".",
"iobject_type_revision",
"=",
"iobject_type_revision",
"iobject",
".",
"set_name",
"(",
")",
"iobject",
".",
"save",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Created iobject id with %s , ts %s (created was %s) and overwrite as %s\"",
"%",
"(",
"iobject",
".",
"identifier",
",",
"timestamp",
",",
"created",
",",
"overwrite",
")",
")",
"return",
"iobject",
",",
"created"
] |
Get or create an information object.
|
[
"Get",
"or",
"create",
"an",
"information",
"object",
"."
] |
7154f75b06d2538568e2f2455a76f3d0db0b7d70
|
https://github.com/siemens/django-dingos/blob/7154f75b06d2538568e2f2455a76f3d0db0b7d70/dingos/models.py#L1860-L1940
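The defaults idiom used throughout is worth isolating: the keyword arguments identify the row, while defaults populate fields only when a new row is inserted. A sketch using names from the call above:

Identifier = dingos_class_map['Identifier']      # as in the code above
identifier, created = Identifier.objects.get_or_create(
    uid=identifier_uid,                          # lookup fields
    namespace=id_namespace,
    defaults={'latest': None},                   # applied only on creation
)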
|
238,626
|
siemens/django-dingos
|
dingos/models.py
|
get_or_create_fact
|
def get_or_create_fact(fact_term,
fact_dt_name='String',
fact_dt_namespace_uri=DINGOS_NAMESPACE_URI,
values=None,
value_iobject_id=None,
value_iobject_ts=None,
):
"""
Get or create a fact object.
"""
if not values:
values = []
vocab_namespace, created = dingos_class_map['DataTypeNameSpace'].objects.get_or_create(uri=fact_dt_namespace_uri)
fact_data_type, created = dingos_class_map['FactDataType'].objects.get_or_create(name=fact_dt_name,
namespace=vocab_namespace)
# Maybe we already have a fact with exactly the same fact term and the same fact values?
# We start by looking at the number of values
value_objects = []
for value in values:
storage_location=dingos.DINGOS_VALUES_TABLE
# collect (create or get) the required value objects
if value == None:
value = ''
if isinstance(value,tuple):
# If a value is wrapped in a tuple, the second component of the tuple
# specifies the storage location of the value.
value, storage_location = value
if storage_location == dingos.DINGOS_VALUES_TABLE:
# If the value is larger than a given size, the value is written to disk, instead.
# We use this to keep too large values out of the database. Depending on how the
# database is set up, this may be necessary to allow indexing, which in turn is
# required to check uniqueness on values.
if len(value) > dingos.DINGOS_MAX_VALUE_SIZE_WRITTEN_TO_VALUE_TABLE:
(value_hash,storage_location) = write_large_value(value)
value = value_hash
fact_value, created = dingos_class_map['FactValue'].objects.get_or_create(value=value,
fact_data_type=fact_data_type,
storage_location=storage_location)
value_objects.append(fact_value)
# Do we already have a fact with given fact term and given values?
#
# For understanding the query below better, see https://groups.google.com/forum/#!topic/django-users/X9TCSrBn57Y.
# The double query is necessary, because the first count counts the number of selected
# fact_value objects, not the number of total objects for each fact.
possibly_matching_facts = Fact.objects.filter(fact_values__in=value_objects,
value_iobject_id=value_iobject_id,
value_iobject_ts=value_iobject_ts,
fact_term=fact_term
).values_list('pk',flat=True)
matching_facts = Fact.objects.filter(pk__in=list(possibly_matching_facts)). \
annotate(num_values=Count('fact_values')). \
filter(num_values=len(value_objects)). \
exclude(id__in= \
Fact.objects.filter(pk__in=possibly_matching_facts).annotate(total_values=Count('fact_values')). \
filter(total_values__gt=len(value_objects)))
# Below, for educational purposes, the original query until Dingos 0.2.0, which got *really*
    # slow with lots of objects in the system. The reason for this is the last three lines:
    # the exclude-statement required the database to count the number of values for each
    # Fact in the system... but we are really only interested in facts with the same
# fact_term, value_iobject_id and value_iobject_ts...
#matching_facts = Fact.objects.filter(fact_values__in=value_objects). \
# annotate(num_values=Count('fact_values')). \
# filter(num_values=len(value_objects)). \
# filter(value_iobject_id=value_iobject_id). \
# filter(value_iobject_ts=value_iobject_ts). \
# filter(fact_term=fact_term). \
# exclude(id__in= \
# Fact.objects.annotate(total_values=Count('fact_values')). \
# filter(total_values__gt=len(value_objects)))
created = True
try:
fact_obj = matching_facts[0]
created = False
logger.debug("FOUND MATCHING OBJECT with pk %s" % fact_obj.pk)
except:
fact_obj = dingos_class_map['Fact'].objects.create(fact_term=fact_term,
value_iobject_id=value_iobject_id,
value_iobject_ts=value_iobject_ts,
)
fact_obj.fact_values.add(*value_objects)
fact_obj.save()
return fact_obj, created
|
python
|
def get_or_create_fact(fact_term,
fact_dt_name='String',
fact_dt_namespace_uri=DINGOS_NAMESPACE_URI,
values=None,
value_iobject_id=None,
value_iobject_ts=None,
):
"""
Get or create a fact object.
"""
if not values:
values = []
vocab_namespace, created = dingos_class_map['DataTypeNameSpace'].objects.get_or_create(uri=fact_dt_namespace_uri)
fact_data_type, created = dingos_class_map['FactDataType'].objects.get_or_create(name=fact_dt_name,
namespace=vocab_namespace)
# Maybe we already have a fact with exactly the same fact term and the same fact values?
# We start by looking at the number of values
value_objects = []
for value in values:
storage_location=dingos.DINGOS_VALUES_TABLE
# collect (create or get) the required value objects
if value == None:
value = ''
if isinstance(value,tuple):
# If a value is wrapped in a tuple, the second component of the tuple
# specifies the storage location of the value.
value, storage_location = value
if storage_location == dingos.DINGOS_VALUES_TABLE:
# If the value is larger than a given size, the value is written to disk, instead.
# We use this to keep too large values out of the database. Depending on how the
# database is set up, this may be necessary to allow indexing, which in turn is
# required to check uniqueness on values.
if len(value) > dingos.DINGOS_MAX_VALUE_SIZE_WRITTEN_TO_VALUE_TABLE:
(value_hash,storage_location) = write_large_value(value)
value = value_hash
fact_value, created = dingos_class_map['FactValue'].objects.get_or_create(value=value,
fact_data_type=fact_data_type,
storage_location=storage_location)
value_objects.append(fact_value)
# Do we already have a fact with given fact term and given values?
#
# For understanding the query below better, see https://groups.google.com/forum/#!topic/django-users/X9TCSrBn57Y.
# The double query is necessary, because the first count counts the number of selected
# fact_value objects, not the number of total objects for each fact.
possibly_matching_facts = Fact.objects.filter(fact_values__in=value_objects,
value_iobject_id=value_iobject_id,
value_iobject_ts=value_iobject_ts,
fact_term=fact_term
).values_list('pk',flat=True)
matching_facts = Fact.objects.filter(pk__in=list(possibly_matching_facts)). \
annotate(num_values=Count('fact_values')). \
filter(num_values=len(value_objects)). \
exclude(id__in= \
Fact.objects.filter(pk__in=possibly_matching_facts).annotate(total_values=Count('fact_values')). \
filter(total_values__gt=len(value_objects)))
# Below, for educational purposes, the original query until Dingos 0.2.0, which got *really*
    # slow with lots of objects in the system. The reason for this is the last three lines:
    # the exclude-statement required the database to count the number of values for each
    # Fact in the system... but we are really only interested in facts with the same
# fact_term, value_iobject_id and value_iobject_ts...
#matching_facts = Fact.objects.filter(fact_values__in=value_objects). \
# annotate(num_values=Count('fact_values')). \
# filter(num_values=len(value_objects)). \
# filter(value_iobject_id=value_iobject_id). \
# filter(value_iobject_ts=value_iobject_ts). \
# filter(fact_term=fact_term). \
# exclude(id__in= \
# Fact.objects.annotate(total_values=Count('fact_values')). \
# filter(total_values__gt=len(value_objects)))
created = True
try:
fact_obj = matching_facts[0]
created = False
logger.debug("FOUND MATCHING OBJECT with pk %s" % fact_obj.pk)
except:
fact_obj = dingos_class_map['Fact'].objects.create(fact_term=fact_term,
value_iobject_id=value_iobject_id,
value_iobject_ts=value_iobject_ts,
)
fact_obj.fact_values.add(*value_objects)
fact_obj.save()
return fact_obj, created
|
[
"def",
"get_or_create_fact",
"(",
"fact_term",
",",
"fact_dt_name",
"=",
"'String'",
",",
"fact_dt_namespace_uri",
"=",
"DINGOS_NAMESPACE_URI",
",",
"values",
"=",
"None",
",",
"value_iobject_id",
"=",
"None",
",",
"value_iobject_ts",
"=",
"None",
",",
")",
":",
"if",
"not",
"values",
":",
"values",
"=",
"[",
"]",
"vocab_namespace",
",",
"created",
"=",
"dingos_class_map",
"[",
"'DataTypeNameSpace'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"uri",
"=",
"fact_dt_namespace_uri",
")",
"fact_data_type",
",",
"created",
"=",
"dingos_class_map",
"[",
"'FactDataType'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"name",
"=",
"fact_dt_name",
",",
"namespace",
"=",
"vocab_namespace",
")",
"# Maybe we already have a fact with exactly the same fact term and the same fact values?",
"# We start by looking at the number of values",
"value_objects",
"=",
"[",
"]",
"for",
"value",
"in",
"values",
":",
"storage_location",
"=",
"dingos",
".",
"DINGOS_VALUES_TABLE",
"# collect (create or get) the required value objects",
"if",
"value",
"==",
"None",
":",
"value",
"=",
"''",
"if",
"isinstance",
"(",
"value",
",",
"tuple",
")",
":",
"# If a value is wrapped in a tuple, the second component of the tuple",
"# specifies the storage location of the value.",
"value",
",",
"storage_location",
"=",
"value",
"if",
"storage_location",
"==",
"dingos",
".",
"DINGOS_VALUES_TABLE",
":",
"# If the value is larger than a given size, the value is written to disk, instead.",
"# We use this to keep too large values out of the database. Depending on how the",
"# database is set up, this may be necessary to allow indexing, which in turn is",
"# required to check uniqueness on values.",
"if",
"len",
"(",
"value",
")",
">",
"dingos",
".",
"DINGOS_MAX_VALUE_SIZE_WRITTEN_TO_VALUE_TABLE",
":",
"(",
"value_hash",
",",
"storage_location",
")",
"=",
"write_large_value",
"(",
"value",
")",
"value",
"=",
"value_hash",
"fact_value",
",",
"created",
"=",
"dingos_class_map",
"[",
"'FactValue'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"value",
"=",
"value",
",",
"fact_data_type",
"=",
"fact_data_type",
",",
"storage_location",
"=",
"storage_location",
")",
"value_objects",
".",
"append",
"(",
"fact_value",
")",
"# Do we already have a fact with given fact term and given values?",
"#",
"# For understanding the query below better, see https://groups.google.com/forum/#!topic/django-users/X9TCSrBn57Y.",
"# The double query is necessary, because the first count counts the number of selected",
"# fact_value objects, not the number of total objects for each fact.",
"possibly_matching_facts",
"=",
"Fact",
".",
"objects",
".",
"filter",
"(",
"fact_values__in",
"=",
"value_objects",
",",
"value_iobject_id",
"=",
"value_iobject_id",
",",
"value_iobject_ts",
"=",
"value_iobject_ts",
",",
"fact_term",
"=",
"fact_term",
")",
".",
"values_list",
"(",
"'pk'",
",",
"flat",
"=",
"True",
")",
"matching_facts",
"=",
"Fact",
".",
"objects",
".",
"filter",
"(",
"pk__in",
"=",
"list",
"(",
"possibly_matching_facts",
")",
")",
".",
"annotate",
"(",
"num_values",
"=",
"Count",
"(",
"'fact_values'",
")",
")",
".",
"filter",
"(",
"num_values",
"=",
"len",
"(",
"value_objects",
")",
")",
".",
"exclude",
"(",
"id__in",
"=",
"Fact",
".",
"objects",
".",
"filter",
"(",
"pk__in",
"=",
"possibly_matching_facts",
")",
".",
"annotate",
"(",
"total_values",
"=",
"Count",
"(",
"'fact_values'",
")",
")",
".",
"filter",
"(",
"total_values__gt",
"=",
"len",
"(",
"value_objects",
")",
")",
")",
"# Below, for educational purposes, the original query until Dingos 0.2.0, which got *really*",
"# slow with lot's of objects in the system. The reason for this are the last three lines:",
"# the exclude-statement required the database to count the the number of values for each",
"# Fact in the system... but we are really only interested into facts with the same",
"# fact_term, value_iobject_id and value_iobject_ts...",
"#matching_facts = Fact.objects.filter(fact_values__in=value_objects). \\",
"# annotate(num_values=Count('fact_values')). \\",
"# filter(num_values=len(value_objects)). \\",
"# filter(value_iobject_id=value_iobject_id). \\",
"# filter(value_iobject_ts=value_iobject_ts). \\",
"# filter(fact_term=fact_term). \\",
"# exclude(id__in= \\",
"# Fact.objects.annotate(total_values=Count('fact_values')). \\",
"# filter(total_values__gt=len(value_objects)))",
"created",
"=",
"True",
"try",
":",
"fact_obj",
"=",
"matching_facts",
"[",
"0",
"]",
"created",
"=",
"False",
"logger",
".",
"debug",
"(",
"\"FOUND MATCHING OBJECT with pk %s\"",
"%",
"fact_obj",
".",
"pk",
")",
"except",
":",
"fact_obj",
"=",
"dingos_class_map",
"[",
"'Fact'",
"]",
".",
"objects",
".",
"create",
"(",
"fact_term",
"=",
"fact_term",
",",
"value_iobject_id",
"=",
"value_iobject_id",
",",
"value_iobject_ts",
"=",
"value_iobject_ts",
",",
")",
"fact_obj",
".",
"fact_values",
".",
"add",
"(",
"*",
"value_objects",
")",
"fact_obj",
".",
"save",
"(",
")",
"return",
"fact_obj",
",",
"created"
] |
Get or create a fact object.
|
[
"Get",
"or",
"create",
"a",
"fact",
"object",
"."
] |
7154f75b06d2538568e2f2455a76f3d0db0b7d70
|
https://github.com/siemens/django-dingos/blob/7154f75b06d2538568e2f2455a76f3d0db0b7d70/dingos/models.py#L1943-L2047
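The performance fix above narrows the expensive per-fact counting to a small candidate set first; values_list('pk', flat=True) keeps that set as bare primary keys. A sketch of the narrowing step, with names as in the function:

candidate_pks = (Fact.objects
                 .filter(fact_values__in=value_objects,
                         value_iobject_id=value_iobject_id,
                         value_iobject_ts=value_iobject_ts,
                         fact_term=fact_term)
                 .values_list('pk', flat=True))
# The Count() annotations then run only over list(candidate_pks), not over
# every Fact row in the table, which is what made the pre-0.2.0 query slow.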
|
238,627
|
siemens/django-dingos
|
dingos/models.py
|
get_or_create_fact_term
|
def get_or_create_fact_term(iobject_family_name,
fact_term_name,
fact_term_attribute,
iobject_type_name,
iobject_type_namespace_uri,
fact_dt_name=DINGOS_DEFAULT_FACT_DATATYPE,
fact_dt_namespace_name=None,
fact_dt_kind=FactDataType.UNKNOWN_KIND,
fact_dt_namespace_uri=DINGOS_NAMESPACE_URI,
dingos_class_map=dingos_class_map
):
"""
Get or create a fact term.
"""
if not fact_term_attribute:
fact_term_attribute = ''
# create or retrieve the enrichment type and revision
iobject_family, created = dingos_class_map['InfoObjectFamily'].objects.get_or_create(name=iobject_family_name)
# create or retrieve namespace of data type
fact_dt_namespace, created = dingos_class_map['DataTypeNameSpace'].objects.get_or_create(uri=fact_dt_namespace_uri)
# create or retrieve namespace of the infoobject type
iobject_type_namespace, created = dingos_class_map['DataTypeNameSpace'].objects.get_or_create(uri=iobject_type_namespace_uri)
if created and fact_dt_namespace_name:
fact_dt_namespace.name = fact_dt_namespace_name
fact_dt_namespace.save()
# create or retrieve the fact-value data type object
fact_dt, created = dingos_class_map['FactDataType'].objects.get_or_create(name=fact_dt_name,
namespace=fact_dt_namespace)
if created:
fact_dt.kind = fact_dt_kind
fact_dt.save()
    # create or retrieve the iobject type
iobject_type, created = dingos_class_map['InfoObjectType'].objects.get_or_create(name=iobject_type_name,
iobject_family=iobject_family,
namespace=iobject_type_namespace)
fact_term, created = dingos_class_map['FactTerm'].objects.get_or_create(term=fact_term_name,
attribute=fact_term_attribute)
fact_term_2_type, dummy = dingos_class_map['FactTerm2Type'].objects.get_or_create(fact_term=fact_term,
iobject_type=iobject_type,
)
fact_term_2_type.fact_data_types.add(fact_dt)
fact_term_2_type.save()
return fact_term, created
|
python
|
def get_or_create_fact_term(iobject_family_name,
fact_term_name,
fact_term_attribute,
iobject_type_name,
iobject_type_namespace_uri,
fact_dt_name=DINGOS_DEFAULT_FACT_DATATYPE,
fact_dt_namespace_name=None,
fact_dt_kind=FactDataType.UNKNOWN_KIND,
fact_dt_namespace_uri=DINGOS_NAMESPACE_URI,
dingos_class_map=dingos_class_map
):
"""
Get or create a fact term.
"""
if not fact_term_attribute:
fact_term_attribute = ''
# create or retrieve the enrichment type and revision
iobject_family, created = dingos_class_map['InfoObjectFamily'].objects.get_or_create(name=iobject_family_name)
# create or retrieve namespace of data type
fact_dt_namespace, created = dingos_class_map['DataTypeNameSpace'].objects.get_or_create(uri=fact_dt_namespace_uri)
# create or retrieve namespace of the infoobject type
iobject_type_namespace, created = dingos_class_map['DataTypeNameSpace'].objects.get_or_create(uri=iobject_type_namespace_uri)
if created and fact_dt_namespace_name:
fact_dt_namespace.name = fact_dt_namespace_name
fact_dt_namespace.save()
# create or retrieve the fact-value data type object
fact_dt, created = dingos_class_map['FactDataType'].objects.get_or_create(name=fact_dt_name,
namespace=fact_dt_namespace)
if created:
fact_dt.kind = fact_dt_kind
fact_dt.save()
    # create or retrieve the iobject type
iobject_type, created = dingos_class_map['InfoObjectType'].objects.get_or_create(name=iobject_type_name,
iobject_family=iobject_family,
namespace=iobject_type_namespace)
fact_term, created = dingos_class_map['FactTerm'].objects.get_or_create(term=fact_term_name,
attribute=fact_term_attribute)
fact_term_2_type, dummy = dingos_class_map['FactTerm2Type'].objects.get_or_create(fact_term=fact_term,
iobject_type=iobject_type,
)
fact_term_2_type.fact_data_types.add(fact_dt)
fact_term_2_type.save()
return fact_term, created
|
[
"def",
"get_or_create_fact_term",
"(",
"iobject_family_name",
",",
"fact_term_name",
",",
"fact_term_attribute",
",",
"iobject_type_name",
",",
"iobject_type_namespace_uri",
",",
"fact_dt_name",
"=",
"DINGOS_DEFAULT_FACT_DATATYPE",
",",
"fact_dt_namespace_name",
"=",
"None",
",",
"fact_dt_kind",
"=",
"FactDataType",
".",
"UNKNOWN_KIND",
",",
"fact_dt_namespace_uri",
"=",
"DINGOS_NAMESPACE_URI",
",",
"dingos_class_map",
"=",
"dingos_class_map",
")",
":",
"if",
"not",
"fact_term_attribute",
":",
"fact_term_attribute",
"=",
"''",
"# create or retrieve the enrichment type and revision",
"iobject_family",
",",
"created",
"=",
"dingos_class_map",
"[",
"'InfoObjectFamily'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"name",
"=",
"iobject_family_name",
")",
"# create or retrieve namespace of data type",
"fact_dt_namespace",
",",
"created",
"=",
"dingos_class_map",
"[",
"'DataTypeNameSpace'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"uri",
"=",
"fact_dt_namespace_uri",
")",
"# create or retrieve namespace of the infoobject type",
"iobject_type_namespace",
",",
"created",
"=",
"dingos_class_map",
"[",
"'DataTypeNameSpace'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"uri",
"=",
"iobject_type_namespace_uri",
")",
"if",
"created",
"and",
"fact_dt_namespace_name",
":",
"fact_dt_namespace",
".",
"name",
"=",
"fact_dt_namespace_name",
"fact_dt_namespace",
".",
"save",
"(",
")",
"# create or retrieve the fact-value data type object",
"fact_dt",
",",
"created",
"=",
"dingos_class_map",
"[",
"'FactDataType'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"name",
"=",
"fact_dt_name",
",",
"namespace",
"=",
"fact_dt_namespace",
")",
"if",
"created",
":",
"fact_dt",
".",
"kind",
"=",
"fact_dt_kind",
"fact_dt",
".",
"save",
"(",
")",
"# create or retreive the iobject type",
"iobject_type",
",",
"created",
"=",
"dingos_class_map",
"[",
"'InfoObjectType'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"name",
"=",
"iobject_type_name",
",",
"iobject_family",
"=",
"iobject_family",
",",
"namespace",
"=",
"iobject_type_namespace",
")",
"fact_term",
",",
"created",
"=",
"dingos_class_map",
"[",
"'FactTerm'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"term",
"=",
"fact_term_name",
",",
"attribute",
"=",
"fact_term_attribute",
")",
"fact_term_2_type",
",",
"dummy",
"=",
"dingos_class_map",
"[",
"'FactTerm2Type'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"fact_term",
"=",
"fact_term",
",",
"iobject_type",
"=",
"iobject_type",
",",
")",
"fact_term_2_type",
".",
"fact_data_types",
".",
"add",
"(",
"fact_dt",
")",
"fact_term_2_type",
".",
"save",
"(",
")",
"return",
"fact_term",
",",
"created"
] |
Get or create a fact term.
|
[
"Get",
"or",
"create",
"a",
"fact",
"term",
"."
] |
7154f75b06d2538568e2f2455a76f3d0db0b7d70
|
https://github.com/siemens/django-dingos/blob/7154f75b06d2538568e2f2455a76f3d0db0b7d70/dingos/models.py#L2050-L2109
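The final linkage is a many-to-many accumulation: FactTerm2Type rows are unique per (fact_term, iobject_type) pair, and data types attach via add(), which is idempotent. Schematically, with names from the code above:

link, _ = dingos_class_map['FactTerm2Type'].objects.get_or_create(
    fact_term=fact_term, iobject_type=iobject_type)
link.fact_data_types.add(fact_dt)   # re-adding the same data type is a no-op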
|
238,628
|
siemens/django-dingos
|
dingos/models.py
|
InfoObject2Fact.marking_thru
|
def marking_thru(self):
"""
Return the back-pointer to markings that
may have been attached via Django's content type mechanism.
"""
self_django_type = ContentType.objects.get_for_model(self)
return Marking2X.objects.filter(content_type__pk=self_django_type.id,
object_id=self.id)
|
python
|
def marking_thru(self):
"""
Return the back-pointer to markings that
may have been attached via Django's content type mechanism.
"""
self_django_type = ContentType.objects.get_for_model(self)
return Marking2X.objects.filter(content_type__pk=self_django_type.id,
object_id=self.id)
|
[
"def",
"marking_thru",
"(",
"self",
")",
":",
"self_django_type",
"=",
"ContentType",
".",
"objects",
".",
"get_for_model",
"(",
"self",
")",
"return",
"Marking2X",
".",
"objects",
".",
"filter",
"(",
"content_type__pk",
"=",
"self_django_type",
".",
"id",
",",
"object_id",
"=",
"self",
".",
"id",
")"
] |
Return the back-pointer to markings that
may have been attached via Django's content type mechanism.
|
[
"Return",
"the",
"back",
"-",
"pointer",
"to",
"markings",
"that",
"may",
"have",
"been",
"attached",
"via",
"Django",
"s",
"content",
"type",
"mechanism",
"."
] |
7154f75b06d2538568e2f2455a76f3d0db0b7d70
|
https://github.com/siemens/django-dingos/blob/7154f75b06d2538568e2f2455a76f3d0db0b7d70/dingos/models.py#L687-L694
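The reverse lookup above is the standard Django contenttypes pattern: resolve the model's ContentType once, then filter the generic table on (content_type, object_id). A sketch for any saved model instance obj, with Marking2X as imported in dingos.models:

from django.contrib.contenttypes.models import ContentType

ct = ContentType.objects.get_for_model(obj)
markings = Marking2X.objects.filter(content_type__pk=ct.id, object_id=obj.id)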
|
238,629
|
siemens/django-dingos
|
dingos/models.py
|
InfoObject.embedded_in
|
def embedded_in(self):
"""
        Used in the view for the InfoObject (in order to be able to use the standard class-based object view).
        Should be removed from here and put into a proper custom view for the object.
        This query only returns embedding objects of the latest revision: to change
        this, the filter iobject__timestamp=F('iobject__identifier__latest__timestamp') must
        be removed.
"""
return self._DCM['InfoObject2Fact']. \
objects. \
filter(fact__value_iobject_id=self.identifier). \
filter(iobject__timestamp=F('iobject__identifier__latest__timestamp')). \
order_by('-iobject__timestamp') \
.values_list(
'iobject',
'iobject__identifier__namespace__uri',
'iobject__identifier__uid',
'iobject__timestamp',
'iobject__name',
'fact__value_iobject_ts',
'fact__fact_term__term',
'node_id__name').distinct()
|
python
|
def embedded_in(self):
"""
Used in the view for the InfoObject (in order to be able to use the standard class-based object view).
Should be removed from here and put into a proper custom view for the object.
This query only returns embedding objects of the latest revision: to change
this, the filter 'iobject__timestamp=F('iobject__identifier__latest__timestamp' must
be removed.
"""
return self._DCM['InfoObject2Fact']. \
objects. \
filter(fact__value_iobject_id=self.identifier). \
filter(iobject__timestamp=F('iobject__identifier__latest__timestamp')). \
order_by('-iobject__timestamp') \
.values_list(
'iobject',
'iobject__identifier__namespace__uri',
'iobject__identifier__uid',
'iobject__timestamp',
'iobject__name',
'fact__value_iobject_ts',
'fact__fact_term__term',
'node_id__name').distinct()
|
[
"def",
"embedded_in",
"(",
"self",
")",
":",
"return",
"self",
".",
"_DCM",
"[",
"'InfoObject2Fact'",
"]",
".",
"objects",
".",
"filter",
"(",
"fact__value_iobject_id",
"=",
"self",
".",
"identifier",
")",
".",
"filter",
"(",
"iobject__timestamp",
"=",
"F",
"(",
"'iobject__identifier__latest__timestamp'",
")",
")",
".",
"order_by",
"(",
"'-iobject__timestamp'",
")",
".",
"values_list",
"(",
"'iobject'",
",",
"'iobject__identifier__namespace__uri'",
",",
"'iobject__identifier__uid'",
",",
"'iobject__timestamp'",
",",
"'iobject__name'",
",",
"'fact__value_iobject_ts'",
",",
"'fact__fact_term__term'",
",",
"'node_id__name'",
")",
".",
"distinct",
"(",
")"
] |
Used in the view for the InfoObject (in order to be able to use the standard class-based object view).
Should be removed from here and put into a proper custom view for the object.
This query only returns embedding objects of the latest revision: to change
this, the filter 'iobject__timestamp=F('iobject__identifier__latest__timestamp' must
be removed.
|
[
"Used",
"in",
"the",
"view",
"for",
"the",
"InfoObject",
"(",
"in",
"order",
"to",
"be",
"able",
"to",
"use",
"the",
"standard",
"class",
"-",
"based",
"object",
"view",
".",
"Should",
"be",
"removed",
"from",
"here",
"and",
"put",
"into",
"a",
"proper",
"custom",
"view",
"for",
"the",
"object",
"."
] |
7154f75b06d2538568e2f2455a76f3d0db0b7d70
|
https://github.com/siemens/django-dingos/blob/7154f75b06d2538568e2f2455a76f3d0db0b7d70/dingos/models.py#L946-L969
|
238,630
|
siemens/django-dingos
|
dingos/models.py
|
InfoObject.set_name
|
def set_name(self,name=None):
"""
Set the name of the object. If no name is given, the
name is extracted via the extract_name method.
"""
if name:
self.name = name[:254]
else:
self.name = self.extract_name()[:254]
self.save()
return self.name
|
python
|
def set_name(self,name=None):
"""
Set the name of the object. If no name is given, the
name is extracted via the extract_name method.
"""
if name:
self.name = name[:254]
else:
self.name = self.extract_name()[:254]
self.save()
return self.name
|
[
"def",
"set_name",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
":",
"self",
".",
"name",
"=",
"name",
"[",
":",
"254",
"]",
"else",
":",
"self",
".",
"name",
"=",
"self",
".",
"extract_name",
"(",
")",
"[",
":",
"254",
"]",
"self",
".",
"save",
"(",
")",
"return",
"self",
".",
"name"
] |
Set the name of the object. If no name is given, the
name is extracted via the extract_name method.
|
[
"Set",
"the",
"name",
"of",
"the",
"object",
".",
"If",
"no",
"name",
"is",
"given",
"the",
"name",
"is",
"extracted",
"via",
"the",
"extract_name",
"method",
"."
] |
7154f75b06d2538568e2f2455a76f3d0db0b7d70
|
https://github.com/siemens/django-dingos/blob/7154f75b06d2538568e2f2455a76f3d0db0b7d70/dingos/models.py#L1496-L1508
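A standalone sketch of the truncate-to-254 behavior (extract_name here is a hypothetical fallback; the real method derives the name from the object's facts, and the real set_name also persists via save()):

class Obj:
    def __init__(self):
        self.name = ''

    def extract_name(self):
        return 'derived-' + 'x' * 300  # deliberately longer than 254

    def set_name(self, name=None):
        # same logic as above, minus the ORM save()
        self.name = (name if name else self.extract_name())[:254]
        return self.name

o = Obj()
print(len(o.set_name()))    # 254 -- long derived names are clipped
print(o.set_name('short'))  # 'short'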
|
238,631
|
siemens/django-dingos
|
dingos/models.py
|
InfoObject.add_relation
|
def add_relation(self,
target_id=None,
relation_types=None,
fact_dt_namespace_name=None,
fact_dt_namespace_uri=DINGOS_NAMESPACE_URI,
fact_dt_kind=FactDataType.UNKNOWN_KIND,
fact_dt_name='String',
metadata_dict=None,
markings=None
):
"""
Add a relationship between this object and another object.
"""
if not markings:
markings = []
if relation_types is None:
relation_types = []
# Create fact-term for relation types
relation_type_ft, created = get_or_create_fact_term(iobject_family_name=self.iobject_family.name,
fact_term_name=DINGOS_RELATION_TYPE_FACTTERM_NAME,
iobject_type_name=self.iobject_type.name,
iobject_type_namespace_uri=self.iobject_type.namespace.uri,
fact_dt_name=fact_dt_name,
fact_dt_namespace_name=fact_dt_namespace_name,
fact_dt_kind=fact_dt_kind,
fact_dt_namespace_uri=fact_dt_namespace_uri)
# Create fact containing relation types
relation_type_fact, created = get_or_create_fact(fact_term=relation_type_ft,
fact_dt_name=fact_dt_name,
fact_dt_namespace_uri=fact_dt_namespace_uri,
values=relation_types,
value_iobject_id=None,
value_iobject_ts=None,
)
rel_target_id = target_id
rel_source_id = self.identifier
# Create relation object
relation, created = self._DCM['Relation'].objects.get_or_create(
source_id=rel_source_id,
target_id=rel_target_id,
relation_type=relation_type_fact)
# Add markings
for marking in markings:
Marking2X.objects.create(marked=relation,
marking=marking)
if metadata_dict:
# If the relation already existed and had associated metadata,
# we retrieve the identifier of that metadata object and
# write the current metadata as new revision. Otherwise,
# we create a new identifier.
if relation.metadata_id:
rel_identifier_uid = relation.metadata_id.uid
rel_identifier_namespace_uri = relation.metadata_id.namespace.uri
else:
rel_identifier_uid = None
rel_identifier_namespace_uri = DINGOS_ID_NAMESPACE_URI
metadata_iobject, created = get_or_create_iobject(identifier_uid=rel_identifier_uid,
identifier_namespace_uri=rel_identifier_namespace_uri,
iobject_type_name=DINGOS_RELATION_METADATA_OBJECT_TYPE_NAME,
iobject_type_namespace_uri=DINGOS_NAMESPACE_URI,
iobject_type_revision_name=DINGOS_REVISION_NAME,
iobject_family_name=DINGOS_IOBJECT_FAMILY_NAME,
iobject_family_revision_name=DINGOS_REVISION_NAME,
timestamp=None,
overwrite=False)
metadata_iobject.from_dict(metadata_dict)
return relation
|
python
|
def add_relation(self,
target_id=None,
relation_types=None,
fact_dt_namespace_name=None,
fact_dt_namespace_uri=DINGOS_NAMESPACE_URI,
fact_dt_kind=FactDataType.UNKNOWN_KIND,
fact_dt_name='String',
metadata_dict=None,
markings=None
):
"""
Add a relationship between this object and another object.
"""
if not markings:
markings = []
if relation_types is None:
relation_types = []
# Create fact-term for relation types
relation_type_ft, created = get_or_create_fact_term(iobject_family_name=self.iobject_family.name,
fact_term_name=DINGOS_RELATION_TYPE_FACTTERM_NAME,
iobject_type_name=self.iobject_type.name,
iobject_type_namespace_uri=self.iobject_type.namespace.uri,
fact_dt_name=fact_dt_name,
fact_dt_namespace_name=fact_dt_namespace_name,
fact_dt_kind=fact_dt_kind,
fact_dt_namespace_uri=fact_dt_namespace_uri)
# Create fact containing relation types
relation_type_fact, created = get_or_create_fact(fact_term=relation_type_ft,
fact_dt_name=fact_dt_name,
fact_dt_namespace_uri=fact_dt_namespace_uri,
values=relation_types,
value_iobject_id=None,
value_iobject_ts=None,
)
rel_target_id = target_id
rel_source_id = self.identifier
# Create relation object
relation, created = self._DCM['Relation'].objects.get_or_create(
source_id=rel_source_id,
target_id=rel_target_id,
relation_type=relation_type_fact)
# Add markings
for marking in markings:
Marking2X.objects.create(marked=relation,
marking=marking)
if metadata_dict:
# If the relation already existed and had associated metadata,
# we retrieve the identifier of that metadata object and
# write the current metadata as new revision. Otherwise,
# we create a new identifier.
if relation.metadata_id:
rel_identifier_uid = relation.metadata_id.uid
rel_identifier_namespace_uri = relation.metadata_id.namespace.uri
else:
rel_identifier_uid = None
rel_identifier_namespace_uri = DINGOS_ID_NAMESPACE_URI
metadata_iobject, created = get_or_create_iobject(identifier_uid=rel_identifier_uid,
identifier_namespace_uri=rel_identifier_namespace_uri,
iobject_type_name=DINGOS_RELATION_METADATA_OBJECT_TYPE_NAME,
iobject_type_namespace_uri=DINGOS_NAMESPACE_URI,
iobject_type_revision_name=DINGOS_REVISION_NAME,
iobject_family_name=DINGOS_IOBJECT_FAMILY_NAME,
iobject_family_revision_name=DINGOS_REVISION_NAME,
timestamp=None,
overwrite=False)
metadata_iobject.from_dict(metadata_dict)
return relation
|
[
"def",
"add_relation",
"(",
"self",
",",
"target_id",
"=",
"None",
",",
"relation_types",
"=",
"None",
",",
"fact_dt_namespace_name",
"=",
"None",
",",
"fact_dt_namespace_uri",
"=",
"DINGOS_NAMESPACE_URI",
",",
"fact_dt_kind",
"=",
"FactDataType",
".",
"UNKNOWN_KIND",
",",
"fact_dt_name",
"=",
"'String'",
",",
"metadata_dict",
"=",
"None",
",",
"markings",
"=",
"None",
")",
":",
"if",
"not",
"markings",
":",
"markings",
"=",
"[",
"]",
"if",
"relation_types",
"==",
"None",
":",
"relation_types",
"=",
"[",
"]",
"# Create fact-term for relation types",
"relation_type_ft",
",",
"created",
"=",
"get_or_create_fact_term",
"(",
"iobject_family_name",
"=",
"self",
".",
"iobject_family",
".",
"name",
",",
"fact_term_name",
"=",
"DINGOS_RELATION_TYPE_FACTTERM_NAME",
",",
"iobject_type_name",
"=",
"self",
".",
"iobject_type",
".",
"name",
",",
"iobject_type_namespace_uri",
"=",
"self",
".",
"iobject_type",
".",
"namespace",
".",
"uri",
",",
"fact_dt_name",
"=",
"fact_dt_name",
",",
"fact_dt_namespace_name",
"=",
"fact_dt_namespace_name",
",",
"fact_dt_kind",
"=",
"fact_dt_kind",
",",
"fact_dt_namespace_uri",
"=",
"fact_dt_namespace_uri",
")",
"# Create fact containing relation types",
"relation_type_fact",
",",
"created",
"=",
"get_or_create_fact",
"(",
"fact_term",
"=",
"relation_type_ft",
",",
"fact_dt_name",
"=",
"fact_dt_name",
",",
"fact_dt_namespace_uri",
"=",
"fact_dt_namespace_uri",
",",
"values",
"=",
"relation_types",
",",
"value_iobject_id",
"=",
"None",
",",
"value_iobject_ts",
"=",
"None",
",",
")",
"rel_target_id",
"=",
"target_id",
"rel_source_id",
"=",
"self",
".",
"identifier",
"# Create relation object",
"relation",
",",
"created",
"=",
"self",
".",
"_DCM",
"[",
"'Relation'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"source_id",
"=",
"rel_source_id",
",",
"target_id",
"=",
"rel_target_id",
",",
"relation_type",
"=",
"relation_type_fact",
")",
"# Add markings",
"for",
"marking",
"in",
"markings",
":",
"Marking2X",
".",
"objects",
".",
"create",
"(",
"marked",
"=",
"relation",
",",
"marking",
"=",
"marking",
")",
"if",
"metadata_dict",
":",
"# If the relation already existed and had associated metadata,",
"# we retrieve the identifier of that metadata object and",
"# write the current metadata as new revision. Otherwise,",
"# we create a new identifier.",
"if",
"relation",
".",
"metadata_id",
":",
"rel_identifier_uid",
"=",
"relation",
".",
"metadata_id",
".",
"uid",
"rel_identifier_namespace_uri",
"=",
"relation",
".",
"metadata_id",
".",
"namespace",
".",
"uri",
"else",
":",
"rel_identifier_uid",
"=",
"None",
"rel_identifier_namespace_uri",
"=",
"DINGOS_ID_NAMESPACE_URI",
"metadata_iobject",
",",
"created",
"=",
"get_or_create_iobject",
"(",
"identifier_uid",
"=",
"rel_identifier_uid",
",",
"identifier_namespace_uri",
"=",
"rel_identifier_namespace_uri",
",",
"iobject_type_name",
"=",
"DINGOS_RELATION_METADATA_OBJECT_TYPE_NAME",
",",
"iobject_type_namespace_uri",
"=",
"DINGOS_NAMESPACE_URI",
",",
"iobject_type_revision_name",
"=",
"DINGOS_REVISION_NAME",
",",
"iobject_family_name",
"=",
"DINGOS_IOBJECT_FAMILY_NAME",
",",
"iobject_family_revision_name",
"=",
"DINGOS_REVISION_NAME",
",",
"timestamp",
"=",
"None",
",",
"overwrite",
"=",
"False",
")",
"metadata_iobject",
".",
"from_dict",
"(",
"metadata_dict",
")",
"return",
"relation"
] |
Add a relationship between this object and another object.
|
[
"Add",
"a",
"relationship",
"between",
"this",
"object",
"and",
"another",
"object",
"."
] |
7154f75b06d2538568e2f2455a76f3d0db0b7d70
|
https://github.com/siemens/django-dingos/blob/7154f75b06d2538568e2f2455a76f3d0db0b7d70/dingos/models.py#L1510-L1586
|
238,632
|
PureWhiteWu/pyformatter
|
pyformatter/core.py
|
Formatter._load_data
|
def _load_data(self):
"""
Load data from raw_data or file_path
"""
if self.raw_data is None and self.data_format is not FormatType.PYTHON:
if self.file_path is None:
raise ArgumentInvalid('One of "raw_data" or "file_path" should be set!')
if not os.path.isfile(self.file_path) or not os.access(self.file_path, os.R_OK):
raise ArgumentInvalid('"file_path" should be a valid path to an existing file with read permission!')
with open(self.file_path) as f:
self.raw_data = f.read()
|
python
|
def _load_data(self):
"""
Load data from raw_data or file_path
"""
if self.raw_data is None and self.data_format is not FormatType.PYTHON:
if self.file_path is None:
raise ArgumentInvalid('One of "raw_data" or "file_path" should be set!')
if not os.path.isfile(self.file_path) or not os.access(self.file_path, os.R_OK):
raise ArgumentInvalid('"file_path" should be a valid path to an existing file with read permission!')
with open(self.file_path) as f:
self.raw_data = f.read()
|
[
"def",
"_load_data",
"(",
"self",
")",
":",
"if",
"self",
".",
"raw_data",
"is",
"None",
"and",
"self",
".",
"data_format",
"is",
"not",
"FormatType",
".",
"PYTHON",
":",
"if",
"self",
".",
"file_path",
"is",
"None",
":",
"raise",
"ArgumentInvalid",
"(",
"'One of \"raw_data\" or \"file_path\" should be set!'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"file_path",
")",
"or",
"not",
"os",
".",
"access",
"(",
"self",
".",
"file_path",
",",
"os",
".",
"R_OK",
")",
":",
"raise",
"ArgumentInvalid",
"(",
"'\"file_path\" should be a valid path to an exist file with read permission!'",
")",
"with",
"open",
"(",
"self",
".",
"file_path",
")",
"as",
"f",
":",
"self",
".",
"raw_data",
"=",
"f",
".",
"read",
"(",
")"
] |
Load data from raw_data or file_path
|
[
"Load",
"data",
"from",
"raw_data",
"or",
"file_path"
] |
c3cc9f48d0e251919542e87524900d6ef8d16d82
|
https://github.com/PureWhiteWu/pyformatter/blob/c3cc9f48d0e251919542e87524900d6ef8d16d82/pyformatter/core.py#L42-L52
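A self-contained sketch of the same precedence rules (raw_data wins; otherwise file_path must point to an existing, readable file), with minimal stand-ins for FormatType and ArgumentInvalid:

import enum
import os
import tempfile

class FormatType(enum.Enum):
    PYTHON = 'python'
    JSON = 'json'
    YAML = 'yaml'

class ArgumentInvalid(Exception):
    pass

def load_data(raw_data, file_path, data_format):
    # only read the file when no raw_data was supplied and the
    # format is not already a live Python object
    if raw_data is None and data_format is not FormatType.PYTHON:
        if file_path is None:
            raise ArgumentInvalid('One of "raw_data" or "file_path" should be set!')
        if not os.path.isfile(file_path) or not os.access(file_path, os.R_OK):
            raise ArgumentInvalid('"file_path" should be a valid path to an existing file with read permission!')
        with open(file_path) as f:
            raw_data = f.read()
    return raw_data

with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as tmp:
    tmp.write('{"a": 1}')
print(load_data(None, tmp.name, FormatType.JSON))  # read from the file
print(load_data('{}', None, FormatType.JSON))      # raw_data takes precedence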
|
238,633
|
PureWhiteWu/pyformatter
|
pyformatter/core.py
|
Formatter._validate
|
def _validate(self):
"""
Validate the input data.
"""
if self.data_format is FormatType.PYTHON:
self.data = self.raw_data
elif self.data_format is FormatType.JSON:
self._validate_json()
elif self.data_format is FormatType.YAML:
self._validate_yaml()
|
python
|
def _validate(self):
"""
Validate the input data.
"""
if self.data_format is FormatType.PYTHON:
self.data = self.raw_data
elif self.data_format is FormatType.JSON:
self._validate_json()
elif self.data_format is FormatType.YAML:
self._validate_yaml()
|
[
"def",
"_validate",
"(",
"self",
")",
":",
"if",
"self",
".",
"data_format",
"is",
"FormatType",
".",
"PYTHON",
":",
"self",
".",
"data",
"=",
"self",
".",
"raw_data",
"elif",
"self",
".",
"data_format",
"is",
"FormatType",
".",
"JSON",
":",
"self",
".",
"_validate_json",
"(",
")",
"elif",
"self",
".",
"data_format",
"is",
"FormatType",
".",
"YAML",
":",
"self",
".",
"_validate_yaml",
"(",
")"
] |
Validate the input data.
|
[
"Validate",
"the",
"input",
"data",
"."
] |
c3cc9f48d0e251919542e87524900d6ef8d16d82
|
https://github.com/PureWhiteWu/pyformatter/blob/c3cc9f48d0e251919542e87524900d6ef8d16d82/pyformatter/core.py#L54-L63
|
238,634
|
demurgos/py-pathmatch
|
pathmatch/wildmatch.py
|
match
|
def match(pattern, text, no_escape=False, path_name=True, wild_star=True, period=False,
case_fold=False):
u"""
Matches text against the supplied wildmatch pattern.
To get git's behavior, use the `wild_star` flag.
Note that the EXTMATCH (ksh extended glob patterns) option is not available
:type pattern: text_type
:param pattern: A wildmatch pattern
:type text: text_type
:param text: The text to match
:type no_escape: bool
:param no_escape: Disable backslash escaping
:type path_name: bool
:param path_name: Separator (slash) in text cannot be matched by an asterisk, question-mark nor
bracket expression in pattern (only a literal).
:type wild_star: bool
:param wild_star: A True value forces the `path_name` flag to True. This allows the
double-asterisk `**` to match any (0 to many) number of directories
:type period: bool
:param period: A leading period in text cannot be matched by an asterisk, question-mark nor
bracket expression in pattern (only a literal). A period is "leading" if:
- it is the first character of `text`
OR
- path_name (or wild_star) is True and the previous character is a slash
:type case_fold: bool
:param case_fold: Perform a case insensitive match (GNU Extension)
:rtype: bool
:return: Result of the match
"""
regex = translate(pattern, no_escape=no_escape, path_name=path_name, wild_star=wild_star,
period=period, case_fold=case_fold, closed_regex=True)
return regex.match(text) is not None
|
python
|
def match(pattern, text, no_escape=False, path_name=True, wild_star=True, period=False,
case_fold=False):
u"""
Matches text against the supplied wildmatch pattern.
To get git's behavior, use the `wild_star` flag.
Note that the EXTMATCH (ksh extended glob patterns) option is not available
:type pattern: text_type
:param pattern: A wildmatch pattern
:type text: text_type
:param text: The text to match
:type no_escape: bool
:param no_escape: Disable backslash escaping
:type path_name: bool
:param path_name: Separator (slash) in text cannot be matched by an asterisk, question-mark nor
bracket expression in pattern (only a literal).
:type wild_star: bool
:param wild_star: A True value forces the `path_name` flag to True. This allows the
double-asterisk `**` to match any (0 to many) number of directories
:type period: bool
:param period: A leading period in text cannot be matched by an asterisk, question-mark nor
bracket expression in pattern (only a literal). A period is "leading" if:
- it is the first character of `text`
OR
- path_name (or wild_star) is True and the previous character is a slash
:type case_fold: bool
:param case_fold: Perform a case insensitive match (GNU Extension)
:rtype: bool
:return: Result of the match
"""
regex = translate(pattern, no_escape=no_escape, path_name=path_name, wild_star=wild_star,
period=period, case_fold=case_fold, closed_regex=True)
return regex.match(text) is not None
|
[
"def",
"match",
"(",
"pattern",
",",
"text",
",",
"no_escape",
"=",
"False",
",",
"path_name",
"=",
"True",
",",
"wild_star",
"=",
"True",
",",
"period",
"=",
"False",
",",
"case_fold",
"=",
"False",
")",
":",
"regex",
"=",
"translate",
"(",
"pattern",
",",
"no_escape",
"=",
"no_escape",
",",
"path_name",
"=",
"path_name",
",",
"wild_star",
"=",
"wild_star",
",",
"period",
"=",
"period",
",",
"case_fold",
"=",
"case_fold",
",",
"closed_regex",
"=",
"True",
")",
"return",
"regex",
".",
"match",
"(",
"text",
")",
"is",
"not",
"None"
] |
u"""
Matches text against the supplied wildmatch pattern.
To get git's behavior, use the `wild_star` flag.
Note that the EXTMATCH (ksh extended glob patterns) option is not available
:type pattern: text_type
:param pattern: A wildmatch pattern
:type text: text_type
:param text: The text to match
:type no_escape: bool
:param no_escape: Disable backslash escaping
:type path_name: bool
:param path_name: Separator (slash) in text cannot be matched by an asterisk, question-mark nor
bracket expression in pattern (only a literal).
:type wild_star: bool
:param wild_star: A True value forces the `path_name` flag to True. This allows the
double-asterisk `**` to match any (0 to many) number of directories
:type period: bool
:param period: A leading period in text cannot be matched by an asterisk, question-mark nor
bracket expression in pattern (only a literal). A period is "leading" if:
- it is the first character of `text`
OR
- path_name (or wild_star) is True and the previous character is a slash
:type case_fold: bool
:param case_fold: Perform a case insensitive match (GNU Extension)
:rtype: bool
:return: Result of the match
|
[
"u",
"Matches",
"text",
"against",
"the",
"supplied",
"wildmatch",
"pattern",
"."
] |
70b3aa99ee34da8b80b6ec6340862b760159d2a1
|
https://github.com/demurgos/py-pathmatch/blob/70b3aa99ee34da8b80b6ec6340862b760159d2a1/pathmatch/wildmatch.py#L602-L637
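A brief usage sketch, assuming the pathmatch package is installed; the calls follow the signature documented above, and the expected results follow from the path_name/wild_star semantics it describes:

from pathmatch import wildmatch

# with the default wild_star=True, '**' may cross directory boundaries
print(wildmatch.match(u'src/**/*.py', u'src/pkg/mod.py'))  # expected: True
# a single '*' cannot match '/' while path_name is in effect
print(wildmatch.match(u'src/*.py', u'src/pkg/mod.py'))     # expected: False
# case_fold enables the GNU case-insensitive extension
print(wildmatch.match(u'README.*', u'readme.md', case_fold=True))  # expected: True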
|
238,635
|
demurgos/py-pathmatch
|
pathmatch/wildmatch.py
|
WildmatchPattern.filter
|
def filter(self, texts):
u"""
Returns a generator yielding the elements of `texts` matching this pattern.
:type texts: typing.Iterable[text_type]
:param texts: An iterable collection of texts to match
:rtype: typing.Iterable[text_type]
:return: A generator of filtered elements.
"""
return (text for text in texts if self.regex.match(text) is not None)
|
python
|
def filter(self, texts):
u"""
Returns a generator yielding the elements of `texts` matching this pattern.
:type texts: typing.Iterable[text_type]
:param texts: An iterable collection of texts to match
:rtype: typing.Iterable[text_type]
:return: A generator of filtered elements.
"""
return (text for text in texts if self.regex.match(text) is not None)
|
[
"def",
"filter",
"(",
"self",
",",
"texts",
")",
":",
"return",
"(",
"text",
"for",
"text",
"in",
"texts",
"if",
"self",
".",
"regex",
".",
"match",
"(",
"text",
")",
"is",
"not",
"None",
")"
] |
u"""
Returns a generator yielding the elements of `texts` matching this pattern.
:type texts: typing.Iterable[text_type]
:param texts: An iterable collection of texts to match
:rtype: typing.Iterable[text_type]
:return: A generator of filtered elements.
|
[
"u",
"Returns",
"a",
"generator",
"yielding",
"the",
"elements",
"of",
"texts",
"matching",
"this",
"pattern",
"."
] |
70b3aa99ee34da8b80b6ec6340862b760159d2a1
|
https://github.com/demurgos/py-pathmatch/blob/70b3aa99ee34da8b80b6ec6340862b760159d2a1/pathmatch/wildmatch.py#L733-L742
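The same lazy-filtering idiom, shown standalone against a compiled regular expression (no pathmatch dependency needed to try it):

import re

regex = re.compile(r'.*\.py$')

def filter_matching(texts):
    # a generator: nothing is scanned until the caller iterates
    return (text for text in texts if regex.match(text) is not None)

print(list(filter_matching(['a.py', 'b.txt', 'c/d.py'])))  # ['a.py', 'c/d.py']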
|
238,636
|
perimosocordiae/viztricks
|
viztricks/convenience.py
|
imagesc
|
def imagesc(data, title=None, fig='current', ax=None):
'''Simple alias for a Matlab-like imshow function.'''
ax = _get_axis(fig, ax, False)
ax.imshow(data, interpolation='nearest', aspect='auto')
if title:
ax.set_title(title)
return plt.show
|
python
|
def imagesc(data, title=None, fig='current', ax=None):
'''Simple alias for a Matlab-like imshow function.'''
ax = _get_axis(fig, ax, False)
ax.imshow(data, interpolation='nearest', aspect='auto')
if title:
ax.set_title(title)
return plt.show
|
[
"def",
"imagesc",
"(",
"data",
",",
"title",
"=",
"None",
",",
"fig",
"=",
"'current'",
",",
"ax",
"=",
"None",
")",
":",
"ax",
"=",
"_get_axis",
"(",
"fig",
",",
"ax",
",",
"False",
")",
"ax",
".",
"imshow",
"(",
"data",
",",
"interpolation",
"=",
"'nearest'",
",",
"aspect",
"=",
"'auto'",
")",
"if",
"title",
":",
"ax",
".",
"set_title",
"(",
"title",
")",
"return",
"plt",
".",
"show"
] |
Simple alias for a Matlab-like imshow function.
|
[
"Simple",
"alias",
"for",
"a",
"Matlab",
"-",
"like",
"imshow",
"function",
"."
] |
bae2f8a9ce9278ce0197f8efc34cc4fef1dfe1eb
|
https://github.com/perimosocordiae/viztricks/blob/bae2f8a9ce9278ce0197f8efc34cc4fef1dfe1eb/viztricks/convenience.py#L72-L78
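A usage sketch, assuming viztricks (with matplotlib and numpy) is installed and imagesc is importable from the module path shown above; note that imagesc returns plt.show itself, so the caller decides when to render:

import numpy as np
from viztricks.convenience import imagesc

show = imagesc(np.random.rand(20, 30), title='random heatmap')
show()  # calling the returned plt.show opens the figure window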
|
238,637
|
perimosocordiae/viztricks
|
viztricks/convenience.py
|
vector_field
|
def vector_field(points, directions, title=None, fig='current', ax=None,
edge_style='k-', vertex_style='o'):
'''Plots vectors that start at 'points', and move along 'directions'.'''
assert points.shape[1] in (2,3) and directions.shape == points.shape
ax = _get_axis(fig, ax, points.shape[1] == 3)
# Plot.
if points.shape[1] == 2:
x,y = points.T
dx,dy = directions.T
if hasattr(ax, 'zaxis'): # Must be on a 3d plot axis, so supply zeros.
_quiver3d(ax, x, y, 0, dx, dy, 0, arrow_length_ratio=0.1)
else:
args = (x, y, dx, dy)
ax.quiver(*args, angles='xy', scale_units='xy', scale=1, headwidth=5)
if vertex_style is not None:
ax.scatter(x, y, marker=vertex_style, zorder=2, edgecolor='none')
else:
x,y,z = points.T
dx,dy,dz = directions.T
_quiver3d(ax, x, y, z, dx, dy, dz, arrow_length_ratio=0.1)
if vertex_style is not None:
ax.scatter(x, y, z, marker=vertex_style, zorder=2, edgecolor='none')
if title:
ax.set_title(title)
return plt.show
|
python
|
def vector_field(points, directions, title=None, fig='current', ax=None,
edge_style='k-', vertex_style='o'):
'''Plots vectors that start at 'points', and move along 'directions'.'''
assert points.shape[1] in (2,3) and directions.shape == points.shape
ax = _get_axis(fig, ax, points.shape[1] == 3)
# Plot.
if points.shape[1] == 2:
x,y = points.T
dx,dy = directions.T
if hasattr(ax, 'zaxis'): # Must be on a 3d plot axis, so supply zeros.
_quiver3d(ax, x, y, 0, dx, dy, 0, arrow_length_ratio=0.1)
else:
args = (x, y, dx, dy)
ax.quiver(*args, angles='xy', scale_units='xy', scale=1, headwidth=5)
if vertex_style is not None:
ax.scatter(x, y, marker=vertex_style, zorder=2, edgecolor='none')
else:
x,y,z = points.T
dx,dy,dz = directions.T
_quiver3d(ax, x, y, z, dx, dy, dz, arrow_length_ratio=0.1)
if vertex_style is not None:
ax.scatter(x, y, z, marker=vertex_style, zorder=2, edgecolor='none')
if title:
ax.set_title(title)
return plt.show
|
[
"def",
"vector_field",
"(",
"points",
",",
"directions",
",",
"title",
"=",
"None",
",",
"fig",
"=",
"'current'",
",",
"ax",
"=",
"None",
",",
"edge_style",
"=",
"'k-'",
",",
"vertex_style",
"=",
"'o'",
")",
":",
"assert",
"points",
".",
"shape",
"[",
"1",
"]",
"in",
"(",
"2",
",",
"3",
")",
"and",
"directions",
".",
"shape",
"==",
"points",
".",
"shape",
"ax",
"=",
"_get_axis",
"(",
"fig",
",",
"ax",
",",
"points",
".",
"shape",
"[",
"1",
"]",
"==",
"3",
")",
"# Plot.",
"if",
"points",
".",
"shape",
"[",
"1",
"]",
"==",
"2",
":",
"x",
",",
"y",
"=",
"points",
".",
"T",
"dx",
",",
"dy",
"=",
"directions",
".",
"T",
"if",
"hasattr",
"(",
"ax",
",",
"'zaxis'",
")",
":",
"# Must be on a 3d plot axis, so supply zeros.",
"_quiver3d",
"(",
"ax",
",",
"x",
",",
"y",
",",
"0",
",",
"dx",
",",
"dy",
",",
"0",
",",
"arrow_length_ratio",
"=",
"0.1",
")",
"else",
":",
"args",
"=",
"(",
"x",
",",
"y",
",",
"dx",
",",
"dy",
")",
"ax",
".",
"quiver",
"(",
"*",
"args",
",",
"angles",
"=",
"'xy'",
",",
"scale_units",
"=",
"'xy'",
",",
"scale",
"=",
"1",
",",
"headwidth",
"=",
"5",
")",
"if",
"vertex_style",
"is",
"not",
"None",
":",
"ax",
".",
"scatter",
"(",
"x",
",",
"y",
",",
"marker",
"=",
"vertex_style",
",",
"zorder",
"=",
"2",
",",
"edgecolor",
"=",
"'none'",
")",
"else",
":",
"x",
",",
"y",
",",
"z",
"=",
"points",
".",
"T",
"dx",
",",
"dy",
",",
"dz",
"=",
"directions",
".",
"T",
"_quiver3d",
"(",
"ax",
",",
"x",
",",
"y",
",",
"z",
",",
"dx",
",",
"dy",
",",
"dz",
",",
"arrow_length_ratio",
"=",
"0.1",
")",
"if",
"vertex_style",
"is",
"not",
"None",
":",
"ax",
".",
"scatter",
"(",
"x",
",",
"y",
",",
"z",
",",
"marker",
"=",
"vertex_style",
",",
"zorder",
"=",
"2",
",",
"edgecolor",
"=",
"'none'",
")",
"if",
"title",
":",
"ax",
".",
"set_title",
"(",
"title",
")",
"return",
"plt",
".",
"show"
] |
Plots vectors that start at 'points', and move along 'directions'.
|
[
"Plots",
"vectors",
"that",
"start",
"at",
"points",
"and",
"move",
"along",
"directions",
"."
] |
bae2f8a9ce9278ce0197f8efc34cc4fef1dfe1eb
|
https://github.com/perimosocordiae/viztricks/blob/bae2f8a9ce9278ce0197f8efc34cc4fef1dfe1eb/viztricks/convenience.py#L110-L134
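A companion sketch for vector_field under the same assumptions; the shapes satisfy the assert at the top of the function (N x 2 points with matching directions):

import numpy as np
from viztricks.convenience import vector_field

points = np.random.rand(15, 2)             # 2-D arrow origins
directions = 0.1 * np.random.randn(15, 2)  # one displacement per point
show = vector_field(points, directions, title='random field')
show()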
|
238,638
|
dizak/headnode_notifier
|
headnode_notifier.py
|
read_passwd_file
|
def read_passwd_file(pass_file):
"""Read password from external file and retrun as string. The file should
contain just single line. Prevents hard-coding password anywhere in this
script. IMPORTANT! Password is stored as plain text! Do NOT use with your
personal account!
Args:
pass_file (str): /path/to/pass_file
"""
with open(pass_file) as fin:
passwd = fin.read().strip()
return passwd
|
python
|
def read_passwd_file(pass_file):
"""Read password from external file and retrun as string. The file should
contain just single line. Prevents hard-coding password anywhere in this
script. IMPORTANT! Password is stored as plain text! Do NOT use with your
personal account!
Args:
pass_file (str): /path/to/pass_file
"""
with open(pass_file) as fin:
passwd = fin.read().strip()
return passwd
|
[
"def",
"read_passwd_file",
"(",
"pass_file",
")",
":",
"with",
"open",
"(",
"pass_file",
")",
"as",
"fin",
":",
"passwd",
"=",
"fin",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
"return",
"passwd"
] |
Read password from external file and return as string. The file should
contain just a single line. Prevents hard-coding password anywhere in this
script. IMPORTANT! Password is stored as plain text! Do NOT use with your
personal account!
Args:
pass_file (str): /path/to/pass_file
|
[
"Read",
"password",
"from",
"external",
"file",
"and",
"retrun",
"as",
"string",
".",
"The",
"file",
"should",
"contain",
"just",
"single",
"line",
".",
"Prevents",
"hard",
"-",
"coding",
"password",
"anywhere",
"in",
"this",
"script",
".",
"IMPORTANT!",
"Password",
"is",
"stored",
"as",
"plain",
"text!",
"Do",
"NOT",
"use",
"with",
"your",
"personal",
"account!"
] |
dc19577417ddf86d7818da0946ec28758255e8c3
|
https://github.com/dizak/headnode_notifier/blob/dc19577417ddf86d7818da0946ec28758255e8c3/headnode_notifier.py#L19-L30
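A quick self-contained demonstration, reusing the function body verbatim (the same caveat applies: never store a personal account's password this way):

import tempfile

def read_passwd_file(pass_file):
    with open(pass_file) as fin:
        return fin.read().strip()

with tempfile.NamedTemporaryFile('w', delete=False) as tmp:
    tmp.write('s3cret\n')  # the file holds a single line
print(read_passwd_file(tmp.name))  # 's3cret' -- trailing newline stripped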
|
238,639
|
dizak/headnode_notifier
|
headnode_notifier.py
|
send_mail
|
def send_mail(to_addr,
subj_msg,
body_msg,
attach_path,
serv_addr,
serv_port,
from_addr,
passwd):
"""Send an e-mail message using smtplib and email standard python libraries.
IMPORTANT! Password is stored as plain text! Do NOT use with your personal
account!
Args:
to_addr (str): Recipient address.
subj_msg (str): Message subject.
body_msg (str): Message body.
serv_addr (str): Server's address. Default: <smtp.gmail.com>.
serv_port (int): Server's port. Default: <587>.
from_addr (str): Account address. Default: <headnode.notifiy@gmail.com>.
passwd (str): Account password.
"""
msg = MIMEMultipart()
if attach_path is not None:
with open(attach_path, "rb") as fin:
part = MIMEBase("application", "octet-stream")
part.set_payload(fin.read())
encoders.encode_base64(part)
part.add_header("Content-Disposition",
"attachment; filename={0}".format(attach_path))
msg.attach(part)
else:
pass
msg["From"] = from_addr
msg["To"] = to_addr
msg["Subject"] = subj_msg
msg.attach(MIMEText(body_msg, "plain"))
server = smtplib.SMTP(serv_addr, serv_port)
server.starttls()
server.login(from_addr, passwd)
text_msg = msg.as_string()
server.sendmail(from_addr, to_addr, text_msg)
server.quit()
|
python
|
def send_mail(to_addr,
subj_msg,
body_msg,
attach_path,
serv_addr,
serv_port,
from_addr,
passwd):
"""Send an e-mail message using smtplib and email standard python libraries.
IMPORTANT! Password is stored as plain text! Do NOT use with your personal
account!
Args:
to_addr (str): Recipient address.
subj_msg (str): Message subject.
body_msg (str): Message body.
serv_addr (str): Server's address. Default: <smtp.gmail.com>.
serv_port (int): Server's port. Default: <587>.
from_addr (str): Account address. Default: <headnode.notifiy@gmail.com>.
passwd (str): Account password.
"""
msg = MIMEMultipart()
if attach_path is not None:
with open(attach_path, "rb") as fin:
part = MIMEBase("application", "octet-stream")
part.set_payload(fin.read())
encoders.encode_base64(part)
part.add_header("Content-Disposition",
"attachment; filename={0}".format(attach_path))
msg.attach(part)
else:
pass
msg["From"] = from_addr
msg["To"] = to_addr
msg["Subject"] = subj_msg
msg.attach(MIMEText(body_msg, "plain"))
server = smtplib.SMTP(serv_addr, serv_port)
server.starttls()
server.login(from_addr, passwd)
text_msg = msg.as_string()
server.sendmail(from_addr, to_addr, text_msg)
server.quit()
|
[
"def",
"send_mail",
"(",
"to_addr",
",",
"subj_msg",
",",
"body_msg",
",",
"attach_path",
",",
"serv_addr",
",",
"serv_port",
",",
"from_addr",
",",
"passwd",
")",
":",
"msg",
"=",
"MIMEMultipart",
"(",
")",
"if",
"attach_path",
"is",
"not",
"None",
":",
"with",
"open",
"(",
"attach_path",
",",
"\"rb\"",
")",
"as",
"fin",
":",
"part",
"=",
"MIMEBase",
"(",
"\"application\"",
",",
"\"octet-stream\"",
")",
"part",
".",
"set_payload",
"(",
"fin",
".",
"read",
"(",
")",
")",
"encoders",
".",
"encode_base64",
"(",
"part",
")",
"part",
".",
"add_header",
"(",
"\"Content-Disposition\"",
",",
"\"attachment; filename={0}\"",
".",
"format",
"(",
"attach_path",
")",
")",
"msg",
".",
"attach",
"(",
"part",
")",
"else",
":",
"pass",
"msg",
"[",
"\"From\"",
"]",
"=",
"from_addr",
"msg",
"[",
"\"To\"",
"]",
"=",
"to_addr",
"msg",
"[",
"\"Subject\"",
"]",
"=",
"subj_msg",
"msg",
".",
"attach",
"(",
"MIMEText",
"(",
"body_msg",
",",
"\"plain\"",
")",
")",
"server",
"=",
"smtplib",
".",
"SMTP",
"(",
"serv_addr",
",",
"serv_port",
")",
"server",
".",
"starttls",
"(",
")",
"server",
".",
"login",
"(",
"from_addr",
",",
"passwd",
")",
"text_msg",
"=",
"msg",
".",
"as_string",
"(",
")",
"server",
".",
"sendmail",
"(",
"from_addr",
",",
"to_addr",
",",
"text_msg",
")",
"server",
".",
"quit"
] |
Send an e-mail message using smtplib and email standard python libraries.
IMPORTANT! Password is stored as plain text! Do NOT use with your personal
account!
Args:
to_addr (str): Recipient address.
subj_msg (str): Message subject.
body_msg (str): Message body.
serv_addr (str): Server's address. Default: <smtp.gmail.com>.
serv_port (int): Server's port. Default: <587>.
from_addr (str): Account address. Default: <headnode.notifiy@gmail.com>.
passwd (str): Account password.
|
[
"Send",
"an",
"e",
"-",
"mail",
"message",
"using",
"smtplib",
"and",
"email",
"standard",
"python",
"libraries",
".",
"IMPORTANT!",
"Password",
"is",
"stored",
"as",
"plain",
"text!",
"Do",
"NOT",
"use",
"with",
"your",
"personal",
"account!"
] |
dc19577417ddf86d7818da0946ec28758255e8c3
|
https://github.com/dizak/headnode_notifier/blob/dc19577417ddf86d7818da0946ec28758255e8c3/headnode_notifier.py#L33-L74
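The message-assembly half of send_mail can be exercised offline; a sketch that builds the same MIME structure and prints it without touching the network (actually sending would additionally need a reachable SMTP server and valid credentials; the addresses below are placeholders):

from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

msg = MIMEMultipart()
msg["From"] = "sender@example.com"
msg["To"] = "recipient@example.com"
msg["Subject"] = "job finished"
msg.attach(MIMEText("All done.", "plain"))
print(msg.as_string()[:120])  # headers plus the start of the body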
|
238,640
|
rvswift/EB
|
EB/builder/utilities/performance.py
|
roc_calculator
|
def roc_calculator(screened_molecules, status_field, active_label, decoy_label):
"""
Calculates ROC curve
"""
P = 0 # Total no. of actives
N = 0 # Total no. of decoys
tpf = []
tpf.append(0) # true positive fraction list
fpf = []
fpf.append(0) # false positive fraction list
fpindex = [] # indices where decoys are found are labeled '1'
# Tally the # of positives & negatives at each threshold & in total
for index in range(len(screened_molecules)):
if screened_molecules[index].GetProp(status_field) == active_label and index == 0:
tpf[index] = float(1)
P = P + 1
fpindex.append(0)
elif screened_molecules[index].GetProp(status_field) == active_label and index > 0:
tpf.append(float(tpf[index - 1] + 1))
fpf.append(float(fpf[index - 1]))
P = P + 1
fpindex.append(0)
elif screened_molecules[index].GetProp(status_field) == decoy_label and index == 0:
fpf[index] = float(1)
N = N + 1
fpindex.append(1)
elif screened_molecules[index].GetProp(status_field) == decoy_label and index > 0:
fpf.append(float(fpf[index - 1] + 1))
tpf.append(float(tpf[index - 1]))
N = N + 1
fpindex.append(1)
# calculate TPF & FPF
for index in range(len(tpf)):
tpf[index] = tpf[index] / P
fpf[index] = fpf[index] / N
return tpf, fpf, P, N
|
python
|
def roc_calculator(screened_molecules, status_field, active_label, decoy_label):
"""
Calculates ROC curve
"""
P = 0 # Total no. of actives
N = 0 # Total no. of decoys
tpf = []
tpf.append(0) # true positive fraction list
fpf = []
fpf.append(0) # false positive fraction list
fpindex = [] # indices where decoys are found are labeled '1'
# Tally the # of positives & negatives at each threshold & in total
for index in range(len(screened_molecules)):
if screened_molecules[index].GetProp(status_field) == active_label and index == 0:
tpf[index] = float(1)
P = P + 1
fpindex.append(0)
elif screened_molecules[index].GetProp(status_field) == active_label and index > 0:
tpf.append(float(tpf[index - 1] + 1))
fpf.append(float(fpf[index - 1]))
P = P + 1
fpindex.append(0)
elif screened_molecules[index].GetProp(status_field) == decoy_label and index == 0:
fpf[index] = float(1)
N = N + 1
fpindex.append(1)
elif screened_molecules[index].GetProp(status_field) == decoy_label and index > 0:
fpf.append(float(fpf[index - 1] + 1))
tpf.append(float(tpf[index - 1]))
N = N + 1
fpindex.append(1)
# calculate TPF & FPF
for index in range(len(tpf)):
tpf[index] = tpf[index] / P
fpf[index] = fpf[index] / N
return tpf, fpf, P, N
|
[
"def",
"roc_calculator",
"(",
"screened_molecules",
",",
"status_field",
",",
"active_label",
",",
"decoy_label",
")",
":",
"P",
"=",
"0",
"# Total no. of actives",
"N",
"=",
"0",
"# Total no. of decoys",
"tpf",
"=",
"[",
"]",
"tpf",
".",
"append",
"(",
"0",
")",
"# true positive fraction list",
"fpf",
"=",
"[",
"]",
"fpf",
".",
"append",
"(",
"0",
")",
"# false positive fraction list",
"fpindex",
"=",
"[",
"]",
"# indeces where decoys are found are labeled '1'",
"# Tally the # of positives & negatives at each threshold & in total",
"for",
"index",
"in",
"range",
"(",
"len",
"(",
"screened_molecules",
")",
")",
":",
"if",
"screened_molecules",
"[",
"index",
"]",
".",
"GetProp",
"(",
"status_field",
")",
"==",
"active_label",
"and",
"index",
"==",
"0",
":",
"tpf",
"[",
"index",
"]",
"=",
"float",
"(",
"1",
")",
"P",
"=",
"P",
"+",
"1",
"fpindex",
".",
"append",
"(",
"0",
")",
"elif",
"screened_molecules",
"[",
"index",
"]",
".",
"GetProp",
"(",
"status_field",
")",
"==",
"active_label",
"and",
"index",
">",
"0",
":",
"tpf",
".",
"append",
"(",
"float",
"(",
"tpf",
"[",
"index",
"-",
"1",
"]",
"+",
"1",
")",
")",
"fpf",
".",
"append",
"(",
"float",
"(",
"fpf",
"[",
"index",
"-",
"1",
"]",
")",
")",
"P",
"=",
"P",
"+",
"1",
"fpindex",
".",
"append",
"(",
"0",
")",
"elif",
"screened_molecules",
"[",
"index",
"]",
".",
"GetProp",
"(",
"status_field",
")",
"==",
"decoy_label",
"and",
"index",
"==",
"0",
":",
"fpf",
"[",
"index",
"]",
"=",
"float",
"(",
"1",
")",
"N",
"=",
"N",
"+",
"1",
"fpindex",
".",
"append",
"(",
"1",
")",
"elif",
"screened_molecules",
"[",
"index",
"]",
".",
"GetProp",
"(",
"status_field",
")",
"==",
"decoy_label",
"and",
"index",
">",
"0",
":",
"fpf",
".",
"append",
"(",
"float",
"(",
"fpf",
"[",
"index",
"-",
"1",
"]",
"+",
"1",
")",
")",
"tpf",
".",
"append",
"(",
"float",
"(",
"tpf",
"[",
"index",
"-",
"1",
"]",
")",
")",
"N",
"=",
"N",
"+",
"1",
"fpindex",
".",
"append",
"(",
"1",
")",
"# calculate TPF & FPF",
"for",
"index",
"in",
"range",
"(",
"len",
"(",
"tpf",
")",
")",
":",
"tpf",
"[",
"index",
"]",
"=",
"tpf",
"[",
"index",
"]",
"/",
"P",
"fpf",
"[",
"index",
"]",
"=",
"fpf",
"[",
"index",
"]",
"/",
"N",
"return",
"tpf",
",",
"fpf",
",",
"P",
",",
"N"
] |
Calculates ROC curve
|
[
"Calculates",
"ROC",
"curve"
] |
341880b79faf8147dc9fa6e90438531cd09fabcc
|
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/utilities/performance.py#L32-L71
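A runnable sketch of the same running tallies, with a minimal stand-in for the molecule objects (the real code expects score-ranked, RDKit-style objects exposing GetProp, and the sketch assumes every molecule carries either the active or the decoy label); it yields the identical tpf/fpf vectors without the index-0 special cases:

class Mol:  # hypothetical stand-in exposing the GetProp interface
    def __init__(self, status):
        self._status = status

    def GetProp(self, field):
        return self._status

def roc_curve(mols, status_field, active_label, decoy_label):
    P = N = 0
    tpf, fpf = [], []
    for mol in mols:  # mols must already be ranked best-score first
        if mol.GetProp(status_field) == active_label:
            P += 1
        else:
            N += 1
        tpf.append(float(P))
        fpf.append(float(N))
    return [t / P for t in tpf], [f / N for f in fpf], P, N

ranked = [Mol(s) for s in ['active', 'decoy', 'active', 'decoy', 'decoy']]
tpf, fpf, P, N = roc_curve(ranked, 'status', 'active', 'decoy')
print(tpf)  # [0.5, 0.5, 1.0, 1.0, 1.0]
print(fpf)  # [0.0, 0.333..., 0.333..., 0.666..., 1.0]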
|
238,641
|
wickman/compactor
|
compactor/bin/http_example.py
|
listen
|
def listen(identifier):
"""
Launch a listener and return the compactor context.
"""
context = Context()
process = WebProcess(identifier)
context.spawn(process)
log.info("Launching PID %s", process.pid)
return process, context
|
python
|
def listen(identifier):
"""
Launch a listener and return the compactor context.
"""
context = Context()
process = WebProcess(identifier)
context.spawn(process)
log.info("Launching PID %s", process.pid)
return process, context
|
[
"def",
"listen",
"(",
"identifier",
")",
":",
"context",
"=",
"Context",
"(",
")",
"process",
"=",
"WebProcess",
"(",
"identifier",
")",
"context",
".",
"spawn",
"(",
"process",
")",
"log",
".",
"info",
"(",
"\"Launching PID %s\"",
",",
"process",
".",
"pid",
")",
"return",
"process",
",",
"context"
] |
Launch a listener and return the compactor context.
|
[
"Launch",
"a",
"listener",
"and",
"return",
"the",
"compactor",
"context",
"."
] |
52714be3d84aa595a212feccb4d92ec250cede2a
|
https://github.com/wickman/compactor/blob/52714be3d84aa595a212feccb4d92ec250cede2a/compactor/bin/http_example.py#L34-L46
|
238,642
|
obskyr/puush.py
|
puush/puush.py
|
Account.history
|
def history(self):
"""Get the latest 10 files uploaded to the account.
Return a list of Puush File objects.
"""
res = self._api_request('hist')
if res[0][0] == '-1':
raise PuushError("History retrieval failed.")
files = []
for line in res[1:]:
id, upload_time, url, filename, views, _ = line
files.append(self._File(id, url, filename, upload_time, views))
return files
|
python
|
def history(self):
"""Get the latest 10 files uploaded to the account.
Return a list of Puush File objects.
"""
res = self._api_request('hist')
if res[0][0] == '-1':
raise PuushError("History retrieval failed.")
files = []
for line in res[1:]:
id, upload_time, url, filename, views, _ = line
files.append(self._File(id, url, filename, upload_time, views))
return files
|
[
"def",
"history",
"(",
"self",
")",
":",
"res",
"=",
"self",
".",
"_api_request",
"(",
"'hist'",
")",
"if",
"res",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"'-1'",
":",
"raise",
"PuushError",
"(",
"\"History retrieval failed.\"",
")",
"files",
"=",
"[",
"]",
"for",
"line",
"in",
"res",
"[",
"1",
":",
"]",
":",
"id",
",",
"upload_time",
",",
"url",
",",
"filename",
",",
"views",
",",
"_",
"=",
"line",
"files",
".",
"append",
"(",
"self",
".",
"_File",
"(",
"id",
",",
"url",
",",
"filename",
",",
"upload_time",
",",
"views",
")",
")",
"return",
"files"
] |
Get the latest 10 files uploaded to the account.
Return a list of Puush File objects.
|
[
"Get",
"the",
"latest",
"10",
"files",
"uploaded",
"to",
"the",
"account",
".",
"Return",
"a",
"list",
"of",
"Puush",
"File",
"objects",
"."
] |
a0e4fe424c43a7758d8aaf1b31659ab51266f6d1
|
https://github.com/obskyr/puush.py/blob/a0e4fe424c43a7758d8aaf1b31659ab51266f6d1/puush/puush.py#L174-L186
|
238,643
|
kezabelle/django-template-selector
|
templateselector/fields.py
|
TemplateChoiceField.prepare_value
|
def prepare_value(self, value):
"""
To avoid evaluating the lazysorted callable more than necessary to
establish a potential initial value for the field, we do it here.
If there's
- only one template choice, and
- the field is required, and
- there's no prior initial set (either by being bound or by being set
higher up the stack)
then forcibly select the only "good" value as the default.
"""
if value is None and self.required:
choices = list(self.choices)
if len(choices) == 1:
value = choices[0][0]
return super(TemplateChoiceField, self).prepare_value(value)
|
python
|
def prepare_value(self, value):
"""
To avoid evaluating the lazysorted callable more than necessary to
establish a potential initial value for the field, we do it here.
If there's
- only one template choice, and
- the field is required, and
- there's no prior initial set (either by being bound or by being set
higher up the stack)
then forcibly select the only "good" value as the default.
"""
if value is None and self.required:
choices = list(self.choices)
if len(choices) == 1:
value = choices[0][0]
return super(TemplateChoiceField, self).prepare_value(value)
|
[
"def",
"prepare_value",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
"and",
"self",
".",
"required",
":",
"choices",
"=",
"list",
"(",
"self",
".",
"choices",
")",
"if",
"len",
"(",
"choices",
")",
"==",
"1",
":",
"value",
"=",
"choices",
"[",
"0",
"]",
"[",
"0",
"]",
"return",
"super",
"(",
"TemplateChoiceField",
",",
"self",
")",
".",
"prepare_value",
"(",
"value",
")"
] |
To avoid evaluating the lazysorted callable more than necessary to
establish a potential initial value for the field, we do it here.
If there's
- only one template choice, and
- the field is required, and
- there's no prior initial set (either by being bound or by being set
higher up the stack)
then forcibly select the only "good" value as the default.
|
[
"To",
"avoid",
"evaluating",
"the",
"lazysorted",
"callable",
"more",
"than",
"necessary",
"to",
"establish",
"a",
"potential",
"initial",
"value",
"for",
"the",
"field",
"we",
"do",
"it",
"here",
"."
] |
1c42baf934f38bb09b0939c67512057e33cacabb
|
https://github.com/kezabelle/django-template-selector/blob/1c42baf934f38bb09b0939c67512057e33cacabb/templateselector/fields.py#L196-L212
|
238,644
|
pip-services3-python/pip-services3-commons-python
|
pip_services3_commons/convert/FloatConverter.py
|
FloatConverter.to_float_with_default
|
def to_float_with_default(value, default_value):
"""
Converts value into float or returns default when conversion is not possible.
:param value: the value to convert.
:param default_value: the default value.
:return: float value or default value when conversion is not supported.
"""
result = FloatConverter.to_nullable_float(value)
return result if result is not None else default_value
|
python
|
def to_float_with_default(value, default_value):
"""
Converts value into float or returns default when conversion is not possible.
:param value: the value to convert.
:param default_value: the default value.
:return: float value or default value when conversion is not supported.
"""
result = FloatConverter.to_nullable_float(value)
return result if result is not None else default_value
|
[
"def",
"to_float_with_default",
"(",
"value",
",",
"default_value",
")",
":",
"result",
"=",
"FloatConverter",
".",
"to_nullable_float",
"(",
"value",
")",
"return",
"result",
"if",
"result",
"!=",
"None",
"else",
"default_value"
] |
Converts value into float or returns default when conversion is not possible.
:param value: the value to convert.
:param default_value: the default value.
:return: float value or default value when conversion is not supported.
|
[
"Converts",
"value",
"into",
"float",
"or",
"returns",
"default",
"when",
"conversion",
"is",
"not",
"possible",
"."
] |
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
|
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/convert/FloatConverter.py#L55-L66
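A standalone sketch of the converter pair; to_nullable_float is assumed (per its name and the docstring above) to return None whenever conversion fails:

def to_nullable_float(value):
    # assumed behavior: best-effort conversion, None when impossible
    try:
        return float(value)
    except (TypeError, ValueError):
        return None

def to_float_with_default(value, default_value):
    result = to_nullable_float(value)
    return result if result is not None else default_value

print(to_float_with_default('3.14', 0.0))  # 3.14
print(to_float_with_default('abc', 0.0))   # 0.0
print(to_float_with_default(None, 2.5))    # 2.5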
|
238,645
|
roboogle/gtkmvc3
|
gtkmvco/examples/thread/model.py
|
MyModel.run
|
def run(self):
"""This method is run by a separated thread"""
self.busy = True
for i in range(9):
self.counter += 1
time.sleep(0.5)
pass
self.counter += 1
self.busy = False
return
|
python
|
def run(self):
"""This method is run by a separated thread"""
self.busy = True
for i in range(9):
self.counter += 1
time.sleep(0.5)
pass
self.counter += 1
self.busy = False
return
|
[
"def",
"run",
"(",
"self",
")",
":",
"self",
".",
"busy",
"=",
"True",
"for",
"i",
"in",
"range",
"(",
"9",
")",
":",
"self",
".",
"counter",
"+=",
"1",
"time",
".",
"sleep",
"(",
"0.5",
")",
"pass",
"self",
".",
"counter",
"+=",
"1",
"self",
".",
"busy",
"=",
"False",
"return"
] |
This method is run by a separate thread
|
[
"This",
"method",
"is",
"run",
"by",
"a",
"separated",
"thread"
] |
63405fd8d2056be26af49103b13a8d5e57fe4dff
|
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/examples/thread/model.py#L58-L69
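The busy-flag-plus-counter worker above is started on a thread elsewhere in the example; a trimmed-down, GUI-free sketch of the same pattern with the standard threading module (sleep shortened so the demo finishes quickly):

import threading
import time

class Model:
    def __init__(self):
        self.busy = False
        self.counter = 0

    def run(self):  # executed on the worker thread
        self.busy = True
        for _ in range(10):
            self.counter += 1
            time.sleep(0.01)
        self.busy = False

model = Model()
worker = threading.Thread(target=model.run)
worker.start()
worker.join()
print(model.counter, model.busy)  # 10 False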
|
238,646
|
majerteam/sqla_inspect
|
sqla_inspect/base.py
|
BaseSqlaInspector.get_info_field
|
def get_info_field(prop):
"""
Return the info attribute of the given property
"""
if isinstance(prop, ColumnProperty):
column = prop.columns[0]
elif isinstance(prop, RelationshipProperty):
column = prop
return column.info
|
python
|
def get_info_field(prop):
"""
Return the info attribute of the given property
"""
if isinstance(prop, ColumnProperty):
column = prop.columns[0]
elif isinstance(prop, RelationshipProperty):
column = prop
return column.info
|
[
"def",
"get_info_field",
"(",
"prop",
")",
":",
"if",
"isinstance",
"(",
"prop",
",",
"ColumnProperty",
")",
":",
"column",
"=",
"prop",
".",
"columns",
"[",
"0",
"]",
"elif",
"isinstance",
"(",
"prop",
",",
"RelationshipProperty",
")",
":",
"column",
"=",
"prop",
"return",
"column",
".",
"info"
] |
Return the info attribute of the given property
|
[
"Return",
"the",
"info",
"attribute",
"of",
"the",
"given",
"property"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/base.py#L62-L72
|
238,647
|
majerteam/sqla_inspect
|
sqla_inspect/base.py
|
Registry.add_item
|
def add_item(self, sqla_col_type, item, key_specific=None):
"""
Add an item to the registry
"""
if key_specific is not None:
self.setdefault(key_specific, {})[sqla_col_type] = item
else:
self[sqla_col_type] = item
|
python
|
def add_item(self, sqla_col_type, item, key_specific=None):
"""
Add an item to the registry
"""
if key_specific is not None:
self.setdefault(key_specific, {})[sqla_col_type] = item
else:
self[sqla_col_type] = item
|
[
"def",
"add_item",
"(",
"self",
",",
"sqla_col_type",
",",
"item",
",",
"key_specific",
"=",
"None",
")",
":",
"if",
"key_specific",
"is",
"not",
"None",
":",
"self",
".",
"setdefault",
"(",
"key_specific",
",",
"{",
"}",
")",
"[",
"sqla_col_type",
"]",
"=",
"item",
"else",
":",
"self",
"[",
"sqla_col_type",
"]",
"=",
"item"
] |
Add an item to the registry
|
[
"Add",
"an",
"item",
"to",
"the",
"registry"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/base.py#L79-L86
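Registry is a dict whose entries can live either at the top level or inside a per-key sub-namespace; a runnable sketch built around the exact method body shown above:

class Registry(dict):
    def add_item(self, sqla_col_type, item, key_specific=None):
        if key_specific is not None:
            self.setdefault(key_specific, {})[sqla_col_type] = item
        else:
            self[sqla_col_type] = item

reg = Registry()
reg.add_item('Integer', str)                       # global entry
reg.add_item('Integer', repr, key_specific='csv')  # csv-specific override
print(reg['Integer'])         # <class 'str'>
print(reg['csv']['Integer'])  # <built-in function repr>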
|
238,648
|
majerteam/sqla_inspect
|
sqla_inspect/base.py
|
FormatterRegistry.add_formatter
|
def add_formatter(self, sqla_col_type, formatter, key_specific=None):
"""
Add a formatter to the registry
if key_specific is provided, this formatter will only be used for some
specific exports
"""
self.add_item(sqla_col_type, formatter, key_specific)
|
python
|
def add_formatter(self, sqla_col_type, formatter, key_specific=None):
"""
Add a formatter to the registry
if key_specific is provided, this formatter will only be used for some
specific exports
"""
self.add_item(sqla_col_type, formatter, key_specific)
|
[
"def",
"add_formatter",
"(",
"self",
",",
"sqla_col_type",
",",
"formatter",
",",
"key_specific",
"=",
"None",
")",
":",
"self",
".",
"add_item",
"(",
"sqla_col_type",
",",
"formatter",
",",
"key_specific",
")"
] |
Add a formatter to the registry
if key_specific is provided, this formatter will only be used for some
specific exports
|
[
"Add",
"a",
"formatter",
"to",
"the",
"registry",
"if",
"key_specific",
"is",
"provided",
"this",
"formatter",
"will",
"only",
"be",
"used",
"for",
"some",
"specific",
"exports"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/base.py#L102-L108
|
238,649
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/tooltip.py
|
WidgetToolTip.setup_layout
|
def setup_layout(self, orientation=None):
"""Setup the layout for the tooltip in the given orientation
:param orientation: the orientation of the layout
:type orientation: QtCore.Qt.Orientation | None
:returns: None
:rtype: None
:raises: None
"""
if orientation == QtCore.Qt.Horizontal or orientation is None:
layout = QtGui.QHBoxLayout()
elif orientation == QtCore.Qt.Vertical:
layout = QtGui.QVBoxLayout()
else:
raise TypeError('Orientation is of wrong type! Allowed is QtCore.Qt.Horizontal and QtCore.Qt.Vertical. Given: %s' % orientation)
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
self.setLayout(layout)
|
python
|
def setup_layout(self, orientation=None):
"""Setup the layout for the tooltip in the given orientation
:param orientation: the orientation of the layout
:type orientation: QtCore.Qt.Orientation | None
:returns: None
:rtype: None
:raises: None
"""
if orientation == QtCore.Qt.Horizontal or orientation is None:
layout = QtGui.QHBoxLayout()
elif orientation == QtCore.Qt.Vertical:
layout = QtGui.QVBoxLayout()
else:
raise TypeError('Orientation is of wrong type! Allowed is QtCore.Qt.Horizontal and QtCore.Qt.Vertical. Given: %s' % orientation)
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
self.setLayout(layout)
|
[
"def",
"setup_layout",
"(",
"self",
",",
"orientation",
"=",
"None",
")",
":",
"if",
"orientation",
"==",
"QtCore",
".",
"Qt",
".",
"Horizontal",
"or",
"orientation",
"is",
"None",
":",
"layout",
"=",
"QtGui",
".",
"QHBoxLayout",
"(",
")",
"elif",
"orientation",
"==",
"QtCore",
".",
"Qt",
".",
"Vertical",
":",
"layout",
"=",
"QtGui",
".",
"QVBoxLayout",
"(",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Orientation is of wrong type! Allowed is QtCore.Qt.Horizontal and QtCore.Qt.Vertical. Given: %s'",
"%",
"orientation",
")",
"layout",
".",
"setContentsMargins",
"(",
"0",
",",
"0",
",",
"0",
",",
"0",
")",
"layout",
".",
"setSpacing",
"(",
"0",
")",
"self",
".",
"setLayout",
"(",
"layout",
")"
] |
Setup the layout for the tooltip in the given orientation
:param orientation: the orientation of the layout
:type orientation: QtCore.Qt.Orientation | None
:returns: None
:rtype: None
:raises: None
|
[
"Setup",
"the",
"layout",
"for",
"the",
"tooltip",
"in",
"the",
"given",
"orientation"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/tooltip.py#L95-L112
|
238,650
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/tooltip.py
|
WidgetToolTip.setup_size
|
def setup_size(self, width, height):
"""Set the width and height for one cell in the tooltip
This is indirectly accomplished by setting the icon sizes for the buttons.
:param width: the width of one cell, min. is 7 -> icon width = 0
:type width: int
:param height: the height of one cell, min. is 6 -> icon height = 0
:type height: int
:returns: None
:rtype: None
:raises: None
"""
self._iconw = max(0, width - 7)
self._iconh = max(0, height - 6)
self.update_all_buttons()
|
python
|
def setup_size(self, width, height):
"""Set the width and height for one cell in the tooltip
This is indirectly accomplished by setting the icon sizes for the buttons.
:param width: the width of one cell, min. is 7 -> icon width = 0
:type width: int
:param height: the height of one cell, min. is 6 -> icon height = 0
:type height: int
:returns: None
:rtype: None
:raises: None
"""
self._iconw = max(0, width - 7)
self._iconh = max(0, height - 6)
self.update_all_buttons()
|
[
"def",
"setup_size",
"(",
"self",
",",
"width",
",",
"height",
")",
":",
"self",
".",
"_iconw",
"=",
"max",
"(",
"0",
",",
"width",
"-",
"7",
")",
"self",
".",
"_iconh",
"=",
"max",
"(",
"0",
",",
"height",
"-",
"6",
")",
"self",
".",
"update_all_buttons",
"(",
")"
] |
Set the width and height for one cell in the tooltip
This is indirectly accomplished by setting the icon sizes for the buttons.
:param width: the width of one cell, min. is 7 -> icon width = 0
:type width: int
:param height: the height of one cell, min. is 6 -> icon height = 0
:type height: int
:returns: None
:rtype: None
:raises: None
|
[
"Set",
"the",
"width",
"and",
"height",
"for",
"one",
"cell",
"in",
"the",
"tooltip"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/tooltip.py#L114-L129
|
238,651
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/tooltip.py
|
WidgetToolTip.setup_cyatimer
|
def setup_cyatimer(self, interval):
"""Setup the timer that will close the widget after the mouse left the widget for the time of interval
:param interval: the time that the tooltip waits before it disappears, in milliseconds
:type interval: int
:returns: None
:rtype: None
:raises: None
"""
self.cyatimer = QtCore.QTimer(self)
self.cyatimer.setSingleShot(True)
self.cyatimer.timeout.connect(self.hide)
self._interval = interval
|
python
|
def setup_cyatimer(self, interval):
"""Setup the timer that will close the widget after the mouse left the widget for the time of interval
:param interval: the time that the tooltip waits before it disappears, in milliseconds
:type interval: int
:returns: None
:rtype: None
:raises: None
"""
self.cyatimer = QtCore.QTimer(self)
self.cyatimer.setSingleShot(True)
self.cyatimer.timeout.connect(self.hide)
self._interval = interval
|
[
"def",
"setup_cyatimer",
"(",
"self",
",",
"interval",
")",
":",
"self",
".",
"cyatimer",
"=",
"QtCore",
".",
"QTimer",
"(",
"self",
")",
"self",
".",
"cyatimer",
".",
"setSingleShot",
"(",
"True",
")",
"self",
".",
"cyatimer",
".",
"timeout",
".",
"connect",
"(",
"self",
".",
"hide",
")",
"self",
".",
"_interval",
"=",
"interval"
] |
Setup the timer that will close the widget after the mouse left the widget for the time of interval
:param interval: the time that the tooltip waits before it disappears, in milliseconds
:type interval: int
:returns: None
:rtype: None
:raises: None
|
[
"Setup",
"the",
"timer",
"that",
"will",
"close",
"the",
"widget",
"after",
"the",
"mouse",
"left",
"the",
"widget",
"for",
"the",
"time",
"of",
"interval"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/tooltip.py#L131-L143
|
238,652
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/tooltip.py
|
WidgetToolTip.event
|
def event(self, event):
"""Reimplementation of QWidget.event
The widget is closed when the window is deactivated.
The widget is closed after the set interval if the mouse leaves the widget.
The timer stops when the mouse enters the widget before the interval ends.
On show, the added widgets are rendered for the tooltip into buttons. The buttons
are used to set the widget in focus.
"""
if event.type() == QtCore.QEvent.WindowDeactivate: # hide the tooltip
self.cyatimer.stop()
self.hide()
return True
if event.type() == QtCore.QEvent.Leave: # start timer
self.cyatimer.start(self._interval)
return True
if event.type() == QtCore.QEvent.Enter: # reset/stop timer
self.cyatimer.stop()
return True
if event.type() == QtCore.QEvent.Show: # render the widgets
self.cyatimer.stop()
return True
return super(WidgetToolTip, self).event(event)
|
python
|
def event(self, event):
"""Reimplementation of QWidget.event
The widget is closed when the window is deactivated.
The widget is closed after the set interval if the mouse leaves the widget.
The timer stops when the mouse enters the widget before the interval ends.
On show, the added widgets are rendered into buttons for the tooltip. The buttons
are used to set the widgets in focus.
"""
if event.type() == QtCore.QEvent.WindowDeactivate: # hide the tooltip
self.cyatimer.stop()
self.hide()
return True
if event.type() == QtCore.QEvent.Leave: # start timer
self.cyatimer.start(self._interval)
return True
if event.type() == QtCore.QEvent.Enter: # reset/stop timer
self.cyatimer.stop()
return True
if event.type() == QtCore.QEvent.Show: # render the widgets
self.cyatimer.stop()
return True
return super(WidgetToolTip, self).event(event)
|
[
"def",
"event",
"(",
"self",
",",
"event",
")",
":",
"if",
"event",
".",
"type",
"(",
")",
"==",
"QtCore",
".",
"QEvent",
".",
"WindowDeactivate",
":",
"# hide the tooltip",
"self",
".",
"cyatimer",
".",
"stop",
"(",
")",
"self",
".",
"hide",
"(",
")",
"return",
"True",
"if",
"event",
".",
"type",
"(",
")",
"==",
"QtCore",
".",
"QEvent",
".",
"Leave",
":",
"# start timer",
"self",
".",
"cyatimer",
".",
"start",
"(",
"self",
".",
"_interval",
")",
"return",
"True",
"if",
"event",
".",
"type",
"(",
")",
"==",
"QtCore",
".",
"QEvent",
".",
"Enter",
":",
"# reset/stop timer",
"self",
".",
"cyatimer",
".",
"stop",
"(",
")",
"return",
"True",
"if",
"event",
".",
"type",
"(",
")",
"==",
"QtCore",
".",
"QEvent",
".",
"Show",
":",
"# render the widgets",
"self",
".",
"cyatimer",
".",
"stop",
"(",
")",
"return",
"True",
"return",
"super",
"(",
"WidgetToolTip",
",",
"self",
")",
".",
"event",
"(",
"event",
")"
] |
Reimplementation of QWidget.event
The widget is closed when the window is deactivated.
The widget is closed after the set interval if the mouse leaves the widget.
The timer stops when the mouse enters the widget before the interval ends.
On show, the added widgets are rendered into buttons for the tooltip. The buttons
are used to set the widgets in focus.
|
[
"Reimplementation",
"of",
"QWidget",
".",
"event"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/tooltip.py#L145-L167
|
238,653
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/tooltip.py
|
WidgetToolTip.create_button
|
def create_button(self, widget):
"""Create a button that has the given widget rendered as an icon
:param widget: the widget to render as icon
:type widget: QtGui.QWidget
:returns: the created button
:rtype: QtGui.QAbstractButton
:raises: None
"""
btn = QtGui.QToolButton(self)
btn.setIconSize(QtCore.QSize(self._iconw, self._iconh))
self.update_button(btn, widget)
return btn
|
python
|
def create_button(self, widget):
"""Create a button that has the given widget rendered as an icon
:param widget: the widget to render as icon
:type widget: QtGui.QWidget
:returns: the created button
:rtype: QtGui.QAbstractButton
:raises: None
"""
btn = QtGui.QToolButton(self)
btn.setIconSize(QtCore.QSize(self._iconw, self._iconh))
self.update_button(btn, widget)
return btn
|
[
"def",
"create_button",
"(",
"self",
",",
"widget",
")",
":",
"btn",
"=",
"QtGui",
".",
"QToolButton",
"(",
"self",
")",
"btn",
".",
"setIconSize",
"(",
"QtCore",
".",
"QSize",
"(",
"self",
".",
"_iconw",
",",
"self",
".",
"_iconh",
")",
")",
"self",
".",
"update_button",
"(",
"btn",
",",
"widget",
")",
"return",
"btn"
] |
Create a button that has the given widget rendered as an icon
:param widget: the widget to render as icon
:type widget: QtGui.QWidget
:returns: the created button
:rtype: QtGui.QAbstractButton
:raises: None
|
[
"Create",
"a",
"button",
"that",
"has",
"the",
"given",
"widget",
"rendered",
"as",
"an",
"icon"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/tooltip.py#L169-L181
|
238,654
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/tooltip.py
|
WidgetToolTip.update_button
|
def update_button(self, button, widget):
"""Update the icon of the button with the given widget
If the widget is invalid, it is deleted from the tooltip automatically.
:param button: the button to update
:type button: QtGui.QAbstractButton
:param widget: the widget to render as icon
:type widget: QtGui.QWidget
:returns: None
:rtype: None
:raises: None
"""
if not shiboken.isValid(widget):
self.remove_widget(widget)
return
button.setIconSize(QtCore.QSize(self._iconw, self._iconh))
pix = QtGui.QPixmap(widget.size())
widget.render(pix)
icon = QtGui.QIcon(pix)
button.setIcon(icon)
|
python
|
def update_button(self, button, widget):
"""Update the icon of the button with the given widget
If the widget is invalid, it is deleted from the tooltip automatically.
:param button: the button to update
:type button: QtGui.QAbstractButton
:param widget: the widget to render as icon
:type widget: QtGui.QWidget
:returns: None
:rtype: None
:raises: None
"""
if not shiboken.isValid(widget):
self.remove_widget(widget)
return
button.setIconSize(QtCore.QSize(self._iconw, self._iconh))
pix = QtGui.QPixmap(widget.size())
widget.render(pix)
icon = QtGui.QIcon(pix)
button.setIcon(icon)
|
[
"def",
"update_button",
"(",
"self",
",",
"button",
",",
"widget",
")",
":",
"if",
"not",
"shiboken",
".",
"isValid",
"(",
"widget",
")",
":",
"self",
".",
"remove_widget",
"(",
"widget",
")",
"return",
"button",
".",
"setIconSize",
"(",
"QtCore",
".",
"QSize",
"(",
"self",
".",
"_iconw",
",",
"self",
".",
"_iconh",
")",
")",
"pix",
"=",
"QtGui",
".",
"QPixmap",
"(",
"widget",
".",
"size",
"(",
")",
")",
"widget",
".",
"render",
"(",
"pix",
")",
"icon",
"=",
"QtGui",
".",
"QIcon",
"(",
"pix",
")",
"button",
".",
"setIcon",
"(",
"icon",
")"
] |
Update the icon of the button with the given widget
If the widget is invalid, it is deleted from the tooltip automatically.
:param button: the button to update
:type button: QtGui.QAbstractButton
:param widget: the widget to render as icon
:type widget: QtGui.QWidget
:returns: None
:rtype: None
:raises: None
|
[
"Update",
"the",
"icon",
"of",
"the",
"button",
"with",
"the",
"given",
"widget"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/tooltip.py#L183-L203
|
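Rendering a live widget into an icon, as update_button does after its validity check, is a reusable three-step recipe. A sketch with a hypothetical helper name, again assuming a PySide/Qt4-style binding:

from PySide import QtGui  # assumed binding

def widget_to_icon(widget):
    # Allocate a pixmap matching the widget's current geometry,
    # paint the widget into it offscreen, then wrap it as an icon.
    pix = QtGui.QPixmap(widget.size())
    widget.render(pix)
    return QtGui.QIcon(pix)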
238,655
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/tooltip.py
|
WidgetToolTip.update_all_buttons
|
def update_all_buttons(self, ):
"""Update all buttons
:returns: None
:rtype: None
:raises: None
"""
for widget, button in self._buttons.items():
self.update_button(button, widget)
self.adjustSize()
|
python
|
def update_all_buttons(self, ):
"""Update all buttons
:returns: None
:rtype: None
:raises: None
"""
for widget, button in self._buttons.items():
self.update_button(button, widget)
self.adjustSize()
|
[
"def",
"update_all_buttons",
"(",
"self",
",",
")",
":",
"for",
"widget",
",",
"button",
"in",
"self",
".",
"_buttons",
".",
"items",
"(",
")",
":",
"self",
".",
"update_button",
"(",
"button",
",",
"widget",
")",
"self",
".",
"adjustSize",
"(",
")"
] |
Update all buttons
:returns: None
:rtype: None
:raises: None
|
[
"Update",
"all",
"buttons"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/tooltip.py#L205-L214
|
238,656
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/tooltip.py
|
WidgetToolTip.focus_widget
|
def focus_widget(self, checked=None, w=None):
"""Focus the given widget. Checked is ignored and only used as a slot for QAbstractButton.clicked.
:param checked: The checked state of the button that was clicked
:type checked: bool
:param w: the widget to focus
:type w: QtGui.QWidget
:returns: None
:raises: None
"""
if w is None:
return
if w.isMinimized():
w.showNormal()
else:
w.show()
w.activateWindow()
w.setFocus()
|
python
|
def focus_widget(self, checked=None, w=None):
"""Focus the given widget. Checked is ignored and only used as a slot for QAbstractButton.clicked.
:param checked: The checked state of the button that was clicked
:type checked: bool
:param w: the widget to focus
:type w: QtGui.QWidget
:returns: None
:raises: None
"""
if w is None:
return
if w.isMinimized():
w.showNormal()
else:
w.show()
w.activateWindow()
w.setFocus()
|
[
"def",
"focus_widget",
"(",
"self",
",",
"checked",
"=",
"None",
",",
"w",
"=",
"None",
")",
":",
"if",
"w",
"is",
"None",
":",
"return",
"if",
"w",
".",
"isMinimized",
"(",
")",
":",
"w",
".",
"showNormal",
"(",
")",
"else",
":",
"w",
".",
"show",
"(",
")",
"w",
".",
"activateWindow",
"(",
")",
"w",
".",
"setFocus",
"(",
")"
] |
Focus the given widget. Checked is ignored and only used as a slot for QAbstractButton.clicked.
:param checked: The checked state of the button that was clicked
:type checked: bool
:param w: the widget to focus
:type w: QtGui.QWidget
:returns: None
:raises: None
|
[
"Focus",
"the",
"given",
"widget",
".",
"Checked",
"is",
"ignored",
"and",
"only",
"used",
"as",
"a",
"slot",
"for",
"QAbstractButton",
".",
"clicked",
"."
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/tooltip.py#L216-L233
|
238,657
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/tooltip.py
|
WidgetToolTip.add_widget
|
def add_widget(self, widget):
"""Add the given widget to the tooltip
:param widget: the widget to add
:type widget: QtGui.QWidget
:returns: None
:rtype: None
:raises: None
"""
if self._buttons.get(widget):
return
btn = self.create_button(widget)
cb = partial(self.focus_widget, w=widget)
btn.clicked.connect(cb)
self.layout().addWidget(btn)
self._buttons[widget] = btn
|
python
|
def add_widget(self, widget):
"""Add the given widget to the tooltip
:param widget: the widget to add
:type widget: QtGui.QWidget
:returns: None
:rtype: None
:raises: None
"""
if self._buttons.get(widget):
return
btn = self.create_button(widget)
cb = partial(self.focus_widget, w=widget)
btn.clicked.connect(cb)
self.layout().addWidget(btn)
self._buttons[widget] = btn
|
[
"def",
"add_widget",
"(",
"self",
",",
"widget",
")",
":",
"if",
"self",
".",
"_buttons",
".",
"get",
"(",
"widget",
")",
":",
"return",
"btn",
"=",
"self",
".",
"create_button",
"(",
"widget",
")",
"cb",
"=",
"partial",
"(",
"self",
".",
"focus_widget",
",",
"w",
"=",
"widget",
")",
"btn",
".",
"clicked",
".",
"connect",
"(",
"cb",
")",
"self",
".",
"layout",
"(",
")",
".",
"addWidget",
"(",
"btn",
")",
"self",
".",
"_buttons",
"[",
"widget",
"]",
"=",
"btn"
] |
Add the given widget to the tooltip
:param widget: the widget to add
:type widget: QtGui.QWidget
:returns: None
:rtype: None
:raises: None
|
[
"Add",
"the",
"given",
"widget",
"to",
"the",
"tooltip"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/tooltip.py#L235-L250
|
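The functools.partial trick in add_widget is what lets a single slot serve many buttons: QAbstractButton.clicked emits a checked bool that lands in the slot's first parameter, while the target widget stays bound as a keyword argument. A Qt-free sketch of the same calling convention (all names illustrative):

from functools import partial

def focus_widget(checked=None, w=None):
    # `checked` absorbs the bool that QAbstractButton.clicked emits
    print("focusing", w)

slot = partial(focus_widget, w="main window")  # bind the target up front
slot(True)  # the signal's payload fills `checked`; w stays bound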
238,658
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/tooltip.py
|
WidgetToolTip.remove_widget
|
def remove_widget(self, widget):
"""Remove the given widget from the tooltip
:param widget: the widget to remove
:type widget: QtGui.QWidget
:returns: None
:rtype: None
:raises: KeyError
"""
button = self._buttons.pop(widget)
self.layout().removeWidget(button)
button.deleteLater()
|
python
|
def remove_widget(self, widget):
"""Remove the given widget from the tooltip
:param widget: the widget to remove
:type widget: QtGui.QWidget
:returns: None
:rtype: None
:raises: KeyError
"""
button = self._buttons.pop(widget)
self.layout().removeWidget(button)
button.deleteLater()
|
[
"def",
"remove_widget",
"(",
"self",
",",
"widget",
")",
":",
"button",
"=",
"self",
".",
"_buttons",
".",
"pop",
"(",
"widget",
")",
"self",
".",
"layout",
"(",
")",
".",
"removeWidget",
"(",
"button",
")",
"button",
".",
"deleteLater",
"(",
")"
] |
Remove the given widget from the tooltip
:param widget: the widget to remove
:type widget: QtGui.QWidget
:returns: None
:rtype: None
:raises: KeyError
|
[
"Remove",
"the",
"given",
"widget",
"from",
"the",
"tooltip"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/tooltip.py#L252-L263
|
238,659
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/tooltip.py
|
WidgetToolTip.eventFilter
|
def eventFilter(self, watched, event):
"""Filter ToolTip events and display this tooltip widget, if watched requests a tooltip.
:param watched: The watched object
:type watched: QtCore.QObject
:param event: The event sent by watched
:type event: QtCore.QEvent
:returns: True if the event was processed. False if the event should be passed on.
:rtype: bool
:raises: None
"""
if event.type() == self._triggerevent:
self.show()
return True
else:
return False
|
python
|
def eventFilter(self, watched, event):
"""Filter ToolTip events and display this tooltip widget, if watched requests a tooltip.
:param watched: The watched object
:type watched: QtCore.QObject
:param event: The event sent by watched
:type event: QtCore.QEvent
:returns: True if the event was processed. False if the event should be passed on.
:rtype: bool
:raises: None
"""
if event.type() == self._triggerevent:
self.show()
return True
else:
return False
|
[
"def",
"eventFilter",
"(",
"self",
",",
"watched",
",",
"event",
")",
":",
"if",
"event",
".",
"type",
"(",
")",
"==",
"self",
".",
"_triggerevent",
":",
"self",
".",
"show",
"(",
")",
"return",
"True",
"else",
":",
"return",
"False"
] |
Filter ToolTip events and display this tooltip widget, if watched requests a tooltip.
:param watched: The watched object
:type watched: QtCore.QObject
:param event: The event sent by watched
:type event: QtCore.QEvent
:returns: True if the event was processed. False if the event should be passed on.
:rtype: bool
:raises: None
|
[
"Filter",
"ToolTip",
"events",
"and",
"display",
"this",
"tooltip",
"widget",
"if",
"watched",
"requests",
"a",
"tooltip",
"."
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/tooltip.py#L265-L280
|
238,660
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/tooltip.py
|
WidgetToolTip.get_position
|
def get_position(self, ):
"""Return a recommended position for this widget to appear
This implementation returns a position so that the widget is vertically centered on the mouse
and 10 pixels left of the mouse
:returns: the position
:rtype: QPoint
:raises: None
"""
pos = QtGui.QCursor.pos()
if self._alignment & QtCore.Qt.AlignLeft == QtCore.Qt.AlignLeft:
pos.setX(pos.x() - self._offset)
elif self._alignment & QtCore.Qt.AlignRight == QtCore.Qt.AlignRight:
pos.setX(pos.x() - self.frameGeometry().width() + self._offset)
elif self._alignment & QtCore.Qt.AlignHCenter == QtCore.Qt.AlignHCenter:
pos.setX(pos.x() - self.frameGeometry().width()/2)
if self._alignment & QtCore.Qt.AlignTop == QtCore.Qt.AlignTop:
pos.setY(pos.y() - self._offset)
elif self._alignment & QtCore.Qt.AlignBottom == QtCore.Qt.AlignBottom:
pos.setY(pos.y() - self.frameGeometry().height() + self._offset)
elif self._alignment & QtCore.Qt.AlignVCenter == QtCore.Qt.AlignVCenter:
pos.setY(pos.y() - self.frameGeometry().height()/2)
return pos
|
python
|
def get_position(self, ):
"""Return a recommended position for this widget to appear
This implementation returns a position so that the widget is vertically centered on the mouse
and 10 pixels left of the mouse
:returns: the position
:rtype: QPoint
:raises: None
"""
pos = QtGui.QCursor.pos()
if self._alignment & QtCore.Qt.AlignLeft == QtCore.Qt.AlignLeft:
pos.setX(pos.x() - self._offset)
elif self._alignment & QtCore.Qt.AlignRight == QtCore.Qt.AlignRight:
pos.setX(pos.x() - self.frameGeometry().width() + self._offset)
elif self._alignment & QtCore.Qt.AlignHCenter == QtCore.Qt.AlignHCenter:
pos.setX(pos.x() - self.frameGeometry().width()/2)
if self._alignment & QtCore.Qt.AlignTop == QtCore.Qt.AlignTop:
pos.setY(pos.y() - self._offset)
elif self._alignment & QtCore.Qt.AlignBottom == QtCore.Qt.AlignBottom:
pos.setY(pos.y() - self.frameGeometry().height() + self._offset)
elif self._alignment & QtCore.Qt.AlignVCenter == QtCore.Qt.AlignVCenter:
pos.setY(pos.y() - self.frameGeometry().height()/2)
return pos
|
[
"def",
"get_position",
"(",
"self",
",",
")",
":",
"pos",
"=",
"QtGui",
".",
"QCursor",
".",
"pos",
"(",
")",
"if",
"self",
".",
"_alignment",
"&",
"QtCore",
".",
"Qt",
".",
"AlignLeft",
"==",
"QtCore",
".",
"Qt",
".",
"AlignLeft",
":",
"pos",
".",
"setX",
"(",
"pos",
".",
"x",
"(",
")",
"-",
"self",
".",
"_offset",
")",
"elif",
"self",
".",
"_alignment",
"&",
"QtCore",
".",
"Qt",
".",
"AlignRight",
"==",
"QtCore",
".",
"Qt",
".",
"AlignRight",
":",
"pos",
".",
"setX",
"(",
"pos",
".",
"x",
"(",
")",
"-",
"self",
".",
"frameGeometry",
"(",
")",
".",
"width",
"(",
")",
"+",
"self",
".",
"_offset",
")",
"elif",
"self",
".",
"_alignment",
"&",
"QtCore",
".",
"Qt",
".",
"AlignHCenter",
"==",
"QtCore",
".",
"Qt",
".",
"AlignHCenter",
":",
"pos",
".",
"setX",
"(",
"pos",
".",
"x",
"(",
")",
"-",
"self",
".",
"frameGeometry",
"(",
")",
".",
"width",
"(",
")",
"/",
"2",
")",
"if",
"self",
".",
"_alignment",
"&",
"QtCore",
".",
"Qt",
".",
"AlignTop",
"==",
"QtCore",
".",
"Qt",
".",
"AlignTop",
":",
"pos",
".",
"setY",
"(",
"pos",
".",
"y",
"(",
")",
"-",
"self",
".",
"_offset",
")",
"elif",
"self",
".",
"_alignment",
"&",
"QtCore",
".",
"Qt",
".",
"AlignBottom",
"==",
"QtCore",
".",
"Qt",
".",
"AlignBottom",
":",
"pos",
".",
"setY",
"(",
"pos",
".",
"y",
"(",
")",
"-",
"self",
".",
"frameGeometry",
"(",
")",
".",
"height",
"(",
")",
"+",
"self",
".",
"_offset",
")",
"elif",
"self",
".",
"_alignment",
"&",
"QtCore",
".",
"Qt",
".",
"AlignVCenter",
"==",
"QtCore",
".",
"Qt",
".",
"AlignVCenter",
":",
"pos",
".",
"setY",
"(",
"pos",
".",
"y",
"(",
")",
"-",
"self",
".",
"frameGeometry",
"(",
")",
".",
"height",
"(",
")",
"/",
"2",
")",
"return",
"pos"
] |
Return a recommended position for this widget to appear
This implementation returns a position so that the widget is vertically centered on the mouse
and 10 pixels left of the mouse
:returns: the position
:rtype: QPoint
:raises: None
|
[
"Return",
"a",
"recommended",
"position",
"for",
"this",
"widget",
"to",
"appear"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/tooltip.py#L282-L305
|
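The alignment conditions in get_position are plain bitmask checks: flags & AlignX == AlignX holds only when the full AlignX bit pattern is set (in Python, & binds tighter than ==, so no parentheses are needed). A pure-Python sketch using Qt's documented flag values:

ALIGN_LEFT, ALIGN_RIGHT, ALIGN_HCENTER = 0x1, 0x2, 0x4
ALIGN_TOP, ALIGN_BOTTOM, ALIGN_VCENTER = 0x20, 0x40, 0x80

flags = ALIGN_RIGHT | ALIGN_VCENTER             # right-aligned, vertically centered
assert flags & ALIGN_RIGHT == ALIGN_RIGHT       # right bit present
assert flags & ALIGN_VCENTER == ALIGN_VCENTER   # vcenter bit present
assert not (flags & ALIGN_LEFT == ALIGN_LEFT)   # left bit absent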
238,661
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/tooltip.py
|
WidgetToolTip.show
|
def show(self, ):
"""Reimplementation that moves the tooltip and updates the buttons
:returns: None
:rtype: None
:raises: None
"""
self.update_all_buttons()
pos = self.get_position()
self.move(pos)
super(WidgetToolTip, self).show()
|
python
|
def show(self, ):
"""Reimplementation that moves the tooltip and updates the buttons
:returns: None
:rtype: None
:raises: None
"""
self.update_all_buttons()
pos = self.get_position()
self.move(pos)
super(WidgetToolTip, self).show()
|
[
"def",
"show",
"(",
"self",
",",
")",
":",
"self",
".",
"update_all_buttons",
"(",
")",
"pos",
"=",
"self",
".",
"get_position",
"(",
")",
"self",
".",
"move",
"(",
"pos",
")",
"super",
"(",
"WidgetToolTip",
",",
"self",
")",
".",
"show",
"(",
")"
] |
Reimplementation that moves the tooltip and updates the buttons
:returns: None
:rtype: None
:raises: None
|
[
"Reimplementation",
"that",
"moves",
"the",
"tooltip",
"and",
"updates",
"the",
"buttons"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/tooltip.py#L387-L397
|
238,662
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/tooltip.py
|
JB_WindowToolTip.show
|
def show(self, ):
"""Reimplementation of show to update all currently available JB_MainWindows
:returns: None
:rtype: None
:raises: None
"""
wins = set(JB_MainWindow.instances())
widgets = set(self.get_widgets())
for w in wins - widgets:
self.add_widget(w)
super(JB_WindowToolTip, self).show()
|
python
|
def show(self, ):
"""Reimplementation of show to update all currently available JB_MainWindows
:returns: None
:rtype: None
:raises: None
"""
wins = set(JB_MainWindow.instances())
widgets = set(self.get_widgets())
for w in wins - widgets:
self.add_widget(w)
super(JB_WindowToolTip, self).show()
|
[
"def",
"show",
"(",
"self",
",",
")",
":",
"wins",
"=",
"set",
"(",
"JB_MainWindow",
".",
"instances",
"(",
")",
")",
"widgets",
"=",
"set",
"(",
"self",
".",
"get_widgets",
"(",
")",
")",
"for",
"w",
"in",
"wins",
"-",
"widgets",
":",
"self",
".",
"add_widget",
"(",
"w",
")",
"super",
"(",
"JB_WindowToolTip",
",",
"self",
")",
".",
"show",
"(",
")"
] |
Reimplementation of show to update all currently available JB_MainWindows
:returns: None
:rtype: None
:raises: None
|
[
"Reimplementation",
"of",
"show",
"to",
"update",
"all",
"currently",
"available",
"JB_MainWindows"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/tooltip.py#L402-L413
|
238,663
|
mapmyfitness/jtime
|
jtime/jira_ext.py
|
JIRA.get_issue
|
def get_issue(self, branch):
"""
Gets the JIRA issue associated with the branch name.
Returns None if no issue with this branch name.
"""
if branch:
try:
issue = self.issue(branch, expand='changelog')
return issue
except jira.exceptions.JIRAError as ex:
if ex.status_code == 404:
print "No JIRA issue found for branch %s" % branch
else:
print str(ex)
|
python
|
def get_issue(self, branch):
"""
Gets the JIRA issue associated with the branch name.
Returns None if no issue with this branch name.
"""
if branch:
try:
issue = self.issue(branch, expand='changelog')
return issue
except jira.exceptions.JIRAError as ex:
if ex.status_code == 404:
print "No JIRA issue found for branch %s" % branch
else:
print str(ex)
|
[
"def",
"get_issue",
"(",
"self",
",",
"branch",
")",
":",
"if",
"branch",
":",
"try",
":",
"issue",
"=",
"self",
".",
"issue",
"(",
"branch",
",",
"expand",
"=",
"'changelog'",
")",
"return",
"issue",
"except",
"jira",
".",
"exceptions",
".",
"JIRAError",
"as",
"ex",
":",
"if",
"ex",
".",
"status_code",
"==",
"404",
":",
"print",
"\"No JIRA issue found for branch %s\"",
"%",
"branch",
"else",
":",
"print",
"str",
"(",
"ex",
")"
] |
Gets the JIRA issue associated with the branch name.
Returns None if no issue with this branch name.
|
[
"Gets",
"the",
"JIRA",
"issue",
"associated",
"with",
"the",
"branch",
"name",
"."
] |
402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd
|
https://github.com/mapmyfitness/jtime/blob/402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd/jtime/jira_ext.py#L16-L30
|
238,664
|
mapmyfitness/jtime
|
jtime/jira_ext.py
|
JIRA.workflow_transition
|
def workflow_transition(self, issue, status_name):
"""
Change the status of a JIRA issue to a named status. Will only be updated
if this transition is available from the current status.
"""
transitions = self.transitions(issue)
for transition in transitions:
if transition['to']['name'] == status_name:
transition_id = transition['id']
self.transition_issue(issue, transition_id)
print "Changed status of issue %s to %s" % (issue.key, status_name)
return True
print "Unable to change status of issue %s to %s" % (issue.key, status_name)
|
python
|
def workflow_transition(self, issue, status_name):
"""
Change the status of a JIRA issue to a named status. Will only be updated
if this transition is available from the current status.
"""
transitions = self.transitions(issue)
for transition in transitions:
if transition['to']['name'] == status_name:
transition_id = transition['id']
self.transition_issue(issue, transition_id)
print "Changed status of issue %s to %s" % (issue.key, status_name)
return True
print "Unable to change status of issue %s to %s" % (issue.key, status_name)
|
[
"def",
"workflow_transition",
"(",
"self",
",",
"issue",
",",
"status_name",
")",
":",
"transitions",
"=",
"self",
".",
"transitions",
"(",
"issue",
")",
"for",
"transition",
"in",
"transitions",
":",
"if",
"transition",
"[",
"'to'",
"]",
"[",
"'name'",
"]",
"==",
"status_name",
":",
"transition_id",
"=",
"transition",
"[",
"'id'",
"]",
"self",
".",
"transition_issue",
"(",
"issue",
",",
"transition_id",
")",
"print",
"\"Changed status of issue %s to %s\"",
"%",
"(",
"issue",
".",
"key",
",",
"status_name",
")",
"return",
"True",
"print",
"\"Unable to change status of issue %s to %s\"",
"%",
"(",
"issue",
".",
"key",
",",
"status_name",
")"
] |
Change the status of a JIRA issue to a named status. Will only be updated
if this transition is available from the current status.
|
[
"Change",
"the",
"status",
"of",
"a",
"JIRA",
"issue",
"to",
"a",
"named",
"status",
".",
"Will",
"only",
"be",
"updated",
"if",
"this",
"transition",
"is",
"available",
"from",
"the",
"current",
"status",
"."
] |
402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd
|
https://github.com/mapmyfitness/jtime/blob/402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd/jtime/jira_ext.py#L64-L77
|
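Selecting a transition id by the name of its target status, as workflow_transition does, relies only on the 'id' and 'to'/'name' keys of the payload that python-jira's transitions() returns. A sketch over a hypothetical payload:

transitions = [                                  # shape mirrors the code above
    {'id': '11', 'to': {'name': 'In Progress'}},
    {'id': '21', 'to': {'name': 'Done'}},
]
wanted = 'Done'
transition_id = next(
    t['id'] for t in transitions if t['to']['name'] == wanted)
assert transition_id == '21'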
238,665
|
mapmyfitness/jtime
|
jtime/jira_ext.py
|
JIRA.get_datetime_issue_in_progress
|
def get_datetime_issue_in_progress(self, issue):
"""
If the issue is in progress, gets the most recent time that the issue became 'In Progress'
"""
histories = issue.changelog.histories
for history in reversed(histories):
history_items = history.items
for item in history_items:
if item.field == 'status' and item.toString == "In Progress":
return dateutil.parser.parse(history.created)
|
python
|
def get_datetime_issue_in_progress(self, issue):
"""
If the issue is in progress, gets the most recent time that the issue became 'In Progress'
"""
histories = issue.changelog.histories
for history in reversed(histories):
history_items = history.items
for item in history_items:
if item.field == 'status' and item.toString == "In Progress":
return dateutil.parser.parse(history.created)
|
[
"def",
"get_datetime_issue_in_progress",
"(",
"self",
",",
"issue",
")",
":",
"histories",
"=",
"issue",
".",
"changelog",
".",
"histories",
"for",
"history",
"in",
"reversed",
"(",
"histories",
")",
":",
"history_items",
"=",
"history",
".",
"items",
"for",
"item",
"in",
"history_items",
":",
"if",
"item",
".",
"field",
"==",
"'status'",
"and",
"item",
".",
"toString",
"==",
"\"In Progress\"",
":",
"return",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"history",
".",
"created",
")"
] |
If the issue is in progress, gets the most recent time that the issue became 'In Progress'
|
[
"If",
"the",
"issue",
"is",
"in",
"progress",
"gets",
"that",
"most",
"recent",
"time",
"that",
"the",
"issue",
"became",
"In",
"Progress"
] |
402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd
|
https://github.com/mapmyfitness/jtime/blob/402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd/jtime/jira_ext.py#L79-L88
|
238,666
|
mapmyfitness/jtime
|
jtime/jira_ext.py
|
JIRA.get_cycle_time
|
def get_cycle_time(self, issue_or_start_or_key):
"""
Provided an issue or a start datetime, will return the cycle time since the start of progress
"""
if isinstance(issue_or_start_or_key, basestring):
issue_or_start_or_key = self.get_issue(issue_or_start_or_key)
if isinstance(issue_or_start_or_key, jira.resources.Issue):
progress_started = self.get_datetime_issue_in_progress(issue_or_start_or_key)
elif isinstance(issue_or_start_or_key, datetime.datetime):
progress_started = issue_or_start_or_key
curr_time = datetime.datetime.now(dateutil.tz.tzlocal())
return utils.working_cycletime(progress_started, curr_time)
|
python
|
def get_cycle_time(self, issue_or_start_or_key):
"""
Provided an issue or a start datetime, will return the cycle time since the start of progress
"""
if isinstance(issue_or_start_or_key, basestring):
issue_or_start_or_key = self.get_issue(issue_or_start_or_key)
if isinstance(issue_or_start_or_key, jira.resources.Issue):
progress_started = self.get_datetime_issue_in_progress(issue_or_start_or_key)
elif isinstance(issue_or_start_or_key, datetime.datetime):
progress_started = issue_or_start_or_key
curr_time = datetime.datetime.now(dateutil.tz.tzlocal())
return utils.working_cycletime(progress_started, curr_time)
|
[
"def",
"get_cycle_time",
"(",
"self",
",",
"issue_or_start_or_key",
")",
":",
"if",
"isinstance",
"(",
"issue_or_start_or_key",
",",
"basestring",
")",
":",
"issue_or_start_or_key",
"=",
"self",
".",
"get_issue",
"(",
"issue_or_start_or_key",
")",
"if",
"isinstance",
"(",
"issue_or_start_or_key",
",",
"jira",
".",
"resources",
".",
"Issue",
")",
":",
"progress_started",
"=",
"self",
".",
"get_datetime_issue_in_progress",
"(",
"issue_or_start_or_key",
")",
"elif",
"isinstance",
"(",
"issue_or_start_or_key",
",",
"datetime",
".",
"datetime",
")",
":",
"progress_started",
"=",
"issue_or_start_or_key",
"curr_time",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
"dateutil",
".",
"tz",
".",
"tzlocal",
"(",
")",
")",
"return",
"utils",
".",
"working_cycletime",
"(",
"progress_started",
",",
"curr_time",
")"
] |
Provided an issue or a start datetime, will return the cycle time since the start of progress
|
[
"Provided",
"an",
"issue",
"or",
"a",
"start",
"datetime",
"will",
"return",
"the",
"cycle",
"time",
"since",
"the",
"start",
"or",
"progress"
] |
402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd
|
https://github.com/mapmyfitness/jtime/blob/402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd/jtime/jira_ext.py#L101-L114
|
238,667
|
mapmyfitness/jtime
|
jtime/jira_ext.py
|
JIRA.get_week_avg_cycletime
|
def get_week_avg_cycletime(self):
"""
Gets the average cycletime of the current user for the past week.
This includes any ticket that was marked "In Progress" but not reopened.
"""
def moving_average(new_val, old_avg, prev_n):
return (new_val + old_avg) / (prev_n + 1)
active_tickets_jql = 'assignee=currentUser() and status was "In Progress" DURING (startOfWeek(), endofweek()) and status not in (Backlog, Open) ORDER BY updated DESC'
week_active_tickets = self.search_issues(active_tickets_jql)
avg_cycletime = 0
n_issues = 0
for issue in week_active_tickets:
cycle_time = self.get_cycle_time(self.get_issue(issue.key))
avg_cycletime = moving_average(cycle_time, avg_cycletime, n_issues)
n_issues = n_issues + 1
return avg_cycletime
|
python
|
def get_week_avg_cycletime(self):
"""
Gets the average cycletime of the current user for the past week.
This includes any ticket that was marked "In Progress" but not reopened.
"""
def moving_average(new_val, old_avg, prev_n):
return (new_val + old_avg) / (prev_n + 1)
active_tickets_jql = 'assignee=currentUser() and status was "In Progress" DURING (startOfWeek(), endofweek()) and status not in (Backlog, Open) ORDER BY updated DESC'
week_active_tickets = self.search_issues(active_tickets_jql)
avg_cycletime = 0
n_issues = 0
for issue in week_active_tickets:
cycle_time = self.get_cycle_time(self.get_issue(issue.key))
avg_cycletime = moving_average(cycle_time, avg_cycletime, n_issues)
n_issues = n_issues + 1
return avg_cycletime
|
[
"def",
"get_week_avg_cycletime",
"(",
"self",
")",
":",
"def",
"moving_average",
"(",
"new_val",
",",
"old_avg",
",",
"prev_n",
")",
":",
"return",
"(",
"new_val",
"+",
"old_avg",
")",
"/",
"(",
"prev_n",
"+",
"1",
")",
"active_tickets_jql",
"=",
"'assignee=currentUser() and status was \"In Progress\" DURING (startOfWeek(), endofweek()) and status not in (Backlog, Open) ORDER BY updated DESC'",
"week_active_tickets",
"=",
"self",
".",
"search_issues",
"(",
"active_tickets_jql",
")",
"avg_cycletime",
"=",
"0",
"n_issues",
"=",
"0",
"for",
"issue",
"in",
"week_active_tickets",
":",
"cycle_time",
"=",
"self",
".",
"get_cycle_time",
"(",
"self",
".",
"get_issue",
"(",
"issue",
".",
"key",
")",
")",
"avg_cycletime",
"=",
"moving_average",
"(",
"cycle_time",
",",
"avg_cycletime",
",",
"n_issues",
")",
"n_issues",
"=",
"n_issues",
"+",
"1",
"return",
"avg_cycletime"
] |
Gets the average cycletime of the current user for the past week.
This includes any ticket that was marked "In Progress" but not reopened.
|
[
"Gets",
"the",
"average",
"cycletime",
"of",
"the",
"current",
"user",
"for",
"the",
"past",
"week",
".",
"This",
"includes",
"any",
"ticket",
"that",
"was",
"marked",
"In",
"Progress",
"but",
"not",
"reopened",
"."
] |
402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd
|
https://github.com/mapmyfitness/jtime/blob/402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd/jtime/jira_ext.py#L116-L135
|
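One caveat worth flagging: moving_average as written computes (new_val + old_avg) / (prev_n + 1), which matches the true mean only for the first two samples. The standard incremental mean weights the old average by the number of samples it already summarizes; a hedged correction (not the repo's code):

def incremental_mean(new_val, old_avg, prev_n):
    # Weight the old average by how many samples it represents.
    return (old_avg * prev_n + new_val) / float(prev_n + 1)

avg, n = 0.0, 0
for cycle_time in [2.0, 4.0, 9.0]:
    avg = incremental_mean(cycle_time, avg, n)
    n += 1
assert avg == 5.0  # (2 + 4 + 9) / 3, where the original formula would give 4.0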
238,668
|
Brazelton-Lab/bio_utils
|
bio_utils/blast_tools/filter_b6_evalue.py
|
b6_evalue_filter
|
def b6_evalue_filter(handle, e_value, *args, **kwargs):
"""Yields lines from handle with E-value less than or equal to e_value
Args:
handle (file): B6/M8 file handle, can be any iterator so long as
it returns subsequent "lines" of a B6/M8 entry
e_value (float): max E-value to return
*args: Variable length argument list for b6_iter
**kwargs: Arbitrary keyword arguments for b6_iter
Yields:
B6Entry: class containing all B6/M8 data
Example:
Note: These doctests will not pass, examples are only in doctest
format as per convention. bio_utils uses pytests for testing.
>>> b6_handle = open('test.b6')
>>> for entry in b6_evalue_filter(b6_handle, 1e5):
... print(entry.evalue) # Print E-value of filtered entry
"""
for entry in b6_iter(handle, *args, **kwargs):
if entry.evalue <= e_value:
yield entry
|
python
|
def b6_evalue_filter(handle, e_value, *args, **kwargs):
"""Yields lines from handle with E-value less than or equal to e_value
Args:
handle (file): B6/M8 file handle, can be any iterator so long as
it returns subsequent "lines" of a B6/M8 entry
e_value (float): max E-value to return
*args: Variable length argument list for b6_iter
**kwargs: Arbitrary keyword arguments for b6_iter
Yields:
B6Entry: class containing all B6/M8 data
Example:
Note: These doctests will not pass, examples are only in doctest
format as per convention. bio_utils uses pytests for testing.
>>> b6_handle = open('test.b6')
>>> for entry in b6_evalue_filter(b6_handle, 1e5):
... print(entry.evalue) # Print E-value of filtered entry
"""
for entry in b6_iter(handle, *args, **kwargs):
if entry.evalue <= e_value:
yield entry
|
[
"def",
"b6_evalue_filter",
"(",
"handle",
",",
"e_value",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"entry",
"in",
"b6_iter",
"(",
"handle",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"entry",
".",
"evalue",
"<=",
"e_value",
":",
"yield",
"entry"
] |
Yields lines from handle with E-value less than or equal to e_value
Args:
handle (file): B6/M8 file handle, can be any iterator so long as
it returns subsequent "lines" of a B6/M8 entry
e_value (float): max E-value to return
*args: Variable length argument list for b6_iter
**kwargs: Arbitrary keyword arguments for b6_iter
Yields:
B6Entry: class containing all B6/M8 data
Example:
Note: These doctests will not pass, examples are only in doctest
format as per convention. bio_utils uses pytests for testing.
>>> b6_handle = open('test.b6')
>>> for entry in b6_evalue_filter(b6_handle, 1e5):
... print(entry.evalue) # Print E-value of filtered entry
|
[
"Yields",
"lines",
"from",
"handle",
"with",
"E",
"-",
"value",
"less",
"than",
"or",
"equal",
"to",
"e_value"
] |
5a7ddf13ee0bf4baaaeb6b2b99e01bf74aa132b7
|
https://github.com/Brazelton-Lab/bio_utils/blob/5a7ddf13ee0bf4baaaeb6b2b99e01bf74aa132b7/bio_utils/blast_tools/filter_b6_evalue.py#L43-L70
|
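Hypothetical usage of the generator filter above; the import path is inferred from the file's location in the repo, and only the .evalue attribute named in the docstring is relied on:

from bio_utils.blast_tools.filter_b6_evalue import b6_evalue_filter  # inferred path

with open('test.b6') as b6_handle:
    for entry in b6_evalue_filter(b6_handle, 1e-5):  # keep hits with E-value <= 1e-5
        print(entry.evalue)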
238,669
|
FujiMakoto/AgentML
|
agentml/parser/tags/condition.py
|
Condition.value
|
def value(self):
"""
Return the current evaluation of a condition statement
"""
return ''.join(map(str, self.evaluate(self.trigger.user)))
|
python
|
def value(self):
"""
Return the current evaluation of a condition statement
"""
return ''.join(map(str, self.evaluate(self.trigger.user)))
|
[
"def",
"value",
"(",
"self",
")",
":",
"return",
"''",
".",
"join",
"(",
"map",
"(",
"str",
",",
"self",
".",
"evaluate",
"(",
"self",
".",
"trigger",
".",
"user",
")",
")",
")"
] |
Return the current evaluation of a condition statement
|
[
"Return",
"the",
"current",
"evaluation",
"of",
"a",
"condition",
"statement"
] |
c8cb64b460d876666bf29ea2c682189874c7c403
|
https://github.com/FujiMakoto/AgentML/blob/c8cb64b460d876666bf29ea2c682189874c7c403/agentml/parser/tags/condition.py#L37-L41
|
238,670
|
MIR-MU/ntcir-math-density
|
ntcir_math_density/estimator.py
|
get_judged_identifiers
|
def get_judged_identifiers(input_file):
"""
Extracts the paragraph identifiers, and the scores of the judged paragraphs from relevance
judgements in the NTCIR-11 Math-2, and NTCIR-12 MathIR format.
Parameters
----------
input_file : file
The input file containing relevance judgements in the NTCIR-11 Math-2, and NTCIR-12 MathIR
format.
Yields
------
(str, float)
The judged paragraph identifiers, and scores.
"""
for line in tqdm(list(input_file)):
_, __, identifier, score = line.split(' ')
yield (identifier, float(score))
|
python
|
def get_judged_identifiers(input_file):
"""
Extracts the paragraph identifiers, and the scores of the judged paragraphs from relevance
judgements in the NTCIR-11 Math-2, and NTCIR-12 MathIR format.
Parameters
----------
input_file : file
The input file containing relevance judgements in the NTCIR-11 Math-2, and NTCIR-12 MathIR
format.
Yields
------
(str, float)
The judged paragraph identifiers, and scores.
"""
for line in tqdm(list(input_file)):
_, __, identifier, score = line.split(' ')
yield (identifier, float(score))
|
[
"def",
"get_judged_identifiers",
"(",
"input_file",
")",
":",
"for",
"line",
"in",
"tqdm",
"(",
"list",
"(",
"input_file",
")",
")",
":",
"_",
",",
"__",
",",
"identifier",
",",
"score",
"=",
"line",
".",
"split",
"(",
"' '",
")",
"yield",
"(",
"identifier",
",",
"float",
"(",
"score",
")",
")"
] |
Extracts the paragraph identifiers, and the scores of the judged paragraphs from relevance
judgements in the NTCIR-11 Math-2, and NTCIR-12 MathIR format.
Parameters
----------
input_file : file
The input file containing relevance judgements in the NTCIR-11 Math-2, and NTCIR-12 MathIR
format.
Yields
------
(str, float)
The judged paragraph identifiers, and scores.
|
[
"Extracts",
"the",
"paragraph",
"identifiers",
"and",
"the",
"scores",
"of",
"the",
"judged",
"paragraphs",
"from",
"relevance",
"judgements",
"in",
"the",
"NTCIR",
"-",
"11",
"Math",
"-",
"2",
"and",
"NTCIR",
"-",
"12",
"MathIR",
"format",
"."
] |
648c74bfc5bd304603ef67da753ff25b65e829ef
|
https://github.com/MIR-MU/ntcir-math-density/blob/648c74bfc5bd304603ef67da753ff25b65e829ef/ntcir_math_density/estimator.py#L22-L40
|
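The four-column split in get_judged_identifiers assumes a space-separated qrel-style line of topic, iteration, identifier, and score. A sketch over a made-up judgement line:

line = "NTCIR12-MathIR-1 0 0705.0010_1_16 2.0\n"  # hypothetical judgement line
_, _, identifier, score = line.split(' ')
assert identifier == "0705.0010_1_16"
assert float(score) == 2.0                        # float() tolerates the trailing newline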
238,671
|
MIR-MU/ntcir-math-density
|
ntcir_math_density/estimator.py
|
get_all_identifiers
|
def get_all_identifiers(dataset):
"""
Extracts paragraph identifiers from a dataset in the NTCIR-11 Math-2, and NTCIR-12 MathIR XHTML5
format.
Parameters
----------
dataset : Path
A path to a dataset.
Yields
-------
(Path, str)
A parent directory of a paragraph, and the identifier of the paragraph.
"""
for document in tqdm(
dataset.glob("**/*.xhtml.zip"), desc="get_all_identifiers(%s)" % dataset.name):
identifier = get_identifier(document)
directory = document.parents[0]
yield (directory, identifier)
|
python
|
def get_all_identifiers(dataset):
"""
Extracts paragraph identifiers from a dataset in the NTCIR-11 Math-2, and NTCIR-12 MathIR XHTML5
format.
Parameters
----------
dataset : Path
A path to a dataset.
Yields
-------
(Path, str)
A parent directory of a paragraph, and the identifier of the paragraph.
"""
for document in tqdm(
dataset.glob("**/*.xhtml.zip"), desc="get_all_identifiers(%s)" % dataset.name):
identifier = get_identifier(document)
directory = document.parents[0]
yield (directory, identifier)
|
[
"def",
"get_all_identifiers",
"(",
"dataset",
")",
":",
"for",
"document",
"in",
"tqdm",
"(",
"dataset",
".",
"glob",
"(",
"\"**/*.xhtml.zip\"",
")",
",",
"desc",
"=",
"\"get_all_identifiers(%s)\"",
"%",
"dataset",
".",
"name",
")",
":",
"identifier",
"=",
"get_identifier",
"(",
"document",
")",
"directory",
"=",
"document",
".",
"parents",
"[",
"0",
"]",
"yield",
"(",
"directory",
",",
"identifier",
")"
] |
Extracts paragraph identifiers from a dataset in the NTCIR-11 Math-2, and NTCIR-12 MathIR XHTML5
format.
Parameters
----------
dataset : Path
A path to a dataset.
Yields
-------
(Path, str)
A parent directory of a paragraph, and the identifier of the paragraph.
|
[
"Extracts",
"paragraph",
"identifiers",
"from",
"a",
"dataset",
"in",
"the",
"NTCIR",
"-",
"11",
"Math",
"-",
"2",
"and",
"NTCIR",
"-",
"12",
"MathIR",
"XHTML5",
"format",
"."
] |
648c74bfc5bd304603ef67da753ff25b65e829ef
|
https://github.com/MIR-MU/ntcir-math-density/blob/648c74bfc5bd304603ef67da753ff25b65e829ef/ntcir_math_density/estimator.py#L60-L79
|
238,672
|
MIR-MU/ntcir-math-density
|
ntcir_math_density/estimator.py
|
get_position
|
def get_position(directory, identifier):
"""
Extracts the position of a paragraph from the identifier, and the parent directory of the
paragraph.
Parameters
----------
directory : Path
A parent directory of a paragraph.
identifier : str
An identifier of a paragraph.
Returns
-------
float
The estimated position of the paragraph in the range [0; 1).
"""
paragraph_number = get_paragraph_number(identifier)
paragraph_total = max( # Not all paragraphs are stored, e.g. because of processing errors.
get_paragraph_number(get_identifier(document)) + 1
for document in directory.iterdir())
assert paragraph_total > paragraph_number and paragraph_total > 0
position = paragraph_number / paragraph_total
return position
|
python
|
def get_position(directory, identifier):
"""
Extracts the position of a paragraph from the identifier, and the parent directory of the
paragraph.
Parameters
----------
directory : Path
A parent directory of a paragraph.
identifier : str
An identifier of a paragraph.
Returns
-------
float
The estimated position of the paragraph in the range [0; 1).
"""
paragraph_number = get_paragraph_number(identifier)
paragraph_total = max( # Not all paragraphs are stored, e.g. because of processing errors.
get_paragraph_number(get_identifier(document)) + 1
for document in directory.iterdir())
assert paragraph_total > paragraph_number and paragraph_total > 0
position = paragraph_number / paragraph_total
return position
|
[
"def",
"get_position",
"(",
"directory",
",",
"identifier",
")",
":",
"paragraph_number",
"=",
"get_paragraph_number",
"(",
"identifier",
")",
"paragraph_total",
"=",
"max",
"(",
"# Not all paragraphs are stored, e.g. because of processing errors.",
"get_paragraph_number",
"(",
"get_identifier",
"(",
"document",
")",
")",
"+",
"1",
"for",
"document",
"in",
"directory",
".",
"iterdir",
"(",
")",
")",
"assert",
"paragraph_total",
">",
"paragraph_number",
"and",
"paragraph_total",
">",
"0",
"position",
"=",
"paragraph_number",
"/",
"paragraph_total",
"return",
"position"
] |
Extracts the position of a paragraph from the identifier, and the parent directory of the
paragraph.
Parameters
----------
directory : Path
A parent directory of a paragraph.
identifier : str
An identifier of a paragraph.
Returns
-------
float
The estimated position of the paragraph in the range [0; 1).
|
[
"Extracts",
"the",
"position",
"of",
"a",
"paragraph",
"from",
"the",
"identifier",
"and",
"the",
"parent",
"directory",
"of",
"the",
"paragraph",
"."
] |
648c74bfc5bd304603ef67da753ff25b65e829ef
|
https://github.com/MIR-MU/ntcir-math-density/blob/648c74bfc5bd304603ef67da753ff25b65e829ef/ntcir_math_density/estimator.py#L104-L127
|
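A worked instance of the position estimate above, assuming Python 3 true division (as the /-based code implies): with 0-based paragraph numbers, the highest stored number plus one serves as the total, so the result always falls in [0, 1):

paragraph_number = 3      # 0-based index of this paragraph
paragraph_total = 9 + 1   # highest stored paragraph number + 1
position = paragraph_number / paragraph_total
assert position == 0.3
assert 0 <= position < 1  # guaranteed by the assert in get_position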
238,673
|
MIR-MU/ntcir-math-density
|
ntcir_math_density/estimator.py
|
get_all_positions
|
def get_all_positions(dataset, num_workers=1):
"""
Extracts paragraph identifiers, and positions from a dataset in the NTCIR-11 Math-2, and
NTCIR-12 MathIR XHTML5 format.
Parameters
----------
dataset : Path
A path to a dataset.
num_workers : int, optional
The number of processes that will extract paragraph positions from the dataset.
Yields
-------
(Path, str, float)
A parent directory of a paragraph, the identifier of the paragraph, and an estimate of the
position of the paragraph in its parent document. The position is in the range [0; 1).
"""
positions = []
identifiers = tqdm(
list(get_all_identifiers(dataset)), desc="get_all_positions(%s)" % dataset.name)
with Pool(num_workers) as pool:
for directory, identifier, position in pool.map(_get_position_worker, identifiers):
positions.append((directory, identifier, position))
for directory, identifier, position in positions:
yield (directory, identifier, position)
|
python
|
def get_all_positions(dataset, num_workers=1):
"""
Extracts paragraph identifiers, and positions from a dataset in the NTCIR-11 Math-2, and
NTCIR-12 MathIR XHTML5 format.
Parameters
----------
dataset : Path
A path to a dataset.
num_workers : int, optional
The number of processes that will extract paragraph positions from the dataset.
Yields
-------
(Path, str, float)
A parent directory of a paragraph, the identifier of the paragraph, and an estimate of the
position of the paragraph in its parent document. The position is in the range [0; 1).
"""
positions = []
identifiers = tqdm(
list(get_all_identifiers(dataset)), desc="get_all_positions(%s)" % dataset.name)
with Pool(num_workers) as pool:
for directory, identifier, position in pool.map(_get_position_worker, identifiers):
positions.append((directory, identifier, position))
for directory, identifier, position in positions:
yield (directory, identifier, position)
|
[
"def",
"get_all_positions",
"(",
"dataset",
",",
"num_workers",
"=",
"1",
")",
":",
"positions",
"=",
"[",
"]",
"identifiers",
"=",
"tqdm",
"(",
"list",
"(",
"get_all_identifiers",
"(",
"dataset",
")",
")",
",",
"desc",
"=",
"\"get_all_positions(%s)\"",
"%",
"dataset",
".",
"name",
")",
"with",
"Pool",
"(",
"num_workers",
")",
"as",
"pool",
":",
"for",
"directory",
",",
"identifier",
",",
"position",
"in",
"pool",
".",
"map",
"(",
"_get_position_worker",
",",
"identifiers",
")",
":",
"positions",
".",
"append",
"(",
"(",
"directory",
",",
"identifier",
",",
"position",
")",
")",
"for",
"directory",
",",
"identifier",
",",
"position",
"in",
"positions",
":",
"yield",
"(",
"directory",
",",
"identifier",
",",
"position",
")"
] |
Extracts paragraph identifiers, and positions from a dataset in the NTCIR-11 Math-2, and
NTCIR-12 MathIR XHTML5 format.
Parameters
----------
dataset : Path
A path to a dataset.
num_workers : int, optional
The number of processes that will extract paragraph positions from the dataset.
Yields
-------
(Path, str, float)
A parent directory of a paragraph, the identifier of the paragraph, and an estimate of the
position of the paragraph in its parent document. The position is in the range [0; 1).
|
[
"Extracts",
"paragraph",
"identifiers",
"and",
"positions",
"from",
"a",
"dataset",
"in",
"the",
"NTCIR",
"-",
"11",
"Math",
"-",
"2",
"and",
"NTCIR",
"-",
"12",
"MathIR",
"XHTML5",
"format",
"."
] |
648c74bfc5bd304603ef67da753ff25b65e829ef
|
https://github.com/MIR-MU/ntcir-math-density/blob/648c74bfc5bd304603ef67da753ff25b65e829ef/ntcir_math_density/estimator.py#L135-L160
|
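get_all_positions fans work out with Pool.map and then yields from the collected list; the same shape in miniature, with a stand-in worker for _get_position_worker:

from multiprocessing import Pool

def work(item):  # stand-in for _get_position_worker
    return item, item * item

if __name__ == "__main__":
    with Pool(2) as pool:                   # context-manager Pool needs Python 3.3+
        results = pool.map(work, range(5))  # blocks until every worker finishes
    print(results)  # [(0, 0), (1, 1), (2, 4), (3, 9), (4, 16)]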
238,674
|
MIR-MU/ntcir-math-density
|
ntcir_math_density/estimator.py
|
get_estimators
|
def get_estimators(positions_all, positions_relevant):
"""
Extracts density estimators from a judged sample of paragraph positions.
Parameters
----------
positions_all : dict of (Path, float)
A sample of paragraph positions from various datasets in the NTCIR-11
Math-2, and NTCIR-12 MathIR format.
positions_relevant : dict of (Path, float)
A sample of relevant paragraph positions from various datasets in the
NTCIR-11 Math-2, and NTCIR-12 MathIR format.
Returns
-------
(float, KernelDensity, KernelDensity)
An estimate of P(relevant), and estimators of p(position), and p(position | relevant).
"""
samples_all = [
(position,) for _, positions in positions_all.items() for position in positions]
samples_relevant = [
(position,) for _, positions in positions_relevant.items() for position in positions]
estimators = dict()
estimators["P(relevant)"] = len(samples_relevant) / len(samples_all)
LOGGER.info("Fitting prior p(position) density estimator")
estimators["p(position)"] = KernelDensity(**KERNEL).fit(samples_all)
LOGGER.info("Fitting conditional p(position | relevant) density estimator")
estimators["p(position|relevant)"] = KernelDensity(**KERNEL).fit(samples_relevant)
return (
estimators["P(relevant)"], estimators["p(position)"], estimators["p(position|relevant)"])
|
python
|
def get_estimators(positions_all, positions_relevant):
"""
Extracts density estimators from a judged sample of paragraph positions.
Parameters
----------
positions_all : dict of (Path, float)
A sample of paragraph positions from various datasets in the NTCIR-11
Math-2, and NTCIR-12 MathIR format.
positions_relevant : dict of (Path, float)
A sample of relevant paragraph positions from various datasets in the
NTCIR-11 Math-2, and NTCIR-12 MathIR format.
Returns
-------
(float, KernelDensity, KernelDensity)
An estimate of P(relevant), and estimators of p(position), and p(position | relevant).
"""
samples_all = [
(position,) for _, positions in positions_all.items() for position in positions]
samples_relevant = [
(position,) for _, positions in positions_relevant.items() for position in positions]
estimators = dict()
estimators["P(relevant)"] = len(samples_relevant) / len(samples_all)
LOGGER.info("Fitting prior p(position) density estimator")
estimators["p(position)"] = KernelDensity(**KERNEL).fit(samples_all)
LOGGER.info("Fitting conditional p(position | relevant) density estimator")
estimators["p(position|relevant)"] = KernelDensity(**KERNEL).fit(samples_relevant)
return (
estimators["P(relevant)"], estimators["p(position)"], estimators["p(position|relevant)"])
|
[
"def",
"get_estimators",
"(",
"positions_all",
",",
"positions_relevant",
")",
":",
"samples_all",
"=",
"[",
"(",
"position",
",",
")",
"for",
"_",
",",
"positions",
"in",
"positions_all",
".",
"items",
"(",
")",
"for",
"position",
"in",
"positions",
"]",
"samples_relevant",
"=",
"[",
"(",
"position",
",",
")",
"for",
"_",
",",
"positions",
"in",
"positions_relevant",
".",
"items",
"(",
")",
"for",
"position",
"in",
"positions",
"]",
"estimators",
"=",
"dict",
"(",
")",
"estimators",
"[",
"\"P(relevant)\"",
"]",
"=",
"len",
"(",
"samples_relevant",
")",
"/",
"len",
"(",
"samples_all",
")",
"LOGGER",
".",
"info",
"(",
"\"Fitting prior p(position) density estimator\"",
")",
"estimators",
"[",
"\"p(position)\"",
"]",
"=",
"KernelDensity",
"(",
"*",
"*",
"KERNEL",
")",
".",
"fit",
"(",
"samples_all",
")",
"LOGGER",
".",
"info",
"(",
"\"Fitting conditional p(position | relevant) density estimator\"",
")",
"estimators",
"[",
"\"p(position|relevant)\"",
"]",
"=",
"KernelDensity",
"(",
"*",
"*",
"KERNEL",
")",
".",
"fit",
"(",
"samples_relevant",
")",
"return",
"(",
"estimators",
"[",
"\"P(relevant)\"",
"]",
",",
"estimators",
"[",
"\"p(position)\"",
"]",
",",
"estimators",
"[",
"\"p(position|relevant)\"",
"]",
")"
] |
Extracts density estimators from a judged sample of paragraph positions.
Parameters
----------
positions_all : dict of (Path, float)
A sample of paragraph positions from various datasets in the NTCIR-11
Math-2, and NTCIR-12 MathIR format.
positions_relevant : dict of (Path, float)
A sample of relevant paragraph positions from various datasets in the
NTCIR-11 Math-2, and NTCIR-12 MathIR format.
Returns
-------
(float, KernelDensity, KernelDensity)
An estimate of P(relevant), and estimators of p(position), and p(position | relevant).
|
[
"Extracts",
"density",
"estimators",
"from",
"a",
"judged",
"sample",
"of",
"paragraph",
"positions",
"."
] |
648c74bfc5bd304603ef67da753ff25b65e829ef
|
https://github.com/MIR-MU/ntcir-math-density/blob/648c74bfc5bd304603ef67da753ff25b65e829ef/ntcir_math_density/estimator.py#L163-L192
|
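Fitting a 1-D KernelDensity as get_estimators does; the repo's KERNEL kwargs are not shown in this excerpt, so a gaussian kernel with a guessed bandwidth stands in:

from sklearn.neighbors import KernelDensity

samples = [(0.05,), (0.10,), (0.12,), (0.80,)]  # toy 1-D position samples
kde = KernelDensity(kernel="gaussian", bandwidth=0.1).fit(samples)
log_density = kde.score_samples([(0.10,)])      # log p(position = 0.10)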
238,675
|
MIR-MU/ntcir-math-density
|
ntcir_math_density/estimator.py
|
get_estimates
|
def get_estimates(estimators_tuple, positions, num_workers=1):
"""
Estimates densities, and probabilities for paragraph positions.
Parameters
----------
estimators_tuple : (float, KernelDensity, KernelDensity)
An estimate of the prior probability P(relevant), an estimator of the prior density
p(position), and an estimator of the conditional density p(position | relevant).
positions : iterable of float
Paragraph positions for which densities, and probabilities will be estimated.
num_workers : int, optional
The number of processes that will compute the estimates.
Returns
-------
five-tuple of (sequence of float)
Estimates of P(relevant), p(position), p(position | relevant), P(position, relevant), and
P(relevant | position) in the form of histograms.
"""
estimators = dict()
estimators["P(relevant)"], estimators["p(position)"], \
estimators["p(position|relevant)"] = estimators_tuple
log_estimates = dict()
log_estimates["P(relevant)"] = log(estimators["P(relevant)"])
X = [(position,) for position in positions]
with Pool(num_workers) as pool:
first_job = pool.map_async(estimators["p(position)"].score_samples, tqdm(
array_split(X, num_workers), desc="p(position)"))
second_job = pool.map_async(estimators["p(position|relevant)"].score_samples, tqdm(
array_split(X, num_workers), desc="p(position | relevant)"))
log_estimates["p(position)"] = concatenate(first_job.get())
log_estimates["p(position|relevant)"] = concatenate(second_job.get())
log_estimates["P(position,relevant)"] = \
log_estimates["p(position|relevant)"] + log_estimates["P(relevant)"]
log_estimates["P(relevant|position)"] = \
log_estimates["P(position,relevant)"] - log_estimates["p(position)"]
return (
[estimators["P(relevant)"]] * len(X), exp(log_estimates["p(position)"]),
exp(log_estimates["p(position|relevant)"]), exp(log_estimates["P(position,relevant)"]),
exp(log_estimates["P(relevant|position)"]))
|
python
|
def get_estimates(estimators_tuple, positions, num_workers=1):
"""
Estimates densities, and probabilities for paragraph positions.
Parameters
----------
estimators_tuple : (float, KernelDensity, KernelDensity)
An estimate of the prior probability P(relevant), an estimator of the prior density
p(position), and an estimator of the conditional density p(position | relevant).
positions : iterable of float
Paragraph positions for which densities, and probabilities will be estimated.
num_workers : int, optional
The number of processes that will compute the estimates.
Returns
-------
five-tuple of (sequence of float)
Estimates of P(relevant), p(position), p(position | relevant), P(position, relevant), and
P(relevant | position) in the form of histograms.
"""
estimators = dict()
estimators["P(relevant)"], estimators["p(position)"], \
estimators["p(position|relevant)"] = estimators_tuple
log_estimates = dict()
log_estimates["P(relevant)"] = log(estimators["P(relevant)"])
X = [(position,) for position in positions]
with Pool(num_workers) as pool:
first_job = pool.map_async(estimators["p(position)"].score_samples, tqdm(
array_split(X, num_workers), desc="p(position)"))
second_job = pool.map_async(estimators["p(position|relevant)"].score_samples, tqdm(
array_split(X, num_workers), desc="p(position | relevant)"))
log_estimates["p(position)"] = concatenate(first_job.get())
log_estimates["p(position|relevant)"] = concatenate(second_job.get())
log_estimates["P(position,relevant)"] = \
log_estimates["p(position|relevant)"] + log_estimates["P(relevant)"]
log_estimates["P(relevant|position)"] = \
log_estimates["P(position,relevant)"] - log_estimates["p(position)"]
return (
[estimators["P(relevant)"]] * len(X), exp(log_estimates["p(position)"]),
exp(log_estimates["p(position|relevant)"]), exp(log_estimates["P(position,relevant)"]),
exp(log_estimates["P(relevant|position)"]))
|
[
"def",
"get_estimates",
"(",
"estimators_tuple",
",",
"positions",
",",
"num_workers",
"=",
"1",
")",
":",
"estimators",
"=",
"dict",
"(",
")",
"estimators",
"[",
"\"P(relevant)\"",
"]",
",",
"estimators",
"[",
"\"p(position)\"",
"]",
",",
"estimators",
"[",
"\"p(position|relevant)\"",
"]",
"=",
"estimators_tuple",
"log_estimates",
"=",
"dict",
"(",
")",
"log_estimates",
"[",
"\"P(relevant)\"",
"]",
"=",
"log",
"(",
"estimators",
"[",
"\"P(relevant)\"",
"]",
")",
"X",
"=",
"[",
"(",
"position",
",",
")",
"for",
"position",
"in",
"positions",
"]",
"with",
"Pool",
"(",
"num_workers",
")",
"as",
"pool",
":",
"first_job",
"=",
"pool",
".",
"map_async",
"(",
"estimators",
"[",
"\"p(position)\"",
"]",
".",
"score_samples",
",",
"tqdm",
"(",
"array_split",
"(",
"X",
",",
"num_workers",
")",
",",
"desc",
"=",
"\"p(position)\"",
")",
")",
"second_job",
"=",
"pool",
".",
"map_async",
"(",
"estimators",
"[",
"\"p(position|relevant)\"",
"]",
".",
"score_samples",
",",
"tqdm",
"(",
"array_split",
"(",
"X",
",",
"num_workers",
")",
",",
"desc",
"=",
"\"p(position | relevant)\"",
")",
")",
"log_estimates",
"[",
"\"p(position)\"",
"]",
"=",
"concatenate",
"(",
"first_job",
".",
"get",
"(",
")",
")",
"log_estimates",
"[",
"\"p(position|relevant)\"",
"]",
"=",
"concatenate",
"(",
"second_job",
".",
"get",
"(",
")",
")",
"log_estimates",
"[",
"\"P(position,relevant)\"",
"]",
"=",
"log_estimates",
"[",
"\"p(position|relevant)\"",
"]",
"+",
"log_estimates",
"[",
"\"P(relevant)\"",
"]",
"log_estimates",
"[",
"\"P(relevant|position)\"",
"]",
"=",
"log_estimates",
"[",
"\"P(position,relevant)\"",
"]",
"-",
"log_estimates",
"[",
"\"p(position)\"",
"]",
"return",
"(",
"[",
"estimators",
"[",
"\"P(relevant)\"",
"]",
"]",
"*",
"len",
"(",
"X",
")",
",",
"exp",
"(",
"log_estimates",
"[",
"\"p(position)\"",
"]",
")",
",",
"exp",
"(",
"log_estimates",
"[",
"\"p(position|relevant)\"",
"]",
")",
",",
"exp",
"(",
"log_estimates",
"[",
"\"P(position,relevant)\"",
"]",
")",
",",
"exp",
"(",
"log_estimates",
"[",
"\"P(relevant|position)\"",
"]",
")",
")"
] |
Estimates densities, and probabilities for paragraph positions.
Parameters
----------
estimators_tuple : (float, KernelDensity, KernelDensity)
An estimate of the prior probability P(relevant), an estimator of the prior density
p(position), and an estimator of the conditional density p(position | relevant).
positions : iterable of float
Paragraph positions for which densities, and probabilities will be estimated.
num_workers : int, optional
The number of processes that will compute the estimates.
Returns
-------
five-tuple of (sequence of float)
Estimates of P(relevant), p(position), p(position | relevant), P(position, relevant), and
P(relevant | position) in the form of histograms.
|
[
"Estimates",
"densities",
"and",
"probabilities",
"for",
"paragraph",
"positions",
"."
] |
648c74bfc5bd304603ef67da753ff25b65e829ef
|
https://github.com/MIR-MU/ntcir-math-density/blob/648c74bfc5bd304603ef67da753ff25b65e829ef/ntcir_math_density/estimator.py#L195-L236
|
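The estimator above combines a scalar prior with two kernel density estimates through Bayes' rule in log space: log P(relevant|position) = log p(position|relevant) + log P(relevant) - log p(position). A minimal single-process sketch of the same computation with scikit-learn's KernelDensity, using hypothetical training samples:

from numpy import exp, log
from sklearn.neighbors import KernelDensity

# Hypothetical 1-D training data: all paragraph positions, and the relevant ones.
all_positions = [[0.1], [0.2], [0.5], [0.8], [0.9]]
relevant_positions = [[0.1], [0.2]]

prior = len(relevant_positions) / len(all_positions)            # P(relevant)
p_position = KernelDensity(bandwidth=0.1).fit(all_positions)
p_position_given_relevant = KernelDensity(bandwidth=0.1).fit(relevant_positions)

X = [[0.15], [0.85]]
log_posterior = (p_position_given_relevant.score_samples(X)     # log p(position|relevant)
                 + log(prior)                                   # + log P(relevant)
                 - p_position.score_samples(X))                 # - log p(position)
print(exp(log_posterior))                                       # P(relevant|position)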
238,676
|
foliant-docs/foliantcontrib.mkdocs
|
foliant/backends/mkdocs.py
|
Backend._get_build_command
|
def _get_build_command(self, mkdocs_site_path: Path) -> str:
'''Generate ``mkdocs build`` command to build the site.
:param mkdocs_site_path: Path to the output directory for the site
'''
components = [self._mkdocs_config.get('mkdocs_path', 'mkdocs')]
components.append('build')
components.append(f'-d "{self._escape_control_characters(str(mkdocs_site_path))}"')
command = ' '.join(components)
self.logger.debug(f'Build command: {command}')
return command
|
python
|
def _get_build_command(self, mkdocs_site_path: Path) -> str:
'''Generate ``mkdocs build`` command to build the site.
:param mkdocs_site_path: Path to the output directory for the site
'''
components = [self._mkdocs_config.get('mkdocs_path', 'mkdocs')]
components.append('build')
components.append(f'-d "{self._escape_control_characters(str(mkdocs_site_path))}"')
command = ' '.join(components)
self.logger.debug(f'Build command: {command}')
return command
|
[
"def",
"_get_build_command",
"(",
"self",
",",
"mkdocs_site_path",
":",
"Path",
")",
"->",
"str",
":",
"components",
"=",
"[",
"self",
".",
"_mkdocs_config",
".",
"get",
"(",
"'mkdocs_path'",
",",
"'mkdocs'",
")",
"]",
"components",
".",
"append",
"(",
"'build'",
")",
"components",
".",
"append",
"(",
"f'-d \"{self._escape_control_characters(str(mkdocs_site_path))}\"'",
")",
"command",
"=",
"' '",
".",
"join",
"(",
"components",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"f'Build command: {command}'",
")",
"return",
"command"
] |
Generate ``mkdocs build`` command to build the site.
:param mkdocs_site_path: Path to the output directory for the site
|
[
"Generate",
"mkdocs",
"build",
"command",
"to",
"build",
"the",
"site",
"."
] |
5f71a47139ab1cb630f1b61d4cef1c0657001272
|
https://github.com/foliant-docs/foliantcontrib.mkdocs/blob/5f71a47139ab1cb630f1b61d4cef1c0657001272/foliant/backends/mkdocs.py#L49-L63
|
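The escaping above goes through a project-specific _escape_control_characters helper before the path lands in a shell string; in standalone code, shlex.quote offers comparable protection. A sketch, not the Foliant implementation:

from pathlib import Path
from shlex import quote

def build_command(site_path: Path, mkdocs_path: str = 'mkdocs') -> str:
    # quote() guards against spaces and shell metacharacters in the path
    return ' '.join([mkdocs_path, 'build', '-d', quote(str(site_path))])

print(build_command(Path('/tmp/my site')))  # mkdocs build -d '/tmp/my site'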
238,677
|
foliant-docs/foliantcontrib.mkdocs
|
foliant/backends/mkdocs.py
|
Backend._get_ghp_command
|
def _get_ghp_command(self) -> str:
'''Generate ``mkdocs gh-deploy`` command to deploy the site to GitHub Pages.'''
components = [self._mkdocs_config.get('mkdocs_path', 'mkdocs')]
components.append('gh-deploy')
command = ' '.join(components)
self.logger.debug(f'GHP upload command: {command}')
return command
|
python
|
def _get_ghp_command(self) -> str:
'''Generate ``mkdocs gh-deploy`` command to deploy the site to GitHub Pages.'''
components = [self._mkdocs_config.get('mkdocs_path', 'mkdocs')]
components.append('gh-deploy')
command = ' '.join(components)
self.logger.debug(f'GHP upload command: {command}')
return command
|
[
"def",
"_get_ghp_command",
"(",
"self",
")",
"->",
"str",
":",
"components",
"=",
"[",
"self",
".",
"_mkdocs_config",
".",
"get",
"(",
"'mkdocs_path'",
",",
"'mkdocs'",
")",
"]",
"components",
".",
"append",
"(",
"'gh-deploy'",
")",
"command",
"=",
"' '",
".",
"join",
"(",
"components",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"f'GHP upload command: {command}'",
")",
"return",
"command"
] |
Generate ``mkdocs gh-deploy`` command to deploy the site to GitHub Pages.
|
[
"Generate",
"mkdocs",
"gh",
"-",
"deploy",
"command",
"to",
"deploy",
"the",
"site",
"to",
"GitHub",
"Pages",
"."
] |
5f71a47139ab1cb630f1b61d4cef1c0657001272
|
https://github.com/foliant-docs/foliantcontrib.mkdocs/blob/5f71a47139ab1cb630f1b61d4cef1c0657001272/foliant/backends/mkdocs.py#L65-L75
|
238,678
|
foliant-docs/foliantcontrib.mkdocs
|
foliant/backends/mkdocs.py
|
Backend._get_page_with_optional_heading
|
def _get_page_with_optional_heading(self, page_file_path: str) -> str or Dict:
'''Get the content of first heading of source Markdown file, if the file
contains any headings. Return a data element of ``pages`` section
of ``mkdocs.yml`` file.
:param page_file_path: path to source Markdown file
:returns: Unchanged file path or a dictionary: content of first heading, file path
'''
self.logger.debug(f'Looking for the first heading in {page_file_path}')
if page_file_path.endswith('.md'):
page_file_full_path = self.project_path / self.config['src_dir'] / page_file_path
with open(page_file_full_path, encoding='utf8') as page_file:
content = page_file.read()
headings_found = search(
r'^\s*#{1,6}[ \t]+([^\r\n]+?)(?:[ \t]+\{#\S+\})?\s*[\r\n]+',
content
)
if headings_found:
first_heading = headings_found.group(1)
self.logger.debug(f'Heading found: {first_heading}')
return {first_heading: page_file_path}
self.logger.debug(f'No heading found, returning original file path.')
return page_file_path
|
python
|
def _get_page_with_optional_heading(self, page_file_path: str) -> str or Dict:
'''Get the content of first heading of source Markdown file, if the file
contains any headings. Return a data element of ``pages`` section
of ``mkdocs.yml`` file.
:param page_file_path: path to source Markdown file
:returns: Unchanged file path or a dictionary: content of first heading, file path
'''
self.logger.debug(f'Looking for the first heading in {page_file_path}')
if page_file_path.endswith('.md'):
page_file_full_path = self.project_path / self.config['src_dir'] / page_file_path
with open(page_file_full_path, encoding='utf8') as page_file:
content = page_file.read()
headings_found = search(
r'^\s*#{1,6}[ \t]+([^\r\n]+?)(?:[ \t]+\{#\S+\})?\s*[\r\n]+',
content
)
if headings_found:
first_heading = headings_found.group(1)
self.logger.debug(f'Heading found: {first_heading}')
return {first_heading: page_file_path}
self.logger.debug(f'No heading found, returning original file path.')
return page_file_path
|
[
"def",
"_get_page_with_optional_heading",
"(",
"self",
",",
"page_file_path",
":",
"str",
")",
"->",
"str",
"or",
"Dict",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"f'Looking for the first heading in {page_file_path}'",
")",
"if",
"page_file_path",
".",
"endswith",
"(",
"'.md'",
")",
":",
"page_file_full_path",
"=",
"self",
".",
"project_path",
"/",
"self",
".",
"config",
"[",
"'src_dir'",
"]",
"/",
"page_file_path",
"with",
"open",
"(",
"page_file_full_path",
",",
"encoding",
"=",
"'utf8'",
")",
"as",
"page_file",
":",
"content",
"=",
"page_file",
".",
"read",
"(",
")",
"headings_found",
"=",
"search",
"(",
"r'^\\s*#{1,6}[ \\t]+([^\\r\\n]+?)(?:[ \\t]+\\{#\\S+\\})?\\s*[\\r\\n]+'",
",",
"content",
")",
"if",
"headings_found",
":",
"first_heading",
"=",
"headings_found",
".",
"group",
"(",
"1",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"f'Heading found: {first_heading}'",
")",
"return",
"{",
"first_heading",
":",
"page_file_path",
"}",
"self",
".",
"logger",
".",
"debug",
"(",
"f'No heading found, returning original file path.'",
")",
"return",
"page_file_path"
] |
Get the content of first heading of source Markdown file, if the file
contains any headings. Return a data element of ``pages`` section
of ``mkdocs.yml`` file.
:param page_file_path: path to source Markdown file
:returns: Unchanged file path or a dictionary: content of first heading, file path
|
[
"Get",
"the",
"content",
"of",
"first",
"heading",
"of",
"source",
"Markdown",
"file",
"if",
"the",
"file",
"contains",
"any",
"headings",
".",
"Return",
"a",
"data",
"element",
"of",
"pages",
"section",
"of",
"mkdocs",
".",
"yml",
"file",
"."
] |
5f71a47139ab1cb630f1b61d4cef1c0657001272
|
https://github.com/foliant-docs/foliantcontrib.mkdocs/blob/5f71a47139ab1cb630f1b61d4cef1c0657001272/foliant/backends/mkdocs.py#L77-L106
|
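The regex above captures the first ATX heading and drops an optional {#anchor} suffix; a quick standalone check on a sample string:

from re import search

content = '# Getting Started {#start}\n\nSome text.\n'
match = search(r'^\s*#{1,6}[ \t]+([^\r\n]+?)(?:[ \t]+\{#\S+\})?\s*[\r\n]+', content)
if match:
    print(match.group(1))  # Getting Started  (the {#start} anchor is stripped)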
238,679
|
jespino/anillo
|
anillo/middlewares/json.py
|
wrap_json
|
def wrap_json(func=None, *, encoder=json.JSONEncoder, preserve_raw_body=False):
"""
A middleware that parses the body of json requests and
encodes the json responses.
NOTE: this middleware exists just for backward compatibility,
but it has some limitations in terms of response body encoding
    because it only accepts list or dictionary outputs, while the json
    specification allows storing other values as well.
    It is recommended to use `wrap_json_body` and `wrap_json_response`
    instead of this.
"""
if func is None:
return functools.partial(
wrap_json,
encoder=encoder,
preserve_raw_body=preserve_raw_body
)
wrapped_func = wrap_json_body(func, preserve_raw_body=preserve_raw_body)
wrapped_func = wrap_json_response(wrapped_func, encoder=encoder)
return wrapped_func
|
python
|
def wrap_json(func=None, *, encoder=json.JSONEncoder, preserve_raw_body=False):
"""
A middleware that parses the body of json requests and
encodes the json responses.
NOTE: this middleware exists just for backward compatibility,
but it has some limitations in terms of response body encoding
    because it only accepts list or dictionary outputs, while the json
    specification allows storing other values as well.
    It is recommended to use `wrap_json_body` and `wrap_json_response`
    instead of this.
"""
if func is None:
return functools.partial(
wrap_json,
encoder=encoder,
preserve_raw_body=preserve_raw_body
)
wrapped_func = wrap_json_body(func, preserve_raw_body=preserve_raw_body)
wrapped_func = wrap_json_response(wrapped_func, encoder=encoder)
return wrapped_func
|
[
"def",
"wrap_json",
"(",
"func",
"=",
"None",
",",
"*",
",",
"encoder",
"=",
"json",
".",
"JSONEncoder",
",",
"preserve_raw_body",
"=",
"False",
")",
":",
"if",
"func",
"is",
"None",
":",
"return",
"functools",
".",
"partial",
"(",
"wrap_json",
",",
"encoder",
"=",
"encoder",
",",
"preserve_raw_body",
"=",
"preserve_raw_body",
")",
"wrapped_func",
"=",
"wrap_json_body",
"(",
"func",
",",
"preserve_raw_body",
"=",
"preserve_raw_body",
")",
"wrapped_func",
"=",
"wrap_json_response",
"(",
"wrapped_func",
",",
"encoder",
"=",
"encoder",
")",
"return",
"wrapped_func"
] |
A middleware that parses the body of json requests and
encodes the json responses.
NOTE: this middleware exists just for backward compatibility,
but it has some limitations in terms of response body encoding
because it only accepts list or dictionary outputs, while the json
specification allows storing other values as well.
It is recommended to use `wrap_json_body` and `wrap_json_response`
instead of this.
|
[
"A",
"middleware",
"that",
"parses",
"the",
"body",
"of",
"json",
"requests",
"and",
"encodes",
"the",
"json",
"responses",
"."
] |
901a84fd2b4fa909bc06e8bd76090457990576a7
|
https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/middlewares/json.py#L9-L32
|
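The func=None default plus functools.partial above is the usual idiom for a decorator that works both bare and with keyword arguments. A self-contained sketch of the pattern using a hypothetical prefix decorator, not tied to anillo:

import functools

def wrap(func=None, *, prefix='[json] '):
    if func is None:
        # Called as @wrap(prefix=...): return a decorator awaiting the function.
        return functools.partial(wrap, prefix=prefix)
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return prefix + str(func(*args, **kwargs))
    return wrapper

@wrap
def plain():
    return 'one'

@wrap(prefix='>> ')
def parametrised():
    return 'two'

print(plain(), parametrised())  # [json] one >> two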
238,680
|
jespino/anillo
|
anillo/middlewares/json.py
|
wrap_json_params
|
def wrap_json_params(func):
"""
A middleware that parses the body of json requests and
    adds it to the request under the `params` key.
"""
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if ctype == "application/json":
request.params = json.loads(request.body.decode("utf-8")) if request.body else None
return func(request, *args, **kwargs)
return wrapper
|
python
|
def wrap_json_params(func):
"""
A middleware that parses the body of json requests and
    adds it to the request under the `params` key.
"""
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if ctype == "application/json":
request.params = json.loads(request.body.decode("utf-8")) if request.body else None
return func(request, *args, **kwargs)
return wrapper
|
[
"def",
"wrap_json_params",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"ctype",
",",
"pdict",
"=",
"parse_header",
"(",
"request",
".",
"headers",
".",
"get",
"(",
"'Content-Type'",
",",
"''",
")",
")",
"if",
"ctype",
"==",
"\"application/json\"",
":",
"request",
".",
"params",
"=",
"json",
".",
"loads",
"(",
"request",
".",
"body",
".",
"decode",
"(",
"\"utf-8\"",
")",
")",
"if",
"request",
".",
"body",
"else",
"None",
"return",
"func",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
] |
A middleware that parses the body of json requests and
adds it to the request under the `params` key.
|
[
"A",
"middleware",
"that",
"parses",
"the",
"body",
"of",
"json",
"requests",
"and",
"add",
"it",
"to",
"the",
"request",
"under",
"the",
"params",
"key",
"."
] |
901a84fd2b4fa909bc06e8bd76090457990576a7
|
https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/middlewares/json.py#L60-L72
|
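The parse_header call above is presumably the cgi module's (its import falls outside the excerpt); it splits a Content-Type value into the media type and a parameter dict. A quick illustration (cgi is deprecated since Python 3.11 and removed in 3.13):

from cgi import parse_header

ctype, pdict = parse_header('application/json; charset=utf-8')
print(ctype)   # application/json
print(pdict)   # {'charset': 'utf-8'}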
238,681
|
rvswift/EB
|
EB/builder/splitter/splitter.py
|
run
|
def run(itf):
"""
Run preprocess functions
"""
if not itf:
return 1
# access command-line arguments
options = SplitInput(itf)
# read input
infile = os.path.abspath(options.input)
molList = read_csv(infile, options)
# split molList into actives and decoys
activeList, decoyList = partition(molList, options)
# split actives and decoys into training and validation sets
trainset, valset = split(activeList, decoyList, options)
# write csv files formatted for ensemble builder
csv_writer(trainset, options, 'training_set')
csv_writer(valset, options, 'test_set')
|
python
|
def run(itf):
"""
Run preprocess functions
"""
if not itf:
return 1
# access command-line arguments
options = SplitInput(itf)
# read input
infile = os.path.abspath(options.input)
molList = read_csv(infile, options)
# split molList into actives and decoys
activeList, decoyList = partition(molList, options)
# split actives and decoys into training and validation sets
trainset, valset = split(activeList, decoyList, options)
# write csv files formatted for ensemble builder
csv_writer(trainset, options, 'training_set')
csv_writer(valset, options, 'test_set')
|
[
"def",
"run",
"(",
"itf",
")",
":",
"if",
"not",
"itf",
":",
"return",
"1",
"# access command-line arguments",
"options",
"=",
"SplitInput",
"(",
"itf",
")",
"# read input",
"infile",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"options",
".",
"input",
")",
"molList",
"=",
"read_csv",
"(",
"infile",
",",
"options",
")",
"# split molList into actives and decoys",
"activeList",
",",
"decoyList",
"=",
"partition",
"(",
"molList",
",",
"options",
")",
"# split actives and decoys into training and validation sets",
"trainset",
",",
"valset",
"=",
"split",
"(",
"activeList",
",",
"decoyList",
",",
"options",
")",
"# write csv files formatted for ensemble builder",
"csv_writer",
"(",
"trainset",
",",
"options",
",",
"'training_set'",
")",
"csv_writer",
"(",
"valset",
",",
"options",
",",
"'test_set'",
")"
] |
Run preprocess functions
|
[
"Run",
"preprocess",
"functions"
] |
341880b79faf8147dc9fa6e90438531cd09fabcc
|
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/splitter/splitter.py#L11-L34
|
238,682
|
rvswift/EB
|
EB/builder/splitter/splitter.py
|
write_csv_header
|
def write_csv_header(mol, csv_writer):
"""
Write the csv header
"""
# create line list where line elements for writing will be stored
line = []
# ID
line.append('id')
# status
line.append('status')
# query labels
queryList = mol.properties.keys()
for queryLabel in queryList:
line.append(queryLabel)
# write line
csv_writer.writerow(line)
|
python
|
def write_csv_header(mol, csv_writer):
"""
Write the csv header
"""
# create line list where line elements for writing will be stored
line = []
# ID
line.append('id')
# status
line.append('status')
# query labels
queryList = mol.properties.keys()
for queryLabel in queryList:
line.append(queryLabel)
# write line
csv_writer.writerow(line)
|
[
"def",
"write_csv_header",
"(",
"mol",
",",
"csv_writer",
")",
":",
"# create line list where line elements for writing will be stored",
"line",
"=",
"[",
"]",
"# ID",
"line",
".",
"append",
"(",
"'id'",
")",
"# status",
"line",
".",
"append",
"(",
"'status'",
")",
"# query labels",
"queryList",
"=",
"mol",
".",
"properties",
".",
"keys",
"(",
")",
"for",
"queryLabel",
"in",
"queryList",
":",
"line",
".",
"append",
"(",
"queryLabel",
")",
"# write line",
"csv_writer",
".",
"writerow",
"(",
"line",
")"
] |
Write the csv header
|
[
"Write",
"the",
"csv",
"header"
] |
341880b79faf8147dc9fa6e90438531cd09fabcc
|
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/splitter/splitter.py#L37-L57
|
238,683
|
rvswift/EB
|
EB/builder/splitter/splitter.py
|
write_csv_line
|
def write_csv_line(mol, csv_writer, options):
"""
Parse mol object and write a line to the csv file
"""
# set variables
status_field = options.status_field
# elements for writing will be stored in the line list
line = []
# ID
id = mol.GetProp('id')
if id is not None:
line.append(id)
else:
line.append('n/a')
# status
line.append(mol.GetProp(status_field))
# query labels
queryList = mol.properties.keys()
for queryLabel in queryList:
line.append(mol.properties[queryLabel])
# write line
csv_writer.writerow(line)
|
python
|
def write_csv_line(mol, csv_writer, options):
"""
Parse mol object and write a line to the csv file
"""
# set variables
status_field = options.status_field
# elements for writing will be stored in the line list
line = []
# ID
id = mol.GetProp('id')
if id is not None:
line.append(id)
else:
line.append('n/a')
# status
line.append(mol.GetProp(status_field))
# query labels
queryList = mol.properties.keys()
for queryLabel in queryList:
line.append(mol.properties[queryLabel])
# write line
csv_writer.writerow(line)
|
[
"def",
"write_csv_line",
"(",
"mol",
",",
"csv_writer",
",",
"options",
")",
":",
"# set variables",
"status_field",
"=",
"options",
".",
"status_field",
"# elements for writing will be stored in the line list",
"line",
"=",
"[",
"]",
"# ID",
"id",
"=",
"mol",
".",
"GetProp",
"(",
"'id'",
")",
"if",
"id",
"is",
"not",
"None",
":",
"line",
".",
"append",
"(",
"id",
")",
"else",
":",
"line",
".",
"append",
"(",
"'n/a'",
")",
"# status",
"line",
".",
"append",
"(",
"mol",
".",
"GetProp",
"(",
"status_field",
")",
")",
"# query labels",
"queryList",
"=",
"mol",
".",
"properties",
".",
"keys",
"(",
")",
"for",
"queryLabel",
"in",
"queryList",
":",
"line",
".",
"append",
"(",
"mol",
".",
"properties",
"[",
"queryLabel",
"]",
")",
"# write line",
"csv_writer",
".",
"writerow",
"(",
"line",
")"
] |
Parse mol object and write a line to the csv file
|
[
"Parse",
"mol",
"object",
"and",
"write",
"a",
"line",
"to",
"the",
"csv",
"file"
] |
341880b79faf8147dc9fa6e90438531cd09fabcc
|
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/splitter/splitter.py#L60-L87
|
238,684
|
rvswift/EB
|
EB/builder/splitter/splitter.py
|
csv_writer
|
def csv_writer(molecules, options, prefix):
"""
Write a csv file.
"""
# output file
outdir = os.getcwd()
filename = prefix + '.csv'
outfile = os.path.join(outdir, filename)
# initiate csv writer object
f = open(outfile, 'w')
csv_writer = csv.writer(f)
# write csv header
mol = molecules[0]
write_csv_header(mol, csv_writer)
# write csv lines
for mol in molecules:
write_csv_line(mol, csv_writer, options)
# close file
f.close()
|
python
|
def csv_writer(molecules, options, prefix):
"""
Write a csv file.
"""
# output file
outdir = os.getcwd()
filename = prefix + '.csv'
outfile = os.path.join(outdir, filename)
# initiate csv writer object
f = open(outfile, 'w')
csv_writer = csv.writer(f)
# write csv header
mol = molecules[0]
write_csv_header(mol, csv_writer)
# write csv lines
for mol in molecules:
write_csv_line(mol, csv_writer, options)
# close file
f.close()
|
[
"def",
"csv_writer",
"(",
"molecules",
",",
"options",
",",
"prefix",
")",
":",
"# output file",
"outdir",
"=",
"os",
".",
"getcwd",
"(",
")",
"filename",
"=",
"prefix",
"+",
"'.csv'",
"outfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outdir",
",",
"filename",
")",
"# initiate csv writer object",
"f",
"=",
"open",
"(",
"outfile",
",",
"'w'",
")",
"csv_writer",
"=",
"csv",
".",
"writer",
"(",
"f",
")",
"# write csv header",
"mol",
"=",
"molecules",
"[",
"0",
"]",
"write_csv_header",
"(",
"mol",
",",
"csv_writer",
")",
"# write csv lines",
"for",
"mol",
"in",
"molecules",
":",
"write_csv_line",
"(",
"mol",
",",
"csv_writer",
",",
"options",
")",
"# close file",
"f",
".",
"close",
"(",
")"
] |
Write a csv file.
|
[
"Write",
"a",
"csv",
"file",
"."
] |
341880b79faf8147dc9fa6e90438531cd09fabcc
|
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/splitter/splitter.py#L90-L113
|
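For reference, the same write with a context manager and newline='', which the csv docs recommend on Python 3 to avoid blank lines on Windows; a sketch with hypothetical rows:

import csv
import os

def write_rows(rows, prefix):
    outfile = os.path.join(os.getcwd(), prefix + '.csv')
    with open(outfile, 'w', newline='') as f:  # the file closes even on error
        csv.writer(f).writerows(rows)

write_rows([['id', 'status'], ['mol1', 'active']], 'example_set')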
238,685
|
rvswift/EB
|
EB/builder/splitter/splitter.py
|
split
|
def split(activeList, decoyList, options):
"""
Create training and validation sets
"""
# set input variables
training_fraction = options.training_fraction
decoy_to_active = options.decoy_to_active
# take care of default decoy_to_active ratio
if decoy_to_active is None:
decoy_to_active = len(decoyList) / len(activeList)
# verify that there are enough molecules to satisfy the ratio
if len(decoyList) < (len(activeList) * decoy_to_active):
max = len(decoyList) / len(activeList)
print("\n The maximum decoy to active ratio the input file will support is {f} \n".format(f=max))
sys.exit(1)
# randomly split the actives
trainsize = int(round(training_fraction * len(activeList)))
trainIndex = []
valIndex = []
trainIndex = random.sample(range(len(activeList)), trainsize)
valIndex = [x for x in range(len(activeList)) if x not in trainIndex]
trainactives = [activeList[index] for index in trainIndex]
valactives = [activeList[index] for index in valIndex]
# match up decoys
trainsize = len(trainactives) * decoy_to_active
valsize = len(valactives) * decoy_to_active
trainIndex = []
valIndex = []
trainIndex = random.sample(range(len(decoyList)), int(trainsize))
valIndex = [x for x in range(len(decoyList)) if x not in trainIndex][0:int(valsize)]
traindecoys = [decoyList[index] for index in trainIndex]
valdecoys = [decoyList[index] for index in valIndex]
# merge actives and decoys for each set
trainset = trainactives + traindecoys
valset = valactives + valdecoys
# return sets
return trainset, valset
|
python
|
def split(activeList, decoyList, options):
"""
Create training and validation sets
"""
# set input variables
training_fraction = options.training_fraction
decoy_to_active = options.decoy_to_active
# take care of default decoy_to_active ratio
if decoy_to_active is None:
decoy_to_active = len(decoyList) / len(activeList)
# verify that there are enough molecules to satisfy the ratio
if len(decoyList) < (len(activeList) * decoy_to_active):
max = len(decoyList) / len(activeList)
print("\n The maximum decoy to active ratio the input file will support is {f} \n".format(f=max))
sys.exit(1)
# randomly split the actives
trainsize = int(round(training_fraction * len(activeList)))
trainIndex = []
valIndex = []
trainIndex = random.sample(range(len(activeList)), trainsize)
valIndex = [x for x in range(len(activeList)) if x not in trainIndex]
trainactives = [activeList[index] for index in trainIndex]
valactives = [activeList[index] for index in valIndex]
# match up decoys
trainsize = len(trainactives) * decoy_to_active
valsize = len(valactives) * decoy_to_active
trainIndex = []
valIndex = []
trainIndex = random.sample(range(len(decoyList)), int(trainsize))
valIndex = [x for x in range(len(decoyList)) if x not in trainIndex][0:int(valsize)]
traindecoys = [decoyList[index] for index in trainIndex]
valdecoys = [decoyList[index] for index in valIndex]
# merge actives and decoys for each set
trainset = trainactives + traindecoys
valset = valactives + valdecoys
# return sets
return trainset, valset
|
[
"def",
"split",
"(",
"activeList",
",",
"decoyList",
",",
"options",
")",
":",
"# set input variables",
"training_fraction",
"=",
"options",
".",
"training_fraction",
"decoy_to_active",
"=",
"options",
".",
"decoy_to_active",
"# take care of default decoy_to_active ratio",
"if",
"decoy_to_active",
"is",
"None",
":",
"decoy_to_active",
"=",
"len",
"(",
"decoyList",
")",
"/",
"len",
"(",
"activeList",
")",
"# verify that there are enough molecules to satisfy the ratio",
"if",
"len",
"(",
"decoyList",
")",
"<",
"(",
"len",
"(",
"activeList",
")",
"*",
"decoy_to_active",
")",
":",
"max",
"=",
"len",
"(",
"decoyList",
")",
"/",
"len",
"(",
"activeList",
")",
"print",
"(",
"\"\\n The maximum decoy to active ratio the input file will support is {f} \\n\"",
".",
"format",
"(",
"f",
"=",
"max",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# randomly split the actives",
"trainsize",
"=",
"int",
"(",
"round",
"(",
"training_fraction",
"*",
"len",
"(",
"activeList",
")",
")",
")",
"trainIndex",
"=",
"[",
"]",
"valIndex",
"=",
"[",
"]",
"trainIndex",
"=",
"random",
".",
"sample",
"(",
"range",
"(",
"len",
"(",
"activeList",
")",
")",
",",
"trainsize",
")",
"valIndex",
"=",
"[",
"x",
"for",
"x",
"in",
"range",
"(",
"len",
"(",
"activeList",
")",
")",
"if",
"x",
"not",
"in",
"trainIndex",
"]",
"trainactives",
"=",
"[",
"activeList",
"[",
"index",
"]",
"for",
"index",
"in",
"trainIndex",
"]",
"valactives",
"=",
"[",
"activeList",
"[",
"index",
"]",
"for",
"index",
"in",
"valIndex",
"]",
"# match up decoys",
"trainsize",
"=",
"len",
"(",
"trainactives",
")",
"*",
"decoy_to_active",
"valsize",
"=",
"len",
"(",
"valactives",
")",
"*",
"decoy_to_active",
"trainIndex",
"=",
"[",
"]",
"valIndex",
"=",
"[",
"]",
"trainIndex",
"=",
"random",
".",
"sample",
"(",
"range",
"(",
"len",
"(",
"decoyList",
")",
")",
",",
"int",
"(",
"trainsize",
")",
")",
"valIndex",
"=",
"[",
"x",
"for",
"x",
"in",
"range",
"(",
"len",
"(",
"decoyList",
")",
")",
"if",
"x",
"not",
"in",
"trainIndex",
"]",
"[",
"0",
":",
"int",
"(",
"valsize",
")",
"]",
"traindecoys",
"=",
"[",
"decoyList",
"[",
"index",
"]",
"for",
"index",
"in",
"trainIndex",
"]",
"valdecoys",
"=",
"[",
"decoyList",
"[",
"index",
"]",
"for",
"index",
"in",
"valIndex",
"]",
"# merge actives and decoys for each set",
"trainset",
"=",
"trainactives",
"+",
"traindecoys",
"valset",
"=",
"valactives",
"+",
"valdecoys",
"# return sets",
"return",
"trainset",
",",
"valset"
] |
Create training and validation sets
|
[
"Create",
"training",
"and",
"validation",
"sets"
] |
341880b79faf8147dc9fa6e90438531cd09fabcc
|
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/splitter/splitter.py#L116-L163
|
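The `x not in trainIndex` comprehensions above scan a list, so the split is quadratic in the worst case; a set gives constant-time membership tests for the same random partition. A sketch with hypothetical actives:

import random

actives = list(range(10))  # hypothetical active molecules
training_fraction = 0.8

trainsize = int(round(training_fraction * len(actives)))
train_idx = set(random.sample(range(len(actives)), trainsize))
val_idx = [i for i in range(len(actives)) if i not in train_idx]  # set lookup

train = [actives[i] for i in sorted(train_idx)]
val = [actives[i] for i in val_idx]
print(len(train), len(val))  # 8 2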
238,686
|
rvswift/EB
|
EB/builder/splitter/splitter.py
|
partition
|
def partition(molList, options):
"""
Partition molList into activeList and decoyList
"""
# set input variables
status_field = options.status_field
active_label = options.active_label
decoy_label = options.decoy_label
# initiate lists
activeList = []
decoyList = []
# partition moList
for mol in molList:
if mol.GetProp(status_field) == active_label:
activeList.append(mol)
elif mol.GetProp(status_field) == decoy_label:
decoyList.append(mol)
# return partitions
return activeList, decoyList
|
python
|
def partition(molList, options):
"""
Partition molList into activeList and decoyList
"""
# set input variables
status_field = options.status_field
active_label = options.active_label
decoy_label = options.decoy_label
# initiate lists
activeList = []
decoyList = []
# partition moList
for mol in molList:
if mol.GetProp(status_field) == active_label:
activeList.append(mol)
elif mol.GetProp(status_field) == decoy_label:
decoyList.append(mol)
# return partitions
return activeList, decoyList
|
[
"def",
"partition",
"(",
"molList",
",",
"options",
")",
":",
"# set input variables",
"status_field",
"=",
"options",
".",
"status_field",
"active_label",
"=",
"options",
".",
"active_label",
"decoy_label",
"=",
"options",
".",
"decoy_label",
"# initiate lists",
"activeList",
"=",
"[",
"]",
"decoyList",
"=",
"[",
"]",
"# partition moList",
"for",
"mol",
"in",
"molList",
":",
"if",
"mol",
".",
"GetProp",
"(",
"status_field",
")",
"==",
"active_label",
":",
"activeList",
".",
"append",
"(",
"mol",
")",
"elif",
"mol",
".",
"GetProp",
"(",
"status_field",
")",
"==",
"decoy_label",
":",
"decoyList",
".",
"append",
"(",
"mol",
")",
"# return partitions",
"return",
"activeList",
",",
"decoyList"
] |
Partition molList into activeList and decoyList
|
[
"Partition",
"molList",
"into",
"activeList",
"and",
"decoyList"
] |
341880b79faf8147dc9fa6e90438531cd09fabcc
|
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/splitter/splitter.py#L166-L187
|
238,687
|
rvswift/EB
|
EB/builder/splitter/splitter.py
|
read_csv
|
def read_csv(csvfile, options):
"""
Read csv and return molList, a list of mol objects
"""
# open file or exit
name, ext = os.path.splitext(csvfile)
try:
if ext == '.gz':
f = gzip.open(csvfile, 'rb')
else:
f = open(csvfile, 'rU')
except IOError:
print(" \n '{f}' could not be opened\n".format(f=os.path.basename(csvfile)))
sys.exit(1)
# read file
csv_reader = csv.reader(f)
molList = []
linenumber = 1
for line in csv_reader:
# get column labels from the first line
if linenumber == 1:
prop_indices = read_header(line, options)
# otherwise read line & append to MolList
else:
mol = Molecule()
mol = read_line(line, options, prop_indices, mol)
# if the line's junk, skip it
if mol == 1:
print(" skipping molecule 'm'\n".format(m=(linenumber - 1)))
else:
molList.append(mol)
linenumber += 1
# return molList
return molList
|
python
|
def read_csv(csvfile, options):
"""
Read csv and return molList, a list of mol objects
"""
# open file or exit
name, ext = os.path.splitext(csvfile)
try:
if ext == '.gz':
f = gzip.open(csvfile, 'rb')
else:
f = open(csvfile, 'rU')
except IOError:
print(" \n '{f}' could not be opened\n".format(f=os.path.basename(csvfile)))
sys.exit(1)
# read file
csv_reader = csv.reader(f)
molList = []
linenumber = 1
for line in csv_reader:
# get column labels from the first line
if linenumber == 1:
prop_indices = read_header(line, options)
# otherwise read line & append to MolList
else:
mol = Molecule()
mol = read_line(line, options, prop_indices, mol)
# if the line's junk, skip it
if mol == 1:
print(" skipping molecule 'm'\n".format(m=(linenumber - 1)))
else:
molList.append(mol)
linenumber += 1
# return molList
return molList
|
[
"def",
"read_csv",
"(",
"csvfile",
",",
"options",
")",
":",
"# open file or exit",
"name",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"csvfile",
")",
"try",
":",
"if",
"ext",
"==",
"'.gz'",
":",
"f",
"=",
"gzip",
".",
"open",
"(",
"csvfile",
",",
"'rb'",
")",
"else",
":",
"f",
"=",
"open",
"(",
"csvfile",
",",
"'rU'",
")",
"except",
"IOError",
":",
"print",
"(",
"\" \\n '{f}' could not be opened\\n\"",
".",
"format",
"(",
"f",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"csvfile",
")",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# read file",
"csv_reader",
"=",
"csv",
".",
"reader",
"(",
"f",
")",
"molList",
"=",
"[",
"]",
"linenumber",
"=",
"1",
"for",
"line",
"in",
"csv_reader",
":",
"# get column labels from the first line",
"if",
"linenumber",
"==",
"1",
":",
"prop_indices",
"=",
"read_header",
"(",
"line",
",",
"options",
")",
"# otherwise read line & append to MolList",
"else",
":",
"mol",
"=",
"Molecule",
"(",
")",
"mol",
"=",
"read_line",
"(",
"line",
",",
"options",
",",
"prop_indices",
",",
"mol",
")",
"# if the line's junk, skip it",
"if",
"mol",
"==",
"1",
":",
"print",
"(",
"\" skipping molecule 'm'\\n\"",
".",
"format",
"(",
"m",
"=",
"(",
"linenumber",
"-",
"1",
")",
")",
")",
"else",
":",
"molList",
".",
"append",
"(",
"mol",
")",
"linenumber",
"+=",
"1",
"# return molList",
"return",
"molList"
] |
Read csv and return molList, a list of mol objects
|
[
"Read",
"csv",
"and",
"return",
"molList",
"a",
"list",
"of",
"mol",
"objects"
] |
341880b79faf8147dc9fa6e90438531cd09fabcc
|
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/splitter/splitter.py#L190-L230
|
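The gzip branch above opens the file in binary mode, which suits Python 2; on Python 3, csv.reader needs text, so gzip.open takes mode 'rt'. A minimal sketch, assuming example.csv exists:

import csv
import gzip
import os

def open_csv(path):
    # gzip.open(..., 'rt') yields text, which csv.reader requires on Python 3
    if os.path.splitext(path)[1] == '.gz':
        return gzip.open(path, 'rt', newline='')
    return open(path, newline='')

with open_csv('example.csv') as f:
    for row in csv.reader(f):
        print(row)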
238,688
|
realestate-com-au/dashmat
|
dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/searchcommands/search_command_internals.py
|
InputHeader.read
|
def read(self, input_file):
""" Reads an InputHeader from `input_file`.
The input header is read as a sequence of *<key>***:***<value>* pairs
separated by a newline. The end of the input header is signalled by an
empty line or an end-of-file.
:param input_file: File-like object that supports iteration over lines
"""
key, value = None, None
import sys
for line in input_file:
if line == '\n':
break
if line[-1:] == '\n':
line = line[:-1]
item = line.split(':', 1)
if len(item) == 2:
# start of a new item
self._update(key, value)
key, value = item[0], urllib.unquote(item[1])
elif key is not None:
# continuation of the current item
value = '\n'.join([value, urllib.unquote(line)])
self._update(key, value)
return
|
python
|
def read(self, input_file):
""" Reads an InputHeader from `input_file`.
The input header is read as a sequence of *<key>***:***<value>* pairs
separated by a newline. The end of the input header is signalled by an
empty line or an end-of-file.
:param input_file: File-like object that supports iteration over lines
"""
key, value = None, None
import sys
for line in input_file:
if line == '\n':
break
if line[-1:] == '\n':
line = line[:-1]
item = line.split(':', 1)
if len(item) == 2:
# start of a new item
self._update(key, value)
key, value = item[0], urllib.unquote(item[1])
elif key is not None:
# continuation of the current item
value = '\n'.join([value, urllib.unquote(line)])
self._update(key, value)
return
|
[
"def",
"read",
"(",
"self",
",",
"input_file",
")",
":",
"key",
",",
"value",
"=",
"None",
",",
"None",
"import",
"sys",
"for",
"line",
"in",
"input_file",
":",
"if",
"line",
"==",
"'\\n'",
":",
"break",
"if",
"line",
"[",
"-",
"1",
":",
"]",
"==",
"'\\n'",
":",
"line",
"=",
"line",
"[",
":",
"-",
"1",
"]",
"item",
"=",
"line",
".",
"split",
"(",
"':'",
",",
"1",
")",
"if",
"len",
"(",
"item",
")",
"==",
"2",
":",
"# start of a new item",
"self",
".",
"_update",
"(",
"key",
",",
"value",
")",
"key",
",",
"value",
"=",
"item",
"[",
"0",
"]",
",",
"urllib",
".",
"unquote",
"(",
"item",
"[",
"1",
"]",
")",
"elif",
"key",
"is",
"not",
"None",
":",
"# continuation of the current item",
"value",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"value",
",",
"urllib",
".",
"unquote",
"(",
"line",
")",
"]",
")",
"self",
".",
"_update",
"(",
"key",
",",
"value",
")",
"return"
] |
Reads an InputHeader from `input_file`.
The input header is read as a sequence of *<key>***:***<value>* pairs
separated by a newline. The end of the input header is signalled by an
empty line or an end-of-file.
:param input_file: File-like object that supports iteration over lines
|
[
"Reads",
"an",
"InputHeader",
"from",
"input_file",
"."
] |
433886e52698f0ddb9956f087b76041966c3bcd1
|
https://github.com/realestate-com-au/dashmat/blob/433886e52698f0ddb9956f087b76041966c3bcd1/dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/searchcommands/search_command_internals.py#L110-L137
|
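A self-contained sketch of the same header protocol (colon-separated pairs, URL-quoted values, continuation lines, terminated by a blank line), using io.StringIO and Python 3's urllib.parse.unquote in place of the Python 2 urllib.unquote:

import io
from urllib.parse import unquote

header = io.StringIO('search:index%3Dmain\ninfoPath:/tmp/info.csv\n\nbody follows\n')

items = {}
key, value = None, None
for line in header:
    if line == '\n':
        break                      # blank line ends the header
    line = line.rstrip('\n')
    parts = line.split(':', 1)
    if len(parts) == 2:
        if key is not None:
            items[key] = value     # flush the previous item
        key, value = parts[0], unquote(parts[1])
    elif key is not None:
        value = '\n'.join([value, unquote(line)])  # continuation line
if key is not None:
    items[key] = value
print(items)  # {'search': 'index=main', 'infoPath': '/tmp/info.csv'}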
238,689
|
realestate-com-au/dashmat
|
dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/searchcommands/search_command_internals.py
|
MessagesHeader.write
|
def write(self, output_file):
""" Writes this MessageHeader to an output stream.
Messages are written as a sequence of *<message_text-message_level>***=**
*<message_text-text>* pairs separated by '\r\n'. The sequence is
terminated by a pair of '\r\n' sequences.
"""
for message_level, message_text in self:
output_file.write('%s=%s\r\n' % (message_level, message_text))
output_file.write('\r\n')
|
python
|
def write(self, output_file):
""" Writes this MessageHeader to an output stream.
Messages are written as a sequence of *<message_text-message_level>***=**
*<message_text-text>* pairs separated by '\r\n'. The sequence is
terminated by a pair of '\r\n' sequences.
"""
for message_level, message_text in self:
output_file.write('%s=%s\r\n' % (message_level, message_text))
output_file.write('\r\n')
|
[
"def",
"write",
"(",
"self",
",",
"output_file",
")",
":",
"for",
"message_level",
",",
"message_text",
"in",
"self",
":",
"output_file",
".",
"write",
"(",
"'%s=%s\\r\\n'",
"%",
"(",
"message_level",
",",
"message_text",
")",
")",
"output_file",
".",
"write",
"(",
"'\\r\\n'",
")"
] |
Writes this MessageHeader to an output stream.
Messages are written as a sequence of *<message_text-message_level>***=**
*<message_text-text>* pairs separated by '\r\n'. The sequence is
terminated by a pair of '\r\n' sequences.
|
[
"Writes",
"this",
"MessageHeader",
"to",
"an",
"output",
"stream",
"."
] |
433886e52698f0ddb9956f087b76041966c3bcd1
|
https://github.com/realestate-com-au/dashmat/blob/433886e52698f0ddb9956f087b76041966c3bcd1/dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/searchcommands/search_command_internals.py#L189-L199
|
238,690
|
emilydolson/avida-spatial-tools
|
avidaspatial/cell_picker.py
|
cell_picker.callback
|
def callback(self, event):
"""
Selects cells on click.
"""
self.init_width()
if len(self.initial) > 0:
for cell in self.initial:
self.color_square(cell[0], cell[1], True)
self.initial = []
self.begin_drag = event
self.color_square(event.x, event.y)
|
python
|
def callback(self, event):
"""
Selects cells on click.
"""
self.init_width()
if len(self.initial) > 0:
for cell in self.initial:
self.color_square(cell[0], cell[1], True)
self.initial = []
self.begin_drag = event
self.color_square(event.x, event.y)
|
[
"def",
"callback",
"(",
"self",
",",
"event",
")",
":",
"self",
".",
"init_width",
"(",
")",
"if",
"len",
"(",
"self",
".",
"initial",
")",
">",
"0",
":",
"for",
"cell",
"in",
"self",
".",
"initial",
":",
"self",
".",
"color_square",
"(",
"cell",
"[",
"0",
"]",
",",
"cell",
"[",
"1",
"]",
",",
"True",
")",
"self",
".",
"initial",
"=",
"[",
"]",
"self",
".",
"begin_drag",
"=",
"event",
"self",
".",
"color_square",
"(",
"event",
".",
"x",
",",
"event",
".",
"y",
")"
] |
Selects cells on click.
|
[
"Selects",
"cells",
"on",
"click",
"."
] |
7beb0166ccefad5fa722215b030ac2a53d62b59e
|
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/cell_picker.py#L56-L67
|
238,691
|
emilydolson/avida-spatial-tools
|
avidaspatial/cell_picker.py
|
cell_picker.init_width
|
def init_width(self):
"""
Get rectangle diameters
"""
self.col_width = self.c.winfo_width()/self.cols
self.row_height = self.c.winfo_height()/self.rows
|
python
|
def init_width(self):
"""
Get rectangle diameters
"""
self.col_width = self.c.winfo_width()/self.cols
self.row_height = self.c.winfo_height()/self.rows
|
[
"def",
"init_width",
"(",
"self",
")",
":",
"self",
".",
"col_width",
"=",
"self",
".",
"c",
".",
"winfo_width",
"(",
")",
"/",
"self",
".",
"cols",
"self",
".",
"row_height",
"=",
"self",
".",
"c",
".",
"winfo_height",
"(",
")",
"/",
"self",
".",
"rows"
] |
Get rectangle diameters
|
[
"Get",
"rectangle",
"diameters"
] |
7beb0166ccefad5fa722215b030ac2a53d62b59e
|
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/cell_picker.py#L69-L74
|
238,692
|
emilydolson/avida-spatial-tools
|
avidaspatial/cell_picker.py
|
cell_picker.color_square
|
def color_square(self, x, y, unit_coords=False):
"""
Handles actually coloring the squares
"""
# Calculate column and row number
if unit_coords:
col = x
row = y
else:
col = x//self.col_width
row = y//self.row_height
# If the tile is not filled, create a rectangle
if not self.tiles[row][col]:
self.tiles[row][col] = \
self.c.create_rectangle(col*self.col_width,
row*self.row_height,
(col+1)*self.col_width,
(row+1)*self.row_height,
fill="black")
self.cells.append(row*self.cols + col)
# If the tile is filled, delete the rectangle and clear the reference
else:
self.c.delete(self.tiles[row][col])
self.tiles[row][col] = None
self.cells.remove(row*self.cols + col)
|
python
|
def color_square(self, x, y, unit_coords=False):
"""
Handles actually coloring the squares
"""
# Calculate column and row number
if unit_coords:
col = x
row = y
else:
col = x//self.col_width
row = y//self.row_height
# If the tile is not filled, create a rectangle
if not self.tiles[row][col]:
self.tiles[row][col] = \
self.c.create_rectangle(col*self.col_width,
row*self.row_height,
(col+1)*self.col_width,
(row+1)*self.row_height,
fill="black")
self.cells.append(row*self.cols + col)
# If the tile is filled, delete the rectangle and clear the reference
else:
self.c.delete(self.tiles[row][col])
self.tiles[row][col] = None
self.cells.remove(row*self.cols + col)
|
[
"def",
"color_square",
"(",
"self",
",",
"x",
",",
"y",
",",
"unit_coords",
"=",
"False",
")",
":",
"# Calculate column and row number",
"if",
"unit_coords",
":",
"col",
"=",
"x",
"row",
"=",
"y",
"else",
":",
"col",
"=",
"x",
"//",
"self",
".",
"col_width",
"row",
"=",
"y",
"//",
"self",
".",
"row_height",
"# If the tile is not filled, create a rectangle",
"if",
"not",
"self",
".",
"tiles",
"[",
"row",
"]",
"[",
"col",
"]",
":",
"self",
".",
"tiles",
"[",
"row",
"]",
"[",
"col",
"]",
"=",
"self",
".",
"c",
".",
"create_rectangle",
"(",
"col",
"*",
"self",
".",
"col_width",
",",
"row",
"*",
"self",
".",
"row_height",
",",
"(",
"col",
"+",
"1",
")",
"*",
"self",
".",
"col_width",
",",
"(",
"row",
"+",
"1",
")",
"*",
"self",
".",
"row_height",
",",
"fill",
"=",
"\"black\"",
")",
"self",
".",
"cells",
".",
"append",
"(",
"row",
"*",
"self",
".",
"cols",
"+",
"col",
")",
"# If the tile is filled, delete the rectangle and clear the reference",
"else",
":",
"self",
".",
"c",
".",
"delete",
"(",
"self",
".",
"tiles",
"[",
"row",
"]",
"[",
"col",
"]",
")",
"self",
".",
"tiles",
"[",
"row",
"]",
"[",
"col",
"]",
"=",
"None",
"self",
".",
"cells",
".",
"remove",
"(",
"row",
"*",
"self",
".",
"cols",
"+",
"col",
")"
] |
Handles actually coloring the squares
|
[
"Handles",
"actually",
"coloring",
"the",
"squares"
] |
7beb0166ccefad5fa722215b030ac2a53d62b59e
|
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/cell_picker.py#L76-L102
|
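The pixel-to-cell arithmetic above is independent of tkinter; a quick check of the mapping with hypothetical canvas dimensions:

cols, rows = 10, 6
canvas_w, canvas_h = 400, 240
col_width, row_height = canvas_w // cols, canvas_h // rows   # 40 x 40 cells

x, y = 95, 130                     # a hypothetical click position
col, row = x // col_width, y // row_height
cell_index = row * cols + col      # flat index, as stored in self.cells
print(col, row, cell_index)        # 2 3 32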
238,693
|
emilydolson/avida-spatial-tools
|
avidaspatial/cell_picker.py
|
cell_picker.dragend
|
def dragend(self, event):
"""
Handles the end of a drag action.
"""
x_range = [self.begin_drag.x//self.col_width, event.x//self.col_width]
y_range = [self.begin_drag.y//self.row_height,
event.y//self.row_height]
# Check bounds
for i in range(2):
for ls in [x_range, y_range]:
if ls[i] < 0:
ls[i] = 0
if ls[i] >= self.rows:
ls[i] = self.rows-1
for x in range(min(x_range), max(x_range)+1):
for y in range(min(y_range), max(y_range)+1):
if x == self.begin_drag.x//self.col_width and \
y == self.begin_drag.y//self.row_height:
continue
self.color_square(x*self.col_width, y*self.row_height)
self.begin_drag = None
print(len(self.cells), "cells selected")
|
python
|
def dragend(self, event):
"""
Handles the end of a drag action.
"""
x_range = [self.begin_drag.x//self.col_width, event.x//self.col_width]
y_range = [self.begin_drag.y//self.row_height,
event.y//self.row_height]
# Check bounds
for i in range(2):
for ls in [x_range, y_range]:
if ls[i] < 0:
ls[i] = 0
if ls[i] >= self.rows:
ls[i] = self.rows-1
for x in range(min(x_range), max(x_range)+1):
for y in range(min(y_range), max(y_range)+1):
if x == self.begin_drag.x//self.col_width and \
y == self.begin_drag.y//self.row_height:
continue
self.color_square(x*self.col_width, y*self.row_height)
self.begin_drag = None
print(len(self.cells), "cells selected")
|
[
"def",
"dragend",
"(",
"self",
",",
"event",
")",
":",
"x_range",
"=",
"[",
"self",
".",
"begin_drag",
".",
"x",
"//",
"self",
".",
"col_width",
",",
"event",
".",
"x",
"//",
"self",
".",
"col_width",
"]",
"y_range",
"=",
"[",
"self",
".",
"begin_drag",
".",
"y",
"//",
"self",
".",
"row_height",
",",
"event",
".",
"y",
"//",
"self",
".",
"row_height",
"]",
"# Check bounds",
"for",
"i",
"in",
"range",
"(",
"2",
")",
":",
"for",
"ls",
"in",
"[",
"x_range",
",",
"y_range",
"]",
":",
"if",
"ls",
"[",
"i",
"]",
"<",
"0",
":",
"ls",
"[",
"i",
"]",
"=",
"0",
"if",
"ls",
"[",
"i",
"]",
">=",
"self",
".",
"rows",
":",
"ls",
"[",
"i",
"]",
"=",
"self",
".",
"rows",
"-",
"1",
"for",
"x",
"in",
"range",
"(",
"min",
"(",
"x_range",
")",
",",
"max",
"(",
"x_range",
")",
"+",
"1",
")",
":",
"for",
"y",
"in",
"range",
"(",
"min",
"(",
"y_range",
")",
",",
"max",
"(",
"y_range",
")",
"+",
"1",
")",
":",
"if",
"x",
"==",
"self",
".",
"begin_drag",
".",
"x",
"//",
"self",
".",
"col_width",
"and",
"y",
"==",
"self",
".",
"begin_drag",
".",
"y",
"//",
"self",
".",
"row_height",
":",
"continue",
"self",
".",
"color_square",
"(",
"x",
"*",
"self",
".",
"col_width",
",",
"y",
"*",
"self",
".",
"row_height",
")",
"self",
".",
"begin_drag",
"=",
"None",
"print",
"(",
"len",
"(",
"self",
".",
"cells",
")",
",",
"\"cells selected\"",
")"
] |
Handles the end of a drag action.
|
[
"Handles",
"the",
"end",
"of",
"a",
"drag",
"action",
"."
] |
7beb0166ccefad5fa722215b030ac2a53d62b59e
|
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/cell_picker.py#L110-L134
|
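The bounds check above clamps both x_range and y_range against self.rows, which only coincides with the right limits on square grids; a per-axis clamp is a single min/max expression. A sketch with hypothetical grid sizes:

def clamp(value, lo, hi):
    # Equivalent to the pair of if-statements, one axis at a time
    return max(lo, min(value, hi))

cols, rows = 10, 6
x_range = [clamp(v, 0, cols - 1) for v in (-2, 14)]
y_range = [clamp(v, 0, rows - 1) for v in (3, 9)]
print(x_range, y_range)  # [0, 9] [3, 5]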
238,694
|
Bystroushaak/pyDHTMLParser
|
src/dhtmlparser/specialdict.py
|
_lower_if_str
|
def _lower_if_str(item):
"""
Try to convert item to lowercase, if it is string.
Args:
item (obj): Str, unicode or any other object.
Returns:
obj: ``item.lower()`` if `item` is ``str`` or ``unicode``, else just \
`item` itself.
"""
# python 2 / 3 shill
try:
string_type = basestring
except NameError:
string_type = str
if isinstance(item, string_type):
return item.lower()
return item
|
python
|
def _lower_if_str(item):
"""
Try to convert item to lowercase, if it is string.
Args:
item (obj): Str, unicode or any other object.
Returns:
obj: ``item.lower()`` if `item` is ``str`` or ``unicode``, else just \
`item` itself.
"""
# python 2 / 3 shill
try:
string_type = basestring
except NameError:
string_type = str
if isinstance(item, string_type):
return item.lower()
return item
|
[
"def",
"_lower_if_str",
"(",
"item",
")",
":",
"# python 2 / 3 shill",
"try",
":",
"string_type",
"=",
"basestring",
"except",
"NameError",
":",
"string_type",
"=",
"str",
"if",
"isinstance",
"(",
"item",
",",
"string_type",
")",
":",
"return",
"item",
".",
"lower",
"(",
")",
"return",
"item"
] |
Try to convert item to lowercase, if it is string.
Args:
item (obj): Str, unicode or any other object.
Returns:
obj: ``item.lower()`` if `item` is ``str`` or ``unicode``, else just \
`item` itself.
|
[
"Try",
"to",
"convert",
"item",
"to",
"lowercase",
"if",
"it",
"is",
"string",
"."
] |
4756f93dd048500b038ece2323fe26e46b6bfdea
|
https://github.com/Bystroushaak/pyDHTMLParser/blob/4756f93dd048500b038ece2323fe26e46b6bfdea/src/dhtmlparser/specialdict.py#L11-L31
|
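On Python 3 alone, the basestring shim above collapses to a plain isinstance check:

def lower_if_str(item):
    # str is the only string type on Python 3
    return item.lower() if isinstance(item, str) else item

print(lower_if_str('HTML'), lower_if_str(42))  # html 42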
238,695
|
rosenbrockc/ci
|
pyci/utility.py
|
get_repo_relpath
|
def get_repo_relpath(repo, relpath):
"""Returns the absolute path to the 'relpath' taken relative to the base
directory of the repository.
"""
from os import path
if relpath[0:2] == "./":
return path.join(repo, relpath[2::])
else:
from os import chdir, getcwd
cd = getcwd()
chdir(path.expanduser(repo))
result = path.abspath(relpath)
chdir(cd)
return result
|
python
|
def get_repo_relpath(repo, relpath):
"""Returns the absolute path to the 'relpath' taken relative to the base
directory of the repository.
"""
from os import path
if relpath[0:2] == "./":
return path.join(repo, relpath[2::])
else:
from os import chdir, getcwd
cd = getcwd()
chdir(path.expanduser(repo))
result = path.abspath(relpath)
chdir(cd)
return result
|
[
"def",
"get_repo_relpath",
"(",
"repo",
",",
"relpath",
")",
":",
"from",
"os",
"import",
"path",
"if",
"relpath",
"[",
"0",
":",
"2",
"]",
"==",
"\"./\"",
":",
"return",
"path",
".",
"join",
"(",
"repo",
",",
"relpath",
"[",
"2",
":",
":",
"]",
")",
"else",
":",
"from",
"os",
"import",
"chdir",
",",
"getcwd",
"cd",
"=",
"getcwd",
"(",
")",
"chdir",
"(",
"path",
".",
"expanduser",
"(",
"repo",
")",
")",
"result",
"=",
"path",
".",
"abspath",
"(",
"relpath",
")",
"chdir",
"(",
"cd",
")",
"return",
"result"
] |
Returns the absolute path to the 'relpath' taken relative to the base
directory of the repository.
|
[
"Returns",
"the",
"absolute",
"path",
"to",
"the",
"relpath",
"taken",
"relative",
"to",
"the",
"base",
"directory",
"of",
"the",
"repository",
"."
] |
4d5a60291424a83124d1d962d17fb4c7718cde2b
|
https://github.com/rosenbrockc/ci/blob/4d5a60291424a83124d1d962d17fb4c7718cde2b/pyci/utility.py#L18-L31
|
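The chdir round trip above mutates process-wide state and is not thread-safe; os.path.normpath over an explicit join resolves the same paths without it. A sketch that ignores symlinks:

from os import path

def repo_relpath(repo, relpath):
    if relpath[0:2] == './':
        return path.join(repo, relpath[2:])
    # Resolve relpath as if the repo root were the working directory
    return path.normpath(path.join(path.expanduser(repo), relpath))

print(repo_relpath('~/repo', '../other'))  # e.g. /home/user/other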
238,696
|
rosenbrockc/ci
|
pyci/utility.py
|
load_with_datetime
|
def load_with_datetime(pairs):
"""Deserialize JSON into python datetime objects."""
d = {}
for k, v in pairs:
if isinstance(v, basestring):
try:
d[k] = dateutil.parser.parse(v)
except ValueError:
d[k] = v
else:
d[k] = v
return d
|
python
|
def load_with_datetime(pairs):
"""Deserialize JSON into python datetime objects."""
d = {}
for k, v in pairs:
if isinstance(v, basestring):
try:
d[k] = dateutil.parser.parse(v)
except ValueError:
d[k] = v
else:
d[k] = v
return d
|
[
"def",
"load_with_datetime",
"(",
"pairs",
")",
":",
"d",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"pairs",
":",
"if",
"isinstance",
"(",
"v",
",",
"basestring",
")",
":",
"try",
":",
"d",
"[",
"k",
"]",
"=",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"v",
")",
"except",
"ValueError",
":",
"d",
"[",
"k",
"]",
"=",
"v",
"else",
":",
"d",
"[",
"k",
"]",
"=",
"v",
"return",
"d"
] |
Deserialize JSON into python datetime objects.
|
[
"Deserialize",
"JSON",
"into",
"python",
"datetime",
"objects",
"."
] |
4d5a60291424a83124d1d962d17fb4c7718cde2b
|
https://github.com/rosenbrockc/ci/blob/4d5a60291424a83124d1d962d17fb4c7718cde2b/pyci/utility.py#L34-L45
|
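A Python 3 sketch of the same object_pairs_hook wiring, with str in place of the Python 2 basestring; requires the python-dateutil package:

import json
import dateutil.parser

def load_with_datetime(pairs):
    d = {}
    for k, v in pairs:
        if isinstance(v, str):
            try:
                d[k] = dateutil.parser.parse(v)   # datetime on success
            except ValueError:
                d[k] = v                          # not a date: keep the string
        else:
            d[k] = v
    return d

doc = '{"end": "2024-05-01T12:00:00", "code": 0, "note": "ok"}'
print(json.loads(doc, object_pairs_hook=load_with_datetime))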
238,697
|
rosenbrockc/ci
|
pyci/utility.py
|
get_json
|
def get_json(jsonpath, default):
"""Returns the JSON serialized object at the specified path, or the default
if it doesn't exist or can't be deserialized.
"""
from os import path
import json
result = default
if path.isfile(jsonpath):
try:
with open(jsonpath) as f:
result = json.load(f, object_pairs_hook=load_with_datetime)
except(IOError):
err("Unable to deserialize JSON at {}".format(jsonpath))
pass
return result
|
python
|
def get_json(jsonpath, default):
"""Returns the JSON serialized object at the specified path, or the default
if it doesn't exist or can't be deserialized.
"""
from os import path
import json
result = default
if path.isfile(jsonpath):
try:
with open(jsonpath) as f:
result = json.load(f, object_pairs_hook=load_with_datetime)
except(IOError):
err("Unable to deserialize JSON at {}".format(jsonpath))
pass
return result
|
[
"def",
"get_json",
"(",
"jsonpath",
",",
"default",
")",
":",
"from",
"os",
"import",
"path",
"import",
"json",
"result",
"=",
"default",
"if",
"path",
".",
"isfile",
"(",
"jsonpath",
")",
":",
"try",
":",
"with",
"open",
"(",
"jsonpath",
")",
"as",
"f",
":",
"result",
"=",
"json",
".",
"load",
"(",
"f",
",",
"object_pairs_hook",
"=",
"load_with_datetime",
")",
"except",
"(",
"IOError",
")",
":",
"err",
"(",
"\"Unable to deserialize JSON at {}\"",
".",
"format",
"(",
"jsonpath",
")",
")",
"pass",
"return",
"result"
] |
Returns the JSON serialized object at the specified path, or the default
if it doesn't exist or can't be deserialized.
|
[
"Returns",
"the",
"JSON",
"serialized",
"object",
"at",
"the",
"specified",
"path",
"or",
"the",
"default",
"if",
"it",
"doesn",
"t",
"exist",
"or",
"can",
"t",
"be",
"deserialized",
"."
] |
4d5a60291424a83124d1d962d17fb4c7718cde2b
|
https://github.com/rosenbrockc/ci/blob/4d5a60291424a83124d1d962d17fb4c7718cde2b/pyci/utility.py#L47-L63
|
238,698
|
rosenbrockc/ci
|
pyci/utility.py
|
run_exec
|
def run_exec(repodir, command, output, index):
"""Runs the specified command in the repo directory.
:arg repodir: the absolute path of the repo directory to run 'command' in.
:arg command: what to run in the 'repodir'. Should be valid in the context
of the $PATH variable.
:arg output: the multiprocessing queue to push the results to.
:arg index: the index of this test in the master list.
"""
from os import path
from subprocess import Popen, PIPE
from datetime import datetime
child = Popen("cd {}; {} > {}.cidat".format(repodir, command, index),
shell=True, executable="/bin/bash")
# Need to do this so that we are sure the process is done before moving on
child.wait()
output.put({"index": index, "end": datetime.now(), "code": child.returncode,
"output": path.join(repodir, "{}.cidat".format(index))})
|
python
|
def run_exec(repodir, command, output, index):
"""Runs the specified command in the repo directory.
:arg repodir: the absolute path of the repo directory to run 'command' in.
:arg command: what to run in the 'repodir'. Should be valid in the context
of the $PATH variable.
:arg output: the multiprocessing queue to push the results to.
:arg index: the index of this test in the master list.
"""
from os import path
from subprocess import Popen, PIPE
from datetime import datetime
child = Popen("cd {}; {} > {}.cidat".format(repodir, command, index),
shell=True, executable="/bin/bash")
# Need to do this so that we are sure the process is done before moving on
child.wait()
output.put({"index": index, "end": datetime.now(), "code": child.returncode,
"output": path.join(repodir, "{}.cidat".format(index))})
|
[
"def",
"run_exec",
"(",
"repodir",
",",
"command",
",",
"output",
",",
"index",
")",
":",
"from",
"os",
"import",
"path",
"from",
"subprocess",
"import",
"Popen",
",",
"PIPE",
"from",
"datetime",
"import",
"datetime",
"child",
"=",
"Popen",
"(",
"\"cd {}; {} > {}.cidat\"",
".",
"format",
"(",
"repodir",
",",
"command",
",",
"index",
")",
",",
"shell",
"=",
"True",
",",
"executable",
"=",
"\"/bin/bash\"",
")",
"# Need to do this so that we are sure the process is done before moving on",
"child",
".",
"wait",
"(",
")",
"output",
".",
"put",
"(",
"{",
"\"index\"",
":",
"index",
",",
"\"end\"",
":",
"datetime",
".",
"now",
"(",
")",
",",
"\"code\"",
":",
"child",
".",
"returncode",
",",
"\"output\"",
":",
"path",
".",
"join",
"(",
"repodir",
",",
"\"{}.cidat\"",
".",
"format",
"(",
"index",
")",
")",
"}",
")"
] |
Runs the specified command in the repo directory.
:arg repodir: the absolute path of the repo directory to run 'command' in.
:arg command: what to run in the 'repodir'. Should be valid in the context
of the $PATH variable.
:arg output: the multiprocessing queue to push the results to.
:arg index: the index of this test in the master list.
|
[
"Runs",
"the",
"specified",
"command",
"in",
"the",
"repo",
"directory",
"."
] |
4d5a60291424a83124d1d962d17fb4c7718cde2b
|
https://github.com/rosenbrockc/ci/blob/4d5a60291424a83124d1d962d17fb4c7718cde2b/pyci/utility.py#L72-L90
|
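The shell string above interpolates repodir and command unescaped; Popen's cwd= and stdout= arguments express the same 'cd ... > file' intent without string building. A sketch, POSIX paths assumed:

from datetime import datetime
from subprocess import Popen

def run_exec(repodir, command, index):
    outpath = '{}/{}.cidat'.format(repodir, index)
    with open(outpath, 'w') as out:
        # cwd= replaces the 'cd {}' prefix; stdout= replaces '> {}.cidat'
        child = Popen(command, shell=True, cwd=repodir, stdout=out)
        child.wait()
    return {'index': index, 'end': datetime.now(),
            'code': child.returncode, 'output': outpath}

print(run_exec('/tmp', 'echo hello', 0))  # assumes /tmp exists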
238,699
|
mikicz/arca
|
arca/utils.py
|
load_class
|
def load_class(location: str) -> type:
""" Loads a class from a string and returns it.
>>> from arca.utils import load_class
>>> load_class("arca.backend.BaseBackend")
<class 'arca.backend.base.BaseBackend'>
:raise ArcaMisconfigured: If the class can't be loaded.
"""
module_name, _, class_name = location.rpartition(".")
if not module_name:
raise ArcaMisconfigured(f"The module is not specified, can't load class from '{location}'")
try:
imported_module = importlib.import_module(module_name)
return getattr(imported_module, class_name)
except ModuleNotFoundError:
raise ArcaMisconfigured(f"{module_name} does not exist.")
except AttributeError:
raise ArcaMisconfigured(f"{module_name} does not have a {class_name} class")
|
python
|
def load_class(location: str) -> type:
""" Loads a class from a string and returns it.
>>> from arca.utils import load_class
>>> load_class("arca.backend.BaseBackend")
<class 'arca.backend.base.BaseBackend'>
:raise ArcaMisconfigured: If the class can't be loaded.
"""
module_name, _, class_name = location.rpartition(".")
if not module_name:
raise ArcaMisconfigured(f"The module is not specified, can't load class from '{location}'")
try:
imported_module = importlib.import_module(module_name)
return getattr(imported_module, class_name)
except ModuleNotFoundError:
raise ArcaMisconfigured(f"{module_name} does not exist.")
except AttributeError:
raise ArcaMisconfigured(f"{module_name} does not have a {class_name} class")
|
[
"def",
"load_class",
"(",
"location",
":",
"str",
")",
"->",
"type",
":",
"module_name",
",",
"_",
",",
"class_name",
"=",
"location",
".",
"rpartition",
"(",
"\".\"",
")",
"if",
"not",
"module_name",
":",
"raise",
"ArcaMisconfigured",
"(",
"f\"The module is not specified, can't load class from '{location}'\"",
")",
"try",
":",
"imported_module",
"=",
"importlib",
".",
"import_module",
"(",
"module_name",
")",
"return",
"getattr",
"(",
"imported_module",
",",
"class_name",
")",
"except",
"ModuleNotFoundError",
":",
"raise",
"ArcaMisconfigured",
"(",
"f\"{module_name} does not exist.\"",
")",
"except",
"AttributeError",
":",
"raise",
"ArcaMisconfigured",
"(",
"f\"{module_name} does not have a {class_name} class\"",
")"
] |
Loads a class from a string and returns it.
>>> from arca.utils import load_class
>>> load_class("arca.backend.BaseBackend")
<class 'arca.backend.base.BaseBackend'>
:raise ArcaMisconfigured: If the class can't be loaded.
|
[
"Loads",
"a",
"class",
"from",
"a",
"string",
"and",
"returns",
"it",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/utils.py#L25-L45
|
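A quick standalone check of the same rpartition plus import_module pattern against a stdlib class, with a plain ValueError standing in for ArcaMisconfigured:

import importlib

def load_class(location):
    module_name, _, class_name = location.rpartition('.')
    if not module_name:
        raise ValueError("no module in {!r}".format(location))
    module = importlib.import_module(module_name)
    return getattr(module, class_name)

print(load_class('collections.OrderedDict'))  # <class 'collections.OrderedDict'>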