| sentence1 (string, lengths 52–3.87M) | sentence2 (string, lengths 1–47.2k) | label (1 class: entailment) |
|---|---|---|
def serve_static(app, base_url, base_path, index=False):
"""
Serve a directory statically
Parameters:
* app: Grole application object
* base_url: Base URL to serve from, e.g. /static
* base_path: Base path to look for files in
* index: Provide simple directory indexes if True
"""
@app.route(base_url + '/(.*)')
def serve(env, req):
"""
Static files
"""
try:
base = pathlib.Path(base_path).resolve()
path = (base / req.match.group(1)).resolve()
except FileNotFoundError:
return Response(None, 404, 'Not Found')
# Don't let bad paths through
if base == path or base in path.parents:
if path.is_file():
return ResponseFile(str(path))
if index and path.is_dir():
if base == path:
ret = ''
else:
ret = '<a href="../">../</a><br/>\r\n'
for item in path.iterdir():
name = item.parts[-1]
if item.is_dir():
name += '/'
ret += '<a href="{}">{}</a><br/>\r\n'.format(urllib.parse.quote(name), html.escape(name))
ret = ResponseString(ret, 'text/html')
return ret
return Response(None, 404, 'Not Found')
|
Serve a directory statically
Parameters:
* app: Grole application object
* base_url: Base URL to serve from, e.g. /static
* base_path: Base path to look for files in
* index: Provide simple directory indexes if True
|
entailment
|
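A minimal usage sketch for the row above, assuming Grole and serve_static are importable from a grole module (the import path is an assumption, not confirmed by this snippet):

from grole import Grole, serve_static  # import path assumed

app = Grole()
# Serve files beneath /var/www at URLs under /static, with directory indexes
serve_static(app, '/static', '/var/www', index=True)
app.run('localhost', 1234)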
def serve_doc(app, url):
"""
Serve API documentation extracted from request handler docstrings
Parameters:
* app: Grole application object
* url: URL to serve at
"""
@app.route(url, doc=False)
def index(env, req):
ret = ''
for d in env['doc']:
ret += 'URL: {url}, supported methods: {methods}{doc}\n'.format(**d)
return ret
|
Serve API documentation extracted from request handler docstrings
Parameters:
* app: Grole application object
* url: URL to serve at
|
entailment
|
def parse_args(args=sys.argv[1:]):
"""
Parse command line arguments for Grole server running as static file server
"""
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--address', help='address to listen on, default localhost',
default='localhost')
parser.add_argument('-p', '--port', help='port to listen on, default 1234',
default=1234, type=int)
parser.add_argument('-d', '--directory', help='directory to serve, default .',
default='.')
parser.add_argument('-n', '--noindex', help='do not show directory indexes',
default=False, action='store_true')
loglevel = parser.add_mutually_exclusive_group()
loglevel.add_argument('-v', '--verbose', help='verbose logging',
default=False, action='store_true')
loglevel.add_argument('-q', '--quiet', help='quiet logging',
default=False, action='store_true')
return parser.parse_args(args)
|
Parse command line arguments for Grole server running as static file server
|
entailment
|
def main(args=sys.argv[1:]):
"""
Run Grole static file server
"""
args = parse_args(args)
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
elif args.quiet:
logging.basicConfig(level=logging.ERROR)
else:
logging.basicConfig(level=logging.INFO)
app = Grole()
serve_static(app, '', args.directory, not args.noindex)
app.run(args.address, args.port)
|
Run Grole static file server
|
entailment
|
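Because main() accepts an argument list, the static server can also be launched programmatically; a small sketch using only the flags defined in parse_args above:

# Serve /srv/files on all interfaces, port 8080, with verbose logging
main(['-a', '0.0.0.0', '-p', '8080', '-d', '/srv/files', '-v'])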
async def _read(self, reader):
"""
Parses HTTP request into member variables
"""
start_line = await self._readline(reader)
self.method, self.location, self.version = start_line.decode().split()
path_query = urllib.parse.unquote(self.location).split('?', 1)
self.path = path_query[0]
self.query = {}
if len(path_query) > 1:
for q in path_query[1].split('&'):
try:
k, v = q.split('=', 1)
self.query[k] = v
except ValueError:
self.query[q] = None
self.headers = {}
while True:
header_raw = await self._readline(reader)
if header_raw.strip() == b'':
break
header = header_raw.decode().split(':', 1)
self.headers[header[0]] = header[1].strip()
# TODO implement chunked handling
self.data = b''
await self._buffer_body(reader)
|
Parses HTTP request into member variables
|
entailment
|
async def _readline(self, reader):
"""
Readline helper
"""
ret = await reader.readline()
if len(ret) == 0 and reader.at_eof():
raise EOFError()
return ret
|
Readline helper
|
entailment
|
async def _buffer_body(self, reader):
"""
Buffers the body of the request
"""
remaining = int(self.headers.get('Content-Length', 0))
if remaining > 0:
try:
self.data = await reader.readexactly(remaining)
except asyncio.IncompleteReadError:
raise EOFError()
|
Buffers the body of the request
|
entailment
|
def route(self, path_regex, methods=['GET'], doc=True):
"""
Decorator to register a handler
Parameters:
* path_regex: Request path regex to match against for running the handler
* methods: HTTP methods to use this handler for
* doc: Add to internal doc structure
"""
def register_func(func):
"""
Decorator implementation
"""
if doc:
self.env['doc'].append({'url': path_regex, 'methods': ', '.join(methods), 'doc': func.__doc__})
for method in methods:
self._handlers[method].append((re.compile(path_regex), func))
return func # Return the original function
return register_func
|
Decorator to register a handler
Parameters:
* path_regex: Request path regex to match against for running the handler
* methods: HTTP methods to use this handler for
* doc: Add to internal doc structure
|
entailment
|
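A hedged sketch of registering a handler with the decorator; the (env, req) signature and the req.match attribute follow from the _handle dispatcher shown next, and the Grole application object is assumed:

app = Grole()  # the Grole application class referenced in the docstrings

@app.route('/hello/(.*)', methods=['GET'])
def hello(env, req):
    """Greet the caller by the matched path suffix"""
    # req.match is the regex match object set by the dispatcher
    return 'Hello, {}!'.format(req.match.group(1))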
async def _handle(self, reader, writer):
"""
Handle a single TCP connection
Parses requests, finds appropriate handlers and returns responses
"""
peer = writer.get_extra_info('peername')
self._logger.debug('New connection from {}'.format(peer))
try:
# Loop handling requests
while True:
# Read the request
req = Request()
await req._read(reader)
# Find and execute handler
res = None
for path_regex, handler in self._handlers.get(req.method, []):
match = path_regex.fullmatch(req.path)
if match:
req.match = match
try:
if inspect.iscoroutinefunction(handler):
res = await handler(self.env, req)
else:
res = handler(self.env, req)
if not isinstance(res, Response):
res = Response(data=res)
except Exception:
# Error - log it and return 500
self._logger.error(traceback.format_exc())
res = Response(code=500, reason='Internal Server Error')
break
# No handler - send 404
if res is None:
res = Response(code=404, reason='Not Found')
# Respond
await res._write(writer)
self._logger.info('{}: {} -> {}'.format(peer, req.path, res.code))
except EOFError:
self._logger.debug('Connection closed from {}'.format(peer))
except Exception as e:
self._logger.error('Connection error ({}) from {}'.format(e, peer))
writer.close()
|
Handle a single TCP connection
Parses requests, finds appropriate handlers and returns responses
|
entailment
|
def run(self, host='localhost', port=1234):
"""
Launch the server. Will run forever accepting connections until interrupted.
Parameters:
* host: The host to listen on
* port: The port to listen on
"""
# Setup loop
loop = asyncio.get_event_loop()
coro = asyncio.start_server(self._handle, host, port, loop=loop)
try:
server = loop.run_until_complete(coro)
except Exception as e:
self._logger.error('Could not launch server: {}'.format(e))
return
# Run the server
self._logger.info('Serving on {}'.format(server.sockets[0].getsockname()))
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
|
Launch the server. Will run forever accepting connections until interrupted.
Parameters:
* host: The host to listen on
* port: The port to listen on
|
entailment
|
def hold(model: Model, reducer: Optional[Callable] = None) -> Iterator[list]:
"""Temporarilly withold change events in a modifiable list.
All changes that are captured within a "hold" context are forwarded to a list
which is yielded to the user before being sent to views of the given ``model``.
If desired, the user may modify the list of events before the context is left in
order to change the events that are ultimately sent to the model's views.
Parameters:
model:
The model object whose change events will be temporarily withheld.
reducer:
A function for modifying the events list at the end of the context.
Its signature is ``(model, events) -> new_events`` where ``model`` is the
given model, ``events`` is the complete list of events produced in the
context, and the returned ``new_events`` is a list of events that will
actually be distributed to views.
Notes:
All changes withheld from views will be sent as a single notification. For
example, if you view a :class:`spectate.mvc.models.List` and its ``append()``
method is called three times within a :func:`hold` context, its views will
receive all three events in a single notification when the context exits.
Examples:
Note how the event from ``l.append(1)`` is omitted from the printed statements.
.. code-block:: python
from spectate import mvc
l = mvc.List()
mvc.view(l, lambda l, e: list(map(print, e)))
with mvc.hold(l) as events:
l.append(1)
l.append(2)
del events[0]
.. code-block:: text
{'index': 1, 'old': Undefined, 'new': 2}
"""
if not isinstance(model, Model):
raise TypeError("Expected a Model, not %r." % model)
events = []
restore = model.__dict__.get("_notify_model_views")
model._notify_model_views = lambda e: events.extend(e)
try:
yield events
finally:
if restore is None:
del model._notify_model_views
else:
model._notify_model_views = restore
events = tuple(events)
if reducer is not None:
events = tuple(map(Data, reducer(model, events)))
model._notify_model_views(events)
|
Temporarily withhold change events in a modifiable list.
All changes that are captured within a "hold" context are forwarded to a list
which is yielded to the user before being sent to views of the given ``model``.
If desired, the user may modify the list of events before the context is left in
order to change the events that are ultimately sent to the model's views.
Parameters:
model:
The model object whose change events will be temporarily withheld.
reducer:
A function for modifying the events list at the end of the context.
Its signature is ``(model, events) -> new_events`` where ``model`` is the
given model, ``events`` is the complete list of events produced in the
context, and the returned ``new_events`` is a list of events that will
actually be distributed to views.
Notes:
All changes withheld from views will be sent as a single notification. For
example, if you view a :class:`spectate.mvc.models.List` and its ``append()``
method is called three times within a :func:`hold` context, its views will
receive all three events in a single notification when the context exits.
Examples:
Note how the event from ``l.append(1)`` is omitted from the printed statements.
.. code-block:: python
from spectate import mvc
l = mvc.List()
mvc.view(l, lambda l, e: list(map(print, e)))
with mvc.hold(l) as events:
l.append(1)
l.append(2)
del events[0]
.. code-block:: text
{'index': 1, 'old': Undefined, 'new': 2}
|
entailment
|
def rollback(
model: Model, undo: Optional[Callable] = None, *args, **kwargs
) -> Iterator[list]:
"""Withold events if an error occurs.
Generall operate
Parameters:
model:
The model object whose change events may be withheld.
undo:
An optional function for reversing any changes that may have taken place.
Its signature is ``(model, events, error)`` where ``model`` is the given
model, ``events`` is a tuple of all the events that took place, and ``error``
is the exception that was raised. Any changes that you make to the model
within this function will not produce events.
Examples:
Simple suppression of events:
.. code-block:: python
from spectate import mvc
d = mvc.Dict()
@mvc.view(d)
def should_not_be_called(d, events):
# we never call this view
assert False
try:
with mvc.rollback(d):
d["a"] = 1
d["b"] # key doesn't exist
except KeyError:
pass
Undo changes for a dictionary:
.. code-block:: python
from spectate import mvc
d = mvc.Dict()
def undo_dict_changes(model, events, error):
seen = set()
for e in reversed(events):
if e.old is mvc.Undefined:
del model[e.key]
else:
model[e.key] = e.old
try:
with mvc.rollback(d, undo=undo_dict_changes):
d["a"] = 1
d["b"] = 2
print(d)
d["c"]
except KeyError:
pass
print(d)
.. code-block:: text
{'a': 1, 'b': 2}
{}
"""
with hold(model, *args, **kwargs) as events:
try:
yield events
except Exception as error:
if undo is not None:
with mute(model):
undo(model, tuple(events), error)
events.clear()
raise
|
Withhold events if an error occurs.
Generally operates like :func:`hold`, except that the captured events are
discarded if an exception is raised inside the context.
Parameters:
model:
The model object whose change events may be withheld.
undo:
An optional function for reversing any changes that may have taken place.
Its signature is ``(model, events, error)`` where ``model`` is the given
model, ``events`` is a tuple of all the events that took place, and ``error``
is the exception that was raised. Any changes that you make to the model
within this function will not produce events.
Examples:
Simple suppression of events:
.. code-block:: python
from spectate import mvc
d = mvc.Dict()
@mvc.view(d)
def should_not_be_called(d, events):
# we never call this view
assert False
try:
with mvc.rollback(d):
d["a"] = 1
d["b"] # key doesn't exist
except KeyError:
pass
Undo changes for a dictionary:
.. code-block:: python
from spectate import mvc
d = mvc.Dict()
def undo_dict_changes(model, events, error):
seen = set()
for e in reversed(events):
if e.old is mvc.Undefined:
del model[e.key]
else:
model[e.key] = e.old
try:
with mvc.rollback(d, undo=undo_dict_changes):
d["a"] = 1
d["b"] = 2
print(d)
d["c"]
except KeyError:
pass
print(d)
.. code-block:: text
{'a': 1, 'b': 2}
{}
|
entailment
|
def mute(model: Model):
"""Block a model's views from being notified.
All changes within a "mute" context will be blocked. No content is yielded to the
user as in :func:`hold`, and the views of the model are never notified that changes
took place.
Parameters:
model: The model whose change events will be blocked.
Examples:
The view is never called due to the :func:`mute` context:
.. code-block:: python
from spectate import mvc
l = mvc.List()
@mvc.view(l)
def raises(l, events):
raise ValueError("Events occurred!")
with mvc.mute(l):
l.append(1)
"""
if not isinstance(model, Model):
raise TypeError("Expected a Model, not %r." % model)
restore = model.__dict__.get("_notify_model_views")
model._notify_model_views = lambda e: None
try:
yield
finally:
if restore is None:
del model._notify_model_views
else:
model._notify_model_views = restore
|
Block a model's views from being notified.
All changes within a "mute" context will be blocked. No content is yielded to the
user as in :func:`hold`, and the views of the model are never notified that changes
took place.
Parameters:
model: The model whose change events will be blocked.
Examples:
The view is never called due to the :func:`mute` context:
.. code-block:: python
from spectate import mvc
l = mvc.List()
@mvc.view(l)
def raises(l, events):
raise ValueError("Events occurred!")
with mvc.mute(l):
l.append(1)
|
entailment
|
def expose(*methods):
"""A decorator for exposing the methods of a class.
Parameters
----------
*methods : str
Names of the methods that should be exposed to callbacks.
Returns
-------
decorator : function
A function accepting one argument - the class whose methods will be
exposed - and which returns a new :class:`Watchable` that will
notify a :class:`Spectator` when those methods are called.
Notes
-----
This is essentially a decorator version of :func:`expose_as`
"""
def setup(base):
return expose_as(base.__name__, base, *methods)
return setup
|
A decorator for exposing the methods of a class.
Parameters
----------
*methods : str
Names of the methods that should be exposed to callbacks.
Returns
-------
decorator : function
A function accepting one argument - the class whose methods will be
exposed - and which returns a new :class:`Watchable` that will
notify a :class:`Spectator` when those methods are called.
Notes
-----
This is essentially a decorator version of :func:`expose_as`
|
entailment
|
def expose_as(name, base, *methods):
"""Return a new type with certain methods that are exposed to callback registration.
Parameters
----------
name : str
The name of the new type.
base : type
A type such as list or dict.
*methods : str
Names of the methods that should be exposed to callbacks.
Returns
-------
exposed : obj:
A :class:`Watchable` with methods that will notify a :class:`Spectator`.
"""
classdict = {}
for method in methods:
if not hasattr(base, method):
raise AttributeError(
"Cannot expose '%s', because '%s' "
"instances lack this method" % (method, base.__name__)
)
else:
classdict[method] = MethodSpectator(getattr(base, method), method)
return type(name, (base, Watchable), classdict)
|
Return a new type with certain methods that are exposed to callback registration.
Parameters
----------
name : str
The name of the new type.
base : type
A type such as list or dict.
*methods : str
Names of the methods that should be exposed to callbacks.
Returns
-------
exposed : obj:
A :class:`Watchable` with methods that will notify a :class:`Spectator`.
|
entailment
|
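For illustration, a short sketch of expose_as building an eventful list type; the method names are ordinary list methods:

# A list subclass whose append/extend calls notify a Spectator
EventfulList = expose_as('EventfulList', list, 'append', 'extend')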
def watchable(value):
"""Returns True if the given value is a :class:`Watchable` subclass or instance."""
check = issubclass if inspect.isclass(value) else isinstance
return check(value, Watchable)
|
Returns True if the given value is a :class:`Watchable` subclass or instance.
|
entailment
|
def watch(value, spectator_type=Spectator):
"""Register a :class:`Specatator` to a :class:`Watchable` and return it.
In order to register callbacks to an eventful object, you need to create
a Spectator that will watch it for you. A :class:`Spectator` is a relatively simple
object that has methods for adding, deleting, and triggering callbacks. To
create a spectator we call ``spectator = watch(x)``, where x is a Watchable
instance.
Parameters
----------
value : Watchable
A :class:`Watchable` instance.
spectator_type : Spectator
The type of spectator that will be returned.
Returns
-------
spectator: spectator_type
The :class:`Spectator` (specified by ``spectator_type``) that
was registered to the given instance.
"""
if isinstance(value, Watchable):
wtype = type(value)
else:
raise TypeError("Expected a Watchable, not %r." % value)
spectator = getattr(value, "_instance_spectator", None)
if not isinstance(spectator, Spectator):
spectator = spectator_type(wtype)
value._instance_spectator = spectator
return spectator
|
Register a :class:`Spectator` to a :class:`Watchable` and return it.
In order to register callbacks to an eventful object, you need to create
a Spectator that will watch it for you. A :class:`Spectator` is a relatively simple
object that has methods for adding, deleting, and triggering callbacks. To
create a spectator we call ``spectator = watch(x)``, where x is a Watchable
instance.
Parameters
----------
value : Watchable
A :class:`Watchable` instance.
spectator_type : Spectator
The type of spectator that will be returned.
Returns
-------
spectator: spectator_type
The :class:`Spectator` (specified by ``spectator_type``) that
was registered to the given instance.
|
entailment
|
def watched(cls, *args, **kwargs):
"""Create and return a :class:`Watchable` with its :class:`Specatator`.
See :func:`watch` for more info on :class:`Specatator` registration.
Parameters
----------
cls: type:
A subclass of :class:`Watchable`
*args:
Positional arguments used to create the instance
**kwargs:
Keyword arguments used to create the instance.
"""
value = cls(*args, **kwargs)
return value, watch(value)
|
Create and return a :class:`Watchable` with its :class:`Spectator`.
See :func:`watch` for more info on :class:`Spectator` registration.
Parameters
----------
cls: type:
A subclass of :class:`Watchable`
*args:
Positional arguments used to create the instance
**kwargs:
Keyword arguments used to create the instance.
|
entailment
|
def unwatch(value):
"""Return the :class:`Specatator` of a :class:`Watchable` instance."""
if not isinstance(value, Watchable):
raise TypeError("Expected a Watchable, not %r." % value)
spectator = watcher(value)
try:
del value._instance_spectator
except Exception:
pass
return spectator
|
Return the :class:`Spectator` of a :class:`Watchable` instance.
|
entailment
|
def callback(self, name, before=None, after=None):
"""Add a callback pair to this spectator.
You can specify, with keywords, whether each callback should be triggered
before and/or after a given method is called - hereafter referred to as
"beforebacks" and "afterbacks" respectively.
Parameters
----------
name: str
The name of the method to which callbacks should respond.
before: None or callable
A callable of the form ``before(obj, call)`` where ``obj`` is
the instance which called a watched method, and ``call`` is a
:class:`Data` containing the name of the called method, along with
its positional and keyword arguments under the attributes "name"
"args", and "kwargs" respectively.
after: None or callable
A callable of the form ``after(obj, answer)`` where ``obj`` is
the instance which called a watched method, and ``answer`` is a
:class:`Data` containing the name of the called method, along with
the value it returned, and data ``before`` may have returned
under the attributes "name", "value", and "before" respectively.
"""
if isinstance(name, (list, tuple)):
for name in name:
self.callback(name, before, after)
else:
if not isinstance(getattr(self.subclass, name), MethodSpectator):
raise ValueError("No method specator for '%s'" % name)
if before is None and after is None:
raise ValueError("No pre or post '%s' callbacks were given" % name)
elif before is not None and not callable(before):
raise ValueError("Expected a callable, not %r." % before)
elif after is not None and not callable(after):
raise ValueError("Expected a callable, not %r." % after)
if name in self._callback_registry:
callback_list = self._callback_registry[name]
else:
callback_list = []
self._callback_registry[name] = callback_list
callback_list.append((before, after))
|
Add a callback pair to this spectator.
You can specify, with keywords, whether each callback should be triggered
before and/or after a given method is called - hereafter referred to as
"beforebacks" and "afterbacks" respectively.
Parameters
----------
name: str
The name of the method to which callbacks should respond.
before: None or callable
A callable of the form ``before(obj, call)`` where ``obj`` is
the instance which called a watched method, and ``call`` is a
:class:`Data` containing the name of the called method, along with
its positional and keyword arguments under the attributes "name"
"args", and "kwargs" respectively.
after: None or callable
A callable of the form ``after(obj, answer)`` where ``obj`` is
the instance which called a watched method, and ``answer`` is a
:class:`Data` containing the name of the called method, along with
the value it returned, and data ``before`` may have returned
under the attributes "name", "value", and "before" respectively.
|
entailment
|
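Putting expose_as, watched, and callback together, a hedged sketch of observing append calls (names follow the docstrings above):

EventfulList = expose_as('EventfulList', list, 'append')
l, spectator = watched(EventfulList)

def before_append(obj, call):
    # call.name, call.args, and call.kwargs describe the pending call
    print('before:', call.name, call.args)

def after_append(obj, answer):
    # answer.name, answer.value, and answer.before describe the result
    print('after:', answer.name, answer.value)

spectator.callback('append', before=before_append, after=after_append)
l.append(1)  # triggers both callbacks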
def remove_callback(self, name, before=None, after=None):
"""Remove a beforeback, and afterback pair from this Spectator
If ``before`` and ``after`` are None then all callbacks for
the given method will be removed. Otherwise, only the exact
callback pair will be removed.
Parameters
----------
name: str
The name of the method the callback pair is associated with.
before: None or callable
The beforeback that was originally registered to the given method.
after: None or callable
The afterback that was originally registered to the given method.
"""
if isinstance(name, (list, tuple)):
for name in name:
self.remove_callback(name, before, after)
elif before is None and after is None:
del self._callback_registry[name]
else:
if name in self._callback_registry:
callback_list = self._callback_registry[name]
else:
callback_list = []
self._callback_registry[name] = callback_list
callback_list.remove((before, after))
if len(callback_list) == 0:
# cleanup if all callbacks are gone
del self._callback_registry[name]
|
Remove a beforeback, and afterback pair from this Spectator
If ``before`` and ``after`` are None then all callbacks for
the given method will be removed. Otherwise, only the exact
callback pair will be removed.
Parameters
----------
name: str
The name of the method the callback pair is associated with.
before: None or callable
The beforeback that was originally registered to the given method.
after: None or callable
The afterback that was originally registered to the given method.
|
entailment
|
def call(self, obj, name, method, args, kwargs):
"""Trigger a method along with its beforebacks and afterbacks.
Parameters
----------
obj: object
The instance whose method is being called
name: str
The name of the method that will be called
method: callable
The base method that will be invoked with args and kwargs
args: tuple
The arguments that will be passed to the base method
kwargs: dict
The keyword args that will be passed to the base method
"""
if name in self._callback_registry:
beforebacks, afterbacks = zip(*self._callback_registry.get(name, []))
hold = []
for b in beforebacks:
if b is not None:
call = Data(name=name, kwargs=kwargs.copy(), args=args)
v = b(obj, call)
else:
v = None
hold.append(v)
out = method(*args, **kwargs)
for a, bval in zip(afterbacks, hold):
if a is not None:
a(obj, Data(before=bval, name=name, value=out))
elif callable(bval):
# the beforeback's return value was an
# afterback that expects to be called
bval(out)
return out
else:
return method(*args, **kwargs)
|
Trigger a method along with its beforebacks and afterbacks.
Parameters
----------
obj: object
The instance whose method is being called
name: str
The name of the method that will be called
method: callable
The base method that will be invoked with args and kwargs
args: tuple
The arguments that will be passed to the base method
kwargs: dict
The keyword args that will be passed to the base method
|
entailment
|
def cut_to_length(text, length, delim):
"""Shorten given text on first delimiter after given number
of characters.
"""
cut = text.find(delim, length)
if cut > -1:
return text[:cut]
else:
return text
|
Shorten given text on first delimiter after given number
of characters.
|
entailment
|
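A quick behavioural check of cut_to_length, using '.' as the delimiter:

print(cut_to_length('Hello world. More text here.', 5, '.'))
# -> 'Hello world' (cut at the first '.' found at or after index 5)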
def get_interpreter_path(version=None):
"""Return the executable of a specified or current version."""
if version and version != str(sys.version_info[0]):
return settings.PYTHON_INTERPRETER + version
else:
return sys.executable
|
Return the executable of a specified or current version.
|
entailment
|
def license_from_trove(trove):
"""Finds out license from list of trove classifiers.
Args:
trove: list of trove classifiers
Returns:
Fedora name of the package license or empty string, if no licensing
information is found in trove classifiers.
"""
license = []
for classifier in trove:
if 'License' in classifier:
stripped = classifier.strip()
# if taken from EGG-INFO, begins with Classifier:
stripped = stripped[stripped.find('License'):]
if stripped in settings.TROVE_LICENSES:
license.append(settings.TROVE_LICENSES[stripped])
return ' and '.join(license)
|
Finds out license from list of trove classifiers.
Args:
trove: list of trove classifiers
Returns:
Fedora name of the package license or empty string, if no licensing
information is found in trove classifiers.
|
entailment
|
def versions_from_trove(trove):
"""Finds out python version from list of trove classifiers.
Args:
trove: list of trove classifiers
Returns:
sorted list of major Python version strings
"""
versions = set()
for classifier in trove:
if 'Programming Language :: Python ::' in classifier:
ver = classifier.split('::')[-1]
major = ver.split('.')[0].strip()
if major:
versions.add(major)
return sorted(
set([v for v in versions if v.replace('.', '', 1).isdigit()]))
|
Finds out python version from list of trove classifiers.
Args:
trove: list of trove classifiers
Returns:
sorted list of major Python version strings
|
entailment
|
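For example:

print(versions_from_trove([
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
]))
# -> ['2', '3']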
def pypi_metadata_extension(extraction_fce):
"""Extracts data from PyPI and merges them with data from extraction
method.
"""
def inner(self, client=None):
data = extraction_fce(self)
if client is None:
logger.warning("Client is None, it was probably disabled")
data.update_attr('source0', self.archive.name)
return data
try:
release_data = client.release_data(self.name, self.version)
except BaseException:
logger.warning("Some kind of error while communicating with "
"client: {0}.".format(client), exc_info=True)
return data
try:
url, md5_digest = get_url(client, self.name, self.version)
except exc.MissingUrlException:
url, md5_digest = ('FAILED TO EXTRACT FROM PYPI',
'FAILED TO EXTRACT FROM PYPI')
data_dict = {'source0': url, 'md5': md5_digest}
for data_field in settings.PYPI_USABLE_DATA:
data_dict[data_field] = release_data.get(data_field, '')
# we usually get better license representation from trove classifiers
data_dict["license"] = license_from_trove(release_data.get(
'classifiers', ''))
data.set_from(data_dict, update=True)
return data
return inner
|
Extracts data from PyPI and merges it with the data from the
extraction method.
|
entailment
|
def venv_metadata_extension(extraction_fce):
"""Extracts specific metadata from virtualenv object, merges them with data
from given extraction method.
"""
def inner(self):
data = extraction_fce(self)
if virtualenv is None or not self.venv:
logger.debug("Skipping virtualenv metadata extraction.")
return data
temp_dir = tempfile.mkdtemp()
try:
extractor = virtualenv.VirtualEnv(self.name, temp_dir,
self.name_convertor,
self.base_python_version)
data.set_from(extractor.get_venv_data, update=True)
except exc.VirtualenvFailException as e:
logger.error("{}, skipping virtualenv metadata extraction.".format(
e))
finally:
shutil.rmtree(temp_dir)
return data
return inner
|
Extracts specific metadata from the virtualenv object and merges it
with the data from the given extraction method.
|
entailment
|
def process_description(description_fce):
"""Removes special character delimiters, titles
and wraps paragraphs.
"""
def inner(description):
clear_description = \
re.sub(r'\s+', ' ', # multiple whitespaces
# general URLs
re.sub(r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '',
# delimiters
re.sub('(#|=|---|~|`)*', '',
# very short lines, typically titles
re.sub('((\r?\n)|^).{0,8}((\r?\n)|$)', '',
# PyPI's version and downloads tags
re.sub(
'((\r*.. image::|:target:) https?|(:align:|:alt:))[^\n]*\n', '',
description_fce(description))))))
return ' '.join(textwrap.wrap(clear_description, 80))
return inner
|
Removes special character delimiters, titles
and wraps paragraphs.
|
entailment
|
def versions_from_archive(self):
"""Return Python versions extracted from trove classifiers. """
py_vers = versions_from_trove(self.classifiers)
return [ver for ver in py_vers if ver != self.unsupported_version]
|
Return Python versions extracted from trove classifiers.
|
entailment
|
def srcname(self):
"""Return srcname for the macro if the pypi name should be changed.
Those cases are:
- name was provided with -r option
- pypi name is like python-<name>
"""
if self.rpm_name or self.name.startswith(('python-', 'Python-')):
return self.name_convertor.base_name(self.rpm_name or self.name)
|
Return srcname for the macro if the pypi name should be changed.
Those cases are:
- name was provided with -r option
- pypi name is like python-<name>
|
entailment
|
def extract_data(self):
"""Extracts data from archive.
Returns:
PackageData object containing the extracted data.
"""
data = PackageData(
local_file=self.local_file,
name=self.name,
pkg_name=self.rpm_name or self.name_convertor.rpm_name(
self.name, pkg_name=True),
version=self.version,
srcname=self.srcname)
with self.archive:
data.set_from(self.data_from_archive)
# for example nose has the attribute `packages`, but instead of listing
# the packages by name it uses a function to find them; that makes
# data.packages an empty list if virtualenv is disabled
if self.venv_extraction_disabled and getattr(data, "packages") == []:
data.packages = [data.name]
return data
|
Extracts data from archive.
Returns:
PackageData object containing the extracted data.
|
entailment
|
def data_from_archive(self):
"""Returns all metadata extractable from the archive.
Returns:
dictionary containing metadata extracted from the archive
"""
archive_data = {}
archive_data['runtime_deps'] = self.runtime_deps
archive_data['build_deps'] = [
['BuildRequires', 'python2-devel']] + self.build_deps
archive_data['py_modules'] = self.py_modules
archive_data['scripts'] = self.scripts
archive_data['home_page'] = self.home_page
archive_data['description'] = self.description
archive_data['summary'] = self.summary
archive_data['license'] = self.license
archive_data['has_pth'] = self.has_pth
archive_data['has_extension'] = self.has_extension
archive_data['has_test_suite'] = self.has_test_suite
archive_data['python_versions'] = self.versions_from_archive
(archive_data['doc_files'],
archive_data['doc_license']) = self.separate_license_files(
self.doc_files)
archive_data['dirname'] = self.archive.top_directory
return archive_data
|
Returns all metadata extractable from the archive.
Returns:
dictionary containing metadata extracted from the archive
|
entailment
|
def runtime_deps(self): # install_requires
"""Returns list of runtime dependencies of the package specified in
setup.py.
Dependencies are in RPM SPECFILE format - see dependency_to_rpm()
for details, but names are already transformed according to
current distro.
Returns:
list of runtime dependencies of the package
"""
install_requires = self.metadata['install_requires']
if self.metadata[
'entry_points'] and 'setuptools' not in install_requires:
install_requires.append('setuptools') # entrypoints
return sorted(self.name_convert_deps_list(deps_from_pyp_format(
install_requires, runtime=True)))
|
Returns list of runtime dependencies of the package specified in
setup.py.
Dependencies are in RPM SPECFILE format - see dependency_to_rpm()
for details, but names are already transformed according to
current distro.
Returns:
list of runtime dependencies of the package
|
entailment
|
def build_deps(self): # setup_requires [tests_require, install_requires]
"""Same as runtime_deps, but build dependencies. Test and install
requires are included if package contains test suite to prevent
%check phase crashes because of missing dependencies
Returns:
list of build dependencies of the package
"""
build_requires = self.metadata['setup_requires']
if self.has_test_suite:
build_requires += self.metadata['tests_require'] + self.metadata[
'install_requires']
if 'setuptools' not in build_requires:
build_requires.append('setuptools')
return sorted(self.name_convert_deps_list(deps_from_pyp_format(
build_requires, runtime=False)))
|
Same as runtime_deps, but for build dependencies. Test and install
requires are included if the package contains a test suite, to prevent
%check phase crashes due to missing dependencies.
Returns:
list of build dependencies of the package
|
entailment
|
def doc_files(self):
"""Returns list of doc files that should be used for %doc in specfile.
Returns:
List of doc files from the archive - paths relative to the
archive's top-level directory, not full paths.
"""
doc_files = []
for doc_file_re in settings.DOC_FILES_RE:
doc_files.extend(
self.archive.get_files_re(doc_file_re, ignorecase=True))
return ['/'.join(x.split('/')[1:]) for x in doc_files]
|
Returns list of doc files that should be used for %doc in specfile.
Returns:
List of doc files from the archive - paths relative to the
archive's top-level directory, not full paths.
|
entailment
|
def sphinx_dir(self):
"""Returns directory with sphinx documentation, if there is such.
Returns:
Full path to sphinx documentation dir inside the archive, or None
if there is no such.
"""
# search for sphinx dir doc/ or docs/ under the first directory in
# archive (e.g. spam-1.0.0/doc)
candidate_dirs = self.archive.get_directories_re(
settings.SPHINX_DIR_RE, full_path=True)
# search for conf.py in the dirs (TODO: what if more are found?)
for directory in candidate_dirs:
contains_conf_py = self.archive.get_files_re(
r'{0}/conf.py$'.format(re.escape(directory)), full_path=True)
in_tests = 'tests' in directory.split(os.sep)
if contains_conf_py and not in_tests:
return directory
|
Returns directory with sphinx documentation, if there is such.
Returns:
Full path to sphinx documentation dir inside the archive, or None
if there is no such.
|
entailment
|
def data_from_archive(self):
"""Appends setup.py specific metadata to archive_data."""
archive_data = super(SetupPyMetadataExtractor, self).data_from_archive
archive_data['has_packages'] = self.has_packages
archive_data['packages'] = self.packages
archive_data['has_bundled_egg_info'] = self.has_bundled_egg_info
sphinx_dir = self.sphinx_dir
if sphinx_dir:
archive_data['sphinx_dir'] = "/".join(sphinx_dir.split("/")[1:])
archive_data['build_deps'].append(
['BuildRequires', self.name_convertor.rpm_name(
"sphinx", self.base_python_version)])
return archive_data
|
Appends setup.py specific metadata to archive_data.
|
entailment
|
def get_requires(self, requires_types):
"""Extracts requires of given types from metadata file, filter windows
specific requires.
"""
if not isinstance(requires_types, list):
requires_types = list(requires_types)
extracted_requires = []
for requires_name in requires_types:
for requires in self.json_metadata.get(requires_name, []):
if 'win' in requires.get('environment', {}):
continue
extracted_requires.extend(requires['requires'])
return extracted_requires
|
Extracts requires of the given types from the metadata file,
filtering out Windows-specific requires.
|
entailment
|
def get_url(client, name, version, wheel=False, hashed_format=False):
"""Retrieves list of package URLs using PyPI's XML-RPC. Chooses URL
of prefered archive and md5_digest.
"""
try:
release_urls = client.release_urls(name, version)
release_data = client.release_data(name, version)
except BaseException:  # some kind of error with client
logger.debug('Client: {0} Name: {1} Version: {2}.'.format(
client, name, version), exc_info=True)
# exc_info belongs to the logging call, not the exception constructor
raise exceptions.MissingUrlException(
"Some kind of error while communicating with client: {0}.".format(
client))
url = ''
md5_digest = None
if not wheel:
# Preferred archive is tar.gz
if len(release_urls):
zip_url = zip_md5 = ''
for release_url in release_urls:
if release_url['url'].endswith("tar.gz"):
url = release_url['url']
md5_digest = release_url['md5_digest']
if release_url['url'].endswith(".zip"):
zip_url = release_url['url']
zip_md5 = release_url['md5_digest']
if url == '':
url = zip_url or release_urls[0]['url']
md5_digest = zip_md5 or release_urls[0]['md5_digest']
elif release_data:
url = release_data['download_url']
else:
# Only wheel is acceptable
for release_url in release_urls:
if release_url['url'].endswith("none-any.whl"):
url = release_url['url']
md5_digest = release_url['md5_digest']
break
if not url:
raise exceptions.MissingUrlException(
"Url of source archive not found.")
if url == 'UNKNOWN':
raise exceptions.MissingUrlException(
"{0} package has no sources on PyPI, Please ask the maintainer "
"to upload sources.".format(release_data['name']))
if not hashed_format:
url = ("https://files.pythonhosted.org/packages/source"
"/{0[0]}/{0}/{1}").format(name, url.split("/")[-1])
return (url, md5_digest)
|
Retrieves the list of package URLs using PyPI's XML-RPC. Chooses the
URL of the preferred archive and its md5_digest.
|
entailment
|
def get(self, wheel=False):
"""Downloads the package from PyPI.
Returns:
Full path of the downloaded file.
Raises:
PermissionError if the save_dir is not writable.
"""
try:
url = get_url(self.client, self.name, self.version,
wheel, hashed_format=True)[0]
except exceptions.MissingUrlException as e:
raise SystemExit(e)
if wheel:
self.temp_dir = tempfile.mkdtemp()
save_dir = self.temp_dir
else:
save_dir = self.save_dir
save_file = '{0}/{1}'.format(save_dir, url.split('/')[-1])
request.urlretrieve(url, save_file)
logger.info('Downloaded package from PyPI: {0}.'.format(save_file))
return save_file
|
Downloads the package from PyPI.
Returns:
Full path of the downloaded file.
Raises:
PermissionError if the save_dir is not writable.
|
entailment
|
def get(self):
"""Copies file from local filesystem to self.save_dir.
Returns:
Full path of the copied file.
Raises:
EnvironmentError if the file can't be found or the save_dir
is not writable.
"""
if self.local_file.endswith('.whl'):
self.temp_dir = tempfile.mkdtemp()
save_dir = self.temp_dir
else:
save_dir = self.save_dir
save_file = '{0}/{1}'.format(save_dir, os.path.basename(
self.local_file))
if not os.path.exists(save_file) or not os.path.samefile(
self.local_file, save_file):
shutil.copy2(self.local_file, save_file)
logger.info('Local file: {0} copied to {1}.'.format(
self.local_file, save_file))
return save_file
|
Copies file from local filesystem to self.save_dir.
Returns:
Full path of the copied file.
Raises:
EnvironmentError if the file can't be found or the save_dir
is not writable.
|
entailment
|
def _stripped_name_version(self):
"""Returns filename stripped of the suffix.
Returns:
Filename stripped of the suffix (extension).
"""
# we don't use splitext, because on "a.tar.gz" it returns ("a.tar",
# ".gz")
filename = os.path.basename(self.local_file)
for archive_suffix in settings.ARCHIVE_SUFFIXES:
if filename.endswith(archive_suffix):
# slice the suffix off; rstrip would strip any of the suffix's
# characters from the end, not the suffix as a whole
return filename[:-len(archive_suffix)]
# if the for cycle is exhausted it means no suffix was found
else:
raise exceptions.UnknownArchiveFormatException(
'Unknown archive format of file {0}.'.format(filename))
|
Returns filename stripped of the suffix.
Returns:
Filename stripped of the suffix (extension).
|
entailment
|
def fill(self, path):
'''
Scans content of directories
'''
self.bindir = set(os.listdir(path + 'bin/'))
self.lib_sitepackages = set(os.listdir(glob.glob(
path + 'lib/python?.?/site-packages/')[0]))
|
Scans content of directories
|
entailment
|
def install_package_to_venv(self):
'''
Installs package given as first argument to virtualenv without
dependencies
'''
try:
self.env.install(self.name, force=True, options=["--no-deps"])
except (ve.PackageInstallationException,
ve.VirtualenvReadonlyException):
raise VirtualenvFailException(
'Failed to install package to virtualenv')
self.dirs_after_install.fill(self.temp_dir + '/venv/')
|
Installs package given as first argument to virtualenv without
dependencies
|
entailment
|
def get_dirs_differance(self):
'''
Computes the final versions of site_packages and scripts from the
difference of the DirsContent snapshots, applying the filters
'''
try:
diff = self.dirs_after_install - self.dirs_before_install
except ValueError:
raise VirtualenvFailException(
"Some of the DirsContent attributes is uninicialized")
self.data['has_pth'] = \
any([x for x in diff.lib_sitepackages if x.endswith('.pth')])
site_packages = site_packages_filter(diff.lib_sitepackages)
self.data['packages'] = sorted(
[p for p in site_packages if not p.endswith(MODULE_SUFFIXES)])
self.data['py_modules'] = sorted(set(
[os.path.splitext(m)[0] for m in site_packages - set(
self.data['packages'])]))
self.data['scripts'] = scripts_filter(sorted(diff.bindir))
logger.debug('Data from files difference in virtualenv:')
logger.debug(pprint.pformat(self.data))
|
Computes the final versions of site_packages and scripts from the
difference of the DirsContent snapshots, applying the filters
|
entailment
|
def main(package, v, d, s, r, proxy, srpm, p, b, o, t, venv, autonc, sclize,
**scl_kwargs):
"""Convert PyPI package to RPM specfile or SRPM.
\b
\b\bArguments:
PACKAGE Provide PyPI name of the package or path to compressed
source file.
"""
register_file_log_handler('/tmp/pyp2rpm-{0}.log'.format(getpass.getuser()))
if srpm or s:
register_console_log_handler()
distro = o
if t and os.path.splitext(t)[0] in settings.KNOWN_DISTROS:
distro = t
elif t and not (b or p):
raise click.UsageError("Default python versions for template {0} are "
"missing in settings, add them or use flags "
"-b/-p to set python versions.".format(t))
logger = logging.getLogger(__name__)
logger.info('Pyp2rpm initialized.')
convertor = Convertor(package=package,
version=v,
save_dir=d,
template=t or settings.DEFAULT_TEMPLATE,
distro=distro,
base_python_version=b,
python_versions=p,
rpm_name=r,
proxy=proxy,
venv=venv,
autonc=autonc)
logger.debug(
'Convertor: {0} created. Trying to convert.'.format(convertor))
converted = convertor.convert()
logger.debug('Convertor: {0} successfully converted.'.format(convertor))
if sclize:
converted = convert_to_scl(converted, scl_kwargs)
if srpm or s:
if r:
spec_name = r + '.spec'
else:
prefix = 'python-' if not convertor.name.startswith(
'python-') else ''
spec_name = prefix + convertor.name + '.spec'
logger.info('Using name: {0} for specfile.'.format(spec_name))
if d == settings.DEFAULT_PKG_SAVE_PATH:
# default save_path is rpmbuild tree so we want to save spec
# in rpmbuild/SPECS/
spec_path = d + '/SPECS/' + spec_name
else:
# if user provide save_path then save spec in provided path
spec_path = d + '/' + spec_name
spec_dir = os.path.dirname(spec_path)
if not os.path.exists(spec_dir):
os.makedirs(spec_dir)
logger.debug('Opening specfile: {0}.'.format(spec_path))
if not utils.PY3:
converted = converted.encode('utf-8')
with open(spec_path, 'w') as f:
f.write(converted)
logger.info('Specfile saved at: {0}.'.format(spec_path))
if srpm:
msg = utils.build_srpm(spec_path, d)
logger.info(msg)
else:
logger.debug('Printing specfile to stdout.')
if utils.PY3:
print(converted)
else:
print(converted.encode('utf-8'))
logger.debug('Specfile printed.')
logger.info("That's all folks!")
|
Convert PyPI package to RPM specfile or SRPM.
\b
\b\bArguments:
PACKAGE Provide PyPI name of the package or path to compressed
source file.
|
entailment
|
def convert_to_scl(spec, scl_options):
"""Convert spec into SCL-style spec file using `spec2scl`.
Args:
spec: (str) a spec file
scl_options: (dict) SCL options provided
Returns:
A converted spec file
"""
scl_options['skip_functions'] = scl_options['skip_functions'].split(',')
scl_options['meta_spec'] = None
convertor = SclConvertor(options=scl_options)
return str(convertor.convert(spec))
|
Convert spec into SCL-style spec file using `spec2scl`.
Args:
spec: (str) a spec file
scl_options: (dict) SCL options provided
Returns:
A converted spec file
|
entailment
|
def format_options(self, ctx, formatter):
"""Writes SCL related options into the formatter as a separate
group.
"""
super(Pyp2rpmCommand, self).format_options(ctx, formatter)
scl_opts = []
for param in self.get_params(ctx):
if isinstance(param, SclizeOption):
scl_opts.append(param.get_scl_help_record(ctx))
if scl_opts:
with formatter.section('SCL related options'):
formatter.write_dl(scl_opts)
|
Writes SCL related options into the formatter as a separate
group.
|
entailment
|
def handle_parse_result(self, ctx, opts, args):
"""Validate SCL related options before parsing."""
if 'sclize' in opts and not SclConvertor:
raise click.UsageError("Please install spec2scl package to "
"perform SCL-style conversion")
if self.name in opts and 'sclize' not in opts:
raise click.UsageError(
"`--{}` can only be used with --sclize option".format(
self.name))
return super(SclizeOption, self).handle_parse_result(ctx, opts, args)
|
Validate SCL related options before parsing.
|
entailment
|
def to_list(var):
"""Checks if given value is a list, tries to convert, if it is not."""
if var is None:
return []
if isinstance(var, str):
var = var.split('\n')
elif not isinstance(var, list):
try:
var = list(var)
except TypeError:
raise ValueError("{} cannot be converted to the list.".format(var))
return var
|
Checks if the given value is a list and tries to convert it if not.
|
entailment
|
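A short behavioural sketch:

print(to_list(None))        # []
print(to_list('a\nb'))      # ['a', 'b']
print(to_list(('x', 'y')))  # ['x', 'y']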
def run(self):
"""Sends extracted metadata in json format to stdout if stdout
option is specified, assigns metadata dictionary to class_metadata
variable otherwise.
"""
if self.stdout:
sys.stdout.write("extracted json data:\n" + json.dumps(
self.metadata, default=to_str) + "\n")
else:
extract_dist.class_metadata = self.metadata
|
Sends extracted metadata in json format to stdout if stdout
option is specified, assigns metadata dictionary to class_metadata
variable otherwise.
|
entailment
|
def dependency_to_rpm(dep, runtime):
"""Converts a dependency got by pkg_resources.Requirement.parse()
to RPM format.
Args:
dep - a dependency retrieved by pkg_resources.Requirement.parse()
runtime - whether the returned dependency should be runtime (True)
or build time (False)
Returns:
List of semi-SPECFILE dependencies (package names are not properly
converted yet).
For example: [['Requires', 'jinja2'],
['Conflicts', 'jinja2', '=', '2.0.1']]
"""
logger.debug('Dependencies provided: {0} runtime: {1}.'.format(
dep, runtime))
converted = []
if not len(dep.specs):
converted.append(['Requires', dep.project_name])
else:
for ver_spec in dep.specs:
if ver_spec[0] == '!=':
converted.append(
['Conflicts', dep.project_name, '=', ver_spec[1]])
elif ver_spec[0] == '==':
converted.append(
['Requires', dep.project_name, '=', ver_spec[1]])
else:
converted.append(
['Requires', dep.project_name, ver_spec[0], ver_spec[1]])
if not runtime:
for conv in converted:
conv[0] = "Build" + conv[0]
logger.debug('Converted dependencies: {0}.'.format(converted))
return converted
|
Converts a dependency obtained from pkg_resources.Requirement.parse()
to RPM format.
Args:
dep - a dependency retrieved by pkg_resources.Requirement.parse()
runtime - whether the returned dependency should be runtime (True)
or build time (False)
Returns:
List of semi-SPECFILE dependencies (package names are not properly
converted yet).
For example: [['Requires', 'jinja2'],
['Conflicts', 'jinja2', '=', '2.0.1']]
|
entailment
|
def deps_from_pyp_format(requires, runtime=True):
"""Parses dependencies extracted from setup.py.
Args:
requires: list of dependencies as written in setup.py of the package.
runtime: are the dependencies runtime (True) or build time (False)?
Returns:
List of semi-SPECFILE dependencies (see dependency_to_rpm for format).
"""
parsed = []
logger.debug("Dependencies from setup.py: {0} runtime: {1}.".format(
requires, runtime))
for req in requires:
try:
parsed.append(Requirement.parse(req))
except ValueError:
logger.warn("Unparsable dependency {0}.".format(req),
exc_info=True)
in_rpm_format = []
for dep in parsed:
in_rpm_format.extend(dependency_to_rpm(dep, runtime))
logger.debug("Dependencies from setup.py in rpm format: {0}.".format(
in_rpm_format))
return in_rpm_format
|
Parses dependencies extracted from setup.py.
Args:
requires: list of dependencies as written in setup.py of the package.
runtime: are the dependencies runtime (True) or build time (False)?
Returns:
List of semi-SPECFILE dependencies (see dependency_to_rpm for format).
|
entailment
|
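A hedged example of the conversion (requires pkg_resources and a configured module-level logger):

print(deps_from_pyp_format(['jinja2>=2.0', 'six']))
# -> [['Requires', 'jinja2', '>=', '2.0'], ['Requires', 'six']]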
def deps_from_pydit_json(requires, runtime=True):
"""Parses dependencies returned by pydist.json, since versions
uses brackets we can't use pkg_resources to parse and we need a separate
method
Args:
requires: list of dependencies as written in pydist.json of the package
runtime: are the dependencies runtime (True) or build time (False)
Returns:
List of semi-SPECFILE dependencies (see dependency_to_rpm for format)
"""
parsed = []
for req in requires:
# req looks like 'some-name (>=X.Y,!=Y.X)' or 'some-name' where
# 'some-name' is the name of required package and '(>=X.Y,!=Y.X)'
# are specs
name, specs = None, None
# len(reqs) == 1 if there are not specified versions, 2 otherwise
reqs = req.split(' ')
name = reqs[0]
if len(reqs) == 2:
specs = reqs[1]
# try if there are more specs in spec part of the requires
specs = specs.split(",")
# strip brackets
specs = [re.sub('[()]', '', spec) for spec in specs]
# this will divide (>=0.1.2) to ['>=', '0', '.1.2']
# or (0.1.2) into ['', '0', '.1.2']
specs = [re.split('([0-9])', spec, 1) for spec in specs]
# we have separated specs based on number as delimiter
# so we need to join it back to rest of version number
# e.g ['>=', '0', '.1.2'] to ['>=', '0.1.2']
for spec in specs:
spec[1:3] = [''.join(spec[1:3])]
if specs:
for spec in specs:
if '!' in spec[0]:
parsed.append(['Conflicts', name, '=', spec[1]])
elif spec[0] == '==':
parsed.append(['Requires', name, '=', spec[1]])
else:
parsed.append(['Requires', name, spec[0], spec[1]])
else:
parsed.append(['Requires', name])
if not runtime:
for pars in parsed:
pars[0] = 'Build' + pars[0]
return parsed
|
Parses dependencies returned by pydist.json. Since the versions
use brackets we can't parse them with pkg_resources, so we need a
separate method.
Args:
requires: list of dependencies as written in pydist.json of the package
runtime: are the dependencies runtime (True) or build time (False)
Returns:
List of semi-SPECFILE dependencies (see dependency_to_rpm for format)
|
entailment
|
def get_changelog_date_packager(self):
"""Returns part of the changelog entry, containing date and packager.
"""
try:
packager = subprocess.Popen(
'rpmdev-packager', stdout=subprocess.PIPE).communicate(
)[0].strip()
except OSError:
# Hi John Doe, you should install rpmdevtools
packager = "John Doe <john@doe.com>"
logger.warn("Package rpmdevtools is missing, using default "
"name: {0}.".format(packager))
with utils.c_time_locale():
date_str = time.strftime('%a %b %d %Y', time.gmtime())
encoding = locale.getpreferredencoding()
return u'{0} {1}'.format(date_str, packager.decode(encoding))
|
Returns part of the changelog entry, containing date and packager.
|
entailment
|
def run(self):
"""Executes the code of the specified module."""
with utils.ChangeDir(self.dirname):
sys.path.insert(0, self.dirname)
sys.argv[1:] = self.args
runpy.run_module(self.not_suffixed(self.filename),
run_name='__main__',
alter_sys=True)
|
Executes the code of the specified module.
|
entailment
|
def run(self, interpreter):
"""Executes the code of the specified module. Deserializes captured
json data.
"""
with utils.ChangeDir(self.dirname):
command_list = ['PYTHONPATH=' + main_dir, interpreter,
self.filename] + list(self.args)
try:
proc = Popen(' '.join(command_list), stdout=PIPE, stderr=PIPE,
shell=True)
stream_data = proc.communicate()
except Exception as e:
logger.error(
"Error {0} while executing extract_dist command.".format(e))
raise ExtractionError
stream_data = [utils.console_to_str(s) for s in stream_data]
if proc.returncode:
logger.error(
"Subprocess failed, stdout: {0[0]}, stderr: {0[1]}".format(
stream_data))
self._result = json.loads(stream_data[0].split(
"extracted json data:\n")[-1].split("\n")[0])
|
Executes the code of the specified module. Deserializes captured
json data.
|
entailment
|
def generator_to_list(fn):
"""This decorator is for flat_list function.
It converts returned generator to list.
"""
def wrapper(*args, **kw):
return list(fn(*args, **kw))
return wrapper
|
This decorator is for flat_list function.
It converts returned generator to list.
|
entailment
|
def flat_list(lst):
"""This function flatten given nested list.
Argument:
nested list
Returns:
flat list
"""
if isinstance(lst, list):
for item in lst:
for i in flat_list(item):
yield i
else:
yield lst
|
This function flattens the given nested list.
Argument:
nested list
Returns:
flat list
|
entailment
|
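Combined with generator_to_list above, a short sketch:

flatten = generator_to_list(flat_list)
print(flatten([1, [2, [3, 4]], 5]))
# -> [1, 2, 3, 4, 5]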
def extractor_cls(self):
"""Returns the class that can read this archive based on archive suffix.
Returns:
Class that can read this archive or None if no such exists.
"""
file_cls = None
# only catches ".gz", even from ".tar.gz"
if self.is_tar:
file_cls = TarFile
elif self.is_zip:
file_cls = ZipFile
else:
logger.info("Couldn't recognize archive suffix: {0}.".format(
self.suffix))
return file_cls
|
Returns the class that can read this archive based on archive suffix.
Returns:
Class that can read this archive or None if no such exists.
|
entailment
|
def get_content_of_file(self, name, full_path=False):
"""Returns content of file from archive.
If full_path is set to False and two files with given name exist,
content of one is returned (it is not specified which one that is).
If set to True, returns content of exactly that file.
Args:
name: name of the file to get content of
Returns:
Content of the file with given name or None, if no such.
"""
if self.handle:
for member in self.handle.getmembers():
if (full_path and member.name == name) or (
not full_path and os.path.basename(
member.name) == name):
extracted = self.handle.extractfile(member)
return extracted.read().decode(
locale.getpreferredencoding())
return None
|
Returns content of file from archive.
If full_path is set to False and two files with given name exist,
content of one is returned (it is not specified which one that is).
If set to True, returns content of exactly that file.
Args:
name: name of the file to get content of
Returns:
Content of the file with given name or None, if no such.
|
entailment
|
def extract_file(self, name, full_path=False, directory="."):
"""Extract a member from the archive to the specified working directory.
Behaviour of name and full_path is the same as in function
get_content_of_file.
"""
if self.handle:
for member in self.handle.getmembers():
if (full_path and member.name == name or
not full_path and os.path.basename(
member.name) == name):
# TODO handle KeyError exception
self.handle.extract(member, path=directory)
|
Extract a member from the archive to the specified working directory.
Behaviour of name and full_path is the same as in function
get_content_of_file.
|
entailment
|
def extract_all(self, directory=".", members=None):
"""Extract all member from the archive to the specified working
directory.
"""
if self.handle:
self.handle.extractall(path=directory, members=members)
|
Extract all members from the archive to the specified working
directory.
|
entailment
|
def has_file_with_suffix(self, suffixes):
"""Finds out if there is a file with one of suffixes in the archive.
Args:
suffixes: list of suffixes or single suffix to look for
Returns:
True if there is at least one file with at least one given suffix
in the archive, False otherwise (or archive can't be opened)
"""
if not isinstance(suffixes, list):
suffixes = [suffixes]
if self.handle:
for member in self.handle.getmembers():
if os.path.splitext(member.name)[1] in suffixes:
return True
else:
# hack for .zip files, where directories are not returned
# themselves, therefore we can't find e.g. .egg-info
for suffix in suffixes:
if '{0}/'.format(suffix) in member.name:
return True
return False
|
Finds out if there is a file with one of the given suffixes in the archive.
Args:
suffixes: list of suffixes or single suffix to look for
Returns:
True if there is at least one file with at least one given suffix
in the archive, False otherwise (or archive can't be opened)
|
entailment
|
def get_files_re(self, file_re, full_path=False, ignorecase=False):
"""Finds all files that match file_re and returns their list.
Doesn't return directories, only files.
Args:
file_re: raw string to match files against (gets compiled into re)
full_path: whether to match against full path inside the archive
or just the filenames
ignorecase: whether to ignore case when using the given re
Returns:
List of full paths of files inside the archive that match the given
file_re.
"""
try:
if ignorecase:
compiled_re = re.compile(file_re, re.I)
else:
compiled_re = re.compile(file_re)
except sre_constants.error:
logger.error("Failed to compile regex: {}.".format(file_re))
return []
found = []
if self.handle:
for member in self.handle.getmembers():
if isinstance(member, TarInfo) and member.isdir():
pass # for TarInfo files, filter out directories
elif (full_path and compiled_re.search(member.name)) or (
not full_path and compiled_re.search(os.path.basename(
member.name))):
found.append(member.name)
return found
|
Finds all files that match file_re and returns their list.
Doesn't return directories, only files.
Args:
file_re: raw string to match files against (gets compiled into re)
full_path: whether to match against full path inside the archive
or just the filenames
ignorecase: whether to ignore case when using the given re
Returns:
List of full paths of files inside the archive that match the given
file_re.
|
entailment
|
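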
def get_directories_re(
self,
directory_re,
full_path=False,
ignorecase=False):
"""Same as get_files_re, but for directories"""
if ignorecase:
compiled_re = re.compile(directory_re, re.I)
else:
compiled_re = re.compile(directory_re)
found = set()
if self.handle:
for member in self.handle.getmembers():
# zipfiles only list directories => have to work around that
if isinstance(member, ZipInfo):
to_match = os.path.dirname(member.name)
# tarfiles => only match directories
elif isinstance(member, TarInfo) and member.isdir():
to_match = member.name
else:
to_match = None
if to_match:
if ((full_path and compiled_re.search(to_match)) or (
not full_path and compiled_re.search(
os.path.basename(to_match)))):
found.add(to_match)
return list(found)
|
Same as get_files_re, but for directories
|
entailment
|
def top_directory(self):
"""Return the name of the archive topmost directory."""
if self.handle:
return os.path.commonprefix(self.handle.getnames()).rstrip('/')
|
Return the name of the archive topmost directory.
|
entailment
|
def json_wheel_metadata(self):
"""Simple getter that get content of metadata.json file in .whl archive
Returns:
metadata from metadata.json or pydist.json in json format
"""
for meta_file in ("metadata.json", "pydist.json"):
try:
return json.loads(self.get_content_of_file(meta_file))
except TypeError as err:
logger.warning(
'Could not extract metadata from {}.'
' Error: {}'.format(meta_file, err))
sys.exit(
'Unable to extract package metadata from .whl archive. '
'This might be caused by an old .whl format version. '
'You may ask the upstream to upload fresh wheels created '
'with wheel >= 0.17.0 or to upload an sdist as well to '
'workaround this problem.')
|
Simple getter that gets the content of the metadata.json file in a .whl archive.
Returns:
metadata from metadata.json or pydist.json in json format
|
entailment
|
def record(self):
"""Getter that get content of RECORD file in .whl archive
Returns:
dict with keys `modules` and `scripts`
"""
modules = []
scripts = []
if self.get_content_of_file('RECORD'):
lines = self.get_content_of_file('RECORD').splitlines()
for line in lines:
if 'dist-info' in line or '/' not in line:
continue
elif '.data/scripts' in line:
script = line.split(',', 1)[0]
# strip Name.version.data/scripts/
scripts.append(re.sub('.*/.*/', '', script))
else:
# strip everything from first occurance of slash
modules.append(re.sub('/.*', '', line))
return {'modules': sorted(set(modules)),
'scripts': sorted(set(scripts))}
|
Getter that gets the content of the RECORD file in a .whl archive.
Returns:
dict with keys `modules` and `scripts`
|
entailment
|
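A self-contained sketch of the RECORD-parsing rules above, run against a made-up RECORD snippet (all file names here are hypothetical):

import re

record_lines = [
    "foo/__init__.py,sha256=abc,120",
    "foo/core.py,sha256=def,310",
    "foo-1.0.data/scripts/foo-cli,sha256=ghi,57",
    "foo-1.0.dist-info/METADATA,sha256=jkl,990",
]

modules, scripts = set(), set()
for line in record_lines:
    if 'dist-info' in line or '/' not in line:
        continue
    elif '.data/scripts' in line:
        # keep only the script basename
        scripts.add(re.sub('.*/.*/', '', line.split(',', 1)[0]))
    else:
        # keep only the top-level package/module name
        modules.add(re.sub('/.*', '', line))

print(sorted(modules), sorted(scripts))  # ['foo'] ['foo-cli']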
def rpm_versioned_name(cls, name, version, default_number=False):
"""Properly versions the name.
For example:
rpm_versioned_name('python-foo', '26') will return python26-foo
        rpm_versioned_name('pyfoo', '3') will return python3-pyfoo
If version is same as settings.DEFAULT_PYTHON_VERSION, no change
is done.
Args:
name: name to version
version: version or None
Returns:
Versioned name or the original name if given version is None.
"""
regexp = re.compile(r'^python(\d*|)-(.*)')
auto_provides_regexp = re.compile(r'^python(\d*|)dist(.*)')
if (not version or version == cls.get_default_py_version() and
not default_number):
found = regexp.search(name)
# second check is to avoid renaming of python2-devel to
# python-devel
if found and found.group(2) != 'devel':
if 'epel' not in cls.template:
return 'python-{0}'.format(regexp.search(name).group(2))
return name
versioned_name = name
if version:
if regexp.search(name):
versioned_name = re.sub(r'^python(\d*|)-', 'python{0}-'.format(
version), name)
elif auto_provides_regexp.search(name):
versioned_name = re.sub(
r'^python(\d*|)dist', 'python{0}dist'.format(
version), name)
else:
versioned_name = 'python{0}-{1}'.format(version, name)
if ('epel' in cls.template and version !=
cls.get_default_py_version()):
versioned_name = versioned_name.replace('{0}'.format(
version), '%{{python{0}_pkgversion}}'.format(version))
return versioned_name
|
Properly versions the name.
For example:
rpm_versioned_name('python-foo', '26') will return python26-foo
rpm_versioned_name('pyfoo', '3') will return python3-pyfoo
If version is same as settings.DEFAULT_PYTHON_VERSION, no change
is done.
Args:
name: name to version
version: version or None
Returns:
Versioned name or the original name if given version is None.
|
entailment
|
def rpm_name(self, name, python_version=None, pkg_name=False):
"""Returns name of the package converted to (possibly) correct package
name according to Packaging Guidelines.
Args:
name: name to convert
python_version: python version for which to retrieve the name of
the package
pkg_name: flag to perform conversion of rpm package name,
present in this class just for API compatibility reason
Returns:
Converted name of the package, that should be in line with
            Fedora Packaging Guidelines. If python_version is not None,
the returned name is in form python%(version)s-%(name)s
"""
logger.debug("Converting name: {0} to rpm name, version: {1}.".format(
name, python_version))
rpmized_name = self.base_name(name)
rpmized_name = 'python-{0}'.format(rpmized_name)
if self.distro == 'mageia':
rpmized_name = rpmized_name.lower()
logger.debug('Rpmized name of {0}: {1}.'.format(name, rpmized_name))
return NameConvertor.rpm_versioned_name(rpmized_name, python_version)
|
Returns name of the package converted to (possibly) correct package
name according to Packaging Guidelines.
Args:
name: name to convert
python_version: python version for which to retrieve the name of
the package
pkg_name: flag to perform conversion of rpm package name,
present in this class just for API compatibility reason
Returns:
Converted name of the package, that should be in line with
Fedora Packaging Guidelines. If python_version is not None,
the returned name is in form python%(version)s-%(name)s
|
entailment
|
def base_name(self, name):
"""Removes any python prefixes of suffixes from name if present."""
base_name = name.replace('.', "-")
# remove python prefix if present
found_prefix = self.reg_start.search(name)
if found_prefix:
base_name = found_prefix.group(2)
# remove -pythonXY like suffix if present
found_end = self.reg_end.search(name.lower())
if found_end:
base_name = found_end.group(1)
return base_name
|
Removes any python prefixes or suffixes from name if present.
|
entailment
|
def merge(self, other):
"""Merges object with other NameVariants object, not set values
of self.variants are replace by values from other object.
"""
if not isinstance(other, NameVariants):
raise TypeError("NameVariants isinstance can be merge with"
"other isinstance of the same class")
for key in self.variants:
self.variants[key] = self.variants[key] or other.variants[key]
return self
|
Merges this object with another NameVariants object; unset values
of self.variants are replaced by values from the other object.
|
entailment
|
def rpm_name(self, name, python_version=None, pkg_name=False):
"""Checks if name converted using superclass rpm_name_method match name
of package in the query. Searches for correct name if it doesn't.
Args:
name: name to convert
python_version: python version for which to retrieve the name of
the package
pkg_name: flag to perform conversion of rpm package name
(foo -> python-foo)
"""
if pkg_name:
return super(DandifiedNameConvertor, self).rpm_name(
name, python_version)
original_name = name
converted = super(DandifiedNameConvertor, self).rpm_name(
name, python_version)
python_query = self.query.filter(name__substr=[
'python', 'py', original_name, canonical_form(original_name)])
if converted in [pkg.name for pkg in python_query]:
logger.debug("Converted name exists")
return converted
logger.debug("Converted name not found, searches for correct form")
not_versioned_name = NameVariants(self.base_name(original_name), '')
versioned_name = NameVariants(self.base_name(original_name),
python_version)
if self.base_name(original_name).startswith("py"):
nonpy_name = NameVariants(self.base_name(
original_name)[2:], python_version)
for pkg in python_query:
versioned_name.find_match(pkg.name)
not_versioned_name.find_match(pkg.name)
if 'nonpy_name' in locals():
nonpy_name.find_match(pkg.name)
if 'nonpy_name' in locals():
versioned_name = versioned_name.merge(nonpy_name)
correct_form = versioned_name.merge(not_versioned_name).best_matching
logger.debug("Most likely correct form of the name {0}.".format(
correct_form))
return correct_form or converted
|
Checks if the name converted using the superclass rpm_name method matches
the name of a package in the query. Searches for the correct name if it doesn't.
Args:
name: name to convert
python_version: python version for which to retrieve the name of
the package
pkg_name: flag to perform conversion of rpm package name
(foo -> python-foo)
|
entailment
|
def merge_versions(self, data):
"""Merges python versions specified in command lines options with
extracted versions, checks if some of the versions is not > 2 if EPEL6
template will be used. attributes base_python_version and
python_versions contain values specified by command line options or
default values, data.python_versions contains extracted data.
"""
if self.template == "epel6.spec":
# if user requested version greater than 2, writes error message
# and exits
requested_versions = self.python_versions
if self.base_python_version:
requested_versions += [self.base_python_version]
if any(int(ver[0]) > 2 for ver in requested_versions):
sys.stderr.write(
"Invalid version, major number of python version for "
"EPEL6 spec file must not be greater than 2.\n")
sys.exit(1)
            # any extracted versions greater than 2 are removed
data.python_versions = [
ver for ver in data.python_versions if not int(ver[0]) > 2]
# Set python versions from default values in settings.
base_version, additional_versions = (
self.template_base_py_ver, self.template_py_vers)
# Sync default values with extracted versions from PyPI classifiers.
if data.python_versions:
if base_version not in data.python_versions:
base_version = data.python_versions[0]
additional_versions = [
v for v in additional_versions if v in data.python_versions]
# Override default values with those set from command line if any.
if self.base_python_version:
base_version = self.base_python_version
if self.python_versions:
additional_versions = [
v for v in self.python_versions if v != base_version]
data.base_python_version = base_version
data.python_versions = additional_versions
|
Merges python versions specified by command line options with
extracted versions and checks that no version is greater than 2 if the
EPEL6 template will be used. Attributes base_python_version and
python_versions contain values specified by command line options or
default values; data.python_versions contains the extracted data.
|
entailment
|
def convert(self):
"""Returns RPM SPECFILE.
Returns:
rendered RPM SPECFILE.
"""
# move file into position
try:
local_file = self.getter.get()
except (exceptions.NoSuchPackageException, OSError) as e:
logger.error(
"Failed and exiting:", exc_info=True)
logger.info("Pyp2rpm failed. See log for more info.")
sys.exit(e)
# save name and version from the file (rewrite if set previously)
self.name, self.version = self.getter.get_name_version()
self.local_file = local_file
data = self.metadata_extractor.extract_data(self.client)
logger.debug("Extracted metadata:")
logger.debug(pprint.pformat(data.data))
self.merge_versions(data)
jinja_env = jinja2.Environment(loader=jinja2.ChoiceLoader([
jinja2.FileSystemLoader(['/']),
jinja2.PackageLoader('pyp2rpm', 'templates'), ]))
for filter in filters.__all__:
jinja_env.filters[filter.__name__] = filter
try:
jinja_template = jinja_env.get_template(
os.path.abspath(self.template))
except jinja2.exceptions.TemplateNotFound:
# absolute path not found => search in default template dir
            logger.warning('Template: {0} was not found at {1}, falling back '
                           'to the default template dir.'.format(
                               self.template, os.path.abspath(self.template)))
jinja_template = jinja_env.get_template(self.template)
logger.info('Using default template: {0}.'.format(self.template))
ret = jinja_template.render(data=data, name_convertor=name_convertor)
return re.sub(r'[ \t]+\n', "\n", ret)
|
Returns RPM SPECFILE.
Returns:
rendered RPM SPECFILE.
|
entailment
|
def getter(self):
"""Returns an instance of proper PackageGetter subclass. Always
returns the same instance.
Returns:
Instance of the proper PackageGetter subclass according to
provided argument.
Raises:
NoSuchSourceException if source to get the package from is unknown
NoSuchPackageException if the package is unknown on PyPI
"""
if not hasattr(self, '_getter'):
if not self.pypi:
self._getter = package_getters.LocalFileGetter(
self.package,
self.save_dir)
else:
logger.debug(
                '{0} does not exist as a local file, trying PyPI.'.format(
self.package))
self._getter = package_getters.PypiDownloader(
self.client,
self.package,
self.version,
self.save_dir)
return self._getter
|
Returns an instance of proper PackageGetter subclass. Always
returns the same instance.
Returns:
Instance of the proper PackageGetter subclass according to
provided argument.
Raises:
NoSuchSourceException if source to get the package from is unknown
NoSuchPackageException if the package is unknown on PyPI
|
entailment
|
def metadata_extractor(self):
"""Returns an instance of proper MetadataExtractor subclass.
Always returns the same instance.
Returns:
The proper MetadataExtractor subclass according to local file
suffix.
"""
if not hasattr(self, '_local_file'):
raise AttributeError("local_file attribute must be set before "
"calling metadata_extractor")
if not hasattr(self, '_metadata_extractor'):
if self.local_file.endswith('.whl'):
logger.info("Getting metadata from wheel using "
"WheelMetadataExtractor.")
extractor_cls = metadata_extractors.WheelMetadataExtractor
else:
logger.info("Getting metadata from setup.py using "
"SetupPyMetadataExtractor.")
extractor_cls = metadata_extractors.SetupPyMetadataExtractor
base_python_version = (
self.base_python_version or self.template_base_py_ver)
self._metadata_extractor = extractor_cls(
self.local_file,
self.name,
self.name_convertor,
self.version,
self.rpm_name,
self.venv,
base_python_version)
return self._metadata_extractor
|
Returns an instance of proper MetadataExtractor subclass.
Always returns the same instance.
Returns:
The proper MetadataExtractor subclass according to local file
suffix.
|
entailment
|
def client(self):
    """XMLRPC client for PyPI. Always returns the same instance.
    If the package is provided as a path to a compressed source file,
    PyPI will not be used and the client will not be instantiated.
    Returns:
        XMLRPC client for PyPI or None.
    """
    if not hasattr(self, '_client'):
        transport = None
        if self.pypi:
            if self.proxy:
                logger.info('Using provided proxy: {0}.'.format(
                    self.proxy))
                proxyhandler = urllib.ProxyHandler({"http": self.proxy})
                opener = urllib.build_opener(proxyhandler)
                urllib.install_opener(opener)
                # keep the proxy transport; setting it up outside this
                # branch would discard it before the client is created
                transport = ProxyTransport()
            self._client = xmlrpclib.ServerProxy(settings.PYPI_URL,
                                                 transport=transport)
            self._client_set = True
        else:
            self._client = None
    return self._client
|
XMLRPC client for PyPI. Always returns the same instance.
If the package is provided as a path to a compressed source file,
PyPI will not be used and the client will not be instantiated.
Returns:
XMLRPC client for PyPI or None.
|
entailment
|
def memoize_by_args(func):
"""Memoizes return value of a func based on args."""
memory = {}
@functools.wraps(func)
def memoized(*args):
        if args not in memory:
value = func(*args)
memory[args] = value
return memory[args]
return memoized
|
Memoizes return value of a func based on args.
|
entailment
|
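A quick usage sketch for the memoization decorator above (the decorated function is made up):

@memoize_by_args
def slow_square(n):
    print("computing", n)  # only printed on a cache miss
    return n * n

slow_square(4)  # prints "computing 4" and returns 16
slow_square(4)  # returns 16 straight from the cache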
def build_srpm(specfile, save_dir):
"""Builds a srpm from given specfile using rpmbuild.
Generated srpm is stored in directory specified by save_dir.
Args:
specfile: path to a specfile
save_dir: path to source and build tree
"""
logger.info('Starting rpmbuild to build: {0} SRPM.'.format(specfile))
if save_dir != get_default_save_path():
try:
msg = subprocess.Popen(
['rpmbuild',
'--define', '_sourcedir {0}'.format(save_dir),
'--define', '_builddir {0}'.format(save_dir),
'--define', '_srcrpmdir {0}'.format(save_dir),
'--define', '_rpmdir {0}'.format(save_dir),
'-bs', specfile], stdout=subprocess.PIPE).communicate(
)[0].strip()
except OSError:
logger.error(
"Rpmbuild failed for specfile: {0} and save_dir: {1}".format(
specfile, save_dir), exc_info=True)
msg = 'Rpmbuild failed. See log for more info.'
return msg
else:
if not os.path.exists(save_dir):
raise IOError("Specify folder to store a file (SAVE_DIR) "
"or install rpmdevtools.")
try:
msg = subprocess.Popen(
['rpmbuild',
'--define', '_sourcedir {0}'.format(save_dir + '/SOURCES'),
'--define', '_builddir {0}'.format(save_dir + '/BUILD'),
'--define', '_srcrpmdir {0}'.format(save_dir + '/SRPMS'),
'--define', '_rpmdir {0}'.format(save_dir + '/RPMS'),
'-bs', specfile], stdout=subprocess.PIPE).communicate(
)[0].strip()
except OSError:
logger.error("Rpmbuild failed for specfile: {0} and save_dir: "
"{1}".format(specfile, save_dir), exc_info=True)
msg = 'Rpmbuild failed. See log for more info.'
return msg
|
Builds a srpm from given specfile using rpmbuild.
Generated srpm is stored in directory specified by save_dir.
Args:
specfile: path to a specfile
save_dir: path to source and build tree
|
entailment
|
def remove_major_minor_suffix(scripts):
"""Checks if executables already contain a "-MAJOR.MINOR" suffix. """
minor_major_regex = re.compile("-\d.?\d?$")
return [x for x in scripts if not minor_major_regex.search(x)]
|
Checks if executables already contain a "-MAJOR.MINOR" suffix.
|
entailment
|
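For illustration, what the filter above keeps and drops (the script names are hypothetical):

scripts = ["pytest", "pytest-3.6", "coverage-2", "nosetests"]
# entries with a trailing "-MAJOR" or "-MAJOR.MINOR" are filtered out
print(remove_major_minor_suffix(scripts))  # -> ['pytest', 'nosetests']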
def runtime_to_build(runtime_deps):
"""Adds all runtime deps to build deps"""
build_deps = copy.deepcopy(runtime_deps)
for dep in build_deps:
if len(dep) > 0:
dep[0] = 'BuildRequires'
return build_deps
|
Adds all runtime deps to build deps
|
entailment
|
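A small sketch of the dependency-list shape this helper operates on (the entries are illustrative):

runtime = [['Requires', 'python3-setuptools'], ['Requires', 'python3-six']]
print(runtime_to_build(runtime))
# -> [['BuildRequires', 'python3-setuptools'], ['BuildRequires', 'python3-six']]
print(runtime[0][0])  # -> 'Requires'; the deep copy leaves the input untouched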
def unique_deps(deps):
"""Remove duplicities from deps list of the lists"""
deps.sort()
return list(k for k, _ in itertools.groupby(deps))
|
Remove duplicates from deps, a list of lists.
|
entailment
|
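Because the inner lists are unhashable, the helper deduplicates by sorting and grouping instead of using a set; for example:

deps = [['Requires', 'six'], ['Requires', 'pytz'], ['Requires', 'six']]
print(unique_deps(deps))  # -> [['Requires', 'pytz'], ['Requires', 'six']]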
def c_time_locale():
    """Context manager with C LC_TIME locale"""
    old_time_locale = locale.getlocale(locale.LC_TIME)
    locale.setlocale(locale.LC_TIME, 'C')
    try:
        yield
    finally:
        # restore the original locale even if the managed block raises
        locale.setlocale(locale.LC_TIME, old_time_locale)
|
Context manager with C LC_TIME locale
|
entailment
|
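A minimal usage sketch, assuming the generator above is wrapped with contextlib.contextmanager (the decorator does not appear in the extracted snippet):

import contextlib
import locale
import time

@contextlib.contextmanager
def c_time_locale():
    old_time_locale = locale.getlocale(locale.LC_TIME)
    locale.setlocale(locale.LC_TIME, 'C')
    try:
        yield
    finally:
        locale.setlocale(locale.LC_TIME, old_time_locale)

with c_time_locale():
    # date formatting inside the block is locale-independent
    print(time.strftime('%a %b %d %Y'))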
def rpm_eval(macro):
"""Get value of given macro using rpm tool"""
try:
value = subprocess.Popen(
['rpm', '--eval', macro],
stdout=subprocess.PIPE).communicate()[0].strip()
except OSError:
logger.error('Failed to get value of {0} rpm macro'.format(
macro), exc_info=True)
value = b''
return console_to_str(value)
|
Get value of given macro using rpm tool
|
entailment
|
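A hedged usage sketch for the helper above; %{_topdir} and %{_rpmdir} are standard rpm macros, and the printed paths depend on the local rpm configuration:

# assumes an rpm-based system with the rpm CLI installed
print(rpm_eval('%{_topdir}'))  # e.g. /home/user/rpmbuild
print(rpm_eval('%{_rpmdir}'))  # e.g. /home/user/rpmbuild/RPMS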
def get_default_save_path():
"""Return default save path for the packages"""
macro = '%{_topdir}'
if rpm:
save_path = rpm.expandMacro(macro)
else:
save_path = rpm_eval(macro)
if not save_path:
logger.warn("rpm tools are missing, using default save path "
"~/rpmbuild/.")
save_path = os.path.expanduser('~/rpmbuild')
return save_path
|
Return default save path for the packages
|
entailment
|
def check_and_get_data(input_list, **pars):
    """Verify that all specified files are present. If not, retrieve them from MAST.
    Parameters
    ----------
    input_list : list
        List of one or more calibrated fits images that will be used for catalog generation.
    Returns
    -------
    total_input_list: list
        list of full filenames
    """
empty_list = []
retrieve_list = [] # Actual files retrieved via astroquery and resident on disk
candidate_list = [] # File names gathered from *_asn.fits file
ipppssoot_list = [] # ipppssoot names used to avoid duplicate downloads
total_input_list = [] # Output full filename list of data on disk
# Loop over the input_list to determine if the item in the input_list is a full association file
# (*_asn.fits), a full individual image file (aka singleton, *_flt.fits), or a root name specification
# (association or singleton, ipppssoot).
for input_item in input_list:
print('Input item: ', input_item)
indx = input_item.find('_')
# Input with a suffix (_xxx.fits)
if indx != -1:
lc_input_item = input_item.lower()
suffix = lc_input_item[indx+1:indx+4]
print('file: ', lc_input_item)
# For an association, need to open the table and read the image names as this could
# be a custom association. The assumption is this file is on local disk when specified
# in this manner (vs just the ipppssoot of the association).
# This "if" block just collects the wanted full file names.
if suffix == 'asn':
try:
asntab = Table.read(input_item, format='fits')
except FileNotFoundError:
log.error('File {} not found.'.format(input_item))
return(empty_list)
for row in asntab:
if row['MEMTYPE'].startswith('PROD'):
continue
memname = row['MEMNAME'].lower().strip()
# Need to check if the MEMNAME is a full filename or an ipppssoot
if memname.find('_') != -1:
candidate_list.append(memname)
else:
candidate_list.append(memname + '_flc.fits')
elif suffix == 'flc' or suffix == 'flt':
if lc_input_item not in candidate_list:
candidate_list.append(lc_input_item)
else:
log.error('Inappropriate file suffix: {}. Looking for "asn.fits", "flc.fits", or "flt.fits".'.format(suffix))
return(empty_list)
# Input is an ipppssoot (association or singleton), nine characters by definition.
# This "else" block actually downloads the data specified as ipppssoot.
elif len(input_item) == 9:
try:
if input_item not in ipppssoot_list:
# An ipppssoot of an individual file which is part of an association cannot be
# retrieved from MAST
retrieve_list = aqutils.retrieve_observation(input_item,**pars)
# If the retrieved list is not empty, add filename(s) to the total_input_list.
# Also, update the ipppssoot_list so we do not try to download the data again. Need
# to do this since retrieve_list can be empty because (1) data cannot be acquired (error)
# or (2) data is already on disk (ok).
if retrieve_list:
total_input_list += retrieve_list
ipppssoot_list.append(input_item)
else:
log.error('File {} cannot be retrieved from MAST.'.format(input_item))
return(empty_list)
except Exception:
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_tb, file=sys.stdout)
# Only the retrieve_list files via astroquery have been put into the total_input_list thus far.
# Now check candidate_list to detect or acquire the requested files from MAST via
# astroquery.
    for file in candidate_list:
        # If the file is found on disk, add it to the total_input_list
        if glob.glob(file):
            total_input_list.append(file)
        else:
            log.error('File {} cannot be found on the local disk.'.format(file))
            return(empty_list)
log.info("TOTAL INPUT LIST: {}".format(total_input_list))
return(total_input_list)
|
Verify that all specified files are present. If not, retrieve them from MAST.
Parameters
----------
input_list : list
List of one or more calibrated fits images that will be used for catalog generation.
Returns
-------
total_input_list: list
list of full filenames
|
entailment
|
def perform_align(input_list, **kwargs):
"""Main calling function.
Parameters
----------
input_list : list
List of one or more IPPSSOOTs (rootnames) to align.
archive : Boolean
Retain copies of the downloaded files in the astroquery created sub-directories?
clobber : Boolean
Download and overwrite existing local copies of input files?
debug : Boolean
Attempt to use saved sourcelists stored in pickle files if they exist, or if they do not exist, save
sourcelists in pickle files for reuse so that step 4 can be skipped for faster subsequent debug/development
        runs?
update_hdr_wcs : Boolean
        Write newly computed WCS information to image headers?
print_fit_parameters : Boolean
Specify whether or not to print out FIT results for each chip.
print_git_info : Boolean
Display git repository information?
output : Boolean
Should utils.astrometric_utils.create_astrometric_catalog() generate file 'ref_cat.ecsv' and should
generate_source_catalogs() generate the .reg region files for every chip of every input image and should
generate_astrometric_catalog() generate file 'refcatalog.cat'?
Updates
-------
filteredTable: Astropy Table
Table which contains processing information and alignment results for every raw image evaluated
"""
filteredTable = Table()
run_align(input_list, result=filteredTable, **kwargs)
return filteredTable
|
Main calling function.
Parameters
----------
input_list : list
List of one or more IPPSSOOTs (rootnames) to align.
archive : Boolean
Retain copies of the downloaded files in the astroquery created sub-directories?
clobber : Boolean
Download and overwrite existing local copies of input files?
debug : Boolean
Attempt to use saved sourcelists stored in pickle files if they exist, or if they do not exist, save
sourcelists in pickle files for reuse so that step 4 can be skipped for faster subsequent debug/development
runs?
update_hdr_wcs : Boolean
Write newly computed WCS information to image headers?
print_fit_parameters : Boolean
Specify whether or not to print out FIT results for each chip.
print_git_info : Boolean
Display git repository information?
output : Boolean
Should utils.astrometric_utils.create_astrometric_catalog() generate file 'ref_cat.ecsv' and should
generate_source_catalogs() generate the .reg region files for every chip of every input image and should
generate_astrometric_catalog() generate file 'refcatalog.cat'?
Updates
-------
filteredTable: Astropy Table
Table which contains processing information and alignment results for every raw image evaluated
|
entailment
|
def match_relative_fit(imglist, reference_catalog):
"""Perform cross-matching and final fit using 2dHistogram matching
Parameters
----------
imglist : list
List of input image `~tweakwcs.tpwcs.FITSWCS` objects with metadata and source catalogs
reference_catalog : Table
Astropy Table of reference sources for this field
Returns
--------
imglist : list
List of input image `~tweakwcs.tpwcs.FITSWCS` objects with metadata and source catalogs
"""
log.info("------------------- STEP 5b: (match_relative_fit) Cross matching and fitting ---------------------------")
# 0: Specify matching algorithm to use
match = tweakwcs.TPMatch(searchrad=75, separation=0.1,
tolerance=2, use2dhist=True)
# match = tweakwcs.TPMatch(searchrad=250, separation=0.1,
# tolerance=100, use2dhist=False)
# Align images and correct WCS
# NOTE: this invocation does not use an astrometric catalog. This call allows all the input images to be aligned in
# a relative way using the first input image as the reference.
# 1: Perform relative alignment
tweakwcs.align_wcs(imglist, None, match=match, expand_refcat=True)
# Set all the group_id values to be the same so the various images/chips will be aligned to the astrometric
# reference catalog as an ensemble.
# BEWARE: If additional iterations of solutions are to be done, the group_id values need to be restored.
for image in imglist:
image.meta["group_id"] = 1234567
# 2: Perform absolute alignment
tweakwcs.align_wcs(imglist, reference_catalog, match=match)
# 3: Interpret RMS values from tweakwcs
interpret_fit_rms(imglist, reference_catalog)
return imglist
|
Perform cross-matching and final fit using 2dHistogram matching
Parameters
----------
imglist : list
List of input image `~tweakwcs.tpwcs.FITSWCS` objects with metadata and source catalogs
reference_catalog : Table
Astropy Table of reference sources for this field
Returns
--------
imglist : list
List of input image `~tweakwcs.tpwcs.FITSWCS` objects with metadata and source catalogs
|
entailment
|
def match_default_fit(imglist, reference_catalog):
"""Perform cross-matching and final fit using 2dHistogram matching
Parameters
----------
imglist : list
List of input image `~tweakwcs.tpwcs.FITSWCS` objects with metadata and source catalogs
reference_catalog : Table
Astropy Table of reference sources for this field
Returns
--------
imglist : list
List of input image `~tweakwcs.tpwcs.FITSWCS` objects with metadata and source catalogs
"""
log.info("-------------------- STEP 5b: (match_default_fit) Cross matching and fitting ---------------------------")
# Specify matching algorithm to use
match = tweakwcs.TPMatch(searchrad=250, separation=0.1,
tolerance=100, use2dhist=False)
# Align images and correct WCS
tweakwcs.align_wcs(imglist, reference_catalog, match=match, expand_refcat=False) #TODO: turn on 'expand_refcat' option in future development
# Interpret RMS values from tweakwcs
interpret_fit_rms(imglist, reference_catalog)
return imglist
|
Perform cross-matching and final fit using default tolerance matching (use2dhist disabled)
Parameters
----------
imglist : list
List of input image `~tweakwcs.tpwcs.FITSWCS` objects with metadata and source catalogs
reference_catalog : Table
Astropy Table of reference sources for this field
Returns
--------
imglist : list
List of input image `~tweakwcs.tpwcs.FITSWCS` objects with metadata and source catalogs
|
entailment
|
def determine_fit_quality(imglist, filteredTable, print_fit_parameters=True):
"""Determine the quality of the fit to the data
Parameters
----------
imglist : list
output of interpret_fits. Contains sourcelist tables, newly computed WCS info, etc. for every chip of every valid
input image. This list should have been updated, in-place, with the new RMS values;
specifically,
* 'FIT_RMS': RMS of the separations between fitted image positions and reference positions
* 'TOTAL_RMS': mean of the FIT_RMS values for all observations
* 'NUM_FITS': number of images/group_id's with successful fits included in the TOTAL_RMS
These entries are added to the 'fit_info' dictionary.
filteredTable : object
Astropy Table object containing data pertaining to the associated dataset, including
the doProcess bool. It is intended this table is updated by subsequent functions for
bookkeeping purposes.
print_fit_parameters : bool
Specify whether or not to print out FIT results for each chip
Returns
-------
max_rms_val : float
The best Total rms determined from all of the images
num_xmatches: int
The number of stars used in matching the data
fitQual : int
        fit quality category:
1 = valid solution with rms < 10 mas;
2 = Valid but compromised solution with rms < 10 mas;
3 = Valid solution with RMS >= 10 mas;
4 = Valid but compromised solution with RMS >= 10 mas;
5 = Not valid solution
filteredTable : object
        modified filteredTable object
fitStatusDict : dictionary
Dictionary containing the following:
overall fit validity (Boolean)
total (visit-level) RMS value in mas (float)
number of matched sources (int)
fit compromised status (Boolean)
reason fit is considered 'compromised' (only populated if 'compromised' field is "True")
"""
tweakwcs_info_keys = OrderedDict(imglist[0].meta['fit_info']).keys()
max_rms_val = 1e9
num_xmatches = 0
fitStatusDict={}
xshifts=[]
yshifts=[]
overall_valid = True
overall_comp = False
    for item in imglist:
        if not item.meta['fit_info']['status'].startswith('FAILED'):
            xshifts.append(item.meta['fit_info']['shift'][0])
            yshifts.append(item.meta['fit_info']['shift'][1])
for item in imglist:
image_name = item.meta['name']
chip_num = item.meta['chip']
# Build fitStatusDict entry
dictKey = "{},{}".format(image_name, chip_num)
fitStatusDict[dictKey] = {'valid': False,
'max_rms': max_rms_val,
'num_matches': num_xmatches,
'compromised': False,
'reason': ""} # Initialize dictionary entry for current image/chip
        # Handle fitting failures (no matches found)
        if item.meta['fit_info']['status'].startswith("FAILED"):
            log.warning("No cross matches found in any catalog for {} - no processing done.".format(image_name))
            continue
fit_rms_val = item.meta['fit_info']['FIT_RMS']
max_rms_val = item.meta['fit_info']['TOTAL_RMS']
num_xmatches = item.meta['fit_info']['nmatches']
fitStatusDict[dictKey]['max_rms'] = max_rms_val
fitStatusDict[dictKey]['num_matches'] = num_xmatches
        if num_xmatches < MIN_CROSS_MATCHES:
            log.warning("Not enough cross matches found between astrometric catalog and sources found in {}".format(image_name))
            continue
# Execute checks
nmatchesCheck = False
if num_xmatches > 4:
nmatchesCheck = True
radialOffsetCheck = False
        radialOffset = math.sqrt(float(item.meta['fit_info']['shift'][0])**2 +
                                 float(item.meta['fit_info']['shift'][1])**2)*item.wcs.pscale  # radial offset in arcsec
if float(num_xmatches) * 0.36 > 0.8 + (radialOffset/10.0)**8:
radialOffsetCheck = True
largeRmsCheck = True
if fit_rms_val > 150. or max_rms_val > 150.:
largeRmsCheck = False
# fitRmsCheck = False
# if fit_rms_val < max_rms_val:
# fitRmsCheck = True
consistencyCheck = True
rms_limit = max(item.meta['fit_info']['TOTAL_RMS'], 10.)
if not math.sqrt(np.std(np.asarray(xshifts)) ** 2 + np.std(np.asarray(yshifts)) ** 2) <= (
rms_limit / 1000.0) / (item.wcs.pscale):
consistencyCheck = False
# Decide if fit solutions are valid based on checks
if consistencyCheck == False: # Failed consistency check
fitStatusDict[dictKey]['valid'] = False
fitStatusDict[dictKey]['compromised'] = False
fitStatusDict[dictKey]['reason'] = "Consistency violation!"
elif largeRmsCheck == False: # RMS value(s) too large
fitStatusDict[dictKey]['valid'] = False
fitStatusDict[dictKey]['compromised'] = False
fitStatusDict[dictKey]['reason'] = "RMS too large (>150 mas)!"
elif radialOffsetCheck == False: # Failed radial offset check
fitStatusDict[dictKey]['valid'] = False
fitStatusDict[dictKey]['compromised'] = False
fitStatusDict[dictKey]['reason'] = "Radial offset value too large!"
elif nmatchesCheck == False: # Too few matches
fitStatusDict[dictKey]['valid'] = True
fitStatusDict[dictKey]['compromised'] = True
fitStatusDict[dictKey]['reason'] = "Too few matches!"
else: # all checks passed. Valid solution.
fitStatusDict[dictKey]['valid'] = True
fitStatusDict[dictKey]['compromised'] = False
fitStatusDict[dictKey]['reason'] = ""
# for now, generate overall valid and compromised values. Basically, if any of the entries for "valid" is False,
# treat the whole dataset as not valid. Same goes for compromised.
if fitStatusDict[dictKey]['valid'] == False:
overall_valid = False
if fitStatusDict[dictKey]['compromised'] == True:
overall_comp = True
log.info('RESULTS FOR {} Chip {}: FIT_RMS = {} mas, TOTAL_RMS = {} mas, NUM = {}'.format(image_name, item.meta['chip'], fit_rms_val, max_rms_val, num_xmatches))
# print fit params to screen
if print_fit_parameters:
log.info("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FIT PARAMETERS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
log.info("image: {}".format(image_name))
log.info("chip: {}".format(item.meta['chip']))
log.info("group_id: {}".format(item.meta['group_id']))
for tweakwcs_info_key in tweakwcs_info_keys:
if not tweakwcs_info_key.startswith("matched"):
log.info("{} : {}".format(tweakwcs_info_key,item.meta['fit_info'][tweakwcs_info_key]))
log.info("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
log.info("nmatchesCheck: {} radialOffsetCheck: {} largeRmsCheck: {}, consistencyCheck: {}".format(nmatchesCheck,radialOffsetCheck,largeRmsCheck,consistencyCheck))
# determine which fit quality category this latest fit falls into
if overall_valid == False:
fitQual = 5
log.info("FIT SOLUTION REJECTED")
filteredTable['status'][:] = 1
for ctr in range(0, len(filteredTable)):
filteredTable[ctr]['processMsg'] = fitStatusDict[filteredTable[ctr]['imageName'] + ",1"]["reason"]
else:
for ctr in range(0, len(filteredTable)):
filteredTable[ctr]['processMsg'] = ""
if overall_comp == False and max_rms_val < 10.:
log.info("Valid solution with RMS < 10 mas found!")
fitQual = 1
elif overall_comp == True and max_rms_val < 10.:
log.info("Valid but compromised solution with RMS < 10 mas found!")
fitQual = 2
elif overall_comp == False and max_rms_val >= 10.:
log.info("Valid solution with RMS >= 10 mas found!")
fitQual = 3
else:
log.info("Valid but compromised solution with RMS >= 10 mas found!")
fitQual = 4
if print_fit_parameters:
for item in imglist: log.info(fitStatusDict["{},{}".format(item.meta['name'], item.meta['chip'])])
if max_rms_val > MAX_FIT_RMS:
log.info("Total fit RMS value = {} mas greater than the maximum threshold value {}.".format(max_rms_val, MAX_FIT_RMS))
if not overall_valid:
log.info("The fit solution for some or all of the images is not valid.")
if max_rms_val > MAX_FIT_RMS or overall_valid == False:
log.info("Try again with the next catalog")
else:
log.info("Fit calculations successful.")
return max_rms_val, num_xmatches, fitQual, filteredTable, fitStatusDict
|
Determine the quality of the fit to the data
Parameters
----------
imglist : list
output of interpret_fits. Contains sourcelist tables, newly computed WCS info, etc. for every chip of every valid
input image. This list should have been updated, in-place, with the new RMS values;
specifically,
* 'FIT_RMS': RMS of the separations between fitted image positions and reference positions
* 'TOTAL_RMS': mean of the FIT_RMS values for all observations
* 'NUM_FITS': number of images/group_id's with successful fits included in the TOTAL_RMS
These entries are added to the 'fit_info' dictionary.
filteredTable : object
Astropy Table object containing data pertaining to the associated dataset, including
the doProcess bool. It is intended this table is updated by subsequent functions for
bookkeeping purposes.
print_fit_parameters : bool
Specify whether or not to print out FIT results for each chip
Returns
-------
max_rms_val : float
The best Total rms determined from all of the images
num_xmatches: int
The number of stars used in matching the data
fitQual : int
    fit quality category:
1 = valid solution with rms < 10 mas;
2 = Valid but compromised solution with rms < 10 mas;
3 = Valid solution with RMS >= 10 mas;
4 = Valid but compromised solution with RMS >= 10 mas;
5 = Not valid solution
filteredTable : object
    modified filteredTable object
fitStatusDict : dictionary
Dictionary containing the following:
overall fit validity (Boolean)
total (visit-level) RMS value in mas (float)
number of matched sources (int)
fit compromised status (Boolean)
reason fit is considered 'compromised' (only populated if 'compromised' field is "True")
|
entailment
|
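A self-contained numeric sketch of the shift-consistency check used above: the scatter of the per-chip shifts must stay below the RMS limit converted from mas to pixels (all numbers here are made up):

import math
import numpy as np

xshifts = [1.02, 0.98, 1.01]  # per-chip x shifts in pixels (hypothetical)
yshifts = [-0.51, -0.49, -0.50]  # per-chip y shifts in pixels (hypothetical)
total_rms = 8.0  # visit-level RMS in mas (hypothetical)
pscale = 0.05  # plate scale in arcsec/pixel (hypothetical)

rms_limit = max(total_rms, 10.)  # floor the limit at 10 mas
shift_scatter = math.sqrt(np.std(np.asarray(xshifts))**2 +
                          np.std(np.asarray(yshifts))**2)
consistency_check = shift_scatter <= (rms_limit / 1000.0) / pscale
print(shift_scatter, consistency_check)  # ~0.019 pixels, True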
def generate_astrometric_catalog(imglist, **pars):
"""Generates a catalog of all sources from an existing astrometric catalog are in or near the FOVs of the images in
the input list.
Parameters
----------
imglist : list
List of one or more calibrated fits images that will be used for catalog generation.
Returns
=======
ref_table : object
Astropy Table object of the catalog
"""
# generate catalog
temp_pars = pars.copy()
    if pars['output']:
        pars['output'] = 'ref_cat.ecsv'
    else:
        pars['output'] = None
    out_catalog = amutils.create_astrometric_catalog(imglist, **pars)
pars = temp_pars.copy()
#if the catalog has contents, write the catalog to ascii text file
if len(out_catalog) > 0 and pars['output']:
catalog_filename = "refcatalog.cat"
out_catalog.write(catalog_filename, format="ascii.fast_commented_header")
log.info("Wrote reference catalog {}.".format(catalog_filename))
return(out_catalog)
|
Generates a catalog of all sources from an existing astrometric catalog that are in or near the FOVs of the images in
the input list.
Parameters
----------
imglist : list
List of one or more calibrated fits images that will be used for catalog generation.
Returns
-------
ref_table : object
Astropy Table object of the catalog
|
entailment
|
def generate_source_catalogs(imglist, **pars):
"""Generates a dictionary of source catalogs keyed by image name.
Parameters
----------
imglist : list
List of one or more calibrated fits images that will be used for source detection.
Returns
-------
sourcecatalogdict : dictionary
        a dictionary (keyed by image name) of two element dictionaries which in turn contain 1) a dictionary of the
detector-specific processing parameters and 2) an astropy table of position and photometry information of all
detected sources
"""
output = pars.get('output', False)
sourcecatalogdict = {}
for imgname in imglist:
log.info("Image name: {}".format(imgname))
sourcecatalogdict[imgname] = {}
# open image
imghdu = fits.open(imgname)
imgprimaryheader = imghdu[0].header
instrument = imgprimaryheader['INSTRUME'].lower()
detector = imgprimaryheader['DETECTOR'].lower()
# get instrument/detector-specific image alignment parameters
        if instrument in detector_specific_params:
            if detector in detector_specific_params[instrument]:
                detector_pars = detector_specific_params[instrument][detector]
                # to allow generate_source_catalog to get detector specific parameters
                detector_pars.update(pars)
                sourcecatalogdict[imgname]["params"] = detector_pars
            else:
                log.error("ERROR! Unrecognized detector '{}'. Exiting...".format(detector))
                sys.exit("ERROR! Unrecognized detector '{}'. Exiting...".format(detector))
        else:
            log.error("ERROR! Unrecognized instrument '{}'. Exiting...".format(instrument))
            sys.exit("ERROR! Unrecognized instrument '{}'. Exiting...".format(instrument))
# Identify sources in image, convert coords from chip x, y form to reference WCS sky RA, Dec form.
imgwcs = HSTWCS(imghdu, 1)
        fwhmpsf_pix = sourcecatalogdict[imgname]["params"]['fwhmpsf']/imgwcs.pscale  # Convert fwhmpsf from arcsec to pixels
sourcecatalogdict[imgname]["catalog_table"] = amutils.generate_source_catalog(imghdu, fwhm=fwhmpsf_pix, **detector_pars)
# write out coord lists to files for diagnostic purposes. Protip: To display the sources in these files in DS9,
# set the "Coordinate System" option to "Physical" when loading the region file.
imgroot = os.path.basename(imgname).split('_')[0]
numSci = amutils.countExtn(imghdu)
# Allow user to decide when and how to write out catalogs to files
if output:
for chip in range(1,numSci+1):
chip_cat = sourcecatalogdict[imgname]["catalog_table"][chip]
if chip_cat and len(chip_cat) > 0:
regfilename = "{}_sci{}_src.reg".format(imgroot, chip)
out_table = Table(chip_cat)
out_table.write(regfilename, include_names=["xcentroid", "ycentroid"], format="ascii.fast_commented_header")
log.info("Wrote region file {}\n".format(regfilename))
imghdu.close()
return(sourcecatalogdict)
|
Generates a dictionary of source catalogs keyed by image name.
Parameters
----------
imglist : list
List of one or more calibrated fits images that will be used for source detection.
Returns
-------
sourcecatalogdict : dictionary
a dictionary (keyed by image name) of two element dictionaries which in turn contain 1) a dictionary of the
detector-specific processing parameters and 2) an astropy table of position and photometry information of all
detected sources
|
entailment
|
def update_image_wcs_info(tweakwcs_output):
"""Write newly computed WCS information to image headers and write headerlet files
Parameters
----------
tweakwcs_output : list
output of tweakwcs. Contains sourcelist tables, newly computed WCS info, etc. for every chip of every valid
input image.
Returns
-------
    out_headerlet_dict : dictionary
a dictionary of the headerlet files created by this subroutine, keyed by flt/flc fits filename.
"""
out_headerlet_dict = {}
for item in tweakwcs_output:
imageName = item.meta['filename']
chipnum = item.meta['chip']
if chipnum == 1:
chipctr = 1
hdulist = fits.open(imageName, mode='update')
num_sci_ext = amutils.countExtn(hdulist)
# generate wcs name for updated image header, headerlet
        if not hdulist['SCI', 1].header['WCSNAME']:  # covers the case where header value 'wcsname' is empty
wcsName = "FIT_{}".format(item.meta['catalog_name'])
else:
wname = hdulist['sci', 1].header['wcsname']
if "-" in wname:
wcsName = '{}-FIT_{}'.format(wname[:wname.index('-')], item.meta['fit_info']['catalog'])
else:
wcsName = '{}-FIT_{}'.format(wname, item.meta['fit_info']['catalog'])
# establish correct mapping to the science extensions
sciExtDict = {}
for sciExtCtr in range(1, num_sci_ext + 1):
sciExtDict["{}".format(sciExtCtr)] = fileutil.findExtname(hdulist,'sci',extver=sciExtCtr)
# update header with new WCS info
updatehdr.update_wcs(hdulist, sciExtDict["{}".format(item.meta['chip'])], item.wcs, wcsname=wcsName,
reusename=True, verbose=True)
if chipctr == num_sci_ext:
# Close updated flc.fits or flt.fits file
#log.info("CLOSE {}\n".format(imageName)) # TODO: Remove before deployment
hdulist.flush()
hdulist.close()
# Create headerlet
out_headerlet = headerlet.create_headerlet(imageName, hdrname=wcsName, wcsname=wcsName)
# Update headerlet
update_headerlet_phdu(item, out_headerlet)
# Write headerlet
if imageName.endswith("flc.fits"):
headerlet_filename = imageName.replace("flc", "flt_hlet")
if imageName.endswith("flt.fits"):
headerlet_filename = imageName.replace("flt", "flt_hlet")
out_headerlet.writeto(headerlet_filename, clobber=True)
log.info("Wrote headerlet file {}.\n\n".format(headerlet_filename))
out_headerlet_dict[imageName] = headerlet_filename
# Attach headerlet as HDRLET extension
headerlet.attach_headerlet(imageName, headerlet_filename)
        chipctr += 1
return (out_headerlet_dict)
|
Write newly computed WCS information to image headers and write headerlet files
Parameters
----------
tweakwcs_output : list
output of tweakwcs. Contains sourcelist tables, newly computed WCS info, etc. for every chip of every valid
input image.
Returns
-------
out_headerlet_dict : dictionary
a dictionary of the headerlet files created by this subroutine, keyed by flt/flc fits filename.
|
entailment
|
def update_headerlet_phdu(tweakwcs_item, headerlet):
"""Update the primary header data unit keywords of a headerlet object in-place
Parameters
    ----------
    tweakwcs_item :
Basically the output from tweakwcs which contains the cross match and fit
information for every chip of every valid input image.
headerlet :
object containing WCS information
"""
# Get the data to be used as values for FITS keywords
rms_ra = tweakwcs_item.meta['fit_info']['RMS_RA'].value
rms_dec = tweakwcs_item.meta['fit_info']['RMS_DEC'].value
fit_rms = tweakwcs_item.meta['fit_info']['FIT_RMS']
nmatch = tweakwcs_item.meta['fit_info']['nmatches']
catalog = tweakwcs_item.meta['fit_info']['catalog']
x_shift = (tweakwcs_item.meta['fit_info']['shift'])[0]
y_shift = (tweakwcs_item.meta['fit_info']['shift'])[1]
rot = tweakwcs_item.meta['fit_info']['rot']
scale = tweakwcs_item.meta['fit_info']['scale'][0]
skew = tweakwcs_item.meta['fit_info']['skew']
# Update the existing FITS keywords
primary_header = headerlet[0].header
primary_header['RMS_RA'] = rms_ra
primary_header['RMS_DEC'] = rms_dec
primary_header['NMATCH'] = nmatch
primary_header['CATALOG'] = catalog
# Create a new FITS keyword
primary_header['FIT_RMS'] = (fit_rms, 'RMS (mas) of the 2D fit of the headerlet solution')
# Create the set of HISTORY keywords
primary_header['HISTORY'] = '~~~~~ FIT PARAMETERS ~~~~~'
primary_header['HISTORY'] = '{:>15} : {:9.4f} "/pixels'.format('platescale', tweakwcs_item.wcs.pscale)
primary_header['HISTORY'] = '{:>15} : {:9.4f} pixels'.format('x_shift', x_shift)
primary_header['HISTORY'] = '{:>15} : {:9.4f} pixels'.format('y_shift', y_shift)
primary_header['HISTORY'] = '{:>15} : {:9.4f} degrees'.format('rotation', rot)
primary_header['HISTORY'] = '{:>15} : {:9.4f}'.format('scale', scale)
primary_header['HISTORY'] = '{:>15} : {:9.4f}'.format('skew', skew)
|
Update the primary header data unit keywords of a headerlet object in-place
Parameters
----------
tweakwcs_item :
Basically the output from tweakwcs which contains the cross match and fit
information for every chip of every valid input image.
headerlet :
object containing WCS information
|
entailment
|
def interpret_fit_rms(tweakwcs_output, reference_catalog):
"""Interpret the FIT information to convert RMS to physical units
Parameters
----------
tweakwcs_output : list
output of tweakwcs. Contains sourcelist tables, newly computed WCS info, etc. for every chip of every valid
input image. This list gets updated, in-place, with the new RMS values;
specifically,
* 'FIT_RMS': RMS of the separations between fitted image positions and reference positions
* 'TOTAL_RMS': mean of the FIT_RMS values for all observations
* 'NUM_FITS': number of images/group_id's with successful fits included in the TOTAL_RMS
These entries are added to the 'fit_info' dictionary.
reference_catalog : astropy.Table
Table of reference source positions used for the fit
Returns
-------
Nothing
"""
# Start by collecting information by group_id
group_ids = [info.meta['group_id'] for info in tweakwcs_output]
# Compress the list to have only unique group_id values to avoid some unnecessary iterations
group_ids = list(set(group_ids))
group_dict = {'avg_RMS':None}
obs_rms = []
for group_id in group_ids:
for item in tweakwcs_output:
# When status = FAILED (fit failed) or REFERENCE (relative alignment done with first image
# as the reference), skip to the beginning of the loop as there is no 'fit_info'.
if item.meta['fit_info']['status'] != 'SUCCESS':
continue
# Make sure to store data for any particular group_id only once.
if item.meta['group_id'] == group_id and \
group_id not in group_dict:
group_dict[group_id] = {'ref_idx':None, 'FIT_RMS':None}
log.info("fit_info: {}".format(item.meta['fit_info']))
tinfo = item.meta['fit_info']
ref_idx = tinfo['matched_ref_idx']
fitmask = tinfo['fitmask']
group_dict[group_id]['ref_idx'] = ref_idx
ref_RA = reference_catalog[ref_idx]['RA'][fitmask]
ref_DEC = reference_catalog[ref_idx]['DEC'][fitmask]
input_RA = tinfo['fit_RA']
input_DEC = tinfo['fit_DEC']
img_coords = SkyCoord(input_RA, input_DEC,
unit='deg',frame='icrs')
ref_coords = SkyCoord(ref_RA, ref_DEC, unit='deg',frame='icrs')
dra, ddec = img_coords.spherical_offsets_to(ref_coords)
ra_rms = np.std(dra.to(u.mas))
dec_rms = np.std(ddec.to(u.mas))
fit_rms = np.std(Angle(img_coords.separation(ref_coords), unit=u.mas)).value
group_dict[group_id]['FIT_RMS'] = fit_rms
group_dict[group_id]['RMS_RA'] = ra_rms
group_dict[group_id]['RMS_DEC'] = dec_rms
obs_rms.append(fit_rms)
# Compute RMS for entire ASN/observation set
total_rms = np.mean(obs_rms)
#total_rms = np.sqrt(np.sum(np.array(obs_rms)**2))
# Now, append computed results to tweakwcs_output
for item in tweakwcs_output:
group_id = item.meta['group_id']
if group_id in group_dict:
fit_rms = group_dict[group_id]['FIT_RMS']
ra_rms = group_dict[group_id]['RMS_RA']
dec_rms = group_dict[group_id]['RMS_DEC']
else:
fit_rms = None
ra_rms = None
dec_rms = None
item.meta['fit_info']['FIT_RMS'] = fit_rms
item.meta['fit_info']['TOTAL_RMS'] = total_rms
item.meta['fit_info']['NUM_FITS'] = len(group_ids)
item.meta['fit_info']['RMS_RA'] = ra_rms
item.meta['fit_info']['RMS_DEC'] = dec_rms
item.meta['fit_info']['catalog'] = reference_catalog.meta['catalog']
|
Interpret the FIT information to convert RMS to physical units
Parameters
----------
tweakwcs_output : list
output of tweakwcs. Contains sourcelist tables, newly computed WCS info, etc. for every chip of every valid
input image. This list gets updated, in-place, with the new RMS values;
specifically,
* 'FIT_RMS': RMS of the separations between fitted image positions and reference positions
* 'TOTAL_RMS': mean of the FIT_RMS values for all observations
* 'NUM_FITS': number of images/group_id's with successful fits included in the TOTAL_RMS
These entries are added to the 'fit_info' dictionary.
reference_catalog : astropy.Table
Table of reference source positions used for the fit
Returns
-------
Nothing
|
entailment
|
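A self-contained sketch of the astropy pattern used above to turn fit residuals into mas-scale RMS values (the coordinates are made up):

import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord

# hypothetical fitted image positions and their matched reference positions
img_coords = SkyCoord([10.0000, 10.0002], [41.0001, 41.0003],
                      unit='deg', frame='icrs')
ref_coords = SkyCoord([10.0001, 10.0001], [41.0000, 41.0004],
                      unit='deg', frame='icrs')

dra, ddec = img_coords.spherical_offsets_to(ref_coords)
ra_rms = np.std(dra.to(u.mas))
dec_rms = np.std(ddec.to(u.mas))
fit_rms = np.std(img_coords.separation(ref_coords).to(u.mas)).value
print(ra_rms, dec_rms, fit_rms)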
def sky(input=None,outExt=None,configObj=None, group=None, editpars=False, **inputDict):
"""
Perform sky subtraction on input list of images
Parameters
----------
input : str or list of str
a python list of image filenames, or just a single filename
configObj : configObject
an instance of configObject
inputDict : dict, optional
an optional list of parameters specified by the user
outExt : str
The extension of the output image. If the output already exists
then the input image is overwritten
Notes
-----
These are parameters that the configObj should contain by default,
they can be altered on the fly using the inputDict
Parameters that should be in configobj:
========== ===================================================================
Name Definition
========== ===================================================================
skymethod 'Sky computation method'
skysub 'Perform sky subtraction?'
skywidth 'Bin width of histogram for sampling sky statistics (in sigma)'
skystat 'Sky correction statistics parameter'
skylower 'Lower limit of usable data for sky (always in electrons)'
skyupper 'Upper limit of usable data for sky (always in electrons)'
skyclip 'Number of clipping iterations'
skylsigma 'Lower side clipping factor (in sigma)'
skyusigma 'Upper side clipping factor (in sigma)'
skymask_cat 'Catalog file listing image masks'
use_static 'Use static mask for skymatch computations?'
sky_bits 'Integer mask bit values considered good pixels in DQ array'
skyfile 'Name of file with user-computed sky values'
skyuser 'KEYWORD indicating a sky subtraction value if done by user'
in_memory 'Optimize for speed or for memory use'
========== ===================================================================
The output from sky subtraction is a copy of the original input file
where all the science data extensions have been sky subtracted.
"""
if input is not None:
inputDict['input']=input
inputDict['output']=None
inputDict['updatewcs']=False
inputDict['group']=group
else:
print("Please supply an input image", file=sys.stderr)
raise ValueError
configObj = util.getDefaultConfigObj(__taskname__,configObj,inputDict,loadOnly=(not editpars))
if configObj is None:
return
if not editpars:
run(configObj,outExt=outExt)
|
Perform sky subtraction on input list of images
Parameters
----------
input : str or list of str
a python list of image filenames, or just a single filename
configObj : configObject
an instance of configObject
inputDict : dict, optional
an optional list of parameters specified by the user
outExt : str
The extension of the output image. If the output already exists
then the input image is overwritten
Notes
-----
These are parameters that the configObj should contain by default,
they can be altered on the fly using the inputDict
Parameters that should be in configobj:
========== ===================================================================
Name Definition
========== ===================================================================
skymethod 'Sky computation method'
skysub 'Perform sky subtraction?'
skywidth 'Bin width of histogram for sampling sky statistics (in sigma)'
skystat 'Sky correction statistics parameter'
skylower 'Lower limit of usable data for sky (always in electrons)'
skyupper 'Upper limit of usable data for sky (always in electrons)'
skyclip 'Number of clipping iterations'
skylsigma 'Lower side clipping factor (in sigma)'
skyusigma 'Upper side clipping factor (in sigma)'
skymask_cat 'Catalog file listing image masks'
use_static 'Use static mask for skymatch computations?'
sky_bits 'Integer mask bit values considered good pixels in DQ array'
skyfile 'Name of file with user-computed sky values'
skyuser 'KEYWORD indicating a sky subtraction value if done by user'
in_memory 'Optimize for speed or for memory use'
========== ===================================================================
The output from sky subtraction is a copy of the original input file
where all the science data extensions have been sky subtracted.
|
entailment
|
def _skyUserFromFile(imageObjList, skyFile, apply_sky=None):
"""
Apply sky value as read in from a user-supplied input file.
"""
skyKW="MDRIZSKY" #header keyword that contains the sky that's been subtracted
# create dict of fname=sky pairs
skyvals = {}
if apply_sky is None:
skyapplied = False # flag whether sky has already been applied to images
else:
skyapplied = apply_sky
    with open(skyFile) as sky_input:
        for line in sky_input:
            if apply_sky is None and line[0] == '#' and 'applied' in line:
                linesep = None
                if '=' in line: linesep = '='
                if ':' in line: linesep = ':'
                if linesep:  # guard against comment lines without a separator
                    appliedstr = line.split(linesep)[1].strip()
                    if appliedstr.lower() in ['yes', 'true', 'y', 't']:
                        skyapplied = True
                        print('...Sky values already applied by user...')
            if not util.is_blank(line) and line[0] != '#':
                lspl = line.split()
                skyvals[lspl[0]] = [float(lval) for lval in lspl[1:]]
# Apply user values to appropriate input images
for imageSet in imageObjList:
fname = imageSet._filename
numchips=imageSet._numchips
sciExt=imageSet.scienceExt
if fname in skyvals:
print(" ...updating MDRIZSKY with user-supplied value.")
for chip in range(1,numchips+1,1):
if len(skyvals[fname]) == 1:
_skyValue = skyvals[fname][0]
else:
_skyValue = skyvals[fname][chip-1]
chipext = '%s,%d'%(sciExt,chip)
_updateKW(imageSet[chipext],fname,(sciExt,chip),skyKW,_skyValue)
# Update internal record with subtracted sky value
#
# .computedSky: value to be applied by the
# adrizzle/ablot steps.
# .subtractedSky: value already (or will be by adrizzle/ablot)
# subtracted from the image
if skyapplied:
imageSet[chipext].computedSky = None # used by adrizzle/ablot
else:
imageSet[chipext].computedSky = _skyValue
imageSet[chipext].subtractedSky = _skyValue
print("Setting ",skyKW,"=",_skyValue)
else:
print("*"*40)
print("*")
print("WARNING:")
print(" .... NO user-supplied sky value found for ",fname)
print(" .... Setting sky to a value of 0.0! ")
print("*")
print("*"*40)
|
Apply sky value as read in from a user-supplied input file.
|
entailment
|