| sentence1 (stringlengths 52 to 3.87M) | sentence2 (stringlengths 1 to 47.2k) | label (stringclasses: 1 value) |
|---|---|---|
def _translate(self, input_filename, output_filename):
"""Translate KML file to geojson for import"""
command = [
self.translate_binary,
'-f', 'GeoJSON',
output_filename,
input_filename
]
result = self._runcommand(command)
self.log('Result (Translate): ', result, lvl=debug)
|
Translate KML file to geojson for import
|
entailment
|
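The command list above corresponds to a plain `ogr2ogr -f GeoJSON <output> <input>` call. A minimal standalone sketch, assuming the configured translate binary is GDAL's ogr2ogr and using subprocess directly in place of the component's `_runcommand` helper:

# Standalone sketch; assumes the translate binary is GDAL's ogr2ogr
# (the component only stores it as self.translate_binary).
import subprocess

def translate_kml_to_geojson(input_filename, output_filename, binary='ogr2ogr'):
    """Convert a KML file to GeoJSON by shelling out to ogr2ogr."""
    command = [binary, '-f', 'GeoJSON', output_filename, input_filename]
    # check=True raises CalledProcessError if the conversion fails
    return subprocess.run(command, capture_output=True, text=True, check=True)

# e.g. translate_kml_to_geojson('guide.kml', 'guide.geojson')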
def _update_guide(self, guide, update=False, clear=True):
"""Update a single specified guide"""
kml_filename = os.path.join(self.cache_path, guide + '.kml')
geojson_filename = os.path.join(self.cache_path, guide + '.geojson')
if not os.path.exists(geojson_filename) or update:
try:
data = request.urlopen(self.guides[guide]).read().decode(
'utf-8')
except (request.URLError, request.HTTPError) as e:
self.log('Could not get web guide data:', e, type(e), lvl=warn)
return
with open(kml_filename, 'w') as f:
f.write(data)
self._translate(kml_filename, geojson_filename)
with open(geojson_filename, 'r') as f:
json_data = json.loads(f.read())
if len(json_data['features']) == 0:
self.log('No features found!', lvl=warn)
return
layer = objectmodels['layer'].find_one({'name': guide})
if clear and layer is not None:
layer.delete()
layer = None
if layer is None:
layer_uuid = std_uuid()
layer = objectmodels['layer']({
'uuid': layer_uuid,
'name': guide,
'type': 'geoobjects'
})
layer.save()
else:
layer_uuid = layer.uuid
if clear:
for item in objectmodels['geoobject'].find({'layer': layer_uuid}):
self.log('Deleting old guide location', lvl=debug)
item.delete()
locations = []
for item in json_data['features']:
self.log('Adding new guide location:', item, lvl=verbose)
location = objectmodels['geoobject']({
'uuid': std_uuid(),
'layer': layer_uuid,
'geojson': item,
'type': 'Skipperguide',
'name': 'Guide for %s' % (item['properties']['Name'])
})
locations.append(location)
self.log('Bulk inserting guide locations', lvl=debug)
objectmodels['geoobject'].bulk_create(locations)
|
Update a single specified guide
|
entailment
|
def send_mail_worker(config, mail, event):
"""Worker task to send out an email, which is a blocking process unless it is threaded"""
log = ""
try:
if config.get('ssl', True):
server = SMTP_SSL(config['server'], port=config['port'], timeout=30)
else:
server = SMTP(config['server'], port=config['port'], timeout=30)
if config['tls']:
log += 'Starting TLS\n'
server.starttls()
if config['username'] != '':
log += 'Logging in with ' + str(config['username']) + "\n"
server.login(config['username'], config['password'])
else:
log += 'No username, trying anonymous access\n'
log += 'Sending Mail\n'
response_send = server.send_message(mail)
server.quit()
except timeout as e:
log += 'Could not send email: ' + str(e) + "\n"
return False, log, event
log += 'Server response:' + str(response_send)
return True, log, event
|
Worker task to send out an email, which is a blocking process unless it is threaded
|
entailment
|
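A minimal usage sketch for the worker above; the config keys mirror the ones it reads, and the server address and credentials are placeholders, not real defaults:

# Usage sketch for send_mail_worker; all values below are placeholders.
from email.mime.text import MIMEText

config = {
    'server': 'mail.example.com',   # placeholder host
    'port': 465,
    'ssl': True,                    # use SMTP_SSL
    'tls': False,                   # no STARTTLS on top of SSL
    'username': 'bot@example.com',
    'password': 'secret',
}

mail = MIMEText('Hello from the worker')
mail['Subject'] = 'Test'
mail['From'] = 'bot@example.com'
mail['To'] = 'someone@example.com'

# In the component this call is dispatched to a worker pool; calling it
# directly blocks until the SMTP conversation finishes or times out:
# success, log, event = send_mail_worker(config, mail, event=None)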
def send_mail(self, event):
"""Connect to mail server and send actual email"""
mime_mail = MIMEText(event.text)
mime_mail['Subject'] = event.subject
if event.account == 'default':
account_name = self.config.default_account
else:
account_name = event.account
account = list(filter(lambda account: account['name'] == account_name, self.config.accounts))[0]
mime_mail['From'] = render(account['mail_from'], {'server': account['server'], 'hostname': self.hostname})
mime_mail['To'] = event.to_address
self.log('MimeMail:', mime_mail, lvl=verbose)
if self.config.mail_send is True:
self.log('Sending mail to', event.to_address)
self.fireEvent(task(send_mail_worker, account, mime_mail, event), "mail-transmit-workers")
else:
self.log('Not sending mail, here it is for debugging info:', mime_mail, pretty=True)
|
Connect to mail server and send actual email
|
entailment
|
def provision_system_user(items, database_name, overwrite=False, clear=False, skip_user_check=False):
"""Provision a system user"""
from hfos.provisions.base import provisionList
from hfos.database import objectmodels
# TODO: Add a root user and make sure owner can access it later.
# Setting up details and asking for a password here is not very useful,
# since this process is usually run automated.
if overwrite is True:
hfoslog('Refusing to overwrite system user!', lvl=warn,
emitter='PROVISIONS')
overwrite = False
system_user_count = objectmodels['user'].count({'name': 'System'})
if system_user_count == 0 or clear is False:
provisionList(Users, 'user', overwrite, clear, skip_user_check=True)
hfoslog('Provisioning: Users: Done.', emitter="PROVISIONS")
else:
hfoslog('System user already present.', lvl=warn, emitter='PROVISIONS')
|
Provision a system user
|
entailment
|
def AlternatesGroup(expressions, final_function, name=""):
""" Group expressions using the OR character ``|``
>>> from collections import namedtuple
>>> expr = namedtuple('expr', 'regex group_lengths run')('(1)', [1], None)
>>> grouping = AlternatesGroup([expr, expr], lambda f: None, 'yeah')
>>> grouping.regex # doctest: +IGNORE_UNICODE
'(?:(1))|(?:(1))'
>>> grouping.group_lengths
[1, 1]
"""
inbetweens = ["|"] * (len(expressions) + 1)
inbetweens[0] = ""
inbetweens[-1] = ""
return Group(expressions, final_function, inbetweens, name)
|
Group expressions using the OR character ``|``
>>> from collections import namedtuple
>>> expr = namedtuple('expr', 'regex group_lengths run')('(1)', [1], None)
>>> grouping = AlternatesGroup([expr, expr], lambda f: None, 'yeah')
>>> grouping.regex # doctest: +IGNORE_UNICODE
'(?:(1))|(?:(1))'
>>> grouping.group_lengths
[1, 1]
|
entailment
|
def Group(expressions, final_function, inbetweens, name=""):
""" Group expressions together with ``inbetweens`` and with the output of a ``final_functions``.
"""
lengths = []
functions = []
regex = ""
i = 0
for expression in expressions:
regex += inbetweens[i]
regex += "(?:" + expression.regex + ")"
lengths.append(sum(expression.group_lengths))
functions.append(expression.run)
i += 1
regex += inbetweens[i]
return Expression(regex, functions, lengths, final_function, name)
|
Group expressions together with ``inbetweens`` and with the output of a ``final_functions``.
|
entailment
|
def findall(self, string):
""" Parse string, returning all outputs as parsed by functions
"""
output = []
for match in self.pattern.findall(string):
if hasattr(match, 'strip'):
match = [match]
self._list_add(output, self.run(match))
return output
|
Parse string, returning all outputs as parsed by functions
|
entailment
|
def scan(self, string):
""" Like findall, but also returning matching start and end string locations
"""
return list(self._scanner_to_matches(self.pattern.scanner(string), self.run))
|
Like findall, but also returning matching start and end string locations
|
entailment
|
def run(self, matches):
""" Run group functions over matches
"""
def _run(matches):
group_starting_pos = 0
for current_pos, (group_length, group_function) in enumerate(zip(self.group_lengths, self.group_functions)):
start_pos = current_pos + group_starting_pos
end_pos = current_pos + group_starting_pos + group_length
yield group_function(matches[start_pos:end_pos])
group_starting_pos += group_length - 1
return self.final_function(list(_run(matches)))
|
Run group functions over matches
|
entailment
|
def set_logfile(path, instance):
"""Specify logfile path"""
global logfile
logfile = os.path.normpath(path) + '/hfos.' + instance + '.log'
|
Specify logfile path
|
entailment
|
def is_muted(what):
"""
Checks if a logged event is to be muted for debugging purposes.
Also goes through the solo list - only items in there will be logged!
:param what:
:return:
"""
state = False
for item in solo:
if item not in what:
state = True
else:
state = False
break
for item in mute:
if item in what:
state = True
break
return state
|
Checks if a logged event is to be muted for debugging purposes.
Also goes through the solo list - only items in there will be logged!
:param what:
:return:
|
entailment
|
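A self-contained illustration of the solo/mute semantics above, using local stand-ins for the module-level `solo` and `mute` lists:

# Same logic as is_muted, with the filter lists passed in explicitly.
def is_muted_demo(what, solo, mute):
    state = False
    for item in solo:
        if item not in what:
            state = True
        else:
            state = False
            break
    for item in mute:
        if item in what:
            state = True
            break
    return state

# With a non-empty solo list, only matching messages pass through:
assert is_muted_demo('[NAV] position update', solo=['[NAV]'], mute=[]) is False
assert is_muted_demo('[CHAT] hello', solo=['[NAV]'], mute=[]) is True
# Muted substrings are checked afterwards and can still silence a message:
assert is_muted_demo('[CHAT] hello', solo=[], mute=['[CHAT]']) is True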
def hfoslog(*what, **kwargs):
"""Logs all *what arguments.
:param *what: Loggable objects (i.e. they have a string representation)
:param lvl: Debug message level
:param exc: Switch to better handle exceptions, use if logging in an
except clause
:param emitter: Optional log source, where this can't be determined
automatically
:param sourceloc: Give specific source code location hints, used internally
"""
# Count all messages (missing numbers give a hint at too high log level)
global count
global verbosity
count += 1
lvl = kwargs.get('lvl', info)
if lvl < verbosity['global']:
return
emitter = kwargs.get('emitter', 'UNKNOWN')
traceback = kwargs.get('tb', False)
frame_ref = kwargs.get('frame_ref', 0)
output = None
timestamp = time.time()
runtime = timestamp - start
callee = None
exception = kwargs.get('exc', False)
if exception:
exc_type, exc_obj, exc_tb = sys.exc_info() # NOQA
if verbosity['global'] <= debug or traceback:
# Automatically log the current function details.
if 'sourceloc' not in kwargs:
frame = kwargs.get('frame', frame_ref)
# Get the previous frame in the stack, otherwise it would
# be this function
current_frame = inspect.currentframe()
while frame > 0:
frame -= 1
current_frame = current_frame.f_back
func = current_frame.f_code
# Dump the message + the name of this function to the log.
if exception:
line_no = exc_tb.tb_lineno
if lvl <= error:
lvl = error
else:
line_no = func.co_firstlineno
callee = "[%.10s@%s:%i]" % (
func.co_name,
func.co_filename,
line_no
)
else:
callee = kwargs['sourceloc']
now = time.asctime()
msg = "[%s] : %5s : %.5f : %3i : [%5s]" % (now,
lvldata[lvl][0],
runtime,
count,
emitter)
content = ""
if callee:
if not uncut and lvl > 10:
msg += "%-60s" % callee
else:
msg += "%s" % callee
for thing in what:
content += " "
if kwargs.get('pretty', False):
content += pprint.pformat(thing)
else:
content += str(thing)
msg += content
if exception:
msg += "\n" + "".join(format_exception(exc_type, exc_obj, exc_tb))
if is_muted(msg):
return
if not uncut and lvl > 10 and len(msg) > 1000:
msg = msg[:1000]
if lvl >= verbosity['file']:
try:
f = open(logfile, "a")
f.write(msg + '\n')
f.flush()
f.close()
except IOError:
print("Can't open logfile %s for writing!" % logfile)
# sys.exit(23)
if is_marked(msg):
lvl = hilight
if lvl >= verbosity['console']:
output = str(msg)
if six.PY3 and color:
output = lvldata[lvl][1] + output + terminator
try:
print(output)
except UnicodeEncodeError as e:
print(output.encode("utf-8"))
hfoslog("Bad encoding encountered on previous message:", e,
lvl=error)
except BlockingIOError:
hfoslog("Too long log line encountered:", output[:20], lvl=warn)
if live:
item = [now, lvl, runtime, count, emitter, str(content)]
LiveLog.append(item)
|
Logs all *what arguments.
:param *what: Loggable objects (i.e. they have a string representation)
:param lvl: Debug message level
:param exc: Switch to better handle exceptions, use if logging in an
except clause
:param emitter: Optional log source, where this can't be determined
automatically
:param sourceloc: Give specific source code location hints, used internally
|
entailment
|
def get_tagged(self, event):
"""Return a list of tagged objects for a schema"""
self.log("Tagged objects request for", event.data, "from",
event.user, lvl=debug)
if event.data in self.tags:
tagged = self._get_tagged(event.data)
response = {
'component': 'hfos.events.schemamanager',
'action': 'get',
'data': tagged
}
self.fireEvent(send(event.client.uuid, response))
else:
self.log("Unavailable schema requested!", lvl=warn)
|
Return a list of tagged objects for a schema
|
entailment
|
def provision_system_vessel(items, database_name, overwrite=False, clear=False, skip_user_check=False):
"""Provisions the default system vessel"""
from hfos.provisions.base import provisionList
from hfos.database import objectmodels
vessel = objectmodels['vessel'].find_one({'name': 'Default System Vessel'})
if vessel is not None:
if overwrite is False:
hfoslog('Default vessel already existing. Skipping provisions.')
return
else:
vessel.delete()
provisionList([SystemVessel], 'vessel', overwrite, clear, skip_user_check)
sysconfig = objectmodels['systemconfig'].find_one({'active': True})
hfoslog('Adapting system config for default vessel:', sysconfig)
sysconfig.vesseluuid = SystemVessel['uuid']
sysconfig.save()
hfoslog('Provisioning: Vessel: Done.', emitter='PROVISIONS')
|
Provisions the default system vessel
|
entailment
|
def get_tile(url):
"""
Threadable function to retrieve map tiles from the internet
:param url: URL of tile to get
"""
log = ""
connection = None
try:
if six.PY3:
connection = urlopen(url=url, timeout=2) # NOQA
else:
connection = urlopen(url=url)
except Exception as e:
log += "MTST: ERROR Tilegetter error: %s " % str([type(e), e, url])
content = ""
# Read and return requested content
if connection:
try:
content = connection.read()
except (socket.timeout, socket.error) as e:
log += "MTST: ERROR Tilegetter error: %s " % str([type(e), e])
connection.close()
else:
log += "MTST: ERROR Got no connection."
return content, log
|
Threadable function to retrieve map tiles from the internet
:param url: URL of tile to get
|
entailment
|
def tilecache(self, event, *args, **kwargs):
"""Checks and caches a requested tile to disk, then delivers it to
client"""
request, response = event.args[:2]
self.log(request.path, lvl=verbose)
try:
filename, url = self._split_cache_url(request.path, 'tilecache')
except UrlError:
return
# self.log('CACHE QUERY:', filename, url)
# Do we have the tile already?
if os.path.isfile(filename):
self.log("Tile exists in cache", lvl=verbose)
# Don't set cookies for static content
response.cookie.clear()
try:
yield serve_file(request, response, filename)
finally:
event.stop()
else:
# We will have to get it first.
self.log("Tile not cached yet. Tile data: ", filename, url,
lvl=verbose)
if url in self._tiles:
self.log("Getting a tile for the second time?!", lvl=error)
else:
self._tiles += url
try:
tile, log = yield self.call(task(get_tile, url), "tcworkers")
if log:
self.log("Thread error: ", log, lvl=error)
except Exception as e:
self.log("[MTS]", e, type(e))
tile = None
tile_path = os.path.dirname(filename)
if tile:
try:
os.makedirs(tile_path)
except OSError as e:
if e.errno != errno.EEXIST:
self.log(
"Couldn't create path: %s (%s)" % (e, type(e)), lvl=error)
self.log("Caching tile.", lvl=verbose)
try:
with open(filename, "wb") as tile_file:
try:
tile_file.write(bytes(tile))
except Exception as e:
self.log("Writing error: %s" % str([type(e), e]), lvl=error)
except Exception as e:
self.log("Open error on %s - %s" % (filename, str([type(e), e])), lvl=error)
return
finally:
event.stop()
try:
self.log("Delivering tile.", lvl=verbose)
yield serve_file(request, response, filename)
except Exception as e:
self.log("Couldn't deliver tile: ", e, lvl=error)
event.stop()
self.log("Tile stored and delivered.", lvl=verbose)
else:
self.log("Got no tile, serving default tile: %s" % url)
if self.default_tile:
try:
yield serve_file(request, response, self.default_tile)
except Exception as e:
self.log('Cannot deliver default tile:', e, type(e),
exc=True, lvl=error)
finally:
event.stop()
else:
yield
|
Checks and caches a requested tile to disk, then delivers it to
client
|
entailment
|
def todegdec(origin):
"""
Convert from [+/-]DDD°MMM'SSS.SSSS" or [+/-]DDD°MMM.MMMM' to [+/-]DDD.DDDDD
"""
# if the input is already a float (or can be converted to float)
try:
return float(origin)
except ValueError:
pass
# DMS format
m = dms_re.search(origin)
if m:
degrees = int(m.group('degrees'))
minutes = float(m.group('minutes'))
seconds = float(m.group('seconds'))
return degrees + minutes / 60 + seconds / 3600
# Degree + Minutes format
m = mindec_re.search(origin)
if m:
degrees = int(m.group('degrees'))
minutes = float(m.group('minutes'))
return degrees + minutes / 60
|
Convert from [+/-]DDD°MMM'SSS.SSSS" or [+/-]DDD°MMM.MMMM' to [+/-]DDD.DDDDD
|
entailment
|
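The function relies on module-level `dms_re` and `mindec_re` patterns that are not shown here. A self-contained sketch of the same arithmetic, with plausible stand-in regexes (an assumption, not the library's own definitions):

# Stand-in regexes; the real dms_re / mindec_re are defined elsewhere.
import re

dms_re = re.compile(
    r"(?P<degrees>[+-]?\d+)\xb0(?P<minutes>\d+(\.\d+)?)'(?P<seconds>\d+(\.\d+)?)\"")
mindec_re = re.compile(r"(?P<degrees>[+-]?\d+)\xb0(?P<minutes>\d+(\.\d+)?)'")

def to_decimal_degrees(origin):
    try:
        return float(origin)
    except ValueError:
        pass
    m = dms_re.search(origin)
    if m:
        return (int(m.group('degrees')) + float(m.group('minutes')) / 60
                + float(m.group('seconds')) / 3600)
    m = mindec_re.search(origin)
    if m:
        return int(m.group('degrees')) + float(m.group('minutes')) / 60

print(to_decimal_degrees(u"25\xb002'30\""))  # 25 + 2/60 + 30/3600 = 25.04166...
print(to_decimal_degrees(u"121\xb030.5'"))   # 121 + 30.5/60 = 121.50833...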
def tomindec(origin):
"""
Convert [+/-]DDD.DDDDD to a tuple (degrees, minutes)
"""
origin = float(origin)
degrees = int(origin)
minutes = (origin % 1) * 60
return degrees, minutes
|
Convert [+/-]DDD.DDDDD to a tuple (degrees, minutes)
|
entailment
|
def tomindecstr(origin):
"""
Convert [+/-]DDD.DDDDD to [+/-]DDD°MMM.MMMM'
"""
degrees, minutes = tomindec(origin)
return u'%d°%f\'' % (degrees, minutes)
|
Convert [+/-]DDD.DDDDD to [+/-]DDD°MMM.MMMM'
|
entailment
|
def todms(origin):
"""
Convert [+/-]DDD.DDDDD to a tuple (degrees, minutes, seconds)
"""
degrees, minutes = tomindec(origin)
seconds = (minutes % 1) * 60
return degrees, int(minutes), seconds
|
Convert [+/-]DDD.DDDDD to a tuple (degrees, minutes, seconds)
|
entailment
|
def todmsstr(origin):
"""
Convert [+/-]DDD.DDDDD to [+/-]DDD°MMM'DDD.DDDDD"
"""
degrees, minutes, seconds = todms(origin)
return u'%d°%d\'%f"' % (degrees, minutes, seconds)
|
Convert [+/-]DDD.DDDDD to [+/-]DDD°MMM'DDD.DDDDD"
|
entailment
|
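A worked example of the decimal-degree helpers above, restated as standalone functions so the round trip can be run in isolation:

# Same arithmetic as tomindec/todms, without the rest of the module.
def to_min_dec(origin):
    origin = float(origin)
    degrees = int(origin)
    minutes = (origin % 1) * 60
    return degrees, minutes

def to_dms(origin):
    degrees, minutes = to_min_dec(origin)
    seconds = (minutes % 1) * 60
    return degrees, int(minutes), seconds

print(to_min_dec(121.541926))  # (121, 32.51556): 0.541926 * 60 = 32.51556
print(to_dms(121.541926))      # (121, 32, ~30.93): 0.51556 * 60 = 30.9336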
def towgs84(E, N, pkm=False, presentation=None):
"""
Convert coordinates from TWD97 to WGS84
The east and north coordinates should be in meters and in float
pkm true for Penghu, Kinmen and Matsu area
You can specify one of the following presentations of the returned values:
dms - A tuple with degrees (int), minutes (int) and seconds (float)
dmsstr - [+/-]DDD°MMM'DDD.DDDDD" (unicode)
mindec - A tuple with degrees (int) and minutes (float)
mindecstr - [+/-]DDD°MMM.MMMMM' (unicode)
(default)degdec - DDD.DDDDD (float)
"""
_lng0 = lng0pkm if pkm else lng0
E /= 1000.0
N /= 1000.0
epsilon = (N-N0) / (k0*A)
eta = (E-E0) / (k0*A)
epsilonp = epsilon - beta1*sin(2*1*epsilon)*cosh(2*1*eta) - \
beta2*sin(2*2*epsilon)*cosh(2*2*eta) - \
beta3*sin(2*3*epsilon)*cosh(2*3*eta)
etap = eta - beta1*cos(2*1*epsilon)*sinh(2*1*eta) - \
beta2*cos(2*2*epsilon)*sinh(2*2*eta) - \
beta3*cos(2*3*epsilon)*sinh(2*3*eta)
sigmap = 1 - 2*1*beta1*cos(2*1*epsilon)*cosh(2*1*eta) - \
2*2*beta2*cos(2*2*epsilon)*cosh(2*2*eta) - \
2*3*beta3*cos(2*3*epsilon)*cosh(2*3*eta)
taup = 2*1*beta1*sin(2*1*epsilon)*sinh(2*1*eta) + \
2*2*beta2*sin(2*2*epsilon)*sinh(2*2*eta) + \
2*3*beta3*sin(2*3*epsilon)*sinh(2*3*eta)
chi = asin(sin(epsilonp) / cosh(etap))
latitude = chi + delta1*sin(2*1*chi) + \
delta2*sin(2*2*chi) + \
delta3*sin(2*3*chi)
longitude = _lng0 + atan(sinh(etap) / cos(epsilonp))
func = None
presentation = 'to%s' % presentation if presentation else None
if presentation in presentations:
func = getattr(sys.modules[__name__], presentation)
if func and func != 'todegdec':
return func(degrees(latitude)), func(degrees(longitude))
return (degrees(latitude), degrees(longitude))
|
Convert coordinates from TWD97 to WGS84
The east and north coordinates should be in meters and in float
pkm true for Penghu, Kinmen and Matsu area
You can specify one of the following presentations of the returned values:
dms - A tuple with degrees (int), minutes (int) and seconds (float)
dmsstr - [+/-]DDD°MMM'DDD.DDDDD" (unicode)
mindec - A tuple with degrees (int) and minutes (float)
mindecstr - [+/-]DDD°MMM.MMMMM' (unicode)
(default)degdec - DDD.DDDDD (float)
|
entailment
|
def fromwgs84(lat, lng, pkm=False):
"""
Convert coordinates from WGS84 to TWD97
pkm true for Penghu, Kinmen and Matsu area
The latitude and longitude can be in the following formats:
[+/-]DDD°MMM'SSS.SSSS" (unicode)
[+/-]DDD°MMM.MMMM' (unicode)
[+/-]DDD.DDDDD (string, unicode or float)
The returned coordinates are in meters
"""
_lng0 = lng0pkm if pkm else lng0
lat = radians(todegdec(lat))
lng = radians(todegdec(lng))
t = sinh((atanh(sin(lat)) - 2*pow(n,0.5)/(1+n)*atanh(2*pow(n,0.5)/(1+n)*sin(lat))))
epsilonp = atan(t/cos(lng-_lng0))
etap = atan(sin(lng-_lng0) / pow(1+t*t, 0.5))
E = E0 + k0*A*(etap + alpha1*cos(2*1*epsilonp)*sinh(2*1*etap) +
alpha2*cos(2*2*epsilonp)*sinh(2*2*etap) +
alpha3*cos(2*3*epsilonp)*sinh(2*3*etap))
N = N0 + k0*A*(epsilonp + alpha1*sin(2*1*epsilonp)*cosh(2*1*etap) +
alpha2*sin(2*2*epsilonp)*cosh(2*2*etap) +
alpha3*sin(2*3*epsilonp)*cosh(2*3*etap))
return E*1000, N*1000
|
Convert coordinates from WGS84 to TWD97
pkm true for Penghu, Kinmen and Matsu area
The latitude and longitude can be in the following formats:
[+/-]DDD°MMM'SSS.SSSS" (unicode)
[+/-]DDD°MMM.MMMM' (unicode)
[+/-]DDD.DDDDD (string, unicode or float)
The returned coordinates are in meters
|
entailment
|
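Both converters depend on module-level projection constants (A, k0, E0, N0, lng0, and the alpha/beta/delta series) that are not shown. A usage sketch, assuming the code lives in an importable module named here as `twd97` (an assumption):

# Round-trip sketch; the module name and its availability are assumptions.
try:
    import twd97
except ImportError:
    twd97 = None

if twd97 is not None:
    # Taipei Main Station is roughly 25.0478 N, 121.5170 E
    E, N = twd97.fromwgs84(25.0478, 121.5170)  # TWD97 easting/northing in meters
    lat, lng = twd97.towgs84(E, N)             # back to decimal degrees
    print(E, N, lat, lng)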
def clipValue(self, value, minValue, maxValue):
'''
Makes sure that value is within a specific range.
If not, then the lower or upper bounds is returned
'''
return min(max(value, minValue), maxValue)
|
Makes sure that value is within a specific range.
If not, then the lower or upper bounds is returned
|
entailment
|
def getGroundResolution(self, latitude, level):
'''
returns the ground resolution based on latitude and zoom level.
'''
latitude = self.clipValue(latitude, self.min_lat, self.max_lat)
mapSize = self.getMapDimensionsByZoomLevel(level)
return math.cos(
latitude * math.pi / 180) * 2 * math.pi * self.earth_radius / \
mapSize
|
returns the ground resolution based on latitude and zoom level.
|
entailment
|
def getMapScale(self, latitude, level, dpi=96):
'''
returns the map scale on the dpi of the screen
'''
dpm = dpi / 0.0254 # convert to dots per meter
return self.getGroundResolution(latitude, level) * dpm
|
returns the map scale on the dpi of the screen
|
entailment
|
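The two methods boil down to the standard Web Mercator formulas. A standalone check, assuming 256-pixel tiles and the 6378137 m Earth radius commonly used (the class keeps these in attributes not shown here):

# Standalone check of the same formulas; tile size and radius are assumptions.
import math

def ground_resolution(latitude, level, tile_size=256, earth_radius=6378137):
    map_size = tile_size * (2 ** level)  # map width/height in pixels
    return math.cos(math.radians(latitude)) * 2 * math.pi * earth_radius / map_size

def map_scale(latitude, level, dpi=96):
    dots_per_meter = dpi / 0.0254  # 96 dpi is roughly 3779.5 dots per meter
    return ground_resolution(latitude, level) * dots_per_meter

print(round(ground_resolution(0, 0), 2))  # ~156543.03 m per pixel at the equator, level 0
print(round(map_scale(0, 0)))             # ~591658711, i.e. a 1:591,658,711 scale at 96 dpi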
def convertLatLngToPixelXY(self, lat, lng, level):
'''
returns the x and y values of the pixel corresponding to a latitude
and longitude.
'''
mapSize = self.getMapDimensionsByZoomLevel(level)
lat = self.clipValue(lat, self.min_lat, self.max_lat)
lng = self.clipValue(lng, self.min_lng, self.max_lng)
x = (lng + 180) / 360
sinlat = math.sin(lat * math.pi / 180)
y = 0.5 - math.log((1 + sinlat) / (1 - sinlat)) / (4 * math.pi)
pixelX = int(self.clipValue(x * mapSize + 0.5, 0, mapSize - 1))
pixelY = int(self.clipValue(y * mapSize + 0.5, 0, mapSize - 1))
return (pixelX, pixelY)
|
returns the x and y values of the pixel corresponding to a latitude
and longitude.
|
entailment
|
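A standalone version of the same latitude/longitude to pixel math, again assuming 256-pixel tiles:

# Same Web Mercator pixel math without the class wrapper.
import math

def latlng_to_pixel(lat, lng, level, tile_size=256):
    map_size = tile_size * (2 ** level)
    x = (lng + 180) / 360
    sinlat = math.sin(math.radians(lat))
    y = 0.5 - math.log((1 + sinlat) / (1 - sinlat)) / (4 * math.pi)
    pixel_x = int(min(max(x * map_size + 0.5, 0), map_size - 1))
    pixel_y = int(min(max(y * map_size + 0.5, 0), map_size - 1))
    return pixel_x, pixel_y

print(latlng_to_pixel(0, 0, 1))         # (256, 256): the centre of a 512x512 map
print(latlng_to_pixel(52.5, 13.4, 10))  # a pixel in the Berlin area at zoom 10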
def convertPixelXYToLngLat(self, pixelX, pixelY, level):
'''
converts a pixel x, y to a latitude and longitude.
'''
mapSize = self.getMapDimensionsByZoomLevel(level)
x = (self.clipValue(pixelX, 0, mapSize - 1) / mapSize) - 0.5
y = 0.5 - (self.clipValue(pixelY, 0, mapSize - 1) / mapSize)
lat = 90 - 360 * math.atan(math.exp(-y * 2 * math.pi)) / math.pi
lng = 360 * x
return (lng, lat)
|
converts a pixel x, y to a latitude and longitude.
|
entailment
|
def tileXYZToQuadKey(self, x, y, z):
'''
Computes quadKey value based on tile x, y and z values.
'''
quadKey = ''
for i in range(z, 0, -1):
digit = 0
mask = 1 << (i - 1)
if (x & mask) != 0:
digit += 1
if (y & mask) != 0:
digit += 2
quadKey += str(digit)
return quadKey
|
Computes quadKey value based on tile x, y and z values.
|
entailment
|
def quadKeyToTileXYZ(self, quadKey):
'''
Computes tile x, y and z values based on quadKey.
'''
tileX = 0
tileY = 0
tileZ = len(quadKey)
for i in range(tileZ, 0, -1):
mask = 1 << (i - 1)
value = quadKey[tileZ - i]
if value == '0':
continue
elif value == '1':
tileX |= mask
elif value == '2':
tileY |= mask
elif value == '3':
tileX |= mask
tileY |= mask
else:
raise Exception('Invalid QuadKey')
return (tileX, tileY, tileZ)
|
Computes tile x, y and z values based on quadKey.
|
entailment
|
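A standalone round trip for the two quadkey helpers above (the same bit-interleaving logic, without the class wrapper):

def tile_to_quadkey(x, y, z):
    # Interleave the bits of x and y, most significant first.
    quadkey = ''
    for i in range(z, 0, -1):
        digit = 0
        mask = 1 << (i - 1)
        if x & mask:
            digit += 1
        if y & mask:
            digit += 2
        quadkey += str(digit)
    return quadkey

def quadkey_to_tile(quadkey):
    x = y = 0
    z = len(quadkey)
    for i in range(z, 0, -1):
        mask = 1 << (i - 1)
        digit = quadkey[z - i]
        if digit == '1':
            x |= mask
        elif digit == '2':
            y |= mask
        elif digit == '3':
            x |= mask
            y |= mask
        elif digit != '0':
            raise ValueError('Invalid QuadKey')
    return x, y, z

assert tile_to_quadkey(3, 5, 3) == '213'    # tile (3, 5) at level 3
assert quadkey_to_tile('213') == (3, 5, 3)  # and back again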
def getTileOrigin(self, tileX, tileY, level):
'''
Returns the upper-left hand corner lat/lng for a tile
'''
pixelX, pixelY = self.convertTileXYToPixelXY(tileX, tileY)
lng, lat = self.convertPixelXYToLngLat(pixelX, pixelY, level)
return (lat, lng)
|
Returns the upper-left hand corner lat/lng for a tile
|
entailment
|
def getTileUrlsByLatLngExtent(self, xmin, ymin, xmax, ymax, level):
'''
Returns a list of tile urls by extent
'''
# Upper-Left Tile
tileXMin, tileYMin = self.tileUtils.convertLngLatToTileXY(xmin, ymax,
level)
# Lower-Right Tile
tileXMax, tileYMax = self.tileUtils.convertLngLatToTileXY(xmax, ymin,
level)
tileUrls = []
for y in range(tileYMax, tileYMin - 1, -1):
for x in range(tileXMin, tileXMax + 1, 1):
tileUrls.append(self.createTileUrl(x, y, level))
return tileUrls
|
Returns a list of tile urls by extent
|
entailment
|
def createTileUrl(self, x, y, z):
'''
returns new tile url based on template
'''
return self.tileTemplate.replace('{{x}}', str(x)).replace('{{y}}', str(
y)).replace('{{z}}', str(z))
|
returns new tile url based on template
|
entailment
|
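The same template substitution run standalone, with a placeholder URL template in place of whatever `self.tileTemplate` was configured to:

tile_template = 'https://tiles.example.com/{{z}}/{{x}}/{{y}}.png'  # placeholder

def create_tile_url(template, x, y, z):
    return (template.replace('{{x}}', str(x))
                    .replace('{{y}}', str(y))
                    .replace('{{z}}', str(z)))

print(create_tile_url(tile_template, 68, 42, 7))
# https://tiles.example.com/7/68/42.png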
def referenceframe(self, event):
"""Handles navigational reference frame updates.
These are necessary to assign geo coordinates to alerts and other
misc things.
:param event with incoming referenceframe message
"""
self.log("Got a reference frame update! ", event, lvl=events)
self.reference_frame = event.data
|
Handles navigational reference frame updates.
These are necessary to assign geo coordinates to alerts and other
misc things.
:param event with incoming referenceframe message
|
entailment
|
def userlogin(self, event):
"""Checks if an alert is ongoing and alerts the newly connected
client, if so."""
client_uuid = event.clientuuid
self.log(event.user, pretty=True, lvl=verbose)
self.log('Adding client')
self.clients[event.clientuuid] = event.user
for topic, alert in self.alerts.items():
self.alert(client_uuid, alert)
|
Checks if an alert is ongoing and alerts the newly connected
client, if so.
|
entailment
|
def trigger(self, event):
"""AlertManager event handler for incoming events
:param event with incoming AlertManager message
"""
topic = event.data.get('topic', None)
if topic is None:
self.log('No alert topic to trigger', lvl=warn)
return
alert = {
'topic': topic,
'message': event.data.get('msg', 'Alert has been triggered'),
'role': event.data.get('role', 'all')
}
self._trigger(event, alert)
|
AlertManager event handler for incoming events
:param event with incoming AlertManager message
|
entailment
|
def cancel(self, event):
"""AlertManager event handler for incoming events
:param event with incoming AlertManager message
"""
topic = event.data.get('topic', None)
if topic is None:
self.log('No alert topic to cancel', lvl=warn)
return
self._cancel(topic)
|
AlertManager event handler for incoming events
:param event with incoming AlertManager message
|
entailment
|
def cli(ctx, instance, quiet, verbose, log_level, dbhost, dbname):
"""Isomer Management Tool
This tool supports various operations to manage isomer instances.
Most of the commands are grouped. To obtain more information about the
groups' available sub commands/groups, try
iso [group]
To display details of a command or its sub groups, try
iso [group] [subgroup] [..] [command] --help
To get a map of all available commands, try
iso cmdmap
"""
ctx.obj['instance'] = instance
if dbname == db_default and instance != 'default':
dbname = instance
ctx.obj['quiet'] = quiet
ctx.obj['verbose'] = verbose
verbosity['console'] = log_level
verbosity['global'] = log_level
ctx.obj['dbhost'] = dbhost
ctx.obj['dbname'] = dbname
|
Isomer Management Tool
This tool supports various operations to manage isomer instances.
Most of the commands are grouped. To obtain more information about the
groups' available sub commands/groups, try
iso [group]
To display details of a command or its sub groups, try
iso [group] [subgroup] [..] [command] --help
To get a map of all available commands, try
iso cmdmap
|
entailment
|
def main():
"""Primary entry point for all AstroCats catalogs.
From this entry point, all internal catalogs can be accessed and their
public methods executed (for example: import scripts).
"""
from datetime import datetime
# Initialize Command-Line and User-Config Settings, Log
# -----------------------------------------------------
beg_time = datetime.now()
# Process command-line arguments to determine action
# If no subcommand (e.g. 'import') is given, returns 'None' --> exit
args, sub_clargs = load_command_line_args()
if args is None:
return
# Create a logging object
log = load_log(args)
# Run configuration/setup interactive script
if args.command == 'setup':
setup_user_config(log)
return
# Make sure configuration file exists, or that's what we're doing
# (with the 'setup' subcommand)
if not os.path.isfile(_CONFIG_PATH):
raise RuntimeError("'{}' does not exist. "
"Run `astrocats setup` to configure."
"".format(_CONFIG_PATH))
git_vers = get_git()
title_str = "Astrocats, version: {}, SHA: {}".format(__version__, git_vers)
log.warning("\n\n{}\n{}\n{}\n".format(title_str, '=' * len(title_str),
beg_time.ctime()))
# Load the user settings from the home directory
args = load_user_config(args, log)
# Choose Catalog and Operation(s) to perform
# ------------------------------------------
mod_name = args.command
log.debug("Importing specified module: '{}'".format(mod_name))
# Try to import the specified module
try:
mod = importlib.import_module('.' + mod_name, package='astrocats')
except Exception as err:
log.error("Import of specified module '{}' failed.".format(mod_name))
log_raise(log, str(err), type(err))
# Run the `main.main` method of the specified module
log.debug("Running `main.main()`")
mod.main.main(args, sub_clargs, log)
end_time = datetime.now()
log.warning("\nAll complete at {}, After {}".format(end_time, end_time -
beg_time))
return
|
Primary entry point for all AstroCats catalogs.
From this entry point, all internal catalogs can be accessed and their
public methods executed (for example: import scripts).
|
entailment
|
def setup_user_config(log):
"""Setup a configuration file in the user's home directory.
Currently this method stores default values to a fixed configuration
filename. It should be modified to run an interactive prompt session
asking for parameters (or at least confirming the default ones).
Arguments
---------
log : `logging.Logger` object
"""
log.warning("AstroCats Setup")
log.warning("Configure filepath: '{}'".format(_CONFIG_PATH))
# Create path to configuration file as needed
config_path_dir = os.path.split(_CONFIG_PATH)[0]
if not os.path.exists(config_path_dir):
log.debug("Creating config directory '{}'".format(config_path_dir))
os.makedirs(config_path_dir)
if not os.path.isdir(config_path_dir):
log_raise(log, "Configure path error '{}'".format(config_path_dir))
# Determine default settings
# Get this containing directory and use that as default data path
def_base_path = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
log.warning("Setting '{}' to default path: '{}'".format(_BASE_PATH_KEY,
def_base_path))
config = {_BASE_PATH_KEY: def_base_path}
# Write settings to configuration file
json.dump(config, open(_CONFIG_PATH, 'w'))
if not os.path.exists(def_base_path):
log_raise(log, "Problem creating configuration file.")
return
|
Setup a configuration file in the user's home directory.
Currently this method stores default values to a fixed configuration
filename. It should be modified to run an interactive prompt session
asking for parameters (or at least confirming the default ones).
Arguments
---------
log : `logging.Logger` object
|
entailment
|
def load_user_config(args, log):
"""Load settings from the user's confiuration file, and add them to `args`.
Settings are loaded from the configuration file in the user's home
directory. Those parameters are added (as attributes) to the `args`
object.
Arguments
---------
args : `argparse.Namespace`
Namespace object to which configuration attributes will be added.
Returns
-------
args : `argparse.Namespace`
Namespace object with added attributes.
"""
if not os.path.exists(_CONFIG_PATH):
err_str = (
"Configuration file does not exists ({}).\n".format(_CONFIG_PATH) +
"Run `python -m astrocats setup` to configure.")
log_raise(log, err_str)
config = json.load(open(_CONFIG_PATH, 'r'))
setattr(args, _BASE_PATH_KEY, config[_BASE_PATH_KEY])
log.debug("Loaded configuration: {}: {}".format(_BASE_PATH_KEY, config[
_BASE_PATH_KEY]))
return args
|
Load settings from the user's configuration file, and add them to `args`.
Settings are loaded from the configuration file in the user's home
directory. Those parameters are added (as attributes) to the `args`
object.
Arguments
---------
args : `argparse.Namespace`
Namespace object to which configuration attributes will be added.
Returns
-------
args : `argparse.Namespace`
Namespace object with added attributes.
|
entailment
|
def load_command_line_args(clargs=None):
"""Load and parse command-line arguments.
Arguments
---------
args : str or None
'Faked' commandline arguments passed to `argparse`.
Returns
-------
args : `argparse.Namespace` object
Namespace in which settings are stored - default values modified by the
given command-line arguments.
"""
import argparse
git_vers = get_git()
parser = argparse.ArgumentParser(
prog='astrocats',
description='Generate catalogs for astronomical data.')
parser.add_argument('command', nargs='?', default=None)
parser.add_argument(
'--version',
action='version',
version='AstroCats v{}, SHA: {}'.format(__version__, git_vers))
parser.add_argument(
'--verbose',
'-v',
dest='verbose',
default=False,
action='store_true',
help='Print more messages to the screen.')
parser.add_argument(
'--debug',
'-d',
dest='debug',
default=False,
action='store_true',
help='Print excessive messages to the screen.')
parser.add_argument(
'--include-private',
dest='private',
default=False,
action='store_true',
help='Include private data in import.')
parser.add_argument(
'--travis',
'-t',
dest='travis',
default=False,
action='store_true',
help='Run import script in test mode for Travis.')
parser.add_argument(
'--clone-depth',
dest='clone_depth',
default=0,
type=int,
help=('When cloning git repos, only clone out to this depth '
'(default: 0 = all levels).'))
parser.add_argument(
'--purge-outputs',
dest='purge_outputs',
default=False,
action='store_true',
help=('Purge git outputs after cloning.'))
parser.add_argument(
'--log',
dest='log_filename',
default=None,
help='Filename to which to store logging information.')
# If output files should be written or not
# ----------------------------------------
write_group = parser.add_mutually_exclusive_group()
write_group.add_argument(
'--write',
action='store_true',
dest='write_entries',
default=True,
help='Write entries to files [default].')
write_group.add_argument(
'--no-write',
action='store_false',
dest='write_entries',
default=True,
help='do not write entries to file.')
# If previously cleared output files should be deleted or not
# -----------------------------------------------------------
delete_group = parser.add_mutually_exclusive_group()
delete_group.add_argument(
'--predelete',
action='store_true',
dest='delete_old',
default=True,
help='Delete all old event files to begin [default].')
delete_group.add_argument(
'--no-predelete',
action='store_false',
dest='delete_old',
default=True,
help='Do not delete all old event files to start.')
args, sub_clargs = parser.parse_known_args(args=clargs)
# Print the help information if no command is given
if args.command is None:
parser.print_help()
return None, None
return args, sub_clargs
|
Load and parse command-line arguments.
Arguments
---------
args : str or None
'Faked' commandline arguments passed to `argparse`.
Returns
-------
args : `argparse.Namespace` object
Namespace in which settings are stored - default values modified by the
given command-line arguments.
|
entailment
|
def load_log(args):
"""Load a `logging.Logger` object.
Arguments
---------
args : `argparse.Namespace` object
Namespace containing required settings:
{`args.debug`, `args.verbose`, and `args.log_filename`}.
Returns
-------
log : `logging.Logger` object
"""
from astrocats.catalog.utils import logger
# Determine verbosity ('None' means use default)
log_stream_level = None
if args.debug:
log_stream_level = logger.DEBUG
elif args.verbose:
log_stream_level = logger.INFO
# Create log
log = logger.get_logger(
stream_level=log_stream_level, tofile=args.log_filename)
log._verbose = args.verbose
log._debug = args.debug
return log
|
Load a `logging.Logger` object.
Arguments
---------
args : `argparse.Namespace` object
Namespace containing required settings:
{`args.debug`, `args.verbose`, and `args.log_filename`}.
Returns
-------
log : `logging.Logger` object
|
entailment
|
def compare_dicts(old_full, new_full, old_data, new_data, depth=0):
"""Function compares dictionaries by key-value recursively.
Old and new input data are both dictionaries
"""
depth = depth + 1
indent = " "*depth
# Print with an indentation matching the nested-dictionary depth
def my_print(str):
print("{}{}".format(indent, str))
old_keys = list(old_data.keys())
# Compare data key by key, in *this* dictionary level
# Note: since we're comparing by keys explicitly, order doesn't matter
for key in old_keys:
# Remove elements as we go
old_vals = old_data.pop(key)
# Current key
my_print("{}".format(key))
# If `new_data` doesn't also have this key, return False
if key not in new_data:
my_print("Key '{}' not in new_data.".format(key))
my_print("Old:")
my_print(pprint(old_data))
my_print("New:")
my_print(pprint(new_data))
return False
# If it does have the key, extract the values (remove as we go)
new_vals = new_data.pop(key)
# If these values are a sub-dictionary, compare those
if isinstance(old_vals, dict) and isinstance(new_vals, dict):
# If the sub-dictionary are not the same, return False
if not compare_dicts(old_full, new_full, old_vals, new_vals, depth=depth):
return False
# If these values are a list of sub-dictionaries, compare each of those
elif (isinstance(old_vals, list) and isinstance(old_vals[0], dict) and
isinstance(new_vals, list) and isinstance(new_vals[0], dict)):
for old_elem, new_elem in zip_longest(old_vals, new_vals):
# If one or the other has extra elements, print message, but
# continue on
if old_elem is None or new_elem is None:
my_print("Missing element!")
my_print("\tOld: '{}'".format(old_elem))
my_print("\tNew: '{}'".format(new_elem))
else:
if not compare_dicts(old_full, new_full, old_elem, new_elem, depth=depth):
return False
# At the lowest-dictionary level, compare the values themselves
else:
# Turn everything into a list for convenience (most things should be
# already)
if (not isinstance(old_vals, list) and
not isinstance(new_vals, list)):
old_vals = [old_vals]
new_vals = [new_vals]
# Sort both lists
old_vals = sorted(old_vals)
new_vals = sorted(new_vals)
for oldv, newv in zip_longest(old_vals, new_vals):
# If one or the other has extra elements, print message, but
# continue on
if oldv is None or newv is None:
my_print("Missing element!")
my_print("\tOld: '{}'".format(oldv))
my_print("\tNew: '{}'".format(newv))
# If values match, continue
elif oldv == newv:
my_print("Good Match: '{}'".format(key))
# If values don't match, return False
else:
my_print("Bad Match: '{}'".format(key))
my_print("\tOld: '{}'".format(oldv))
my_print("\tNew: '{}'".format(newv))
return False
return True
|
Function compares dictionaries by key-value recursively.
Old and new input data are both dictionaries
|
entailment
|
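A minimal usage sketch, assuming `compare_dicts` above is in scope together with the `zip_longest` and `pprint` imports it expects at module level. Since the function pops keys as it walks, pass throwaway copies as the working dictionaries:

import copy

old = {'name': 'SN2011fe', 'alias': ['SN2011fe', 'PTF11kly'], 'redshift': '0.000804'}
new = {'name': 'SN2011fe', 'alias': ['PTF11kly', 'SN2011fe'], 'redshift': '0.000804'}

# The *_full arguments are kept for context; the working copies get consumed.
same = compare_dicts(old, new, copy.deepcopy(old), copy.deepcopy(new))
print(same)  # True: list order does not matter because values are sorted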
def cohensutherland(xmin, ymax, xmax, ymin, x1, y1, x2, y2):
"""Clips a line to a rectangular area.
This implements the Cohen-Sutherland line clipping algorithm. xmin,
ymax, xmax and ymin denote the clipping area, into which the line
defined by x1, y1 (start point) and x2, y2 (end point) will be
clipped.
If the line does not intersect with the rectangular clipping area,
four None values will be returned as tuple. Otherwise a tuple of the
clipped line points will be returned in the form (cx1, cy1, cx2, cy2).
"""
INSIDE, LEFT, RIGHT, LOWER, UPPER = 0, 1, 2, 4, 8
def _getclip(xa, ya):
#if dbglvl>1: print('point: '),; print(xa,ya)
p = INSIDE # default is inside
# consider x
if xa < xmin:
p |= LEFT
elif xa > xmax:
p |= RIGHT
# consider y
if ya < ymin:
p |= LOWER # bitwise OR
elif ya > ymax:
p |= UPPER # bitwise OR
return p
# check for trivially outside lines
k1 = _getclip(x1, y1)
k2 = _getclip(x2, y2)
# %% examine non-trivially outside points
# bitwise OR |
while (k1 | k2) != 0: # if both points are inside box (0000) , ACCEPT trivial whole line in box
# if line trivially outside window, REJECT
if (k1 & k2) != 0: # bitwise AND &
#if dbglvl>1: print(' REJECT trivially outside box')
# return nan, nan, nan, nan
return None, None, None, None
# non-trivial case, at least one point outside window
# this is not a bitwise or, it's the word "or"
opt = k1 or k2 # take first non-zero point, short circuit logic
if opt & UPPER: # these are bitwise ANDS
x = x1 + (x2 - x1) * (ymax - y1) / (y2 - y1)
y = ymax
elif opt & LOWER:
x = x1 + (x2 - x1) * (ymin - y1) / (y2 - y1)
y = ymin
elif opt & RIGHT:
y = y1 + (y2 - y1) * (xmax - x1) / (x2 - x1)
x = xmax
elif opt & LEFT:
y = y1 + (y2 - y1) * (xmin - x1) / (x2 - x1)
x = xmin
else:
raise RuntimeError('Undefined clipping state')
if opt == k1:
x1, y1 = x, y
k1 = _getclip(x1, y1)
#if dbglvl>1: print('checking k1: ' + str(x) + ',' + str(y) + ' ' + str(k1))
elif opt == k2:
#if dbglvl>1: print('checking k2: ' + str(x) + ',' + str(y) + ' ' + str(k2))
x2, y2 = x, y
k2 = _getclip(x2, y2)
return x1, y1, x2, y2
|
Clips a line to a rectangular area.
This implements the Cohen-Sutherland line clipping algorithm. xmin,
ymax, xmax and ymin denote the clipping area, into which the line
defined by x1, y1 (start point) and x2, y2 (end point) will be
clipped.
If the line does not intersect with the rectangular clipping area,
four None values will be returned as tuple. Otherwise a tuple of the
clipped line points will be returned in the form (cx1, cy1, cx2, cy2).
|
entailment
|
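A usage sketch for the clipper (note the clip-window argument order: xmin, ymax, xmax, ymin); the coordinates are arbitrary test values:

# Clip window: x in [0, 10], y in [0, 10].
print(cohensutherland(0, 10, 10, 0, -5, 5, 15, 5))
# (0, 5.0, 10, 5.0): the horizontal segment is trimmed to the box edges
print(cohensutherland(0, 10, 10, 0, -5, 20, -1, 12))
# (None, None, None, None): the segment lies entirely outside the box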
def setupuv(rc):
"""
Horn Schunck legacy OpenCV function requires we use these old-fashioned cv matrices, not numpy array
"""
if cv is not None:
(r, c) = rc
u = cv.CreateMat(r, c, cv.CV_32FC1)
v = cv.CreateMat(r, c, cv.CV_32FC1)
return (u, v)
else:
return [None]*2
|
Horn Schunck legacy OpenCV function requires we use these old-fashioned cv matrices, not numpy array
|
entailment
|
def _init_cat_dict(self, cat_dict_class, key_in_self, **kwargs):
"""Initialize a CatDict object, checking for errors.
"""
# Catch errors associated with crappy, but not unexpected data
try:
new_entry = cat_dict_class(self, key=key_in_self, **kwargs)
except CatDictError as err:
if err.warn:
self._log.info("'{}' Not adding '{}': '{}'".format(self[
self._KEYS.NAME], key_in_self, str(err)))
return None
return new_entry
|
Initialize a CatDict object, checking for errors.
|
entailment
|
def _add_cat_dict(self,
cat_dict_class,
key_in_self,
check_for_dupes=True,
**kwargs):
"""Add a CatDict to this Entry if initialization succeeds and it
doesn't already exist within the Entry.
"""
# Try to create a new instance of this subclass of `CatDict`
new_entry = self._init_cat_dict(cat_dict_class, key_in_self, **kwargs)
if new_entry is None:
return False
# Compare this new entry with all previous entries to make sure is new
if cat_dict_class != Error:
for item in self.get(key_in_self, []):
if new_entry.is_duplicate_of(item):
item.append_sources_from(new_entry)
# Return the entry in case we want to use any additional
# tags to augment the old entry
return new_entry
self.setdefault(key_in_self, []).append(new_entry)
return True
|
Add a CatDict to this Entry if initialization succeeds and it
doesn't already exist within the Entry.
|
entailment
|
def pbar(iter, desc='', **kwargs):
"""Wrapper for `tqdm` progress bar.
"""
return tqdm(
iter,
desc=('<' + str(datetime.now().strftime("%Y-%m-%d %H:%M:%S")) + '> ' +
desc),
dynamic_ncols=True,
**kwargs)
|
Wrapper for `tqdm` progress bar.
|
entailment
|
def pbar_strings(files, desc='', **kwargs):
"""Wrapper for `tqdm` progress bar which also sorts list of strings
"""
return tqdm(
sorted(files, key=lambda s: s.lower()),
desc=('<' + str(datetime.now().strftime("%Y-%m-%d %H:%M:%S")) + '> ' +
desc),
dynamic_ncols=True,
**kwargs)
|
Wrapper for `tqdm` progress bar which also sorts list of strings
|
entailment
|
def _get_task_priority(tasks, task_priority):
"""Get the task `priority` corresponding to the given `task_priority`.
If `task_priority` is an integer or 'None', return it.
If `task_priority` is a str, return the priority of the task it matches.
Otherwise, raise `ValueError`.
"""
if task_priority is None:
return None
if is_integer(task_priority):
return task_priority
if isinstance(task_priority, basestring):
if task_priority in tasks:
return tasks[task_priority].priority
raise ValueError("Unrecognized task priority '{}'".format(task_priority))
|
Get the task `priority` corresponding to the given `task_priority`.
If `task_priority` is an integer or 'None', return it.
If `task_priority` is a str, return the priority of the task it matches.
Otherwise, raise `ValueError`.
|
entailment
|
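A usage sketch, assuming `_get_task_priority` and its `is_integer`/`basestring` helpers are in scope; a namedtuple stands in for the real Task class:

from collections import namedtuple

Task = namedtuple('Task', 'priority')
tasks = {'internal': Task(priority=0), 'radio': Task(priority=20)}

print(_get_task_priority(tasks, None))     # None: no bound requested
print(_get_task_priority(tasks, 5))        # 5: integers pass straight through
print(_get_task_priority(tasks, 'radio'))  # 20: resolved from the named task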
def import_data(self):
"""Run all of the import tasks.
This is executed by the 'scripts.main.py' when the module is run as an
executable. This can also be run as a method, in which case default
arguments are loaded, but can be overridden using `**kwargs`.
"""
tasks_list = self.load_task_list()
warnings.filterwarnings(
'ignore', r'Warning: converting a masked element to nan.')
# FIX
warnings.filterwarnings('ignore', category=DeprecationWarning)
# Delete all old (previously constructed) output files
if self.args.delete_old:
self.log.warning("Deleting all old entry files.")
self.delete_old_entry_files()
# In update mode, load all entry stubs.
if self.args.load_stubs or self.args.update:
self.load_stubs()
if self.args.travis:
self.log.warning("Running in `travis` mode.")
prev_priority = 0
prev_task_name = ''
# for task, task_obj in tasks_list.items():
for task_name, task_obj in tasks_list.items():
if not task_obj.active:
continue
self.log.warning("Task: '{}'".format(task_name))
nice_name = task_obj.nice_name
mod_name = task_obj.module
func_name = task_obj.function
priority = task_obj.priority
# Make sure things are running in the correct order
if priority < prev_priority and priority > 0:
raise RuntimeError("Priority for '{}': '{}', less than prev,"
"'{}': '{}'.\n{}"
.format(task_name, priority, prev_task_name,
prev_priority, task_obj))
self.log.debug("\t{}, {}, {}, {}".format(nice_name, priority,
mod_name, func_name))
mod = importlib.import_module('.' + mod_name, package='astrocats')
self.current_task = task_obj
getattr(mod, func_name)(self)
num_events, num_stubs = self.count()
self.log.warning("Task finished. Events: {}, Stubs: {}".format(
num_events, num_stubs))
self.journal_entries()
num_events, num_stubs = self.count()
self.log.warning("Journal finished. Events: {}, Stubs: {}".format(
num_events, num_stubs))
prev_priority = priority
prev_task_name = task_name
process = psutil.Process(os.getpid())
memory = process.memory_info().rss
self.log.warning('Memory used (MBs): '
'{:,}'.format(memory / 1024. / 1024.))
return
|
Run all of the import tasks.
This is executed by the 'scripts.main.py' when the module is run as an
executable. This can also be run as a method, in which case default
arguments are loaded, but can be overridden using `**kwargs`.
|
entailment
|
def load_task_list(self):
"""Load the list of tasks in this catalog's 'input/tasks.json' file.
A `Task` object is created for each entry, with the parameters filled
in. These are placed in an OrderedDict, sorted by the `priority`
parameter, with positive values and then negative values,
e.g. [0, 2, 10, -10, -1].
"""
# In update mode, do not delete old files
if self.args.update:
self.log.info("Disabling `pre-delete` for 'update' mode.")
self.args.delete_old = False
# Don't allow both a 'min' and 'max' task priority
# FIX: this is probably unnecessary... having both could be useful
if ((self.args.min_task_priority is not None and
self.args.max_task_priority is not None)):
raise ValueError("Can only use *either* 'min' *or* 'max' priority")
# Load tasks data from input json file
tasks, task_names = self._load_task_list_from_file()
# Make sure 'active' modification lists are all valid
args_lists = [
self.args.args_task_list, self.args.yes_task_list,
self.args.no_task_list
]
args_names = ['--tasks', '--yes', '--no']
for arglist, lname in zip(args_lists, args_names):
if arglist is not None:
for tname in arglist:
if tname not in task_names:
raise ValueError(
"Value '{}' in '{}' list does not match"
" any tasks".format(tname, lname))
# Process min/max priority specification ('None' if none given)
min_priority = _get_task_priority(tasks, self.args.min_task_priority)
max_priority = _get_task_priority(tasks, self.args.max_task_priority)
task_groups = self.args.task_groups
if task_groups is not None:
if not isinstance(task_groups, list):
task_groups = [task_groups]
# Iterate over all tasks to determine which should be (in)active
# --------------------------------------------------------------
for key in tasks:
# If in update mode, only run update tasks
if self.args.update:
if not tasks[key].update:
tasks[key].active = False
# If specific list of tasks is given, make only those active
if self.args.args_task_list is not None:
if key in self.args.args_task_list:
tasks[key].active = True
else:
tasks[key].active = False
# Only run tasks above minimum priority
# (doesn't modify negative priority tasks)
if min_priority is not None and tasks[key].priority >= 0:
tasks[key].active = False
if tasks[key].priority >= min_priority:
tasks[key].active = True
# Only run tasks below maximum priority
# (doesn't modify negative priority tasks)
if max_priority is not None and tasks[key].priority >= 0:
tasks[key].active = False
if tasks[key].priority <= max_priority:
tasks[key].active = True
# Set 'yes' tasks to *active*
if self.args.yes_task_list is not None:
if key in self.args.yes_task_list:
tasks[key].active = True
# Set 'no' tasks to *inactive*
if self.args.no_task_list is not None:
if key in self.args.no_task_list:
tasks[key].active = False
# Set tasks in target 'groups' to *active*
if task_groups is not None and tasks[key].groups is not None:
# Go through each group defined in the command line
for given_group in task_groups:
# If this task is a member of any of those groups
if given_group in tasks[key].groups:
tasks[key].active = True
break
# Sort entries as positive values, then negative values
# [0, 1, 2, 2, 10, -100, -10, -1]
# Tuples are sorted by first element (here: '0' if positive), then
# second (here normal order)
tasks = OrderedDict(
sorted(
tasks.items(),
key=lambda t: (t[1].priority < 0, t[1].priority, t[1].name)))
# Find the first task that has "always_journal" set to True
for key in tasks:
if tasks[key].active and tasks[key].always_journal:
self.min_journal_priority = tasks[key].priority
break
names_act = []
names_inact = []
for key, val in tasks.items():
if val.active:
names_act.append(key)
else:
names_inact.append(key)
self.log.info("Active Tasks:\n\t" + ", ".join(nn for nn in names_act))
self.log.debug("Inactive Tasks:\n\t" + ", ".join(nn for nn in
names_inact))
return tasks
|
Load the list of tasks in this catalog's 'input/tasks.json' file.
A `Task` object is created for each entry, with the parameters filled
in. These are placed in an OrderedDict, sorted by the `priority`
parameter, with positive values and then negative values,
e.g. [0, 2, 10, -10, -1].
|
entailment
|
def add_entry(self, name, load=True, delete=True):
"""Find an existing entry in, or add a new one to, the `entries` dict.
FIX: rename to `create_entry`???
Returns
-------
entries : OrderedDict of Entry objects
newname : str
Name of matching entry found in `entries`, or new entry added to
`entries`
"""
newname = self.clean_entry_name(name)
if not newname:
raise (ValueError('Fatal: Attempted to add entry with no name.'))
# If entry already exists, return
if newname in self.entries:
self.log.debug("`newname`: '{}' (name: '{}') already exists.".
format(newname, name))
# If this is a stub, we need to continue, possibly load file
if self.entries[newname]._stub:
self.log.debug("'{}' is a stub".format(newname))
# If a full (non-stub) event exists, return its name
else:
self.log.debug("'{}' is not a stub, returning".format(newname))
return newname
# If entry is alias of another entry in `entries`, find and return that
match_name = self.find_entry_name_of_alias(newname)
if match_name is not None:
self.log.debug(
"`newname`: '{}' (name: '{}') already exists as alias for "
"'{}'.".format(newname, name, match_name))
newname = match_name
# Load entry from file
if load:
loaded_name = self.load_entry_from_name(newname, delete=delete)
if loaded_name:
return loaded_name
# If we match an existing event, return that
if match_name is not None:
return match_name
# Create new entry
new_entry = self.proto(catalog=self, name=newname)
new_entry[self.proto._KEYS.SCHEMA] = self.SCHEMA.URL
self.log.log(self.log._LOADED,
"Created new entry for '{}'".format(newname))
# Add entry to dictionary
self.entries[newname] = new_entry
return newname
|
Find an existing entry in, or add a new one to, the `entries` dict.
FIX: rename to `create_entry`???
Returns
-------
entries : OrderedDict of Entry objects
newname : str
Name of matching entry found in `entries`, or new entry added to
`entries`
|
entailment
|
def find_entry_name_of_alias(self, alias):
"""Return the first entry name with the given 'alias' included in its
list of aliases.
Returns
-------
name of matching entry (str) or 'None' if no matches
"""
if alias in self.aliases:
name = self.aliases[alias]
if name in self.entries:
return name
else:
# Name wasn't found, possibly merged or deleted. Now look
# really hard.
for name, entry in self.entries.items():
aliases = entry.get_aliases(includename=False)
if alias in aliases:
if (ENTRY.DISTINCT_FROM not in entry or
alias not in entry[ENTRY.DISTINCT_FROM]):
return name
return None
|
Return the first entry name with the given 'alias' included in its
list of aliases.
Returns
-------
name of matching entry (str) or 'None' if no matches
|
entailment
|
def copy_entry_to_entry(self,
fromentry,
destentry,
check_for_dupes=True,
compare_to_existing=True):
"""Used by `merge_duplicates`
"""
self.log.info("Copy entry object '{}' to '{}'".format(fromentry[
fromentry._KEYS.NAME], destentry[destentry._KEYS.NAME]))
newsourcealiases = {}
if self.proto._KEYS.SOURCES in fromentry:
for source in fromentry[self.proto._KEYS.SOURCES]:
alias = source.pop(SOURCE.ALIAS)
newsourcealiases[alias] = source
newmodelaliases = {}
if self.proto._KEYS.MODELS in fromentry:
for model in fromentry[self.proto._KEYS.MODELS]:
alias = model.pop(MODEL.ALIAS)
newmodelaliases[alias] = model
if self.proto._KEYS.ERRORS in fromentry:
for err in fromentry[self.proto._KEYS.ERRORS]:
destentry.setdefault(self.proto._KEYS.ERRORS, []).append(err)
for rkey in fromentry:
key = fromentry._KEYS.get_key_by_name(rkey)
if key.no_source:
continue
for item in fromentry[key]:
# isd = False
if 'source' not in item:
raise ValueError("Item has no source!")
nsid = []
for sid in item['source'].split(','):
if sid in newsourcealiases:
source = newsourcealiases[sid]
nsid.append(destentry.add_source(**source))
else:
raise ValueError("Couldn't find source alias!")
item['source'] = uniq_cdl(nsid)
if 'model' in item:
nmid = []
for mid in item['model'].split(','):
if mid in newmodelaliases:
model = newmodelaliases[mid]
nmid.append(destentry.add_model(**model))
else:
raise ValueError("Couldn't find model alias!")
item['model'] = uniq_cdl(nmid)
if key == ENTRY.PHOTOMETRY:
destentry.add_photometry(
compare_to_existing=compare_to_existing,
**item)
elif key == ENTRY.SPECTRA:
destentry.add_spectrum(
compare_to_existing=compare_to_existing,
**item)
elif key == ENTRY.ERRORS:
destentry.add_error(**item)
elif key == ENTRY.MODELS:
continue
else:
destentry.add_quantity(
compare_to_existing=compare_to_existing,
check_for_dupes=False, quantities=key, **item)
return
|
Used by `merge_duplicates`
|
entailment
|
def merge_duplicates(self):
"""Merge and remove duplicate entries.
Compares each entry ('name') in `stubs` to all later entries to check
for duplicates in name or alias. If a duplicate is found, they are
merged and written to file.
"""
if len(self.entries) == 0:
self.log.error("WARNING: `entries` is empty, loading stubs")
if self.args.update:
self.log.warning(
"No sources changed, entry files unchanged in update."
" Skipping merge.")
return
self.entries = self.load_stubs()
task_str = self.get_current_task_str()
keys = list(sorted(self.entries.keys()))
n1 = 0
mainpbar = tqdm(total=len(keys), desc=task_str)
while n1 < len(keys):
name1 = keys[n1]
if name1 not in self.entries:
self.log.info("Entry for {} not found, likely already "
"deleted in merging process.".format(name1))
n1 = n1 + 1
mainpbar.update(1)
continue
allnames1 = set(self.entries[name1].get_aliases() + self.entries[
name1].extra_aliases())
# Search all later names
for name2 in keys[n1 + 1:]:
if name1 == name2:
continue
if name1 not in self.entries:
self.log.info("Entry for {} not found, likely already "
"deleted in merging process.".format(name1))
continue
if name2 not in self.entries:
self.log.info("Entry for {} not found, likely already "
"deleted in merging process.".format(name2))
continue
allnames2 = set(self.entries[name2].get_aliases() +
self.entries[name2].extra_aliases())
# If there are any common names or aliases, merge
if len(allnames1 & allnames2):
self.log.warning("Found two entries with common aliases "
"('{}' and '{}'), merging.".format(name1,
name2))
load1 = self.proto.init_from_file(self, name=name1)
load2 = self.proto.init_from_file(self, name=name2)
if load1 is not None and load2 is not None:
# Delete old files
self._delete_entry_file(entry=load1)
self._delete_entry_file(entry=load2)
self.entries[name1] = load1
self.entries[name2] = load2
priority1 = 0
priority2 = 0
for an in allnames1:
if an.startswith(self.entries[name1]
.priority_prefixes()):
priority1 += 1
for an in allnames2:
if an.startswith(self.entries[name2]
.priority_prefixes()):
priority2 += 1
if priority1 > priority2:
self.copy_to_entry_in_catalog(name2, name1)
keys.append(name1)
del self.entries[name2]
else:
self.copy_to_entry_in_catalog(name1, name2)
keys.append(name2)
del self.entries[name1]
else:
self.log.warning('Duplicate already deleted')
# if len(self.entries) != 1:
# self.log.error(
# "WARNING: len(entries) = {}, expected 1. "
# "Still journaling...".format(len(self.entries)))
self.journal_entries()
if self.args.travis and n1 > self.TRAVIS_QUERY_LIMIT:
break
n1 = n1 + 1
mainpbar.update(1)
mainpbar.close()
|
Merge and remove duplicate entries.
Compares each entry ('name') in `stubs` to all later entries to check
for duplicates in name or alias. If a duplicate is found, they are
merged and written to file.
|
entailment
|
def load_stubs(self, log_mem=False):
"""Load all events in their `stub` (name, alias, etc only) form.
Used in `update` mode.
"""
# Initialize parameter related to diagnostic output of memory usage
if log_mem:
import psutil
process = psutil.Process(os.getpid())
rss = process.memory_info().rss
LOG_MEMORY_INT = 1000
MEMORY_LIMIT = 1000.0
def _add_stub_manually(_fname):
"""Create and add a 'stub' by manually loading parameters from
JSON files.
Previously this was done by creating a full `Entry` instance, then
using the `Entry.get_stub()` method to trim it down. This was very
slow and memory intensive, hence this improved approach.
"""
# FIX: should this be ``fi.endswith('.gz')``?
fname = uncompress_gz(_fname) if '.gz' in _fname else _fname
stub = None
stub_name = None
with codecs.open(fname, 'r') as jfil:
# Load the full JSON file
data = json.load(jfil, object_pairs_hook=OrderedDict)
# Extract the top-level keys (should just be the name of the
# entry)
stub_name = list(data.keys())
# Make sure there is only a single top-level entry
if len(stub_name) != 1:
err = "json file '{}' has multiple keys: {}".format(
fname, list(stub_name))
self._log.error(err)
raise ValueError(err)
stub_name = stub_name[0]
# Make sure a non-stub entry doesn't already exist with this
# name
if stub_name in self.entries and not self.entries[
stub_name]._stub:
err_str = (
"ERROR: non-stub entry already exists with name '{}'"
.format(stub_name))
self.log.error(err_str)
raise RuntimeError(err_str)
# Remove the outmost dict level
data = data[stub_name]
# Create a new `Entry` (subclass) instance
proto = self.proto
stub = proto(catalog=self, name=stub_name, stub=True)
# Add stub parameters if they are available
if proto._KEYS.ALIAS in data:
stub[proto._KEYS.ALIAS] = data[proto._KEYS.ALIAS]
if proto._KEYS.DISTINCT_FROM in data:
stub[proto._KEYS.DISTINCT_FROM] = data[
proto._KEYS.DISTINCT_FROM]
if proto._KEYS.RA in data:
stub[proto._KEYS.RA] = data[proto._KEYS.RA]
if proto._KEYS.DEC in data:
stub[proto._KEYS.DEC] = data[proto._KEYS.DEC]
if proto._KEYS.DISCOVER_DATE in data:
stub[proto._KEYS.DISCOVER_DATE] = data[
proto._KEYS.DISCOVER_DATE]
if proto._KEYS.SOURCES in data:
stub[proto._KEYS.SOURCES] = data[
proto._KEYS.SOURCES]
# Store the stub
self.entries[stub_name] = stub
self.log.debug("Added stub for '{}'".format(stub_name))
currenttask = 'Loading entry stubs'
files = self.PATHS.get_repo_output_file_list()
for ii, _fname in enumerate(pbar(files, currenttask)):
# Run normally
# _add_stub(_fname)
# Run 'manually' (extract stub parameters directly from JSON)
_add_stub_manually(_fname)
if log_mem:
rss = process.memory_info().rss / 1024 / 1024
if ii % LOG_MEMORY_INT == 0 or rss > MEMORY_LIMIT:
log_memory(self.log, "\nLoaded stub {}".format(ii),
logging.INFO)
if rss > MEMORY_LIMIT:
err = (
"Memory usage {}, has exceeded {} on file {} '{}'".
format(rss, MEMORY_LIMIT, ii, _fname))
self.log.error(err)
raise RuntimeError(err)
return self.entries
|
Load all events in their `stub` (name, alias, etc only) form.
Used in `update` mode.
|
entailment
|
def _delete_entry_file(self, entry_name=None, entry=None):
"""Delete the file associated with the given entry.
"""
if entry_name is None and entry is None:
raise RuntimeError("Either `entry_name` or `entry` must be given.")
elif entry_name is not None and entry is not None:
raise RuntimeError("Cannot use both `entry_name` and `entry`.")
if entry_name is not None:
entry = self.entries[entry_name]
else:
entry_name = entry[ENTRY.NAME]
# FIX: do we also need to check for gzipped files??
entry_filename = self.entry_filename(entry_name)
if self.args.write_entries:
self.log.info("Deleting entry file '{}' of entry '{}'".format(
entry_filename, entry_name))
if not os.path.exists(entry_filename):
self.log.error(
"Filename '{}' does not exist".format(entry_filename))
os.remove(entry_filename)
else:
self.log.debug("Not deleting '{}' because `write_entries`"
" is False".format(entry_filename))
return
|
Delete the file associated with the given entry.
|
entailment
|
def journal_entries(self,
clear=True,
gz=False,
bury=False,
write_stubs=False,
final=False):
"""Write all entries in `entries` to files, and clear. Depending on
arguments and `tasks`.
Iterates over all elements of `entries`, saving (possibly 'burying')
and deleting.
- If ``clear == True``, then each element of `entries` is deleted,
and a `stubs` entry is added
"""
# if (self.current_task.priority >= 0 and
# self.current_task.priority < self.min_journal_priority):
# return
# Write it all out!
# NOTE: this needs to use a `list` wrapper to allow modification of
# dict
for name in list(self.entries.keys()):
if self.args.write_entries:
# If this is a stub and we aren't writing stubs, skip
if self.entries[name]._stub and not write_stubs:
continue
# Bury non-SN entries here if only claimed type is non-SN type,
# or if primary name starts with a non-SN prefix.
bury_entry = False
save_entry = True
if bury:
(bury_entry, save_entry) = self.should_bury(name)
if save_entry:
save_name = self.entries[name].save(
bury=bury_entry, final=final)
self.log.info(
"Saved {} to '{}'.".format(name.ljust(20), save_name))
if (gz and os.path.getsize(save_name) >
self.COMPRESS_ABOVE_FILESIZE):
save_name = compress_gz(save_name)
self.log.debug(
"Compressed '{}' to '{}'".format(name, save_name))
# FIX: use subprocess
outdir, filename = os.path.split(save_name)
filename = filename.split('.')[0]
os.system('cd ' + outdir + '; git rm --cached ' +
filename + '.json; git add -f ' + filename +
'.json.gz; cd ' + self.PATHS.PATH_BASE)
if clear:
self.entries[name] = self.entries[name].get_stub()
self.log.debug("Entry for '{}' converted to stub".format(name))
return
|
Write all entries in `entries` to files and clear them, depending on
arguments and `tasks`.
Iterates over all elements of `entries`, saving (possibly 'burying')
and deleting.
- If ``clear == True``, then each element of `entries` is deleted,
and a `stubs` entry is added
|
entailment
|
def set_preferred_names(self):
"""Choose between each entries given name and its possible aliases for
the best one.
"""
if len(self.entries) == 0:
self.log.error("WARNING: `entries` is empty, loading stubs")
self.load_stubs()
task_str = self.get_current_task_str()
for ni, oname in enumerate(pbar(self.entries, task_str)):
name = self.add_entry(oname)
self.entries[name].set_preferred_name()
if self.args.travis and ni > self.TRAVIS_QUERY_LIMIT:
break
return
|
Choose between each entry's given name and its possible aliases for
the best one.
|
entailment
|
def _prep_git_add_file_list(self,
repo,
size_limit,
fail=True,
file_types=None):
"""Get a list of files which should be added to the given repository.
Notes
-----
* Finds files in the *root* of the given repository path.
* If `file_types` is given, only use those file types.
* If an uncompressed file is above the `size_limit`, it is compressed.
* If a compressed file is above the file limit, an error is raised
(if `fail == True`) or it is skipped (if `fail == False`).
Arguments
---------
repo : str
Path to repository
size_limit : scalar
fail : bool
Raise an error if a compressed file is still above the size limit.
file_types : list of str or None
Exclusive list of file types to add. 'None' to add all filetypes.
"""
add_files = []
if file_types is None:
file_patterns = ['*']
else:
self.log.error(
"WARNING: uncertain behavior with specified file types!")
file_patterns = ['*.' + ft for ft in file_types]
# Construct glob patterns for each file-type
file_patterns = [os.path.join(repo, fp) for fp in file_patterns]
for pattern in file_patterns:
file_list = glob(pattern)
for ff in file_list:
fsize = os.path.getsize(ff)
fname = str(ff)
comp_failed = False
# If the found file is too large
if fsize > size_limit:
self.log.debug("File '{}' size '{}' MB.".format(
fname, fsize / 1024 / 1024))
# If the file is already compressed... fail or skip
if ff.endswith('.gz'):
self.log.error(
"File '{}' is already compressed.".format(fname))
comp_failed = True
# Not yet compressed - compress it
else:
fname = compress_gz(fname)
fsize = os.path.getsize(fname)
self.log.info("Compressed to '{}', size '{}' MB".
format(fname, fsize / 1024 / 1024))
# If still too big, fail or skip
if fsize > size_limit:
comp_failed = True
# If compressed file is too large, skip file or raise error
if comp_failed:
# Raise an error
if fail:
raise RuntimeError(
"File '{}' cannot be added!".format(fname))
# Skip file without adding it
self.log.info("Skipping file.")
continue
# If everything is good, add file to list
add_files.append(fname)
return add_files
|
Get a list of files which should be added to the given repository.
Notes
-----
* Finds files in the *root* of the given repository path.
* If `file_types` is given, only use those file types.
* If an uncompressed file is above the `size_limit`, it is compressed.
* If a compressed file is above the file limit, an error is raised
(if `fail == True`) or it is skipped (if `fail == False`).
Arguments
---------
repo : str
Path to repository
size_limit : scalar
fail : bool
Raise an error if a compressed file is still above the size limit.
file_types : list of str or None
Exclusive list of file types to add. 'None' to add all filetypes.
|
entailment
|
def load_url(self,
url,
fname,
repo=None,
timeout=120,
post=None,
fail=False,
write=True,
json_sort=None,
cache_only=False,
archived_mode=None,
archived_task=None,
update_mode=None,
verify=False):
"""Load the given URL, or a cached-version.
Load page from url or cached file, depending on the current settings.
'archived' mode applies when `args.archived` is true (from
`--archived` CL argument), and when this task has `Task.archived`
also set to True.
'archived' mode:
* Try to load from cached file.
* If cache does not exist, try to load from web.
* If neither works, raise an error if ``fail == True``,
otherwise return None
non-'archived' mode:
* Try to load from url, save to cache file.
* If url fails, try to load existing cache file.
* If neither works, raise an error if ``fail == True``,
otherwise return None
'update' mode:
* In update mode, try to compare URL to cached file.
* If URL fails, return None
(cannot update)
* If URL data matches cached data, return None
(dont need to update)
* If URL is different from data, return url data
(proceed with update)
Arguments
---------
self
url : str
URL to download.
fname : str
Filename to which to save/load cached file. Includes suffix.
NOTE: in general, this should be the source's BIBCODE.
repo : str or None
The full path of the data-repository the cached file should be
saved/loaded from. If 'None', then the current task is used to
determine the repo.
timeout : int
Time (in seconds) after which a URL query should exit.
post : dict
List of arguments to post to URL when requesting it.
archived : bool
Load a previously archived version of the file.
fail : bool
If the file/url cannot be loaded, raise an error.
write : bool
Save a new copy of the cached file.
json_sort : str or None
If data is being saved to a json file, sort first by this str.
quiet : bool
Whether to emit error messages upon being unable to find files.
verify : bool
Whether to check for valid SSL cert when downloading
"""
file_txt = None
url_txt = None
# Load default settings if needed
# -------------------------------
# Determine if we are running in archived mode
if archived_mode is None:
archived_mode = self.args.archived
# Determine if this task is one which uses archived files
if archived_task is None:
archived_task = self.current_task.archived
# Determine if running in update mode
if update_mode is None:
update_mode = self.args.update
# Construct the cached filename
if repo is None:
repo = self.get_current_task_repo()
cached_path = os.path.join(repo, fname)
# Load cached file if it exists
# ----------------------------
if os.path.isfile(cached_path):
with codecs.open(cached_path, 'r', encoding='utf8') as infile:
file_txt = infile.read()
self.log.debug("Task {}: Loaded from '{}'.".format(
self.current_task.name, cached_path))
# In `archived` mode and task - try to return the cached page
if archived_mode or (archived_task and not update_mode):
if file_txt is not None:
return file_txt
# If this flag is set, don't even attempt to download from web
if cache_only:
return None
# If file does not exist, log error, continue
else:
self.log.error("Task {}: Cached file '{}' does not exist.".
format(self.current_task.name, cached_path))
# Load url. 'None' is returned on failure - handle that below
url_txt = self.download_url(
url, timeout, fail=False, post=post, verify=verify)
# At this point, we might have both `url_txt` and `file_txt`
# If either of them failed, then they are set to None
# If URL download failed, error or return cached data
# ---------------------------------------------------
if url_txt is None:
# Both sources failed
if file_txt is None:
err_str = "Both url and file retrieval failed!"
# If we should raise errors on failure
if fail:
err_str += " `fail` set."
self.log.error(err_str)
raise RuntimeError(err_str)
# Otherwise warn and return None
self.log.warning(err_str)
return None
# Otherwise, if only url failed, return file data
else:
# If we are trying to update, but the url failed, then return
# None
if update_mode:
self.log.error(
"Cannot check for updates, url download failed.")
return None
# Otherwise, return file data
self.log.warning("URL download failed, using cached data.")
return file_txt
# Here: `url_txt` exists, `file_txt` may exist or may be None
# Determine if update should happen, and if file should be resaved
# Write new url_txt to cache file
# -------------------------------
if write:
self.log.info(
"Writing `url_txt` to file '{}'.".format(cached_path))
self._write_cache_file(url_txt, cached_path, json_sort=json_sort)
# If `file_txt` doesnt exist but were not writing.. warn
elif file_txt is None:
err_str = "Warning: cached file '{}' does not exist.".format(
cached_path)
err_str += " And is not being saved."
self.log.warning(err_str)
# Check if we need to update this data
# ------------------------------------
# If both `url_txt` and `file_txt` exist and update mode check MD5
if file_txt is not None and update_mode:
from hashlib import md5
url_md5 = md5(url_txt.encode('utf-8')).hexdigest()
file_md5 = md5(file_txt.encode('utf-8')).hexdigest()
self.log.debug("URL: '{}', File: '{}'.".format(url_md5, file_md5))
# If the data is the same, no need to parse (update), return None
if url_md5 == file_md5:
self.log.info(
"Skipping file '{}', no changes.".format(cached_path))
return None
else:
self.log.info("File '{}' has been updated".format(cached_path))
# Warn if we didnt save a new copy
if not write:
err_str = "Warning: updated data not saved to file."
self.log.warning(err_str)
return url_txt
|
Load the given URL, or a cached-version.
Load page from url or cached file, depending on the current settings.
'archived' mode applies when `args.archived` is true (from
`--archived` CL argument), and when this task has `Task.archived`
also set to True.
'archived' mode:
* Try to load from cached file.
* If cache does not exist, try to load from web.
* If neither works, raise an error if ``fail == True``,
otherwise return None
non-'archived' mode:
* Try to load from url, save to cache file.
* If url fails, try to load existing cache file.
* If neither works, raise an error if ``fail == True``,
otherwise return None
'update' mode:
* In update mode, try to compare URL to cached file.
* If URL fails, return None
(cannot update)
* If URL data matches cached data, return None
(dont need to update)
* If URL is different from data, return url data
(proceed with update)
Arguments
---------
self
url : str
URL to download.
fname : str
Filename to which to save/load cached file. Includes suffix.
NOTE: in general, this should be the source's BIBCODE.
repo : str or None
The full path of the data-repository the cached file should be
saved/loaded from. If 'None', then the current task is used to
determine the repo.
timeout : int
Time (in seconds) after which a URL query should exit.
post : dict
List of arguments to post to URL when requesting it.
archived : bool
Load a previously archived version of the file.
fail : bool
If the file/url cannot be loaded, raise an error.
write : bool
Save a new copy of the cached file.
json_sort : str or None
If data is being saved to a json file, sort first by this str.
quiet : bool
Whether to emit error messages upon being unable to find files.
verify : bool
Whether to check for valid SSL cert when downloading
|
entailment
|
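A minimal usage sketch of `load_url` from inside a hypothetical import task (the URL and cache filename below are placeholders, not real sources); a `None` return means the download failed or, in update mode, that the cached copy is unchanged:

# Hypothetical call inside an import task method; 'EXAMPLE.csv' is just the
# name of the cache file in the current task's repository.
csv_txt = self.load_url('https://example.org/transients.csv', 'EXAMPLE.csv',
                        fail=False)
if csv_txt is None:
    return  # download failed, or nothing changed since the cached copy
for line in csv_txt.splitlines():
    pass  # parse each row and add entries/quantities as usual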
def download_url(self, url, timeout, fail=False, post=None, verify=True):
"""Download text from the given url.
Returns `None` on failure.
Arguments
---------
self
url : str
URL web address to download.
timeout : int
Duration after which URL request should terminate.
fail : bool
If `True`, then an error will be raised on failure.
If `False`, then 'None' is returned on failure.
post : dict
List of arguments to post to URL when requesting it.
verify : bool
Whether to check for valid SSL cert when downloading
Returns
-------
url_txt : str or None
On success the text of the url is returned. On failure `None` is
returned.
"""
_CODE_ERRORS = [500, 307, 404]
import requests
session = requests.Session()
try:
headers = {
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X '
'10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/39.0.2171.95 Safari/537.36'
}
if post:
response = session.post(
url,
timeout=timeout,
headers=headers,
data=post,
verify=verify)
else:
response = session.get(
url, timeout=timeout, headers=headers, verify=verify)
response.raise_for_status()
# Look for errors
for xx in response.history:
xx.raise_for_status()
if xx.status_code in _CODE_ERRORS:
self.log.error("URL response returned status code '{}'".
format(xx.status_code))
raise
url_txt = response.text
self.log.debug("Task {}: Loaded `url_txt` from '{}'.".format(
self.current_task.name, url))
except (KeyboardInterrupt, SystemExit):
raise
except Exception as err:
err_str = ("URL Download of '{}' failed ('{}')."
.format(url, str(err)))
# Raise an error on failure
if fail:
err_str += " and `fail` is set."
self.log.error(err_str)
raise RuntimeError(err_str)
# Log a warning on error, and return None
else:
self.log.warning(err_str)
return None
return url_txt
|
Download text from the given url.
Returns `None` on failure.
Arguments
---------
self
url : str
URL web address to download.
timeout : int
Duration after which URL request should terminate.
fail : bool
If `True`, then an error will be raised on failure.
If `False`, then 'None' is returned on failure.
post : dict
List of arguments to post to URL when requesting it.
verify : bool
Whether to check for valid SSL cert when downloading
Returns
-------
url_txt : str or None
On success the text of the url is returned. On failure `None` is
returned.
|
entailment
|
def append_sources_from(self, other):
"""Merge the source alias lists of two CatDicts."""
# Get aliases lists from this `CatDict` and other
self_aliases = self[self._KEYS.SOURCE].split(',')
other_aliases = other[self._KEYS.SOURCE].split(',')
# Store alias to `self`
self[self._KEYS.SOURCE] = uniq_cdl(self_aliases + other_aliases)
return
|
Merge the source alias lists of two CatDicts.
|
entailment
|
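For illustration, a toy stand-in for the merge performed above, assuming `uniq_cdl` builds an order-preserving, de-duplicated comma-delimited string (which is how it is used here):

def _merge_source_aliases(a, b):
    """Toy equivalent of append_sources_from for two '1,2,3'-style strings."""
    merged = []
    for alias in a.split(',') + b.split(','):
        if alias not in merged:
            merged.append(alias)
    return ','.join(merged)

# _merge_source_aliases('1,2', '2,3')  ->  '1,2,3'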
def current_task(self, args):
"""Name of current action for progress-bar output.
The specific task string depends on the configuration via `args`.
Returns
-------
ctask : str
String representation of this task.
"""
ctask = self.nice_name if self.nice_name is not None else self.name
if args is not None:
if args.update:
ctask = ctask.replace('%pre', 'Updating')
else:
ctask = ctask.replace('%pre', 'Loading')
return ctask
|
Name of current action for progress-bar output.
The specific task string depends on the configuration via `args`.
Returns
-------
ctask : str
String representation of this task.
|
entailment
|
def load_archive(self, args):
"""Whether previously archived data should be loaded.
"""
import warnings
warnings.warn("`Task.load_archive()` is deprecated! "
"`Catalog.load_url` handles the same functionality.")
return self.archived or args.archived
|
Whether previously archived data should be loaded.
|
entailment
|
def get_sha(path=None, log=None, short=False, timeout=None):
"""Use `git rev-parse HEAD <REPO>` to get current SHA.
"""
# git_command = "git rev-parse HEAD {}".format(repo_name).split()
# git_command = "git rev-parse HEAD".split()
git_command = ["git", "rev-parse"]
if short:
git_command.append("--short")
git_command.append("HEAD")
kwargs = {}
if path is not None:
kwargs['cwd'] = path
if timeout is not None:
kwargs['timeout'] = timeout
if log is not None:
log.debug("{} {}".format(git_command, str(kwargs)))
sha = subprocess.check_output(git_command, **kwargs)
try:
sha = sha.decode('ascii').strip()
except:
if log is not None:
log.debug("decode of '{}' failed".format(sha))
return sha
|
Use `git rev-parse HEAD <REPO>` to get current SHA.
|
entailment
|
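A quick usage sketch (the repository path is a placeholder); this is equivalent to running `git rev-parse --short HEAD` in that directory:

sha = get_sha(path='/path/to/data-repo', short=True, timeout=30)
print(sha)   # e.g. 'a1b2c3d'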
def git_add_commit_push_all_repos(cat):
"""Add all files in each data repository tree, commit, push.
Creates a commit message based on the current catalog version info.
If either the `git add` or `git push` commands fail, an error will be
raised. Currently, if `commit` fails an error *WILL NOT* be raised,
because the `commit` command returns a nonzero exit status when
there are no files to add, which we don't want to treat as an error.
FIX: improve the error checking on this.
"""
log = cat.log
log.debug("gitter.git_add_commit_push_all_repos()")
# Do not commit/push private repos
all_repos = cat.PATHS.get_all_repo_folders(private=False)
for repo in all_repos:
log.info("Repo in: '{}'".format(repo))
# Get the initial git SHA
sha_beg = get_sha(repo)
log.debug("Current SHA: '{}'".format(sha_beg))
# Get files that should be added, compress and check sizes
add_files = cat._prep_git_add_file_list(repo,
cat.COMPRESS_ABOVE_FILESIZE)
log.info("Found {} Files to add.".format(len(add_files)))
if len(add_files) == 0:
continue
try:
# Add all files in the repository directory tree
git_comm = ["git", "add"]
if cat.args.travis:
git_comm.append("-f")
git_comm.extend(add_files)
_call_command_in_repo(
git_comm, repo, cat.log, fail=True, log_flag=False)
# Commit these files
commit_msg = "'push' - adding all files."
commit_msg = "{} : {}".format(cat._version_long, commit_msg)
log.info(commit_msg)
git_comm = ["git", "commit", "-am", commit_msg]
_call_command_in_repo(git_comm, repo, cat.log)
# Add all files in the repository directory tree
git_comm = ["git", "push"]
if not cat.args.travis:
_call_command_in_repo(git_comm, repo, cat.log, fail=True)
except Exception as err:
try:
git_comm = ["git", "reset", "HEAD"]
_call_command_in_repo(git_comm, repo, cat.log, fail=True)
except:
pass
raise err
return
|
Add all files in each data repository tree, commit, push.
Creates a commit message based on the current catalog version info.
If either the `git add` or `git push` commands fail, an error will be
raised. Currently, if `commit` fails an error *WILL NOT* be raised,
because the `commit` command returns a nonzero exit status when
there are no files to add, which we don't want to treat as an error.
FIX: improve the error checking on this.
|
entailment
|
def git_pull_all_repos(cat, strategy_recursive=True, strategy='theirs'):
"""Perform a 'git pull' in each data repository.
> `git pull -s recursive -X theirs`
"""
# raise RuntimeError("THIS DOESNT WORK YET!")
log = cat.log
log.debug("gitter.git_pull_all_repos()")
log.warning("WARNING: using experimental `git_pull_all_repos()`!")
all_repos = cat.PATHS.get_all_repo_folders()
for repo_name in all_repos:
log.info("Repo in: '{}'".format(repo_name))
# Get the initial git SHA
sha_beg = get_sha(repo_name)
log.debug("Current SHA: '{}'".format(sha_beg))
# Initialize the git repository
repo = git.Repo(repo_name)
# Construct the command to call
git_comm = "git pull --verbose"
if strategy_recursive:
git_comm += " -s recursive"
if strategy is not None:
git_comm += " -X {:s}".format(strategy)
log.debug("Calling '{}'".format(git_comm))
# Call git command (do this manually to use desired options)
# Set `with_exceptions=False` to handle errors ourselves (below)
code, out, err = repo.git.execute(
git_comm.split(),
with_stdout=True,
with_extended_output=True,
with_exceptions=False)
# Handle output of git command
if len(out):
log.info(out)
if len(err):
log.info(err)
# Handle error codes
if code != 0:
err_str = "Command '{}' returned exit code '{}'!".format(git_comm,
code)
err_str += "\n\tout: '{}'\n\terr: '{}'".format(out, err)
log.error(err_str)
raise RuntimeError(err_str)
sha_end = get_sha(repo_name)
if sha_end != sha_beg:
log.info("Updated SHA: '{}'".format(sha_end))
return
|
Perform a 'git pull' in each data repository.
> `git pull -s recursive -X theirs`
|
entailment
|
def git_clone_all_repos(cat):
"""Perform a 'git clone' for each data repository that doesnt exist.
"""
log = cat.log
log.debug("gitter.git_clone_all_repos()")
all_repos = cat.PATHS.get_all_repo_folders()
out_repos = cat.PATHS.get_repo_output_folders()
for repo in all_repos:
log.info("Repo in: '{}'".format(repo))
if os.path.isdir(repo):
log.info("Directory exists.")
else:
log.debug("Cloning directory...")
clone(repo, cat.log, depth=max(cat.args.clone_depth, 1))
if cat.args.purge_outputs and repo in out_repos:
for fil in glob(os.path.join(repo, '*.json')):
os.remove(fil)
grepo = git.cmd.Git(repo)
try:
grepo.status()
except git.GitCommandError:
log.error("Repository does not exist!")
raise
# Get the initial git SHA
sha_beg = get_sha(repo)
log.debug("Current SHA: '{}'".format(sha_beg))
return
|
Perform a 'git clone' for each data repository that doesn't exist.
|
entailment
|
def git_reset_all_repos(cat, hard=True, origin=False, clean=True):
"""Perform a 'git reset' in each data repository.
"""
log = cat.log
log.debug("gitter.git_reset_all_repos()")
all_repos = cat.PATHS.get_all_repo_folders()
for repo in all_repos:
log.warning("Repo in: '{}'".format(repo))
# Get the initial git SHA
sha_beg = get_sha(repo)
log.debug("Current SHA: '{}'".format(sha_beg))
grepo = git.cmd.Git(repo)
# Fetch first
log.info("fetching")
grepo.fetch()
args = []
if hard:
args.append('--hard')
if origin:
args.append('origin/master')
log.info("resetting")
retval = grepo.reset(*args)
if len(retval):
log.warning("Git says: '{}'".format(retval))
# Clean
if clean:
log.info("cleaning")
# [q]uiet, [f]orce, [d]irectories
retval = grepo.clean('-qdf')
if len(retval):
log.warning("Git says: '{}'".format(retval))
sha_end = get_sha(repo)
if sha_end != sha_beg:
log.debug("Updated SHA: '{}'".format(sha_end))
return
|
Perform a 'git reset' in each data repository.
|
entailment
|
def git_status_all_repos(cat, hard=True, origin=False, clean=True):
"""Perform a 'git status' in each data repository.
"""
log = cat.log
log.debug("gitter.git_status_all_repos()")
all_repos = cat.PATHS.get_all_repo_folders()
for repo_name in all_repos:
log.info("Repo in: '{}'".format(repo_name))
# Get the initial git SHA
sha_beg = get_sha(repo_name)
log.debug("Current SHA: '{}'".format(sha_beg))
log.info("Fetching")
fetch(repo_name, log=cat.log)
git_comm = ["git", "status"]
_call_command_in_repo(
git_comm, repo_name, cat.log, fail=True, log_flag=True)
sha_end = get_sha(repo_name)
if sha_end != sha_beg:
log.info("Updated SHA: '{}'".format(sha_end))
return
|
Perform a 'git status' in each data repository.
|
entailment
|
def clone(repo, log, depth=1):
"""Given a list of repositories, make sure they're all cloned.
Should be called from the subclassed `Catalog` objects, passed a list
of specific repository names.
Arguments
---------
all_repos : list of str
*Absolute* path specification of each target repository.
"""
kwargs = {}
if depth > 0:
kwargs['depth'] = depth
try:
repo_name = os.path.split(repo)[-1]
repo_name = "https://github.com/astrocatalogs/" + repo_name + ".git"
log.warning("Cloning '{}' (only needs to be done ".format(repo) +
"once, may take few minutes per repo).")
grepo = git.Repo.clone_from(repo_name, repo, **kwargs)
except:
log.error("CLONING '{}' INTERRUPTED".format(repo))
raise
return grepo
|
Given a list of repositories, make sure they're all cloned.
Should be called from the subclassed `Catalog` objects, passed a list
of specific repository names.
Arguments
---------
all_repos : list of str
*Absolute* path specification of each target repository.
|
entailment
|
def _call_command_in_repo(comm, repo, log, fail=False, log_flag=True):
"""Use `subprocess` to call a command in a certain (repo) directory.
Logs the output (both `stderr` and `stdout`) to the log, and checks the
return codes to make sure they're valid. Raises error if not.
Raises
------
exception `subprocess.CalledProcessError`: if the command fails
"""
if log_flag:
log.debug("Running '{}'.".format(" ".join(comm)))
process = subprocess.Popen(
comm, cwd=repo, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
if stderr is not None:
err_msg = stderr.decode('ascii').strip().splitlines()
for em in err_msg:
log.error(em)
if stdout is not None:
out_msg = stdout.decode('ascii').strip().splitlines()
for om in out_msg:
log.warning(om)
# Raises an error if the command failed.
if fail:
if process.returncode:
raise subprocess.CalledProcessError(process.returncode, comm)
return
|
Use `subprocess` to call a command in a certain (repo) directory.
Logs the output (both `stderr` and `stdout`) to the log, and checks the
return codes to make sure they're valid. Raises error if not.
Raises
------
exception `subprocess.CalledProcessError`: if the command fails
|
entailment
|
def _check(self):
"""Check that spectrum has legal combination of attributes."""
# Run the super method
super(Spectrum, self)._check()
err_str = None
has_data = self._KEYS.DATA in self
has_wave = self._KEYS.WAVELENGTHS in self
has_flux = self._KEYS.FLUXES in self
has_filename = self._KEYS.FILENAME in self
if not has_data:
if (not has_wave or not has_flux) and not has_filename:
err_str = (
"If `{}` not given".format(self._KEYS.DATA) +
"; `{}` or `{}` needed".format(
self._KEYS.WAVELENGTHS, self._KEYS.FLUXES))
if err_str is not None:
raise ValueError(err_str)
return
|
Check that spectrum has legal combination of attributes.
|
entailment
|
def is_duplicate_of(self, other):
"""Check if spectrum is duplicate of another."""
if super(Spectrum, self).is_duplicate_of(other):
return True
row_matches = 0
for ri, row in enumerate(self.get(self._KEYS.DATA, [])):
lambda1, flux1 = tuple(row[0:2])
if (self._KEYS.DATA not in other or
ri > len(other[self._KEYS.DATA])):
break
lambda2, flux2 = tuple(other[self._KEYS.DATA][ri][0:2])
minlambdalen = min(len(lambda1), len(lambda2))
minfluxlen = min(len(flux1), len(flux2))
if (lambda1[:minlambdalen + 1] == lambda2[:minlambdalen + 1] and
flux1[:minfluxlen + 1] == flux2[:minfluxlen + 1] and
float(flux1[:minfluxlen + 1]) != 0.0):
row_matches += 1
# Five row matches should be enough to be sure spectrum is a dupe.
if row_matches >= 5:
return True
# Matches need to happen in the first 10 rows.
if ri >= 10:
break
return False
|
Check if spectrum is duplicate of another.
|
entailment
|
def sort_func(self, key):
"""Logic for sorting keys in a `Spectrum` relative to one another."""
if key == self._KEYS.TIME:
return 'aaa'
if key == self._KEYS.DATA:
return 'zzy'
if key == self._KEYS.SOURCE:
return 'zzz'
return key
|
Logic for sorting keys in a `Spectrum` relative to one another.
|
entailment
|
def sort_func(self, key):
"""Sorting logic for `Quantity` objects."""
if key == self._KEYS.VALUE:
return 'aaa'
if key == self._KEYS.SOURCE:
return 'zzz'
return key
|
Sorting logic for `Quantity` objects.
|
entailment
|
def keys(cls):
"""Return this class's attribute names (those not stating with '_').
Also retrieves the attributes from base classes, e.g.
For: ``ENTRY(KeyCollection)``, ``ENTRY.keys()`` gives just the
attributes of `ENTRY` (`KeyCollection` has no keys).
For: ``SUPERNOVA(ENTRY)``, ``SUPERNOVA.keys()`` gives both the
attributes of `SUPERNOVAE` itself, and of `ENTRY`.
Returns
-------
_keys : list of str
List of names of internal attributes. Order is effectively random.
"""
if cls._keys:
return cls._keys
# If `_keys` is not yet defined, create it
# ----------------------------------------
_keys = []
# get the keys from all base-classes as well (when this is subclassed)
for mro in cls.__bases__:
# base classes below `KeyCollection` (e.g. `object`) won't work
if issubclass(mro, KeyCollection):
_keys.extend(mro.keys())
# Get the keys from this particular subclass
# Only non-hidden (no '_') and variables (non-callable)
_keys.extend([
kk for kk in vars(cls).keys()
if not kk.startswith('_') and not callable(getattr(cls, kk))
])
# Store for future retrieval
cls._keys = _keys
return cls._keys
|
Return this class's attribute names (those not starting with '_').
Also retrieves the attributes from base classes, e.g.
For: ``ENTRY(KeyCollection)``, ``ENTRY.keys()`` gives just the
attributes of `ENTRY` (`KeyCollection` has no keys).
For: ``SUPERNOVA(ENTRY)``, ``SUPERNOVA.keys()`` gives both the
attributes of `SUPERNOVAE` itself, and of `ENTRY`.
Returns
-------
_keys : list of str
List of names of internal attributes. Order is effectively random.
|
entailment
|
def vals(cls):
"""Return this class's attribute values (those not stating with '_').
Returns
-------
_vals : list of objects
List of values of internal attributes. Order is effectively random.
"""
if cls._vals:
return cls._vals
# If `_vals` is not yet defined, create it
# ----------------------------------------
_vals = []
# get the keys from all base-classes as well (when this is subclassed)
for mro in cls.__bases__:
# base classes below `KeyCollection` (e.g. `object`) won't work
if issubclass(mro, KeyCollection):
_vals.extend(mro.vals())
# Get the keys from this particular subclass
# Only non-hidden (no '_') and variables (non-callable)
_vals.extend([
vv for kk, vv in vars(cls).items()
if not kk.startswith('_') and not callable(getattr(cls, kk))
])
# Store for future retrieval
cls._vals = _vals
return cls._vals
|
Return this class's attribute values (those not starting with '_').
Returns
-------
_vals : list of objects
List of values of internal attributes. Order is effectively random.
|
entailment
|
def compare_vals(cls, sort=True):
"""Return this class's attribute values (those not stating with '_'),
but only for attributes with `compare` set to `True`.
Returns
-------
_compare_vals : list of objects
List of values of internal attributes to use when comparing
`CatDict` objects. Order sorted by `Key` priority, followed by
alphabetical.
"""
if cls._compare_vals:
return cls._compare_vals
# If `_compare_vals` is not yet defined, create it
# ----------------------------------------
_compare_vals = []
# get the keys from all base-classes as well (when this is subclassed)
for mro in cls.__bases__:
# base classes below `KeyCollection` (e.g. `object`) won't work
if issubclass(mro, KeyCollection):
_compare_vals.extend(mro.compare_vals(sort=False))
# Get the keys from this particular subclass
# Only non-hidden (no '_') and variables (non-callable)
_compare_vals.extend([
vv for kk, vv in vars(cls).items()
if (not kk.startswith('_') and not callable(getattr(cls, kk)) and
vv.compare)
])
# Sort keys based on priority, high priority values first
if sort:
_compare_vals = sorted(
_compare_vals,
reverse=True,
key=lambda key: (key.priority, key.name))
# Store for future retrieval
cls._compare_vals = _compare_vals
return cls._compare_vals
|
Return this class's attribute values (those not starting with '_'),
but only for attributes with `compare` set to `True`.
Returns
-------
_compare_vals : list of objects
List of values of internal attributes to use when comparing
`CatDict` objects. Order sorted by `Key` priority, followed by
alphabetical.
|
entailment
|
def pretty(self):
"""Return a 'pretty' string representation of this `Key`.
note: do not override the builtin `__str__` or `__repr__` methods!
"""
retval = ("Key(name={}, type={}, listable={}, compare={}, "
"priority={}, kind_preference={}, "
"replace_better={})").format(
self.name, self.type, self.listable, self.compare,
self.priority, self.kind_preference, self.replace_better)
return retval
|
Return a 'pretty' string representation of this `Key`.
note: do not override the builtin `__str__` or `__repr__` methods!
|
entailment
|
def check(self, val):
"""Make sure given value is consistent with this `Key` specification.
NOTE: if `type` is 'None', then `listable` also is *not* checked.
"""
# If there is no `type` requirement, everything is allowed
if self.type is None:
return True
is_list = isinstance(val, list)
# If lists are not allowed, and this is a list --> false
if not self.listable and is_list:
return False
# `is_number` already checks for either list or single value
if self.type == KEY_TYPES.NUMERIC and not is_number(val):
return False
elif (self.type == KEY_TYPES.TIME and
not is_number(val) and '-' not in val and '/' not in val):
return False
elif self.type == KEY_TYPES.STRING:
# If its a list, check first element
if is_list:
if not isinstance(val[0], basestring):
return False
# Otherwise, check it
elif not isinstance(val, basestring):
return False
elif self.type == KEY_TYPES.BOOL:
if is_list and not isinstance(val[0], bool):
return False
elif not isinstance(val, bool):
return False
return True
|
Make sure given value is consistent with this `Key` specification.
NOTE: if `type` is 'None', then `listable` also is *not* checked.
|
entailment
|
def get_logger(name=None, stream_fmt=None, file_fmt=None, date_fmt=None,
stream_level=None, file_level=None,
tofile=None, tostr=True):
"""Create a standard logger object which logs to file and or stdout stream.
If a logger has already been created in this session, it is returned
(unless `name` is given).
Arguments
---------
name : str,
Handle for this logger, must be distinct for a distinct logger.
stream_fmt : str or `None`,
Format of log messages to stream (stdout). If `None`, default settings
are used.
file_fmt : str or `None`,
Format of log messages to file. If `None`, default settings are used.
date_fmt : str or `None`
Format of time stamps to stream and/or file. If `None`, default
settings are used.
stream_level : int,
Logging level for stream.
file_level : int,
Logging level for file.
tofile : str or `None`,
Filename to log to (turned off if `None`).
tostr : bool,
Log to stdout stream.
Returns
-------
logger : ``logging.Logger`` object,
Logger object to use for logging.
"""
if tofile is None and not tostr:
raise ValueError(
"Must log to something: `tofile` or `tostr` must be `True`.")
logger = logging.getLogger(name)
# Add a custom attribute to this `logger` so that we know when an existing
# one is being returned
if hasattr(logger, '_OSC_LOGGER'):
return logger
else:
logger._OSC_LOGGER = True
# Set other custom parameters
logger._LOADED = _LOADED_LEVEL
# Make sure handlers don't get duplicated (ipython issue)
while len(logger.handlers) > 0:
logger.handlers.pop()
# Prevents duplication or something something...
logger.propagate = 0
# Determine and Set Logging Levels
if file_level is None:
file_level = _FILE_LEVEL_DEF
if stream_level is None:
stream_level = _STREAM_LEVEL_DEF
# Logger object must be at minimum level
logger.setLevel(int(np.min([file_level, stream_level])))
if date_fmt is None:
date_fmt = '%Y/%m/%d %H:%M:%S'
# Log to file
# -----------
if tofile is not None:
if file_fmt is None:
file_fmt = "%(asctime)s %(levelname)8.8s [%(filename)20.20s:"
file_fmt += "%(funcName)-20.20s]%(indent)s%(message)s"
fileFormatter = IndentFormatter(file_fmt, datefmt=date_fmt)
fileHandler = logging.FileHandler(tofile, 'w')
fileHandler.setFormatter(fileFormatter)
fileHandler.setLevel(file_level)
logger.addHandler(fileHandler)
# Store output filename to `logger` object
logger.filename = tofile
# Log To stdout
# -------------
if tostr:
if stream_fmt is None:
stream_fmt = "%(indent)s%(message)s"
strFormatter = IndentFormatter(stream_fmt, datefmt=date_fmt)
strHandler = logging.StreamHandler()
strHandler.setFormatter(strFormatter)
strHandler.setLevel(stream_level)
logger.addHandler(strHandler)
return logger
|
Create a standard logger object which logs to file and/or stdout stream.
If a logger has already been created in this session, it is returned
(unless `name` is given).
Arguments
---------
name : str,
Handle for this logger, must be distinct for a distinct logger.
stream_fmt : str or `None`,
Format of log messages to stream (stdout). If `None`, default settings
are used.
file_fmt : str or `None`,
Format of log messages to file. If `None`, default settings are used.
date_fmt : str or `None`
Format of time stamps to stream and/or file. If `None`, default
settings are used.
stream_level : int,
Logging level for stream.
file_level : int,
Logging level for file.
tofile : str or `None`,
Filename to log to (turned off if `None`).
tostr : bool,
Log to stdout stream.
Returns
-------
logger : ``logging.Logger`` object,
Logger object to use for logging.
|
entailment
|
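A minimal usage sketch (the log-file name and levels are arbitrary choices): messages at or above the stream level go to the console, while the file handler can capture more detail.

import logging

log = get_logger(name='import-run', tofile='import.log',
                 stream_level=logging.WARNING, file_level=logging.DEBUG)
log.warning("shown on the console and written to import.log")
log.debug("written only to import.log")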
def log_raise(log, err_str, err_type=RuntimeError):
"""Log an error message and raise an error.
Arguments
---------
log : `logging.Logger` object
err_str : str
Error message to be logged and raised.
err_type : `Exception` object
Type of error to raise.
"""
log.error(err_str)
# Make sure output is flushed
# (happens automatically to `StreamHandlers`, but not `FileHandlers`)
for handle in log.handlers:
handle.flush()
# Raise given error
raise err_type(err_str)
|
Log an error message and raise an error.
Arguments
---------
log : `logging.Logger` object
err_str : str
Error message to be logged and raised.
err_type : `Exception` object
Type of error to raise.
|
entailment
|
def log_memory(log, pref=None, lvl=logging.DEBUG, raise_flag=True):
"""Log the current memory usage.
"""
import os
import sys
cyc_str = ""
KB = 1024.0
if pref is not None:
cyc_str += "{}: ".format(pref)
# Linux reports ru_maxrss in kilobytes; OSX reports it in bytes
UNIT = KB*KB if sys.platform == 'darwin' else KB
good = False
# Use the `resource` module to check the maximum memory usage of this process
try:
import resource
max_self = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
max_child = resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss
_str = "RSS Max Self: {:7.2f} [MB], Child: {:7.2f} [MB]".format(
max_self/UNIT, max_child/UNIT)
cyc_str += _str
except Exception as err:
log.log(lvl, "resource.getrusage failed. '{}'".format(str(err)))
if raise_flag:
raise
else:
good = True
# Use the `psutil` module to check the current memory/cpu usage of this process
try:
import psutil
process = psutil.Process(os.getpid())
rss = process.memory_info().rss
cpu_perc = process.cpu_percent()
mem_perc = process.memory_percent()
num_thr = process.num_threads()
_str = "; RSS: {:7.2f} [MB], {:7.2f}%; Threads: {:3d}, CPU: {:7.2f}%".format(
rss/UNIT, mem_perc, num_thr, cpu_perc)
cyc_str += _str
except Exception as err:
log.log(lvl, "psutil.Process failed. '{}'".format(str(err)))
if raise_flag:
raise
else:
good = True
if good:
log.log(lvl, cyc_str)
return
|
Log the current memory usage.
|
entailment
|
def doblob(morphed, blobdet, img, anno=True):
"""
img: can be RGB (MxNx3) or gray (MxN)
http://docs.opencv.org/master/modules/features2d/doc/drawing_function_of_keypoints_and_matches.html
http://docs.opencv.org/trunk/modules/features2d/doc/drawing_function_of_keypoints_and_matches.html
"""
keypoints = blobdet.detect(morphed)
nkey = len(keypoints)
kpsize = asarray([k.size for k in keypoints])
final = img.copy() # is the .copy necessary?
final = cv2.drawKeypoints(img, keypoints, outImage=final,
flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# %% plot count of blobs
if anno:
cv2.putText(final, text=str(nkey), org=(int(img.shape[1]*.9), 25),
fontFace=cv2.FONT_HERSHEY_PLAIN, fontScale=2,
color=(0, 255, 0), thickness=2)
return final, nkey, kpsize
|
img: can be RGB (MxNx3) or gray (MxN)
http://docs.opencv.org/master/modules/features2d/doc/drawing_function_of_keypoints_and_matches.html
http://docs.opencv.org/trunk/modules/features2d/doc/drawing_function_of_keypoints_and_matches.html
|
entailment
|
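A usage sketch for `doblob` (the input file and threshold choice are placeholders, and the module-level numpy/cv2 imports used by `doblob` are assumed to be in place):

import cv2

img = cv2.imread('frame.png', cv2.IMREAD_GRAYSCALE)        # placeholder image
_, morphed = cv2.threshold(img, 0, 255,
                           cv2.THRESH_BINARY + cv2.THRESH_OTSU)
blobdet = cv2.SimpleBlobDetector_create()                   # default parameters
annotated, nblobs, sizes = doblob(morphed, blobdet, img)
print(nblobs, 'blobs detected')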
def load_args(self, args, clargs):
"""Parse arguments and return configuration settings.
"""
# Parse All Arguments
args = self.parser.parse_args(args=clargs, namespace=args)
# Print the help information if no subcommand is given
# subcommand is required for operation
if args.subcommand is None:
self.parser.print_help()
args = None
return args
|
Parse arguments and return configuration settings.
|
entailment
|
def _setup_argparse(self):
"""Create `argparse` instance, and setup with appropriate parameters.
"""
parser = argparse.ArgumentParser(
prog='catalog', description='Parent Catalog class for astrocats.')
subparsers = parser.add_subparsers(
description='valid subcommands', dest='subcommand')
# Data Import
# -----------
# Add the 'import' command, and related arguments
self._add_parser_arguments_import(subparsers)
# Git Subcommands
# ---------------
self._add_parser_arguments_git(subparsers)
# Analyze Catalogs
# ----------------
# Add the 'analyze' command, and related arguments
self._add_parser_arguments_analyze(subparsers)
return parser
|
Create `argparse` instance, and setup with appropriate parameters.
|
entailment
|
def _add_parser_arguments_import(self, subparsers):
"""Create parser for 'import' subcommand, and associated arguments.
"""
import_pars = subparsers.add_parser(
"import", help="Import data.")
import_pars.add_argument(
'--update', '-u', dest='update',
default=False, action='store_true',
help='Only update catalog using live sources.')
import_pars.add_argument(
'--load-stubs', dest='load_stubs',
default=False, action='store_true',
help='Load stubs before running.')
import_pars.add_argument(
'--archived', '-a', dest='archived',
default=False, action='store_true',
help='Always use task caches.')
# Control which 'tasks' are executed
# ----------------------------------
import_pars.add_argument(
'--tasks', dest='args_task_list', nargs='*', default=None,
help='space delimited list of tasks to perform.')
import_pars.add_argument(
'--yes', dest='yes_task_list', nargs='+', default=None,
help='space delimited list of tasks to turn on.')
import_pars.add_argument(
'--no', dest='no_task_list', nargs='+', default=None,
help='space delimited list of tasks to turn off.')
import_pars.add_argument(
'--min-task-priority', dest='min_task_priority',
default=None,
help='minimum priority for a task to run')
import_pars.add_argument(
'--max-task-priority', dest='max_task_priority',
default=None,
help='maximum priority for a task to run')
import_pars.add_argument(
'--task-groups', dest='task_groups',
default=None,
help='predefined group(s) of tasks to run.')
return import_pars
|
Create parser for 'import' subcommand, and associated arguments.
|
entailment
|
def _add_parser_arguments_git(self, subparsers):
"""Create a sub-parsers for git subcommands.
"""
subparsers.add_parser(
"git-clone",
help="Clone all defined data repositories if they dont exist.")
subparsers.add_parser(
"git-push",
help="Add all files to data repositories, commit, and push.")
subparsers.add_parser(
"git-pull",
help="'Pull' all data repositories.")
subparsers.add_parser(
"git-reset-local",
help="Hard reset all data repositories using local 'HEAD'.")
subparsers.add_parser(
"git-reset-origin",
help="Hard reset all data repositories using 'origin/master'.")
subparsers.add_parser(
"git-status",
help="Get the 'git status' of all data repositories.")
return
|
Create a sub-parsers for git subcommands.
|
entailment
|
def _add_parser_arguments_analyze(self, subparsers):
"""Create a parser for the 'analyze' subcommand.
"""
lyze_pars = subparsers.add_parser(
"analyze",
help="Perform basic analysis on this catalog.")
lyze_pars.add_argument(
'--count', '-c', dest='count',
default=False, action='store_true',
help='Determine counts of entries, files, etc.')
return lyze_pars
|
Create a parser for the 'analyze' subcommand.
|
entailment
|
def compress_gz(fname):
"""Compress the file with the given name and delete the uncompressed file.
The compressed filename is simply the input filename with '.gz' appended.
Arguments
---------
fname : str
Name of the file to compress and delete.
Returns
-------
comp_fname : str
Name of the compressed file produced. Equal to `fname + '.gz'`.
"""
import shutil
import gzip
comp_fname = fname + '.gz'
with codecs.open(fname, 'rb') as f_in, gzip.open(
comp_fname, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(fname)
return comp_fname
|
Compress the file with the given name and delete the uncompressed file.
The compressed filename is simply the input filename with '.gz' appended.
Arguments
---------
fname : str
Name of the file to compress and delete.
Returns
-------
comp_fname : str
Name of the compressed file produced. Equal to `fname + '.gz'`.
|
entailment
|
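Usage sketch (the file name is a placeholder):

new_name = compress_gz('example.json')     # writes 'example.json.gz'
# 'example.json' has been removed; new_name == 'example.json.gz'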
def draw_flow(img, flow, step=16, dtype=uint8):
"""
draws flow vectors on image
this came from opencv/examples directory
another way: http://docs.opencv.org/trunk/doc/py_tutorials/py_gui/py_drawing_functions/py_drawing_functions.html
"""
maxval = iinfo(img.dtype).max
# scaleFact = 1. #arbitary factor to make flow visible
canno = (0, maxval, 0) # green color
h, w = img.shape[:2]
y, x = mgrid[step//2:h:step, step//2:w:step].reshape(2, -1)
fx, fy = flow[y, x].T
# create line endpoints
lines = vstack([x, y, (x+fx), (y+fy)]).T.reshape(-1, 2, 2)
lines = int32(lines + 0.5)
# create image
if img.ndim == 2: # assume gray
vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
else: # already RGB
vis = img
# draw line
cv2.polylines(vis, lines, isClosed=False, color=canno, thickness=1, lineType=8)
# draw filled green circles
for (x1, y1), (x2, y2) in lines:
cv2.circle(vis, center=(x1, y1), radius=1, color=canno, thickness=-1)
return vis
|
draws flow vectors on image
this came from opencv/examples directory
another way: http://docs.opencv.org/trunk/doc/py_tutorials/py_gui/py_drawing_functions/py_drawing_functions.html
|
entailment
|
def draw_hsv(mag, ang, dtype=uint8, fn=None):
"""
mag must be uint8, uint16, uint32 and 2-D
ang is in radians (float)
"""
assert mag.shape == ang.shape
assert mag.ndim == 2
maxval = iinfo(dtype).max
hsv = dstack(((degrees(ang)/2).astype(dtype), # /2 to keep less than 255
ones_like(mag)*maxval, # maxval must be after in 1-D case
cv2.normalize(mag, alpha=0, beta=maxval, norm_type=cv2.NORM_MINMAX)))
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
if fn is not None:
print('writing ' + fn)
cv2.imwrite(fn, rgb)
return rgb
|
mag must be uint8, uint16, uint32 and 2-D
ang is in radians (float)
|
entailment
|
def flow2magang(flow, dtype=uint8):
"""
flow has dimensions (y, x, 2); flow[..., 0] is the x-component and flow[..., 1] the y-component. Returns (magnitude, angle in radians).
"""
fx, fy = flow[..., 0], flow[..., 1]
return hypot(fx, fy).astype(dtype), arctan2(fy, fx) + pi
|
flow has dimensions (y, x, 2); flow[..., 0] is the x-component and flow[..., 1] the y-component. Returns (magnitude, angle in radians).
|
entailment
|
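Putting the flow helpers above together, a rough end-to-end sketch (the two frames here are synthetic placeholders, and the module-level numpy imports used by `draw_flow`/`flow2magang` are assumed):

import cv2
import numpy as np

prev = np.random.randint(0, 256, (240, 320), dtype=np.uint8)   # placeholder frames
curr = np.random.randint(0, 256, (240, 320), dtype=np.uint8)
flow = cv2.calcOpticalFlowFarneback(prev, curr, None,
                                    0.5, 3, 15, 3, 5, 1.2, 0)
mag, ang = flow2magang(flow)            # magnitude (uint8) and angle (radians)
vis = draw_flow(curr, flow, step=16)    # frame annotated with flow vectors
cv2.imwrite('flow_vectors.png', vis)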
def IOC(dir, type, nr, size):
"""
dir
One of IOC_NONE, IOC_WRITE, IOC_READ, or IOC_READ|IOC_WRITE.
Direction is from the application's point of view, not kernel's.
size (14-bit unsigned integer)
Size of the buffer passed to ioctl's "arg" argument.
"""
assert dir <= _IOC_DIRMASK, dir
assert type <= _IOC_TYPEMASK, type
assert nr <= _IOC_NRMASK, nr
assert size <= _IOC_SIZEMASK, size
return (dir << _IOC_DIRSHIFT) | (type << _IOC_TYPESHIFT) | (nr << _IOC_NRSHIFT) | (size << _IOC_SIZESHIFT)
|
dir
One of IOC_NONE, IOC_WRITE, IOC_READ, or IOC_READ|IOC_WRITE.
Direction is from the application's point of view, not kernel's.
size (14-bit unsigned integer)
Size of the buffer passed to ioctl's "arg" argument.
|
entailment
|
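For context, the shift constants follow Linux's `<asm-generic/ioctl.h>` layout (8 bits for `nr`, 8 for `type`, 14 for `size`, 2 for `dir`). A self-contained sketch that rebuilds a read-only ioctl number the same way (the constants below are the standard Linux values, assumed to match the ones defined elsewhere in this module):

import ctypes

_IOC_NRBITS, _IOC_TYPEBITS, _IOC_SIZEBITS = 8, 8, 14
_IOC_NRSHIFT = 0
_IOC_TYPESHIFT = _IOC_NRSHIFT + _IOC_NRBITS        # 8
_IOC_SIZESHIFT = _IOC_TYPESHIFT + _IOC_TYPEBITS    # 16
_IOC_DIRSHIFT = _IOC_SIZESHIFT + _IOC_SIZEBITS     # 30
IOC_READ = 2

def _ior(type_chr, nr, ctype):
    """Minimal stand-in for the C _IOR() macro, mirroring IOC() above."""
    size = ctypes.sizeof(ctype)
    return ((IOC_READ << _IOC_DIRSHIFT) | (ord(type_chr) << _IOC_TYPESHIFT) |
            (nr << _IOC_NRSHIFT) | (size << _IOC_SIZESHIFT))

# EVIOCGVERSION from <linux/input.h> is _IOR('E', 0x01, int):
print(hex(_ior('E', 0x01, ctypes.c_int)))   # -> 0x80044501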
def IOC_TYPECHECK(t):
"""
Returns the size of given type, and check its suitability for use in an
ioctl command number.
"""
result = ctypes.sizeof(t)
assert result <= _IOC_SIZEMASK, result
return result
|
Returns the size of given type, and check its suitability for use in an
ioctl command number.
|
entailment
|