text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def find_field(ctx, search, by_type, obj):
    """Find fields in registered data models.

    Searches either by field name or (with ``by_type``) by field type,
    in one object model (``obj``) or across all registered models.
    Matches are printed to stdout.
    """
    # TODO: Fix this to work recursively on all possible subschemes
    if search is None:
        # Original code had a no-op `search = search` branch here;
        # only the prompt fallback is meaningful.
        search = _ask("Enter search term")

    database = ctx.obj['db']

    def find(search_schema, search_field, find_result=None, key=""):
        """Examine a schema to find fields by type or name."""
        if find_result is None:
            find_result = []
        fields = search_schema['properties']

        if not by_type:
            # Name search: a direct hit records the current schema key.
            if search_field in fields:
                find_result.append(key)
        else:
            # Type search: compare each field's declared JSON type.
            for field in fields:
                try:
                    if "type" in fields[field] and \
                            fields[field]["type"] == search_field:
                        find_result.append((key, field))
                except KeyError as e:
                    log("Field access error:", e, type(e), exc=True,
                        lvl=debug)

        # Recurse into nested schemata. find() mutates find_result in
        # place and returns that same list, so its return value must NOT
        # be appended — the original code did, which made the result list
        # contain itself (a self-referential list).
        if 'properties' in fields:
            find(fields['properties'], search_field, find_result,
                 key=fields['name'])

        for field in fields:
            if 'items' in fields[field] and \
                    'properties' in fields[field]['items']:
                find(fields[field]['items'], search_field, find_result,
                     key=field)

        return find_result

    if obj is not None:
        # Search a single, named object model.
        schema = database.objectmodels[obj]._schema
        result = find(schema, search, [], key="top")
        if result:
            print(obj)
            pprint(result)
    else:
        # Search every registered object model.
        for model, thing in database.objectmodels.items():
            result = find(thing._schema, search, [], key="top")
            if result:
                print(model)
                print(result)
def Distance(lat1, lon1, lat2, lon2):
    """Return (back azimuth, distance) between two lat/lon points.

    Despite the name, this returns a pair: the back azimuth (degrees)
    and the geodesic distance (meters) from the WGS84 inverse solution.
    Note that ``wgs84_geod.inv`` takes lon/lat order.
    """
    fwd_azimuth, back_azimuth, meters = wgs84_geod.inv(lon1, lat1,
                                                       lon2, lat2)
    return back_azimuth, meters
def client_details(self, *args):
    """Display known details about a given client"""
    self.log(_('Client details:', lang='de'))
    requested_uuid = args[0]
    client = self._clients[requested_uuid]
    owner = self._users[client.useruuid]
    self.log('UUID:', client.uuid, 'IP:', client.ip, 'Name:',
             client.name, 'User:', owner, pretty=True)
def client_list(self, *args):
    """Display a list of connected clients"""
    if self._clients:
        self.log(self._clients, pretty=True)
    else:
        self.log('No clients connected')
def users_list(self, *args):
    """Display a list of connected users"""
    if self._users:
        self.log(self._users, pretty=True)
    else:
        self.log('No users connected')
def who(self, *args):
    """Display a table of connected users and clients"""
    if not self._users:
        self.log('No users connected')

    if not self._clients:
        self.log('No clients connected')
        return

    Row = namedtuple("Row", ['User', 'Client', 'IP'])

    # One row per (user, client) pairing...
    rows = [
        Row(user.account.name, key, client.ip)
        for user in self._users.values()
        for key, client in self._clients.items()
        if client.useruuid == user.uuid
    ]
    # ...plus rows for clients that are not signed in.
    rows.extend(
        Row('ANON', key, client.ip)
        for key, client in self._clients.items()
        if client.useruuid is None
    )

    self.log("\n" + std_table(rows))
def disconnect(self, sock):
    """Handles socket disconnections.

    Fires a clientdisconnect event, logs out the associated user (if
    any), then removes the client and socket records.
    """
    self.log("Disconnect ", sock, lvl=debug)
    try:
        # Silently ignore sockets we never registered.
        if sock in self._sockets:
            self.log("Getting socket", lvl=debug)
            sockobj = self._sockets[sock]
            self.log("Getting clientuuid", lvl=debug)
            clientuuid = sockobj.clientuuid
            self.log("getting useruuid", lvl=debug)
            useruuid = self._clients[clientuuid].useruuid
            self.log("Firing disconnect event", lvl=debug)
            self.fireEvent(clientdisconnect(clientuuid, self._clients[
                clientuuid].useruuid))
            self.log("Logging out relevant client", lvl=debug)
            if useruuid is not None:
                self.log("Client was logged in", lvl=debug)
                try:
                    self._logoutclient(useruuid, clientuuid)
                    self.log("Client logged out", useruuid, clientuuid)
                except Exception as e:
                    self.log("Couldn't clean up logged in user! ",
                             self._users[useruuid], e, type(e),
                             lvl=critical)
            # NOTE(review): `self._clients.keys` without parentheses logs
            # the bound method object, not the key list — presumably a
            # leftover typo; harmless since it is only logged.
            self.log("Deleting Client (", self._clients.keys, ")",
                     lvl=debug)
            del self._clients[clientuuid]
            self.log("Deleting Socket", lvl=debug)
            del self._sockets[sock]
    except Exception as e:
        self.log("Error during disconnect handling: ", e, type(e),
                 lvl=critical)
def _logoutclient(self, useruuid, clientuuid):
    """Log out a client and possibly associated user.

    Detaches the client from the user's client list; when that was the
    user's last client, fires a userlogout event and drops the user
    record entirely.
    """
    self.log("Cleaning up client of logged in user.", lvl=debug)
    try:
        user = self._users[useruuid]
        user.clients.remove(clientuuid)
        if not user.clients:
            # Last client gone: log the whole user out.
            self.log("Last client of user disconnected.", lvl=verbose)
            self.fireEvent(userlogout(useruuid, clientuuid))
            del self._users[useruuid]
        self._clients[clientuuid].useruuid = None
    except Exception as e:
        self.log("Error during client logout: ", e, type(e),
                 clientuuid, useruuid, lvl=error, exc=True)
def connect(self, *args):
    """Registers new sockets and their clients and allocates uuids.

    Expects (sock, ip) as positional arguments.
    """
    self.log("Connect ", args, lvl=verbose)
    try:
        sock, ip = args[0], args[1]
        if sock in self._sockets:
            self.log("Old IP reconnected!", lvl=warn)
        else:
            self.log("New client connected:", ip, lvl=debug)
            new_uuid = str(uuid4())
            self._sockets[sock] = Socket(ip, new_uuid)
            # This key uuid is temporary until sign-in; it is then
            # replaced with the account's client configuration uuid.
            self._clients[new_uuid] = Client(
                sock=sock,
                ip=ip,
                clientuuid=new_uuid,
            )
            self.log("Client connected:", new_uuid, lvl=debug)
    except Exception as e:
        self.log("Error during connect: ", e, type(e), lvl=critical)
def send(self, event):
    """Sends a packet to an already known user or one of his clients by
    UUID.

    When event.sendtype == "user", the packet is fanned out to every
    connected client of that user; otherwise event.uuid addresses a
    single client directly. event.raw selects unencoded transmission.
    """
    try:
        jsonpacket = json.dumps(event.packet, cls=ComplexEncoder)
        if event.sendtype == "user":
            # TODO: I think, caching a user name <-> uuid table would
            # make sense instead of looking this up all the time.
            if event.uuid is None:
                userobject = objectmodels['user'].find_one({
                    'name': event.username
                })
            else:
                userobject = objectmodels['user'].find_one({
                    'uuid': event.uuid
                })

            if userobject is None:
                self.log("No user by that name known.", lvl=warn)
                return
            else:
                uuid = userobject.uuid

            self.log("Broadcasting to all of users clients: '%s': '%s" % (
                uuid, str(event.packet)[:20]), lvl=network)
            if uuid not in self._users:
                self.log("User not connected!", event, lvl=critical)
                return
            clients = self._users[uuid].clients

            for clientuuid in clients:
                sock = self._clients[clientuuid].sock

                if not event.raw:
                    self.log("Sending json to client", jsonpacket[:50],
                             lvl=network)
                    self.fireEvent(write(sock, jsonpacket), "wsserver")
                else:
                    self.log("Sending raw data to client")
                    self.fireEvent(write(sock, event.packet), "wsserver")
        else:  # only to client
            self.log("Sending to user's client: '%s': '%s'" % (
                event.uuid, jsonpacket[:20]), lvl=network)
            if event.uuid not in self._clients:
                if not event.fail_quiet:
                    self.log("Unknown client!", event.uuid, lvl=critical)
                    self.log("Clients:", self._clients, lvl=debug)
                return

            sock = self._clients[event.uuid].sock

            if not event.raw:
                self.fireEvent(write(sock, jsonpacket), "wsserver")
            else:
                self.log("Sending raw data to client", lvl=network)
                # NOTE(review): only the first 20 bytes of the raw packet
                # are sent here — looks like a debugging truncation that
                # was left in; confirm before relying on raw client sends.
                self.fireEvent(write(sock, event.packet[:20]), "wsserver")

    except Exception as e:
        self.log("Exception during sending: %s (%s)" % (e, type(e)),
                 lvl=critical, exc=True)
def broadcast(self, event):
    """Broadcasts an event either to all users or clients, depending on
    event flag.

    broadcasttype selects the audience: "users" routes through the send
    event per user, "clients" writes to each client socket, and "socks"
    writes to every raw socket (emergency path).
    """
    try:
        if event.broadcasttype == "users":
            if len(self._users) > 0:
                self.log("Broadcasting to all users:", event.content,
                         lvl=network)
                for useruuid in self._users.keys():
                    self.fireEvent(
                        send(useruuid, event.content, sendtype="user"))

        elif event.broadcasttype == "clients":
            if len(self._clients) > 0:
                self.log("Broadcasting to all clients: ", event.content,
                         lvl=network)
                for client in self._clients.values():
                    self.fireEvent(write(client.sock, event.content),
                                   "wsserver")

        elif event.broadcasttype == "socks":
            if len(self._sockets) > 0:
                self.log("Emergency?! Broadcasting to all sockets: ",
                         event.content)
                for sock in self._sockets:
                    self.fireEvent(write(sock, event.content), "wsserver")
        # Empty audiences fall through silently by design.
    except Exception as e:
        self.log("Error during broadcast: ", e, type(e), lvl=critical)
def _checkPermissions(self, user, event):
    """Checks if the user has in any role that allows to fire the event.

    Returns True on the first role overlap between the user's account
    roles and the event's required roles, False otherwise.
    """
    granted = any(role in event.roles for role in user.account.roles)
    if granted:
        self.log('Access granted', lvl=verbose)
    else:
        self.log('Access denied', lvl=verbose)
    return granted
def _handleAuthorizedEvents(self, component, action, data, user, client):
    """Isolated communication link for authorized events.

    Builds the registered event for (component, action), verifies role
    permissions, and fires it. Anonymous requests to protected
    components are rejected up front.
    """
    try:
        if component == "debugger":
            self.log(component, action, data, user, client, lvl=info)

        # Reject anonymous access to components that require auth.
        if not user and component in self.authorized_events.keys():
            self.log("Unknown client tried to do an authenticated "
                     "operation: %s",
                     component, action, data, user)
            return

        event = self.authorized_events[component][action]['event'](
            user, action, data, client)

        self.log('Authorized event roles:', event.roles, lvl=verbose)
        if not self._checkPermissions(user, event):
            result = {
                'component': 'hfos.ui.clientmanager',
                'action': 'Permission',
                'data': _('You have no role that allows this action.',
                          lang='de')
            }
            self.fireEvent(send(event.client.uuid, result))
            return

        self.log("Firing authorized event: ", component, action,
                 str(data)[:100], lvl=debug)
        self.fireEvent(event)
    except Exception as e:
        self.log("Critical error during authorized event handling:",
                 component, action, e, type(e), lvl=critical, exc=True)
def _handleAuthenticationEvents(self, requestdata, requestaction,
                                clientuuid, sock):
    """Handler for authentication events.

    Dispatches login/autologin requests to the auth component and
    handles logout by tearing down the client's session.
    """
    # TODO: Move this stuff over to ./auth.py
    if requestaction in ("login", "autologin"):
        try:
            self.log("Login request", lvl=verbose)

            if requestaction == "autologin":
                # Autologin carries only the requested client uuid.
                username = password = None
                requestedclientuuid = requestdata
                auto = True
                self.log("Autologin for", requestedclientuuid, lvl=debug)
            else:
                username = requestdata['username']
                password = requestdata['password']
                if 'clientuuid' in requestdata:
                    requestedclientuuid = requestdata['clientuuid']
                else:
                    requestedclientuuid = None
                auto = False
                self.log("Auth request by", username, lvl=verbose)

            self.fireEvent(authenticationrequest(
                username,
                password,
                clientuuid,
                requestedclientuuid,
                sock,
                auto,
            ), "auth")
            return
        except Exception as e:
            self.log("Login failed: ", e, type(e), lvl=warn, exc=True)
    elif requestaction == "logout":
        self.log("User logged out, refreshing client.", lvl=network)
        try:
            if clientuuid in self._clients:
                client = self._clients[clientuuid]
                # NOTE(review): user_id is assigned but never used.
                user_id = client.useruuid
                if client.useruuid:
                    self.log("Logout client uuid: ", clientuuid)
                    self._logoutclient(client.useruuid, clientuuid)
                # NOTE(review): placement reconstructed — the disconnect
                # event plausibly belongs inside the `if client.useruuid`
                # branch instead; confirm against upstream history.
                self.fireEvent(clientdisconnect(clientuuid))
            else:
                self.log("Client is not connected!", lvl=warn)
        except Exception as e:
            self.log("Error during client logout: ", e, type(e),
                     lvl=error, exc=True)
    else:
        self.log("Unsupported auth action requested:", requestaction,
                 lvl=warn)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _reset_flood_offenders(self, *args): """Resets the list of flood offenders on event trigger"""
offenders = [] # self.log('Resetting flood offenders') for offender, offence_time in self._flooding.items(): if time() - offence_time < 10: self.log('Removed offender from flood list:', offender) offenders.append(offender) for offender in offenders: del self._flooding[offender]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _check_flood_protection(self, component, action, clientuuid): """Checks if any clients have been flooding the node"""
if clientuuid not in self._flood_counter: self._flood_counter[clientuuid] = 0 self._flood_counter[clientuuid] += 1 if self._flood_counter[clientuuid] > 100: packet = { 'component': 'hfos.ui.clientmanager', 'action': 'Flooding', 'data': True } self.fireEvent(send(clientuuid, packet)) self.log('Flooding from', clientuuid) return True
def authentication(self, event):
    """Links the client to the granted account and profile, then
    notifies the client.

    Mutates the client uuid to the stored client configuration's uuid,
    registers the client on the (possibly new) User record, rebuilds the
    Client entry, and transmits auth/profile/clientconfig packets.
    """
    try:
        self.log("Authorization has been granted by DB check:",
                 event.username, lvl=debug)

        account, profile, clientconfig = event.userdata

        useruuid = event.useruuid
        originatingclientuuid = event.clientuuid
        clientuuid = clientconfig.uuid

        if clientuuid != originatingclientuuid:
            self.log("Mutating client uuid to request id:", clientuuid,
                     lvl=network)

        # Assign client to user
        if useruuid in self._users:
            signedinuser = self._users[useruuid]
        else:
            signedinuser = User(account, profile, useruuid)
            # NOTE(review): stored under account.uuid but looked up above
            # by event.useruuid — presumably these coincide; confirm,
            # otherwise a fresh user is re-created on every login.
            self._users[account.uuid] = signedinuser

        if clientuuid in signedinuser.clients:
            self.log("Client configuration already logged in.",
                     lvl=critical)
            # TODO: What now??
            # Probably senseful would be to add the socket to the
            # client's other socket
            # The clients would be identical then - that could cause
            # problems
            # which could be remedied by duplicating the configuration
        else:
            signedinuser.clients.append(clientuuid)
            self.log("Active client (", clientuuid, ") registered to "
                     "user", useruuid, lvl=debug)

        # Update socket..
        socket = self._sockets[event.sock]
        socket.clientuuid = clientuuid
        self._sockets[event.sock] = socket

        # ..and client lists
        try:
            language = clientconfig.language
        except AttributeError:
            # Older client configurations have no language field.
            language = "en"

        # TODO: Rewrite and simplify this:
        newclient = Client(
            sock=event.sock,
            ip=socket.ip,
            clientuuid=clientuuid,
            useruuid=useruuid,
            name=clientconfig.name,
            config=clientconfig,
            language=language
        )
        # Replace the temporary (pre-login) client record.
        del (self._clients[originatingclientuuid])
        self._clients[clientuuid] = newclient

        authpacket = {"component": "auth", "action": "login",
                      "data": account.serializablefields()}
        self.log("Transmitting Authorization to client", authpacket,
                 lvl=network)
        self.fireEvent(
            write(event.sock, json.dumps(authpacket)),
            "wsserver"
        )

        profilepacket = {"component": "profile", "action": "get",
                         "data": profile.serializablefields()}
        self.log("Transmitting Profile to client", profilepacket,
                 lvl=network)
        self.fireEvent(write(event.sock, json.dumps(profilepacket)),
                       "wsserver")

        clientconfigpacket = {"component": "clientconfig",
                              "action": "get",
                              "data": clientconfig.serializablefields()}
        self.log("Transmitting client configuration to client",
                 clientconfigpacket, lvl=network)
        self.fireEvent(write(event.sock, json.dumps(clientconfigpacket)),
                       "wsserver")

        self.fireEvent(userlogin(clientuuid, useruuid, clientconfig,
                                 signedinuser))

        self.log("User configured: Name", signedinuser.account.name,
                 "Profile", signedinuser.profile.uuid, "Clients",
                 signedinuser.clients, lvl=debug)

    except Exception as e:
        self.log("Error (%s, %s) during auth grant: %s" % (
            type(e), e, event), lvl=error)
def selectlanguage(self, event):
    """Store client's selection of a new translation.

    Falls back to 'en' when the requested language is not registered;
    persists the choice on the client configuration when present.
    """
    self.log('Language selection event:', event.client, pretty=True)

    if event.data in all_languages():
        chosen = event.data
    else:
        self.log('Unavailable language selected:', event.data, lvl=warn)
        chosen = 'en'

    event.client.language = chosen

    if event.client.config is not None:
        event.client.config.language = chosen
        event.client.config.save()
def getlanguages(self, event):
    """Compile and return a human readable list of registered
    translations to the requesting client."""
    self.log('Client requests all languages.', lvl=verbose)
    packet = {
        'component': 'hfos.ui.clientmanager',
        'action': 'getlanguages',
        'data': language_token_to_name(all_languages())
    }
    self.fireEvent(send(event.client.uuid, packet))
def convert(self, lat, lon, height=0, datetime=None,
            precision=1e-10, ssheight=50*6371, source='geo', dest='geo') \
        if False else None
def convert(self, lat, lon, source, dest, height=0, datetime=None,
            precision=1e-10, ssheight=50*6371):
    """Converts between geodetic, modified apex, quasi-dipole and MLT.

    Parameters
    ==========
    lat, lon : array_like
        Latitude and longitude/MLT.
    source, dest : {'geo', 'apex', 'qd', 'mlt'}
        Input and output coordinate systems.
    height : array_like, optional
        Altitude in km.
    datetime : datetime.datetime
        Required whenever source or dest is 'mlt'.
    precision : float, optional
        Precision of output (degrees) when converting to geo; negative
        values select the low-precision spherical-harmonic path.
    ssheight : float, optional
        Altitude in km for the subsolar point conversion (high to avoid
        SAA influence on MLT).

    Returns
    =======
    lat, lon : ndarray or float
        Converted latitude and longitude/MLT (latitude is apex when
        converting to MLT).
    """
    if datetime is None and ('mlt' in [source, dest]):
        raise ValueError('datetime must be given for MLT calculations')

    lat = helpers.checklat(lat)

    if source == dest:
        return lat, lon

    # from geo
    elif source == 'geo' and dest == 'apex':
        lat, lon = self.geo2apex(lat, lon, height)
    elif source == 'geo' and dest == 'qd':
        lat, lon = self.geo2qd(lat, lon, height)
    elif source == 'geo' and dest == 'mlt':
        lat, lon = self.geo2apex(lat, lon, height)
        lon = self.mlon2mlt(lon, datetime, ssheight=ssheight)

    # from apex
    elif source == 'apex' and dest == 'geo':
        lat, lon, _ = self.apex2geo(lat, lon, height, precision=precision)
    elif source == 'apex' and dest == 'qd':
        lat, lon = self.apex2qd(lat, lon, height=height)
    elif source == 'apex' and dest == 'mlt':
        # apex latitude passes through unchanged for MLT output
        lon = self.mlon2mlt(lon, datetime, ssheight=ssheight)

    # from qd
    elif source == 'qd' and dest == 'geo':
        lat, lon, _ = self.qd2geo(lat, lon, height, precision=precision)
    elif source == 'qd' and dest == 'apex':
        lat, lon = self.qd2apex(lat, lon, height=height)
    elif source == 'qd' and dest == 'mlt':
        lat, lon = self.qd2apex(lat, lon, height=height)
        lon = self.mlon2mlt(lon, datetime, ssheight=ssheight)

    # from mlt (input latitude assumed apex)
    elif source == 'mlt' and dest == 'geo':
        lon = self.mlt2mlon(lon, datetime, ssheight=ssheight)
        lat, lon, _ = self.apex2geo(lat, lon, height, precision=precision)
    elif source == 'mlt' and dest == 'apex':
        lon = self.mlt2mlon(lon, datetime, ssheight=ssheight)
    elif source == 'mlt' and dest == 'qd':
        lon = self.mlt2mlon(lon, datetime, ssheight=ssheight)
        lat, lon = self.apex2qd(lat, lon, height=height)

    # no other transformations are implemented
    else:
        estr = 'Unknown coordinate transformation: '
        estr += '{} -> {}'.format(source, dest)
        raise NotImplementedError(estr)

    return lat, lon
def geo2apex(self, glat, glon, height):
    """Convert geodetic to modified apex coordinates.

    Parameters: geodetic latitude/longitude (degrees) and altitude (km).
    Returns (alat, alon) as float64 scalars or ndarrays. Emits a warning
    where the underlying routine flags undefined latitudes with -9999.
    """
    checked_glat = helpers.checklat(glat, name='glat')

    raw_alat, raw_alon = self._geo2apex(checked_glat, glon, height)

    if np.any(np.float64(raw_alat) == -9999):
        warnings.warn('Apex latitude set to -9999 where undefined '
                      '(apex height may be < reference height)')

    # The underlying call may return object-dtype arrays; coerce to float.
    return np.float64(raw_alat), np.float64(raw_alon)
def apex2geo(self, alat, alon, height, precision=1e-10):
    """Convert modified apex to geodetic coordinates.

    Routes through quasi-dipole: apex -> qd -> geo. Returns
    (glat, glon, error) where error is the angular difference (degrees)
    of the QD round trip; a negative precision selects the fast,
    low-precision path.
    """
    checked_alat = helpers.checklat(alat, name='alat')

    qd_lat, qd_lon = self.apex2qd(checked_alat, alon, height=height)
    return self.qd2geo(qd_lat, qd_lon, height, precision=precision)
def geo2qd(self, glat, glon, height):
    """Convert geodetic to quasi-dipole coordinates.

    Returns (qlat, qlon) as float64 scalars or ndarrays.
    """
    checked_glat = helpers.checklat(glat, name='glat')

    raw_qlat, raw_qlon = self._geo2qd(checked_glat, glon, height)

    # The underlying call may return object-dtype arrays; coerce to float.
    return np.float64(raw_qlat), np.float64(raw_qlon)
def qd2geo(self, qlat, qlon, height, precision=1e-10):
    """Convert quasi-dipole to geodetic coordinates.

    Returns (glat, glon, error) as float64; error is the angular
    difference (degrees) of the QD round trip. A negative precision
    selects the fast, low-precision path.
    """
    checked_qlat = helpers.checklat(qlat, name='qlat')

    raw_glat, raw_glon, raw_err = self._qd2geo(checked_qlat, qlon,
                                               height, precision)

    # The underlying call may return object-dtype arrays; coerce to float.
    return np.float64(raw_glat), np.float64(raw_glon), np.float64(raw_err)
def apex2qd(self, alat, alon, height):
    """Convert modified apex to quasi-dipole coordinates.

    Returns (qlat, qlon) as float64. The underlying routine raises
    ApexHeightError when height exceeds the apex height.
    """
    raw_qlat, raw_qlon = self._apex2qd(alat, alon, height)

    # The underlying call may return object-dtype arrays; coerce to float.
    return np.float64(raw_qlat), np.float64(raw_qlon)
def qd2apex(self, qlat, qlon, height):
    """Convert quasi-dipole to modified apex coordinates.

    Returns (alat, alon) as float64. The underlying routine raises
    ApexHeightError when the apex height is below the reference height.
    """
    raw_alat, raw_alon = self._qd2apex(qlat, qlon, height)

    # The underlying call may return object-dtype arrays; coerce to float.
    return np.float64(raw_alat), np.float64(raw_alon)
def mlon2mlt(self, mlon, datetime, ssheight=50*6371):
    """Compute magnetic local time from magnetic longitude and UT.

    Finds the apex longitude of the subsolar point at `datetime` and
    measures MLT as the magnetic-longitude separation from it
    (1 hour = 15 degrees). Returns values in [0, 24).
    """
    sub_glat, sub_glon = helpers.subsol(datetime)
    _, subsolar_mlon = self.geo2apex(sub_glat, sub_glon, ssheight)

    # np.float64 ensures list inputs are converted to arrays.
    return (180 + np.float64(mlon) - subsolar_mlon) / 15 % 24
def mlt2mlon(self, mlt, datetime, ssheight=50*6371):
    """Compute magnetic longitude from magnetic local time and UT.

    Inverse of mlon2mlt: offsets from the subsolar point's apex
    longitude at `datetime` (1 hour = 15 degrees). Returns values in
    [0, 360).
    """
    sub_glat, sub_glon = helpers.subsol(datetime)
    _, subsolar_mlon = self.geo2apex(sub_glat, sub_glon, ssheight)

    # np.float64 ensures list inputs are converted to arrays.
    return (15 * np.float64(mlt) - 180 + subsolar_mlon + 360) % 360
def map_to_height(self, glat, glon, height, newheight, conjugate=False,
                  precision=1e-10):
    """Map a point along the magnetic field line to another altitude.

    Converts the geodetic input to modified apex coordinates, optionally
    flips to the conjugate hemisphere, and converts back to geodetic at
    `newheight`. Returns (newglat, newglon, error).
    """
    apex_lat, apex_lon = self.geo2apex(glat, glon, height)

    if conjugate:
        # Negating apex latitude selects the conjugate hemisphere.
        apex_lat = -apex_lat

    try:
        return self.apex2geo(apex_lat, apex_lon, newheight,
                             precision=precision)
    except ApexHeightError:
        raise ApexHeightError("newheight is > apex height")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def map_E_to_height(self, alat, alon, height, newheight, E): """Performs mapping of electric field along the magnetic field. It is assumed that the electric field is perpendicular to B. Parameters ========== alat : (N,) array_like or float Modified apex latitude alon : (N,) array_like or float Modified apex longitude height : (N,) array_like or float Source altitude in km newheight : (N,) array_like or float Destination altitude in km E : (3,) or (3, N) array_like Electric field (at `alat`, `alon`, `height`) in geodetic east, north, and up components Returns ======= E : (3, N) or (3,) ndarray The electric field at `newheight` (geodetic east, north, and up components) """
# Delegate to the shared E/V field-line mapper, tagging the quantity
# as an electric field ('E') so the proper base vectors are used.
return self._map_EV_to_height(alat, alon, height, newheight, E, 'E')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def map_V_to_height(self, alat, alon, height, newheight, V): """Performs mapping of electric drift velocity along the magnetic field. It is assumed that the electric field is perpendicular to B. Parameters ========== alat : (N,) array_like or float Modified apex latitude alon : (N,) array_like or float Modified apex longitude height : (N,) array_like or float Source altitude in km newheight : (N,) array_like or float Destination altitude in km V : (3,) or (3, N) array_like Electric drift velocity (at `alat`, `alon`, `height`) in geodetic east, north, and up components Returns ======= V : (3, N) or (3,) ndarray The electric drift velocity at `newheight` (geodetic east, north, and up components) """
# Delegate to the shared E/V field-line mapper, tagging the quantity
# as a drift velocity ('V') so the proper base vectors are used.
return self._map_EV_to_height(alat, alon, height, newheight, V, 'V')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def basevectors_qd(self, lat, lon, height, coords='geo', precision=1e-10): """Returns quasi-dipole base vectors f1 and f2 at the specified coordinates. The vectors are described by Richmond [1995] [2]_ and Emmert et al. [2010] [3]_. The vector components are geodetic east and north. Parameters ========== lat : (N,) array_like or float Latitude lon : (N,) array_like or float Longitude height : (N,) array_like or float Altitude in km coords : {'geo', 'apex', 'qd'}, optional Input coordinate system precision : float, optional Precision of output (degrees) when converting to geo. A negative value of this argument produces a low-precision calculation of geodetic lat/lon based only on their spherical harmonic representation. A positive value causes the underlying Fortran routine to iterate until feeding the output geo lat/lon into geo2qd (APXG2Q) reproduces the input QD lat/lon to within the specified precision (all coordinates being converted to geo are converted to QD first and passed through APXG2Q). Returns ======= f1 : (2, N) or (2,) ndarray f2 : (2, N) or (2,) ndarray References ========== .. [2] Richmond, A. D. (1995), Ionospheric Electrodynamics Using Magnetic Apex Coordinates, Journal of geomagnetism and geoelectricity, 47(2), 191–212, :doi:`10.5636/jgg.47.191`. .. [3] Emmert, J. T., A. D. Richmond, and D. P. Drob (2010), A computationally compact representation of Magnetic-Apex and Quasi-Dipole coordinates with smooth base vectors, J. Geophys. Res., 115(A8), A08322, :doi:`10.1029/2010JA015326`. """
# Work in geodetic coordinates regardless of the input coordinate system
glat, glon = self.convert(lat, lon, coords, 'geo', height=height, precision=precision)

# Evaluate the quasi-dipole base vectors at the geodetic location
f1, f2 = self._basevec(glat, glon, height)

# if inputs are not scalar, each vector is an array of arrays,
# so reshape to a single array
if f1.dtype == object:
    f1 = np.vstack(f1).T
    f2 = np.vstack(f2).T

return f1, f2
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_apex(self, lat, height=None): """ Calculate apex height Parameters lat : (float) Latitude in degrees height : (float or NoneType) Height above the surface of the earth in km or NoneType to use reference height (default=None) Returns apex_height : (float) Height of the field line apex in km """
# Validate the latitude argument via the shared helper
lat = helpers.checklat(lat, name='alat')

# Default to the instance's reference height when none is given
if height is None:
    height = self.refh

# Dipole field-line equation: r_apex = r / cos^2(latitude), with r
# measured from Earth's centre (RE + height); subtract RE to return
# the apex height above the surface.
cos_lat_squared = np.cos(np.radians(lat))**2
apex_height = (self.RE + height) / cos_lat_squared - self.RE
return apex_height
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_epoch(self, year): """Updates the epoch for all subsequent conversions. Parameters ========== year : float Decimal year """
# Reload the spherical-harmonic coefficient file for the requested
# epoch, then remember the year for all subsequent conversions.
# NOTE: np.float was only a deprecated alias of the builtin float and
# was removed in NumPy 1.24 — use float() directly.
fa.loadapxsh(self.datafile, float(year))
self.year = year
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def basic_parser(patterns, with_name=None): """ Basic ordered parser. """
def parse(line):
    """Run every pattern over ``line`` and keep the match list of the
    highest-order pattern that produced at least one real match."""
    output = None
    highest_order = 0
    highest_pattern_name = None
    for pattern in patterns:
        results = pattern.findall(line)
        # Skip patterns that matched nothing (or only empty results)
        if results and any(results):
            # Higher-order patterns take precedence over earlier winners
            if pattern.order > highest_order:
                output = results
                highest_order = pattern.order
                if with_name:
                    highest_pattern_name = pattern.name
    if with_name:
        return output, highest_pattern_name
    return output
return parse
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parser(parser_type=basic_parser, functions=None, patterns=None, expressions=None, patterns_yaml_path=None, expressions_yaml_path=None): """ A Reparse parser description. Simply provide the functions, patterns, & expressions to build. If you are using YAML for expressions + patterns, you can use ``expressions_yaml_path`` & ``patterns_yaml_path`` for convenience. The default parser_type is the basic ordered parser. """
from reparse.builders import build_all from reparse.validators import validate def _load_yaml(file_path): import yaml with open(file_path) as f: return yaml.safe_load(f) assert expressions or expressions_yaml_path, "Reparse can't build a parser without expressions" assert patterns or patterns_yaml_path, "Reparse can't build a parser without patterns" assert functions, "Reparse can't build without a functions" if patterns_yaml_path: patterns = _load_yaml(patterns_yaml_path) if expressions_yaml_path: expressions = _load_yaml(expressions_yaml_path) validate(patterns, expressions) return parser_type(build_all(patterns, expressions, functions))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _translate(self, input_filename, output_filename): """Translate KML file to geojson for import"""
command = [ self.translate_binary, '-f', 'GeoJSON', output_filename, input_filename ] result = self._runcommand(command) self.log('Result (Translate): ', result, lvl=debug)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _update_guide(self, guide, update=False, clear=True): """Update a single specified guide"""
kml_filename = os.path.join(self.cache_path, guide + '.kml') geojson_filename = os.path.join(self.cache_path, guide + '.geojson') if not os.path.exists(geojson_filename) or update: try: data = request.urlopen(self.guides[guide]).read().decode( 'utf-8') except (request.URLError, request.HTTPError) as e: self.log('Could not get web guide data:', e, type(e), lvl=warn) return with open(kml_filename, 'w') as f: f.write(data) self._translate(kml_filename, geojson_filename) with open(geojson_filename, 'r') as f: json_data = json.loads(f.read()) if len(json_data['features']) == 0: self.log('No features found!', lvl=warn) return layer = objectmodels['layer'].find_one({'name': guide}) if clear and layer is not None: layer.delete() layer = None if layer is None: layer_uuid = std_uuid() layer = objectmodels['layer']({ 'uuid': layer_uuid, 'name': guide, 'type': 'geoobjects' }) layer.save() else: layer_uuid = layer.uuid if clear: for item in objectmodels['geoobject'].find({'layer': layer_uuid}): self.log('Deleting old guide location', lvl=debug) item.delete() locations = [] for item in json_data['features']: self.log('Adding new guide location:', item, lvl=verbose) location = objectmodels['geoobject']({ 'uuid': std_uuid(), 'layer': layer_uuid, 'geojson': item, 'type': 'Skipperguide', 'name': 'Guide for %s' % (item['properties']['Name']) }) locations.append(location) self.log('Bulk inserting guide locations', lvl=debug) objectmodels['geoobject'].bulk_create(locations)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def send_mail_worker(config, mail, event): """Worker task to send out an email, which is a blocking process unless it is threaded"""
log = "" try: if config.get('ssl', True): server = SMTP_SSL(config['server'], port=config['port'], timeout=30) else: server = SMTP(config['server'], port=config['port'], timeout=30) if config['tls']: log += 'Starting TLS\n' server.starttls() if config['username'] != '': log += 'Logging in with ' + str(config['username']) + "\n" server.login(config['username'], config['password']) else: log += 'No username, trying anonymous access\n' log += 'Sending Mail\n' response_send = server.send_message(mail) server.quit() except timeout as e: log += 'Could not send email: ' + str(e) + "\n" return False, log, event log += 'Server response:' + str(response_send) return True, log, event
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def provision_system_user(items, database_name, overwrite=False, clear=False, skip_user_check=False): """Provision a system user"""
from hfos.provisions.base import provisionList from hfos.database import objectmodels # TODO: Add a root user and make sure owner can access it later. # Setting up details and asking for a password here is not very useful, # since this process is usually run automated. if overwrite is True: hfoslog('Refusing to overwrite system user!', lvl=warn, emitter='PROVISIONS') overwrite = False system_user_count = objectmodels['user'].count({'name': 'System'}) if system_user_count == 0 or clear is False: provisionList(Users, 'user', overwrite, clear, skip_user_check=True) hfoslog('Provisioning: Users: Done.', emitter="PROVISIONS") else: hfoslog('System user already present.', lvl=warn, emitter='PROVISIONS')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def Group(expressions, final_function, inbetweens, name=""): """ Group expressions together with ``inbetweens`` and with the output of a ``final_functions``. """
lengths = [] functions = [] regex = "" i = 0 for expression in expressions: regex += inbetweens[i] regex += "(?:" + expression.regex + ")" lengths.append(sum(expression.group_lengths)) functions.append(expression.run) i += 1 regex += inbetweens[i] return Expression(regex, functions, lengths, final_function, name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def findall(self, string): """ Parse string, returning all outputs as parsed by functions """
output = []
for match in self.pattern.findall(string):
    # re.findall returns a bare string when the pattern has a single
    # group; normalise to a list so self.run always sees a sequence
    if hasattr(match, 'strip'):
        match = [match]
    self._list_add(output, self.run(match))
return output
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def scan(self, string): """ Like findall, but also returning matching start and end string locations """
# Use the compiled pattern's scanner so start/end positions of each
# match are preserved alongside the parsed output.
return list(self._scanner_to_matches(self.pattern.scanner(string), self.run))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self, matches): """ Run group functions over matches """
def _run(matches):
    # Walk the flat match list, handing each group function the slice
    # of captures that belongs to it.
    group_starting_pos = 0
    for current_pos, (group_length, group_function) in enumerate(zip(self.group_lengths, self.group_functions)):
        start_pos = current_pos + group_starting_pos
        end_pos = current_pos + group_starting_pos + group_length
        yield group_function(matches[start_pos:end_pos])
        # Advance the cumulative offset past this group's captures
        group_starting_pos += group_length - 1
return self.final_function(list(_run(matches)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_logfile(path, instance): """Specify logfile path"""
global logfile
# Build the per-instance log file path. os.path.join is portable and
# avoids hand-assembled '/' separators.
logfile = os.path.join(os.path.normpath(path), 'hfos.' + instance + '.log')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_muted(what): """ Checks if a logged event is to be muted for debugging purposes. Also goes through the solo list - only items in there will be logged! :param what: :return: """
state = False for item in solo: if item not in what: state = True else: state = False break for item in mute: if item in what: state = True break return state
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_tagged(self, event): """Return a list of tagged objects for a schema"""
self.log("Tagged objects request for", event.data, "from", event.user, lvl=debug)

# Only serve schemata for which a tag index is actually registered
if event.data in self.tags:
    tagged = self._get_tagged(event.data)

    # Package the tagged objects and send them back to the requester
    response = {
        'component': 'hfos.events.schemamanager',
        'action': 'get',
        'data': tagged
    }
    self.fireEvent(send(event.client.uuid, response))
else:
    self.log("Unavailable schema requested!", lvl=warn)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def provision_system_vessel(items, database_name, overwrite=False, clear=False, skip_user_check=False): """Provisions the default system vessel"""
from hfos.provisions.base import provisionList from hfos.database import objectmodels vessel = objectmodels['vessel'].find_one({'name': 'Default System Vessel'}) if vessel is not None: if overwrite is False: hfoslog('Default vessel already existing. Skipping provisions.') return else: vessel.delete() provisionList([SystemVessel], 'vessel', overwrite, clear, skip_user_check) sysconfig = objectmodels['systemconfig'].find_one({'active': True}) hfoslog('Adapting system config for default vessel:', sysconfig) sysconfig.vesseluuid = SystemVessel['uuid'] sysconfig.save() hfoslog('Provisioning: Vessel: Done.', emitter='PROVISIONS')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def towgs84(E, N, pkm=False, presentation=None): """ Convert coordintes from TWD97 to WGS84 The east and north coordinates should be in meters and in float pkm true for Penghu, Kinmen and Matsu area You can specify one of the following presentations of the returned values: dms - A tuple with degrees (int), minutes (int) and seconds (float) dmsstr - [+/-]DDD°MMM'DDD.DDDDD" (unicode) mindec - A tuple with degrees (int) and minutes (float) mindecstr - [+/-]DDD°MMM.MMMMM' (unicode) (default)degdec - DDD.DDDDD (float) """
_lng0 = lng0pkm if pkm else lng0 E /= 1000.0 N /= 1000.0 epsilon = (N-N0) / (k0*A) eta = (E-E0) / (k0*A) epsilonp = epsilon - beta1*sin(2*1*epsilon)*cosh(2*1*eta) - \ beta2*sin(2*2*epsilon)*cosh(2*2*eta) - \ beta3*sin(2*3*epsilon)*cosh(2*3*eta) etap = eta - beta1*cos(2*1*epsilon)*sinh(2*1*eta) - \ beta2*cos(2*2*epsilon)*sinh(2*2*eta) - \ beta3*cos(2*3*epsilon)*sinh(2*3*eta) sigmap = 1 - 2*1*beta1*cos(2*1*epsilon)*cosh(2*1*eta) - \ 2*2*beta2*cos(2*2*epsilon)*cosh(2*2*eta) - \ 2*3*beta3*cos(2*3*epsilon)*cosh(2*3*eta) taup = 2*1*beta1*sin(2*1*epsilon)*sinh(2*1*eta) + \ 2*2*beta2*sin(2*2*epsilon)*sinh(2*2*eta) + \ 2*3*beta3*sin(2*3*epsilon)*sinh(2*3*eta) chi = asin(sin(epsilonp) / cosh(etap)) latitude = chi + delta1*sin(2*1*chi) + \ delta2*sin(2*2*chi) + \ delta3*sin(2*3*chi) longitude = _lng0 + atan(sinh(etap) / cos(epsilonp)) func = None presentation = 'to%s' % presentation if presentation else None if presentation in presentations: func = getattr(sys.modules[__name__], presentation) if func and func != 'todegdec': return func(degrees(latitude)), func(degrees(longitude)) return (degrees(latitude), degrees(longitude))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fromwgs84(lat, lng, pkm=False): """ Convert coordintes from WGS84 to TWD97 pkm true for Penghu, Kinmen and Matsu area The latitude and longitude can be in the following formats: [+/-]DDD°MMM'SSS.SSSS" (unicode) [+/-]DDD°MMM.MMMM' (unicode) [+/-]DDD.DDDDD (string, unicode or float) The returned coordinates are in meters """
_lng0 = lng0pkm if pkm else lng0 lat = radians(todegdec(lat)) lng = radians(todegdec(lng)) t = sinh((atanh(sin(lat)) - 2*pow(n,0.5)/(1+n)*atanh(2*pow(n,0.5)/(1+n)*sin(lat)))) epsilonp = atan(t/cos(lng-_lng0)) etap = atan(sin(lng-_lng0) / pow(1+t*t, 0.5)) E = E0 + k0*A*(etap + alpha1*cos(2*1*epsilonp)*sinh(2*1*etap) + alpha2*cos(2*2*epsilonp)*sinh(2*2*etap) + alpha3*cos(2*3*epsilonp)*sinh(2*3*etap)) N = N0 + k0*A*(epsilonp + alpha1*sin(2*1*epsilonp)*cosh(2*1*etap) + alpha2*sin(2*2*epsilonp)*cosh(2*2*etap) + alpha3*sin(2*3*epsilonp)*cosh(2*3*etap)) return E*1000, N*1000
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def clipValue(self, value, minValue, maxValue):
    '''Clamp ``value`` into the range bounded by ``minValue`` and
    ``maxValue`` and return the clamped result.
    '''
    # Raise to the lower bound first, then cap at the upper bound.
    raised = max(value, minValue)
    return min(raised, maxValue)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def getGroundResolution(self, latitude, level):
    '''
    Return the ground resolution (ground meters covered by one pixel)
    at the given latitude and zoom level.
    '''
    # Clamp latitude into the Mercator-projectable range first
    latitude = self.clipValue(latitude, self.min_lat, self.max_lat);
    mapSize = self.getMapDimensionsByZoomLevel(level)
    # Earth circumference at this latitude divided by map width in pixels
    return math.cos( latitude * math.pi / 180) * 2 * math.pi * self.earth_radius / \
        mapSize
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def getMapScale(self, latitude, level, dpi=96):
    '''Return the map scale for the given latitude, zoom level and
    screen resolution (dots per inch).
    '''
    dots_per_meter = dpi / 0.0254  # 0.0254 meters per inch
    return self.getGroundResolution(latitude, level) * dots_per_meter
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def convertLatLngToPixelXY(self, lat, lng, level):
    '''Project a latitude/longitude pair onto absolute (Web-Mercator)
    pixel coordinates at the given zoom level.
    '''
    map_size = self.getMapDimensionsByZoomLevel(level)
    # Keep inputs inside the projectable range
    lat = self.clipValue(lat, self.min_lat, self.max_lat)
    lng = self.clipValue(lng, self.min_lng, self.max_lng)
    # Normalised map coordinates in [0, 1]
    norm_x = (lng + 180) / 360
    sin_lat = math.sin(lat * math.pi / 180)
    norm_y = 0.5 - math.log((1 + sin_lat) / (1 - sin_lat)) / (4 * math.pi)
    # Scale to pixels and clamp to the valid pixel range
    pixel_x = int(self.clipValue(norm_x * map_size + 0.5, 0, map_size - 1))
    pixel_y = int(self.clipValue(norm_y * map_size + 0.5, 0, map_size - 1))
    return (pixel_x, pixel_y)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def convertPixelXYToLngLat(self, pixelX, pixelY, level):
    '''Inverse Web-Mercator projection: convert absolute pixel
    coordinates at the given zoom level back to (longitude, latitude).
    '''
    map_size = self.getMapDimensionsByZoomLevel(level)
    # Normalised offsets from the map centre, each in [-0.5, 0.5]
    norm_x = (self.clipValue(pixelX, 0, map_size - 1) / map_size) - 0.5
    norm_y = 0.5 - (self.clipValue(pixelY, 0, map_size - 1) / map_size)
    longitude = 360 * norm_x
    latitude = 90 - 360 * math.atan(math.exp(-norm_y * 2 * math.pi)) / math.pi
    return (longitude, latitude)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def tileXYZToQuadKey(self, x, y, z):
    '''Encode tile (x, y) at zoom ``z`` as a Bing-style quadKey string.

    One digit per zoom level, most significant level first; bit 0 of
    each digit comes from x, bit 1 from y.
    '''
    digits = []
    for level in range(z, 0, -1):
        mask = 1 << (level - 1)
        digit = 0
        if x & mask:
            digit |= 1
        if y & mask:
            digit |= 2
        digits.append(str(digit))
    return ''.join(digits)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def quadKeyToTileXYZ(self, quadKey):
    '''Decode a Bing-style quadKey back into tile (x, y, z) values.

    Raises ``Exception('Invalid QuadKey')`` for any digit outside 0-3.
    '''
    tileX = 0
    tileY = 0
    tileZ = len(quadKey)
    for position, digit in enumerate(quadKey):
        # Most significant level comes first in the key
        mask = 1 << (tileZ - position - 1)
        if digit == '1':
            tileX |= mask
        elif digit == '2':
            tileY |= mask
        elif digit == '3':
            tileX |= mask
            tileY |= mask
        elif digit != '0':
            raise Exception('Invalid QuadKey')
    return (tileX, tileY, tileZ)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def getTileUrlsByLatLngExtent(self, xmin, ymin, xmax, ymax, level): ''' Returns a list of tile urls by extent ''' # Upper-Left Tile tileXMin, tileYMin = self.tileUtils.convertLngLatToTileXY(xmin, ymax, level) # Lower-Right Tile tileXMax, tileYMax = self.tileUtils.convertLngLatToTileXY(xmax, ymin, level) tileUrls = [] for y in range(tileYMax, tileYMin - 1, -1): for x in range(tileXMin, tileXMax + 1, 1): tileUrls.append(self.createTileUrl(x, y, level)) return tileUrls
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def createTileUrl(self, x, y, z):
    '''Fill the ``{{x}}``/``{{y}}``/``{{z}}`` placeholders of the tile
    URL template with the given tile coordinates.
    '''
    url = self.tileTemplate
    for placeholder, value in (('{{x}}', x), ('{{y}}', y), ('{{z}}', z)):
        url = url.replace(placeholder, str(value))
    return url
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def userlogin(self, event): """Checks if an alert is ongoing and alerts the newly connected client, if so."""
client_uuid = event.clientuuid
self.log(event.user, pretty=True, lvl=verbose)
self.log('Adding client')
# Remember which user is attached to this client connection
self.clients[event.clientuuid] = event.user

# Replay every currently active alert to the freshly connected client
for topic, alert in self.alerts.items():
    self.alert(client_uuid, alert)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cli(ctx, instance, quiet, verbose, log_level, dbhost, dbname): """Isomer Management Tool This tool supports various operations to manage isomer instances. Most of the commands are grouped. To obtain more information about the groups' available sub commands/groups, try iso [group] To display details of a command or its sub groups, try iso [group] [subgroup] [..] [command] --help To get a map of all available commands, try iso cmdmap """
# Remember the selected instance; subcommands look it up via the context
ctx.obj['instance'] = instance

# Derive the database name from the instance name unless set explicitly
if dbname == db_default and instance != 'default':
    dbname = instance

ctx.obj['quiet'] = quiet
ctx.obj['verbose'] = verbose
# Apply the requested log level to both console and global verbosity
verbosity['console'] = log_level
verbosity['global'] = log_level

ctx.obj['dbhost'] = dbhost
ctx.obj['dbname'] = dbname
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(): """Primary entry point for all AstroCats catalogs. From this entry point, all internal catalogs can be accessed and their public methods executed (for example: import scripts). """
from datetime import datetime # Initialize Command-Line and User-Config Settings, Log # ----------------------------------------------------- beg_time = datetime.now() # Process command-line arguments to determine action # If no subcommand (e.g. 'import') is given, returns 'None' --> exit args, sub_clargs = load_command_line_args() if args is None: return # Create a logging object log = load_log(args) # Run configuration/setup interactive script if args.command == 'setup': setup_user_config(log) return # Make sure configuration file exists, or that's what we're doing # (with the 'setup' subcommand) if not os.path.isfile(_CONFIG_PATH): raise RuntimeError("'{}' does not exist. " "Run `astrocats setup` to configure." "".format(_CONFIG_PATH)) git_vers = get_git() title_str = "Astrocats, version: {}, SHA: {}".format(__version__, git_vers) log.warning("\n\n{}\n{}\n{}\n".format(title_str, '=' * len(title_str), beg_time.ctime())) # Load the user settings from the home directory args = load_user_config(args, log) # Choose Catalog and Operation(s) to perform # ------------------------------------------ mod_name = args.command log.debug("Importing specified module: '{}'".format(mod_name)) # Try to import the specified module try: mod = importlib.import_module('.' + mod_name, package='astrocats') except Exception as err: log.error("Import of specified module '{}' failed.".format(mod_name)) log_raise(log, str(err), type(err)) # Run the `main.main` method of the specified module log.debug("Running `main.main()`") mod.main.main(args, sub_clargs, log) end_time = datetime.now() log.warning("\nAll complete at {}, After {}".format(end_time, end_time - beg_time)) return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def setup_user_config(log): """Setup a configuration file in the user's home directory. Currently this method stores default values to a fixed configuration filename. It should be modified to run an interactive prompt session asking for parameters (or at least confirming the default ones). Arguments --------- log : `logging.Logger` object """
log.warning("AstroCats Setup") log.warning("Configure filepath: '{}'".format(_CONFIG_PATH)) # Create path to configuration file as needed config_path_dir = os.path.split(_CONFIG_PATH)[0] if not os.path.exists(config_path_dir): log.debug("Creating config directory '{}'".format(config_path_dir)) os.makedirs(config_path_dir) if not os.path.isdir(config_path_dir): log_raise(log, "Configure path error '{}'".format(config_path_dir)) # Determine default settings # Get this containing directory and use that as default data path def_base_path = os.path.abspath(os.path.dirname(os.path.abspath(__file__))) log.warning("Setting '{}' to default path: '{}'".format(_BASE_PATH_KEY, def_base_path)) config = {_BASE_PATH_KEY: def_base_path} # Write settings to configuration file json.dump(config, open(_CONFIG_PATH, 'w')) if not os.path.exists(def_base_path): log_raise(log, "Problem creating configuration file.") return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_user_config(args, log): """Load settings from the user's confiuration file, and add them to `args`. Settings are loaded from the configuration file in the user's home directory. Those parameters are added (as attributes) to the `args` object. Arguments --------- args : `argparse.Namespace` Namespace object to which configuration attributes will be added. Returns ------- args : `argparse.Namespace` Namespace object with added attributes. """
if not os.path.exists(_CONFIG_PATH): err_str = ( "Configuration file does not exists ({}).\n".format(_CONFIG_PATH) + "Run `python -m astrocats setup` to configure.") log_raise(log, err_str) config = json.load(open(_CONFIG_PATH, 'r')) setattr(args, _BASE_PATH_KEY, config[_BASE_PATH_KEY]) log.debug("Loaded configuration: {}: {}".format(_BASE_PATH_KEY, config[ _BASE_PATH_KEY])) return args
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_command_line_args(clargs=None): """Load and parse command-line arguments. Arguments --------- args : str or None 'Faked' commandline arguments passed to `argparse`. Returns ------- args : `argparse.Namespace` object Namespace in which settings are stored - default values modified by the given command-line arguments. """
import argparse git_vers = get_git() parser = argparse.ArgumentParser( prog='astrocats', description='Generate catalogs for astronomical data.') parser.add_argument('command', nargs='?', default=None) parser.add_argument( '--version', action='version', version='AstroCats v{}, SHA: {}'.format(__version__, git_vers)) parser.add_argument( '--verbose', '-v', dest='verbose', default=False, action='store_true', help='Print more messages to the screen.') parser.add_argument( '--debug', '-d', dest='debug', default=False, action='store_true', help='Print excessive messages to the screen.') parser.add_argument( '--include-private', dest='private', default=False, action='store_true', help='Include private data in import.') parser.add_argument( '--travis', '-t', dest='travis', default=False, action='store_true', help='Run import script in test mode for Travis.') parser.add_argument( '--clone-depth', dest='clone_depth', default=0, type=int, help=('When cloning git repos, only clone out to this depth ' '(default: 0 = all levels).')) parser.add_argument( '--purge-outputs', dest='purge_outputs', default=False, action='store_true', help=('Purge git outputs after cloning.')) parser.add_argument( '--log', dest='log_filename', default=None, help='Filename to which to store logging information.') # If output files should be written or not # ---------------------------------------- write_group = parser.add_mutually_exclusive_group() write_group.add_argument( '--write', action='store_true', dest='write_entries', default=True, help='Write entries to files [default].') write_group.add_argument( '--no-write', action='store_false', dest='write_entries', default=True, help='do not write entries to file.') # If previously cleared output files should be deleted or not # ----------------------------------------------------------- delete_group = parser.add_mutually_exclusive_group() delete_group.add_argument( '--predelete', action='store_true', dest='delete_old', default=True, help='Delete all 
old event files to begin [default].') delete_group.add_argument( '--no-predelete', action='store_false', dest='delete_old', default=True, help='Do not delete all old event files to start.') args, sub_clargs = parser.parse_known_args(args=clargs) # Print the help information if no command is given if args.command is None: parser.print_help() return None, None return args, sub_clargs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_log(args): """Load a `logging.Logger` object. Arguments --------- args : `argparse.Namespace` object Namespace containing required settings: {`args.debug`, `args.verbose`, and `args.log_filename`}. Returns ------- log : `logging.Logger` object """
def load_log(args):
    """Construct the catalog's `logging.Logger` from command-line settings.

    `args.debug` selects DEBUG-level stream output, `args.verbose` selects
    INFO, and `args.log_filename` (optionally) routes output to a file.

    Returns
    -------
    log : `logging.Logger` object
    """
    from astrocats.catalog.utils import logger

    # 'None' leaves the logger at its default stream verbosity.
    if args.debug:
        stream_level = logger.DEBUG
    elif args.verbose:
        stream_level = logger.INFO
    else:
        stream_level = None

    log = logger.get_logger(
        stream_level=stream_level, tofile=args.log_filename)
    # Stash the flags on the logger so downstream code can inspect them.
    log._verbose = args.verbose
    log._debug = args.debug
    return log
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compare_dicts(old_full, new_full, old_data, new_data, depth=0): """Function compares dictionaries by key-value recursively. Old and new input data are both dictionaries """
def compare_dicts(old_full, new_full, old_data, new_data, depth=0):
    """Recursively compare two dictionaries key-by-key.

    Keys are popped from `old_data`/`new_data` as they are examined, so both
    arguments are consumed (pass copies if you need them afterwards).  Key
    order is irrelevant; list values are compared after sorting.

    Bug fixes vs. previous version:
    - the missing-key branch printed `new_data` under both the "Old:" and
      "New:" labels; it now prints the old value under "Old:".
    - the list-of-dicts test checked `old_vals` twice instead of also
      checking `new_vals`.
    - an empty list no longer raises IndexError from `old_vals[0]`.

    Returns
    -------
    bool
        True if every key/value pair matches, False at the first mismatch.
    """
    depth = depth + 1
    indent = " " * depth

    # Print with an indentation matching the nested-dictionary depth
    # (renamed parameter: do not shadow the builtin `str`).
    def my_print(msg):
        print("{}{}".format(indent, msg))

    old_keys = list(old_data.keys())
    # Compare data key by key, in *this* dictionary level.
    # Note: since we're comparing by keys explicitly, order doesn't matter.
    for key in old_keys:
        # Remove elements as we go
        old_vals = old_data.pop(key)
        my_print("{}".format(key))
        # If `new_data` doesn't also have this key, return False
        if key not in new_data:
            my_print("Key '{}' not in new_data.".format(key))
            my_print("Old:")
            my_print(pprint(old_vals))
            my_print("New:")
            my_print(pprint(new_data))
            return False

        # If it does have the key, extract the values (remove as we go)
        new_vals = new_data.pop(key)

        # If these values are a sub-dictionary, compare those
        if isinstance(old_vals, dict) and isinstance(new_vals, dict):
            if not compare_dicts(old_full, new_full, old_vals, new_vals,
                                 depth=depth):
                return False
        # If these values are (non-empty) lists of sub-dictionaries,
        # compare each of those
        elif (isinstance(old_vals, list) and len(old_vals) and
              isinstance(old_vals[0], dict) and
              isinstance(new_vals, list) and len(new_vals) and
              isinstance(new_vals[0], dict)):
            for old_elem, new_elem in zip_longest(old_vals, new_vals):
                # If one or the other has extra elements, print message,
                # but continue on
                if old_elem is None or new_elem is None:
                    my_print("Missing element!")
                    my_print("\tOld: '{}'".format(old_elem))
                    my_print("\tNew: '{}'".format(new_elem))
                elif not compare_dicts(old_full, new_full, old_elem,
                                       new_elem, depth=depth):
                    return False
        # At the lowest-dictionary level, compare the values themselves
        else:
            # Wrap scalars in lists for convenience
            if (not isinstance(old_vals, list) and
                    not isinstance(new_vals, list)):
                old_vals = [old_vals]
                new_vals = [new_vals]

            old_vals = sorted(old_vals)
            new_vals = sorted(new_vals)
            for oldv, newv in zip_longest(old_vals, new_vals):
                # Extra elements on one side: report but continue
                if oldv is None or newv is None:
                    my_print("Missing element!")
                    my_print("\tOld: '{}'".format(oldv))
                    my_print("\tNew: '{}'".format(newv))
                elif oldv == newv:
                    my_print("Good Match: '{}'".format(key))
                else:
                    my_print("Bad Match: '{}'".format(key))
                    my_print("\tOld: '{}'".format(oldv))
                    my_print("\tNew: '{}'".format(newv))
                    return False

    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cohensutherland(xmin, ymax, xmax, ymin, x1, y1, x2, y2): """Clips a line to a rectangular area. This implements the Cohen-Sutherland line clipping algorithm. xmin, ymax, xmax and ymin denote the clipping area, into which the line defined by x1, y1 (start point) and x2, y2 (end point) will be clipped. If the line does not intersect with the rectangular clipping area, four None values will be returned as tuple. Otherwise a tuple of the clipped line points will be returned in the form (cx1, cy1, cx2, cy2). """
def cohensutherland(xmin, ymax, xmax, ymin, x1, y1, x2, y2):
    """Clip the segment (x1, y1)-(x2, y2) to the given rectangle.

    Implements the Cohen-Sutherland line-clipping algorithm.  `xmin`,
    `ymax`, `xmax`, `ymin` denote the clipping area.  Returns the clipped
    endpoints as a tuple (cx1, cy1, cx2, cy2), or (None, None, None, None)
    when the segment lies entirely outside the rectangle.
    """
    INSIDE, LEFT, RIGHT, LOWER, UPPER = 0, 1, 2, 4, 8

    def outcode(px, py):
        # Bitmask describing where the point sits relative to the box.
        code = INSIDE
        if px < xmin:
            code |= LEFT
        elif px > xmax:
            code |= RIGHT
        if py < ymin:
            code |= LOWER
        elif py > ymax:
            code |= UPPER
        return code

    code1 = outcode(x1, y1)
    code2 = outcode(x2, y2)

    # Loop until both endpoints are inside the box (outcodes both zero).
    while code1 | code2:
        # Both endpoints share an outside zone: trivially reject.
        if code1 & code2:
            return None, None, None, None

        # Pick the first endpoint outside the box ("or" short-circuits).
        outside = code1 or code2

        # Intersect the segment with the box edge named by the outcode.
        if outside & UPPER:
            nx = x1 + (x2 - x1) * (ymax - y1) / (y2 - y1)
            ny = ymax
        elif outside & LOWER:
            nx = x1 + (x2 - x1) * (ymin - y1) / (y2 - y1)
            ny = ymin
        elif outside & RIGHT:
            ny = y1 + (y2 - y1) * (xmax - x1) / (x2 - x1)
            nx = xmax
        elif outside & LEFT:
            ny = y1 + (y2 - y1) * (xmin - x1) / (x2 - x1)
            nx = xmin
        else:
            raise RuntimeError('Undefined clipping state')

        # Replace the clipped endpoint and recompute its outcode.
        if outside == code1:
            x1, y1 = nx, ny
            code1 = outcode(x1, y1)
        elif outside == code2:
            x2, y2 = nx, ny
            code2 = outcode(x2, y2)

    return x1, y1, x2, y2
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def setupuv(rc): """ Horn Schunck legacy OpenCV function requires we use these old-fashioned cv matrices, not numpy array """
def setupuv(rc):
    """Allocate legacy OpenCV `u`/`v` matrices of shape `rc`.

    The old Horn-Schunck OpenCV API requires these old-fashioned cv
    matrices rather than numpy arrays.  Returns [None, None] when the
    legacy `cv` module is unavailable.
    """
    if cv is None:
        return [None] * 2
    rows, cols = rc
    flow_u = cv.CreateMat(rows, cols, cv.CV_32FC1)
    flow_v = cv.CreateMat(rows, cols, cv.CV_32FC1)
    return (flow_u, flow_v)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _init_cat_dict(self, cat_dict_class, key_in_self, **kwargs): """Initialize a CatDict object, checking for errors. """
def _init_cat_dict(self, cat_dict_class, key_in_self, **kwargs):
    """Construct a `cat_dict_class` instance, tolerating known-bad data.

    Returns the new CatDict, or None when construction raises a
    `CatDictError` (logging an info-level message when the error asks
    to be warned about).
    """
    # Crappy -- but not unexpected -- data raises `CatDictError`.
    try:
        cat_dict = cat_dict_class(self, key=key_in_self, **kwargs)
    except CatDictError as err:
        if err.warn:
            self._log.info("'{}' Not adding '{}': '{}'".format(
                self[self._KEYS.NAME], key_in_self, str(err)))
        return None
    return cat_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _add_cat_dict(self, cat_dict_class, key_in_self, check_for_dupes=True, **kwargs): """Add a CatDict to this Entry if initialization succeeds and it doesn't already exist within the Entry. """
def _add_cat_dict(self, cat_dict_class, key_in_self,
                  check_for_dupes=True, **kwargs):
    """Create a CatDict and store it under `key_in_self` if it is new.

    A duplicate (non-Error) entry is merged into the existing item by
    appending its sources; in that case the *new* entry is returned so
    callers can still use its extra tags.  Returns False when the CatDict
    could not be constructed, True when it was appended.
    """
    new_entry = self._init_cat_dict(cat_dict_class, key_in_self, **kwargs)
    if new_entry is None:
        return False

    # Compare against every previous entry to make sure this one is new
    # (Error entries are exempt from the duplicate check).
    if cat_dict_class != Error:
        for existing in self.get(key_in_self, []):
            if new_entry.is_duplicate_of(existing):
                existing.append_sources_from(new_entry)
                # Return the entry in case we want to use any additional
                # tags to augment the old entry
                return new_entry

    self.setdefault(key_in_self, []).append(new_entry)
    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pbar(iter, desc='', **kwargs): """Wrapper for `tqdm` progress bar. """
def pbar(iter, desc='', **kwargs):
    """Wrap an iterable in a `tqdm` progress bar with a timestamped label."""
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    label = '<' + str(timestamp) + '> ' + desc
    return tqdm(iter, desc=label, dynamic_ncols=True, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pbar_strings(files, desc='', **kwargs): """Wrapper for `tqdm` progress bar which also sorts list of strings """
def pbar_strings(files, desc='', **kwargs):
    """Wrap a case-insensitively sorted list of strings in a `tqdm` bar."""
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    ordered = sorted(files, key=lambda s: s.lower())
    return tqdm(ordered, desc='<' + str(timestamp) + '> ' + desc,
                dynamic_ncols=True, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_task_priority(tasks, task_priority): """Get the task `priority` corresponding to the given `task_priority`. If `task_priority` is an integer or 'None', return it. If `task_priority` is a str, return the priority of the task it matches. Otherwise, raise `ValueError`. """
def _get_task_priority(tasks, task_priority):
    """Resolve `task_priority` to a numeric task priority.

    'None' and integers pass straight through; a string is looked up as a
    task name and that task's priority is returned.  Anything else raises
    `ValueError`.
    """
    if task_priority is None:
        return None
    if is_integer(task_priority):
        return task_priority
    # A string must name a known task.
    if isinstance(task_priority, basestring) and task_priority in tasks:
        return tasks[task_priority].priority

    raise ValueError("Unrecognized task priority '{}'".format(task_priority))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def import_data(self): """Run all of the import tasks. This is executed by the 'scripts.main.py' when the module is run as an executable. This can also be run as a method, in which case default arguments are loaded, but can be overriden using `**kwargs`. """
def import_data(self):
    """Run all active import tasks in priority order.

    Invoked by 'scripts.main.py' when the module runs as an executable,
    but can also be called as a method with defaults overridden.
    """
    tasks_list = self.load_task_list()
    warnings.filterwarnings(
        'ignore', r'Warning: converting a masked element to nan.')

    # Delete all old (previously constructed) output files
    if self.args.delete_old:
        self.log.warning("Deleting all old entry files.")
        self.delete_old_entry_files()

    # In update mode, load all entry stubs.
    if self.args.load_stubs or self.args.update:
        self.load_stubs()

    if self.args.travis:
        self.log.warning("Running in `travis` mode.")

    prev_priority = 0
    prev_task_name = ''
    for task_name, task_obj in tasks_list.items():
        if not task_obj.active:
            continue
        self.log.warning("Task: '{}'".format(task_name))

        nice_name = task_obj.nice_name
        mod_name = task_obj.module
        func_name = task_obj.function
        priority = task_obj.priority

        # Tasks must run in non-decreasing priority order.
        if priority < prev_priority and priority > 0:
            raise RuntimeError("Priority for '{}': '{}', less than prev,"
                               "'{}': '{}'.\n{}"
                               .format(task_name, priority, prev_task_name,
                                       prev_priority, task_obj))

        self.log.debug("\t{}, {}, {}, {}".format(nice_name, priority,
                                                 mod_name, func_name))
        mod = importlib.import_module('.' + mod_name, package='astrocats')
        self.current_task = task_obj
        getattr(mod, func_name)(self)

        num_events, num_stubs = self.count()
        self.log.warning("Task finished. Events: {}, Stubs: {}".format(
            num_events, num_stubs))
        self.journal_entries()
        num_events, num_stubs = self.count()
        self.log.warning("Journal finished. Events: {}, Stubs: {}".format(
            num_events, num_stubs))

        prev_priority = priority
        prev_task_name = task_name

    # Report peak resident memory at the end of the run.
    process = psutil.Process(os.getpid())
    memory = process.memory_info().rss
    self.log.warning('Memory used (MBs): '
                     '{:,}'.format(memory / 1024. / 1024.))
    return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_entry(self, name, load=True, delete=True): """Find an existing entry in, or add a new one to, the `entries` dict. FIX: rename to `create_entry`??? Returns ------- entries : OrderedDict of Entry objects newname : str Name of matching entry found in `entries`, or new entry added to `entries` """
def add_entry(self, name, load=True, delete=True):
    """Find an existing entry in, or add a new one to, `self.entries`.

    Returns
    -------
    newname : str
        Name of matching entry found in `entries`, or of the new entry
        added to `entries`.
    """
    newname = self.clean_entry_name(name)
    if not newname:
        raise ValueError('Fatal: Attempted to add entry with no name.')

    # A full (non-stub) entry can be returned immediately; a stub may
    # need its file loaded below.
    if newname in self.entries:
        self.log.debug("`newname`: '{}' (name: '{}') already exists.".
                       format(newname, name))
        if self.entries[newname]._stub:
            self.log.debug("'{}' is a stub".format(newname))
        else:
            self.log.debug("'{}' is not a stub, returning".format(newname))
            return newname

    # The name may be an alias of a different entry in `entries`.
    match_name = self.find_entry_name_of_alias(newname)
    if match_name is not None:
        self.log.debug(
            "`newname`: '{}' (name: '{}') already exists as alias for "
            "'{}'.".format(newname, name, match_name))
        newname = match_name

    # Try to load the full entry from file.
    if load:
        loaded_name = self.load_entry_from_name(newname, delete=delete)
        if loaded_name:
            return loaded_name

    # If we matched an existing event, return that.
    if match_name is not None:
        return match_name

    # Nothing matched: create a brand-new entry.
    new_entry = self.proto(catalog=self, name=newname)
    new_entry[self.proto._KEYS.SCHEMA] = self.SCHEMA.URL
    self.log.log(self.log._LOADED,
                 "Created new entry for '{}'".format(newname))
    self.entries[newname] = new_entry
    return newname
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_entry_name_of_alias(self, alias): """Return the first entry name with the given 'alias' included in its list of aliases. Returns ------- name of matching entry (str) or 'None' if no matches """
def find_entry_name_of_alias(self, alias):
    """Return the name of the first entry listing `alias` as an alias.

    Returns None when no entry matches.
    """
    if alias in self.aliases:
        name = self.aliases[alias]
        if name in self.entries:
            return name
        # The mapped name is gone (possibly merged or deleted); fall back
        # to a hard scan of every entry's alias list.
        for name, entry in self.entries.items():
            if alias in entry.get_aliases(includename=False):
                # Skip entries that explicitly mark this alias distinct.
                if (ENTRY.DISTINCT_FROM not in entry or
                        alias not in entry[ENTRY.DISTINCT_FROM]):
                    return name

    return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def copy_entry_to_entry(self, fromentry, destentry, check_for_dupes=True, compare_to_existing=True): """Used by `merge_duplicates` """
def copy_entry_to_entry(self, fromentry, destentry,
                        check_for_dupes=True, compare_to_existing=True):
    """Copy every quantity from `fromentry` into `destentry`.

    Used by `merge_duplicates`: sources and models are re-registered on
    the destination entry and each quantity's alias list is remapped.
    """
    self.log.info("Copy entry object '{}' to '{}'".format(
        fromentry[fromentry._KEYS.NAME], destentry[destentry._KEYS.NAME]))

    # Collect source definitions keyed by their old alias.
    newsourcealiases = {}
    if self.proto._KEYS.SOURCES in fromentry:
        for source in fromentry[self.proto._KEYS.SOURCES]:
            alias = source.pop(SOURCE.ALIAS)
            newsourcealiases[alias] = source

    # Collect model definitions keyed by their old alias.
    newmodelaliases = {}
    if self.proto._KEYS.MODELS in fromentry:
        for model in fromentry[self.proto._KEYS.MODELS]:
            alias = model.pop(MODEL.ALIAS)
            newmodelaliases[alias] = model

    if self.proto._KEYS.ERRORS in fromentry:
        for err in fromentry[self.proto._KEYS.ERRORS]:
            destentry.setdefault(self.proto._KEYS.ERRORS, []).append(err)

    for rkey in fromentry:
        key = fromentry._KEYS.get_key_by_name(rkey)
        if key.no_source:
            continue
        for item in fromentry[key]:
            if 'source' not in item:
                raise ValueError("Item has no source!")

            # Re-register each source on the destination entry.
            nsid = []
            for sid in item['source'].split(','):
                if sid in newsourcealiases:
                    source = newsourcealiases[sid]
                    nsid.append(destentry.add_source(**source))
                else:
                    raise ValueError("Couldn't find source alias!")
            item['source'] = uniq_cdl(nsid)

            # Re-register each model on the destination entry.
            if 'model' in item:
                nmid = []
                for mid in item['model'].split(','):
                    if mid in newmodelaliases:
                        model = newmodelaliases[mid]
                        nmid.append(destentry.add_model(**model))
                    else:
                        raise ValueError("Couldn't find model alias!")
                item['model'] = uniq_cdl(nmid)

            if key == ENTRY.PHOTOMETRY:
                destentry.add_photometry(
                    compare_to_existing=compare_to_existing, **item)
            elif key == ENTRY.SPECTRA:
                destentry.add_spectrum(
                    compare_to_existing=compare_to_existing, **item)
            elif key == ENTRY.ERRORS:
                destentry.add_error(**item)
            elif key == ENTRY.MODELS:
                continue
            else:
                destentry.add_quantity(
                    compare_to_existing=compare_to_existing,
                    check_for_dupes=False, quantities=key, **item)
    return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _delete_entry_file(self, entry_name=None, entry=None): """Delete the file associated with the given entry. """
def _delete_entry_file(self, entry_name=None, entry=None):
    """Delete the output file for a given entry (by name or object).

    Exactly one of `entry_name` / `entry` must be supplied.  Respects the
    `write_entries` command-line flag: when file-writing is disabled,
    nothing is removed.
    """
    if entry_name is None and entry is None:
        raise RuntimeError("Either `entry_name` or `entry` must be given.")
    if entry_name is not None and entry is not None:
        raise RuntimeError("Cannot use both `entry_name` and `entry`.")

    if entry_name is None:
        entry_name = entry[ENTRY.NAME]
    else:
        entry = self.entries[entry_name]

    # FIX: do we also need to check for gzipped files??
    entry_filename = self.entry_filename(entry_name)

    if not self.args.write_entries:
        self.log.debug("Not deleting '{}' because `write_entries`"
                       " is False".format(entry_filename))
        return

    self.log.info("Deleting entry file '{}' of entry '{}'".format(
        entry_filename, entry_name))
    if not os.path.exists(entry_filename):
        self.log.error(
            "Filename '{}' does not exist".format(entry_filename))
    os.remove(entry_filename)
    return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def journal_entries(self, clear=True, gz=False, bury=False, write_stubs=False, final=False): """Write all entries in `entries` to files, and clear. Depending on arguments and `tasks`. Iterates over all elements of `entries`, saving (possibly 'burying') and deleting. - If ``clear == True``, then each element of `entries` is deleted, and a `stubs` entry is added """
def journal_entries(self, clear=True, gz=False, bury=False,
                    write_stubs=False, final=False):
    """Write all current entries to files and (optionally) clear them.

    With `clear`, each full entry is replaced by a light-weight stub after
    saving; `gz` compresses files above `COMPRESS_ABOVE_FILESIZE`; `bury`
    routes non-SN entries according to `should_bury`.
    """
    # NOTE: iterate over a `list` copy since the dict is modified below.
    for name in list(self.entries.keys()):
        if self.args.write_entries:
            # Skip stubs unless explicitly writing them out.
            if self.entries[name]._stub and not write_stubs:
                continue

            # Decide whether this entry is buried and/or saved at all.
            bury_entry = False
            save_entry = True
            if bury:
                (bury_entry, save_entry) = self.should_bury(name)

            if save_entry:
                save_name = self.entries[name].save(bury=bury_entry,
                                                    final=final)
                self.log.info("Saved {} to '{}'.".format(
                    name.ljust(20), save_name))
                if (gz and os.path.getsize(save_name) >
                        self.COMPRESS_ABOVE_FILESIZE):
                    save_name = compress_gz(save_name)
                    self.log.debug("Compressed '{}' to '{}'".format(
                        name, save_name))
                    # Swap the tracked file for its gzipped version.
                    # FIX: use subprocess
                    outdir, filename = os.path.split(save_name)
                    filename = filename.split('.')[0]
                    os.system('cd ' + outdir + '; git rm --cached ' +
                              filename + '.json; git add -f ' + filename +
                              '.json.gz; cd ' + self.PATHS.PATH_BASE)

        if clear:
            self.entries[name] = self.entries[name].get_stub()
            self.log.debug(
                "Entry for '{}' converted to stub".format(name))
    return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_preferred_names(self): """Choose between each entries given name and its possible aliases for the best one. """
def set_preferred_names(self):
    """Pick the best name for each entry from among its aliases."""
    if not len(self.entries):
        self.log.error("WARNING: `entries` is empty, loading stubs")
        self.load_stubs()

    task_str = self.get_current_task_str()
    for count, oname in enumerate(pbar(self.entries, task_str)):
        name = self.add_entry(oname)
        self.entries[name].set_preferred_name()
        # Travis runs only process a limited number of entries.
        if self.args.travis and count > self.TRAVIS_QUERY_LIMIT:
            break
    return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _prep_git_add_file_list(self, repo, size_limit, fail=True, file_types=None): """Get a list of files which should be added to the given repository. Notes ----- * Finds files in the *root* of the given repository path. * If `file_types` is given, only use those file types. * If an uncompressed file is above the `size_limit`, it is compressed. * If a compressed file is above the file limit, an error is raised (if `fail = True`) or it is skipped (if `fail == False`). Arguments --------- repo : str Path to repository size_limit : scalar fail : bool Raise an error if a compressed file is still above the size limit. file_types : list of str or None Exclusive list of file types to add. 'None' to add all filetypes. """
def _prep_git_add_file_list(self, repo, size_limit, fail=True,
                            file_types=None):
    """Build the list of files to `git add` in the *root* of `repo`.

    Oversized uncompressed files are gzipped in place; files that remain
    above `size_limit` after compression either raise an error
    (`fail=True`) or are skipped (`fail=False`).

    BUG FIX: byte-to-MB conversion previously divided by 1028 instead of
    1024 (log output only).

    Arguments
    ---------
    repo : str
        Path to repository.
    size_limit : scalar
        Maximum allowed file size in bytes.
    fail : bool
        Raise an error if a compressed file is still above the size limit.
    file_types : list of str or None
        Exclusive list of file types to add; 'None' to add all filetypes.

    Returns
    -------
    add_files : list of str
    """
    add_files = []
    if file_types is None:
        file_patterns = ['*']
    else:
        self.log.error(
            "WARNING: uncertain behavior with specified file types!")
        file_patterns = ['*.' + ft for ft in file_types]

    # Construct glob patterns for each file-type
    file_patterns = [os.path.join(repo, fp) for fp in file_patterns]
    for pattern in file_patterns:
        for ff in glob(pattern):
            fsize = os.path.getsize(ff)
            fname = str(ff)
            comp_failed = False
            # If the found file is too large
            if fsize > size_limit:
                self.log.debug("File '{}' size '{}' MB.".format(
                    fname, fsize / 1024 / 1024))
                # Already-compressed files cannot be shrunk further
                if ff.endswith('.gz'):
                    self.log.error(
                        "File '{}' is already compressed.".format(fname))
                    comp_failed = True
                # Not yet compressed - compress it
                else:
                    fname = compress_gz(fname)
                    fsize = os.path.getsize(fname)
                    self.log.info("Compressed to '{}', size '{}' MB".
                                  format(fname, fsize / 1024 / 1024))
                    # If still too big, fail or skip
                    if fsize > size_limit:
                        comp_failed = True

            # If compressed file is too large, skip file or raise error
            if comp_failed:
                if fail:
                    raise RuntimeError(
                        "File '{}' cannot be added!".format(fname))
                self.log.info("Skipping file.")
                continue

            # If everything is good, add file to list
            add_files.append(fname)

    return add_files
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download_url(self, url, timeout, fail=False, post=None, verify=True): """Download text from the given url. Returns `None` on failure. Arguments --------- self url : str URL web address to download. timeout : int Duration after which URL request should terminate. fail : bool If `True`, then an error will be raised on failure. If `False`, then 'None' is returned on failure. post : dict List of arguments to post to URL when requesting it. verify : bool Whether to check for valid SSL cert when downloading Returns ------- url_txt : str or None On success the text of the url is returned. On failure `None` is returned. """
def download_url(self, url, timeout, fail=False, post=None, verify=True):
    """Download text from the given url, returning `None` on failure.

    Arguments
    ---------
    url : str
        URL web address to download.
    timeout : int
        Duration after which URL request should terminate.
    fail : bool
        If `True`, raise on failure instead of returning None.
    post : dict
        Data to POST; a plain GET is issued when omitted.
    verify : bool
        Whether to check for valid SSL cert when downloading.

    Returns
    -------
    url_txt : str or None
    """
    _CODE_ERRORS = [500, 307, 404]
    import requests
    session = requests.Session()
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X '
                          '10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/39.0.2171.95 Safari/537.36'
        }
        if post:
            response = session.post(url, timeout=timeout, headers=headers,
                                    data=post, verify=verify)
        else:
            response = session.get(url, timeout=timeout, headers=headers,
                                   verify=verify)
        response.raise_for_status()

        # Also check each response in the redirect chain for errors.
        for xx in response.history:
            xx.raise_for_status()
            if xx.status_code in _CODE_ERRORS:
                self.log.error("URL response returned status code '{}'".
                               format(xx.status_code))
                raise

        url_txt = response.text
        self.log.debug("Task {}: Loaded `url_txt` from '{}'.".format(
            self.current_task.name, url))
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception as err:
        err_str = ("URL Download of '{}' failed ('{}')."
                   .format(url, str(err)))
        if fail:
            # Raise an error on failure
            err_str += " and `fail` is set."
            self.log.error(err_str)
            raise RuntimeError(err_str)
        # Otherwise log a warning and return None
        self.log.warning(err_str)
        return None
    return url_txt
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def append_sources_from(self, other): """Merge the source alias lists of two CatDicts."""
def append_sources_from(self, other):
    """Merge `other`'s source aliases into this CatDict's source list."""
    mine = self[self._KEYS.SOURCE].split(',')
    theirs = other[self._KEYS.SOURCE].split(',')
    # Store the de-duplicated union back onto `self`.
    self[self._KEYS.SOURCE] = uniq_cdl(mine + theirs)
    return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def current_task(self, args): """Name of current action for progress-bar output. The specific task string is depends on the configuration via `args`. Returns ------- ctask : str String representation of this task. """
def current_task(self, args):
    """Build the progress-bar label string for this task.

    The '%pre' placeholder is replaced with 'Updating' or 'Loading'
    depending on `args.update`; `args` may be None.
    """
    label = self.name if self.nice_name is None else self.nice_name
    if args is not None:
        prefix = 'Updating' if args.update else 'Loading'
        label = label.replace('%pre', prefix)
    return label
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_archive(self, args): """Whether previously archived data should be loaded. """
def load_archive(self, args):
    """Deprecated: report whether previously-archived data should load."""
    import warnings
    warnings.warn("`Task.load_archive()` is deprecated! "
                  "`Catalog.load_url` handles the same functionality.")
    # Archive loading is requested either per-task or globally via args.
    return self.archived or args.archived
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def git_add_commit_push_all_repos(cat): """Add all files in each data repository tree, commit, push. Creates a commit message based on the current catalog version info. If either the `git add` or `git push` commands fail, an error will be raised. Currently, if `commit` fails an error *WILL NOT* be raised because the `commit` command will return a nonzero exit status if FIX: improve the error checking on this. """
def git_add_commit_push_all_repos(cat):
    """Add, commit, and push every public data repository.

    The commit message embeds the current catalog version.  Failures of
    `git add` or `git push` raise; a failed `commit` does not, because
    `git commit` exits nonzero when there is nothing to commit.
    """
    log = cat.log
    log.debug("gitter.git_add_commit_push_all_repos()")

    # Do not commit/push private repos
    all_repos = cat.PATHS.get_all_repo_folders(private=False)
    for repo in all_repos:
        log.info("Repo in: '{}'".format(repo))
        # Record the starting SHA for reference.
        sha_beg = get_sha(repo)
        log.debug("Current SHA: '{}'".format(sha_beg))

        # Get files that should be added, compress and check sizes
        add_files = cat._prep_git_add_file_list(
            repo, cat.COMPRESS_ABOVE_FILESIZE)
        log.info("Found {} Files to add.".format(len(add_files)))
        if not len(add_files):
            continue

        try:
            # Add all files in the repository directory tree
            git_comm = ["git", "add"]
            if cat.args.travis:
                git_comm.append("-f")
            git_comm.extend(add_files)
            _call_command_in_repo(
                git_comm, repo, cat.log, fail=True, log_flag=False)

            # Commit these files
            commit_msg = "'push' - adding all files."
            commit_msg = "{} : {}".format(cat._version_long, commit_msg)
            log.info(commit_msg)
            git_comm = ["git", "commit", "-am", commit_msg]
            _call_command_in_repo(git_comm, repo, cat.log)

            # Push (skipped entirely on travis runs)
            git_comm = ["git", "push"]
            if not cat.args.travis:
                _call_command_in_repo(git_comm, repo, cat.log, fail=True)
        except Exception as err:
            # Unstage whatever was added before propagating the error.
            try:
                git_comm = ["git", "reset", "HEAD"]
                _call_command_in_repo(git_comm, repo, cat.log, fail=True)
            except:
                pass
            raise err
    return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def git_pull_all_repos(cat, strategy_recursive=True, strategy='theirs'): """Perform a 'git pull' in each data repository. > `git pull -s recursive -X theirs` """
def git_pull_all_repos(cat, strategy_recursive=True, strategy='theirs'):
    """Run `git pull` (optionally `-s recursive -X <strategy>`) everywhere."""
    log = cat.log
    log.debug("gitter.git_pull_all_repos()")
    log.warning("WARNING: using experimental `git_pull_all_repos()`!")

    all_repos = cat.PATHS.get_all_repo_folders()
    for repo_name in all_repos:
        log.info("Repo in: '{}'".format(repo_name))
        sha_beg = get_sha(repo_name)
        log.debug("Current SHA: '{}'".format(sha_beg))

        repo = git.Repo(repo_name)

        # Assemble the pull command with the requested merge strategy.
        git_comm = "git pull --verbose"
        if strategy_recursive:
            git_comm += " -s recursive"
        if strategy is not None:
            git_comm += " -X {:s}".format(strategy)
        log.debug("Calling '{}'".format(git_comm))

        # Execute git manually so the desired options can be used; errors
        # are handled below (`with_exceptions=False`).
        code, out, err = repo.git.execute(
            git_comm.split(),
            with_stdout=True,
            with_extended_output=True,
            with_exceptions=False)

        if len(out):
            log.info(out)
        if len(err):
            log.info(err)

        # A nonzero exit code is fatal.
        if code != 0:
            err_str = "Command '{}' returned exit code '{}'!".format(
                git_comm, code)
            err_str += "\n\tout: '{}'\n\terr: '{}'".format(out, err)
            log.error(err_str)
            raise RuntimeError(err_str)

        sha_end = get_sha(repo_name)
        if sha_end != sha_beg:
            log.info("Updated SHA: '{}'".format(sha_end))
    return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def git_clone_all_repos(cat): """Perform a 'git clone' for each data repository that doesnt exist. """
def git_clone_all_repos(cat):
    """Clone any data repository that does not already exist locally."""
    log = cat.log
    log.debug("gitter.git_clone_all_repos()")

    all_repos = cat.PATHS.get_all_repo_folders()
    out_repos = cat.PATHS.get_repo_output_folders()
    for repo in all_repos:
        log.info("Repo in: '{}'".format(repo))

        if os.path.isdir(repo):
            log.info("Directory exists.")
        else:
            log.debug("Cloning directory...")
            clone(repo, cat.log, depth=max(cat.args.clone_depth, 1))

        # Optionally purge JSON outputs from output repos after cloning.
        if cat.args.purge_outputs and repo in out_repos:
            for fil in glob(os.path.join(repo, '*.json')):
                os.remove(fil)

        # Sanity-check that the repo is usable.
        grepo = git.cmd.Git(repo)
        try:
            grepo.status()
        except git.GitCommandError:
            log.error("Repository does not exist!")
            raise

        sha_beg = get_sha(repo)
        log.debug("Current SHA: '{}'".format(sha_beg))
    return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def git_reset_all_repos(cat, hard=True, origin=False, clean=True): """Perform a 'git reset' in each data repository. """
def git_reset_all_repos(cat, hard=True, origin=False, clean=True):
    """Run `git fetch` + `git reset` (and optionally `git clean`) everywhere."""
    log = cat.log
    log.debug("gitter.git_reset_all_repos()")

    for repo in cat.PATHS.get_all_repo_folders():
        log.warning("Repo in: '{}'".format(repo))
        sha_beg = get_sha(repo)
        log.debug("Current SHA: '{}'".format(sha_beg))

        grepo = git.cmd.Git(repo)

        # Fetch first
        log.info("fetching")
        grepo.fetch()

        reset_args = []
        if hard:
            reset_args.append('--hard')
        if origin:
            reset_args.append('origin/master')
        log.info("resetting")
        retval = grepo.reset(*reset_args)
        if len(retval):
            log.warning("Git says: '{}'".format(retval))

        if clean:
            log.info("cleaning")
            # [q]uiet, [f]orce, [d]irectories
            retval = grepo.clean('-qdf')
            if len(retval):
                log.warning("Git says: '{}'".format(retval))

        sha_end = get_sha(repo)
        if sha_end != sha_beg:
            log.debug("Updated SHA: '{}'".format(sha_end))
    return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def git_status_all_repos(cat, hard=True, origin=False, clean=True): """Perform a 'git status' in each data repository. """
def git_status_all_repos(cat, hard=True, origin=False, clean=True):
    """Run `git fetch` + `git status` in each data repository."""
    log = cat.log
    log.debug("gitter.git_status_all_repos()")

    for repo_name in cat.PATHS.get_all_repo_folders():
        log.info("Repo in: '{}'".format(repo_name))
        sha_beg = get_sha(repo_name)
        log.debug("Current SHA: '{}'".format(sha_beg))

        log.info("Fetching")
        fetch(repo_name, log=cat.log)

        git_comm = ["git", "status"]
        _call_command_in_repo(
            git_comm, repo_name, cat.log, fail=True, log_flag=True)

        sha_end = get_sha(repo_name)
        if sha_end != sha_beg:
            log.info("Updated SHA: '{}'".format(sha_end))
    return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clone(repo, log, depth=1): """Given a list of repositories, make sure they're all cloned. Should be called from the subclassed `Catalog` objects, passed a list of specific repository names. Arguments --------- all_repos : list of str *Absolute* path specification of each target repository. """
def clone(repo, log, depth=1):
    """Clone a single astrocatalogs repository from GitHub.

    Arguments
    ---------
    repo : str
        *Absolute* path of the target repository.
    log : `logging.Logger` object
    depth : int
        Shallow-clone depth; zero (or negative) clones the full history.
    """
    kwargs = {}
    if depth > 0:
        kwargs['depth'] = depth

    try:
        # The GitHub URL is derived from the repository folder name.
        repo_name = os.path.split(repo)[-1]
        repo_name = "https://github.com/astrocatalogs/" + repo_name + ".git"
        log.warning("Cloning '{}' (only needs to be done ".format(repo) +
                    "once, may take few minutes per repo).")
        grepo = git.Repo.clone_from(repo_name, repo, **kwargs)
    except:
        log.error("CLONING '{}' INTERRUPTED".format(repo))
        raise
    return grepo
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _check(self): """Check that spectrum has legal combination of attributes."""
# Run the super method super(Spectrum, self)._check() err_str = None has_data = self._KEYS.DATA in self has_wave = self._KEYS.WAVELENGTHS in self has_flux = self._KEYS.FLUXES in self has_filename = self._KEYS.FILENAME in self if not has_data: if (not has_wave or not has_flux) and not has_filename: err_str = ( "If `{}` not given".format(self._KEYS.DATA) + "; `{}` or `{}` needed".format( self._KEYS.WAVELENGTHS, self._KEYS.FLUXES)) if err_str is not None: raise ValueError(err_str) return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_duplicate_of(self, other): """Check if spectrum is duplicate of another."""
if super(Spectrum, self).is_duplicate_of(other): return True row_matches = 0 for ri, row in enumerate(self.get(self._KEYS.DATA, [])): lambda1, flux1 = tuple(row[0:2]) if (self._KEYS.DATA not in other or ri > len(other[self._KEYS.DATA])): break lambda2, flux2 = tuple(other[self._KEYS.DATA][ri][0:2]) minlambdalen = min(len(lambda1), len(lambda2)) minfluxlen = min(len(flux1), len(flux2)) if (lambda1[:minlambdalen + 1] == lambda2[:minlambdalen + 1] and flux1[:minfluxlen + 1] == flux2[:minfluxlen + 1] and float(flux1[:minfluxlen + 1]) != 0.0): row_matches += 1 # Five row matches should be enough to be sure spectrum is a dupe. if row_matches >= 5: return True # Matches need to happen in the first 10 rows. if ri >= 10: break return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sort_func(self, key): """Logic for sorting keys in a `Spectrum` relative to one another."""
if key == self._KEYS.TIME: return 'aaa' if key == self._KEYS.DATA: return 'zzy' if key == self._KEYS.SOURCE: return 'zzz' return key
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sort_func(self, key): """Sorting logic for `Quantity` objects."""
if key == self._KEYS.VALUE: return 'aaa' if key == self._KEYS.SOURCE: return 'zzz' return key
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pretty(self): """Return a 'pretty' string representation of this `Key`. note: do not override the builtin `__str__` or `__repr__` methods! """
retval = ("Key(name={}, type={}, listable={}, compare={}, " "priority={}, kind_preference={}, " "replace_better={})").format( self.name, self.type, self.listable, self.compare, self.priority, self.kind_preference, self.replace_better) return retval
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check(self, val): """Make sure given value is consistent with this `Key` specification. NOTE: if `type` is 'None', then `listable` also is *not* checked. """
# If there is no `type` requirement, everything is allowed if self.type is None: return True is_list = isinstance(val, list) # If lists are not allowed, and this is a list --> false if not self.listable and is_list: return False # `is_number` already checks for either list or single value if self.type == KEY_TYPES.NUMERIC and not is_number(val): return False elif (self.type == KEY_TYPES.TIME and not is_number(val) and '-' not in val and '/' not in val): return False elif self.type == KEY_TYPES.STRING: # If its a list, check first element if is_list: if not isinstance(val[0], basestring): return False # Otherwise, check it elif not isinstance(val, basestring): return False elif self.type == KEY_TYPES.BOOL: if is_list and not isinstance(val[0], bool): return False elif not isinstance(val, bool): return False return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_logger(name=None, stream_fmt=None, file_fmt=None, date_fmt=None, stream_level=None, file_level=None, tofile=None, tostr=True): """Create a standard logger object which logs to file and or stdout stream. If a logger has already been created in this session, it is returned (unless `name` is given). Arguments --------- name : str, Handle for this logger, must be distinct for a distinct logger. stream_fmt : str or `None`, Format of log messages to stream (stdout). If `None`, default settings are used. file_fmt : str or `None`, Format of log messages to file. If `None`, default settings are used. date_fmt : str or `None` Format of time stamps to stream and/or file. If `None`, default settings are used. stream_level : int, Logging level for stream. file_level : int, Logging level for file. tofile : str or `None`, Filename to log to (turned off if `None`). tostr : bool, Log to stdout stream. Returns ------- logger : ``logging.Logger`` object, Logger object to use for logging. """
# A logger must have at least one destination.
if tofile is None and not tostr:
    raise ValueError(
        "Must log to something: `tofile` or `tostr` must be `True`.")

logger = logging.getLogger(name)
# Add a custom attribute to this `logger` so that we know when an existing
# one is being returned
if hasattr(logger, '_OSC_LOGGER'):
    return logger
else:
    logger._OSC_LOGGER = True
    # Set other custom parameters
    logger._LOADED = _LOADED_LEVEL

# Make sure handlers don't get duplicated (ipython issue)
while len(logger.handlers) > 0:
    logger.handlers.pop()
# Prevents duplication or something something...
logger.propagate = 0

# Determine and Set Logging Levels
if file_level is None:
    file_level = _FILE_LEVEL_DEF
if stream_level is None:
    stream_level = _STREAM_LEVEL_DEF
# Logger object must be at minimum level (handlers filter upward from it)
logger.setLevel(int(np.min([file_level, stream_level])))

if date_fmt is None:
    date_fmt = '%Y/%m/%d %H:%M:%S'

# Log to file
# -----------
if tofile is not None:
    if file_fmt is None:
        # Wide, timestamped format with filename/function context.
        file_fmt = "%(asctime)s %(levelname)8.8s [%(filename)20.20s:"
        file_fmt += "%(funcName)-20.20s]%(indent)s%(message)s"

    fileFormatter = IndentFormatter(file_fmt, datefmt=date_fmt)
    # Mode 'w' truncates any previous log at this path.
    fileHandler = logging.FileHandler(tofile, 'w')
    fileHandler.setFormatter(fileFormatter)
    fileHandler.setLevel(file_level)
    logger.addHandler(fileHandler)
    # Store output filename to `logger` object
    logger.filename = tofile

# Log To stdout
# -------------
if tostr:
    if stream_fmt is None:
        # Stream output is terse: just the (indented) message.
        stream_fmt = "%(indent)s%(message)s"

    strFormatter = IndentFormatter(stream_fmt, datefmt=date_fmt)
    strHandler = logging.StreamHandler()
    strHandler.setFormatter(strFormatter)
    strHandler.setLevel(stream_level)
    logger.addHandler(strHandler)

return logger
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def log_raise(log, err_str, err_type=RuntimeError): """Log an error message and raise an error. Arguments --------- log : `logging.Logger` object err_str : str Error message to be logged and raised. err_type : `Exception` object Type of error to raise. """
log.error(err_str)
# StreamHandlers flush automatically, but FileHandlers do not — force a
# flush on every handler so the message hits disk before the raise.
for handler in log.handlers:
    handler.flush()
# Raise the requested error type with the same message that was logged.
raise err_type(err_str)