hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6cd9c08fed53bacc9f91d2a45b30a6403b5d3f2d | 9,855 | py | Python | scripts/multimodal_human_provider_node.py | YoanSallami/multimodal_human_provider | 02b07297fd805ed6c4ca63520b6117897d23df3a | [
"BSD-3-Clause"
] | null | null | null | scripts/multimodal_human_provider_node.py | YoanSallami/multimodal_human_provider | 02b07297fd805ed6c4ca63520b6117897d23df3a | [
"BSD-3-Clause"
] | null | null | null | scripts/multimodal_human_provider_node.py | YoanSallami/multimodal_human_provider | 02b07297fd805ed6c4ca63520b6117897d23df3a | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import sys
import rospy
import re
import time
import numpy
import argparse
import math
import underworlds
import tf2_ros
from underworlds.helpers.geometry import get_world_transform
from underworlds.tools.loader import ModelLoader
from underworlds.helpers.transformations import translation_matrix, quaternion_matrix, euler_matrix, translation_from_matrix
from multimodal_human_provider.msg import GazeInfoArray
from underworlds.types import Camera, Mesh, MESH, Situation
# How long (seconds) the tf2 buffer keeps transforms (see tfBuffer below).
TF_CACHE_TIME = 5.0
# Default camera frustum parameters for the per-human point-of-view Camera
# nodes created in create_human_pov().
DEFAULT_CLIP_PLANE_NEAR = 0.01
DEFAULT_CLIP_PLANE_FAR = 1000.0
DEFAULT_HORIZONTAL_FOV = 50.0
DEFAULT_ASPECT = 1.33333
# Probability threshold for "looking at robot" (only used by code that is
# currently commented out in callbackGaze).
LOOK_AT_THRESHOLD = 0.7
# Minimum number of gaze detections before a human is published.
MIN_NB_DETECTION = 3
# Detections closer than this (meters) to the sensor are discarded.
MIN_DIST_DETECTION = 0.2
# Detections higher than this (meters) above the reference frame are discarded.
MAX_HEIGHT = 2.5
# just for convenience
def strip_leading_slash(s):
    """Return *s* with at most one leading "/" removed (tf2 frame names)."""
    if s.startswith("/"):
        return s[1:]
    return s
# just for convenience
def transformation_matrix(t, q):
    """Compose a 4x4 homogeneous transform from translation *t* and quaternion *q*."""
    return numpy.dot(translation_matrix(t), quaternion_matrix(q))
class MultimodalHumanProvider(object):
    """Publish detected humans from WP2 gaze messages into an Underworlds world.

    Subscribes to ``/wp2/gaze`` and, for each sufficiently stable detection,
    maintains one Camera node (the human's point of view) plus a child face
    Mesh node in the target world.
    """

    def __init__(self, ctx, output_world, mesh_dir, reference_frame):
        """Set up the ROS subscription, load the face mesh, create a TF buffer.

        @param ctx: Underworlds context
        @param output_world: name of the Underworlds world to write nodes into
        @param mesh_dir: directory containing ``face.blend``
        @param reference_frame: TF frame used as the world reference
        """
        self.ros_subscriber = {"gaze": rospy.Subscriber("/wp2/gaze", GazeInfoArray, self.callbackGaze)}
        self.human_cameras_ids = {}  # human_id -> Underworlds Camera node id
        self.ctx = ctx
        self.human_bodies = {}  # human_id -> {body part name -> node id}
        self.target = ctx.worlds[output_world]
        self.target_world_name = output_world
        self.reference_frame = reference_frame
        self.mesh_dir = mesh_dir
        self.human_meshes = {}  # body part name -> mesh ids of the loaded model
        self.human_aabb = {}  # body part name -> axis-aligned bounding box
        self.nb_gaze_detected = {}  # human_id -> number of gaze detections seen
        self.added_human_id = []  # ids for which a node was actually published
        self.detection_time = None
        self.reco_durations = []  # recognition durations, dumped as CSV in run()
        self.record_time = False
        self.robot_name = rospy.get_param("robot_name", "pepper")
        self.already_removed_nodes = []
        nodes_loaded = []
        try:
            nodes_loaded = ModelLoader().load(self.mesh_dir + "face.blend", self.ctx,
                                              world=output_world, root=None, only_meshes=True,
                                              scale=1.0)
        except Exception as e:
            rospy.logwarn("[multimodal_human_provider] Exception occurred with %s : %s" % (self.mesh_dir + "face.blend", str(e)))
        # Keep only the mesh data of the loaded model for the face nodes
        # created later in callbackGaze().
        for n in nodes_loaded:
            if n.type == MESH:
                self.human_meshes["face"] = n.properties["mesh_ids"]
                self.human_aabb["face"] = n.properties["aabb"]
        self.tfBuffer = tf2_ros.Buffer(rospy.Duration(TF_CACHE_TIME), debug=False)
        self.listener = tf2_ros.TransformListener(self.tfBuffer)

    def create_human_pov(self, id):
        """Return a new Camera node representing the point of view of human *id*."""
        new_node = Camera(name="human-" + str(id))
        new_node.properties["clipplanenear"] = DEFAULT_CLIP_PLANE_NEAR
        new_node.properties["clipplanefar"] = DEFAULT_CLIP_PLANE_FAR
        new_node.properties["horizontalfov"] = math.radians(DEFAULT_HORIZONTAL_FOV)
        new_node.properties["aspect"] = DEFAULT_ASPECT
        new_node.parent = self.target.scene.rootnode.id
        return new_node

    def getLatestCommonTime(self, source_frame, dest_frame):
        """
        This is here to provide compatibility with tf2 without having a dependency with old tf
        See : /opt/ros/kinetic/lib/python2.7/dist-packages/tf/listener.py
        @param source_frame: TF source frame name (a leading "/" is tolerated)
        @param dest_frame: TF destination frame name (a leading "/" is tolerated)
        @return : latest time at which both frames are known to the buffer
        """
        return self.tfBuffer.get_latest_common_time(strip_leading_slash(source_frame), strip_leading_slash(dest_frame))

    def lookupTransform(self, target_frame, source_frame, time):
        """
        This is here to provide compatibility with tf2 without having a dependency with old tf
        See : /opt/ros/kinetic/lib/python2.7/dist-packages/tf/listener.py
        @param target_frame: frame to transform into
        @param source_frame: frame to transform from
        @param time: ROS time of the requested transform
        @return : ([x, y, z] translation, [x, y, z, w] quaternion)
        """
        msg = self.tfBuffer.lookup_transform(strip_leading_slash(target_frame), strip_leading_slash(source_frame), time)
        t = msg.transform.translation
        r = msg.transform.rotation
        return [t.x, t.y, t.z], [r.x, r.y, r.z, r.w]

    def callbackGaze(self, msg):
        """Handle one GazeInfoArray: create/update human POV and face nodes."""
        nodes_to_update = []
        if msg.data:
            for i, gaze in enumerate(msg.data):
                human_id = gaze.person_id
                track_id = gaze.track_id
                # Count how many times this person has been detected so far.
                if human_id not in self.nb_gaze_detected:
                    self.nb_gaze_detected[human_id] = 0
                else:
                    self.nb_gaze_detected[human_id] += 1
                # Track how long a person keeps the same track id (recognition
                # duration statistics are dumped to CSV in run()).
                if track_id == human_id:
                    self.detection_time = time.time()
                    self.record_time = True
                else:
                    if self.record_time:
                        self.reco_durations.append(time.time() - self.detection_time)
                    self.record_time = False
                # Only publish once the detection is stable enough.
                if gaze.head_gaze_available and self.nb_gaze_detected[human_id] > MIN_NB_DETECTION:
                    new_node = self.create_human_pov(human_id)
                    # Reuse the existing node id so the scene node is updated
                    # rather than duplicated.
                    if human_id in self.human_cameras_ids:
                        new_node.id = self.human_cameras_ids[human_id]
                    else:
                        self.human_cameras_ids[human_id] = new_node.id
                    t = [gaze.head_gaze.position.x, gaze.head_gaze.position.y, gaze.head_gaze.position.z]
                    q = [gaze.head_gaze.orientation.x, gaze.head_gaze.orientation.y, gaze.head_gaze.orientation.z, gaze.head_gaze.orientation.w]
                    # Discard detections unrealistically close to the sensor.
                    if math.sqrt(t[0]*t[0]+t[1]*t[1]+t[2]*t[2]) < MIN_DIST_DETECTION:
                        continue
                    (trans, rot) = self.lookupTransform(self.reference_frame, msg.header.frame_id, rospy.Time(0))
                    offset = euler_matrix(0, math.radians(90), math.radians(90), 'rxyz')
                    transform = numpy.dot(transformation_matrix(trans, rot), transformation_matrix(t, q))
                    new_node.transformation = numpy.dot(transform, offset)
                    # Discard detections higher than a plausible human height.
                    if translation_from_matrix(new_node.transformation)[2] > MAX_HEIGHT:
                        continue
                    self.added_human_id.append(human_id)
                    nodes_to_update.append(new_node)
                    if human_id not in self.human_bodies:
                        self.human_bodies[human_id] = {}
                    if "face" not in self.human_bodies[human_id]:
                        # First publication for this human: attach a face mesh
                        # to the POV camera node.
                        new_node = Mesh(name="human_face-"+str(human_id))
                        new_node.properties["mesh_ids"] = self.human_meshes["face"]
                        new_node.properties["aabb"] = self.human_aabb["face"]
                        new_node.parent = self.human_cameras_ids[human_id]
                        offset = euler_matrix(math.radians(90), math.radians(0), math.radians(90), 'rxyz')
                        new_node.transformation = numpy.dot(new_node.transformation, offset)
                        self.human_bodies[human_id]["face"] = new_node.id
                        nodes_to_update.append(new_node)
                    #if gaze.probability_looking_at_robot >= LOOK_AT_THRESHOLD:
                    #    self.target.timeline.start(Situation(desc="lookat(human-%s,%s)" % (str(gaze.person_id), self.robot_name)))
        if nodes_to_update:
            self.target.scene.nodes.update(nodes_to_update)

    def clean_humans(self):
        """Remove human nodes (and their children) not updated for > 5 seconds.

        NOTE(review): not called anywhere in this file — confirm whether it is
        invoked externally or was meant to run periodically.
        """
        nodes_to_remove = []
        for node in self.target.scene.nodes:
            if node not in self.already_removed_nodes:
                if re.match("human-", node.name):
                    if time.time() - node.last_update > 5.0:
                        nodes_to_remove.append(node)
                        for child in node.children:
                            nodes_to_remove.append(self.target.scene.nodes[child])
        if nodes_to_remove:
            rospy.logwarn(nodes_to_remove)
            self.already_removed_nodes = nodes_to_remove
            self.target.scene.nodes.remove(nodes_to_remove)

    def run(self):
        """Block until ROS shutdown, then dump detection statistics as CSV.

        NOTE(review): the while/pass loop busy-waits at full CPU;
        ``rospy.spin()`` would be the conventional alternative — confirm
        before changing.
        """
        while not rospy.is_shutdown():
            pass
        import csv
        with open("/home/ysallami/stat.csv", "w") as csvfile:
            fieldnames = ["human_id", "nb_detection", "is_human"]
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            for human_id, nb_detect in self.nb_gaze_detected.items():
                writer.writerow({"human_id": int(human_id), "nb_detection": int(nb_detect),
                                 "is_human": 1 if human_id in self.added_human_id else 0})
            csvfile.close()  # redundant: the with-statement already closes it
        with open("/home/ysallami/duration_stat.csv", "w") as csvfile2:
            fieldnames = ["reco_durations"]
            writer = csv.DictWriter(csvfile2, fieldnames=fieldnames)
            writer.writeheader()
            for duration in self.reco_durations:
                writer.writerow({"reco_durations": duration})
            csvfile2.close()  # redundant: the with-statement already closes it
if __name__ == "__main__":
    # Strip the ROS remapping arguments (__name:=..., __log:=...) so that
    # argparse only sees the real command-line options.
    sys.argv = [argument for argument in sys.argv
                if "__name" not in argument and "__log" not in argument]
    sys.argc = len(sys.argv)
    cli_parser = argparse.ArgumentParser(description="Add in the given output world, the nodes from input "
                                                     "world and the robot agent from ROS")
    cli_parser.add_argument("output_world", help="Underworlds output world")
    cli_parser.add_argument("mesh_dir", help="The path used to localize the human meshes")
    cli_parser.add_argument("--reference", default="map", help="The reference frame")
    cli_args = cli_parser.parse_args()
    rospy.init_node('multimodal_human_provider', anonymous=False)
    with underworlds.Context("Multimodal human provider") as ctx:
        MultimodalHumanProvider(ctx, cli_args.output_world, cli_args.mesh_dir, cli_args.reference).run()
| 41.407563 | 144 | 0.617757 | 7,973 | 0.809031 | 0 | 0 | 0 | 0 | 0 | 0 | 1,558 | 0.158092 |
6cda2551861539e7955fba8e1b052ccf729d24d4 | 3,171 | py | Python | execute.py | ikeban/InvoicesSender | 7eae2b0b201c91f31ee65bf64779a778d98bfa5e | [
"MIT"
] | null | null | null | execute.py | ikeban/InvoicesSender | 7eae2b0b201c91f31ee65bf64779a778d98bfa5e | [
"MIT"
] | null | null | null | execute.py | ikeban/InvoicesSender | 7eae2b0b201c91f31ee65bf64779a778d98bfa5e | [
"MIT"
] | null | null | null | import code.PdfReader as PdfReaderModule
import code.ExcelReader as ExcelReader
import code.TemplateParser as TemplateParser
import code.PdfAnalyzer as PdfAnalyzer
import code.EmailSender as EmailSender
def getFileContent(fileName):
    """Read the whole UTF-8 text file at *fileName* and return its contents."""
    with open(fileName, encoding="utf-8") as handle:
        return handle.read()
def main():
    """Run the invoice-sending pipeline end to end.

    Reads control data from Excel, matches parsed PDF invoices against each
    recipient, fills the per-recipient e-mail template, and finally sends
    (or replies to) one e-mail per recipient with the matching PDFs
    attached.  The user must confirm twice on the console before anything
    is sent.
    """
    # TODO Do not forget, to remind user, that [MONTH] should be updated before continueing!
    print("If you use [MONTH] in you template, don't forget to update it in InvoiceSenderControl.xlsx")
    input("Press Enter to continue... (close window with script to CANCEL)")
    print("Parsing excel...")
    excelReader = ExcelReader.ExcelReader()
    excelContent = excelReader.getData()
    excelSmtpData = excelReader.getSmtpData()
    print("Parsing pdf...")
    pdfReader = PdfReaderModule.PdfReader()
    pdfFileNameToItsContentMap = pdfReader.getReadedInvoicesMap()
    print("Searching pdfs...")
    pdfAnalyzer = PdfAnalyzer.PdfAnalyzer(pdfFileNameToItsContentMap)
    # Build (address, subject, body, attachments, message id) tuples;
    # recipients without matching invoices or a usable template are skipped.
    emailContentAttachmentList = []
    for (invoiceText, emailAddress, templateName, keyWordMap, emailSubject, messageId) in excelContent:
        invoicesToAttach = pdfAnalyzer.searchSentenceAndUpdateStats(invoiceText)
        if len(invoicesToAttach) == 0:
            print("No invoices for: " + emailAddress + " SKIPPING!")
            continue
        templateContent = getFileContent("emailTemplates/" + templateName)
        if templateContent is None or templateContent == "":
            print("template not existing or empty for: " + emailAddress + " SKIPPING!")
            continue
        templateParser = TemplateParser.TemplateParser(templateContent, keyWordMap)
        emailFilledTemplate = templateParser.getFilledTemplate()
        emailContentAttachmentList.append( (emailAddress, emailSubject, emailFilledTemplate, invoicesToAttach, messageId) )
    # Show a summary so the user can abort before any mail goes out.
    print("What will be sent:")
    for (emailAddress, emailSubject, emailFilledTemplate, invoicesToAttach, messageId) in emailContentAttachmentList:
        print("To " + emailAddress + " will be send " + str(invoicesToAttach))
    print("Checking if all PDFs can be delivered:")
    pdfAnalyzer.dropStatistics()
    input("Press Enter to send emails.. (close window with script to CANCEL)")
    print("Sending emails...")
    (smtpAddress, smtpPort, ownerEmail, ownerPassword) = excelSmtpData
    emailSender = EmailSender.EmailSender(smtpAddress, smtpPort, ownerEmail, ownerPassword)
    for (emailAddress, emailSubject, emailFilledTemplate, invoicesToAttach, messageId) in emailContentAttachmentList:
        # An empty message id means a fresh e-mail; otherwise reply in-thread.
        if messageId == None or messageId == "":
            emailSender.sendEmail(emailAddress, emailSubject, emailFilledTemplate, invoicesToAttach)
            print("Sent an email to " + emailAddress + " with " + str(invoicesToAttach))
        else:
            emailSender.replayEmail(emailAddress, emailSubject, emailFilledTemplate, invoicesToAttach, messageId)
            print("Sent response to " + emailAddress + " with " + str(invoicesToAttach))
    emailSender.close()
# Script entry point.
if __name__ == '__main__':
    main()
| 45.3 | 123 | 0.71397 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 640 | 0.201829 |
6cdb367c92bfbf774391e43f4ed3d4e6b4218ff8 | 1,146 | py | Python | pyclsload/__init__.py | nbdy/pyclsload | 7eafcef6562f5247720a6e634b90dc37bc7ef246 | [
"MIT"
] | null | null | null | pyclsload/__init__.py | nbdy/pyclsload | 7eafcef6562f5247720a6e634b90dc37bc7ef246 | [
"MIT"
] | null | null | null | pyclsload/__init__.py | nbdy/pyclsload | 7eafcef6562f5247720a6e634b90dc37bc7ef246 | [
"MIT"
] | null | null | null | from importlib.util import spec_from_file_location, module_from_spec
from os import listdir
from os.path import join
def load_cls(file_path: str, class_name: str):
    """Load and return class ``class_name`` from the Python file at ``file_path``.

    Raises:
        ImportError: if the path cannot be loaded as a module (previously an
            opaque ``AttributeError`` on ``None``).
        KeyError: if the executed module does not define ``class_name``.
    """
    spec = spec_from_file_location(class_name, file_path)
    # spec_from_file_location returns None for unloadable paths; fail with a
    # clear error instead of crashing on spec.loader below.
    if spec is None or spec.loader is None:
        raise ImportError("cannot load a module from %r" % file_path)
    module = module_from_spec(spec)
    spec.loader.exec_module(module)
    return module.__dict__[class_name]
def load_dir(directory: str) -> list:
    """Load one class per ``*.py`` file in *directory* and return them as a list.

    Each file is expected to define a class named after the file's stem
    (``foo.py`` -> ``Foo`` must be spelled ``foo.py`` -> class ``foo``).
    Non-Python entries (subdirectories, data files) are skipped; the previous
    implementation crashed on them and mis-derived the name for dotted
    filenames via ``split(".")[0]``.
    """
    classes = []
    for fn in listdir(directory):
        if not fn.endswith(".py"):
            continue
        # fn[:-3] strips exactly the ".py" suffix, unlike split(".")[0].
        classes.append(load_cls(join(directory, fn), fn[:-3]))
    return classes
def init_cls(file_path: str, class_name: str, *args, **kwargs):
    """Load ``class_name`` from ``file_path`` and return an instance of it.

    Bug fix: ``load_cls`` already returns the class object, so the previous
    ``m.__dict__[class_name]`` lookup searched the class's own ``__dict__``
    for its own name and raised ``KeyError``; call the class directly.
    """
    cls = load_cls(file_path, class_name)
    return cls(*args, **kwargs)
class Cls(object):
    """Wrapper that loads a class from a file, instantiates it and proxies calls."""

    # Wrapped instance; populated by __init__.
    cls = None

    def __init__(self, file_path: str, class_name: str, *args, **kwargs):
        # Instantiate the target class and remember everything needed to
        # identify / recreate it later.
        self.cls = init_cls(file_path, class_name, *args, **kwargs)
        self.file_path = file_path
        self.class_name = class_name
        self.args = args
        self.kwargs = kwargs

    def call(self, name: str, *args, **kwargs):
        """Invoke method *name* on the wrapped instance; None when absent."""
        if not hasattr(self.cls, name):
            return None
        return getattr(self.cls, name)(*args, **kwargs)
__all__ = ['load_cls', 'init_cls', 'Cls']
| 27.285714 | 73 | 0.657941 | 467 | 0.407504 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.024433 |
6cdc218d274321607edc4aee95141af773ac7b6c | 681 | py | Python | electionleaflets/apps/constituencies/templatetags/constituency_tags.py | electionleaflets/electionleaflets | 4110e96a3035c32d0b6ff3c9f832c5e003728170 | [
"MIT"
] | null | null | null | electionleaflets/apps/constituencies/templatetags/constituency_tags.py | electionleaflets/electionleaflets | 4110e96a3035c32d0b6ff3c9f832c5e003728170 | [
"MIT"
] | 23 | 2015-02-19T14:02:23.000Z | 2015-04-30T11:14:01.000Z | electionleaflets/apps/constituencies/templatetags/constituency_tags.py | electionleaflets/electionleaflets | 4110e96a3035c32d0b6ff3c9f832c5e003728170 | [
"MIT"
] | 2 | 2015-02-02T19:39:54.000Z | 2017-02-08T09:19:53.000Z | from django import template
from django.conf import settings
from leaflets.models import Leaflet
from constituencies.models import Constituency
register = template.Library()
@register.inclusion_tag('constituencies/ordered_list.html')
def constituency_list_by_count():
    """Render the ten constituencies with the highest leaflet counts."""
    top_constituencies = Constituency.objects.all().order_by('-count')[:10]
    return {'MEDIA_URL': settings.MEDIA_URL, 'constituencies': top_constituencies}
@register.inclusion_tag('constituencies/zero_entry_list.html')
def constituency_list_with_none():
    """Render up to ten constituencies that have no leaflets yet."""
    empty_constituencies = Constituency.objects.filter(count=0).all()[:10]
    return {'MEDIA_URL': settings.MEDIA_URL, 'constituencies': empty_constituencies}
6cdcdbaf3c47d1cc8b0a7c248d4e481fb6a439cc | 8,439 | py | Python | kolibri_zim_plugin/views.py | endlessm/kolibri-zim-plugin | 1fb4fc26e2c84938f5e134e1e7a9361a4130f30b | [
"MIT"
] | null | null | null | kolibri_zim_plugin/views.py | endlessm/kolibri-zim-plugin | 1fb4fc26e2c84938f5e134e1e7a9361a4130f30b | [
"MIT"
] | 5 | 2021-06-02T19:54:48.000Z | 2022-02-08T18:13:58.000Z | kolibri_zim_plugin/views.py | endlessm/kolibri-zim-plugin | 1fb4fc26e2c84938f5e134e1e7a9361a4130f30b | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
import textwrap
import time
import bs4
from django.core.urlresolvers import get_resolver
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.http import HttpResponseNotFound
from django.http import HttpResponseNotModified
from django.http import HttpResponseRedirect
from django.http import HttpResponseServerError
from django.http import JsonResponse
from django.utils.cache import patch_response_headers
from django.utils.http import http_date
from django.views import View
from kolibri.core.content.utils.paths import get_content_storage_file_path
from zimply_core.zim_core import to_bytes
from zimply_core.zim_core import ZIMClient
# This provides an API similar to the zipfile view in Kolibri core's zip_wsgi.
# In the future, we should replace this with a change adding Zim file support
# in the same place: <https://github.com/endlessm/kolibri/pull/3>.
#
# We are avoiding Django REST Framework here in case this code needs to be
# moved to the alternative zip_wsgi server.
# Client-side cache lifetime applied to immutable responses (see
# _ImmutableViewMixin below).
YEAR_IN_SECONDS = 60 * 60 * 24 * 365
# Default maximum length (characters) of a search-result snippet.
SNIPPET_MAX_CHARS = 280
class ZimFileNotFoundError(Exception):
    """Raised when the requested Zim file is absent from content storage."""
    pass
class ZimFileReadError(Exception):
    """Raised when an existing Zim file cannot be opened or parsed."""
    pass
class _ZimFileViewMixin(View):
    """Mixin resolving the ``zim_filename`` URL kwarg into ``self.zim_file``.

    Subclasses may override ``zim_client_args`` to tune how the ZIMClient is
    constructed (e.g. enabling full-text search).
    """

    zim_client_args = {"enable_search": False}

    def dispatch(self, request, *args, **kwargs):
        """Open the requested Zim file, mapping failures to HTTP errors."""
        zim_filename = kwargs["zim_filename"]
        try:
            self.zim_file = self.__get_zim_file(zim_filename)
        except ZimFileNotFoundError:
            return HttpResponseNotFound("Zim file does not exist")
        except ZimFileReadError:
            return HttpResponseServerError("Error reading Zim file")
        return super(_ZimFileViewMixin, self).dispatch(request, *args, **kwargs)

    def __get_zim_file(self, zim_filename):
        """Return a ZIMClient for *zim_filename* in Kolibri's content storage.

        Raises ZimFileNotFoundError if the file is missing, ZimFileReadError
        if zimply cannot open it.
        """
        zim_file_path = get_content_storage_file_path(zim_filename)
        if not os.path.exists(zim_file_path):
            raise ZimFileNotFoundError()
        # ZIMClient raises RuntimeError on unreadable/corrupt files.
        try:
            # A ZIMClient requires an encoding (usually UTF-8). The
            # auto_delete property only applies to an FTS index and will
            # automagically recreate an index if any issues are detected.
            zim_file = ZIMClient(
                zim_file_path,
                encoding="utf-8",
                auto_delete=True,
                **self.zim_client_args
            )
        except RuntimeError as error:
            raise ZimFileReadError(str(error))
        return zim_file
class _ImmutableViewMixin(View):
    """Mixin for responses that never change.

    Answers conditional GETs with 304 Not Modified and stamps successful GET
    responses with year-long cache headers.
    """

    def dispatch(self, request, *args, **kwargs):
        if request.method != "GET":
            return super(_ImmutableViewMixin, self).dispatch(request, *args, **kwargs)
        if request.META.get("HTTP_IF_MODIFIED_SINCE"):
            # The content is immutable, so any cached copy is still valid.
            return HttpResponseNotModified()
        response = super(_ImmutableViewMixin, self).dispatch(request, *args, **kwargs)
        if response.status_code == 200:
            patch_response_headers(response, cache_timeout=YEAR_IN_SECONDS)
        return response
class ZimIndexView(_ImmutableViewMixin, _ZimFileViewMixin, View):
    """Redirect the Zim file's root URL to its main-page article."""

    http_method_names = ("get", "options")

    def get(self, request, zim_filename):
        main_page = self.zim_file.main_page
        if main_page is None:
            return HttpResponseNotFound("Article does not exist")
        return HttpResponseRedirect(
            _zim_article_url(request, zim_filename, main_page.full_url)
        )
class ZimArticleView(_ImmutableViewMixin, _ZimFileViewMixin, View):
    """Serve a single article from inside a Zim file."""

    http_method_names = ("get", "options")

    def get(self, request, zim_filename, zim_article_path):
        try:
            if zim_article_path:
                article = self.zim_file.get_article(zim_article_path)
            else:
                # Empty path means the Zim file's main page.
                article = self.zim_file.main_page
            return self._get_response_for_article(article)
        except KeyError:
            return HttpResponseNotFound("Article does not exist")

    @staticmethod
    def _get_response_for_article(article):
        """Build an HTTP response carrying *article*'s bytes and mimetype."""
        if article is None:
            return HttpResponseNotFound("Article does not exist")
        payload = to_bytes(article.data, "utf-8")
        response = HttpResponse()
        response["Content-Length"] = len(payload)
        # Ensure the browser knows not to try byte-range requests, as we don't support them here
        response["Accept-Ranges"] = "none"
        response["Last-Modified"] = http_date(time.time())
        response["Content-Type"] = article.mimetype
        response.write(payload)
        return response
class ZimRandomArticleView(_ZimFileViewMixin, View):
    """Redirect to a randomly chosen article inside the Zim file."""

    http_method_names = ("get", "options")

    def get(self, request, zim_filename):
        random_path = self.zim_file.random_article_url
        return HttpResponseRedirect(
            _zim_article_url(request, zim_filename, random_path)
        )
class ZimSearchView(_ZimFileViewMixin, View):
    """Full-text search (or title suggestions) within a single Zim file.

    Query parameters: ``query`` (required), ``suggest`` (flag), ``start``,
    ``max_results`` and ``snippet_length`` (ints).  Returns JSON with
    ``articles`` and the total ``count``.
    """

    zim_client_args = {"enable_search": True}

    MAX_RESULTS_MAXIMUM = 100

    def get(self, request, zim_filename):
        query = request.GET.get("query")
        suggest = "suggest" in request.GET
        start = request.GET.get("start", 0)
        max_results = request.GET.get("max_results", 30)
        if suggest:
            snippet_length = None
        else:
            snippet_length = request.GET.get("snippet_length", SNIPPET_MAX_CHARS)
        if not query:
            return HttpResponseBadRequest('Missing "query"')
        try:
            start = int(start)
        except ValueError:
            return HttpResponseBadRequest('Invalid "start"')
        try:
            max_results = int(max_results)
        except ValueError:
            return HttpResponseBadRequest('Invalid "max_results"')
        if max_results < 0 or max_results > self.MAX_RESULTS_MAXIMUM:
            return HttpResponseBadRequest('Invalid "max_results"')
        # Bug fix: query-string values arrive as strings, and textwrap.shorten
        # (inside _html_snippet) requires an int width — validate and convert
        # snippet_length just like "start" and "max_results" above.
        if snippet_length is not None:
            try:
                snippet_length = int(snippet_length)
            except ValueError:
                return HttpResponseBadRequest('Invalid "snippet_length"')
        # This results in a list of SearchResult objects ordered by their
        # score (lower is better is earlier in the list)...
        if suggest:
            count = self.zim_file.get_suggestions_results_count(query)
            search = self.zim_file.suggest(query, start=start, end=start + max_results)
        else:
            count = self.zim_file.get_search_results_count(query)
            search = self.zim_file.search(query, start=start, end=start + max_results)
        articles = list(
            self.__article_metadata(result, snippet_length) for result in search
        )
        return JsonResponse({"articles": articles, "count": count})

    def __article_metadata(self, search_result, snippet_length):
        """Return the JSON-able metadata dict for one search result."""
        full_url = search_result.namespace + "/" + search_result.url
        result = {"title": search_result.title, "path": full_url}
        if snippet_length:
            zim_article = self.zim_file.get_article(full_url)
            result["snippet"] = _html_snippet(
                to_bytes(zim_article.data, "utf-8"), max_chars=snippet_length
            )
        return result
def _zim_article_url(request, zim_filename, zim_article_path):
    """Build an absolute URL for an article inside a Zim file."""
    # I don't know why I need to torment the resolver like this instead of
    # using django.urls.reverse, but something is trying to add a language
    # prefix incorrectly and causing an error.
    resolver = get_resolver(None)
    relative_url = resolver.reverse(
        "zim_article", zim_filename=zim_filename, zim_article_path=zim_article_path
    )
    return request.build_absolute_uri("/" + relative_url)
def _html_snippet(html_str, max_chars):
    """Return a plain-text snippet of at most *max_chars* characters of HTML."""
    text = _html_snippet_text(bs4.BeautifulSoup(html_str, "lxml"))
    return textwrap.shorten(text, width=max_chars, placeholder="")
def _html_snippet_text(soup):
    """Extract snippet text: meta description, then headings/paragraphs, then body."""
    # NOTE: the bare truthiness test is deliberate — bs4 tags with no
    # children evaluate falsy, matching the original behavior.
    meta_description = soup.find("meta", attrs={"name": "description"})
    if meta_description:
        return meta_description.get("content")
    elements = soup.find("body").find_all(["h2", "h3", "h4", "h5", "h6", "p"])
    joined_text = "\n".join(element.get_text() for element in elements)
    if joined_text:
        return joined_text
    # Fall back to the whole body text when no structured elements exist.
    return soup.find("body").get_text()
| 34.165992 | 96 | 0.678279 | 6,057 | 0.717739 | 0 | 0 | 611 | 0.072402 | 0 | 0 | 1,516 | 0.179642 |
6ce066b2522d23ac66096c6f10c531332a69139e | 20,404 | py | Python | advanced lane tracking pipeline.py | mrpalazz/CarND-Advanced-Lane-Lines | 495d1f1025514b519182cc907a02b80592e8ba4f | [
"MIT"
] | null | null | null | advanced lane tracking pipeline.py | mrpalazz/CarND-Advanced-Lane-Lines | 495d1f1025514b519182cc907a02b80592e8ba4f | [
"MIT"
] | null | null | null | advanced lane tracking pipeline.py | mrpalazz/CarND-Advanced-Lane-Lines | 495d1f1025514b519182cc907a02b80592e8ba4f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 22 21:44:55 2017
@author: Mike
"""
import numpy as np
import cv2
import glob
import pickle
import matplotlib.pyplot as plt
from matplotlib.pyplot import *
import os
from scipy import stats
from moviepy.editor import VideoFileClip
from IPython.display import HTML
from camera_calibration import calibrate_camera, distortion_correct
from sobel_library import abs_sobel_image, sobel_mag_thresh, sobel_dir_thresh
from collections import deque
run_camera_cal = 1
#HLS Color space threshold filter
def color_binary(img, colorspace, color_thresh):
    """Threshold the saturation channel of an RGB image into a binary mask.

    @param img: RGB image array
    @param colorspace: only 'HLS' is supported
    @param color_thresh: (low, high) exclusive bounds applied to the S channel
    @return: array of 0/1 the same shape as one channel of *img*
    @raise ValueError: for unsupported colorspaces (the original fell through
        and crashed with UnboundLocalError on the return statement)
    """
    if colorspace != 'HLS':
        raise ValueError("Unsupported colorspace: %s" % colorspace)
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    # Only the S channel is thresholded; H and L were extracted but unused.
    s_channel = hls[:, :, 2]
    binary_output = np.zeros_like(s_channel)
    binary_output[(s_channel > color_thresh[0]) & (s_channel < color_thresh[1])] = 1
    return binary_output
#combine the thresholds for the color map and the gradient threshold
# send in an image with binary color scheme and binary gradient scheme
def bin_color_gradient(binary_gradient, binary_color, mask_polys=None):
    """OR-combine a binary gradient and a binary color mask, then blank a region.

    @param binary_gradient: 0/1 array from a gradient threshold
    @param binary_color: 0/1 array from a color threshold
    @param mask_polys: optional int32 polygon array passed to cv2.fillPoly to
        zero out a region; defaults to the previously hard-coded trapezoid
        (generalized so other camera setups can override it)
    @return: combined 0/1 array with the mask region zeroed
    """
    binary_output = np.zeros_like(binary_gradient)
    binary_output[(binary_gradient == 1) | (binary_color == 1)] = 1
    if mask_polys is None:
        # Default region tuned for the project video geometry.
        mask_polys = np.array([[(350, 720), (580, 500), (800, 500), (900, 720)]], dtype=np.int32)
    cv2.fillPoly(binary_output, mask_polys, 0, lineType=8, shift=0)
    return binary_output
#Function to warp images to birds eye view
def warp(img, source_points, destination_points):
    """Perspective-warp *img* (e.g. to a birds-eye view).

    Returns (warped image, forward transform M, inverse transform Minv).
    """
    output_size = (img.shape[1], img.shape[0])
    src = np.float32(source_points)
    dst = np.float32(destination_points)
    forward_mat = cv2.getPerspectiveTransform(src, dst)
    inverse_mat = cv2.getPerspectiveTransform(dst, src)
    warped = cv2.warpPerspective(img, forward_mat, output_size, flags=cv2.INTER_LINEAR)
    return warped, forward_mat, inverse_mat
# Module-level ``global`` statements are no-ops at top level; kept as-is.
global left_fit_deque
global right_fit_deque
# Number of recent frames whose polynomial fits are averaged for smoothing.
deque_size = 3
# Rolling buffers of the last `deque_size` polynomial fits per lane line
# (the list initialisations are immediately overwritten by the deques).
left_fit_deque = []
left_fit_deque = deque(maxlen = deque_size)
right_fit_deque = []
right_fit_deque = deque(maxlen = deque_size)
class Lane():
    """Per-lane data container.

    NOTE(review): ``llm``/``rlm`` presumably hold left/right lane marker
    data — confirm against the rest of the pipeline (usage is not visible
    in this chunk).
    """
    def __init__(self):
        self.llm = []
        self.rlm = []
mylane = Lane()
coeffs = []
# Scratch arrays reused by polyfit() to average the last `deque_size`
# polynomial coefficients (C0..C2) of the left (L) and right (R) lane fits.
C0_L = np.zeros(deque_size)
C1_L = np.zeros(deque_size)
C2_L = np.zeros(deque_size)
C0_R = np.zeros(deque_size)
C1_R = np.zeros(deque_size)
C2_R = np.zeros(deque_size)
def polyfit(warped_image, orig_img, Minv):
#def polyfit(warped_image):
# print('Initiating line overlay onto binary warped image')
# Assuming you have created a warped binary image called "binary_warped"
# Take a histogram of the bottom half of the image
histogram = np.sum(warped_image[warped_image.shape[0]//2:,:], axis=0)
#histogram = np.sum(binary_warped[binary_warped.shape[0]/2:,:], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((warped_image, warped_image, warped_image))*255
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]/2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# Choose the number of sliding windows
nwindows = 9
# Set height of windows
window_height = np.int(warped_image.shape[0]/nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = warped_image.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
# Set the width of the windows +/- margin
margin = 100
# Set minimum number of pixels found to recenter window
minpix = 50
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = warped_image.shape[0] - (window+1)*window_height
win_y_high = warped_image.shape[0] - window*window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),
(0,255,0), 2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),
(0,255,0), 2)
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit a second order polynomial to each
left_fit = np.polyfit(lefty, leftx, 2)
#Store the left poly coefficient in a deque for later use
left_fit_deque.append(left_fit)
# Take the deque of polynomial data and extract the three coefficients, avearge them for stability
for idx, coeffs in enumerate(left_fit_deque):
C0_L[idx] = coeffs[0]
C1_L[idx] = coeffs[1]
C2_L[idx] = coeffs[2]
average_C0_L = np.mean(C0_L)
average_C1_L = np.mean(C1_L)
average_C2_L = np.mean(C2_L)
left_fit[0] = average_C0_L
left_fit[1] = average_C1_L
left_fit[2] = average_C2_L
right_fit = np.polyfit(righty, rightx, 2)
#Store the left poly coefficient in a deque for later use
right_fit_deque.append(right_fit)
# Take the deque of polynomial data and extract the three coefficients, avearge them for stability
for idx, coeffs in enumerate(right_fit_deque):
C0_R[idx] = coeffs[0]
C1_R[idx] = coeffs[1]
C2_R[idx] = coeffs[2]
average_C0_R = np.mean(C0_R)
average_C1_R = np.mean(C1_R)
average_C2_R = np.mean(C2_R)
right_fit[0] = average_C0_R
right_fit[1] = average_C1_R
right_fit[2] = average_C2_R
# Generate x and y values for plotting
ploty = np.linspace(0, warped_image.shape[0]-1, warped_image.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
# left_fitx = left_fit_deque[0]*ploty**2 + left_fit_deque[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# plt.figure(figsize = (20,10))
# plt.imshow(out_img)
# plt.plot(left_fitx, ploty, color='blue')
# plt.plot(right_fitx, ploty, color='red')
# plt.xlim(0, 1280)
# plt.ylim(720, 0)
# plt.show()
# Create an image to draw the lines on
warp_zero = np.zeros_like(warped_image).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
# =============================================================================
# In this section we calculate the radius of curvature for the warped lines
# =============================================================================
# Define y-value where we want radius of curvature
# I'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = np.max(ploty)
left_curverad = ((1 + (2*left_fit[0]*y_eval + left_fit[1])**2)**1.5) / np.absolute(2*left_fit[0])
right_curverad = ((1 + (2*right_fit[0]*y_eval + right_fit[1])**2)**1.5) / np.absolute(2*right_fit[0])
# print(left_curverad, right_curverad)
# Example values: 1926.74 1908.48
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
# Fit new polynomials to x,y in world space
left_fit_cr = np.polyfit(ploty*ym_per_pix, left_fitx*xm_per_pix, 2)
right_fit_cr = np.polyfit(ploty*ym_per_pix, right_fitx*xm_per_pix, 2)
# Calculate the new radii of curvature
left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
# Now our radius of curvature is in meters
# print(left_curverad, 'm', right_curverad, 'm')
# Example values: 632.1 m 626.2 m
# =============================================================================
# Calculate the position from center for the vehicle relative to the left lane
# =============================================================================
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, Minv, (orig_img.shape[1], orig_img.shape[0]))
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(newwarp,'Recording: project_video',(10,50), font, 1,(255,0,0),3,cv2.LINE_AA)
cv2.putText(newwarp,'Road Radius of curvature: {} km'.format(left_curverad/1000),(10,100), font, 1,(255,0,0),3,cv2.LINE_AA)
# =============================================================================
# Add the Section for fitting the radius of curvature to the image
# =============================================================================
vehicle_center = newwarp.shape[1]/2 #assuming that the video feed is from veh center
y_pixels = np.arange(newwarp.shape[0]-10, newwarp.shape[0]+1)
# y_pixels = 719
lx_loc = left_fit_cr[0]*y_pixels**2+left_fit_cr[1]*y_pixels+left_fit_cr[2]
rx_loc = right_fit_cr[0]*y_pixels**2+right_fit_cr[1]*y_pixels+right_fit_cr[2]
lane_center_pixel = (right_fitx[0] + left_fitx[0])/2
vehicle_offset = (vehicle_center - lane_center_pixel)*xm_per_pix
# pct_difference = vehicle_offset/
if vehicle_offset > 0:
cv2.putText(newwarp,'Ego Vehicle is {} meters right of lane center'.format(vehicle_offset),(10,150), font, 1,(255,0,0),3,cv2.LINE_AA)
if vehicle_offset < 0:
cv2.putText(newwarp,'Ego Vehicle is {} meters left of lane center'.format(vehicle_offset),(10,150), font, 1,(255,0,0),3,cv2.LINE_AA)
if vehicle_offset == 0:
cv2.putText(newwarp,'Ego Vehicle is directly on center!! Great job!',(10,150), font, 1,(255,0,0),3,cv2.LINE_AA)
# =============================================================================
# This plots the lane line data for debugging vehicle center
# =============================================================================
# plt.plot(lx_loc,y_pixels,'x')
# plt.title('Left Lane Line Pixel Locations')
# plt.show()
#
# plt.plot(rx_loc,y_pixels,'x')
# plt.title('Right Lane Line Pixel Locations')
# plt.show()
#
# plt.plot(left_fitx,'x')
# plt.plot(right_fitx,'o')
# plt.title('Left Lane and Right Lane overlay, horizontal dir i "y" in image space')
# plt.show()
#
# plt.figure(figsize = (15,15))
# plt.imshow(newwarp)
# plt.show()
#
# Combine the result with the original image
#img = cv2.imread(img)
img = cv2.cvtColor(orig_img,cv2.COLOR_BGR2RGB)
# result = cv2.addWeighted(orig_img, 1, newwarp, 0.3, 0)
result = cv2.addWeighted(img, 1, newwarp, 0.3, 0)
#This is the final overlaid image with the texxto n it
# plt.figure(figsize = (10,10))
# plt.title('final result')
# plt.imshow(result)
# plt.show()
return result, left_fitx, right_fitx, ploty
# ---------------------------------------------------------------------------
# Optional one-time camera calibration + distortion correction.
# Enabled by setting run_camera_cal = 1 earlier in the script.
# ---------------------------------------------------------------------------
if run_camera_cal == 1:
    # Calibrate from the chessboard shots shipped in camera_cal/.
    image_dir = "C:\\Users\\mrpal\\Documents\\Projects\\CarND-Advanced-Lane-Lines\\camera_cal\\"
    images = os.listdir('camera_cal')
    corners, imgpoints, objpoints, gray = calibrate_camera(image_dir, images)
    # Derive the camera matrix / distortion coefficients from the detected corners.
    print('Generating distortion coefficients and camera matrix parameters')
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,gray.shape[::-1], None, None)
    # Undistort the stills in test_images/ as a sanity check.
    image_dir = "C:\\Users\\mrpal\\Documents\\Projects\\CarND-Advanced-Lane-Lines\\test_images\\"
    images = os.listdir('test_images')
    print('Selected image directory is: {} '.format(image_dir))
    print('The images in the directory are: {}' .format(images))
    distortion_corrected = distortion_correct(image_dir, images, mtx, dist)
    cv2.destroyAllWindows()

# ---------------------------------------------------------------------------
# Pipeline configuration, shared (as module-level globals) with
# process_image() defined below.
# ---------------------------------------------------------------------------
image_dir = "C:\\Users\\mrpal\\Documents\\Projects\\CarND-Advanced-Lane-Lines\\test_images\\"
images = os.listdir('test_images')
print('Selected image directory is: {} '.format(image_dir))
print('The images in the directory are: {} \n' .format(images))
sobel_kernel = 9            # Sobel aperture used by the gradient thresholds
grad_threshold = [50,150]   # absolute-gradient threshold band
sobel_mag = [0,255]         # magnitude threshold band (not read by process_image)
# Create distortion-corrected copies of the test images when none exist yet.
if len(glob.glob('./test_images/*Distortion*.jpg')) == 0:
    # NOTE(review): mtx/dist are only defined when run_camera_cal == 1, so
    # this branch raises NameError otherwise — confirm intended usage.
    print('there are no distortion corrected images in the directory, let us create them')
    distortion_corrected = distortion_correct(image_dir, images, mtx, dist)
images = glob.glob('./test_images/*Distortion*.jpg')
def process_image(images):
    """Run the full lane-finding pipeline on a single video frame.

    Parameters
    ----------
    images : numpy array
        A single color frame as delivered by moviepy's ``fl_image`` —
        despite the plural name.

    Returns
    -------
    numpy array
        The input frame with the detected lane polygon and the curvature /
        center-offset annotations blended in (produced by ``polyfit``).

    Relies on the module-level configuration globals ``grad_threshold`` and
    ``sobel_kernel`` defined above, and on the helper functions
    ``abs_sobel_image``, ``sobel_dir_thresh``, ``sobel_mag_thresh``,
    ``color_binary``, ``bin_color_gradient``, ``warp`` and ``polyfit``.
    """
    # NOTE(review): moviepy frames are RGB; COLOR_BGR2RGB on an RGB frame
    # actually produces BGR — presumably compensating for BGR-oriented
    # helpers downstream; confirm against the helpers' expectations.
    img = cv2.cvtColor(images, cv2.COLOR_BGR2RGB)
    # Gradient thresholding: absolute x/y gradients, gradient direction and
    # gradient magnitude, each returned as a binary mask.
    gradx_binary = abs_sobel_image(img,'x',grad_threshold , sobel_kernel)
    grady_binary = abs_sobel_image(img,'y',grad_threshold , sobel_kernel)  # kept although unused below
    dir_binary = sobel_dir_thresh(img, sobel_kernel=15, thresh=(0.6, np.pi/2))
    mag_binary = sobel_mag_thresh(img, sobel_kernel, mag_thresh= (80, 150))
    # Combine the masks: strong x-gradient, OR agreement of magnitude AND direction.
    combined_binary = np.zeros_like(dir_binary)
    combined_binary[(gradx_binary == 1) | ((mag_binary == 1) & (dir_binary == 1))] = 1
    # Color thresholding in HLS space fills gaps the gradients miss.
    binary_color = color_binary(img, 'HLS', color_thresh = [80,255])
    color_grad_combined = bin_color_gradient(combined_binary , binary_color)
    # Perspective-transform the combined binary to a top-down view; Minv
    # lets polyfit() project the detected lane back onto the original frame.
    src = np.float32([(200, 720), (580, 480), (720, 480), (1050, 720)])
    dst = np.float32([(280, 720), (400, 190), (920, 190), (960, 720)])
    binary_warped, M, Minv = warp(color_grad_combined,src, dst)
    # Fit the lane-line polynomials and draw the annotated overlay.
    out, left_fitx, right_fitx, ploty = polyfit(binary_warped,img, Minv)
    return out
#######--------------------------
##os.system("ffmpeg -i project_video.mp4 -vf fps=15/1 out_%03d.jpg'
# Feed the project video through the lane pipeline frame by frame and write
# the annotated result next to this script.
Test_Video_dir = os.listdir("test_videos/")
video_output = 'project_video_output.mp4'
# Only the 13 s - 18 s excerpt is processed here; the commented line below
# processes the full clip instead.
clip1 = VideoFileClip("test_videos/project_video.mp4").subclip(13,18)
#clip1 = VideoFileClip("test_videos/project_video.mp4")
clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
clip.write_videofile(video_output, audio=False)
#-------------------------------------------
| 39.619417 | 142 | 0.624926 | 100 | 0.004901 | 0 | 0 | 0 | 0 | 0 | 0 | 9,230 | 0.452362 |
6ce17b5622aced1091b006d9abe3f5934f61119a | 624 | py | Python | homeassistant/components/light/insteon_hub.py | davidedmundson/home-assistant | cd02563552ffc28239fa17c79a5d9bc0013bd5ac | [
"MIT"
] | null | null | null | homeassistant/components/light/insteon_hub.py | davidedmundson/home-assistant | cd02563552ffc28239fa17c79a5d9bc0013bd5ac | [
"MIT"
] | null | null | null | homeassistant/components/light/insteon_hub.py | davidedmundson/home-assistant | cd02563552ffc28239fa17c79a5d9bc0013bd5ac | [
"MIT"
] | 1 | 2018-11-20T17:44:08.000Z | 2018-11-20T17:44:08.000Z | """
homeassistant.components.light.insteon
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for Insteon Hub lights.
"""
from homeassistant.components.insteon_hub import INSTEON, InsteonToggleDevice
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Insteon Hub light platform.

    Wraps every switched or dimmable lighting device reported by the hub in
    an InsteonToggleDevice and registers the resulting entities.
    """
    supported_categories = ("Switched Lighting Control", "Dimmable Lighting Control")
    entities = [
        InsteonToggleDevice(device)
        for device in INSTEON.devices
        if device.DeviceCategory in supported_categories
    ]
    add_devices(entities)
| 32.842105 | 77 | 0.677885 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 216 | 0.346154 |
6ce1cbb5c8180e6b2722e37172b43bc380a6b812 | 3,481 | py | Python | exercises/fit_gaussian_estimators.py | YuvalAvshalom/IML.HUJI | 0afa89c23a1dc6b7c1e321ecd18fb3cb776bd4c7 | [
"MIT"
] | null | null | null | exercises/fit_gaussian_estimators.py | YuvalAvshalom/IML.HUJI | 0afa89c23a1dc6b7c1e321ecd18fb3cb776bd4c7 | [
"MIT"
] | null | null | null | exercises/fit_gaussian_estimators.py | YuvalAvshalom/IML.HUJI | 0afa89c23a1dc6b7c1e321ecd18fb3cb776bd4c7 | [
"MIT"
] | null | null | null | from IMLearn.learners import UnivariateGaussian, MultivariateGaussian
import numpy as np
import plotly.graph_objects as go
import plotly.io as pio
# Use plotly's minimal "simple_white" template for every figure in this module.
pio.templates.default = "simple_white"
# Number of samples drawn for each estimation experiment.
SAMPLES_NUM = 1000
# Formatting fragments used when printing the fitted (expectation, variance) pair.
LEFT_CIRCLE = '('
RIGHT_CIRCLE = ')'
COMMA = ', '
# Pixel sizes for the generated plotly figures.
GRAPH_SIZE = 500
HEATMAP_SIZE = 700
def test_univariate_gaussian():
    """Fit a univariate Gaussian to N(10, 1) samples and visualise the results."""
    # Question 1 - Draw samples and print the fitted (expectation, variance) pair
    estimator = UnivariateGaussian()
    mu, sigma = 10, 1
    samples = np.random.normal(mu, sigma, SAMPLES_NUM)
    fitted = estimator.fit(samples)
    print(LEFT_CIRCLE + str(fitted.mu_) + COMMA + str(fitted.var_) + RIGHT_CIRCLE)

    # Question 2 - The estimated expectation converges as the sample size grows
    sample_sizes = np.linspace(10, 1000, 100).astype(int)
    distances = [abs(estimator.fit(samples[:m]).mu_ - mu) for m in sample_sizes]
    go.Figure([go.Scatter(x=sample_sizes, y=distances, mode='markers+lines')],
              layout=go.Layout(title=r"$\text{ Distance between estimated "
                                     r"and true value of the expectation as a function of samples number}$",
                               xaxis_title="$m\\text{ - number of samples}$",
                               yaxis_title="r$distance$",
                               height=GRAPH_SIZE)).show()

    # Question 3 - Empirical PDF of the fitted model (the last fit used all samples)
    pdf_values = estimator.pdf(samples)
    go.Figure([go.Scatter(x=samples, y=pdf_values, mode='markers')],
              layout=go.Layout(title=r"$\text{ Sampled values distribution}$",
                               xaxis_title="$m\\text{ - sampled values}$",
                               yaxis_title="r$ pdf - values$",
                               height=GRAPH_SIZE)).show()
    # As expected, the samples scatter as a Gaussian bell around the expectation (10).
def test_multivariate_gaussian():
    """Fit a 4D Gaussian, map the log-likelihood surface and locate its maximum."""
    # Question 4 - Draw samples and print the fitted mean vector / covariance
    estimator = MultivariateGaussian()
    mu = np.array([0, 0, 4, 0])
    sigma = np.asarray([[1, 0.2, 0, 0.5],
                        [0.2, 2, 0, 0],
                        [0, 0, 1, 0],
                        [0.5, 0, 0, 1]])
    samples = np.random.multivariate_normal(mu, sigma, SAMPLES_NUM)
    fitted = estimator.fit(samples)
    print(str(fitted.mu_) + '\n' + str(fitted.cov_))

    # Question 5 - Log-likelihood over a 200x200 grid of candidate (f1, f3) expectations
    grid = np.linspace(-10, 10, 200)
    likelihoods = np.zeros((200, 200))
    for row, f1 in enumerate(grid):
        for col, f3 in enumerate(grid):
            likelihoods[row][col] = (MultivariateGaussian.log_likelihood(
                np.transpose([f1, 0, f3, 0]), sigma, samples))
    go.Figure([go.Heatmap(x=grid, y=grid, z=np.asarray(likelihoods), colorbar=dict(title="Log Likelihood"))],
              layout=go.Layout(title=
                               r"$\text{ Log Likelihood as function of "
                               r"different expectancies}$",
                               width=HEATMAP_SIZE, height=HEATMAP_SIZE,
                               xaxis_title="$f3$", yaxis_title="$f1$")).show()

    # Question 6 - Report the grid pair maximising the log-likelihood
    flat_index = np.argmax(likelihoods)
    best_row, best_col = divmod(int(flat_index), 200)
    print("Maximum value is achieved for the pair: f1 = " + str(round(grid[best_row], 3)) +
          " f3 = " + str(round(grid[best_col], 3)))
if __name__ == '__main__':
    # Fixed seed keeps the sampled data (and therefore the printed estimates
    # and plots) reproducible across runs.
    np.random.seed(0)
    test_univariate_gaussian()
    test_multivariate_gaussian()
| 35.520408 | 119 | 0.530882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 819 | 0.235277 |
6ce39f4772d1be6af817ba65217a2d9af6ad1bac | 562 | py | Python | WEEKS/CD_Sata-Structures/_RESOURCES/CODESIGNAL/digits_product.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/CD_Sata-Structures/_RESOURCES/CODESIGNAL/digits_product.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/CD_Sata-Structures/_RESOURCES/CODESIGNAL/digits_product.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
def digitsProduct(product):
    """
    Given an integer product, find the smallest positive (i.e. greater
    than 0) integer the product of whose digits is equal to product.
    If there is no such integer, return -1 instead.

    Greedy construction: peel the largest possible digit factors (9 down
    to 2) off ``product``; any leftover factor greater than 9 is a prime
    that can never be a single digit.  Sorting the collected digits in
    ascending order yields the smallest possible number.  Unlike the
    original brute-force scan (capped at 10000), this also handles
    products whose answer exceeds 10000, e.g. 8192 -> 28888.

    Time Complexity: O(log(product))
    Space Complexity: O(log(product))
    """
    if product < 0:
        return -1  # a product of decimal digits is never negative
    if product == 0:
        return 10  # smallest positive integer containing a 0 digit
    if product == 1:
        return 1
    digits = []
    remaining = product
    for digit in range(9, 1, -1):
        while remaining % digit == 0:
            digits.append(digit)
            remaining //= digit
    if remaining > 1:
        # Leftover prime factor greater than 9: impossible with decimal digits.
        return -1
    return int(''.join(str(d) for d in sorted(digits)))
| 25.545455 | 51 | 0.542705 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 270 | 0.480427 |
6ce5b1fdedad3f99e65e3d1f3574b7a7cc248760 | 715 | py | Python | test/serial-gpu.py | ImperialCollegeLondon/software | e8bdb7935817af0fab4ab84b3cdd0509a8f7ccc8 | [
"BSD-3-Clause"
] | 8 | 2019-03-20T02:54:03.000Z | 2021-08-24T15:26:21.000Z | test/serial-gpu.py | ImperialCollegeLondon/software | e8bdb7935817af0fab4ab84b3cdd0509a8f7ccc8 | [
"BSD-3-Clause"
] | 32 | 2019-03-19T23:34:20.000Z | 2022-03-22T19:10:28.000Z | test/serial-gpu.py | ImperialCollegeLondon/software | e8bdb7935817af0fab4ab84b3cdd0509a8f7ccc8 | [
"BSD-3-Clause"
# Serial GPU smoke test: initialise HOOMD in GPU mode and append the outcome
# (versions / compile flags) to test-results-gpu.out.  The context manager
# guarantees the log file is flushed and closed even when the test fails
# (the original left the handle open for the lifetime of the process).
with open('test-results-gpu.out', 'a') as results:
    results.write('** Starting serial GPU tests **\n')
    try:
        # Fresnel checks are currently disabled.
        #import fresnel
        #results.write('Fresnel version : {}\n'.format(fresnel.__version__))
        #dev = fresnel.Device(mode='gpu', n=1)
        #results.write('Fresnel device : {}\n'.format(dev))

        # HOOMD: initialising the context verifies that a usable GPU exists.
        import hoomd
        context = hoomd.context.initialize('--mode=gpu')
        # Explicit raise instead of `assert`, which is stripped under -O.
        if not context.on_gpu():
            raise RuntimeError('HOOMD context is not running on a GPU')
        results.write('HOOMD version : {}\n'.format(hoomd.__version__))
        results.write('HOOMD flags : {}\n'.format(hoomd._hoomd.hoomd_compile_flags()))
        results.write('** Serial GPU tests PASSED **\n\n')
    except Exception:
        # Record the failure, then re-raise so the caller sees a non-zero exit.
        results.write('** Serial GPU tests FAILED **\n\n')
        raise
| 31.086957 | 88 | 0.634965 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 387 | 0.541259 |
6ce68ade0975f3498a289cd7d8896318aa38df5e | 1,027 | py | Python | Curso em Video/Desafio_023.py | tonmarcondes/UNIVESP | a66a623d4811e8f3f9e2999f09e38a4470035ae2 | [
"MIT"
] | null | null | null | Curso em Video/Desafio_023.py | tonmarcondes/UNIVESP | a66a623d4811e8f3f9e2999f09e38a4470035ae2 | [
"MIT"
] | null | null | null | Curso em Video/Desafio_023.py | tonmarcondes/UNIVESP | a66a623d4811e8f3f9e2999f09e38a4470035ae2 | [
"MIT"
# Lê um número de até 4 dígitos e mostra a casa decimal de cada algarismo.
numero = input('\nInsira um numero de 0 a 9999: \n')
if not numero.isnumeric():
    print('Condição inválida, insira apenas números\n')
elif len(numero) > 4:
    print('O valor {} não está de acordo com o permitido\n'.format(numero))
else:
    casas = ('unidade', 'dezena', 'centena', 'milhar')
    for posicao in range(len(numero)):
        # A primeira linha impressa leva '\n' à frente e a última ao final,
        # reproduzindo exatamente a formatação original.
        inicio = '\n' if posicao == 0 else ''
        fim = '\n' if posicao == len(numero) - 1 else ''
        print('{}A {} do número {} é {}{}'.format(
            inicio, casas[posicao], numero, numero[-(posicao + 1)], fim))
| 48.904762 | 76 | 0.605648 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 461 | 0.437797 |
6ce7fa57db757c55528fa98bee77cb99fa34cb9a | 4,785 | py | Python | mayan/apps/user_management/links.py | bonitobonita24/Mayan-EDMS | 7845fe0e1e83c81f5d227a16116397a3d3883b85 | [
"Apache-2.0"
] | 343 | 2015-01-05T14:19:35.000Z | 2018-12-10T19:07:48.000Z | mayan/apps/user_management/links.py | bonitobonita24/Mayan-EDMS | 7845fe0e1e83c81f5d227a16116397a3d3883b85 | [
"Apache-2.0"
] | 191 | 2015-01-03T00:48:19.000Z | 2018-11-30T09:10:25.000Z | mayan/apps/user_management/links.py | bonitobonita24/Mayan-EDMS | 7845fe0e1e83c81f5d227a16116397a3d3883b85 | [
"Apache-2.0"
] | 114 | 2015-01-08T20:21:05.000Z | 2018-12-10T19:07:53.000Z | from django.utils.translation import ugettext_lazy as _
from mayan.apps.authentication.link_conditions import condition_user_is_authenticated
from mayan.apps.navigation.classes import Link, Separator, Text
from mayan.apps.navigation.utils import factory_condition_queryset_access
from .icons import (
icon_current_user_details, icon_group_create, icon_group_delete_single,
icon_group_delete_multiple, icon_group_edit, icon_group_list,
icon_group_setup, icon_group_user_list, icon_user_create,
icon_user_edit, icon_user_group_list, icon_user_list,
icon_user_delete_single, icon_user_delete_multiple,
icon_user_set_options, icon_user_setup
)
from .link_conditions import condition_user_is_not_superuser
from .permissions import (
permission_group_create, permission_group_delete, permission_group_edit,
permission_group_view, permission_user_create, permission_user_delete,
permission_user_edit, permission_user_view
)
from .utils import get_user_label_text
# Navigation links for the user-management app.  Each Link couples an icon,
# a translated label and a named URL, optionally gated by a permission tuple
# or by a condition callable built with factory_condition_queryset_access.
# Current user
link_current_user_details = Link(
    args='request.user.id',
    condition=condition_user_is_authenticated,
    icon=icon_current_user_details, text=_('User details'),
    view='user_management:user_details'
)
# Group
link_group_create = Link(
    icon=icon_group_create, permissions=(permission_group_create,),
    text=_('Create new group'), view='user_management:group_create'
)
link_group_delete_single = Link(
    args='object.id', icon=icon_group_delete_single,
    permissions=(permission_group_delete,), tags='dangerous',
    text=_('Delete'), view='user_management:group_delete_single'
)
link_group_delete_multiple = Link(
    icon=icon_group_delete_multiple, tags='dangerous', text=_('Delete'),
    view='user_management:group_delete_multiple'
)
link_group_edit = Link(
    args='object.id', icon=icon_group_edit,
    permissions=(permission_group_edit,), text=_('Edit'),
    view='user_management:group_edit'
)
link_group_list = Link(
    condition=factory_condition_queryset_access(
        app_label='auth', model_name='Group',
        object_permission=permission_group_view,
    ), icon=icon_group_list, text=_('Groups'),
    view='user_management:group_list'
)
link_group_user_list = Link(
    args='object.id', icon=icon_group_user_list,
    permissions=(permission_group_edit,), text=_('Users'),
    view='user_management:group_members'
)
# Setup-menu variant of the group list; additionally hidden from superusers
# via the condition_user_is_not_superuser callback.
link_group_setup = Link(
    condition=factory_condition_queryset_access(
        app_label='auth', model_name='Group',
        callback=condition_user_is_not_superuser,
        object_permission=permission_group_view,
        view_permission=permission_group_create
    ), icon=icon_group_setup, text=_('Groups'),
    view='user_management:group_list'
)
# User
link_user_create = Link(
    condition=condition_user_is_authenticated, icon=icon_user_create,
    permissions=(permission_user_create,), text=_('Create new user'),
    view='user_management:user_create'
)
link_user_delete_single = Link(
    args='object.id', condition=condition_user_is_authenticated,
    icon=icon_user_delete_single, permissions=(permission_user_delete,),
    tags='dangerous', text=_('Delete'),
    view='user_management:user_delete_single'
)
link_user_delete_multiple = Link(
    icon=icon_user_delete_multiple, tags='dangerous', text=_('Delete'),
    view='user_management:user_delete_multiple'
)
link_user_edit = Link(
    args='object.id', condition=condition_user_is_authenticated,
    icon=icon_user_edit, permissions=(permission_user_edit,), text=_('Edit'),
    view='user_management:user_edit'
)
link_user_group_list = Link(
    args='object.id', condition=condition_user_is_authenticated,
    icon=icon_user_group_list, permissions=(permission_user_edit,),
    text=_('Groups'), view='user_management:user_groups'
)
link_user_list = Link(
    icon=icon_user_list, text=_('Users'),
    condition=factory_condition_queryset_access(
        app_label='auth', model_name='User',
        callback=condition_user_is_authenticated,
        object_permission=permission_user_view,
        view_permission=permission_user_create
    ), view='user_management:user_list'
)
link_user_set_options = Link(
    args='object.id', condition=condition_user_is_authenticated,
    icon=icon_user_set_options, permissions=(permission_user_edit,),
    text=_('User options'), view='user_management:user_options'
)
link_user_setup = Link(
    condition=factory_condition_queryset_access(
        app_label='auth', model_name='User',
        object_permission=permission_user_view,
        view_permission=permission_user_create,
    ), icon=icon_user_setup, text=_('Users'),
    view='user_management:user_list'
)
# Menu decoration: separator plus the current user's label text.
separator_user_label = Separator()
text_user_label = Text(
    html_extra_classes='menu-user-name', text=get_user_label_text
)
| 37.093023 | 85 | 0.777011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 877 | 0.183281 |
6ce92f7cc60e57aeb3a0f765328c9efb31ca244a | 626 | py | Python | .configs/sonatconfig.py | Armcollector/lockdown-workout | a1c4633c8bd47e399bc7297d77f980542885414e | [
"MIT"
] | null | null | null | .configs/sonatconfig.py | Armcollector/lockdown-workout | a1c4633c8bd47e399bc7297d77f980542885414e | [
"MIT"
] | null | null | null | .configs/sonatconfig.py | Armcollector/lockdown-workout | a1c4633c8bd47e399bc7297d77f980542885414e | [
"MIT"
import os
import urllib.parse

basedir = os.path.abspath(os.path.dirname(__file__))

# URL-encode the MSSQL ODBC connection string so it can be embedded in a
# SQLAlchemy URI.  Defaults to '' when DB_CONNECTIONSTRING is unset so the
# module can still be imported (the original raised NameError on `params`
# in that case, because it was only assigned inside the env-var check).
params = urllib.parse.quote_plus(os.environ.get("DB_CONNECTIONSTRING", ""))


class Config(object):
    """Flask / SQLAlchemy configuration for the Sonat lockdown-workout app."""

    # NOTE(review): hard-coded fallback secret; rotate for production use.
    SECRET_KEY = os.environ.get("SECRET_KEY") or "iR33OXoRSUj5"
    SQLALCHEMY_DATABASE_URI = "mssql+pyodbc:///?odbc_connect={}".format(params)
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    VERSION = "2.1.0"
    # NOTE(review): Flask-WTF reads WTF_CSRF_ENABLED; this key looks like a
    # typo ('CSR') but is kept unchanged for backward compatibility.
    WTF_CSR_ENABLED = True
    CACHE_TYPE = "simple"
    CACHE_DEFAULT_TIMEOUT = 50
    MAINTITLE = "Sonats Lockdown Workout "
    INSTANCE = "SONAT"
| 28.454545 | 79 | 0.728435 | 421 | 0.672524 | 0 | 0 | 0 | 0 | 0 | 0 | 150 | 0.239617 |
6ce9b98de4c25e47cb5bd0d8d6d6881984255a1c | 3,254 | py | Python | hier/search.py | ruslan-ok/ServerApps | 541aa12f1933054a12f590ce78544178be374669 | [
"MIT"
] | 1 | 2021-06-07T02:14:13.000Z | 2021-06-07T02:14:13.000Z | hier/search.py | ruslan-ok/ServerApps | 541aa12f1933054a12f590ce78544178be374669 | [
"MIT"
] | 9 | 2021-08-14T07:53:47.000Z | 2022-03-18T19:07:22.000Z | hier/search.py | ruslan-ok/ServerApps | 541aa12f1933054a12f590ce78544178be374669 | [
"MIT"
] | null | null | null | from django.utils.translation import gettext_lazy as _, gettext
from .utils import get_main_menu_item, APPS
# Maps an entity slug to a (human readable label, icon file stem) pair.
# An icon stem of '/' means "use the entity slug itself as the icon name"
# (see SearchItem.ent_icon below).
# NOTE(review): the original literal listed 'service' twice ('service'/'key'
# followed by 'replacement'/'key'); Python keeps the key's first position
# with the last value, so the dead first entry was removed here while
# preserving both the resulting value and the key order.
ENTITIES = {
    'apart': ('apart', 'apartment'),
    'meter': ('meters data', 'execute'),
    'bill': ('bill', 'cost'),
    'service': ('replacement', 'key'),
    'price': ('tariff', 'application'),
    'cars': ('car', 'car'),
    'fuel': ('fueling', 'gas'),
    'interval': ('spare part', 'part'),
    'note': ('note', '/'),
    'news': ('news', '/'),
    'project': ('project', 'work'),
    'expense': ('expense', 'cost'),
    'entry': ('password', 'key'),
    'person': ('person', 'user'),
    'trip': ('trip', 'car'),
    'department': ('department', 'application'),
    'post': ('post', 'application'),
    'pay_title': ('pay title', 'application'),
    'employee': ('employee', 'user'),
    'surname': ('surname change history', 'application'),
    'child': ('child', 'user'),
    'appoint': ('appointment', 'application'),
    'education': ('education', 'application'),
    'payment': ('payment', 'cost'),
    'task': ('task', 'application'),
    'group': ('group', '/'),
    'list': ('list', '/'),
}
class SearchResult():
    """Accumulates SearchItem hits produced while searching for ``query``."""

    def __init__(self, query):
        self.query = query
        self.items = []

    def add(self, app, entity, id, created, name, info, main_entity = True, detail1 = '', detail2 = ''):
        """Register one hit, shortening long ``info`` to a ~500 character
        window around the first query match and wrapping every match in
        <strong> tags."""
        prefix = ''
        info = info if info else ''
        if len(info) > 500:
            hit = info.find(self.query)
            if hit > 250:
                # Keep some context before the match and mark the cut.
                hit -= 250
                prefix = '... '
            else:
                hit = 0
            info = '{}{} ...'.format(prefix, info[hit:hit + 500])
        highlighted = info.replace(self.query, '<strong>' + self.query + '</strong>')
        self.items.append(SearchItem(app, entity, id, created, name, highlighted,
                                     main_entity, detail1, detail2))
class SearchItem():
    """A single search hit: identifies the owning app, the entity kind and
    the matched record, plus display helpers for names and icons."""

    def __init__(self, app, entity, id, created, name, info, main_entity = True, detail1 = '', detail2 = ''):
        self.app = app
        self.entity = entity
        self.main = main_entity
        self.id = id
        self.created = created
        self.name = name
        self.info = info
        self.detail1 = detail1
        self.detail2 = detail2

    def __repr__(self):
        return 'Application: "{}", Entity: "{}", Created: "{}", Name: "{}" , Info: "{}"'.format(
            self.app, self.entity, self.created, self.name, self.info)

    def href(self):
        pass

    def app_name(self):
        return get_main_menu_item(self.app)

    def app_icon(self):
        return 'rok/icon/{}.png'.format(APPS[self.app][0])

    def ent_icon(self):
        if self.entity not in ENTITIES:
            return 'rok/icon/inline/separator.png'
        icon_name = ENTITIES[self.entity][1]
        if icon_name == '/':
            # '/' is a placeholder meaning "icon file named after the entity".
            icon_name = self.entity
        return 'rok/icon/{}.png'.format(icon_name)

    def ent_name(self):
        if self.entity not in ENTITIES:
            return self.entity
        return _(ENTITIES[self.entity][0]).capitalize()
| 35.369565 | 165 | 0.497234 | 1,883 | 0.578672 | 0 | 0 | 0 | 0 | 0 | 0 | 879 | 0.270129 |
6ce9ba705ab3634e77b1eb1d8846761d08e2253c | 6,288 | py | Python | utils/asyncTable.py | jiafangjun/DD_KaoRou | aa453db0793cac9202218bc5a28d5bed0ebeea99 | [
"MIT"
] | 108 | 2020-04-28T02:32:14.000Z | 2020-11-23T00:26:52.000Z | utils/asyncTable.py | zhimingshenjun/DD_KaoRou | aa453db0793cac9202218bc5a28d5bed0ebeea99 | [
"MIT"
] | 4 | 2020-04-29T13:05:29.000Z | 2020-05-05T12:00:43.000Z | utils/asyncTable.py | zhimingshenjun/DD_KaoRou | aa453db0793cac9202218bc5a28d5bed0ebeea99 | [
"MIT"
] | 9 | 2020-04-29T17:38:34.000Z | 2020-11-24T07:11:08.000Z | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import time
from PySide2.QtWidgets import QWidget, QMainWindow, QGridLayout, QFileDialog, QToolBar,\
QAction, QDialog, QStyle, QSlider, QLabel, QPushButton, QStackedWidget, QHBoxLayout,\
QLineEdit, QTableWidget, QAbstractItemView, QTableWidgetItem, QGraphicsTextItem, QMenu,\
QGraphicsScene, QGraphicsView, QGraphicsDropShadowEffect, QComboBox, QMessageBox, QColorDialog
from PySide2.QtMultimedia import QMediaPlayer
from PySide2.QtMultimediaWidgets import QGraphicsVideoItem
from PySide2.QtGui import QIcon, QKeySequence, QFont, QBrush, QColor
from PySide2.QtCore import Qt, QTimer, QEvent, QPoint, Signal, QSizeF, QUrl, QThread
def cnt2Time(cnt, interval, value=0):
    """Build ``cnt`` time labels ('m:ss.cc') for consecutive interval counts.

    Label i (for i in [value, value + cnt)) represents i * interval
    milliseconds; the final millisecond digit is dropped, leaving
    centisecond precision.
    """
    def _label(index):
        minutes, remainder = divmod(index * interval, 60000)
        seconds, millis = divmod(remainder, 1000)
        return ('%s:%02d.%03d' % (minutes, seconds, millis))[:-1]

    return [_label(i) for i in range(value, value + cnt)]
class refillVerticalLabel(QThread):
    # Background worker that lazily (re)creates the vertical header time
    # labels of the subtitle table while the user scrolls.
    def __init__(self, value, globalInterval, subtitle, parent=None):
        # value: first row to refill (stored 0-based, hence the -1).
        # globalInterval: milliseconds represented by one table row.
        # subtitle: the QTableWidget whose vertical header gets refilled.
        super(refillVerticalLabel, self).__init__(parent)
        self.value = value - 1
        self.globalInterval = globalInterval
        # NOTE(review): despite its name, oldInterval caches the last seen
        # scrollbar *value* — run() compares it against scrollValue.
        self.oldInterval = self.globalInterval
        self.subtitle = subtitle
    def setGlobalInterval(self, globalInterval):
        # Let the owner change the per-row interval while the thread runs.
        self.globalInterval = globalInterval
    def run(self):
        # Poll the table's scrollbar forever; when it has moved and any of
        # the next 60 header rows are missing, fill 60 rows with labels.
        while 1:
            scrollValue = self.subtitle.verticalScrollBar().value()
            if scrollValue != self.oldInterval:
                print(scrollValue)  # NOTE(review): leftover debug output
                self.oldInterval = scrollValue
                refillToken = False
                for y in range(scrollValue - 1, scrollValue + 60):
                    if not self.subtitle.verticalHeaderItem(y):
                        refillToken = True
                        break
                if refillToken:
                    # Labels always start at self.value, not at the scroll
                    # position — presumably the window this worker owns;
                    # TODO confirm against callers.
                    for cnt, label in enumerate(cnt2Time(60, self.globalInterval, self.value)):
                        self.subtitle.setVerticalHeaderItem(self.value + cnt, QTableWidgetItem(label))
                        time.sleep(0.000001)  # brief yield between items
            time.sleep(20)  # poll period in seconds
class asyncTable(QThread):
    """Worker thread that rebuilds the subtitle QTableWidget from scratch.

    Clears the table, re-marks the spans produced by auto (AI) subtitle
    detection, refills every entry recorded in ``subtitleDict``, then scrolls
    to the current playback position and emits ``reconnect`` so the owner can
    re-attach the table's signals.

    NOTE(review): this thread mutates QWidget objects directly; Qt only
    guarantees GUI access from the main thread — verify this is safe here or
    route the updates through signals/slots.
    """

    reconnect = Signal()

    def __init__(self, subtitleDict, oldInterval, globalInterval, duration, subtitle, autoSub, tablePreset, position, parent=None):
        super(asyncTable, self).__init__(parent)
        self.subtitleDict = subtitleDict      # {column: {start ms: [duration ms, text]}}
        self.oldInterval = oldInterval        # previous ms-per-row (kept for reference)
        self.globalInterval = globalInterval  # current ms represented by one table row
        self.duration = duration              # total media duration in ms
        self.subtitle = subtitle              # the QTableWidget being rebuilt
        self.autoSub = autoSub                # list of (start, end) ms spans from AI detection
        self.tablePreset = tablePreset        # [default text, merge-span flag]
        self.position = position              # current playback position in ms

    def initSubtitle(self):
        """Reset the table and re-mark the auto-detected subtitle spans."""
        self.subtitle.clear()
        # One row per globalInterval ms across the whole media duration.
        self.subtitle.setRowCount(self.duration // self.globalInterval + 1)
        for start, end in self.autoSub:  # 重新标记AI识别位置
            startRow = start // self.globalInterval
            endRow = end // self.globalInterval
            if self.tablePreset[1]:
                # Fill the whole span as one merged cell.
                self.subtitle.setItem(startRow, 0, QTableWidgetItem(self.tablePreset[0]))
                try:
                    self.subtitle.item(startRow, 0).setBackground(QBrush(QColor('#35545d')))
                except AttributeError:
                    # item() can return None; was a bare except, narrowed so
                    # SystemExit/KeyboardInterrupt are no longer swallowed.
                    pass
                self.subtitle.setSpan(startRow, 0, endRow - startRow, 1)
                if self.tablePreset[0]:
                    self.subtitleDict[0][start] = [end - start, self.tablePreset[0]]
            else:
                # Fill the span row by row instead of merging.
                for y in range(startRow, endRow):
                    self.subtitle.setItem(y, 0, QTableWidgetItem(self.tablePreset[0]))
                    try:
                        self.subtitle.item(y, 0).setBackground(QBrush(QColor('#35545d')))
                    except AttributeError:
                        pass
                    if self.tablePreset[0]:
                        self.subtitleDict[0][y * self.globalInterval] = [self.globalInterval, self.tablePreset[0]]
        # Refresh the ~60 visible vertical header labels around the scrollbar.
        scrollValue = self.subtitle.verticalScrollBar().value() - 1
        for cnt, label in enumerate(cnt2Time(60, self.globalInterval, scrollValue)):
            self.subtitle.setVerticalHeaderItem(scrollValue + cnt, QTableWidgetItem(label))
            time.sleep(0.000001)

    def run(self):
        """Rebuild the table from ``subtitleDict`` and emit ``reconnect`` when done."""
        self.initSubtitle()
        for index, subData in self.subtitleDict.items():
            for start, rowData in subData.items():
                startRow = start // self.globalInterval
                deltaRow = rowData[0] // self.globalInterval
                if deltaRow:
                    endRow = startRow + deltaRow
                    for row in range(startRow, endRow):
                        self.subtitle.setItem(row, index, QTableWidgetItem(rowData[1]))
                        if row >= 0:
                            self.subtitle.item(row, index).setBackground(QBrush(QColor('#35545d')))
                    if endRow - startRow > 1:
                        self.subtitle.setSpan(startRow, index, endRow - startRow, 1)
        # Jump the selection/scrollbar to the current playback position.
        row = self.position // self.globalInterval
        self.subtitle.selectRow(row)
        self.subtitle.verticalScrollBar().setValue(row - 10)
        self.reconnect.emit()
| 47.278195 | 132 | 0.578403 | 5,282 | 0.830242 | 0 | 0 | 0 | 0 | 0 | 0 | 1,143 | 0.17966 |
6ceaac512153bf1998132a4f1961ee9fbc239dba | 554,022 | py | Python | pyuvdata/uvdata/uvdata.py | e-koch/pyuvdata | ac36067f195c75127b28f02479eda1eb7a3400ed | [
"BSD-2-Clause"
] | null | null | null | pyuvdata/uvdata/uvdata.py | e-koch/pyuvdata | ac36067f195c75127b28f02479eda1eb7a3400ed | [
"BSD-2-Clause"
] | null | null | null | pyuvdata/uvdata/uvdata.py | e-koch/pyuvdata | ac36067f195c75127b28f02479eda1eb7a3400ed | [
"BSD-2-Clause"
] | null | null | null | # -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Primary container for radio interferometer datasets."""
import os
import copy
from collections.abc import Iterable
import warnings
import threading
import numpy as np
from scipy import ndimage as nd
from astropy import constants as const
import astropy.units as units
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, FK5, Angle
from astropy import coordinates as coord
from ..uvbase import UVBase
from .. import parameter as uvp
from .. import telescopes as uvtel
from .. import utils as uvutils
__all__ = ["UVData"]
class UVData(UVBase):
"""
A class for defining a radio interferometer dataset.
Currently supported file types: uvfits, miriad, fhd.
Provides phasing functions.
Attributes
----------
UVParameter objects :
For full list see UVData Parameters
(http://pyuvdata.readthedocs.io/en/latest/uvdata_parameters.html).
Some are always required, some are required for certain phase_types
and others are always optional.
"""
    def __init__(self):
        """Create a new UVData object."""
        # add the UVParameters to the class
        # Each attribute below is a uvp.UVParameter descriptor; the UVBase
        # parent (see super().__init__ at the bottom) presumably exposes each
        # "_name" parameter as a plain "name" attribute — confirm in uvbase.py.
        # standard angle tolerance: 1 mas in radians.
        radian_tol = 1 * 2 * np.pi * 1e-3 / (60.0 * 60.0 * 360.0)
        self._Ntimes = uvp.UVParameter(
            "Ntimes", description="Number of times", expected_type=int
        )
        self._Nbls = uvp.UVParameter(
            "Nbls", description="Number of baselines", expected_type=int
        )
        self._Nblts = uvp.UVParameter(
            "Nblts",
            description="Number of baseline-times "
            "(i.e. number of spectra). Not necessarily "
            "equal to Nbls * Ntimes",
            expected_type=int,
        )
        self._Nfreqs = uvp.UVParameter(
            "Nfreqs", description="Number of frequency channels", expected_type=int
        )
        self._Npols = uvp.UVParameter(
            "Npols", description="Number of polarizations", expected_type=int
        )
        # "desc" is a scratch variable, re-bound for each parameter below.
        desc = (
            "Array of the visibility data, shape: (Nblts, 1, Nfreqs, "
            "Npols) or (Nblts, Nfreqs, Npols) if future_array_shapes=True, "
            "type = complex float, in units of self.vis_units"
        )
        # TODO: Spw axis to be collapsed in future release
        self._data_array = uvp.UVParameter(
            "data_array",
            description=desc,
            form=("Nblts", 1, "Nfreqs", "Npols"),
            expected_type=complex,
        )
        desc = 'Visibility units, options are: "uncalib", "Jy" or "K str"'
        self._vis_units = uvp.UVParameter(
            "vis_units",
            description=desc,
            form="str",
            expected_type=str,
            acceptable_vals=["uncalib", "Jy", "K str"],
        )
        desc = (
            "Number of data points averaged into each data element, "
            "NOT required to be an integer, type = float, same shape as data_array."
            "The product of the integration_time and the nsample_array "
            "value for a visibility reflects the total amount of time "
            "that went into the visibility. Best practice is for the "
            "nsample_array to be used to track flagging within an integration_time "
            "(leading to a decrease of the nsample array value below 1) and "
            "LST averaging (leading to an increase in the nsample array "
            "value). So datasets that have not been LST averaged should "
            "have nsample array values less than or equal to 1."
            "Note that many files do not follow this convention, but it is "
            "safe to assume that the product of the integration_time and "
            "the nsample_array is the total amount of time included in a visibility."
        )
        self._nsample_array = uvp.UVParameter(
            "nsample_array",
            description=desc,
            form=("Nblts", 1, "Nfreqs", "Npols"),
            expected_type=float,
        )
        desc = "Boolean flag, True is flagged, same shape as data_array."
        self._flag_array = uvp.UVParameter(
            "flag_array",
            description=desc,
            form=("Nblts", 1, "Nfreqs", "Npols"),
            expected_type=bool,
        )
        self._Nspws = uvp.UVParameter(
            "Nspws",
            description="Number of spectral windows "
            "(ie non-contiguous spectral chunks). ",
            expected_type=int,
        )
        self._spw_array = uvp.UVParameter(
            "spw_array",
            description="Array of spectral window numbers, shape (Nspws)",
            form=("Nspws",),
            expected_type=int,
        )
        desc = (
            "Projected baseline vectors relative to phase center, "
            "shape (Nblts, 3), units meters. "
            "Convention is: uvw = xyz(ant2) - xyz(ant1)."
            "Note that this is the Miriad convention but it is different "
            "from the AIPS/FITS convention (where uvw = xyz(ant1) - xyz(ant2))."
        )
        self._uvw_array = uvp.UVParameter(
            "uvw_array",
            description=desc,
            form=("Nblts", 3),
            expected_type=float,
            acceptable_range=(0, 1e8),
            tols=1e-3,
        )
        desc = (
            "Array of times, center of integration, shape (Nblts), " "units Julian Date"
        )
        self._time_array = uvp.UVParameter(
            "time_array",
            description=desc,
            form=("Nblts",),
            expected_type=float,
            tols=1e-3 / (60.0 * 60.0 * 24.0),
        ) # 1 ms in days
        desc = (
            "Array of local apparent sidereal times (LAST) at the center of "
            "integration, shape (Nblts), units radians."
        )
        self._lst_array = uvp.UVParameter(
            "lst_array",
            description=desc,
            form=("Nblts",),
            expected_type=float,
            tols=radian_tol,
        )
        desc = (
            "Array of numbers for the first antenna, which is matched to that in "
            "the antenna_numbers attribute. Shape (Nblts), type = int."
        )
        self._ant_1_array = uvp.UVParameter(
            "ant_1_array", description=desc, expected_type=int, form=("Nblts",)
        )
        desc = (
            "Array of numbers for the second antenna, which is matched to that in "
            "the antenna_numbers attribute. Shape (Nblts), type = int."
        )
        self._ant_2_array = uvp.UVParameter(
            "ant_2_array", description=desc, expected_type=int, form=("Nblts",)
        )
        desc = (
            "Array of baseline numbers, shape (Nblts), "
            "type = int; baseline = 2048 * (ant1+1) + (ant2+1) + 2^16"
        )
        self._baseline_array = uvp.UVParameter(
            "baseline_array", description=desc, expected_type=int, form=("Nblts",),
        )
        # this dimensionality of freq_array does not allow for different spws
        # to have different dimensions
        desc = (
            "Array of frequencies, center of the channel, "
            "shape (1, Nfreqs) or (Nfreqs,) if future_array_shapes=True, units Hz"
        )
        # TODO: Spw axis to be collapsed in future release
        self._freq_array = uvp.UVParameter(
            "freq_array",
            description=desc,
            form=(1, "Nfreqs"),
            expected_type=float,
            tols=1e-3,
        ) # mHz
        desc = (
            "Array of polarization integers, shape (Npols). "
            "AIPS Memo 117 says: pseudo-stokes 1:4 (pI, pQ, pU, pV); "
            "circular -1:-4 (RR, LL, RL, LR); linear -5:-8 (XX, YY, XY, YX). "
            "NOTE: AIPS Memo 117 actually calls the pseudo-Stokes polarizations "
            '"Stokes", but this is inaccurate as visibilities cannot be in '
            "true Stokes polarizations for physical antennas. We adopt the "
            "term pseudo-Stokes to refer to linear combinations of instrumental "
            "visibility polarizations (e.g. pI = xx + yy)."
        )
        self._polarization_array = uvp.UVParameter(
            "polarization_array",
            description=desc,
            expected_type=int,
            acceptable_vals=list(np.arange(-8, 0)) + list(np.arange(1, 5)),
            form=("Npols",),
        )
        desc = (
            "Length of the integration in seconds, shape (Nblts). "
            "The product of the integration_time and the nsample_array "
            "value for a visibility reflects the total amount of time "
            "that went into the visibility. Best practice is for the "
            "integration_time to reflect the length of time a visibility "
            "was integrated over (so it should vary in the case of "
            "baseline-dependent averaging and be a way to do selections "
            "for differently integrated baselines)."
            "Note that many files do not follow this convention, but it is "
            "safe to assume that the product of the integration_time and "
            "the nsample_array is the total amount of time included in a visibility."
        )
        self._integration_time = uvp.UVParameter(
            "integration_time",
            description=desc,
            form=("Nblts",),
            expected_type=float,
            tols=1e-3,
        ) # 1 ms
        desc = (
            "Width of frequency channels (Hz). If flex_spw = False and "
            "future_array_shapes=False, then it is a "
            "single value of type = float, otherwise it is an array of shape "
            "(Nfreqs), type = float."
        )
        self._channel_width = uvp.UVParameter(
            "channel_width", description=desc, expected_type=float, tols=1e-3,
        ) # 1 mHz
        desc = (
            "Name(s) of source(s) or field(s) observed, type string. If "
            'multi_phase_center = True, set to "multi".'
        )
        self._object_name = uvp.UVParameter(
            "object_name", description=desc, form="str", expected_type=str,
        )
        # --- multi phase center handling ---
        desc = (
            'Only relevant if phase_type = "phased". Specifies the that the data set '
            "contains multiple sources within it."
        )
        self._multi_phase_center = uvp.UVParameter(
            "multi_phase_center", description=desc, expected_type=bool, value=False,
        )
        desc = (
            "Required if multi_phase_center = True. Specifies the number of sources "
            "contained within the data set."
        )
        self._Nphase = uvp.UVParameter(
            "Nphase", description=desc, expected_type=int, required=False,
        )
        desc = (
            "Only relevant if multi_phase_center = True. Dictionary that acts as a "
            "catalog, containing information on individual phase centers. Keys are the "
            "names of the different phase centers in the UVData object. At a minimum, "
            'each dictionary must contain the key "cat_type", which can be either '
            '"sidereal" (fixed position in RA/Dec), "ephem" (position in RA/Dec which'
            'moves with time), "driftscan" (fixed postion in Az/El, NOT the same as '
            '`phase_type`="drift") and "unphased" (baseline coordinates in ENU, but '
            'data are not phased, similar to `phase_type`="drift"). Other typical '
            'keyworks include "cat_lon" (longitude coord, e.g. RA), "cat_lat" '
            '(latitude coord, e.g. Dec.), "cat_frame" (coordinate frame, e.g. '
            'icrs), "cat_epoch" (epoch and equinox of the coordinate frame), '
            '"cat_times" (times for the coordinates, only used for "ephem" '
            'types), "cat_pm_ra" (proper motion in RA), "cat_pm_dec" (proper '
            'motion in Dec), "cat_dist" (physical distance), "cat_vrad" ('
            'rest frame velocity), "info_source" (describes where catalog info came '
            'from), and "cat_id" (matched to the parameter `phase_center_id_array`. '
            "See the documentation of the `phase` method for more details."
        )
        self._phase_center_catalog = uvp.UVParameter(
            "phase_center_catalog",
            description=desc,
            expected_type=dict,
            required=False,
        )
        self._telescope_name = uvp.UVParameter(
            "telescope_name",
            description="Name of telescope " "(string)",
            form="str",
            expected_type=str,
        )
        self._instrument = uvp.UVParameter(
            "instrument",
            description="Receiver or backend. " "Sometimes identical to telescope_name",
            form="str",
            expected_type=str,
        )
        desc = (
            "Telescope location: xyz in ITRF (earth-centered frame). "
            "Can also be accessed using telescope_location_lat_lon_alt or "
            "telescope_location_lat_lon_alt_degrees properties"
        )
        self._telescope_location = uvp.LocationParameter(
            "telescope_location",
            description=desc,
            acceptable_range=(6.35e6, 6.39e6),
            tols=1e-3,
        )
        self._history = uvp.UVParameter(
            "history",
            description="String of history, units English",
            form="str",
            expected_type=str,
        )
        # --- flexible spectral window information ---
        desc = (
            'Option to construct a "flexible spectral window", which stores'
            "all spectral channels across the frequency axis of data_array. "
            "Allows for spectral windows of variable sizes, and channels of "
            "varying widths."
        )
        self._flex_spw = uvp.UVParameter(
            "flex_spw", description=desc, expected_type=bool, value=False,
        )
        desc = (
            "Required if flex_spw = True. Maps individual channels along the "
            "frequency axis to individual spectral windows, as listed in the "
            "spw_array. Shape (Nfreqs), type = int."
        )
        self._flex_spw_id_array = uvp.UVParameter(
            "flex_spw_id_array",
            description=desc,
            form=("Nfreqs",),
            expected_type=int,
            required=False,
        )
        desc = "Flag indicating that this object is using the future array shapes."
        self._future_array_shapes = uvp.UVParameter(
            "future_array_shapes", description=desc, expected_type=bool, value=False,
        )
        # --- phasing information ---
        desc = (
            'String indicating phasing type. Allowed values are "drift" and '
            '"phased" (n.b., "drift" is not the same as `cat_type="driftscan"`, '
            "the latter of which _is_ phased to a fixed az-el position)."
        )
        self._phase_type = uvp.UVParameter(
            "phase_type",
            form="str",
            expected_type=str,
            description=desc,
            value=None,
            acceptable_vals=["drift", "phased"],
        )
        desc = (
            'Required if phase_type = "phased". Epoch year of the phase '
            "applied to the data (eg 2000.)"
        )
        self._phase_center_epoch = uvp.UVParameter(
            "phase_center_epoch", required=False, description=desc, expected_type=float,
        )
        desc = (
            "Required if phase_type = 'phased'. Right ascension of phase "
            "center (see uvw_array), units radians. Can also be accessed using "
            "phase_center_ra_degrees."
        )
        self._phase_center_ra = uvp.AngleParameter(
            "phase_center_ra",
            required=False,
            description=desc,
            expected_type=float,
            tols=radian_tol,
        )
        desc = (
            'Required if phase_type = "phased". Declination of phase center '
            "(see uvw_array), units radians. Can also be accessed using "
            "phase_center_dec_degrees."
        )
        self._phase_center_dec = uvp.AngleParameter(
            "phase_center_dec",
            required=False,
            description=desc,
            expected_type=float,
            tols=radian_tol,
        )
        desc = (
            'Required if phase_type = "phased". Apparent right ascension of phase '
            "center in the topocentric frame of the observatory, units radians."
            "Shape (Nblts,), type = float."
        )
        self._phase_center_app_ra = uvp.AngleParameter(
            "phase_center_app_ra",
            required=False,
            form=("Nblts",),
            expected_type=float,
            description=desc,
            tols=radian_tol,
        )
        desc = (
            'Required if phase_type = "phased". Declination of phase center '
            "in the topocentric frame of the observatory, units radians. "
            "Shape (Nblts,), type = float."
        )
        self._phase_center_app_dec = uvp.AngleParameter(
            "phase_center_app_dec",
            required=False,
            form=("Nblts",),
            expected_type=float,
            description=desc,
            tols=radian_tol,
        )
        desc = (
            'Required if phase_type = "phased". Position angle between the hour '
            "circle (which is a great circle that goes through the target postion and "
            "both poles) in the apparent/topocentric frame, and the frame given in "
            "the phase_center_frame attribute."
            "Shape (Nblts,), type = float."
        )
        # The tolerance here is set by the fact that is is calculated using an arctan,
        # the limiting precision of which happens around values of 1.
        self._phase_center_frame_pa = uvp.AngleParameter(
            "phase_center_frame_pa",
            required=False,
            form=("Nblts",),
            expected_type=float,
            description=desc,
            tols=2e-8,
        )
        desc = (
            'Only relevant if phase_type = "phased". Specifies the frame the'
            ' data and uvw_array are phased to. Options are "icrs", "gcrs", and "fk5";'
            ' default is "icrs"'
        )
        self._phase_center_frame = uvp.UVParameter(
            "phase_center_frame",
            required=False,
            description=desc,
            expected_type=str,
            acceptable_vals=["icrs", "gcrs", "fk5"],
        )
        desc = (
            "Required if multi_phase_center = True. Maps individual indices along the "
            "Nblt axis to an entry in `phase_center_catalog`, with the ID number of "
            "individual entries stored as `cat_id`, along with other metadata. "
            "Shape (Nblts), type = int."
        )
        self._phase_center_id_array = uvp.UVParameter(
            "phase_center_id_array",
            description=desc,
            form=("Nblts",),
            expected_type=int,
            required=False,
        )
        desc = (
            "Optional when reading a MS. Retains the scan number when reading a MS."
            " Shape (Nblts), type = int."
        )
        self._scan_number_array = uvp.UVParameter(
            "scan_number_array",
            description=desc,
            form=("Nblts",),
            expected_type=int,
            required=False,
        )
        # --- antenna information ----
        desc = (
            "Number of antennas with data present (i.e. number of unique "
            "entries in ant_1_array and ant_2_array). May be smaller "
            "than the number of antennas in the array"
        )
        self._Nants_data = uvp.UVParameter(
            "Nants_data", description=desc, expected_type=int
        )
        desc = (
            "Number of antennas in the array. May be larger "
            "than the number of antennas with data"
        )
        self._Nants_telescope = uvp.UVParameter(
            "Nants_telescope", description=desc, expected_type=int
        )
        desc = (
            "List of antenna names, shape (Nants_telescope), "
            "with numbers given by antenna_numbers (which can be matched "
            "to ant_1_array and ant_2_array). There must be one entry "
            "here for each unique entry in ant_1_array and "
            "ant_2_array, but there may be extras as well. "
        )
        self._antenna_names = uvp.UVParameter(
            "antenna_names",
            description=desc,
            form=("Nants_telescope",),
            expected_type=str,
        )
        desc = (
            "List of integer antenna numbers corresponding to antenna_names, "
            "shape (Nants_telescope). There must be one "
            "entry here for each unique entry in ant_1_array and "
            "ant_2_array, but there may be extras as well."
            "Note that these are not indices -- they do not need to start "
            "at zero or be continuous."
        )
        self._antenna_numbers = uvp.UVParameter(
            "antenna_numbers",
            description=desc,
            form=("Nants_telescope",),
            expected_type=int,
        )
        desc = (
            "Array giving coordinates of antennas relative to "
            "telescope_location (ITRF frame), shape (Nants_telescope, 3), "
            "units meters. See the tutorial page in the documentation "
            "for an example of how to convert this to topocentric frame."
        )
        self._antenna_positions = uvp.UVParameter(
            "antenna_positions",
            description=desc,
            form=("Nants_telescope", 3),
            expected_type=float,
            tols=1e-3, # 1 mm
        )
        # -------- extra, non-required parameters ----------
        desc = (
            "Orientation of the physical dipole corresponding to what is "
            "labelled as the x polarization. Options are 'east' "
            "(indicating east/west orientation) and 'north (indicating "
            "north/south orientation)"
        )
        self._x_orientation = uvp.UVParameter(
            "x_orientation",
            description=desc,
            required=False,
            expected_type=str,
            acceptable_vals=["east", "north"],
        )
        blt_order_options = ["time", "baseline", "ant1", "ant2", "bda"]
        desc = (
            "Ordering of the data array along the blt axis. A tuple with "
            'the major and minor order (minor order is omitted if order is "bda"). '
            "The allowed values are: "
            + " ,".join([str(val) for val in blt_order_options])
        )
        self._blt_order = uvp.UVParameter(
            "blt_order",
            description=desc,
            form=(2,),
            required=False,
            expected_type=str,
            acceptable_vals=blt_order_options,
        )
        desc = (
            "Any user supplied extra keywords, type=dict. Keys should be "
            "8 character or less strings if writing to uvfits or miriad files. "
            'Use the special key "comment" for long multi-line string comments.'
        )
        self._extra_keywords = uvp.UVParameter(
            "extra_keywords",
            required=False,
            description=desc,
            value={},
            spoof_val={},
            expected_type=dict,
        )
        desc = (
            "Array of antenna diameters in meters. Used by CASA to "
            "construct a default beam if no beam is supplied."
        )
        self._antenna_diameters = uvp.UVParameter(
            "antenna_diameters",
            required=False,
            description=desc,
            form=("Nants_telescope",),
            expected_type=float,
            tols=1e-3, # 1 mm
        )
        # --- other stuff ---
        # the below are copied from AIPS memo 117, but could be revised to
        # merge with other sources of data.
        self._gst0 = uvp.UVParameter(
            "gst0",
            required=False,
            description="Greenwich sidereal time at " "midnight on reference date",
            spoof_val=0.0,
            expected_type=float,
        )
        self._rdate = uvp.UVParameter(
            "rdate",
            required=False,
            description="Date for which the GST0 or " "whatever... applies",
            spoof_val="",
            form="str",
        )
        self._earth_omega = uvp.UVParameter(
            "earth_omega",
            required=False,
            description="Earth's rotation rate " "in degrees per day",
            spoof_val=360.985,
            expected_type=float,
        )
        self._dut1 = uvp.UVParameter(
            "dut1",
            required=False,
            description="DUT1 (google it) AIPS 117 " "calls it UT1UTC",
            spoof_val=0.0,
            expected_type=float,
        )
        self._timesys = uvp.UVParameter(
            "timesys",
            required=False,
            description="We only support UTC",
            spoof_val="UTC",
            form="str",
        )
        desc = (
            "FHD thing we do not understand, something about the time "
            "at which the phase center is normal to the chosen UV plane "
            "for phasing"
        )
        self._uvplane_reference_time = uvp.UVParameter(
            "uvplane_reference_time", required=False, description=desc, spoof_val=0
        )
        desc = "Per-antenna and per-frequency equalization coefficients"
        self._eq_coeffs = uvp.UVParameter(
            "eq_coeffs",
            required=False,
            description=desc,
            form=("Nants_telescope", "Nfreqs"),
            expected_type=float,
            spoof_val=1.0,
        )
        desc = "Convention for how to remove eq_coeffs from data"
        self._eq_coeffs_convention = uvp.UVParameter(
            "eq_coeffs_convention",
            required=False,
            description=desc,
            form="str",
            spoof_val="divide",
        )
        desc = (
            "List of strings containing the unique basenames (not the full path) of "
            "input files."
        )
        self._filename = uvp.UVParameter(
            "filename", required=False, description=desc, expected_type=str,
        )
        # Hand off to UVBase to finish setting up the parameters declared above.
        super(UVData, self).__init__()
def _set_flex_spw(self):
"""
Set flex_spw to True, and adjust required parameters.
This method should not be called directly by users; instead it is called
by the file-reading methods to indicate that an object has multiple spectral
windows concatenated together across the frequency axis.
"""
# Mark once-optional arrays as now required
self.flex_spw = True
self._flex_spw_id_array.required = True
# Now make sure that chan_width is set to be an array
self._channel_width.form = ("Nfreqs",)
def _set_scan_numbers(self, override=False):
"""
Set scan numbers by grouping consecutive integrations on the same phase center.
This approach mimics the definition of scan number in measurement sets and is
especially helpful for distinguishing between repeated visits to multiple
phase centers.
Parameters
----------
override : bool
When True, will redefine existing scan numbers. Default is False.
"""
if self.scan_number_array is None or override:
# We are grouping based on integrations on a phase center.
# If this isn't defined, we cannot define scan numbers in this way
# and default to a single "scan".
if self.phase_center_catalog is None:
self.scan_number_array = np.ones((self.Nblts,), dtype=int)
else:
sou_list = list(self.phase_center_catalog.keys())
sou_list.sort()
slice_list = []
# This loops over phase centers, finds contiguous integrations with
# ndimage.label, and then finds the slices to return those contiguous
# integrations with nd.find_objects.
for idx in range(self.Nphase):
sou_id = self.phase_center_catalog[sou_list[idx]]["cat_id"]
slice_list.extend(
nd.find_objects(
nd.label(self.phase_center_id_array == sou_id)[0]
)
)
# Sort by start integration number, which we can extract from
# the start of each slice in the list.
slice_list_ord = sorted(slice_list, key=lambda x: x[0].start)
# Incrementally increase the scan number with each group in
# slice_list_ord
scan_array = np.zeros_like(self.phase_center_id_array)
for ii, slice_scan in enumerate(slice_list_ord):
scan_array[slice_scan] = ii + 1
self.scan_number_array = scan_array
    def _look_in_catalog(
        self,
        cat_name,
        phase_dict=None,
        cat_type=None,
        cat_lon=None,
        cat_lat=None,
        cat_frame=None,
        cat_epoch=None,
        cat_times=None,
        cat_pm_ra=None,
        cat_pm_dec=None,
        cat_dist=None,
        cat_vrad=None,
        ignore_name=False,
    ):
        """
        Check the catalog to see if an existing entry matches provided data.
        This is a helper function for verifying if an entry already exists within
        the catalog, contained within the attribute `phase_center_catalog`.
        Parameters
        ----------
        cat_name : str
            Name of the phase center, which should match a key in
            `phase_center_catalog`.
        phase_dict : dict
            Instead of providing individual parameters, one may provide a dict which
            matches that format used within `phase_center_catalog` for checking for
            existing entries. If used, all other parameters (save for `ignore_name` and
            `cat_name`) are disregarded.
        cat_type : str
            Type of phase center of the entry. Must be one of:
            "sidereal" (fixed RA/Dec),
            "ephem" (RA/Dec that moves with time),
            "driftscan" (fixed az/el position),
            "unphased" (no w-projection, equivalent to `phase_type` == "drift").
        cat_lon : float or ndarray
            Value of the longitudinal coordinate (e.g., RA, Az, l) of the phase center.
            No default, not used when `cat_type="unphased"`. Expected to be a float for
            sidereal and driftscan phase centers, and an ndarray of floats of shape
            (Npts,) for ephem phase centers.
        cat_lat : float or ndarray
            Value of the latitudinal coordinate (e.g., Dec, El, b) of the phase center.
            No default, not used when `cat_type="unphased"`. Expected to be a float for
            sidereal and driftscan phase centers, and an ndarray of floats of shape
            (Npts,) for ephem phase centers.
        cat_frame : str
            Coordinate frame that cat_lon and cat_lat are given in. Only used for
            sidereal and ephem phase centers. Can be any of the several supported frames
            in astropy (a limited list: fk4, fk5, icrs, gcrs, cirs, galactic).
        cat_epoch : str or float
            Epoch of the coordinates, only used when cat_frame = fk4 or fk5. Given
            in unites of fractional years, either as a float or as a string with the
            epoch abbreviation (e.g, Julian epoch 2000.0 would be J2000.0).
        cat_times : ndarray of floats
            Only used when `cat_type="ephem"`. Describes the time for which the values
            of `cat_lon` and `cat_lat` are caclulated, in units of JD. Shape is (Npts,).
        cat_pm_ra : float
            Proper motion in RA, in units of mas/year. Only used for sidereal phase
            centers.
        cat_pm_dec : float
            Proper motion in Dec, in units of mas/year. Only used for sidereal phase
            centers.
        cat_dist : float or ndarray of float
            Distance of the source, in units of pc. Only used for sidereal and ephem
            phase centers. Expected to be a float for sidereal and driftscan phase
            centers, and an ndarray of floats of shape (Npts,) for ephem phase centers.
        cat_vrad : float or ndarray of float
            Radial velocity of the source, in units of km/s. Only used for sidereal and
            ephem phase centers. Expected to be a float for sidereal and driftscan phase
            centers, and an ndarray of floats of shape (Npts,) for ephem phase centers.
        ignore_name : bool
            Nominally, `_look_in_catalog` will only look at entries where `cat_name`
            matches the name of an entry in the catalog. However, by setting this to
            True, the method will search all entries in the catalog and see if any
            match all of the provided data (excluding `cat_name`).
        Returns
        -------
        cat_id : int or None
            The unique ID number for the phase center added to the internal catalog.
            This value is used in the `phase_center_id_array` attribute to denote which
            source a given baseline-time corresponds to. If no catalog entry matches,
            then None is returned.
        cat_diffs : int
            The number of differences between the information provided and the catalog
            entry contained within `phase_center_catalog`. If everything matches, then
            `cat_diffs=0`.
        """
        # 1 marcsec tols
        radian_tols = (0, 1 * 2 * np.pi * 1e-3 / (60.0 * 60.0 * 360.0))
        default_tols = (1e-5, 1e-8)
        cat_id = None
        cat_diffs = 0
        # Emulate the defaults that are set if None is detected for
        # unphased and driftscan types.
        if (cat_type == "unphased") or (cat_type == "driftscan"):
            if cat_lon is None:
                cat_lon = 0.0
            if cat_lat is None:
                cat_lat = np.pi / 2
            if cat_frame is None:
                cat_frame = "altaz"
        if phase_dict is None:
            # Fold the individual keyword arguments into the catalog-entry format.
            phase_dict = {
                "cat_type": cat_type,
                "cat_lon": cat_lon,
                "cat_lat": cat_lat,
                "cat_frame": cat_frame,
                "cat_epoch": cat_epoch,
                "cat_times": cat_times,
                "cat_pm_ra": cat_pm_ra,
                "cat_pm_dec": cat_pm_dec,
                "cat_dist": cat_dist,
                "cat_vrad": cat_vrad,
            }
        if self.multi_phase_center:
            # NOTE: check_dict aliases the live catalog here (no copy).
            check_dict = self.phase_center_catalog
        else:
            # Single-phase-center object: synthesize a one-entry pseudo-catalog
            # from the flat phase_center_* attributes so the same comparison
            # loop below can be used.
            check_dict = {}
            is_phased = self.phase_type == "phased"
            check_dict[self.object_name] = {
                "cat_type": "sidereal" if is_phased else "unphased",
                "cat_lon": self.phase_center_ra if is_phased else 0.0,
                "cat_lat": self.phase_center_dec if is_phased else np.pi / 2.0,
                "cat_frame": self.phase_center_frame if is_phased else "altaz",
                "cat_epoch": self.phase_center_epoch if is_phased else None,
                "cat_times": None,
                "cat_pm_ra": None,
                "cat_pm_dec": None,
                "cat_dist": None,
                "cat_vrad": None,
                "cat_id": 0,
            }
        # Per-key (rtol, atol) tolerances; None means the values must compare
        # exactly equal rather than numerically close.
        tol_dict = {
            "cat_type": None,
            "cat_lon": radian_tols,
            "cat_lat": radian_tols,
            "cat_frame": None,
            "cat_epoch": None,
            "cat_times": default_tols,
            "cat_pm_ra": default_tols,
            "cat_pm_dec": default_tols,
            "cat_dist": default_tols,
            "cat_vrad": default_tols,
        }
        if self.multi_phase_center:
            name_list = list(self.phase_center_catalog.keys())
        else:
            name_list = [self.object_name]
        for name in name_list:
            cat_diffs = 0
            if (cat_name != name) and (not ignore_name):
                continue
            for key in tol_dict.keys():
                if phase_dict.get(key) is not None:
                    if check_dict[name].get(key) is None:
                        cat_diffs += 1
                    elif tol_dict[key] is None:
                        # If no tolerance specified, expect attributes to be identical
                        cat_diffs += phase_dict.get(key) != check_dict[name].get(key)
                    else:
                        # Numpy will throw a Value error if you have two arrays
                        # of different shape, which we can catch to flag that
                        # the two arrays are actually not within tolerance.
                        if np.shape(phase_dict[key]) != np.shape(check_dict[name][key]):
                            cat_diffs += 1
                        else:
                            cat_diffs += not np.allclose(
                                phase_dict[key],
                                check_dict[name][key],
                                tol_dict[key][0],
                                tol_dict[key][1],
                            )
                else:
                    cat_diffs += check_dict[name][key] is not None
            if (cat_diffs == 0) or (cat_name == name):
                # Stop on either a perfect data match or an exact name match
                # (the latter may still carry a nonzero cat_diffs).
                cat_id = check_dict[name]["cat_id"]
                break
        return cat_id, cat_diffs
    def _add_phase_center(
        self,
        cat_name,
        cat_type=None,
        cat_lon=None,
        cat_lat=None,
        cat_frame=None,
        cat_epoch=None,
        cat_times=None,
        cat_pm_ra=None,
        cat_pm_dec=None,
        cat_dist=None,
        cat_vrad=None,
        info_source="user",
        force_update=False,
        cat_id=None,
    ):
        """
        Add an entry to the internal object/source catalog.
        This is a helper function for adding a source to the internal
        catalog, contained within the attribute `phase_center_catalog`.
        Parameters
        ----------
        cat_name : str
            Name of the phase center to be added, must be unique (i.e., not contained
            as a key in the UVData attribute `phase_center_catalog`).
        cat_type : str
            Type of phase center to be added. Must be one of:
            "sidereal" (fixed RA/Dec),
            "ephem" (RA/Dec that moves with time),
            "driftscan" (fixed az/el position),
            "unphased" (no w-projection, equivalent to `phase_type` == "drift").
        cat_lon : float or ndarray
            Value of the longitudinal coordinate (e.g., RA, Az, l) of the phase center.
            No default, not used when `cat_type="unphased"`. Expected to be a float for
            sidereal and driftscan phase centers, and an ndarray of floats of shape
            (Npts,) for ephem phase centers.
        cat_lat : float or ndarray
            Value of the latitudinal coordinate (e.g., Dec, El, b) of the phase center.
            No default, not used when `cat_type="unphased"`. Expected to be a float for
            sidereal and driftscan phase centers, and an ndarray of floats of shape
            (Npts,) for ephem phase centers.
        cat_frame : str
            Coordinate frame that cat_lon and cat_lat are given in. Only used
            for sidereal and ephem targets. Can be any of the several supported frames
            in astropy (a limited list: fk4, fk5, icrs, gcrs, cirs, galactic).
        cat_epoch : str or float
            Epoch of the coordinates, only used when cat_frame = fk4 or fk5. Given
            in unites of fractional years, either as a float or as a string with the
            epoch abbreviation (e.g, Julian epoch 2000.0 would be J2000.0).
        cat_times : ndarray of floats
            Only used when `cat_type="ephem"`. Describes the time for which the values
            of `cat_lon` and `cat_lat` are caclulated, in units of JD. Shape is (Npts,).
        cat_pm_ra : float
            Proper motion in RA, in units of mas/year. Only used for sidereal phase
            centers.
        cat_pm_dec : float
            Proper motion in Dec, in units of mas/year. Only used for sidereal phase
            centers.
        cat_dist : float or ndarray of float
            Distance of the source, in units of pc. Only used for sidereal and ephem
            phase centers. Expected to be a float for sidereal and driftscan phase
            centers, and an ndarray of floats of shape (Npts,) for ephem phase centers.
        cat_vrad : float or ndarray of float
            Radial velocity of the source, in units of km/s. Only used for sidereal and
            ephem phase centers. Expected to be a float for sidereal and driftscan phase
            centers, and an ndarray of floats of shape (Npts,) for ephem phase centers.
        info_source : str
            Optional string describing the source of the information provided. Used
            primarily in UVData to denote when an ephemeris has been supplied by the
            JPL-Horizons system, user-supplied, or read in by one of the various file
            interpreters. Default is 'user'.
        force_update : bool
            Normally, `_add_phase_center` will throw an error if there already exists an
            identically named phase center with different properties. However, if one
            sets `force_update=True`, the method will overwrite the existing entry in
            `phase_center_catalog` with the paramters supplied, preserving only the
            parameters `cat_id` and `cat_name`. Note that doing this will _not_ update
            other atributes of the `UVData` object. Default is False.
        cat_id : int
            An integer signifying the ID number for the phase center, used in the
            `phase_center_id_array` attribute. The default is for the method to assign
            this value automatically.
        Returns
        -------
        cat_id : int
            The unique ID number for the phase center added to the internal catalog.
            This value is used in the `phase_center_id_array` attribute to denote which
            source a given baseline-time corresponds to.
        Raises
        ------
        ValueError
            If attempting to add a non-unique source name, attempting to use the method
            w/ a UVData object where multi_phase_center=False, or if adding a sidereal
            source without coordinates.
        """
        # Check whether we should actually be doing this in the first place
        if not self.multi_phase_center:
            raise ValueError("Cannot add a source if multi_phase_center != True.")
        if not isinstance(cat_name, str):
            raise ValueError("cat_name must be a string.")
        # The catalog name "unphased" is used internally whenever we have to make a
        # block of data as unphased in a data set. To avoid naming collisions, check
        # that someone hasn't tried to use it for any other purpose.
        if (cat_name == "unphased") and (cat_type != "unphased"):
            raise ValueError(
                "The name unphased is reserved. Please choose another value for "
                "cat_name."
            )
        # We currently only have 4 supported types -- make sure the user supplied
        # one of those
        if cat_type not in ["sidereal", "ephem", "driftscan", "unphased"]:
            raise ValueError(
                "Only sidereal, ephem, driftscan or unphased may be used "
                "for cat_type."
            )
        # Both proper motion parameters need to be set together
        if (cat_pm_ra is None) != (cat_pm_dec is None):
            raise ValueError(
                "Must supply values for either both or neither of "
                "cat_pm_ra and cat_pm_dec."
            )
        # If left unset, unphased and driftscan defaulted to Az, El = (0, 90)
        if (cat_type == "unphased") or (cat_type == "driftscan"):
            if cat_lon is None:
                cat_lon = 0.0
            if cat_lat is None:
                cat_lat = np.pi / 2
            if cat_frame is None:
                cat_frame = "altaz"
        # Let's check some case-specific things and make sure all the entries are valid
        if (cat_times is None) and (cat_type == "ephem"):
            raise ValueError("cat_times cannot be None for ephem object.")
        elif (cat_times is not None) and (cat_type != "ephem"):
            raise ValueError("cat_times cannot be used for non-ephem phase centers.")
        if (cat_lon is None) and (cat_type in ["sidereal", "ephem"]):
            raise ValueError("cat_lon cannot be None for sidereal phase centers.")
        if (cat_lat is None) and (cat_type in ["sidereal", "ephem"]):
            raise ValueError("cat_lat cannot be None for sidereal phase centers.")
        if (cat_frame is None) and (cat_type in ["sidereal", "ephem"]):
            raise ValueError("cat_frame cannot be None for sidereal phase centers.")
        elif (cat_frame != "altaz") and (cat_type in ["driftscan", "unphased"]):
            raise ValueError(
                "cat_frame must be either None or 'altaz' when the cat type "
                "is either driftscan or unphased."
            )
        if (cat_type == "unphased") and (cat_lon != 0.0):
            raise ValueError(
                "Catalog entries that are unphased must have cat_lon set to either "
                "0 or None."
            )
        if (cat_type == "unphased") and (cat_lat != (np.pi / 2)):
            raise ValueError(
                "Catalog entries that are unphased must have cat_lat set to either "
                "pi/2 or None."
            )
        if (cat_type != "sidereal") and (
            (cat_pm_ra is not None) or (cat_pm_dec is not None)
        ):
            raise ValueError(
                "Non-zero proper motion values (cat_pm_ra, cat_pm_dec) "
                "for cat types other than sidereal are not supported."
            )
        # Normalize the epoch to a float number of years -- Besselian years for
        # FK4-style frames, Julian years for everything else.
        if isinstance(cat_epoch, Time) or isinstance(cat_epoch, str):
            if cat_frame in ["fk4", "fk4noeterms"]:
                cat_epoch = Time(cat_epoch).byear
            else:
                cat_epoch = Time(cat_epoch).jyear
        elif cat_epoch is not None:
            cat_epoch = float(cat_epoch)
        if cat_type == "ephem":
            # For ephemerides, every per-point array must match cat_times in shape
            cat_times = np.array(cat_times, dtype=float).reshape(-1)
            cshape = cat_times.shape
            try:
                cat_lon = np.array(cat_lon, dtype=float).reshape(cshape)
                cat_lat = np.array(cat_lat, dtype=float).reshape(cshape)
                if cat_dist is not None:
                    cat_dist = np.array(cat_dist, dtype=float).reshape(cshape)
                if cat_vrad is not None:
                    cat_vrad = np.array(cat_vrad, dtype=float).reshape(cshape)
            except ValueError:
                raise ValueError(
                    "Object properties -- lon, lat, pm_ra, pm_dec, dist, vrad -- must "
                    "be of the same size as cat_times for ephem phase centers."
                )
        else:
            # Scalar entries: coerce anything supplied to a plain float
            cat_lon = None if cat_lon is None else float(cat_lon)
            cat_lat = None if cat_lat is None else float(cat_lat)
            cat_pm_ra = None if cat_pm_ra is None else float(cat_pm_ra)
            cat_pm_dec = None if cat_pm_dec is None else float(cat_pm_dec)
            cat_dist = None if cat_dist is None else float(cat_dist)
            cat_vrad = None if cat_vrad is None else float(cat_vrad)
        # Names serve as dict keys, so we need to make sure that they're unique
        if not force_update:
            temp_id, cat_diffs = self._look_in_catalog(
                cat_name,
                cat_type=cat_type,
                cat_lon=cat_lon,
                cat_lat=cat_lat,
                cat_frame=cat_frame,
                cat_epoch=cat_epoch,
                cat_times=cat_times,
                cat_pm_ra=cat_pm_ra,
                cat_pm_dec=cat_pm_dec,
                cat_dist=cat_dist,
                cat_vrad=cat_vrad,
            )
            # If the source does have the same name, check to see if all the
            # atributes match. If so, no problem, go about your business
            if temp_id is not None:
                if cat_diffs == 0:
                    # Everything matches, return the catalog ID of the matching entry
                    return temp_id
                else:
                    raise ValueError(
                        "Cannot add different source with an non-unique name."
                    )
        # We want to create a unique ID for each source, for use in indexing arrays.
        # The logic below ensures that we pick the lowest positive integer that is
        # not currently being used by another source
        used_cat_ids = {
            self.phase_center_catalog[name]["cat_id"]: name
            for name in self.phase_center_catalog.keys()
        }
        if force_update and (cat_name in self.phase_center_catalog.keys()):
            # Keep the existing ID so that phase_center_id_array remains valid
            cat_id = self.phase_center_catalog[cat_name]["cat_id"]
        elif cat_id is None:
            # Nphase + 1 candidates guarantee at least one unused ID exists
            cat_id = int(
                np.arange(self.Nphase + 1)[
                    ~np.isin(np.arange(self.Nphase + 1), list(used_cat_ids.keys()))
                ][0]
            )
        elif cat_id in used_cat_ids.keys():
            raise ValueError(
                "Provided cat_id belongs to another source (%s)." % used_cat_ids[cat_id]
            )
        # If source is unique, begin creating a dictionary for it
        phase_dict = {
            "cat_id": cat_id,
            "cat_type": cat_type,
            "cat_lon": cat_lon,
            "cat_lat": cat_lat,
            "cat_frame": cat_frame,
            "cat_epoch": cat_epoch,
            "cat_times": cat_times,
            "cat_pm_ra": cat_pm_ra,
            "cat_pm_dec": cat_pm_dec,
            "cat_vrad": cat_vrad,
            "cat_dist": cat_dist,
            "info_source": info_source,
        }
        self.phase_center_catalog[cat_name] = phase_dict
        self.Nphase = len(self.phase_center_catalog.keys())
        return cat_id
def _remove_phase_center(self, defunct_name):
"""
Remove an entry from the internal object/source catalog.
Removes an entry from the attribute `phase_center_catalog`. Only allowed when
the UVData object in question is a multi phase center data set (i.e.,
`multi_phase_center=True`).
Parameters
----------
defunct_name : str
Name of the source to be removed
Raises
------
ValueError
If multi_phase_center is not set to True
IndexError
If the name provided is not found as a key in `phase_center_catalog`
"""
if not self.multi_phase_center:
raise ValueError(
"Cannot remove a phase center if multi_phase_center != True."
)
if defunct_name not in self.phase_center_catalog.keys():
raise IndexError("No source by that name contained in the catalog.")
del self.phase_center_catalog[defunct_name]
self.Nphase = len(self.phase_center_catalog.keys())
def _clear_unused_phase_centers(self):
"""
Remove objects dictionaries and names that are no longer in use.
Goes through the `phase_center_catalog` attribute in of a UVData object and
clears out entries that are no longer being used, and appropriately updates
`phase_center_id_array` accordingly. This function is not typically called
by users, but instead is used by other methods.
Raises
------
ValueError
If attempting to call the method when multi_phase_center=False.
"""
if not self.multi_phase_center:
raise ValueError(
"Cannot remove a phase center if multi_phase_center != True."
)
unique_cat_ids = np.unique(self.phase_center_id_array)
defunct_list = []
Nphase = 0
for cat_name in self.phase_center_catalog.keys():
cat_id = self.phase_center_catalog[cat_name]["cat_id"]
if cat_id in unique_cat_ids:
Nphase += 1
else:
defunct_list.append(cat_name)
# Check the number of "good" sources we have -- if we haven't dropped any,
# then we are free to bail, otherwise update the Nphase attribute
if Nphase == self.Nphase:
return
# Time to kill the entries that are no longer in the source stack
for defunct_name in defunct_list:
self._remove_phase_center(defunct_name)
def _check_for_unphased(self):
"""
Check which Nblts are unphased in a multi phase center dataset.
This convenience method returns back a boolean mask to identify which data
along the Blt axis contains unphased objects (which is only applicable when
multi_phase_center=True)
Returns
-------
blt_mask : ndarray of bool
A boolean mask for identifying which elements contain unphased objects
"""
if self.multi_phase_center:
# Check and see if we have any unphased objects, in which case
# their w-values should be zeroed out.
nophase_dict = {
self.phase_center_catalog[name]["cat_id"]: self.phase_center_catalog[
name
]["cat_type"]
== "unphased"
for name in self.phase_center_catalog.keys()
}
# Use dict to construct a bool array
blt_mask = np.array(
[nophase_dict[idx] for idx in self.phase_center_id_array], dtype=bool
)
else:
# If not multi phase center, we just need to check the phase type
blt_mask = np.repeat(self.phase_type == "drift", self.Nblts)
return blt_mask
def rename_phase_center(self, old_name, new_name):
"""
Rename a phase center/catalog entry within a multi phase center data set.
Parameters
----------
old_name : str
Phase center name for the data to be renamed.
new_name : str
New name for the phase center.
Raises
------
ValueError
If attempting to run the method on a non multi phase center data set, if
`old_name` is not found as a key in `phase_center_catalog`, if `new_name`
already exists as a key in `phase_center_catalog`, or if attempting to
name a source "unphased" (which is reserved).
TypeError
If `new_name` is not actually a string.
"""
if not self.multi_phase_center:
raise ValueError(
"Cannot rename a phase center if multi_phase_center != True."
)
if old_name not in self.phase_center_catalog.keys():
raise ValueError("No entry by the name %s in the catalog." % old_name)
if not isinstance(new_name, str):
raise TypeError("Value provided to new_name must be a string.")
if new_name == old_name:
# This is basically just a no-op, so return to user
return
if new_name in self.phase_center_catalog.keys():
raise ValueError(
"Must include a unique name for new_name, %s is already present "
"in phase_center_catalog." % new_name
)
if (new_name == "unphased") and (
self.phase_center_catalog[old_name]["cat_type"] != "unphased"
):
raise ValueError(
"The name unphased is reserved. Please choose another value for "
"new_name."
)
self.phase_center_catalog[new_name] = self.phase_center_catalog[old_name]
self.Nphase = len(self.phase_center_catalog.keys())
self._remove_phase_center(old_name)
    def split_phase_center(self, cat_name, new_name, select_mask, downselect=False):
        """
        Rename the phase center (but preserve other properties) of a subset of data.
        Allows you to rename a subset of the data phased to a particular phase center,
        marked by a different name than the original. Useful when you want to phase to
        one position, but want to differentiate different groups of data (e.g., marking
        every other integration to make jackknifing easier).
        Parameters
        ----------
        cat_name : str
            Name of the phase center to be split.
        new_name : str
            New name for the object.
        select_mask : array_like
            Selection mask for which data should be identified as belonging to the phase
            center labeled by `new_name`. Any array-like able to be used as an index
            is suitable -- the most typical is an array of bool with length `Nblts`,
            or an array of ints within the range (-Nblts, Nblts).
        downselect : bool
            If selecting data that is not marked as belonging to `cat_name`,
            normally an error is thrown. By setting this to True, `select_mask` will
            be modified to exclude data not marked as belonging to `cat_name`.
        Raises
        ------
        ValueError
            If attempting to run the method on a non multi phase center data set, if
            `old_name` is not found as a key in `phase_center_catalog`, if `new_name`
            already exists as a key in `phase_center_catalog`, or if attempting to
            name a source "unphased" (which is reserved). Also raised if `select_mask`
            contains data that doesn't belong to `cat_name`, unless setting
            `downselect` to True.
        IndexError
            If select_mask is not a valid indexing array.
        UserWarning
            If all data for `cat_name` was selected (in which case `rename_phase_center`
            is called instead), or if no valid data was selected.
        """
        # Check to make sure that everything lines up with expectations
        if not self.multi_phase_center:
            raise ValueError(
                "Cannot use split_phase_center on a non-multi phase center data set."
            )
        if not isinstance(new_name, str):
            raise TypeError("Value provided to new_name must be a string.")
        if cat_name not in self.phase_center_catalog.keys():
            raise ValueError("No entry by the name %s in the catalog." % cat_name)
        if new_name in self.phase_center_catalog.keys():
            raise ValueError(
                "The name %s is already found in the catalog, choose another name "
                "for new_name." % new_name
            )
        if (new_name == "unphased") and (
            self.phase_center_catalog[cat_name]["cat_type"] != "unphased"
        ):
            raise ValueError(
                "The name unphased is reserved. Please choose another value for "
                "new_name."
            )
        # Build the complement of select_mask; assigning through select_mask also
        # validates that it is a legal indexing array for an (Nblts,)-shaped array.
        try:
            inv_mask = np.ones(self.Nblts, dtype=bool)
            inv_mask[select_mask] = False
        except IndexError:
            raise IndexError(
                "select_mask must be an array-like, either of ints with shape (Nblts), "
                "or of ints within the range (-Nblts, Nblts)."
            )
        # Now that we know that all the inputs are sensible, lets make sure that
        # the select_mask choice is sensible
        cat_id = self.phase_center_catalog[cat_name]["cat_id"]
        # If we have selected any entries that don't correspond to the cat_id
        # in question, either downselect or raise an error.
        if np.any(cat_id != self.phase_center_id_array[select_mask]):
            if downselect:
                # Intersect the user's selection with the blts actually phased
                # to cat_name, and recompute the complement to match.
                select_mask = np.logical_and(
                    ~inv_mask, cat_id == self.phase_center_id_array
                )
                inv_mask = ~select_mask
            else:
                raise ValueError(
                    "Data selected with select_mask includes that which has not been "
                    "phased to %s. You can fix this by either revising select_mask or "
                    "setting downselect=True." % cat_name
                )
        # Now check for no(-ish) ops
        if np.all(inv_mask):
            # You didn't actually select anything we could change
            warnings.warn(
                "No relevant data selected - %s not added to the data set" % new_name
            )
        elif not np.any(cat_id == self.phase_center_id_array[inv_mask]):
            # No matching catalog IDs found outside the range, so this is really a
            # replace more than a split.
            warnings.warn(
                "All data for %s selected - using rename_phase_center instead of a "
                "split_phase_center." % cat_name
            )
            self.rename_phase_center(cat_name, new_name)
        else:
            # Copy the coordinate info of the original entry into a new catalog
            # entry under new_name, then re-point the selected baseline-times at
            # the new entry's catalog ID.
            temp_dict = self.phase_center_catalog[cat_name]
            cat_id = self._add_phase_center(
                new_name,
                temp_dict["cat_type"],
                cat_lon=temp_dict.get("cat_lon"),
                cat_lat=temp_dict.get("cat_lat"),
                cat_frame=temp_dict.get("cat_frame"),
                cat_epoch=temp_dict.get("cat_epoch"),
                cat_times=temp_dict.get("cat_times"),
                cat_pm_ra=temp_dict.get("cat_pm_ra"),
                cat_pm_dec=temp_dict.get("cat_pm_dec"),
                cat_dist=temp_dict.get("cat_dist"),
                cat_vrad=temp_dict.get("cat_vrad"),
            )
            self.phase_center_id_array[select_mask] = cat_id
def merge_phase_centers(self, catname1, catname2, force_merge=False):
"""
Merge two differently named objects into one within a mutli-phase-ctr data set.
Recombines two different objects into a single catalog entry -- useful if
having previously used `split_phase_center` or when multiple objects with
different names share the same source parameters.
Parameters
----------
catname1 : str
String containing the name of the first phase center. Note that this name
will be preserved in the UVData object.
catname2 : str
String containing the name of the second phase center, which will be merged
into the first phase center. Note that once the merge is complete, all
information about this phase center is removed.
force_merge : bool
Normally, the method will throw an error if the phase center properties
differ for `catname1` and `catname2`. This can be overriden by setting this
to True. Default is False.
Raises
------
ValueError
If catname1 or catname2 are not found in the UVData object, of if their
properties differ (and `force_merge` is not set to True).
UserWarning
If forcing the merge of two objects with different properties.
"""
if not self.multi_phase_center:
raise ValueError(
"Cannot use merge_phase_centers on a non-multi phase center data set."
)
if catname1 not in self.phase_center_catalog.keys():
raise ValueError("No entry by the name %s in the catalog." % catname1)
if catname2 not in self.phase_center_catalog.keys():
raise ValueError("No entry by the name %s in the catalog." % catname2)
temp_dict = self.phase_center_catalog[catname2]
# First, let's check and see if the dict entries are identical
cat_id, cat_diffs = self._look_in_catalog(
catname1,
cat_type=temp_dict["cat_type"],
cat_lon=temp_dict.get("cat_lon"),
cat_lat=temp_dict.get("cat_lat"),
cat_frame=temp_dict.get("cat_frame"),
cat_epoch=temp_dict.get("cat_epoch"),
cat_times=None,
cat_pm_ra=None,
cat_pm_dec=None,
cat_dist=None,
cat_vrad=None,
)
if cat_diffs != 0:
if force_merge:
warnings.warn(
"Forcing %s and %s together, even though their attributes "
"differ" % (catname1, catname2)
)
else:
raise ValueError(
"Attributes of %s and %s differ in phase_center_catalog, which "
"means that they are likely not referring to the same position in "
"the sky. You can ignore this error and force merge_phase_centers "
"to complete by setting force_merge=True, but this should be done "
"with substantial caution." % (catname1, catname2)
)
old_cat_id = self.phase_center_catalog[catname2]["cat_id"]
self.phase_center_id_array[self.phase_center_id_array == old_cat_id] = cat_id
self._remove_phase_center(catname2)
    def print_phase_center_info(
        self, cat_name=None, hms_format=None, return_str=False, print_table=True
    ):
        """
        Print out the details of objects in a mutli-phase-ctr data set.
        Prints out an ASCII table that contains the details of the
        `phase_center_catalog` attribute, which acts as the internal source catalog
        for UVData objects.
        Parameters
        ----------
        cat_name : str
            Optional parameter which, if provided, will cause the method to only return
            information on the phase center with the matching name. Default is to print
            out information on all catalog entries.
        hms_format : bool
            Optional parameter, which if selected, can be used to force coordinates to
            be printed out in Hours-Min-Sec (if set to True) or Deg-Min-Sec (if set to
            False) format. Default is to print out in HMS if all the objects have
            coordinate frames of icrs, gcrs, fk5, fk4, and top; otherwise, DMS format
            is used.
        return_str: bool
            If set to True, the method returns an ASCII string which contains all the
            table infrmation. Default is False.
        print_table : bool
            If set to True, prints the table to the terminal window. Default is True.
        Returns
        -------
        table_str : bool
            If return_str=True, an ASCII string containing the entire table text
        Raises
        ------
        ValueError
            If `cat_name` matches no keys in `phase_center_catalog`.
        """
        # Conversion factors: radians to degrees / arcminutes / arcseconds
        r2d = 180.0 / np.pi
        r2m = 60.0 * 180.0 / np.pi
        r2s = 3600.0 * 180.0 / np.pi
        # Frames whose longitude coordinate is conventionally quoted as RA in hours
        ra_frames = ["icrs", "gcrs", "fk5", "fk4", "topo"]
        if not self.multi_phase_center:
            raise ValueError(
                "Cannot use print_phase_center_info on a "
                "non-multi phase center data set."
            )
        if cat_name is None:
            name_list = list(self.phase_center_catalog.keys())
            dict_list = [self.phase_center_catalog[name] for name in name_list]
        elif cat_name in self.phase_center_catalog.keys():
            name_list = [cat_name]
            dict_list = [self.phase_center_catalog[cat_name]]
        else:
            raise ValueError("No entry by the name %s in the catalog." % cat_name)
        # We want to check and actually see which fields we need to
        # print
        any_lon = any_lat = any_frame = any_epoch = any_times = False
        any_pm_ra = any_pm_dec = any_dist = any_vrad = False
        cat_id_list = []
        for indv_dict in dict_list:
            cat_id_list.append(indv_dict["cat_id"])
            any_lon = any_lon or indv_dict.get("cat_lon") is not None
            any_lat = any_lat or indv_dict.get("cat_lat") is not None
            any_frame = any_frame or indv_dict.get("cat_frame") is not None
            any_epoch = any_epoch or indv_dict.get("cat_epoch") is not None
            any_times = any_times or indv_dict.get("cat_times") is not None
            any_pm_ra = any_pm_ra or indv_dict.get("cat_pm_ra") is not None
            any_pm_dec = any_pm_dec or indv_dict.get("cat_pm_dec") is not None
            any_dist = any_dist or indv_dict.get("cat_dist") is not None
            any_vrad = any_vrad or indv_dict.get("cat_vrad") is not None
            # Fall back to DMS format if any entry is not RA-like (non-RA frame
            # or a driftscan, whose longitude is an azimuth)
            if any_lon and (hms_format is None):
                cat_frame = indv_dict.get("cat_frame")
                cat_type = indv_dict["cat_type"]
                if (cat_frame not in ra_frames) or (cat_type == "driftscan"):
                    hms_format = False
        if hms_format is None:
            hms_format = True
        # Each dict below defines one table column: header line pair ("hdr"),
        # value format ("fmt"), fixed cell width ("field"), and the catalog key
        # the value is pulled from ("name").
        col_list = []
        col_list.append(
            {"hdr": ("ID", "#"), "fmt": "% 4i", "field": " %4s ", "name": "cat_id"}
        )
        col_list.append(
            {
                "hdr": ("Cat Entry", "Name"),
                "fmt": "%12s",
                "field": " %12s ",
                "name": "cat_name",
            }
        )
        col_list.append(
            {"hdr": ("Type", ""), "fmt": "%9s", "field": " %9s ", "name": "cat_type"}
        )
        if any_lon:
            col_list.append(
                {
                    "hdr": ("Az/Lon/RA", "hours" if hms_format else "deg"),
                    "fmt": "% 3i:%02i:%05.2f",
                    "field": (" %12s " if hms_format else " %13s "),
                    "name": "cat_lon",
                }
            )
        if any_lat:
            col_list.append(
                {
                    "hdr": ("El/Lat/Dec", "deg"),
                    "fmt": "%1s%2i:%02i:%05.2f",
                    "field": " %12s ",
                    "name": "cat_lat",
                }
            )
        if any_frame:
            col_list.append(
                {
                    "hdr": ("Frame", ""),
                    "fmt": "%5s",
                    "field": " %5s ",
                    "name": "cat_frame",
                }
            )
        if any_epoch:
            col_list.append(
                {
                    "hdr": ("Epoch", ""),
                    "fmt": "%7s",
                    "field": " %7s ",
                    "name": "cat_epoch",
                }
            )
        if any_times:
            col_list.append(
                {
                    "hdr": (" Ephem Range ", "Start-MJD End-MJD"),
                    "fmt": " %8.2f % 8.2f",
                    "field": " %20s ",
                    "name": "cat_times",
                }
            )
        if any_pm_ra:
            col_list.append(
                {
                    "hdr": ("PM-Ra", "mas/yr"),
                    "fmt": "%.4g",
                    "field": " %6s ",
                    "name": "cat_pm_ra",
                }
            )
        if any_pm_dec:
            col_list.append(
                {
                    "hdr": ("PM-Dec", "mas/yr"),
                    "fmt": "%.4g",
                    "field": " %6s ",
                    "name": "cat_pm_dec",
                }
            )
        if any_dist:
            col_list.append(
                {
                    "hdr": ("Dist", "pc"),
                    "fmt": "%.1e",
                    "field": " %7s ",
                    "name": "cat_dist",
                }
            )
        if any_vrad:
            col_list.append(
                {
                    "hdr": ("V_rad", "km/s"),
                    "fmt": "%.4g",
                    "field": " %6s ",
                    "name": "cat_vrad",
                }
            )
        # Assemble the two header rows and the separator line
        top_str = ""
        bot_str = ""
        for col in col_list:
            top_str += col["field"] % col["hdr"][0]
            bot_str += col["field"] % col["hdr"][1]
        info_str = ""
        info_str += top_str + "\n"
        info_str += bot_str + "\n"
        info_str += ("-" * len(bot_str)) + "\n"
        # We want to print in the order of cat_id
        for idx in np.argsort(cat_id_list):
            tbl_str = ""
            for col in col_list:
                # If we have a "special" field that needs extra handling,
                # take care of that up front
                if col["name"] == "cat_name":
                    temp_val = name_list[idx]
                else:
                    temp_val = dict_list[idx][col["name"]]
                if temp_val is None:
                    temp_str = ""
                elif col["name"] == "cat_lon":
                    # Sexagesimal rendering; dividing by 15 converts degrees to
                    # hours when HMS format is in use
                    temp_val = np.median(temp_val)
                    temp_val /= 15.0 if hms_format else 1.0
                    coord_tuple = (
                        np.mod(temp_val * r2d, 360.0),
                        np.mod(temp_val * r2m, 60.0),
                        np.mod(temp_val * r2s, 60.0),
                    )
                    temp_str = col["fmt"] % coord_tuple
                elif col["name"] == "cat_lat":
                    # Signed sexagesimal rendering (explicit +/- prefix)
                    temp_val = np.median(temp_val)
                    coord_tuple = (
                        "-" if temp_val < 0.0 else "+",
                        np.mod(np.abs(temp_val) * r2d, 360.0),
                        np.mod(np.abs(temp_val) * r2m, 60.0),
                        np.mod(np.abs(temp_val) * r2s, 60.0),
                    )
                    temp_str = col["fmt"] % coord_tuple
                elif col["name"] == "cat_epoch":
                    # "B" prefix for Besselian epochs (fk4-style frames), "J" for Julian
                    use_byrs = dict_list[idx]["cat_frame"] in ["fk4", "fk4noeterms"]
                    temp_val = ("B%6.1f" if use_byrs else "J%6.1f") % temp_val
                    temp_str = col["fmt"] % temp_val
                elif col["name"] == "cat_times":
                    # Report the ephemeris span in MJD (JD - 2400000.5)
                    time_tuple = (
                        np.min(temp_val) - 2400000.5,
                        np.max(temp_val) - 2400000.5,
                    )
                    temp_str = col["fmt"] % time_tuple
                elif (col["name"] == "cat_dist") or (col["name"] == "cat_vrad"):
                    # Ephem entries carry arrays here; summarize with the median
                    temp_val = np.median(temp_val)
                    temp_str = col["fmt"] % temp_val
                else:
                    temp_str = col["fmt"] % temp_val
                tbl_str += col["field"] % temp_str
            info_str += tbl_str + "\n"
        if print_table:
            # We need this extra bit of code to handle trailing whitespace, since
            # otherwise some checks (e.g., doc check on tutorials) will balk
            print(
                "\n".join([line.rstrip() for line in info_str.split("\n")]), end=""
            )  # pragma: nocover
        if return_str:
            return info_str
def _update_phase_center_id(self, cat_name, new_cat_id=None, reserved_ids=None):
"""
Update a phase center with a new catalog ID number.
Parameters
----------
cat_name : str
Name of the phase center, which corresponds to a key in the attribute
`phase_center_catalog`.
new_cat_id : int
Optional argument. If supplied, then the method will attempt to use the
provided value as the new catalog ID, provided that an existing catalog
entry is not already using the same value. If not supplied, then the
method will automatically assign a value.
reserved_ids : array-like in int
Optional argument. An array-like of ints that denotes which ID numbers
are already reserved. Useful for when combining two separate catalogs.
Raises
------
ValueError
If not using the method on a multi-phase-ctr data set, if there's no entry
that matches `cat_name`, or of the value `new_cat_id` is already taken.
"""
if not self.multi_phase_center:
raise ValueError(
"Cannot use _update_phase_center_id on a "
"non-multi phase center data set."
)
if cat_name not in self.phase_center_catalog.keys():
raise ValueError(
"Cannot run _update_phase_center_id: no entry with name %s." % cat_name
)
old_cat_id = self.phase_center_catalog[cat_name]["cat_id"]
used_cat_ids = [] if (reserved_ids is None) else reserved_ids.copy()
for name in self.phase_center_catalog.keys():
if name != cat_name:
used_cat_ids.append(self.phase_center_catalog[name]["cat_id"])
if new_cat_id is None:
# If the old ID is in the reserved list, then we'll need to update it
if old_cat_id not in used_cat_ids:
# Don't need to actually update anything
return
else:
new_cat_id = np.arange(len(used_cat_ids) + 1)[
~np.isin(np.arange(len(used_cat_ids) + 1), used_cat_ids)
][0]
else:
if new_cat_id in used_cat_ids:
raise ValueError("Catalog ID supplied already taken by another source.")
self.phase_center_id_array[
self.phase_center_id_array == old_cat_id
] = new_cat_id
self.phase_center_catalog[cat_name]["cat_id"] = new_cat_id
    def _set_multi_phase_center(self, preserve_phase_center_info=False):
        """
        Set multi_phase_center to True, and adjust required paramteres.
        This method is typically not be called directly by users; instead it is called
        by the file-reading methods to indicate that an object has multiple phase
        centers with in the same data set.
        Parameters
        ----------
        preserve_phase_center_info : bool
            Preserve the source information located in `object_name`, and for phased
            data sets, also `phase_center_ra`, `phase_center_dec`, `phase_center_epoch`
            and `phase_center_frame`. Default is False. Note that setting this to
            False will mean that some required attributes will NOT be correctly set,
            e.g., `phase_center_id_array` -- these will need to be set after calling
            `preserve_phase_center_info` in order for the UVData object to be viable.
        Raises
        ------
        ValueError
            if the telescope_name is not in known telescopes
        """
        # If you have already set this, don't do anything
        if self.multi_phase_center:
            return
        # All multi phase center objects have phase_type="phased", even if they are
        # unphased.
        if self.phase_type == "phased":
            cat_type = "sidereal"
        else:
            self._set_phased()
            cat_type = "unphased"
        self.multi_phase_center = True
        # Mark once-option arrays as now required
        self._phase_center_id_array.required = True
        self._Nphase.required = True
        self._phase_center_catalog.required = True
        # This should technically be required for any phased data set, but for now,
        # we are only gonna make it mandatory for mutli-phase-ctr data sets.
        self._phase_center_app_ra.required = True
        self._phase_center_app_dec.required = True
        self._phase_center_frame_pa.required = True
        self.Nphase = 0
        self.phase_center_catalog = {}
        # The single-source name moves into the catalog; the object itself gets
        # relabeled as holding multiple phase centers.
        cat_name = self.object_name
        self.object_name = "multi"
        if preserve_phase_center_info:
            cat_id = self._add_phase_center(
                cat_name,
                cat_type=cat_type,
                cat_lon=self.phase_center_ra,
                cat_lat=self.phase_center_dec,
                cat_frame=self.phase_center_frame,
                cat_epoch=self.phase_center_epoch,
            )
            # Every baseline-time belonged to the original (single) phase center
            self.phase_center_id_array = np.zeros(self.Nblts, dtype=int) + cat_id
        # Reset the single-phase-center attributes to their multi-phase defaults
        self.phase_center_ra = 0.0
        self.phase_center_dec = 0.0
        if self.phase_center_frame is None:
            self.phase_center_frame = "icrs"
        if self.phase_center_epoch is None:
            self.phase_center_epoch = 2000.0
        if (cat_type == "unphased") and preserve_phase_center_info:
            # If moving from unphased, then we'll fill in app_ra and app_dec in
            # the way that we normally would if this were an "unphased" object.
            self._set_app_coords_helper()
def _set_drift(self):
"""
Set phase_type to 'drift' and adjust required parameters.
This method should not be called directly by users; instead it is called
by phasing methods and file-reading methods to indicate the object has a
`phase_type` of "drift" and define which metadata are required.
"""
self.phase_type = "drift"
self._phase_center_frame.required = False
self._phase_center_ra.required = False
self._phase_center_dec.required = False
self._phase_center_app_ra.required = False
self._phase_center_app_dec.required = False
self._phase_center_frame_pa.required = False
def _set_phased(self):
"""
Set phase_type to 'phased' and adjust required parameters.
This method should not be called directly by users; instead it is called
by phasing methods and file-reading methods to indicate the object has a
`phase_type` of "phased" and define which metadata are required.
"""
self.phase_type = "phased"
self._phase_center_frame.required = True
self._phase_center_ra.required = True
self._phase_center_dec.required = True
self._phase_center_app_ra.required = True
self._phase_center_app_dec.required = True
self._phase_center_frame_pa.required = True
@property
def _data_params(self):
"""List of strings giving the data-like parameters."""
return ["data_array", "nsample_array", "flag_array"]
@property
def data_like_parameters(self):
"""Iterate defined parameters which are data-like (not metadata-like)."""
for key in self._data_params:
if hasattr(self, key):
yield getattr(self, key)
@property
def metadata_only(self):
"""
Property that determines whether this is a metadata only object.
An object is metadata only if data_array, nsample_array and flag_array
are all None.
"""
metadata_only = all(d is None for d in self.data_like_parameters)
for param_name in self._data_params:
getattr(self, "_" + param_name).required = not metadata_only
return metadata_only
def _set_future_array_shapes(self):
"""
Set future_array_shapes to True and adjust required parameters.
This method should not be called directly by users; instead it is called
by file-reading methods and `use_future_array_shapes` to indicate the
`future_array_shapes` is True and define expected parameter shapes.
"""
self.future_array_shapes = True
self._freq_array.form = ("Nfreqs",)
self._channel_width.form = ("Nfreqs",)
for param_name in self._data_params:
getattr(self, "_" + param_name).form = ("Nblts", "Nfreqs", "Npols")
def use_future_array_shapes(self):
"""
Change the array shapes of this object to match the planned future shapes.
This method sets allows users to convert to the planned array shapes changes
before the changes go into effect. This method sets the `future_array_shapes`
parameter on this object to True.
"""
self._set_future_array_shapes()
if not self.metadata_only:
# remove the length-1 spw axis for all data-like parameters
for param_name in self._data_params:
setattr(self, param_name, (getattr(self, param_name))[:, 0, :, :])
# remove the length-1 spw axis for the freq_array
self.freq_array = self.freq_array[0, :]
if not self.flex_spw:
# make channel_width be an array of length Nfreqs rather than a single value
# (not needed with flexible spws because this is already done in that case)
self.channel_width = (
np.zeros(self.Nfreqs, dtype=np.float64) + self.channel_width
)
def use_current_array_shapes(self):
"""
Change the array shapes of this object to match the current future shapes.
This method sets allows users to convert back to the current array shapes.
This method sets the `future_array_shapes` parameter on this object to False.
"""
if not self.flex_spw:
unique_channel_widths = np.unique(self.channel_width)
if unique_channel_widths.size > 1:
raise ValueError(
"channel_width parameter contains multiple unique values, but "
"only one spectral window is present. Cannot collapse "
"channel_width to a single value."
)
self._channel_width.form = ()
self.channel_width = unique_channel_widths[0]
self.future_array_shapes = False
for param_name in self._data_params:
getattr(self, "_" + param_name).form = ("Nblts", 1, "Nfreqs", "Npols")
if not self.metadata_only:
for param_name in self._data_params:
setattr(
self, param_name, (getattr(self, param_name))[:, np.newaxis, :, :]
)
self._freq_array.form = (
1,
"Nfreqs",
)
self.freq_array = self.freq_array[np.newaxis, :]
    def known_telescopes(self):
        """
        Get a list of telescopes known to pyuvdata.

        This is just a shortcut to uvdata.telescopes.known_telescopes()

        Returns
        -------
        list of str
            List of names of known telescopes
        """
        # Delegate to the telescopes module, which owns the registry.
        return uvtel.known_telescopes()
    def set_telescope_params(self, overwrite=False):
        """
        Set telescope related parameters.

        If the telescope_name is in the known_telescopes, set any missing
        telescope-associated parameters (e.g. telescope location) to the value
        for the known telescope.

        Parameters
        ----------
        overwrite : bool
            Option to overwrite existing telescope-associated parameters with
            the values from the known telescope.

        Raises
        ------
        ValueError
            if the telescope_name is not in known telescopes
        """
        # get_telescope returns False (not None) for unknown telescope names.
        telescope_obj = uvtel.get_telescope(self.telescope_name)
        if telescope_obj is not False:
            # Track which parameters we fill in so we can warn about them once.
            params_set = []
            for p in telescope_obj:
                telescope_param = getattr(telescope_obj, p)
                self_param = getattr(self, p)
                # Only touch parameters the telescope defines, and only fill
                # them when missing unless overwrite was requested.
                if telescope_param.value is not None and (
                    overwrite is True or self_param.value is None
                ):
                    telescope_shape = telescope_param.expected_shape(telescope_obj)
                    self_shape = self_param.expected_shape(self)
                    if telescope_shape == self_shape:
                        params_set.append(self_param.name)
                        prop_name = self_param.name
                        setattr(self, prop_name, getattr(telescope_obj, prop_name))
                    else:
                        # expected shapes aren't equal. This can happen
                        # e.g. with diameters,
                        # which is a single value on the telescope object but is
                        # an array of length Nants_telescope on the UVData object
                        # use an assert here because we want an error if this condition
                        # isn't true, but it's really an internal consistency check.
                        # This will error if there are changes to the Telescope
                        # object definition, but nothing that a normal user
                        # does will cause an error
                        assert telescope_shape == () and self_shape != "str"
                        # this parameter is as of this comment most likely a float
                        # since only diameters and antenna positions will probably
                        # trigger this else statement
                        # assign float64 as the type of the array
                        array_val = (
                            np.zeros(self_shape, dtype=np.float64,)
                            + telescope_param.value
                        )
                        params_set.append(self_param.name)
                        prop_name = self_param.name
                        setattr(self, prop_name, array_val)
            if len(params_set) > 0:
                params_set_str = ", ".join(params_set)
                warnings.warn(
                    "{params} is not set. Using known values "
                    "for {telescope_name}.".format(
                        params=params_set_str,
                        telescope_name=telescope_obj.telescope_name,
                    )
                )
        else:
            raise ValueError(
                f"Telescope {self.telescope_name} is not in known_telescopes."
            )
def _calc_single_integration_time(self):
"""
Calculate a single integration time in seconds when not otherwise specified.
This function computes the shortest time difference present in the
time_array, and returns it to be used as the integration time for all
samples.
Returns
-------
int_time : int
integration time in seconds to be assigned to all samples in the data.
"""
# The time_array is in units of days, and integration_time has units of
# seconds, so we need to convert.
return np.diff(np.sort(list(set(self.time_array))))[0] * 86400
def _set_lsts_helper(self):
latitude, longitude, altitude = self.telescope_location_lat_lon_alt_degrees
unique_times, inverse_inds = np.unique(self.time_array, return_inverse=True)
unique_lst_array = uvutils.get_lst_for_time(
unique_times, latitude, longitude, altitude,
)
self.lst_array = unique_lst_array[inverse_inds]
return
def _set_app_coords_helper(self, pa_only=False):
"""
Set values for the apparent coordinate arrays.
This is an internal helper function, which is not designed to be called by
users, but rather individual read/write functions for the UVData object.
Users should use the phase() method for updating/adjusting coordinate values.
Parameters
----------
pa_only : bool, False
Skip the calculation of the apparent RA/Dec, and only calculate the
position angle between `phase_center_frame` and the apparent coordinate
system. Useful for reading in data formats that do not calculate a PA.
"""
if self.phase_type != "phased":
# Uhhh... what do you want me to do? If the dataset isn't phased, there
# isn't an apparent position to calculate. Time to bail, I guess...
return
if pa_only:
app_ra = self.phase_center_app_ra
app_dec = self.phase_center_app_dec
elif self.multi_phase_center:
app_ra = np.zeros(self.Nblts, dtype=float)
app_dec = np.zeros(self.Nblts, dtype=float)
for name in self.phase_center_catalog.keys():
temp_dict = self.phase_center_catalog[name]
select_mask = self.phase_center_id_array == temp_dict["cat_id"]
cat_type = temp_dict["cat_type"]
lon_val = temp_dict.get("cat_lon")
lat_val = temp_dict.get("cat_lat")
epoch = temp_dict.get("cat_epoch")
frame = temp_dict.get("cat_frame")
pm_ra = temp_dict.get("cat_pm_ra")
pm_dec = temp_dict.get("cat_pm_dec")
vrad = temp_dict.get("vrad")
dist = temp_dict.get("cat_dist")
app_ra[select_mask], app_dec[select_mask] = uvutils.calc_app_coords(
lon_val,
lat_val,
frame,
coord_epoch=epoch,
pm_ra=pm_ra,
pm_dec=pm_dec,
vrad=vrad,
dist=dist,
time_array=self.time_array[select_mask],
lst_array=self.lst_array[select_mask],
telescope_loc=self.telescope_location_lat_lon_alt,
coord_type=cat_type,
)
else:
# So this is actually the easier of the two cases -- just use the object
# properties to fill in the relevant data
app_ra, app_dec = uvutils.calc_app_coords(
self.phase_center_ra,
self.phase_center_dec,
self.phase_center_frame,
coord_epoch=self.phase_center_epoch,
time_array=self.time_array,
lst_array=self.lst_array,
telescope_loc=self.telescope_location_lat_lon_alt,
coord_type="sidereal",
)
# Now that we have the apparent coordinates sorted out, we can figure out what
# it is we want to do with the position angle
frame_pa = uvutils.calc_frame_pos_angle(
self.time_array,
app_ra,
app_dec,
self.telescope_location_lat_lon_alt,
self.phase_center_frame,
ref_epoch=self.phase_center_epoch,
)
self.phase_center_app_ra = app_ra
self.phase_center_app_dec = app_dec
self.phase_center_frame_pa = frame_pa
def set_lsts_from_time_array(self, background=False):
"""Set the lst_array based from the time_array.
Parameters
----------
background : bool, False
When set to True, start the calculation on a threading.Thread in the
background and return the thread to the user.
Returns
-------
proc : None or threading.Thread instance
When background is set to True, a thread is returned which must be
joined before the lst_array exists on the UVData object.
"""
if not background:
self._set_lsts_helper()
return
else:
proc = threading.Thread(target=self._set_lsts_helper)
proc.start()
return proc
def _check_flex_spw_contiguous(self):
"""
Check if the spectral windows are contiguous for flex_spw datasets.
This checks the flex_spw_id_array to make sure that all channels for each
spectral window are together in one block, versus being interspersed (e.g.,
channel #1 and #3 is in spw #1, channels #2 and #4 are in spw #2). In theory,
UVH5 and UVData objects can handle this, but MIRIAD, MIR, UVFITS, and MS file
formats cannot, so we just consider it forbidden.
"""
if self.flex_spw:
exp_spw_ids = np.unique(self.spw_array)
# This is an internal consistency check to make sure that the indexes match
# up as expected -- this shouldn't error unless someone is mucking with
# settings they shouldn't be.
assert np.all(np.unique(self.flex_spw_id_array) == exp_spw_ids)
n_breaks = np.sum(self.flex_spw_id_array[1:] != self.flex_spw_id_array[:-1])
if (n_breaks + 1) != self.Nspws:
raise ValueError(
"Channels from different spectral windows are interspersed with "
"one another, rather than being grouped together along the "
"frequency axis. Most file formats do not support such "
"non-grouping of data."
)
else:
# If this isn't a flex_spw data set, then there is only 1 spectral window,
# which means that the check always passes
pass
return True
    def _check_freq_spacing(self, raise_errors=True):
        """
        Check if frequencies are evenly spaced and separated by their channel width.

        This is a requirement for writing uvfits & miriad files.

        Parameters
        ----------
        raise_errors : bool
            Option to raise errors if the various checks do not pass.

        Returns
        -------
        spacing_error : bool
            Flag that channel spacings or channel widths are not equal.
        chanwidth_error : bool
            Flag that channel spacing does not match channel width.
        """
        spacing_error = False
        chanwidth_error = False
        # In current shapes, freq_array carries a leading length-1 spw axis.
        if self.future_array_shapes:
            freq_spacing = np.diff(self.freq_array)
            freq_array_use = self.freq_array
        else:
            freq_spacing = np.diff(self.freq_array[0])
            freq_array_use = self.freq_array[0]
        if self.Nfreqs == 1:
            # Skip all of this if there is only 1 channel
            pass
        elif self.flex_spw:
            # Check to make sure that the flexible spectral window has indicies set up
            # correctly (grouped together) for this check
            self._check_flex_spw_contiguous()
            diff_chanwidth = np.diff(self.channel_width)
            freq_dir = []
            # We want to grab unique spw IDs, in the order that they appear in the data
            select_mask = np.append((np.diff(self.flex_spw_id_array) != 0), True)
            for idx in self.flex_spw_id_array[select_mask]:
                # Record the sign of the frequency spacing (ascending or
                # descending) separately per spectral window.
                chan_mask = self.flex_spw_id_array == idx
                freq_dir += [
                    np.sign(np.mean(np.diff(freq_array_use[chan_mask])))
                ] * np.sum(chan_mask)
            # Pop off the first entry, since the above arrays are diff'd
            # (and thus one element shorter)
            freq_dir = np.array(freq_dir[1:])
            # Ignore cases where looking at the boundaries of spectral windows
            bypass_check = self.flex_spw_id_array[1:] != self.flex_spw_id_array[:-1]
            if not np.all(
                np.logical_or(
                    bypass_check,
                    np.isclose(
                        diff_chanwidth,
                        0.0,
                        rtol=self._freq_array.tols[0],
                        atol=self._freq_array.tols[1],
                    ),
                )
            ):
                spacing_error = True
            if not np.all(
                np.logical_or(
                    bypass_check,
                    np.isclose(
                        freq_spacing,
                        self.channel_width[1:] * freq_dir,
                        rtol=self._freq_array.tols[0],
                        atol=self._freq_array.tols[1],
                    ),
                )
            ):
                chanwidth_error = True
        else:
            # Single window: all spacings must agree, and (within tolerance)
            # equal the channel width with the correct sign.
            freq_dir = np.sign(np.mean(freq_spacing))
            if not np.isclose(
                np.min(freq_spacing),
                np.max(freq_spacing),
                rtol=self._freq_array.tols[0],
                atol=self._freq_array.tols[1],
            ):
                spacing_error = True
            if self.future_array_shapes:
                # channel_width is per-channel here, so also require it uniform.
                if not np.isclose(
                    np.min(self.channel_width),
                    np.max(self.channel_width),
                    rtol=self._freq_array.tols[0],
                    atol=self._freq_array.tols[1],
                ):
                    spacing_error = True
                else:
                    if not np.isclose(
                        np.mean(freq_spacing),
                        np.mean(self.channel_width) * freq_dir,
                        rtol=self._channel_width.tols[0],
                        atol=self._channel_width.tols[1],
                    ):
                        chanwidth_error = True
            else:
                if not np.isclose(
                    np.mean(freq_spacing),
                    self.channel_width * freq_dir,
                    rtol=self._channel_width.tols[0],
                    atol=self._channel_width.tols[1],
                ):
                    chanwidth_error = True
        if raise_errors and spacing_error:
            raise ValueError(
                "The frequencies are not evenly spaced (probably "
                "because of a select operation) or has differing "
                "values of channel widths. Some file formats "
                "(e.g. uvfits, miriad) and methods (frequency_average) "
                "do not support unevenly spaced frequencies."
            )
        if raise_errors and chanwidth_error:
            raise ValueError(
                "The frequencies are separated by more than their "
                "channel width (probably because of a select operation). "
                "Some file formats (e.g. uvfits, miriad) and "
                "methods (frequency_average) do not support "
                "frequencies that are spaced by more than their "
                "channel width."
            )
        return spacing_error, chanwidth_error
def _calc_nants_data(self):
"""Calculate the number of antennas from ant_1_array and ant_2_array arrays."""
return int(np.union1d(self.ant_1_array, self.ant_2_array).size)
    def check(
        self,
        check_extra=True,
        run_check_acceptability=True,
        check_freq_spacing=False,
        strict_uvw_antpos_check=False,
        allow_flip_conj=False,
    ):
        """
        Add some extra checks on top of checks on UVBase class.

        Check that required parameters exist. Check that parameters have
        appropriate shapes and optionally that the values are acceptable.

        Parameters
        ----------
        check_extra : bool
            If true, check all parameters, otherwise only check required parameters.
        run_check_acceptability : bool
            Option to check if values in parameters are acceptable.
        check_freq_spacing : bool
            Option to check if frequencies are evenly spaced and the spacing is
            equal to their channel_width. This is not required for UVData
            objects in general but is required to write to uvfits and miriad files.
        strict_uvw_antpos_check : bool
            Option to raise an error rather than a warning if the check that
            uvws match antenna positions does not pass.
        allow_flip_conj : bool
            If set to True, and the UVW coordinates do not match antenna positions,
            check and see if flipping the conjugation of the baselines (i.e, multiplying
            the UVWs by -1) resolves the apparent discrepancy -- and if it does, fix
            the apparent conjugation error in `uvw_array` and `data_array`. Default is
            False.

        Returns
        -------
        bool
            True if check passes

        Raises
        ------
        ValueError
            if parameter shapes or types are wrong or do not have acceptable
            values (if run_check_acceptability is True)
        """
        # set the phase type based on object's value: this updates which
        # parameters are marked required before running the basic UVBase check
        if self.phase_type == "phased":
            self._set_phased()
        elif self.phase_type == "drift":
            self._set_drift()
        else:
            raise ValueError('Phase type must be either "phased" or "drift"')
        # first run the basic check from UVBase
        super(UVData, self).check(
            check_extra=check_extra, run_check_acceptability=run_check_acceptability
        )
        # Check internal consistency of numbers which don't explicitly correspond
        # to the shape of another array.
        if self.Nants_data != self._calc_nants_data():
            raise ValueError(
                "Nants_data must be equal to the number of unique "
                "values in ant_1_array and ant_2_array"
            )
        if self.Nbls != len(np.unique(self.baseline_array)):
            raise ValueError(
                "Nbls must be equal to the number of unique "
                "baselines in the data_array"
            )
        if self.Ntimes != len(np.unique(self.time_array)):
            raise ValueError(
                "Ntimes must be equal to the number of unique "
                "times in the time_array"
            )
        # require that all entries in ant_1_array and ant_2_array exist in
        # antenna_numbers
        if not set(np.unique(self.ant_1_array)).issubset(self.antenna_numbers):
            raise ValueError("All antennas in ant_1_array must be in antenna_numbers.")
        if not set(np.unique(self.ant_2_array)).issubset(self.antenna_numbers):
            raise ValueError("All antennas in ant_2_array must be in antenna_numbers.")
        # issue warning if extra_keywords keys are longer than 8 characters
        # (the FITS keyword-length limit)
        for key in self.extra_keywords.keys():
            if len(key) > 8:
                warnings.warn(
                    "key {key} in extra_keywords is longer than 8 "
                    "characters. It will be truncated to 8 if written "
                    "to uvfits or miriad file formats.".format(key=key)
                )
        # issue warning if extra_keywords values are lists, arrays or dicts
        for key, value in self.extra_keywords.items():
            if isinstance(value, (list, dict, np.ndarray)):
                warnings.warn(
                    "{key} in extra_keywords is a list, array or dict, "
                    "which will raise an error when writing uvfits or "
                    "miriad file types".format(key=key)
                )
        if run_check_acceptability:
            # check that the uvws make sense given the antenna positions
            # make a metadata only copy of this object to properly calculate uvws
            temp_obj = self.copy(metadata_only=True)
            if temp_obj.phase_center_frame is not None:
                output_phase_frame = temp_obj.phase_center_frame
            else:
                output_phase_frame = "icrs"
            # suppress any warnings from the recomputation itself; only the
            # comparison below should produce user-facing messages
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                temp_obj.set_uvws_from_antenna_positions(
                    allow_phasing=True, output_phase_frame=output_phase_frame,
                )
            if not np.allclose(temp_obj.uvw_array, self.uvw_array, atol=1):
                max_diff = np.max(np.abs(temp_obj.uvw_array - self.uvw_array))
                if allow_flip_conj and np.allclose(
                    -temp_obj.uvw_array, self.uvw_array, atol=1
                ):
                    warnings.warn(
                        "UVW orientation appears to be flipped, attempting to "
                        "fix by changing conjugation of baselines."
                    )
                    self.uvw_array *= -1
                    self.data_array = np.conj(self.data_array)
                elif not strict_uvw_antpos_check:
                    warnings.warn(
                        "The uvw_array does not match the expected values given "
                        "the antenna positions. The largest discrepancy is "
                        f"{max_diff} meters. This is a fairly common situation "
                        "but might indicate an error in the antenna positions, "
                        "the uvws or the phasing."
                    )
                else:
                    raise ValueError(
                        "The uvw_array does not match the expected values given "
                        "the antenna positions. The largest discrepancy is "
                        f"{max_diff} meters."
                    )
            # check auto and cross-corrs have sensible uvws
            autos = np.isclose(self.ant_1_array - self.ant_2_array, 0.0)
            if not np.all(
                np.isclose(
                    self.uvw_array[autos],
                    0.0,
                    rtol=self._uvw_array.tols[0],
                    atol=self._uvw_array.tols[1],
                )
            ):
                raise ValueError(
                    "Some auto-correlations have non-zero uvw_array coordinates."
                )
            if np.any(
                np.isclose(
                    # this line used to use np.linalg.norm but it turns out
                    # squaring and sqrt is slightly more efficient unless the array
                    # is "very large".
                    np.sqrt(
                        self.uvw_array[~autos, 0] ** 2
                        + self.uvw_array[~autos, 1] ** 2
                        + self.uvw_array[~autos, 2] ** 2
                    ),
                    0.0,
                    rtol=self._uvw_array.tols[0],
                    atol=self._uvw_array.tols[1],
                )
            ):
                raise ValueError(
                    "Some cross-correlations have near-zero uvw_array magnitudes."
                )
        if check_freq_spacing:
            self._check_freq_spacing()
        return True
    def copy(self, metadata_only=False):
        """
        Make and return a copy of the UVData object.

        Parameters
        ----------
        metadata_only : bool
            If True, only copy the metadata of the object.

        Returns
        -------
        UVData
            Copy of self.
        """
        if not metadata_only:
            return super(UVData, self).copy()
        else:
            uv = UVData()
            # include all attributes, not just UVParameter ones.
            for attr in self.__iter__(uvparams_only=False):
                # skip properties
                if isinstance(getattr(type(self), attr, None), property):
                    continue
                # skip data like parameters
                # parameter names have a leading underscore we want to ignore
                if attr.lstrip("_") in self._data_params:
                    continue
                setattr(uv, attr, copy.deepcopy(getattr(self, attr)))
            # A fresh UVData() starts with current-shape parameter forms, so
            # re-apply the future-shape forms if this object uses them.
            if uv.future_array_shapes:
                for param_name in uv._data_params:
                    getattr(uv, "_" + param_name).form = ("Nblts", "Nfreqs", "Npols")
            return uv
    def baseline_to_antnums(self, baseline):
        """
        Get the antenna numbers corresponding to a given baseline number.

        Parameters
        ----------
        baseline : int or array_like of int
            baseline number

        Returns
        -------
        int or array_like of int
            first antenna number(s)
        int or array_like of int
            second antenna number(s)
        """
        # Delegate to utils; the decoding depends on the total antenna count.
        return uvutils.baseline_to_antnums(baseline, self.Nants_telescope)
    def antnums_to_baseline(self, ant1, ant2, attempt256=False):
        """
        Get the baseline number corresponding to two given antenna numbers.

        Parameters
        ----------
        ant1 : int or array_like of int
            first antenna number
        ant2 : int or array_like of int
            second antenna number
        attempt256 : bool
            Option to try to use the older 256 standard used in many uvfits files
            (will use 2048 standard if there are more than 256 antennas).

        Returns
        -------
        int or array of int
            baseline number corresponding to the two antenna numbers.
        """
        # Delegate to utils; the encoding depends on the total antenna count.
        return uvutils.antnums_to_baseline(
            ant1, ant2, self.Nants_telescope, attempt256=attempt256
        )
def antpair2ind(self, ant1, ant2=None, ordered=True):
"""
Get indices along the baseline-time axis for a given antenna pair.
This will search for either the key as specified, or the key and its
conjugate.
Parameters
----------
ant1, ant2 : int
Either an antenna-pair key, or key expanded as arguments,
e.g. antpair2ind( (10, 20) ) or antpair2ind(10, 20)
ordered : bool
If True, search for antpair as provided, else search for it and
its conjugate.
Returns
-------
inds : ndarray of int-64
indices of the antpair along the baseline-time axis.
"""
# check for expanded antpair or key
if ant2 is None:
if not isinstance(ant1, tuple):
raise ValueError(
"antpair2ind must be fed an antpair tuple "
"or expand it as arguments"
)
ant2 = ant1[1]
ant1 = ant1[0]
else:
if not isinstance(ant1, (int, np.integer)):
raise ValueError(
"antpair2ind must be fed an antpair tuple or "
"expand it as arguments"
)
if not isinstance(ordered, (bool, np.bool_)):
raise ValueError("ordered must be a boolean")
# if getting auto-corr, ordered must be True
if ant1 == ant2:
ordered = True
# get indices
inds = np.where((self.ant_1_array == ant1) & (self.ant_2_array == ant2))[0]
if ordered:
return inds
else:
ind2 = np.where((self.ant_1_array == ant2) & (self.ant_2_array == ant1))[0]
inds = np.asarray(np.append(inds, ind2), dtype=np.int64)
return inds
    def _key2inds(self, key):
        """
        Interpret user specified key as antenna pair and/or polarization.

        Parameters
        ----------
        key : tuple of int
            Identifier of data. Key can be length 1, 2, or 3:

            if len(key) == 1:
                if (key < 5) or (type(key) is str): interpreted as a
                polarization number/name, return all blts for that pol.
                else: interpreted as a baseline number. Return all times and
                polarizations for that baseline.

            if len(key) == 2: interpreted as an antenna pair. Return all
            times and pols for that baseline.

            if len(key) == 3: interpreted as antenna pair and pol (ant1, ant2, pol).
            Return all times for that baseline, pol. pol may be a string.

        Returns
        -------
        blt_ind1 : ndarray of int
            blt indices for antenna pair.
        blt_ind2 : ndarray of int
            blt indices for conjugate antenna pair.
            Note if a cross-pol baseline is requested, the polarization will
            also be reversed so the appropriate correlations are returned.
            e.g. asking for (1, 2, 'xy') may return conj(2, 1, 'yx'), which
            is equivalent to the requesting baseline. See utils.conj_pol() for
            complete conjugation mapping.
        pol_ind : tuple of ndarray of int
            polarization indices for blt_ind1 and blt_ind2
        """
        key = uvutils._get_iterable(key)
        if type(key) is str:
            # Single string given, assume it is polarization
            pol_ind1 = np.where(
                self.polarization_array
                == uvutils.polstr2num(key, x_orientation=self.x_orientation)
            )[0]
            if len(pol_ind1) > 0:
                blt_ind1 = np.arange(self.Nblts, dtype=np.int64)
                blt_ind2 = np.array([], dtype=np.int64)
                pol_ind2 = np.array([], dtype=np.int64)
                pol_ind = (pol_ind1, pol_ind2)
            else:
                raise KeyError("Polarization {pol} not found in data.".format(pol=key))
        elif len(key) == 1:
            key = key[0]  # For simplicity
            if isinstance(key, Iterable):
                # Nested tuple. Call function again.
                blt_ind1, blt_ind2, pol_ind = self._key2inds(key)
            elif key < 5:
                # Small number, assume it is a polarization number a la AIPS memo
                pol_ind1 = np.where(self.polarization_array == key)[0]
                if len(pol_ind1) > 0:
                    blt_ind1 = np.arange(self.Nblts)
                    blt_ind2 = np.array([], dtype=np.int64)
                    pol_ind2 = np.array([], dtype=np.int64)
                    pol_ind = (pol_ind1, pol_ind2)
                else:
                    raise KeyError(
                        "Polarization {pol} not found in data.".format(pol=key)
                    )
            else:
                # Larger number, assume it is a baseline number
                # also look up the conjugate baseline number (antennas swapped)
                inv_bl = self.antnums_to_baseline(
                    self.baseline_to_antnums(key)[1], self.baseline_to_antnums(key)[0]
                )
                blt_ind1 = np.where(self.baseline_array == key)[0]
                blt_ind2 = np.where(self.baseline_array == inv_bl)[0]
                if len(blt_ind1) + len(blt_ind2) == 0:
                    raise KeyError("Baseline {bl} not found in data.".format(bl=key))
                if len(blt_ind1) > 0:
                    pol_ind1 = np.arange(self.Npols)
                else:
                    pol_ind1 = np.array([], dtype=np.int64)
                if len(blt_ind2) > 0:
                    # conjugate baselines need the conjugated polarization order
                    try:
                        pol_ind2 = uvutils.reorder_conj_pols(self.polarization_array)
                    except ValueError:
                        if len(blt_ind1) == 0:
                            raise KeyError(
                                f"Baseline {key} not found for polarization "
                                "array in data."
                            )
                        else:
                            pol_ind2 = np.array([], dtype=np.int64)
                            blt_ind2 = np.array([], dtype=np.int64)
                else:
                    pol_ind2 = np.array([], dtype=np.int64)
                pol_ind = (pol_ind1, pol_ind2)
        elif len(key) == 2:
            # Key is an antenna pair
            blt_ind1 = self.antpair2ind(key[0], key[1])
            blt_ind2 = self.antpair2ind(key[1], key[0])
            if len(blt_ind1) + len(blt_ind2) == 0:
                raise KeyError("Antenna pair {pair} not found in data".format(pair=key))
            if len(blt_ind1) > 0:
                pol_ind1 = np.arange(self.Npols)
            else:
                pol_ind1 = np.array([], dtype=np.int64)
            if len(blt_ind2) > 0:
                try:
                    pol_ind2 = uvutils.reorder_conj_pols(self.polarization_array)
                except ValueError:
                    if len(blt_ind1) == 0:
                        raise KeyError(
                            f"Baseline {key} not found for polarization array in data."
                        )
                    else:
                        pol_ind2 = np.array([], dtype=np.int64)
                        blt_ind2 = np.array([], dtype=np.int64)
            else:
                pol_ind2 = np.array([], dtype=np.int64)
            pol_ind = (pol_ind1, pol_ind2)
        elif len(key) == 3:
            # Key is an antenna pair + pol
            blt_ind1 = self.antpair2ind(key[0], key[1])
            blt_ind2 = self.antpair2ind(key[1], key[0])
            if len(blt_ind1) + len(blt_ind2) == 0:
                raise KeyError(
                    "Antenna pair {pair} not found in "
                    "data".format(pair=(key[0], key[1]))
                )
            if type(key[2]) is str:
                # pol is str
                # conjugate baselines use the conjugated polarization string
                if len(blt_ind1) > 0:
                    pol_ind1 = np.where(
                        self.polarization_array
                        == uvutils.polstr2num(key[2], x_orientation=self.x_orientation)
                    )[0]
                else:
                    pol_ind1 = np.array([], dtype=np.int64)
                if len(blt_ind2) > 0:
                    pol_ind2 = np.where(
                        self.polarization_array
                        == uvutils.polstr2num(
                            uvutils.conj_pol(key[2]), x_orientation=self.x_orientation
                        )
                    )[0]
                else:
                    pol_ind2 = np.array([], dtype=np.int64)
            else:
                # polarization number a la AIPS memo
                if len(blt_ind1) > 0:
                    pol_ind1 = np.where(self.polarization_array == key[2])[0]
                else:
                    pol_ind1 = np.array([], dtype=np.int64)
                if len(blt_ind2) > 0:
                    pol_ind2 = np.where(
                        self.polarization_array == uvutils.conj_pol(key[2])
                    )[0]
                else:
                    pol_ind2 = np.array([], dtype=np.int64)
            pol_ind = (pol_ind1, pol_ind2)
            if len(blt_ind1) * len(pol_ind[0]) + len(blt_ind2) * len(pol_ind[1]) == 0:
                raise KeyError(
                    "Polarization {pol} not found in data.".format(pol=key[2])
                )
        # Catch autos
        if np.array_equal(blt_ind1, blt_ind2):
            blt_ind2 = np.array([], dtype=np.int64)
        return (blt_ind1, blt_ind2, pol_ind)
    def _smart_slicing(
        self, data, ind1, ind2, indp, squeeze="default", force_copy=False
    ):
        """
        Quickly get the relevant section of a data-like array.

        Used in get_data, get_flags and get_nsamples.

        Parameters
        ----------
        data : ndarray
            4-dimensional array shaped like self.data_array
        ind1 : array_like of int
            blt indices for antenna pair (e.g. from self._key2inds)
        ind2 : array_like of int
            blt indices for conjugate antenna pair. (e.g. from self._key2inds)
        indp : tuple array_like of int
            polarization indices for ind1 and ind2 (e.g. from self._key2inds)
        squeeze : str
            string specifying how to squeeze the returned array. Options are:
            'default': squeeze pol and spw dimensions if possible;
            'none': no squeezing of resulting numpy array;
            'full': squeeze all length 1 dimensions.
        force_copy : bool
            Option to explicitly make a copy of the data.

        Returns
        -------
        ndarray
            copy (or if possible, a read-only view) of relevant section of data
        """
        # Determine whether each polarization index set is regularly spaced;
        # regular spacing lets us use basic slicing (a view) instead of fancy
        # indexing (a copy).
        p_reg_spaced = [False, False]
        p_start = [0, 0]
        p_stop = [0, 0]
        dp = [1, 1]
        for i, pi in enumerate(indp):
            if len(pi) == 0:
                continue
            if len(set(np.ediff1d(pi))) <= 1:
                p_reg_spaced[i] = True
                p_start[i] = pi[0]
                p_stop[i] = pi[-1] + 1
                if len(pi) != 1:
                    dp[i] = pi[1] - pi[0]
        if len(ind2) == 0:
            # only unconjugated baselines
            if len(set(np.ediff1d(ind1))) <= 1:
                # blt indices are regularly spaced: slice directly
                blt_start = ind1[0]
                blt_stop = ind1[-1] + 1
                if len(ind1) == 1:
                    dblt = 1
                else:
                    dblt = ind1[1] - ind1[0]
                if p_reg_spaced[0]:
                    if self.future_array_shapes:
                        out = data[
                            blt_start:blt_stop:dblt, :, p_start[0] : p_stop[0] : dp[0]
                        ]
                    else:
                        out = data[
                            blt_start:blt_stop:dblt,
                            :,
                            :,
                            p_start[0] : p_stop[0] : dp[0],
                        ]
                else:
                    if self.future_array_shapes:
                        out = data[blt_start:blt_stop:dblt, :, indp[0]]
                    else:
                        out = data[blt_start:blt_stop:dblt, :, :, indp[0]]
            else:
                out = data[ind1]
                if p_reg_spaced[0]:
                    if self.future_array_shapes:
                        out = out[:, :, p_start[0] : p_stop[0] : dp[0]]
                    else:
                        out = out[:, :, :, p_start[0] : p_stop[0] : dp[0]]
                else:
                    if self.future_array_shapes:
                        out = out[:, :, indp[0]]
                    else:
                        out = out[:, :, :, indp[0]]
        elif len(ind1) == 0:
            # only conjugated baselines: same slicing logic, but conjugate the
            # result and use the conjugate polarization indices
            if len(set(np.ediff1d(ind2))) <= 1:
                blt_start = ind2[0]
                blt_stop = ind2[-1] + 1
                if len(ind2) == 1:
                    dblt = 1
                else:
                    dblt = ind2[1] - ind2[0]
                if p_reg_spaced[1]:
                    if self.future_array_shapes:
                        out = np.conj(
                            data[
                                blt_start:blt_stop:dblt,
                                :,
                                p_start[1] : p_stop[1] : dp[1],
                            ]
                        )
                    else:
                        out = np.conj(
                            data[
                                blt_start:blt_stop:dblt,
                                :,
                                :,
                                p_start[1] : p_stop[1] : dp[1],
                            ]
                        )
                else:
                    if self.future_array_shapes:
                        out = np.conj(data[blt_start:blt_stop:dblt, :, indp[1]])
                    else:
                        out = np.conj(data[blt_start:blt_stop:dblt, :, :, indp[1]])
            else:
                out = data[ind2]
                if p_reg_spaced[1]:
                    if self.future_array_shapes:
                        out = np.conj(out[:, :, p_start[1] : p_stop[1] : dp[1]])
                    else:
                        out = np.conj(out[:, :, :, p_start[1] : p_stop[1] : dp[1]])
                else:
                    if self.future_array_shapes:
                        out = np.conj(out[:, :, indp[1]])
                    else:
                        out = np.conj(out[:, :, :, indp[1]])
        else:
            # both conjugated and unconjugated baselines
            out = (data[ind1], np.conj(data[ind2]))
            if p_reg_spaced[0] and p_reg_spaced[1]:
                if self.future_array_shapes:
                    out = np.append(
                        out[0][:, :, p_start[0] : p_stop[0] : dp[0]],
                        out[1][:, :, p_start[1] : p_stop[1] : dp[1]],
                        axis=0,
                    )
                else:
                    out = np.append(
                        out[0][:, :, :, p_start[0] : p_stop[0] : dp[0]],
                        out[1][:, :, :, p_start[1] : p_stop[1] : dp[1]],
                        axis=0,
                    )
            else:
                if self.future_array_shapes:
                    out = np.append(
                        out[0][:, :, indp[0]], out[1][:, :, indp[1]], axis=0
                    )
                else:
                    out = np.append(
                        out[0][:, :, :, indp[0]], out[1][:, :, :, indp[1]], axis=0
                    )
        if squeeze == "full":
            out = np.squeeze(out)
        elif squeeze == "default":
            if self.future_array_shapes:
                if out.shape[2] == 1:
                    # one polarization dimension
                    out = np.squeeze(out, axis=2)
            else:
                if out.shape[3] == 1:
                    # one polarization dimension
                    out = np.squeeze(out, axis=3)
                if out.shape[1] == 1:
                    # one spw dimension
                    out = np.squeeze(out, axis=1)
        elif squeeze != "none":
            raise ValueError(
                '"' + str(squeeze) + '" is not a valid option for squeeze.'
                'Only "default", "none", or "full" are allowed.'
            )
        if force_copy:
            out = np.array(out)
        elif out.base is not None:
            # if out is a view rather than a copy, make it read-only
            out.flags.writeable = False
        return out
def get_ants(self):
"""
Get the unique antennas that have data associated with them.
Returns
-------
ndarray of int
Array of unique antennas with data associated with them.
"""
return np.unique(np.append(self.ant_1_array, self.ant_2_array))
def get_baseline_nums(self):
"""
Get the unique baselines that have data associated with them.
Returns
-------
ndarray of int
Array of unique baselines with data associated with them.
"""
return np.unique(self.baseline_array)
def get_antpairs(self):
"""
Get the unique antpair tuples that have data associated with them.
Returns
-------
list of tuples of int
list of unique antpair tuples (ant1, ant2) with data associated with them.
"""
return list(zip(*self.baseline_to_antnums(self.get_baseline_nums())))
def get_pols(self):
"""
Get the polarizations in the data.
Returns
-------
list of str
list of polarizations (as strings) in the data.
"""
return uvutils.polnum2str(
self.polarization_array, x_orientation=self.x_orientation
)
def get_antpairpols(self):
"""
Get the unique antpair + pol tuples that have data associated with them.
Returns
-------
list of tuples of int
list of unique antpair + pol tuples (ant1, ant2, pol) with data
associated with them.
"""
pols = self.get_pols()
bls = self.get_antpairs()
return [(bl) + (pol,) for bl in bls for pol in pols]
def get_feedpols(self):
"""
Get the unique antenna feed polarizations in the data.
Returns
-------
list of str
list of antenna feed polarizations (e.g. ['X', 'Y']) in the data.
Raises
------
ValueError
If any pseudo-Stokes visibilities are present
"""
if np.any(self.polarization_array > 0):
raise ValueError(
"Pseudo-Stokes visibilities cannot be interpreted as feed polarizations"
)
else:
return list(set("".join(self.get_pols())))
def get_data(self, key1, key2=None, key3=None, squeeze="default", force_copy=False):
"""
Get the data corresonding to a baseline and/or polarization.
Parameters
----------
key1, key2, key3 : int or tuple of ints
Identifier of which data to get, can be passed as 1, 2, or 3 arguments
or as a single tuple of length 1, 2, or 3. These are collectively
called the key.
If key is length 1:
if (key < 5) or (type(key) is str):
interpreted as a polarization number/name, get all data for
that pol.
else:
interpreted as a baseline number, get all data for that baseline.
if key is length 2: interpreted as an antenna pair, get all data
for that baseline.
if key is length 3: interpreted as antenna pair and pol (ant1, ant2, pol),
get all data for that baseline, pol. pol may be a string or int.
squeeze : str
string specifying how to squeeze the returned array. Options are:
'default': squeeze pol and spw dimensions if possible;
'none': no squeezing of resulting numpy array;
'full': squeeze all length 1 dimensions.
force_copy : bool
Option to explicitly make a copy of the data.
Returns
-------
ndarray
copy (or if possible, a read-only view) of relevant section of data.
If data exists conjugate to requested antenna pair, it will be conjugated
before returning.
"""
key = []
for val in [key1, key2, key3]:
if isinstance(val, str):
key.append(val)
elif val is not None:
key += list(uvutils._get_iterable(val))
if len(key) > 3:
raise ValueError("no more than 3 key values can be passed")
ind1, ind2, indp = self._key2inds(key)
out = self._smart_slicing(
self.data_array, ind1, ind2, indp, squeeze=squeeze, force_copy=force_copy
)
return out
def get_flags(
self, key1, key2=None, key3=None, squeeze="default", force_copy=False
):
"""
Get the flags corresonding to a baseline and/or polarization.
Parameters
----------
key1, key2, key3 : int or tuple of ints
Identifier of which data to get, can be passed as 1, 2, or 3 arguments
or as a single tuple of length 1, 2, or 3. These are collectively
called the key.
If key is length 1:
if (key < 5) or (type(key) is str):
interpreted as a polarization number/name, get all flags for
that pol.
else:
interpreted as a baseline number, get all flags for that baseline.
if key is length 2: interpreted as an antenna pair, get all flags
for that baseline.
if key is length 3: interpreted as antenna pair and pol (ant1, ant2, pol),
get all flags for that baseline, pol. pol may be a string or int.
squeeze : str
string specifying how to squeeze the returned array. Options are:
'default': squeeze pol and spw dimensions if possible;
'none': no squeezing of resulting numpy array;
'full': squeeze all length 1 dimensions.
force_copy : bool
Option to explicitly make a copy of the data.
Returns
-------
ndarray
copy (or if possible, a read-only view) of relevant section of flags.
"""
key = []
for val in [key1, key2, key3]:
if isinstance(val, str):
key.append(val)
elif val is not None:
key += list(uvutils._get_iterable(val))
if len(key) > 3:
raise ValueError("no more than 3 key values can be passed")
ind1, ind2, indp = self._key2inds(key)
# When we select conjugated baselines, there is a call to np.conj()
# inside of _smart_slicing to correct the data array. This has the
# unintended consequence of promoting the dtype of an array of np.bool_
# to np.int8. Rather than having a bunch of special handling for this
# ~corner case, we instead explicitly cast back to np.bool_ before we
# hand back to the user.
out = self._smart_slicing(
self.flag_array, ind1, ind2, indp, squeeze=squeeze, force_copy=force_copy
).astype(np.bool_)
return out
def get_nsamples(
self, key1, key2=None, key3=None, squeeze="default", force_copy=False
):
"""
Get the nsamples corresonding to a baseline and/or polarization.
Parameters
----------
key1, key2, key3 : int or tuple of ints
Identifier of which data to get, can be passed as 1, 2, or 3 arguments
or as a single tuple of length 1, 2, or 3. These are collectively
called the key.
If key is length 1:
if (key < 5) or (type(key) is str):
interpreted as a polarization number/name, get all nsamples for
that pol.
else:
interpreted as a baseline number, get all nsamples for that
baseline.
if key is length 2: interpreted as an antenna pair, get all nsamples
for that baseline.
if key is length 3: interpreted as antenna pair and pol (ant1, ant2, pol),
get all nsamples for that baseline, pol. pol may be a string or int.
squeeze : str
string specifying how to squeeze the returned array. Options are:
'default': squeeze pol and spw dimensions if possible;
'none': no squeezing of resulting numpy array;
'full': squeeze all length 1 dimensions.
force_copy : bool
Option to explicitly make a copy of the data.
Returns
-------
ndarray
copy (or if possible, a read-only view) of relevant section of
nsample_array.
"""
key = []
for val in [key1, key2, key3]:
if isinstance(val, str):
key.append(val)
elif val is not None:
key += list(uvutils._get_iterable(val))
if len(key) > 3:
raise ValueError("no more than 3 key values can be passed")
ind1, ind2, indp = self._key2inds(key)
out = self._smart_slicing(
self.nsample_array, ind1, ind2, indp, squeeze=squeeze, force_copy=force_copy
)
return out
def get_times(self, key1, key2=None, key3=None):
"""
Get the times for a given antpair or baseline number.
Meant to be used in conjunction with get_data function.
Parameters
----------
key1, key2, key3 : int or tuple of ints
Identifier of which data to get, can be passed as 1, 2, or 3 arguments
or as a single tuple of length 1, 2, or 3. These are collectively
called the key.
If key is length 1:
if (key < 5) or (type(key) is str):
interpreted as a polarization number/name, get all times.
else:
interpreted as a baseline number, get all times for that baseline.
if key is length 2: interpreted as an antenna pair, get all times
for that baseline.
if key is length 3: interpreted as antenna pair and pol (ant1, ant2, pol),
get all times for that baseline.
Returns
-------
ndarray
times from the time_array for the given antpair or baseline.
"""
key = []
for val in [key1, key2, key3]:
if isinstance(val, str):
key.append(val)
elif val is not None:
key += list(uvutils._get_iterable(val))
if len(key) > 3:
raise ValueError("no more than 3 key values can be passed")
inds1, inds2, indp = self._key2inds(key)
return self.time_array[np.append(inds1, inds2)]
def get_lsts(self, key1, key2=None, key3=None):
"""
Get the LSTs for a given antpair or baseline number.
Meant to be used in conjunction with get_data function.
Parameters
----------
key1, key2, key3 : int or tuple of ints
Identifier of which data to get, can be passed as 1, 2, or 3 arguments
or as a single tuple of length 1, 2, or 3. These are collectively
called the key.
If key is length 1:
if (key < 5) or (type(key) is str):
interpreted as a polarization number/name, get all times.
else:
interpreted as a baseline number, get all times for that baseline.
if key is length 2: interpreted as an antenna pair, get all times
for that baseline.
if key is length 3: interpreted as antenna pair and pol (ant1, ant2, pol),
get all times for that baseline.
Returns
-------
ndarray
LSTs from the lst_array for the given antpair or baseline.
"""
key = []
for val in [key1, key2, key3]:
if isinstance(val, str):
key.append(val)
elif val is not None:
key += list(uvutils._get_iterable(val))
if len(key) > 3:
raise ValueError("no more than 3 key values can be passed")
inds1, inds2, indp = self._key2inds(key)
return self.lst_array[np.append(inds1, inds2)]
def get_ENU_antpos(self, center=False, pick_data_ants=False):
"""
Get antenna positions in ENU (topocentric) coordinates in units of meters.
Parameters
----------
center : bool
If True, subtract median of array position from antpos
pick_data_ants : bool
If True, return only antennas found in data
Returns
-------
antpos : ndarray
Antenna positions in ENU (topocentric) coordinates in units of
meters, shape=(Nants, 3)
ants : ndarray
Antenna numbers matching ordering of antpos, shape=(Nants,)
"""
antpos = uvutils.ENU_from_ECEF(
(self.antenna_positions + self.telescope_location),
*self.telescope_location_lat_lon_alt,
)
ants = self.antenna_numbers
if pick_data_ants:
data_ants = np.unique(np.concatenate([self.ant_1_array, self.ant_2_array]))
telescope_ants = self.antenna_numbers
select = np.in1d(telescope_ants, data_ants)
antpos = antpos[select, :]
ants = telescope_ants[select]
if center is True:
antpos -= np.median(antpos, axis=0)
return antpos, ants
def _set_method_helper(self, dshape, key1, key2=None, key3=None):
"""
Extract the indices for setting data, flags, or nsample arrays.
This is a helper method designed to work with set_data, set_flags, and
set_nsamples. Given the shape of the data-like array and the keys
corresponding to where the data should end up, it finds the indices
that are needed for the `_index_dset` method.
Parameters
----------
dshape : tuple of int
The shape of the data-like array. This is used to ensure the array
is compatible with the indices selected.
key1, key2, key3 : int or tuple of ints
Identifier of which flags to set, can be passed as 1, 2, or 3 arguments
or as a single tuple of length 1, 2, or 3. These are collectively
called the key.
If key is length 1:
if (key < 5) or (type(key) is str):
interpreted as a polarization number/name, set all flags for
that pol.
else:
interpreted as a baseline number, set all flags for that baseline.
if key is length 2: interpreted as an antenna pair, set all flags
for that baseline.
if key is length 3: interpreted as antenna pair and pol (ant1, ant2, pol),
set all flags for that baseline, pol. pol may be a string or int.
Returns
-------
inds : tuple of int
The indices in the data-like array to slice into.
Raises
------
ValueError:
If more than 3 keys are passed, if the requested indices are
conjugated in the data, if the data array shape is not compatible
with the indices.
"""
key = []
for val in [key1, key2, key3]:
if isinstance(val, str):
key.append(val)
elif val is not None:
key += list(uvutils._get_iterable(val))
if len(key) > 3:
raise ValueError("no more than 3 key values can be passed")
ind1, ind2, indp = self._key2inds(key)
if len(ind2) != 0:
raise ValueError(
"the requested key is present on the object, but conjugated. Please "
"conjugate data and keys appropriately and try again"
)
if self.future_array_shapes:
expected_shape = (len(ind1), self.Nfreqs, len(indp[0]))
else:
expected_shape = (len(ind1), 1, self.Nfreqs, len(indp[0]))
if dshape != expected_shape:
raise ValueError(
"the input array is not compatible with the shape of the destination. "
f"Input array shape is {dshape}, expected shape is {expected_shape}."
)
blt_slices, blt_sliceable = uvutils._convert_to_slices(
ind1, max_nslice_frac=0.1
)
pol_slices, pol_sliceable = uvutils._convert_to_slices(
indp[0], max_nslice_frac=0.5
)
if self.future_array_shapes:
inds = [ind1, np.s_[:], indp[0]]
else:
inds = [ind1, np.s_[:], np.s_[:], indp[0]]
if blt_sliceable:
inds[0] = blt_slices
if pol_sliceable:
inds[-1] = pol_slices
return tuple(inds)
def set_data(self, data, key1, key2=None, key3=None):
"""
Set the data array to some values provided by the user.
Parameters
----------
data : ndarray of complex
The data to overwrite into the data_array. Must be the same shape as
the target indices.
key1, key2, key3 : int or tuple of ints
Identifier of which data to set, can be passed as 1, 2, or 3 arguments
or as a single tuple of length 1, 2, or 3. These are collectively
called the key.
If key is length 1:
if (key < 5) or (type(key) is str):
interpreted as a polarization number/name, get all data for
that pol.
else:
interpreted as a baseline number, get all data for that baseline.
if key is length 2: interpreted as an antenna pair, get all data
for that baseline.
if key is length 3: interpreted as antenna pair and pol (ant1, ant2, pol),
get all data for that baseline, pol. pol may be a string or int.
Returns
-------
None
Raises
------
ValueError:
If more than 3 keys are passed, if the requested indices are
conjugated in the data, if the data array shape is not compatible
with the indices.
"""
dshape = data.shape
inds = self._set_method_helper(dshape, key1, key2, key3)
uvutils._index_dset(self.data_array, inds, data)
return
def set_flags(self, flags, key1, key2=None, key3=None):
"""
Set the flag array to some values provided by the user.
Parameters
----------
flag : ndarray of boolean
The flags to overwrite into the fkag_array. Must be the same shape
as the target indices.
key1, key2, key3 : int or tuple of ints
Identifier of which flags to set, can be passed as 1, 2, or 3 arguments
or as a single tuple of length 1, 2, or 3. These are collectively
called the key.
If key is length 1:
if (key < 5) or (type(key) is str):
interpreted as a polarization number/name, set all flags for
that pol.
else:
interpreted as a baseline number, set all flags for that baseline.
if key is length 2: interpreted as an antenna pair, set all flags
for that baseline.
if key is length 3: interpreted as antenna pair and pol (ant1, ant2, pol),
set all flags for that baseline, pol. pol may be a string or int.
Returns
-------
None
Raises
------
ValueError:
If more than 3 keys are passed, if the requested indices are
conjugated in the data, if the data array shape is not compatible
with the indices.
"""
dshape = flags.shape
inds = self._set_method_helper(dshape, key1, key2, key3)
uvutils._index_dset(self.flag_array, inds, flags)
return
def set_nsamples(self, nsamples, key1, key2=None, key3=None):
"""
Set the nsamples array to some values provided by the user.
Parameters
----------
nsamples : ndarray of float
The nsamples to overwrite into the nsample_array. Must be the same
shape as the target indices.
key1, key2, key3 : int or tuple of ints
Identifier of which nsamples to set, can be passed as 1, 2, or 3
arguments or as a single tuple of length 1, 2, or 3. These are
collectively called the key.
If key is length 1:
if (key < 5) or (type(key) is str):
interpreted as a polarization number/name, set all data for
that pol.
else:
interpreted as a baseline number, set all nsamples for that
baseline.
if key is length 2: interpreted as an antenna pair, set all nsamples
for that baseline.
if key is length 3: interpreted as antenna pair and pol (ant1, ant2,
pol), set all nsamples for that baseline, pol. pol may be a
string or int.
Returns
-------
None
Raises
------
ValueError:
If more than 3 keys are passed, if the requested indices are
conjugated in the data, if the data array shape is not compatible
with the indices.
"""
dshape = nsamples.shape
inds = self._set_method_helper(dshape, key1, key2, key3)
uvutils._index_dset(self.nsample_array, inds, nsamples)
return
def antpairpol_iter(self, squeeze="default"):
"""
Iterate the data for each antpair, polarization combination.
Parameters
----------
squeeze : str
string specifying how to squeeze the returned array. Options are:
'default': squeeze pol and spw dimensions if possible;
'none': no squeezing of resulting numpy array;
'full': squeeze all length 1 dimensions.
Yields
------
key : tuple
antenna1, antenna2, and polarization string
data : ndarray of complex
data for the ant pair and polarization specified in key
"""
antpairpols = self.get_antpairpols()
for key in antpairpols:
yield (key, self.get_data(key, squeeze=squeeze))
    def conjugate_bls(self, convention="ant1<ant2", use_enu=True, uvw_tol=0.0):
        """
        Conjugate baselines according to one of the supported conventions.
        This will fail if only one of the cross pols is present (because
        conjugation requires changing the polarization number for cross pols).
        Parameters
        ----------
        convention : str or array_like of int
            A convention for the directions of the baselines, options are:
            'ant1<ant2', 'ant2<ant1', 'u<0', 'u>0', 'v<0', 'v>0' or an
            index array of blt indices to conjugate.
        use_enu : bool
            Use true antenna positions to determine uv location (as opposed to
            uvw array). Only applies if `convention` is 'u<0', 'u>0', 'v<0', 'v>0'.
            Set to False to use uvw array values.
        uvw_tol : float
            Defines a tolerance on uvw coordinates for setting the
            u>0, u<0, v>0, or v<0 conventions. Defaults to 0m.
        Raises
        ------
        ValueError
            If convention is not an allowed value or if not all conjugate pols exist.
        """
        # Validate convention: it is either an integer index array of blt
        # indices to conjugate, or one of the supported convention strings.
        if isinstance(convention, (np.ndarray, list, tuple)):
            convention = np.array(convention)
            if (
                np.max(convention) >= self.Nblts
                or np.min(convention) < 0
                or convention.dtype not in [int, np.int_, np.int32, np.int64]
            ):
                raise ValueError(
                    "If convention is an index array, it must "
                    "contain integers and have values greater "
                    "than zero and less than NBlts"
                )
        else:
            if convention not in ["ant1<ant2", "ant2<ant1", "u<0", "u>0", "v<0", "v>0"]:
                raise ValueError(
                    "convention must be one of 'ant1<ant2', "
                    "'ant2<ant1', 'u<0', 'u>0', 'v<0', 'v>0' or "
                    "an index array with values less than NBlts"
                )
        if isinstance(convention, str):
            if convention in ["u<0", "u>0", "v<0", "v>0"]:
                # For uvw-based conventions, build the uvw array to test:
                # either recomputed from antenna ENU positions, or a copy of
                # the stored uvw_array.
                if use_enu is True:
                    enu, anum = self.get_ENU_antpos()
                    anum = anum.tolist()
                    uvw_array_use = np.zeros_like(self.uvw_array)
                    for i, bl in enumerate(self.baseline_array):
                        a1, a2 = self.ant_1_array[i], self.ant_2_array[i]
                        i1, i2 = anum.index(a1), anum.index(a2)
                        # baseline vector is ant2 position minus ant1 position
                        uvw_array_use[i, :] = enu[i2] - enu[i1]
                else:
                    uvw_array_use = copy.copy(self.uvw_array)
            # index_array selects the blts that violate the requested
            # convention (and therefore need to be conjugated).
            if convention == "ant1<ant2":
                index_array = np.asarray(self.ant_1_array > self.ant_2_array).nonzero()
            elif convention == "ant2<ant1":
                index_array = np.asarray(self.ant_2_array > self.ant_1_array).nonzero()
            elif convention == "u<0":
                # NOTE: & binds tighter than |, so this unparenthesized form
                # groups the same way as the explicitly parenthesized 'u>0'
                # branch below: flip when u > tol, or u ~ 0 and v > tol, or
                # u ~ 0 and v ~ 0 and w > tol.
                index_array = np.asarray(
                    (uvw_array_use[:, 0] > uvw_tol)
                    | (uvw_array_use[:, 1] > uvw_tol)
                    & np.isclose(uvw_array_use[:, 0], 0, atol=uvw_tol)
                    | (uvw_array_use[:, 2] > uvw_tol)
                    & np.isclose(uvw_array_use[:, 0], 0, atol=uvw_tol)
                    & np.isclose(uvw_array_use[:, 1], 0, atol=uvw_tol)
                ).nonzero()
            elif convention == "u>0":
                index_array = np.asarray(
                    (uvw_array_use[:, 0] < -uvw_tol)
                    | (
                        (uvw_array_use[:, 1] < -uvw_tol)
                        & np.isclose(uvw_array_use[:, 0], 0, atol=uvw_tol)
                    )
                    | (
                        (uvw_array_use[:, 2] < -uvw_tol)
                        & np.isclose(uvw_array_use[:, 0], 0, atol=uvw_tol)
                        & np.isclose(uvw_array_use[:, 1], 0, atol=uvw_tol)
                    )
                ).nonzero()
            elif convention == "v<0":
                # Same precedence note as 'u<0' above, with v as primary.
                index_array = np.asarray(
                    (uvw_array_use[:, 1] > uvw_tol)
                    | (uvw_array_use[:, 0] > uvw_tol)
                    & np.isclose(uvw_array_use[:, 1], 0, atol=uvw_tol)
                    | (uvw_array_use[:, 2] > uvw_tol)
                    & np.isclose(uvw_array_use[:, 0], 0, atol=uvw_tol)
                    & np.isclose(uvw_array_use[:, 1], 0, atol=uvw_tol)
                ).nonzero()
            elif convention == "v>0":
                index_array = np.asarray(
                    (uvw_array_use[:, 1] < -uvw_tol)
                    | (uvw_array_use[:, 0] < -uvw_tol)
                    & np.isclose(uvw_array_use[:, 1], 0, atol=uvw_tol)
                    | (uvw_array_use[:, 2] < -uvw_tol)
                    & np.isclose(uvw_array_use[:, 0], 0, atol=uvw_tol)
                    & np.isclose(uvw_array_use[:, 1], 0, atol=uvw_tol)
                ).nonzero()
        else:
            index_array = convention
        # .nonzero() returned a 1-tuple of index arrays above; an explicit
        # index array passed by the caller is used directly as a fancy index.
        if index_array[0].size > 0:
            # new_pol_inds[i] gives the destination pol slot whose data comes
            # from (conjugated) source pol slot i — this swaps cross pols.
            new_pol_inds = uvutils.reorder_conj_pols(self.polarization_array)
            # Conjugating a baseline negates its uvw vector.
            self.uvw_array[index_array] *= -1
            if not self.metadata_only:
                # Work from a copy so pol slots are not read after being
                # overwritten in place.
                orig_data_array = copy.copy(self.data_array)
                for pol_ind in np.arange(self.Npols):
                    if self.future_array_shapes:
                        self.data_array[
                            index_array, :, new_pol_inds[pol_ind]
                        ] = np.conj(orig_data_array[index_array, :, pol_ind])
                    else:
                        self.data_array[
                            index_array, :, :, new_pol_inds[pol_ind]
                        ] = np.conj(orig_data_array[index_array, :, :, pol_ind])
            # Swap antenna 1 and 2 for the conjugated blts and rebuild the
            # corresponding baseline numbers.
            ant_1_vals = self.ant_1_array[index_array]
            ant_2_vals = self.ant_2_array[index_array]
            self.ant_1_array[index_array] = ant_2_vals
            self.ant_2_array[index_array] = ant_1_vals
            self.baseline_array[index_array] = self.antnums_to_baseline(
                self.ant_1_array[index_array], self.ant_2_array[index_array]
            )
            self.Nbls = np.unique(self.baseline_array).size
def reorder_pols(
self,
order="AIPS",
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
):
"""
Rearrange polarizations in the event they are not uvfits compatible.
Parameters
----------
order : str
Either a string specifying a canonical ordering ('AIPS' or 'CASA')
or an index array of length Npols that specifies how to shuffle the
data (this is not the desired final pol order).
CASA ordering has cross-pols in between (e.g. XX,XY,YX,YY)
AIPS ordering has auto-pols followed by cross-pols (e.g. XX,YY,XY,YX)
Default ('AIPS') will sort by absolute value of pol values.
run_check : bool
Option to check for the existence and proper shapes of parameters
after reordering.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reordering.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
Raises
------
ValueError
If the order is not one of the allowed values.
"""
if isinstance(order, (np.ndarray, list, tuple)):
order = np.array(order)
if (
order.size != self.Npols
or order.dtype not in [int, np.int_, np.int32, np.int64]
or np.min(order) < 0
or np.max(order) >= self.Npols
):
raise ValueError(
"If order is an index array, it must "
"contain integers and be length Npols."
)
index_array = order
elif order == "AIPS":
index_array = np.argsort(np.abs(self.polarization_array))
elif order == "CASA":
casa_order = np.array([1, 2, 3, 4, -1, -3, -4, -2, -5, -7, -8, -6])
pol_inds = []
for pol in self.polarization_array:
pol_inds.append(np.where(casa_order == pol)[0][0])
index_array = np.argsort(pol_inds)
else:
raise ValueError(
"order must be one of: 'AIPS', 'CASA', or an "
"index array of length Npols"
)
self.polarization_array = self.polarization_array[index_array]
if not self.metadata_only:
# data array is special and large, take is faster here
if self.future_array_shapes:
self.data_array = np.take(self.data_array, index_array, axis=2)
self.nsample_array = self.nsample_array[:, :, index_array]
self.flag_array = self.flag_array[:, :, index_array]
else:
self.data_array = np.take(self.data_array, index_array, axis=3)
self.nsample_array = self.nsample_array[:, :, :, index_array]
self.flag_array = self.flag_array[:, :, :, index_array]
# check if object is self-consistent
if run_check:
self.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
    def reorder_blts(
        self,
        order="time",
        minor_order=None,
        conj_convention=None,
        uvw_tol=0.0,
        conj_convention_use_enu=True,
        run_check=True,
        check_extra=True,
        run_check_acceptability=True,
        strict_uvw_antpos_check=False,
    ):
        """
        Arrange blt axis according to desired order.
        Optionally conjugate some baselines.
        Parameters
        ----------
        order : str or array_like of int
            A string describing the desired order along the blt axis.
            Options are: `time`, `baseline`, `ant1`, `ant2`, `bda` or an
            index array of length Nblts that specifies the new order.
        minor_order : str
            Optionally specify a secondary ordering. Default depends on how
            order is set: if order is 'time', this defaults to `baseline`,
            if order is `ant1`, or `ant2` this defaults to the other antenna,
            if order is `baseline` the only allowed value is `time`. Ignored if
            order is `bda` If this is the same as order, it is reset to the default.
        conj_convention : str or array_like of int
            Optionally conjugate baselines to make the baselines have the
            desired orientation. See conjugate_bls for allowed values and details.
        uvw_tol : float
            If conjugating baselines, sets a tolerance for determining the signs
            of u,v, and w, and whether or not they are zero.
            See conjugate_bls for details.
        conj_convention_use_enu: bool
            If `conj_convention` is set, this is passed to conjugate_bls, see that
            method for details.
        run_check : bool
            Option to check for the existence and proper shapes of parameters
            after reordering.
        check_extra : bool
            Option to check optional parameters as well as required ones.
        run_check_acceptability : bool
            Option to check acceptable range of the values of parameters after
            reordering.
        strict_uvw_antpos_check : bool
            Option to raise an error rather than a warning if the check that
            uvws match antenna positions does not pass.
        Raises
        ------
        ValueError
            If parameter values are inappropriate
        """
        # Validate order: either a length-Nblts integer index array (in which
        # case minor_order is not allowed) or one of the named orderings.
        if isinstance(order, (np.ndarray, list, tuple)):
            order = np.array(order)
            if order.size != self.Nblts or order.dtype not in [
                int,
                np.int_,
                np.int32,
                np.int64,
            ]:
                raise ValueError(
                    "If order is an index array, it must "
                    "contain integers and be length Nblts."
                )
            if minor_order is not None:
                raise ValueError(
                    "Minor order cannot be set if order is an index array."
                )
        else:
            if order not in ["time", "baseline", "ant1", "ant2", "bda"]:
                raise ValueError(
                    "order must be one of 'time', 'baseline', "
                    "'ant1', 'ant2', 'bda' or an index array of "
                    "length Nblts"
                )
        # A minor order equal to the major order is meaningless; reset to the
        # per-order default chosen below.
        if minor_order == order:
            minor_order = None
        if minor_order is not None:
            if minor_order not in ["time", "baseline", "ant1", "ant2"]:
                raise ValueError(
                    "minor_order can only be one of 'time', "
                    "'baseline', 'ant1', 'ant2'"
                )
            if isinstance(order, np.ndarray) or order == "bda":
                raise ValueError(
                    "minor_order cannot be specified if order is "
                    "'bda' or an index array."
                )
            if order == "baseline":
                if minor_order in ["ant1", "ant2"]:
                    raise ValueError("minor_order conflicts with order")
        else:
            # Fill in the default minor order for each named major order.
            if order == "time":
                minor_order = "baseline"
            elif order == "ant1":
                minor_order = "ant2"
            elif order == "ant2":
                minor_order = "ant1"
            elif order == "baseline":
                minor_order = "time"
        # Conjugate baselines first (if requested) so the sort keys below see
        # the post-conjugation antenna/baseline numbers.
        if conj_convention is not None:
            self.conjugate_bls(
                convention=conj_convention,
                use_enu=conj_convention_use_enu,
                uvw_tol=uvw_tol,
            )
        # Record the resulting ordering on the object (None for an arbitrary
        # index-array ordering).
        if isinstance(order, str):
            if minor_order is None:
                self.blt_order = (order,)
                self._blt_order.form = (1,)
            else:
                self.blt_order = (order, minor_order)
                # set it back to the right shape in case it was set differently before
                self._blt_order.form = (2,)
        else:
            self.blt_order = None
        if not isinstance(order, np.ndarray):
            # Use lexsort to sort along different arrays in defined order.
            # arr1 is the primary key, arr2 secondary, arr3 tertiary.
            if order == "time":
                arr1 = self.time_array
                if minor_order == "ant1":
                    arr2 = self.ant_1_array
                    arr3 = self.ant_2_array
                elif minor_order == "ant2":
                    arr2 = self.ant_2_array
                    arr3 = self.ant_1_array
                else:
                    # minor_order is baseline
                    arr2 = self.baseline_array
                    arr3 = self.baseline_array
            elif order == "ant1":
                arr1 = self.ant_1_array
                if minor_order == "time":
                    arr2 = self.time_array
                    arr3 = self.ant_2_array
                elif minor_order == "ant2":
                    arr2 = self.ant_2_array
                    arr3 = self.time_array
                else:  # minor_order is baseline
                    arr2 = self.baseline_array
                    arr3 = self.time_array
            elif order == "ant2":
                arr1 = self.ant_2_array
                if minor_order == "time":
                    arr2 = self.time_array
                    arr3 = self.ant_1_array
                elif minor_order == "ant1":
                    arr2 = self.ant_1_array
                    arr3 = self.time_array
                else:
                    # minor_order is baseline
                    arr2 = self.baseline_array
                    arr3 = self.time_array
            elif order == "baseline":
                arr1 = self.baseline_array
                # only allowed minor order is time
                arr2 = self.time_array
                arr3 = self.time_array
            elif order == "bda":
                # bda sorts primarily on integration_time, then baseline, then
                # time.
                arr1 = self.integration_time
                # only allowed minor order is time
                arr2 = self.baseline_array
                arr3 = self.time_array
            # lexsort uses the listed arrays from last to first
            # (so the primary sort is on the last one)
            index_array = np.lexsort((arr3, arr2, arr1))
        else:
            index_array = order
        # actually do the reordering: every blt-indexed attribute must be
        # permuted with the same index_array.
        self.ant_1_array = self.ant_1_array[index_array]
        self.ant_2_array = self.ant_2_array[index_array]
        self.baseline_array = self.baseline_array[index_array]
        self.uvw_array = self.uvw_array[index_array, :]
        self.time_array = self.time_array[index_array]
        self.lst_array = self.lst_array[index_array]
        self.integration_time = self.integration_time[index_array]
        # Optional per-blt phase-center attributes are only present on some
        # objects; permute them when they exist.
        if self.phase_center_app_ra is not None:
            self.phase_center_app_ra = self.phase_center_app_ra[index_array]
        if self.phase_center_app_dec is not None:
            self.phase_center_app_dec = self.phase_center_app_dec[index_array]
        if self.phase_center_frame_pa is not None:
            self.phase_center_frame_pa = self.phase_center_frame_pa[index_array]
        if self.multi_phase_center:
            self.phase_center_id_array = self.phase_center_id_array[index_array]
        if not self.metadata_only:
            self.data_array = self.data_array[index_array]
            self.flag_array = self.flag_array[index_array]
            self.nsample_array = self.nsample_array[index_array]
        # check if object is self-consistent
        if run_check:
            self.check(
                check_extra=check_extra,
                run_check_acceptability=run_check_acceptability,
                strict_uvw_antpos_check=strict_uvw_antpos_check,
            )
    def reorder_freqs(
        self,
        spw_order=None,
        channel_order=None,
        select_spw=None,
        run_check=True,
        check_extra=True,
        run_check_acceptability=True,
        strict_uvw_antpos_check=False,
    ):
        """
        Arrange frequency axis according to desired order.

        Can be applied across the entire frequency axis, or just a subset.
        Sorting happens in two stages: spectral windows are ordered first
        (per `spw_order`), then channels are sorted within each selected
        window (per `channel_order`), and the resulting index array is used
        to permute all frequency-axis data and metadata in one pass.

        Parameters
        ----------
        spw_order : str or array_like of int
            A string describing the desired order of spectral windows along the
            frequency axis. Allowed strings include `number` (sort on spectral window
            number) and `freq` (sort on median frequency). A '-' can be appended
            to signify descending order instead of the default ascending order,
            e.g., if you have SPW #1 and 2, and wanted them ordered as [2, 1],
            you would specify `-number`. Alternatively, one can supply an array
            of length Nspws that specifies the new order, with values matched to
            the spectral window number given in `spw_array`. Default is to apply no
            sorting of spectral windows.
        channel_order : str or array_like of int
            A string describing the desired order of frequency channels within a
            spectral window. Allowed strings include `freq`, which will sort channels
            within a spectral window by frequency. A '-' can be optionally appended
            to signify descending order instead of the default ascending order.
            Alternatively, one can supply an index array of length Nfreqs that
            specifies the new order. Default is to apply no sorting of channels
            within a single spectral window. Note that providing an array_like of
            ints will cause the values given to `spw_order` and `select_spw` to be
            ignored.
        select_spw : int or array_like of int
            An int or array_like of ints which specifies which spectral windows to
            apply sorting. Note that setting this argument will cause the value
            given to `spw_order` to be ignored.
        run_check : bool
            Option to check for the existence and proper shapes of parameters
            after reordering.
        check_extra : bool
            Option to check optional parameters as well as required ones.
        run_check_acceptability : bool
            Option to check acceptable range of the values of parameters after
            reordering.
        strict_uvw_antpos_check : bool
            Option to raise an error rather than a warning if the check that
            uvws match antenna positions does not pass.

        Returns
        -------
        None

        Raises
        ------
        UserWarning
            Raised if providing arguments to select_spw and freq_screen (the latter
            overrides the former).
        ValueError
            Raised if select_spw contains values not in spw_array, or if freq_screen
            is not the same length as freq_array.
        """
        # No sort criteria supplied at all -- nothing to do.
        if (spw_order is None) and (channel_order is None):
            warnings.warn(
                "Not specifying either spw_order or channel_order causes "
                "no sorting actions to be applied. Returning object unchanged."
            )
            return

        # Check to see if there are arguments we should be ignoring.
        # An explicit index array for channel_order overrides everything else.
        if isinstance(channel_order, (np.ndarray, list, tuple)):
            if select_spw is not None:
                warnings.warn(
                    "The select_spw argument is ignored when providing an "
                    "array_like of int for channel_order"
                )
            if spw_order is not None:
                warnings.warn(
                    "The spw_order argument is ignored when providing an "
                    "array_like of int for channel_order"
                )
            # The index array must be a true permutation of 0..Nfreqs-1.
            if not np.all(np.sort(channel_order) == np.arange(self.Nfreqs)):
                raise ValueError(
                    "Index array for channel_order must contain all indicies for "
                    "the frequency axis, without duplicates."
                )
            index_array = channel_order
        else:
            # Start from the identity permutation and refine it below.
            index_array = np.arange(self.Nfreqs)
            # Multiply by 1.0 here to make a cheap copy of the array to manipulate
            temp_freqs = 1.0 * (
                self.freq_array if self.future_array_shapes else self.freq_array[0, :]
            )
            # Same trick for ints -- add 0 to make a cheap copy.
            # For non-flex-spw objects every channel belongs to the lone spw.
            temp_spws = 0 + (
                self.flex_spw_id_array
                if self.flex_spw
                else (np.zeros(self.Nfreqs) + self.spw_array)
            )
            # Check whether or not we need to sort the channels in individual windows
            sort_spw = {idx: channel_order is not None for idx in self.spw_array}
            if select_spw is not None:
                if spw_order is not None:
                    warnings.warn(
                        "The spw_order argument is ignored when providing an "
                        "argument for select_spw"
                    )
                if channel_order is None:
                    warnings.warn(
                        "Specifying select_spw without providing channel_order causes "
                        "no sorting actions to be applied. Returning object unchanged."
                    )
                    return
                # Restrict the per-window channel sort to the requested window(s).
                if isinstance(select_spw, (np.ndarray, list, tuple)):
                    sort_spw = {idx: idx in select_spw for idx in self.spw_array}
                else:
                    sort_spw = {idx: idx == select_spw for idx in self.spw_array}
            elif spw_order is not None:
                if isinstance(spw_order, (np.ndarray, list, tuple)):
                    # A user-supplied spw order must be a permutation of spw_array.
                    if not np.all(np.sort(spw_order) == np.sort(self.spw_array)):
                        raise ValueError(
                            "Index array for spw_order must contain all indicies for "
                            "the frequency axis, without duplicates."
                        )
                elif spw_order not in ["number", "freq", "-number", "-freq", None]:
                    raise ValueError(
                        "spw_order can only be one of 'number', '-number', "
                        "'freq', '-freq', or None"
                    )
                elif self.Nspws > 1:
                    # Only need to do this step if we actually have multiple spws.
                    # If the string starts with a '-', then we will flip the order at
                    # the end of the operation
                    flip_spws = spw_order[0] == "-"

                    if "number" in spw_order:
                        spw_order = np.sort(self.spw_array)
                    elif "freq" in spw_order:
                        # Sort windows by their median channel frequency.
                        spw_order = self.spw_array[
                            np.argsort(
                                [
                                    np.median(temp_freqs[temp_spws == idx])
                                    for idx in self.spw_array
                                ]
                            )
                        ]
                    if flip_spws:
                        spw_order = np.flip(spw_order)

                # Now that we know the spw order, we can apply the first sort
                index_array = np.concatenate(
                    [index_array[temp_spws == idx] for idx in spw_order]
                )
                temp_freqs = temp_freqs[index_array]
                temp_spws = temp_spws[index_array]
            # Spectral windows are assumed sorted at this point
            if channel_order is not None:
                if channel_order not in ["freq", "-freq"]:
                    raise ValueError(
                        "channel_order can only be one of 'freq' or '-freq'"
                    )
                for idx in self.spw_array:
                    if sort_spw[idx]:
                        select_mask = temp_spws == idx
                        subsort_order = index_array[select_mask]
                        # Sort within this window by frequency, flipping for '-freq'.
                        subsort_order = subsort_order[
                            np.argsort(temp_freqs[select_mask])
                        ]
                        index_array[select_mask] = (
                            np.flip(subsort_order)
                            if channel_order[0] == "-"
                            else subsort_order
                        )
        # A strictly increasing index array is the identity permutation.
        if np.all(index_array[1:] > index_array[:-1]):
            # Nothing to do - the data are already sorted!
            return

        # Now update all of the arrays.
        # Frequency axis position differs between old (Nblts, 1, Nfreqs, Npols)
        # and future (Nblts, Nfreqs, Npols) array shapes.
        if self.future_array_shapes:
            self.freq_array = self.freq_array[index_array]
            if not self.metadata_only:
                self.data_array = self.data_array[:, index_array, :]
                self.flag_array = self.flag_array[:, index_array, :]
                self.nsample_array = self.nsample_array[:, index_array, :]
        else:
            self.freq_array = self.freq_array[:, index_array]
            if not self.metadata_only:
                self.data_array = self.data_array[:, :, index_array, :]
                self.flag_array = self.flag_array[:, :, index_array, :]
                self.nsample_array = self.nsample_array[:, :, index_array, :]
        if self.flex_spw:
            self.flex_spw_id_array = self.flex_spw_id_array[index_array]
            self.channel_width = self.channel_width[index_array]
            # Reorder the spw-axis items based on their first appearance in the data
            unique_index = np.sort(
                np.unique(self.flex_spw_id_array, return_index=True)[1]
            )
            self.spw_array = self.flex_spw_id_array[unique_index]
        if self.eq_coeffs is not None:
            self.eq_coeffs = self.eq_coeffs[:, index_array]

        # check if object is self-consistent
        if run_check:
            self.check(
                check_extra=check_extra,
                run_check_acceptability=run_check_acceptability,
                strict_uvw_antpos_check=strict_uvw_antpos_check,
            )
def remove_eq_coeffs(self):
"""
Remove equalization coefficients from the data.
Some telescopes, e.g. HERA, apply per-antenna, per-frequency gain
coefficients as part of the signal chain. These are stored in the
`eq_coeffs` attribute of the object. This method will remove them, so
that the data are in "unnormalized" raw units.
Parameters
----------
None
Returns
-------
None
Raises
------
ValueError
Raised if eq_coeffs or eq_coeffs_convention are not defined on the
object, or if eq_coeffs_convention is not one of "multiply" or "divide".
"""
if self.eq_coeffs is None:
raise ValueError(
"The eq_coeffs attribute must be defined on the object to apply them."
)
if self.eq_coeffs_convention is None:
raise ValueError(
"The eq_coeffs_convention attribute must be defined on the object "
"to apply them."
)
if self.eq_coeffs_convention not in ("multiply", "divide"):
raise ValueError(
"Got unknown convention {}. Must be one of: "
'"multiply", "divide"'.format(self.eq_coeffs_convention)
)
# apply coefficients for each baseline
for key in self.get_antpairs():
# get indices for this key
blt_inds = self.antpair2ind(key)
ant1_index = np.asarray(self.antenna_numbers == key[0]).nonzero()[0][0]
ant2_index = np.asarray(self.antenna_numbers == key[1]).nonzero()[0][0]
eq_coeff1 = self.eq_coeffs[ant1_index, :]
eq_coeff2 = self.eq_coeffs[ant2_index, :]
# make sure coefficients are the right size to broadcast
eq_coeff1 = np.repeat(eq_coeff1[:, np.newaxis], self.Npols, axis=1)
eq_coeff2 = np.repeat(eq_coeff2[:, np.newaxis], self.Npols, axis=1)
if self.eq_coeffs_convention == "multiply":
self.data_array[blt_inds] *= eq_coeff1 * eq_coeff2
else:
self.data_array[blt_inds] /= eq_coeff1 * eq_coeff2
return
def _apply_w_proj(self, new_w_vals, old_w_vals, select_mask=None):
"""
Apply corrections based on changes to w-coord.
Adjusts the data to account for a change along the w-axis of a baseline.
Parameters
----------
new_w_vals: float or ndarray of float
New w-coordinates for the baselines, in units of meters. Can either be a
solitary float (helpful for unphasing data, where new_w_vals can be set to
0.0) or an array of shape (Nselect,) (which is Nblts if select_mask=None).
old_w_vals: float or ndarray of float
Old w-coordinates for the baselines, in units of meters. Can either be a
solitary float (helpful for unphasing data, where new_w_vals can be set to
0.0) or an array of shape (Nselect,) (which is Nblts if select_mask=None).
select_mask: ndarray of bool
Array is of shape (Nblts,), where the sum of all enties marked True is
equal to Nselect (mentioned above).
Raises
------
IndexError
If the length of new_w_vals or old_w_vals isn't compatible with
select_mask, or if select mask isn't the right length.
"""
# If we only have metadata, then we have no work to do. W00t!
if self.metadata_only or (self.data_array is None):
return
if select_mask is None:
select_len = self.Nblts
else:
try:
inv_mask = np.ones(self.Nblts, dtype=bool)
inv_mask[select_mask] = False
select_mask = ~inv_mask
select_len = np.sum(select_mask)
except IndexError:
raise IndexError(
"select_mask must be an array-like, either of ints with shape "
"(Nblts), or of ints within the range (-Nblts, Nblts)."
)
# Promote everything to float64 ndarrays if they aren't already
old_w_vals = np.array(old_w_vals, dtype=np.float64)
old_w_vals.shape += (1,) if (old_w_vals.ndim == 0) else ()
new_w_vals = np.array(new_w_vals, dtype=np.float64)
new_w_vals.shape += (1,) if (new_w_vals.ndim == 0) else ()
# Make sure the lengths of everything make sense
new_val_len = len(new_w_vals)
old_val_len = len(old_w_vals)
if new_val_len not in [1, select_len]:
raise IndexError(
"The length of new_w_vals is wrong (expected 1 or %i, got %i)!"
% (select_len, new_val_len)
)
if old_val_len not in [1, select_len]:
raise IndexError(
"The length of old_w_vals is wrong (expected 1 or %i, got %i)!"
% (select_len, old_val_len)
)
# Calculate the difference in w terms as a function of freq. Note that the
# 1/c is there to speed of processing (faster to multiply than divide)
delta_w_lambda = (
(new_w_vals - old_w_vals).reshape(-1, 1)
* (1.0 / const.c.to("m/s").value)
* self.freq_array.reshape(1, self.Nfreqs)
)
if select_mask is None or np.all(select_mask):
# If all the w values are changing, it turns out to be twice as fast
# to ditch any sort of selection mask and just do the full multiply.
if self.future_array_shapes:
self.data_array *= np.exp(
(-1j * 2 * np.pi) * delta_w_lambda[:, :, None]
)
else:
self.data_array *= np.exp(
(-1j * 2 * np.pi) * delta_w_lambda[:, None, :, None]
)
elif np.any(select_mask):
# In the case we are _not_ doing all baselines, use a selection mask to
# only update the values we need. In the worse case, it slows down the
# processing by ~2x, but it can save a lot on time and memory if only
# needing to update a select number of baselines.
if self.future_array_shapes:
self.data_array[select_mask] *= np.exp(
(-1j * 2 * np.pi) * delta_w_lambda[:, :, None]
)
else:
self.data_array[select_mask] *= np.exp(
(-1j * 2 * np.pi) * delta_w_lambda[:, None, :, None]
)
def unphase_to_drift(
self, phase_frame=None, use_ant_pos=True, use_old_proj=False,
):
"""
Convert from a phased dataset to a drift dataset.
See the phasing memo under docs/references for more documentation.
Parameters
----------
phase_frame : str
The astropy frame to phase from. Either 'icrs' or 'gcrs'.
'gcrs' accounts for precession & nutation, 'icrs' also includes abberation.
Defaults to using the 'phase_center_frame' attribute or 'icrs'
if that attribute is None.
use_ant_pos : bool
If True, calculate the uvws directly from the antenna positions
rather than from the existing uvws. Default is True.
use_old_proj : bool
If True, uses the 'old' way of calculating baseline projections.
Default is False.
Raises
------
ValueError
If the phase_type is not 'phased'
"""
if self.phase_type == "phased":
pass
else:
raise ValueError(
"The data is already drift scanning; can only unphase phased data."
)
if not use_old_proj:
# Check to make sure that these attributes are actually filled. Otherwise,
# you probably want to use the old phase method.
if (
(not use_ant_pos)
and (self.phase_center_app_ra is None)
or (self.phase_center_app_dec is None)
):
raise AttributeError(
"Object missing phase_center_ra_app or phase_center_dec_app, "
"which implies that the data were phased using the 'old' "
"method for phasing (which is not compatible with the new "
"version of the code). Please run unphase_to_drift with "
"use_old_proj=True to continue."
)
telescope_location = self.telescope_location_lat_lon_alt
# Check and see if we have any unphased objects, in which case
# their w-values should be zeroed out.
select_mask = ~self._check_for_unphased()
new_uvw = uvutils.calc_uvw(
lst_array=self.lst_array,
use_ant_pos=use_ant_pos,
uvw_array=self.uvw_array,
antenna_positions=self.antenna_positions,
antenna_numbers=self.antenna_numbers,
ant_1_array=self.ant_1_array,
ant_2_array=self.ant_2_array,
old_app_ra=self.phase_center_app_ra,
old_app_dec=self.phase_center_app_dec,
old_frame_pa=self.phase_center_frame_pa,
telescope_lat=telescope_location[0],
telescope_lon=telescope_location[1],
to_enu=True,
)
self._apply_w_proj(0.0, self.uvw_array[select_mask, 2], select_mask)
self.uvw_array = new_uvw
# remove/update phase center
if self.multi_phase_center:
self.phase_center_id_array[:] = self._add_phase_center(
"unphased", "unphased"
)
self.phase_center_app_ra = self.lst_array.copy()
self.phase_center_app_dec[:] = (
np.zeros(self.Nblts) + self.telescope_location[0]
)
self.phase_center_frame_pa = np.zeros(self.Nblts)
else:
self.phase_center_frame = None
self.phase_center_ra = None
self.phase_center_dec = None
self.phase_center_epoch = None
self.phase_center_app_ra = None
self.phase_center_app_dec = None
self.phase_center_frame_pa = None
self._set_drift()
return
# If you are a multi phase center data set, there's no valid reason to be going
# back to the old phase method. Time to bail!
if self.multi_phase_center:
raise ValueError(
"Multi phase center data sets are not compatible with the old phasing "
"method, please set use_old_proj=False."
)
if phase_frame is None:
if self.phase_center_frame is not None:
phase_frame = self.phase_center_frame
else:
phase_frame = "icrs"
icrs_coord = SkyCoord(
ra=self.phase_center_ra,
dec=self.phase_center_dec,
unit="radian",
frame="icrs",
)
if phase_frame == "icrs":
frame_phase_center = icrs_coord
else:
# use center of observation for obstime for gcrs
center_time = np.mean([np.max(self.time_array), np.min(self.time_array)])
icrs_coord.obstime = Time(center_time, format="jd")
frame_phase_center = icrs_coord.transform_to("gcrs")
# This promotion is REQUIRED to get the right answer when we
# add in the telescope location for ICRS
# In some cases, the uvws are already float64, but sometimes they're not
self.uvw_array = np.float64(self.uvw_array)
# apply -w phasor
if not self.metadata_only:
w_lambda = (
self.uvw_array[:, 2].reshape(self.Nblts, 1)
/ const.c.to("m/s").value
* self.freq_array.reshape(1, self.Nfreqs)
)
if self.future_array_shapes:
phs = np.exp(-1j * 2 * np.pi * (-1) * w_lambda[:, :, None])
else:
phs = np.exp(-1j * 2 * np.pi * (-1) * w_lambda[:, None, :, None])
self.data_array *= phs
unique_times, unique_inds = np.unique(self.time_array, return_index=True)
telescope_location = EarthLocation.from_geocentric(
*self.telescope_location, unit=units.m
)
obs_times = Time(unique_times, format="jd")
itrs_telescope_locations = telescope_location.get_itrs(obstime=obs_times)
itrs_telescope_locations = SkyCoord(itrs_telescope_locations)
# just calling transform_to(coord.GCRS) will delete the obstime information
# need to re-add obstimes for a GCRS transformation
if phase_frame == "gcrs":
frame_telescope_locations = itrs_telescope_locations.transform_to(
getattr(coord, f"{phase_frame}".upper())(obstime=obs_times)
)
else:
frame_telescope_locations = itrs_telescope_locations.transform_to(
getattr(coord, f"{phase_frame}".upper())
)
frame_telescope_locations.representation_type = "cartesian"
for ind, jd in enumerate(unique_times):
inds = np.where(self.time_array == jd)[0]
obs_time = obs_times[ind]
if use_ant_pos:
ant_uvw = uvutils.phase_uvw(
self.telescope_location_lat_lon_alt[1],
self.telescope_location_lat_lon_alt[0],
self.antenna_positions,
)
# instead of looping through every ind, find the spot in antenna number
# array where ant_num <= ant1 < ant_number and similarly for ant2
# for all baselines in inds
# then find the uvw coordinate for all at the same time
# antenna_numbers does not necessarily need to be in order on the object
# but needs to be in order for the searchsorted to work.
# ant1_index and ant2_index arrays will preserve the order of blts
ant_sort = np.argsort(self.antenna_numbers)
ant1_index = np.searchsorted(
self.antenna_numbers[ant_sort], self.ant_1_array[inds]
)
ant2_index = np.searchsorted(
self.antenna_numbers[ant_sort], self.ant_2_array[inds]
)
self.uvw_array[inds] = (
ant_uvw[ant_sort][ant2_index, :] - ant_uvw[ant_sort][ant1_index, :]
)
else:
frame_telescope_location = frame_telescope_locations[ind]
itrs_lat_lon_alt = self.telescope_location_lat_lon_alt
uvws_use = self.uvw_array[inds, :]
uvw_rel_positions = uvutils.unphase_uvw(
frame_phase_center.ra.rad, frame_phase_center.dec.rad, uvws_use
)
frame_uvw_coord = SkyCoord(
x=uvw_rel_positions[:, 0] * units.m + frame_telescope_location.x,
y=uvw_rel_positions[:, 1] * units.m + frame_telescope_location.y,
z=uvw_rel_positions[:, 2] * units.m + frame_telescope_location.z,
frame=phase_frame,
obstime=obs_time,
representation_type="cartesian",
)
itrs_uvw_coord = frame_uvw_coord.transform_to("itrs")
# now convert them to ENU, which is the space uvws are in
self.uvw_array[inds, :] = uvutils.ENU_from_ECEF(
itrs_uvw_coord.cartesian.get_xyz().value.T, *itrs_lat_lon_alt
)
# remove phase center
self.phase_center_frame = None
self.phase_center_ra = None
self.phase_center_dec = None
self.phase_center_epoch = None
self._set_drift()
def _phase_dict_helper(
self,
ra,
dec,
epoch,
phase_frame,
ephem_times,
cat_type,
pm_ra,
pm_dec,
dist,
vrad,
cat_name,
lookup_name,
select_mask,
time_array,
):
"""
Supplies a dictionary with parametrs for the phase method to use.
This method should not be called directly by users; it is instead a function
called by the `phase` method, which packages up phase center information
into a single dictionary to allow for consistent behavior between different
instantiations of `UVData` objects.
"""
cat_id = None
info_source = "user"
if self.multi_phase_center:
name_list = list(self.phase_center_catalog.keys())
else:
name_list = [self.object_name]
# We only want to use the JPL-Horizons service if using a non-mutli-phase-ctr
# instance of a UVData object.
if lookup_name and (cat_name not in name_list) and self.multi_phase_center:
if (cat_type is None) or (cat_type == "ephem"):
[
cat_times,
cat_lon,
cat_lat,
cat_dist,
cat_vrad,
] = uvutils.lookup_jplhorizons(
cat_name,
time_array,
telescope_loc=self.telescope_location_lat_lon_alt,
)
cat_type = "ephem"
cat_pm_ra = cat_pm_dec = None
cat_epoch = 2000.0
cat_frame = "icrs"
info_source = "jplh"
else:
raise ValueError(
"Unable to find %s in among the existing sources "
"recorded in the catalog. Please supply source "
"information (e.g., RA and Dec coordinates) and "
"set lookup_name=False." % cat_name
)
elif (cat_name in name_list) and self.multi_phase_center:
# If the name of the source matches, then verify that all of its
# properties are the same as what is stored in phase_center_catalog.
if lookup_name:
cat_id = self.phase_center_catalog[cat_name]["cat_id"]
cat_diffs = 0
else:
cat_id, cat_diffs = self._look_in_catalog(
cat_name,
cat_type=cat_type,
cat_lon=ra,
cat_lat=dec,
cat_frame=phase_frame,
cat_epoch=epoch,
cat_times=ephem_times,
cat_pm_ra=pm_ra,
cat_pm_dec=pm_dec,
cat_dist=dist,
cat_vrad=vrad,
)
# If cat_diffs > 0, it means that the catalog entries dont match
if cat_diffs != 0:
# Last chance here -- if we have selected all of the data phased
# to this phase center, then we are still okay.
if select_mask is None:
# We have selected all data, so we're good
pass
elif np.all(
np.not_equal(
self.phase_center_id_array[~select_mask],
self.phase_center_catalog[cat_name]["cat_id"],
)
):
# We have selected a subset of the data that contains
# everything that was phased to the object
pass
else:
raise ValueError(
"The entry name %s is not unique, but arguments to phase "
"do not match that stored in phase_center_catalog. Try using a "
"different name, using select_mask to select all data "
"phased to this phase center, or using the existing phase "
"center information by setting lookup_name=True." % cat_name
)
cat_type = "sidereal" if cat_type is None else cat_type
cat_lon = ra
cat_lat = dec
cat_frame = phase_frame
cat_epoch = epoch
cat_times = ephem_times
cat_pm_ra = pm_ra
cat_pm_dec = pm_dec
cat_dist = dist
cat_vrad = vrad
else:
temp_dict = self.phase_center_catalog[cat_name]
cat_id = temp_dict["cat_id"]
cat_type = temp_dict["cat_type"]
info_source = temp_dict["info_source"]
# Get here will return None if no key found, which we want
cat_lon = temp_dict.get("cat_lon")
cat_lat = temp_dict.get("cat_lat")
cat_frame = temp_dict.get("cat_frame")
cat_epoch = temp_dict.get("cat_epoch")
cat_times = temp_dict.get("cat_times")
cat_pm_ra = temp_dict.get("cat_pm_ra")
cat_pm_dec = temp_dict.get("cat_pm_dec")
cat_dist = temp_dict.get("cat_dist")
cat_vrad = temp_dict.get("cat_vrad")
else:
# Either this is not a multi phase center data set, or the name of the
# source is unique!
cat_type = "sidereal" if cat_type is None else cat_type
cat_lon = ra
cat_lat = dec
cat_frame = phase_frame
cat_epoch = epoch
cat_times = ephem_times
cat_pm_ra = pm_ra
cat_pm_dec = pm_dec
cat_dist = dist
cat_vrad = vrad
if cat_epoch is None:
cat_epoch = 1950.0 if (cat_frame in ["fk4", "fk4noeterms"]) else 2000.0
if isinstance(cat_epoch, str) or isinstance(cat_epoch, Time):
cat_epoch = Time(cat_epoch).to_value(
"byear" if cat_frame in ["fk4", "fk4noeterms"] else "jyear"
)
# One last check - if we have an ephem phase center, lets make sure that the
# time range of the ephemeris encapsulates the entire range of time_array
check_ephem = False
if cat_type == "ephem":
# Take advantage of this to make sure that lat, lon, and times are all
# ndarray types
cat_lon = np.array(cat_lon, dtype=float)
cat_lat = np.array(cat_lat, dtype=float)
cat_times = np.array(cat_times, dtype=float)
cat_lon.shape += (1,) if (cat_lon.ndim == 0) else ()
cat_lat.shape += (1,) if (cat_lat.ndim == 0) else ()
cat_times.shape += (1,) if (cat_times.ndim == 0) else ()
check_ephem = np.min(time_array) < np.min(cat_times)
check_ephem = check_ephem or (np.max(time_array) > np.max(cat_times))
# If the ephem was supplied by JPL-Horizons, then we can easily expand
# it to cover the requested range.
if check_ephem and (info_source == "jplh"):
# Concat the two time ranges to make sure that we cover both the
# requested time range _and_ the original time range.
[
cat_times,
cat_lon,
cat_lat,
cat_dist,
cat_vrad,
] = uvutils.lookup_jplhorizons(
cat_name,
np.concatenate((np.reshape(time_array, -1), cat_times)),
telescope_loc=self.telescope_location_lat_lon_alt,
)
elif check_ephem:
# The ephem was user-supplied during the call to the phase method,
# raise an error to ask for more ephem data.
raise ValueError(
"Ephemeris data does not cover the entirety of the time range "
"attempted to be phased. Please supply additional ephem data "
"(and if used, set lookup_name=False)."
)
# Time to repackage everything into a dict
phase_dict = {
"cat_name": cat_name,
"cat_type": cat_type,
"cat_lon": cat_lon,
"cat_lat": cat_lat,
"cat_frame": cat_frame,
"cat_epoch": cat_epoch,
"cat_times": cat_times,
"cat_pm_ra": cat_pm_ra,
"cat_pm_dec": cat_pm_dec,
"cat_dist": cat_dist,
"cat_vrad": cat_vrad,
"info_source": info_source,
"cat_id": cat_id,
}
# Finally, make sure everything is a float or an ndarray of floats
for key in phase_dict.keys():
if isinstance(phase_dict[key], np.ndarray):
phase_dict[key] = phase_dict[key].astype(float)
elif (key == "cat_id") and (phase_dict[key] is not None):
# If this is the cat_id, make it an int
phase_dict[key] == int(phase_dict[key])
elif not ((phase_dict[key] is None) or isinstance(phase_dict[key], str)):
phase_dict[key] = float(phase_dict[key])
return phase_dict
def phase(
self,
ra,
dec,
epoch="J2000",
phase_frame="icrs",
cat_type=None,
ephem_times=None,
pm_ra=None,
pm_dec=None,
dist=None,
vrad=None,
cat_name=None,
lookup_name=False,
use_ant_pos=True,
allow_rephase=True,
orig_phase_frame=None,
select_mask=None,
cleanup_old_sources=True,
use_old_proj=False,
fix_old_proj=True,
):
"""
Phase a drift scan dataset to a single ra/dec at a particular epoch.
See the phasing memo under docs/references for more documentation.
Tested against MWA_Tools/CONV2UVFITS/convutils.
Parameters
----------
ra : float
The ra to phase to in radians.
dec : float
The dec to phase to in radians.
epoch : astropy.time.Time object or str
The epoch to use for phasing. Either an astropy Time object or the
string "J2000" (which is the default).
Note that the epoch is only used to evaluate the ra & dec values,
if the epoch is not J2000, the ra & dec values are interpreted
as FK5 ra/dec values and transformed to J2000, the data are then
phased to the J2000 ra/dec values.
phase_frame : str
The astropy frame to phase to. Either 'icrs' or 'gcrs'.
'gcrs' accounts for precession & nutation,
'icrs' accounts for precession, nutation & abberation.
cat_type : str
Type of phase center to be added. Must be one of:
"sidereal" (fixed RA/Dec), "ephem" (RA/Dec that moves with time),
"driftscan" (fixed az/el position). Default is "sidereal", other selections
are only permissible if `multi_phase_center=True`.
ephem_times : ndarray of float
Only used when `cat_type="ephem"`. Describes the time for which the values
of `cat_lon` and `cat_lat` are caclulated, in units of JD. Shape is (Npts,).
pm_ra : float
Proper motion in RA, in units of mas/year. Only used for sidereal phase
centers.
pm_dec : float
Proper motion in Dec, in units of mas/year. Only used for sidereal phase
centers.
dist : float or ndarray of float
Distance of the source, in units of pc. Only used for sidereal and ephem
phase centers. Expected to be a float for sidereal and driftscan phase
centers, and an ndarray of floats of shape (Npts,) for ephem phase centers.
vrad : float or ndarray of float
Radial velocity of the source, in units of km/s. Only used for sidereal and
ephem phase centers. Expected to be a float for sidereal and driftscan phase
centers, and an ndarray of floats of shape (Npts,) for ephem phase centers.
cat_name :str
Name of the phase center being phased to. Required if
`multi_phase_center=True`, otherwise `object_name` set to this value.
lookup_name : bool
Only used if `multi_phase_center=True`, allows the user to lookup phase
center infomation in `phase_center_catalog` (for the entry matching
`cat_name`). Setting this to `True` will ignore the values supplied to the
`ra`, `dec`, `epoch`, `phase_frame`, `pm_ra`, `pm_dec`, `dist`, `vrad`.
use_ant_pos : bool
If True, calculate the uvws directly from the antenna positions
rather than from the existing uvws.
allow_rephase : bool
If True, allow unphasing and rephasing if this object is already
phased.
orig_phase_frame : str
The original phase frame of this object (to use in unphasing). Only
used if the object is already phased, `allow_rephase` is True and
the phase_center_ra/dec of the object does not match `ra` and `dec`.
Defaults to using the 'phase_center_frame' attribute or 'icrs' if
that attribute is None.
select_mask : ndarray of bool
Optional mask for selecting which data to operate on along the blt-axis,
only used if with multi phase center data sets (i.e.,
`multi_phase_center=True`). Shape is (Nblts,).
use_old_proj : bool
If True, use the "old" method for calculating baseline uvw-coordinates,
which involved using astropy to move antenna positions (in ITRF) into
the requested reference frame (either GCRS or ICRS). Default is False.
fix_old_proj : bool
If True, the method will convert a data set with coordinates calculated
using the "old" method, correct them, and then produce new coordinates
using the "new" method.
Raises
------
ValueError
If the phase_type is 'phased' and allow_rephase is False
"""
# Non-multi phase center datasets don't (yet) have a way of recording the
# 'extra' source properties, or selection mask, so make sure that these aren't
# using any of those if looking at a single object.
if not self.multi_phase_center:
if select_mask is not None:
raise ValueError(
"Cannot apply a selection mask if multi_phase_center=False. "
"Remove the select_mask argument to continue."
)
check_params = [pm_ra, pm_dec, dist, vrad]
check_names = ["pm_ra", "pm_dec", "dist", "vrad"]
for name, value in zip(check_names, check_params):
if value not in [0, None]:
raise ValueError(
"Non-zero values of %s not supported when "
"multi_phase_center=False." % name
)
if (cat_type != "sidereal") and (cat_type is not None):
raise ValueError(
"Only sidereal sources are supported when multi_phase_center=False"
)
if lookup_name:
raise ValueError(
"Object name lookup is not supported when multi_phase_center=False"
)
else:
if cat_name is None:
raise ValueError(
"Must supply a unique name for cat_name when phasing a "
"multi phase center data set."
)
# If you are a multi phase center data set, there's no valid reason to be going
# back to the old phase method. Time to bail!
if self.multi_phase_center and use_old_proj:
raise NotImplementedError(
"Multi phase center data sets are not compatible with the old phasing "
"method, please set use_old_proj=False."
)
if not allow_rephase and (self.phase_type == "phased"):
raise ValueError(
"The data is already phased; set allow_rephase"
" to True to unphase and rephase."
)
# Right up front, we're gonna split off the piece of the code that
# does the phasing using the "new" method, since its a lot more flexible
# and because I think at some point, everything outside of this loop
# can be deprecated
if not use_old_proj:
needs_fix = (
(not use_ant_pos)
and (self.phase_type == "phased")
and (
self.phase_center_app_ra is None
or self.phase_center_app_dec is None
)
)
if needs_fix:
if fix_old_proj:
# So to fix the 'old' projection, we use the unphase_to_drift
# method with the 'old' projection to bring the data set back
# to ENU, and then we can move from there. Of course, none of
                    # this is actually necessary if calculating the coordinates
# from antenna positions, so you do you, puvudataset.
self.unphase_to_drift(
phase_frame=orig_phase_frame,
use_old_proj=True,
use_ant_pos=use_ant_pos,
)
else:
raise AttributeError(
"Data missing phase_center_ra_app or phase_center_dec_app, "
"which implies that the data were phased using the 'old' "
"method for phasing (which is not compatible with the new "
"version of the code). You can fix this by calling the "
"phase method with fix_old_proj=True, or can otherwise "
"proceed by using the 'old' projection method by setting "
"use_old_proj=True."
)
# Grab all the meta-data we need for the rotations
time_array = self.time_array
lst_array = self.lst_array
uvw_array = self.uvw_array
ant_1_array = self.ant_1_array
ant_2_array = self.ant_2_array
old_w_vals = self.uvw_array[:, 2].copy()
old_w_vals[self._check_for_unphased()] = 0.0
old_app_ra = self.phase_center_app_ra
old_app_dec = self.phase_center_app_dec
old_frame_pa = self.phase_center_frame_pa
# Check and see if we have any unphased objects, in which case
# their w-values should be zeroed out.
if select_mask is not None:
if len(select_mask) != self.Nblts:
raise IndexError("Selection mask must be of length Nblts.")
time_array = time_array[select_mask]
lst_array = lst_array[select_mask]
uvw_array = uvw_array[select_mask, :]
ant_1_array = ant_1_array[select_mask]
ant_2_array = ant_2_array[select_mask]
if isinstance(old_w_vals, np.ndarray):
old_w_vals = old_w_vals[select_mask]
# Before moving forward with the heavy calculations, we need to do some
# basic housekeeping to make sure that we've got the coordinate data that
# we need in order to proceed.
phase_dict = self._phase_dict_helper(
ra,
dec,
epoch,
phase_frame,
ephem_times,
cat_type,
pm_ra,
pm_dec,
dist,
vrad,
cat_name,
lookup_name,
select_mask,
time_array,
)
# We got the meta-data, now handle calculating the apparent coordinates.
# First, check if we need to look up the phase center in question
new_app_ra, new_app_dec = uvutils.calc_app_coords(
phase_dict["cat_lon"],
phase_dict["cat_lat"],
coord_frame=phase_dict["cat_frame"],
coord_epoch=phase_dict["cat_epoch"],
coord_times=phase_dict["cat_times"],
coord_type=phase_dict["cat_type"],
time_array=time_array,
lst_array=lst_array,
pm_ra=phase_dict["cat_pm_ra"],
pm_dec=phase_dict["cat_pm_dec"],
vrad=phase_dict["cat_vrad"],
dist=phase_dict["cat_dist"],
telescope_loc=self.telescope_location_lat_lon_alt,
)
# Now calculate position angles. If this is a single phase center data set,
# the ref frame is always equal to the source coordinate frame. In a multi
# phase center data set, those two components are allowed to be decoupled.
new_frame_pa = uvutils.calc_frame_pos_angle(
time_array,
new_app_ra,
new_app_dec,
self.telescope_location_lat_lon_alt,
self.phase_center_frame if self.multi_phase_center else phase_frame,
ref_epoch=self.phase_center_epoch if self.multi_phase_center else epoch,
)
# Now its time to do some rotations and calculate the new coordinates
new_uvw = uvutils.calc_uvw(
app_ra=new_app_ra,
app_dec=new_app_dec,
frame_pa=new_frame_pa,
lst_array=lst_array,
use_ant_pos=use_ant_pos,
uvw_array=uvw_array,
antenna_positions=self.antenna_positions,
antenna_numbers=self.antenna_numbers,
ant_1_array=ant_1_array,
ant_2_array=ant_2_array,
old_app_ra=old_app_ra,
old_app_dec=old_app_dec,
old_frame_pa=old_frame_pa,
telescope_lat=self.telescope_location_lat_lon_alt[0],
telescope_lon=self.telescope_location_lat_lon_alt[1],
from_enu=(self.phase_type == "drift"),
)
# With all operations complete, we now start manipulating the UVData object
if self.multi_phase_center:
cat_id = self._add_phase_center(
phase_dict["cat_name"],
phase_dict["cat_type"],
cat_lon=phase_dict["cat_lon"],
cat_lat=phase_dict["cat_lat"],
cat_frame=phase_dict["cat_frame"],
cat_epoch=phase_dict["cat_epoch"],
cat_times=phase_dict["cat_times"],
cat_pm_ra=phase_dict["cat_pm_ra"],
cat_pm_dec=phase_dict["cat_pm_dec"],
cat_dist=phase_dict["cat_dist"],
cat_vrad=phase_dict["cat_vrad"],
info_source=phase_dict["info_source"],
cat_id=phase_dict["cat_id"],
force_update=True,
)
# Now its time to update the raw data. This will return empty if
# metadata_only is set to True. Note that cat_type is only allowed
# to be unphased if this is a multi_phase_center data set.
new_w_vals = 0.0 if (cat_type == "unphased") else new_uvw[:, 2]
self._apply_w_proj(new_w_vals, old_w_vals, select_mask=select_mask)
# Finally, we now take it upon ourselves to update some metadata. What we
# do here will depend a little bit on whether or not we have a selection
# mask active, since most everything is affected by that.
if select_mask is not None:
self.uvw_array[select_mask] = new_uvw
self.phase_center_app_ra[select_mask] = new_app_ra
self.phase_center_app_dec[select_mask] = new_app_dec
self.phase_center_frame_pa[select_mask] = new_frame_pa
if self.multi_phase_center:
self.phase_center_id_array[select_mask] = cat_id
else:
self.uvw_array = new_uvw
self.phase_center_app_ra = new_app_ra
self.phase_center_app_dec = new_app_dec
self.phase_center_frame_pa = new_frame_pa
if self.multi_phase_center:
self.phase_center_id_array[:] = cat_id
# If not multi phase center, make sure to update the ra/dec values, since
# otherwise we'll have no record of source properties.
if not self.multi_phase_center:
# Make sure this is actually marked as a phased dataset now
self._set_phased()
# Update the phase center properties
self.phase_center_ra = phase_dict["cat_lon"]
self.phase_center_dec = phase_dict["cat_lat"]
self.phase_center_epoch = phase_dict["cat_epoch"]
self.phase_center_frame = phase_dict["cat_frame"]
if cat_name is not None:
self.object_name = cat_name
else:
self.phase_center_ra = 0.0
self.phase_center_dec = 0.0
self.phase_center_epoch = 2000.0
if cleanup_old_sources:
self._clear_unused_phase_centers()
# All done w/ the new phase method
return
warnings.warn(
"The original `phase` method is deprecated, and will be removed in "
"pyuvdata v3.0 (although `fix_phase` will remain for longer). "
"Note that the old and new phase methods are NOT compatible with one "
"another, so if you have phased using the old method, you should call "
"the phase method with fix_old_proj=True, or otherwise can use the "
"unphase_to_drift method with use_old_proj=True to undo the old "
"corrections before using the new version of the phase method.",
DeprecationWarning,
)
if self.phase_type == "drift":
pass
elif self.phase_type == "phased":
# To get to this point, allow_rephase has to be true
if not np.isclose(
self.phase_center_ra,
ra,
rtol=self._phase_center_ra.tols[0],
atol=self._phase_center_ra.tols[1],
) or not np.isclose(
self.phase_center_dec,
dec,
rtol=self._phase_center_dec.tols[0],
atol=self._phase_center_dec.tols[1],
):
self.unphase_to_drift(
phase_frame=orig_phase_frame,
use_ant_pos=use_ant_pos,
use_old_proj=True,
)
else:
raise ValueError(
"The phasing type of the data is unknown. "
'Set the phase_type to "drift" or "phased" to '
"reflect the phasing status of the data"
)
if phase_frame not in ["icrs", "gcrs"]:
raise ValueError("phase_frame can only be set to icrs or gcrs.")
if epoch == "J2000" or epoch == 2000:
icrs_coord = SkyCoord(ra=ra, dec=dec, unit="radian", frame="icrs")
else:
assert isinstance(epoch, Time)
phase_center_coord = SkyCoord(
ra=ra, dec=dec, unit="radian", equinox=epoch, frame=FK5
)
# convert to icrs (i.e. J2000) to write to object
icrs_coord = phase_center_coord.transform_to("icrs")
self.phase_center_ra = icrs_coord.ra.radian
self.phase_center_dec = icrs_coord.dec.radian
self.phase_center_epoch = 2000.0
self.phase_center_app_ra = None
self.phase_center_app_dec = None
self.phase_center_frame_pa = None
if phase_frame == "icrs":
frame_phase_center = icrs_coord
else:
# use center of observation for obstime for gcrs
center_time = np.mean([np.max(self.time_array), np.min(self.time_array)])
icrs_coord.obstime = Time(center_time, format="jd")
frame_phase_center = icrs_coord.transform_to("gcrs")
# This promotion is REQUIRED to get the right answer when we
# add in the telescope location for ICRS
self.uvw_array = np.float64(self.uvw_array)
unique_times, unique_inds = np.unique(self.time_array, return_index=True)
telescope_location = EarthLocation.from_geocentric(
*self.telescope_location, unit=units.m
)
obs_times = Time(unique_times, format="jd")
itrs_telescope_locations = telescope_location.get_itrs(obstime=obs_times)
itrs_telescope_locations = SkyCoord(itrs_telescope_locations)
# just calling transform_to(coord.GCRS) will delete the obstime information
# need to re-add obstimes for a GCRS transformation
if phase_frame == "gcrs":
frame_telescope_locations = itrs_telescope_locations.transform_to(
getattr(coord, f"{phase_frame}".upper())(obstime=obs_times)
)
else:
frame_telescope_locations = itrs_telescope_locations.transform_to(
getattr(coord, f"{phase_frame}".upper())
)
        # set the representation_type to cartesian to get xyz later
frame_telescope_locations.representation_type = "cartesian"
for ind, jd in enumerate(unique_times):
inds = np.where(self.time_array == jd)[0]
obs_time = obs_times[ind]
itrs_lat_lon_alt = self.telescope_location_lat_lon_alt
frame_telescope_location = frame_telescope_locations[ind]
if use_ant_pos:
# This promotion is REQUIRED to get the right answer when we
# add in the telescope location for ICRS
ecef_ant_pos = (
np.float64(self.antenna_positions) + self.telescope_location
)
itrs_ant_coord = SkyCoord(
x=ecef_ant_pos[:, 0] * units.m,
y=ecef_ant_pos[:, 1] * units.m,
z=ecef_ant_pos[:, 2] * units.m,
frame="itrs",
obstime=obs_time,
)
frame_ant_coord = itrs_ant_coord.transform_to(phase_frame)
frame_ant_rel = (
(frame_ant_coord.cartesian - frame_telescope_location.cartesian)
.get_xyz()
.T.value
)
frame_ant_uvw = uvutils.phase_uvw(
frame_phase_center.ra.rad, frame_phase_center.dec.rad, frame_ant_rel
)
# instead of looping through every ind, find the spot in antenna number
# array where ant_num <= ant1 < ant_number and similarly for ant2
# for all baselines in inds
# then find the uvw coordinate for all at the same time
# antenna_numbers does not necessarily need to be in order on the object
# but needs to be in order for the searchsorted to work.
# ant1_index and ant2_index arrays will preserve the order of blts
ant_sort = np.argsort(self.antenna_numbers)
ant1_index = np.searchsorted(
self.antenna_numbers[ant_sort], self.ant_1_array[inds]
)
ant2_index = np.searchsorted(
self.antenna_numbers[ant_sort], self.ant_2_array[inds]
)
self.uvw_array[inds] = (
frame_ant_uvw[ant_sort][ant2_index, :]
- frame_ant_uvw[ant_sort][ant1_index, :]
)
else:
# Also, uvws should be thought of like ENU, not ECEF (or rotated ECEF)
# convert them to ECEF to transform between frames
uvws_use = self.uvw_array[inds, :]
uvw_ecef = uvutils.ECEF_from_ENU(uvws_use, *itrs_lat_lon_alt)
itrs_uvw_coord = SkyCoord(
x=uvw_ecef[:, 0] * units.m,
y=uvw_ecef[:, 1] * units.m,
z=uvw_ecef[:, 2] * units.m,
frame="itrs",
obstime=obs_time,
)
frame_uvw_coord = itrs_uvw_coord.transform_to(phase_frame)
# this takes out the telescope location in the new frame,
# so these are vectors again
frame_rel_uvw = (
frame_uvw_coord.cartesian.get_xyz().value.T
- frame_telescope_location.cartesian.get_xyz().value
)
self.uvw_array[inds, :] = uvutils.phase_uvw(
frame_phase_center.ra.rad, frame_phase_center.dec.rad, frame_rel_uvw
)
# calculate data and apply phasor
if not self.metadata_only:
w_lambda = (
self.uvw_array[:, 2].reshape(self.Nblts, 1)
/ const.c.to("m/s").value
* self.freq_array.reshape(1, self.Nfreqs)
)
if self.future_array_shapes:
phs = np.exp(-1j * 2 * np.pi * w_lambda[:, :, None])
else:
phs = np.exp(-1j * 2 * np.pi * w_lambda[:, None, :, None])
self.data_array *= phs
self.phase_center_frame = phase_frame
self._set_phased()
def phase_to_time(
self,
time,
phase_frame="icrs",
use_ant_pos=True,
use_old_proj=False,
allow_rephase=True,
orig_phase_frame=None,
select_mask=None,
):
"""
Phase a drift scan dataset to the ra/dec of zenith at a particular time.
See the phasing memo under docs/references for more documentation.
Parameters
----------
time : astropy.time.Time object or float
The time to phase to, an astropy Time object or a float Julian Date
phase_frame : str
The astropy frame to phase to. Either 'icrs' or 'gcrs'.
'gcrs' accounts for precession & nutation,
'icrs' accounts for precession, nutation & abberation.
use_ant_pos : bool
If True, calculate the uvws directly from the antenna positions
rather than from the existing uvws.
allow_rephase : bool
If True, allow unphasing and rephasing if this object is already
phased.
orig_phase_frame : str
The original phase frame of this object (to use in unphasing). Only
used if the object is already phased, `allow_rephase` is True and
the phase_center_ra/dec of the object does not match `ra` and `dec`.
Defaults to using the 'phase_center_frame' attribute or 'icrs' if
that attribute is None.
select_mask : array_like
Selection mask for which data should be rephased, only applicable if
`multi_phase_center=True`. Any array-like able to be used as an index
is suitable -- the most typical is an array of bool with length `Nblts`,
or an array of ints within the range (-Nblts, Nblts).
Raises
------
ValueError
If the phase_type is not 'drift'
TypeError
If time is not an astropy.time.Time object or Julian Date as a float
"""
if isinstance(time, (float, np.floating)):
time = Time(time, format="jd")
if not isinstance(time, Time):
raise TypeError("time must be an astropy.time.Time object or a float")
# Generate ra/dec of zenith at time in the phase_frame coordinate
# system to use for phasing
telescope_location = EarthLocation.from_geocentric(
*self.telescope_location, unit="m"
)
zenith_coord = SkyCoord(
alt=Angle(90 * units.deg),
az=Angle(0 * units.deg),
obstime=time,
frame="altaz",
location=telescope_location,
)
obs_zenith_coord = zenith_coord.transform_to(phase_frame)
zenith_ra = obs_zenith_coord.ra.rad
zenith_dec = obs_zenith_coord.dec.rad
self.phase(
zenith_ra,
zenith_dec,
epoch="J2000",
phase_frame=phase_frame,
use_ant_pos=use_ant_pos,
use_old_proj=use_old_proj,
allow_rephase=allow_rephase,
orig_phase_frame=orig_phase_frame,
select_mask=select_mask,
cat_name=("zenith_at_jd%f" % self.time_array[0])
if self.multi_phase_center
else None,
)
def set_uvws_from_antenna_positions(
self,
allow_phasing=False,
require_phasing=True,
orig_phase_frame=None,
output_phase_frame="icrs",
use_old_proj=False,
):
"""
Calculate UVWs based on antenna_positions.
Parameters
----------
allow_phasing : bool
Option for phased data. If data is phased and allow_phasing=True,
UVWs will be calculated and the visibilities will be rephased. Default
is False.
require_phasing : bool
Option for phased data. If data is phased and require_phasing=True, then
the method will throw an error unless allow_phasing=True, otherwise if
`require_phasing=False` and `allow_phasing=False`, the UVWs will be
recalculated but the data will NOT be rephased. This feature should only be
used in limited circumstances (e.g., when certain metadata like exact time
are not trusted), as misuse can significantly corrupt data.
orig_phase_frame : str
The astropy frame to phase from. Either 'icrs' or 'gcrs'.
Defaults to using the 'phase_center_frame' attribute or 'icrs' if
that attribute is None. Only used if allow_phasing is True and use_old_proj
is True.
output_phase_frame : str
The astropy frame to phase to. Either 'icrs' or 'gcrs'. Only used if
allow_phasing is True, and use_old_proj is True.
use_old_proj : bool
If set to True, uses the 'old' method of calculating baseline vectors.
Default is False, which will instead use the 'new' method.
Raises
------
ValueError
If data is phased and allow_phasing is False.
Warns
-----
UserWarning
If the phase_type is 'phased'
"""
if not use_old_proj and not (
self.phase_center_app_ra is None or self.phase_center_app_dec is None
):
if (self.phase_type == "phased") and (
not (allow_phasing) and require_phasing
):
raise ValueError(
"UVW recalculation requires either unphased data or the ability "
"to rephase data. Use unphase_to_drift or set allow_phasing=True."
)
telescope_location = self.telescope_location_lat_lon_alt
new_uvw = uvutils.calc_uvw(
app_ra=self.phase_center_app_ra,
app_dec=self.phase_center_app_dec,
frame_pa=self.phase_center_frame_pa,
lst_array=self.lst_array,
use_ant_pos=True,
antenna_positions=self.antenna_positions,
antenna_numbers=self.antenna_numbers,
ant_1_array=self.ant_1_array,
ant_2_array=self.ant_2_array,
telescope_lat=telescope_location[0],
telescope_lon=telescope_location[1],
from_enu=(self.phase_type != "phased"),
to_enu=(self.phase_type != "phased"),
)
if self.phase_type == "phased":
if allow_phasing:
old_w_vals = self.uvw_array[:, 2].copy()
old_w_vals[self._check_for_unphased()] = 0.0
self._apply_w_proj(new_uvw[:, 2], old_w_vals)
else:
warnings.warn(
"Recalculating uvw_array without adjusting visibility phases "
"-- this can introduce significant errors if used incorrectly."
)
# If the data are phased, we've already adjusted the phases. Now we just
# need to update the uvw's and we are home free.
self.uvw_array = new_uvw
return
# mutli-phase-ctr datasets should never use the 'old' uvw calculation method
if self.multi_phase_center:
raise NotImplementedError(
"Multi phase center data sets are not compatible with the old uvw "
"calculation method, please set use_old_proj=False."
)
phase_type = self.phase_type
if phase_type == "phased":
if allow_phasing:
if not self.metadata_only:
warnings.warn(
"Data will be unphased and rephased "
"to calculate UVWs, which might introduce small "
"inaccuracies to the data."
)
if orig_phase_frame not in [None, "icrs", "gcrs"]:
raise ValueError(
"Invalid parameter orig_phase_frame. "
'Options are "icrs", "gcrs", or None.'
)
if output_phase_frame not in ["icrs", "gcrs"]:
raise ValueError(
"Invalid parameter output_phase_frame. "
'Options are "icrs" or "gcrs".'
)
phase_center_ra = self.phase_center_ra
phase_center_dec = self.phase_center_dec
phase_center_epoch = self.phase_center_epoch
self.unphase_to_drift(
phase_frame=orig_phase_frame, use_old_proj=True,
)
else:
raise ValueError(
"UVW calculation requires unphased data. "
"Use unphase_to_drift or set "
"allow_phasing=True."
)
antenna_locs_ENU, _ = self.get_ENU_antpos(center=False)
# this code used to loop through every bl in the unique,
# find the index into self.antenna_array of ant1 and ant2
# and fill out the self.uvw_array for all matching bls.
# instead, find the indices and reverse inds from the unique,
# create the unique ant1 and ant2 arrays
# use searchsorted to find the index of the antenna numbers into ant1 and ant2
# create the unique uvw array then broadcast to self.uvw_array
bls, unique_inds, reverse_inds = np.unique(
self.baseline_array, return_index=True, return_inverse=True
)
# antenna_numbers does not necessarily need to be in order on the object
# but needs to be in order for the searchsorted to work.
# ant1_index and ant2_index arrays will preserve the order of blts
ant_sort = np.argsort(self.antenna_numbers)
ant1_index = np.searchsorted(
self.antenna_numbers[ant_sort], self.ant_1_array[unique_inds],
)
ant2_index = np.searchsorted(
self.antenna_numbers[ant_sort], self.ant_2_array[unique_inds],
)
_uvw_array = np.zeros((bls.size, 3))
_uvw_array = (
antenna_locs_ENU[ant_sort][ant2_index, :]
- antenna_locs_ENU[ant_sort][ant1_index, :]
)
self.uvw_array = _uvw_array[reverse_inds]
if phase_type == "phased":
self.phase(
phase_center_ra,
phase_center_dec,
phase_center_epoch,
phase_frame=output_phase_frame,
use_old_proj=use_old_proj,
)
def fix_phase(
self, use_ant_pos=True,
):
"""
Fix the data to be consistent with the new phasing method.
This is a simple utility function for updating UVW coordinates calculated using
the 'old' phasing algorithm with those calculated by the 'new' algorithm. Note
that this step is required for using the new methods with data phased using the
`phase` methiod prior to pyuvdata v2.2.
Parameters
----------
use_ant_pos : bool
Use the antenna positions for determining UVW coordinates. Default is True.
"""
# If we are missing apparent coordinates, we should calculate those now
if (self.phase_center_app_ra is None) or (self.phase_center_app_dec is None):
self._set_app_coords_helper()
# If we are just using the antenna positions, we don't actually need to do
# anything, since the new baseline vectors will be unaffected by the prior
# phasing method, and the delta_w values already get correctly corrected for.
if use_ant_pos:
self.set_uvws_from_antenna_positions(
allow_phasing=True, use_old_proj=False,
)
elif self.multi_phase_center:
raise ValueError(
"Cannot run fix_phase on a mutli-phase-ctr dataset without using the "
"antenna positions. Please set use_ant_pos=True."
)
else:
# Record the old values
phase_center_ra = self.phase_center_ra
phase_center_dec = self.phase_center_dec
phase_center_frame = self.phase_center_frame
phase_center_epoch = self.phase_center_epoch
cat_name = self.object_name
# Bring the UVWs back to ENU/unphased
self.unphase_to_drift(
phase_frame=self.phase_center_frame,
use_ant_pos=False,
use_old_proj=True,
)
# Check for any autos, since their uvws get potentially corrupted
# by the above operation
auto_mask = self.ant_1_array == self.ant_2_array
if any(auto_mask):
self.uvw_array[auto_mask, :] = 0.0
# And rephase the data using the new algorithm
self.phase(
phase_center_ra,
phase_center_dec,
phase_frame=phase_center_frame,
epoch=phase_center_epoch,
cat_name=cat_name,
use_ant_pos=False,
)
def __add__(
self,
other,
inplace=False,
phase_center_radec=None,
unphase_to_drift=False,
phase_frame="icrs",
orig_phase_frame=None,
use_ant_pos=True,
verbose_history=False,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
make_multi_phase=False,
ignore_name=False,
):
"""
Combine two UVData objects along frequency, polarization and/or baseline-time.
Parameters
----------
other : UVData object
Another UVData object which will be added to self.
inplace : bool
If True, overwrite self as we go, otherwise create a third object
as the sum of the two.
phase_center_radec : array_like of float
The phase center to phase the files to before adding the objects in
radians (in the ICRS frame). Note that if this keyword is not set
and the two UVData objects are phased to different phase centers
or if one is phased and one is drift, this method will error
because the objects are not compatible.
unphase_to_drift : bool
If True, unphase the objects to drift before combining them.
phase_frame : str
The astropy frame to phase to. Either 'icrs' or 'gcrs'.
'gcrs' accounts for precession & nutation,
            'icrs' accounts for precession, nutation & aberration.
Only used if `phase_center_radec` is set.
orig_phase_frame : str
The original phase frame of the data (if it is already phased). Used
for unphasing, only if `unphase_to_drift` or `phase_center_radec`
are set. Defaults to using the 'phase_center_frame' attribute or
'icrs' if that attribute is None.
use_ant_pos : bool
If True, calculate the phased or unphased uvws directly from the
antenna positions rather than from the existing uvws.
Only used if `unphase_to_drift` or `phase_center_radec` are set.
verbose_history : bool
Option to allow more verbose history. If True and if the histories for the
two objects are different, the combined object will keep all the history of
both input objects (if many objects are combined in succession this can
lead to very long histories). If False and if the histories for the two
objects are different, the combined object will have the history of the
first object and only the parts of the second object history that are unique
(this is done word by word and can result in hard to interpret histories).
run_check : bool
Option to check for the existence and proper shapes of parameters
after combining objects.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
combining objects.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
make_multi_phase : bool
Option to make the output a multi phase center dataset, capable of holding
data on multiple phase centers. Setting this to true will allow for two
UVData objects to be combined, even if the phase center properties do not
agree (so long as the names are unique for each phase center). Default is
False.
ignore_name : bool
Option to ignore the name of the phase center (`cat_name` in
`phase_center_catalog` when `multi_phase_center=True`, otherwise
`object_name`) when combining two UVData objects. Doing so effectively
adopts the name found in the first UVData object in the sum. Default is
False.
Raises
------
ValueError
If other is not a UVData object, self and other are not compatible
or if data in self and other overlap. One way they can not be
compatible is if they have different phasing, in that case set
`unphase_to_drift` or `phase_center_radec` to (un)phase them so they
are compatible.
If `phase_center_radec` is not None and is not length 2.
"""
if inplace:
this = self
else:
this = self.copy()
# Check that both objects are UVData and valid
this.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
if not issubclass(other.__class__, this.__class__):
if not issubclass(this.__class__, other.__class__):
raise ValueError(
"Only UVData (or subclass) objects can be "
"added to a UVData (or subclass) object"
)
other.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
# Check to make sure that both objects are consistent w/ use of flex_spw
if this.flex_spw != other.flex_spw:
raise ValueError(
"To combine these data, flex_spw must be set to the same "
"value (True or False) for both objects."
)
# check that both objects have the same array shapes
if this.future_array_shapes != other.future_array_shapes:
raise ValueError(
"Both objects must have the same `future_array_shapes` parameter. "
"Use the `use_future_array_shapes` or `use_current_array_shapes` "
"methods to convert them."
)
if phase_center_radec is not None and unphase_to_drift:
raise ValueError(
"phase_center_radec cannot be set if unphase_to_drift is True."
)
if unphase_to_drift:
if this.phase_type != "drift":
warnings.warn("Unphasing this UVData object to drift")
this.unphase_to_drift(
phase_frame=orig_phase_frame, use_ant_pos=use_ant_pos
)
if other.phase_type != "drift":
warnings.warn("Unphasing other UVData object to drift")
other.unphase_to_drift(
phase_frame=orig_phase_frame, use_ant_pos=use_ant_pos
)
if phase_center_radec is not None:
if np.array(phase_center_radec).size != 2:
raise ValueError("phase_center_radec should have length 2.")
# If this object is not phased or is not phased close to
# phase_center_radec, (re)phase it.
# Close is defined using the phase_center_ra/dec tolerances.
if this.phase_type == "drift" or (
not np.isclose(
this.phase_center_ra,
phase_center_radec[0],
rtol=this._phase_center_ra.tols[0],
atol=this._phase_center_ra.tols[1],
)
or not np.isclose(
this.phase_center_dec,
phase_center_radec[1],
rtol=this._phase_center_dec.tols[0],
atol=this._phase_center_dec.tols[1],
)
):
warnings.warn("Phasing this UVData object to phase_center_radec")
this.phase(
phase_center_radec[0],
phase_center_radec[1],
phase_frame=phase_frame,
orig_phase_frame=orig_phase_frame,
use_ant_pos=use_ant_pos,
allow_rephase=True,
)
# If other object is not phased or is not phased close to
# phase_center_radec, (re)phase it.
# Close is defined using the phase_center_ra/dec tolerances.
if other.phase_type == "drift" or (
not np.isclose(
other.phase_center_ra,
phase_center_radec[0],
rtol=other._phase_center_ra.tols[0],
atol=other._phase_center_ra.tols[1],
)
or not np.isclose(
other.phase_center_dec,
phase_center_radec[1],
rtol=other._phase_center_dec.tols[0],
atol=other._phase_center_dec.tols[1],
)
):
warnings.warn("Phasing other UVData object to phase_center_radec")
other.phase(
phase_center_radec[0],
phase_center_radec[1],
phase_frame=phase_frame,
orig_phase_frame=orig_phase_frame,
use_ant_pos=use_ant_pos,
allow_rephase=True,
)
# Define parameters that must be the same to add objects
# But phase_center should be the same, even if in drift (empty parameters)
compatibility_params = [
"_vis_units",
"_telescope_name",
"_instrument",
"_telescope_location",
"_phase_type",
"_Nants_telescope",
"_antenna_names",
"_antenna_numbers",
"_antenna_positions",
"_phase_center_frame",
"_phase_center_epoch",
]
if not this.future_array_shapes and not this.flex_spw:
compatibility_params.append("_channel_width")
multi_obj_check = False
if this.multi_phase_center == other.multi_phase_center:
            # If the names are different and we are making a multi-phase-ctr data set,
# then we can skip the step of checking the ra and dec, otherwise we need to
# check it
multi_obj_check = make_multi_phase or this.multi_phase_center
if not ((this.object_name != other.object_name) and multi_obj_check):
compatibility_params.append("_phase_center_ra")
compatibility_params.append("_phase_center_dec")
# Also, if we are not supposed to ignore the name, then make sure that its
# one of the parameters we check for compatibility.
if not (ignore_name or multi_obj_check):
compatibility_params.append("_object_name")
elif not (this.multi_phase_center or make_multi_phase):
raise ValueError(
"To combine these data, please run the add operation with the UVData "
"object with multi_phase_center set to True as the first object in the "
"add operation."
)
# Build up history string
history_update_string = " Combined data along "
n_axes = 0
# Create blt arrays for convenience
prec_t = -2 * np.floor(np.log10(this._time_array.tols[-1])).astype(int)
prec_b = 8
this_blts = np.array(
[
"_".join(
["{1:.{0}f}".format(prec_t, blt[0]), str(blt[1]).zfill(prec_b)]
)
for blt in zip(this.time_array, this.baseline_array)
]
)
other_blts = np.array(
[
"_".join(
["{1:.{0}f}".format(prec_t, blt[0]), str(blt[1]).zfill(prec_b)]
)
for blt in zip(other.time_array, other.baseline_array)
]
)
# Check we don't have overlapping data
both_pol, this_pol_ind, other_pol_ind = np.intersect1d(
this.polarization_array, other.polarization_array, return_indices=True
)
# If we have a flexible spectral window, the handling here becomes a bit funky,
# because we are allowed to have channels with the same frequency *if* they
# belong to different spectral windows (one real-life example: you might want
# to preserve guard bands in the correlator, which can have overlaping RF
# frequency channels)
if this.flex_spw:
this_freq_ind = np.array([], dtype=np.int64)
other_freq_ind = np.array([], dtype=np.int64)
both_freq = np.array([], dtype=float)
both_spw = np.intersect1d(this.spw_array, other.spw_array)
for idx in both_spw:
this_mask = np.where(this.flex_spw_id_array == idx)[0]
other_mask = np.where(other.flex_spw_id_array == idx)[0]
if this.future_array_shapes:
both_spw_freq, this_spw_ind, other_spw_ind = np.intersect1d(
this.freq_array[this_mask],
other.freq_array[other_mask],
return_indices=True,
)
else:
both_spw_freq, this_spw_ind, other_spw_ind = np.intersect1d(
this.freq_array[0, this_mask],
other.freq_array[0, other_mask],
return_indices=True,
)
this_freq_ind = np.append(this_freq_ind, this_mask[this_spw_ind])
other_freq_ind = np.append(other_freq_ind, other_mask[other_spw_ind])
both_freq = np.append(both_freq, both_spw_freq)
else:
if this.future_array_shapes:
both_freq, this_freq_ind, other_freq_ind = np.intersect1d(
this.freq_array, other.freq_array, return_indices=True
)
else:
both_freq, this_freq_ind, other_freq_ind = np.intersect1d(
this.freq_array[0, :], other.freq_array[0, :], return_indices=True
)
both_blts, this_blts_ind, other_blts_ind = np.intersect1d(
this_blts, other_blts, return_indices=True
)
if not self.metadata_only and (
len(both_pol) > 0 and len(both_freq) > 0 and len(both_blts) > 0
):
# check that overlapping data is not valid
if this.future_array_shapes:
this_inds = np.ravel_multi_index(
(
this_blts_ind[:, np.newaxis, np.newaxis],
this_freq_ind[np.newaxis, :, np.newaxis],
this_pol_ind[np.newaxis, np.newaxis, :],
),
this.data_array.shape,
).flatten()
other_inds = np.ravel_multi_index(
(
other_blts_ind[:, np.newaxis, np.newaxis],
other_freq_ind[np.newaxis, :, np.newaxis],
other_pol_ind[np.newaxis, np.newaxis, :],
),
other.data_array.shape,
).flatten()
else:
this_inds = np.ravel_multi_index(
(
this_blts_ind[:, np.newaxis, np.newaxis, np.newaxis],
np.zeros((1, 1, 1, 1), dtype=np.int64),
this_freq_ind[np.newaxis, np.newaxis, :, np.newaxis],
this_pol_ind[np.newaxis, np.newaxis, np.newaxis, :],
),
this.data_array.shape,
).flatten()
other_inds = np.ravel_multi_index(
(
other_blts_ind[:, np.newaxis, np.newaxis, np.newaxis],
np.zeros((1, 1, 1, 1), dtype=np.int64),
other_freq_ind[np.newaxis, np.newaxis, :, np.newaxis],
other_pol_ind[np.newaxis, np.newaxis, np.newaxis, :],
),
other.data_array.shape,
).flatten()
this_all_zero = np.all(this.data_array.flatten()[this_inds] == 0)
this_all_flag = np.all(this.flag_array.flatten()[this_inds])
other_all_zero = np.all(other.data_array.flatten()[other_inds] == 0)
other_all_flag = np.all(other.flag_array.flatten()[other_inds])
if this_all_zero and this_all_flag:
# we're fine to overwrite; update history accordingly
history_update_string = " Overwrote invalid data using pyuvdata."
this.history += history_update_string
elif other_all_zero and other_all_flag:
raise ValueError(
"To combine these data, please run the add operation again, "
"but with the object whose data is to be overwritten as the "
"first object in the add operation."
)
else:
raise ValueError(
"These objects have overlapping data and cannot be combined."
)
# find the blt indices in "other" but not in "this"
temp = np.nonzero(~np.in1d(other_blts, this_blts))[0]
if len(temp) > 0:
bnew_inds = temp
new_blts = other_blts[temp]
history_update_string += "baseline-time"
n_axes += 1
else:
bnew_inds, new_blts = ([], [])
# if there's any overlap in blts, check extra params
temp = np.nonzero(np.in1d(other_blts, this_blts))[0]
if len(temp) > 0:
# add metadata to be checked to compatibility params
extra_params = [
"_integration_time",
"_uvw_array",
"_lst_array",
"_phase_center_app_ra",
"_phase_center_app_dec",
"_phase_center_frame_pa",
"_phase_center_id_array",
"_phase_center_catalog",
"_Nphase",
]
compatibility_params.extend(extra_params)
if not ignore_name and ("_object_name" not in compatibility_params):
compatibility_params.append("_object_name")
# TODO: Add handling for what happens when you have two different source
# catalogs that you want to combine
# find the freq indices in "other" but not in "this"
if self.flex_spw:
other_mask = np.ones_like(other.flex_spw_id_array, dtype=bool)
for idx in np.intersect1d(this.spw_array, other.spw_array):
if this.future_array_shapes:
other_mask[other.flex_spw_id_array == idx] = np.isin(
other.freq_array[other.flex_spw_id_array == idx],
this.freq_array[this.flex_spw_id_array == idx],
invert=True,
)
else:
other_mask[other.flex_spw_id_array == idx] = np.isin(
other.freq_array[0, other.flex_spw_id_array == idx],
this.freq_array[0, this.flex_spw_id_array == idx],
invert=True,
)
temp = np.where(other_mask)[0]
else:
if this.future_array_shapes:
temp = np.nonzero(~np.in1d(other.freq_array, this.freq_array))[0]
else:
temp = np.nonzero(
~np.in1d(other.freq_array[0, :], this.freq_array[0, :])
)[0]
if len(temp) > 0:
fnew_inds = temp
if n_axes > 0:
history_update_string += ", frequency"
else:
history_update_string += "frequency"
n_axes += 1
else:
fnew_inds = []
# if channel width is an array and there's any overlap in freqs,
# check extra params
if this.future_array_shapes or this.flex_spw:
if this.future_array_shapes:
temp = np.nonzero(np.in1d(other.freq_array, this.freq_array))[0]
else:
temp = np.nonzero(
np.in1d(other.freq_array[0, :], this.freq_array[0, :])
)[0]
if len(temp) > 0:
# add metadata to be checked to compatibility params
extra_params = ["_channel_width"]
compatibility_params.extend(extra_params)
# find the pol indices in "other" but not in "this"
temp = np.nonzero(~np.in1d(other.polarization_array, this.polarization_array))[
0
]
if len(temp) > 0:
pnew_inds = temp
if n_axes > 0:
history_update_string += ", polarization"
else:
history_update_string += "polarization"
n_axes += 1
else:
pnew_inds = []
# Actually check compatibility parameters
for cp in compatibility_params:
if cp == "_integration_time":
# only check that overlapping blt indices match
params_match = np.allclose(
this.integration_time[this_blts_ind],
other.integration_time[other_blts_ind],
rtol=this._integration_time.tols[0],
atol=this._integration_time.tols[1],
)
elif cp == "_uvw_array":
# only check that overlapping blt indices match
params_match = np.allclose(
this.uvw_array[this_blts_ind, :],
other.uvw_array[other_blts_ind, :],
rtol=this._uvw_array.tols[0],
atol=this._uvw_array.tols[1],
)
elif cp == "_lst_array":
# only check that overlapping blt indices match
params_match = np.allclose(
this.lst_array[this_blts_ind],
other.lst_array[other_blts_ind],
rtol=this._lst_array.tols[0],
atol=this._lst_array.tols[1],
)
elif cp == "_channel_width" and this.future_array_shapes or this.flex_spw:
# only check that overlapping freq indices match
params_match = np.allclose(
this.channel_width[this_freq_ind],
other.channel_width[other_freq_ind],
rtol=this._channel_width.tols[0],
atol=this._channel_width.tols[1],
)
elif (cp == "_phase_center_app_ra") and (this.phase_type == "phased"):
# only check that overlapping blt indices match
params_match = np.allclose(
this.phase_center_app_ra[this_blts_ind],
other.phase_center_app_ra[other_blts_ind],
rtol=this._phase_center_app_ra.tols[0],
atol=this._phase_center_app_ra.tols[1],
)
elif (cp == "_phase_center_app_dec") and (this.phase_type == "phased"):
# only check that overlapping blt indices match
params_match = np.allclose(
this.phase_center_app_dec[this_blts_ind],
other.phase_center_app_dec[other_blts_ind],
rtol=this._phase_center_app_dec.tols[0],
atol=this._phase_center_app_dec.tols[1],
)
elif (cp == "_phase_center_frame_pa") and (this.phase_type == "phased"):
# only check that overlapping blt indices match
params_match = np.allclose(
this.phase_center_frame_pa[this_blts_ind],
other.phase_center_frame_pa[other_blts_ind],
rtol=this._phase_center_frame_pa.tols[0],
atol=this._phase_center_frame_pa.tols[1],
)
else:
params_match = getattr(this, cp) == getattr(other, cp)
if not params_match:
msg = (
"UVParameter " + cp[1:] + " does not match. Cannot combine objects."
)
if cp[1:] == "object_name":
msg += (
" This can potentially be remedied by setting "
"ignore_name=True, or by allowing the creation of a "
"mutli-phase-ctr dataset (by setting make_multi_phase=True)."
)
raise ValueError(msg)
# At this point, we are assuming that the two data sets _mostly_ compatible.
# Last thing we need to check is if these are mutli-phase-ctr data sets, whether
# or not they are compatible.
if this.multi_phase_center or make_multi_phase:
if other.multi_phase_center:
other_names = list(other.phase_center_catalog.keys())
other_cat = other.phase_center_catalog
else:
other_names = [other.object_name]
other_cat = {
other_names[0]: {
"cat_type": "sidereal",
"cat_lon": other.phase_center_ra,
"cat_lat": other.phase_center_dec,
"cat_frame": other.phase_center_frame,
"cat_epoch": other.phase_center_epoch,
},
}
for name in other_names:
cat_id, cat_diffs = this._look_in_catalog(
name, phase_dict=other_cat[name]
)
if (cat_id is not None) and (cat_diffs != 0):
# We have a name conflict, raise an error now
raise ValueError(
"There exists a target named %s in both objects in the "
"sum, but their properties are different. Use the rename_"
"phase_center method in order to rename it in one object."
% name
)
# Begin manipulating the objects.
if make_multi_phase and (not this.multi_phase_center):
this._set_multi_phase_center(preserve_phase_center_info=True)
if other.multi_phase_center:
# This to get adding stuff to the catalog
reserved_ids = [
other.phase_center_catalog[name]["cat_id"]
for name in other.phase_center_catalog.keys()
]
# First loop, we want to look at the sources that are in this, but not
# other, since we need to choose catalog IDs that won't collide with the
# catalog that exists.
for name in this.phase_center_catalog.keys():
if name not in other.phase_center_catalog.keys():
this._update_phase_center_id(name, reserved_ids=reserved_ids)
# Next loop, we want to update the IDs of sources that are in both
for name in this.phase_center_catalog.keys():
if name in other.phase_center_catalog.keys():
this._update_phase_center_id(
name, new_cat_id=other.phase_center_catalog[name]["cat_id"],
)
# Finally, add those other objects not found in this
for name in other.phase_center_catalog.keys():
if name not in this.phase_center_catalog.keys():
this._add_phase_center(
name,
cat_type=other.phase_center_catalog[name]["cat_type"],
cat_lon=other.phase_center_catalog[name]["cat_lon"],
cat_lat=other.phase_center_catalog[name]["cat_lat"],
cat_frame=other.phase_center_catalog[name]["cat_frame"],
cat_epoch=other.phase_center_catalog[name]["cat_epoch"],
cat_times=other.phase_center_catalog[name]["cat_times"],
cat_pm_ra=other.phase_center_catalog[name]["cat_pm_ra"],
cat_pm_dec=other.phase_center_catalog[name]["cat_pm_dec"],
cat_dist=other.phase_center_catalog[name]["cat_dist"],
cat_vrad=other.phase_center_catalog[name]["cat_vrad"],
info_source=other.phase_center_catalog[name]["info_source"],
cat_id=other.phase_center_catalog[name]["cat_id"],
)
elif this.multi_phase_center:
# If other is not multi phase center, then we'll go ahead and add the object
# information here.
other_cat_id = this._add_phase_center(
other.object_name,
cat_type="sidereal",
cat_lon=other.phase_center_ra,
cat_lat=other.phase_center_dec,
cat_frame=other.phase_center_frame,
cat_epoch=other.phase_center_epoch,
)
# Pad out self to accommodate new data
if len(bnew_inds) > 0:
this_blts = np.concatenate((this_blts, new_blts))
blt_order = np.argsort(this_blts)
if not self.metadata_only:
if this.future_array_shapes:
zero_pad = np.zeros((len(bnew_inds), this.Nfreqs, this.Npols))
else:
zero_pad = np.zeros((len(bnew_inds), 1, this.Nfreqs, this.Npols))
this.data_array = np.concatenate([this.data_array, zero_pad], axis=0)
this.nsample_array = np.concatenate(
[this.nsample_array, zero_pad], axis=0
)
this.flag_array = np.concatenate(
[this.flag_array, 1 - zero_pad], axis=0
).astype(np.bool_)
this.uvw_array = np.concatenate(
[this.uvw_array, other.uvw_array[bnew_inds, :]], axis=0
)[blt_order, :]
this.time_array = np.concatenate(
[this.time_array, other.time_array[bnew_inds]]
)[blt_order]
this.integration_time = np.concatenate(
[this.integration_time, other.integration_time[bnew_inds]]
)[blt_order]
this.lst_array = np.concatenate(
[this.lst_array, other.lst_array[bnew_inds]]
)[blt_order]
this.ant_1_array = np.concatenate(
[this.ant_1_array, other.ant_1_array[bnew_inds]]
)[blt_order]
this.ant_2_array = np.concatenate(
[this.ant_2_array, other.ant_2_array[bnew_inds]]
)[blt_order]
this.baseline_array = np.concatenate(
[this.baseline_array, other.baseline_array[bnew_inds]]
)[blt_order]
if this.phase_type == "phased":
this.phase_center_app_ra = np.concatenate(
[this.phase_center_app_ra, other.phase_center_app_ra[bnew_inds]]
)[blt_order]
this.phase_center_app_dec = np.concatenate(
[this.phase_center_app_dec, other.phase_center_app_dec[bnew_inds]]
)[blt_order]
this.phase_center_frame_pa = np.concatenate(
[this.phase_center_frame_pa, other.phase_center_frame_pa[bnew_inds]]
)[blt_order]
if this.multi_phase_center:
if other.multi_phase_center:
this.phase_center_id_array = np.concatenate(
[
this.phase_center_id_array,
other.phase_center_id_array[bnew_inds],
]
)[blt_order]
else:
this.phase_center_id_array = np.concatenate(
[this.phase_center_id_array, [other_cat_id] * len(bnew_inds)]
)[blt_order]
if len(fnew_inds) > 0:
if this.future_array_shapes:
this.freq_array = np.concatenate(
[this.freq_array, other.freq_array[fnew_inds]]
)
else:
this.freq_array = np.concatenate(
[this.freq_array, other.freq_array[:, fnew_inds]], axis=1
)
if this.flex_spw or this.future_array_shapes:
this.channel_width = np.concatenate(
[this.channel_width, other.channel_width[fnew_inds]]
)
if this.flex_spw:
this.flex_spw_id_array = np.concatenate(
[this.flex_spw_id_array, other.flex_spw_id_array[fnew_inds]]
)
this.spw_array = np.concatenate([this.spw_array, other.spw_array])
# We want to preserve per-spw information based on first appearance
# in the concatenated array.
unique_index = np.sort(
np.unique(this.flex_spw_id_array, return_index=True)[1]
)
this.spw_array = this.flex_spw_id_array[unique_index]
this.Nspws = len(this.spw_array)
# If we have a flex/multi-spw data set, need to sort out the order of the
# individual windows first.
if this.flex_spw:
f_order = np.concatenate(
[
np.where(this.flex_spw_id_array == idx)[0]
for idx in sorted(this.spw_array)
]
)
# With spectral windows sorted, check and see if channels within
# windows need sorting. If they are ordered in ascending or descending
# fashion, leave them be. If not, sort in ascending order
for idx in this.spw_array:
select_mask = this.flex_spw_id_array[f_order] == idx
check_freqs = (
this.freq_array[f_order[select_mask]]
if this.future_array_shapes
else this.freq_array[0, f_order[select_mask]]
)
if (not np.all(check_freqs[1:] > check_freqs[:-1])) and (
not np.all(check_freqs[1:] < check_freqs[:-1])
):
subsort_order = f_order[select_mask]
f_order[select_mask] = subsort_order[np.argsort(check_freqs)]
else:
if this.future_array_shapes:
f_order = np.argsort(this.freq_array)
else:
f_order = np.argsort(this.freq_array[0, :])
if not self.metadata_only:
if this.future_array_shapes:
zero_pad = np.zeros(
(this.data_array.shape[0], len(fnew_inds), this.Npols)
)
this.data_array = np.concatenate(
[this.data_array, zero_pad], axis=1
)
this.nsample_array = np.concatenate(
[this.nsample_array, zero_pad], axis=1
)
this.flag_array = np.concatenate(
[this.flag_array, 1 - zero_pad], axis=1
).astype(np.bool_)
else:
zero_pad = np.zeros(
(this.data_array.shape[0], 1, len(fnew_inds), this.Npols)
)
this.data_array = np.concatenate(
[this.data_array, zero_pad], axis=2
)
this.nsample_array = np.concatenate(
[this.nsample_array, zero_pad], axis=2
)
this.flag_array = np.concatenate(
[this.flag_array, 1 - zero_pad], axis=2
).astype(np.bool_)
if len(pnew_inds) > 0:
this.polarization_array = np.concatenate(
[this.polarization_array, other.polarization_array[pnew_inds]]
)
p_order = np.argsort(np.abs(this.polarization_array))
if not self.metadata_only:
if this.future_array_shapes:
zero_pad = np.zeros(
(
this.data_array.shape[0],
this.data_array.shape[1],
len(pnew_inds),
)
)
this.data_array = np.concatenate(
[this.data_array, zero_pad], axis=2
)
this.nsample_array = np.concatenate(
[this.nsample_array, zero_pad], axis=2
)
this.flag_array = np.concatenate(
[this.flag_array, 1 - zero_pad], axis=2
).astype(np.bool_)
else:
zero_pad = np.zeros(
(
this.data_array.shape[0],
1,
this.data_array.shape[2],
len(pnew_inds),
)
)
this.data_array = np.concatenate(
[this.data_array, zero_pad], axis=3
)
this.nsample_array = np.concatenate(
[this.nsample_array, zero_pad], axis=3
)
this.flag_array = np.concatenate(
[this.flag_array, 1 - zero_pad], axis=3
).astype(np.bool_)
# Now populate the data
pol_t2o = np.nonzero(
np.in1d(this.polarization_array, other.polarization_array)
)[0]
if this.future_array_shapes:
freq_t2o = np.nonzero(np.in1d(this.freq_array, other.freq_array))[0]
else:
freq_t2o = np.nonzero(
np.in1d(this.freq_array[0, :], other.freq_array[0, :])
)[0]
blt_t2o = np.nonzero(np.in1d(this_blts, other_blts))[0]
if not self.metadata_only:
if this.future_array_shapes:
this.data_array[np.ix_(blt_t2o, freq_t2o, pol_t2o)] = other.data_array
this.nsample_array[
np.ix_(blt_t2o, freq_t2o, pol_t2o)
] = other.nsample_array
this.flag_array[np.ix_(blt_t2o, freq_t2o, pol_t2o)] = other.flag_array
else:
this.data_array[
np.ix_(blt_t2o, [0], freq_t2o, pol_t2o)
] = other.data_array
this.nsample_array[
np.ix_(blt_t2o, [0], freq_t2o, pol_t2o)
] = other.nsample_array
this.flag_array[
np.ix_(blt_t2o, [0], freq_t2o, pol_t2o)
] = other.flag_array
if not self.metadata_only:
if this.future_array_shapes:
if len(bnew_inds) > 0:
for name, param in zip(
this._data_params, this.data_like_parameters
):
setattr(this, name, param[blt_order, :, :])
if len(fnew_inds) > 0:
for name, param in zip(
this._data_params, this.data_like_parameters
):
setattr(this, name, param[:, f_order, :])
if len(pnew_inds) > 0:
for name, param in zip(
this._data_params, this.data_like_parameters
):
setattr(this, name, param[:, :, p_order])
else:
if len(bnew_inds) > 0:
for name, param in zip(
this._data_params, this.data_like_parameters
):
setattr(this, name, param[blt_order, :, :, :])
if len(fnew_inds) > 0:
for name, param in zip(
this._data_params, this.data_like_parameters
):
setattr(this, name, param[:, :, f_order, :])
if len(pnew_inds) > 0:
for name, param in zip(
this._data_params, this.data_like_parameters
):
setattr(this, name, param[:, :, :, p_order])
if len(fnew_inds) > 0:
if this.future_array_shapes:
this.freq_array = this.freq_array[f_order]
else:
this.freq_array = this.freq_array[:, f_order]
if this.flex_spw or this.future_array_shapes:
this.channel_width = this.channel_width[f_order]
if this.flex_spw:
this.flex_spw_id_array = this.flex_spw_id_array[f_order]
if len(pnew_inds) > 0:
this.polarization_array = this.polarization_array[p_order]
# Update N parameters (e.g. Npols)
this.Ntimes = len(np.unique(this.time_array))
this.Nbls = len(np.unique(this.baseline_array))
this.Nblts = this.uvw_array.shape[0]
this.Nfreqs = this.freq_array.size
this.Npols = this.polarization_array.shape[0]
this.Nants_data = this._calc_nants_data()
# Update filename parameter
this.filename = uvutils._combine_filenames(this.filename, other.filename)
if this.filename is not None:
this._filename.form = (len(this.filename),)
# Check specific requirements
if this.Nfreqs > 1:
spacing_error, chanwidth_error = this._check_freq_spacing(
raise_errors=False
)
if spacing_error:
warnings.warn(
"Combined frequencies are not evenly spaced or have differing "
"values of channel widths. This will make it impossible to write "
"this data out to some file types."
)
elif chanwidth_error:
warnings.warn(
"Combined frequencies are separated by more than their "
"channel width. This will make it impossible to write this data "
"out to some file types."
)
if n_axes > 0:
history_update_string += " axis using pyuvdata."
histories_match = uvutils._check_histories(this.history, other.history)
this.history += history_update_string
if not histories_match:
if verbose_history:
this.history += " Next object history follows. " + other.history
else:
extra_history = uvutils._combine_history_addition(
this.history, other.history
)
if extra_history is not None:
this.history += (
" Unique part of next object history follows. "
+ extra_history
)
# Check final object is self-consistent
if run_check:
this.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
if not inplace:
return this
def __iadd__(
self,
other,
phase_center_radec=None,
unphase_to_drift=False,
phase_frame="icrs",
orig_phase_frame=None,
use_ant_pos=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
make_multi_phase=False,
ignore_name=False,
):
"""
In place add.
Parameters
----------
other : UVData object
Another UVData object which will be added to self.
phase_center_radec : array_like of float
The phase center to phase the files to before adding the objects in
radians (in the ICRS frame). Note that if this keyword is not set
and the two UVData objects are phased to different phase centers
or if one is phased and one is drift, this method will error
because the objects are not compatible.
unphase_to_drift : bool
If True, unphase the objects to drift before combining them.
phase_frame : str
The astropy frame to phase to. Either 'icrs' or 'gcrs'.
'gcrs' accounts for precession & nutation,
'icrs' accounts for precession, nutation & abberation.
Only used if `phase_center_radec` is set.
orig_phase_frame : str
The original phase frame of the data (if it is already phased). Used
for unphasing, only if `unphase_to_drift` or `phase_center_radec`
are set. Defaults to using the 'phase_center_frame' attribute or
'icrs' if that attribute is None.
use_ant_pos : bool
If True, calculate the phased or unphased uvws directly from the
antenna positions rather than from the existing uvws.
Only used if `unphase_to_drift` or `phase_center_radec` are set.
run_check : bool
Option to check for the existence and proper shapes of parameters
after combining objects.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
combining objects.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
make_multi_phase : bool
Option to make the output a multi phase center dataset, capable of holding
data on multiple phase centers. Setting this to true will allow for two
UVData objects to be combined, even if the phase center properties do not
agree (so long as the names are unique for each phase center). Default is
False.
ignore_name : bool
Option to ignore the name of the phase center (`cat_name` in
`phase_center_catalog` when `multi_phase_center=True`, otherwise
`object_name`) when combining two UVData objects. Doing so effectively
adopts the name found in the first UVData object in the sum. Default is
False.
Raises
------
ValueError
If other is not a UVData object, self and other are not compatible
or if data in self and other overlap. One way they can not be
compatible is if they have different phasing, in that case set
`unphase_to_drift` or `phase_center_radec` to (un)phase them so they
are compatible.
If `phase_center_radec` is not None and is not length 2.
"""
self.__add__(
other,
inplace=True,
phase_center_radec=phase_center_radec,
unphase_to_drift=unphase_to_drift,
phase_frame=phase_frame,
orig_phase_frame=orig_phase_frame,
use_ant_pos=use_ant_pos,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
make_multi_phase=make_multi_phase,
ignore_name=ignore_name,
)
return self
def fast_concat(
self,
other,
axis,
inplace=False,
phase_center_radec=None,
unphase_to_drift=False,
phase_frame="icrs",
orig_phase_frame=None,
use_ant_pos=True,
verbose_history=False,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
ignore_name=False,
):
"""
Concatenate two UVData objects along specified axis with almost no checking.
Warning! This method assumes all the metadata along other axes is sorted
the same way. The __add__ method is much safer, it checks all the metadata,
but it is slower. Some quick checks are run, but this method doesn't
make any guarantees that the resulting object is correct.
Parameters
----------
other : UVData object or list of UVData objects
UVData object or list of UVData objects which will be added to self.
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. Allowed values are: 'blt', 'freq', 'polarization'.
inplace : bool
If True, overwrite self as we go, otherwise create a third object
as the sum of the two.
phase_center_radec : array_like of float
The phase center to phase the files to before adding the objects in
radians (in the ICRS frame). Note that if this keyword is not set
and the two UVData objects are phased to different phase centers
or if one is phased and one is drift, this method will error
because the objects are not compatible.
unphase_to_drift : bool
If True, unphase the objects to drift before combining them.
phase_frame : str
The astropy frame to phase to. Either 'icrs' or 'gcrs'.
'gcrs' accounts for precession & nutation,
'icrs' accounts for precession, nutation & abberation.
Only used if `phase_center_radec` is set.
orig_phase_frame : str
The original phase frame of the data (if it is already phased). Used
for unphasing, only if `unphase_to_drift` or `phase_center_radec`
are set. Defaults to using the 'phase_center_frame' attribute or
'icrs' if that attribute is None.
use_ant_pos : bool
If True, calculate the phased or unphased uvws directly from the
antenna positions rather than from the existing uvws.
Only used if `unphase_to_drift` or `phase_center_radec` are set.
verbose_history : bool
Option to allow more verbose history. If True and if the histories for the
objects are different, the combined object will keep all the history of
all input objects (if many objects are combined this can lead to very long
histories). If False and if the histories for the objects are different,
the combined object will have the history of the first object and only the
parts of the other object histories that are unique (this is done word by
word and can result in hard to interpret histories).
run_check : bool
Option to check for the existence and proper shapes of parameters
after combining objects.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
combining objects.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
ignore_name : bool
Option to ignore the name of the phase center (`cat_name` in
`phase_center_catalog` when `multi_phase_center=True`, otherwise
`object_name`) when combining two UVData objects. Doing so effectively
adopts the name found in the first UVData object in the sum. Default is
False.
Raises
------
ValueError
If other is not a UVData object, axis is not an allowed value or if
self and other are not compatible.
"""
if inplace:
this = self
else:
this = self.copy()
if not isinstance(other, (list, tuple, np.ndarray)):
# if this is a UVData object already, stick it in a list
other = [other]
# Check that both objects are UVData and valid
this.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
for obj in other:
if not issubclass(obj.__class__, this.__class__):
if not issubclass(this.__class__, obj.__class__):
raise ValueError(
"Only UVData (or subclass) objects can be "
"added to a UVData (or subclass) object"
)
obj.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
# check that all objects have the same array shapes
for obj in other:
if this.future_array_shapes != obj.future_array_shapes:
raise ValueError(
"All objects must have the same `future_array_shapes` parameter. "
"Use the `use_future_array_shapes` or `use_current_array_shapes` "
"methods to convert them."
)
if phase_center_radec is not None and unphase_to_drift:
raise ValueError(
"phase_center_radec cannot be set if unphase_to_drift is True."
)
if unphase_to_drift:
if this.phase_type != "drift":
warnings.warn("Unphasing this UVData object to drift")
this.unphase_to_drift(
phase_frame=orig_phase_frame, use_ant_pos=use_ant_pos
)
for obj in other:
if obj.phase_type != "drift":
warnings.warn("Unphasing other UVData object to drift")
obj.unphase_to_drift(
phase_frame=orig_phase_frame, use_ant_pos=use_ant_pos
)
if phase_center_radec is not None:
if np.array(phase_center_radec).size != 2:
raise ValueError("phase_center_radec should have length 2.")
# If this object is not phased or is not phased close to
# phase_center_radec, (re)phase it.
# Close is defined using the phase_center_ra/dec tolerances.
if this.phase_type == "drift" or (
not np.isclose(
this.phase_center_ra,
phase_center_radec[0],
rtol=this._phase_center_ra.tols[0],
atol=this._phase_center_ra.tols[1],
)
or not np.isclose(
this.phase_center_dec,
phase_center_radec[1],
rtol=this._phase_center_dec.tols[0],
atol=this._phase_center_dec.tols[1],
)
):
warnings.warn("Phasing this UVData object to phase_center_radec")
this.phase(
phase_center_radec[0],
phase_center_radec[1],
phase_frame=phase_frame,
orig_phase_frame=orig_phase_frame,
use_ant_pos=use_ant_pos,
allow_rephase=True,
)
# If other object is not phased or is not phased close to
# phase_center_radec, (re)phase it.
# Close is defined using the phase_center_ra/dec tolerances.
for obj in other:
if obj.phase_type == "drift" or (
not np.isclose(
obj.phase_center_ra,
phase_center_radec[0],
rtol=obj._phase_center_ra.tols[0],
atol=obj._phase_center_ra.tols[1],
)
or not np.isclose(
obj.phase_center_dec,
phase_center_radec[1],
rtol=obj._phase_center_dec.tols[0],
atol=obj._phase_center_dec.tols[1],
)
):
warnings.warn("Phasing other UVData object to phase_center_radec")
obj.phase(
phase_center_radec[0],
phase_center_radec[1],
phase_frame=phase_frame,
orig_phase_frame=orig_phase_frame,
use_ant_pos=use_ant_pos,
allow_rephase=True,
)
allowed_axes = ["blt", "freq", "polarization"]
if axis not in allowed_axes:
raise ValueError(
"If axis is specifed it must be one of: " + ", ".join(allowed_axes)
)
compatibility_params = [
"_vis_units",
"_telescope_name",
"_instrument",
"_telescope_location",
"_phase_type",
"_Nants_telescope",
"_antenna_names",
"_antenna_numbers",
"_antenna_positions",
"_phase_center_ra",
"_phase_center_dec",
"_phase_center_epoch",
"_multi_phase_center",
"_phase_center_catalog",
"_Nphase",
]
if not this.future_array_shapes and not this.flex_spw:
compatibility_params.append("_channel_width")
if not (this.multi_phase_center or ignore_name):
compatibility_params += ["_object_name"]
history_update_string = " Combined data along "
if axis == "freq":
history_update_string += "frequency"
compatibility_params += [
"_polarization_array",
"_ant_1_array",
"_ant_2_array",
"_integration_time",
"_uvw_array",
"_lst_array",
"_phase_center_id_array",
]
elif axis == "polarization":
history_update_string += "polarization"
compatibility_params += [
"_freq_array",
"_ant_1_array",
"_ant_2_array",
"_integration_time",
"_uvw_array",
"_lst_array",
"_phase_center_id_array",
]
elif axis == "blt":
history_update_string += "baseline-time"
compatibility_params += ["_freq_array", "_polarization_array"]
history_update_string += " axis using pyuvdata."
histories_match = []
for obj in other:
histories_match.append(uvutils._check_histories(this.history, obj.history))
this.history += history_update_string
for obj_num, obj in enumerate(other):
if not histories_match[obj_num]:
if verbose_history:
this.history += " Next object history follows. " + obj.history
else:
extra_history = uvutils._combine_history_addition(
this.history, obj.history
)
if extra_history is not None:
this.history += (
" Unique part of next object history follows. "
+ extra_history
)
# Actually check compatibility parameters
for obj in other:
for a in compatibility_params:
params_match = getattr(this, a) == getattr(obj, a)
if not params_match:
msg = (
"UVParameter "
+ a[1:]
+ " does not match. Cannot combine objects."
)
raise ValueError(msg)
if axis == "freq":
this.Nfreqs = sum([this.Nfreqs] + [obj.Nfreqs for obj in other])
if this.future_array_shapes:
this.freq_array = np.concatenate(
[this.freq_array] + [obj.freq_array for obj in other]
)
else:
this.freq_array = np.concatenate(
[this.freq_array] + [obj.freq_array for obj in other], axis=1
)
if this.flex_spw or this.future_array_shapes:
this.channel_width = np.concatenate(
[this.channel_width] + [obj.channel_width for obj in other]
)
if this.flex_spw:
this.flex_spw_id_array = np.concatenate(
[this.flex_spw_id_array] + [obj.flex_spw_id_array for obj in other]
)
this.spw_array = np.concatenate(
[this.spw_array] + [obj.spw_array for obj in other]
)
# We want to preserve per-spw information based on first appearance
# in the concatenated array.
unique_index = np.sort(
np.unique(this.flex_spw_id_array, return_index=True)[1]
)
this.spw_array = this.flex_spw_id_array[unique_index]
this.Nspws = len(this.spw_array)
spacing_error, chanwidth_error = this._check_freq_spacing(
raise_errors=False
)
if spacing_error:
warnings.warn(
"Combined frequencies are not evenly spaced or have differing "
"values of channel widths. This will make it impossible to write "
"this data out to some file types."
)
elif chanwidth_error:
warnings.warn(
"Combined frequencies are separated by more than their "
"channel width. This will make it impossible to write this data "
"out to some file types."
)
if not self.metadata_only:
if this.future_array_shapes:
this.data_array = np.concatenate(
[this.data_array] + [obj.data_array for obj in other], axis=1,
)
this.nsample_array = np.concatenate(
[this.nsample_array] + [obj.nsample_array for obj in other],
axis=1,
)
this.flag_array = np.concatenate(
[this.flag_array] + [obj.flag_array for obj in other], axis=1,
)
else:
this.data_array = np.concatenate(
[this.data_array] + [obj.data_array for obj in other], axis=2,
)
this.nsample_array = np.concatenate(
[this.nsample_array] + [obj.nsample_array for obj in other],
axis=2,
)
this.flag_array = np.concatenate(
[this.flag_array] + [obj.flag_array for obj in other], axis=2,
)
elif axis == "polarization":
this.polarization_array = np.concatenate(
[this.polarization_array] + [obj.polarization_array for obj in other]
)
this.Npols = sum([this.Npols] + [obj.Npols for obj in other])
pol_separation = np.diff(this.polarization_array)
if np.min(pol_separation) < np.max(pol_separation):
warnings.warn(
"Combined polarizations are not evenly spaced. This will "
"make it impossible to write this data out to some file types."
)
if not self.metadata_only:
if this.future_array_shapes:
this.data_array = np.concatenate(
[this.data_array] + [obj.data_array for obj in other], axis=2,
)
this.nsample_array = np.concatenate(
[this.nsample_array] + [obj.nsample_array for obj in other],
axis=2,
)
this.flag_array = np.concatenate(
[this.flag_array] + [obj.flag_array for obj in other], axis=2,
)
else:
this.data_array = np.concatenate(
[this.data_array] + [obj.data_array for obj in other], axis=3,
)
this.nsample_array = np.concatenate(
[this.nsample_array] + [obj.nsample_array for obj in other],
axis=3,
)
this.flag_array = np.concatenate(
[this.flag_array] + [obj.flag_array for obj in other], axis=3,
)
elif axis == "blt":
this.Nblts = sum([this.Nblts] + [obj.Nblts for obj in other])
this.ant_1_array = np.concatenate(
[this.ant_1_array] + [obj.ant_1_array for obj in other]
)
this.ant_2_array = np.concatenate(
[this.ant_2_array] + [obj.ant_2_array for obj in other]
)
this.Nants_data = this._calc_nants_data()
this.uvw_array = np.concatenate(
[this.uvw_array] + [obj.uvw_array for obj in other], axis=0
)
this.time_array = np.concatenate(
[this.time_array] + [obj.time_array for obj in other]
)
this.Ntimes = len(np.unique(this.time_array))
this.lst_array = np.concatenate(
[this.lst_array] + [obj.lst_array for obj in other]
)
this.baseline_array = np.concatenate(
[this.baseline_array] + [obj.baseline_array for obj in other]
)
this.Nbls = len(np.unique(this.baseline_array))
this.integration_time = np.concatenate(
[this.integration_time] + [obj.integration_time for obj in other]
)
if not self.metadata_only:
this.data_array = np.concatenate(
[this.data_array] + [obj.data_array for obj in other], axis=0,
)
this.nsample_array = np.concatenate(
[this.nsample_array] + [obj.nsample_array for obj in other], axis=0,
)
this.flag_array = np.concatenate(
[this.flag_array] + [obj.flag_array for obj in other], axis=0,
)
if this.phase_type == "phased":
this.phase_center_app_ra = np.concatenate(
[this.phase_center_app_ra]
+ [obj.phase_center_app_ra for obj in other]
)
this.phase_center_app_dec = np.concatenate(
[this.phase_center_app_dec]
+ [obj.phase_center_app_dec for obj in other]
)
this.phase_center_frame_pa = np.concatenate(
[this.phase_center_frame_pa]
+ [obj.phase_center_frame_pa for obj in other]
)
if this.multi_phase_center:
this.phase_center_id_array = np.concatenate(
[this.phase_center_id_array]
+ [obj.phase_center_id_array for obj in other]
)
# update filename attribute
for obj in other:
this.filename = uvutils._combine_filenames(this.filename, obj.filename)
if this.filename is not None:
this._filename.form = len(this.filename)
# Check final object is self-consistent
if run_check:
this.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
if not inplace:
return this
def sum_vis(
    self,
    other,
    inplace=False,
    difference=False,
    verbose_history=False,
    run_check=True,
    check_extra=True,
    run_check_acceptability=True,
    strict_uvw_antpos_check=False,
    override_params=None,
):
    """
    Sum (or difference) visibilities between two UVData objects.

    By default requires that all UVParameters are the same on the two objects
    except for `history`, `data_array`, `object_name`, and `extra_keywords`.
    The `object_name` values are concatenated if they are different. If keys
    in `extra_keywords` have different values the values from the first
    object are taken.

    Parameters
    ----------
    other : UVData object
        Another UVData object which will be added to self.
    inplace : bool
        If True, overwrite self as we go, otherwise create a third object
        as the sum of the two.
    difference : bool
        If True, differences the visibilities of the two UVData objects
        rather than summing them.
    verbose_history : bool
        Option to allow more verbose history. If True and if the histories for the
        two objects are different, the combined object will keep all the history of
        both input objects (this can lead to long histories). If False and if the
        histories for the two objects are different, the combined object will have
        the history of the first object and only the parts of the second object
        history that are unique (this is done word by word and can result in hard
        to interpret histories).
    run_check : bool
        Option to check for the existence and proper shapes of parameters
        after combining objects.
    check_extra : bool
        Option to check optional parameters as well as required ones.
    run_check_acceptability : bool
        Option to check acceptable range of the values of parameters after
        combining objects.
    strict_uvw_antpos_check : bool
        Option to raise an error rather than a warning if the check that
        uvws match antenna positions does not pass.
    override_params : array_like of strings
        List of object UVParameters to omit from compatibility check. Overridden
        parameters will not be compared between the objects, and the values
        for these parameters will be taken from the first object.

    Returns
    -------
    UVData Object
        If inplace parameter is False.

    Raises
    ------
    ValueError
        If other is not a UVData object, or if self and other
        are not compatible.
    """
    if inplace:
        this = self
    else:
        this = self.copy()
    # Check that both objects are UVData and valid
    this.check(
        check_extra=check_extra,
        run_check_acceptability=run_check_acceptability,
        strict_uvw_antpos_check=strict_uvw_antpos_check,
    )
    # Accept either object being a subclass of the other's class.
    if not issubclass(other.__class__, this.__class__):
        if not issubclass(this.__class__, other.__class__):
            raise ValueError(
                "Only UVData (or subclass) objects can be "
                "added to a UVData (or subclass) object"
            )
    other.check(
        check_extra=check_extra,
        run_check_acceptability=run_check_acceptability,
        strict_uvw_antpos_check=strict_uvw_antpos_check,
    )
    # check that both objects have the same array shapes
    if this.future_array_shapes != other.future_array_shapes:
        raise ValueError(
            "Both objects must have the same `future_array_shapes` parameter. "
            "Use the `use_future_array_shapes` or `use_current_array_shapes` "
            "methods to convert them."
        )
    # Start from every UVParameter on the object, then strip the ones that
    # are allowed to differ (plus any user-specified overrides).
    compatibility_params = list(this.__iter__())
    remove_params = ["_history", "_data_array", "_object_name", "_extra_keywords"]
    # Add underscores to override_params to match list from __iter__()
    # Add to parameters to be removed
    if override_params and all(isinstance(param, str) for param in override_params):
        for param in override_params:
            if param[0] != "_":
                param = "_" + param
            if param not in compatibility_params:
                msg = (
                    "Provided parameter " + param[1:] + " is not a recognizable "
                    "UVParameter."
                )
                raise ValueError(msg)
            remove_params.append(param)
    # compatibility_params should define the parameters that need to
    # be the same for objects to be summed or diffed
    compatibility_params = list(set(compatibility_params) - set(remove_params))
    # Check each UVParameter in compatibility_params
    for param in compatibility_params:
        params_match = getattr(this, param) == getattr(other, param)
        if not params_match:
            msg = (
                "UVParameter " + param[1:] + " does not match. Cannot "
                "combine objects."
            )
            raise ValueError(msg)
    # Merge extra keywords and object_name
    for intersection in set(this.extra_keywords.keys()) & set(
        other.extra_keywords.keys()
    ):
        if this.extra_keywords[intersection] != other.extra_keywords[intersection]:
            warnings.warn(
                "Keyword " + intersection + " in _extra_keywords is different "
                "in the two objects. Taking the first object's entry."
            )
    # Merge extra_keywords lists, taking values from the first object
    # (later dict entries win, so `this` items override `other` items).
    this.extra_keywords = dict(
        list(other.extra_keywords.items()) + list(this.extra_keywords.items())
    )
    # Merge object_name if different.
    if this.object_name != other.object_name:
        this.object_name = this.object_name + "-" + other.object_name
    # Do the summing / differencing
    if difference:
        this.data_array = this.data_array - other.data_array
        history_update_string = " Visibilities differenced using pyuvdata."
    else:
        this.data_array = this.data_array + other.data_array
        history_update_string = " Visibilities summed using pyuvdata."
    # NOTE: histories are compared before the update string is appended.
    histories_match = uvutils._check_histories(this.history, other.history)
    this.history += history_update_string
    if not histories_match:
        if verbose_history:
            this.history += " Second object history follows. " + other.history
        else:
            extra_history = uvutils._combine_history_addition(
                this.history, other.history
            )
            if extra_history is not None:
                this.history += (
                    " Unique part of second object history follows. "
                    + extra_history
                )
    # merge file names
    this.filename = uvutils._combine_filenames(this.filename, other.filename)
    # Check final object is self-consistent
    if run_check:
        this.check(
            check_extra=check_extra,
            run_check_acceptability=run_check_acceptability,
            strict_uvw_antpos_check=strict_uvw_antpos_check,
        )
    if not inplace:
        return this
def diff_vis(
    self,
    other,
    inplace=False,
    run_check=True,
    check_extra=True,
    run_check_acceptability=True,
    strict_uvw_antpos_check=False,
    override_params=None,
):
    """
    Difference visibilities between two UVData objects.

    This is a thin convenience wrapper around `sum_vis` with
    `difference=True`; see that method for the full compatibility rules.
    By default requires that all UVParameters are the same on the two objects
    except for `history`, `data_array`, `object_name`, and `extra_keywords`.
    The `object_name` values are concatenated if they are different. If keys
    in `extra_keywords` have different values the values from the first
    object are taken.

    Parameters
    ----------
    other : UVData object
        Another UVData object whose visibilities will be subtracted from self.
    inplace : bool
        If True, overwrite self as we go, otherwise create a third object
        as the difference of the two.
    run_check : bool
        Option to check for the existence and proper shapes of parameters
        after combining objects.
    check_extra : bool
        Option to check optional parameters as well as required ones.
    run_check_acceptability : bool
        Option to check acceptable range of the values of parameters after
        combining objects.
    strict_uvw_antpos_check : bool
        Option to raise an error rather than a warning if the check that
        uvws match antenna positions does not pass.
    override_params : array_like of strings
        List of object UVParameters to omit from compatibility check. Overridden
        parameters will not be compared between the objects, and the values
        for these parameters will be taken from the first object.

    Returns
    -------
    UVData Object
        If inplace parameter is False.

    Raises
    ------
    ValueError
        If other is not a UVData object, or if self and other
        are not compatible.
    """
    # sum_vis returns the combined object when inplace=False and None when
    # inplace=True, so a single return statement covers both cases. This
    # replaces a previously duplicated if/else that issued the identical
    # call in each branch.
    return self.sum_vis(
        other,
        difference=True,
        inplace=inplace,
        run_check=run_check,
        check_extra=check_extra,
        run_check_acceptability=run_check_acceptability,
        strict_uvw_antpos_check=strict_uvw_antpos_check,
        override_params=override_params,
    )
def parse_ants(self, ant_str, print_toggle=False):
    """
    Get antpair and polarization from parsing an aipy-style ant string.

    Used to support the select function. Generates two lists of antenna pair
    tuples and polarization indices based on parsing of the string ant_str.
    If no valid polarizations (pseudo-Stokes params, or combinations of [lr]
    or [xy]) or antenna numbers are found in ant_str, ant_pairs_nums and
    polarizations are returned as None.

    Parameters
    ----------
    ant_str : str
        String containing antenna information to parse. Can be 'all',
        'auto', 'cross', or combinations of antenna numbers and polarization
        indicators 'l' and 'r' or 'x' and 'y'. Minus signs can also be used
        in front of an antenna number or baseline to exclude it from being
        output in ant_pairs_nums. If ant_str has a minus sign as the first
        character, 'all,' will be appended to the beginning of the string.
        See the tutorial for examples of valid strings and their behavior.
    print_toggle : bool
        Boolean for printing parsed baselines for a visual user check.

    Returns
    -------
    ant_pairs_nums : list of tuples of int or None
        List of tuples containing the parsed pairs of antenna numbers, or
        None if ant_str is 'all' or a pseudo-Stokes polarizations.
    polarizations : list of int or None
        List of desired polarizations or None if ant_str does not contain a
        polarization specification.
    """
    # All real parsing work is delegated to the shared utility function;
    # this object supplies the antenna/baseline context and x_orientation.
    parse_kwargs = {
        "uv": self,
        "ant_str": ant_str,
        "print_toggle": print_toggle,
        "x_orientation": self.x_orientation,
    }
    return uvutils.parse_ants(**parse_kwargs)
def _select_preprocess(
    self,
    antenna_nums,
    antenna_names,
    ant_str,
    bls,
    frequencies,
    freq_chans,
    times,
    time_range,
    lsts,
    lst_range,
    polarizations,
    blt_inds,
):
    """
    Build up blt_inds, freq_inds, pol_inds and history_update_string for select.

    Parameters
    ----------
    antenna_nums : array_like of int, optional
        The antennas numbers to keep in the object (antenna positions and
        names for the removed antennas will be retained unless
        `keep_all_metadata` is False). This cannot be provided if
        `antenna_names` is also provided.
    antenna_names : array_like of str, optional
        The antennas names to keep in the object (antenna positions and
        names for the removed antennas will be retained unless
        `keep_all_metadata` is False). This cannot be provided if
        `antenna_nums` is also provided.
    ant_str : str, optional
        A string containing information about what antenna numbers
        and polarizations to keep in the object. Can be 'auto', 'cross', 'all',
        or combinations of antenna numbers and polarizations (e.g. '1',
        '1_2', '1x_2y'). See tutorial for more examples of valid strings and
        the behavior of different forms for ant_str.
        If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
        be kept for both baselines (1, 2) and (2, 3) to return a valid
        pyuvdata object.
        An ant_str cannot be passed in addition to any of `antenna_nums`,
        `antenna_names`, `bls` args or the `polarizations` parameters,
        if it is a ValueError will be raised.
    bls : list of tuple or list of int, optional
        A list of antenna number tuples (e.g. [(0, 1), (3, 2)]), a list of
        baseline 3-tuples (e.g. [(0, 1, 'xx'), (2, 3, 'yy')]), or a list of
        baseline numbers (e.g. [67599, 71699, 73743]) specifying baselines
        to keep in the object. For length-2 tuples, the ordering of the
        numbers within the tuple does not matter. For length-3 tuples, the
        polarization string is in the order of the two antennas. If
        length-3 tuples are provided, `polarizations` must be None.
    frequencies : array_like of float, optional
        The frequencies to keep in the object, each value passed here should
        exist in the freq_array.
    freq_chans : array_like of int, optional
        The frequency channel numbers to keep in the object.
    times : array_like of float, optional
        The times to keep in the object, each value passed here should exist
        in the time_array. Cannot be used with `time_range`, `lsts`, or
        `lst_array`.
    time_range : array_like of float, optional
        The time range in Julian Date to keep in the object, must be length
        2. Some of the times in the object should fall between the first and
        last elements. Cannot be used with `times`, `lsts`, or `lst_array`.
    lsts : array_like of float, optional
        The local sidereal times (LSTs) to keep in the object, each value
        passed here should exist in the lst_array. Cannot be used with
        `times`, `time_range`, or `lst_range`.
    lst_range : array_like of float, optional
        The local sidereal time (LST) range in radians to keep in the
        object, must be of length 2. Some of the LSTs in the object should
        fall between the first and last elements. If the second value is
        smaller than the first, the LSTs are treated as having phase-wrapped
        around LST = 2*pi = 0, and the LSTs kept on the object will run from
        the larger value, through 0, and end at the smaller value.
    polarizations : array_like of int or str, optional
        The polarizations numbers to keep in the object, each value passed
        here should exist in the polarization_array. If passing strings, the
        canonical polarization strings (e.g. "xx", "rr") are supported and if the
        `x_orientation` attribute is set, the physical dipole strings
        (e.g. "nn", "ee") are also supported.
    blt_inds : array_like of int, optional
        The baseline-time indices to keep in the object. This is
        not commonly used.

    Returns
    -------
    blt_inds : list of int
        list of baseline-time indices to keep. Can be None (to keep everything).
    freq_inds : list of int
        list of frequency indices to keep. Can be None (to keep everything).
    pol_inds : list of int
        list of polarization indices to keep. Can be None (to keep everything).
    history_update_string : str
        string to append to the end of the history.
    """
    # build up history string as we go
    history_update_string = " Downselected to specific "
    n_selects = 0
    # ant_str is mutually exclusive with the explicit antenna/baseline/pol
    # selectors; it is parsed into `bls` and `polarizations` below.
    if ant_str is not None:
        if not (
            antenna_nums is None
            and antenna_names is None
            and bls is None
            and polarizations is None
        ):
            raise ValueError(
                "Cannot provide ant_str with antenna_nums, antenna_names, "
                "bls, or polarizations."
            )
        else:
            bls, polarizations = self.parse_ants(ant_str)
            if bls is not None and len(bls) == 0:
                raise ValueError(
                    f"There is no data matching ant_str={ant_str} in this object."
                )
    # Antennas, times and blt_inds all need to be combined into a set of
    # blts indices to keep.
    # test for blt_inds presence before adding inds from antennas & times
    if blt_inds is not None:
        blt_inds = uvutils._get_iterable(blt_inds)
        if np.array(blt_inds).ndim > 1:
            blt_inds = np.array(blt_inds).flatten()
        history_update_string += "baseline-times"
        n_selects += 1
    # antenna_names are translated into antenna_nums, then handled by the
    # antenna_nums branch below.
    if antenna_names is not None:
        if antenna_nums is not None:
            raise ValueError(
                "Only one of antenna_nums and antenna_names can be provided."
            )
        if not isinstance(antenna_names, (list, tuple, np.ndarray)):
            antenna_names = (antenna_names,)
        if np.array(antenna_names).ndim > 1:
            antenna_names = np.array(antenna_names).flatten()
        antenna_nums = []
        for s in antenna_names:
            if s not in self.antenna_names:
                raise ValueError(
                    "Antenna name {a} is not present in the antenna_names"
                    " array".format(a=s)
                )
            antenna_nums.append(
                self.antenna_numbers[np.where(np.array(self.antenna_names) == s)][0]
            )
    if antenna_nums is not None:
        antenna_nums = uvutils._get_iterable(antenna_nums)
        if np.array(antenna_nums).ndim > 1:
            antenna_nums = np.array(antenna_nums).flatten()
        if n_selects > 0:
            history_update_string += ", antennas"
        else:
            history_update_string += "antennas"
        n_selects += 1
        # Check to make sure that we actually have these antenna nums in the data
        ant_check = np.logical_or(
            np.isin(antenna_nums, self.ant_1_array),
            np.isin(antenna_nums, self.ant_2_array),
        )
        if not np.all(ant_check):
            raise ValueError(
                "Antenna number % i is not present in the ant_1_array or "
                "ant_2_array" % antenna_nums[~ant_check][0]
            )
        # keep only baselines where BOTH antennas are in the requested set
        ant_blt_inds = np.where(
            np.logical_and(
                np.isin(self.ant_1_array, antenna_nums),
                np.isin(self.ant_2_array, antenna_nums),
            )
        )[0]
    else:
        ant_blt_inds = None
    if bls is not None:
        # A list of plain ints is interpreted as baseline numbers and
        # converted to antenna-pair tuples; a single tuple is wrapped.
        if isinstance(bls, list) and all(
            isinstance(bl_ind, (int, np.integer,),) for bl_ind in bls
        ):
            for bl_ind in bls:
                if not (bl_ind in self.baseline_array):
                    raise ValueError(
                        "Baseline number {i} is not present in the "
                        "baseline_array".format(i=bl_ind)
                    )
            bls = list(zip(*self.baseline_to_antnums(bls)))
        elif isinstance(bls, tuple) and (len(bls) == 2 or len(bls) == 3):
            bls = [bls]
        if len(bls) == 0 or not all(isinstance(item, tuple) for item in bls):
            raise ValueError(
                "bls must be a list of tuples of antenna numbers "
                "(optionally with polarization) or a list of baseline numbers."
            )
        if not all(
            [isinstance(item[0], (int, np.integer,),) for item in bls]
            + [isinstance(item[1], (int, np.integer,),) for item in bls]
        ):
            raise ValueError(
                "bls must be a list of tuples of antenna numbers "
                "(optionally with polarization) or a list of baseline numbers."
            )
        if all(len(item) == 3 for item in bls):
            if polarizations is not None:
                raise ValueError(
                    "Cannot provide length-3 tuples and also specify polarizations."
                )
            if not all(isinstance(item[2], str) for item in bls):
                raise ValueError(
                    "The third element in each bl must be a polarization string"
                )
        if ant_str is None:
            if n_selects > 0:
                history_update_string += ", baselines"
            else:
                history_update_string += "baselines"
        else:
            history_update_string += "antenna pairs"
        n_selects += 1
        bls_blt_inds = np.zeros(0, dtype=np.int64)
        bl_pols = set()
        for bl in bls:
            if not (bl[0] in self.ant_1_array or bl[0] in self.ant_2_array):
                raise ValueError(
                    "Antenna number {a} is not present in the "
                    "ant_1_array or ant_2_array".format(a=bl[0])
                )
            if not (bl[1] in self.ant_1_array or bl[1] in self.ant_2_array):
                raise ValueError(
                    "Antenna number {a} is not present in the "
                    "ant_1_array or ant_2_array".format(a=bl[1])
                )
            # Check both orderings of the pair; data may be stored conjugated.
            wh1 = np.where(
                np.logical_and(self.ant_1_array == bl[0], self.ant_2_array == bl[1])
            )[0]
            wh2 = np.where(
                np.logical_and(self.ant_1_array == bl[1], self.ant_2_array == bl[0])
            )[0]
            if len(wh1) > 0:
                bls_blt_inds = np.append(bls_blt_inds, list(wh1))
                if len(bl) == 3:
                    bl_pols.add(bl[2])
            elif len(wh2) > 0:
                bls_blt_inds = np.append(bls_blt_inds, list(wh2))
                if len(bl) == 3:
                    # find conjugate polarization
                    bl_pols.add(uvutils.conj_pol(bl[2]))
            else:
                raise ValueError(
                    "Antenna pair {p} does not have any data "
                    "associated with it.".format(p=bl)
                )
        # Polarizations extracted from length-3 tuples replace any
        # (necessarily None, per the check above) polarizations argument.
        if len(bl_pols) > 0:
            polarizations = list(bl_pols)
        if ant_blt_inds is not None:
            # Use intersection (and) to join antenna_names/nums & ant_pairs_nums
            ant_blt_inds = np.array(
                list(set(ant_blt_inds).intersection(bls_blt_inds))
            )
        else:
            ant_blt_inds = bls_blt_inds
    if ant_blt_inds is not None:
        if blt_inds is not None:
            # Use intersection (and) to join antenna_names/nums/ant_pairs_nums
            # with blt_inds
            blt_inds = np.array(
                list(set(blt_inds).intersection(ant_blt_inds)), dtype=np.int64
            )
        else:
            blt_inds = ant_blt_inds
    # The four time-like selectors are mutually exclusive.
    have_times = times is not None
    have_time_range = time_range is not None
    have_lsts = lsts is not None
    have_lst_range = lst_range is not None
    if (
        np.count_nonzero([have_times, have_time_range, have_lsts, have_lst_range])
        > 1
    ):
        raise ValueError(
            "Only one of [times, time_range, lsts, lst_range] may be "
            "specified per selection operation."
        )
    if times is not None:
        times = uvutils._get_iterable(times)
        if np.array(times).ndim > 1:
            times = np.array(times).flatten()
        time_blt_inds = np.zeros(0, dtype=np.int64)
        for jd in times:
            # matches use the time_array tolerances, not exact equality
            if np.any(
                np.isclose(
                    self.time_array,
                    jd,
                    rtol=self._time_array.tols[0],
                    atol=self._time_array.tols[1],
                )
            ):
                time_blt_inds = np.append(
                    time_blt_inds,
                    np.where(
                        np.isclose(
                            self.time_array,
                            jd,
                            rtol=self._time_array.tols[0],
                            atol=self._time_array.tols[1],
                        )
                    )[0],
                )
            else:
                raise ValueError(
                    "Time {t} is not present in the time_array".format(t=jd)
                )
    if time_range is not None:
        if np.size(time_range) != 2:
            raise ValueError("time_range must be length 2.")
        time_blt_inds = np.nonzero(
            (self.time_array <= time_range[1]) & (self.time_array >= time_range[0])
        )[0]
        if time_blt_inds.size == 0:
            raise ValueError(
                f"No elements in time range between {time_range[0]} and "
                f"{time_range[1]}."
            )
    if lsts is not None:
        if np.any(np.asarray(lsts) > 2 * np.pi):
            warnings.warn(
                "The lsts parameter contained a value greater than 2*pi. "
                "LST values are assumed to be in radians, not hours."
            )
        lsts = uvutils._get_iterable(lsts)
        if np.array(lsts).ndim > 1:
            lsts = np.array(lsts).flatten()
        time_blt_inds = np.zeros(0, dtype=np.int64)
        for lst in lsts:
            if np.any(
                np.isclose(
                    self.lst_array,
                    lst,
                    rtol=self._lst_array.tols[0],
                    atol=self._lst_array.tols[1],
                )
            ):
                time_blt_inds = np.append(
                    time_blt_inds,
                    np.where(
                        np.isclose(
                            self.lst_array,
                            lst,
                            rtol=self._lst_array.tols[0],
                            atol=self._lst_array.tols[1],
                        )
                    )[0],
                )
            else:
                raise ValueError(f"LST {lst} is not present in the lst_array")
    if lst_range is not None:
        if np.size(lst_range) != 2:
            raise ValueError("lst_range must be length 2.")
        if np.any(np.asarray(lst_range) > 2 * np.pi):
            warnings.warn(
                "The lst_range contained a value greater than 2*pi. "
                "LST values are assumed to be in radians, not hours."
            )
        if lst_range[1] < lst_range[0]:
            # we're wrapping around LST = 2*pi = 0
            lst_range_1 = [lst_range[0], 2 * np.pi]
            lst_range_2 = [0, lst_range[1]]
            time_blt_inds1 = np.nonzero(
                (self.lst_array <= lst_range_1[1])
                & (self.lst_array >= lst_range_1[0])
            )[0]
            time_blt_inds2 = np.nonzero(
                (self.lst_array <= lst_range_2[1])
                & (self.lst_array >= lst_range_2[0])
            )[0]
            time_blt_inds = np.union1d(time_blt_inds1, time_blt_inds2)
        else:
            time_blt_inds = np.nonzero(
                (self.lst_array <= lst_range[1]) & (self.lst_array >= lst_range[0])
            )[0]
        if time_blt_inds.size == 0:
            raise ValueError(
                f"No elements in LST range between {lst_range[0]} and "
                f"{lst_range[1]}."
            )
    # Fold the time-based selection (from whichever selector set
    # time_blt_inds above) into the running blt_inds.
    if times is not None or time_range is not None:
        if n_selects > 0:
            history_update_string += ", times"
        else:
            history_update_string += "times"
        n_selects += 1
        if blt_inds is not None:
            # Use intesection (and) to join
            # antenna_names/nums/ant_pairs_nums/blt_inds with times
            blt_inds = np.array(
                list(set(blt_inds).intersection(time_blt_inds)), dtype=np.int64
            )
        else:
            blt_inds = time_blt_inds
    if lsts is not None or lst_range is not None:
        if n_selects > 0:
            history_update_string += ", lsts"
        else:
            history_update_string += "lsts"
        n_selects += 1
        if blt_inds is not None:
            # Use intesection (and) to join
            # antenna_names/nums/ant_pairs_nums/blt_inds with times
            blt_inds = np.array(
                list(set(blt_inds).intersection(time_blt_inds)), dtype=np.int64
            )
        else:
            blt_inds = time_blt_inds
    if blt_inds is not None:
        if len(blt_inds) == 0:
            raise ValueError("No baseline-times were found that match criteria")
        if max(blt_inds) >= self.Nblts:
            raise ValueError("blt_inds contains indices that are too large")
        if min(blt_inds) < 0:
            raise ValueError("blt_inds contains indices that are negative")
        # de-duplicate and return a sorted list of indices
        blt_inds = sorted(set(blt_inds))
    # freq_chans (channel indices) are converted into frequency values and
    # merged with any explicit `frequencies` argument.
    if freq_chans is not None:
        freq_chans = uvutils._get_iterable(freq_chans)
        if np.array(freq_chans).ndim > 1:
            freq_chans = np.array(freq_chans).flatten()
        if frequencies is None:
            if self.future_array_shapes:
                frequencies = self.freq_array[freq_chans]
            else:
                frequencies = self.freq_array[0, freq_chans]
        else:
            frequencies = uvutils._get_iterable(frequencies)
            if self.future_array_shapes:
                frequencies = np.sort(
                    list(set(frequencies) | set(self.freq_array[freq_chans]))
                )
            else:
                frequencies = np.sort(
                    list(set(frequencies) | set(self.freq_array[0, freq_chans]))
                )
    if frequencies is not None:
        frequencies = uvutils._get_iterable(frequencies)
        if np.array(frequencies).ndim > 1:
            frequencies = np.array(frequencies).flatten()
        if n_selects > 0:
            history_update_string += ", frequencies"
        else:
            history_update_string += "frequencies"
        n_selects += 1
        # freq_array is 1D with future array shapes, (1, Nfreqs) otherwise
        if self.future_array_shapes:
            freq_arr_use = self.freq_array
        else:
            freq_arr_use = self.freq_array[0, :]
        # Check and see that all requested freqs are available
        freq_check = np.isin(frequencies, freq_arr_use)
        if not np.all(freq_check):
            raise ValueError(
                "Frequency %g is not present in the freq_array"
                % frequencies[np.where(~freq_check)[0][0]]
            )
        freq_inds = np.where(np.isin(freq_arr_use, frequencies))[0]
        if len(frequencies) > 1:
            # warn if the selection breaks even spacing/contiguity, which
            # some output file formats require
            freq_ind_separation = freq_inds[1:] - freq_inds[:-1]
            if self.flex_spw:
                # only compare spacings within a single spectral window
                freq_ind_separation = freq_ind_separation[
                    np.diff(self.flex_spw_id_array[freq_inds]) == 0
                ]
            if np.min(freq_ind_separation) < np.max(freq_ind_separation):
                warnings.warn(
                    "Selected frequencies are not evenly spaced. This "
                    "will make it impossible to write this data out to "
                    "some file types"
                )
            elif np.max(freq_ind_separation) > 1:
                warnings.warn(
                    "Selected frequencies are not contiguous. This "
                    "will make it impossible to write this data out to "
                    "some file types."
                )
        freq_inds = sorted(set(freq_inds))
    else:
        freq_inds = None
    if polarizations is not None:
        polarizations = uvutils._get_iterable(polarizations)
        if np.array(polarizations).ndim > 1:
            polarizations = np.array(polarizations).flatten()
        if n_selects > 0:
            history_update_string += ", polarizations"
        else:
            history_update_string += "polarizations"
        n_selects += 1
        pol_inds = np.zeros(0, dtype=np.int64)
        for p in polarizations:
            # strings are converted to polarization numbers
            if isinstance(p, str):
                p_num = uvutils.polstr2num(p, x_orientation=self.x_orientation)
            else:
                p_num = p
            if p_num in self.polarization_array:
                pol_inds = np.append(
                    pol_inds, np.where(self.polarization_array == p_num)[0]
                )
            else:
                raise ValueError(
                    "Polarization {p} is not present in the "
                    "polarization_array".format(p=p)
                )
        if len(pol_inds) > 2:
            pol_ind_separation = pol_inds[1:] - pol_inds[:-1]
            if np.min(pol_ind_separation) < np.max(pol_ind_separation):
                warnings.warn(
                    "Selected polarization values are not evenly spaced. This "
                    "will make it impossible to write this data out to "
                    "some file types"
                )
        pol_inds = sorted(set(pol_inds))
    else:
        pol_inds = None
    history_update_string += " using pyuvdata."
    return blt_inds, freq_inds, pol_inds, history_update_string
def _select_metadata(
self,
blt_inds,
freq_inds,
pol_inds,
history_update_string,
keep_all_metadata=True,
):
"""
Perform select on everything except the data-sized arrays.
Parameters
----------
blt_inds : list of int
list of baseline-time indices to keep. Can be None (to keep everything).
freq_inds : list of int
list of frequency indices to keep. Can be None (to keep everything).
pol_inds : list of int
list of polarization indices to keep. Can be None (to keep everything).
history_update_string : str
string to append to the end of the history.
keep_all_metadata : bool
Option to keep metadata for antennas that are no longer in the dataset.
"""
if blt_inds is not None:
self.Nblts = len(blt_inds)
self.baseline_array = self.baseline_array[blt_inds]
self.Nbls = len(np.unique(self.baseline_array))
self.time_array = self.time_array[blt_inds]
self.integration_time = self.integration_time[blt_inds]
self.lst_array = self.lst_array[blt_inds]
self.uvw_array = self.uvw_array[blt_inds, :]
self.ant_1_array = self.ant_1_array[blt_inds]
self.ant_2_array = self.ant_2_array[blt_inds]
self.Nants_data = self._calc_nants_data()
if self.phase_center_app_ra is not None:
self.phase_center_app_ra = self.phase_center_app_ra[blt_inds]
if self.phase_center_app_dec is not None:
self.phase_center_app_dec = self.phase_center_app_dec[blt_inds]
if self.phase_center_frame_pa is not None:
self.phase_center_frame_pa = self.phase_center_frame_pa[blt_inds]
if self.multi_phase_center:
self.phase_center_id_array = self.phase_center_id_array[blt_inds]
self.Ntimes = len(np.unique(self.time_array))
if not keep_all_metadata:
ants_to_keep = set(np.unique(self.ant_1_array)).union(
np.unique(self.ant_2_array)
)
inds_to_keep = [
self.antenna_numbers.tolist().index(ant) for ant in ants_to_keep
]
self.antenna_names = [self.antenna_names[ind] for ind in inds_to_keep]
self.antenna_numbers = self.antenna_numbers[inds_to_keep]
self.antenna_positions = self.antenna_positions[inds_to_keep, :]
if self.antenna_diameters is not None:
self.antenna_diameters = self.antenna_diameters[inds_to_keep]
self.Nants_telescope = int(len(ants_to_keep))
if freq_inds is not None:
self.Nfreqs = len(freq_inds)
if self.future_array_shapes:
self.freq_array = self.freq_array[freq_inds]
else:
self.freq_array = self.freq_array[:, freq_inds]
if self.flex_spw or self.future_array_shapes:
self.channel_width = self.channel_width[freq_inds]
if self.flex_spw:
self.flex_spw_id_array = self.flex_spw_id_array[freq_inds]
# Use the spw ID array to check and see which SPWs are left
self.spw_array = self.spw_array[
np.isin(self.spw_array, self.flex_spw_id_array)
]
self.Nspws = len(self.spw_array)
if pol_inds is not None:
self.Npols = len(pol_inds)
self.polarization_array = self.polarization_array[pol_inds]
self.history = self.history + history_update_string
def select(
self,
antenna_nums=None,
antenna_names=None,
ant_str=None,
bls=None,
frequencies=None,
freq_chans=None,
times=None,
time_range=None,
lsts=None,
lst_range=None,
polarizations=None,
blt_inds=None,
inplace=True,
keep_all_metadata=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
):
"""
Downselect data to keep on the object along various axes.
Axes that can be selected along include antenna names or numbers,
antenna pairs, frequencies, times and polarizations. Specific
baseline-time indices can also be selected, but this is not commonly
used.
The history attribute on the object will be updated to identify the
operations performed.
Parameters
----------
antenna_nums : array_like of int, optional
The antennas numbers to keep in the object (antenna positions and
names for the removed antennas will be retained unless
`keep_all_metadata` is False). This cannot be provided if
`antenna_names` is also provided.
antenna_names : array_like of str, optional
The antennas names to keep in the object (antenna positions and
names for the removed antennas will be retained unless
`keep_all_metadata` is False). This cannot be provided if
`antenna_nums` is also provided.
bls : list of tuple, optional
A list of antenna number tuples (e.g. [(0, 1), (3, 2)]) or a list of
baseline 3-tuples (e.g. [(0, 1, 'xx'), (2, 3, 'yy')]) specifying baselines
to keep in the object. For length-2 tuples, the ordering of the numbers
within the tuple does not matter. For length-3 tuples, the polarization
string is in the order of the two antennas. If length-3 tuples are
provided, `polarizations` must be None.
ant_str : str, optional
A string containing information about what antenna numbers
and polarizations to keep in the object. Can be 'auto', 'cross', 'all',
or combinations of antenna numbers and polarizations (e.g. '1',
'1_2', '1x_2y'). See tutorial for more examples of valid strings and
the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1, 2) and (2, 3) to return a valid
pyuvdata object.
An ant_str cannot be passed in addition to any of `antenna_nums`,
`antenna_names`, `bls` args or the `polarizations` parameters,
if it is a ValueError will be raised.
frequencies : array_like of float, optional
The frequencies to keep in the object, each value passed here should
exist in the freq_array.
freq_chans : array_like of int, optional
The frequency channel numbers to keep in the object.
times : array_like of float, optional
The times to keep in the object, each value passed here should
exist in the time_array. Cannot be used with `time_range`.
time_range : array_like of float, optional
The time range in Julian Date to keep in the object, must be
length 2. Some of the times in the object should fall between the
first and last elements. Cannot be used with `times`.
lsts : array_like of float, optional
The local sidereal times (LSTs) to keep in the object, each value
passed here should exist in the lst_array. Cannot be used with
`times`, `time_range`, or `lst_range`.
lst_range : array_like of float, optional
The local sidereal time (LST) range in radians to keep in the
object, must be of length 2. Some of the LSTs in the object should
fall between the first and last elements. If the second value is
smaller than the first, the LSTs are treated as having phase-wrapped
around LST = 2*pi = 0, and the LSTs kept on the object will run from
the larger value, through 0, and end at the smaller value.
polarizations : array_like of int or str, optional
The polarizations numbers to keep in the object, each value passed
here should exist in the polarization_array. If passing strings, the
canonical polarization strings (e.g. "xx", "rr") are supported and if the
`x_orientation` attribute is set, the physical dipole strings
(e.g. "nn", "ee") are also supported.
blt_inds : array_like of int, optional
The baseline-time indices to keep in the object. This is
not commonly used.
inplace : bool
Option to perform the select directly on self or return a new UVData
object with just the selected data (the default is True, meaning the
select will be done on self).
keep_all_metadata : bool
Option to keep all the metadata associated with antennas, even those
that do do not have data associated with them after the select option.
run_check : bool
Option to check for the existence and proper shapes of parameters
after downselecting data on this object (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
downselecting data on this object (the default is True, meaning the
acceptable range check will be done).
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
Returns
-------
UVData object or None
None is returned if inplace is True, otherwise a new UVData object
with just the selected data is returned
Raises
------
ValueError
If any of the parameters are set to inappropriate values.
"""
if inplace:
uv_object = self
else:
uv_object = self.copy()
(
blt_inds,
freq_inds,
pol_inds,
history_update_string,
) = uv_object._select_preprocess(
antenna_nums,
antenna_names,
ant_str,
bls,
frequencies,
freq_chans,
times,
time_range,
lsts,
lst_range,
polarizations,
blt_inds,
)
# do select operations on everything except data_array, flag_array
# and nsample_array
uv_object._select_metadata(
blt_inds, freq_inds, pol_inds, history_update_string, keep_all_metadata
)
if self.metadata_only:
if not inplace:
return uv_object
else:
return
if blt_inds is not None:
for param_name, param in zip(
self._data_params, uv_object.data_like_parameters
):
setattr(uv_object, param_name, param[blt_inds])
if freq_inds is not None:
if self.future_array_shapes:
for param_name, param in zip(
self._data_params, uv_object.data_like_parameters
):
setattr(uv_object, param_name, param[:, freq_inds, :])
else:
for param_name, param in zip(
self._data_params, uv_object.data_like_parameters
):
setattr(uv_object, param_name, param[:, :, freq_inds, :])
if pol_inds is not None:
if self.future_array_shapes:
for param_name, param in zip(
self._data_params, uv_object.data_like_parameters
):
setattr(uv_object, param_name, param[:, :, pol_inds])
else:
for param_name, param in zip(
self._data_params, uv_object.data_like_parameters
):
setattr(uv_object, param_name, param[:, :, :, pol_inds])
# check if object is uv_object-consistent
if run_check:
uv_object.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
if not inplace:
return uv_object
def _harmonize_resample_arrays(
self,
inds_to_keep,
temp_baseline,
temp_time,
temp_int_time,
temp_data,
temp_flag,
temp_nsample,
):
"""
Make a self-consistent object after up/downsampling.
This function is called by both upsample_in_time and downsample_in_time.
See those functions for more information about arguments.
"""
self.baseline_array = self.baseline_array[inds_to_keep]
self.time_array = self.time_array[inds_to_keep]
self.integration_time = self.integration_time[inds_to_keep]
self.baseline_array = np.concatenate((self.baseline_array, temp_baseline))
self.time_array = np.concatenate((self.time_array, temp_time))
self.integration_time = np.concatenate((self.integration_time, temp_int_time))
if not self.metadata_only:
self.data_array = self.data_array[inds_to_keep]
self.flag_array = self.flag_array[inds_to_keep]
self.nsample_array = self.nsample_array[inds_to_keep]
# concatenate temp array with existing arrays
self.data_array = np.concatenate((self.data_array, temp_data), axis=0)
self.flag_array = np.concatenate((self.flag_array, temp_flag), axis=0)
self.nsample_array = np.concatenate(
(self.nsample_array, temp_nsample), axis=0
)
# set antenna arrays from baseline_array
self.ant_1_array, self.ant_2_array = self.baseline_to_antnums(
self.baseline_array
)
# update metadata
self.Nblts = self.baseline_array.shape[0]
self.Ntimes = np.unique(self.time_array).size
self.uvw_array = np.zeros((self.Nblts, 3))
# update app source coords to new times
self._set_app_coords_helper()
# set lst array
self.set_lsts_from_time_array()
# temporarily store the metadata only to calculate UVWs correctly
uv_temp = self.copy(metadata_only=True)
# properly calculate the UVWs self-consistently
uv_temp.set_uvws_from_antenna_positions(allow_phasing=True)
self.uvw_array = uv_temp.uvw_array
return
def upsample_in_time(
self,
max_int_time,
blt_order="time",
minor_order="baseline",
summing_correlator_mode=False,
allow_drift=False,
):
"""
Resample to a shorter integration time.
This method will resample a UVData object such that all data samples have
an integration time less than or equal to the `max_int_time`. The new
samples are copied from the original samples (not interpolated).
Parameters
----------
max_int_time : float
Maximum integration time to upsample to in seconds.
blt_order : str
Major baseline ordering for output object. Default is "time". See
the documentation on the `reorder_blts` method for more info.
minor_order : str
Minor baseline ordering for output object. Default is "baseline".
summing_correlator_mode : bool
Option to split the flux from the original samples into the new
samples rather than duplicating the original samples in all the new
samples (undoing an integration rather than an average) to emulate
undoing the behavior in some correlators (e.g. HERA).
allow_drift : bool
Option to allow resampling of drift mode data. If this is False,
drift mode data will be phased before resampling and then unphased
after resampling. Phasing and unphasing can introduce small errors,
but resampling in drift mode may result in unexpected behavior.
Returns
-------
None
"""
# check that max_int_time is sensible given integration_time
min_integration_time = np.amin(self.integration_time)
sensible_min = 1e-2 * min_integration_time
if max_int_time < sensible_min:
raise ValueError(
"Decreasing the integration time by more than a "
"factor of 100 is not supported. Also note that "
"max_int_time should be in seconds."
)
# figure out where integration_time is longer than max_int_time
inds_to_upsample = np.nonzero(
(self.integration_time > max_int_time)
& (
~np.isclose(
self.integration_time,
max_int_time,
rtol=self._integration_time.tols[0],
atol=self._integration_time.tols[1],
)
)
)
if len(inds_to_upsample[0]) == 0:
warnings.warn(
"All values in the integration_time array are already "
"longer than the value specified; doing nothing."
)
return
input_phase_type = self.phase_type
if input_phase_type == "drift":
if allow_drift:
print(
"Data are in drift mode and allow_drift is True, so "
"resampling will be done without phasing."
)
else:
# phase to RA/dec of zenith
print("Data are in drift mode, phasing before resampling.")
phase_time = Time(self.time_array[0], format="jd")
self.phase_to_time(phase_time)
# we want the ceil of this, but we don't want to get the wrong answer
# when the number is very close to an integer but just barely above it.
temp_new_samples = self.integration_time[inds_to_upsample] / max_int_time
mask_close_floor = np.isclose(temp_new_samples, np.floor(temp_new_samples))
temp_new_samples[mask_close_floor] = np.floor(
temp_new_samples[mask_close_floor]
)
n_new_samples = np.asarray(list(map(int, np.ceil(temp_new_samples))))
temp_Nblts = np.sum(n_new_samples)
temp_baseline = np.zeros((temp_Nblts,), dtype=np.int64)
temp_time = np.zeros((temp_Nblts,))
temp_int_time = np.zeros((temp_Nblts,))
if self.metadata_only:
temp_data = None
temp_flag = None
temp_nsample = None
else:
if self.future_array_shapes:
temp_data = np.zeros(
(temp_Nblts, self.Nfreqs, self.Npols), dtype=self.data_array.dtype,
)
temp_flag = np.zeros(
(temp_Nblts, self.Nfreqs, self.Npols), dtype=self.flag_array.dtype,
)
temp_nsample = np.zeros(
(temp_Nblts, self.Nfreqs, self.Npols),
dtype=self.nsample_array.dtype,
)
else:
temp_data = np.zeros(
(temp_Nblts, 1, self.Nfreqs, self.Npols),
dtype=self.data_array.dtype,
)
temp_flag = np.zeros(
(temp_Nblts, 1, self.Nfreqs, self.Npols),
dtype=self.flag_array.dtype,
)
temp_nsample = np.zeros(
(temp_Nblts, 1, self.Nfreqs, self.Npols),
dtype=self.nsample_array.dtype,
)
i0 = 0
for i, ind in enumerate(inds_to_upsample[0]):
i1 = i0 + n_new_samples[i]
temp_baseline[i0:i1] = self.baseline_array[ind]
if not self.metadata_only:
if summing_correlator_mode:
temp_data[i0:i1] = self.data_array[ind] / n_new_samples[i]
else:
temp_data[i0:i1] = self.data_array[ind]
temp_flag[i0:i1] = self.flag_array[ind]
temp_nsample[i0:i1] = self.nsample_array[ind]
# compute the new times of the upsampled array
t0 = self.time_array[ind]
dt = self.integration_time[ind] / n_new_samples[i]
# `offset` will be 0.5 or 1, depending on whether n_new_samples for
# this baseline is even or odd.
offset = 0.5 + 0.5 * (n_new_samples[i] % 2)
n2 = n_new_samples[i] // 2
# Figure out the new center for sample ii taking offset into
# account. Because `t0` is the central time for the original time
# sample, `nt` will range from negative to positive so that
# `temp_time` will result in the central time for the new samples.
# `idx2` tells us how to far to shift and in what direction for each
# new sample.
for ii, idx in enumerate(range(i0, i1)):
idx2 = ii + offset + n2 - n_new_samples[i]
nt = ((t0 * units.day) + (dt * idx2 * units.s)).to(units.day).value
temp_time[idx] = nt
temp_int_time[i0:i1] = dt
i0 = i1
# harmonize temporary arrays with existing ones
inds_to_keep = np.nonzero(self.integration_time <= max_int_time)
self._harmonize_resample_arrays(
inds_to_keep,
temp_baseline,
temp_time,
temp_int_time,
temp_data,
temp_flag,
temp_nsample,
)
if input_phase_type == "drift" and not allow_drift:
print("Unphasing back to drift mode.")
self.unphase_to_drift()
# reorganize along blt axis
self.reorder_blts(order=blt_order, minor_order=minor_order)
# check the resulting object
self.check()
# add to the history
history_update_string = (
" Upsampled data to {:f} second integration time "
"using pyuvdata.".format(max_int_time)
)
self.history = self.history + history_update_string
return
def downsample_in_time(
self,
min_int_time=None,
n_times_to_avg=None,
blt_order="time",
minor_order="baseline",
keep_ragged=True,
summing_correlator_mode=False,
allow_drift=False,
):
"""
Average to a longer integration time.
This method will average a UVData object either by an integer factor
(by setting `n_times_to_avg`) or by a factor that can differ by
baseline-time sample such that after averaging, the samples have an
integration time greater than or equal to the `min_int_time` (up to the
tolerance on the integration_time).
Note that if the integrations for a baseline do not divide evenly by the
`n_times_to_avg` or into the specified `min_int_time`, the final
integrations for that baseline may have integration times less than
`min_int_time` or be composed of fewer input integrations than `n_times_to_avg`.
This behavior can be controlled with the `keep_ragged` argument.
The new samples are averages of the original samples (not interpolations).
Parameters
----------
min_int_time : float
Minimum integration time to average the UVData integration_time to
in seconds.
n_times_to_avg : int
Number of time integrations to average together.
blt_order : str
Major baseline ordering for output object. Default is "time". See the
documentation on the `reorder_blts` method for more details.
minor_order : str
Minor baseline ordering for output object. Default is "baseline".
keep_ragged : bool
When averaging baselines that do not evenly divide into min_int_time,
or that have a number of integrations that do not evenly divide by
n_times_to_avg, keep_ragged controls whether to keep the (averaged)
integrations corresponding to the remaining samples (keep_ragged=True),
or discard them (keep_ragged=False).
summing_correlator_mode : bool
Option to integrate the flux from the original samples rather than
average the flux to emulate the behavior in some correlators (e.g. HERA).
allow_drift : bool
Option to allow averaging of drift mode data. If this is False,
drift mode data will be phased before resampling and then unphased
after resampling. Phasing and unphasing can introduce small errors,
but averaging in drift mode may result in more decoherence.
Returns
-------
None
"""
if min_int_time is None and n_times_to_avg is None:
raise ValueError("Either min_int_time or n_times_to_avg must be set.")
if min_int_time is not None and n_times_to_avg is not None:
raise ValueError("Only one of min_int_time or n_times_to_avg can be set.")
if self.Ntimes == 1:
raise ValueError("Only one time in this object, cannot downsample.")
if min_int_time is not None:
# check that min_int_time is sensible given integration_time
max_integration_time = np.amax(self.integration_time)
sensible_max = 1e2 * max_integration_time
if min_int_time > sensible_max:
raise ValueError(
"Increasing the integration time by more than a "
"factor of 100 is not supported. Also note that "
"min_int_time should be in seconds."
)
# first figure out where integration_time is shorter than min_int_time
inds_to_downsample = np.nonzero(
(self.integration_time < min_int_time)
& (
~np.isclose(
self.integration_time,
min_int_time,
rtol=self._integration_time.tols[0],
atol=self._integration_time.tols[1],
)
)
)
if len(inds_to_downsample[0]) == 0:
warnings.warn(
"All values in the integration_time array are already "
"longer than the value specified; doing nothing."
)
return
else:
if not isinstance(n_times_to_avg, (int, np.integer)):
raise ValueError("n_times_to_avg must be an integer.")
# If we're going to do actual work, reorder the baselines to ensure time is
# monotonically increasing.
# Default of reorder_blts is baseline major, time minor, which is what we want.
self.reorder_blts()
if min_int_time is not None:
# now re-compute inds_to_downsample, in case things have changed
inds_to_downsample = np.nonzero(
(self.integration_time < min_int_time)
& ~np.isclose(
self.integration_time,
min_int_time,
rtol=self._integration_time.tols[0],
atol=self._integration_time.tols[1],
)
)
bls_to_downsample = np.unique(self.baseline_array[inds_to_downsample])
else:
bls_to_downsample = np.unique(self.baseline_array)
# figure out how many baseline times we'll end up with at the end
n_new_samples = 0
for bl in bls_to_downsample:
bl_inds = np.nonzero(self.baseline_array == bl)[0]
int_times = self.integration_time[bl_inds]
if min_int_time is not None:
running_int_time = 0.0
for itime, int_time in enumerate(int_times):
running_int_time += int_time
over_min_int_time = running_int_time > min_int_time or np.isclose(
running_int_time,
min_int_time,
rtol=self._integration_time.tols[0],
atol=self._integration_time.tols[1],
)
last_sample = itime == len(bl_inds) - 1
# We sum up all the samples found so far if we're over the
# target minimum time, or we've hit the end of the time
# samples for this baseline.
if over_min_int_time or last_sample:
if last_sample and not (over_min_int_time or keep_ragged):
# don't do anything -- implicitly drop these integrations
continue
n_new_samples += 1
running_int_time = 0.0
else:
n_bl_times = self.time_array[bl_inds].size
nsample_temp = np.sum(n_bl_times / n_times_to_avg)
if keep_ragged and not np.isclose(nsample_temp, np.floor(nsample_temp)):
n_new_samples += np.ceil(nsample_temp).astype(int)
else:
n_new_samples += np.floor(nsample_temp).astype(int)
# figure out if there are any time gaps in the data
# meaning that the time differences are larger than the integration times
# time_array is in JD, need to convert to seconds for the diff
dtime = np.ediff1d(self.time_array[bl_inds]) * 24 * 3600
int_times = int_times
if len(np.unique(int_times)) == 1:
# this baseline has all the same integration times
if len(np.unique(dtime)) > 1 and not np.isclose(
np.max(dtime),
np.min(dtime),
rtol=self._integration_time.tols[0],
atol=self._integration_time.tols[1],
):
warnings.warn(
"There is a gap in the times of baseline {bl}. "
"The output may include averages across long "
"time gaps.".format(bl=self.baseline_to_antnums(bl))
)
elif not np.isclose(
dtime[0],
int_times[0],
rtol=self._integration_time.tols[0],
atol=self._integration_time.tols[1],
):
warnings.warn(
"The time difference between integrations is "
"not the same as the integration time for "
"baseline {bl}. The output may average across "
"longer time intervals than "
"expected".format(bl=self.baseline_to_antnums(bl))
)
else:
# varying integration times for this baseline, need to be more careful
expected_dtimes = (int_times[:-1] + int_times[1:]) / 2
wh_diff = np.nonzero(~np.isclose(dtime, expected_dtimes))
if wh_diff[0].size > 1:
warnings.warn(
"The time difference between integrations is "
"different than the expected given the "
"integration times for baseline {bl}. The "
"output may include averages across long time "
"gaps.".format(bl=self.baseline_to_antnums(bl))
)
temp_Nblts = n_new_samples
input_phase_type = self.phase_type
if input_phase_type == "drift":
if allow_drift:
print(
"Data are in drift mode and allow_drift is True, so "
"resampling will be done without phasing."
)
else:
# phase to RA/dec of zenith
print("Data are in drift mode, phasing before resampling.")
phase_time = Time(self.time_array[0], format="jd")
self.phase_to_time(phase_time)
# make temporary arrays
temp_baseline = np.zeros((temp_Nblts,), dtype=np.int64)
temp_time = np.zeros((temp_Nblts,))
temp_int_time = np.zeros((temp_Nblts,))
if self.metadata_only:
temp_data = None
temp_flag = None
temp_nsample = None
else:
if self.future_array_shapes:
temp_data = np.zeros(
(temp_Nblts, self.Nfreqs, self.Npols), dtype=self.data_array.dtype,
)
temp_flag = np.zeros(
(temp_Nblts, self.Nfreqs, self.Npols), dtype=self.flag_array.dtype,
)
temp_nsample = np.zeros(
(temp_Nblts, self.Nfreqs, self.Npols),
dtype=self.nsample_array.dtype,
)
else:
temp_data = np.zeros(
(temp_Nblts, 1, self.Nfreqs, self.Npols),
dtype=self.data_array.dtype,
)
temp_flag = np.zeros(
(temp_Nblts, 1, self.Nfreqs, self.Npols),
dtype=self.flag_array.dtype,
)
temp_nsample = np.zeros(
(temp_Nblts, 1, self.Nfreqs, self.Npols),
dtype=self.nsample_array.dtype,
)
temp_idx = 0
for bl in bls_to_downsample:
bl_inds = np.nonzero(self.baseline_array == bl)[0]
running_int_time = 0.0
summing_idx = 0
n_sum = 0
for itime, int_time in enumerate(self.integration_time[bl_inds]):
running_int_time += int_time
n_sum += 1
if min_int_time is not None:
over_min_int_time = running_int_time > min_int_time or np.isclose(
running_int_time,
min_int_time,
rtol=self._integration_time.tols[0],
atol=self._integration_time.tols[1],
)
else:
over_min_int_time = n_sum >= n_times_to_avg
last_sample = itime == len(bl_inds) - 1
# We sum up all the samples found so far if we're over the
# target minimum time, or we've hit the end of the time
# samples for this baseline.
if over_min_int_time or last_sample:
if last_sample and not (over_min_int_time or keep_ragged):
# don't do anything -- implicitly drop these integrations
continue
# sum together that number of samples
temp_baseline[temp_idx] = bl
# this might be wrong if some of the constituent times are
# *totally* flagged
averaging_idx = bl_inds[summing_idx : summing_idx + n_sum]
# take potential non-uniformity of integration_time into account
temp_time[temp_idx] = np.sum(
self.time_array[averaging_idx]
* self.integration_time[averaging_idx]
) / np.sum(self.integration_time[averaging_idx])
temp_int_time[temp_idx] = running_int_time
if not self.metadata_only:
# if all inputs are flagged, the flag array should be True,
# otherwise it should be False.
# The sum below will be zero if it's all flagged and
# greater than zero otherwise
# Then we use a test against 0 to turn it into a Boolean
temp_flag[temp_idx] = (
np.sum(~self.flag_array[averaging_idx], axis=0) == 0
)
mask = self.flag_array[averaging_idx]
# need to update mask if a downsampled visibility will
# be flagged so that we don't set it to zero
if (temp_flag[temp_idx]).any():
if self.future_array_shapes:
ax1_inds, ax2_inds = np.nonzero(temp_flag[temp_idx])
mask[:, ax1_inds, ax2_inds] = False
else:
ax1_inds, ax2_inds, ax3_inds = np.nonzero(
temp_flag[temp_idx]
)
mask[:, ax1_inds, ax2_inds, ax3_inds] = False
masked_data = np.ma.masked_array(
self.data_array[averaging_idx], mask=mask
)
# nsample array is the fraction of data that we actually kept,
# relative to the amount that went into the sum or average
nsample_dtype = self.nsample_array.dtype.type
# promote nsample dtype if half-precision
if nsample_dtype is np.float16:
masked_nsample_dtype = np.float32
else:
masked_nsample_dtype = nsample_dtype
masked_nsample = np.ma.masked_array(
self.nsample_array[averaging_idx],
mask=mask,
dtype=masked_nsample_dtype,
)
if self.future_array_shapes:
int_time_arr = self.integration_time[
averaging_idx, np.newaxis, np.newaxis
]
else:
int_time_arr = self.integration_time[
averaging_idx, np.newaxis, np.newaxis, np.newaxis
]
masked_int_time = np.ma.masked_array(
np.ones_like(
self.data_array[averaging_idx],
dtype=self.integration_time.dtype,
)
* int_time_arr,
mask=mask,
)
if summing_correlator_mode:
temp_data[temp_idx] = np.sum(masked_data, axis=0)
else:
# take potential non-uniformity of integration_time
# and nsamples into account
weights = masked_nsample * masked_int_time
weighted_data = masked_data * weights
temp_data[temp_idx] = np.sum(
weighted_data, axis=0
) / np.sum(weights, axis=0)
# output of masked array calculation should be coerced
# to the datatype of temp_nsample (which has the same
# precision as the original nsample_array)
temp_nsample[temp_idx] = np.sum(
masked_nsample * masked_int_time, axis=0
) / np.sum(self.integration_time[averaging_idx])
# increment counters and reset values
temp_idx += 1
summing_idx += n_sum
running_int_time = 0.0
n_sum = 0
# make sure we've populated the right number of baseline-times
assert temp_idx == temp_Nblts, (
"Wrong number of baselines. Got {:d}, expected {:d}. This is a bug, "
"please make an issue at https://github.com/RadioAstronomySoftwareGroup/"
"pyuvdata/issues".format(temp_idx, temp_Nblts)
)
# harmonize temporary arrays with existing ones
if min_int_time is not None:
bls_not_downsampled = set(self.baseline_array) - set(bls_to_downsample)
inds_to_keep = []
for bl in bls_not_downsampled:
inds_to_keep += np.nonzero(self.baseline_array == bl)[0].tolist()
inds_to_keep = np.array(inds_to_keep, dtype=np.int64)
else:
inds_to_keep = np.array([], dtype=bool)
self._harmonize_resample_arrays(
inds_to_keep,
temp_baseline,
temp_time,
temp_int_time,
temp_data,
temp_flag,
temp_nsample,
)
if input_phase_type == "drift" and not allow_drift:
print("Unphasing back to drift mode.")
self.unphase_to_drift()
# reorganize along blt axis
self.reorder_blts(order=blt_order, minor_order=minor_order)
# check the resulting object
self.check()
# add to the history
if min_int_time is not None:
history_update_string = (
" Downsampled data to {:f} second integration "
"time using pyuvdata.".format(min_int_time)
)
else:
history_update_string = (
" Downsampled data by a factor of {} in "
"time using pyuvdata.".format(n_times_to_avg)
)
self.history = self.history + history_update_string
return
def resample_in_time(
self,
target_time,
only_downsample=False,
only_upsample=False,
blt_order="time",
minor_order="baseline",
keep_ragged=True,
summing_correlator_mode=False,
allow_drift=False,
):
"""
Intelligently upsample or downsample a UVData object to the target time.
Parameters
----------
target_time : float
The target integration time to resample to, in seconds.
only_downsample : bool
Option to only call bda_downsample.
only_upsample : bool
Option to only call bda_upsample.
blt_order : str
Major baseline ordering for output object. Default is "time". See the
documentation on the `reorder_blts` method for more details.
minor_order : str
Minor baseline ordering for output object. Default is "baseline".
keep_ragged : bool
When averaging baselines that do not evenly divide into min_int_time,
keep_ragged controls whether to keep the (summed) integrations
corresponding to the remaining samples (keep_ragged=True), or
discard them (keep_ragged=False). Note this option only applies to the
`bda_downsample` method.
summing_correlator_mode : bool
Option to integrate or split the flux from the original samples
rather than average or duplicate the flux from the original samples
to emulate the behavior in some correlators (e.g. HERA).
allow_drift : bool
Option to allow resampling of drift mode data. If this is False,
drift mode data will be phased before resampling and then unphased
after resampling. Phasing and unphasing can introduce small errors,
but resampling in drift mode may result in unexpected behavior.
Returns
-------
None
"""
# figure out integration times relative to target time
min_int_time = np.amin(self.integration_time)
max_int_time = np.amax(self.integration_time)
if int(np.floor(target_time / min_int_time)) >= 2 and not only_upsample:
downsample = True
else:
downsample = False
if int(np.floor(max_int_time / target_time)) >= 2 and not only_downsample:
upsample = True
else:
upsample = False
if not downsample and not upsample:
warnings.warn(
"No resampling will be done because target time is not "
"a factor of 2 or more off from integration_time. To "
"force resampling set only_upsample or only_downsample "
"keywords or call upsample_in_time or downsample_in_time."
)
return
if downsample:
self.downsample_in_time(
target_time,
blt_order=blt_order,
minor_order=minor_order,
keep_ragged=keep_ragged,
summing_correlator_mode=summing_correlator_mode,
allow_drift=allow_drift,
)
if upsample:
self.upsample_in_time(
target_time,
blt_order=blt_order,
minor_order=minor_order,
summing_correlator_mode=summing_correlator_mode,
allow_drift=allow_drift,
)
return
    def frequency_average(
        self, n_chan_to_avg, summing_correlator_mode=False, propagate_flags=False
    ):
        """
        Average in frequency.
        Does a simple average over an integer number of input channels, leaving
        flagged samples out of the average.
        In the future, this method will support non-equally spaced channels
        and varying channel widths. It will also support setting the frequency
        to the true mean of the averaged non-flagged frequencies rather than
        the simple mean of the input channel frequencies. For now it does not.
        Parameters
        ----------
        n_chan_to_avg : int
            Number of channels to average together. If Nfreqs does not divide
            evenly by this number, the frequencies at the end of the freq_array
            will be dropped to make it evenly divisable. To control which
            frequencies are removed, use select before calling this method.
        summing_correlator_mode : bool
            Option to integrate or split the flux from the original samples
            rather than average or duplicate the flux from the original samples
            to emulate the behavior in some correlators (e.g. HERA).
        propagate_flags: bool
            Option to flag an averaged entry even if some of its contributors
            are not flagged. The averaged result will still leave the flagged
            samples out of the average, except when all contributors are
            flagged.
        Raises
        ------
        NotImplementedError
            If this object uses flexible spectral windows (`flex_spw`), which
            this method does not yet support.
        """
        if self.flex_spw:
            raise NotImplementedError(
                "Frequency averaging not (yet) available for flexible spectral windows"
            )
        # Averaging below assumes evenly spaced channels; this errors otherwise.
        self._check_freq_spacing()
        # Number of complete output channels; leftover input channels (if any)
        # are dropped after the warning below.
        n_final_chan = int(np.floor(self.Nfreqs / n_chan_to_avg))
        nfreq_mod_navg = self.Nfreqs % n_chan_to_avg
        if nfreq_mod_navg != 0:
            # not an even number of final channels
            warnings.warn(
                "Nfreqs does not divide by `n_chan_to_avg` evenly. "
                "The final {} frequencies will be excluded, to "
                "control which frequencies to exclude, use a "
                "select to control.".format(nfreq_mod_navg)
            )
        # Keep only the leading channels that fill complete output channels.
        chan_to_keep = np.arange(n_final_chan * n_chan_to_avg)
        self.select(freq_chans=chan_to_keep)
        if self.future_array_shapes:
            # future shapes: freq_array is 1D; output frequency is the simple
            # mean of the input channels, output width is the summed width.
            self.freq_array = self.freq_array.reshape(
                (n_final_chan, n_chan_to_avg)
            ).mean(axis=1)
            self.channel_width = self.channel_width.reshape(
                (n_final_chan, n_chan_to_avg)
            ).sum(axis=1)
        else:
            # current shapes: freq_array has a leading spw axis and
            # channel_width is a scalar.
            self.freq_array = self.freq_array.reshape(
                (1, n_final_chan, n_chan_to_avg)
            ).mean(axis=2)
            self.channel_width = self.channel_width * n_chan_to_avg
        self.Nfreqs = n_final_chan
        if self.eq_coeffs is not None:
            # Averaging eq_coeffs is only strictly valid if they are constant
            # in frequency; warn if they vary.
            eq_coeff_diff = np.diff(self.eq_coeffs, axis=1)
            if np.abs(np.max(eq_coeff_diff)) > 0:
                warnings.warn(
                    "eq_coeffs vary by frequency. They should be "
                    "applied to the data using `remove_eq_coeffs` "
                    "before frequency averaging."
                )
            self.eq_coeffs = self.eq_coeffs.reshape(
                (self.Nants_telescope, n_final_chan, n_chan_to_avg)
            ).mean(axis=2)
        if not self.metadata_only:
            # Reshape so the channels to be averaged form their own axis
            # (axis 2 for future shapes, axis 3 otherwise).
            if self.future_array_shapes:
                shape_tuple = (
                    self.Nblts,
                    n_final_chan,
                    n_chan_to_avg,
                    self.Npols,
                )
            else:
                shape_tuple = (
                    self.Nblts,
                    1,
                    n_final_chan,
                    n_chan_to_avg,
                    self.Npols,
                )
            # mask marks flagged input samples to be excluded from the average.
            mask = self.flag_array.reshape(shape_tuple)
            if propagate_flags:
                # if any contributors are flagged, the result should be flagged
                if self.future_array_shapes:
                    self.flag_array = np.any(
                        self.flag_array.reshape(shape_tuple), axis=2
                    )
                else:
                    self.flag_array = np.any(
                        self.flag_array.reshape(shape_tuple), axis=3
                    )
            else:
                # if all inputs are flagged, the flag array should be True,
                # otherwise it should be False.
                # The sum below will be zero if it's all flagged and
                # greater than zero otherwise
                # Then we use a test against 0 to turn it into a Boolean
                if self.future_array_shapes:
                    self.flag_array = (
                        np.sum(~self.flag_array.reshape(shape_tuple), axis=2) == 0
                    )
                else:
                    self.flag_array = (
                        np.sum(~self.flag_array.reshape(shape_tuple), axis=3) == 0
                    )
            # need to update mask if a downsampled visibility will be flagged
            # so that we don't set it to zero
            for n_chan in np.arange(n_final_chan):
                if self.future_array_shapes:
                    if (self.flag_array[:, n_chan]).any():
                        ax0_inds, ax2_inds = np.nonzero(self.flag_array[:, n_chan, :])
                        # Only if all entries are masked
                        # May not happen due to propagate_flags keyword
                        # mask should be left alone otherwise
                        # NOTE(review): np.all here tests all selected
                        # (blt, pol) entries at once, not per entry — appears
                        # intentional, confirm against upstream behavior.
                        if np.all(mask[ax0_inds, n_chan, :, ax2_inds]):
                            mask[ax0_inds, n_chan, :, ax2_inds] = False
                else:
                    if (self.flag_array[:, :, n_chan]).any():
                        ax0_inds, ax1_inds, ax3_inds = np.nonzero(
                            self.flag_array[:, :, n_chan, :]
                        )
                        # Only if all entries are masked
                        # May not happen due to propagate_flags keyword
                        # mask should be left alone otherwise
                        if np.all(mask[ax0_inds, ax1_inds, n_chan, :, ax3_inds]):
                            mask[ax0_inds, ax1_inds, n_chan, :, ax3_inds] = False
            masked_data = np.ma.masked_array(
                self.data_array.reshape(shape_tuple), mask=mask
            )
            self.nsample_array = self.nsample_array.reshape(shape_tuple)
            # promote nsample dtype if half-precision
            nsample_dtype = self.nsample_array.dtype.type
            if nsample_dtype is np.float16:
                masked_nsample_dtype = np.float32
            else:
                masked_nsample_dtype = nsample_dtype
            masked_nsample = np.ma.masked_array(
                self.nsample_array, mask=mask, dtype=masked_nsample_dtype
            )
            if summing_correlator_mode:
                # Integrate (sum) the unflagged samples instead of averaging.
                if self.future_array_shapes:
                    self.data_array = np.sum(masked_data, axis=2).data
                else:
                    self.data_array = np.sum(masked_data, axis=3).data
            else:
                # need to weight by the nsample_array
                if self.future_array_shapes:
                    self.data_array = (
                        np.sum(masked_data * masked_nsample, axis=2)
                        / np.sum(masked_nsample, axis=2)
                    ).data
                else:
                    self.data_array = (
                        np.sum(masked_data * masked_nsample, axis=3)
                        / np.sum(masked_nsample, axis=3)
                    ).data
            # nsample array is the fraction of data that we actually kept,
            # relative to the amount that went into the sum or average.
            # Need to take care to return precision back to original value.
            if self.future_array_shapes:
                self.nsample_array = (
                    np.sum(masked_nsample, axis=2) / float(n_chan_to_avg)
                ).data.astype(nsample_dtype)
            else:
                self.nsample_array = (
                    np.sum(masked_nsample, axis=3) / float(n_chan_to_avg)
                ).data.astype(nsample_dtype)
def get_redundancies(
self,
tol=1.0,
use_antpos=False,
include_conjugates=False,
include_autos=True,
conjugate_bls=False,
):
"""
Get redundant baselines to a given tolerance.
This can be used to identify redundant baselines present in the data,
or find all possible redundant baselines given the antenna positions.
Parameters
----------
tol : float
Redundancy tolerance in meters (default 1m).
use_antpos : bool
Use antenna positions to find all possible redundant groups for this
telescope (default False).
The returned baselines are in the 'u>0' convention.
include_conjugates : bool
Option to include baselines that are redundant under conjugation.
Only used if use_antpos is False.
include_autos : bool
Option to include autocorrelations in the full redundancy list.
Only used if use_antpos is True.
conjugate_bls : bool
If using antenna positions, this will conjugate baselines on this
object to correspond with those in the returned groups.
Returns
-------
baseline_groups : list of lists of int
List of lists of redundant baseline numbers
vec_bin_centers : list of ndarray of float
List of vectors describing redundant group uvw centers
lengths : list of float
List of redundant group baseline lengths in meters
conjugates : list of int, or None, optional
List of indices for baselines that must be conjugated to fit into their
redundant groups.
Will return None if use_antpos is True and include_conjugates is True
Only returned if include_conjugates is True
Notes
-----
If use_antpos is set, then this function will find all redundant baseline groups
for this telescope, under the u>0 antenna ordering convention.
If use_antpos is not set, this function will look for redundant groups
in the data.
"""
if use_antpos:
antpos, numbers = self.get_ENU_antpos(center=False)
result = uvutils.get_antenna_redundancies(
numbers, antpos, tol=tol, include_autos=include_autos
)
if conjugate_bls:
self.conjugate_bls(convention="u>0", uvw_tol=tol)
if include_conjugates:
result = result + (None,)
return result
_, unique_inds = np.unique(self.baseline_array, return_index=True)
unique_inds.sort()
baseline_vecs = np.take(self.uvw_array, unique_inds, axis=0)
baselines = np.take(self.baseline_array, unique_inds)
return uvutils.get_baseline_redundancies(
baselines, baseline_vecs, tol=tol, with_conjugates=include_conjugates
)
    def compress_by_redundancy(
        self, method="select", tol=1.0, inplace=True, keep_all_metadata=True
    ):
        """
        Downselect or average to only have one baseline per redundant group.
        Either select the first baseline in the redundant group or average over
        the baselines in the redundant group.
        Uses utility functions to find redundant baselines to the given tolerance,
        then select on those.
        Parameters
        ----------
        tol : float
            Redundancy tolerance in meters, default is 1.0 corresponding to 1 meter.
        method : str
            Options are "select", which just keeps the first baseline in each
            redundant group or "average" which averages over the baselines in each
            redundant group and assigns the average to the first baseline in the group.
        inplace : bool
            Option to do selection on current object.
        keep_all_metadata : bool
            Option to keep all the metadata associated with antennas,
            even those that do not remain after the select option.
        Returns
        -------
        UVData object or None
            if inplace is False, return the compressed UVData object
        Raises
        ------
        ValueError
            If `method` is not one of "select" or "average".
        """
        allowed_methods = ["select", "average"]
        if method not in allowed_methods:
            raise ValueError(f"method must be one of {allowed_methods}")
        # Find the redundant groups present in the data, including baselines
        # that only match a group after conjugation.
        red_gps, centers, lengths, conjugates = self.get_redundancies(
            tol, include_conjugates=True
        )
        # The first baseline in each group is the one kept in the output.
        bl_ants = [self.baseline_to_antnums(gp[0]) for gp in red_gps]
        if method == "average":
            # do a metadata only select to get all the metadata right
            new_obj = self.copy(metadata_only=True)
            new_obj.select(bls=bl_ants, keep_all_metadata=keep_all_metadata)
            if not self.metadata_only:
                # initalize the data like arrays
                if new_obj.future_array_shapes:
                    temp_data_array = np.zeros(
                        (new_obj.Nblts, new_obj.Nfreqs, new_obj.Npols),
                        dtype=self.data_array.dtype,
                    )
                    temp_nsample_array = np.zeros(
                        (new_obj.Nblts, new_obj.Nfreqs, new_obj.Npols),
                        dtype=self.nsample_array.dtype,
                    )
                    temp_flag_array = np.zeros(
                        (new_obj.Nblts, new_obj.Nfreqs, new_obj.Npols),
                        dtype=self.flag_array.dtype,
                    )
                else:
                    temp_data_array = np.zeros(
                        (new_obj.Nblts, 1, new_obj.Nfreqs, new_obj.Npols),
                        dtype=self.data_array.dtype,
                    )
                    temp_nsample_array = np.zeros(
                        (new_obj.Nblts, 1, new_obj.Nfreqs, new_obj.Npols),
                        dtype=self.nsample_array.dtype,
                    )
                    temp_flag_array = np.zeros(
                        (new_obj.Nblts, 1, new_obj.Nfreqs, new_obj.Npols),
                        dtype=self.flag_array.dtype,
                    )
            for grp_ind, group in enumerate(red_gps):
                # Split the group into baselines used as-is and baselines that
                # must be conjugated before averaging.
                if len(conjugates) > 0:
                    conj_group = set(group).intersection(conjugates)
                    reg_group = list(set(group) - conj_group)
                    conj_group = list(conj_group)
                else:
                    reg_group = group
                    conj_group = []
                # Collect all blt indices and times for this group.
                group_times = []
                group_inds = []
                conj_group_inds = []
                conj_group_times = []
                for bl in reg_group:
                    bl_inds = np.where(self.baseline_array == bl)[0]
                    group_inds.extend(bl_inds)
                    group_times.extend(self.time_array[bl_inds])
                for bl in conj_group:
                    bl_inds = np.where(self.baseline_array == bl)[0]
                    conj_group_inds.extend(bl_inds)
                    conj_group_times.extend(self.time_array[bl_inds])
                group_inds = np.array(group_inds, dtype=np.int64)
                conj_group_inds = np.array(conj_group_inds, dtype=np.int64)
                # now we have to figure out which times are the same to a tolerance
                # so we can average over them.
                time_inds = np.arange(len(group_times + conj_group_times))
                time_gps = uvutils.find_clusters(
                    time_inds,
                    np.array(group_times + conj_group_times),
                    self._time_array.tols[1],
                )
                # average over the same times
                obj_bl = bl_ants[grp_ind]
                obj_inds = new_obj._key2inds(obj_bl)[0]
                obj_times = new_obj.time_array[obj_inds]
                for gp in time_gps:
                    # Note that this average time is just used for identifying the
                    # index to use for the blt axis on the averaged data set.
                    # We do not update the actual time on that data set because it can
                    # result in confusing behavior -- small numerical rounding errors
                    # can result in many more unique times in the final data set than
                    # in the initial data set.
                    avg_time = np.average(np.array(group_times + conj_group_times)[gp])
                    obj_time_ind = np.where(
                        np.abs(obj_times - avg_time) < self._time_array.tols[1]
                    )[0]
                    if obj_time_ind.size == 1:
                        this_obj_ind = obj_inds[obj_time_ind[0]]
                    else:
                        warnings.warn(
                            "Index baseline in the redundant group does not "
                            "have all the times, compressed object will be "
                            "missing those times."
                        )
                        continue
                    # time_ind contains indices for both regular and conjugated bls
                    # because we needed to group them together in time.
                    # The regular ones are first and extend the length of group_times,
                    # so we use that to split them back up.
                    regular_orientation = np.array(
                        [time_ind for time_ind in gp if time_ind < len(group_times)],
                        dtype=np.int64,
                    )
                    regular_inds = group_inds[np.array(regular_orientation)]
                    conj_orientation = np.array(
                        [
                            time_ind - len(group_times)
                            for time_ind in gp
                            if time_ind >= len(group_times)
                        ],
                        dtype=np.int64,
                    )
                    conj_inds = conj_group_inds[np.array(conj_orientation)]
                    # check that the integration times are all the same
                    int_times = np.concatenate(
                        (
                            self.integration_time[regular_inds],
                            self.integration_time[conj_inds],
                        )
                    )
                    if not np.all(
                        np.abs(int_times - new_obj.integration_time[obj_time_ind])
                        < new_obj._integration_time.tols[1]
                    ):
                        warnings.warn(
                            "Integrations times are not identical in a redundant "
                            "group. Averaging anyway but this may cause unexpected "
                            "behavior."
                        )
                    if not self.metadata_only:
                        # Conjugate the visibilities from the conjugated
                        # baselines so all contributors share one orientation.
                        vis_to_avg = np.concatenate(
                            (
                                self.data_array[regular_inds],
                                np.conj(self.data_array[conj_inds]),
                            )
                        )
                        nsample_to_avg = np.concatenate(
                            (
                                self.nsample_array[regular_inds],
                                self.nsample_array[conj_inds],
                            )
                        )
                        flags_to_avg = np.concatenate(
                            (self.flag_array[regular_inds], self.flag_array[conj_inds],)
                        )
                        # if all data is flagged, average it all as if it were not
                        if np.all(flags_to_avg):
                            mask = np.zeros_like(flags_to_avg)
                        else:
                            mask = flags_to_avg
                        vis_to_avg = np.ma.masked_array(vis_to_avg, mask=mask)
                        nsample_to_avg = np.ma.masked_array(nsample_to_avg, mask=mask)
                        # nsample-weighted average of the unflagged samples;
                        # the summed nsample becomes the output nsample.
                        avg_vis = np.ma.average(
                            vis_to_avg, weights=nsample_to_avg, axis=0
                        )
                        avg_nsample = np.sum(nsample_to_avg, axis=0)
                        avg_flag = np.all(flags_to_avg, axis=0)
                        temp_data_array[this_obj_ind] = avg_vis
                        temp_nsample_array[this_obj_ind] = avg_nsample
                        temp_flag_array[this_obj_ind] = avg_flag
            if inplace:
                self.select(bls=bl_ants, keep_all_metadata=keep_all_metadata)
                if not self.metadata_only:
                    self.data_array = temp_data_array
                    self.nsample_array = temp_nsample_array
                    self.flag_array = temp_flag_array
                self.check()
                return
            else:
                if not self.metadata_only:
                    new_obj.data_array = temp_data_array
                    new_obj.nsample_array = temp_nsample_array
                    new_obj.flag_array = temp_flag_array
                new_obj.check()
                return new_obj
        else:
            # method == "select": simply keep the first baseline of each group.
            return self.select(
                bls=bl_ants, inplace=inplace, keep_all_metadata=keep_all_metadata
            )
    def inflate_by_redundancy(self, tol=1.0, blt_order="time", blt_minor_order=None):
        """
        Expand data to full size, copying data among redundant baselines.
        Note that this method conjugates baselines to the 'u>0' convention in order
        to inflate the redundancies.
        Parameters
        ----------
        tol : float
            Redundancy tolerance in meters, default is 1.0 corresponding to 1 meter.
        blt_order : str
            string specifying primary order along the blt axis (see `reorder_blts`)
        blt_minor_order : str
            string specifying minor order along the blt axis (see `reorder_blts`)
        """
        self.conjugate_bls(convention="u>0")
        # Full redundancy list implied by the antenna positions (not just the
        # baselines present in the data).
        red_gps, centers, lengths = self.get_redundancies(
            tol=tol, use_antpos=True, conjugate_bls=True
        )
        # Stack redundant groups into one array.
        group_index, bl_array_full = zip(
            *[(i, bl) for i, gp in enumerate(red_gps) for bl in gp]
        )
        # TODO should be an assert that each baseline only ends up in one group
        # Map group index to blt indices in the compressed array.
        bl_array_comp = self.baseline_array
        uniq_bl = np.unique(bl_array_comp)
        group_blti = {}
        Nblts_full = 0
        for i, gp in enumerate(red_gps):
            for bl in gp:
                # First baseline in the group that is also in the compressed
                # baseline array.
                if bl in uniq_bl:
                    group_blti[i] = np.where(bl == bl_array_comp)[0]
                    # add number of blts for this group
                    Nblts_full += group_blti[i].size * len(gp)
                    break
        blt_map = np.zeros(Nblts_full, dtype=int)
        full_baselines = np.zeros(Nblts_full, dtype=int)
        missing = []
        counter = 0
        for bl, gi in zip(bl_array_full, group_index):
            try:
                # this makes the time the fastest axis
                blt_map[counter : counter + group_blti[gi].size] = group_blti[gi]
                full_baselines[counter : counter + group_blti[gi].size] = bl
                counter += group_blti[gi].size
            except KeyError:
                # group gi has no representative in the data; record and skip.
                missing.append(bl)
                pass
        if np.any(missing):
            warnings.warn("Missing some redundant groups. Filling in available data.")
        # blt_map is an index array mapping compressed blti indices to uncompressed
        self.data_array = self.data_array[blt_map, ...]
        self.nsample_array = self.nsample_array[blt_map, ...]
        self.flag_array = self.flag_array[blt_map, ...]
        self.time_array = self.time_array[blt_map]
        self.lst_array = self.lst_array[blt_map]
        self.integration_time = self.integration_time[blt_map]
        self.uvw_array = self.uvw_array[blt_map, ...]
        self.baseline_array = full_baselines
        self.ant_1_array, self.ant_2_array = self.baseline_to_antnums(
            self.baseline_array
        )
        # Refresh the counts that depend on the expanded baseline list.
        self.Nants_data = self._calc_nants_data()
        self.Nbls = np.unique(self.baseline_array).size
        self.Nblts = Nblts_full
        # Apparent-coordinate and multi-phase-center arrays (when present)
        # follow the blt axis, so remap them too.
        if self.phase_center_app_ra is not None:
            self.phase_center_app_ra = self.phase_center_app_ra[blt_map]
        if self.phase_center_app_dec is not None:
            self.phase_center_app_dec = self.phase_center_app_dec[blt_map]
        if self.phase_center_frame_pa is not None:
            self.phase_center_frame_pa = self.phase_center_frame_pa[blt_map]
        if self.multi_phase_center:
            self.phase_center_id_array = self.phase_center_id_array[blt_map]
        self.reorder_blts(order=blt_order, minor_order=blt_minor_order)
        self.check()
def _convert_from_filetype(self, other):
"""
Convert from a file-type specific object to a UVData object.
Used in reads.
Parameters
----------
other : object that inherits from UVData
File type specific object to convert to UVData
"""
for p in other:
param = getattr(other, p)
setattr(self, p, param)
def _convert_to_filetype(self, filetype):
"""
Convert from a UVData object to a file-type specific object.
Used in writes.
Parameters
----------
filetype : str
Specifies what file type object to convert to. Options are: 'uvfits',
'fhd', 'miriad', 'uvh5', 'mir', 'ms'
Raises
------
ValueError
if filetype is not a known type
"""
if filetype == "uvfits":
from . import uvfits
other_obj = uvfits.UVFITS()
elif filetype == "fhd":
from . import fhd
other_obj = fhd.FHD()
elif filetype == "miriad":
from . import miriad
other_obj = miriad.Miriad()
elif filetype == "uvh5":
from . import uvh5
other_obj = uvh5.UVH5()
elif filetype == "mir":
from . import mir
other_obj = mir.Mir()
elif filetype == "ms":
from . import ms
other_obj = ms.MS()
else:
raise ValueError("filetype must be uvfits, mir, miriad, ms, fhd, or uvh5")
for p in self:
param = getattr(self, p)
setattr(other_obj, p, param)
return other_obj
def read_fhd(
self,
filelist,
use_model=False,
axis=None,
read_data=True,
background_lsts=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
):
"""
Read in data from a list of FHD files.
Parameters
----------
filelist : array_like of str
The list/array of FHD save files to read from. Must include at
least one polarization file, a params file, a layout file and a flag file.
An obs file is also required if `read_data` is False.
use_model : bool
Option to read in the model visibilities rather than the dirty
visibilities (the default is False, meaning the dirty visibilities
will be read).
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple data sets are passed.
read_data : bool
Read in the visibility, nsample and flag data. If set to False, only
the metadata will be read in. Setting read_data to False results in
a metadata only object. If read_data is False, an obs file must be
included in the filelist.
background_lsts : bool
When set to True, the lst_array is calculated in a background thread.
run_check : bool
Option to check for the existence and proper shapes of parameters
after after reading in the file (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done).
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
Raises
------
ValueError
If required files are missing or multiple files for any polarization
are included in filelist.
If there is no recognized key for visibility weights in the flags_file.
"""
from . import fhd
if isinstance(filelist[0], (list, tuple, np.ndarray)):
raise ValueError(
"Reading multiple files from class specific "
"read functions is no longer supported. "
"Use the generic `uvdata.read` function instead."
)
fhd_obj = fhd.FHD()
fhd_obj.read_fhd(
filelist,
use_model=use_model,
background_lsts=background_lsts,
read_data=read_data,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
self._convert_from_filetype(fhd_obj)
del fhd_obj
def read_mir(
self,
filepath,
isource=None,
irec=None,
isb=None,
corrchunk=None,
pseudo_cont=False,
):
"""
Read in data from an SMA MIR file.
Note that with the exception of filepath, the reset of the parameters are
used to sub-select a range of data that matches the limitations of the current
instantiation of pyuvdata -- namely 1 spectral window, 1 source. These could
be dropped in the future, as pyuvdata capabilities grow.
Parameters
----------
filepath : str
The file path to the MIR folder to read from.
isource : int
Source code for MIR dataset
irec : int
Receiver code for MIR dataset
isb : int
Sideband code for MIR dataset
corrchunk : int
Correlator chunk code for MIR dataset
pseudo_cont : boolean
Read in only pseudo-continuuum values. Default is false.
"""
from . import mir
mir_obj = mir.Mir()
mir_obj.read_mir(
filepath,
isource=isource,
irec=irec,
isb=isb,
corrchunk=corrchunk,
pseudo_cont=pseudo_cont,
)
self._convert_from_filetype(mir_obj)
del mir_obj
def read_miriad(
self,
filepath,
axis=None,
antenna_nums=None,
ant_str=None,
bls=None,
polarizations=None,
time_range=None,
read_data=True,
phase_type=None,
correct_lat_lon=True,
background_lsts=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
calc_lst=True,
fix_old_proj=False,
fix_use_ant_pos=True,
):
"""
Read in data from a miriad file.
Parameters
----------
filepath : str
The miriad root directory to read from.
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple files are passed.
antenna_nums : array_like of int, optional
The antennas numbers to read into the object.
bls : list of tuple, optional
A list of antenna number tuples (e.g. [(0, 1), (3, 2)]) or a list of
baseline 3-tuples (e.g. [(0, 1, 'xx'), (2, 3, 'yy')]) specifying baselines
to include when reading data into the object. For length-2 tuples,
the ordering of the numbers within the tuple does not matter. For
length-3 tuples, the polarization string is in the order of the two
antennas. If length-3 tuples are provided, `polarizations` must be
None.
ant_str : str, optional
A string containing information about what antenna numbers
and polarizations to include when reading data into the object.
Can be 'auto', 'cross', 'all', or combinations of antenna numbers
and polarizations (e.g. '1', '1_2', '1x_2y'). See tutorial for more
examples of valid strings and the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1, 2) and (2, 3) to return a valid
pyuvdata object.
An ant_str cannot be passed in addition to any of `antenna_nums`,
`bls` or `polarizations` parameters, if it is a ValueError will be raised.
polarizations : array_like of int or str, optional
List of polarization integers or strings to read-in. e.g. ['xx', 'yy', ...]
time_range : list of float, optional
len-2 list containing min and max range of times in Julian Date to
include when reading data into the object. e.g. [2458115.20, 2458115.40]
read_data : bool
Read in the visibility and flag data. If set to false,
only the metadata will be read in. Setting read_data to False
results in an incompletely defined object (check will not pass).
phase_type : str, optional
Option to specify the phasing status of the data. Options are 'drift',
'phased' or None. 'drift' means the data are zenith drift data,
'phased' means the data are phased to a single RA/Dec. Default is None
meaning it will be guessed at based on the file contents.
correct_lat_lon : bool
Option to update the latitude and longitude from the known_telescopes
list if the altitude is missing.
background_lsts : bool
When set to True, the lst_array is calculated in a background thread.
run_check : bool
Option to check for the existence and proper shapes of parameters
after after reading in the file (the default is True,
meaning the check will be run). Ignored if read_data is False.
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
Ignored if read_data is False.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done). Ignored if read_data is False.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
calc_lst : bool
Recalculate the LST values that are present within the file, useful in
cases where the "online" calculate values have precision or value errors.
Default is True.
fix_old_proj : bool
Applies a fix to uvw-coordinates and phasing, assuming that the old `phase`
method was used prior to writing the data, which had errors of the order of
one part in 1e4 - 1e5. See the phasing memo for more details. Default is
False.
fix_use_ant_pos : bool
If setting `fix_old_proj` to True, use the antenna positions to derive the
correct uvw-coordinates rather than using the baseline vectors. Default is
True.
Raises
------
IOError
If root file directory doesn't exist.
ValueError
If incompatible select keywords are set (e.g. `ant_str` with other
antenna selectors, `times` and `time_range`) or select keywords
exclude all data or if keywords are set to the wrong type.
If the data are multi source or have multiple
spectral windows.
If the metadata are not internally consistent.
"""
from . import miriad
if isinstance(filepath, (list, tuple, np.ndarray)):
raise ValueError(
"Reading multiple files from class specific "
"read functions is no longer supported. "
"Use the generic `uvdata.read` function instead."
)
miriad_obj = miriad.Miriad()
miriad_obj.read_miriad(
filepath,
correct_lat_lon=correct_lat_lon,
read_data=read_data,
phase_type=phase_type,
antenna_nums=antenna_nums,
ant_str=ant_str,
bls=bls,
polarizations=polarizations,
time_range=time_range,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
calc_lst=calc_lst,
fix_old_proj=fix_old_proj,
fix_use_ant_pos=fix_use_ant_pos,
)
self._convert_from_filetype(miriad_obj)
del miriad_obj
def read_ms(
self,
filepath,
axis=None,
data_column="DATA",
pol_order="AIPS",
background_lsts=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
ignore_single_chan=True,
raise_error=True,
read_weights=True,
):
"""
Read in data from a measurement set.
Parameters
----------
filepath : str
The measurement set root directory to read from.
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple files are passed.
data_column : str
name of CASA data column to read into data_array. Options are:
'DATA', 'MODEL', or 'CORRECTED_DATA'
pol_order : str
Option to specify polarizations order convention, options are
'CASA' or 'AIPS'.
background_lsts : bool
When set to True, the lst_array is calculated in a background thread.
run_check : bool
Option to check for the existence and proper shapes of parameters
after after reading in the file (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done).
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
ignore_single_chan : bool
Some measurement sets (e.g., those from ALMA) use single channel spectral
windows for recording pseudo-continuum channels or storing other metadata
in the track when the telescopes are not on source. Because of the way
the object is strutured (where all spectral windows are assumed to be
simultaneously recorded), this can significantly inflate the size and memory
footprint of UVData objects. By default, single channel windows are ignored
to avoid this issue, although they can be included if setting this parameter
equal to True.
raise_error : bool
The measurement set format allows for different spectral windows and
polarizations to have different metdata for the same time-baseline
combination, but UVData objects do not. If detected, by default the reader
will throw an error. However, if set to False, the reader will simply give
a warning, and will use the first value read in the file as the "correct"
metadata in the UVData object.
read_weights : bool
Read in the weights from the MS file, default is True. If false, the method
will set the `nsamples_array` to the same uniform value (namely 1.0).
Raises
------
IOError
If root file directory doesn't exist.
ValueError
If the `data_column` is not set to an allowed value.
If the data are have multiple subarrays or are multi source or have
multiple spectral windows.
If the data have multiple data description ID values.
"""
if isinstance(filepath, (list, tuple, np.ndarray)):
raise ValueError(
"Reading multiple files from class specific "
"read functions is no longer supported. "
"Use the generic `uvdata.read` function instead."
)
from . import ms
ms_obj = ms.MS()
ms_obj.read_ms(
filepath,
data_column=data_column,
pol_order=pol_order,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
ignore_single_chan=ignore_single_chan,
raise_error=raise_error,
read_weights=read_weights,
)
self._convert_from_filetype(ms_obj)
del ms_obj
def read_mwa_corr_fits(
    self,
    filelist,
    axis=None,
    use_aoflagger_flags=None,
    use_cotter_flags=None,
    remove_dig_gains=True,
    remove_coarse_band=True,
    correct_cable_len=False,
    correct_van_vleck=False,
    cheby_approx=True,
    flag_small_auto_ants=True,
    flag_small_sig_ants=None,
    propagate_coarse_flags=True,
    flag_init=True,
    edge_width=80e3,
    start_flag="goodtime",
    end_flag=0.0,
    flag_dc_offset=True,
    remove_flagged_ants=True,
    phase_to_pointing_center=False,
    read_data=True,
    data_array_dtype=np.complex64,
    nsample_array_dtype=np.float32,
    background_lsts=True,
    run_check=True,
    check_extra=True,
    run_check_acceptability=True,
    strict_uvw_antpos_check=False,
):
    """
    Read in MWA correlator gpu box files.

    The default settings remove some of the instrumental effects in the bandpass
    by dividing out the digital gains and the coarse band shape.
    If the desired output is raw correlator data, set remove_dig_gains=False,
    remove_coarse_band=False, correct_cable_len=False, and
    phase_to_pointing_center=False.

    Parameters
    ----------
    filelist : list of str
        The list of MWA correlator files to read from. Must include at
        least one fits file and only one metafits file per data set.
    axis : str
        Axis to concatenate files along. This enables fast concatenation
        along the specified axis without the normal checking that all other
        metadata agrees. This method does not guarantee correct resulting
        objects. Please see the docstring for fast_concat for details.
        Allowed values are: 'blt', 'freq', 'polarization'. Only used if
        multiple files are passed.
    use_aoflagger_flags : bool
        Option to use aoflagger mwaf flag files. Defaults to true if aoflagger
        flag files are submitted.
    use_cotter_flags : bool
        Being replaced by use_aoflagger_flags and will be removed in v2.4.
    remove_dig_gains : bool
        Option to divide out digital gains.
    remove_coarse_band : bool
        Option to divide out coarse band shape.
    correct_cable_len : bool
        Option to apply a cable delay correction.
    correct_van_vleck : bool
        Option to apply a van vleck correction.
    cheby_approx : bool
        Only used if correct_van_vleck is True. Option to implement the van
        vleck correction with a chebyshev polynomial approximation.
    flag_small_auto_ants : bool
        Only used if correct_van_vleck is True. Option to completely flag any
        antenna for which the autocorrelation falls below a threshold found by
        the Van Vleck correction to indicate bad data. Specifically, the
        threshold used is 0.5 * integration_time * channel_width. If set to False,
        only the times and frequencies at which the auto is below the
        threshold will be flagged for the antenna.
    flag_small_sig_ants : bool
        Being replaced with flag_small_auto_ants and will be removed in v2.4.
    propagate_coarse_flags : bool
        Option to propagate flags for missing coarse channel integrations
        across frequency.
    flag_init : bool
        Set to True in order to do routine flagging of coarse channel edges,
        start or end integrations, or the center fine channel of each coarse
        channel. See associated keywords.
    edge_width : float
        Only used if flag_init is True. Set to the width to flag on the edge
        of each coarse channel, in hz. Errors if not equal to integer
        multiple of channel_width. Set to 0 for no edge flagging.
    start_flag : float or str
        Only used if flag_init is True. The number of seconds to flag at the
        beginning of the observation. Set to 0 for no flagging. Default is
        'goodtime', which uses information in the metafits file to determine
        the length of time that should be flagged. Errors if input is not a
        float or 'goodtime'. Errors if float input is not equal to an
        integer multiple of the integration time.
    end_flag : float
        Only used if flag_init is True. Set to the number of seconds to flag
        at the end of the observation. Set to 0 for no flagging. Errors if
        not an integer multiple of the integration time.
    flag_dc_offset : bool
        Only used if flag_init is True. Set to True to flag the center fine
        channel of each coarse channel. Only used if file_type is
        'mwa_corr_fits'.
    remove_flagged_ants : bool
        Option to perform a select to remove antennas flagged in the metafits
        file. If correct_van_vleck and flag_small_auto_ants are both True then
        antennas flagged by the Van Vleck correction are also removed.
    phase_to_pointing_center : bool
        Option to phase to the observation pointing center.
    read_data : bool
        Read in the visibility and flag data. If set to false, only the
        basic header info and metadata read in. Setting read_data to False
        results in a metadata only object.
    data_array_dtype : numpy dtype
        Datatype to store the output data_array as. Must be either
        np.complex64 (single-precision real and imaginary) or np.complex128
        (double-precision real and imaginary).
    nsample_array_dtype : numpy dtype
        Datatype to store the output nsample_array as. Must be either
        np.float64 (double-precision), np.float32 (single-precision), or
        np.float16 (half-precision). Half-precision is only recommended for
        cases where no sampling or averaging of baselines will occur,
        because round-off errors can be quite large (~1e-3).
    background_lsts : bool
        When set to True, the lst_array is calculated in a background thread.
    run_check : bool
        Option to check for the existence and proper shapes of parameters
        after reading in the file (the default is True,
        meaning the check will be run).
    check_extra : bool
        Option to check optional parameters as well as required ones (the
        default is True, meaning the optional parameters will be checked).
    run_check_acceptability : bool
        Option to check acceptable range of the values of parameters after
        reading in the file (the default is True, meaning the acceptable
        range check will be done).
    strict_uvw_antpos_check : bool
        Option to raise an error rather than a warning if the check that
        uvws match antenna positions does not pass.

    Raises
    ------
    ValueError
        If required files are missing or multiple files metafits files are
        included in filelist.
        If files from different observations are included in filelist.
        If files in fileslist have different fine channel widths
        If file types other than fits, metafits, and mwaf files are included
        in filelist.
    """
    # Validate the input shape before importing the reader module so bad
    # input fails fast, matching the ordering used in read_ms.
    if isinstance(filelist[0], (list, tuple, np.ndarray)):
        raise ValueError(
            "Reading multiple files from class specific "
            "read functions is no longer supported. "
            "Use the generic `uvdata.read` function instead."
        )
    # Handle deprecated parameter aliases: the old names take effect but
    # emit a DeprecationWarning pointing at their replacements.
    if use_cotter_flags is not None:
        use_aoflagger_flags = use_cotter_flags
        warnings.warn(
            "Use `use_aoflagger_flags` instead of `use_cotter_flags`. "
            "`use_cotter_flags` is deprecated, and will be removed in "
            "pyuvdata v2.4.",
            DeprecationWarning,
        )
    if flag_small_sig_ants is not None:
        flag_small_auto_ants = flag_small_sig_ants
        warnings.warn(
            "Use `flag_small_auto_ants` instead of `flag_small_sig_ants`. "
            "`flag_small_sig_ants` is deprecated, and will be removed in "
            "pyuvdata v2.4.",
            DeprecationWarning,
        )
    from . import mwa_corr_fits

    # Delegate the actual reading to the MWACorrFITS subclass, then copy
    # its attributes onto this object and drop the temporary instance.
    corr_obj = mwa_corr_fits.MWACorrFITS()
    corr_obj.read_mwa_corr_fits(
        filelist,
        use_aoflagger_flags=use_aoflagger_flags,
        remove_dig_gains=remove_dig_gains,
        remove_coarse_band=remove_coarse_band,
        correct_cable_len=correct_cable_len,
        correct_van_vleck=correct_van_vleck,
        cheby_approx=cheby_approx,
        flag_small_auto_ants=flag_small_auto_ants,
        propagate_coarse_flags=propagate_coarse_flags,
        flag_init=flag_init,
        edge_width=edge_width,
        start_flag=start_flag,
        end_flag=end_flag,
        flag_dc_offset=flag_dc_offset,
        remove_flagged_ants=remove_flagged_ants,
        phase_to_pointing_center=phase_to_pointing_center,
        read_data=read_data,
        data_array_dtype=data_array_dtype,
        nsample_array_dtype=nsample_array_dtype,
        background_lsts=background_lsts,
        run_check=run_check,
        check_extra=check_extra,
        run_check_acceptability=run_check_acceptability,
        strict_uvw_antpos_check=strict_uvw_antpos_check,
    )
    self._convert_from_filetype(corr_obj)
    del corr_obj
def read_uvfits(
    self,
    filename,
    axis=None,
    antenna_nums=None,
    antenna_names=None,
    ant_str=None,
    bls=None,
    frequencies=None,
    freq_chans=None,
    times=None,
    time_range=None,
    lsts=None,
    lst_range=None,
    polarizations=None,
    blt_inds=None,
    keep_all_metadata=True,
    read_data=True,
    background_lsts=True,
    run_check=True,
    check_extra=True,
    run_check_acceptability=True,
    strict_uvw_antpos_check=False,
    fix_old_proj=None,
    fix_use_ant_pos=True,
):
    """
    Read in header, metadata and data from a single uvfits file.

    Parameters
    ----------
    filename : str
        The uvfits file to read from.
    axis : str
        Axis to concatenate files along. This enables fast concatenation
        along the specified axis without the normal checking that all other
        metadata agrees. This method does not guarantee correct resulting
        objects. Please see the docstring for fast_concat for details.
        Allowed values are: 'blt', 'freq', 'polarization'. Only used if
        multiple files are passed.
    antenna_nums : array_like of int, optional
        The antennas numbers to include when reading data into the object
        (antenna positions and names for the removed antennas will be retained
        unless `keep_all_metadata` is False). This cannot be provided if
        `antenna_names` is also provided. Ignored if read_data is False.
    antenna_names : array_like of str, optional
        The antennas names to include when reading data into the object
        (antenna positions and names for the removed antennas will be retained
        unless `keep_all_metadata` is False). This cannot be provided if
        `antenna_nums` is also provided. Ignored if read_data is False.
    bls : list of tuple, optional
        A list of antenna number tuples (e.g. [(0, 1), (3, 2)]) or a list of
        baseline 3-tuples (e.g. [(0, 1, 'xx'), (2, 3, 'yy')]) specifying baselines
        to include when reading data into the object. For length-2 tuples,
        the ordering of the numbers within the tuple does not matter. For
        length-3 tuples, the polarization string is in the order of the two
        antennas. If length-3 tuples are provided, `polarizations` must be
        None. Ignored if read_data is False.
    ant_str : str, optional
        A string containing information about what antenna numbers
        and polarizations to include when reading data into the object.
        Can be 'auto', 'cross', 'all', or combinations of antenna numbers
        and polarizations (e.g. '1', '1_2', '1x_2y'). See tutorial for more
        examples of valid strings and the behavior of different forms for ant_str.
        If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
        be kept for both baselines (1, 2) and (2, 3) to return a valid
        pyuvdata object.
        An ant_str cannot be passed in addition to any of `antenna_nums`,
        `antenna_names`, `bls` args or the `polarizations` parameters;
        if it is, a ValueError will be raised. Ignored if read_data is False.
    frequencies : array_like of float, optional
        The frequencies to include when reading data into the object, each
        value passed here should exist in the freq_array. Ignored if
        read_data is False.
    freq_chans : array_like of int, optional
        The frequency channel numbers to include when reading data into the
        object. Ignored if read_data is False.
    times : array_like of float, optional
        The times to include when reading data into the object, each value
        passed here should exist in the time_array in the file.
        Cannot be used with `time_range`.
    time_range : array_like of float, optional
        The time range in Julian Date to include when reading data into
        the object, must be length 2. Some of the times in the file should
        fall between the first and last elements.
        Cannot be used with `times`.
    lsts : array_like of float, optional
        The local sidereal times (LSTs) to keep in the object, each value
        passed here should exist in the lst_array. Cannot be used with
        `times`, `time_range`, or `lst_range`.
    lst_range : array_like of float, optional
        The local sidereal time (LST) range in radians to keep in the
        object, must be of length 2. Some of the LSTs in the object should
        fall between the first and last elements. If the second value is
        smaller than the first, the LSTs are treated as having phase-wrapped
        around LST = 2*pi = 0, and the LSTs kept on the object will run from
        the larger value, through 0, and end at the smaller value.
    polarizations : array_like of int, optional
        The polarizations numbers to include when reading data into the
        object, each value passed here should exist in the polarization_array.
        Ignored if read_data is False.
    blt_inds : array_like of int, optional
        The baseline-time indices to include when reading data into the
        object. This is not commonly used. Ignored if read_data is False.
    keep_all_metadata : bool
        Option to keep all the metadata associated with antennas, even those
        that do not have data associated with them after the select option.
    read_data : bool
        Read in the visibility and flag data. If set to false, only the
        basic header info and metadata read in. Setting read_data to False
        results in a metadata only object.
    background_lsts : bool
        When set to True, the lst_array is calculated in a background thread.
    run_check : bool
        Option to check for the existence and proper shapes of parameters
        after reading in the file (the default is True,
        meaning the check will be run). Ignored if read_data is False.
    check_extra : bool
        Option to check optional parameters as well as required ones (the
        default is True, meaning the optional parameters will be checked).
        Ignored if read_data is False.
    run_check_acceptability : bool
        Option to check acceptable range of the values of parameters after
        reading in the file (the default is True, meaning the acceptable
        range check will be done). Ignored if read_data is False.
    strict_uvw_antpos_check : bool
        Option to raise an error rather than a warning if the check that
        uvws match antenna positions does not pass.
    fix_old_proj : bool
        Applies a fix to uvw-coordinates and phasing, assuming that the old `phase`
        method was used prior to writing the data, which had errors of the order of
        one part in 1e4 - 1e5. See the phasing memo for more details. Default is
        False.
    fix_use_ant_pos : bool
        If setting `fix_old_proj` to True, use the antenna positions to derive the
        correct uvw-coordinates rather than using the baseline vectors. Default is
        True.

    Raises
    ------
    IOError
        If filename doesn't exist.
    ValueError
        If incompatible select keywords are set (e.g. `ant_str` with other
        antenna selectors, `times` and `time_range`) or select keywords
        exclude all data or if keywords are set to the wrong type.
        If the data are multi source or have multiple
        spectral windows.
        If the metadata are not internally consistent or missing.
    """
    # Validate the input before importing the reader module so that bad
    # input fails fast, matching the ordering used in read_ms.
    if isinstance(filename, (list, tuple, np.ndarray)):
        raise ValueError(
            "Reading multiple files from class specific "
            "read functions is no longer supported. "
            "Use the generic `uvdata.read` function instead."
        )
    from . import uvfits

    # Delegate the actual reading to the UVFITS subclass, then copy its
    # attributes onto this object and drop the temporary instance.
    uvfits_obj = uvfits.UVFITS()
    uvfits_obj.read_uvfits(
        filename,
        antenna_nums=antenna_nums,
        antenna_names=antenna_names,
        ant_str=ant_str,
        bls=bls,
        frequencies=frequencies,
        freq_chans=freq_chans,
        times=times,
        time_range=time_range,
        lsts=lsts,
        lst_range=lst_range,
        polarizations=polarizations,
        blt_inds=blt_inds,
        keep_all_metadata=keep_all_metadata,
        read_data=read_data,
        background_lsts=background_lsts,
        run_check=run_check,
        check_extra=check_extra,
        run_check_acceptability=run_check_acceptability,
        strict_uvw_antpos_check=strict_uvw_antpos_check,
        fix_old_proj=fix_old_proj,
        fix_use_ant_pos=fix_use_ant_pos,
    )
    self._convert_from_filetype(uvfits_obj)
    del uvfits_obj
def read_uvh5(
    self,
    filename,
    axis=None,
    antenna_nums=None,
    antenna_names=None,
    ant_str=None,
    bls=None,
    frequencies=None,
    freq_chans=None,
    times=None,
    time_range=None,
    lsts=None,
    lst_range=None,
    polarizations=None,
    blt_inds=None,
    keep_all_metadata=True,
    read_data=True,
    data_array_dtype=np.complex128,
    multidim_index=False,
    background_lsts=True,
    run_check=True,
    check_extra=True,
    run_check_acceptability=True,
    strict_uvw_antpos_check=False,
    fix_old_proj=None,
    fix_use_ant_pos=True,
):
    """
    Read a UVH5 file.

    Parameters
    ----------
    filename : str
        The UVH5 file to read from.
    axis : str
        Axis to concatenate files along. This enables fast concatenation
        along the specified axis without the normal checking that all other
        metadata agrees. This method does not guarantee correct resulting
        objects. Please see the docstring for fast_concat for details.
        Allowed values are: 'blt', 'freq', 'polarization'. Only used if
        multiple files are passed.
    antenna_nums : array_like of int, optional
        The antennas numbers to include when reading data into the object
        (antenna positions and names for the removed antennas will be retained
        unless `keep_all_metadata` is False). This cannot be provided if
        `antenna_names` is also provided. Ignored if read_data is False.
    antenna_names : array_like of str, optional
        The antennas names to include when reading data into the object
        (antenna positions and names for the removed antennas will be retained
        unless `keep_all_metadata` is False). This cannot be provided if
        `antenna_nums` is also provided. Ignored if read_data is False.
    bls : list of tuple, optional
        A list of antenna number tuples (e.g. [(0, 1), (3, 2)]) or a list of
        baseline 3-tuples (e.g. [(0, 1, 'xx'), (2, 3, 'yy')]) specifying baselines
        to include when reading data into the object. For length-2 tuples,
        the ordering of the numbers within the tuple does not matter. For
        length-3 tuples, the polarization string is in the order of the two
        antennas. If length-3 tuples are provided, `polarizations` must be
        None. Ignored if read_data is False.
    ant_str : str, optional
        A string containing information about what antenna numbers
        and polarizations to include when reading data into the object.
        Can be 'auto', 'cross', 'all', or combinations of antenna numbers
        and polarizations (e.g. '1', '1_2', '1x_2y'). See tutorial for more
        examples of valid strings and the behavior of different forms for ant_str.
        If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
        be kept for both baselines (1, 2) and (2, 3) to return a valid
        pyuvdata object.
        An ant_str cannot be passed in addition to any of `antenna_nums`,
        `antenna_names`, `bls` args or the `polarizations` parameters;
        if it is, a ValueError will be raised. Ignored if read_data is False.
    frequencies : array_like of float, optional
        The frequencies to include when reading data into the object, each
        value passed here should exist in the freq_array. Ignored if
        read_data is False.
    freq_chans : array_like of int, optional
        The frequency channel numbers to include when reading data into the
        object. Ignored if read_data is False.
    times : array_like of float, optional
        The times to include when reading data into the object, each value
        passed here should exist in the time_array in the file.
        Cannot be used with `time_range`.
    time_range : array_like of float, optional
        The time range in Julian Date to include when reading data into
        the object, must be length 2. Some of the times in the file should
        fall between the first and last elements.
        Cannot be used with `times`.
    lsts : array_like of float, optional
        The local sidereal times (LSTs) to keep in the object, each value
        passed here should exist in the lst_array. Cannot be used with
        `times`, `time_range`, or `lst_range`.
    lst_range : array_like of float, optional
        The local sidereal time (LST) range in radians to keep in the
        object, must be of length 2. Some of the LSTs in the object should
        fall between the first and last elements. If the second value is
        smaller than the first, the LSTs are treated as having phase-wrapped
        around LST = 2*pi = 0, and the LSTs kept on the object will run from
        the larger value, through 0, and end at the smaller value.
    polarizations : array_like of int, optional
        The polarizations numbers to include when reading data into the
        object, each value passed here should exist in the polarization_array.
        Ignored if read_data is False.
    blt_inds : array_like of int, optional
        The baseline-time indices to include when reading data into the
        object. This is not commonly used. Ignored if read_data is False.
    keep_all_metadata : bool
        Option to keep all the metadata associated with antennas, even those
        that do not have data associated with them after the select option.
    read_data : bool
        Read in the visibility and flag data. If set to false, only the
        basic header info and metadata will be read in. Setting read_data to
        False results in an incompletely defined object (check will not pass).
    data_array_dtype : numpy dtype
        Datatype to store the output data_array as. Must be either
        np.complex64 (single-precision real and imaginary) or np.complex128 (double-
        precision real and imaginary). Only used if the datatype of the visibility
        data on-disk is not 'c8' or 'c16'.
    multidim_index : bool
        [Only for HDF5] If True, attempt to index the HDF5 dataset
        simultaneously along all data axes. Otherwise index one axis at-a-time.
        This only works if data selection is sliceable along all but one axis.
        If indices are not well-matched to data chunks, this can be slow.
    background_lsts : bool
        When set to True, the lst_array is calculated in a background thread.
    run_check : bool
        Option to check for the existence and proper shapes of parameters
        after reading in the file (the default is True,
        meaning the check will be run). Ignored if read_data is False.
    check_extra : bool
        Option to check optional parameters as well as required ones (the
        default is True, meaning the optional parameters will be checked).
        Ignored if read_data is False.
    run_check_acceptability : bool
        Option to check acceptable range of the values of parameters after
        reading in the file (the default is True, meaning the acceptable
        range check will be done). Ignored if read_data is False.
    strict_uvw_antpos_check : bool
        Option to raise an error rather than a warning if the check that
        uvws match antenna positions does not pass.
    fix_old_proj : bool
        Applies a fix to uvw-coordinates and phasing, assuming that the old `phase`
        method was used prior to writing the data, which had errors of the order of
        one part in 1e4 - 1e5. See the phasing memo for more details. Default is
        to apply the correction if the attributes `phase_center_app_ra` and
        `phase_center_app_dec` are missing (as they were introduced alongside the
        new phasing method).
    fix_use_ant_pos : bool
        If setting `fix_old_proj` to True, use the antenna positions to derive the
        correct uvw-coordinates rather than using the baseline vectors. Default is
        True.

    Raises
    ------
    IOError
        If filename doesn't exist.
    ValueError
        If the data_array_dtype is not a complex dtype.
        If incompatible select keywords are set (e.g. `ant_str` with other
        antenna selectors, `times` and `time_range`) or select keywords
        exclude all data or if keywords are set to the wrong type.
    """
    # Validate the input before importing the reader module so that bad
    # input fails fast, matching the ordering used in read_ms.
    if isinstance(filename, (list, tuple, np.ndarray)):
        raise ValueError(
            "Reading multiple files from class specific "
            "read functions is no longer supported. "
            "Use the generic `uvdata.read` function instead."
        )
    from . import uvh5

    # Delegate the actual reading to the UVH5 subclass, then copy its
    # attributes onto this object and drop the temporary instance.
    uvh5_obj = uvh5.UVH5()
    uvh5_obj.read_uvh5(
        filename,
        antenna_nums=antenna_nums,
        antenna_names=antenna_names,
        ant_str=ant_str,
        bls=bls,
        frequencies=frequencies,
        freq_chans=freq_chans,
        times=times,
        time_range=time_range,
        lsts=lsts,
        lst_range=lst_range,
        polarizations=polarizations,
        blt_inds=blt_inds,
        data_array_dtype=data_array_dtype,
        keep_all_metadata=keep_all_metadata,
        read_data=read_data,
        multidim_index=multidim_index,
        background_lsts=background_lsts,
        run_check=run_check,
        check_extra=check_extra,
        run_check_acceptability=run_check_acceptability,
        strict_uvw_antpos_check=strict_uvw_antpos_check,
        fix_old_proj=fix_old_proj,
        fix_use_ant_pos=fix_use_ant_pos,
    )
    self._convert_from_filetype(uvh5_obj)
    del uvh5_obj
def read(
self,
filename,
axis=None,
file_type=None,
allow_rephase=True,
phase_center_radec=None,
unphase_to_drift=False,
phase_frame="icrs",
phase_epoch=None,
orig_phase_frame=None,
phase_use_ant_pos=True,
antenna_nums=None,
antenna_names=None,
ant_str=None,
bls=None,
frequencies=None,
freq_chans=None,
times=None,
polarizations=None,
blt_inds=None,
time_range=None,
keep_all_metadata=True,
read_data=True,
phase_type=None,
correct_lat_lon=True,
use_model=False,
data_column="DATA",
pol_order="AIPS",
data_array_dtype=np.complex128,
nsample_array_dtype=np.float32,
use_aoflagger_flags=None,
use_cotter_flags=None,
remove_dig_gains=True,
remove_coarse_band=True,
correct_cable_len=False,
correct_van_vleck=False,
cheby_approx=True,
flag_small_auto_ants=True,
flag_small_sig_ants=None,
propagate_coarse_flags=True,
flag_init=True,
edge_width=80e3,
start_flag="goodtime",
end_flag=0.0,
flag_dc_offset=True,
remove_flagged_ants=True,
phase_to_pointing_center=False,
skip_bad_files=False,
multidim_index=False,
background_lsts=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
isource=None,
irec=None,
isb=None,
corrchunk=None,
pseudo_cont=False,
lsts=None,
lst_range=None,
calc_lst=True,
fix_old_proj=None,
fix_use_ant_pos=True,
make_multi_phase=False,
ignore_name=False,
):
"""
Read a generic file into a UVData object.
Parameters
----------
filename : str or array_like of str
The file(s) or list(s) (or array(s)) of files to read from.
file_type : str
One of ['uvfits', 'miriad', 'fhd', 'ms', 'uvh5'] or None.
If None, the code attempts to guess what the file type is.
For miriad and ms types, this is based on the standard directory
structure. For FHD, uvfits and uvh5 files it's based on file
extensions (FHD: .sav, .txt; uvfits: .uvfits; uvh5: .uvh5).
Note that if a list of datasets is passed, the file type is
determined from the first dataset.
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple files are passed.
allow_rephase : bool
Allow rephasing of phased file data so that data from files with
different phasing can be combined.
phase_center_radec : array_like of float
The phase center to phase the files to before adding the objects in
radians (in the ICRS frame). If set to None and multiple files are
read with different phase centers, the phase center of the first
file will be used.
unphase_to_drift : bool
Unphase the data from the files before combining them.
phase_frame : str
The astropy frame to phase to. Either 'icrs' or 'gcrs'.
'gcrs' accounts for precession & nutation,
'icrs' accounts for precession, nutation & abberation.
Only used if `phase_center_radec` is set.
orig_phase_frame : str
The original phase frame of the data (if it is already phased). Used
for unphasing, only if `unphase_to_drift` or `phase_center_radec`
are set. Defaults to using the 'phase_center_frame' attribute or
'icrs' if that attribute is None.
phase_use_ant_pos : bool
If True, calculate the phased or unphased uvws directly from the
antenna positions rather than from the existing uvws.
Only used if `unphase_to_drift` or `phase_center_radec` are set.
antenna_nums : array_like of int, optional
The antennas numbers to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_names` is also provided.
antenna_names : array_like of str, optional
The antennas names to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_nums` is also provided.
bls : list of tuple, optional
A list of antenna number tuples (e.g. [(0, 1), (3, 2)]) or a list of
baseline 3-tuples (e.g. [(0, 1, 'xx'), (2, 3, 'yy')]) specifying baselines
to include when reading data into the object. For length-2 tuples,
the ordering of the numbers within the tuple does not matter. For
length-3 tuples, the polarization string is in the order of the two
antennas. If length-3 tuples are provided, `polarizations` must be
None.
ant_str : str, optional
A string containing information about what antenna numbers
and polarizations to include when reading data into the object.
Can be 'auto', 'cross', 'all', or combinations of antenna numbers
and polarizations (e.g. '1', '1_2', '1x_2y'). See tutorial for more
examples of valid strings and the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1, 2) and (2, 3) to return a valid
pyuvdata object.
An ant_str cannot be passed in addition to any of `antenna_nums`,
`antenna_names`, `bls` args or the `polarizations` parameters,
if it is a ValueError will be raised.
frequencies : array_like of float, optional
The frequencies to include when reading data into the object, each
value passed here should exist in the freq_array.
freq_chans : array_like of int, optional
The frequency channel numbers to include when reading data into the
object. Ignored if read_data is False.
times : array_like of float, optional
The times to include when reading data into the object, each value
passed here should exist in the time_array in the file.
Cannot be used with `time_range`.
time_range : array_like of float, optional
The time range in Julian Date to include when reading data into
the object, must be length 2. Some of the times in the file should
fall between the first and last elements.
Cannot be used with `times`.
polarizations : array_like of int, optional
The polarizations numbers to include when reading data into the
object, each value passed here should exist in the polarization_array.
blt_inds : array_like of int, optional
The baseline-time indices to include when reading data into the
object. This is not commonly used.
keep_all_metadata : bool
Option to keep all the metadata associated with antennas, even those
that do not have data associated with them after the select option.
read_data : bool
Read in the data. Only used if file_type is 'uvfits',
'miriad' or 'uvh5'. If set to False, only the metadata will be
read in. Setting read_data to False results in a metdata only
object.
phase_type : str, optional
Option to specify the phasing status of the data. Only used if
file_type is 'miriad'. Options are 'drift', 'phased' or None.
'drift' means the data are zenith drift data, 'phased' means the
data are phased to a single RA/Dec. Default is None
meaning it will be guessed at based on the file contents.
correct_lat_lon : bool
Option to update the latitude and longitude from the known_telescopes
list if the altitude is missing. Only used if file_type is 'miriad'.
use_model : bool
Option to read in the model visibilities rather than the dirty
visibilities (the default is False, meaning the dirty visibilities
will be read). Only used if file_type is 'fhd'.
data_column : str
name of CASA data column to read into data_array. Options are:
'DATA', 'MODEL', or 'CORRECTED_DATA'. Only used if file_type is 'ms'.
pol_order : str
Option to specify polarizations order convention, options are
'CASA' or 'AIPS'. Only used if file_type is 'ms'.
data_array_dtype : numpy dtype
Datatype to store the output data_array as. Must be either
np.complex64 (single-precision real and imaginary) or np.complex128 (double-
precision real and imaginary). Only used if the datatype of the visibility
data on-disk is not 'c8' or 'c16'. Only used if file_type is 'uvh5' or
'mwa_corr_fits'.
nsample_array_dtype : numpy dtype
Datatype to store the output nsample_array as. Must be either
np.float64 (double-precision), np.float32 (single-precision), or
np.float16 (half-precision). Half-precision is only recommended for
cases where no sampling or averaging of baselines will occur,
because round-off errors can be quite large (~1e-3). Only used if
file_type is 'mwa_corr_fits'.
use_aoflagger_flags : bool
Option to use aoflagger mwaf flag files. Defaults to true if aoflagger
flag files are submitted.
use_cotter_flags : bool
Being replaced by use_aoflagger_flags and will be removed in v2.4.
remove_dig_gains : bool
Only used if file_type is 'mwa_corr_fits'. Option to divide out digital
gains.
remove_coarse_band : bool
Only used if file_type is 'mwa_corr_fits'. Option to divide out coarse
band shape.
correct_cable_len : bool
Flag to apply cable length correction. Only used if file_type is
'mwa_corr_fits'.
correct_van_vleck : bool
Flag to apply a van vleck correction. Only used if file_type is
'mwa_corr_fits'.
cheby_approx : bool
Only used if file_type is 'mwa_corr_fits' and correct_van_vleck is True.
Option to implement the van vleck correction with a chebyshev polynomial
approximation. Set to False to run the integral version of the correction.
flag_small_auto_ants : bool
Only used if correct_van_vleck is True. Option to completely flag any
antenna for which the autocorrelation falls below a threshold found by
the Van Vleck correction to indicate bad data. Specifically, the
threshold used is 0.5 * integration_time * channel_width. If set to False,
only the times and frequencies at which the auto is below the
threshold will be flagged for the antenna. Only used if file_type is
'mwa_corr_fits'.
flag_small_sig_ants : bool
Being replaced by flag_small_auto_ants and will be removed in v2.4.
propogate_coarse_flags : bool
Option to propogate flags for missing coarse channel integrations
across frequency. Only used if file_type is 'mwa_corr_fits'.
flag_init: bool
Only used if file_type is 'mwa_corr_fits'. Set to True in order to
do routine flagging of coarse channel edges, start or end
integrations, or the center fine channel of each coarse
channel. See associated keywords.
edge_width: float
Only used if file_type is 'mwa_corr_fits' and flag_init is True. Set
to the width to flag on the edge of each coarse channel, in hz.
Errors if not equal to integer multiple of channel_width. Set to 0
for no edge flagging.
start_flag: float or str
Only used if flag_init is True. The number of seconds to flag at the
beginning of the observation. Set to 0 for no flagging. Default is
'goodtime', which uses information in the metafits file to determine
the length of time that should be flagged. Errors if input is not a
float or 'goodtime'. Errors if float input is not equal to an
integer multiple of the integration time.
end_flag: floats
Only used if file_type is 'mwa_corr_fits' and flag_init is True. Set
to the number of seconds to flag at the end of the observation. Set
to 0 for no flagging. Errors if not an integer multiple of the
integration time.
flag_dc_offset: bool
Only used if file_type is 'mwa_corr_fits' and flag_init is True. Set
to True to flag the center fine channel of each coarse channel. Only
used if file_type is 'mwa_corr_fits'.
remove_flagged_ants : bool
Option to perform a select to remove antennas flagged in the metafits
file. If correct_van_vleck and flag_small_auto_ants are both True then
antennas flagged by the Van Vleck correction are also removed.
Only used if file_type is 'mwa_corr_fits'.
phase_to_pointing_center : bool
Flag to phase to the pointing center. Only used if file_type is
'mwa_corr_fits'. Cannot be set if phase_center_radec is not None.
skip_bad_files : bool
Option when reading multiple files to catch read errors such that
the read continues even if one or more files are corrupted. Files
that produce errors will be printed. Default is False (files will
not be skipped).
multidim_index : bool
[Only for HDF5] If True, attempt to index the HDF5 dataset
simultaneously along all data axes. Otherwise index one axis at-a-time.
This only works if data selection is sliceable along all but one axis.
If indices are not well-matched to data chunks, this can be slow.
background_lsts : bool
When set to True, the lst_array is calculated in a background thread.
run_check : bool
Option to check for the existence and proper shapes of parameters
after reading in the file (the default is True,
meaning the check will be run). Ignored if read_data is False.
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
Ignored if read_data is False.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done). Ignored if read_data is False.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
isource : int
Source code for MIR dataset
irec : int
Receiver code for MIR dataset
isb : int
Sideband code for MIR dataset
corrchunk : int
Correlator chunk code for MIR dataset
pseudo_cont : boolean
Read in only pseudo-continuum values in MIR dataset. Default is false.
lsts : array_like of float, optional
The local sidereal times (LSTs) to keep in the object, each value
passed here should exist in the lst_array. Cannot be used with
`times`, `time_range`, or `lst_range`.
lst_range : array_like of float, optional
The local sidereal time (LST) range in radians to keep in the
object, must be of length 2. Some of the LSTs in the object should
fall between the first and last elements. If the second value is
smaller than the first, the LSTs are treated as having phase-wrapped
around LST = 2*pi = 0, and the LSTs kept on the object will run from
the larger value, through 0, and end at the smaller value.
calc_lst : bool
Recalculate the LST values that are present within the file, useful in
cases where the "online" calculate values have precision or value errors.
Default is True. Only applies to MIRIAD files.
fix_old_proj : bool
Applies a fix to uvw-coordinates and phasing, assuming that the old `phase`
method was used prior to writing the data, which had errors of the order of
one part in 1e4 - 1e5. See the phasing memo for more details. Default is
False, unless reading a UVH5 file that is missing the `phase_center_app_ra`
and `phase_center_app_dec` attributes (as these were introduced at the same
time as the new `phase` method), in which case the default is True.
fix_use_ant_pos : bool
If setting `fix_old_proj` to True, use the antenna positions to derive the
correct uvw-coordinates rather than using the baseline vectors. Default is
True.
make_multi_phase : bool
Option to make the output a multi phase center dataset, capable of holding
data on multiple phase centers. By default, this is only done if reading
in a file with multiple sources.
ignore_name : bool
Only relevant when reading in multiple files, which are concatenated into a
single UVData object. Option to ignore the name of the phase center when
combining multiple files, which would otherwise result in an error being
raised because of attributes not matching. Doing so effectively adopts the
name found in the first file read in. Default is False.
Raises
------
ValueError
If the file_type is not set and cannot be determined from the file name.
If incompatible select keywords are set (e.g. `ant_str` with other
antenna selectors, `times` and `time_range`) or select keywords
exclude all data or if keywords are set to the wrong type.
If the data are multi source or have multiple
spectral windows.
If phase_center_radec is not None and is not length 2.
"""
if isinstance(filename, (list, tuple, np.ndarray)):
# this is either a list of separate files to read or a list of
# FHD files or MWA correlator FITS files
if isinstance(filename[0], (list, tuple, np.ndarray)):
if file_type is None:
# this must be a list of lists of FHD or MWA correlator FITS
basename, extension = os.path.splitext(filename[0][0])
if extension == ".sav" or extension == ".txt":
file_type = "fhd"
elif (
extension == ".fits"
or extension == ".metafits"
or extension == ".mwaf"
):
file_type = "mwa_corr_fits"
multi = True
else:
if file_type is None:
basename, extension = os.path.splitext(filename[0])
if extension == ".sav" or extension == ".txt":
file_type = "fhd"
elif (
extension == ".fits"
or extension == ".metafits"
or extension == ".mwaf"
):
file_type = "mwa_corr_fits"
if file_type == "fhd" or file_type == "mwa_corr_fits":
multi = False
else:
multi = True
else:
multi = False
if file_type is None:
if multi:
file_test = filename[0]
else:
file_test = filename
if os.path.isdir(file_test):
# it's a directory, so it's either miriad, mir, or ms file type
if os.path.exists(os.path.join(file_test, "vartable")):
# It's miriad.
file_type = "miriad"
elif os.path.exists(os.path.join(file_test, "OBSERVATION")):
# It's a measurement set.
file_type = "ms"
elif os.path.exists(os.path.join(file_test, "sch_read")):
# It's Submillimeter Array mir format.
file_type = "mir"
else:
basename, extension = os.path.splitext(file_test)
if extension == ".uvfits":
file_type = "uvfits"
elif extension == ".uvh5":
file_type = "uvh5"
if file_type is None:
raise ValueError(
"File type could not be determined, use the "
"file_type keyword to specify the type."
)
if time_range is not None:
if times is not None:
raise ValueError("Only one of times and time_range can be provided.")
if antenna_names is not None and antenna_nums is not None:
raise ValueError(
"Only one of antenna_nums and antenna_names can " "be provided."
)
if multi:
file_num = 0
file_warnings = ""
unread = True
f = filename[file_num]
while unread and file_num < len(filename):
try:
self.read(
filename[file_num],
file_type=file_type,
antenna_nums=antenna_nums,
antenna_names=antenna_names,
ant_str=ant_str,
bls=bls,
frequencies=frequencies,
freq_chans=freq_chans,
times=times,
polarizations=polarizations,
blt_inds=blt_inds,
time_range=time_range,
keep_all_metadata=keep_all_metadata,
read_data=read_data,
phase_type=phase_type,
correct_lat_lon=correct_lat_lon,
use_model=use_model,
data_column=data_column,
pol_order=pol_order,
data_array_dtype=data_array_dtype,
nsample_array_dtype=nsample_array_dtype,
skip_bad_files=skip_bad_files,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
isource=None,
irec=irec,
isb=isb,
corrchunk=corrchunk,
pseudo_cont=pseudo_cont,
calc_lst=calc_lst,
fix_old_proj=fix_old_proj,
fix_use_ant_pos=fix_use_ant_pos,
make_multi_phase=make_multi_phase,
)
unread = False
except KeyError as err:
file_warnings = (
file_warnings + f"Failed to read {f} due to KeyError: {err}\n"
)
file_num += 1
if skip_bad_files is False:
raise
except ValueError as err:
file_warnings = (
file_warnings + f"Failed to read {f} due to ValueError: {err}\n"
)
file_num += 1
if skip_bad_files is False:
raise
except OSError as err: # pragma: nocover
file_warnings = (
file_warnings + f"Failed to read {f} due to OSError: {err}\n"
)
file_num += 1
if skip_bad_files is False:
raise
if (
allow_rephase
and phase_center_radec is None
and not unphase_to_drift
and self.phase_type == "phased"
and not self.multi_phase_center
and not make_multi_phase
):
# set the phase center to be the phase center of the first file
phase_center_radec = [self.phase_center_ra, self.phase_center_dec]
phase_frame = self.phase_center_frame
phase_epoch = self.phase_center_epoch
uv_list = []
if len(filename) > file_num + 1:
for f in filename[file_num + 1 :]:
uv2 = UVData()
try:
uv2.read(
f,
file_type=file_type,
phase_center_radec=phase_center_radec,
phase_frame=phase_frame,
phase_epoch=phase_epoch,
antenna_nums=antenna_nums,
antenna_names=antenna_names,
ant_str=ant_str,
bls=bls,
frequencies=frequencies,
freq_chans=freq_chans,
times=times,
polarizations=polarizations,
blt_inds=blt_inds,
time_range=time_range,
keep_all_metadata=keep_all_metadata,
read_data=read_data,
phase_type=phase_type,
correct_lat_lon=correct_lat_lon,
use_model=use_model,
data_column=data_column,
pol_order=pol_order,
data_array_dtype=data_array_dtype,
nsample_array_dtype=nsample_array_dtype,
skip_bad_files=skip_bad_files,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
isource=None,
irec=irec,
isb=isb,
corrchunk=corrchunk,
pseudo_cont=pseudo_cont,
calc_lst=calc_lst,
fix_old_proj=fix_old_proj,
fix_use_ant_pos=fix_use_ant_pos,
make_multi_phase=make_multi_phase,
)
uv_list.append(uv2)
except KeyError as err:
file_warnings = (
file_warnings
+ f"Failed to read {f} due to KeyError: {err}\n"
)
if skip_bad_files:
continue
else:
raise
except ValueError as err:
file_warnings = (
file_warnings
+ f"Failed to read {f} due to ValueError: {err}\n"
)
if skip_bad_files:
continue
else:
raise
except OSError as err: # pragma: nocover
file_warnings = (
file_warnings
+ f"Failed to read {f} due to OSError: {err}\n"
)
if skip_bad_files:
continue
else:
raise
if unread is True:
warnings.warn(
"########################################################\n"
"ALL FILES FAILED ON READ - NO READABLE FILES IN FILENAME\n"
"########################################################"
)
elif len(file_warnings) > 0:
warnings.warn(file_warnings)
# Concatenate once at end
if axis is not None:
# Rewrote fast_concat to operate on lists
self.fast_concat(
uv_list,
axis,
phase_center_radec=phase_center_radec,
unphase_to_drift=unphase_to_drift,
phase_frame=phase_frame,
orig_phase_frame=orig_phase_frame,
use_ant_pos=phase_use_ant_pos,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
inplace=True,
ignore_name=ignore_name,
)
else:
# Too much work to rewrite __add__ to operate on lists
# of files, so instead doing a binary tree merge
uv_list = [self] + uv_list
while len(uv_list) > 1:
for uv1, uv2 in zip(uv_list[0::2], uv_list[1::2]):
uv1.__iadd__(
uv2,
phase_center_radec=phase_center_radec,
unphase_to_drift=unphase_to_drift,
phase_frame=phase_frame,
orig_phase_frame=orig_phase_frame,
use_ant_pos=phase_use_ant_pos,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
ignore_name=ignore_name,
)
uv_list = uv_list[0::2]
# Because self was at the beginning of the list,
# everything is merged into it at the end of this loop
else:
if file_type in ["fhd", "ms", "mwa_corr_fits"]:
if (
antenna_nums is not None
or antenna_names is not None
or ant_str is not None
or bls is not None
or frequencies is not None
or freq_chans is not None
or times is not None
or time_range is not None
or polarizations is not None
or blt_inds is not None
):
select = True
warnings.warn(
"Warning: select on read keyword set, but "
'file_type is "{ftype}" which does not support select '
"on read. Entire file will be read and then select "
"will be performed".format(ftype=file_type)
)
# these file types do not have select on read, so set all
# select parameters
select_antenna_nums = antenna_nums
select_antenna_names = antenna_names
select_ant_str = ant_str
select_bls = bls
select_frequencies = frequencies
select_freq_chans = freq_chans
select_times = times
select_time_range = time_range
select_polarizations = polarizations
select_blt_inds = blt_inds
else:
select = False
elif file_type in ["uvfits", "uvh5"]:
select = False
elif file_type in ["miriad"]:
if (
antenna_names is not None
or frequencies is not None
or freq_chans is not None
or times is not None
or blt_inds is not None
):
if blt_inds is not None:
if (
antenna_nums is not None
or ant_str is not None
or bls is not None
or time_range is not None
):
warnings.warn(
"Warning: blt_inds is set along with select "
"on read keywords that are supported by "
"read_miriad and may downselect blts. "
"This may result in incorrect results "
"because the select on read will happen "
"before the blt_inds selection so the indices "
"may not match the expected locations."
)
else:
warnings.warn(
"Warning: a select on read keyword is set that is "
"not supported by read_miriad. This select will "
"be done after reading the file."
)
select = True
# these are all done by partial read, so set to None
select_antenna_nums = None
select_ant_str = None
select_bls = None
select_time_range = None
select_polarizations = None
# these aren't supported by partial read, so do it in select
select_antenna_names = antenna_names
select_frequencies = frequencies
select_freq_chans = freq_chans
select_times = times
select_blt_inds = blt_inds
else:
select = False
# reading a single "file". Call the appropriate file-type read
if file_type == "uvfits":
self.read_uvfits(
filename,
antenna_nums=antenna_nums,
antenna_names=antenna_names,
ant_str=ant_str,
bls=bls,
frequencies=frequencies,
freq_chans=freq_chans,
times=times,
time_range=time_range,
lsts=lsts,
lst_range=lst_range,
polarizations=polarizations,
blt_inds=blt_inds,
read_data=read_data,
keep_all_metadata=keep_all_metadata,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
fix_old_proj=fix_old_proj,
fix_use_ant_pos=fix_use_ant_pos,
)
elif file_type == "mir":
self.read_mir(
filename,
isource=isource,
irec=irec,
isb=isb,
corrchunk=corrchunk,
pseudo_cont=pseudo_cont,
)
select = False
elif file_type == "miriad":
self.read_miriad(
filename,
antenna_nums=antenna_nums,
ant_str=ant_str,
bls=bls,
polarizations=polarizations,
time_range=time_range,
read_data=read_data,
phase_type=phase_type,
correct_lat_lon=correct_lat_lon,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
calc_lst=calc_lst,
fix_old_proj=fix_old_proj,
fix_use_ant_pos=fix_use_ant_pos,
)
elif file_type == "mwa_corr_fits":
self.read_mwa_corr_fits(
filename,
use_aoflagger_flags=use_aoflagger_flags,
use_cotter_flags=use_cotter_flags,
remove_dig_gains=remove_dig_gains,
remove_coarse_band=remove_coarse_band,
correct_cable_len=correct_cable_len,
correct_van_vleck=correct_van_vleck,
cheby_approx=cheby_approx,
flag_small_auto_ants=flag_small_auto_ants,
flag_small_sig_ants=flag_small_sig_ants,
propagate_coarse_flags=propagate_coarse_flags,
flag_init=flag_init,
edge_width=edge_width,
start_flag=start_flag,
end_flag=end_flag,
flag_dc_offset=True,
remove_flagged_ants=remove_flagged_ants,
phase_to_pointing_center=phase_to_pointing_center,
read_data=read_data,
data_array_dtype=data_array_dtype,
nsample_array_dtype=nsample_array_dtype,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
elif file_type == "fhd":
self.read_fhd(
filename,
use_model=use_model,
background_lsts=background_lsts,
read_data=read_data,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
elif file_type == "ms":
self.read_ms(
filename,
data_column=data_column,
pol_order=pol_order,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
elif file_type == "uvh5":
self.read_uvh5(
filename,
antenna_nums=antenna_nums,
antenna_names=antenna_names,
ant_str=ant_str,
bls=bls,
frequencies=frequencies,
freq_chans=freq_chans,
times=times,
time_range=time_range,
lsts=lsts,
lst_range=lst_range,
polarizations=polarizations,
blt_inds=blt_inds,
read_data=read_data,
data_array_dtype=data_array_dtype,
keep_all_metadata=keep_all_metadata,
multidim_index=multidim_index,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
fix_old_proj=fix_old_proj,
fix_use_ant_pos=fix_use_ant_pos,
)
select = False
if select:
self.select(
antenna_nums=select_antenna_nums,
antenna_names=select_antenna_names,
ant_str=select_ant_str,
bls=select_bls,
frequencies=select_frequencies,
freq_chans=select_freq_chans,
times=select_times,
time_range=select_time_range,
polarizations=select_polarizations,
blt_inds=select_blt_inds,
keep_all_metadata=keep_all_metadata,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
if make_multi_phase:
self._set_multi_phase_center(preserve_phase_center_info=True)
if unphase_to_drift:
if self.phase_type != "drift":
warnings.warn("Unphasing this UVData object to drift")
self.unphase_to_drift(
phase_frame=orig_phase_frame, use_ant_pos=phase_use_ant_pos,
)
if phase_center_radec is not None:
if np.array(phase_center_radec).size != 2:
raise ValueError("phase_center_radec should have length 2.")
# If this object is not phased or is not phased close to
# phase_center_radec, (re)phase it.
# Close is defined using the phase_center_ra/dec tolerances.
if self.phase_type == "drift" or (
not np.isclose(
self.phase_center_ra,
phase_center_radec[0],
rtol=self._phase_center_ra.tols[0],
atol=self._phase_center_ra.tols[1],
)
or not np.isclose(
self.phase_center_dec,
phase_center_radec[1],
rtol=self._phase_center_dec.tols[0],
atol=self._phase_center_dec.tols[1],
)
or (self.phase_center_frame != phase_frame)
or (self.phase_center_epoch != phase_epoch)
):
warnings.warn("Phasing this UVData object to phase_center_radec")
self.phase(
phase_center_radec[0],
phase_center_radec[1],
epoch=phase_epoch,
phase_frame=phase_frame,
orig_phase_frame=orig_phase_frame,
use_ant_pos=phase_use_ant_pos,
allow_rephase=True,
)
@classmethod
def from_file(
cls,
filename,
axis=None,
file_type=None,
allow_rephase=True,
phase_center_radec=None,
unphase_to_drift=False,
phase_frame="icrs",
phase_epoch=None,
orig_phase_frame=None,
phase_use_ant_pos=True,
antenna_nums=None,
antenna_names=None,
ant_str=None,
bls=None,
frequencies=None,
freq_chans=None,
times=None,
polarizations=None,
blt_inds=None,
time_range=None,
keep_all_metadata=True,
read_data=True,
phase_type=None,
correct_lat_lon=True,
use_model=False,
data_column="DATA",
pol_order="AIPS",
data_array_dtype=np.complex128,
nsample_array_dtype=np.float32,
use_aoflagger_flags=None,
use_cotter_flags=None,
remove_dig_gains=True,
remove_coarse_band=True,
correct_cable_len=False,
correct_van_vleck=False,
cheby_approx=True,
flag_small_auto_ants=True,
flag_small_sig_ants=None,
propagate_coarse_flags=True,
flag_init=True,
edge_width=80e3,
start_flag="goodtime",
end_flag=0.0,
flag_dc_offset=True,
remove_flagged_ants=True,
phase_to_pointing_center=False,
skip_bad_files=False,
multidim_index=False,
background_lsts=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
isource=None,
irec=None,
isb=None,
corrchunk=None,
pseudo_cont=False,
lsts=None,
lst_range=None,
calc_lst=True,
fix_old_proj=None,
fix_use_ant_pos=True,
make_multi_phase=False,
ignore_name=False,
):
"""
Initialize a new UVData object by reading the input file.
Parameters
----------
filename : str or array_like of str
The file(s) or list(s) (or array(s)) of files to read from.
file_type : str
One of ['uvfits', 'miriad', 'fhd', 'ms', 'uvh5'] or None.
If None, the code attempts to guess what the file type is.
For miriad and ms types, this is based on the standard directory
structure. For FHD, uvfits and uvh5 files it's based on file
extensions (FHD: .sav, .txt; uvfits: .uvfits; uvh5: .uvh5).
Note that if a list of datasets is passed, the file type is
determined from the first dataset.
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple files are passed.
allow_rephase : bool
Allow rephasing of phased file data so that data from files with
different phasing can be combined.
phase_center_radec : array_like of float
The phase center to phase the files to before adding the objects in
radians (in the ICRS frame). If set to None and multiple files are
read with different phase centers, the phase center of the first
file will be used.
unphase_to_drift : bool
Unphase the data from the files before combining them.
phase_frame : str
The astropy frame to phase to. Either 'icrs' or 'gcrs'.
'gcrs' accounts for precession & nutation,
'icrs' accounts for precession, nutation & aberration.
Only used if `phase_center_radec` is set.
orig_phase_frame : str
The original phase frame of the data (if it is already phased). Used
for unphasing, only if `unphase_to_drift` or `phase_center_radec`
are set. Defaults to using the 'phase_center_frame' attribute or
'icrs' if that attribute is None.
phase_use_ant_pos : bool
If True, calculate the phased or unphased uvws directly from the
antenna positions rather than from the existing uvws.
Only used if `unphase_to_drift` or `phase_center_radec` are set.
antenna_nums : array_like of int, optional
The antennas numbers to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_names` is also provided.
antenna_names : array_like of str, optional
The antennas names to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_nums` is also provided.
bls : list of tuple, optional
A list of antenna number tuples (e.g. [(0, 1), (3, 2)]) or a list of
baseline 3-tuples (e.g. [(0, 1, 'xx'), (2, 3, 'yy')]) specifying baselines
to include when reading data into the object. For length-2 tuples,
the ordering of the numbers within the tuple does not matter. For
length-3 tuples, the polarization string is in the order of the two
antennas. If length-3 tuples are provided, `polarizations` must be
None.
ant_str : str, optional
A string containing information about what antenna numbers
and polarizations to include when reading data into the object.
Can be 'auto', 'cross', 'all', or combinations of antenna numbers
and polarizations (e.g. '1', '1_2', '1x_2y'). See tutorial for more
examples of valid strings and the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1, 2) and (2, 3) to return a valid
pyuvdata object.
An ant_str cannot be passed in addition to any of `antenna_nums`,
`antenna_names`, `bls` args or the `polarizations` parameters,
if it is a ValueError will be raised.
frequencies : array_like of float, optional
The frequencies to include when reading data into the object, each
value passed here should exist in the freq_array.
freq_chans : array_like of int, optional
The frequency channel numbers to include when reading data into the
object. Ignored if read_data is False.
times : array_like of float, optional
The times to include when reading data into the object, each value
passed here should exist in the time_array in the file.
Cannot be used with `time_range`.
time_range : array_like of float, optional
The time range in Julian Date to include when reading data into
the object, must be length 2. Some of the times in the file should
fall between the first and last elements.
Cannot be used with `times`.
polarizations : array_like of int, optional
The polarizations numbers to include when reading data into the
object, each value passed here should exist in the polarization_array.
blt_inds : array_like of int, optional
The baseline-time indices to include when reading data into the
object. This is not commonly used.
keep_all_metadata : bool
Option to keep all the metadata associated with antennas, even those
that do not have data associated with them after the select option.
read_data : bool
Read in the data. Only used if file_type is 'uvfits',
'miriad' or 'uvh5'. If set to False, only the metadata will be
read in. Setting read_data to False results in a metadata only
object.
phase_type : str, optional
Option to specify the phasing status of the data. Only used if
file_type is 'miriad'. Options are 'drift', 'phased' or None.
'drift' means the data are zenith drift data, 'phased' means the
data are phased to a single RA/Dec. Default is None
meaning it will be guessed at based on the file contents.
correct_lat_lon : bool
Option to update the latitude and longitude from the known_telescopes
list if the altitude is missing. Only used if file_type is 'miriad'.
use_model : bool
Option to read in the model visibilities rather than the dirty
visibilities (the default is False, meaning the dirty visibilities
will be read). Only used if file_type is 'fhd'.
data_column : str
name of CASA data column to read into data_array. Options are:
'DATA', 'MODEL', or 'CORRECTED_DATA'. Only used if file_type is 'ms'.
pol_order : str
Option to specify polarizations order convention, options are
'CASA' or 'AIPS'. Only used if file_type is 'ms'.
data_array_dtype : numpy dtype
Datatype to store the output data_array as. Must be either
np.complex64 (single-precision real and imaginary) or np.complex128 (double-
precision real and imaginary). Only used if the datatype of the visibility
data on-disk is not 'c8' or 'c16'. Only used if file_type is 'uvh5' or
'mwa_corr_fits'.
nsample_array_dtype : numpy dtype
Datatype to store the output nsample_array as. Must be either
np.float64 (double-precision), np.float32 (single-precision), or
np.float16 (half-precision). Half-precision is only recommended for
cases where no sampling or averaging of baselines will occur,
because round-off errors can be quite large (~1e-3). Only used if
file_type is 'mwa_corr_fits'.
use_aoflagger_flags : bool
Only used if file_type is 'mwa_corr_fits'. Option to use aoflagger mwaf
flag files. Defaults to true if aoflagger flag files are submitted.
use_cotter_flags : bool
Being replaced by use_aoflagger_flags and will be removed in v2.4.
remove_dig_gains : bool
Only used if file_type is 'mwa_corr_fits'. Option to divide out digital
gains.
remove_coarse_band : bool
Only used if file_type is 'mwa_corr_fits'. Option to divide out coarse
band shape.
correct_cable_len : bool
Flag to apply cable length correction. Only used if file_type is
'mwa_corr_fits'.
correct_van_vleck : bool
Flag to apply a van vleck correction. Only used if file_type is
'mwa_corr_fits'.
cheby_approx : bool
Only used if file_type is 'mwa_corr_fits' and correct_van_vleck is True.
Option to implement the van vleck correction with a chebyshev polynomial
approximation. Set to False to run the integral version of the correction.
flag_small_auto_ants : bool
Only used if correct_van_vleck is True. Option to completely flag any
antenna for which the autocorrelation falls below a threshold found by
the Van Vleck correction to indicate bad data. Specifically, the
threshold used is 0.5 * integration_time * channel_width. If set to False,
only the times and frequencies at which the auto is below the
threshold will be flagged for the antenna. Only used if file_type is
'mwa_corr_fits'.
flag_small_sig_ants : bool
Being replaced by flag_small_auto_ants and will be removed in v2.4.
propagate_coarse_flags : bool
Option to propagate flags for missing coarse channel integrations
across frequency. Only used if file_type is 'mwa_corr_fits'.
flag_init: bool
Only used if file_type is 'mwa_corr_fits'. Set to True in order to
do routine flagging of coarse channel edges, start or end
integrations, or the center fine channel of each coarse
channel. See associated keywords.
edge_width: float
Only used if file_type is 'mwa_corr_fits' and flag_init is True. Set
to the width to flag on the edge of each coarse channel, in hz.
Errors if not equal to integer multiple of channel_width. Set to 0
for no edge flagging.
start_flag: float or str
Only used if flag_init is True. The number of seconds to flag at the
beginning of the observation. Set to 0 for no flagging. Default is
'goodtime', which uses information in the metafits file to determine
the length of time that should be flagged. Errors if input is not a
float or 'goodtime'. Errors if float input is not equal to an
integer multiple of the integration time.
end_flag: floats
Only used if file_type is 'mwa_corr_fits' and flag_init is True. Set
to the number of seconds to flag at the end of the observation. Set
to 0 for no flagging. Errors if not an integer multiple of the
integration time.
flag_dc_offset: bool
Only used if file_type is 'mwa_corr_fits' and flag_init is True. Set
to True to flag the center fine channel of each coarse channel. Only
used if file_type is 'mwa_corr_fits'.
remove_flagged_ants : bool
Option to perform a select to remove antennas flagged in the metafits
file. If correct_van_vleck and flag_small_auto_ants are both True then
antennas flagged by the Van Vleck correction are also removed.
Only used if file_type is 'mwa_corr_fits'.
phase_to_pointing_center : bool
Flag to phase to the pointing center. Only used if file_type is
'mwa_corr_fits'. Cannot be set if phase_center_radec is not None.
skip_bad_files : bool
Option when reading multiple files to catch read errors such that
the read continues even if one or more files are corrupted. Files
that produce errors will be printed. Default is False (files will
not be skipped).
multidim_index : bool
[Only for HDF5] If True, attempt to index the HDF5 dataset
simultaneously along all data axes. Otherwise index one axis at-a-time.
This only works if data selection is sliceable along all but one axis.
If indices are not well-matched to data chunks, this can be slow.
background_lsts : bool
When set to True, the lst_array is calculated in a background thread.
run_check : bool
Option to check for the existence and proper shapes of parameters
after after reading in the file (the default is True,
meaning the check will be run). Ignored if read_data is False.
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
Ignored if read_data is False.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done). Ignored if read_data is False.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
isource : int
Source code for MIR dataset
irec : int
Receiver code for MIR dataset
isb : int
Sideband code for MIR dataset
corrchunk : int
Correlator chunk code for MIR dataset
pseudo_cont : boolean
Read in only pseudo-continuuum values in MIR dataset. Default is false.
lsts : array_like of float, optional
The local sidereal times (LSTs) to keep in the object, each value
passed here should exist in the lst_array. Cannot be used with
`times`, `time_range`, or `lst_range`.
lst_range : array_like of float, optional
The local sidereal time (LST) range in radians to keep in the
object, must be of length 2. Some of the LSTs in the object should
fall between the first and last elements. If the second value is
smaller than the first, the LSTs are treated as having phase-wrapped
around LST = 2*pi = 0, and the LSTs kept on the object will run from
the larger value, through 0, and end at the smaller value.
calc_lst : bool
Recalculate the LST values that are present within the file, useful in
cases where the "online" calculate values have precision or value errors.
Default is True. Only applies to MIRIAD files.
fix_old_proj : bool
Applies a fix to uvw-coordinates and phasing, assuming that the old `phase`
method was used prior to writing the data, which had errors of the order of
one part in 1e4 - 1e5. See the phasing memo for more details. Default is
False, unless reading a UVH5 file that is missing the `phase_center_app_ra`
and `phase_center_app_dec` attributes (as these were introduced at the same
time as the new `phase` method), in which case the default is True.
fix_use_ant_pos : bool
If setting `fix_old_proj` to True, use the antenna positions to derive the
correct uvw-coordinates rather than using the baseline vectors. Default is
True.
make_multi_phase : bool
Option to make the output a multi phase center dataset, capable of holding
data on multiple phase centers. By default, this is only done if reading
in a file with multiple sources.
ignore_name : bool
Only relevant when reading in multiple files, which are concatenated into a
single UVData object. Option to ignore the name of the phase center when
combining multiple files, which would otherwise result in an error being
raised because of attributes not matching. Doing so effectively adopts the
name found in the first file read in. Default is False.
Raises
------
ValueError
If the file_type is not set and cannot be determined from the file name.
If incompatible select keywords are set (e.g. `ant_str` with other
antenna selectors, `times` and `time_range`) or select keywords
exclude all data or if keywords are set to the wrong type.
If the data are multi source or have multiple
spectral windows.
If phase_center_radec is not None and is not length 2.
"""
uvd = cls()
uvd.read(
filename,
axis=axis,
file_type=file_type,
allow_rephase=allow_rephase,
phase_center_radec=phase_center_radec,
unphase_to_drift=unphase_to_drift,
phase_frame=phase_frame,
phase_epoch=phase_epoch,
orig_phase_frame=orig_phase_frame,
phase_use_ant_pos=phase_use_ant_pos,
antenna_nums=antenna_nums,
antenna_names=antenna_names,
ant_str=ant_str,
bls=bls,
frequencies=frequencies,
freq_chans=freq_chans,
times=times,
polarizations=polarizations,
blt_inds=blt_inds,
time_range=time_range,
keep_all_metadata=keep_all_metadata,
read_data=read_data,
phase_type=phase_type,
correct_lat_lon=correct_lat_lon,
use_model=use_model,
data_column=data_column,
pol_order=pol_order,
data_array_dtype=data_array_dtype,
nsample_array_dtype=nsample_array_dtype,
use_aoflagger_flags=use_aoflagger_flags,
use_cotter_flags=use_cotter_flags,
remove_dig_gains=remove_dig_gains,
remove_coarse_band=remove_coarse_band,
correct_cable_len=correct_cable_len,
correct_van_vleck=correct_van_vleck,
cheby_approx=cheby_approx,
flag_small_auto_ants=flag_small_auto_ants,
flag_small_sig_ants=flag_small_sig_ants,
propagate_coarse_flags=propagate_coarse_flags,
flag_init=flag_init,
edge_width=edge_width,
start_flag=start_flag,
end_flag=end_flag,
flag_dc_offset=flag_dc_offset,
remove_flagged_ants=remove_flagged_ants,
phase_to_pointing_center=phase_to_pointing_center,
skip_bad_files=skip_bad_files,
multidim_index=multidim_index,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
isource=isource,
irec=irec,
isb=isb,
corrchunk=corrchunk,
pseudo_cont=pseudo_cont,
lsts=lsts,
lst_range=lst_range,
calc_lst=calc_lst,
fix_old_proj=fix_old_proj,
fix_use_ant_pos=fix_use_ant_pos,
make_multi_phase=make_multi_phase,
ignore_name=ignore_name,
)
return uvd
def write_miriad(
self,
filepath,
clobber=False,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
no_antnums=False,
calc_lst=False,
):
"""
Write the data to a miriad file.
Parameters
----------
filename : str
The miriad root directory to write to.
clobber : bool
Option to overwrite the filename if the file already exists.
run_check : bool
Option to check for the existence and proper shapes of parameters
after before writing the file (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters before
writing the file (the default is True, meaning the acceptable
range check will be done).
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
no_antnums : bool
Option to not write the antnums variable to the file.
Should only be used for testing purposes.
calc_lst : bool
Recalculate the LST values upon writing the file. This is done to perform
higher-precision accounting for the difference in MIRAD timestamps vs
pyuvdata (the former marks the beginning of an integration, the latter
marks the midpoint). Default is False, which instead uses a simple formula
for correcting the LSTs, expected to be accurate to approximately 0.1 µsec
precision.
Raises
------
ValueError
If the frequencies are not evenly spaced or are separated by more
than their channel width or if the UVData object is a metadata only object.
TypeError
If any entry in extra_keywords is not a single string or number.
"""
if self.metadata_only:
raise ValueError("Cannot write out metadata only objects to a miriad file.")
miriad_obj = self._convert_to_filetype("miriad")
miriad_obj.write_miriad(
filepath,
clobber=clobber,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
no_antnums=no_antnums,
calc_lst=calc_lst,
)
del miriad_obj
def write_mir(
self, filepath,
):
"""
Write the data to a mir file.
Parameters
----------
filename : str
The mir root directory to write to.
Raises
------
ValueError
If the UVData object is a metadata only object.
NotImplementedError
Method is not fully implemented yet, and thus will raise an error
"""
if self.metadata_only:
raise ValueError("Cannot write out metadata only objects to a mir file.")
mir_obj = self._convert_to_filetype("mir")
mir_obj.write_mir(filepath,)
del mir_obj
def write_ms(
self,
filename,
force_phase=False,
clobber=False,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
):
"""
Write a CASA measurement set (MS).
Parameters
----------
filename : str
The measurement set file path to write to (a measurement set is really
a folder with many files).
force_phase : bool
Option to automatically phase drift scan data to zenith of the first
timestamp.
clobber : bool
Option to overwrite the file if it already exists.
run_check : bool
Option to check for the existence and proper shapes of parameters
before writing the file.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters before
writing the file.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
Raises
------
ValueError
If the UVData object is a metadata only object.
"""
if self.metadata_only:
raise ValueError(
"Cannot write out metadata only objects to a measurement set file."
)
ms_obj = self._convert_to_filetype("ms")
ms_obj.write_ms(
filename,
force_phase=force_phase,
clobber=clobber,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
del ms_obj
def write_uvfits(
self,
filename,
spoof_nonessential=False,
write_lst=True,
force_phase=False,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
):
"""
Write the data to a uvfits file.
Parameters
----------
filename : str
The uvfits file to write to.
spoof_nonessential : bool
Option to spoof the values of optional UVParameters that are not set
but are required for uvfits files.
write_lst : bool
Option to write the LSTs to the metadata (random group parameters).
force_phase: : bool
Option to automatically phase drift scan data to zenith of the first
timestamp.
run_check : bool
Option to check for the existence and proper shapes of parameters
after before writing the file (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters before
writing the file (the default is True, meaning the acceptable
range check will be done).
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
Raises
------
ValueError
The `phase_type` of the object is "drift" and the `force_phase`
keyword is not set.
If the frequencies are not evenly spaced or are separated by more
than their channel width.
The polarization values are not evenly spaced.
Any of ['antenna_positions', 'gst0', 'rdate', 'earth_omega', 'dut1',
'timesys'] are not set on the object and `spoof_nonessential` is False.
If the `timesys` parameter is not set to "UTC".
If the UVData object is a metadata only object.
TypeError
If any entry in extra_keywords is not a single string or number.
"""
if self.metadata_only:
raise ValueError("Cannot write out metadata only objects to a uvfits file.")
uvfits_obj = self._convert_to_filetype("uvfits")
uvfits_obj.write_uvfits(
filename,
spoof_nonessential=spoof_nonessential,
write_lst=write_lst,
force_phase=force_phase,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
del uvfits_obj
def write_uvh5(
self,
filename,
clobber=False,
chunks=True,
data_compression=None,
flags_compression="lzf",
nsample_compression="lzf",
data_write_dtype=None,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
):
"""
Write a completely in-memory UVData object to a UVH5 file.
Parameters
----------
filename : str
The UVH5 file to write to.
clobber : bool
Option to overwrite the file if it already exists.
chunks : tuple or bool
h5py.create_dataset chunks keyword. Tuple for chunk shape,
True for auto-chunking, None for no chunking. Default is True.
data_compression : str
HDF5 filter to apply when writing the data_array. Default is
None meaning no filter or compression. Dataset must be chunked.
flags_compression : str
HDF5 filter to apply when writing the flags_array. Default is "lzf"
for the LZF filter. Dataset must be chunked.
nsample_compression : str
HDF5 filter to apply when writing the nsample_array. Default is "lzf"
for the LZF filter. Dataset must be chunked.
data_write_dtype : numpy dtype
datatype of output visibility data. If 'None', then the same datatype
as data_array will be used. Otherwise, a numpy dtype object must be
specified with an 'r' field and an 'i' field for real and imaginary
parts, respectively. See uvh5.py for an example of defining such a datatype.
run_check : bool
Option to check for the existence and proper shapes of parameters
after before writing the file (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters before
writing the file (the default is True, meaning the acceptable
range check will be done).
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
Raises
------
ValueError
If the UVData object is a metadata only object.
"""
if self.metadata_only:
raise ValueError(
"Cannot write out metadata only objects to a uvh5 file. To initialize "
"a uvh5 file for partial writing, use the `initialize_uvh5_file` "
"method."
)
uvh5_obj = self._convert_to_filetype("uvh5")
uvh5_obj.write_uvh5(
filename,
clobber=clobber,
chunks=chunks,
data_compression=data_compression,
flags_compression=flags_compression,
nsample_compression=nsample_compression,
data_write_dtype=data_write_dtype,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
del uvh5_obj
def initialize_uvh5_file(
self,
filename,
clobber=False,
chunks=True,
data_compression=None,
flags_compression="lzf",
nsample_compression="lzf",
data_write_dtype=None,
):
"""
Initialize a UVH5 file on disk with the header metadata and empty data arrays.
Parameters
----------
filename : str
The UVH5 file to write to.
clobber : bool
Option to overwrite the file if it already exists.
chunks : tuple or bool
h5py.create_dataset chunks keyword. Tuple for chunk shape,
True for auto-chunking, None for no chunking. Default is True.
data_compression : str
HDF5 filter to apply when writing the data_array. Default is
None meaning no filter or compression. Dataset must be chunked.
flags_compression : str
HDF5 filter to apply when writing the flags_array. Default is "lzf"
for the LZF filter. Dataset must be chunked.
nsample_compression : str
HDF5 filter to apply when writing the nsample_array. Default is "lzf"
for the LZF filter. Dataset must be chunked.
data_write_dtype : numpy dtype
datatype of output visibility data. If 'None', then the same datatype
as data_array will be used. Otherwise, a numpy dtype object must be
specified with an 'r' field and an 'i' field for real and imaginary
parts, respectively. See uvh5.py for an example of defining such a datatype.
Notes
-----
When partially writing out data, this function should be called first
to initialize the file on disk. The data is then actually written by
calling the write_uvh5_part method, with the same filename as the one
specified in this function. See the tutorial for a worked example.
"""
uvh5_obj = self._convert_to_filetype("uvh5")
uvh5_obj.initialize_uvh5_file(
filename,
clobber=clobber,
chunks=chunks,
data_compression=data_compression,
flags_compression=flags_compression,
nsample_compression=nsample_compression,
data_write_dtype=data_write_dtype,
)
del uvh5_obj
def write_uvh5_part(
self,
filename,
data_array,
flags_array,
nsample_array,
check_header=True,
antenna_nums=None,
antenna_names=None,
ant_str=None,
bls=None,
frequencies=None,
freq_chans=None,
times=None,
polarizations=None,
blt_inds=None,
add_to_history=None,
run_check_acceptability=True,
):
"""
Write data to a UVH5 file that has already been initialized.
Parameters
----------
filename : str
The UVH5 file to write to. It must already exist, and is assumed to
have been initialized with initialize_uvh5_file.
data_array : ndarray
The data to write to disk. A check is done to ensure that the
dimensions of the data passed in conform to the ones specified by
the "selection" arguments.
flags_array : ndarray
The flags array to write to disk. A check is done to ensure that the
dimensions of the data passed in conform to the ones specified by
the "selection" arguments.
nsample_array : ndarray
The nsample array to write to disk. A check is done to ensure that the
dimensions of the data passed in conform to the ones specified by
the "selection" arguments.
check_header : bool
Option to check that the metadata present in the header on disk
matches that in the object.
antenna_nums : array_like of int, optional
The antennas numbers to include when writing data into the file
(antenna positions and names for the removed antennas will be retained).
This cannot be provided if `antenna_names` is also provided.
antenna_names : array_like of str, optional
The antennas names to include when writing data into the file
(antenna positions and names for the removed antennas will be retained).
This cannot be provided if `antenna_nums` is also provided.
bls : list of tuple, optional
A list of antenna number tuples (e.g. [(0, 1), (3, 2)]) or a list of
baseline 3-tuples (e.g. [(0, 1, 'xx'), (2, 3, 'yy')]) specifying baselines
to include when writing data into the file. For length-2 tuples,
the ordering of the numbers within the tuple does not matter. For
length-3 tuples, the polarization string is in the order of the two
antennas. If length-3 tuples are provided, `polarizations` must be
None.
ant_str : str, optional
A string containing information about what antenna numbers
and polarizations to include writing data into the file.
Can be 'auto', 'cross', 'all', or combinations of antenna numbers
and polarizations (e.g. '1', '1_2', '1x_2y'). See tutorial for more
examples of valid strings and the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1, 2) and (2, 3) to return a valid
pyuvdata object.
An ant_str cannot be passed in addition to any of `antenna_nums`,
`antenna_names`, `bls` args or the `polarizations` parameters,
if it is a ValueError will be raised.
frequencies : array_like of float, optional
The frequencies to include when writing data into the file, each
value passed here should exist in the freq_array.
freq_chans : array_like of int, optional
The frequency channel numbers to include writing data into the file.
times : array_like of float, optional
The times to include when writing data into the file, each value
passed here should exist in the time_array.
polarizations : array_like of int, optional
The polarizations numbers to include when writing data into the file,
each value passed here should exist in the polarization_array.
blt_inds : array_like of int, optional
The baseline-time indices to include when writing data into the file.
This is not commonly used.
add_to_history : str
String to append to history before write out. Default is no appending.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters before
writing the file (the default is True, meaning the acceptable
range check will be done).
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
"""
uvh5_obj = self._convert_to_filetype("uvh5")
uvh5_obj.write_uvh5_part(
filename,
data_array,
flags_array,
nsample_array,
check_header=check_header,
antenna_nums=antenna_nums,
antenna_names=antenna_names,
bls=bls,
ant_str=ant_str,
frequencies=frequencies,
freq_chans=freq_chans,
times=times,
polarizations=polarizations,
blt_inds=blt_inds,
add_to_history=add_to_history,
run_check_acceptability=run_check_acceptability,
)
del uvh5_obj
| 43.514138 | 88 | 0.560976 | 553,326 | 0.998742 | 1,030 | 0.001859 | 22,950 | 0.041424 | 0 | 0 | 261,060 | 0.471208 |
6ced9767f09be94c059293ba0ec574ce1794fa54 | 215 | py | Python | 30.strings/16.replace.py | robinson-1985/python-zero-dnc | df510d67e453611fcd320df1397cdb9ca47fecb8 | [
"MIT"
] | null | null | null | 30.strings/16.replace.py | robinson-1985/python-zero-dnc | df510d67e453611fcd320df1397cdb9ca47fecb8 | [
"MIT"
] | null | null | null | 30.strings/16.replace.py | robinson-1985/python-zero-dnc | df510d67e453611fcd320df1397cdb9ca47fecb8 | [
"MIT"
] | null | null | null | # 15. replace() -> Altera determinado valor de uma string por outro. Troca uma string por outra.
texto = 'vou Treinar todo Dia Python'
print(texto.replace('vou','Vamos'))
print(texto.replace('Python','Algoritmos')) | 43 | 96 | 0.734884 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 157 | 0.730233 |
6ceeb319087817241b31a0ee42e7edc720a5fffe | 573 | py | Python | Aula 13/ex049P.py | alaanlimaa/Python_CVM1-2-3 | 6d9a9bd693580fd1679a1d0b23afd26841b962a6 | [
"MIT"
] | null | null | null | Aula 13/ex049P.py | alaanlimaa/Python_CVM1-2-3 | 6d9a9bd693580fd1679a1d0b23afd26841b962a6 | [
"MIT"
] | null | null | null | Aula 13/ex049P.py | alaanlimaa/Python_CVM1-2-3 | 6d9a9bd693580fd1679a1d0b23afd26841b962a6 | [
"MIT"
] | null | null | null | '''Refaça o DESAFIO 9, mostrando a tabuada de um número que o usuário escolher,
só que agora utilizando um laço for.'''
#RESOLUÇÃO ALAN
'''mult = int(input(' Digite um número para ver sua tabuada: '))
for num in range(1, 11):
rest = num * mult
print('{} X {} = {} '. format(num, mult, rest))'''
#RESOLUÇÃO PROFESSOR
from time import sleep
num = int(input(' Digite um número para ver sua tabuada: '))
print('PROCESSANDO. . . ')
sleep(2)
for c in range(1, 11):
print('{} x {} = {}'.format(c, num, num * c))
sleep(1)
sleep(1)
print('''
OBRIGADO PARCEIRO''')
| 27.285714 | 79 | 0.635253 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 430 | 0.736301 |
6ceec6d6259423de30eaf0244fd138f63c906931 | 4,710 | py | Python | saleor/api/purchase/serializers.py | glosoftgroup/glosoftgroup-django-pos | b489c402939b9ebabd164c449e7da38fe849d550 | [
"BSD-3-Clause"
] | 2 | 2017-07-11T12:40:59.000Z | 2017-10-18T18:02:46.000Z | saleor/api/purchase/serializers.py | glosoftgroup/glosoftgroup-django-pos | b489c402939b9ebabd164c449e7da38fe849d550 | [
"BSD-3-Clause"
] | 12 | 2017-06-19T07:20:41.000Z | 2022-03-15T19:03:33.000Z | saleor/api/purchase/serializers.py | glosoftgroup/glosoftgroup-django-pos | b489c402939b9ebabd164c449e7da38fe849d550 | [
"BSD-3-Clause"
] | null | null | null | from django.utils.formats import localize
from rest_framework.serializers import (
ModelSerializer,
HyperlinkedIdentityField,
SerializerMethodField,
ValidationError,
)
from rest_framework import serializers
from django.contrib.auth import get_user_model
from ...purchase.models import PurchaseProduct as Table
from saleor.payment.models import PaymentOption
from structlog import get_logger
# Structured logger bound to this module's dotted path.
logger = get_logger(__name__)
# Active user model, resolved via Django's get_user_model() (honors a
# custom AUTH_USER_MODEL setting).
User = get_user_model()
class TableListSerializer(serializers.ModelSerializer):
    """Read-only serializer for ``PurchaseProduct`` rows in list views.

    Every computed field degrades gracefully: when a related object or a
    money field is missing, getters return ``''`` (or ``0`` for unit/total
    cost) instead of raising, so a single bad row cannot break the listing.

    Fixes over the previous version: bare ``except:`` clauses narrowed to
    ``except Exception`` (so ``KeyboardInterrupt``/``SystemExit`` propagate)
    and stray ``print(e)`` debugging replaced with the module logger.
    """

    unit_cost = SerializerMethodField()
    total_cost = SerializerMethodField()
    paid = SerializerMethodField()
    supplier_name = SerializerMethodField()
    product_name = SerializerMethodField()
    pay_option = SerializerMethodField()
    date = SerializerMethodField()
    credit_balance = SerializerMethodField()

    class Meta:
        model = Table
        fields = (
            'id',
            'invoice_number',
            'product_name',
            'variant',
            'quantity',
            'unit_cost',
            'total_cost',
            'paid',
            'credit_balance',
            'supplier_name',
            'pay_option',
            'date',
        )

    def get_pay_option(self, obj):
        """Return first payment option name plus the payment number as HTML."""
        try:
            options = obj.payment_options.first().name
        except Exception as e:
            logger.error('payment option lookup failed', error=str(e))
            options = ''
        try:
            return options + '<br> ' + obj.payment_number
        except Exception:
            # payment_number missing or not a string — fall back to empty.
            return ''

    def get_credit_balance(self, obj):
        """Return the outstanding balance, thousands-separated."""
        try:
            return "{:,}".format(obj.balance.gross)
        except Exception as e:
            logger.error('credit balance unavailable', error=str(e))
            return ''

    def get_paid(self, obj):
        """Return the amount paid, thousands-separated."""
        try:
            return "{:,}".format(obj.amount_paid.gross)
        except Exception as e:
            logger.error('amount paid unavailable', error=str(e))
            return ''

    def get_product_name(self, obj):
        """Return the display name of the purchased product variant."""
        try:
            return obj.stock.variant.display_product()
        except Exception:
            return ''

    def get_supplier_name(self, obj):
        """Return the supplier's name, or '' when no supplier is linked."""
        try:
            return obj.supplier.name
        except Exception:
            return ''

    def get_date(self, obj):
        """Return the creation timestamp formatted per the active locale."""
        return localize(obj.created)

    def get_unit_cost(self, obj):
        """Return the per-unit cost, or 0 when not recorded."""
        try:
            return obj.cost_price.gross
        except Exception:
            return 0

    def get_total_cost(self, obj):
        """Return the total cost of the purchase line, or 0 when missing."""
        try:
            return obj.total_cost.gross
        except Exception:
            return 0
class DistinctTableListSerializer(serializers.ModelSerializer):
    """Serializer for aggregated ``PurchaseProduct`` rows (one per purchase).

    Adds manager-level aggregates (total quantity/cost) and a dashboard link.
    Fixes over the previous version: bare ``except:`` clauses narrowed to
    ``except Exception`` so interpreter-exit exceptions are not swallowed.
    """

    purchase_url = HyperlinkedIdentityField(view_name='dashboard:sale_supplier_list')
    unit_cost = SerializerMethodField()
    total_cost = SerializerMethodField()
    total_quantity = SerializerMethodField()
    supplier_name = SerializerMethodField()
    product_name = SerializerMethodField()
    date = SerializerMethodField()

    class Meta:
        model = Table
        fields = (
            'id',
            'invoice_number',
            'product_name',
            'variant',
            'quantity',
            'unit_cost',
            'total_cost',
            'total_quantity',
            'supplier_name',
            'date',
            'purchase_url'
        )

    def get_product_name(self, obj):
        """Return the display name of the purchased product variant."""
        return obj.stock.variant.display_product()

    def get_date(self, obj):
        """Return the creation timestamp formatted per the active locale."""
        return localize(obj.created)

    def get_supplier_name(self, obj):
        """Return the supplier's name, or '' when no supplier is linked."""
        try:
            return obj.supplier.name
        except Exception:
            return ''

    def get_unit_cost(self, obj):
        """Return the per-unit cost, or 0 when not recorded."""
        try:
            return obj.cost_price.gross
        except Exception:
            return 0

    def get_total_quantity(self, obj):
        """Return the aggregate quantity for this purchase (manager method)."""
        try:
            return Table.objects.total_quantity(obj)
        except Exception:
            return 0

    def get_total_cost(self, obj):
        """Return the aggregate cost for this purchase (manager method)."""
        try:
            return Table.objects.total_cost(obj)
        except Exception:
            return 0
class PaymentOptionListSerializer(serializers.ModelSerializer):
    """Serializer for ``PaymentOption`` rows.

    ``transaction_number`` and ``tendered`` are emitted as placeholders
    (empty string / 0.00) — presumably filled in client-side; confirm with
    the API consumers before changing.

    Fix over the previous version: bare ``except:`` narrowed to
    ``except Exception``.
    """

    tendered = SerializerMethodField()
    transaction_number = SerializerMethodField()
    payment_name = SerializerMethodField()

    class Meta:
        model = PaymentOption
        fields = (
            'id',
            'name',
            'transaction_number',
            'payment_name',
            'tendered'
        )

    def get_transaction_number(self, obj):
        """Placeholder: no transaction number is stored at this stage."""
        return ''

    def get_tendered(self, obj):
        """Placeholder: amount tendered defaults to 0.00."""
        return 0.00

    def get_payment_name(self, obj):
        """Return the option's name, or '' when unavailable."""
        try:
            return obj.name
        except Exception:
            return ''
6cef339c16f78aea67a5d356f92c9371b336485d | 2,377 | py | Python | beautiful_fields/templatetags/beautiful_fields_tags.py | Excentrics/beautiful-fields | 7117cde6ac5646a5d65e166706b94898946a92b7 | [
"BSD-3-Clause"
] | null | null | null | beautiful_fields/templatetags/beautiful_fields_tags.py | Excentrics/beautiful-fields | 7117cde6ac5646a5d65e166706b94898946a92b7 | [
"BSD-3-Clause"
] | null | null | null | beautiful_fields/templatetags/beautiful_fields_tags.py | Excentrics/beautiful-fields | 7117cde6ac5646a5d65e166706b94898946a92b7 | [
"BSD-3-Clause"
] | null | null | null | from django import template
register = template.Library()
@register.filter
def fast_floatformat(number, places=-1, use_thousand_separator=False):
    """fast_floatformat(number:object, places:int) -> str

    Like django.template.defaultfilters.floatformat but not locale aware
    and between 40 and 200 times faster.

    ``places`` follows floatformat semantics: a positive value always
    formats to that many decimal places; a negative value formats to
    ``abs(places)`` places unless the fraction is all zeros, in which case
    only the integer part is returned. One deliberate difference: input
    that cannot be parsed as a float is returned unchanged rather than as
    the empty string.
    """
    try:
        number = float(number)
    except (ValueError, TypeError):
        # Deliberate deviation: floatformat would return '' here.
        return number #return ''
    # floatformat makes -0.0 == 0.0, so normalize negative zero.
    if number == 0:
        number = 0
    neg_places = False
    if places < 0:
        # Negative places: format to abs(places) now, maybe trim below.
        places = abs(places)
        neg_places = True
    if places == 0:
        # %.0f will truncate rather than round, so round explicitly first.
        number = round(number, places)
    # .format is noticably slower than %-formatting, use it only if necessary
    # (i.e. only when a thousands separator was requested).
    if use_thousand_separator:
        format_str = "{:,.%sf}" % places
        formatted_number = format_str.format(number)
    else:
        format_str = "%%.%sf" % places
        formatted_number = format_str % number
    # -places means formatting to places, unless they're all 0 after places
    if neg_places:
        str_number = str(number)
        if not "." in str_number:
            # No decimal point in the repr (e.g. inf/nan or scientific
            # notation) -> nothing to trim, return the plain repr.
            return str_number
        if len(str_number) > len(formatted_number):
            # Original repr is longer than the formatted form -> rounding
            # occurred, keep the formatted result.
            return formatted_number
        int_part, _, _ = formatted_number.partition(".")
        if str_number.rstrip("0")[-1] == ".":
            # Fractional part is all zeros -> emit only the integer part.
            return int_part
        return formatted_number
    return formatted_number
# NOTE(review): disabled validation harness kept for reference. It compares
# fast_floatformat against Django's floatformat over a set of edge-case
# values using generator-style yield tests; it requires Django and a
# yield-test-capable runner to execute.
'''
# TEST AND VALIDATION
from django.template.defaultfilters import floatformat, special_floats
from decimal import Decimal as Decimal
vals = [
    None,
    '',
    1,
    1.9,
    2.0,
    0.1385798798,
    0.2,
    -0.5,
    -0.0,
    -5.0038,
    18343.3582828389,
    Decimal("-0.0"),
    Decimal("5.000083387"),
    Decimal("0E-7"),
    Decimal("780000.388"),
    "-0.5",
    "3.80",
    "foo",
]
vals.extend(special_floats)
def test_floatformat():
    for val in vals:
        yield check_equal, val, floatformat(val), fast_floatformat(val)
        yield check_equal, val, floatformat(val, 7), fast_floatformat(val, 7)
        yield check_equal, val, floatformat(val, -7), fast_floatformat(val, -7)
        yield check_equal, val, floatformat(val, 0), fast_floatformat(val, 0)
def check_equal(orig, a, b):
    assert a == b, '(%s) %s not equal with %s' % (orig, a, b)
'''
6cf05f1a7f8d9e9c0505d52939240080210e4ca9 | 3,877 | py | Python | tools/collect_bot_info.py | Dalanar/DotA2DraftBot | 6ff222782a6c40d99004eeee101ca912e6e8dd11 | [
"MIT"
] | 1 | 2017-01-12T07:13:28.000Z | 2017-01-12T07:13:28.000Z | tools/collect_bot_info.py | Dalanar/DotA2DraftBot | 6ff222782a6c40d99004eeee101ca912e6e8dd11 | [
"MIT"
] | 2 | 2017-01-02T08:55:13.000Z | 2017-01-10T14:48:56.000Z | tools/collect_bot_info.py | Dalanar/DotA2DraftBot | 6ff222782a6c40d99004eeee101ca912e6e8dd11 | [
"MIT"
] | 1 | 2019-07-04T20:03:22.000Z | 2019-07-04T20:03:22.000Z | import vdf
# Heroes that actually have working bot scripts; anything outside this set is
# reported as "Not supported" even if npc_heroes.txt declares Bot data for it.
implemented_bots = set([
    'npc_dota_hero_axe',
    'npc_dota_hero_bane',
    'npc_dota_hero_bounty_hunter',
    'npc_dota_hero_bloodseeker',
    'npc_dota_hero_bristleback',
    'npc_dota_hero_chaos_knight',
    'npc_dota_hero_crystal_maiden',
    'npc_dota_hero_dazzle',
    'npc_dota_hero_death_prophet',
    'npc_dota_hero_dragon_knight',
    'npc_dota_hero_drow_ranger',
    'npc_dota_hero_earthshaker',
    'npc_dota_hero_jakiro',
    'npc_dota_hero_juggernaut',
    'npc_dota_hero_kunkka',
    'npc_dota_hero_lich',
    'npc_dota_hero_lina',
    'npc_dota_hero_lion',
    'npc_dota_hero_luna',
    'npc_dota_hero_necrolyte',
    'npc_dota_hero_omniknight',
    'npc_dota_hero_oracle',
    'npc_dota_hero_phantom_assassin',
    'npc_dota_hero_pudge',
    'npc_dota_hero_razor',
    'npc_dota_hero_sand_king',
    'npc_dota_hero_nevermore',
    'npc_dota_hero_skywrath_mage',
    'npc_dota_hero_sniper',
    'npc_dota_hero_sven',
    'npc_dota_hero_tidehunter',
    'npc_dota_hero_tiny',
    'npc_dota_hero_vengefulspirit',
    'npc_dota_hero_viper',
    'npc_dota_hero_warlock',
    'npc_dota_hero_windrunner',
    'npc_dota_hero_witch_doctor',
    'npc_dota_hero_skeleton_king',
    'npc_dota_hero_zuus',
])
# NOTE(review): hardcoded local install path; Python 2 script (iteritems,
# print statements).  Parses Valve's KeyValues hero definitions via `vdf`.
heroes = vdf.load(open(r'D:\games\steamapps\common\dota 2 beta\game\dota\scripts\npc\npc_heroes.txt'))
with open('hero_bot_data.lua', 'w') as output:
    # Write module exporting stuff #1
    output.write('_G._savedEnv = getfenv()\n')
    output.write('module("hero_bot_data", package.seeall)\n')
    output.write('\n')
    # Collect all hero types
    hero_types = set()
    hero_type_ids = {}
    for name, data in heroes['DOTAHeroes'].iteritems():
        if isinstance(data, dict) and 'Bot' in data:
            this_hero_type = data['Bot']['HeroType'].split('|')
            for hero_type in this_hero_type:
                hero_types.add(hero_type.strip())
    # Assign each hero type a power-of-two id so types combine as a bitmask.
    idx = 1
    for hero_type in hero_types:
        hero_type_ids[hero_type] = idx
        output.write('%s = %d\n' % (hero_type, idx))
        idx *= 2
    output.write('\n')
    # Fill LaningInfo and HeroType
    output.write('heroes = {\n')
    supported_list = []
    not_supported_list = []
    for name, data in heroes['DOTAHeroes'].iteritems():
        # Only Captains-Mode-enabled heroes are considered.
        if isinstance(data, dict) and data.get('CMEnabled', '0') == '1':
            human_name = data['url'].replace('_', ' ')
            if 'Bot' not in data:
                not_supported_list.append(human_name)
                continue
            laning_info = []
            try:
                for key, value in data['Bot']['LaningInfo'].iteritems():
                    laning_info.append('[\'%s\'] = %s' % (key, value))
                # OR the hero's declared types into a single bitmask value.
                this_hero_type = 0
                this_hero_type_raw = data['Bot']['HeroType'].split('|')
                for hero_type in this_hero_type_raw:
                    this_hero_type |= hero_type_ids[hero_type.strip()]
                if ('Loadout' not in data['Bot']) or (name not in implemented_bots):
                    not_supported_list.append(human_name)
                else:
                    output.write('    [\'%s\'] = {[\'HeroType\'] = %s, [\'LaningInfo\'] = {%s}},\n' % (name, this_hero_type, ', '.join(laning_info)))
                    supported_list.append(human_name)
            except KeyError as ex:
                # Missing Bot sub-keys (e.g. no LaningInfo) => unsupported.
                not_supported_list.append(human_name)
    output.write('}\n\n')
    # Write module exporting stuff #2
    output.write('for k,v in pairs(hero_bot_data) do _G._savedEnv[k] = v end\n')
# Console summary of what was emitted vs. skipped.
supported_list.sort()
print 'Fully operational:'
for hero in supported_list:
    print ' - %s' % hero
not_supported_list.sort()
print '\nNot supported:'
for hero in not_supported_list:
    print ' - %s' % hero
| 37.640777 | 150 | 0.605107 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,617 | 0.417075 |
6cf3d683e2db7b466be99b176a67b586108082a3 | 147 | py | Python | maigret/__main__.py | noi4eg/maigret | d3b20e13dfa3df93dea1999201e250d5a7f82ad0 | [
"MIT"
] | 1,156 | 2020-08-30T08:06:59.000Z | 2022-03-31T17:42:14.000Z | maigret/__main__.py | noi4eg/maigret | d3b20e13dfa3df93dea1999201e250d5a7f82ad0 | [
"MIT"
] | 132 | 2020-08-30T13:53:21.000Z | 2022-03-31T18:42:42.000Z | maigret/__main__.py | noi4eg/maigret | d3b20e13dfa3df93dea1999201e250d5a7f82ad0 | [
"MIT"
] | 185 | 2020-08-30T08:15:00.000Z | 2022-03-31T22:49:02.000Z | #! /usr/bin/env python3
"""
Maigret entrypoint
"""
import asyncio
from .maigret import main
if __name__ == "__main__":
asyncio.run(main())
| 11.307692 | 26 | 0.673469 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.401361 |
6cf481b9e35e180cf91908f12e638fa7c7e6f9dd | 80 | py | Python | 9_POO/generadorPassword/venv/lib/python3.9/site-packages/egcd/__init__.py | igijon/sge_2022 | 48228dad24c3d9fbcd7b0975c28095c40b15c4c3 | [
"MIT"
] | null | null | null | 9_POO/generadorPassword/venv/lib/python3.9/site-packages/egcd/__init__.py | igijon/sge_2022 | 48228dad24c3d9fbcd7b0975c28095c40b15c4c3 | [
"MIT"
] | null | null | null | 9_POO/generadorPassword/venv/lib/python3.9/site-packages/egcd/__init__.py | igijon/sge_2022 | 48228dad24c3d9fbcd7b0975c28095c40b15c4c3 | [
"MIT"
] | null | null | null | """Allow users to access the function directly."""
from egcd.egcd import egcd
| 26.666667 | 51 | 0.7375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.625 |
6cf5c425b9a007093ff73fa1c5872a082b863b03 | 18,872 | py | Python | externals/libbot/bot2-procman/python/src/bot_procman/sheriff_config.py | ericmanzi/double_pendulum_lqr | 76bba3091295abb7d412c4a3156258918f280c96 | [
"BSD-3-Clause"
] | null | null | null | externals/libbot/bot2-procman/python/src/bot_procman/sheriff_config.py | ericmanzi/double_pendulum_lqr | 76bba3091295abb7d412c4a3156258918f280c96 | [
"BSD-3-Clause"
] | null | null | null | externals/libbot/bot2-procman/python/src/bot_procman/sheriff_config.py | ericmanzi/double_pendulum_lqr | 76bba3091295abb7d412c4a3156258918f280c96 | [
"BSD-3-Clause"
] | null | null | null | TokIdentifier = "Identifier"
TokOpenStruct = "OpenStruct"
TokCloseStruct = "CloseStruct"
TokAssign = "Assign"
TokEndStatement = "EndStatement"
TokString = "String"
TokEOF = "EOF"
TokComment = "Comment"
TokInteger = "Integer"
class Token(object):
    """A single lexical token: a token-type tag plus its literal text."""

    def __init__(self, type, val):
        # `type` shadows the builtin, but it is part of the existing call
        # signature and is kept for compatibility.
        self.type, self.val = type, val
class ParseError(ValueError):
    """Raised on malformed config input; renders a caret diagnostic."""

    def __init__(self, lineno, line_pos, line_text, tokenval, msg):
        self.lineno = lineno
        self.offset = line_pos
        self.text = line_text
        self.token = tokenval
        self.msg = msg

    def __str__(self):
        tab_count = self.text.count("\t")
        token_part = "token %s" % self.token if self.token is not None else ""
        header = "%s\nline %d col %s %s\n%s\n" % (
            self.msg, self.lineno, self.offset, token_part, self.text)
        # Caret line: pad with spaces up to the offending column, re-emitting
        # one tab per tab in the source line so the caret stays aligned.
        caret = " " * (self.offset - tab_count - 1) + "\t" * tab_count + "^"
        return header + caret
class Tokenizer(object):
    """Character-level lexer for sheriff config files.

    Reads from a file-like object one line at a time and produces Token
    objects via next_token().  Supports one character of pushback
    (unget_char) and tracks line/column positions for error reporting.
    """

    def __init__ (self, f):
        self.f = f
        self.unget_char = None      # one-character pushback buffer
        self.line_pos = 0           # next read position within line_buf
        self.line_len = 0
        self.line_buf = ""
        self.line_num = 1
        self.tok_pos = 0            # column where the current token started
        self.prev_tok_pos = 0       # column of the previous token's start

    def _next_char (self):
        """Return the next character, or '' at end of input."""
        if self.unget_char is not None:
            c = self.unget_char
            self.unget_char = None
            return c
        else:
            if self.line_pos == self.line_len:
                # Refill the line buffer; empty readline() means EOF.
                self.line_buf = self.f.readline ()
                if not len (self.line_buf):
                    return ''
                self.line_len = len (self.line_buf)
                self.line_pos = 0
            c = self.line_buf[self.line_pos]
            self.line_pos += 1
            if c == '\n':
                self.line_num += 1
            return c

    def _ungetc (self, c):
        # Push one character back; '' (EOF) is silently ignored.
        if not c: return
        self.unget_char = c

    def _unescape (self, c):
        # Translate the character after a backslash in a string literal.
        d = { "n": "\n",
              "r": "\r",
              "t": "\t" }
        if c in d: return d[c]
        return c

    def next_token (self):
        """Scan and return the next Token (TokEOF at end of input)."""
        c = self._next_char ()
        # Skip leading whitespace.
        while c and c.isspace ():
            c = self._next_char ()
        if not c: return Token (TokEOF, "")
        self.prev_tok_pos = self.tok_pos
        self.tok_pos = self.line_pos
        # Single-character punctuation tokens.
        simple_tokens = { \
            "=" : TokAssign,
            ";" : TokEndStatement,
            "{" : TokOpenStruct,
            "}" : TokCloseStruct
        }
        if c in simple_tokens:
            return Token (simple_tokens[c], c)
        tok_chars = [ c ]
        if c == "#":
            # Comment runs to end of line (or EOF).
            while True:
                c = self._next_char ()
                if not c or c == "\n":
                    return Token (TokComment, "".join (tok_chars))
                tok_chars.append (c)
        if c == "\"":
            # Double-quoted string with backslash escapes; may not span lines.
            tok_chars = []
            while True:
                c = self._next_char ()
                if c == "\n":
                    raise ParseError (self.line_num, self.tok_pos,
                            self.line_buf, None, "Unterminated string constant")
                if c == "\\": c = self._unescape (self._next_char ())
                elif not c or c == "\"":
                    return Token (TokString, "".join (tok_chars))
                tok_chars.append (c)
        if c.isalpha () or c == "_":
            # Identifier: alphanumerics plus '_' and '-'.
            while True:
                c = self._next_char ()
                if not c.isalnum () and c not in "_-":
                    self._ungetc (c)
                    return Token (TokIdentifier, "".join (tok_chars))
                tok_chars.append (c)
        if c.isdigit():
            # Unsigned integer literal.
            while True:
                c = self._next_char()
                if not c.isdigit():
                    self._ungetc(c)
                    return Token(TokInteger, "".join(tok_chars))
                tok_chars.append(c)
        raise ParseError (self.line_num, self.line_pos,
                self.line_buf, None, "Invalid character")
def escape_str(text):
    """Return *text* with backslashes and double quotes backslash-escaped."""
    # Escape backslashes first so the quote pass cannot double-escape the
    # backslashes it introduces.
    return text.replace("\\", "\\\\").replace('"', '\\"')
class CommandNode(object):
    """A single ``cmd { ... }`` entry: an executable plus its attributes.

    Falsy attribute values (None, "", 0) are omitted when serialized;
    "group" and "nickname" are rendered structurally, not as attributes.
    """

    def __init__ (self):
        self.attributes = { \
            "exec" : None,
            "host" : None,
            "group" : "",
            "nickname" : "",
            "stop_signal" : 0,
            "stop_time_allowed" : 0
        }

    def to_config_string(self, indent = 0):
        """Serialize this command as config text, indented *indent* levels."""
        s = "    " * indent
        lines = []
        nickname = self.attributes["nickname"]
        if len(nickname):
            lines.append (s + "cmd \"%s\" {" % escape_str(nickname))
        else:
            lines.append (s + "cmd {")
        # sorted() instead of list.sort() on dict.items(): same deterministic
        # order, and works on Python 3 where dict views have no .sort().
        for key, val in sorted(self.attributes.items()):
            if not val:
                continue
            if key in [ "group", "nickname" ]:
                continue
            # str(val): the integer attributes (stop_signal,
            # stop_time_allowed) previously crashed escape_str(), which
            # iterates over a string.
            lines.append (s + "    %s = \"%s\";" % (key, escape_str(str(val))))
        lines.append (s + "}")
        return ("\n".join (lines))

    def __str__ (self):
        return self.to_config_string()
class GroupNode(object):
    """A node in the group tree: named subgroups plus member commands.

    The root group has the empty name "" and serializes without a
    ``group "..." { }`` wrapper.
    """

    def __init__(self, name):
        self.name = name
        self.commands = []
        self.subgroups = {}

    def add_command(self, command):
        # Stamp the command with this group's name, then own it.
        command.attributes["group"] = self.name
        self.commands.append(command)

    def get_subgroup(self, name_parts, create=False):
        """Walk *name_parts* down the tree; optionally create missing nodes.

        Raises KeyError when a part is missing and *create* is False.
        """
        node = self
        for part in name_parts:
            child = node.subgroups.get(part)
            if child is None:
                if not create:
                    raise KeyError()
                child = GroupNode(part)
                node.subgroups[part] = child
            node = child
        return node

    def to_config_string(self, indent=0):
        """Serialize this subtree as config text, indented *indent* levels."""
        pad = "    " * indent
        child_indent = 0 if self.name == "" else indent + 1
        group_text = "\n".join(
            g.to_config_string(child_indent) for g in self.subgroups.values())
        cmd_text = "\n".join(
            c.to_config_string(child_indent) for c in self.commands)
        if self.name == "":
            # The root is only ever emitted at the top level.
            assert indent == 0
            return group_text + cmd_text + "\n"
        return ("%sgroup \"%s\" {\n" % (pad, self.name)
                + group_text + cmd_text + "\n%s}\n" % pad)

    def __str__(self):
        return self.to_config_string(0)
class StartStopRestartActionNode(object):
    """Script action that starts, stops, or restarts a cmd/group/everything,
    optionally waiting until the target reaches a given status."""

    _ACTIONS = ("start", "stop", "restart")
    _IDENT_TYPES = ("everything", "group", "cmd")
    _WAIT_STATES = (None, "running", "stopped")

    def __init__(self, action_type, ident_type, ident, wait_status):
        assert action_type in self._ACTIONS
        assert ident_type in self._IDENT_TYPES
        self.action_type = action_type
        self.ident_type = ident_type
        self.wait_status = wait_status
        assert wait_status in self._WAIT_STATES
        if self.ident_type == "everything":
            # "everything" targets the whole config; no identifier applies.
            self.ident = None
        else:
            self.ident = ident
            assert self.ident is not None

    def __str__(self):
        if self.ident_type == "everything":
            target = self.ident_type
        else:
            target = "%s \"%s\"" % (self.ident_type, escape_str(self.ident))
        if self.wait_status is None:
            return "%s %s;" % (self.action_type, target)
        return "%s %s wait \"%s\";" % (self.action_type, target,
                self.wait_status)
class WaitMsActionNode(object):
    """Script action that pauses execution for a fixed number of ms."""

    def __init__(self, delay_ms):
        self.action_type = "wait_ms"
        self.delay_ms = delay_ms

    def __str__(self):
        return "wait ms %d;" % self.delay_ms
class WaitStatusActionNode(object):
    """Script action that blocks until a cmd/group reaches a status."""

    def __init__(self, ident_type, ident, wait_status):
        self.ident_type = ident_type
        self.ident = ident
        self.wait_status = wait_status
        self.action_type = "wait_status"
        assert wait_status in ("running", "stopped")

    def __str__(self):
        parts = (self.ident_type, escape_str(self.ident), self.wait_status)
        return "wait %s \"%s\" status \"%s\";" % parts
class RunScriptActionNode(object):
    """Script action that invokes another named script."""

    def __init__(self, script_name):
        self.action_type = "run_script"
        self.script_name = script_name

    def __str__(self):
        return "run_script \"%s\";" % escape_str(self.script_name)
class ScriptNode(object):
    """A named sheriff script: an ordered list of action nodes."""

    def __init__(self, name):
        self.name = name
        self.actions = []

    def add_action(self, action):
        # Every action object must carry an action_type tag.
        assert action is not None
        assert hasattr(action, "action_type")
        self.actions.append(action)

    def __str__(self):
        body = "".join("\n    " + str(a) for a in self.actions)
        return "script \"%s\" {" % escape_str(self.name) + body + "\n}\n"
class ConfigNode(object):
    """Root of a parsed sheriff config: a group tree plus named scripts."""

    def __init__ (self):
        self.scripts = {}
        self.root_group = GroupNode("")

    def _normalize_group_name(self, name):
        """Canonicalize: leading slash, no doubled or trailing slashes."""
        if not name.startswith("/"):
            name = "/" + name
        while name.find("//") >= 0:
            name = name.replace("//", "/")
        return name.rstrip("/")

    def has_group(self, group_name):
        """Return True if the group path already exists in the tree.

        Bug fix: previously this split the *raw* group_name (the normalized
        name was computed and then ignored) and iterated over every path
        component including the root's empty component, so the root "" was
        looked up as a subgroup of itself and the method could never find
        an existing group.  Now mirrors get_group(): split the normalized
        name and walk parts[1:].
        """
        name = self._normalize_group_name(group_name)
        parts = name.split("/")
        group = self.root_group
        assert group.name == parts[0]
        for part in parts[1:]:
            if part in group.subgroups:
                group = group.subgroups[part]
            else:
                return False
        return True

    def get_group (self, group_name, create=False):
        """Return the GroupNode at *group_name*, creating it if requested."""
        name = self._normalize_group_name(group_name)
        parts = name.split("/")
        group = self.root_group
        # parts[0] is the empty root component; descend from parts[1:].
        return group.get_subgroup(parts[1:], create)

    def add_script (self, script):
        # Script names must be unique within a config.
        assert script.name not in self.scripts
        self.scripts[script.name] = script

    def __str__ (self):
        val = self.root_group.to_config_string()
        # Scripts are emitted case-insensitively sorted by name.
        scripts = sorted(self.scripts.values(), key=lambda s: s.name.lower())
        val += "\n" + "\n".join([str(script) for script in scripts])
        return val
class Parser:
    """Recursive-descent parser for sheriff config files.

    Keeps one token of lookahead: _next_tok is the token about to be
    consumed, _cur_tok the one most recently consumed.  Comment tokens are
    skipped transparently.  parse() returns a ConfigNode tree.
    """

    def __init__ (self):
        self.tokenizer = None
        self._cur_tok = None
        self._next_tok = None

    def _get_token (self):
        """Advance: consume _next_tok into _cur_tok, skipping comments."""
        self._cur_tok = self._next_tok
        self._next_tok = self.tokenizer.next_token ()
        while self._next_tok.type == TokComment:
            self._next_tok = self.tokenizer.next_token ()
        return self._cur_tok

    def _eat_token (self, tok_type):
        """Consume the lookahead token if it has *tok_type*; return success."""
        if self._next_tok and self._next_tok.type == tok_type:
            self._get_token ()
            return True
        return False

    def _fail (self, msg):
        # Error positioned at the already-consumed token.
        raise ParseError (self.tokenizer.line_num,
                self.tokenizer.prev_tok_pos,
                self.tokenizer.line_buf,
                self._cur_tok.val, msg)

    def _fail_next_token (self, msg):
        # Error positioned at the lookahead token.
        raise ParseError (self.tokenizer.line_num,
                self.tokenizer.tok_pos,
                self.tokenizer.line_buf,
                self._next_tok.val, msg)

    def _eat_token_or_fail(self, tok_type, err_msg):
        """Consume a token of *tok_type* (or raise) and return its text."""
        if not self._eat_token(tok_type):
            self._fail_next_token(err_msg)
        return self._cur_tok.val

    def _expect_identifier(self, identifier, err_msg = None):
        """Consume an identifier token that must equal *identifier*."""
        if err_msg is None:
            err_msg = "Expected %s" % identifier
        self._eat_token_or_fail(TokIdentifier, err_msg)
        if self._cur_tok.val != identifier:
            self._fail(err_msg)

    def _parse_identifier_one_of(self, valid_identifiers):
        """Consume an identifier restricted to *valid_identifiers*."""
        err_msg = "Expected one of %s" % str(valid_identifiers)
        self._eat_token_or_fail(TokIdentifier, err_msg)
        result = self._cur_tok.val
        if result not in valid_identifiers:
            self._fail(err_msg)
        return result

    def _parse_string_one_of(self, valid_strings):
        """Consume a string literal restricted to *valid_strings*."""
        err_msg = "Expected one of %s" % str(valid_strings)
        self._eat_token_or_fail(TokString, err_msg)
        result = self._cur_tok.val
        if result not in valid_strings:
            self._fail(err_msg)
        return result

    def _parse_string_or_fail(self):
        self._eat_token_or_fail(TokString, "Expected string literal")
        return self._cur_tok.val

    def _parse_command_param_list (self, cmd):
        """Parse ``name = value;`` attribute lines (tail-recursively) into
        cmd.attributes until a non-identifier token is seen."""
        if not self._eat_token (TokIdentifier):
            return
        attrib_name = self._cur_tok.val
        # Maps each recognized attribute to its expected value token type.
        attribs = { "exec" : TokString,
                "host" : TokString,
                "auto_respawn" : TokString,
                "group" : TokString,
                "stop_signal" : TokInteger,
                "stop_time_allowed" : TokInteger }
        if attrib_name not in attribs:
            self._fail("Unrecognized attribute %s" % attrib_name)
        self._eat_token_or_fail(TokAssign, "Expected '='")
        if attribs[attrib_name] == TokString:
            attrib_val = self._parse_string_or_fail()
        else:
            self._eat_token_or_fail(TokInteger, "Expected integer literal")
            attrib_val = int(self._cur_tok.val)
        self._eat_token_or_fail(TokEndStatement, "Expected ';'")
        if attrib_name == "stop_signal" and attrib_val < 1:
            self._fail("Invalid value specified for command attribute 'stop_signal'")
        elif attrib_name == "stop_time_allowed" and attrib_val < 1:
            # NOTE(review): 'allwoed' typo below is in the user-visible error
            # message; left unchanged here (behavior-preserving pass).
            self._fail("Invalid value specified for command attribute 'stop_time_allwoed'")
        cmd.attributes[attrib_name] = attrib_val
        return self._parse_command_param_list (cmd)

    def _parse_command (self):
        """Parse ``cmd ["nickname"] { attributes }`` into a CommandNode."""
        cmd = CommandNode ()
        if self._eat_token(TokString):
            cmd.attributes["nickname"] = self._cur_tok.val
            if "/" in self._cur_tok.val:
                self._fail("'/' character not allowed in command name")
        self._eat_token_or_fail (TokOpenStruct, "Expected '{'")
        self._parse_command_param_list (cmd)
        self._eat_token_or_fail (TokCloseStruct, "Expected '}'")
        if not cmd.attributes["exec"]:
            self._fail ("Invalid command defined -- no executable specified")
        return cmd

    def _parse_group(self, parent_group):
        """Parse ``group "name" { cmds/subgroups }`` under *parent_group*."""
        self._eat_token_or_fail (TokString, "Expected group name string")
        if "/" in self._cur_tok.val:
            self._fail("'/' character is not allowed in group name")
        elif not self._cur_tok.val.strip():
            self._fail("Empty group name is not allowed")
        name = self._cur_tok.val
        group = parent_group.get_subgroup([name], True)
        self._eat_token_or_fail (TokOpenStruct, "Expected '{'")
        while self._eat_token(TokIdentifier):
            if self._cur_tok.val == "cmd":
                group.add_command(self._parse_command())
            elif self._cur_tok.val == "group":
                # Nested groups recurse.
                self._parse_group(group)
            else:
                self._fail("Expected one of [group, cmd]")
        self._eat_token_or_fail(TokCloseStruct, "Expected '}'")

    def _parse_start_stop_restart_action(self, action_type):
        """Parse ``<action> everything|cmd|group ["id"] [wait "status"];``."""
        valid_ident_types = [ "everything", "cmd", "group" ]
        ident_type = self._parse_identifier_one_of(valid_ident_types)
        ident = None
        if ident_type != "everything":
            ident = self._parse_string_or_fail()
        if self._eat_token(TokEndStatement):
            return StartStopRestartActionNode(action_type, ident_type, ident,
                    None)
        self._expect_identifier("wait", "Expected ';' or 'wait'")
        wait_status = self._parse_string_one_of(["running", "stopped"])
        self._eat_token_or_fail(TokEndStatement, "Expected ';'")
        return StartStopRestartActionNode(action_type, ident_type, ident,
                wait_status)

    def _parse_wait_action(self):
        """Parse ``wait ms N;`` or ``wait cmd|group "id" status "...";``."""
        wait_type = self._parse_identifier_one_of(["ms", "cmd", "group"])
        if wait_type == "ms":
            err_msg = "Expected integer constant"
            delay_ms = int(self._eat_token_or_fail(TokInteger, err_msg))
            self._eat_token_or_fail(TokEndStatement, "Expected ';'")
            return WaitMsActionNode(delay_ms)
        else:
            ident = self._parse_string_or_fail()
            self._expect_identifier("status")
            wait_status = self._parse_string_one_of(["running", "stopped"])
            self._eat_token_or_fail(TokEndStatement, "Expected ';'")
            return WaitStatusActionNode(wait_type, ident, wait_status)

    def _parse_run_script(self):
        """Parse ``run_script "name";``."""
        script_name = self._eat_token_or_fail(TokString, "expected script name")
        self._eat_token_or_fail(TokEndStatement, "Expected ';'")
        return RunScriptActionNode(script_name)

    def _parse_script_action_list(self):
        """Parse a ``{ ... }`` block of script actions; return them in order."""
        self._eat_token_or_fail (TokOpenStruct, "Expected '{'")
        actions = []
        while self._eat_token(TokIdentifier):
            action_type = self._cur_tok.val
            if action_type in [ "start", "stop", "restart" ]:
                action = self._parse_start_stop_restart_action(action_type)
                actions.append(action)
            elif action_type == "wait":
                actions.append(self._parse_wait_action())
            elif action_type == "run_script":
                actions.append(self._parse_run_script())
            else:
                self._fail("Unexpected token %s" % action_type)
        self._eat_token_or_fail(TokCloseStruct, "Unexpected token")
        return actions

    def _parse_script(self):
        """Parse ``script "name" { actions }`` and register it on the node."""
        name = self._eat_token_or_fail(TokString, "expected script name")
        script_node = ScriptNode(name)
        for action in self._parse_script_action_list():
            script_node.add_action(action)
        self._node.add_script(script_node)

    def _parse_listdecl(self):
        """Parse the top level: a sequence of cmd/group/script declarations."""
        while True:
            if self._eat_token(TokEOF):
                return
            ident_type = self._parse_identifier_one_of(["cmd", "group", "script"])
            if ident_type == "cmd":
                self._node.root_group.add_command(self._parse_command())
            if ident_type == "group":
                self._parse_group(self._node.root_group)
            if ident_type == "script":
                self._parse_script()

    def parse (self, f):
        """Parse the file-like object *f* and return the ConfigNode tree."""
        self.tokenizer = Tokenizer (f)
        self._cur_tok = None
        self._next_tok = None
        # Prime the one-token lookahead.
        self._get_token ()
        self._node = ConfigNode()
        self._parse_listdecl()
        return self._node
def config_from_filename (fname):
    """Parse the sheriff config file at *fname* into a ConfigNode tree."""
    # open() instead of the Python 2 file() builtin: identical on Python 2,
    # and file() no longer exists on Python 3.
    return Parser ().parse (open (fname))
if __name__ == "__main__":
    # Self-test entry point: parse the given config file and echo it back.
    import sys
    try:
        fname = sys.argv[1]
    except IndexError:
        # Parenthesized single-argument print behaves identically on
        # Python 2 and is required syntax on Python 3.
        print("usage: sheriff_config.py <fname>")
        sys.exit (1)
    print(config_from_filename (fname))
| 34.755064 | 106 | 0.572011 | 18,164 | 0.962484 | 0 | 0 | 0 | 0 | 0 | 0 | 1,958 | 0.103752 |
6cf6f83b90ce42f51aa9b2b61196aa521afa20ce | 6,286 | py | Python | tests/test_page_objects.py | rinkky/page-objects | 7f0313fb16482a2c8f161472d85d2f0dda6490c2 | [
"X11"
] | 97 | 2015-06-09T06:34:53.000Z | 2020-07-02T08:45:22.000Z | tests/test_page_objects.py | rinkky/page-objects | 7f0313fb16482a2c8f161472d85d2f0dda6490c2 | [
"X11"
] | 7 | 2015-06-17T15:33:15.000Z | 2019-12-02T20:55:38.000Z | tests/test_page_objects.py | rinkky/page-objects | 7f0313fb16482a2c8f161472d85d2f0dda6490c2 | [
"X11"
] | 40 | 2015-04-28T14:31:34.000Z | 2020-06-18T12:53:38.000Z | import inspect
try:
from unittest import mock
except ImportError:
import mock
import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webdriver import WebDriver, WebElement
from selenium.common.exceptions import NoSuchElementException
from page_objects import PageObject, PageElement, MultiPageElement
@pytest.fixture()
def webdriver():
    # Fresh WebDriver mock per test; spec= restricts it to the real
    # WebDriver API so typo'd method names raise AttributeError.
    return mock.Mock(spec=WebDriver)
class TestConstructor:
    """Locator construction and argument validation for page elements."""

    def test_page_element(self):
        elem = PageElement(css='foo')
        assert elem.locator == (By.CSS_SELECTOR, 'foo')

    def test_multi_page_element(self):
        elem = MultiPageElement(id_='bar')
        assert elem.locator == (By.ID, 'bar')

    def test_page_element_bad_args(self):
        # No locator at all, and two competing locators, must both be
        # rejected with ValueError.
        with pytest.raises(ValueError):
            PageElement()
        with pytest.raises(ValueError):
            PageElement(id_='foo', xpath='bar')
class TestGet:
    """Descriptor __get__ behavior for PageElement/MultiPageElement."""

    def test_get_descriptors(self, webdriver):
        class TestPage(PageObject):
            test_elem1 = PageElement(css='foo')
            test_elem2 = PageElement(id_='bar')

        webdriver.find_element.side_effect = ["XXX", "YYY"]
        page = TestPage(webdriver=webdriver)
        assert page.test_elem1 == "XXX"
        assert page.test_elem2 == "YYY"
        assert webdriver.find_element.mock_calls == [
            mock.call(By.CSS_SELECTOR, 'foo'),
            mock.call(By.ID, 'bar'),
        ]

    def test_get_element_with_context(self, webdriver):
        class TestPage(PageObject):
            test_elem = PageElement(css='bar', context=True)

        page = TestPage(webdriver=webdriver)
        elem = mock.Mock(spec=WebElement, name="My Elem")
        res = page.test_elem(elem)
        # NOTE(review): Mock has no called_once_with(); this expression
        # auto-creates a truthy child mock, so this assert can never fail.
        # It should presumably be elem.find_element.assert_called_once_with(...)
        # -- confirm the intended call target before fixing.
        assert elem.find_element.called_once_with(By.CSS_SELECTOR, 'bar')
        assert res == elem.find_element.return_value

    def test_get_not_found(self, webdriver):
        class TestPage(PageObject):
            test_elem = PageElement(css='bar')

        page = TestPage(webdriver=webdriver)
        webdriver.find_element.side_effect = NoSuchElementException
        assert page.test_elem is None

    def test_get_unattached(self):
        # Accessing the descriptor without an owning instance yields None.
        assert PageElement(css='bar').__get__(None, None) is None

    def test_get_multi(self, webdriver):
        class TestPage(PageObject):
            test_elems = MultiPageElement(css='foo')

        webdriver.find_elements.return_value = ["XXX", "YYY"]
        page = TestPage(webdriver=webdriver)
        assert page.test_elems == ["XXX", "YYY"]
        # NOTE(review): called_once_with() is not a real Mock assertion --
        # always truthy; see note above.
        assert webdriver.find_elements.called_once_with(By.CSS_SELECTOR, 'foo')

    def test_get_multi_not_found(self, webdriver):
        class TestPage(PageObject):
            test_elems = MultiPageElement(css='foo')

        webdriver.find_elements.side_effect = NoSuchElementException
        page = TestPage(webdriver=webdriver)
        assert page.test_elems == []
class TestSet:
    """Descriptor __set__ behavior (send_keys) and its error cases."""

    def test_set_descriptors(self, webdriver):
        class TestPage(PageObject):
            test_elem1 = PageElement(css='foo')

        page = TestPage(webdriver=webdriver)
        elem = mock.Mock(spec=WebElement, name="My Elem")
        webdriver.find_element.return_value = elem
        page.test_elem1 = "XXX"
        # NOTE(review): called_once_with() is not a real Mock assertion --
        # it auto-creates a truthy child mock, so this assert never fails.
        # Also note it checks find_elements while the stub above configures
        # find_element; confirm the intended target before converting to
        # assert_called_once_with(...).
        assert webdriver.find_elements.called_once_with(By.CSS_SELECTOR, 'foo')
        elem.send_keys.assert_called_once_with('XXX')

    def test_cannot_set_with_context(self, webdriver):
        class TestPage(PageObject):
            test_elem = PageElement(css='foo', context=True)

        page = TestPage(webdriver=webdriver)
        with pytest.raises(ValueError) as e:
            page.test_elem = 'xxx'
        assert "doesn't support elements with context" in e.value.args[0]

    def test_cannot_set_not_found(self, webdriver):
        class TestPage(PageObject):
            test_elem = PageElement(css='foo')

        page = TestPage(webdriver=webdriver)
        webdriver.find_element.side_effect = NoSuchElementException
        with pytest.raises(ValueError) as e:
            page.test_elem = 'xxx'
        assert "element not found" in e.value.args[0]

    def test_set_multi(self, webdriver):
        class TestPage(PageObject):
            test_elems = MultiPageElement(css='foo')

        page = TestPage(webdriver=webdriver)
        elem1 = mock.Mock(spec=WebElement)
        elem2 = mock.Mock(spec=WebElement)
        webdriver.find_elements.return_value = [elem1, elem2]
        page.test_elems = "XXX"
        # NOTE(review): called_once_with() -- always truthy; see note above.
        assert webdriver.find_elements.called_once_with(By.CSS_SELECTOR, 'foo')
        elem1.send_keys.assert_called_once_with('XXX')
        elem2.send_keys.assert_called_once_with('XXX')

    def test_cannot_set_multi_with_context(self, webdriver):
        class TestPage(PageObject):
            test_elem = MultiPageElement(css='foo', context=True)

        page = TestPage(webdriver=webdriver)
        with pytest.raises(ValueError) as e:
            page.test_elem = 'xxx'
        assert "doesn't support elements with context" in e.value.args[0]

    def test_cannot_set_multi_not_found(self, webdriver):
        class TestPage(PageObject):
            test_elem = MultiPageElement(css='foo')

        page = TestPage(webdriver=webdriver)
        webdriver.find_elements.side_effect = NoSuchElementException
        with pytest.raises(ValueError) as e:
            page.test_elem = 'xxx'
        assert "no elements found" in e.value.args[0]
class TestRootURI:
    """root_uri resolution: constructor arg, webdriver attribute, get()."""

    class TestPage(PageObject):
        pass

    def test_from_constructor(self, webdriver):
        page = self.TestPage(webdriver=webdriver, root_uri="http://example.com")
        assert page.root_uri == 'http://example.com'

    def test_from_webdriver(self):
        # When no root_uri is passed, it is taken from the webdriver.
        webdriver = mock.Mock(spec=WebDriver, root_uri="http://example.com/foo")
        page = self.TestPage(webdriver=webdriver)
        assert page.root_uri == 'http://example.com/foo'

    def test_get(self, webdriver):
        page = self.TestPage(webdriver=webdriver, root_uri="http://example.com")
        page.get('/foo/bar')
        # NOTE(review): called_once_with() is not a real Mock assertion --
        # always truthy; should presumably be
        # webdriver.get.assert_called_once_with(...).
        assert webdriver.get.called_once_with("http://example.com/foo/bar")

    def test_get_no_root(self, webdriver):
        page = self.TestPage(webdriver=webdriver)
        page.get('/foo/bar')
        # NOTE(review): same always-truthy called_once_with() issue as above.
        assert webdriver.get.called_once_with("/foo/bar")
| 34.538462 | 80 | 0.669265 | 5,853 | 0.931117 | 0 | 0 | 71 | 0.011295 | 0 | 0 | 510 | 0.081133 |
6cf7f38147c6ceac137a1dddbaf8ea9d06083c5e | 1,355 | py | Python | flaskr/views/user_profile.py | qbbr/blog-backend-flask | b51555fff2ae1823ea6a61ca8064887e88479445 | [
"MIT"
] | null | null | null | flaskr/views/user_profile.py | qbbr/blog-backend-flask | b51555fff2ae1823ea6a61ca8064887e88479445 | [
"MIT"
] | null | null | null | flaskr/views/user_profile.py | qbbr/blog-backend-flask | b51555fff2ae1823ea6a61ca8064887e88479445 | [
"MIT"
] | null | null | null | from flask import Blueprint, jsonify, request
from flask_jwt_extended import jwt_required, get_jwt_identity
from marshmallow import ValidationError
from flaskr import db
from flaskr.helpers import error_bad_request, error_validation
from flaskr.models import User
from flaskr.schemas import UserProfileSchema
bp = Blueprint('user_profile', __name__)
@bp.route('/user/profile/', methods=['GET'])
@jwt_required
def get_user_profile():
    """Return the authenticated user's profile as JSON (404 if missing)."""
    username = get_jwt_identity()
    current_user = User.query.filter_by(username=username).first_or_404()
    payload = UserProfileSchema().dump(current_user)
    return jsonify(payload), 200  # OK
@bp.route('/user/profile/', methods=['PUT', 'PATCH'])
@jwt_required
def update_user_profile():
    """Partially update the authenticated user's profile from a JSON body."""
    if not request.is_json:
        return error_bad_request()
    payload = request.get_json()
    current_user = User.query.filter_by(
        username=get_jwt_identity()).first_or_404()
    try:
        # partial=True: only the fields present in the payload are
        # validated and applied onto the existing instance.
        UserProfileSchema().load(payload, instance=current_user, partial=True)
    except ValidationError as err:
        return error_validation(err.messages)
    db.session.add(current_user)
    db.session.commit()
    return '', 204  # No Content
@bp.route('/user/profile/', methods=['DELETE'])
@jwt_required
def delete_user_profile():
    """Delete the authenticated user's account (404 if it does not exist)."""
    current_user = User.query.filter_by(
        username=get_jwt_identity()).first_or_404()
    db.session.delete(current_user)
    db.session.commit()
    return '', 204  # No Content
| 31.511628 | 79 | 0.731365 | 0 | 0 | 0 | 0 | 994 | 0.733579 | 0 | 0 | 119 | 0.087823 |
6cf800fcc4613d29a520a2ec5bf292011684a4b3 | 6,394 | py | Python | pointerTable.py | dragonc0/LADXR | 31072400e3f01fdd2449971a8a53d39d6e349abe | [
"MIT"
] | null | null | null | pointerTable.py | dragonc0/LADXR | 31072400e3f01fdd2449971a8a53d39d6e349abe | [
"MIT"
] | null | null | null | pointerTable.py | dragonc0/LADXR | 31072400e3f01fdd2449971a8a53d39d6e349abe | [
"MIT"
] | null | null | null | import copy
import struct
class PointerTable:
END_OF_DATA = (0xff, )
"""
Class to manage a list of pointers to data objects
Can rewrite the rom to modify the data objects and still keep the pointers intact.
"""
def __init__(self, rom, info):
assert "count" in info
assert "pointers_bank" in info
assert "pointers_addr" in info
assert ("banks_bank" in info and "banks_addr" in info) or ("data_bank" in info)
self.__info = info
self.__data = []
self.__banks = []
self.__storage = []
count = info["count"]
addr = info["pointers_addr"]
pointers_bank = rom.banks[info["pointers_bank"]]
if "data_addr" in info:
pointers_raw = []
for n in range(count):
pointers_raw.append(info["data_addr"] + pointers_bank[addr + n] * info["data_size"])
else:
pointers_raw = struct.unpack("<" + "H" * count, pointers_bank[addr:addr+count*2])
if "data_bank" in info:
banks = [info["data_bank"]] * count
else:
addr = info["banks_addr"]
banks = rom.banks[info["banks_bank"]][addr:addr+count]
for n in range(count):
bank = banks[n] & 0x3f
pointer = pointers_raw[n]
pointer &= 0x3fff
self.__data.append(self._readData(rom, bank, pointer))
self.__banks.append(bank)
while self.__mergeStorage():
pass
self.__storage.sort(key=lambda n: n["start"])
if "claim_storage_gaps" in info and info["claim_storage_gaps"]:
self.__storage = [{"bank": self.__storage[0]["bank"], "start": self.__storage[0]["start"], "end": self.__storage[-1]["end"]}]
if "expand_to_end_of_bank" in info and info["expand_to_end_of_bank"]:
for st in self.__storage:
expand = True
for st2 in self.__storage:
if st["bank"] == st2["bank"] and st["end"] < st2["end"]:
expand = False
if expand:
st["end"] = 0x4000
# for s in sorted(self.__storage, key=lambda s: (s["bank"], s["start"])):
# print(self.__class__.__name__, s)
def __setitem__(self, item, value):
self.__data[item] = value
def __getitem__(self, item):
return self.__data[item]
def __len__(self):
return len(self.__data)
def store(self, rom):
storage = copy.deepcopy(self.__storage)
pointers_bank = self.__info["pointers_bank"]
pointers_addr = self.__info["pointers_addr"]
done = {}
for st in storage:
done[st["bank"]] = {}
for n, s in enumerate(self.__data):
if isinstance(s, int):
pointer = s
else:
s = bytes(s)
bank = self.__banks[n]
if s in done[bank]:
pointer = done[bank][s]
assert rom.banks[bank][pointer:pointer+len(s)] == s
else:
my_storage = None
for st in storage:
if st["end"] - st["start"] >= len(s) and st["bank"] == bank:
my_storage = st
break
assert my_storage is not None, "Not enough room in storage... %s" % (storage)
pointer = my_storage["start"]
my_storage["start"] = pointer + len(s)
rom.banks[bank][pointer:pointer+len(s)] = s
if "data_size" not in self.__info:
# aggressive de-duplication.
for skip in range(len(s)):
done[bank][s[skip:]] = pointer + skip
done[bank][s] = pointer
if "data_addr" in self.__info:
offset = pointer - self.__info["data_addr"]
if "data_size" in self.__info:
assert offset % self.__info["data_size"] == 0
offset //= self.__info["data_size"]
rom.banks[pointers_bank][pointers_addr + n] = offset
else:
rom.banks[pointers_bank][pointers_addr+n*2] = pointer & 0xff
rom.banks[pointers_bank][pointers_addr+n*2+1] = ((pointer >> 8) & 0xff) | 0x40
space_left = sum(map(lambda n: n["end"] - n["start"], storage))
# print(self.__class__.__name__, "Space left:", space_left)
    def _readData(self, rom, bank_nr, pointer):
        """Read one data blob starting at `pointer` in bank `bank_nr`.

        Fixed-size tables use info["data_size"]; otherwise the blob runs up to
        (and including) the first END_OF_DATA terminator byte.  The bytes that
        were occupied are registered as reusable free storage.
        """
        bank = rom.banks[bank_nr]
        start = pointer
        if "data_size" in self.__info:
            pointer += self.__info["data_size"]
        else:
            # Scan for the terminator, then include it in the blob.
            while bank[pointer] not in self.END_OF_DATA:
                pointer += 1
            pointer += 1
        self._addStorage(bank_nr, start, pointer)
        return bank[start:pointer]
def _addStorage(self, bank, start, end):
for n, data in enumerate(self.__storage):
if data["bank"] == bank:
if data["start"] == end:
data["start"] = start
return
if data["end"] == start:
data["end"] = end
return
if data["start"] <= start and data["end"] >= end:
return
self.__storage.append({"bank": bank, "start": start, "end": end})
def __mergeStorage(self):
for n in range(len(self.__storage)):
n_end = self.__storage[n]["end"]
n_start = self.__storage[n]["start"]
for m in range(len(self.__storage)):
if m == n or self.__storage[n]["bank"] != self.__storage[m]["bank"]:
continue
m_end = self.__storage[m]["end"]
m_start = self.__storage[m]["start"]
if m_start - 1 <= n_end <= m_end:
self.__storage[n]["start"] = min(self.__storage[n]["start"], self.__storage[m]["start"])
self.__storage[n]["end"] = self.__storage[m]["end"]
self.__storage.pop(m)
return True
return False
| 39.9625 | 138 | 0.495777 | 6,360 | 0.994683 | 0 | 0 | 0 | 0 | 0 | 0 | 1,046 | 0.163591 |
6cf92cfddfadd9d50e03585da0f769ea7217673f | 2,092 | py | Python | userbot/modules/spam_protect.py | TAMILVIP007/javes-3.0 | d9238785fa2d79740bbb526aca92455dbccb3838 | [
"MIT"
] | 1 | 2021-05-06T18:30:50.000Z | 2021-05-06T18:30:50.000Z | userbot/modules/spam_protect.py | hellboi-atul/javes-3.0 | 8777d482bd1ee877a96332a2cd84d880c151fa43 | [
"MIT"
] | null | null | null | userbot/modules/spam_protect.py | hellboi-atul/javes-3.0 | 8777d482bd1ee877a96332a2cd84d880c151fa43 | [
"MIT"
] | null | null | null | #Telegram @javes05
import spamwatch, os, asyncio
from telethon import events
from userbot import client as javes, JAVES_NAME, JAVES_MSG
JAVES_NNAME = str(JAVES_NAME) if JAVES_NAME else str(JAVES_MSG)
swapi = os.environ.get("SPAMWATCH_API_KEY", None)
SPAM_PROTECT = os.environ.get("SPAM_PROTECT", None)
SPAMWATCH_SHOUT = os.environ.get("SPAMWATCH_SHOUT", None)
W_CHAT = set(int(x) for x in os.environ.get("WHITE_CHATS", "").split())
if SPAM_PROTECT:
@javes.on(events.ChatAction)
async def handler(rkG):
if rkG.user_joined or rkG.user_added and not rkG.chat_id in W_CHAT and SPAM_PROTECT and swapi and not rkG.is_private:
chat = await rkG.get_chat()
admin = chat.admin_rights
creator = chat.creator
if admin or creator:
return
sw = spamwatch.Client(swapi)
guser = await rkG.get_user()
try:
sswatch = sw.get_ban(guser.id)
except:
return
if sswatch:
try:
await javes.edit_permissions(rkG.chat_id, guser.id, view_messages=False)
action = "`ban`" ; return await rkG.reply(f"`{JAVES_NNAME}:` ** This user is detected as spam by SpamWatch!!** \n"
f"**Reason ** : `{sswatch.reason}`\n"
f"**Victim Id**: [{guser.id}](tg://user?id={guser.id})\n"
f"**Action ** : {action}")
except:
return
#else:
#if SPAMWATCH_SHOUT:
#action = "`Reported to `@admins" ; return await rkG.reply(f"`{JAVES_NNAME}:` ** This user is detected as spam by SpamWatch!!** \n"
#f"**Reason ** : `{sswatch.reason}`\n"
#f"**Victim Id**: [{guser.id}](tg://user?id={guser.id})\n"
#f"**Action ** : {action}")
| 44.510638 | 174 | 0.502868 | 0 | 0 | 0 | 0 | 1,608 | 0.768642 | 1,578 | 0.754302 | 609 | 0.291109 |
6cf97a5e7fb1af82d37f64beb54252478889c38a | 2,483 | py | Python | populate_app_db.py | nshams365/item-catalog | 7811cbef335a07791807ea081168dcdc043726da | [
"MIT"
] | null | null | null | populate_app_db.py | nshams365/item-catalog | 7811cbef335a07791807ea081168dcdc043726da | [
"MIT"
] | null | null | null | populate_app_db.py | nshams365/item-catalog | 7811cbef335a07791807ea081168dcdc043726da | [
"MIT"
] | null | null | null | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from setup_app_db import Title, User, Base
engine = create_engine('sqlite:///book_catalog.db')
Base.metadata.bind = engine

DBSession = sessionmaker(bind=engine)
session = DBSession()

# Seed the default user that owns every sample title below (user_id=1).
seed_user = User(u_name="Noor Shams", email="nshams365@gmail.com", picture="https://media.licdn.com/dms/image/C4D03AQFzLp_GzKOEmw/profile-displayphoto-shrink_200_200/0?e=1565827200&v=beta&t=Rbr-v9IMIxCNfIXdT_bN-0ZY6BFZhIay7fzid-HHmWo")
session.add(seed_user)
session.commit()

# Sample titles as (t_name, author_name, description, category_id) rows.
_SEED_TITLES = [
    ("isms Understanding Art", "Stephen Little",
     "A handy guide to a wide range of art 'isms'. Herbert Press London.", 1),
    ("Grow Your Own Crops in Pots", "Kay Maguire",
     "30 steps by step projects using vegetables, fruit and herbs.", 3),
    ("Bad Science", "Ben Goldacre",
     "A brilliant book that debunks medical nonsense", 13),
    ("The Naked Trader", "Robbie Burns",
     "How anyone can make money trading shares.", 13),
    ("Digital Fortress", "Dan Brown", " ", 21),
    ("To Kill a Mockingbird", "Harper Lee", "", 23),
    ("Fifteen Poets", "Various",
     "The best work of the greate masters of English poetry from Chaucer to Matthew Arnold.", 19),
    ("Chings Fast Food", "Ching-He Huang",
     "110 quick and healthy Chinese favorites.", 27),
    ("The Joy of Less", "Francine Jay",
     "A minimalist guide to declutter, organize and simplify.", 29),
]

# Insert and commit each title one at a time, matching the original script's
# one-commit-per-row behaviour (keeps autoincrement ids in the same order).
for t_name, author_name, description, category_id in _SEED_TITLES:
    session.add(Title(t_name=t_name,
                      author_name=author_name,
                      description=description,
                      category_id=category_id,
                      user_id=1))
    session.commit()
| 26.698925 | 232 | 0.730165 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 967 | 0.389448 |
6cfcc7a76feb1abd43a64d6e085b49c8a70e37fa | 755 | py | Python | coupon/logger.py | thewizardplusplus/coupon | 96ca82a3838c680e588896c50977912cbd2f2258 | [
"MIT"
] | null | null | null | coupon/logger.py | thewizardplusplus/coupon | 96ca82a3838c680e588896c50977912cbd2f2258 | [
"MIT"
] | 22 | 2017-02-14T20:35:40.000Z | 2017-07-01T01:57:15.000Z | coupon/logger.py | thewizardplusplus/coupon | 96ca82a3838c680e588896c50977912cbd2f2258 | [
"MIT"
] | null | null | null | import logging
import termcolor
class Formatter(logging.Formatter):
    """logging.Formatter that colorizes bracketed level tags in a record."""

    # Literal level tag -> termcolor color name.
    _LEVELS_COLORS = {
        '[INFO]': 'green',
        '[WARNING]': 'yellow',
        '[ERROR]': 'red',
    }

    def format(self, record):
        """Format the record, then wrap every known level tag in ANSI colors."""
        text = super().format(record)
        for tag, tint in self._LEVELS_COLORS.items():
            colored_tag = termcolor.colored(tag, tint)
            text = text.replace(tag, colored_tag)
        return text
def get_logger():
    """Return the logger shared by this package."""
    logger = logging.getLogger(__package__)
    return logger
def init_logger():
    """Attach a colorized stream handler to the package logger at INFO level."""
    fmt = termcolor.colored('%(asctime)s', 'grey') + ' [%(levelname)s] %(message)s'
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(Formatter(fmt=fmt))
    logger = get_logger()
    logger.addHandler(stream_handler)
    logger.setLevel(logging.INFO)
| 24.354839 | 77 | 0.619868 | 379 | 0.501987 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.128477 |
6cff52c06a97dbdd16e1411fdd11b804c3f35e61 | 1,373 | py | Python | autonomia/features/weather.py | jamesperes/autonomiaBot | bbc90497d29fa16a85477281f87ceaeb7c3e55fc | [
"MIT"
] | 8 | 2018-03-22T21:46:30.000Z | 2021-04-07T07:06:05.000Z | autonomia/features/weather.py | jamesperes/autonomiaBot | bbc90497d29fa16a85477281f87ceaeb7c3e55fc | [
"MIT"
] | 42 | 2018-03-23T17:20:38.000Z | 2021-06-02T01:19:51.000Z | autonomia/features/weather.py | jamesperes/autonomiaBot | bbc90497d29fa16a85477281f87ceaeb7c3e55fc | [
"MIT"
] | 3 | 2018-03-26T20:30:53.000Z | 2020-01-18T13:52:32.000Z | import json
from urllib import parse, request
from telegram.ext import CallbackContext, CommandHandler
from telegram.update import Update
from autonomia.core import bot_handler
BASE_URL = "https://query.yahooapis.com/v1/public/yql?"
def _get_weather_info(location):
    """Query the Yahoo YQL weather API for `location`.

    Returns the parsed "results" payload, or None (implicitly) when the
    query matched nothing.
    """
    yql = (
        "select * from weather.forecast where woeid in (select woeid "
        'from geo.places(1) where text="%s") AND u="c"' % location
    )
    url = BASE_URL + parse.urlencode({"q": yql}) + "&format=json"
    raw = request.urlopen(url).read()
    payload = json.loads(raw)
    if payload["query"]["count"] > 0:
        return payload["query"]["results"]
def cmd_weather(update: Update, context: CallbackContext):
    """Handle /weather: reply with current conditions for the requested
    location, defaulting to dublin when no arguments were given."""
    args = context.args or ["dublin"]
    location = " ".join(args)
    weather_info = _get_weather_info(location)
    if not weather_info:
        # Lookup failed or returned nothing: stay silent.
        return
    condition = weather_info["channel"]["item"]["condition"]
    reply = "{location}, {date}, {temp}°C, {sky}".format(
        location=location.capitalize(),
        date=condition["date"],
        temp=condition["temp"],
        sky=condition["text"],
    )
    update.message.reply_text(reply)
@bot_handler
def weather_factory():
    """
    /weather - show the current weather conditions for a given location
    """
    # NOTE(review): the docstring above may be surfaced by bot_handler as the
    # command's user-facing help text -- confirm before rewording it.
    # pass_args=True makes the handler receive the command's arguments.
    return CommandHandler("weather", cmd_weather, pass_args=True)
| 28.020408 | 73 | 0.661326 | 0 | 0 | 0 | 0 | 189 | 0.137555 | 0 | 0 | 385 | 0.280204 |
6cffbd65c3259cdce6bb8d63fcc07acc7911f4c6 | 5,733 | py | Python | 21-fs-ias-lec/13-RaptorCode/src/decoder.py | Kyrus1999/BACnet | 5be8e1377252166041bcd0b066cce5b92b077d06 | [
"MIT"
] | 8 | 2020-03-17T21:12:18.000Z | 2021-12-12T15:55:54.000Z | 21-fs-ias-lec/13-RaptorCode/src/decoder.py | Kyrus1999/BACnet | 5be8e1377252166041bcd0b066cce5b92b077d06 | [
"MIT"
] | 2 | 2021-07-19T06:18:43.000Z | 2022-02-10T12:17:58.000Z | 21-fs-ias-lec/13-RaptorCode/src/decoder.py | Kyrus1999/BACnet | 5be8e1377252166041bcd0b066cce5b92b077d06 | [
"MIT"
] | 25 | 2020-03-20T09:32:45.000Z | 2021-07-18T18:12:59.000Z | import numpy as np
import utils
from utils import NUMBER_OF_ENCODED_BITS, VECTORLENGTH, Z, TransmissionPackage
class Decoder:
    """Raptor-code decoder.

    Collects (coefficient, value) packets per information/chunk ID and, once
    enough packets have arrived, solves for the source bits with a
    peeling/Gaussian-style elimination over GF(2).
    """

    def __init__(self):
        # Received packets: [(informationID, [[(a, y)]])]; the index into the
        # inner list is the chunkID.
        self._inputData = []
        # Decoded chunks: [(informationID, [decodedBinaryVector])]; the index
        # into the inner list is the chunkID.
        self._decodedData = []
        # Per bit position, the indices of packets whose coefficient vector
        # has a 1 there: [(informationID, [[packet indices]])].
        self._positionsInCoefVector = []
        # Count of accepted (non-duplicate) packets, for diagnostics.
        self._counter = 0
        # Lazily build the shared precode vectors Z on first use.
        if len(Z) == 0:
            utils.calcZ()

    def _decode(self, data, coeffPositions):
        """Try to solve for the source bit vector from the received pairs.

        Returns the decoded int vector, or None (implicitly) when no
        degree-one row exists yet, i.e. more packets are needed.
        """
        # x holds the recovered source bits; -1 marks a still-unknown bit.
        x = np.empty(VECTORLENGTH)
        x.fill(-1)
        x = x.astype(int)
        # y is the received-value vector, one entry per coefficient row.
        y = []
        # Build the coefficient matrix row by row from the received packets.
        allCoefficients = []
        for i in range(len(data)):
            allCoefficients.append(data[i][0])
            y.append(data[i][1])
        # Raptor precode: append the fixed Z coefficient rows with value 0.
        for z in Z:
            allCoefficients.append(z)
            y.append(0)
        matrix = np.array(allCoefficients)
        if len(data) < NUMBER_OF_ENCODED_BITS:
            print("Not enough Information")
            return
        for j in range(VECTORLENGTH):
            # Step 1: find a row with exactly one remaining coefficient.
            numberOfCoefficient = np.sum(matrix, 1)  # coefficients per row
            indexOfYi = utils.findFirstNumber(numberOfCoefficient, 1)  # first degree-one row
            if indexOfYi == -1:  # no such row: cannot make progress yet
                return
            indexOfXj = utils.findFirstNumber(matrix[indexOfYi], 1)  # column of that single 1
            # Step 2: a degree-one row directly reveals bit xj.
            x[indexOfXj] = y[indexOfYi]
            # Step 3: XOR xj out of every row that references it.
            for i in coeffPositions[indexOfXj]:
                matrix[i, indexOfXj] = 0
                y[i] = np.bitwise_xor(y[i], x[indexOfXj])
            coeffPositions[indexOfXj] = []
            """for i in range(len(y)):
                if BinaryVector.checkNumberAtPosition(matrix[i], 1, indexOfXj):
                    matrix[i, indexOfXj] = 0
                    y[i] = np.bitwise_xor(y[i], x[indexOfXj])"""
            # Step 4: stop as soon as no bit is left unknown.
            if utils.findFirstNumber(x, -1) == -1:
                return x

    def decode(self, tp: TransmissionPackage):
        """Feed one received TransmissionPackage into the decoder.

        Returns the fully reassembled payload (bytes built by
        utils.bitArrayToBytes) once every chunk of the information has been
        decoded; otherwise returns None (implicitly).
        """
        # Unpack the package; the int coefficient is expanded to a bit vector.
        informationID = tp.informationID
        chunkID = tp.chunkID
        size = tp.size
        ayTuple = (utils.intToBin(tp.ayTuple[0], VECTORLENGTH), tp.ayTuple[1])
        # Drop the packet if this chunk has already been decoded.
        decodedID = -1
        for i in range(len(self._decodedData)):
            if self._decodedData[i][0] == informationID:
                decodedID = i
                if len(self._decodedData[i][1][chunkID]) != 0:
                    return
        self._counter += 1
        # Locate this information's slot in the in-progress buffers.
        index = -1
        for i in range(len(self._inputData)):
            if self._inputData[i][0] == informationID:
                index = i
                break
        # First packet for this information: create empty per-chunk buffers.
        # NOTE: `index` stays -1 in that case, which (via negative indexing)
        # addresses the slot just appended below.
        if index == -1:
            packages = []
            decoded = []
            coeff = []
            for i in range(size):
                packages.append([])
                decoded.append([])
                temp = []
                # Inner loop reuses `i`; harmless, the outer `for` rebinds it.
                for i in range(VECTORLENGTH):
                    temp.append([])
                coeff.append(temp)
            self._inputData.append((informationID, packages))
            self._decodedData.append((informationID, decoded))
            self._positionsInCoefVector.append((informationID, coeff))
        # Buffer the packet for its chunk.
        self._inputData[index][1][chunkID].append(ayTuple)
        # Record which packet rows have a 1 at each bit position.
        yLen = len(self._inputData[index][1][chunkID])
        for i in range(VECTORLENGTH):
            if ayTuple[0][i] == 1:
                self._positionsInCoefVector[index][1][chunkID][i].append(yLen - 1)
        # Not enough packets yet to attempt decoding this chunk.
        if len(self._inputData[index][1][chunkID]) < NUMBER_OF_ENCODED_BITS:
            return
        # Attempt decoding; pass a copy so the position index survives failure.
        decoded = self._decode(self._inputData[index][1][chunkID],
                               self._positionsInCoefVector[index][1][chunkID].copy())
        # Decoding failed: wait for more packets.
        if decoded is None:
            return
        # Save the decoded chunk.
        self._decodedData[decodedID][1][chunkID] = decoded
        # Wait until every chunk of this information has been decoded.
        for i in range(size):
            if len(self._decodedData[decodedID][1][i]) == 0:
                return
        # Trim each chunk to its payload bits and convert to bytes.
        xVector = self._decodedData[decodedID][1][0][0:NUMBER_OF_ENCODED_BITS]
        byte = utils.bitArrayToBytes(xVector)
        for i in range(1, size):
            xVector = self._decodedData[decodedID][1][i][0:NUMBER_OF_ENCODED_BITS]
            byte += utils.bitArrayToBytes(xVector)
        # All chunks assembled: free this information's buffers.
        self._inputData.pop(index)
        self._decodedData.pop(decodedID)
        self._positionsInCoefVector.pop(index)
        # Return the reassembled data.
        return byte
| 36.056604 | 120 | 0.568114 | 5,616 | 0.979592 | 0 | 0 | 0 | 0 | 0 | 0 | 1,590 | 0.277342 |
9f0096648e2874f2a1dd4b98ee0c9a0e98ee7fdb | 2,006 | py | Python | sleepcounter/hardware_a/mocks.py | daveshed/sleepcounter-hardware-a | 8f8729b9e29b37d3f12b156dfd5b2cb277b1524d | [
"MIT"
] | null | null | null | sleepcounter/hardware_a/mocks.py | daveshed/sleepcounter-hardware-a | 8f8729b9e29b37d3f12b156dfd5b2cb277b1524d | [
"MIT"
] | null | null | null | sleepcounter/hardware_a/mocks.py | daveshed/sleepcounter-hardware-a | 8f8729b9e29b37d3f12b156dfd5b2cb277b1524d | [
"MIT"
] | null | null | null | """Mock hardware implementation"""
import logging
from stage import exceptions
from unittest.mock import Mock
LOGGER = logging.getLogger("mock")
class MockStage:
    """A mock implementation of a stepper-motor driven linear stage."""

    MAX_POS = 100
    MIN_POS = 0

    def __init__(self):
        # Start at the minimum position, then run the normal homing routine.
        self._position = __class__.MIN_POS
        self.home()

    def home(self):
        """Move to the home (minimum) position."""
        LOGGER.info("Homing stage")
        self._position = __class__.MIN_POS

    def end(self):
        """Move to the end (maximum) position."""
        # BUGFIX: this previously logged "Moving to home position", which
        # contradicted what the method actually does.
        LOGGER.info("Moving to end position")
        self._position = self.max

    @property
    def max(self):
        """Return the maximum position index."""
        return __class__.MAX_POS

    @property
    def position(self):
        """Return the current position index."""
        return self._position

    @position.setter
    def position(self, request):
        """Move to `request`; raises OutOfRangeError outside [MIN_POS, MAX_POS]."""
        LOGGER.info("Setting position to %s", request)
        too_large = request > __class__.MAX_POS
        too_small = request < __class__.MIN_POS
        if too_large or too_small:
            raise exceptions.OutOfRangeError(
                "Cannot go to position {}".format(request))
        self._position = request
class Matrix:
    """
    A mock for an led matrix device
    """
    _width = 32
    _height = 8
    _mode = "1"

    def __init__(self):
        # BUGFIX: was `_LOGGER`, which is undefined in this module (it defines
        # `LOGGER`), so instantiating Matrix raised NameError.
        LOGGER.info("Created mock led matrix device %r", self)
        self.display = Mock()

    @property
    def width(self):
        """
        Width of the display in pixels
        """
        return Matrix._width

    @property
    def height(self):
        """
        Height of the display in pixels
        """
        return Matrix._height

    @property
    def mode(self):
        """
        Returns mode which is needed for image drawing reasons
        """
        return Matrix._mode

    def clear(self):
        """
        Clear the display
        """
        # BUGFIX: same `_LOGGER` -> `LOGGER` correction as in __init__.
        LOGGER.info("Clearing device %r", self)
| 23.057471 | 69 | 0.589232 | 1,854 | 0.924227 | 0 | 0 | 985 | 0.491027 | 0 | 0 | 658 | 0.328016 |
9f01d83bfd543dbc5808bc7589718084e0296fe1 | 6,861 | py | Python | my_work/ch2_lists_and_pointer_structures/singly_linked_lists.py | gabrielavirna/PythonDataStructuresAndAlgorithms | d406dc247a8ff2f39ee5aac2a398027298ffbf69 | [
"MIT"
] | 1 | 2019-01-22T04:59:02.000Z | 2019-01-22T04:59:02.000Z | my_work/ch2_lists_and_pointer_structures/singly_linked_lists.py | gabrielavirna/python_data_structures_and_algorithms | d406dc247a8ff2f39ee5aac2a398027298ffbf69 | [
"MIT"
] | null | null | null | my_work/ch2_lists_and_pointer_structures/singly_linked_lists.py | gabrielavirna/python_data_structures_and_algorithms | d406dc247a8ff2f39ee5aac2a398027298ffbf69 | [
"MIT"
] | null | null | null | """
Singly linked lists
-------------------
- is a list with only 1 pointer between 2 successive nodes
- It can only be traversed in a single direction: from the 1st node in the list to the last node
Several problems
----------------
It requires too much manual work by the programmer
It is too error-prone (this is a consequence of the first point)
Too much of the inner workings of the list is exposed to the programmer
"""
"""Singly linked list implementation"""
class Node:
    """A single linked-list element: a data payload plus a next-pointer."""

    def __init__(self, data=None):
        # `next` stays None until the node is linked into a chain.
        self.next = None
        self.data = data
# Build a three-node chain by hand: "eggs" -> "spam" -> "ham".
first = Node("eggs")
second = Node("spam")
third = Node("ham")
first.next = second
second.next = third

# Traverse from the first node, printing each payload until the chain ends.
node = first
while node:
    print(node.data)
    node = node.next
print("\n")
"""
Singly linked list class
-------------------------
Create a very simple class to hold our list. Start with a constructor that holds a reference to the very first node in
the list. Since this list is initially empty, start by setting this reference to None:
Append
-------
- append items to the list(insert operation):
- hide away the Node class. The user of our list class should really never have to interact with Node objects.
- Big problem: it has to traverse the entire list to find the insertion point => Ok few items in the list, but not to
add thousands of items. Each append will be slightly slower than the previous one
- Worst case running time of the append operation: O(n)
Faster append operation
--------------------------
- store, not only a reference to the first node in the list, but also a reference to the last node.
- Make sure the previous last node points to the new node, that is about to be appended to the list.
=> quickly append a new node at the end of the list.
- Reduced worst case running time: O(1).
Size of the list
-----------------
- counting the number of nodes: traverse the entire list and increase a counter as we go along
- works but list traversal is potentially an expensive operation that we should avoid
- Worst case running time: O(n) because of using a loop to count the number of nodes in the list
Better size of the list
-----------------------
- add a size member to the SinglyLinkedList class, initializing it to 0 in the constructor.
- Then increment size by one in the append method
- Reduced the worst case running time: O(1), because we are now only reading the size attribute of the node object
Improving list traversal
-------------------------
Still exposed to the Node class; need to use node.data to get the contents of the node and node.next to get next node.
But client code should never need to interact with Node objects => Create a method that returns a generator: iter()
Deleting nodes
--------------
Decide how to select a node for deletion: by an index number/ by the data the node contains? Here delete by the data
- to delete a node that is between 2 other nodes, make the previous node point directly to the successor of the deleted node
- O(n) to delete a node
List search
------------
- check whether a list contains an item.
- each pass of the loop compares the current data to the data being searched for; if match: True returned, else: False
Clear a list
------------
Clear the pointers head and tail by setting them to None
By orphaning all the nodes at the tail and head pointers of the list => effect of orphaning all the nodes in between
"""
class SinglyLinkedListSlow:
    """Naive singly linked list: both append and size walk the whole chain.

    Note: `tail` points at the FIRST node of the chain (kept for the demo).
    """

    def __init__(self):
        self.tail = None  # first node of the chain; None while empty

    def append(self, data):
        """Append `data` at the end of the list.  Worst case O(n)."""
        new_node = Node(data)
        if self.tail is None:
            # Empty list: the new node becomes the first node.
            self.tail = new_node
            return
        # Walk to the last node, then hook the new node after it.
        cursor = self.tail
        while cursor.next:
            cursor = cursor.next
        cursor.next = new_node

    def size(self):
        """Count the nodes by walking the chain.  Worst case O(n)."""
        total = 0
        cursor = self.tail
        while cursor:
            total += 1
            cursor = cursor.next
        return total
#############################################################################
class SinglyLinkedList:
    """Singly linked list with O(1) append and an O(1) size attribute.

    NOTE: naming is inverted relative to convention in this file:
    `tail` is the FIRST node (traversal start) and `head` is the LAST
    node (append point).
    """

    def __init__(self):
        self.head = None  # last node of the chain (append point)
        self.tail = None  # first node of the chain (traversal start)
        self.size = 0     # node count, maintained incrementally (O(1) reads)

    # Worst case running time: O(1)
    def append(self, data):
        """Append `data` at the end of the list."""
        node = Node(data)
        if self.head:
            self.head.next = node
            self.head = node
        else:
            # Empty list: the single node is both first and last.
            self.tail = node
            self.head = node
        self.size += 1

    def delete(self, data):
        """Remove the first node whose payload equals `data`.  O(n)."""
        current = self.tail
        prev = self.tail
        while current:
            if current.data == data:
                # BUGFIX: keep `head` (the last-node pointer) valid.  The
                # original never updated it, so deleting the last node left
                # `head` dangling and corrupted every subsequent append.
                if current == self.head:
                    self.head = None if current == self.tail else prev
                if current == self.tail:
                    # Unlink the first node.
                    self.tail = current.next
                else:
                    # Bypass the node between prev and its successor.
                    prev.next = current.next
                self.size -= 1
                return
            prev = current
            current = current.next

    def iter(self):
        """Yield each node's payload from first to last."""
        current = self.tail
        while current:
            val = current.data
            current = current.next
            yield val

    def search(self, data):
        """Return True if any node's payload equals `data`."""
        for node_data in self.iter():
            if data == node_data:
                return True
        return False

    def clear(self):
        """Drop all nodes by orphaning the chain from both ends."""
        self.tail = None
        self.head = None
        # BUGFIX: the original left `size` at its old value after clearing.
        self.size = 0
# Demo: populate both list implementations with the same three words.
words1 = SinglyLinkedListSlow()
words2 = SinglyLinkedList()
for word in ('egg', 'ham', 'spam'):
    words1.append(word)
    words2.append(word)

# Manual traversal of the slow list via raw node pointers.
node = words1.tail
while node:
    print(node.data)
    node = node.next
print("\n")

# Generator-based traversal of the fast list.
for word in words2.iter():
    print(word)
print("\n")

# Size: computed by traversal (slow) vs maintained attribute (fast).
print(words1.size())
print(words2.size)
print("\n")

# Delete a middle node, then show the remaining items and the new size.
words2.delete('ham')
for word in words2.iter():
    print(word)
print(words2.size)
print('\n')

# Membership test.
print(words2.search('spam'))
| 29.830435 | 119 | 0.637079 | 2,561 | 0.373269 | 154 | 0.022446 | 0 | 0 | 0 | 0 | 4,331 | 0.631249 |
9f01e6da899f6d81dfa4b4fe9c15bd9084a5870b | 78 | py | Python | codechef/FLOW004.py | andraantariksa/code-exercise-answer | 69b7dbdc081cdb094cb110a72bc0c9242d3d344d | [
"MIT"
] | 1 | 2019-11-06T15:17:48.000Z | 2019-11-06T15:17:48.000Z | codechef/FLOW004.py | andraantariksa/code-exercise-answer | 69b7dbdc081cdb094cb110a72bc0c9242d3d344d | [
"MIT"
] | null | null | null | codechef/FLOW004.py | andraantariksa/code-exercise-answer | 69b7dbdc081cdb094cb110a72bc0c9242d3d344d | [
"MIT"
] | 1 | 2018-11-13T08:43:26.000Z | 2018-11-13T08:43:26.000Z | for _ in range(int(input())):
n = input()
print(int(n[0])+int(n[-1]))
| 19.5 | 31 | 0.512821 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9f02328459eb7de455227b7b8b982b827b8b597d | 200 | py | Python | clouds/experiments/__init__.py | jchen42703/reproducing-cloud-3rd-place | 25571f53efd48f68735d7fe2991e3ad783cbd4b1 | [
"Apache-2.0"
] | 1 | 2020-03-22T19:42:38.000Z | 2020-03-22T19:42:38.000Z | clouds/experiments/__init__.py | jchen42703/reproducing-cloud-3rd-place | 25571f53efd48f68735d7fe2991e3ad783cbd4b1 | [
"Apache-2.0"
] | 5 | 2020-03-19T17:50:03.000Z | 2020-03-21T20:10:26.000Z | clouds/experiments/__init__.py | jchen42703/reproducing-cloud-3rd-place | 25571f53efd48f68735d7fe2991e3ad783cbd4b1 | [
"Apache-2.0"
] | null | null | null | from .utils import get_train_transforms, get_val_transforms, \
get_preprocessing, seed_everything
from .train_2d import TrainSegExperiment
from .infer import GeneralInferExperiment
| 40 | 62 | 0.79 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9f02894ebc954a270f5715baabf264b3e5cc54c2 | 7,513 | py | Python | app.py | gborn/Generating-Multiple-Choice-Questions-From-Any-Text | 521c9297d3115a89e3ff8a74230d3b8390aa57d8 | [
"MIT"
] | 1 | 2022-01-29T04:26:41.000Z | 2022-01-29T04:26:41.000Z | app.py | gborn/Generating-Multiple-Choice-Questions-From-Any-Text | 521c9297d3115a89e3ff8a74230d3b8390aa57d8 | [
"MIT"
] | null | null | null | app.py | gborn/Generating-Multiple-Choice-Questions-From-Any-Text | 521c9297d3115a89e3ff8a74230d3b8390aa57d8 | [
"MIT"
] | null | null | null |
from src.answerkey import AnswerKey
from src.model import Model, unmasker
import streamlit as st
PAGE_CONFIG = {"page_title":"MCQ-App by Glad Nayak","page_icon":":white_check_mark:"}
st.set_page_config(**PAGE_CONFIG)
def render_input():
    """Render the sidebar input form (text area, sliders, radio, button).

    Returns a (text, ngram_range, num_questions, question_type) tuple when
    the Generate button was pressed; otherwise returns None (implicitly).
    """
    # source of default text: https://www.fresherslive.com/online-test/reading-comprehension-test-questions-and-answers
    text = """The Dust Bowl, considered one of the greatest man-made ecological disasters, was a period of severe dust storms that lasted nearly a decade, starting 1931, and engulfed large parts of the US. The dust storms originated in the Great Plains-from states like Texas, Oklahoma, New Mexico, Colorado and Kansas. They were so severe that they choked everything and blocked out the sun for days. Sometimes, the storms travelled thousands of kilometres and blotted out monuments such as the Statue of Liberty. Citizens developed “dust pneumonia” and experienced chest pain and difficulty in breathing. The storms damaged the soil in around 100 million acres of land, leading to the greatest short-time migration in the American history, with approximately 3.5 million people abandoning their farms and fields.
Dust storms are an annual weather pattern in the northern region of India comprising Delhi, Haryana, Punjab, Uttar Pradesh and Rajasthan and Punjab, as also in the Sindh region of Pakistan. But, they are normally low in intensity and accompanied by rains. In fact, people welcome dust storms as they bring down temperatures and herald the arrival of the monsoons. But, the dust storms that have hit India since February this year have been quantitatively and qualitatively different from those in the past. They are high-powered storms travelling long distances and destroying properties and agricultural fields. Since February, they have affected as many as 16 states and killed more than 500 people. Cities like Delhi were choked in dust for days, with air quality level reaching the “severe” category on most days.
The Dust Bowl areas of the Great Plains are largely arid and semi-arid and prone to extended periods of drought. The US federal government encouraged settlement and development of large-scale agriculture by giving large parcels of grasslands to settlers. Waves of European settlers arrived at the beginning of the 20th century and converted grasslands into agricultural fields. At the same time, technological improvements allowed rapid mechanization of farm equipment, especially tractors and combined harvesters, which made it possible to operate larger parcels of land.
For the next two decades, agricultural land grew manifold and farmers undertook extensive deep ploughing of the topsoil with the help of tractors to plant crops like wheat. This displaced the native, deep-rooted grasses that trapped soil and moisture even during dry periods and high winds. Then, the drought struck. Successive waves of drought, which started in 1930 and ended in 1939, turned the Great Plains into bone-dry land. As the soil was already loose due to extensive ploughing, high winds turned them to dust and blew them away in huge clouds. Does this sound familiar? The dust storm regions of India and Pakistan too are largely arid and semi-arid. But they are at a lower altitude and hence less windy compared to the Great Plains. Over the last 50 years, chemical- and water-intensive agriculture has replaced the traditional low-input agriculture. Canal irrigation has been overtaken by the groundwater irrigation. In addition, mechanized agriculture has led to deeper ploughing, loosening more and more topsoil. The result has been devastating for the soil and groundwater. In most of these areas, the soil has been depleted and groundwater levels have fallen precipitously. On top of the man-made ecological destruction, the natural climatic cycle along with climate change is affecting the weather pattern of this region.
First, this area too is prone to prolonged drought. In fact, large parts of Haryana, Punjab, Delhi and western UP have experienced mildly dry to extremely dry conditions in the last six years. The Standardized Precipitation Index (SPI), which specifies the level of dryness or excess rains in an area, of large parts of Haryana, Punjab and Delhi has been negative since 2012. Rajasthan, on the other hand shows a positive SPI or excess rainfall. Second, this area is experiencing increasing temperatures. In fact, there seems to be a strong correlation between the dust storms and the rapid increase in temperature. Maximum temperatures across northern and western India have been far higher than normal since April this year. Last, climate change is affecting the pattern of Western Disturbances (WDs), leading to stronger winds and stronger storms. WDs are storms originating in the Mediterranean region that bring winter rain to northwestern India. But because of the warming of the Arctic and the Tibetan Plateau, indications are that the WDs are becoming unseasonal, frequent and stronger.
The Dust Bowl led the US government to initiate a large-scale land-management and soil-conservation programme. Large-scale shelterbelt plantations, contour ploughing, conservation agriculture and establishment of conservation areas to keep millions of acres as grassland, helped halt wind erosion and dust storms. It is time India too recognizes its own Dust Bowl and initiates a large-scale ecological restoration programme to halt it. Else, we will see more intense dust storms, and a choked Delhi would be a permanent feature.
    """
    # Sidebar form: the default text above pre-fills the editable text area.
    st.sidebar.subheader('Enter Text:')
    text = st.sidebar.text_area('', text.strip(), height = 275)
    # How many words a single answer keyword may span (min/max n-gram).
    ngram_range = st.sidebar.slider('answer ngram range:', value=[1, 2], min_value=1, max_value=3, step=1)
    num_questions = st.sidebar.slider("number of questions:", value=10, min_value=10, max_value=20, step=1)
    question_type_str = st.sidebar.radio('question type:', ('declarative (fill in the blanks)', 'imperative'))
    # True selects fill-in-the-blank style questions.
    question_type = question_type_str == 'declarative (fill in the blanks)'
    button = st.sidebar.button('Generate')
    if button:
        return (text, ngram_range, num_questions, question_type)
def main():
    """Streamlit entry point: render the sidebar input form and, once the
    user clicks Generate, build and display multiple-choice questions.

    Relies on module-level names defined elsewhere in this file:
    ``render_input``, ``AnswerKey``, ``Model``, ``unmasker`` and ``st``.
    """
    # Render input text area
    inputs = render_input()
    if not inputs:
        # No "Generate" click yet -- show the landing title/instructions.
        st.title('Generate Multiple Choice Questions(MCQs) from Text Automatically')
        st.subheader('Enter Text, select how long a single answer should be(ngram_range), and number of questions to get started.')
    else:
        with st.spinner('Loading questions and distractors using BERT model'):
            st.subheader("")
            st.title("")
            text, ngram_range, num_questions, question_type = inputs
            # Load model
            answerkeys = AnswerKey(text)
            keyword_to_sentence = answerkeys.get_answers(ngram_range, num_questions)
            model = Model()
            quizzes = model.get_questions(keyword_to_sentence, unmasker, k=num_questions, declarative=question_type)
            st.subheader('Questions')
            for id, quiz in enumerate(quizzes):
                question, options, answer = quiz
                st.write(question)
                # Show three distractors plus the correct answer; the
                # correct answer's checkbox is pre-checked (value=True).
                for option in options[:3]:
                    st.checkbox(option, key=id)
                ans_button = st.checkbox(answer, key=id, value=True)
            st.balloons()
            st.button('Save')


if __name__ == '__main__':
    main()
| 98.855263 | 1,344 | 0.763077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,895 | 0.783805 |
9f031046a6a5d1910fe9a095791a6d9f8768fb8d | 687 | py | Python | FORTRAN/bardell/linear_buckling_cpanel/verification_cpanel.py | mrosemeier/compmech | f18f6d0471c72b26a3b014d2df41df3463505eae | [
"BSD-3-Clause"
] | 4 | 2019-02-05T06:12:12.000Z | 2022-03-25T14:41:18.000Z | FORTRAN/bardell/linear_buckling_cpanel/verification_cpanel.py | mrosemeier/compmech | f18f6d0471c72b26a3b014d2df41df3463505eae | [
"BSD-3-Clause"
] | null | null | null | FORTRAN/bardell/linear_buckling_cpanel/verification_cpanel.py | mrosemeier/compmech | f18f6d0471c72b26a3b014d2df41df3463505eae | [
"BSD-3-Clause"
] | 2 | 2019-06-05T07:19:35.000Z | 2020-12-29T00:22:18.000Z | from compmech.stiffpanelbay import StiffPanelBay
spb = StiffPanelBay()

# Cylindrical panel geometry and laminate definition.
spb.a = 2.
spb.b = 1.
spb.r = 2.
spb.stack = [0, 90, 90, 0, -45, +45]
spb.plyt = 1e-3*0.125
spb.laminaprop = (142.5e9, 8.7e9, 0.28, 5.1e9, 5.1e9, 5.1e9)

# Model selection and number of approximation terms.
spb.model = 'cpanel_clt_donnell_bardell'
spb.m = 15
spb.n = 16

# Boundary conditions: for each displacement field (u, v, w), edge (1, 2)
# and direction (x, y), the translational DOF (t) is fixed (0.) and the
# rotational DOF (r) is free (1.).  Equivalent to the explicit
# spb.u1tx = 0. ... spb.w2ry = 1. assignments, written as loops.
for field in 'uvw':
    for direction in 'xy':
        for edge in '12':
            setattr(spb, '%s%st%s' % (field, edge, direction), 0.)
            setattr(spb, '%s%sr%s' % (field, edge, direction), 1.)

spb.add_panel(y1=0, y2=spb.b, Nxx=-1.)

spb.lb(silent=False)
| 16.357143 | 60 | 0.617176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.040757 |
9f03f062a720d101370b2aeaa19a382b35aa2aa7 | 2,203 | py | Python | test/unittests/analysis/mri/test_base.py | monashbiomedicalimaging/nianalysis | d69c38eed52ae557a849889930cd659cdb3c6401 | [
"Apache-2.0"
] | 2 | 2019-11-14T01:02:26.000Z | 2022-03-17T01:47:01.000Z | test/unittests/analysis/mri/test_base.py | MonashBI/banana | 37364243b520ab14ac1243005dbd465f824542b4 | [
"Apache-2.0"
] | 18 | 2019-04-03T04:25:55.000Z | 2020-06-08T06:00:56.000Z | test/unittests/analysis/mri/test_base.py | MonashBI/nianalysis | 37364243b520ab14ac1243005dbd465f824542b4 | [
"Apache-2.0"
] | 4 | 2018-05-23T07:13:02.000Z | 2018-08-24T04:05:31.000Z | from banana.analysis.mri.base import MriAnalysis
from banana.utils.testing import AnalysisTester, PipelineTester, TEST_CACHE_DIR
from banana import FilesetFilter
from arcana.repository.xnat import XnatRepo
class TestMriBaseDefault(AnalysisTester):
    """Smoke test of MriAnalysis with its default pipeline configuration."""

    analysis_class = MriAnalysis
    # NOTE(review): 1 mm MNI template resolution -- matches TestMriAnalysis.
    parameters = {'mni_tmpl_resolution': 1}
    inputs = ['magnitude', 'coreg_ref']
class TestMriAnalysis(PipelineTester):
    """Runs each MriAnalysis pipeline against reference data stored in the
    TESTBANANAMRI project on the MBI XNAT server."""

    name = 'BaseMri'
    analysis_class = MriAnalysis
    # Reference repository holding the expected pipeline outputs.
    ref_repo = XnatRepo(server='https://mbi-xnat.erc.monash.edu.au',
                        project_id='TESTBANANAMRI',
                        cache_dir=TEST_CACHE_DIR)
    parameters = {
        'mni_tmpl_resolution': 1}

    def test_preprocess_channels_pipeline(self):
        pass  # Need to upload some raw channel data for this

    def test_coreg_pipeline(self):
        self.run_pipeline_test('coreg_pipeline')

    def test_brain_extraction_pipeline(self):
        self.run_pipeline_test('brain_extraction_pipeline')

    def test_brain_coreg_pipeline(self):
        # Pipelines that register to a reference image need 'coreg_ref'.
        self.run_pipeline_test('brain_coreg_pipeline',
                               add_inputs=['coreg_ref'])

    def test_coreg_fsl_mat_pipeline(self):
        self.run_pipeline_test('coreg_fsl_mat_pipeline',
                               add_inputs=['coreg_ref'])

    def test_coreg_ants_mat_pipeline(self):
        self.run_pipeline_test('coreg_ants_mat_pipeline',
                               add_inputs=['coreg_ref'])

    def test_coreg_to_tmpl_pipeline(self):
        # Template registration is compared with a loose RMS tolerance.
        self.run_pipeline_test('coreg_to_tmpl_pipeline',
                               add_inputs=['coreg_ref'],
                               test_criteria={
                                   'coreg_to_tmpl': {'rms_tol': 20000}})

    def test_qform_transform_pipeline(self):
        self.run_pipeline_test('qform_transform_pipeline',
                               add_inputs=['coreg_ref'])

    def test_preprocess_pipeline(self):
        self.run_pipeline_test('preprocess_pipeline')

    def test_header_extraction_pipeline(self):
        self.run_pipeline_test('header_extraction_pipeline')

    def test_motion_mat_pipeline(self):
        self.run_pipeline_test('motion_mat_pipeline')
| 34.968254 | 79 | 0.663187 | 1,991 | 0.903768 | 0 | 0 | 0 | 0 | 0 | 0 | 484 | 0.2197 |
9f0439803d85dd469a63299426e2bd344cab6542 | 333 | py | Python | python/my_first_module/my_email.py | kimkh0930/python_practice | 82ddd450bf65de01865c26790a6e728840b2ccbe | [
"MIT"
] | null | null | null | python/my_first_module/my_email.py | kimkh0930/python_practice | 82ddd450bf65de01865c26790a6e728840b2ccbe | [
"MIT"
] | null | null | null | python/my_first_module/my_email.py | kimkh0930/python_practice | 82ddd450bf65de01865c26790a6e728840b2ccbe | [
"MIT"
class Email:
    """Simple in-memory e-mail message that is "sent" by printing it."""

    def __init__(self):
        """Create an empty message: all header fields and the body are ''."""
        self.from_email = self.to_email = self.subject = self.contents = ''

    def send_mail(self):
        """Print the message to stdout, one 'Label: value' line per field."""
        fields = (('From', self.from_email),
                  ('To', self.to_email),
                  ('Subject', self.subject),
                  ('Contents', self.contents))
        for label, value in fields:
            print(label + ': ' + value)
9f05226afec709be950e9d25b3968e7c2da2d83d | 1,290 | py | Python | crypto/classic/subst/vigenere.py | jrlambea/ctf_tools | a563d5b603d87529d494c882e1dc17377c8b904f | [
"MIT"
] | null | null | null | crypto/classic/subst/vigenere.py | jrlambea/ctf_tools | a563d5b603d87529d494c882e1dc17377c8b904f | [
"MIT"
] | null | null | null | crypto/classic/subst/vigenere.py | jrlambea/ctf_tools | a563d5b603d87529d494c882e1dc17377c8b904f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
__author__ = "JR. Lambea"
__copyright__ = "Copyright 2015"
__credits__ = ["JR. Lambea"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "JR. Lambea"
__email__ = "jr.lambea@yahoo.com"
__status__ = ""
import sys
import argparse
def main():
    """Parse command-line arguments and print the Vigenere transform of TEXT.

    The key index advances for *every* input character, including ones
    outside the alphabet (which pass through unchanged).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("text", help="Message to encrypt/decrypt", type=str)
    parser.add_argument("key", help="Key to encrypt/decrypt", type=str)
    parser.add_argument("-a", "--alphabet", help="Alphabet to use", type=str,
                        default='abcdefghijklmnopqrstuvwxyz')
    parser.add_argument("-c", "--case-insensitive",
                        help="Does not distinguish between lower and uppercase.",
                        action="count", default=0)
    args = parser.parse_args()

    if args.case_insensitive > 0:
        text, alphabet, key = args.text.lower(), args.alphabet.lower(), args.key.lower()
    else:
        text, alphabet, key = args.text, args.alphabet, args.key

    out = []
    for pos, ch in enumerate(text):
        # Shift for this position; key wraps around cyclically.
        shift = alphabet.index(key[pos % len(key)])
        if ch in alphabet:
            out.append(alphabet[(alphabet.index(ch) + shift) % len(alphabet)])
        else:
            out.append(ch)
    print("".join(out))


if __name__ == "__main__":
    main()
| 22.241379 | 140 | 0.651163 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 337 | 0.26124 |
9f05cb39a0e710373d52bb0986bb7df178e23e03 | 3,465 | py | Python | pip/toto/models/common.py | team-ferret/pip-toto | 7a20c056be0dfcb64b880774c03395c9fe08a438 | [
"MIT"
] | 1 | 2021-03-09T16:46:28.000Z | 2021-03-09T16:46:28.000Z | pip/toto/models/common.py | team-ferret/pip-toto | 7a20c056be0dfcb64b880774c03395c9fe08a438 | [
"MIT"
] | null | null | null | pip/toto/models/common.py | team-ferret/pip-toto | 7a20c056be0dfcb64b880774c03395c9fe08a438 | [
"MIT"
] | null | null | null | """
<Program Name>
common.py
<Author>
Lukas Puehringer <lukas.puehringer@nyu.edu>
Santiago Torres <santiago@nyu.edu>
<Started>
Sep 23, 2016
<Copyright>
See LICENSE for licensing information.
<Purpose>
Provides base classes for various classes in the model.
<Classes>
Metablock:
pretty printed canonical JSON representation and dump
Signable:
sign self, store signature to self and verify signatures
ComparableHashDict: (helper class)
compare contained dictionary of hashes using "=", "!="
"""
import attr
import canonicaljson
from ..ssl_crypto import keys as ssl_crypto__keys
@attr.s(repr=False)
class Metablock(object):
    """Objects with base class Metablock have a __repr__ method
    that returns a canonical pretty printed JSON string and can be dumped to a
    file """

    def __repr__(self):
        # NOTE(review): current canonicaljson releases return *bytes* from
        # encode_pretty_printed_json; if that holds here, dump()'s
        # "{}".format(self) would fail.  Presumably written against a
        # str-returning version -- confirm before changing.
        return canonicaljson.encode_pretty_printed_json(attr.asdict(self))

    def dump(self, filename):
        """Write the canonical JSON representation of self to `filename`."""
        with open(filename, 'wt') as fp:
            fp.write("{}".format(self))
@attr.s(repr=False)
class Signable(Metablock):
    """Objects with base class Signable can sign their payload (a canonical
    pretty printed JSON string not containing the signatures attribute) and store
    the signature (signature format: ssl_crypto__formats.SIGNATURE_SCHEMA) """

    # Use a factory so every instance gets its own list; a plain
    # `attr.ib([])` default shares one mutable list between *all*
    # instances of the class.
    signatures = attr.ib(attr.Factory(list))

    @property
    def payload(self):
        """Canonical pretty-printed JSON of all attributes except signatures."""
        payload = attr.asdict(self)
        payload.pop("signatures")
        return canonicaljson.encode_pretty_printed_json(payload)

    def sign(self, key):
        """Signs the canonical JSON representation of itself (without the
        signatures property) and adds the signatures to its signature properties."""
        # XXX LP: Todo: Verify key format
        signature = ssl_crypto__keys.create_signature(key, self.payload)
        self.signatures.append(signature)

    def verify_signatures(self, keys_dict):
        """Verifies all signatures of the object using the passed key_dict.

        Raises Exception if there are no signatures, a signing key is
        missing from keys_dict, or a signature fails verification.
        """
        if not self.signatures or len(self.signatures) <= 0:
            raise Exception("No signatures found")

        for signature in self.signatures:
            keyid = signature["keyid"]
            try:
                key = keys_dict[keyid]
            # Only a missing key should be reported here; the original
            # bare `except:` also masked unrelated errors (even
            # KeyboardInterrupt).
            except KeyError:
                raise Exception("Signature key not found, key id is %s" % keyid)
            if not ssl_crypto__keys.verify_signature(key, signature, self.payload):
                raise Exception("Invalid signature")
@attr.s(repr=False, cmp=False)
class ComparableHashDict(object):
    """Helper class that wraps hash dicts (format:
    toto.ssl_crypto.formats.HASHDICT_SCHEMA) in order to compare them using
    `=` and `!=`"""

    # Use a factory so every instance gets its own dict; a plain
    # `attr.ib({})` default shares one mutable dict between *all*
    # instances of the class.
    hash_dict = attr.ib(attr.Factory(dict))

    def __eq__(self, other):
        """Equal if the dicts have the same keys and the according values
        (strings) are equal"""
        # Compare key *sets* so the result never depends on dict
        # iteration order (keys() returned ordered lists on Python 2).
        if set(self.hash_dict) != set(other.hash_dict):
            return False
        for key in self.hash_dict:
            if self.hash_dict[key] != other.hash_dict[key]:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)
# @attr.s(repr=False)
# class GenericPathList(object):
# """ Helper class implementing __contains__ to provide <path> in <path list>
# where <path> can start with "./" or not
# """
# path_list = attr.ib([])
# def __contains__(self, item):
# if item.startswith("./"):
# other_item = item.lstrip("./")
# else:
# other_item = "./" + item
# if item in self.path_list or \
# other_item in self.path_list:
# return True
# return False
| 27.283465 | 80 | 0.694372 | 2,279 | 0.65772 | 0 | 0 | 2,350 | 0.678211 | 0 | 0 | 1,971 | 0.568831 |
9f06666109409538bd3e098b187da1a2987b3910 | 1,034 | py | Python | application.py | ane4katv/python_training | d8ba6dbed0b43402e8b09a5b6cf8de52703e18a1 | [
"Apache-2.0"
] | null | null | null | application.py | ane4katv/python_training | d8ba6dbed0b43402e8b09a5b6cf8de52703e18a1 | [
"Apache-2.0"
] | null | null | null | application.py | ane4katv/python_training | d8ba6dbed0b43402e8b09a5b6cf8de52703e18a1 | [
"Apache-2.0"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
class Application:
    """Page-object wrapper around a Selenium Chrome session for the
    OrangeHRM demo site (seleniumminutes.com)."""

    def __init__(self, driver_path='/Users/atvelova/Documents/python_training/chromedriver'):
        """Start a Chrome session.

        :param driver_path: path to the chromedriver binary.  Parameterized
            so the class is usable outside the original author's machine;
            the default preserves the previous hardcoded location.
        """
        self.wd = webdriver.Chrome(executable_path=driver_path)
        self.wd.implicitly_wait(60)

    def open_page(self):
        """Navigate to the login page."""
        wd = self.wd
        wd.get("http://hrm.seleniumminutes.com/symfony/web/index.php/auth/login")

    def login(self):
        """Open the login page and sign in with the demo admin credentials."""
        wd = self.wd
        self.open_page()
        wd.find_element(By.ID, "txtUsername").click()
        wd.find_element(By.ID, "txtUsername").send_keys("admin")
        wd.find_element(By.ID, "txtPassword").send_keys("Password")
        wd.find_element(By.ID, "txtPassword").send_keys(Keys.ENTER)
        self.wd.implicitly_wait(60)

    def logout(self):
        """Open the user menu and click Logout."""
        wd = self.wd
        wd.find_element(By.ID, "welcome").click()
        self.wd.implicitly_wait(60)
        wd.find_element(By.LINK_TEXT, "Logout").click()

    def destroy(self):
        """Quit the browser and release the WebDriver session."""
        self.wd.quit()
9f06cf95ce0ef6f08bed1fb63d57780556f6d2fe | 704 | py | Python | codes/network/__init__.py | upzheng/Electrocardio-Panorama | 258e545b22e910bd75047f9011970ff16c50766f | [
"MIT"
] | 33 | 2021-05-06T06:44:11.000Z | 2022-03-25T06:20:26.000Z | codes/network/__init__.py | upzheng/Electrocardio-Panorama | 258e545b22e910bd75047f9011970ff16c50766f | [
"MIT"
] | 3 | 2021-10-08T09:15:42.000Z | 2022-03-29T02:47:02.000Z | codes/network/__init__.py | upzheng/Electrocardio-Panorama | 258e545b22e910bd75047f9011970ff16c50766f | [
"MIT"
] | 5 | 2021-06-09T11:01:46.000Z | 2022-02-25T13:46:01.000Z | from .model_nefnet import Model_nefnet
from torch.nn import MSELoss, L1Loss, CrossEntropyLoss
from .loss import losswrapper, MSELead
def build_model(cfg):
    """Instantiate the network named by ``cfg.MODEL.model``.

    Only 'model_nefnet' is supported; any other name raises ValueError.
    """
    name = cfg.MODEL.model
    if name != 'model_nefnet':
        raise ValueError('build model: model name error')
    return Model_nefnet(theta_encoder_len=cfg.MODEL.theta_L,
                        lead_num=cfg.DATA.lead_num)
def build_loss(cfg):
    """Return the loss selected by ``cfg.MODEL.loss``.

    'v1' returns the (uninstantiated) losswrapper, 'ce' a CrossEntropyLoss
    instance, 'mse' an MSELoss instance; anything else raises ValueError.
    """
    dispatch = {
        'v1': lambda: losswrapper,   # returned as-is, not instantiated
        'ce': CrossEntropyLoss,
        'mse': MSELoss,
    }
    try:
        factory = dispatch[cfg.MODEL.loss]
    except KeyError:
        raise ValueError('build loss: loss name error')
    return factory()
| 27.076923 | 93 | 0.661932 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.12358 |
9f0810d6e1403ceb89071bec522c48c3ac61b892 | 8,942 | py | Python | guild/deps.py | Jim-Holmstroem/guildai | ed80e307fca20c0d607c600180cdb0854f42021d | [
"Apache-2.0"
] | null | null | null | guild/deps.py | Jim-Holmstroem/guildai | ed80e307fca20c0d607c600180cdb0854f42021d | [
"Apache-2.0"
] | null | null | null | guild/deps.py | Jim-Holmstroem/guildai | ed80e307fca20c0d607c600180cdb0854f42021d | [
"Apache-2.0"
] | null | null | null | # Copyright 2017-2020 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import logging
import os
import re
from guild import namespace
from guild import resolver as resolverlib
from guild import resource
from guild import util
log = logging.getLogger("guild")
RESOURCE_TERM = r"[a-zA-Z0-9_\-\.]+"
class DependencyError(Exception):
    """Raised when an operation dependency cannot be resolved."""

    pass
class ResolutionContext(object):
    """Bundles the inputs needed to resolve resources: the directory that
    resolved sources are linked under, the operation definition requiring
    them, and any user-supplied per-resource configuration."""

    def __init__(self, target_dir, opdef, resource_config):
        self.target_dir = target_dir  # directory resolved sources are linked into
        self.opdef = opdef  # operation definition that declares the dependencies
        self.resource_config = resource_config  # mapping: resource name -> config
class Resource(object):
    """A resource definition paired with a resolution context.

    Resolving a resource resolves each of its sources and links the
    resolved files into the context's target directory.
    """

    def __init__(self, resdef, location, ctx):
        self.resdef = resdef
        self.location = location  # directory sources are resolved relative to
        self.ctx = ctx
        self.config = self._init_resource_config()
        self.dependency = None  # spec string; set by _dependency_resource

    def _init_resource_config(self):
        # User config may be keyed by either the resource's full name
        # (e.g. 'model:res') or its short name.
        for name, config in self.ctx.resource_config.items():
            if name in [self.resdef.fullname, self.resdef.name]:
                return config
        return None

    def resolve(self, unpack_dir=None):
        """Resolve all sources, returning the accumulated resolved paths."""
        resolved_acc = []
        for source in self.resdef.sources:
            paths = self.resolve_source(source, unpack_dir)
            resolved_acc.extend(paths)
        return resolved_acc

    def resolve_source(self, source, unpack_dir=None):
        """Resolve a single source and link its files into the target dir.

        Raises DependencyError when no resolver supports the source, when
        resolution fails, or (after logging) on any unexpected error.
        """
        resolver = resolverlib.for_resdef_source(source, self)
        if not resolver:
            raise DependencyError(
                "unsupported source '%s' in %s resource" % (source, self.resdef.name)
            )
        try:
            source_paths = resolver.resolve(unpack_dir)
        except resolverlib.ResolutionError as e:
            msg = "could not resolve '%s' in %s resource: %s" % (
                source,
                self.resdef.name,
                e,
            )
            if source.help:
                msg += "\n%s" % source.help
            raise DependencyError(msg)
        except Exception as e:
            # Unexpected failure: log the traceback, then surface a
            # DependencyError to the caller.
            log.exception(
                "resolving required source '%s' in %s resource",
                source,
                self.resdef.name,
            )
            raise DependencyError(
                "unexpected error resolving '%s' in %s resource: %r"
                % (source, self.resdef.name, e)
            )
        else:
            for path in source_paths:
                self._link_to_source(path, source)
            return source_paths

    def _link_to_source(self, source_path, source):
        source_path = util.strip_trailing_sep(source_path)
        link = self._link_path(source_path, source)
        _symlink(source_path, link)

    def _link_path(self, source_path, source):
        """Compute where a resolved source is linked inside the target dir."""
        basename = os.path.basename(source_path)
        res_path = self.resdef.path or ""
        if source.path:
            res_path = os.path.join(res_path, source.path)
        # Resource paths must stay inside the target directory.
        if os.path.isabs(res_path):
            raise DependencyError(
                "invalid path '%s' in %s resource (path must be relative)"
                % (res_path, self.resdef.name)
            )
        if source.rename:
            basename = _rename_source(basename, source.rename)
        return os.path.join(self.ctx.target_dir, res_path, basename)
def _rename_source(name, rename):
    """Apply the first rename spec that changes `name`.

    Each spec provides a regex `pattern` and replacement `repl`.  Specs
    are tried in order; the first substitution that actually alters the
    name wins.  If none apply, the original name is returned.  A bad
    pattern/replacement raises DependencyError.
    """
    for rule in rename:
        try:
            candidate = re.sub(rule.pattern, rule.repl, name)
        except Exception as e:
            raise DependencyError(
                "error renaming source %s (%r %r): %s"
                % (name, rule.pattern, rule.repl, e)
            )
        if candidate != name:
            return candidate
    return name
def _symlink(source_path, link):
    """Create `link` pointing at `source_path`, skipping if it exists.

    The link target is stored relative to the link's directory so run
    directories remain relocatable.
    """
    assert os.path.isabs(link), link
    # lexists additionally catches broken symlinks, which exists misses.
    if os.path.lexists(link) or os.path.exists(link):
        log.debug("%s already exists, skipping link", link)
        return
    util.ensure_dir(os.path.dirname(link))
    log.debug("resolving source %s as link %s", source_path, link)
    rel_source_path = _rel_source_path(source_path, link)
    util.symlink(rel_source_path, link)
def _rel_source_path(source, link):
    """Return `source` expressed relative to the directory of `link`.

    The link is dereferenced first so the relative path is computed from
    the link's real location.
    """
    source_dir, source_name = os.path.split(source)
    real_link = util.realpath(link)
    link_dir = os.path.dirname(real_link)
    source_rel_dir = os.path.relpath(source_dir, link_dir)
    return os.path.join(source_rel_dir, source_name)
class ResourceProxy(object):
    """Stand-in for an undefined resource backed by user-supplied config.

    `config` is expected to be a path on disk; resolving links that path
    into the context's target directory.
    """

    def __init__(self, dependency, name, config, ctx):
        self.dependency = dependency
        self.name = name
        self.config = config  # path to the source (only supported config form)
        self.ctx = ctx

    def resolve(self):
        """Link the configured path into the target dir and return it."""
        source_path = self.config  # the only type of config supported
        if not os.path.exists(source_path):
            raise DependencyError(
                "could not resolve %s: %s does not exist" % (self.name, source_path)
            )
        log.info("Using %s for %s resource", source_path, self.name)
        basename = os.path.basename(source_path)
        link = os.path.join(self.ctx.target_dir, basename)
        _symlink(source_path, link)
        return [source_path]
def _dep_desc(dep):
    """Return a human-readable 'model:operation' label for a dependency."""
    opdef = dep.opdef
    return "{}:{}".format(opdef.modeldef.name, opdef.name)
def resolve(dependencies, ctx):
    """Resolve all `dependencies` and return {resource_name: [paths]}.

    Sources resolved for multiple dependencies of the same resource name
    accumulate under a single key.
    """
    resolved = {}
    for res in resources(dependencies, ctx):
        log.info("Resolving %s dependency", res.resdef.name)
        resolved_sources = res.resolve()
        log.debug("resolved sources for %s: %r", res.dependency, resolved_sources)
        if not resolved_sources:
            log.warning("Nothing resolved for %s dependency", res.resdef.name)
        resolved.setdefault(res.resdef.name, []).extend(resolved_sources)
    return resolved
def resources(dependencies, ctx):
    """Return a Resource (or ResourceProxy) for each dependency.

    Flag references in dependency specs are resolved against the
    operation's flag values first.
    """
    flag_vals = util.resolve_all_refs(ctx.opdef.flag_values())
    return [_dependency_resource(dep, flag_vals, ctx) for dep in dependencies]
def _dependency_resource(dep, flag_vals, ctx):
    """Resolve a dependency to a Resource.

    Tries, in order: inline resource, model-local resource ('res'),
    guildfile resource ('model:res'), packaged resource ('pkg/res').
    Falls back to a ResourceProxy when the spec cannot be resolved but
    user-supplied resource config exists for it.
    """
    if dep.inline_resource:
        return _inline_resource(dep.inline_resource, ctx)
    spec = util.resolve_refs(dep.spec, flag_vals)
    try:
        res = util.find_apply(
            [_model_resource, _guildfile_resource, _packaged_resource], spec, ctx
        )
    except DependencyError as e:
        # If the user supplied explicit config for this spec, degrade to
        # a proxy that resolves directly from the configured path.
        if spec in ctx.resource_config:
            log.warning(str(e))
            return ResourceProxy(dep, spec, ctx.resource_config[spec], ctx)
        raise
    if res:
        res.dependency = spec
        return res
    raise DependencyError(
        "invalid dependency '%s' in operation '%s'" % (spec, ctx.opdef.fullname)
    )
def _inline_resource(resdef, ctx):
    # Inline resources resolve relative to the defining guildfile's dir.
    return Resource(resdef, resdef.modeldef.guildfile.dir, ctx)
def _model_resource(spec, ctx):
    """Match a bare resource name (e.g. 'data') against the op's model.

    Returns None when `spec` is not a bare resource name.
    """
    m = re.match(r"(%s)$" % RESOURCE_TERM, spec)
    if m is None:
        return None
    res_name = m.group(1)
    return _modeldef_resource(ctx.opdef.modeldef, res_name, ctx)
def _modeldef_resource(modeldef, res_name, ctx):
    """Return the named resource from `modeldef` or raise DependencyError."""
    resdef = modeldef.get_resource(res_name)
    if resdef is None:
        raise DependencyError(
            "resource '%s' required by operation '%s' is not defined"
            % (res_name, ctx.opdef.fullname)
        )
    return Resource(resdef, modeldef.guildfile.dir, ctx)
def _guildfile_resource(spec, ctx):
    """Match a 'model:resource' spec against models in the op's guildfile.

    Returns None when `spec` does not have the 'model:resource' form.
    """
    m = re.match(r"(%s):(%s)$" % (RESOURCE_TERM, RESOURCE_TERM), spec)
    if m is None:
        return None
    model_name = m.group(1)
    modeldef = ctx.opdef.guildfile.models.get(model_name)
    if modeldef is None:
        raise DependencyError(
            "model '%s' in resource '%s' required by operation "
            "'%s' is not defined" % (model_name, spec, ctx.opdef.fullname)
        )
    res_name = m.group(2)
    return _modeldef_resource(modeldef, res_name, ctx)
def _packaged_resource(spec, ctx):
    """Match a 'package/resource' spec against installed packages.

    Returns None when `spec` does not have the 'package/resource' form;
    raises DependencyError when it does but no matching installed
    resource is found.
    """
    m = re.match(r"(%s)/(%s)$" % (RESOURCE_TERM, RESOURCE_TERM), spec)
    if m is None:
        return None
    pkg_name = m.group(1)
    res_name = m.group(2)
    try:
        resources = list(resource.for_name(res_name))
    except LookupError:
        pass
    else:
        for res in resources:
            # Match on the namespaced project name of the distribution.
            if namespace.apply_namespace(res.dist.project_name) == pkg_name:
                location = os.path.join(
                    res.dist.location, res.dist.key.replace(".", os.path.sep)
                )
                return Resource(res.resdef, location, ctx)
    raise DependencyError(
        "resource '%s' required by operation '%s' is not installed"
        % (spec, ctx.opdef.fullname)
    )
| 33.118519 | 85 | 0.630396 | 3,594 | 0.401924 | 0 | 0 | 0 | 0 | 0 | 0 | 1,414 | 0.15813 |
9f08bf7a9be74e2413c2f47b167c933d97a2d0c1 | 12,770 | py | Python | perm_hmm/policies/belief_tree.py | usnistgov/perm_hmm | b57b3cca51d0d91bde438a62f26c0b0123c26aa5 | [
"Apache-2.0"
] | null | null | null | perm_hmm/policies/belief_tree.py | usnistgov/perm_hmm | b57b3cca51d0d91bde438a62f26c0b0123c26aa5 | [
"Apache-2.0"
] | null | null | null | perm_hmm/policies/belief_tree.py | usnistgov/perm_hmm | b57b3cca51d0d91bde438a62f26c0b0123c26aa5 | [
"Apache-2.0"
] | null | null | null | r"""Provides functions used by strategies that use a tree to select the
permutation.
To compute optimal permutations, we use the belief states
.. math::
b(y^{k-1}) := \mathbb{P}(s_0, s_k|y^{k-1}),
where the :math:`s_k` are the states of the HMM at step :math:`k`, and the
superscript :math:`y^{k-1}` is the sequence of observations up to step
:math:`k-1`.
Here, when we refer to a tree, we really mean a list of
:py:class:`~perm_hmm.strategies.belief.HMMBeliefState` objects. The i'th
object contains the beliefs for all the nodes at the i'th level of the tree.
"""
import torch
from perm_hmm.policies.belief import HMMBeliefState
class HMMBeliefTree(object):
    r"""
    Instances of this class have the following attributes:

    ``hmm``:
        A :py:class:`~perm_hmm.models.hmms.PermutedDiscreteHMM` that is used to
        calculate belief states.

    ``possible_perms``:
        A :py:class:`~torch.Tensor` of type ``long`` that contains the possible
        permutations. This is used to compute transition matrices for updating
        belief states.

    ``beliefs``:
        A list of :py:class:`HMMBeliefState` objects; the i'th element holds
        the beliefs for all nodes at the i'th level of the tree.
    """

    def __init__(self, hmm, possible_perms, nsteps, root_belief: HMMBeliefState = None, data_len=None, terminal_offset=False):
        r"""Generates the belief tree for the given HMM.

        Builds a tree that is traversed by sequences :math:`y_0, \sigma_0, y_1,
        \sigma_1, \ldots`, where the :math:`\sigma_k` are permutation indices, and
        the :math:`y_k` are the observation indices. This tree has a layered
        structure. Attached to each node in the tree is a belief state
        :math:`\mathbb{P}(s_0, s_k|y^{k-1})`, or :math:`\mathbb{P}(s_0, s_k|y^k)`,
        depending on whether the node is an even or odd number of steps from the
        root, respectively. To go from a belief state attached to one node to a
        belief state attached to one of that node's children, we either use a
        transition or a Bayesian update, depending on whether the edge is a
        permutation or an observation, respectively.

        :param hmm: The HMM to compute likelihoods with.
        :param possible_perms: The allowable permutations.
        :param nsteps: The number of steps to compute for. (2 * nsteps + 1) is the
            height of the tree.
        :param HMMBeliefState root_belief: The belief state to start the tree with. If None,
            defaults to the initial state distribution of the HMM.
        :param data_len: The length of the data. If None, defaults to 1.
        :param terminal_offset: Whether the leaves of the tree should be labeled by
            observation indices.
        :return: A list of belief states, to be interpreted as a tree by looking at
            the ith element of the list as the set of all nodes at the ith level.
        """
        self.hmm = hmm
        self.possible_perms = possible_perms
        self._build_tree(nsteps, root_belief, data_len, terminal_offset)

    def _build_tree(self, nsteps, root_belief: HMMBeliefState = None, data_len=None, terminal_offset=False):
        r"""Generates the belief tree for the given HMM.

        See :py:meth:`__init__` for the tree structure; this helper fills in
        ``self.beliefs``.

        :param nsteps: The number of steps to compute for. (2 * nsteps + 1) is the
            height of the tree.
        :param root_belief: The belief state to start the tree with. If None,
            defaults to the initial state distribution of the HMM.
        :param data_len: The length of the data. If None, defaults to 1.
        :param terminal_offset: Whether the leaves of the tree should be labeled by
            observation indices.
        :raise ValueError: If ``nsteps`` is less than 1. Must look ahead at
            least one step.
        """
        if nsteps < 1:
            raise ValueError("Cannot build a tree of less than 1 look ahead "
                             "steps.")
        if data_len is None:
            data_len = 1
        if root_belief is None:
            root_belief = HMMBeliefState.from_hmm(self.hmm)
            # Broadcast the root belief over the data batch dimension.
            root_belief.logits = root_belief.logits.expand(data_len, -1, -1)
        self.beliefs = [root_belief]
        if terminal_offset and (nsteps == 1):
            return
        # First observation layer: Bayesian update over all possible outcomes.
        b = root_belief.bayes_update(self.hmm.observation_dist.enumerate_support(expand=False).squeeze(-1), new_dim=True)
        self.beliefs.append(b)
        if (not terminal_offset) and (nsteps == 1):
            return
        # Each grow() adds a permutation layer and an observation layer.
        while len(self.beliefs) < (2 * (nsteps - 1)):
            self.grow(self.possible_perms)
        if not terminal_offset:
            self.grow(self.possible_perms)
        else:
            # Leaves end on a permutation (transition) layer.
            self.beliefs.append(self.beliefs[-1].transition(self.possible_perms, new_dim=True))

    def broadcast_to_length(self, length):
        """Broadcast every belief in the tree to a batch of size `length`."""
        new_beliefs = []
        for b in self.beliefs:
            shape = torch.broadcast_shapes((length, 1, 1), b.logits.shape)
            new_b = HMMBeliefState(b.logits.expand(shape).clone(), b.hmm, offset=b.offset)
            new_beliefs.append(new_b)
        self.beliefs = new_beliefs

    def grow(self, possible_perms=None, hmm=None):
        """Expands the tree by two levels.

        Assumes that the leaves have offset=True. Then, we expand the leaves by
        transitioning the belief states at the leaves, and then again by Bayesian
        updates.

        :param possible_perms: The allowable permutations.
        :param hmm: The HMM to compute likelihoods with.
        :return: An expanded tree, in the form of a list of belief states.
        """
        if possible_perms is None:
            possible_perms = self.possible_perms
        if hmm is None:
            hmm = self.hmm
        b = self.beliefs[-1].transition(possible_perms, hmm=hmm, new_dim=True)
        self.beliefs.append(b)
        b = self.beliefs[-1].bayes_update(hmm.observation_dist.enumerate_support(expand=False).squeeze(-1), hmm=hmm, new_dim=True)
        self.beliefs.append(b)

    def perm_idxs_from_log_cost(self, log_cost_func, return_log_costs=False, terminal_log_cost=None, is_cost_func=True):
        r"""Computes :math:`\mathbb{E}_{Y_k^n|y^{k-1}}[c(y^{k-1},Y_k^n)]` and the
        corresponding permutation indices that minimize this expectation.

        Given a tree of belief states, computes the expected cost of the tree.
        This computation is performed by first evaluating the cost function at the
        leaves of the tree, then propagating the cost up the tree.

        To compute the cost at an internal node whose children are labeled by data,
        we take the expectation over the children's costs, using the belief state
        to compute said expectation. To compute the cost at an internal node whose
        children are labeled by permutations, we take the minimum over the
        children's costs. This is a direct computation of the expected cost using
        the `Bellman equation`_.

        We then return both the permutation indices and, if ``return_costs`` is
        True, the expected cost.

        The computation is done in log space, so the cost function must be in log
        space as well.

        .. _`Bellman equation`: https://en.wikipedia.org/wiki/Bellman_equation

        :param log_cost_func: The cost function to compute the expected cost of.
            Must be in log space, and must take a single argument, which is a
            tensor of shape ``tree_shape + (n_states, n_states)``, returning a
            tensor of shape ``tree_shape``. The last two dimensions of the input
            correspond to the initial and final states of the HMM.
        :param bool return_log_costs: Whether to return the expected cost as well.
        :param terminal_log_cost: A tensor of terminal costs to start the calculation
            with. Defaults to ``log_cost_func(self.beliefs[-1].logits)``
        :param bool is_cost_func: If True, minimize (a cost); if False, maximize
            (a reward).
        :return: A list of permutation indices, and, if ``return_costs`` is True,
            the expected cost.
        """
        if terminal_log_cost is None:
            terminal_log_cost = log_cost_func(self.beliefs[-1].logits)
        costs = [terminal_log_cost]
        perm_idxs = []
        # Walk the layers bottom-up, alternating expectation (observation
        # layers, offset=True) and min/max (permutation layers).
        for b in reversed(self.beliefs[:-1]):
            if b.offset:
                # yksk = b.joint_yksk(b.hmm.enumerate_support(expand=False).squeeze(-1), new_dim=True)
                yksk = b.joint_yksks0(b.hmm.enumerate_support(expand=False).squeeze(-1), new_dim=True).logsumexp(-2)
                yk = yksk.logsumexp(-1)
                # Compute the expectation of the cost function
                c = costs[-1] + yk
                c = c.logsumexp(-2)
                costs.append(c)
            else:
                # Gets the optimal permutation index.
                if is_cost_func:
                    c, perm_idx = costs[-1].min(-2)
                else:
                    c, perm_idx = costs[-1].max(-2)
                costs.append(c)
                perm_idxs.append(perm_idx)
        costs = costs[::-1]
        perm_idxs = perm_idxs[::-1]
        perm_tree = PermIdxTree(perm_idxs)
        if return_log_costs:
            return perm_tree, costs
        return perm_tree

    def prune_tree(self, idx):
        """Prunes a tree according to the index.

        :param idx: The index corresponding to the data or permutations.
        """
        idx = idx.unsqueeze(-1).unsqueeze(-2)
        new_tree = []
        # Drop the root layer; select the child branch `idx` at each level.
        for b in self.beliefs[1:]:
            idxb = torch.broadcast_tensors(idx, b.logits)[0]
            new_b = HMMBeliefState(b.logits.gather(0, idxb)[0], b.hmm, b.offset)
            new_tree.append(new_b)
        self.beliefs = new_tree
class PermIdxTree(object):
    """A tree of chosen permutation indices, one tensor per tree level."""

    def __init__(self, idx_list):
        # List of tensors; level i holds the permutation index chosen for
        # every node at depth i.
        self.perm_idxs = idx_list

    def trim_list_tree(self):
        r"""Trims the tree to remove permutation layers.

        The tree is a list of tensors. The first tensor is the root of the tree, and
        each subsequent tensor is a layer of the tree. The tree has a layered
        structure, with a path to a node in the tree given by the indices
        corresponding to the list :math:`(y_0, \sigma_0, y_1, \sigma_1, \ldots,)`,
        where :math:`y_i` is the index of the observation at step :math:`i`, and
        :math:`\sigma_i` is the index of the permutation at step :math:`i`.

        Once the permutations have been selected, the tree should be trimmed to
        remove the permutation layers, which is done by this function.
        """
        new_tree = []
        p = self.perm_idxs[0]
        p = p.squeeze()
        new_tree.append(p)
        for p in self.perm_idxs[1:]:
            p = p.squeeze()
            # Index this layer by the permutation choices already made at
            # every shallower layer.
            for ntp in new_tree:
                idx = torch.meshgrid([torch.arange(s) for s in ntp.shape])
                p = p[idx + (ntp,)]
            new_tree.append(p)
        self.perm_idxs = new_tree

    def expand_batch(self, data_len):
        r"""Adds a dimension of length data_len to each tensor in the tree.

        This function is used to expand the tree.

        :param int data_len: Length of new dimension.
        :return: Same list of tensors, but with a new dimension added to each
            tensor.
        """
        self.perm_idxs = [b.unsqueeze(-1).expand((-1,)*(len(b.shape)) + (data_len,)) for b in self.perm_idxs]

    def prune_perm_tree(self, data_idx):
        r"""Prunes the tree after observing data.

        Given data indexed by data_idx, this function prunes the tree to remove
        the branches that are not relevant to the data.

        :param torch.Tensor data_idx: Index of data.
        :return: Same list of tensors, but with the branches not relevant to the
            data removed.
        """
        # data_idx = data_idx.unsqueeze(-1)
        new_tree = []
        # Drop the root layer; keep only the observed branch per batch item.
        for pl in self.perm_idxs[1:]:
            new_b = pl[data_idx, ..., torch.arange(data_idx.shape[-1])]
            new_b = new_b.movedim(0, -1)
            new_tree.append(new_b)
        self.perm_idxs = new_tree
| 46.436364 | 130 | 0.637901 | 12,124 | 0.949413 | 0 | 0 | 0 | 0 | 0 | 0 | 8,090 | 0.633516 |
9f09542fe4875bd5504e95caec22e4558b8a94ef | 1,424 | py | Python | setup.py | zhul9311/XFNTR | 4f2e58775c6bb0df9a90e2854e7532f15f0e341a | [
"MIT"
] | null | null | null | setup.py | zhul9311/XFNTR | 4f2e58775c6bb0df9a90e2854e7532f15f0e341a | [
"MIT"
] | null | null | null | setup.py | zhul9311/XFNTR | 4f2e58775c6bb0df9a90e2854e7532f15f0e341a | [
"MIT"
] | null | null | null | # All .ui files and .so files are added through keyword: package_data, because setuptools doesn't include them automatically.
import sys
import os
from setuptools import setup, find_packages
# Build the PyPI long description from the README (rendered as Markdown).
with open("README.md", "r") as fh:
    long_description = fh.read()
setup(
    name = "xfntr",
    version = "0.3.0",
    author = "Zhu Liang",
    author_email = "zliang8@uic.edu",
    description = "A software that analyzes xfntr data",
    long_description = long_description,
    long_description_content_type = "text/markdown",
    url = "https://github.com/zhul9311/XFNTR.git",
    packages = find_packages(),
    package_dir = {'':'.'},
    # Non-Python files (compiled extension, Qt GUI assets, images, test data)
    # must be listed explicitly; setuptools does not include them by default.
    package_data = {
        '' : ['xr_ref.cpython-37m-darwin.so',
              'GUI/*',
              'images/*',
              'test/*']
    },
    exclude_package_data = {
        '' : ['.git/','.setup.py.swp']
    },
    classifiers = [
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires = '>=3.6',
    install_requires = [
        'pyqt5',
        'scipy',
        'matplotlib',
        'lmfit',
        'periodictable',
        'numba'
    ],
    entry_points = { # create scripts and add to sys.PATH
        'console_scripts':[
            'xfntr1 = xfntr.main:main'
        ],
        'gui_scripts': [
            'xfntr = xfntr.main:main'
        ]
    },
)
| 27.384615 | 125 | 0.558287 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 650 | 0.456461 |
9f0cf9dacde698a4632f08679730078e59456da8 | 720 | py | Python | scripts/subsample_model_files.py | musicpiano/mlmicrophysics | 720e09b9003285e4e601df8befd58337bee691f5 | [
"MIT"
] | 4 | 2021-01-05T13:18:28.000Z | 2021-09-29T09:53:28.000Z | scripts/subsample_model_files.py | musicpiano/mlmicrophysics | 720e09b9003285e4e601df8befd58337bee691f5 | [
"MIT"
] | 5 | 2020-11-16T15:53:24.000Z | 2021-07-22T20:16:11.000Z | scripts/subsample_model_files.py | musicpiano/mlmicrophysics | 720e09b9003285e4e601df8befd58337bee691f5 | [
"MIT"
] | 4 | 2020-07-08T13:04:44.000Z | 2022-01-09T13:36:55.000Z | import xarray as xr
import argparse
from glob import glob
def main():
    """Enumerate NetCDF model files selected by the command line.

    Parses input/output directories and X/Y, Z and time subsetting factors.
    NOTE(review): only --input is consumed so far -- each file is opened and
    immediately closed; the subsetting itself appears unimplemented (TODO).
    """
    import os.path  # local import keeps this robustness fix self-contained
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", help="Input File Directory")
    parser.add_argument("-o", "--output", help="Output file directory")
    parser.add_argument("-x", "--xsub", type=int, default=2, help="X and Y subset factor")
    parser.add_argument("-z", "--zsub", type=int, default=1, help="Z subset factor")
    parser.add_argument("-t", "--tsub", type=int, default=1, help="Time subset factor")
    args = parser.parse_args()
    # BUG FIX: the pattern was built as `args.input + "*.nc"`, which matched
    # the wrong files (or nothing) unless the user appended a trailing slash;
    # os.path.join inserts the separator only when it is missing.
    nc_files = sorted(glob(os.path.join(args.input, "*.nc")))
    for nc_file in nc_files:
        ds = xr.open_dataset(nc_file)
        ds.close()
if __name__ == "__main__":
main() | 37.894737 | 90 | 0.652778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 184 | 0.255556 |
9f0ebe9d5e07f84acae7dcbce8b140dde1d99a45 | 7,099 | py | Python | main.py | dddzg/MoCo | 94125b06235032cf74768709bac36e3ffbeb3f7c | [
"MIT"
] | 41 | 2019-12-04T09:56:22.000Z | 2022-02-10T13:07:37.000Z | main.py | dddzg/MoCo | 94125b06235032cf74768709bac36e3ffbeb3f7c | [
"MIT"
] | 5 | 2020-03-17T06:53:33.000Z | 2021-01-16T20:15:14.000Z | main.py | dddzg/MoCo | 94125b06235032cf74768709bac36e3ffbeb3f7c | [
"MIT"
] | 6 | 2019-12-21T06:50:58.000Z | 2021-12-04T20:48:16.000Z | from torchvision import transforms, datasets
from torch.utils.data import DataLoader
import config
from dataset import custom_dataset
import pretrainedmodels as models
import torch
from tqdm import tqdm
from torch.nn import functional as F
import types
from utils import AverageMeter, get_shuffle_idx
import os
from utils import get_transform, dataset_info
from wideresnet import WideResNet
# torch.nn.BatchNorm1d
def parse_option():
    """Placeholder for CLI option parsing; all settings live in config.py."""
    return None
def get_model(model_name='resnet18'):
    """Build the MoCo query/key encoder pair.

    Both encoders share the same architecture and get an MLP + l2-normalize
    head; the key encoder is frozen (it is only updated via momentum_update).

    :param model_name: a pretrainedmodels architecture name or 'wideresnet'.
    :return: (model_q, model_k) wrapped in DataParallel and moved to
        config.DEVICE, or None (after printing a message) for unknown names.
    """
    try:
        if model_name in models.__dict__:
            model = models.__dict__[model_name]
        elif model_name == 'wideresnet':
            model = WideResNet
        else:
            # BUG FIX: the KeyError was constructed but never raised, so an
            # unknown model name fell through and crashed later with
            # UnboundLocalError on `model` instead of being reported here.
            raise KeyError(f'There is no model named {model_name}')
        # model = CustomNetwork
        model_q = model(pretrained=None)
        model_k = model(pretrained=None)

        def forward(self, input):
            # Backbone features -> global average pool -> MLP head -> l2 norm.
            x = self.features(input)
            x = F.adaptive_avg_pool2d(x, (1, 1))
            x = x.view(x.size(0), -1)
            x = self.mlp(x)
            x = F.normalize(x)  # l2 normalize by default
            return x

        model_q.forward = types.MethodType(forward, model_q)
        model_k.forward = types.MethodType(forward, model_k)
        # for model k, it doesn't require grad
        for param in model_k.parameters():
            param.requires_grad = False
        # Same GPU id repeated: Shuffle BN can be applied even with one GPU.
        device_list = [config.GPU_ID] * 4
        model_q = torch.nn.DataParallel(model_q, device_ids=device_list)
        model_k = torch.nn.DataParallel(model_k, device_ids=device_list)
        model_q.to(config.DEVICE)
        model_k.to(config.DEVICE)
        return model_q, model_k
    except KeyError:
        print(f'model name:{model_name} does not exist.')
def momentum_update(model_q, model_k, m=0.999):
    """ model_k = m * model_k + (1 - m) * model_q (in-place EMA update).

    model_k's parameters are modified directly and never receive gradients;
    m=0 copies model_q into model_k outright.
    """
    for p1, p2 in zip(model_q.parameters(), model_k.parameters()):
        # `Tensor.add_(scalar, tensor)` is a deprecated overload; the keyword
        # `alpha=` form is the supported spelling and numerically identical.
        p2.data.mul_(m).add_(p1.detach().data, alpha=1 - m)
def enqueue(queue, k):
    """Append the new key batch `k` to the end of the dictionary queue."""
    return torch.cat((queue, k), dim=0)
def dequeue(queue, max_len=None):
    """Keep only the newest `max_len` rows of the queue (FIFO eviction).

    `max_len` defaults to config.QUEUE_LENGTH resolved at call time. The
    original default (`max_len=config.QUEUE_LENGTH`) was evaluated once at
    import time, freezing the value and coupling this module's import order
    to config; the None sentinel is backward compatible for all callers.
    """
    if max_len is None:
        max_len = config.QUEUE_LENGTH
    if queue.shape[0] >= max_len:
        return queue[-max_len:]  # queue follows FIFO
    else:
        return queue
def train(train_dataloader, model_q, model_k, queue, optimizer, device, t=0.07):
    """Run one MoCo training epoch.

    Until the key queue holds config.QUEUE_LENGTH entries, batches only grow
    the queue; afterwards each batch computes the InfoNCE loss (positive key
    at column 0), updates model_q by SGD and model_k by momentum. `t` is the
    softmax temperature.

    NOTE(review): `criterion` is read as a module-level global (created in
    the __main__ section), not a parameter -- confirm before reusing train()
    elsewhere.

    :return: ({'loss': avg_loss, 'pred': avg_positive_logit}, updated queue)
    """
    model_q.train()
    model_k.train()
    losses = AverageMeter()
    pred_meter = AverageMeter()
    for i, (img_q, img_k, _) in enumerate(tqdm(train_dataloader)):
        if queue is not None and queue.shape[0] == config.QUEUE_LENGTH:
            img_q, img_k = img_q.to(device), img_k.to(device)
            q = model_q(img_q)  # N x C
            # shuffle BN: permute the key batch before encoding, un-permute after
            shuffle_idx, reverse_idx = get_shuffle_idx(config.BATCH_SIZE, device)
            img_k = img_k[shuffle_idx]
            k = model_k(img_k)  # N x C
            k = k[reverse_idx].detach()  # reverse and no gradient to key
            N, C = q.shape
            # K = config.QUEUE_LENGTH
            l_pos = torch.bmm(q.view(N, 1, C), k.view(N, C, 1)).view(N, 1)  # positive logit N x 1
            l_neg = torch.mm(q.view(N, C), queue.transpose(0, 1))  # negative logit N x K
            labels = torch.zeros(N, dtype=torch.long).to(device)  # positives are the 0-th
            logits = torch.cat([l_pos, l_neg], dim=1) / t
            # print(logits[0])
            pred = logits[:, 0].mean()
            loss = criterion(logits, labels)
            losses.update(loss.item(), N)
            pred_meter.update(pred.item(), N)
            # update model_q
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # update model_k by momentum
            momentum_update(model_q, model_k, 0.999)
        else:
            # Queue not yet full: only encode keys to grow the dictionary.
            img_k = img_k.to(device)
            shuffle_idx, reverse_idx = get_shuffle_idx(config.BATCH_SIZE, device)
            img_k = img_k[shuffle_idx]
            k = model_k(img_k)  # N x C
            k = k[reverse_idx].detach()  # reverse and no gradient to key
        # update dictionary
        queue = enqueue(queue, k) if queue is not None else k
        queue = dequeue(queue)
    return {
        'loss': losses.avg,
        'pred': pred_meter.avg
    }, queue
if __name__ == '__main__':
    args = parse_option()
    # CIFAR-10 statistics and the matching train-time augmentation pipeline.
    image_size, mean, std = dataset_info(name='cifar')
    # image_size = 28
    # mean = [0.1307, ]
    # std = [0.3081, ]
    # normalize = transforms.Normalize(mean=mean, std=std)
    train_transform = get_transform(image_size, mean=mean, std=std, mode='train')
    # datasets.mnist.MNIST
    train_dataset = custom_dataset(datasets.cifar.CIFAR10)(root='./', train=True, transform=train_transform,
                                                           download=True)
    print(len(train_dataset))
    train_dataloader = DataLoader(train_dataset, batch_size=config.BATCH_SIZE, shuffle=True, num_workers=0,
                                  pin_memory=False, drop_last=True)  # drop the last batch due to irregular size
    model_q, model_k = get_model(config.MODEL)
    optimizer = torch.optim.SGD(model_q.parameters(), lr=0.02, momentum=0.9, nesterov=True, weight_decay=1e-5)
    # Step the LR down by 10x at 2/6, 4/6 and 5/6 of the total epochs.
    per = config.ALL_EPOCHS // 6
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[per * 2, per * 4, per * 5], gamma=0.1)
    # copy parameters from model_q to model_k (m=0 makes it a straight copy)
    momentum_update(model_q, model_k, 0)
    # NOTE: train() reads `criterion` as a global.
    criterion = torch.nn.CrossEntropyLoss()
    torch.backends.cudnn.benchmark = True
    queue = None
    start_epoch = 0
    min_loss = float('inf')
    # load model from file
    if config.RESUME and os.path.isfile(config.FILE_PATH):
        print(f'loading model from {config.FILE_PATH}')
        checkpoint = torch.load(config.FILE_PATH)
        # config.__dict__.update(checkpoint['config'])
        model_q.module.load_state_dict(checkpoint['model_q'])
        model_k.module.load_state_dict(checkpoint['model_k'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        start_epoch = checkpoint['epoch']
        min_loss = checkpoint['min_loss']
        print(f'loaded model from {config.FILE_PATH}')
    for epoch in range(start_epoch, config.ALL_EPOCHS):
        ret, queue = train(train_dataloader, model_q, model_k, queue, optimizer, config.DEVICE)
        ret_str = ' - '.join([f'{k}:{v:.4f}' for k, v in ret.items()])
        print(f'epoch:{epoch} {ret_str}')
        scheduler.step()
        # print(type(config))
        # Checkpoint only when the epoch loss improves on the best so far.
        if ret['loss'] < min_loss:
            min_loss = ret['loss']
            state = {
                # 'config': config.__dict__,
                'model_q': model_q.module.state_dict(),
                'model_k': model_k.module.state_dict(),
                'optimizer': optimizer.state_dict(),
                'scheduler': scheduler.state_dict(),
                'epoch': epoch,
                'min_loss': min_loss
            }
            print(f'save to {config.FILE_PATH}')
            torch.save(state, config.FILE_PATH)
| 36.973958 | 114 | 0.612762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,262 | 0.177772 |
9f10006267ccc0675cf18d5cd79c4fd8c9f5f569 | 1,957 | py | Python | covid_briefing.py | jron-reb/CA-3 | a278fae6b863d4dc44e6da0c5d1b672e9b93393b | [
"MIT"
] | null | null | null | covid_briefing.py | jron-reb/CA-3 | a278fae6b863d4dc44e6da0c5d1b672e9b93393b | [
"MIT"
] | null | null | null | covid_briefing.py | jron-reb/CA-3 | a278fae6b863d4dc44e6da0c5d1b672e9b93393b | [
"MIT"
] | null | null | null | """
module contains a function that polls the ukcovid api in order to find up to date
information about coronavirus in the UK
"""
import json
import logging
from uk_covid19 import Cov19API
from requests import get
logging.basicConfig(level=logging.DEBUG, filename='sys.log')
def get_covid() -> str:
    """Poll the uk_covid19 API for the latest UK Covid-19 figures as one sentence-per-stat string."""
    with open('config.json') as config_file:
        settings = json.load(config_file)
    area = settings["uk_covid19"][0]
    filters = [
        'areaType=' + area["area_type"],
        'areaName=' + area["area_name"]
    ]
    api = Cov19API(filters=filters, structure=area["structure"],
                   latest_by="newDeathsByDeathDate")
    latest = api.get_json()['data'][0]
    return (
        f"Date of information is {latest['date']}."
        f" The number of new cases is {latest['newCasesByPublishDate']}."
        f" The number of cumulative cases is {latest['cumCasesByPublishDate']}."
        f" The number of new deaths is {latest['newDeathsByDeathDate']}."
        f" The number of cumulative deaths is {latest['cumDeathsByDeathDate']}. "
    )
def covid_api_checker() -> int:
    """Return the HTTP status code obtained when polling the Covid-19 API.

    Logs a warning for status codes >= 400, an info message otherwise.
    """
    # BUG FIX: the original read `endpoint = endpoint = (...)` -- a
    # duplicated-assignment typo.
    endpoint = (
        'https://api.coronavirus.data.gov.uk/v1/data?'
        'filters=areaType=nation;areaName=england&'
        'structure={"date":"date","newCases":"newCasesByPublishDate"}'
    )
    response = get(endpoint, timeout=10)
    if response.status_code >= 400:
        logging.warning('HTTP GET request failed, response code is ' + str(response.status_code))
        return response.status_code
    logging.info('HTTP GET request succeeded')
    return response.status_code
| 39.938776 | 98 | 0.671947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 937 | 0.478794 |
9f109b8c71bc216b9f304425a1fe0bb9187d6712 | 1,187 | py | Python | TaskManage/models.py | CooloiStudio/django-hotel-manager | dce558bfeedbb45e5d58bc875dfa936940d57ed5 | [
"MIT"
] | 1 | 2021-08-07T18:44:32.000Z | 2021-08-07T18:44:32.000Z | TaskManage/models.py | CooloiStudio/django-hotel-manager | dce558bfeedbb45e5d58bc875dfa936940d57ed5 | [
"MIT"
] | null | null | null | TaskManage/models.py | CooloiStudio/django-hotel-manager | dce558bfeedbb45e5d58bc875dfa936940d57ed5 | [
"MIT"
] | 1 | 2017-09-10T07:23:05.000Z | 2017-09-10T07:23:05.000Z | # coding: utf-8
from django.db import models
from django.contrib.auth.models import User
from RoomManage.models import Room, Customs
# Create your models here.
class Task(models.Model):
    """A task assigned to a staff member (user) for a specific room."""
    context = models.TextField()  # free-text description of the task
    date = models.DateTimeField()
    task_status = models.CharField(max_length=20, default='undo')  # 'undo' until completed
    # on_delete=models.CASCADE makes the implicit Django < 2.0 default
    # explicit; the argument is mandatory from Django 2.0 onward, so this
    # keeps the model importable on modern Django without changing behaviour.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    room = models.ForeignKey(Room, on_delete=models.CASCADE)
    def __str__(self):
        return '%s %s - %s' % (self.user.last_name, self.user.first_name, self.room.room_num)
class Attendance(models.Model):
    """One clock-in/clock-out record; clock_out stays empty while on shift."""
    clock_in = models.DateTimeField()
    clock_out = models.DateTimeField(null=True, blank=True)
    # Explicit on_delete matches the implicit Django < 2.0 CASCADE default
    # and is required from Django 2.0 onward.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        return '%s %s -- %s' % (self.user.last_name, self.user.first_name, self.clock_in)
class Emergency(models.Model):
    """An emergency reported for a room, optionally claimed by a user."""
    date_time = models.DateTimeField()
    # Explicit on_delete matches the implicit Django < 2.0 CASCADE default
    # and is required from Django 2.0 onward.
    room = models.ForeignKey(Room, on_delete=models.CASCADE)
    user = models.ForeignKey(User, null=True, blank=True, on_delete=models.CASCADE)
    def __str__(self):
        # NOTE(review): self.user may be None (null=True), in which case this
        # raises AttributeError -- confirm unclaimed emergencies are never
        # stringified before relying on it.
        return '%s %s - %s' % (self.user.last_name, self.user.first_name, self.room.room_num)
    class Meta:
        permissions = (
            ('create_emergency', 'can create a emergency'),
        )
| 27.604651 | 93 | 0.676495 | 1,015 | 0.855097 | 0 | 0 | 0 | 0 | 0 | 0 | 126 | 0.10615 |
9f10e94c964ce66f7a8b92cc263cc90bcb46f403 | 400 | py | Python | test/pypendency/test_parser.py | Taschenbergerm/pypendency | d941f584cabd0e6acc56ec3df43be174198ae4b7 | [
"Apache-2.0"
] | null | null | null | test/pypendency/test_parser.py | Taschenbergerm/pypendency | d941f584cabd0e6acc56ec3df43be174198ae4b7 | [
"Apache-2.0"
] | 1 | 2021-06-23T15:05:40.000Z | 2021-06-23T15:05:40.000Z | test/pypendency/test_parser.py | Taschenbergerm/pypendency | d941f584cabd0e6acc56ec3df43be174198ae4b7 | [
"Apache-2.0"
] | null | null | null | import pathlib
import pytest
from pypendency.parser.yaml import Parser
from pypendency.lexer import LarkRelationLexer
def test_read_yaml_node_length():
    """Parsing example.yml must produce a graph with exactly 4 nodes."""
    lexer = LarkRelationLexer()
    # The parser resolves file names relative to this test's directory.
    p = Parser(lexer=lexer, folder=pathlib.Path(__file__).parent)
    g = p.parse("example.yml")
    # Removed the unused `file` local present in the original: the path was
    # computed but never referenced.
    length = len(g.nodes)
    pytest.assume(length == 4)
| 23.529412 | 65 | 0.73 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.065 |
9f11ad86c1a7f6d1888aeabc2cabec9e7fdb4c14 | 842 | py | Python | tests.py | fredrikaverpil/python-rust | 06bedd1b3bea93af3eaafb0154447a18aba56118 | [
"MIT"
] | 1 | 2021-09-23T19:30:04.000Z | 2021-09-23T19:30:04.000Z | tests.py | fredrikaverpil/python-rust | 06bedd1b3bea93af3eaafb0154447a18aba56118 | [
"MIT"
] | null | null | null | tests.py | fredrikaverpil/python-rust | 06bedd1b3bea93af3eaafb0154447a18aba56118 | [
"MIT"
] | null | null | null | import re
import string
import random
import mylib
# Python ZIP version
def count_doubles(val):
    """Count positions where a character is immediately repeated in *val*."""
    # zip(val, val[1:]) yields every adjacent character pair.
    return sum(1 for first, second in zip(val, val[1:]) if first == second)
# Python REGEXP version
double_re = re.compile(r'(?=(.)\1)')  # zero-width lookahead fires at each doubled char


def count_doubles_regex(val):
    """Count adjacent repeated characters using the precompiled lookahead regex."""
    return sum(1 for _ in double_re.finditer(val))
# Benchmark it
# generate 1M of random letters to test it
val = ''.join(random.choice(string.ascii_letters) for i in range(1000000))
# pytest-benchmark tests: the `benchmark` fixture times repeated calls.
def test_pure_python(benchmark):
    # Times the pure-Python zip() implementation.
    benchmark(count_doubles, val)
def test_regex(benchmark):
    # Times the precompiled-regex implementation.
    benchmark(count_doubles_regex, val)
def test_rust(benchmark):
    # Times the Rust extension's implementation (mylib).
    benchmark(mylib.count_doubles, val)
def test_get_result():
    """The Rust extension should echo the input with a 'Rust says: ' prefix."""
    message = "Hello world"
    assert mylib.get_result(message) == f"Rust says: {message}"
| 21.05 | 74 | 0.693587 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 195 | 0.231591 |
9f14982bbe3f83710d22b85ba6beb2a13fe3b1e7 | 329 | py | Python | feapder/network/user_pool/__init__.py | ibryang/feapder | 14b1c1e9bd0953ea8af102d6d220fed4b79d0a5c | [
"MIT"
] | 876 | 2021-02-09T11:08:04.000Z | 2022-03-31T21:14:11.000Z | feapder/network/user_pool/__init__.py | ibryang/feapder | 14b1c1e9bd0953ea8af102d6d220fed4b79d0a5c | [
"MIT"
] | 94 | 2021-02-20T07:59:28.000Z | 2022-03-28T09:54:53.000Z | feapder/network/user_pool/__init__.py | ibryang/feapder | 14b1c1e9bd0953ea8af102d6d220fed4b79d0a5c | [
"MIT"
] | 172 | 2021-02-22T08:24:44.000Z | 2022-03-29T08:15:27.000Z | __all__ = [
"GuestUserPool",
"GuestUser",
"NormalUserPool",
"NormalUser",
"GoldUserPool",
"GoldUser",
"GoldUserStatus",
]
from .gold_user_pool import GoldUserPool, GoldUser, GoldUserStatus
from .guest_user_pool import GuestUserPool, GuestUser
from .normal_user_pool import NormalUserPool, NormalUser
| 23.5 | 66 | 0.735562 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 94 | 0.285714 |
9f16a103699a0493911a1bced040dcaaad5e2781 | 5,954 | py | Python | src/rez/cli/memcache.py | maxnbk/rez | 762c5cfce17eabde67eb5582498406eb3544daf0 | [
"Apache-2.0"
] | null | null | null | src/rez/cli/memcache.py | maxnbk/rez | 762c5cfce17eabde67eb5582498406eb3544daf0 | [
"Apache-2.0"
] | null | null | null | src/rez/cli/memcache.py | maxnbk/rez | 762c5cfce17eabde67eb5582498406eb3544daf0 | [
"Apache-2.0"
] | 1 | 2020-09-24T08:33:43.000Z | 2020-09-24T08:33:43.000Z | # Copyright Contributors to the Rez project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Manage and query memcache server(s).
"""
from __future__ import print_function
def setup_parser(parser, completions=False):
    """Register the memcache subcommand's options on *parser*."""
    # (flag, argparse keyword arguments) -- behaviour is identical to issuing
    # one parser.add_argument call per option.
    options = [
        ("--flush",
         dict(action="store_true", help="flush all cache entries")),
        ("--stats",
         dict(action="store_true", help="list stats")),
        ("--reset-stats",
         dict(action="store_true", help="reset statistics")),
        ("--poll",
         dict(action="store_true",
              help="continually poll, showing get/sets per second")),
        ("--interval",
         dict(type=float, metavar="SECS", default=1.0,
              help="interval (in seconds) used when polling (default: %(default)s)")),
        ("--warm",
         dict(action="store_true",
              help="warm the cache server with visible packages")),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
def poll(client, interval):
    """Continuously print per-server get/set rates and probe latencies.

    Runs forever (stop with Ctrl-C), sampling ``client.get_stats()`` every
    ``interval`` seconds and differencing consecutive samples to derive
    per-second rates; also times one set and one get round-trip per server.
    """
    from rez.utils.memcached import Client
    import time
    prev_entry = None
    print("%-64s %-16s %-16s %-16s %-16s %-16s"
          % ("SERVER", "CONNS", "GET/s", "SET/s", "TEST_GET", "TEST_SET"))
    while True:
        stats = dict(client.get_stats())
        entry = (time.time(), stats)
        if prev_entry:
            prev_t, prev_stats = prev_entry
            t, stats = entry
            dt = t - prev_t
            for instance, payload in stats.items():
                prev_payload = prev_stats.get(instance)
                if payload and prev_payload:
                    # stats: rate = delta(counter) / elapsed seconds
                    gets = int(payload["cmd_get"]) - int(prev_payload["cmd_get"])
                    sets = int(payload["cmd_set"]) - int(prev_payload["cmd_set"])
                    gets_per_sec = gets / dt
                    sets_per_sec = sets / dt
                    # test get/set: time one round-trip set and get
                    uri = instance.split()[0]
                    # NOTE(review): this rebinds the `client` parameter to a
                    # per-server Client, so the next get_stats() at the top of
                    # the loop uses the rebound object -- looks unintentional,
                    # confirm before relying on multi-server polling.
                    client = Client([uri], debug=True)
                    t1 = time.time()
                    client.set("__TEST__", 1)
                    t2 = time.time()
                    test_set = t2 - t1
                    client.get("__TEST__")
                    test_get = time.time() - t2
                    nconns = int(payload["curr_connections"])
                    print("%-64s %-16d %-16g %-16g %-16g %-16g"
                          % (instance, nconns, gets_per_sec, sets_per_sec,
                             test_get, test_set))
        prev_entry = entry
        time.sleep(interval)
def command(opts, parser, extra_arg_groups=None):
    """Entry point for the memcache subcommand: dispatch on parsed options.

    Handles --poll / --flush / --warm / --reset-stats / --stats; with no
    option given it prints a one-line summary table per cache server.
    Exits with status 1 if memcaching is disabled or servers don't respond.
    """
    from rez.config import config
    from rez.packages import iter_package_families, iter_packages
    from rez.utils.yaml import dump_yaml
    from rez.utils.memcached import Client
    from rez.utils.formatting import columnise, readable_time_duration, \
        readable_memory_size
    import sys
    memcache_client = Client(servers=config.memcached_uri,
                             debug=config.debug_memcache)
    if not memcache_client:
        print("memcaching is not enabled.", file=sys.stderr)
        sys.exit(1)
    if opts.poll:
        poll(memcache_client, opts.interval)
        return
    if opts.flush:
        memcache_client.flush(hard=True)
        print("memcached servers are flushed.")
        return
    if opts.warm:
        # Iterate every non-local package once; loading its definition as a
        # side effect populates the memcache.
        seen = set()
        paths = config.nonlocal_packages_path
        for family in iter_package_families(paths=paths):
            if family.name in seen:
                continue
            for package in iter_packages(family.name, paths=paths):
                if opts.verbose:
                    print("warming: %s" % package.qualified_name)
                # forces package definition load, which puts in memcache
                _ = package.data  # noqa
            seen.add(family.name)
        print("memcached servers are warmed.")
        return
    if opts.reset_stats:
        memcache_client.reset_stats()
        print("memcached servers are stat reset.")
        return
    def _fail():
        # Common exit path when the servers return no stats.
        print("memcached servers are not responding.", file=sys.stderr)
        sys.exit(1)
    stats = memcache_client.get_stats()
    if opts.stats:
        if stats:
            txt = dump_yaml(stats)
            print(txt)
        else:
            _fail()
        return
    # print stats summary
    if not stats:
        _fail()
    rows = [["CACHE SERVER", "UPTIME", "HITS", "MISSES", "HIT RATIO", "MEMORY", "USED"],
            ["------------", "------", "----", "------", "---------", "------", "----"]]
    for server_id, stats_dict in stats:
        server_uri = server_id.split()[0]
        uptime = int(stats_dict.get("uptime", 0))
        hits = int(stats_dict.get("get_hits", 0))
        misses = int(stats_dict.get("get_misses", 0))
        memory = int(stats_dict.get("limit_maxbytes", 0))
        used = int(stats_dict.get("bytes", 0))
        # max(..., 1) guards against division by zero on fresh/empty servers.
        hit_ratio = float(hits) / max(hits + misses, 1)
        hit_percent = int(hit_ratio * 100.0)
        used_ratio = float(used) / max(memory, 1)
        used_percent = int(used_ratio * 100.0)
        row = (server_uri,
               readable_time_duration(uptime),
               str(hits),
               str(misses),
               "%d%%" % hit_percent,
               readable_memory_size(memory),
               "%s (%d%%)" % (readable_memory_size(used), used_percent))
        rows.append(row)
    print('\n'.join(columnise(rows)))
| 32.714286 | 88 | 0.571213 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,634 | 0.274437 |
9f16b56179abddee7eeec967dbe0ed0f81253019 | 124 | py | Python | core/data/Cityscape/__init__.py | js-fan/MCIC | a98927e2d88452d96f1fba99a5dc25a5f518caa8 | [
"MIT"
] | 1 | 2021-07-19T21:52:46.000Z | 2021-07-19T21:52:46.000Z | core/data/Cityscape/__init__.py | js-fan/MCIC | a98927e2d88452d96f1fba99a5dc25a5f518caa8 | [
"MIT"
] | null | null | null | core/data/Cityscape/__init__.py | js-fan/MCIC | a98927e2d88452d96f1fba99a5dc25a5f518caa8 | [
"MIT"
] | null | null | null | from .cs_loader import CSPointDataset
from .cs_class_loader import CSClassDataset
from .cs_seed_loader import CSSeedDataset
| 31 | 43 | 0.879032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9f16b7604472b4df4adc8e900195443eaeab2e23 | 1,235 | py | Python | lib/sxc/actuator.py | GoogleCloudPlatform/SourceXCloud | c1397e5b9a6989fc4adf54d7957f73a95c5bcdc8 | [
"Apache-2.0"
] | 3 | 2017-11-29T16:15:21.000Z | 2021-10-09T18:32:16.000Z | lib/sxc/actuator.py | GoogleCloudPlatform/SourceXCloud | c1397e5b9a6989fc4adf54d7957f73a95c5bcdc8 | [
"Apache-2.0"
] | null | null | null | lib/sxc/actuator.py | GoogleCloudPlatform/SourceXCloud | c1397e5b9a6989fc4adf54d7957f73a95c5bcdc8 | [
"Apache-2.0"
] | 3 | 2016-07-12T02:10:35.000Z | 2021-03-21T10:24:19.000Z | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The actuator interface and implementations.
"""
import json
import os
class Actuator(object):
    """Abstract push target; concrete actuators must override push()."""

    def push(self, core, image, args):
        """Push an image to a target."""
        raise NotImplementedError()
class ActuatorExtension(Actuator):
    """Actuator extension consisting of hook programs."""

    def __init__(self, actuator_root_dir):
        # The extension takes its name from the basename of its directory.
        self.root = actuator_root_dir
        self.name = os.path.basename(actuator_root_dir)

    def push(self, core, image, args):
        """Run this extension's 'push' hook, feeding *image* as JSON on stdin."""
        return core.get_utils().run_hook(
            self.root, 'push', core.get_source_directory(), *args,
            input=json.dumps(image))
| 30.875 | 74 | 0.704453 | 580 | 0.469636 | 0 | 0 | 0 | 0 | 0 | 0 | 703 | 0.569231 |
9f16f735c3c599fc2596af8a0c9fd7ff4f482bbc | 2,432 | py | Python | linuxjournalarchiver.py | jcjordyn130/linuxjournalarchiver | b7580cd17c58af7d9bccfa53807cc9ec6581fc0e | [
"BSD-3-Clause"
] | null | null | null | linuxjournalarchiver.py | jcjordyn130/linuxjournalarchiver | b7580cd17c58af7d9bccfa53807cc9ec6581fc0e | [
"BSD-3-Clause"
] | null | null | null | linuxjournalarchiver.py | jcjordyn130/linuxjournalarchiver | b7580cd17c58af7d9bccfa53807cc9ec6581fc0e | [
"BSD-3-Clause"
] | null | null | null | # linuxjournalarchiver - Some hacky code I wrote to archive the Linux Journal.
# Licensed under the BSD-3-Clause license.
from bs4 import BeautifulSoup
import requests
import re
import pathlib
# Download the download page.
print("Downloading magazine list...")
session = requests.session()
# Update the User Agent to curl, this is because the server-side code
# handles curl specially.
session.headers.update({"User-Agent": "curl/7.65.3"})
r = session.get("https://secure2.linuxjournal.com/pdf/dljdownload.php")
soup = BeautifulSoup(r.text, "lxml")
# Process all the download buttons.
for e in reversed(soup.find_all("div", class_ = "downloadbtn")):
    # Some issues don't have certain file formats, skip these.
    if e.get_text() == "N/A":
        print("No link")
        continue
    # Certain downloadbtn div elements don't have a link, skip these.
    try:
        link = e.a.get("href")
    except AttributeError:
        print("Invalid element")
        continue
    # Download the magazine.
    magr = session.get(link + '&action=spit', stream = True)
    # Get the name from the Content-Disposition header and format it.
    name = re.findall(r'filename=(.+)', magr.headers["Content-Disposition"])
    name = name[0].strip('"')
    # Special treatment for Supplemental issues.
    if "Supplement" not in link:
        # Get the date. BUG FIX: the pattern is now a raw string; `\.` in a
        # non-raw literal is an invalid escape sequence (DeprecationWarning
        # today, a SyntaxError in future Python versions).
        date = re.findall(r"....-..(?=\....)", name)[0]
        year, month = date.split("-")
        # Get the path.
        dirpath = pathlib.Path(f"{year}/{month}")
        dirpath.mkdir(parents = True, exist_ok = True)
        magpath = dirpath / name
    else:
        # We don't have a date,
        # so we use the "Supplement" folder as a fill in on supplemental issues.
        dirpath = pathlib.Path("Supplement")
        dirpath.mkdir(parents = True, exist_ok = True)
        magpath = dirpath / name
    # Don't download a magazine that we have already downloaded.
    if magpath.exists():
        print(f"{magpath} exists... skipping")
        continue
    # Debug printing.
    print(f"Downloading {link} to {magpath}...")
    # Save the data to a file (open() accepts Path objects directly).
    with open(magpath, "wb") as f:
        bytesdownloaded = 0
        for chunk in magr.iter_content(chunk_size = 8192):
            if chunk: # filter out keep-alive new chunks
                bytesdownloaded += len(chunk)
                print(f"{name}: {bytesdownloaded}/{magr.headers['Content-Length']}")
f.write(chunk) | 33.315068 | 84 | 0.630345 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,191 | 0.48972 |
9f1a77ebd61987c0e53368471dad9d42d5b2f750 | 548 | py | Python | mtianyan/listname.py | mtianyan/mtianyan | 6c8f3d2076da4f472f6734714f1352ffaa5264b1 | [
"Apache-2.0"
] | null | null | null | mtianyan/listname.py | mtianyan/mtianyan | 6c8f3d2076da4f472f6734714f1352ffaa5264b1 | [
"Apache-2.0"
] | null | null | null | mtianyan/listname.py | mtianyan/mtianyan | 6c8f3d2076da4f472f6734714f1352ffaa5264b1 | [
"Apache-2.0"
] | 4 | 2020-11-29T14:25:39.000Z | 2021-04-05T07:17:56.000Z | import os.path
filepathlist = []  # absolute paths of every file found
filenamelist = []  # bare file names, parallel to filepathlist


def processDirectory(args, dirname, filenames):
    """Record the full path and bare name of every file in one directory.

    The signature matches the old ``os.path.walk`` callback; *args* is unused.
    """
    for filename in filenames:
        file_path = os.path.join(dirname, filename)
        if os.path.isfile(file_path):
            filepathlist.append(file_path)
            filenamelist.append(filename)


def getpatch(path):
    """Recursively collect file paths under *path*; returns filepathlist.

    BUG FIX: ``os.path.walk`` was removed in Python 3. ``os.walk`` (available
    since Python 2.3) is used instead, feeding each visited directory to
    processDirectory, which preserves the original results (directories were
    filtered out by the isfile() check anyway).
    """
    for dirname, _subdirs, filenames in os.walk(path):
        processDirectory(None, dirname, filenames)
    return filepathlist
# BUG FIX: raw string -- the original non-raw literal contained '\N' and
# '\u', which are malformed escape sequences and a SyntaxError in Python 3.
getpatch(r'H:\CodePath\NoteBook\uber_input')
# BUG FIX: 'with' guarantees the output file is flushed and closed; the
# original handle was never closed.
with open('data_list.txt', 'w') as fw:
    for item in filenamelist:
        fw.write(item + '\n')
| 28.842105 | 54 | 0.662409 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.109489 |
9f1b53d078da5b69adb58ccf7772f687a6128974 | 4,858 | py | Python | artifacts/pull_sources.py | onedata/bamboo | 5f6ce4234a75d150e8c9bfd60054d6a96bfcc3c1 | [
"MIT"
] | null | null | null | artifacts/pull_sources.py | onedata/bamboo | 5f6ce4234a75d150e8c9bfd60054d6a96bfcc3c1 | [
"MIT"
] | null | null | null | artifacts/pull_sources.py | onedata/bamboo | 5f6ce4234a75d150e8c9bfd60054d6a96bfcc3c1 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
"""
Pulls artifacts from external repo using branches defined in branchConfig.yaml
file.
Run the script with -h flag to learn about script's running options.
"""
__author__ = "Michal Cwiertnia"
__copyright__ = "Copyright (C) 2018 ACK CYFRONET AGH"
__license__ = "This software is released under the MIT license cited in " \
"LICENSE.txt"
import os
import yaml
import argparse
import boto3
from paramiko import SSHClient, AutoAddPolicy
from pull_artifact import (download_artifact_safe,
download_specific_or_default,
s3_download_artifact_safe,
s3_download_specific_or_default)
BRANCH_CFG_PATH = 'branchConfig.yaml'  # per-plan branch mapping read by main()
BAMBOO_BRANCH_NAME = 'bamboo_planRepository_branchName'  # env var set by Bamboo
DEFAULT_BRANCH = 'default'  # key in branchConfig.yaml holding the fallback branch
CURRENT_BRANCH = 'current_branch'  # sentinel meaning "use the plan's own branch"
def main():
    """Pull artifacts for every plan listed in branchConfig.yaml.

    If --hostname is the literal 'S3', artifacts are fetched from the S3
    bucket; otherwise an SSH connection to the artifact repository is used.
    For each plan, a pinned branch is downloaded directly, while the
    'current_branch' sentinel resolves to the Bamboo plan branch (falling
    back to the configured default branch).
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Pull sources and images lists for branches specified in '
                    'branchConfig.yaml file.')
    parser.add_argument(
        '--hostname', '-hn',
        help='Hostname of artifacts repository',
        required=True)
    parser.add_argument(
        '--port', '-p',
        type=int,
        help='SSH port to connect to',
        required=True)
    parser.add_argument(
        '--username', '-u',
        help='The username to authenticate as',
        required=True)
    parser.add_argument(
        '--s3-url',
        help='The S3 endpoint URL',
        default='https://storage.cloud.cyfronet.pl')
    parser.add_argument(
        '--s3-bucket',
        help='The S3 bucket name',
        default='bamboo-artifacts-2')
    args = parser.parse_args()
    if args.hostname != 'S3':
        ssh = SSHClient()
        ssh.set_missing_host_key_policy(AutoAddPolicy())
        ssh.load_system_host_keys()
        ssh.connect(args.hostname, port=args.port, username=args.username)
        with open(BRANCH_CFG_PATH, 'r') as branch_cfg_file:
            # BUG FIX: bare yaml.load() is deprecated on PyYAML 5.x and fails
            # on PyYAML >= 6 (Loader became mandatory); the config file is
            # plain data, so safe_load is the correct call.
            branch_cfg = yaml.safe_load(branch_cfg_file)
        default_branch = branch_cfg.get(DEFAULT_BRANCH)
        for plan, branch in branch_cfg.get('branches').items():
            if branch != CURRENT_BRANCH:
                print('Getting artifact for plan {}\'s from branch {}'
                      .format(plan, branch))
                exc_log = 'Branch {} in plan {} not found.'.format(branch,
                                                                   plan)
                download_artifact_safe(ssh, plan, branch, args.hostname,
                                       args.port, args.username,
                                       exc_handler=exit,
                                       exc_handler_args=(1,),
                                       exc_log=exc_log)
            else:
                download_specific_or_default(ssh, plan,
                                             os.getenv(BAMBOO_BRANCH_NAME),
                                             args.hostname, args.port,
                                             args.username,
                                             default_branch=default_branch)
        ssh.close()
    else:
        s3_session = boto3.session.Session()
        s3_res = s3_session.resource(
            service_name='s3',
            endpoint_url=args.s3_url
        )
        with open(BRANCH_CFG_PATH, 'r') as branch_cfg_file:
            # BUG FIX: same safe_load change as in the SSH branch above.
            branch_cfg = yaml.safe_load(branch_cfg_file)
        default_branch = branch_cfg.get(DEFAULT_BRANCH)
        for plan, branch in branch_cfg.get('branches').items():
            if branch != CURRENT_BRANCH:
                print('Getting artifact for plan {}\'s from branch {}'
                      .format(plan, branch))
                exc_log = 'Branch {} in plan {} not found.'.format(branch,
                                                                   plan)
                s3_download_artifact_safe(s3_res, args.s3_bucket, plan, branch, args.hostname,
                                          args.port, args.username,
                                          exc_handler=exit,
                                          exc_handler_args=(1,),
                                          exc_log=exc_log)
            else:
                s3_download_specific_or_default(s3_res, args.s3_bucket, plan,
                                                os.getenv(BAMBOO_BRANCH_NAME),
                                                args.hostname, args.port,
                                                args.username,
                                                default_branch=default_branch)
if __name__ == '__main__':
main()
| 38.864 | 98 | 0.512762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 934 | 0.19226 |
9f1b577bee0eed95315bb683c1d863d69af58309 | 845 | py | Python | setup.py | strath-sdr/rfsoc_notebooks | 9608a77ceb6545e939309ecb1093ee870625e095 | [
"BSD-3-Clause"
] | 8 | 2021-03-01T00:11:28.000Z | 2022-01-25T23:36:21.000Z | setup.py | strath-sdr/rfsoc_notebooks | 9608a77ceb6545e939309ecb1093ee870625e095 | [
"BSD-3-Clause"
] | null | null | null | setup.py | strath-sdr/rfsoc_notebooks | 9608a77ceb6545e939309ecb1093ee870625e095 | [
"BSD-3-Clause"
] | 4 | 2021-03-05T15:58:09.000Z | 2022-01-11T10:28:06.000Z | import os
import shutil
from distutils.dir_util import copy_tree
from setuptools import find_packages, setup
# global variables
nb_dir = os.environ['PYNQ_JUPYTER_NOTEBOOKS']
package_name = 'pystrath_rfsoc'
pip_name = 'pystrath-rfsoc'
data_files = []
# copy common notebooks to jupyter home
def copy_common_notebooks():
    """Copy the bundled 'common' notebooks into the Jupyter notebooks home.

    Replaces any existing 'rfsoc-notebooks' folder under the PYNQ Jupyter
    directory (``nb_dir``) with a fresh copy of ./common.
    """
    # NOTE(review): the f-string prefix on 'common' is a no-op (no
    # placeholders) -- presumably a leftover; confirm and simplify.
    src_dir = os.path.join(f'common')
    dst_dir = os.path.join(nb_dir, 'rfsoc-notebooks')
    # Remove a stale copy first so copy_tree produces an exact mirror.
    if os.path.exists(dst_dir):
        shutil.rmtree(dst_dir)
    copy_tree(src_dir, dst_dir)
copy_common_notebooks()
setup(
name=package_name,
version='0.2.0',
install_requires=[
'plotly==5.1.0',
'pynq==2.7'
],
author="David Northcote",
packages=find_packages(),
package_data={
'': data_files,
},
description="A collection of RFSoC introductory notebooks for PYNQ.")
| 23.472222 | 73 | 0.693491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 247 | 0.292308 |
9f1b5c4fac93e2e8e19afd05e5185ce9a5959531 | 7,682 | py | Python | test/test_query.py | jayvdb/version-query | e9b7c8887d18441bc3a79205f236d6b20eea2930 | [
"Apache-2.0"
] | null | null | null | test/test_query.py | jayvdb/version-query | e9b7c8887d18441bc3a79205f236d6b20eea2930 | [
"Apache-2.0"
] | null | null | null | test/test_query.py | jayvdb/version-query | e9b7c8887d18441bc3a79205f236d6b20eea2930 | [
"Apache-2.0"
] | null | null | null | """Tests of querying tools."""
import contextlib
import importlib
import io
import logging
import os
import pathlib
import sys
import tempfile
import unittest
from version_query.version import VersionComponent, Version
from version_query.git_query import query_git_repo, predict_git_repo
from version_query.py_query import query_metadata_json, query_pkg_info, query_package_folder
from version_query.query import \
query_folder, query_caller, query_version_str, predict_caller, predict_version_str
from .examples import \
PY_LIB_DIR, GIT_REPO_EXAMPLES, METADATA_JSON_EXAMPLE_PATHS, PKG_INFO_EXAMPLE_PATHS, \
PACKAGE_FOLDER_EXAMPLES
from .test_setup import run_module
_LOG = logging.getLogger(__name__)
class Tests(unittest.TestCase):
def test_deprecated(self):
import warnings
warnings.warn('remove this test after removing deprecated function', DeprecationWarning)
from version_query import generate_version_str
with self.assertWarns(DeprecationWarning):
version_str = generate_version_str()
self.assertIsInstance(version_str, str)
def _check_examples_count(self, description, examples):
lvl = logging.WARNING if len(examples) < 10 else logging.INFO
_LOG.log(lvl, '%s count: %i', description, len(examples))
if len(examples) < 5:
_LOG.warning('%s list: %s', description, examples)
self.assertGreater(len(examples), 0)
def test_example_count_checking(self):
_LOG.warning('%s', PY_LIB_DIR)
with self.assertRaises(AssertionError):
self._check_examples_count('test', [])
self._check_examples_count('test', list(range(1)))
self._check_examples_count('test', list(range(9)))
self._check_examples_count('test', list(range(10)))
def _query_test_case(self, paths, query_function):
for path in paths:
with self.subTest(path=path, query_function=query_function):
_LOG.debug('testing %s() on %s', query_function.__name__, path)
try:
version = query_function(path)
_LOG.debug('%s: %s', path, version)
except ValueError:
_LOG.info('failed to get version from %s', path, exc_info=True)
def test_query_git_repo(self):
self._check_examples_count('git repo', GIT_REPO_EXAMPLES)
self._query_test_case(GIT_REPO_EXAMPLES, query_git_repo)
def test_predict_caller_bad(self):
with tempfile.TemporaryDirectory() as project_path_str:
with tempfile.NamedTemporaryFile(suffix='.py', dir=project_path_str,
delete=False) as project_file:
project_file_path = pathlib.Path(project_file.name)
with project_file_path.open('a') as project_file:
project_file.write('from version_query.query import predict_caller\n\n\n'
'def caller():\n predict_caller()\n\n\ncaller()\n')
sys.path.insert(0, project_path_str)
_LOG.warning('inserted %s to sys.path', project_path_str)
print(project_file_path)
print(project_path_str)
with self.assertRaises(ValueError):
importlib.import_module(project_file_path.with_suffix('').name)
sys.path.remove(project_path_str)
_LOG.warning('removed %s from sys.path', project_path_str)
project_file_path.unlink()
def test_predict_git_repo(self):
self._query_test_case(GIT_REPO_EXAMPLES, predict_git_repo)
@unittest.skipIf(not METADATA_JSON_EXAMPLE_PATHS, 'no "metadata.json" files found')
def test_query_metadata_json(self):
self._check_examples_count('metadata.json', METADATA_JSON_EXAMPLE_PATHS)
self._query_test_case(METADATA_JSON_EXAMPLE_PATHS, query_metadata_json)
@unittest.skipIf(not PKG_INFO_EXAMPLE_PATHS, 'no "PKG-INFO" files found')
def test_query_pkg_info(self):
self._check_examples_count('PKG-INFO', PKG_INFO_EXAMPLE_PATHS)
self._query_test_case(PKG_INFO_EXAMPLE_PATHS, query_pkg_info)
@unittest.skipUnless(os.environ.get('TEST_PACKAGING'), 'skipping packaging test')
def test_query_pkg_info_current(self):
run_module('setup', 'build')
paths = list(pathlib.Path.cwd().glob('*.egg-info/PKG-INFO'))
self.assertEqual(len(paths), 1)
path = paths[0]
version = query_pkg_info(path)
_LOG.debug('%s: %s', path, version)
def test_query_pkg_info_bad(self):
with tempfile.NamedTemporaryFile(delete=False) as bad_file:
bad_file_path = pathlib.Path(bad_file.name)
with self.assertRaises(ValueError):
query_pkg_info(bad_file_path)
with bad_file_path.open('a') as bad_file:
bad_file.write('blah blah blah')
with self.assertRaises(ValueError):
query_pkg_info(bad_file_path)
with bad_file_path.open('a') as bad_file:
bad_file.write('Version: hello world')
with self.assertRaises(ValueError):
query_pkg_info(bad_file_path)
bad_file_path.unlink()
def test_query_package_folder(self):
self._check_examples_count('package folder', PACKAGE_FOLDER_EXAMPLES)
self._query_test_case(PACKAGE_FOLDER_EXAMPLES, query_package_folder)
@unittest.skipUnless(os.environ.get('TEST_PACKAGING'), 'skipping packaging test')
def test_query_package_folder_current(self):
run_module('setup', 'build')
path = pathlib.Path.cwd().joinpath('version_query')
version = query_package_folder(path)
_LOG.debug('%s: %s', path, version)
self.assertIsInstance(version, Version)
def test_query_folder(self):
self._query_test_case(PACKAGE_FOLDER_EXAMPLES, query_folder)
def test_query_folder_current(self):
path = pathlib.Path.cwd()
version = query_folder(path)
_LOG.debug('%s: %s', path, version)
self.assertIsInstance(version, Version)
def test_query_caller(self):
version = query_caller()
_LOG.debug('caller: %s', version)
self.assertIsInstance(version, Version)
def test_not_as_main(self):
run_module('version_query', run_name=None)
def test_help(self):
sio = io.StringIO()
with contextlib.redirect_stderr(sio):
with self.assertRaises(SystemExit):
run_module('version_query')
_LOG.info('%s', sio.getvalue())
def test_bad_usage(self):
sio = io.StringIO()
with contextlib.redirect_stderr(sio):
with self.assertRaises(ValueError):
run_module('version_query', '-p', '-i', '.')
_LOG.info('%s', sio.getvalue())
def test_here(self):
sio = io.StringIO()
with contextlib.redirect_stdout(sio):
run_module('version_query', '.')
self.assertEqual(sio.getvalue().rstrip(), query_caller().to_str())
self.assertEqual(sio.getvalue().rstrip(), query_version_str())
def test_increment_here(self):
sio = io.StringIO()
with contextlib.redirect_stdout(sio):
run_module('version_query', '-i', '.')
self.assertEqual(sio.getvalue().rstrip(),
query_caller().increment(VersionComponent.Patch).to_str())
def test_predict_here(self):
sio = io.StringIO()
with contextlib.redirect_stdout(sio):
run_module('version_query', '-p', '.')
self.assertEqual(sio.getvalue().rstrip(), predict_caller().to_str())
self.assertEqual(sio.getvalue().rstrip(), predict_version_str())
| 41.080214 | 96 | 0.669357 | 6,962 | 0.906274 | 0 | 0 | 1,274 | 0.165842 | 0 | 0 | 827 | 0.107654 |
9f1c2546b04d443c1ae8a04369e0722359f68afc | 728 | py | Python | app/main/forms.py | Ag-nes/Blog | 3c8c3f50b2eb6dffe19392179b4c2fda1718e926 | [
"MIT"
] | null | null | null | app/main/forms.py | Ag-nes/Blog | 3c8c3f50b2eb6dffe19392179b4c2fda1718e926 | [
"MIT"
] | null | null | null | app/main/forms.py | Ag-nes/Blog | 3c8c3f50b2eb6dffe19392179b4c2fda1718e926 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SelectField ,SubmitField
from wtforms.validators import input_required
class EditProfile(FlaskForm):
    """Form for editing the user's profile description."""
    about = TextAreaField('Tell us about yourself.',validators = [input_required()])
    submit = SubmitField('Update')
class UpdatePost(FlaskForm):
    """Form for editing the text of an existing post."""
    text = TextAreaField('Edit post here',validators = [input_required()])
    submit = SubmitField('Update')
class PostForm(FlaskForm):
    """Form for creating a new post."""
    post_text = TextAreaField('New Post', validators=[input_required()])
    submit = SubmitField('Post')
class CommentForm(FlaskForm):
    """Form for commenting on a post."""
    post_comment = TextAreaField('Make a comment', validators=[input_required()])
    submit = SubmitField('Submit')
9f1c5fd349b7d26a420b931b4c0db17b38bcc27d | 647 | py | Python | test/lit/memberfield/__init__.py | sivachandra/gala | 6d7e5fd3cf3c319062a3985dbffd791944e180e9 | [
"Apache-2.0"
] | 4 | 2016-07-16T01:35:30.000Z | 2020-06-18T05:37:33.000Z | test/lit/memberfield/__init__.py | sivachandra/gala | 6d7e5fd3cf3c319062a3985dbffd791944e180e9 | [
"Apache-2.0"
] | 7 | 2015-06-26T19:24:30.000Z | 2015-08-18T18:16:11.000Z | test/lit/memberfield/__init__.py | sivachandra/gala | 6d7e5fd3cf3c319062a3985dbffd791944e180e9 | [
"Apache-2.0"
] | null | null | null | import gdb
def print_field(f):
    """Print one gdb type-field object to stdout, one attribute per line.

    'bitpos' and 'enumval' are optional on gdb fields, so each is reported
    only when present; a placeholder line is printed otherwise.
    """
    lines = ["========",
             "name: %s" % f.name,
             "type: %s" % f.type]
    if hasattr(f, "bitpos"):
        lines.append("bitpos: %d" % f.bitpos)
    else:
        lines.append("No bitpos attribute.")
    lines += ["bitsize: %d" % f.bitsize,
              "parent_type: %s" % f.parent_type,
              "is_base_class: %s" % f.is_base_class,
              "artificial: %s" % f.artificial]
    if hasattr(f, "enumval"):
        lines.append("enumval: %d" % f.enumval)
    else:
        lines.append("No enumval attribute.")
    print("\n".join(lines))
derived = gdb.lookup_type("Derived")
for f in derived.fields():
print_field(f)
enum = gdb.lookup_type("EnumType")
for f in enum.fields():
print_field(f)
| 23.962963 | 46 | 0.630603 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 201 | 0.310665 |
9f1d626a1d893e3b39612703d70895dd7ca60253 | 42,581 | py | Python | source/main.py | psythefirst/bibrarian | 9a88921ee442b44b57d7e1e9d0e8d2d395541653 | [
"MIT"
] | 4 | 2018-07-05T15:48:33.000Z | 2020-01-22T16:59:02.000Z | source/main.py | psythefirst/bibrarian | 9a88921ee442b44b57d7e1e9d0e8d2d395541653 | [
"MIT"
] | null | null | null | source/main.py | psythefirst/bibrarian | 9a88921ee442b44b57d7e1e9d0e8d2d395541653 | [
"MIT"
] | 1 | 2019-05-02T17:18:46.000Z | 2019-05-02T17:18:46.000Z | import argparse
import getpass
import glob
import hashlib
import itertools
import json
import logging
import os
import sys
import threading
import time
import traceback
import subprocess
import urllib
import urllib.request
import urllib.parse
import urwid
import pybtex
import pybtex.database
class BibEntry:
    """Abstract base class for one bibliography entry shown in the UI.

    Subclasses supply the metadata properties (``authors``, ``title``,
    ``year``, ``venue``, ``bibkey``, ``url``, ``pyb_entry``,
    ``details_widget``); this base class provides the search-result row
    widget, the selection mark and the open-in-browser action.
    """
    class SearchPanelWidgetImpl(urwid.AttrMap):
        """Three-row urwid widget rendering one entry in the results list."""
        def __init__(self, entry):
            super().__init__(urwid.SolidFill(), None)
            self.entry = entry
            self.title = urwid.AttrMap(urwid.Text(entry.title), 'title')
            self.info = urwid.Text([('author', f"{entry.abbrev_authors}"),
                                    ('delim', ". "),
                                    ('venue', f"{entry.venue}"),
                                    ('delim', ", "),
                                    ('year', f"{entry.year}"),
                                    ('delim', ".")])
            # Placeholder text only; the visible mark is set via BibEntry.mark.
            self.mark = urwid.AttrMap(urwid.Text(('mark_none', "[M]"), align='right'), None)
            self.source = urwid.Text([('source', f"{entry.source}"),
                                      ('delim', "::"),
                                      ('bibkey', f"{entry.bibkey}")])
            self.original_widget = urwid.Pile([
                urwid.AttrMap(urwid.Columns([('weight', 1, self.title),
                                             ('pack', self.mark)],
                                            dividechars=1),
                              'title'),
                self.info, self.source])
            # While focused, every palette entry maps to its "<name>+"
            # highlighted variant.
            self.set_focus_map({k: ('plain' if k is None else str(k)) + '+' for k in [
                'title', 'author', 'delim', 'venue', 'year', 'source',
                'bibkey', 'mark_none', 'mark_selected', 'title_delim',
                'bibtex_ready', 'bibtex_fetching', None]})
        def selectable(self):
            return True
        def keypress(self, size, key):
            """Per-entry keys: <space> toggles selection, 'i' shows details,
            '@' opens the entry url in the system browser."""
            if key == ' ':
                self.entry.repo.selected_keys_panel.Toggle(self.entry)
                self.entry.OnSelectionHandler()
            elif key == 'i':
                self.entry.repo.details_panel.original_widget = self.entry.details_widget
            elif key == '@':
                self.entry.OpenInBrowser()
            else:
                return key
    def __init__(self, source, repo):
        self.repo = repo
        self._source = source
        self._search_panel_widget = None  # built lazily on first access
        self._mark = None
    # --- Metadata interface: subclasses must override these properties. ---
    @property
    def authors(self): return NotImplemented
    @property
    def title(self): return NotImplemented
    @property
    def year(self): return NotImplemented
    @property
    def venue(self): return NotImplemented
    @property
    def bibkey(self): return NotImplemented
    @property
    def url(self): return NotImplemented
    @property
    def abbrev_authors(self):
        """First author, with "et al" appended when there are several."""
        authors = self.authors
        if len(authors) == 1:
            return f"{authors[0]}"
        else:
            return f"{authors[0]} et al"
    @property
    def pyb_entry(self): return NotImplemented
    @property
    def details_widget(self): return NotImplemented
    @property
    def source(self):
        return self._source
    def Match(self, keywords):
        """Return True when every keyword of length >= 3 matches this entry.

        A keyword matches when it occurs case-insensitively in the unique
        key, the title, or any author name.  Returns False when no keyword
        is at least 3 characters long (the query is considered trivial).
        """
        trivial = True
        for keyword in filter(lambda k: len(k) >= 3, keywords):
            trivial = False
            if keyword.upper() in self.unique_key.upper():
                continue
            if keyword.upper() in self.title.upper():
                continue
            matched = False
            for author in self.authors:
                if keyword.upper() in author.upper():
                    matched = True
                    break
            if not matched: return False
        return not trivial
    @property
    def search_panel_widget(self):
        self._InitializeSearchPanelWidget()
        return self._search_panel_widget
    @property
    def mark(self):
        return self._mark
    @mark.setter
    def mark(self, value):
        """Set the selection mark; accepts None (unselected) or 'selected'."""
        self._InitializeSearchPanelWidget()
        self._mark = value
        if value is None:
            self._search_panel_widget.mark.original_widget.set_text(
                [('title_delim', "["), ('mark_none', " "), ('title_delim', "]")])
        elif value == 'selected':
            self._search_panel_widget.mark.original_widget.set_text(
                [('title_delim', "["), ('mark_selected', "X"), ('title_delim', "]")])
        else:
            # BUG FIX: this branch previously formatted the undefined name
            # 'mark', raising NameError instead of the intended ValueError.
            raise ValueError(f"Invalid mark: {value}")
    @property
    def unique_key(self):
        """Globally unique id of the entry: '<source>::<bibkey>'."""
        return f"{self.source}::{self.bibkey}"
    @property
    def unique_key_item(self):
        return urwid.Text([('selected_key', self.bibkey), ('selected_hint', f"({self.source})")])
    def OnSelectionHandler(self):
        """Hook invoked when the entry is (de)selected; default is a no-op."""
        pass
    def OpenInBrowser(self):
        """Open self.url via `python3 -m webbrowser` and post the outcome."""
        if self.url is None:
            self.repo.message_bar.Post("Could not infer url of this entry.",
                                       "warning", 1)
            return
        status = subprocess.run(["python3", "-m", "webbrowser", "-t", self.url],
                                stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL)
        if status.returncode == 0:
            self.repo.message_bar.Post(f"Opened url '{self.url}'.", 'normal', 1)
        else:
            self.repo.message_bar.Post(
                f"Error occured when opening url '{self.url}' (code {status.returncode})",
                'error', 1)
    def _InitializeSearchPanelWidget(self):
        if self._search_panel_widget is None:
            self._search_panel_widget = BibEntry.SearchPanelWidgetImpl(self)
class DblpEntry(BibEntry):
    """Entry built from one JSON 'hit' of the dblp.org search API.

    The BibTeX record is not part of the search response; it is fetched
    lazily from dblp on a dedicated thread the first time the entry is
    selected (see OnSelectionHandler / _LoadPybtexEntry).
    """
    class DetailsWidgetImpl(urwid.Pile):
        """Key/value listing of the dblp hit's 'info' dictionary."""
        def __init__(self, entry):
            super().__init__([])
            self._entry = entry
            self.key_item = urwid.Columns([('pack', urwid.Text(('detail_key', "bibtex key: "))),
                                           ('weight', 1, urwid.Text(('detail_value', entry.bibkey)))])
            self.source_item = urwid.Columns([('pack', urwid.Text(('detail_key', "source: "))),
                                              ('weight', 1, urwid.Text(('detail_value', entry.source)))])
            # One row per author category (e.g. 'author'); values joined by
            # newlines.  NOTE(review): assumes each category maps to a list
            # of strings -- confirm against the dblp JSON format.
            self.person_items = urwid.Pile([
                urwid.Columns([('pack', urwid.Text(('detail_key', f"{k.lower()}: "))),
                               ('weight', 1, urwid.Text(('detail_value', '\n'.join(entry.data['info']['authors'][k]))))])
                for k in entry.data['info']['authors'].keys()
            ])
            # One row per remaining 'info' field.
            self.info_items = urwid.Pile([
                urwid.Columns([('pack', urwid.Text(('detail_key', f"{k.lower()}: "))),
                               ('weight', 1, urwid.Text(('detail_value', f"{entry.data['info'][k]}")))])
                for k in entry.data['info'].keys() if k != 'authors'
            ])
            self.contents = [(self.key_item, ('pack', None)),
                             (self.source_item, ('pack', None)),
                             (self.person_items, ('pack', None)),
                             (self.info_items, ('pack', None)),
                             (urwid.SolidFill(), ('weight', 1))]
        @property
        def entry(self):
            return self._entry
    def __init__(self, dblp_entry, repo):
        """dblp_entry: one decoded JSON hit; repo: the owning DblpRepo."""
        super().__init__('dblp.org', repo)
        self.data = dblp_entry
        self._details_widget = None
        self._bibkey = None
        self._redraw_fd = None
        # Filled in by _LoadPybtexEntry; stays None if the fetch fails.
        self.pybtex_entry = None
        self.bibtex_loading_done = threading.Event()
        # Created eagerly but started only on first selection.
        self.bibtex_loading_thread = threading.Thread(
            name=f"bibtex-{self.bibkey}",
            target=self._LoadPybtexEntry,
            daemon=False)
    def __del__(self):
        if self._redraw_fd is not None:
            os.close(self._redraw_fd)
    @property
    def pyb_entry(self):
        # Blocks until the background BibTeX fetch has completed (or failed).
        self.bibtex_loading_done.wait()
        return self.pybtex_entry
    @property
    def authors(self):
        try:
            authors = self.data['info']['authors']['author']
            if authors: return authors
            else: return ["Unknown"]
        except: return ["Unknown"]
    @property
    def title(self):
        try: return str(self.data['info']['title'])
        except: return "Unknown"
    @property
    def year(self):
        try: return str(self.data['info']['year'])
        except: return "Unknown"
    @property
    def venue(self):
        try: return self.data['info']['venue']
        except: return "Unknown"
    @property
    def bibkey(self):
        # Derived key: last path segment of the dblp key plus the first 4
        # hex digits of its SHA-1, to disambiguate equal base names.
        if self._bibkey is None:
            flat_key = self.data['info']['key']
            base = flat_key.split('/')[-1]
            sha1 = hashlib.sha1(flat_key.encode('utf-8')).hexdigest()
            self._bibkey = f"{base}:{sha1[:4].upper()}"
        return self._bibkey
    @property
    def url(self):
        # 'ee' is the electronic-edition link in dblp responses.
        try: return self.data['info']['ee']
        except: return None
    @property
    def details_widget(self):
        self._InitializeDetailsWidget()
        return self._details_widget
    def OnSelectionHandler(self):
        # First selection only: open a redraw pipe to the main loop and
        # start the one-shot BibTeX download thread.
        if self._redraw_fd is None:
            event_loop = self.repo.event_loop
            self._redraw_fd = event_loop.watch_pipe(self._FdWriteHandler)
            self.bibtex_loading_thread.start()
    def _FdWriteHandler(self, data):
        # Runs on the main loop when the worker writes to the pipe.
        self.repo.event_loop.draw_screen()
    def _InitializeDetailsWidget(self):
        if self._details_widget is None:
            self._details_widget = DblpEntry.DetailsWidgetImpl(self)
    def _LoadPybtexEntry(self):
        """Worker: download the BibTeX record and parse it with pybtex.

        Updates the search-row status text ('fetching bibtex' ->
        'bibtex ready') and requests redraws through the pipe.  Always sets
        bibtex_loading_done, even on failure, so pyb_entry never blocks
        forever.
        """
        bib_url = f"https://dblp.org/rec/bib2/{self.data['info']['key']}.bib"
        try:
            if self.search_panel_widget is not None:
                self.search_panel_widget.source.set_text([
                    ('source', f"{self.source}"),
                    ('delim', "::"),
                    ('bibkey', f"{self.bibkey}"),
                    ('bibtex_fetching', " (fetching bibtex)")])
                os.write(self._redraw_fd, b"?")
            with urllib.request.urlopen(bib_url) as remote:
                bib_text = remote.read().decode('utf-8')
            pyb_db = pybtex.database.parse_string(bib_text, 'bibtex')
            self.pybtex_entry = pyb_db.entries[f"DBLP:{self.data['info']['key']}"]
            if self.search_panel_widget is not None:
                self.search_panel_widget.source.set_text([
                    ('source', f"{self.source}"),
                    ('delim', "::"),
                    ('bibkey', f"{self.bibkey}"),
                    ('bibtex_ready', " (bibtex ready)")])
                os.write(self._redraw_fd, b"?")
        except Exception as e:
            logging.error(f"Error when fetching bibtex entry from DBLP: Entry: {self.data} {traceback.format_exc()}")
        self.bibtex_loading_done.set()
class BibtexEntry(BibEntry):
    """Entry backed by a pybtex Entry parsed from a local BibTeX file."""
    class DetailsWidgetImpl(urwid.Pile):
        """Key/value listing of every person and field of the entry."""
        def __init__(self, entry):
            super().__init__([])
            self.entry = entry
            self.key = urwid.Columns([
                ('pack', urwid.Text(('detail_key', "citation key: "))),
                ('weight', 1, urwid.Text(('detail_value', entry.bibkey)))])
            self.source = urwid.Columns([
                ('pack', urwid.Text(('detail_key', "source: "))),
                ('weight', 1, urwid.Text(('detail_value', entry.source)))])
            self.item_type = urwid.Columns([
                ('pack', urwid.Text(('detail_key', "type: "))),
                ('weight', 1, urwid.Text(('detail_value', entry.entry.type)))])
            # One row per person role (author, editor, ...), names joined
            # with newlines.
            self.persons = urwid.Pile([
                urwid.Columns([('pack', urwid.Text(('detail_key', f"{k.lower()}: "))),
                               ('weight', 1, urwid.Text(('detail_value', '\n'.join([str(p) for p in entry.entry.persons[k]]))))])
                for k in entry.entry.persons.keys()
            ])
            # One row per non-empty BibTeX field.
            self.info = urwid.Pile([
                urwid.Columns([('pack', urwid.Text(('detail_key', f"{k.lower()}: "))),
                               ('weight', 1, urwid.Text(('detail_value', f"{entry.entry.fields[k]}")))])
                for k in entry.entry.fields.keys() if entry.entry.fields[k]
            ])
            self.contents = [(self.key, ('pack', None)),
                             (self.source, ('pack', None)),
                             (self.item_type, ('pack', None)),
                             (self.persons, ('pack', None)),
                             (self.info, ('pack', None)),
                             (urwid.SolidFill(), ('weight', 1))]
    def __init__(self, key, entry, repo, source):
        """key: citation key; entry: pybtex Entry; source: origin file path."""
        super().__init__(source, repo)
        self._bibkey = key
        self.entry = entry
        self._details_widget = None
    @property
    def authors(self):
        try: return [str(au) for au in self.entry.persons['author']]
        except Exception: return ["Unknown"]
    @property
    def title(self):
        try: return self.entry.fields['title']
        except Exception: return "Unknown"
    @property
    def year(self):
        try: return self.entry.fields['year']
        except Exception: return "Unknown"
    @property
    def venue(self):
        """Best-effort venue: booktitle, then journal, then publisher.

        BUG FIX: previously fell through and returned None when none of the
        three fields was present; now returns "Unknown" like the sibling
        properties, so display code never formats None.
        """
        try:
            if 'booktitle' in self.entry.fields:
                return self.entry.fields['booktitle']
            elif 'journal' in self.entry.fields:
                return self.entry.fields['journal']
            elif 'publisher' in self.entry.fields:
                return f"Publisher: {self.entry.fields['publisher']}"
            else:
                return "Unknown"
        except Exception:
            return "Unknown"
    @property
    def bibkey(self):
        return self._bibkey
    @property
    def url(self):
        try: return self.entry.fields['url']
        except Exception: return None
    @property
    def pyb_entry(self):
        # Local entries are parsed eagerly; nothing to fetch.
        return self.entry
    @property
    def details_widget(self):
        self._InitializeDetailsWidget()
        return self._details_widget
    def _InitializeDetailsWidget(self):
        if self._details_widget is None:
            self._details_widget = BibtexEntry.DetailsWidgetImpl(self)
class BibRepo:
    """Base class for a bibliography source with background load/search.

    The constructor starts two daemon threads: a loader (subclass hook
    ``LoadingThreadMain``) and a searcher which blocks until ``Search()``
    is called and then streams matching entries (subclass hook
    ``SearchingThreadMain``) into ``search_results_panel``.
    """
    @staticmethod
    def Create(config, access, event_loop):
        """Factory building a repo from one configuration dict.

        ``{'remote': ...}`` yields a DblpRepo; ``{'glob': ...}`` yields a
        read-only BibtexRepo or writable OutputBibtexRepo depending on
        ``access`` ('ro'/'rw').  Raises ValueError for anything else.
        """
        enabled = config.get('enabled', True)
        if 'remote' in config:
            return DblpRepo(event_loop, enabled)
        elif 'glob' in config:
            ctor = {'ro': BibtexRepo, 'rw': OutputBibtexRepo}[access]
            return ctor(config['glob'], event_loop, enabled)
        else:
            raise ValueError(f"Invalid config: {config}")
    class StatusIndicatorWidgetImpl(urwid.AttrMap):
        """Status-bar row for one repo: label, enabled mark, state, access."""
        def __init__(self, repo):
            super().__init__(urwid.SolidFill(), None)
            self.repo = repo
            self._status = None
            self.label = urwid.AttrMap(urwid.Text(f"{repo.source}"), "db_label")
            self.access = urwid.Text("")
            self.status_indicator = urwid.AttrMap(urwid.Text(""), "db_label")
            self.original_widget = urwid.Columns([('pack', self.repo._short_label),
                                                  ('pack', self.repo._enabled_mark),
                                                  ('weight', 1, self.label),
                                                  ('pack', self.status_indicator),
                                                  ('pack', self.access)],
                                                 dividechars=1)
        @property
        def status(self):
            return self._status
        @status.setter
        def status(self, value):
            """Set the displayed state; one of 'initialized', 'loading',
            'searching', 'ready', 'no file'."""
            # Guarded by the repo's redraw lock: worker threads update the
            # status while the main loop may be redrawing.
            with self.repo.redraw_lock:
                self._status = value
                if value == 'initialized':
                    self.status_indicator.original_widget.set_text("initialized")
                elif value == 'loading':
                    self.status_indicator.set_attr_map({None: "db_status_loading"})
                    self.status_indicator.original_widget.set_text("loading")
                elif value == 'searching':
                    self.status_indicator.set_attr_map({None: "db_status_searching"})
                    self.status_indicator.original_widget.set_text("searching")
                elif value == 'ready':
                    self.status_indicator.set_attr_map({None: "db_status_ready"})
                    self.status_indicator.original_widget.set_text("ready")
                elif value == 'no file':
                    self.status_indicator.set_attr_map({None: "db_status_error"})
                    self.status_indicator.original_widget.set_text("no file")
                else:
                    # BUG FIX: this branch previously formatted the undefined
                    # name 'status', raising NameError instead of the
                    # intended LookupError.
                    raise LookupError(f"Invalid status: {value}")
    def __init__(self, source, event_loop, enabled):
        self.source = source
        self.redraw_lock = threading.Lock()
        self.event_loop = event_loop
        # Writing to this pipe wakes the urwid main loop so worker threads
        # can request a redraw from outside the loop.
        self._redraw_fd = event_loop.watch_pipe(self._FdWriteHandler)
        self.serial = 0
        self._serial_lock = threading.Lock()
        # The panels below are injected later by the application wiring.
        self.search_results_panel = None
        self.message_bar = None
        # NOTE(review): the search loop reads 'selected_entries_panel' while
        # entry widgets use 'repo.selected_keys_panel'; both seem to be
        # assigned by the (not shown) main() -- confirm they stay in sync.
        self.selected_entries_panel = None
        self.details_panel = None
        self.loading_done = threading.Event()
        self.searching_done = threading.Event()
        self.loading_thread = threading.Thread(name=f"load-{self.source}",
                                               target=self.LoadingThreadWrapper,
                                               daemon=True)
        self.searching_thread = threading.Thread(name=f"search-{self.source}",
                                                 target=self.SearchingThreadWrapper,
                                                 daemon=True)
        self._short_label = urwid.Text("?")
        self._enabled_mark = urwid.Text("")
        self.enabled = enabled
        self._status_indicator_widget = BibRepo.StatusIndicatorWidgetImpl(self)
        self.access_type = 'ro'
        self.status = "initialized"
        self.loading_thread.start()
        self.searching_thread.start()
    def __del__(self):
        os.close(self._redraw_fd)
    @property
    def short_label(self):
        return self._short_label.get_text()
    @short_label.setter
    def short_label(self, value):
        self._short_label.set_text(value)
    @property
    def access_type(self):
        return self._access_type
    @access_type.setter
    def access_type(self, value):
        """Set 'ro' or 'rw' and update the access indicator text."""
        if value == 'ro':
            self._access_type = 'ro'
            self._status_indicator_widget.access.set_text(('db_ro', "ro"))
        elif value == 'rw':
            self._access_type = 'rw'
            self._status_indicator_widget.access.set_text(('db_rw', "rw"))
        else:
            raise ValueError(f"Invalid access info: {value}")
    @property
    def enabled(self):
        return self._enabled
    @enabled.setter
    def enabled(self, value):
        self._enabled = value
        if self._enabled:
            self._enabled_mark.set_text(["[", ('db_enabled', "X"), "]"])
        else:
            self._enabled_mark.set_text("[ ]")
    @property
    def status(self):
        return self._status_indicator_widget.status
    @status.setter
    def status(self, value):
        self._status_indicator_widget.status = value
    @property
    def status_indicator_widget(self):
        return self._status_indicator_widget
    def Search(self, search_text, serial):
        """Request an asynchronous search; results are tagged with `serial`
        so stale results can be discarded by the results panel."""
        self.search_text = search_text
        with self._serial_lock:
            self.serial = serial
        self.searching_done.set()
    def LoadingThreadWrapper(self):
        # Show 'loading', run the subclass loader, then publish its final
        # status and release anyone blocked on loading_done.
        self.status = "loading"
        self.Redraw()
        status = self.LoadingThreadMain()
        self.status = status
        self.Redraw()
        self.loading_done.set()
    def LoadingThreadMain(self):
        """Subclass hook: load the data; return the final status string."""
        return NotImplemented
    def SearchingThreadWrapper(self):
        self.loading_done.wait()
        if self.status == 'no file':
            return
        while True:
            self.searching_done.wait()
            with self._serial_lock:
                serial = self.serial
            self.status = "searching"
            self.Redraw()
            try:
                for item in self.SearchingThreadMain(self.search_text):
                    # Restore the mark on entries that are already selected.
                    if self.selected_entries_panel is not None and \
                       item.bibkey in self.selected_entries_panel.entries.keys():
                        item.mark = 'selected'
                    else:
                        item.mark = None
                    if self.search_results_panel is not None:
                        self.search_results_panel.Add(item, serial)
            except Exception as e:
                logging.error(traceback.format_exc())
            self.status = "ready"
            self.Redraw()
            # Only go back to sleep if no newer search superseded this one.
            with self._serial_lock:
                if self.serial == serial:
                    self.searching_done.clear()
    def Redraw(self):
        """Thread-safely ask the main loop to repaint the screen."""
        with self.redraw_lock:
            try:
                os.write(self._redraw_fd, b"?")
            except:
                logging.error(traceback.format_exc())
    def _FdWriteHandler(self, data):
        # Runs on the main loop when a worker writes to the redraw pipe.
        self.event_loop.draw_screen()
class BibtexRepo(BibRepo):
    """Read-only repo backed by local BibTeX files matched by a glob."""
    def __init__(self, glob_expr, event_loop, enabled):
        super().__init__(glob_expr, event_loop, enabled)
        self._bib_files = []
        self._bib_entries = []
    @property
    def bib_entries(self):
        # Block until the loader thread has finished parsing.
        self.loading_done.wait()
        return self._bib_entries
    @property
    def bib_files(self):
        # Block until the loader thread has resolved the glob.
        self.loading_done.wait()
        return self._bib_files
    def LoadingThreadMain(self):
        """Expand the glob, parse each BibTeX file, collect the entries."""
        glob_expr = self.source
        logging.debug(f"Collecting entries from glob expression '{glob_expr}'")
        self._bib_files = glob.glob(glob_expr, recursive=True)
        if not self._bib_files:
            logging.warning(f"Glob expr '{glob_expr}' matches no target")
            if self.message_bar is not None:
                self.message_bar.Post(f"Glob expr '{glob_expr}' matches no target.",
                                      'warning')
            return 'no file'
        for bib_path in self._bib_files:
            try:
                parsed = pybtex.database.parse_file(bib_path)
            except Exception as e:
                # Skip unparsable files; keep loading the rest.
                logging.error(f"Exception raised when parsing file {bib_path}: {e}")
                continue
            self._bib_entries.extend(
                BibtexEntry(cite_key, pyb_entry, self, bib_path)
                for cite_key, pyb_entry in parsed.entries.items())
            logging.debug(f"Parsed {len(parsed.entries)} entries from file {bib_path}")
        return 'ready'
    def SearchingThreadMain(self, search_text):
        """Yield every loaded entry matching the whitespace-split keywords."""
        if not search_text.strip():
            return
        keywords = search_text.split()
        yield from (entry for entry in self.bib_entries if entry.Match(keywords))
class OutputBibtexRepo(BibtexRepo):
    """Writable BibTeX repo: merges selected entries into one output file."""
    def __init__(self, glob_expr, event_loop, enabled):
        super().__init__(glob_expr, event_loop, enabled)
        self.selected_keys_panel = None
        # self.bib_files blocks until loading finishes, so the check below
        # sees the final glob result.
        if len(self.bib_files) > 1:
            raise ValueError(f"Glob expr '{glob_expr}' matches more than one file")
        self.access_type = 'rw'
        # Reuse the matched file when one exists; otherwise treat the glob
        # expression itself as the path of the file to create.
        self.output_file = self.bib_files[0] if self.bib_files else glob_expr
    def Write(self):
        """Merge existing entries with the current selection and save.

        Selected entries override existing ones with the same key.  Aborts
        without writing when any selected entry has no BibTeX data yet.
        """
        if self.selected_keys_panel is None:
            return
        self.loading_done.wait()
        merged = {item.bibkey: item.pyb_entry for item in self.bib_entries}
        merged.update({item.bibkey: item.pyb_entry
                       for item in self.selected_keys_panel.entries.values()})
        for cite_key, pyb_entry in merged.items():
            if pyb_entry is None:
                logging.error(f"Key {cite_key} has empty entry. Not writing to file.")
                return
        pybtex.database.BibliographyData(merged).to_file(self.output_file)
        logging.info(f"Wrote to file '{self.output_file}'")
class DblpRepo(BibRepo):
    """Remote repo querying the public dblp.org publication search API."""
    def __init__(self, event_loop, enabled):
        super().__init__("https://dblp.org", event_loop, enabled)
    def LoadingThreadMain(self):
        # Nothing to preload; the remote repo is usable immediately.
        return 'ready'
    def SearchingThreadMain(self, search_text):
        """Query dblp and yield one DblpEntry per JSON hit."""
        if not search_text.strip():
            return
        url = f"https://dblp.org/search/publ/api?q={urllib.parse.quote(search_text)}&format=json"
        with urllib.request.urlopen(url) as response:
            payload = json.load(response)
        hits = payload['result']['hits']
        # 'hit' is absent when the query has no results.
        for hit in hits.get('hit', []):
            yield DblpEntry(hit, self)
class Banner(urwid.AttrMap):
    """Start-up splash shown while there are no search results to display."""
    def __init__(self):
        super().__init__(urwid.SolidFill(), None)
        self.big_text = urwid.BigText(
            [('banner_hi', "bib"), ('banner_lo', "rarian")],
            urwid.font.HalfBlock7x7Font())
        self.big_text_clipped = urwid.Padding(self.big_text, 'center', width='clip')
        self.subtitle = urwid.Text(
            ('banner_hi', "A BibTeX Management Tool Powered By D.B.L.P"),
            align='center')
        self.version = urwid.Text(('banner_lo', "version 1.0"), align='center')
        rows = urwid.Pile([self.big_text_clipped, self.subtitle, self.version])
        self.original_widget = urwid.Filler(rows, 'middle')
class SearchResultsPanel(urwid.AttrMap):
    """List of search-result rows; shows the Banner when the list is empty.

    Results arrive asynchronously from repo threads; each Add() carries the
    serial of the search that produced it, and results from superseded
    searches are silently dropped.
    """
    def __init__(self):
        super().__init__(urwid.SolidFill(), None)
        self._serial = 0
        self._serial_lock = threading.Lock()
        self.banner = Banner()
        self._Clear()
    @property
    def serial(self):
        return self._serial
    @serial.setter
    def serial(self, value):
        # Starting a new search: bump the serial and drop old results.
        with self._serial_lock:
            self._serial = value
            self._Clear()
    def _Clear(self):
        self.items = []
        self.SyncDisplay()
    def Add(self, entry, serial):
        # Accept the result only if it belongs to the current search.
        with self._serial_lock:
            if self._serial == serial:
                self.items.append(entry.search_panel_widget)
                self.SyncDisplay()
    def SyncDisplay(self):
        # Show only rows whose repo is currently enabled; fall back to the
        # banner when nothing is visible.
        enabled_items = [item for item in self.items if item.entry.repo.enabled]
        if enabled_items:
            self.list_walker = urwid.SimpleListWalker(enabled_items)
            self.original_widget = urwid.ListBox(self.list_walker)
        else:
            self.original_widget = self.banner
    def keypress(self, size, key):
        # NOTE(review): the navigation branches call the private ListBox
        # helpers _keypress_down/_keypress_up; when the banner is displayed
        # instead of a ListBox these attributes presumably do not exist --
        # confirm j/k cannot reach this widget in that state.
        # NOTE(review): unhandled keys are not returned from this method, so
        # they never propagate to outer widgets -- confirm this is intended.
        if key in ('ctrl n', 'j'):
            self.original_widget._keypress_down(size)
        elif key in ('ctrl p', 'k'):
            self.original_widget._keypress_up(size)
        else:
            self.original_widget.keypress(size, key)
class SelectedKeysPanel(urwid.Pile):
    """Panel listing the currently selected entries, keyed by unique key."""
    def __init__(self, keys_output):
        super().__init__([])
        self.entries = {}
        self.keys_output = keys_output  # path for Write(); may be None
        self.SyncDisplay()
    def Toggle(self, entry):
        """Select the entry if unselected, deselect it otherwise."""
        key = entry.unique_key
        if self.entries.pop(key, None) is None:
            self.entries[key] = entry
            entry.mark = 'selected'
        else:
            entry.mark = None
        self.SyncDisplay()
    def Add(self, entry):
        self.entries[entry.unique_key] = entry
        self.SyncDisplay()
    def SyncDisplay(self):
        rows = [(item.unique_key_item, ('pack', None))
                for item in self.entries.values()]
        if not rows:
            hint = urwid.Text(('selected_hint', "Hit <SPACE> on highlighted item to select."))
            rows = [(hint, ('pack', None))]
        self.contents = rows
    def Write(self):
        """Write the selected bibkeys, comma-separated, to keys_output."""
        if self.keys_output is None:
            return
        joined = ','.join(item.bibkey for item in self.entries.values())
        with open(self.keys_output, 'w') as f:
            print(joined, file=f, end='')
        logging.info(f"Wrote selected keys to file '{self.keys_output}'")
class SearchBar(urwid.AttrMap):
    """Edit widget that fans each keystroke out to every repo as a search."""
    def __init__(self):
        super().__init__(urwid.SolidFill(), 'search_content')
        self._search = urwid.Edit(('search_label', "Search: "))
        self.original_widget = self._search
        # Wired up later by the application.
        self.search_results_panel = None
        self.bib_repos = []
        self._search_serial = 0
        urwid.connect_signal(self._search, 'change', self.TextChangeHandler)
    def TextChangeHandler(self, edit, text):
        """Start a new search on every repo, tagged with a fresh serial."""
        if self.search_results_panel is None:
            return
        serial = self._search_serial
        self._search_serial = serial + 1
        # Resetting the panel's serial also clears stale results.
        self.search_results_panel.serial = serial
        for repo in self.bib_repos:
            repo.Search(text, serial)
class MessageBar(urwid.AttrMap):
    """Bottom status bar showing posted messages and rotating usage tips.

    Two daemon threads cooperate: `_PeriodicTrigger` queues a tip whenever
    the current message's display time has elapsed, and `_UpdateMessage`
    wakes the urwid event loop (through a watch pipe) so the new text is
    actually drawn from the main thread.
    """
    def __init__(self, loop):
        super().__init__(urwid.Text("Welcome to bibrarian."), 'msg_normal')
        self.event_loop = loop
        # Writing any byte to this pipe makes the urwid loop call
        # _FdWriteHandler, which is the thread-safe way to force a redraw.
        self._redraw_fd = loop.watch_pipe(self._FdWriteHandler)
        self.initial_delay = 1   # seconds before the first tip appears
        self.post_delay = 3      # default display time for Post() messages
        self.tips_delay = 5      # display time for each rotating tip
        self.next_message_ready = threading.Event()
        self.next_message_scheduled = 0  # epoch time when the bar may change again
        self.messages = [
            "Use ctrl+c to exit the program with all files untouched.",
            "Use ctrl+w to write the selected entries to the target file.",
            "Press @ (shift+2) open the entry using system browser.",
            "Use up (or ctrl+p or k) and down (or ctrl+n or j) to navigate the search results.",
            "Use alt+shift+n to toggle enabled/disabled the n-th bib repo.",
            "This software is powered by Python 3, dblp API, Pybtex, and urwid.",
        ]
        self.msg_lock = threading.Lock()  # guards original_widget swaps across threads
        self.periodic_trigger_thread = threading.Thread(
            name=f"msg-trigger", target=self._PeriodicTrigger, daemon=True)
        self.message_update_thread = threading.Thread(
            name=f"msg-update", target=self._UpdateMessage, daemon=True)
        self.periodic_trigger_thread.start()
        self.message_update_thread.start()
    def Post(self, message, severity='normal', delay=None):
        """Display *message* for *delay* seconds (default ``post_delay``).

        *severity* must be 'normal', 'warning' or 'error'; it selects the
        label and style.  Raises ValueError for anything else.
        """
        if severity == 'normal':
            label = "Message"
            style = 'msg_normal'
        elif severity == 'warning':
            label = "Warning"
            style = 'msg_warning'
        elif severity == 'error':
            label = "Error"
            style = 'msg_error'
        else:
            raise ValueError(f"Invalid severity: {severity}")
        with self.msg_lock:
            self.original_widget = urwid.Text((style, f"{label}: {message}"))
            self.next_message_ready.set()
            if delay is None: delay = self.post_delay
            # Block tip rotation until this message's display time is over.
            self.next_message_scheduled = time.time() + delay
    def _FdWriteHandler(self, data):
        # Runs in the urwid main loop; safe place to redraw.
        self.event_loop.draw_screen()
    def _PeriodicTrigger(self):
        """Daemon loop: cycle tips, waiting out any scheduled message first."""
        time.sleep(self.initial_delay)
        while True:
            for message in self.messages:
                while True:
                    if time.time() >= self.next_message_scheduled:
                        with self.msg_lock:
                            self.original_widget = urwid.Text(('msg_tips', f"Tip: {message}"))
                            self.next_message_ready.set()
                            self.next_message_scheduled = time.time() + self.tips_delay
                        time.sleep(self.tips_delay)
                        break
                    else:
                        # Poll once a second until the current message expires.
                        time.sleep(1)
                        continue
    def _UpdateMessage(self):
        """Daemon loop: nudge the event loop each time a new message is set."""
        while True:
            self.next_message_ready.wait()
            self.next_message_ready.clear()
            os.write(self._redraw_fd, b"?")
    def __del__(self):
        os.close(self._redraw_fd)
class DetailsPanel(urwid.AttrMap):
    """Placeholder panel; shows a hint until an entry's details are loaded."""

    def __init__(self):
        hint = urwid.Text(
            ('details_hint', 'Hit <i> on highlighted item to update info.'))
        super().__init__(urwid.Filler(hint, 'top'), None)
class InputFilter:
    """Global urwid input filter intercepting application-level shortcuts.

    ``widget`` (the TopWidget) is injected after construction, since the
    filter must exist before the main loop that builds the widget tree.
    """
    def __init__(self):
        self.widget = None
    def __call__(self, keys, raw):
        """Filter raw key events before they reach the widget tree.

        ctrl+w writes all outputs and exits; alt-chords toggle repos and
        are consumed (return None); everything else passes through.
        """
        if not keys: return keys
        if keys[0] == 'ctrl w':
            # Best-effort write of every rw repo and the selected-keys
            # file: failures are logged, never block exiting.
            try:
                for repo in self.widget.output_repos:
                    repo.Write()
            except:
                logging.error(traceback.format_exc())
            try: self.widget.selected_keys_panel.Write()
            except: logging.error(traceback.format_exc())
            raise urwid.ExitMainLoop()
        elif self.MaskDatabases(keys[0]):
            self.widget.search_results_panel.SyncDisplay()
            return
        return keys
    def MaskDatabases(self, key):
        """Handle repo enable/disable chords; return truthy when consumed.

        alt+shift+<n> toggles the n-th repo, alt+shift+0 disables all,
        alt+~ enables all.  NOTE(review): the 'enter' branch returns None
        (falsy), so 'enter' both flips the focus here *and* still reaches
        the focused widget via __call__ — confirm this double handling is
        intended.
        """
        # Shifted digit symbols in keyboard order map to 0..9.
        symbol_number_map = {s: n for s, n in zip(")!@#$%^&*(", range(10))}
        if 'meta ' in key:
            symbol = key[5:]
            if symbol == '~':
                for repo in self.widget.bib_repos:
                    repo.enabled = True
            else:
                number = symbol_number_map.get(symbol)
                if number == 0:
                    for repo in self.widget.bib_repos:
                        repo.enabled = False
                else:
                    # Out-of-range or non-digit symbols are ignored.
                    try:
                        repo = self.widget.bib_repos[number - 1]
                        repo.enabled = not repo.enabled
                    except: pass
            return True
        elif key == 'enter':
            # Flip focus between the search bar (0) and the main area (1).
            self.widget.focus_position = 1 - self.widget.focus_position
        else:
            return False
class DatabaseStatusPanel(urwid.Pile):
    """Lists one status row per database plus the config file provenance."""

    def __init__(self, databases, config_source):
        super().__init__([])
        rows = [(db, ('pack', None)) for db in databases]
        rows.append((urwid.Text(('cfg_src', f"config: {config_source}")),
                     ('pack', None)))
        self.contents = rows
class TopWidget(urwid.Pile):
    """Root widget: search bar on top, results/details columns, message bar.

    Also constructs all bib repos and wires the cross-references between
    panels and repos (each repo needs handles to the panels it updates).
    """
    def __init__(self, args, config, event_loop):
        super().__init__([urwid.SolidFill()])
        self.message_bar = MessageBar(event_loop)
        self.search_results_panel = SearchResultsPanel()
        self.details_panel = DetailsPanel()
        self.selected_keys_panel = SelectedKeysPanel(args.keys_output)
        # rw repos are both searchable and written on ctrl+w, so they are
        # included in bib_repos as well as kept separately in output_repos.
        self.output_repos = [BibRepo.Create(cfg, 'rw', event_loop) for cfg in config['rw_repos']]
        self.bib_repos = [BibRepo.Create(cfg, 'ro', event_loop) for cfg in config['ro_repos']] + self.output_repos
        # Number repos from 1 so alt+shift+<n> chords can address them.
        for repo, i in zip(self.bib_repos, itertools.count(1)):
            repo.short_label = f"{i}"
            repo.message_bar = self.message_bar
            repo.search_results_panel = self.search_results_panel
            repo.selected_keys_panel = self.selected_keys_panel
            repo.details_panel = self.details_panel
        self.search_bar = SearchBar()
        self.search_bar.bib_repos = self.bib_repos
        self.search_bar.search_results_panel = self.search_results_panel
        self.db_status_panel = DatabaseStatusPanel(
            [repo.status_indicator_widget for repo in self.bib_repos],
            config.source)
        for repo in self.output_repos:
            repo.selected_keys_panel = self.selected_keys_panel
        # Right column: db status / details / selection, stacked vertically.
        self.right_panel = urwid.Pile([
            ('pack', urwid.LineBox(self.db_status_panel, title="Database Info")),
            ('weight', 5, urwid.LineBox(self.details_panel, title="Detailed Info")),
            ('pack', urwid.LineBox(self.selected_keys_panel, title="Selected Entries"))])
        self.main_widget = urwid.Columns([
            ('weight', 2, urwid.LineBox(self.search_results_panel, title="Search Results")),
            ('weight', 1, self.right_panel)])
        self.contents = [(self.search_bar, ('pack', None)),
                         (self.main_widget, ('weight', 1)),
                         (self.message_bar, ('pack', None))]
class DefaultConfig(dict):
    """Example configuration emitted by the -g flag.

    Contains sample read-only repos ('ro_repos') and one read-write
    target repo ('rw_repos') for the user to edit.
    """

    def __init__(self):
        super().__init__()
        self['ro_repos'] = [
            {
                'remote': "dblp.org",
                'enabled': True
            },
            {
                'glob': "/path/to/lots/of/**/*.bib",
                'enabled': True
            },
            {
                'glob': "/path/to/sample.bib",
                'enabled': False
            },
            {
                'glob': "/path/to/another/sample.bib"
            }
        ]
        self['rw_repos'] = [
            {
                'glob': "reference.bib",
                'enabled': True
            }
        ]

    def Write(self, file):
        """Serialize this config as pretty-printed JSON to *file*."""
        with open(file, 'w') as f:
            json.dump(self, f, indent=4)
class Config(dict):
    """Project configuration located by walking up from the CWD.

    Each ancestor directory is searched for *file_name*; the first
    readable match wins.  If none is found the process exits with a hint
    about the -g option.  Relative 'glob' paths in the config are
    resolved against the directory containing the config file.
    """
    def __init__(self, file_name):
        prefix = os.getcwd()
        self.source = None  # absolute path of the config file actually used
        while True:
            path = os.path.join(prefix, file_name)
            if os.path.isfile(path) and os.access(path, os.R_OK):
                with open(path) as f:
                    self.update(json.load(f))
                self.source = path
                break
            if prefix == '/': break
            # Walk one directory up towards the filesystem root.
            prefix = os.path.dirname(prefix)
        if self.source is None:
            print("Did not find any config file.")
            print("You can generate an example config file using option -g.")
            print("For more information, please use option -h for help.")
            sys.exit(1)
        self._NormalizePaths()
    def _NormalizePaths(self):
        """Expand ~ and $VARS in repo globs and absolutize relative ones."""
        config_dir = os.path.dirname(os.path.realpath(self.source))
        for repo_group in (self[k] for k in ('ro_repos', 'rw_repos')):
            for repo_config in repo_group:
                if 'glob' in repo_config:
                    repo_config['glob'] = os.path.expandvars(os.path.expanduser(repo_config['glob']))
                    if not os.path.isabs(repo_config['glob']):
                        # Relative globs are relative to the config file.
                        repo_config['glob'] = os.path.join(config_dir, repo_config['glob'])
class ArgParser(argparse.ArgumentParser):
    """Command line interface for bibrarian.

    Options: -f/--config (config path), -g/--gen-config (write example
    config), -l/--log (log file), -k/--keys-output (selected keys file),
    -v/--version.
    """

    def __init__(self):
        super().__init__(prog="bibrarian")
        self.add_argument("-f", "--config",
                          help="force configuration file path",
                          default=".bibrarian_config.json",
                          action='store'
                          )
        self.add_argument("-g", "--gen-config",
                          help="generate a configuration file",
                          default=False,
                          action='store_true')
        # Fix: default log name had a typo ("babrarian" -> "bibrarian"),
        # matching the program name used everywhere else.
        self.add_argument("-l", "--log",
                          help="force log file path",
                          default=f"/tmp/{getpass.getuser()}_bibrarian.log",
                          action='store')
        self.add_argument("-k", "--keys-output",
                          help="output bib keys file (truncate mode)",
                          action='store')
        self.add_argument("-v", "--version",
                          action='version',
                          version="%(prog)s 1.0")
class Palette(list):
    """Urwid display attributes: (name, foreground, background) triples.

    Entries suffixed with '+' are the highlighted variants of the
    corresponding unsuffixed entries.
    """

    def __init__(self):
        super().__init__()
        self.extend([
            ('search_label', 'yellow', 'dark magenta'),
            ('search_content', 'white', 'dark magenta'),
            ('search_hint', 'light cyan', 'dark magenta'),
            ('msg_tips', 'white', 'dark gray'),
            ('msg_normal', 'light green', 'dark gray'),
            ('msg_warning', 'yellow', 'dark gray'),
            ('msg_error', 'light red', 'dark gray'),
            ('details_hint', 'dark green', 'default'),
            ('db_label', 'default', 'default'),
            ('db_enabled', 'light cyan', 'default'),
            ('db_status_ready', 'light green', 'default'),
            ('db_status_loading', 'light cyan', 'default'),
            ('db_status_searching', 'yellow', 'default'),
            ('db_status_error', 'light red', 'default'),
            ('db_rw', 'light magenta', 'default'),
            ('db_ro', 'light green', 'default'),
            ('mark_none', 'default', 'dark gray'),
            ('mark_selected', 'light cyan', 'dark gray'),
            ('title', 'yellow', 'dark gray'),
            ('title_delim', 'default', 'dark gray'),
            ('source', 'dark green', 'default'),
            ('author', 'white', 'default'),
            ('venue', 'underline', 'default'),
            ('year', 'light gray', 'default'),
            ('delim', 'default', 'default'),
            ('bibkey', 'light green', 'default'),
            ('bibtex_ready', 'dark green', 'default'),
            ('bibtex_fetching', 'yellow', 'default'),
            ('plain+', 'default', 'dark magenta'),
            ('mark_none+', 'default', 'light magenta'),
            ('mark_selected+', 'light cyan', 'light magenta'),
            ('title+', 'yellow', 'light magenta'),
            ('title_delim+', 'default', 'light magenta'),
            ('source+', 'light green', 'dark magenta'),
            ('author+', 'white', 'dark magenta'),
            ('venue+', 'white,underline', 'dark magenta'),
            ('year+', 'white', 'dark magenta'),
            ('delim+', 'default', 'dark magenta'),
            ('bibkey+', 'light green', 'dark magenta'),
            ('bibtex_ready+', 'dark green', 'dark magenta'),
            ('bibtex_fetching+', 'yellow', 'dark magenta'),
            ('selected_key', 'light cyan', 'default'),
            ('selected_hint', 'dark cyan', 'default'),
            ('detail_key', 'light green', 'default'),
            ('detail_value', 'default', 'default'),
            ('banner_hi', 'light magenta', 'default'),
            ('banner_lo', 'dark magenta', 'default'),
            ('cfg_src', 'dark gray', 'default'),
        ])
if __name__ == '__main__':
    # Entry point: parse args, optionally emit an example config and exit,
    # otherwise configure logging and start the urwid main loop.
    args = ArgParser().parse_args()
    if args.gen_config:
        DefaultConfig().Write(args.config)
        print(f"Wrote default config to file {args.config}")
        sys.exit(0)
    logging.basicConfig(filename=args.log,
                        format="[%(asctime)s %(levelname)7s] %(threadName)s: %(message)s",
                        datefmt="%m-%d-%Y %H:%M:%S",
                        level=logging.DEBUG)
    config = Config(args.config)
    input_filter = InputFilter()
    # The loop is created with a placeholder widget because TopWidget
    # needs the loop (for watch_pipe) before it can be constructed.
    main_loop = urwid.MainLoop(urwid.SolidFill(),
                               palette=Palette(),
                               input_filter=input_filter)
    top_widget = TopWidget(args, config, main_loop)
    input_filter.widget = top_widget
    main_loop.widget = top_widget
    try: main_loop.run()
    except KeyboardInterrupt:
        # ctrl+c exits without writing anything.
        sys.exit(0)
| 34.731648 | 129 | 0.552594 | 41,370 | 0.97156 | 782 | 0.018365 | 7,229 | 0.169771 | 0 | 0 | 7,066 | 0.165943 |
9f1dd961d3e84df7e11d43cbab3f1cd8de101859 | 11,144 | py | Python | pipeline_scripts/pre_linkage/E_CCS_Households.py | Data-Linkage/ccslink | ee1105888d43c6a2b307deb96ddede34d03a965f | [
"MIT"
] | null | null | null | pipeline_scripts/pre_linkage/E_CCS_Households.py | Data-Linkage/ccslink | ee1105888d43c6a2b307deb96ddede34d03a965f | [
"MIT"
] | null | null | null | pipeline_scripts/pre_linkage/E_CCS_Households.py | Data-Linkage/ccslink | ee1105888d43c6a2b307deb96ddede34d03a965f | [
"MIT"
] | null | null | null | # ----------------------- #
# -------- SETUP -------- #
# ----------------------- #
# Import PySpark, Parameters, File Paths, Functions & Packages
import pyspark
from CCSLink import Parameters
from CCSLink.Parameters import FILE_PATH
from CCSLink import Person_Functions as PF
from CCSLink import Household_Functions as HF
from CCSLink import Cluster_Function as CF
exec(open("/home/cdsw/collaborative_method_matching/CCSLink/Packages.py").read())
# Changes to SparkSession
sparkSession.conf.set('spark.sql.codegen.wholeStage', 'false')
# ----------------------- #
# -------- DATA --------- #
# ----------------------- #
# Set year, month, date for file path
YEAR, MONTH, DAY = '2021', '11', '16'
# Read in CCS HH data
ccs = sparkSession.read.csv('some_path' + 'ccs_households/ccs_households.csv'.format(YEAR, MONTH, DAY), header = True)
# Select columns
ccs = ccs.selectExpr('qid as qid_ccs', 'household_id as hh_id_ccs', 'ownership_type as tenure_ccs', 'accommodation_type as typaccom_ccs', 'resident_count as no_resi_ccs',
'census_address_indicator', 'census_address as address_cenday_ccs', 'census_address_postcode as pc_cenday_ccs', 'census_address_uprn as uprn_cenday_ccs',
'census_address_country as country_cenday_ccs').persist()
ccs.count()
# ---------------------------------------------------------------------------------- #
# ---------------- Current Address / Postcode / UPRN / House Number ---------------- #
# ---------------------------------------------------------------------------------- #
# CCS Questionnaire data
ccs_q = sparkSession.read.csv('some_path' + 'ccs_questionnaires/ccs_questionnaires.csv'.format(YEAR, MONTH, DAY), header = True)
ccs_q = ccs_q.selectExpr('qid as qid_ccs', 'display_address', 'address', 'address_postcode as pc_ccs', 'uprn as uprn_ccs').drop_duplicates()
# Replace -9 & -8 with None
for variable in ['display_address', 'address', 'pc_ccs']:
    ccs_q = ccs_q.withColumn(variable, when(col(variable).isin(['-9', '-8']), lit(None)).otherwise(col(variable)))
# Add comma to display_address (helps with house number function later on)
ccs_q = ccs_q.withColumn('display_address', concat(col('display_address'), lit(',')))
# Replace missing address with display address (equivalent to CMS variable 'address_combined')
ccs_q = ccs_q.withColumn('address_ccs', when(col('address').isNull() == True, col('display_address')).otherwise(col('address'))).drop('display_address')
# Clean Postcode: strip non-alphanumerics, uppercase, empty -> NULL
ccs_q = ccs_q.withColumn('pc_ccs', upper(regexp_replace(col('pc_ccs'), "[^0-9A-Za-z]+", "")))
ccs_q = ccs_q.withColumn('pc_ccs', when(col('pc_ccs') == '', None).otherwise(col('pc_ccs')))
# Join variables on via qid
ccs = ccs.join(ccs_q.dropDuplicates(['qid_ccs']), on = 'qid_ccs', how = 'left')
# House/Flat Number (UDFs from Household_Functions)
ccs = ccs.withColumn('house_no_ccs', HF.house_number_udf(col('address_ccs')))
ccs = ccs.withColumn('flat_no_ccs', HF.flat_number_udf(col('address_ccs')))
# ----------------------------------------------------------------------------------------------- #
# ---------------- Census Day Address / Postcode / UPRN / House Number / Country ---------------- #
# ----------------------------------------------------------------------------------------------- #
# Indicator update: If census day postcode exists (and census day address is not -8), set to 1, otherwise set to 0
ccs = ccs.withColumn('census_address_indicator', when(col('pc_cenday_ccs').isin(['-9', '-8']), lit(0)).otherwise(lit(1)))
ccs = ccs.withColumn('census_address_indicator', when(col('address_cenday_ccs') == '-8', lit(0)).otherwise(col('census_address_indicator')))
# Replace -9 & -8 with None
for variable in ['census_address_indicator', 'address_cenday_ccs', 'pc_cenday_ccs', 'uprn_cenday_ccs', 'country_cenday_ccs']:
    ccs = ccs.withColumn(variable, when(col(variable).isin(['-9', '-8']), lit(None)).otherwise(col(variable)))
# Clean Census Day Postcode (same normalisation as the CCS-day postcode)
ccs = ccs.withColumn('pc_cenday_ccs', upper(regexp_replace(col('pc_cenday_ccs'), "[^0-9A-Za-z]+", "")))
ccs = ccs.withColumn('pc_cenday_ccs', when(col('pc_cenday_ccs') == '', None).otherwise(col('pc_cenday_ccs')))
# Create House/Flat Number using UDF
ccs = ccs.withColumn('house_no_cenday_ccs', HF.house_number_udf(col('address_cenday_ccs')))
ccs = ccs.withColumn('flat_no_cenday_ccs', HF.flat_number_udf(col('address_cenday_ccs')))
# Update mover indicator to 0 if pc_cenday = pc (ccsday) - i.e. same postcode means no move
ccs = ccs.withColumn('census_address_indicator', when(col('pc_cenday_ccs') == col('pc_ccs'), lit(0)).otherwise(col('census_address_indicator')))
# -------------------------------------------------------------------------- #
# ----------------- Update Geographic Variables for Movers ----------------- #
# -------------------------------------------------------------------------- #
# Firstly, save geographic variables on CCS day in new columns
ccs = ccs.withColumn('pc_ccsday_ccs', col('pc_ccs'))
ccs = ccs.withColumn('uprn_ccsday_ccs', col('uprn_ccs'))
ccs = ccs.withColumn('house_no_ccsday_ccs', col('house_no_ccs'))
ccs = ccs.withColumn('flat_no_ccsday_ccs', col('flat_no_ccs'))
ccs = ccs.withColumn('address_ccsday_ccs', col('address_ccs'))
# Next, if CCS person is a mover, update their main geographic columns with census_day variables
ccs = ccs.withColumn('pc_ccs', when(col('census_address_indicator') == 1, col('pc_cenday_ccs')).otherwise(col('pc_ccs')))
ccs = ccs.withColumn('uprn_ccs', when(col('census_address_indicator') == 1, col('uprn_cenday_ccs')).otherwise(col('uprn_ccs')))
ccs = ccs.withColumn('house_no_ccs', when(col('census_address_indicator') == 1, col('house_no_cenday_ccs')).otherwise(col('house_no_ccs')))
ccs = ccs.withColumn('flat_no_ccs', when(col('census_address_indicator') == 1, col('flat_no_cenday_ccs')).otherwise(col('flat_no_ccs')))
ccs = ccs.withColumn('address_ccs', when(col('census_address_indicator') == 1, col('address_cenday_ccs')).otherwise(col('address_ccs')))
# If HH has moved since Census Day, set variables relating to current address to NULL
ccs = ccs.withColumn('typaccom_ccs', when(ccs.census_address_indicator == 1, lit(None)).otherwise(ccs.typaccom_ccs))
ccs = ccs.withColumn('tenure_ccs', when(ccs.census_address_indicator == 1, lit(None)).otherwise(ccs.tenure_ccs))
# Create sect/dist/area from primary postcode (progressively shorter prefixes)
ccs = ccs.withColumn('pc_sect_ccs', F.expr("substring({0},1, length({0}) - 2)".format("pc_ccs")))\
         .withColumn('pc_dist_ccs', F.expr("""IF(length({0}) = 5, substring({0},1,4), IF(length({0}) =4, substring({0},1,3), substring({0},1,2)))""".format("pc_sect_ccs")))\
         .withColumn('pc_area_ccs', F.expr("""IF(substring({0},2,1) in('1','2','3','4','5','6','7','8','9'), substr({0},1,1), substring({0},1,2))""".format("pc_sect_ccs")))
# --------------------------------- #
# -------- PERSON VARIABLES ------- #
# --------------------------------- #
# Read in CCS People
ccs_ppl = sparkSession.read.parquet(FILE_PATH('Stage_1_clean_ccs')).selectExpr('id_ccs', 'hh_id_ccs', 'fn1_ccs as FN', 'sn1_ccs as SN', 'dob_ccs as DOB', 'age_ccs as AGE').persist()
ccs_ppl.count()
# Remove records with no household ID
ccs_ppl = ccs_ppl.filter(col('hh_id_ccs') != '-9')
# ----------------------------- #
# -------- MISSINGNESS -------- #
# ----------------------------- #
# Sentinel values for missing data: 'YYYYY' names, '1700-07-07' DOB, 777 age.
# FN / SN
ccs_ppl = ccs_ppl.withColumn('FN', when(ccs_ppl.FN.isNull(), 'YYYYY').otherwise(ccs_ppl.FN))
ccs_ppl = ccs_ppl.withColumn('SN', when(ccs_ppl.SN.isNull(), 'YYYYY').otherwise(ccs_ppl.SN))
# DOB
ccs_ppl = ccs_ppl.withColumn('DOB', when(ccs_ppl.DOB.isNull(), '1700-07-07').otherwise(ccs_ppl.DOB))
ccs_ppl = ccs_ppl.withColumn('DOB', to_date('DOB', 'yyyy-MM-dd'))
# AGE
ccs_ppl = ccs_ppl.withColumn('AGE', when(ccs_ppl.AGE.isNull(), '777').otherwise(ccs_ppl.AGE))
# ------------------------- #
# -------- HH SIZE -------- #
# ------------------------- #
# Count number of IDs for each HH - different to number of usual residents
ccs_ppl = ccs_ppl.withColumn('hh_size_ccs', size(collect_set('id_ccs').over(Window.partitionBy('hh_id_ccs')))).drop('id_ccs')
# ------------------------------ #
# --------- PERSON SETS -------- #
# ------------------------------ #
# Create 4 columns which contains list of all unique forenames / surnames / dobs / ages from that household
ccs_ppl = ccs_ppl.withColumn('fn_set_ccs', collect_set('FN').over(Window.partitionBy('hh_id_ccs')))\
                 .withColumn('sn_set_ccs', collect_set('SN').over(Window.partitionBy('hh_id_ccs')))\
                 .withColumn('dob_set_ccs', collect_set('DOB').over(Window.partitionBy('hh_id_ccs')))\
                 .withColumn('age_set_ccs', collect_set('AGE').over(Window.partitionBy('hh_id_ccs')))\
                 .drop('FN', 'SN', 'DOB', 'AGE')\
                 .drop_duplicates(['hh_id_ccs'])\
# Array missing values
ccs_ppl = ccs_ppl.withColumn('fn_set_ccs', when(ccs_ppl.fn_set_ccs.isNull(), array(lit('YYYYY'))).otherwise(ccs_ppl.fn_set_ccs))
ccs_ppl = ccs_ppl.withColumn('sn_set_ccs', when(ccs_ppl.sn_set_ccs.isNull(), array(lit('YYYYY'))).otherwise(ccs_ppl.sn_set_ccs))
ccs_ppl = ccs_ppl.withColumn('dob_set_ccs', when(ccs_ppl.dob_set_ccs.isNull(), array(lit('1700-07-07'))).otherwise(ccs_ppl.dob_set_ccs))
ccs_ppl = ccs_ppl.withColumn('age_set_ccs', when(ccs_ppl.age_set_ccs.isNull(), array(lit('777'))).otherwise(ccs_ppl.age_set_ccs))
# Sort arrays so set comparisons are order-independent downstream
for var in ['fn_set_ccs', 'sn_set_ccs', 'dob_set_ccs', 'age_set_ccs']:
    ccs_ppl = ccs_ppl.withColumn(var, sort_array(var))
# -------------------------------------- #
# ---- COMBINE PERSON & HH VARIABLES --- #
# -------------------------------------- #
# Join person variables on HH ID
ccs = ccs.join(ccs_ppl, on = 'hh_id_ccs', how = 'left')
# ------------------------- #
# ---------- SAVE --------- #
# ------------------------- #
# Ensure all None array values have a missing value - this enables HH functions to run
# (households with no joined people get singleton sentinel arrays)
ccs = ccs.withColumn('fn_set_ccs', when(ccs.fn_set_ccs.isNull(), array(lit('YYYYY'))).otherwise(ccs.fn_set_ccs))
ccs = ccs.withColumn('sn_set_ccs', when(ccs.sn_set_ccs.isNull(), array(lit('YYYYY'))).otherwise(ccs.sn_set_ccs))
ccs = ccs.withColumn('dob_set_ccs', when(ccs.dob_set_ccs.isNull(), array(lit('1700-07-07'))).otherwise(ccs.dob_set_ccs))
ccs = ccs.withColumn('age_set_ccs', when(ccs.age_set_ccs.isNull(), array(lit('777'))).otherwise(ccs.age_set_ccs))
# Replace missing values with NULL
ccs = ccs.withColumn('typaccom_ccs', when(col('typaccom_ccs').isin(['-9', '-8', '-7']), lit(None)).otherwise(col('typaccom_ccs')))
ccs = ccs.withColumn('tenure_ccs', when(col('tenure_ccs').isin(['-9', '-8', '-7']), lit(None)).otherwise(col('tenure_ccs')))
ccs = ccs.withColumn('no_resi_ccs', when(col('no_resi_ccs').isin(['-9', '-5', '-4']), lit(None)).otherwise(col('no_resi_ccs')))
# Column Types
ccs = ccs.withColumn('typaccom_ccs', ccs['typaccom_ccs'].cast('int'))
ccs = ccs.withColumn('tenure_ccs', ccs['tenure_ccs'].cast('int'))
ccs = ccs.withColumn('no_resi_ccs', ccs['no_resi_ccs'].cast('int'))
# Save clean households
ccs.write.mode('overwrite').parquet(FILE_PATH('Stage_1_clean_HHs_ccs'))
sparkSession.stop()
9f1edfe99b6308d3d2554579aa4a79c439a3f872 | 199 | py | Python | abc130/b/b.py | KeiNishikawa218/atcoder | 0af5e091f8b1fd64d5ca7b46b06b9356eacfe601 | [
"MIT"
] | null | null | null | abc130/b/b.py | KeiNishikawa218/atcoder | 0af5e091f8b1fd64d5ca7b46b06b9356eacfe601 | [
"MIT"
] | null | null | null | abc130/b/b.py | KeiNishikawa218/atcoder | 0af5e091f8b1fd64d5ca7b46b06b9356eacfe601 | [
"MIT"
] | null | null | null | n, x = map(int, input().split())
ll = list(map(int, input().split()))
ans = 1
d_p = 0
d_c = 0
for i in range(n):
d_c = d_p + ll[i]
if d_c <= x:
ans += 1
d_p = d_c
print(ans) | 13.266667 | 37 | 0.487437 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9f2096e503b1764311d574ee845445b547da7b73 | 453 | py | Python | Django/ballon/admin.py | ballon3/GRAD | c630e32272fe34ead590c04d8360169e02be87f1 | [
"MIT"
] | null | null | null | Django/ballon/admin.py | ballon3/GRAD | c630e32272fe34ead590c04d8360169e02be87f1 | [
"MIT"
] | null | null | null | Django/ballon/admin.py | ballon3/GRAD | c630e32272fe34ead590c04d8360169e02be87f1 | [
"MIT"
] | null | null | null | from django.contrib import admin
from ballon.models import Pkg, Resume, Main, Education, Project, Work, Skill, Testimonial, Social, Address
#admin.site.register(Category)
admin.site.register(Resume)
admin.site.register(Main)
admin.site.register(Education)
admin.site.register(Work)
admin.site.register(Project)
admin.site.register(Skill)
admin.site.register(Social)
admin.site.register(Testimonial)
admin.site.register(Address)
admin.site.register(Pkg) | 32.357143 | 106 | 0.81457 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.066225 |
9f21b60bd02c143fea65c9b8b2b327fcc6402878 | 1,316 | py | Python | docs/scripts/add_labels_without_proto.py | daachi/tezos | 00118e91ff13bb54ff658907ed8ffc1c4d3d2266 | [
"MIT"
] | null | null | null | docs/scripts/add_labels_without_proto.py | daachi/tezos | 00118e91ff13bb54ff658907ed8ffc1c4d3d2266 | [
"MIT"
] | 1 | 2022-02-18T09:54:32.000Z | 2022-02-18T09:54:32.000Z | docs/scripts/add_labels_without_proto.py | daachi/tezos | 00118e91ff13bb54ff658907ed8ffc1c4d3d2266 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Add unversioned labels to a page, before each versioned label
## Overview
# This script adds unversioned labels (i.e. Sphinx labels without the _N suffix,
# where N is the name of the protocol) to one doc page before every versioned
# label, unless an unversioned label already exists.
# If it finds an unversioned label, it leaves it as is, but prints a warning.
import sys
import re
import fileinput
USAGE = f"usage: {sys.argv[0]} <proto-dir>/<rst-file>"
if len(sys.argv) != 2:
print(USAGE, file = sys.stderr)
exit(1)
# Parse the argument to separate the protocol directory and the page file
if not (m := re.search(r'([^\/]+)\/([^\/]+)$', sys.argv[1])):
print(USAGE, file = sys.stderr)
exit(1)
proto = m.group(1)
# Recognize a label definition:
def_lbl_pat = re.compile(r' *[.][.] *_([a-zA-Z0-9_-]*) *:')
for line in fileinput.input():
if m := re.match(def_lbl_pat, line):
# label definition
label = m.group(1)
if label.endswith('_' + proto): # versioned label => add before
# drop "_proto" suffix:
print(f".. _{label[:-len(proto)-1]}:" )
else: # unversioned already there => warn
print(f"warning: unversioned label {label} found, may conflict", file = sys.stderr)
print(line, end = '')
| 32.097561 | 95 | 0.641337 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 788 | 0.598784 |
9f26e4475075ee4497350a311fc5dbe71abdd647 | 10,435 | py | Python | ds/eurito_indicators/getters/arxiv_getters.py | nestauk/eurito_indicators | e3dfa783eb0ceb1419cf636dac4e6fdd52268417 | [
"MIT"
] | null | null | null | ds/eurito_indicators/getters/arxiv_getters.py | nestauk/eurito_indicators | e3dfa783eb0ceb1419cf636dac4e6fdd52268417 | [
"MIT"
] | 99 | 2021-02-02T17:11:53.000Z | 2022-03-31T14:36:34.000Z | ds/eurito_indicators/getters/arxiv_getters.py | nestauk/eurito_indicators | e3dfa783eb0ceb1419cf636dac4e6fdd52268417 | [
"MIT"
] | null | null | null | # Get arxiv data
import json
import logging
import os
import pickle
from collections import Counter
from datetime import datetime
from io import BytesIO
from zipfile import ZipFile
import numpy as np
import pandas as pd
import requests
from kaggle.api.kaggle_api_extended import KaggleApi
from eurito_indicators import PROJECT_DIR
from eurito_indicators.pipeline.clustering_naming import make_doc_comm_lookup
from eurito_indicators.pipeline.processing_utils import covid_getter
GRID_PATH = f"{PROJECT_DIR}/inputs/data/grid"
CORD_META_PATH = f"{PROJECT_DIR}/inputs/data/metadata.csv.zip"
DISC_QUERY = f"{PROJECT_DIR}/inputs/data/arxiv_discipline.csv"
COV_PAPERS_PATH = f"{PROJECT_DIR}/inputs/data/arxiv_papers_covid.csv"
def get_arxiv_articles():
    """Load the combined arXiv/CORD article table.

    Renames ``id`` to ``article_id``, derives ``month_year`` (first day of
    the creation month, NaN when undated) and returns the analysis columns.
    """
    art = pd.read_csv(
        f"{PROJECT_DIR}/inputs/data/arxiv_articles_v2.csv",
        dtype={"id": str},
        parse_dates=["created"],
    )
    art = art.rename(columns={"id": "article_id"})
    # Truncate creation dates to the month.
    # Idiom fix: `not pd.isnull(x)` instead of `pd.isnull(x) == False`.
    art["month_year"] = [
        datetime(x.year, x.month, 1) if not pd.isnull(x) else np.nan
        for x in art["created"]
    ]
    selected_columns = [
        "article_id",
        "created",
        "month_year",
        "title",
        "journal_ref",
        "doi",
        "authors",
        "abstract",
        "mag_id",
        "citation_count",
        "article_source",
    ]
    return art[selected_columns]
def get_arxiv_institutes():
    """Load the lookup between article ids and GRID institute ids."""
    path = f"{PROJECT_DIR}/inputs/data/arxiv_article_institutes_updated.csv"
    return pd.read_csv(path, dtype={"article_id": str, "institute_id": str})
def get_article_categories():
    """Load the lookup between article ids and arXiv categories."""
    path = f"{PROJECT_DIR}/inputs/data/arxiv_article_categories.csv"
    return pd.read_csv(path, dtype={"article_id": str})
def get_arxiv_w2v():
    """Load the pickled word2vec model trained on arXiv abstracts."""
    with open(f"{PROJECT_DIR}/outputs/models/arxiv_w2v.p", "rb") as f:
        model = pickle.load(f)
    return model
def fetch_grid():
    """Download and unpack the GRID data under GRID_PATH (idempotent).

    Skips the download entirely when GRID_PATH already exists.
    """
    if not os.path.exists(GRID_PATH):
        logging.info("Collecting Grid data")
        os.makedirs(GRID_PATH, exist_ok=True)
        g = requests.get("https://ndownloader.figshare.com/files/28431024")
        # Fail loudly on HTTP errors instead of trying to unzip an error page.
        g.raise_for_status()
        g_z = ZipFile(BytesIO(g.content))
        g_z.extractall(GRID_PATH)
def fetch_cord_meta():
    """Fetch the CORD-19 metadata.csv from Kaggle if not already downloaded.

    Requires Kaggle API credentials to be configured locally; downloads
    into inputs/data (arrives as metadata.csv.zip).
    """
    if os.path.exists(CORD_META_PATH) is False:
        logging.info("Fetching cord data")
        api = KaggleApi()
        api.authenticate()
        api.dataset_download_file(
            "allen-institute-for-ai/CORD-19-research-challenge",
            "metadata.csv",
            path=f"{PROJECT_DIR}/inputs/data",
        )
def get_cord_metadata():
    """Parse the CORD metadata needed to date CORD papers.

    Returns a tuple:
      * set of "cord-<uid>" ids whose publish_time is year-only (no '-'),
        i.e. too coarse to build a month_year from;
      * dict mapping "cord-<uid>" to the publication year (int).
    """
    meta = pd.read_csv(f"{PROJECT_DIR}/inputs/data/metadata.csv.zip", compression="zip")
    meta_has_date = meta.dropna(axis=0, subset=["publish_time"])
    # Ids prefixed "cord-" to match the article_id convention used elsewhere.
    meta_bad_date = set(
        [
            f"cord-{_id}"
            for _id, date in zip(
                meta_has_date["cord_uid"], meta_has_date["publish_time"]
            )
            if "-" not in date
        ]
    )
    meta_year = {
        f"cord-{_id}": int(date.split("-")[0]) if "-" in date else int(date)
        for _id, date in zip(meta_has_date["cord_uid"], meta_has_date["publish_time"])
    }
    return meta_bad_date, meta_year
def get_covid_papers():
    """Build (or load the cached) table of Covid-related papers.

    Includes:
        * keyword-matching titles/abstracts with ``covid_getter``;
        * removing duplicated papers in CORD (preprint server rows and
          duplicate titles);
        * creating the month_year variable, missing for CORD papers
          without a detailed publication date.
    The result is cached at COV_PAPERS_PATH after the first run.
    """
    if os.path.exists(COV_PAPERS_PATH) is False:
        logging.info("Making covid papers")
        arts = get_arxiv_articles()
        logging.info("processing arxiv papers")
        # arXiv side: match on title + abstract concatenated.
        arxiv_covid = (
            arts.query("article_source!='cord'")
            .dropna(axis=0, subset=["abstract","title"])
            .assign(text = lambda df: [" ".join([x,y]) for x,y in zip(df['title'],df['abstract'])])
            .assign(has_cov=lambda df: [covid_getter(text) for text in df["text"]])
            .query("has_cov == True")
        )
        arxiv_covid["month_year"] = [
            datetime(x.year, x.month, 1) for x in arxiv_covid["created"]
        ]
        arxiv_covid["year"] = [x.year for x in arxiv_covid["month_year"]]
        logging.info("processing cord papers")
        # CORD side: match on abstract only; lowercase journal_ref so the
        # preprint-server filter below works.
        cord = (
            arts.query("article_source=='cord'")
            .dropna(axis=0, subset=["abstract"])
            .assign(has_cov=lambda df: [covid_getter(text) for text in df["abstract"]])
            .query("has_cov == True")
            .assign(
                journal_ref=lambda df: [
                    x.lower() if type(x) == str else np.nan for x in df["journal_ref"]
                ]
            )
        )
        # Drop preprint-server rows (already present via arXiv) and title dupes.
        cord = cord.loc[~cord["journal_ref"].isin(["biorxiv", "medrxiv"])]
        cord = cord.drop_duplicates("title")
        meta_bad_date, meta_year = get_cord_metadata()
        cord["year"] = cord["article_id"].map(meta_year)
        # month_year only when the CORD metadata has a full (monthly) date.
        cord["month_year"] = [
            datetime(d.year, d.month, 1)
            if (_id not in meta_bad_date) & (not pd.isnull(d))
            else np.nan
            for _id, d in zip(cord["article_id"], cord["created"])
        ]
        papers = (
            pd.concat([arxiv_covid, cord], axis=0)
            .reset_index(drop=True)
            .drop(axis=1, labels=["has_cov"])
        )
        papers.to_csv(COV_PAPERS_PATH, index=False)
        return papers
    else:
        return pd.read_csv(
            COV_PAPERS_PATH,
            dtype={"article_id": str},
            parse_dates=["created", "month_year"],
        )
def get_grid_meta():
    """Get relevant grid metadata.

    Merges the GRID institutes, addresses, types and geonames tables and
    returns one row per institute with name, location and NUTS codes.
    """
    name, address, org_type, geo = [
        pd.read_csv(f"{GRID_PATH}/full_tables/{n}.csv")
        for n in ["institutes", "addresses", "types", "geonames"]
    ]
    # geonames joined on (geonames_city_id, city); left join keeps
    # institutes whose city has no geonames entry.
    merged = (
        name.merge(address, on="grid_id")
        .merge(org_type, on="grid_id")
        .merge(geo, on=["geonames_city_id", "city"], how="left")
    )
    grid_meta = merged[
        [
            "grid_id",
            "name",
            "lat",
            "lng",
            "city",
            "country",
            "country_code",
            "type",
            "nuts_level1_code",
            "nuts_level2_code",
            "nuts_level3_code",
        ]
    ]
    return grid_meta
def query_arxiv_institute():
    """Combine the arXiv article/institute lookup with GRID metadata."""
    lookup = get_arxiv_institutes()
    grid_meta = get_grid_meta()
    return lookup.merge(grid_meta, left_on="institute_id", right_on="grid_id")
def get_arxiv_tokenised():
    """Load the tokenised arXiv abstracts (JSON keyed by article id)."""
    path = f"{PROJECT_DIR}/inputs/data/arxiv_tokenised.json"
    with open(path, "r") as f:
        return json.load(f)
def get_arxiv_fos():
    """Load the lookup between article ids and MAG field-of-study ids."""
    path = f"{PROJECT_DIR}/inputs/data/arxiv_article_fields_of_study.csv"
    return pd.read_csv(path, dtype={"article_id": str, "fos_id": int})
def get_children(values):
    """Parse a comma separated string of MAG child field-of-study ids.

    Returns a list of ints, or ``np.nan`` when *values* is missing
    (MAG leaves ``child_ids`` as NaN for fields without children).
    """
    # isinstance instead of `type(values) is str` (idiomatic type check).
    if isinstance(values, str):
        return [int(x) for x in values.split(",")]
    return np.nan
def make_fos_l0_lookup():
    """Creates a lookup between all MAG fos levels and the top level of the taxonomy.

    Returns a dataframe with one row per (fos_id, level-0 parent) pair,
    including human-readable names for both ids.
    """
    logging.info("Reading data")
    fos_taxon = pd.read_csv(f"{PROJECT_DIR}/inputs/data/mag_fields_of_study.csv")
    id_name_lookup = fos_taxon.set_index("id")["name"].to_dict()
    all_children = {
        _id: get_children(values)
        for _id, values in zip(fos_taxon["id"], fos_taxon["child_ids"])
    }
    fos_0 = fos_taxon.loc[fos_taxon["level"] == 0]["id"].tolist()
    fos_lu = {}
    logging.info("Finding children categories")
    # We recursively look for the children of level 0s at different levels
    # of the taxonomy
    for f in fos_0:
        children = all_children[f].copy()
        for level in range(1, 5):
            table = fos_taxon.loc[fos_taxon["id"].isin(children)].query(
                f"level=={level}"
            )
            for _id in table["id"]:
                try:
                    children.extend(all_children[_id])
                except (TypeError, KeyError):
                    # Fix: the original swallowed BaseException here.  Leaf
                    # fields store np.nan instead of a child list (iterating
                    # it raises TypeError) and missing ids raise KeyError;
                    # both simply mean "no children to add".
                    pass
        for c in children:
            if c not in fos_lu:
                fos_lu[c] = [f]
            else:
                fos_lu[c].append(f)
    logging.info("Creating dataframe")
    fos_lu_df = pd.DataFrame(
        {"fos_id": fos_lu.keys(), "fos_l0": fos_lu.values()}
    ).explode("fos_l0")
    fos_lu_df["fos_id_name"], fos_lu_df["fos_l0_name"] = [
        fos_lu_df[var].map(id_name_lookup) for var in ["fos_id", "fos_l0"]
    ]
    return fos_lu_df
def query_article_discipline():
    """Returns a lookup between articles and high level disciplines.

    The result is cached at DISC_QUERY: it is computed and saved on the
    first call and simply read back on subsequent calls.
    """
    # Idiom fix: `not os.path.exists(...)` instead of `... is False`.
    if not os.path.exists(DISC_QUERY):
        arxiv_fos = get_arxiv_fos()
        fos_lu_df = make_fos_l0_lookup()
        arxiv_f0 = arxiv_fos.merge(fos_lu_df, on="fos_id")
        logging.info("Finding top discipline")
        # Keep, per article, its most frequent level-0 discipline name.
        arxiv_discipline = (
            arxiv_f0.groupby("article_id")["fos_l0_name"]
            .apply(lambda x: Counter(x).most_common(1)[0][0])
            .reset_index(drop=False)
        )
        arxiv_discipline.to_csv(DISC_QUERY, index=False)
        return arxiv_discipline
    return pd.read_csv(DISC_QUERY, dtype={"article_id": str})
def get_arxiv_topic_model():
    """Load the pickled topSBM topic model trained on the arXiv sample."""
    model_path = f"{PROJECT_DIR}/outputs/models/topsbm_arxiv_sampled.p"
    with open(model_path, "rb") as infile:
        return pickle.load(infile)
def get_arxiv_tokenised():
    # NOTE(review): byte-identical re-definition of the earlier
    # get_arxiv_tokenised in this module; it shadows the first one.
    # One of the two copies should probably be removed.
    with open(f"{PROJECT_DIR}/inputs/data/arxiv_tokenised.json", "r") as infile:
        return json.load(infile)
def get_ai_results():
    """Load the pickled outputs of the find-AI pipeline."""
    results_path = f"{PROJECT_DIR}/outputs/data/find_ai_outputs.p"
    with open(results_path, "rb") as infile:
        return pickle.load(infile)
def get_cluster_names():
    """Load the cluster-id -> name lookup, coercing JSON string keys to int."""
    names_path = f"{PROJECT_DIR}/outputs/data/aux/arxiv_cluster_names.json"
    with open(names_path, 'r') as infile:
        raw = json.load(infile)
    return {int(cluster_id): name for cluster_id, name in raw.items()}
def get_cluster_ids():
    """Map each paper id to the human-readable name of its cluster."""
    lookup_path = f"{PROJECT_DIR}/inputs/data/arxiv_cluster_lookup.json"
    with open(lookup_path, 'r') as infile:
        paper_to_cluster = json.load(infile)
    names = get_cluster_names()
    return {paper: names[cluster] for paper, cluster in paper_to_cluster.items()}
if __name__ == "__main__":
    # Script entry point — fetch_grid is defined/imported elsewhere in
    # this file (not visible in this chunk); running the module only
    # fetches the GRID data.
    fetch_grid()
| 28.510929 | 99 | 0.605271 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,045 | 0.291806 |
9f271d6d4cc41e0b7933872e8c2a90b565c70fe7 | 6,617 | py | Python | StatusWindow.py | BlackXanthus/PyBirch5 | 61a70fe48f859509bd737d2c204882e13c2411c6 | [
"BSD-3-Clause"
] | null | null | null | StatusWindow.py | BlackXanthus/PyBirch5 | 61a70fe48f859509bd737d2c204882e13c2411c6 | [
"BSD-3-Clause"
] | null | null | null | StatusWindow.py | BlackXanthus/PyBirch5 | 61a70fe48f859509bd737d2c204882e13c2411c6 | [
"BSD-3-Clause"
] | null | null | null |
####
#Made redundant in Alpha 0.4
#
# As this provided no extra functionality over the Channel, and is, in essence,
# simply a special channel, various additions were made to Channel.py
# to perform the same function. This has meant less duplication of code.
####
import sys, sip
from PyQt5 import QtCore, QtGui
#from pybirchMdiV4 import Ui_PyBirch
#from IrcConnection import IrcConnection
#from ServerOutputSorter import ServerOutputSorter
from pybirchMessage import Ui_BirchMessageWindow
from userInputSorter import UserInputSorter
from TextString import TextString
from NickListView import NickListView
from time import strftime
from struct import unpack
import string
# QString is not always importable from PyQt5.QtCore; fall back to the
# built-in str on ImportError so the rest of the module can use QString
# uniformly.
try:
    from PyQt5.QtCore import QString
except ImportError:
    QString = str
class StatusWindow(QtGui.QMdiSubWindow):
    """MDI sub-window showing the server/status channel of an IRC client.

    NOTE(review): per the header comment of this file, this class was made
    redundant in Alpha 0.4 — it duplicates Channel with no extra behaviour.
    """
    # Class-level defaults; overwritten per instance in __init__.
    _channel = ""
    _nick = ""
    _button = ""
    __version__ = "Status v. 0.1"
    def __init__(self, my_channel, parent=None):
        # Store the channel name before building the Qt UI around it.
        self._channel=my_channel
        self._nick=""
        #Config will need to be passed in later, because of the order of
        #creation.
        #self.config=my_config
        #print "DEBUG-------->In Channel :"+_channel_
        QtGui.QMdiSubWindow.__init__(self,parent)
        sip.delete(self.layout())
        self.ui = Ui_BirchMessageWindow()
        self.ui.setupUi(self)
        # self.ui.setupUi(self)
        self.ui.label_ChanName.setText(self._channel)
        self.setWindowTitle(self._channel)
        #install the key event filter.
        self.ui.text_input.installEventFilter(self)
        #self._nicklist = NickListView()
        #self.ui.list_NickList.setModel(self._nicklist)
        # self.ui.setWindowTitle(_channel_)
    #Quick and Dirty hashing method.
    #--- May have problems with similarly-named channels. eg
    # test, tset, ttse and so on. MUST TEST (v0.1)
    #http://bytes.com/topic/python/answers/23620-string-ascii-values
    # def __hash__(self):
    #    global _channel_
    #    return unpack('%sB' % len(value), value)
    def get_channel_name(self):
        """Return the channel name this window represents."""
        return self._channel
    def append_channel_text(self,myString):
        """Append one line of (HTML) text to the channel view.

        A "[HH:MM]" timestamp is prefixed (showTime is currently
        hard-coded to "True"; the config lookup is commented out below).
        """
        myQString = QString(str(myString))
        showTime = "True"
        #self.ui.editor_Window.append(myQString)
        #try:
        #    showTime=self.config.get("Channel_Settings", "Time")
        #except:
        #    self.config.add_section("Channel_Settings")
        #    self.config.set("Channel_Settings","Time", "True")
        #    showTime="True"
        if showTime=="True":
            current_time = strftime("%H:%M")
            myQString="["+current_time+"] "+myQString
        self.ui.editor_Window.insertHtml(myQString+"<br>")
        #This should stop the window from updating when scrolling #up and down through the channel!
        # if not self.ui.editor_Window.isHorizontalSliderPressed():
        # 11 is the QTextCursor.End move operation — keep view at bottom.
        self.ui.editor_Window.moveCursor(11,False)
        del myQString
    def eventFilter(self, obj ,event):
        """Intercept Enter (send input) and Tab (nick completion) keys."""
        if event.type() == QtCore.QEvent.KeyPress and event.matches(QtGui.QKeySequence.InsertParagraphSeparator):
            # self.sendToServer()
            self.process_input_event()
        if event.type() == QtCore.QEvent.KeyPress and event.key() == QtCore.Qt.Key_Tab:
            self.process_tab_event()
            return True
        return False
    def process_tab_event(self):
        """Handle Tab presses; nick completion is currently disabled."""
        print("Processing Tab Event")
        #originalString = self.ui.text_input.text()
        #searchString = str(originalString.rsplit(None, 1))
        #searchString = originalString.split(" ")[-1]
        #print(searchString)
        #resultString = self._nicklist.search_nick(searchString)
        #if resultString != "":
        #originalString=originalString.rsplit(" ", 1)[0]
        #if originalString.endswith(searchString):
        #    originalString= originalString[:-len(searchString)]
        #originalString = originalString.rstrip(searchString)
        #    if len(originalString) == 0:
        #        self.ui.text_input.setText(originalString +resultString)
        #    else:
        #        self.ui.text_input.setText(originalString +" "+ resultString)
    def process_input_event(self):
        """Take the typed line, emit it to the server and echo it locally."""
        channelName=self._channel
        originalString = self.ui.text_input.text()
        textToSend = originalString
        displayText = textToSend
        uIS = UserInputSorter()
        ts = uIS.process_input(channelName, self._nick, originalString)
        self.ui.editor_Window.moveCursor(11,False)
        #
        # myTextToSwitch = textToSend.split(" ")
        #
        # if myTextToSwitch[0][0:1] == "/":
        #    if myTextToSwitch[0] == "/msg":
        #        #Note, this doesn't in any way work.
        #        remainderIndex = textToSend.find(myTextToSwitch[1])
        #        textToSend = "PRIVMSG "+myTextToSwitch[1]+" "+textToSend[remainderIndex:]
        #        displayText = "**Messaging "+myTextToSwitch[1]+textToSend[remainderIndex:]
        #    else:
        #        textToSend = str(textToSend[1:])
        #        displayText = "---"+str(textToSend)
        #remainderIndex=string.find(strServerOutput,":",2)
        # else:
        #    textToSend = "PRIVMSG "+channelName+" :"+textToSend
        #    displayText = "["+_nick_+"] "+originalString
        #try:
        #    showTime=self.config.get("Channel_Settings", "Time")
        #except:
        #    pass
        showTime = "True"
        # Old-style (PyQt4) signal emit; the parent hooks "UserInput".
        self.emit(QtCore.SIGNAL("UserInput"),ts.get_original_string())
        myDisplayString = ts.get_display_string()
        if showTime == "True":
            current_time = strftime("%H:%M")
            myDisplayString="["+current_time+"] "+myDisplayString
        self.ui.editor_Window.insertHtml(myDisplayString+"<br>")
        self.ui.text_input.setText("")
    def nick_update(self, my_new_nick):
        """Record the user's current nick for display prefixes."""
        self._nick = my_new_nick
    def closeEvent(self, closeEvent):
        """Notify listeners that this channel window is closing."""
        self.emit(QtCore.SIGNAL("Channel_Closed"),self._channel)
        closeEvent.accept();
        print ("<StatusWindow : Close event> PANIC Mr Mannering!")
    def button_click(self):
        """Toggle the window between shown and minimised/hidden."""
        #sender = self.sender
        if self.isMinimized():
            self.show()
            self.showNormal()
        else:
            self.showMinimized()
            self.hide()
    ###
    # While insert_nick currently works, the listWidget will need a QAbstractView in order to be able to remove
    # items. This will deal with @, +, and other standard modifiers of the channel. definately TODO!
    #
    # def insert_nick(self, ts):
    #    #this is being done this way for future proofing
    #    print ("<DEBUG>Channel.py:insert_nick", ts.get_message())
    #    for myNick in ts.get_message().split():
    #        myNickToAdd = myNick.replace(":","",1 )
    #        self._nicklist.insert_nick(myNickToAdd)
    #        self.ui.listWidget.addItem(QtGui.QListWidgetItem(myNickToAdd))
    # def remove_nick(self, ts):
    #    print("<DEBUG>Channel.py:remove_nick"+self._channel+" :"+ts.get_nick())
    #    for myNick in ts.get_nick().split():
    #        myNickToRemove = myNick.replace(":", "", 1)
    #        found = self._nicklist.remove_nick(myNickToRemove)
    #        self.ui.listWidget.removeItemWidget(QtGui.QListWidgetItem(myNickToRemove))
    #        if(found):
    #            self.append_channel_text(ts.get_display_string())
    # def nick_mode_change(self, ts):
    #    self._nicklist.changeStatus(ts.get_mode_user(), ts.get_mode_settings())
| 27.118852 | 107 | 0.716639 | 4,795 | 0.724649 | 0 | 0 | 0 | 0 | 0 | 0 | 3,900 | 0.589391 |
9f2899e6bed96e2dc652ccce962eaa40f027c29b | 697 | py | Python | utils/config_objects.py | spikingevolution/evolution-strategies | 21f6032df0d5aa5e5dcecedd2520b0492e2361f2 | [
"MIT"
] | null | null | null | utils/config_objects.py | spikingevolution/evolution-strategies | 21f6032df0d5aa5e5dcecedd2520b0492e2361f2 | [
"MIT"
] | null | null | null | utils/config_objects.py | spikingevolution/evolution-strategies | 21f6032df0d5aa5e5dcecedd2520b0492e2361f2 | [
"MIT"
] | null | null | null | from collections import namedtuple
# Immutable run-configuration record for an evolution-strategies run.
Config = namedtuple(
    'Config',
    'env_id env_seed population_size timesteps_per_gen num_workers '
    'learning_rate noise_stdev snapshot_freq return_proc_mode '
    'calc_obstat_prob l2coeff eval_prob'
)
# Feature flags selecting which ES algorithm refinements are enabled.
Optimizations = namedtuple(
    'Optimizations',
    'mirrored_sampling fitness_shaping weight_decay discretize_actions '
    'gradient_optimizer observation_normalization divide_by_stdev'
)
# Describes the policy-network architecture and its optimiser settings.
ModelStructure = namedtuple(
    'ModelStructure',
    'ac_noise_std ac_bins hidden_dims nonlin_type optimizer optimizer_args'
)
| 18.837838 | 47 | 0.649928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 415 | 0.595409 |
9f28adf03dedfee3bb16b177af11aae2f63cb4c9 | 1,833 | py | Python | scripts/run_burner_bench.py | joye1503/cocrawler | 39b543320e91477412ab8bfc8402c88c3304c553 | [
"Apache-2.0"
] | 166 | 2016-07-18T19:37:34.000Z | 2022-03-06T18:26:50.000Z | scripts/run_burner_bench.py | joye1503/cocrawler | 39b543320e91477412ab8bfc8402c88c3304c553 | [
"Apache-2.0"
] | 9 | 2016-10-22T18:20:56.000Z | 2021-04-06T05:28:04.000Z | scripts/run_burner_bench.py | joye1503/cocrawler | 39b543320e91477412ab8bfc8402c88c3304c553 | [
"Apache-2.0"
] | 25 | 2017-02-28T19:41:41.000Z | 2021-07-10T11:20:33.000Z | import sys
import logging
import functools
import asyncio
import cocrawler.burner as burner
import cocrawler.parse as parse
import cocrawler.stats as stats
# Shared benchmark state: thread count, event loop, the burner under
# test, and the work queue that feeds it.
test_threadcount = 2
loop = asyncio.get_event_loop()
b = burner.Burner(test_threadcount, loop, 'parser')
queue = asyncio.Queue()
def parse_all(name, string):
    """Run both HTML link finders over *string* and report any disagreement.

    Prints the symmetric difference when the two parsers find different
    link sets.  Returns a 1-tuple — presumably the shape the burner
    expects for completed work (confirm against burner.burn).
    """
    links1, _ = parse.find_html_links(string, url=name)
    links2, embeds2 = parse.find_html_links_and_embeds(string, url=name)
    all2 = links2.union(embeds2)
    if len(links1) != len(all2):
        print('{} had different link counts of {} and {}'.format(name, len(links1), len(all2)))
        extra1 = links1.difference(all2)
        extra2 = all2.difference(links1)
        print(' extra in links: {!r}'.format(extra1))
        print(' extra in links and embeds: {!r}'.format(extra2))
    return 1,
async def work():
    """Worker coroutine: pull names off the queue and burn a parse job for each."""
    while True:
        w = await queue.get()
        # 10k-char dummy document; the benchmark only measures dispatch cost.
        string = ' ' * 10000
        partial = functools.partial(parse_all, w, string)
        await b.burn(partial)
        queue.task_done()
async def crawl():
    """Spawn the workers, wait for the queue to drain, then cancel them."""
    workers = [asyncio.Task(work(), loop=loop) for _ in range(test_threadcount)]
    print('queue count is {}'.format(queue.qsize()))
    await queue.join()
    print('join is done')
    # Workers loop forever; cancel any that are still running.
    for w in workers:
        if not w.done():
            w.cancel()
# Main program: enqueue 10k dummy jobs, run the crawl to completion, then
# shut the loop down and print burner/parser statistics.
for i in range(10000):
    queue.put_nowait('foo')
print('Queue size is {}, beginning work.'.format(queue.qsize()))
try:
    loop.run_until_complete(crawl())
    print('exit run until complete')
except KeyboardInterrupt:
    sys.stderr.flush()
    print('\nInterrupt. Exiting cleanly.\n')
finally:
    loop.stop()
    loop.run_forever()
    loop.close()
# NOTE(review): logging is configured only here, after the crawl has
# already finished — earlier log output uses whatever default existed.
levels = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG]
logging.basicConfig(level=levels[3])
stats.report()
parse.report()
| 25.458333 | 95 | 0.659574 | 0 | 0 | 0 | 0 | 0 | 0 | 477 | 0.260229 | 270 | 0.1473 |
9f2a3bd086cb47aeb604a357b7d551f9109fd29b | 3,362 | py | Python | examples/crankd/sample-of-events/generate-event-plist.py | timgates42/pymacadmin | d4913a45497aebbcf9ba5c28c7b064d6442e0a2e | [
"Apache-2.0"
] | 112 | 2015-05-21T11:28:06.000Z | 2021-11-24T02:21:14.000Z | examples/crankd/sample-of-events/generate-event-plist.py | timgates42/pymacadmin | d4913a45497aebbcf9ba5c28c7b064d6442e0a2e | [
"Apache-2.0"
] | 14 | 2015-05-20T16:41:42.000Z | 2019-08-31T13:06:18.000Z | examples/crankd/sample-of-events/generate-event-plist.py | timgates42/pymacadmin | d4913a45497aebbcf9ba5c28c7b064d6442e0a2e | [
"Apache-2.0"
] | 17 | 2015-06-03T20:51:49.000Z | 2021-12-22T22:58:25.000Z | #!/usr/bin/env python
"""
Generates a list of OS X system events into a plist for crankd.
This is designed to create a large (but probably not comprehensive) sample
of the events generated by Mac OS X that crankd can tap into. The generated
file will call the 'tunnel.sh' as the command for each event; said file can
be easily edited to redirect the output to wherever you would like it to go.
"""
# Name of the crankd configuration plist this script generates.
OUTPUT_FILE = "crankd-config.plist"
from SystemConfiguration import SCDynamicStoreCopyKeyList, SCDynamicStoreCreate
# Each event has a general event type, and a specific event
# The category is the key, and the value is a list of specific events
event_dict = {}
def AddEvent(event_category, specific_event):
    """Adds an event to the event dictionary"""
    # setdefault creates the category list on first use.
    event_dict.setdefault(event_category, []).append(specific_event)
def AddCategoryOfEvents(event_category, events):
    """Adds a list of events that all belong to the same category"""
    for event in events:
        AddEvent(event_category, event)
def AddKnownEvents():
    """Here we add all the events that we know of to the dictionary"""
    # Add a bunch of dynamic events
    # SCDynamicStoreCopyKeyList(store, ".*") returns every current
    # SystemConfiguration dynamic-store key.
    store = SCDynamicStoreCreate(None, "generate_event_plist", None, None)
    AddCategoryOfEvents(u"SystemConfiguration",
                        SCDynamicStoreCopyKeyList(store, ".*"))
    # Add some standard NSWorkspace events
    # (the literal is split on whitespace, so its layout is cosmetic)
    AddCategoryOfEvents(u"NSWorkspace",
                        u'''
                        NSWorkspaceDidLaunchApplicationNotification
                        NSWorkspaceDidMountNotification
                        NSWorkspaceDidPerformFileOperationNotification
                        NSWorkspaceDidTerminateApplicationNotification
                        NSWorkspaceDidUnmountNotification
                        NSWorkspaceDidWakeNotification
                        NSWorkspaceSessionDidBecomeActiveNotification
                        NSWorkspaceSessionDidResignActiveNotification
                        NSWorkspaceWillLaunchApplicationNotification
                        NSWorkspaceWillPowerOffNotification
                        NSWorkspaceWillSleepNotification
                        NSWorkspaceWillUnmountNotification
                        '''.split())
def PrintEvents():
    """Prints all the events, for debugging purposes"""
    # Python 2 print statements — this script is Python 2 only.
    for category in sorted(event_dict):
        print category
        for event in sorted(event_dict[category]):
            print "\t" + event
def OutputEvents():
    """Outputs all the events to a file"""
    # print the header for the file
    # (Python 2 `print >>file` syntax throughout.)
    plist = open(OUTPUT_FILE, 'w')
    print >>plist, '''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>'''
    for category in sorted(event_dict):
        # print out the category
        print >>plist, "    <key>%s</key>\n    <dict>" % category
        for event in sorted(event_dict[category]):
            # Every event runs tunnel.sh with the category and event name.
            print >>plist, """
        <key>%s</key>
        <dict>
            <key>command</key>
            <string>%s '%s' '%s'</string>
        </dict>""" % ( event, 'tunnel.sh', category, event )
        # end the category
        print >>plist, "    </dict>"
    # end the plist file
    print >>plist, '</dict>'
    print >>plist, '</plist>'
    plist.close()
def main():
    """Runs the program"""
    AddKnownEvents()
    #PrintEvents()
    OutputEvents()
# Unconditional entry point (no __main__ guard in the original).
main()
| 31.12963 | 102 | 0.674004 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,057 | 0.611838 |
9f2c3e9cafb84df9fef985c9eceb7eb2ace5651b | 15,742 | py | Python | submissions/available/NNSlicer/NNSlicer/eval/eval_by_reduced_point.py | ziqi-zhang/fse20 | f3998abda2e40d67989ec113340236f3460f0dc3 | [
"MIT"
] | null | null | null | submissions/available/NNSlicer/NNSlicer/eval/eval_by_reduced_point.py | ziqi-zhang/fse20 | f3998abda2e40d67989ec113340236f3460f0dc3 | [
"MIT"
] | null | null | null | submissions/available/NNSlicer/NNSlicer/eval/eval_by_reduced_point.py | ziqi-zhang/fse20 | f3998abda2e40d67989ec113340236f3460f0dc3 | [
"MIT"
] | 2 | 2020-07-24T20:43:34.000Z | 2020-09-08T07:10:14.000Z | import csv
import random
from functools import partial
from typing import Callable, Optional
from pdb import set_trace as st
import os
import random
import pandas as pd
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
import numpy as np
import tensorflow as tf
from foolbox.attacks import (
FGSM,
Attack,
DeepFoolAttack,
IterativeGradientSignAttack,
SaliencyMapAttack,
)
# from foolbox.criteria import TargetClass
# from foolbox.models import TensorFlowModel
from tensorflow.python.training import saver
from tensorflow.python.training.session_manager import SessionManager
import tensorflow as tf
import numpy as np
import sklearn.metrics as metrics
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
from model.config import LENET
from model import LeNet
import nninst_mode as mode
from dataset import mnist
from dataset.config import MNIST_TRAIN, MNIST_PATH
from dataset.mnist_transforms import *
from trace.lenet_mnist_class_trace_v2 import (
data_config,
)
from trace.common import (
class_trace,
)
from tf_utils import new_session_config
from nninst_statistics import calc_trace_side_overlap
from nninst_trace import TraceKey
from nninst_utils.numpy import arg_approx
from nninst_utils.ray import ray_init
from nninst_utils.fs import ensure_dir, IOAction, CsvIOAction, abspath
from .common import get_overlay_summary, clean_overlap_ratio, \
translation_overlap_ratio, attack_overlap_ratio, \
lenet_mnist_example
from .cw_attack import cw_generate_adversarial_example
from .eval_mnist import foolbox_generate_adversarial_example
from .cw_attacks import CarliniL2
from nninst_graph import AttrMap, Graph, GraphAttrKey
from nninst_utils.ray import ray_iter
from tf_graph import (
MaskWeightWithTraceHook,
model_fn_with_fetch_hook,
)
from trace.common import (
get_predicted_value,
get_rank,
predict,
reconstruct_class_trace_from_tf,
reconstruct_trace_from_tf,
reconstruct_trace_from_tf_brute_force,
)
from .analyse_class_trace import reconstruct_edge
# Model config
model_label = "augmentation"
model_dir = f"result/lenet/model_{model_label}"
# Trace config
trace_dir = f"{model_dir}/traces"
trace_name = "noop"
# Result dir
result_name = "test"
# Trace attribute compared by this module (point-level masks).
key = TraceKey.POINT
# Result dir
key_name = key.split('.')[1]
# reduce_mode includes output, channel, none
reduce_mode = "none"
result_dir = f"{model_dir}/conv_point_NOT/{reduce_mode}_{trace_name}_attack_overlap"
# result_dir = f"result/lenet/test"
images_per_class = 100
attack_name = "FGSM"
# Attack registry: name -> [attack class, optional kwargs dict].
attacks = {
    "FGSM": [FGSM],
    "BIM": [IterativeGradientSignAttack],
    "JSMA": [SaliencyMapAttack],
    "DeepFool": [DeepFoolAttack],
    # "DeepFool_full": [DeepFoolAttack, dict(subsample=None)],
    # "CWL2": [CarliniL2],
}
# Labels returned by detect_by_reduced_edge below.
adversarial_label = 1
normal_label = -1
# NOTE(review): this lambda references `threshold`, which is not defined
# at module scope (it is local to attack_transform_overlap), so calling
# it here would raise NameError; the experiment code builds its own
# class_trace_fn instead. Verify before using this binding.
class_trace_fn=lambda class_id: lenet_mnist_class_trace(
    class_id,
    threshold,
    label=model_label,
    trace_dir = trace_dir,
)
lenet_mnist_class_trace = class_trace(
    trace_name,
    model_config=LENET,
    data_config=data_config,
)
def reconstruct_point(
    trace,
    graph,
    key,
    node_name,
):
    """Rebuild the dense 0/1 point mask for *key* of one node in *trace*.

    Falls back to any per-variant ``TraceKey.POINT.*`` attribute when the
    exact key is absent.  *graph* is unused but kept for interface
    compatibility with callers that pass it positionally.
    """
    attrs = trace.nodes[node_name]

    def to_bitmap(shape, attr):
        # Scatter the stored sparse indices into a dense int8 mask.
        mask = np.zeros(np.prod(shape), dtype=np.int8)
        mask[TraceKey.to_array(attr)] = 1
        return mask.reshape(shape)

    if key in attrs:
        return to_bitmap(attrs[key + "_shape"], attrs[key])
    for attr_name, attr in attrs.items():
        if attr_name.startswith(TraceKey.POINT + ".") and attr is not None:
            return to_bitmap(attrs[TraceKey.POINT_SHAPE], attr)
    # Bug fix: the original built RuntimeError(...) without raising it,
    # so a missing key silently returned None.
    raise RuntimeError(f"Key {key!r} not found for node {node_name!r}")
def filter_point_by_key(
    trace: AttrMap,
    key: str = TraceKey.POINT,
    graph=None,
):
    """Collect, per op, the reconstructed point mask for nodes carrying *key*.

    Bug fix: the original default ``graph=LENET.network_class.graph().load()``
    was evaluated once at import time, loading the model graph as an import
    side effect.  The default graph is now loaded lazily on first use and
    cached; passing ``graph`` explicitly behaves exactly as before.
    """
    if graph is None:
        graph = getattr(filter_point_by_key, "_default_graph", None)
        if graph is None:
            graph = LENET.network_class.graph().load()
            filter_point_by_key._default_graph = graph
    reconstruct_point_fn = partial(
        reconstruct_point,
        trace,
        graph,
        key,
    )
    op_to_mask = {}
    for node_name in sorted(trace.nodes):
        if key in trace.nodes[node_name]:
            op_to_mask[node_name] = reconstruct_point_fn(node_name)
    return op_to_mask
def reduce_edge_mask(edge_mask: AttrMap, reduce_mode="none"):
    """Optionally collapse conv-layer masks before comparison.

    For nodes whose name contains "conv2d": "channel" sums over axis 0
    and binarises; "output" sums over the two trailing axes and
    binarises.  Any other node (or reduce_mode "none") passes through
    unchanged.
    """
    reduced_edge = {}
    for node_name in edge_mask:
        # Per the original note, conv edges have shape (Ci, Hk, Wk, Co, Ho, Wo).
        mask = edge_mask[node_name]
        if "conv2d" in node_name and reduce_mode == "channel":
            mask = mask.sum(0)
            mask[mask > 0] = 1
        elif "conv2d" in node_name and reduce_mode == "output":
            mask = mask.sum(-1).sum(-1)
            mask[mask > 0] = 1
        reduced_edge[node_name] = mask
    return reduced_edge
def detect_by_reduced_edge(class_trace, trace, reduce_mode = "none"):
    """Classify *trace* against *class_trace* using reduced point masks.

    Returns the module-level ``adversarial_label`` when, for any conv
    ReLU node, the sample activates points that are zero in the class
    trace; otherwise returns ``normal_label``.
    """
    class_masks = reduce_edge_mask(
        filter_point_by_key(class_trace, key=key), reduce_mode=reduce_mode
    )
    sample_masks = reduce_edge_mask(
        filter_point_by_key(trace, key=key), reduce_mode=reduce_mode
    )
    for node_name in class_masks:
        # Only nodes whose name contains both "conv2d" and "Relu" count.
        if "conv2d" not in node_name or "Relu" not in node_name:
            continue
        outside_activation = sample_masks[node_name][
            class_masks[node_name] == 0
        ].sum()
        if outside_activation > 0:
            return adversarial_label
    return normal_label
# Compute the mean overlap ratio of attacked image
def attack_reduced_edge_detection(
    attack_name: str,
    attack_fn,
    generate_adversarial_fn,
    class_trace_fn: Callable[[int], IOAction[AttrMap]],
    select_fn: Callable[[np.ndarray], np.ndarray],
    overlap_fn: Callable[[AttrMap, AttrMap, str], float],
    path: str,
    per_channel: bool = False,
    per_node: bool = False,
    images_per_class: int = 1,
    num_gpus: float = 0.2,
    model_dir = "result/lenet/model_augmentation",
    transforms = None,
    transform_name = "noop",
    reduce_mode = "none",
    **kwargs,
):
    """Run reduced-edge adversarial detection for one attack and return a
    CsvIOAction whose init computes, per (class, image) pair, the detection
    verdict on both the clean and the adversarial example.

    Samples are skipped (empty row) when the clean image is misclassified,
    the attack fails, the attack does not change the prediction, or the
    trace cannot be reconstructed.
    """
    def get_overlap_ratio() -> pd.DataFrame:
        def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
            # Runs inside a ray worker; `nonlocal model_dir` lets abspath()
            # rewrite the shared path once.
            nonlocal model_dir
            mode.check(False)
            data_dir = abspath(MNIST_PATH)
            model_dir = abspath(model_dir)
            ckpt_dir = f"{model_dir}/ckpts"
            create_model = lambda: LeNet(data_format="channels_first")
            graph = LeNet.graph().load()
            model_fn = partial(
                model_fn_with_fetch_hook,
                create_model=create_model, graph=graph
            )
            # Prediction on the clean test image for this (class, image).
            predicted_label = predict(
                create_model=create_model,
                input_fn=lambda: mnist.test(data_dir)
                .filter(
                    lambda image, label: tf.equal(
                        tf.convert_to_tensor(class_id, dtype=tf.int32), label
                    )
                )
                .skip(image_id)
                .take(1)
                .batch(1),
                model_dir=ckpt_dir,
            )
            if predicted_label != class_id:
                return [{}] if per_node else {}
            adversarial_example = lenet_mnist_example(
                attack_name=attack_name,
                attack_fn=attack_fn,
                generate_adversarial_fn=generate_adversarial_fn,
                class_id=class_id,
                image_id=image_id,
                # model_dir not ckpt_dir
                model_dir=model_dir,
                transforms = transforms,
                transform_name = transform_name,
                mode = "test",
            ).load()
            if adversarial_example is None:
                return [{}] if per_node else {}
            adversarial_predicted_label = predict(
                create_model=create_model,
                input_fn=lambda: tf.data.Dataset.from_tensors(
                    mnist.normalize(adversarial_example)
                ),
                model_dir=ckpt_dir,
            )
            # Only keep samples where the attack actually flipped the label.
            if predicted_label == adversarial_predicted_label:
                return [{}] if per_node else {}
            trace = reconstruct_trace_from_tf(
                class_id=class_id,
                model_fn=model_fn,
                input_fn=lambda: mnist.test(data_dir, transforms=transforms)
                .filter(
                    lambda image, label: tf.equal(
                        tf.convert_to_tensor(class_id, dtype=tf.int32), label
                    )
                )
                .skip(image_id)
                .take(1)
                .batch(1),
                select_fn=select_fn,
                model_dir=ckpt_dir,
                per_channel=per_channel,
            )[0]
            if trace is None:
                return [{}] if per_node else {}
            adversarial_trace = reconstruct_trace_from_tf_brute_force(
                model_fn=model_fn,
                input_fn=lambda: tf.data.Dataset.from_tensors(
                    mnist.normalize(adversarial_example)
                ),
                select_fn=select_fn,
                model_dir=ckpt_dir,
                per_channel=per_channel,
            )[0]
            adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
            # Each trace is compared against the class trace of the label
            # the model predicted for it.
            row = {
                "image_id": image_id,
                "class_id": class_id,
                "original.prediction":
                    detect_by_reduced_edge(
                        class_trace_fn(class_id).load(),
                        trace,
                        reduce_mode,
                    ),
                "adversarial.prediction":
                    detect_by_reduced_edge(
                        class_trace_fn(adversarial_label).load(),
                        adversarial_trace,
                        reduce_mode,
                    ),
            }
            return row
        # Fan the (class, image) grid out over ray workers.
        detections = ray_iter(
            get_row,
            (
                (class_id, image_id)
                for image_id in range(0, images_per_class)
                for class_id in range(0, 10)
            ),
            # ((-1, image_id) for image_id in range(mnist_info.test().size)),
            chunksize=1,
            out_of_order=True,
            num_gpus=num_gpus,
        )
        traces = [detection for detection in detections if len(detection) != 0]
        return pd.DataFrame(traces)
    return CsvIOAction(path, init_fn=get_overlap_ratio)
def attack_transform_overlap(attack_name,
                             transform_name,
                             transforms,
                             reduce_mode = "none",
                             result_dir = "result/lenet/9transform_attack_overlap"):
    """Run reduced-edge detection for one (attack, transform) pair.

    Builds the class-trace loader at threshold 0.5, drives
    attack_reduced_edge_detection, saves the per-sample CSV under
    *result_dir* and returns it as a DataFrame.
    """
    name = attack_name+'_'+transform_name
    lenet_mnist_class_trace = class_trace(
        trace_name,
        model_config=LENET,
        data_config=data_config,
    )
    threshold = 0.5
    # DeepFool will shutdown when num_gpu<0.2
    num_gpus = 0.2
    overlap_fn = calc_trace_side_overlap
    per_channel = False
    path = os.path.join(result_dir, f"{name}_overlap.csv")
    # print(f"Computing {name}")
    # lenet_overlap_ratio = attack_reduced_edge_detection_count_violation(
    lenet_overlap_ratio = attack_reduced_edge_detection(
        attack_name=attack_name,
        attack_fn=attacks[attack_name][0],
        # CW attacks use their own generator; everything else goes
        # through foolbox.
        generate_adversarial_fn=cw_generate_adversarial_example
        if attack_name.startswith("CW")
        else foolbox_generate_adversarial_example,
        class_trace_fn=lambda class_id: lenet_mnist_class_trace(
            class_id,
            threshold,
            label=model_label,
            trace_dir = trace_dir,
        ),
        select_fn=lambda input: arg_approx(input, threshold),
        overlap_fn=overlap_fn,
        path=path,
        per_channel=per_channel,
        preprocessing=(0.1307, 0.3081),
        image_size=28,
        class_num=10,
        norm_fn=mnist.normalize,
        data_format="channels_first",
        **(attacks[attack_name][1] if len(attacks[attack_name]) == 2 else {}),
        images_per_class=images_per_class,
        model_dir=model_dir,
        num_gpus = num_gpus,
        transforms = transforms,
        transform_name = transform_name,
        reduce_mode = reduce_mode,
    )
    lenet_overlap_ratio.save()
    return lenet_overlap_ratio.load()
def compute_accuracy(trace_frame):
    """Compute ROC statistics from the detection frame.

    (Despite the name, this returns (fpr, tpr, roc_auc) rather than an
    accuracy figure.)  Adversarial rows are labelled +1, clean rows -1;
    non-finite predictions are dropped from both arrays.
    """
    adv_scores = trace_frame["adversarial.prediction"]
    clean_scores = trace_frame["original.prediction"]
    scores = np.concatenate([adv_scores, clean_scores])
    truth = np.concatenate(
        [
            np.repeat(1, adv_scores.shape[0]),
            np.repeat(-1, clean_scores.shape[0]),
        ]
    )
    keep = np.isfinite(scores)
    fpr, tpr, _ = metrics.roc_curve(truth[keep], scores[keep])
    return fpr, tpr, metrics.auc(fpr, tpr)
def draw_attack_transform_roc(exp_to_roc, save_name, result_dir):
    """Plot ROC curves for every experiment and save a PNG plus a text report.

    *exp_to_roc* maps experiment name -> (fpr, tpr, roc_auc, color).
    """
    plt.title('ROC')
    detection_results = {}
    for exp_name, item in exp_to_roc.items():
        fpr, tpr, roc_auc, color = item
        print(f"{exp_name}: fpr={fpr}, tpr={tpr}")
        plt.plot(fpr, tpr, color, label=f"{exp_name}_AUC={roc_auc:.2f}")
        detection_results[exp_name] = [fpr, tpr]
    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], 'r--')
    plt.ylabel('TPR')
    plt.xlabel('FPR')
    path = os.path.join(result_dir, f"{save_name}.png")
    plt.savefig(path)
    path = os.path.join(result_dir, f"{save_name}.txt")
    with open(path, "w") as f:
        # Bug fix: the original loop iterated `detection_results` but
        # wrote the stale `exp_name`/`fpr`/`tpr` left over from the plot
        # loop, repeating the last experiment's values for every line.
        for name, (fpr, tpr) in detection_results.items():
            print(f"{name}: fpr={fpr}, tpr={tpr}", file=f)
def attack_exp():
    """Run the configured attack over each enabled input transform and
    draw the combined ROC figure into the module-level result_dir."""
    exp_to_roc = {}
    os.makedirs(result_dir, exist_ok=True)
    # Each entry: (transform or None, name suffix, matplotlib color code).
    for transforms, transform_name, color in [
        [None, "noop", 'b'],
        # [Translate(dx=-5,dy=-5), "leftup", 'g'],
        # [Translate(dx=5,dy=5), "rightdown", 'c'],
        # [Translate(dx=-5), "left", 'y'],
        # [Translate(dy=-5), "up", 'm'],
    ]:
        exp_name = attack_name+"_"+transform_name
        print(f"Computing {exp_name}")
        trace_frame = attack_transform_overlap(attack_name,
                                               transform_name,
                                               transforms,
                                               reduce_mode = reduce_mode,
                                               result_dir=result_dir)
        exp_to_roc[exp_name] = compute_accuracy(trace_frame) + (color,)
    draw_attack_transform_roc(exp_to_roc,
                              save_name=attack_name,
                              result_dir=result_dir)
if __name__ == "__main__":
    # mode.debug()
    mode.local()
    # ray_init("gpu")
    ray_init(
        log_to_driver=False
    )
    # Fixed seeds for reproducible attacks/sampling.
    tf.set_random_seed(3)
    np.random.seed(3)
    random.seed(3)
    attack_exp()
| 31.80202 | 84 | 0.59643 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,737 | 0.110342 |
9f2d370d901cbf43cdc0f855d9288e71744ad5fe | 2,459 | py | Python | PRESUBMIT.py | iamtheonedaone1/n | 985419af32f9bbd3abc934db3edc09523477118a | [
"Apache-2.0"
] | 69 | 2016-01-11T13:22:52.000Z | 2022-03-21T02:13:12.000Z | PRESUBMIT.py | DalavanCloud/caterpillar | 985419af32f9bbd3abc934db3edc09523477118a | [
"Apache-2.0"
] | 57 | 2016-01-13T03:26:49.000Z | 2020-10-15T19:05:11.000Z | PRESUBMIT.py | DalavanCloud/caterpillar | 985419af32f9bbd3abc934db3edc09523477118a | [
"Apache-2.0"
] | 34 | 2016-08-10T23:58:06.000Z | 2021-07-03T22:38:36.000Z | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Caterpillar presubmit checks."""
import os
# Absolute paths of test-fixture trees that filter_test_data exempts
# from the long-line and stray-whitespace source checks below.
TEST_DATA_REALPATHS = [
    os.path.realpath(os.path.join('tests', 'test_app_minimal')),
    os.path.realpath(os.path.join('tests', 'test_app_tts')),
    os.path.realpath(os.path.join('tests', 'test_app_tts_output')),
]
def filter_test_data(affected_file):
  """Return False for files under the test-data trees, True otherwise."""
  realpath = os.path.realpath(affected_file.LocalPath())
  return not any(
      realpath.startswith(test_path) for test_path in TEST_DATA_REALPATHS)
def CheckChange(input_api, output_api):
  """Run the shared presubmit source checks plus the Python and Karma tests."""
  results = []
  results += input_api.canned_checks.CheckChangeHasNoTabs(
      input_api, output_api)
  results += input_api.canned_checks.CheckChangeHasDescription(
      input_api, output_api)
  results += input_api.canned_checks.CheckChangeHasNoCrAndHasOnlyOneEol(
      input_api, output_api)
  # Source checks skip the test-data trees via filter_test_data.
  results += input_api.canned_checks.CheckLongLines(input_api, output_api, 80,
      source_file_filter=filter_test_data)
  results += input_api.canned_checks.CheckChangeHasNoStrayWhitespace(
      input_api, output_api, source_file_filter=filter_test_data)
  results += input_api.RunTests(GetPythonTests(input_api, output_api))
  results += input_api.RunTests(GetKarmaTests(input_api, output_api))
  return results
def CheckChangeOnUpload(input_api, output_api):
  # Upload runs the same checks as commit.
  return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
  # Commit runs the same checks as upload.
  return CheckChange(input_api, output_api)
def GetKarmaTests(input_api, output_api):
  """Build the presubmit command that runs the JavaScript Karma suite."""
  cmd = [
      input_api.os_path.join('node_modules', 'karma', 'bin', 'karma'), 'start']
  return [input_api.Command('Karma', cmd, {}, output_api.PresubmitError)]
def GetPythonTests(input_api, output_api):
command = ['python', '-m', 'unittest', 'discover', '-s', 'src/', '-p',
'*_test.py']
return [input_api.Command('Python', command, {}, output_api.PresubmitError)]
| 36.701493 | 79 | 0.749085 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 804 | 0.326962 |
9f31226e643782fd28baebd4975c3c71e18371c6 | 794 | py | Python | python_advanced_retake_18_08_2021/fill_the_box.py | ivan-yosifov88/python_oop_june_2021 | 7ae6126065abbcce7ce97c86d1150ae307360249 | [
"MIT"
] | 1 | 2021-08-03T19:14:24.000Z | 2021-08-03T19:14:24.000Z | python_advanced_retake_18_08_2021/fill_the_box.py | ivan-yosifov88/python_oop_june_2021 | 7ae6126065abbcce7ce97c86d1150ae307360249 | [
"MIT"
] | null | null | null | python_advanced_retake_18_08_2021/fill_the_box.py | ivan-yosifov88/python_oop_june_2021 | 7ae6126065abbcce7ce97c86d1150ae307360249 | [
"MIT"
] | null | null | null | def fill_the_box(*args):
height = args[0]
length = args[1]
width = args[2]
cube_size = height * length * width
for i in range(3, len(args)):
if args[i] == "Finish":
return f"There is free space in the box. You could put {cube_size} more cubes."
if cube_size < args[i]:
cubes_left = args[i] - cube_size
for c in range(i + 1, len(args)):
if args[c] == "Finish":
break
cubes_left += args[c]
return f"No more free space! You have {cubes_left} more cubes."
cube_size -= args[i]
print(fill_the_box(2, 8, 2, 2, 1, 7, 3, 1, 5, "Finish"))
print(fill_the_box(5, 5, 2, 40, 11, 7, 3, 1, 5, "Finish"))
print(fill_the_box(10, 10, 10, 40, "Finish", 2, 15, 30))
| 33.083333 | 91 | 0.536524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.211587 |
9f32819e06ea0621caf86c2f15e2ecde23a5a0f6 | 4,759 | py | Python | neurokit2/ecg/ecg_process.py | danibene/NeuroKit | df0ab6696e7418cf8b8dcd3ed82dbf879fa61b3a | [
"MIT"
] | null | null | null | neurokit2/ecg/ecg_process.py | danibene/NeuroKit | df0ab6696e7418cf8b8dcd3ed82dbf879fa61b3a | [
"MIT"
] | null | null | null | neurokit2/ecg/ecg_process.py | danibene/NeuroKit | df0ab6696e7418cf8b8dcd3ed82dbf879fa61b3a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pandas as pd
from ..signal import signal_rate, signal_sanitize
from .ecg_clean import ecg_clean
from .ecg_delineate import ecg_delineate
from .ecg_peaks import ecg_peaks
from .ecg_phase import ecg_phase
from .ecg_quality import ecg_quality
def ecg_process(ecg_signal, sampling_rate=1000, method="neurokit"):
"""**Automated pipeline for preprocessing an ECG signal**
This function runs different preprocessing steps. **Help us improve the documentation of
this function by making it more tidy and useful!**
Parameters
----------
ecg_signal : Union[list, np.array, pd.Series]
The raw ECG channel.
sampling_rate : int
The sampling frequency of ``ecg_signal`` (in Hz, i.e., samples/second). Defaults to 1000.
method : str
The processing pipeline to apply. Defaults to ``"neurokit"``.
Returns
-------
signals : DataFrame
A DataFrame of the same length as the ``ecg_signal`` containing the following columns:
* ``"ECG_Raw"``: the raw signal.
* ``"ECG_Clean"``: the cleaned signal.
* ``"ECG_R_Peaks"``: the R-peaks marked as "1" in a list of zeros.
* ``"ECG_Rate"``: heart rate interpolated between R-peaks.
* ``"ECG_P_Peaks"``: the P-peaks marked as "1" in a list of zeros
* ``"ECG_Q_Peaks"``: the Q-peaks marked as "1" in a list of zeros .
* ``"ECG_S_Peaks"``: the S-peaks marked as "1" in a list of zeros.
* ``"ECG_T_Peaks"``: the T-peaks marked as "1" in a list of zeros.
* ``"ECG_P_Onsets"``: the P-onsets marked as "1" in a list of zeros.
* ``"ECG_P_Offsets"``: the P-offsets marked as "1" in a list of zeros (only when method in
``ecg_delineate()`` is wavelet).
* ``"ECG_T_Onsets"``: the T-onsets marked as "1" in a list of zeros (only when method in
``ecg_delineate()`` is wavelet).
* ``"ECG_T_Offsets"``: the T-offsets marked as "1" in a list of zeros.
* ``"ECG_R_Onsets"``: the R-onsets marked as "1" in a list of zeros (only when method in
``ecg_delineate()`` is wavelet).
* ``"ECG_R_Offsets"``: the R-offsets marked as "1" in a list of zeros (only when method in
``ecg_delineate()`` is wavelet).
* ``"ECG_Phase_Atrial"``: cardiac phase, marked by "1" for systole and "0" for diastole.
* ``"ECG_Phase_Ventricular"``: cardiac phase, marked by "1" for systole and "0" for
diastole.
* ``"ECG_Atrial_PhaseCompletion"``: cardiac phase (atrial) completion, expressed in
percentage
(from 0 to 1), representing the stage of the current cardiac phase.
* ``"ECG_Ventricular_PhaseCompletion"``: cardiac phase (ventricular) completion, expressed
in percentage (from 0 to 1), representing the stage of the current cardiac phase.
* **This list is not up-to-date. Help us improve the documentation!**
info : dict
A dictionary containing the samples at which the R-peaks occur, accessible with the key
``"ECG_Peaks"``, as well as the signals' sampling rate.
See Also
--------
ecg_clean, ecg_peaks, ecg_delineate, ecg_phase, ecg_plot, .signal_rate
Examples
--------
.. ipython:: python
import neurokit2 as nk
# Simulate ECG signal
ecg = nk.ecg_simulate(duration=15, sampling_rate=1000, heart_rate=80)
# Preprocess ECG signal
signals, info = nk.ecg_process(ecg, sampling_rate=1000)
# Visualize
@savefig p_ecg_process.png scale=100%
nk.ecg_plot(signals)
@suppress
plt.close()
"""
# Sanitize input
ecg_signal = signal_sanitize(ecg_signal)
ecg_cleaned = ecg_clean(ecg_signal, sampling_rate=sampling_rate, method=method)
# R-peaks
instant_peaks, rpeaks, = ecg_peaks(
ecg_cleaned=ecg_cleaned, sampling_rate=sampling_rate, method=method, correct_artifacts=True
)
rate = signal_rate(rpeaks, sampling_rate=sampling_rate, desired_length=len(ecg_cleaned))
quality = ecg_quality(ecg_cleaned, rpeaks=None, sampling_rate=sampling_rate)
signals = pd.DataFrame(
{"ECG_Raw": ecg_signal, "ECG_Clean": ecg_cleaned, "ECG_Rate": rate, "ECG_Quality": quality}
)
# Additional info of the ecg signal
delineate_signal, delineate_info = ecg_delineate(
ecg_cleaned=ecg_cleaned, rpeaks=rpeaks, sampling_rate=sampling_rate
)
cardiac_phase = ecg_phase(ecg_cleaned=ecg_cleaned, rpeaks=rpeaks, delineate_info=delineate_info)
signals = pd.concat([signals, instant_peaks, delineate_signal, cardiac_phase], axis=1)
# Rpeaks location and sampling rate in dict info
info = rpeaks
info["sampling_rate"] = sampling_rate
return signals, info
| 40.675214 | 100 | 0.664425 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,478 | 0.730826 |
9f33897db986b714f2efcae7ab4c12ac9c297fdd | 457 | py | Python | lab3/lab3a.py | Speedy905/ops435 | ed0fb0d44fbd2d91b26201bdaff06cbb15476e66 | [
"MIT"
] | 1 | 2021-01-30T04:52:17.000Z | 2021-01-30T04:52:17.000Z | lab3/lab3a.py | Speedy905/ops435 | ed0fb0d44fbd2d91b26201bdaff06cbb15476e66 | [
"MIT"
] | null | null | null | lab3/lab3a.py | Speedy905/ops435 | ed0fb0d44fbd2d91b26201bdaff06cbb15476e66 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#Antonio Karlo Mijares
# return_text_value function
def return_text_value():
name = 'Terry'
greeting = 'Good Morning ' + name
return greeting
# return_number_value function
def return_number_value():
num1 = 10
num2 = 5
num3 = num1 + num2
return num3
# Main program
if __name__ == '__main__':
print('python code')
text = return_text_value()
print(text)
number = return_number_value()
print(str(number))
| 18.28 | 34 | 0.702407 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 161 | 0.352298 |
9f33b1abd556580df9d3863d91371106cc98ca82 | 1,958 | py | Python | sightpy/geometry/sphere.py | ulises1229/Python-Raytracer | ad89b9dabda1c3eeb68af2d3578c3f38dee9f5b9 | [
"MIT"
] | 326 | 2020-08-14T07:29:40.000Z | 2022-03-30T11:13:32.000Z | sightpy/geometry/sphere.py | ulises1229/Python-Raytracer | ad89b9dabda1c3eeb68af2d3578c3f38dee9f5b9 | [
"MIT"
] | 7 | 2020-08-14T21:57:56.000Z | 2021-06-09T00:53:04.000Z | sightpy/geometry/sphere.py | ulises1229/Python-Raytracer | ad89b9dabda1c3eeb68af2d3578c3f38dee9f5b9 | [
"MIT"
] | 37 | 2020-08-14T17:37:56.000Z | 2022-03-30T09:37:22.000Z | import numpy as np
from ..utils.constants import *
from ..utils.vector3 import vec3
from ..geometry import Primitive, Collider
class Sphere(Primitive):
def __init__(self,center, material, radius, max_ray_depth = 5, shadow = True):
super().__init__(center, material, max_ray_depth, shadow = shadow)
self.collider_list += [Sphere_Collider(assigned_primitive = self, center = center, radius = radius)]
self.bounded_sphere_radius = radius
def get_uv(self, hit):
return hit.collider.get_uv(hit)
class Sphere_Collider(Collider):
def __init__(self, radius, **kwargs):
super().__init__(**kwargs)
self.radius = radius
def intersect(self, O, D):
b = 2 * D.dot(O - self.center)
c = self.center.square_length() + O.square_length() - 2 * self.center.dot(O) - (self.radius * self.radius)
disc = (b ** 2) - (4 * c)
sq = np.sqrt(np.maximum(0, disc))
h0 = (-b - sq) / 2
h1 = (-b + sq) / 2
h = np.where((h0 > 0) & (h0 < h1), h0, h1)
pred = (disc > 0) & (h > 0)
M = (O + D * h)
NdotD = ((M - self.center) * (1. / self.radius) ).dot(D)
pred1 = (disc > 0) & (h > 0) & (NdotD > 0)
pred2 = (disc > 0) & (h > 0) & (NdotD < 0)
pred3 = True
#return an array with hit distance and the hit orientation
return np.select([pred1,pred2,pred3] , [[h, np.tile(UPDOWN, h.shape)], [h,np.tile(UPWARDS, h.shape)], FARAWAY])
def get_Normal(self, hit):
# M = intersection point
return (hit.point - self.center) * (1. / self.radius)
def get_uv(self, hit):
M_C = (hit.point - self.center) / self.radius
phi = np.arctan2(M_C.z, M_C.x)
theta = np.arcsin(M_C.y)
u = (phi + np.pi) / (2*np.pi)
v = (theta + np.pi/2) / np.pi
return u,v | 37.653846 | 120 | 0.53524 | 1,811 | 0.924923 | 0 | 0 | 0 | 0 | 0 | 0 | 84 | 0.042901 |
9f3474bc67e71222949a710987df560b94e4e15b | 430 | py | Python | src/lib/worker/worker_1.py | sankhaMukherjee/celeryTest | 6c002fe3ab2f6bb5e06a36f98163c3b53f719a91 | [
"MIT"
] | null | null | null | src/lib/worker/worker_1.py | sankhaMukherjee/celeryTest | 6c002fe3ab2f6bb5e06a36f98163c3b53f719a91 | [
"MIT"
] | 1 | 2021-06-01T23:31:02.000Z | 2021-06-01T23:31:02.000Z | src/lib/worker/worker_1.py | sankhaMukherjee/celeryTest | 6c002fe3ab2f6bb5e06a36f98163c3b53f719a91 | [
"MIT"
] | null | null | null | from logs import logDecorator as lD
import json, psycopg2
from lib.celery.App import app
config = json.load(open('../config/config.json'))
logBase = config['logging']['logBase'] + 'lib.worker.worker_1'
@app.task
@lD.log(logBase + '.add')
def add(logger, a, b):
try:
result = a+b
return result
except Exception as e:
logger.error('Unable to log the task: {e}')
return None
return
| 19.545455 | 62 | 0.637209 | 0 | 0 | 0 | 0 | 222 | 0.516279 | 0 | 0 | 97 | 0.225581 |
9f358868f7cc1c35fcff13c6a034e85e0c1f1f16 | 647 | py | Python | chapter-3/foreach.py | outerbounds/dsbook | 411b55c2057a3ba1e1d893cde03d6ec97d529969 | [
"Apache-2.0"
] | 27 | 2021-05-29T14:36:34.000Z | 2022-03-22T10:12:40.000Z | chapter-3/foreach.py | saibaldas/dsbook | be6b4670ed33a2001de8f28f6fb4151111cb26ca | [
"Apache-2.0"
] | null | null | null | chapter-3/foreach.py | saibaldas/dsbook | be6b4670ed33a2001de8f28f6fb4151111cb26ca | [
"Apache-2.0"
] | 6 | 2021-05-29T14:36:40.000Z | 2022-03-09T14:57:46.000Z | from metaflow import FlowSpec, step
class ForeachFlow(FlowSpec):
@step
def start(self):
self.creatures = ['bird', 'mouse', 'dog']
self.next(self.analyze_creatures, foreach='creatures')
@step
def analyze_creatures(self):
print("Analyzing", self.input)
self.creature = self.input
self.score = len(self.creature)
self.next(self.join)
@step
def join(self, inputs):
self.best = max(inputs, key=lambda x: x.score).creature
self.next(self.end)
@step
def end(self):
print(self.best, 'won!')
if __name__ == '__main__':
ForeachFlow()
| 22.310345 | 63 | 0.599691 | 562 | 0.868624 | 0 | 0 | 502 | 0.775889 | 0 | 0 | 56 | 0.086553 |
9f385adb32c54c2cf674753727414734f8446d67 | 2,150 | py | Python | www/wotd/models.py | nullsym/sachiye | f08d890697b7da502e329d40dbf774c4c8af187d | [
"MIT"
] | null | null | null | www/wotd/models.py | nullsym/sachiye | f08d890697b7da502e329d40dbf774c4c8af187d | [
"MIT"
] | null | null | null | www/wotd/models.py | nullsym/sachiye | f08d890697b7da502e329d40dbf774c4c8af187d | [
"MIT"
] | null | null | null | # Imports from our app
from wotd import db, app
# Flask-Login works via the LoginManager class: Thus, we need
# to start things off by telling LoginManager about our Flask app
from flask_login import LoginManager
login_manager = LoginManager(app)
login_manager.init_app(app)
# Password hashing
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
# Get date in a specific timezone
from datetime import datetime
from pytz import timezone
# Use the timezone of the person adding the WOTD
def get_date():
fmt = "%Y-%m-%d"
naive = datetime.now(timezone('America/Los_Angeles'))
return naive.strftime(fmt)
###############
# Our classes #
###############
class User(UserMixin, db.Model):
username = db.Column(db.String(80), nullable=False, primary_key=True)
password = db.Column(db.String(120))
def __repr__(self):
return '<user %r:%r>' % (self.username, self.password)
def set_password(self, password):
self.password = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password, password)
def get_id(self):
return self.username
class Wotd(db.Model):
uid = db.Column(db.Integer(), primary_key=True, unique=True)
date = db.Column(db.String(), nullable=False, default=get_date())
# JP word, its romanji, and its definition. All required.
wotd = db.Column(db.String(), nullable=False, unique=True)
romaji = db.Column(db.String(), nullable=False)
defn = db.Column(db.String(), nullable=False)
# Not required, but nice to have
example = db.Column(db.Text())
classification = db.Column(db.String(20))
def __repr__(self):
return '<WOTD: %r:%r:%r>' % (self.uid, self.date, self.wotd)
# Given a user ID, return the associated user object
# It should take unicode ID of a user and returns the corresponding user object
# Should return None if the ID is not valid
@login_manager.user_loader
def user_loader(email):
# Returns true if the user does not
# exist, otherwise it returns the user object
return User.query.get(email) | 35.245902 | 79 | 0.709767 | 1,077 | 0.50093 | 0 | 0 | 173 | 0.080465 | 0 | 0 | 698 | 0.324651 |
9f3b23013912e3d6b5ff12494cd3c60f642390af | 316 | py | Python | Aula07/03EstruturaDeRepeticaoFor.py | gutoffline/curso-python-2021 | 4a9a5f11188ad734402d1dafa7ea627179e7079b | [
"MIT"
] | null | null | null | Aula07/03EstruturaDeRepeticaoFor.py | gutoffline/curso-python-2021 | 4a9a5f11188ad734402d1dafa7ea627179e7079b | [
"MIT"
] | null | null | null | Aula07/03EstruturaDeRepeticaoFor.py | gutoffline/curso-python-2021 | 4a9a5f11188ad734402d1dafa7ea627179e7079b | [
"MIT"
] | null | null | null | """
for x in range(10):
print(x)
for x in range(20, 30):
print(x)
for x in range(10,100,5):
print(x)
for x in range(10,1,-1):
print(x)
print(range(10))
"""
frutas = ["maçã", "laranja", "banana", "morango"]
for x in range(len(frutas)):
print(frutas[x])
for fruta in frutas:
print(fruta)
| 13.73913 | 49 | 0.582278 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 210 | 0.660377 |
9f3c05f654e0e6c8154b9b599c1b51c9e8da4e55 | 2,092 | py | Python | twitter_functions.py | SakiFu/Twitter | b7365d91a7321f09cba38320a53f207e9396f81a | [
"MIT"
] | null | null | null | twitter_functions.py | SakiFu/Twitter | b7365d91a7321f09cba38320a53f207e9396f81a | [
"MIT"
] | null | null | null | twitter_functions.py | SakiFu/Twitter | b7365d91a7321f09cba38320a53f207e9396f81a | [
"MIT"
] | null | null | null | import twitter
import util
from config import *
BOSTON_WOEID = 2367105
api = twitter.Api(consumer_key=key,consumer_secret=secret,access_token_key=access_key,access_token_secret=access_secret)
def search(searchTerm):
"""
Print recent tweets containing `searchTerm`.
To test this function, at the command line run:
python twitter_api.py --search=<search term>
For example,
python twitter_api.py --search=python
"""
api = twitter.Api(consumer_key=key,consumer_secret=secret,access_token_key=access_key,access_token_secret=access_secret)
tweets = api.GetSearch(searchTerm)
for tweet in tweets:
util.safe_print(tweet.GetText())
def trendingTopics():
"""
Print the currently trending topics.
To test this function, at the command line run:
python twitter_api.py -t
"""
api = twitter.Api(consumer_key=key,consumer_secret=secret,access_token_key=access_key,access_token_secret=access_secret)
trending_topics = api.GetTrendsWoeid(BOSTON_WOEID)
for topic in trending_topics:
util.safe_print(topic.name)
def userTweets(username):
"""
Print recent tweets by `username`.
You may find the twitter.Api() function GetUserTimeline() helpful.
To test this function, at the command line run:
python twitter_api.py -u <username>
For example,
python twitter_api.py -u bostonpython
"""
api = twitter.Api(consumer_key=key,consumer_secret=secret,access_token_key=access_key,access_token_secret=access_secret)
user_tweet = api.GetUserTimeline(screen_name=username)
for tweet in user_tweet:
util.safe_print(tweet.GetText())
def trendingTweets():
"""
Print tweets for all the trending topics.
To test this function, at the command line run:
python twitter_api.py -w
"""
api = twitter.Api(consumer_key=key,consumer_secret=secret,access_token_key=access_key,access_token_secret=access_secret)
trending_topics = api.GetTrendsWoeid(BOSTON_WOEID)
for tweet in trending_topics:
util.safe_print(tweet.GetText())
| 34.295082 | 124 | 0.732792 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 792 | 0.378585 |
9f3de39af6b193748d1dcd26db1ccd2c6dfaa504 | 2,830 | py | Python | tests/test_download.py | HeavyTony2/downloader-cli | 215df466e8b2a0d1460a4f2f549ff2fcec0e9e18 | [
"MIT"
] | 301 | 2019-11-12T15:58:14.000Z | 2022-03-23T20:07:32.000Z | tests/test_download.py | HeavyTony2/downloader-cli | 215df466e8b2a0d1460a4f2f549ff2fcec0e9e18 | [
"MIT"
] | 15 | 2019-11-12T23:09:51.000Z | 2022-01-17T13:45:51.000Z | tests/test_download.py | HeavyTony2/downloader-cli | 215df466e8b2a0d1460a4f2f549ff2fcec0e9e18 | [
"MIT"
] | 30 | 2019-11-13T05:16:44.000Z | 2022-03-14T23:00:01.000Z | """Tests various methods of the Download
class.
All the methods that start with test are used
to test a certain function. The test method
will have the name of the method being tested
seperated by an underscore.
If the method to be tested is extract_content,
the test method name will be test_extract_content
"""
from hashlib import md5
from os import remove
from downloader_cli.download import Download
TEST_URL = "http://212.183.159.230/5MB.zip"
def test__extract_border_icon():
"""Test the _extract_border_icon method"""
download = Download(TEST_URL)
icon_one = download._extract_border_icon("#")
icon_two = download._extract_border_icon("[]")
icon_none = download._extract_border_icon("")
icon_more = download._extract_border_icon("sdafasdfasdf")
assert icon_one == ('#', '#'), "Should be ('#', '#')"
assert icon_two == ('[', ']'), "Should be ('[', '])"
assert icon_none == ('|', '|'), "Should be ('|', '|')"
assert icon_more == ('|', '|'), "Should be ('|', '|')"
def test__build_headers():
"""Test the _build_headers method"""
download = Download(TEST_URL)
download._build_headers(1024)
header_built = download.headers
assert header_built == {"Range": "bytes={}-".format(1024)}, \
"Should be 1024"
def test__preprocess_conn():
"""Test the _preprocess_conn method"""
download = Download(TEST_URL)
download._preprocess_conn()
assert download.f_size == 5242880, "Should be 5242880"
def test__format_size():
"""
Test the function that formats the size
"""
download = Download(TEST_URL)
size, unit = download._format_size(255678999)
# Size should be 243.83449459075928
# and unit should be `MB`
size = int(size)
assert size == 243, "Should be 243"
assert unit == "MB", "Should be MB"
def test__format_time():
"""
Test the format time function that formats the
passed time into a readable value
"""
download = Download(TEST_URL)
time, unit = download._format_time(2134991)
# Time should be 9 days
assert int(time) == 9, "Should be 9"
assert unit == "d", "Should be d"
time, unit = download._format_time(245)
# Time should be 4 minutes
assert int(time) == 4, "Should be 4"
assert unit == "m", "Should be m"
def test_file_integrity():
"""
Test the integrity of the downloaded file.
We will test the 5MB.zip file which has a hash
of `eb08885e3082037a12a42308c521fa3c`.
"""
HASH = "eb08885e3082037a12a42308c521fa3c"
download = Download(TEST_URL)
download.download()
# Once download is done, check the integrity
_hash = md5(open("5MB.zip", "rb").read()).hexdigest()
assert _hash == HASH, "Integrity check failed for 5MB.zip"
# Remove the file now
remove(download.basename)
| 25.495495 | 65 | 0.664311 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,305 | 0.461131 |
9f400427c659e5c8d5c0f656dfde230a04071a6b | 9,932 | py | Python | Yuanjunling1/boss-xiadan.py | yuanjunling/PycharmProjects | 087b1a30818bbe2bf3972c9340f61ca4b792eb7d | [
"bzip2-1.0.6"
] | null | null | null | Yuanjunling1/boss-xiadan.py | yuanjunling/PycharmProjects | 087b1a30818bbe2bf3972c9340f61ca4b792eb7d | [
"bzip2-1.0.6"
] | null | null | null | Yuanjunling1/boss-xiadan.py | yuanjunling/PycharmProjects | 087b1a30818bbe2bf3972c9340f61ca4b792eb7d | [
"bzip2-1.0.6"
] | null | null | null | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re, os
class BossXiadan(unittest.TestCase):
def setUp(self):
chrome_driver=os.path.abspath(r'C:\Python27\chromedriver.exe')
os.environ['webdriver.chrome.driver']=chrome_driver
self.driver=webdriver.Chrome()
self.driver.get("https://cas.qa.great-tao.com:8443/cas-server/login?service=http://boss.qa.great-tao.com/cas")
self.verificationErrors = []
self.accept_next_alert = True
def test_boss_xiadan(self):
driver = self.driver
driver.get("https://cas.qa.great-tao.com:8443/cas-server/login?service=http://boss.qa.great-tao.com/cas")
driver.find_element_by_id("username").clear()
driver.find_element_by_id("username").send_keys("dingni")
driver.find_element_by_id("password").clear()
driver.find_element_by_id("password").send_keys("1234")
driver.find_element_by_id("captcha").clear()
driver.find_element_by_id("captcha").send_keys("greattao0818")
driver.find_element_by_name("submit").click()
driver.find_element_by_link_text(u"BOSS系统").click()
time.sleep(3)
driver.find_element_by_link_text(u"销售线索").click()
time.sleep(2)
driver.find_element_by_link_text(u"添加线索").click()
time.sleep(2)
driver.switch_to.frame(driver.find_element_by_xpath("//iframe[contains(@src,'http://boss.qa.great-tao.com/boss-leads-web/leads/add')]"))
#切换frame
driver.find_element_by_id("sourceRemark").clear()
driver.find_element_by_id("sourceRemark").send_keys("test")
driver.find_element_by_id("company").clear()
driver.find_element_by_id("company").send_keys("dntest")
xiansuo_style1=driver.find_element_by_id("type")
Select(xiansuo_style1).select_by_value('3')#下拉框选择
xiansuo_source=driver.find_element_by_id("source")
Select(xiansuo_source).select_by_value('3')
driver.find_element_by_id("contact").clear()
driver.find_element_by_id("contact").send_keys("dn")
driver.find_element_by_id("tel").clear()
driver.find_element_by_id("tel").send_keys("15987598758")
driver.find_element_by_id("email").clear()
driver.find_element_by_id("email").send_keys("156848@126.com")
driver.find_element_by_id("content").clear()
driver.find_element_by_id("content").send_keys("dntest")
driver.find_element_by_id("submit").click()
driver.switch_to.default_content()#换回主frame
driver.find_element_by_link_text(u"线索分配").click()
driver.switch_to.frame(driver.find_element_by_xpath("//iframe[contains(@src,'http://boss.qa.great-tao.com/boss-leads-web/leads/director/list')]"))
#切换frame
xiansuo_style2=driver.find_element_by_id("type")
Select(xiansuo_style2).select_by_value('3')
xiansuo_status2=driver.find_element_by_id("status")
Select(xiansuo_status2).select_by_value('1')
driver.find_element_by_id("btn_query").click()
driver.maximize_window()
time.sleep(1)
driver.find_element_by_xpath("//*[@id='table']/tbody/tr[1]/td[10]/a[2]").click()
time.sleep(2)
driver.switch_to.frame(driver.find_element_by_id("layui-layer-iframe1"))#切换frame
div_fenpei=driver.find_element_by_class_name("col-xs-6").find_element_by_id("receiver")
Select(div_fenpei).select_by_value('53')
driver.find_element_by_id("confirm").click()
driver.switch_to.default_content()#换回主frame
driver.switch_to.frame(driver.find_element_by_xpath("//iframe[contains(@src,'http://boss.qa.great-tao.com/boss-leads-web/leads/director/list')]"))
Select(xiansuo_status2).select_by_value('7')
time.sleep(1)
driver.find_element_by_id("btn_query").click()
time.sleep(3)
driver.find_element_by_xpath("//*[@id='table']/tbody/tr[1]/td[10]/a[1]").click()
time.sleep(1)
driver.find_element_by_link_text(u"关联").click()
time.sleep(3)
driver.switch_to.frame(driver.find_element_by_id("layui-layer-iframe1"))#切换frame
driver.find_element_by_xpath("//*[@id='table']/tbody/tr[1]").click()
driver.find_element_by_id("confirm").click()
driver.switch_to.default_content()#换回主frame
driver.switch_to.frame(driver.find_element_by_xpath("//iframe[contains(@src,'http://boss.qa.great-tao.com/boss-leads-web/leads/director/list')]"))
driver.find_element_by_link_text(u"跟进").click()
time.sleep(1)
driver.switch_to.default_content()#换回主frame
driver.switch_to.frame(driver.find_element_by_xpath("//iframe[contains(@src,'http://boss.qa.great-tao.com/boss-leads-web/leads/director/list')]"))
driver.switch_to.frame(driver.find_element_by_id("layui-layer-iframe1"))#切换frame
time.sleep(1)
driver.find_element_by_id("followupRemark").clear()
driver.find_element_by_id("followupRemark").send_keys("test dn")
driver.find_element_by_id("followupDate").click()
driver.find_element_by_xpath("/html/body/div[2]/div[3]/table/tbody/tr[4]/td[3]").click()
driver.find_element_by_id("confirm").click()
driver.switch_to.default_content()#换回主frame
driver.switch_to.frame(driver.find_element_by_xpath("//iframe[contains(@src,'http://boss.qa.great-tao.com/boss-leads-web/leads/director/list')]"))
driver.find_element_by_link_text(u"下单").click()
driver.find_element_by_id("selectServiceProvider").click()
driver.find_element_by_id("popup").click()
driver.find_element_by_xpath("(//input[@name='btSelectItem'])[35]").click()
driver.find_element_by_id("queryService").click()
driver.find_element_by_name("orderAddressList[0].name").clear()
driver.find_element_by_name("orderAddressList[0].name").send_keys("testdn001")
driver.find_element_by_name("orderAddressList[0].address").clear()
driver.find_element_by_name("orderAddressList[0].address").send_keys("testdn001")
driver.find_element_by_name("orderAddressList[1].name").clear()
driver.find_element_by_name("orderAddressList[1].name").send_keys("testdn0001")
driver.find_element_by_name("orderAddressList[2].name").clear()
driver.find_element_by_name("orderAddressList[2].name").send_keys("testdn00001")
driver.find_element_by_name("goodsList[0].goodsName").clear()
driver.find_element_by_name("goodsList[0].goodsName").send_keys("testdn0001")
driver.find_element_by_name("goodsList[0].num").clear()
driver.find_element_by_name("goodsList[0].num").send_keys("1")
Select(driver.find_element_by_name("goodsList[0].unit")).select_by_visible_text("CTNS")
driver.find_element_by_name("goodsList[0].grossWeight").clear()
driver.find_element_by_name("goodsList[0].grossWeight").send_keys("1")
driver.find_element_by_name("goodsList[0].measurement").clear()
driver.find_element_by_name("goodsList[0].measurement").send_keys("1")
driver.find_element_by_xpath("(//input[@name='orderExpand.containerMark'])[2]").click()
driver.find_element_by_xpath("//div[@id='sizzle-1489114881098']/div[3]/table/tbody/tr/td[5]").click()
driver.find_element_by_xpath("//div[@id='sizzle-1489114881098']/div[3]/table/tbody/tr[3]/td[6]").click()
driver.find_element_by_id("select2-orderportOfShipment-qa-container").click()
driver.find_element_by_css_selector("span.select2-selection__placeholder").click()
Select(driver.find_element_by_name("orderExpand.typeOfShipping")).select_by_visible_text(u"海运")
Select(driver.find_element_by_name("order.freightPayableat")).select_by_visible_text(u"运费预付")
driver.find_element_by_name("selectService").click()
driver.find_element_by_name("orderExpand.entrustName").clear()
driver.find_element_by_name("orderExpand.entrustName").send_keys("11")
driver.find_element_by_name("orderExpand.entrustTel").clear()
driver.find_element_by_name("orderExpand.entrustTel").send_keys("11")
Select(driver.find_element_by_id("haha")).select_by_visible_text(u"海运费")
driver.find_element_by_name("logisticsCosts[3].price").clear()
driver.find_element_by_name("logisticsCosts[3].price").send_keys("11")
Select(driver.find_element_by_id("store-name")).select_by_visible_text(u"吊机费")
driver.find_element_by_name("logisticsCosts[4].price").clear()
driver.find_element_by_name("logisticsCosts[4].price").send_keys("11")
driver.find_element_by_name("logisticsCosts[1].price").clear()
driver.find_element_by_name("logisticsCosts[1].price").send_keys("11")
driver.find_element_by_id("saveService").click()
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException as e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException as e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
'''def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)'''
if __name__ == "__main__":
unittest.main()
| 58.423529 | 154 | 0.700665 | 9,655 | 0.960123 | 0 | 0 | 0 | 0 | 0 | 0 | 3,067 | 0.304992 |
9f418384a2feb7b42cf899ae93dde9aa9269dff9 | 10,830 | py | Python | notification.py | phillco/talon-accessibility | 1872a759817073bdb3b6c92900e6e3f2ba750b9a | [
"MIT"
] | 10 | 2022-01-23T19:02:36.000Z | 2022-02-22T04:48:35.000Z | notification.py | phillco/talon-accessibility | 1872a759817073bdb3b6c92900e6e3f2ba750b9a | [
"MIT"
] | 4 | 2022-01-23T21:33:06.000Z | 2022-03-22T02:37:40.000Z | notification.py | phillco/talon_accessibility | a35cfb0cb302b72b13edfa4fb245e482d4c9b827 | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from itertools import chain
from typing import Optional
from talon import Context, Module, actions, app, cron, ui
# XXX(nriley) actions are being returned out of order; that's a problem if we want to pop up a menu
mod = Module()
mod.list("notification_actions", desc="Notification actions")
mod.list("notification_apps", desc="Notification apps")
notification_debug = mod.setting(
"notification_debug",
type=bool,
default=False,
desc="Display macOS notification debugging information.",
)
try:
from rich.console import Console
console = Console(color_system="truecolor", soft_wrap=True)
def debug_print(obj: any, *args):
"""Pretty prints the object"""
if not notification_debug.get():
return
if args:
console.out(obj, *args)
else:
console.print(obj)
except ImportError:
def debug_print(obj: any, *args):
if not notification_debug.get():
return
print(obj, *args)
@mod.action_class
class Actions:
def notification_action(index: int, action: str) -> bool:
"""Perform the specified action on the notification (stack) at the specified index"""
return False
def notification_app_action(app_name: str, action: str) -> bool:
"""Perform the specified action on the first notification (stack) for the specified app"""
return False
def notifications_update():
"""Update notification list to reflect what is currently onscreen"""
# (poll? not try to keep up? not sure what else to do)
def notification_center():
"""Display or hide Notification Center"""
@dataclass(frozen=True)
class Notification:
identifier: int
subrole: str = field(default=None, compare=False)
app_name: str = field(default=None, compare=False)
stacking_identifier: str = field(default=None, compare=False)
title: str = field(default=None, compare=False)
subtitle: str = field(default=None, compare=False)
body: str = field(default=None, compare=False)
# action values are named "Name:<name>\nTarget:0x0\nSelector:(null)"; keys are speakable
actions: dict[str, str] = field(default=None, compare=False)
@staticmethod
def group_identifier(group):
identifier = getattr(group, "AXIdentifier", None)
if identifier is None or not str.isdigit(identifier):
return None
return int(identifier)
@staticmethod
def from_group(group, identifier):
group_actions = group.actions
if "AXScrollToVisible" in group_actions:
del group_actions["AXScrollToVisible"] # not useful
# XXX(nriley) create_spoken_forms_from_list doesn't handle apostrophes correctly
# https://github.com/knausj85/knausj_talon/issues/780
group_actions = {
name.lower().replace("’", "'"): action
for action, name in group_actions.items()
}
title = body = subtitle = None
try:
title = group.children.find_one(AXIdentifier="title").AXValue
except ui.UIErr:
pass
try:
body = group.children.find_one(AXIdentifier="body").AXValue
except ui.UIErr:
pass
try:
subtitle = group.children.find_one(AXIdentifier="subtitle").AXValue
except ui.UIErr:
pass
return Notification(
identifier=identifier,
subrole=group.AXSubrole,
app_name=group.AXDescription,
stacking_identifier=group.AXStackingIdentifier,
title=title,
subtitle=subtitle,
body=body,
actions=group_actions,
)
@staticmethod
def notifications_in_window(window):
notifications = []
for group in window.children.find(AXRole="AXGroup"):
if not (identifier := Notification.group_identifier(group)):
continue
notification = Notification.from_group(group, identifier)
notifications.append(notification)
return notifications
# Active NotificationMonitor instance (set once Notification Center is running).
MONITOR = None

# macOS-only context supplying list contents and action implementations.
ctx = Context()
ctx.matches = r"""
os: mac
"""
ctx.lists["user.notification_actions"] = {}
ctx.lists["user.notification_apps"] = {}
@ctx.action_class("user")
class UserActions:
def notification_action(index: int, action: str) -> bool:
return MONITOR.perform_action(action, index=index)
def notification_app_action(app_name: str, action: str) -> bool:
return MONITOR.perform_action(action, app_name=app_name)
def notifications_update():
MONITOR.update_notifications()
def notification_center():
cc = ui.apps(bundle="com.apple.controlcenter")[0]
cc.element.children.find_one(AXRole="AXMenuBar", max_depth=0).children.find_one(
AXRole="AXMenuBarItem",
AXSubrole="AXMenuExtra",
AXIdentifier="com.apple.menuextra.clock",
max_depth=0,
).perform("AXPress")
class NotificationMonitor:
    """Tracks Notification Center's onscreen notifications and keeps the
    Talon lists (``user.notification_actions``/``user.notification_apps``)
    in sync with them."""

    __slots__ = (
        "pid",
        "notifications",
    )

    def __init__(self, app: ui.App):
        self.pid = app.pid
        # Most recently observed notifications, in onscreen (top-to-bottom) order.
        self.notifications = []

        ui.register("win_open", self.win_open)
        ui.register("win_close", self.win_close)
        ui.register("app_close", self.app_closed)
        self.update_notifications()

    def win_open(self, window):
        """Add the notifications from a newly opened Notification Center window."""
        if not window.app.pid == self.pid:
            return
        notifications = Notification.notifications_in_window(window)
        self.update_notifications(adding=notifications)

    def notification_groups(self):
        """Yield ``(identifier, AXGroup)`` pairs for every onscreen notification."""
        ncui = ui.apps(pid=self.pid)[0]
        for window in ncui.windows():
            for group in window.children.find(AXRole="AXGroup"):
                if not (identifier := Notification.group_identifier(group)):
                    continue
                yield identifier, group

    def perform_action(
        self, action: str, index: Optional[int] = None, app_name: Optional[str] = None
    ):
        """Perform ``action`` on the notification selected by ``index`` or ``app_name``.

        Returns True on success; notifies the user and returns False otherwise.
        """
        self.update_notifications()
        # Refresh again shortly, after the action has (presumably) taken effect.
        cron.after("500ms", self.update_notifications)
        notification = None
        if index is not None:
            if index < 0 or index > len(self.notifications) - 1:
                app.notify(f"Unable to locate notification #{index + 1}", "Try again?")
                return False
            notification = self.notifications[index]
        elif app_name is not None:
            try:
                notification = next(
                    notification
                    for notification in self.notifications
                    if notification.app_name == app_name
                )
            except StopIteration:
                app.notify(
                    f"Unable to locate notification for {app_name}", "Try again?"
                )
                return False
        # BUG FIX: a call with neither index nor app_name previously crashed
        # with AttributeError on notification.identifier below; fail gracefully.
        if notification is None:
            app.notify("Unable to locate notification", "Try again?")
            return False
        for identifier, group in self.notification_groups():
            if identifier != notification.identifier:
                continue
            if action not in notification.actions:
                # allow closing a notification stack like an individual notification
                if action == "close" and "clear all" in notification.actions:
                    action = "clear all"
                else:
                    app.notify(f"No such action “{action}”", "Try again?")
                    return False
            group.perform(notification.actions[action])
            return True
        app.notify("Unable to locate notification", "Try again?")
        return False

    def update_notifications(self, adding=()):
        """Resync ``self.notifications`` and the Talon lists with the screen.

        ``adding`` holds freshly observed notifications to merge in first.
        BUG FIX: its default used to be a mutable ``[]`` (shared across calls);
        an empty tuple is behaviorally equivalent and safe.
        """
        if adding:
            self.notifications += adding
        # Keyed by vertical position so the rebuilt list follows onscreen order.
        notifications = {}
        for identifier, group in self.notification_groups():
            y = group.AXPosition.y
            try:
                # Reuse the cached snapshot for a known identifier
                # (Notification equality compares identifiers only).
                notifications[y] = self.notifications[
                    self.notifications.index(Notification(identifier=identifier))
                ]
            except ValueError:
                notifications[y] = Notification.from_group(group, identifier)
        self.notifications = list(notifications.values())
        if notifications:
            debug_print(notifications)
        notification_actions = set()
        notification_apps = set()
        for notification in self.notifications:
            notification_actions.update(notification.actions.keys())
            notification_apps.add(notification.app_name)
        notification_actions = list(notification_actions)
        # XXX(nriley) create_spoken_forms_from_list doesn't handle apostrophes correctly
        # https://github.com/knausj85/knausj_talon/issues/780
        apostrophe_words = {
            word.replace("'", " "): word
            for word in chain.from_iterable(
                action.split() for action in notification_actions
            )
            if "'" in word
        }
        words_to_exclude = [word.split(" ")[0] for word in apostrophe_words]
        notification_actions = actions.user.create_spoken_forms_from_list(
            notification_actions, words_to_exclude=words_to_exclude
        )
        if apostrophe_words:
            # Undo the apostrophe mangling in the generated spoken forms.
            notification_actions = {
                spoken_form.replace(mangled_word, word): action
                for mangled_word, word in apostrophe_words.items()
                for spoken_form, action in notification_actions.items()
                if "apostrophe" not in spoken_form
            }
        if notification_actions:
            debug_print("actions", notification_actions)
        if "close" not in notification_actions and "clear all" in notification_actions:
            # allow closing a notification stack like an individual notification
            notification_actions["close"] = "clear all"
        ctx.lists["user.notification_actions"] = notification_actions
        # XXX(nriley) use app name overrides from knausj?
        notification_apps = actions.user.create_spoken_forms_from_list(
            notification_apps
        )
        ctx.lists["user.notification_apps"] = notification_apps
        if notification_apps:
            debug_print("apps", notification_apps)

    def win_close(self, window):
        """Refresh after a Notification Center window closes."""
        if not window.app.pid == self.pid:
            return
        self.update_notifications()

    def app_closed(self, app):
        # NOTE: ``app`` here is the closing application, shadowing talon.app.
        if app.pid == self.pid:
            ui.unregister("app_close", self.app_closed)
def app_launched(app):
    """Start monitoring as soon as Notification Center launches."""
    global MONITOR
    if app.bundle == "com.apple.notificationcenterui":
        MONITOR = NotificationMonitor(app)
def monitor():
    """Attach a monitor at startup if Notification Center is already running."""
    global MONITOR
    running = ui.apps(bundle="com.apple.notificationcenterui")
    if running:
        MONITOR = NotificationMonitor(running[0])
ui.register("app_launch", app_launched)
app.register("ready", monitor)
| 31.852941 | 99 | 0.62373 | 9,170 | 0.846253 | 320 | 0.029531 | 3,830 | 0.353451 | 0 | 0 | 1,967 | 0.181525 |
9f434eee5285c89e5b4ba0cfb55e6a244ea4ee9a | 529 | py | Python | emulation_system/emulation_system/consts.py | Opentrons/ot3-emulator | 90fad37b54dc3b003732220e630185de1a1d5dfd | [
"Apache-2.0"
] | null | null | null | emulation_system/emulation_system/consts.py | Opentrons/ot3-emulator | 90fad37b54dc3b003732220e630185de1a1d5dfd | [
"Apache-2.0"
] | 3 | 2021-08-31T14:59:41.000Z | 2021-10-04T16:10:25.000Z | emulation_system/emulation_system/consts.py | Opentrons/ot3-emulator | 90fad37b54dc3b003732220e630185de1a1d5dfd | [
"Apache-2.0"
] | null | null | null | """System-wide constants."""
from __future__ import annotations
import os
# Execution-mode names.
PRODUCTION_MODE_NAME = "prod"
DEVELOPMENT_MODE_NAME = "dev"

# Keyword selecting the most recent git commit.
LATEST_KEYWORD = "latest"

# Repository root: two directory levels above this module.
_THIS_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.normpath(os.path.join(_THIS_DIR, os.pardir, os.pardir))

DEFAULT_CONFIGURATION_FILE_PATH = f"{ROOT_DIR}/configuration.json"
CONFIGURATION_FILE_LOCATION_VAR_NAME = "CONFIGURATION_FILE_LOCATION"
DOCKERFILE_DIR_LOCATION = f"{ROOT_DIR}/emulation_system/resources/docker/"
| 24.045455 | 74 | 0.773157 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 209 | 0.395085 |
9f43ce6631e7319b1bcdc1a179c864fd23965ec3 | 1,367 | gyp | Python | binding.gyp | jacobsologub/gif2webp | 15e006074557f12863a7ac295ea8d2a9e771dd9a | [
"MIT"
] | 11 | 2015-09-23T08:47:00.000Z | 2022-01-11T09:47:57.000Z | binding.gyp | jacobsologub/gif2webp | 15e006074557f12863a7ac295ea8d2a9e771dd9a | [
"MIT"
] | 2 | 2015-04-23T06:28:00.000Z | 2017-05-09T16:46:45.000Z | binding.gyp | jacobsologub/gif2webp | 15e006074557f12863a7ac295ea8d2a9e771dd9a | [
"MIT"
] | 3 | 2015-12-01T23:12:21.000Z | 2019-03-01T01:51:22.000Z | {
"variables": {
"HEROKU%": '<!(echo $HEROKU)'
},
"targets": [
{
"target_name": "gif2webp",
"defines": [
],
"sources": [
"src/gif2webp.cpp",
"src/webp/example_util.cpp",
"src/webp/gif2webp_util.cpp",
"src/webp/gif2webpMain.cpp"
],
"conditions": [
[
'OS=="mac"',
{
"include_dirs": [
"/usr/local/include",
"src/webp"
],
"libraries": [
"-lwebp",
"-lwebpmux",
"-lgif"
]
}
],
[
'OS=="linux"',
{
"include_dirs": [
"/usr/local/include",
"src/webp"
],
"libraries": [
"-lwebp",
"-lwebpmux",
"-lgif"
]
}
]
]
}
]
} | 26.288462 | 49 | 0.21068 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 386 | 0.28237 |
9f44a5bdf67e30827a8720ae39bd17612174f30c | 4,881 | py | Python | multiagent/scenarios/simple_hockey.py | johan-kallstrom/multiagent-particle-envs | 61fc78c4537f2f1ca78fd80eb64eeb0d2d9cc5dd | [
"MIT"
] | null | null | null | multiagent/scenarios/simple_hockey.py | johan-kallstrom/multiagent-particle-envs | 61fc78c4537f2f1ca78fd80eb64eeb0d2d9cc5dd | [
"MIT"
] | null | null | null | multiagent/scenarios/simple_hockey.py | johan-kallstrom/multiagent-particle-envs | 61fc78c4537f2f1ca78fd80eb64eeb0d2d9cc5dd | [
"MIT"
] | null | null | null | import numpy as np
from multiagent.core import World, Agent, Landmark
from multiagent.scenario import BaseScenario
class Scenario(BaseScenario):
    """Simple hockey scenario: one adversary (red) vs. one agent (blue), a
    movable puck (landmark 0) and four fixed goal-post landmarks (1-4).

    Rewards are currently placeholders (always 0.0).
    """

    def make_world(self):
        """Create the world with 2 agents (the first is the adversary) and 5 landmarks."""
        world = World()
        # set any world properties first
        world.dim_c = 2
        num_agents = 2
        num_adversaries = 1
        num_landmarks = 5
        # add agents
        world.agents = [Agent() for i in range(num_agents)]
        for i, agent in enumerate(world.agents):
            agent.name = 'agent %d' % i
            agent.collide = True
            agent.silent = True
            if i < num_adversaries:
                agent.adversary = True
                agent.color = np.array([0.75, 0.25, 0.25])  # red team
            else:
                agent.adversary = False
                agent.color = np.array([0.25, 0.25, 0.75])  # blue team
        # add landmarks for goal posts and puck
        goal_posts = [[-0.25, -1.0],
                      [-0.25, 1.0],
                      [0.25, -1.0],
                      [0.25, 1.0]]
        world.landmarks = [Landmark() for i in range(num_landmarks)]
        for i, landmark in enumerate(world.landmarks):
            landmark.name = 'landmark %d' % i
            if i > 0:
                # landmarks 1-4: immovable goal posts at fixed positions
                landmark.collide = True
                landmark.movable = False
                landmark.state.p_pos = np.array(goal_posts[i-1])
                landmark.state.p_vel = np.zeros(world.dim_p)
            else:
                # landmark 0: the movable puck
                landmark.collide = True
                landmark.movable = True
        # add landmarks for rink boundary
        #world.landmarks += self.set_boundaries(world)
        # make initial conditions
        self.reset_world(world)
        return world

    def set_boundaries(self, world):
        """Build a ring of large immovable landmarks around the rink.

        Currently unused — see the commented-out call in make_world.
        """
        boundary_list = []
        landmark_size = 1
        edge = 1 + landmark_size
        num_landmarks = int(edge * 2 / landmark_size)
        # vertical edges (left/right)
        for x_pos in [-edge, edge]:
            for i in range(num_landmarks):
                l = Landmark()
                l.state.p_pos = np.array([x_pos, -1 + i * landmark_size])
                boundary_list.append(l)
        # horizontal edges (bottom/top)
        for y_pos in [-edge, edge]:
            for i in range(num_landmarks):
                l = Landmark()
                l.state.p_pos = np.array([-1 + i * landmark_size, y_pos])
                boundary_list.append(l)
        for i, l in enumerate(boundary_list):
            l.name = 'boundary %d' % i
            l.collide = True
            l.movable = False
            l.boundary = True
            l.color = np.array([0.75, 0.75, 0.75])
            l.size = landmark_size
            l.state.p_vel = np.zeros(world.dim_p)
        return boundary_list

    def reset_world(self, world):
        """Recolor landmarks and randomize agent and puck initial states."""
        # random properties for landmarks
        for i, landmark in enumerate(world.landmarks):
            if i > 0:
                landmark.color = np.array([0.7, 0.7, 0.7])
            else:
                landmark.color = np.array([0.1, 0.1, 0.1])
            landmark.index = i
        # set random initial states
        for agent in world.agents:
            agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
            agent.state.p_vel = np.zeros(world.dim_p)
            agent.state.c = np.zeros(world.dim_c)
        # puck (landmark 0) starts at a random position, at rest
        world.landmarks[0].state.p_pos = np.random.uniform(-1, +1, world.dim_p)
        world.landmarks[0].state.p_vel = np.zeros(world.dim_p)

    # return all agents of the blue team
    def blue_agents(self, world):
        return [agent for agent in world.agents if not agent.adversary]

    # return all agents of the red team
    def red_agents(self, world):
        return [agent for agent in world.agents if agent.adversary]

    def reward(self, agent, world):
        """Dispatch to the reward of the team this agent belongs to."""
        # Agents are rewarded based on team they belong to
        return self.adversary_reward(agent, world) if agent.adversary else self.agent_reward(agent, world)

    def agent_reward(self, agent, world):
        # reward for blue team agent
        # TODO: not yet implemented (always 0.0)
        return 0.0

    def adversary_reward(self, agent, world):
        # reward for red team agent
        # TODO: not yet implemented (always 0.0)
        return 0.0

    def observation(self, agent, world):
        """Observation: own velocity, then landmark positions/velocities and
        other agents' positions/velocities, all relative to this agent."""
        # get positions/vel of all entities in this agent's reference frame
        entity_pos = []
        entity_vel = []
        for entity in world.landmarks:  # world.entities:
            entity_pos.append(entity.state.p_pos - agent.state.p_pos)
            if entity.movable:
                entity_vel.append(entity.state.p_vel)
        # get positions/vel of all other agents in this agent's reference frame
        other_pos = []
        other_vel = []
        for other in world.agents:
            if other is agent: continue
            other_pos.append(other.state.p_pos - agent.state.p_pos)
            other_vel.append(other.state.p_vel)
        return np.concatenate([agent.state.p_vel] + entity_pos + entity_vel + other_pos + other_vel)
| 38.433071 | 106 | 0.565048 | 4,764 | 0.97603 | 0 | 0 | 0 | 0 | 0 | 0 | 614 | 0.125794 |
9f4562e1f053bb1fc2d14c5b37e00a09ea1df617 | 2,931 | py | Python | pyshocks/continuity/schemes.py | alexfikl/pyshocks | 9a2c2f5bd9b5b90af71f4ef62a82dcd96b2275f7 | [
"MIT"
] | null | null | null | pyshocks/continuity/schemes.py | alexfikl/pyshocks | 9a2c2f5bd9b5b90af71f4ef62a82dcd96b2275f7 | [
"MIT"
] | null | null | null | pyshocks/continuity/schemes.py | alexfikl/pyshocks | 9a2c2f5bd9b5b90af71f4ef62a82dcd96b2275f7 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from typing import Optional
import jax.numpy as jnp
from pyshocks import Grid, ConservationLawScheme
from pyshocks import flux, numerical_flux, predict_timestep
from pyshocks.weno import WENOJSMixin, WENOJS32Mixin, WENOJS53Mixin
# {{{ base
@dataclass(frozen=True)
class Scheme(ConservationLawScheme):
    """Base class for numerical schemes for the continuity equation.

    .. attribute:: velocity

        Advection velocity at cell centers (may be *None* until set).
    """

    velocity: Optional[jnp.ndarray]
@flux.register(Scheme)
def _flux_continuity(
    scheme: Scheme, t: float, x: jnp.ndarray, u: jnp.ndarray
) -> jnp.ndarray:
    """Physical flux of the continuity equation: ``f(u) = a * u``."""
    assert scheme.velocity is not None
    return scheme.velocity * u
@predict_timestep.register(Scheme)
def _predict_timestep_continuity(
    scheme: Scheme, grid: Grid, t: float, u: jnp.ndarray
) -> float:
    """Estimate a stable time step from the CFL-like bound ``dx_min / max|a|``."""
    assert scheme.velocity is not None
    # maximum advection speed over the interior cells
    amax = jnp.max(jnp.abs(scheme.velocity[grid.i_]))
    return grid.dx_min / amax
# }}}
# {{{ upwind
@dataclass(frozen=True)
class Godunov(Scheme):
    """First-order Godunov (upwind) scheme for the continuity equation.

    The flux of the Godunov scheme is given by
    :func:`~pyshocks.scalar.scalar_flux_upwind`.

    .. attribute:: order
    .. automethod:: __init__
    """

    @property
    def order(self):
        # first-order accurate: piecewise-constant upwind reconstruction
        return 1
@numerical_flux.register(Godunov)
def _numerical_flux_continuity_godunov(
    scheme: Godunov, grid: Grid, t: float, u: jnp.ndarray
) -> jnp.ndarray:
    """Upwind numerical flux: each face takes the flux from its upwind cell."""
    assert scheme.velocity is not None
    assert u.shape[0] == grid.x.size

    # split the velocity into negative (am) and positive (ap) parts
    am = jnp.maximum(-scheme.velocity, 0.0)
    ap = jnp.maximum(+scheme.velocity, 0.0)
    # left cell contributes where a > 0, right cell where a < 0
    fnum = ap[:-1] * u[:-1] - am[1:] * u[1:]

    # pad with zero fluxes at the boundary faces
    return jnp.pad(fnum, 1)
# }}}
# {{{ WENO
@dataclass(frozen=True)
class WENOJS(Scheme, WENOJSMixin):  # pylint: disable=abstract-method
    """See :class:`pyshocks.burgers.WENOJS`."""

    def __post_init__(self):
        # set_coefficients is supplied by the WENOJS*Mixin bases (hence the
        # pylint no-member suppression).
        # pylint: disable=no-member
        self.set_coefficients()
@dataclass(frozen=True)
class WENOJS32(WENOJS32Mixin, WENOJS):
    """See :class:`pyshocks.burgers.WENOJS32`."""

    # smoothness-indicator regularization parameter (presumably the WENO
    # epsilon — verify against pyshocks.weno)
    eps: float = 1.0e-6
@dataclass(frozen=True)
class WENOJS53(WENOJS53Mixin, WENOJS):
    """See :class:`pyshocks.burgers.WENOJS53`."""

    # smoothness-indicator regularization parameter (presumably the WENO
    # epsilon — verify against pyshocks.weno)
    eps: float = 1.0e-12
@numerical_flux.register(WENOJS)
def _numerical_flux_continuity_wenojs(
    scheme: WENOJS, grid: Grid, t: float, u: jnp.ndarray
) -> jnp.ndarray:
    """WENO-JS numerical flux with a global Lax-Friedrichs flux splitting."""
    assert scheme.velocity is not None
    assert u.size == grid.x.size

    from pyshocks.weno import reconstruct

    # reconstruct left/right interface states; the right-biased state is
    # obtained by reconstructing the reversed solution
    up = reconstruct(grid, scheme, u)
    fp = flux(scheme, t, grid.f[1:], up)

    um = reconstruct(grid, scheme, u[::-1])[::-1]
    fm = flux(scheme, t, grid.f[:-1], um)

    # NOTE: using the *global* Lax-Friedrichs flux
    a = scheme.velocity[grid.i_]
    amax = jnp.max(jnp.abs(a))
    fnum = (fp[:-1] + fm[1:]) / 2 + amax * (up[:-1] - um[1:]) / 2

    # pad with zero fluxes at the boundary faces
    return jnp.pad(fnum, 1)
# }}}
| 21.873134 | 71 | 0.667349 | 966 | 0.32958 | 0 | 0 | 2,574 | 0.878199 | 0 | 0 | 652 | 0.22245 |
9f4610a7e6a629e7a9bdcd73d7e9be93fc1f7fd1 | 121 | py | Python | finitewave/core/command/__init__.py | ArsOkenov/Finitewave | 14274d74be824a395b47a5c53ba18188798ab70d | [
"MIT"
] | null | null | null | finitewave/core/command/__init__.py | ArsOkenov/Finitewave | 14274d74be824a395b47a5c53ba18188798ab70d | [
"MIT"
] | null | null | null | finitewave/core/command/__init__.py | ArsOkenov/Finitewave | 14274d74be824a395b47a5c53ba18188798ab70d | [
"MIT"
] | null | null | null | from finitewave.core.command.command import Command
from finitewave.core.command.command_sequence import CommandSequence
| 40.333333 | 68 | 0.884298 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9f46ca6c2a027a96dbe50f686fba48b96a51859c | 96 | py | Python | smart_meter/apps.py | GPXenergy/gpx_server_api | 9b021522be4414ac95159a0ed576848c463637f9 | [
"MIT"
] | null | null | null | smart_meter/apps.py | GPXenergy/gpx_server_api | 9b021522be4414ac95159a0ed576848c463637f9 | [
"MIT"
] | null | null | null | smart_meter/apps.py | GPXenergy/gpx_server_api | 9b021522be4414ac95159a0ed576848c463637f9 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class SmartMeterConfig(AppConfig):
    """Django application configuration for the ``smart_meter`` app."""

    # dotted path used by Django's app registry
    name = 'smart_meter'
9f4848fb1e1f8c151bf749c7d7992e896b000b74 | 344 | py | Python | settings.py | nopogo/twitchplaysturtle | 247ce20be55082b6675aebea56c4a264493c0218 | [
"MIT"
] | null | null | null | settings.py | nopogo/twitchplaysturtle | 247ce20be55082b6675aebea56c4a264493c0218 | [
"MIT"
] | null | null | null | settings.py | nopogo/twitchplaysturtle | 247ce20be55082b6675aebea56c4a264493c0218 | [
"MIT"
] | null | null | null | import secrets
channel_name = "nopogo_tv"
server_ip = "0.0.0.0"
server_port = 8000
channel_id = 28092036
ws_host = "wss://pubsub-edge.twitch.tv"
ws_local = secrets.local_ip
topics = [
"channel-bits-events-v2.{}".format(channel_id),
"channel-points-channel-v1.{}".format(channel_id),
"channel-subscribe-events-v1.{}".format(channel_id)
]
| 21.5 | 52 | 0.726744 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 138 | 0.401163 |
9f484b3a565eb983b142b6be88f528edee03101a | 1,510 | py | Python | piv/framed/framed.py | LaCumbancha/piv-algorithm | cfa1a8b7074be600d96233f4ee756d7023c2a311 | [
"MIT"
] | null | null | null | piv/framed/framed.py | LaCumbancha/piv-algorithm | cfa1a8b7074be600d96233f4ee756d7023c2a311 | [
"MIT"
] | null | null | null | piv/framed/framed.py | LaCumbancha/piv-algorithm | cfa1a8b7074be600d96233f4ee756d7023c2a311 | [
"MIT"
] | 1 | 2021-08-23T21:31:42.000Z | 2021-08-23T21:31:42.000Z | # Imports
import numpy as np
# Single to double frame
# Combines images by 2, returning an array with two frames (one for each image).
#
# Input: 5 images with step 1.
# Output: 4 double-framed images.
# FrameA: 1 2 3 4
# FrameB: 2 3 4 5
#
# Input: 8 images with step 3.
# Output: 5 doubled-framed images.
# FrameA: 1 2 3 4 5
# FrameB: 4 5 6 7 8
#
# This function also crops the image according to the provided Region of Interest (ROI), that must be passed as:
# ROI = [X-start X-end Y-start Y-end], for example: [1 100 1 50].
#
# Output:
# Array with the following dimensions: 0 - Image; 1 - Frame; 2 - Height (Y); 3 - Width (X).
def single_to_double_frame(images, step=1, roi=None):
    """Pair images separated by ``step`` into double frames.

    Parameters
    ----------
    images : ndarray, shape (N, H, W)
        Stack of single-frame images indexed along the first axis.
    step : int, optional
        Index offset between frame A and frame B of each pair (default 1).
    roi : sequence of 4 ints, optional
        Region of interest ``[x_start, x_end, y_start, y_end]`` (1-based,
        inclusive); both frames are cropped to it when provided.

    Returns
    -------
    ndarray, shape (N - step, 2, H', W')
        Axis 0: pair index; axis 1: frame (0 = A, 1 = B); axes 2-3: y, x.
    """
    total_images = images.shape[0]
    frameA_idx = list(range(0, total_images - step))
    # BUG FIX: frame B must be offset by `step`, not always by 1, so that
    # e.g. 8 images with step=3 pair as A=(1..5), B=(4..8) per the module docs.
    frameB_idx = [idx + step for idx in frameA_idx]

    images_double_framed = []
    for a_idx, b_idx in zip(frameA_idx, frameB_idx):
        double_frame = [images[a_idx], images[b_idx]]

        if roi is not None and len(roi) == 4:
            size_y, size_x = double_frame[0].shape
            # Convert the 1-based inclusive ROI to clamped 0-based slices.
            min_x, max_x = max(0, roi[0] - 1), min(roi[1], size_x)
            # BUG FIX: clamp the y-range against the image height (size_y),
            # not the width (size_x).
            min_y, max_y = max(0, roi[2] - 1), min(roi[3], size_y)
            double_frame[0] = np.array(double_frame[0][min_y:max_y, min_x:max_x])
            double_frame[1] = np.array(double_frame[1][min_y:max_y, min_x:max_x])

        images_double_framed.append(double_frame)

    return np.array(images_double_framed)
| 32.826087 | 112 | 0.617219 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 640 | 0.423841 |
9f4a93e0e0310ba3c9ec9b1e5108dbc823a741c9 | 4,656 | py | Python | paccmann_proteomics/data/datasets/seq_clf.py | xueeinstein/paccmann_proteomics | b376883996641a07da77fbbb6dbd34c2c04fdddb | [
"MIT"
] | 28 | 2020-11-24T17:37:40.000Z | 2022-03-23T17:05:42.000Z | paccmann_proteomics/data/datasets/seq_clf.py | xueeinstein/paccmann_proteomics | b376883996641a07da77fbbb6dbd34c2c04fdddb | [
"MIT"
] | 9 | 2021-01-22T15:52:10.000Z | 2022-02-16T05:29:16.000Z | paccmann_proteomics/data/datasets/seq_clf.py | xueeinstein/paccmann_proteomics | b376883996641a07da77fbbb6dbd34c2c04fdddb | [
"MIT"
] | 8 | 2020-12-08T02:51:28.000Z | 2022-02-21T08:15:38.000Z | import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from transformers import PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer
from transformers.data.datasets import GlueDataset
from transformers.data.datasets import GlueDataTrainingArguments
from transformers.data.processors.glue import glue_convert_examples_to_features
from transformers.data.processors.utils import InputFeatures
from loguru import logger
from ..processors.seq_clf import seq_clf_output_modes, seq_clf_processors, seq_clf_tasks_num_labels
# Dataset split selector; member values match the on-disk naming convention.
Split = Enum("Split", [("train", "train"), ("dev", "dev"), ("test", "test")])
class SeqClfDataset(GlueDataset):
    """GLUE-style dataset for protein sequence classification tasks.

    Why this class even exists?
    ``GlueDataset.__init__`` hard-codes
    ``processor = glue_processors[args.task_name]()``, which cannot be
    extended with protein classification task names; this subclass rebuilds
    the constructor against the ``seq_clf_*`` registries instead.
    """

    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        self.args = args
        self.processor = seq_clf_processors[args.task_name]()
        self.output_mode = seq_clf_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name')

        # Cache file name encodes split, tokenizer, max length and task so a
        # change to any of them invalidates the cache.
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            'cached_{}_{}_{}_{}'.format(
                mode.value, tokenizer.__class__.__name__, str(args.max_seq_length), args.task_name,
            ),
        )
        label_list = self.processor.get_labels()

        # HACK(label indices are swapped in RoBERTa pretrained model)
        # BUG FIX: BartTokenizer/BartTokenizerFast were referenced here without
        # being imported, raising NameError for every MNLI run; import them
        # lazily so transformers builds without BART keep working.
        swap_tokenizer_classes = [RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer]
        try:
            from transformers import BartTokenizer, BartTokenizerFast

            swap_tokenizer_classes += [BartTokenizer, BartTokenizerFast]
        except ImportError:
            pass
        if args.task_name in ['mnli', 'mnli-mm'] and tokenizer.__class__ in tuple(swap_tokenizer_classes):
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                # BUG FIX: loguru formats extra args with {}-style placeholders;
                # the old %-style placeholder was printed literally.
                logger.info(
                    'Loading features from cached file {} [took {:.3f} s]',
                    cached_features_file, time.time() - start,
                )
            else:
                logger.info(f'Creating features from dataset file at {args.data_dir}')

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]

                # Load a data file into a list of ``InputFeatures``
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    'Saving features into cached file {} [took {:.3f} s]',
                    cached_features_file, time.time() - start,
                )

    def __len__(self):
        """Number of featurized examples."""
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        """Return the featurized example at index ``i``."""
        return self.features[i]

    def get_labels(self):
        """Return the task's label list (possibly swapped for RoBERTa/BART)."""
        return self.label_list
| 39.794872 | 114 | 0.626503 | 3,992 | 0.857388 | 0 | 0 | 0 | 0 | 0 | 0 | 991 | 0.212844 |
9f4ab46e51128f48f06e0891f830eba972929243 | 4,071 | py | Python | code/shell_scripts/fcs_processing.py | RPGroup-PBoC/rnaseq_barcode | f90bdd7388d08b8dd1eeaaaedd75040580af5f10 | [
"MIT"
] | null | null | null | code/shell_scripts/fcs_processing.py | RPGroup-PBoC/rnaseq_barcode | f90bdd7388d08b8dd1eeaaaedd75040580af5f10 | [
"MIT"
] | 1 | 2020-11-02T19:07:52.000Z | 2020-11-02T19:07:52.000Z | code/shell_scripts/fcs_processing.py | RPGroup-PBoC/rnaseq_barcode | f90bdd7388d08b8dd1eeaaaedd75040580af5f10 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
"""
This script parses and cleans up a provided Flow Cytometry Standard (fcs) file
and saves it as a Comma Separated Value (csv).
"""
import os
import re
import numpy as np
import pandas as pd
import optparse
import fcsparser
# #########################################################################
def main():
    """CLI entry point: convert Flow Cytometry Standard (.fcs) files to .csv.

    Accepts either a single file (-i) or an input directory plus filename
    pattern (-d/-p), an optional list of channels to keep (-c, repeatable),
    and an optional output directory (-o); -f forces writing into a
    non-empty output directory and -v prints progress.
    """
    # Initialize the option parser and declare the command-line interface.
    parser = optparse.OptionParser()
    parser.add_option('-i', '--input_file', dest='filename',
                      help='name of single file to be processed.',
                      metavar="filename")
    parser.add_option('-d', '--directory', dest='inputdir',
                      help='name of input directory to be processed')
    parser.add_option('-p', '--pattern', dest='pattern',
                      help='filename pattern to parse files.')
    parser.add_option('-o', '--output', dest='out',
                      help='name of output directory')
    parser.add_option('-c', '--channel', action='append', dest='channels',
                      help='individual channels to extract. Each channel must '
                           'have its own -c flag.')
    parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
                      help='print progress to stdout', default=False)
    parser.add_option('-f', '--force', action='store_true', dest='force',
                      help='force saving of files to output directory if needed.',
                      default=False)
    ops, args = parser.parse_args()

    # At least one input source must be provided.
    if (ops.inputdir is None) and (ops.filename is None):
        raise ValueError('no input directory/file provided! Please indicate '
                         'the input directory that contains the fcs files')

    # Collect the files to process.
    files = []
    if ops.inputdir is not None:
        usr_files = np.array(os.listdir(ops.inputdir))
        # Use the pattern to identify all of the files.
        files_idx = np.array([ops.pattern in f for f in usr_files], dtype=bool)
        file_names = usr_files[files_idx]
        # Prepend the input directory to each file name.
        for f in file_names:
            files.append('%s/%s' % (ops.inputdir, f))
    else:
        files.append(ops.filename)

    # Decide up front whether writing into the output directory is allowed.
    # BUG FIX: the previous checks compared len(os.listdir(...)) != None
    # (always true), used Python 2's raw_input (a NameError on Python 3),
    # and left `cont` unbound when the directory was freshly created.
    cont = 'y'
    if ops.out is not None:
        if not os.path.isdir(ops.out):
            os.mkdir(ops.out)
            print("Made new ouptut directory %s. I hope that's okay..." % ops.out)
        elif os.listdir(ops.out) and not ops.force:
            cont = input('Output directory is not empty! Continue? [y/n]: ')

    # Convert each fcs file.
    for f in files:
        if not f.endswith('.fcs'):
            continue
        # Read the file (metadata is currently unused).
        meta, data = fcsparser.parse(f)
        # Restrict to the requested channels, if any.
        if ops.channels is not None:
            data = data.loc[:, ops.channels]
        # BUG FIX: anchor the extension replacement; the old pattern '.fcs'
        # let the dot match any character anywhere in the name.
        filename = re.sub(r'\.fcs$', '.csv', f)
        if ops.out is None:
            # Save next to the original file.
            data.to_csv(filename, index=False)
            if ops.verbose:
                print(f + ' -> ' + filename)
        else:
            # Save into the output directory, keeping only the base name.
            filename = os.path.basename(filename)
            if cont.lower() == 'y':
                out_path = ops.out + '/' + filename
                data.to_csv(out_path, index=False)
                if ops.verbose:
                    print(f + ' -> ' + out_path)
            else:
                raise ValueError('output directory is not empty.')
# Run the CLI only when executed as a script (not on import).
if __name__ == '__main__':
    main()
    print('thank you -- come again')
| 37.694444 | 84 | 0.542127 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,642 | 0.403341 |
9f4c54bafc52b2cd849c4e602fafdcc41ce81b9c | 89 | py | Python | test/utils/enums.py | alvintangz/cscc43-a2-sql-queries-test | 0531258375a7eee8bd1e9400b72943033290ad66 | [
"PostgreSQL"
] | 3 | 2021-07-12T21:52:32.000Z | 2021-07-16T19:30:37.000Z | test/utils/enums.py | alvintangz/cscc43-a2-sql-queries-test | 0531258375a7eee8bd1e9400b72943033290ad66 | [
"PostgreSQL"
] | null | null | null | test/utils/enums.py | alvintangz/cscc43-a2-sql-queries-test | 0531258375a7eee8bd1e9400b72943033290ad66 | [
"PostgreSQL"
] | null | null | null | from enum import Enum
# Semesters; member values presumably encode the starting month of each term
# (definition order preserved: FALL, WINTER, SUMMER).
Semester = Enum("Semester", [("FALL", 9), ("WINTER", 1), ("SUMMER", 5)])
| 11.125 | 21 | 0.606742 | 64 | 0.719101 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9f4d37c4ab3e91a8989de355eb4c109721b6c34b | 485 | py | Python | day06/day6_part1.py | raistlin7447/AoC2021 | 2f11733c866065fb2bae5862be562a14db40d0b9 | [
"MIT"
] | null | null | null | day06/day6_part1.py | raistlin7447/AoC2021 | 2f11733c866065fb2bae5862be562a14db40d0b9 | [
"MIT"
] | null | null | null | day06/day6_part1.py | raistlin7447/AoC2021 | 2f11733c866065fb2bae5862be562a14db40d0b9 | [
"MIT"
] | null | null | null | with open("day6_input.txt") as f:
initial_fish = list(map(int, f.readline().strip().split(",")))
fish = [0] * 9
for initial_f in initial_fish:
fish[initial_f] += 1
for day in range(80):
new_fish = [0] * 9
for state in range(9):
if state == 0:
new_fish[6] += fish[0]
new_fish[8] += fish[0]
else:
new_fish[state-1] += fish[state]
fish = new_fish
print(sum(fish))
| 30.3125 | 66 | 0.492784 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.039175 |