| partition (string; 3 classes) | func_name (string; 1-134 chars) | docstring (string; 1-46.9k chars) | path (string; 4-223 chars) | original_string (string; 75-104k chars) | code (string; 75-104k chars) | docstring_tokens (list; 1-1.97k items) | repo (string; 7-55 chars) | language (string; 1 class) | url (string; 87-315 chars) | code_tokens (list; 19-28.4k items) | sha (string; 40 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
test
|
get_next_event
|
Returns the next occurrence of a given event, relative to 'now'.
The 'event' arg should be an iterable containing one element,
namely the event we'd like to find the occurrence of.
The reason for this is b/c the get_count() function of CountHandler,
which this func makes use of, expects an iterable.
CHANGED: The 'now' arg must be an instance of datetime.datetime()
to allow time comparison (used to accept datetime.date() as well)
|
happenings/utils/next_event.py
|
def get_next_event(event, now):
"""
Returns the next occurrence of a given event, relative to 'now'.
The 'event' arg should be an iterable containing one element,
namely the event we'd like to find the occurrence of.
The reason for this is b/c the get_count() function of CountHandler,
which this func makes use of, expects an iterable.
CHANGED: The 'now' arg must be an instance of datetime.datetime()
to allow time comparison (used to accept datetime.date() as well)
"""
year = now.year
month = now.month
day = now.day
e_day = event[0].l_start_date.day
e_end_day = event[0].l_end_date.day
good_today = True if event[0].l_start_date.time() >= now.time() else False
if event[0].starts_same_year_month_as(year, month) and \
e_day <= now.day <= e_end_day:
occurrences = CountHandler(year, month, event).get_count()
future_dates = (x for x in occurrences if x >= now.day)
day = min(future_dates, key=lambda x: abs(x - now.day))
else:
e_year = event[0].l_start_date.year
e_month = event[0].l_start_date.month
# convert to datetime.date() to be sure we can make a comparison
if date(e_year, e_month, e_day) > date(now.year, now.month, now.day):
# if the event hasn't started yet, then its next occurrence will
# be on its start date, so return that.
year = e_year
month = e_month
day = e_day
else:
occurrences = CountHandler(year, month, event).get_count()
future_dates = [x for x in occurrences if x >= now.day]
e_end_month = event[0].l_end_date.month
if future_dates and future_dates[0] == day and not good_today:
future_dates.pop(0)
while not future_dates:
month, year = inc_month(month, year)
if event[0].repeats('YEARLY') and \
(month != e_month or month != e_end_month):
continue
occurrences = CountHandler(year, month, event).get_count()
# we don't check for now.day here, b/c we're in a month past
# whatever now is. As an example, if we checked for now.day
# we'd get stuck in an infinite loop if this were a
# monthly repeating event and our 'now' was on a day after the
# event's l_end_date.day
future_dates = [x for x in occurrences]
day = min(future_dates)
if event[0].repeats('WEEKDAY'):
return check_weekday(year, month, day)
return year, month, day
|
def get_next_event(event, now):
"""
Returns the next occurrence of a given event, relative to 'now'.
The 'event' arg should be an iterable containing one element,
namely the event we'd like to find the occurrence of.
The reason for this is b/c the get_count() function of CountHandler,
which this func makes use of, expects an iterable.
CHANGED: The 'now' arg must be an instance of datetime.datetime()
to allow time comparison (used to accept datetime.date() as well)
"""
year = now.year
month = now.month
day = now.day
e_day = event[0].l_start_date.day
e_end_day = event[0].l_end_date.day
good_today = True if event[0].l_start_date.time() >= now.time() else False
if event[0].starts_same_year_month_as(year, month) and \
e_day <= now.day <= e_end_day:
occurrences = CountHandler(year, month, event).get_count()
future_dates = (x for x in occurrences if x >= now.day)
day = min(future_dates, key=lambda x: abs(x - now.day))
else:
e_year = event[0].l_start_date.year
e_month = event[0].l_start_date.month
# convert to datetime.date() to be sure we can make a comparison
if date(e_year, e_month, e_day) > date(now.year, now.month, now.day):
# if the event hasn't started yet, then its next occurrence will
# be on its start date, so return that.
year = e_year
month = e_month
day = e_day
else:
occurrences = CountHandler(year, month, event).get_count()
future_dates = [x for x in occurrences if x >= now.day]
e_end_month = event[0].l_end_date.month
if future_dates and future_dates[0] == day and not good_today:
future_dates.pop(0)
while not future_dates:
month, year = inc_month(month, year)
if event[0].repeats('YEARLY') and \
(month != e_month or month != e_end_month):
continue
occurrences = CountHandler(year, month, event).get_count()
# we don't check for now.day here, b/c we're in a month past
# whatever now is. As an example, if we checked for now.day
# we'd get stuck in an infinite loop if this were a
# monthly repeating event and our 'now' was on a day after the
# event's l_end_date.day
future_dates = [x for x in occurrences]
day = min(future_dates)
if event[0].repeats('WEEKDAY'):
return check_weekday(year, month, day)
return year, month, day
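A minimal usage sketch for the function above, assuming a django-happenings Event queryset; the one-element list and the datetime.datetime requirement come straight from the docstring (some_pk is a hypothetical primary key, not part of the source):
# Hypothetical usage sketch -- Event is the django-happenings model and
# some_pk is a placeholder primary key, not part of the source above.
from datetime import datetime
event = list(Event.objects.filter(pk=some_pk))  # one-element iterable, as the docstring requires
year, month, day = get_next_event(event, datetime.now())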
|
[
"Returns",
"the",
"next",
"occurrence",
"of",
"a",
"given",
"event",
"relative",
"to",
"now",
".",
"The",
"event",
"arg",
"should",
"be",
"an",
"iterable",
"containing",
"one",
"element",
"namely",
"the",
"event",
"we",
"d",
"like",
"to",
"find",
"the",
"occurrence",
"of",
".",
"The",
"reason",
"for",
"this",
"is",
"b",
"/",
"c",
"the",
"get_count",
"()",
"function",
"of",
"CountHandler",
"which",
"this",
"func",
"makes",
"use",
"of",
"expects",
"an",
"iterable",
".",
"CHANGED",
":",
"The",
"now",
"arg",
"must",
"be",
"an",
"instance",
"of",
"datetime",
".",
"datetime",
"()",
"to",
"allow",
"time",
"comparison",
"(",
"used",
"to",
"accept",
"datetime",
".",
"date",
"()",
"as",
"well",
")"
] |
wreckage/django-happenings
|
python
|
https://github.com/wreckage/django-happenings/blob/7bca5576efa6cd4c4e87356bf9e5b8cd538ae91d/happenings/utils/next_event.py#L9-L63
|
[
"def",
"get_next_event",
"(",
"event",
",",
"now",
")",
":",
"year",
"=",
"now",
".",
"year",
"month",
"=",
"now",
".",
"month",
"day",
"=",
"now",
".",
"day",
"e_day",
"=",
"event",
"[",
"0",
"]",
".",
"l_start_date",
".",
"day",
"e_end_day",
"=",
"event",
"[",
"0",
"]",
".",
"l_end_date",
".",
"day",
"good_today",
"=",
"True",
"if",
"event",
"[",
"0",
"]",
".",
"l_start_date",
".",
"time",
"(",
")",
">=",
"now",
".",
"time",
"(",
")",
"else",
"False",
"if",
"event",
"[",
"0",
"]",
".",
"starts_same_year_month_as",
"(",
"year",
",",
"month",
")",
"and",
"e_day",
"<=",
"now",
".",
"day",
"<=",
"e_end_day",
":",
"occurrences",
"=",
"CountHandler",
"(",
"year",
",",
"month",
",",
"event",
")",
".",
"get_count",
"(",
")",
"future_dates",
"=",
"(",
"x",
"for",
"x",
"in",
"occurrences",
"if",
"x",
">=",
"now",
".",
"day",
")",
"day",
"=",
"min",
"(",
"future_dates",
",",
"key",
"=",
"lambda",
"x",
":",
"abs",
"(",
"x",
"-",
"now",
".",
"day",
")",
")",
"else",
":",
"e_year",
"=",
"event",
"[",
"0",
"]",
".",
"l_start_date",
".",
"year",
"e_month",
"=",
"event",
"[",
"0",
"]",
".",
"l_start_date",
".",
"month",
"# convert to datetime.date() to be sure we can make a comparison",
"if",
"date",
"(",
"e_year",
",",
"e_month",
",",
"e_day",
")",
">",
"date",
"(",
"now",
".",
"year",
",",
"now",
".",
"month",
",",
"now",
".",
"day",
")",
":",
"# if the event hasn't started yet, then its next occurrence will",
"# be on its start date, so return that.",
"year",
"=",
"e_year",
"month",
"=",
"e_month",
"day",
"=",
"e_day",
"else",
":",
"occurrences",
"=",
"CountHandler",
"(",
"year",
",",
"month",
",",
"event",
")",
".",
"get_count",
"(",
")",
"future_dates",
"=",
"[",
"x",
"for",
"x",
"in",
"occurrences",
"if",
"x",
">=",
"now",
".",
"day",
"]",
"e_end_month",
"=",
"event",
"[",
"0",
"]",
".",
"l_end_date",
".",
"month",
"if",
"future_dates",
"and",
"future_dates",
"[",
"0",
"]",
"is",
"day",
"and",
"not",
"good_today",
":",
"future_dates",
".",
"pop",
"(",
"0",
")",
"while",
"not",
"future_dates",
":",
"month",
",",
"year",
"=",
"inc_month",
"(",
"month",
",",
"year",
")",
"if",
"event",
"[",
"0",
"]",
".",
"repeats",
"(",
"'YEARLY'",
")",
"and",
"(",
"month",
"!=",
"e_month",
"or",
"month",
"!=",
"e_end_month",
")",
":",
"continue",
"occurrences",
"=",
"CountHandler",
"(",
"year",
",",
"month",
",",
"event",
")",
".",
"get_count",
"(",
")",
"# we don't check for now.day here, b/c we're in a month past",
"# whatever now is. As an example, if we checked for now.day",
"# we'd get stuck in an infinite loop if this were a",
"# monthly repeating event and our 'now' was on a day after the",
"# event's l_end_date.day",
"future_dates",
"=",
"[",
"x",
"for",
"x",
"in",
"occurrences",
"]",
"day",
"=",
"min",
"(",
"future_dates",
")",
"if",
"event",
"[",
"0",
"]",
".",
"repeats",
"(",
"'WEEKDAY'",
")",
":",
"return",
"check_weekday",
"(",
"year",
",",
"month",
",",
"day",
")",
"return",
"year",
",",
"month",
",",
"day"
] |
7bca5576efa6cd4c4e87356bf9e5b8cd538ae91d
|
test
|
get_dashboard_info
|
Returns cases with phenotype
If phenotypes are provided search for only those
Args:
adapter(adapter.MongoAdapter)
institute_id(str): an institute _id
slice_query(str): query to filter cases to obtain statistics for.
Returns:
data(dict): Dictionary with relevant information
|
scout/server/blueprints/dashboard/controllers.py
|
def get_dashboard_info(adapter, institute_id=None, slice_query=None):
"""Returns cases with phenotype
If phenotypes are provided search for only those
Args:
adapter(adapter.MongoAdapter)
institute_id(str): an institute _id
slice_query(str): query to filter cases to obtain statistics for.
Returns:
data(dict): Dictionary with relevant information
"""
LOG.debug("General query with institute_id {}.".format(institute_id))
# if institute_id == 'None' or None, all cases and general stats will be returned
if institute_id == 'None':
institute_id = None
# If a slice_query is present then numbers in "General statistics" and "Case statistics" will
# reflect the data available for the query
general_sliced_info = get_general_case_info(adapter, institute_id=institute_id,
slice_query=slice_query)
total_sliced_cases = general_sliced_info['total_cases']
data = {'total_cases': total_sliced_cases}
if total_sliced_cases == 0:
return data
data['pedigree'] = []
for ped_info in general_sliced_info['pedigree'].values():
ped_info['percent'] = ped_info['count'] / total_sliced_cases
data['pedigree'].append(ped_info)
data['cases'] = get_case_groups(adapter, total_sliced_cases,
institute_id=institute_id, slice_query=slice_query)
data['analysis_types'] = get_analysis_types(adapter, total_sliced_cases,
institute_id=institute_id, slice_query=slice_query)
overview = [
{
'title': 'Phenotype terms',
'count': general_sliced_info['phenotype_cases'],
'percent': general_sliced_info['phenotype_cases'] / total_sliced_cases,
},
{
'title': 'Causative variants',
'count': general_sliced_info['causative_cases'],
'percent': general_sliced_info['causative_cases'] / total_sliced_cases,
},
{
'title': 'Pinned variants',
'count': general_sliced_info['pinned_cases'],
'percent': general_sliced_info['pinned_cases'] / total_sliced_cases,
},
{
'title': 'Cohort tag',
'count': general_sliced_info['cohort_cases'],
'percent': general_sliced_info['cohort_cases'] / total_sliced_cases,
}
]
# Data from "Variant statistics tab" is not filtered by slice_query and numbers will
# reflect verified variants in all available cases for an institute
general_info = get_general_case_info(adapter, institute_id=institute_id)
total_cases = general_info['total_cases']
sliced_case_ids = general_sliced_info['case_ids']
verified_query = {
'verb' : 'validate',
}
if institute_id: # filter by institute if the user wishes so
verified_query['institute'] = institute_id
# Case level information
sliced_validation_cases = set()
sliced_validated_cases = set()
# Variant level information
validated_tp = set()
validated_fp = set()
var_valid_orders = 0 # use this counter to count 'True Positive', 'False positive' and 'Not validated' vars
validate_events = adapter.event_collection.find(verified_query)
for validate_event in list(validate_events):
case_id = validate_event.get('case')
var_obj = adapter.variant(case_id=case_id, document_id=validate_event['variant_id'])
if var_obj: # Don't take into account variants which have been removed from db
var_valid_orders += 1
if case_id in sliced_case_ids:
sliced_validation_cases.add(case_id) # add to the set. Can't add same id twice since it's a set
validation = var_obj.get('validation')
if validation and validation in ['True positive', 'False positive']:
if case_id in sliced_case_ids:
sliced_validated_cases.add(case_id)
if validation == 'True positive':
validated_tp.add(var_obj['_id'])
elif validation == 'False positive':
validated_fp.add(var_obj['_id'])
n_validation_cases = len(sliced_validation_cases)
n_validated_cases = len(sliced_validated_cases)
# append
overview.append(
{
'title': 'Validation ordered',
'count': n_validation_cases,
'percent': n_validation_cases / total_sliced_cases,
})
overview.append(
{
'title': 'Validated cases (TP + FP)',
'count': n_validated_cases,
'percent': n_validated_cases / total_sliced_cases,
})
data['overview'] = overview
variants = []
nr_validated = len(validated_tp) + len(validated_fp)
variants.append(
{
'title': 'Validation ordered',
'count': var_valid_orders,
'percent': 1
}
)
# taking into account that var_valid_orders might be 0:
percent_validated_tp = 0
percent_validated_fp = 0
if var_valid_orders:
percent_validated_tp = len(validated_tp) / var_valid_orders
percent_validated_fp = len(validated_fp) / var_valid_orders
variants.append(
{
'title': 'Validated True Positive',
'count': len(validated_tp),
'percent': percent_validated_tp,
}
)
variants.append(
{
'title': 'Validated False Positive',
'count': len(validated_fp),
'percent': percent_validated_fp,
}
)
data['variants'] = variants
return data
|
def get_dashboard_info(adapter, institute_id=None, slice_query=None):
"""Returns cases with phenotype
If phenotypes are provided search for only those
Args:
adapter(adapter.MongoAdapter)
institute_id(str): an institute _id
slice_query(str): query to filter cases to obtain statistics for.
Returns:
data(dict): Dictionary with relevant information
"""
LOG.debug("General query with institute_id {}.".format(institute_id))
# if institute_id == 'None' or None, all cases and general stats will be returned
if institute_id == 'None':
institute_id = None
# If a slice_query is present then numbers in "General statistics" and "Case statistics" will
# reflect the data available for the query
general_sliced_info = get_general_case_info(adapter, institute_id=institute_id,
slice_query=slice_query)
total_sliced_cases = general_sliced_info['total_cases']
data = {'total_cases': total_sliced_cases}
if total_sliced_cases == 0:
return data
data['pedigree'] = []
for ped_info in general_sliced_info['pedigree'].values():
ped_info['percent'] = ped_info['count'] / total_sliced_cases
data['pedigree'].append(ped_info)
data['cases'] = get_case_groups(adapter, total_sliced_cases,
institute_id=institute_id, slice_query=slice_query)
data['analysis_types'] = get_analysis_types(adapter, total_sliced_cases,
institute_id=institute_id, slice_query=slice_query)
overview = [
{
'title': 'Phenotype terms',
'count': general_sliced_info['phenotype_cases'],
'percent': general_sliced_info['phenotype_cases'] / total_sliced_cases,
},
{
'title': 'Causative variants',
'count': general_sliced_info['causative_cases'],
'percent': general_sliced_info['causative_cases'] / total_sliced_cases,
},
{
'title': 'Pinned variants',
'count': general_sliced_info['pinned_cases'],
'percent': general_sliced_info['pinned_cases'] / total_sliced_cases,
},
{
'title': 'Cohort tag',
'count': general_sliced_info['cohort_cases'],
'percent': general_sliced_info['cohort_cases'] / total_sliced_cases,
}
]
# Data from "Variant statistics tab" is not filtered by slice_query and numbers will
# reflect verified variants in all available cases for an institute
general_info = get_general_case_info(adapter, institute_id=institute_id)
total_cases = general_info['total_cases']
sliced_case_ids = general_sliced_info['case_ids']
verified_query = {
'verb' : 'validate',
}
if institute_id: # filter by institute if the user wishes so
verified_query['institute'] = institute_id
# Case level information
sliced_validation_cases = set()
sliced_validated_cases = set()
# Variant level information
validated_tp = set()
validated_fp = set()
var_valid_orders = 0 # use this counter to count 'True Positive', 'False positive' and 'Not validated' vars
validate_events = adapter.event_collection.find(verified_query)
for validate_event in list(validate_events):
case_id = validate_event.get('case')
var_obj = adapter.variant(case_id=case_id, document_id=validate_event['variant_id'])
if var_obj: # Don't take into account variants which have been removed from db
var_valid_orders += 1
if case_id in sliced_case_ids:
sliced_validation_cases.add(case_id) # add to the set. Can't add same id twice since it's a set
validation = var_obj.get('validation')
if validation and validation in ['True positive', 'False positive']:
if case_id in sliced_case_ids:
sliced_validated_cases.add(case_id)
if validation == 'True positive':
validated_tp.add(var_obj['_id'])
elif validation == 'False positive':
validated_fp.add(var_obj['_id'])
n_validation_cases = len(sliced_validation_cases)
n_validated_cases = len(sliced_validated_cases)
# append
overview.append(
{
'title': 'Validation ordered',
'count': n_validation_cases,
'percent': n_validation_cases / total_sliced_cases,
})
overview.append(
{
'title': 'Validated cases (TP + FP)',
'count': n_validated_cases,
'percent': n_validated_cases / total_sliced_cases,
})
data['overview'] = overview
variants = []
nr_validated = len(validated_tp) + len(validated_fp)
variants.append(
{
'title': 'Validation ordered',
'count': var_valid_orders,
'percent': 1
}
)
# taking into account that var_valid_orders might be 0:
percent_validated_tp = 0
percent_validated_fp = 0
if var_valid_orders:
percent_validated_tp = len(validated_tp) / var_valid_orders
percent_validated_fp = len(validated_fp) / var_valid_orders
variants.append(
{
'title': 'Validated True Positive',
'count': len(validated_tp),
'percent': percent_validated_tp,
}
)
variants.append(
{
'title': 'Validated False Positive',
'count': len(validated_fp),
'percent': percent_validated_fp,
}
)
data['variants'] = variants
return data
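A hedged call sketch for the controller above; adapter is assumed to be a connected scout MongoAdapter, and the institute id is a made-up example:
# Hypothetical usage sketch -- 'cust000' is an invented institute _id.
data = get_dashboard_info(adapter, institute_id='cust000', slice_query='rare')
print(data['total_cases'])
for row in data.get('overview', []):  # 'overview' is absent when there are no cases
    print(row['title'], row['count'], row['percent'])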
|
[
"Returns",
"cases",
"with",
"phenotype"
] |
Clinical-Genomics/scout
|
python
|
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/dashboard/controllers.py#L6-L163
|
[
"def",
"get_dashboard_info",
"(",
"adapter",
",",
"institute_id",
"=",
"None",
",",
"slice_query",
"=",
"None",
")",
":",
"LOG",
".",
"debug",
"(",
"\"General query with institute_id {}.\"",
".",
"format",
"(",
"institute_id",
")",
")",
"# if institute_id == 'None' or None, all cases and general stats will be returned",
"if",
"institute_id",
"==",
"'None'",
":",
"institute_id",
"=",
"None",
"# If a slice_query is present then numbers in \"General statistics\" and \"Case statistics\" will",
"# reflect the data available for the query",
"general_sliced_info",
"=",
"get_general_case_info",
"(",
"adapter",
",",
"institute_id",
"=",
"institute_id",
",",
"slice_query",
"=",
"slice_query",
")",
"total_sliced_cases",
"=",
"general_sliced_info",
"[",
"'total_cases'",
"]",
"data",
"=",
"{",
"'total_cases'",
":",
"total_sliced_cases",
"}",
"if",
"total_sliced_cases",
"==",
"0",
":",
"return",
"data",
"data",
"[",
"'pedigree'",
"]",
"=",
"[",
"]",
"for",
"ped_info",
"in",
"general_sliced_info",
"[",
"'pedigree'",
"]",
".",
"values",
"(",
")",
":",
"ped_info",
"[",
"'percent'",
"]",
"=",
"ped_info",
"[",
"'count'",
"]",
"/",
"total_sliced_cases",
"data",
"[",
"'pedigree'",
"]",
".",
"append",
"(",
"ped_info",
")",
"data",
"[",
"'cases'",
"]",
"=",
"get_case_groups",
"(",
"adapter",
",",
"total_sliced_cases",
",",
"institute_id",
"=",
"institute_id",
",",
"slice_query",
"=",
"slice_query",
")",
"data",
"[",
"'analysis_types'",
"]",
"=",
"get_analysis_types",
"(",
"adapter",
",",
"total_sliced_cases",
",",
"institute_id",
"=",
"institute_id",
",",
"slice_query",
"=",
"slice_query",
")",
"overview",
"=",
"[",
"{",
"'title'",
":",
"'Phenotype terms'",
",",
"'count'",
":",
"general_sliced_info",
"[",
"'phenotype_cases'",
"]",
",",
"'percent'",
":",
"general_sliced_info",
"[",
"'phenotype_cases'",
"]",
"/",
"total_sliced_cases",
",",
"}",
",",
"{",
"'title'",
":",
"'Causative variants'",
",",
"'count'",
":",
"general_sliced_info",
"[",
"'causative_cases'",
"]",
",",
"'percent'",
":",
"general_sliced_info",
"[",
"'causative_cases'",
"]",
"/",
"total_sliced_cases",
",",
"}",
",",
"{",
"'title'",
":",
"'Pinned variants'",
",",
"'count'",
":",
"general_sliced_info",
"[",
"'pinned_cases'",
"]",
",",
"'percent'",
":",
"general_sliced_info",
"[",
"'pinned_cases'",
"]",
"/",
"total_sliced_cases",
",",
"}",
",",
"{",
"'title'",
":",
"'Cohort tag'",
",",
"'count'",
":",
"general_sliced_info",
"[",
"'cohort_cases'",
"]",
",",
"'percent'",
":",
"general_sliced_info",
"[",
"'cohort_cases'",
"]",
"/",
"total_sliced_cases",
",",
"}",
"]",
"# Data from \"Variant statistics tab\" is not filtered by slice_query and numbers will",
"# reflect verified variants in all available cases for an institute",
"general_info",
"=",
"get_general_case_info",
"(",
"adapter",
",",
"institute_id",
"=",
"institute_id",
")",
"total_cases",
"=",
"general_info",
"[",
"'total_cases'",
"]",
"sliced_case_ids",
"=",
"general_sliced_info",
"[",
"'case_ids'",
"]",
"verified_query",
"=",
"{",
"'verb'",
":",
"'validate'",
",",
"}",
"if",
"institute_id",
":",
"# filter by institute if users wishes so",
"verified_query",
"[",
"'institute'",
"]",
"=",
"institute_id",
"# Case level information",
"sliced_validation_cases",
"=",
"set",
"(",
")",
"sliced_validated_cases",
"=",
"set",
"(",
")",
"# Variant level information",
"validated_tp",
"=",
"set",
"(",
")",
"validated_fp",
"=",
"set",
"(",
")",
"var_valid_orders",
"=",
"0",
"# use this counter to count 'True Positive', 'False positive' and 'Not validated' vars",
"validate_events",
"=",
"adapter",
".",
"event_collection",
".",
"find",
"(",
"verified_query",
")",
"for",
"validate_event",
"in",
"list",
"(",
"validate_events",
")",
":",
"case_id",
"=",
"validate_event",
".",
"get",
"(",
"'case'",
")",
"var_obj",
"=",
"adapter",
".",
"variant",
"(",
"case_id",
"=",
"case_id",
",",
"document_id",
"=",
"validate_event",
"[",
"'variant_id'",
"]",
")",
"if",
"var_obj",
":",
"# Don't take into account variants which have been removed from db",
"var_valid_orders",
"+=",
"1",
"if",
"case_id",
"in",
"sliced_case_ids",
":",
"sliced_validation_cases",
".",
"add",
"(",
"case_id",
")",
"# add to the set. Can't add same id twice since it'a a set",
"validation",
"=",
"var_obj",
".",
"get",
"(",
"'validation'",
")",
"if",
"validation",
"and",
"validation",
"in",
"[",
"'True positive'",
",",
"'False positive'",
"]",
":",
"if",
"case_id",
"in",
"sliced_case_ids",
":",
"sliced_validated_cases",
".",
"add",
"(",
"case_id",
")",
"if",
"validation",
"==",
"'True positive'",
":",
"validated_tp",
".",
"add",
"(",
"var_obj",
"[",
"'_id'",
"]",
")",
"elif",
"validation",
"==",
"'False positive'",
":",
"validated_fp",
".",
"add",
"(",
"var_obj",
"[",
"'_id'",
"]",
")",
"n_validation_cases",
"=",
"len",
"(",
"sliced_validation_cases",
")",
"n_validated_cases",
"=",
"len",
"(",
"sliced_validated_cases",
")",
"# append",
"overview",
".",
"append",
"(",
"{",
"'title'",
":",
"'Validation ordered'",
",",
"'count'",
":",
"n_validation_cases",
",",
"'percent'",
":",
"n_validation_cases",
"/",
"total_sliced_cases",
",",
"}",
")",
"overview",
".",
"append",
"(",
"{",
"'title'",
":",
"'Validated cases (TP + FP)'",
",",
"'count'",
":",
"n_validated_cases",
",",
"'percent'",
":",
"n_validated_cases",
"/",
"total_sliced_cases",
",",
"}",
")",
"data",
"[",
"'overview'",
"]",
"=",
"overview",
"variants",
"=",
"[",
"]",
"nr_validated",
"=",
"len",
"(",
"validated_tp",
")",
"+",
"len",
"(",
"validated_fp",
")",
"variants",
".",
"append",
"(",
"{",
"'title'",
":",
"'Validation ordered'",
",",
"'count'",
":",
"var_valid_orders",
",",
"'percent'",
":",
"1",
"}",
")",
"# taking into account that var_valid_orders might be 0:",
"percent_validated_tp",
"=",
"0",
"percent_validated_fp",
"=",
"0",
"if",
"var_valid_orders",
":",
"percent_validated_tp",
"=",
"len",
"(",
"validated_tp",
")",
"/",
"var_valid_orders",
"percent_validated_fp",
"=",
"len",
"(",
"validated_fp",
")",
"/",
"var_valid_orders",
"variants",
".",
"append",
"(",
"{",
"'title'",
":",
"'Validated True Positive'",
",",
"'count'",
":",
"len",
"(",
"validated_tp",
")",
",",
"'percent'",
":",
"percent_validated_tp",
",",
"}",
")",
"variants",
".",
"append",
"(",
"{",
"'title'",
":",
"'Validated False Positive'",
",",
"'count'",
":",
"len",
"(",
"validated_fp",
")",
",",
"'percent'",
":",
"percent_validated_fp",
",",
"}",
")",
"data",
"[",
"'variants'",
"]",
"=",
"variants",
"return",
"data"
] |
90a551e2e1653a319e654c2405c2866f93d0ebb9
|
test
|
get_general_case_info
|
Return general information about cases
Args:
adapter(adapter.MongoAdapter)
institute_id(str)
slice_query(str): Query to filter cases to obtain statistics for.
Returns:
general(dict)
|
scout/server/blueprints/dashboard/controllers.py
|
def get_general_case_info(adapter, institute_id=None, slice_query=None):
"""Return general information about cases
Args:
adapter(adapter.MongoAdapter)
institute_id(str)
slice_query(str): Query to filter cases to obtain statistics for.
Returns:
general(dict)
"""
general = {}
# Potentially sensitive slice queries are assumed allowed if we have got this far
name_query = slice_query
cases = adapter.cases(owner=institute_id, name_query=name_query)
phenotype_cases = 0
causative_cases = 0
pinned_cases = 0
cohort_cases = 0
pedigree = {
1: {
'title': 'Single',
'count': 0
},
2: {
'title': 'Duo',
'count': 0
},
3: {
'title': 'Trio',
'count': 0
},
'many': {
'title': 'Many',
'count': 0
},
}
case_ids = set()
total_cases = 0
for total_cases, case in enumerate(cases, 1):
# If only looking at one institute we need to save the case ids
if institute_id:
case_ids.add(case['_id'])
if case.get('phenotype_terms'):
phenotype_cases += 1
if case.get('causatives'):
causative_cases += 1
if case.get('suspects'):
pinned_cases += 1
if case.get('cohorts'):
cohort_cases += 1
nr_individuals = len(case.get('individuals',[]))
if nr_individuals == 0:
continue
if nr_individuals > 3:
pedigree['many']['count'] += 1
else:
pedigree[nr_individuals]['count'] += 1
general['total_cases'] = total_cases
general['phenotype_cases'] = phenotype_cases
general['causative_cases'] = causative_cases
general['pinned_cases'] = pinned_cases
general['cohort_cases'] = cohort_cases
general['pedigree'] = pedigree
general['case_ids'] = case_ids
return general
|
def get_general_case_info(adapter, institute_id=None, slice_query=None):
"""Return general information about cases
Args:
adapter(adapter.MongoAdapter)
institute_id(str)
slice_query(str): Query to filter cases to obtain statistics for.
Returns:
general(dict)
"""
general = {}
# Potentially sensitive slice queries are assumed allowed if we have got this far
name_query = slice_query
cases = adapter.cases(owner=institute_id, name_query=name_query)
phenotype_cases = 0
causative_cases = 0
pinned_cases = 0
cohort_cases = 0
pedigree = {
1: {
'title': 'Single',
'count': 0
},
2: {
'title': 'Duo',
'count': 0
},
3: {
'title': 'Trio',
'count': 0
},
'many': {
'title': 'Many',
'count': 0
},
}
case_ids = set()
total_cases = 0
for total_cases, case in enumerate(cases, 1):
# If only looking at one institute we need to save the case ids
if institute_id:
case_ids.add(case['_id'])
if case.get('phenotype_terms'):
phenotype_cases += 1
if case.get('causatives'):
causative_cases += 1
if case.get('suspects'):
pinned_cases += 1
if case.get('cohorts'):
cohort_cases += 1
nr_individuals = len(case.get('individuals',[]))
if nr_individuals == 0:
continue
if nr_individuals > 3:
pedigree['many']['count'] += 1
else:
pedigree[nr_individuals]['count'] += 1
general['total_cases'] = total_cases
general['phenotype_cases'] = phenotype_cases
general['causative_cases'] = causative_cases
general['pinned_cases'] = pinned_cases
general['cohort_cases'] = cohort_cases
general['pedigree'] = pedigree
general['case_ids'] = case_ids
return general
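For reference, an illustrative shape of the dict the function above returns; every count below is invented:
# Illustrative return value -- all numbers here are made up.
general = {
    'total_cases': 42,
    'phenotype_cases': 10,
    'causative_cases': 5,
    'pinned_cases': 7,
    'cohort_cases': 3,
    'pedigree': {1: {'title': 'Single', 'count': 12},
                 2: {'title': 'Duo', 'count': 9},
                 3: {'title': 'Trio', 'count': 18},
                 'many': {'title': 'Many', 'count': 3}},
    'case_ids': set(),  # only filled in when institute_id is given
}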
|
[
"Return",
"general",
"information",
"about",
"cases"
] |
Clinical-Genomics/scout
|
python
|
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/dashboard/controllers.py#L165-L240
|
[
"def",
"get_general_case_info",
"(",
"adapter",
",",
"institute_id",
"=",
"None",
",",
"slice_query",
"=",
"None",
")",
":",
"general",
"=",
"{",
"}",
"# Potentially sensitive slice queries are assumed allowed if we have got this far",
"name_query",
"=",
"slice_query",
"cases",
"=",
"adapter",
".",
"cases",
"(",
"owner",
"=",
"institute_id",
",",
"name_query",
"=",
"name_query",
")",
"phenotype_cases",
"=",
"0",
"causative_cases",
"=",
"0",
"pinned_cases",
"=",
"0",
"cohort_cases",
"=",
"0",
"pedigree",
"=",
"{",
"1",
":",
"{",
"'title'",
":",
"'Single'",
",",
"'count'",
":",
"0",
"}",
",",
"2",
":",
"{",
"'title'",
":",
"'Duo'",
",",
"'count'",
":",
"0",
"}",
",",
"3",
":",
"{",
"'title'",
":",
"'Trio'",
",",
"'count'",
":",
"0",
"}",
",",
"'many'",
":",
"{",
"'title'",
":",
"'Many'",
",",
"'count'",
":",
"0",
"}",
",",
"}",
"case_ids",
"=",
"set",
"(",
")",
"total_cases",
"=",
"0",
"for",
"total_cases",
",",
"case",
"in",
"enumerate",
"(",
"cases",
",",
"1",
")",
":",
"# If only looking at one institute we need to save the case ids",
"if",
"institute_id",
":",
"case_ids",
".",
"add",
"(",
"case",
"[",
"'_id'",
"]",
")",
"if",
"case",
".",
"get",
"(",
"'phenotype_terms'",
")",
":",
"phenotype_cases",
"+=",
"1",
"if",
"case",
".",
"get",
"(",
"'causatives'",
")",
":",
"causative_cases",
"+=",
"1",
"if",
"case",
".",
"get",
"(",
"'suspects'",
")",
":",
"pinned_cases",
"+=",
"1",
"if",
"case",
".",
"get",
"(",
"'cohorts'",
")",
":",
"cohort_cases",
"+=",
"1",
"nr_individuals",
"=",
"len",
"(",
"case",
".",
"get",
"(",
"'individuals'",
",",
"[",
"]",
")",
")",
"if",
"nr_individuals",
"==",
"0",
":",
"continue",
"if",
"nr_individuals",
">",
"3",
":",
"pedigree",
"[",
"'many'",
"]",
"[",
"'count'",
"]",
"+=",
"1",
"else",
":",
"pedigree",
"[",
"nr_individuals",
"]",
"[",
"'count'",
"]",
"+=",
"1",
"general",
"[",
"'total_cases'",
"]",
"=",
"total_cases",
"general",
"[",
"'phenotype_cases'",
"]",
"=",
"phenotype_cases",
"general",
"[",
"'causative_cases'",
"]",
"=",
"causative_cases",
"general",
"[",
"'pinned_cases'",
"]",
"=",
"pinned_cases",
"general",
"[",
"'cohort_cases'",
"]",
"=",
"cohort_cases",
"general",
"[",
"'pedigree'",
"]",
"=",
"pedigree",
"general",
"[",
"'case_ids'",
"]",
"=",
"case_ids",
"return",
"general"
] |
90a551e2e1653a319e654c2405c2866f93d0ebb9
|
test
|
get_case_groups
|
Return the information about case groups
Args:
store(adapter.MongoAdapter)
total_cases(int): Total number of cases
slice_query(str): Query to filter cases to obtain statistics for.
Returns:
cases(dict):
|
scout/server/blueprints/dashboard/controllers.py
|
def get_case_groups(adapter, total_cases, institute_id=None, slice_query=None):
"""Return the information about case groups
Args:
store(adapter.MongoAdapter)
total_cases(int): Total number of cases
slice_query(str): Query to filter cases to obtain statistics for.
Returns:
cases(dict):
"""
# Create a group with all cases in the database
cases = [{'status': 'all', 'count': total_cases, 'percent': 1}]
# Group the cases based on their status
pipeline = []
group = {'$group' : {'_id': '$status', 'count': {'$sum': 1}}}
subquery = {}
if institute_id and slice_query:
subquery = adapter.cases(owner=institute_id, name_query=slice_query,
yield_query=True)
elif institute_id:
subquery = adapter.cases(owner=institute_id, yield_query=True)
elif slice_query:
subquery = adapter.cases(name_query=slice_query, yield_query=True)
query = {'$match': subquery} if subquery else {}
if query:
pipeline.append(query)
pipeline.append(group)
res = adapter.case_collection.aggregate(pipeline)
for status_group in res:
cases.append({'status': status_group['_id'],
'count': status_group['count'],
'percent': status_group['count'] / total_cases})
return cases
|
def get_case_groups(adapter, total_cases, institute_id=None, slice_query=None):
"""Return the information about case groups
Args:
store(adapter.MongoAdapter)
total_cases(int): Total number of cases
slice_query(str): Query to filter cases to obtain statistics for.
Returns:
cases(dict):
"""
# Create a group with all cases in the database
cases = [{'status': 'all', 'count': total_cases, 'percent': 1}]
# Group the cases based on their status
pipeline = []
group = {'$group' : {'_id': '$status', 'count': {'$sum': 1}}}
subquery = {}
if institute_id and slice_query:
subquery = adapter.cases(owner=institute_id, name_query=slice_query,
yield_query=True)
elif institute_id:
subquery = adapter.cases(owner=institute_id, yield_query=True)
elif slice_query:
subquery = adapter.cases(name_query=slice_query, yield_query=True)
query = {'$match': subquery} if subquery else {}
if query:
pipeline.append(query)
pipeline.append(group)
res = adapter.case_collection.aggregate(pipeline)
for status_group in res:
cases.append({'status': status_group['_id'],
'count': status_group['count'],
'percent': status_group['count'] / total_cases})
return cases
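For clarity, a sketch of the aggregation pipeline the function above hands to case_collection.aggregate when a filter applies; the $match contents depend on what adapter.cases(..., yield_query=True) returns, so the subquery below is an invented example:
# Illustrative pipeline shape -- the subquery dict is a made-up stand-in.
subquery = {'owner': 'cust000'}  # invented example filter
pipeline = [
    {'$match': subquery},                                   # present only when a filter was built
    {'$group': {'_id': '$status', 'count': {'$sum': 1}}},   # one row per case status
]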
|
[
"Return",
"the",
"information",
"about",
"case",
"groups"
] |
Clinical-Genomics/scout
|
python
|
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/dashboard/controllers.py#L243-L282
|
[
"def",
"get_case_groups",
"(",
"adapter",
",",
"total_cases",
",",
"institute_id",
"=",
"None",
",",
"slice_query",
"=",
"None",
")",
":",
"# Create a group with all cases in the database",
"cases",
"=",
"[",
"{",
"'status'",
":",
"'all'",
",",
"'count'",
":",
"total_cases",
",",
"'percent'",
":",
"1",
"}",
"]",
"# Group the cases based on their status",
"pipeline",
"=",
"[",
"]",
"group",
"=",
"{",
"'$group'",
":",
"{",
"'_id'",
":",
"'$status'",
",",
"'count'",
":",
"{",
"'$sum'",
":",
"1",
"}",
"}",
"}",
"subquery",
"=",
"{",
"}",
"if",
"institute_id",
"and",
"slice_query",
":",
"subquery",
"=",
"adapter",
".",
"cases",
"(",
"owner",
"=",
"institute_id",
",",
"name_query",
"=",
"slice_query",
",",
"yield_query",
"=",
"True",
")",
"elif",
"institute_id",
":",
"subquery",
"=",
"adapter",
".",
"cases",
"(",
"owner",
"=",
"institute_id",
",",
"yield_query",
"=",
"True",
")",
"elif",
"slice_query",
":",
"subquery",
"=",
"adapter",
".",
"cases",
"(",
"name_query",
"=",
"slice_query",
",",
"yield_query",
"=",
"True",
")",
"query",
"=",
"{",
"'$match'",
":",
"subquery",
"}",
"if",
"subquery",
"else",
"{",
"}",
"if",
"query",
":",
"pipeline",
".",
"append",
"(",
"query",
")",
"pipeline",
".",
"append",
"(",
"group",
")",
"res",
"=",
"adapter",
".",
"case_collection",
".",
"aggregate",
"(",
"pipeline",
")",
"for",
"status_group",
"in",
"res",
":",
"cases",
".",
"append",
"(",
"{",
"'status'",
":",
"status_group",
"[",
"'_id'",
"]",
",",
"'count'",
":",
"status_group",
"[",
"'count'",
"]",
",",
"'percent'",
":",
"status_group",
"[",
"'count'",
"]",
"/",
"total_cases",
"}",
")",
"return",
"cases"
] |
90a551e2e1653a319e654c2405c2866f93d0ebb9
|
test
|
get_analysis_types
|
Return information about analysis types.
Group cases based on analysis type for the individuals.
Args:
adapter(adapter.MongoAdapter)
total_cases(int): Total number of cases
institute_id(str)
slice_query(str): Query to filter cases to obtain statistics for.
Returns:
analysis_types array of hashes with name: analysis_type(str), count: count(int)
|
scout/server/blueprints/dashboard/controllers.py
|
def get_analysis_types(adapter, total_cases, institute_id=None, slice_query=None):
""" Return information about analysis types.
Group cases based on analysis type for the individuals.
Args:
adapter(adapter.MongoAdapter)
total_cases(int): Total number of cases
institute_id(str)
slice_query(str): Query to filter cases to obtain statistics for.
Returns:
analysis_types array of hashes with name: analysis_type(str), count: count(int)
"""
# Group cases based on analysis type of the individuals
query = {}
subquery = {}
if institute_id and slice_query:
subquery = adapter.cases(owner=institute_id, name_query=slice_query,
yield_query=True)
elif institute_id:
subquery = adapter.cases(owner=institute_id, yield_query=True)
elif slice_query:
subquery = adapter.cases(name_query=slice_query, yield_query=True)
query = {'$match': subquery}
pipeline = []
if query:
pipeline.append(query)
pipeline.append({'$unwind': '$individuals'})
pipeline.append({'$group': {'_id': '$individuals.analysis_type', 'count': {'$sum': 1}}})
analysis_query = adapter.case_collection.aggregate(pipeline)
analysis_types = [{'name': group['_id'], 'count': group['count']} for group in analysis_query]
return analysis_types
|
def get_analysis_types(adapter, total_cases, institute_id=None, slice_query=None):
""" Return information about analysis types.
Group cases based on analysis type for the individuals.
Args:
adapter(adapter.MongoAdapter)
total_cases(int): Total number of cases
institute_id(str)
slice_query(str): Query to filter cases to obtain statistics for.
Returns:
analysis_types array of hashes with name: analysis_type(str), count: count(int)
"""
# Group cases based on analysis type of the individuals
query = {}
subquery = {}
if institute_id and slice_query:
subquery = adapter.cases(owner=institute_id, name_query=slice_query,
yield_query=True)
elif institute_id:
subquery = adapter.cases(owner=institute_id, yield_query=True)
elif slice_query:
subquery = adapter.cases(name_query=slice_query, yield_query=True)
query = {'$match': subquery}
pipeline = []
if query:
pipeline.append(query)
pipeline.append({'$unwind': '$individuals'})
pipeline.append({'$group': {'_id': '$individuals.analysis_type', 'count': {'$sum': 1}}})
analysis_query = adapter.case_collection.aggregate(pipeline)
analysis_types = [{'name': group['_id'], 'count': group['count']} for group in analysis_query]
return analysis_types
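The pipeline built above, spelled out; $unwind fans each case out to one document per individual before grouping on the individual's analysis type (the subquery below is an invented example, as before):
# Illustrative pipeline shape -- the subquery dict is a made-up stand-in.
subquery = {'owner': 'cust000'}  # invented example filter
pipeline = [
    {'$match': subquery},         # present only when a filter was built
    {'$unwind': '$individuals'},  # one document per individual
    {'$group': {'_id': '$individuals.analysis_type', 'count': {'$sum': 1}}},
]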
|
[
"Return",
"information",
"about",
"analysis",
"types",
".",
"Group",
"cases",
"based",
"on",
"analysis",
"type",
"for",
"the",
"individuals",
".",
"Args",
":",
"adapter",
"(",
"adapter",
".",
"MongoAdapter",
")",
"total_cases",
"(",
"int",
")",
":",
"Total",
"number",
"of",
"cases",
"institute_id",
"(",
"str",
")",
"slice_query",
"(",
"str",
")",
":",
"Query",
"to",
"filter",
"cases",
"to",
"obtain",
"statistics",
"for",
".",
"Returns",
":",
"analysis_types",
"array",
"of",
"hashes",
"with",
"name",
":",
"analysis_type",
"(",
"str",
")",
"count",
":",
"count",
"(",
"int",
")"
] |
Clinical-Genomics/scout
|
python
|
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/dashboard/controllers.py#L284-L319
|
[
"def",
"get_analysis_types",
"(",
"adapter",
",",
"total_cases",
",",
"institute_id",
"=",
"None",
",",
"slice_query",
"=",
"None",
")",
":",
"# Group cases based on analysis type of the individuals",
"query",
"=",
"{",
"}",
"subquery",
"=",
"{",
"}",
"if",
"institute_id",
"and",
"slice_query",
":",
"subquery",
"=",
"adapter",
".",
"cases",
"(",
"owner",
"=",
"institute_id",
",",
"name_query",
"=",
"slice_query",
",",
"yield_query",
"=",
"True",
")",
"elif",
"institute_id",
":",
"subquery",
"=",
"adapter",
".",
"cases",
"(",
"owner",
"=",
"institute_id",
",",
"yield_query",
"=",
"True",
")",
"elif",
"slice_query",
":",
"subquery",
"=",
"adapter",
".",
"cases",
"(",
"name_query",
"=",
"slice_query",
",",
"yield_query",
"=",
"True",
")",
"query",
"=",
"{",
"'$match'",
":",
"subquery",
"}",
"pipeline",
"=",
"[",
"]",
"if",
"query",
":",
"pipeline",
".",
"append",
"(",
"query",
")",
"pipeline",
".",
"append",
"(",
"{",
"'$unwind'",
":",
"'$individuals'",
"}",
")",
"pipeline",
".",
"append",
"(",
"{",
"'$group'",
":",
"{",
"'_id'",
":",
"'$individuals.analysis_type'",
",",
"'count'",
":",
"{",
"'$sum'",
":",
"1",
"}",
"}",
"}",
")",
"analysis_query",
"=",
"adapter",
".",
"case_collection",
".",
"aggregate",
"(",
"pipeline",
")",
"analysis_types",
"=",
"[",
"{",
"'name'",
":",
"group",
"[",
"'_id'",
"]",
",",
"'count'",
":",
"group",
"[",
"'count'",
"]",
"}",
"for",
"group",
"in",
"analysis_query",
"]",
"return",
"analysis_types"
] |
90a551e2e1653a319e654c2405c2866f93d0ebb9
|
test
|
JSONResponseMixin.render_to_json_response
|
Returns a JSON response, transforming 'context' to make the payload.
|
happenings/utils/mixins.py
|
def render_to_json_response(self, context, **kwargs):
"""
Returns a JSON response, transforming 'context' to make the payload.
"""
return HttpResponse(
self.convert_context_to_json(context),
content_type='application/json',
**kwargs
)
|
def render_to_json_response(self, context, **kwargs):
"""
Returns a JSON response, transforming 'context' to make the payload.
"""
return HttpResponse(
self.convert_context_to_json(context),
content_type='application/json',
**kwargs
)
|
[
"Returns",
"a",
"JSON",
"response",
"transforming",
"context",
"to",
"make",
"the",
"payload",
"."
] |
wreckage/django-happenings
|
python
|
https://github.com/wreckage/django-happenings/blob/7bca5576efa6cd4c4e87356bf9e5b8cd538ae91d/happenings/utils/mixins.py#L17-L25
|
[
"def",
"render_to_json_response",
"(",
"self",
",",
"context",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"HttpResponse",
"(",
"self",
".",
"convert_context_to_json",
"(",
"context",
")",
",",
"content_type",
"=",
"'application/json'",
",",
"*",
"*",
"kwargs",
")"
] |
7bca5576efa6cd4c4e87356bf9e5b8cd538ae91d
|
test
|
JSONResponseMixin.convert_context_to_json
|
Get what we want out of the context dict and convert that to a JSON
object. Note that this does no object serialization b/c we're
not sending any objects.
|
happenings/utils/mixins.py
|
def convert_context_to_json(self, context):
"""
Get what we want out of the context dict and convert that to a JSON
object. Note that this does no object serialization b/c we're
not sending any objects.
"""
if 'month/shift' in self.request.path: # month calendar
return dumps(self.get_month_calendar_dict(context))
elif 'event-list/shift' in self.request.path: # month event list
return dumps(self.get_month_event_list_dict(context))
elif 'cal-and-list/shift' in self.request.path:
cal = self.get_month_calendar_dict(context)
l = self.get_month_event_list_dict(context)
cal.update(l)
return dumps(cal)
else: # day list view
for key, val in context.items():
if isinstance(val, Promise):
context[key] = force_text(val)
return dumps(self.get_day_context_dict(context))
|
def convert_context_to_json(self, context):
"""
Get what we want out of the context dict and convert that to a JSON
object. Note that this does no object serialization b/c we're
not sending any objects.
"""
if 'month/shift' in self.request.path: # month calendar
return dumps(self.get_month_calendar_dict(context))
elif 'event-list/shift' in self.request.path: # month event list
return dumps(self.get_month_event_list_dict(context))
elif 'cal-and-list/shift' in self.request.path:
cal = self.get_month_calendar_dict(context)
l = self.get_month_event_list_dict(context)
cal.update(l)
return dumps(cal)
else: # day list view
for key, val in context.items():
if isinstance(val, Promise):
context[key] = force_text(val)
return dumps(self.get_day_context_dict(context))
|
[
"Get",
"what",
"we",
"want",
"out",
"of",
"the",
"context",
"dict",
"and",
"convert",
"that",
"to",
"a",
"JSON",
"object",
".",
"Note",
"that",
"this",
"does",
"no",
"object",
"serialization",
"b",
"/",
"c",
"we",
"re",
"not",
"sending",
"any",
"objects",
"."
] |
wreckage/django-happenings
|
python
|
https://github.com/wreckage/django-happenings/blob/7bca5576efa6cd4c4e87356bf9e5b8cd538ae91d/happenings/utils/mixins.py#L27-L47
|
[
"def",
"convert_context_to_json",
"(",
"self",
",",
"context",
")",
":",
"if",
"'month/shift'",
"in",
"self",
".",
"request",
".",
"path",
":",
"# month calendar",
"return",
"dumps",
"(",
"self",
".",
"get_month_calendar_dict",
"(",
"context",
")",
")",
"elif",
"'event-list/shift'",
"in",
"self",
".",
"request",
".",
"path",
":",
"# month event list",
"return",
"dumps",
"(",
"self",
".",
"get_month_event_list_dict",
"(",
"context",
")",
")",
"elif",
"'cal-and-list/shift'",
"in",
"self",
".",
"request",
".",
"path",
":",
"cal",
"=",
"self",
".",
"get_month_calendar_dict",
"(",
"context",
")",
"l",
"=",
"self",
".",
"get_month_event_list_dict",
"(",
"context",
")",
"cal",
".",
"update",
"(",
"l",
")",
"return",
"dumps",
"(",
"cal",
")",
"else",
":",
"# day list view",
"for",
"key",
",",
"val",
"in",
"context",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"Promise",
")",
":",
"context",
"[",
"key",
"]",
"=",
"force_text",
"(",
"val",
")",
"return",
"dumps",
"(",
"self",
".",
"get_day_context_dict",
"(",
"context",
")",
")"
] |
7bca5576efa6cd4c4e87356bf9e5b8cd538ae91d
|
test
|
EventMonthView.get_year_and_month
|
Get the year and month. First tries from kwargs, then from
querystrings. If none, or if cal_ignore qs is specified,
sets year and month to this year and this month.
|
happenings/views.py
|
def get_year_and_month(self, net, qs, **kwargs):
"""
Get the year and month. First tries from kwargs, then from
querystrings. If none, or if cal_ignore qs is specified,
sets year and month to this year and this month.
"""
now = c.get_now()
year = now.year
month = now.month + net
month_orig = None
if 'cal_ignore=true' not in qs:
if 'year' in self.kwargs and 'month' in self.kwargs: # try kwargs
year, month_orig = map(
int, (self.kwargs['year'], self.kwargs['month'])
)
month = month_orig + net
else:
try: # try querystring
year = int(self.request.GET['cal_year'])
month_orig = int(self.request.GET['cal_month'])
month = month_orig + net
except Exception:
pass
# return the year and month, and any errors that may have occurred due
# to an invalid month/year being given.
return c.clean_year_month(year, month, month_orig)
|
def get_year_and_month(self, net, qs, **kwargs):
"""
Get the year and month. First tries from kwargs, then from
querystrings. If none, or if cal_ignore qs is specified,
sets year and month to this year and this month.
"""
now = c.get_now()
year = now.year
month = now.month + net
month_orig = None
if 'cal_ignore=true' not in qs:
if 'year' in self.kwargs and 'month' in self.kwargs: # try kwargs
year, month_orig = map(
int, (self.kwargs['year'], self.kwargs['month'])
)
month = month_orig + net
else:
try: # try querystring
year = int(self.request.GET['cal_year'])
month_orig = int(self.request.GET['cal_month'])
month = month_orig + net
except Exception:
pass
# return the year and month, and any errors that may have occurred due
# to an invalid month/year being given.
return c.clean_year_month(year, month, month_orig)
|
[
"Get",
"the",
"year",
"and",
"month",
".",
"First",
"tries",
"from",
"kwargs",
"then",
"from",
"querystrings",
".",
"If",
"none",
"or",
"if",
"cal_ignore",
"qs",
"is",
"specified",
"sets",
"year",
"and",
"month",
"to",
"this",
"year",
"and",
"this",
"month",
"."
] |
wreckage/django-happenings
|
python
|
https://github.com/wreckage/django-happenings/blob/7bca5576efa6cd4c4e87356bf9e5b8cd538ae91d/happenings/views.py#L54-L80
|
[
"def",
"get_year_and_month",
"(",
"self",
",",
"net",
",",
"qs",
",",
"*",
"*",
"kwargs",
")",
":",
"now",
"=",
"c",
".",
"get_now",
"(",
")",
"year",
"=",
"now",
".",
"year",
"month",
"=",
"now",
".",
"month",
"+",
"net",
"month_orig",
"=",
"None",
"if",
"'cal_ignore=true'",
"not",
"in",
"qs",
":",
"if",
"'year'",
"and",
"'month'",
"in",
"self",
".",
"kwargs",
":",
"# try kwargs",
"year",
",",
"month_orig",
"=",
"map",
"(",
"int",
",",
"(",
"self",
".",
"kwargs",
"[",
"'year'",
"]",
",",
"self",
".",
"kwargs",
"[",
"'month'",
"]",
")",
")",
"month",
"=",
"month_orig",
"+",
"net",
"else",
":",
"try",
":",
"# try querystring",
"year",
"=",
"int",
"(",
"self",
".",
"request",
".",
"GET",
"[",
"'cal_year'",
"]",
")",
"month_orig",
"=",
"int",
"(",
"self",
".",
"request",
".",
"GET",
"[",
"'cal_month'",
"]",
")",
"month",
"=",
"month_orig",
"+",
"net",
"except",
"Exception",
":",
"pass",
"# return the year and month, and any errors that may have occurred do",
"# to an invalid month/year being given.",
"return",
"c",
".",
"clean_year_month",
"(",
"year",
",",
"month",
",",
"month_orig",
")"
] |
7bca5576efa6cd4c4e87356bf9e5b8cd538ae91d
|
test
|
EventDayView.check_for_cancelled_events
|
Check if any events are cancelled on the given date 'd'.
|
happenings/views.py
|
def check_for_cancelled_events(self, d):
"""Check if any events are cancelled on the given date 'd'."""
for event in self.events:
for cn in event.cancellations.all():
if cn.date == d:
event.title += ' (CANCELLED)'
|
def check_for_cancelled_events(self, d):
"""Check if any events are cancelled on the given date 'd'."""
for event in self.events:
for cn in event.cancellations.all():
if cn.date == d:
event.title += ' (CANCELLED)'
|
[
"Check",
"if",
"any",
"events",
"are",
"cancelled",
"on",
"the",
"given",
"date",
"d",
"."
] |
wreckage/django-happenings
|
python
|
https://github.com/wreckage/django-happenings/blob/7bca5576efa6cd4c4e87356bf9e5b8cd538ae91d/happenings/views.py#L145-L150
|
[
"def",
"check_for_cancelled_events",
"(",
"self",
",",
"d",
")",
":",
"for",
"event",
"in",
"self",
".",
"events",
":",
"for",
"cn",
"in",
"event",
".",
"cancellations",
".",
"all",
"(",
")",
":",
"if",
"cn",
".",
"date",
"==",
"d",
":",
"event",
".",
"title",
"+=",
"' (CANCELLED)'"
] |
7bca5576efa6cd4c4e87356bf9e5b8cd538ae91d
|
test
|
HpoHandler.load_hpo_term
|
Add a hpo object
Arguments:
hpo_obj(dict)
|
scout/adapter/mongo/hpo.py
|
def load_hpo_term(self, hpo_obj):
"""Add a hpo object
Arguments:
hpo_obj(dict)
"""
LOG.debug("Loading hpo term %s into database", hpo_obj['_id'])
try:
self.hpo_term_collection.insert_one(hpo_obj)
except DuplicateKeyError as err:
raise IntegrityError("Hpo term %s already exists in database".format(hpo_obj['_id']))
LOG.debug("Hpo term saved")
|
def load_hpo_term(self, hpo_obj):
"""Add a hpo object
Arguments:
hpo_obj(dict)
"""
LOG.debug("Loading hpo term %s into database", hpo_obj['_id'])
try:
self.hpo_term_collection.insert_one(hpo_obj)
except DuplicateKeyError as err:
raise IntegrityError("Hpo term %s already exists in database".format(hpo_obj['_id']))
LOG.debug("Hpo term saved")
|
[
"Add",
"a",
"hpo",
"object"
] |
Clinical-Genomics/scout
|
python
|
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/hpo.py#L16-L28
|
[
"def",
"load_hpo_term",
"(",
"self",
",",
"hpo_obj",
")",
":",
"LOG",
".",
"debug",
"(",
"\"Loading hpo term %s into database\"",
",",
"hpo_obj",
"[",
"'_id'",
"]",
")",
"try",
":",
"self",
".",
"hpo_term_collection",
".",
"insert_one",
"(",
"hpo_obj",
")",
"except",
"DuplicateKeyError",
"as",
"err",
":",
"raise",
"IntegrityError",
"(",
"\"Hpo term %s already exists in database\"",
".",
"format",
"(",
"hpo_obj",
"[",
"'_id'",
"]",
")",
")",
"LOG",
".",
"debug",
"(",
"\"Hpo term saved\"",
")"
] |
90a551e2e1653a319e654c2405c2866f93d0ebb9
|
test
|
HpoHandler.load_hpo_bulk
|
Add hpo objects in bulk
Arguments:
hpo_bulk(list(scout.models.HpoTerm))
Returns:
result: pymongo bulkwrite result
|
scout/adapter/mongo/hpo.py
|
def load_hpo_bulk(self, hpo_bulk):
"""Add a hpo object
Arguments:
hpo_bulk(list(scout.models.HpoTerm))
Returns:
result: pymongo bulkwrite result
"""
LOG.debug("Loading hpo bulk")
try:
result = self.hpo_term_collection.insert_many(hpo_bulk)
except (DuplicateKeyError, BulkWriteError) as err:
raise IntegrityError(err)
return result
|
def load_hpo_bulk(self, hpo_bulk):
"""Add a hpo object
Arguments:
hpo_bulk(list(scout.models.HpoTerm))
Returns:
result: pymongo bulkwrite result
"""
LOG.debug("Loading hpo bulk")
try:
result = self.hpo_term_collection.insert_many(hpo_bulk)
except (DuplicateKeyError, BulkWriteError) as err:
raise IntegrityError(err)
return result
|
[
"Add",
"a",
"hpo",
"object"
] |
Clinical-Genomics/scout
|
python
|
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/hpo.py#L30-L46
|
[
"def",
"load_hpo_bulk",
"(",
"self",
",",
"hpo_bulk",
")",
":",
"LOG",
".",
"debug",
"(",
"\"Loading hpo bulk\"",
")",
"try",
":",
"result",
"=",
"self",
".",
"hpo_term_collection",
".",
"insert_many",
"(",
"hpo_bulk",
")",
"except",
"(",
"DuplicateKeyError",
",",
"BulkWriteError",
")",
"as",
"err",
":",
"raise",
"IntegrityError",
"(",
"err",
")",
"return",
"result"
] |
90a551e2e1653a319e654c2405c2866f93d0ebb9
|
test
|
HpoHandler.hpo_term
|
Fetch a hpo term
Args:
hpo_id(str)
Returns:
hpo_obj(dict)
|
scout/adapter/mongo/hpo.py
|
def hpo_term(self, hpo_id):
"""Fetch a hpo term
Args:
hpo_id(str)
Returns:
hpo_obj(dict)
"""
LOG.debug("Fetching hpo term %s", hpo_id)
return self.hpo_term_collection.find_one({'_id': hpo_id})
|
def hpo_term(self, hpo_id):
"""Fetch a hpo term
Args:
hpo_id(str)
Returns:
hpo_obj(dict)
"""
LOG.debug("Fetching hpo term %s", hpo_id)
return self.hpo_term_collection.find_one({'_id': hpo_id})
|
[
"Fetch",
"a",
"hpo",
"term"
] |
Clinical-Genomics/scout
|
python
|
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/hpo.py#L48-L59
|
[
"def",
"hpo_term",
"(",
"self",
",",
"hpo_id",
")",
":",
"LOG",
".",
"debug",
"(",
"\"Fetching hpo term %s\"",
",",
"hpo_id",
")",
"return",
"self",
".",
"hpo_term_collection",
".",
"find_one",
"(",
"{",
"'_id'",
":",
"hpo_id",
"}",
")"
] |
90a551e2e1653a319e654c2405c2866f93d0ebb9
|
test
|
HpoHandler.hpo_terms
|
Return all HPO terms
If a query is sent hpo_terms will try to match with regex on term or
description.
Args:
query(str): Part of a hpoterm or description
hpo_term(str): Search for a specific hpo term
limit(int): the number of desired results
Returns:
result(pymongo.Cursor): A cursor with hpo terms
|
scout/adapter/mongo/hpo.py
|
def hpo_terms(self, query=None, hpo_term=None, text=None, limit=None):
"""Return all HPO terms
If a query is sent hpo_terms will try to match with regex on term or
description.
Args:
query(str): Part of a hpoterm or description
hpo_term(str): Search for a specific hpo term
limit(int): the number of desired results
Returns:
result(pymongo.Cursor): A cursor with hpo terms
"""
query_dict = {}
search_term = None
if query:
query_dict = {'$or':
[
{'hpo_id': {'$regex': query, '$options':'i'}},
{'description': {'$regex': query, '$options':'i'}},
]
}
search_term = query
elif text:
new_string = ''
for i, word in enumerate(text.split(' ')):
if i == 0:
new_string += word
else:
new_string += ' \"{0}\"'.format(word)
LOG.info("Search HPO terms with %s", new_string)
query_dict['$text'] = {'$search': new_string}
search_term = text
elif hpo_term:
query_dict['hpo_id'] = hpo_term
search_term = hpo_term
limit = limit or int(10e10)
res = self.hpo_term_collection.find(query_dict).limit(limit).sort('hpo_number',ASCENDING)
LOG.info("Found {0} terms with search word {1}".format(res.count(), search_term))
return res
|
def hpo_terms(self, query=None, hpo_term=None, text=None, limit=None):
"""Return all HPO terms
If a query is sent hpo_terms will try to match with regex on term or
description.
Args:
query(str): Part of a hpoterm or description
hpo_term(str): Search for a specific hpo term
limit(int): the number of desired results
Returns:
result(pymongo.Cursor): A cursor with hpo terms
"""
query_dict = {}
search_term = None
if query:
query_dict = {'$or':
[
{'hpo_id': {'$regex': query, '$options':'i'}},
{'description': {'$regex': query, '$options':'i'}},
]
}
search_term = query
elif text:
new_string = ''
for i, word in enumerate(text.split(' ')):
if i == 0:
new_string += word
else:
new_string += ' \"{0}\"'.format(word)
LOG.info("Search HPO terms with %s", new_string)
query_dict['$text'] = {'$search': new_string}
search_term = text
elif hpo_term:
query_dict['hpo_id'] = hpo_term
search_term = hpo_term
limit = limit or int(10e10)
res = self.hpo_term_collection.find(query_dict).limit(limit).sort('hpo_number',ASCENDING)
LOG.info("Found {0} terms with search word {1}".format(res.count(), search_term))
return res
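A hedged usage sketch for the method above; the query branch regex-matches either the term id or its description, case-insensitively:
# Hypothetical usage sketch -- assumes a connected scout MongoAdapter.
for term in adapter.hpo_terms(query='microceph', limit=5):
    print(term['hpo_id'], term.get('description'))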
|
[
"Return",
"all",
"HPO",
"terms"
] |
Clinical-Genomics/scout
|
python
|
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/hpo.py#L61-L104
|
[
"def",
"hpo_terms",
"(",
"self",
",",
"query",
"=",
"None",
",",
"hpo_term",
"=",
"None",
",",
"text",
"=",
"None",
",",
"limit",
"=",
"None",
")",
":",
"query_dict",
"=",
"{",
"}",
"search_term",
"=",
"None",
"if",
"query",
":",
"query_dict",
"=",
"{",
"'$or'",
":",
"[",
"{",
"'hpo_id'",
":",
"{",
"'$regex'",
":",
"query",
",",
"'$options'",
":",
"'i'",
"}",
"}",
",",
"{",
"'description'",
":",
"{",
"'$regex'",
":",
"query",
",",
"'$options'",
":",
"'i'",
"}",
"}",
",",
"]",
"}",
"search_term",
"=",
"query",
"elif",
"text",
":",
"new_string",
"=",
"''",
"for",
"i",
",",
"word",
"in",
"enumerate",
"(",
"text",
".",
"split",
"(",
"' '",
")",
")",
":",
"if",
"i",
"==",
"0",
":",
"new_string",
"+=",
"word",
"else",
":",
"new_string",
"+=",
"' \\\"{0}\\\"'",
".",
"format",
"(",
"word",
")",
"LOG",
".",
"info",
"(",
"\"Search HPO terms with %s\"",
",",
"new_string",
")",
"query_dict",
"[",
"'$text'",
"]",
"=",
"{",
"'$search'",
":",
"new_string",
"}",
"search_term",
"=",
"text",
"elif",
"hpo_term",
":",
"query_dict",
"[",
"'hpo_id'",
"]",
"=",
"hpo_term",
"search_term",
"=",
"hpo_term",
"limit",
"=",
"limit",
"or",
"int",
"(",
"10e10",
")",
"res",
"=",
"self",
".",
"hpo_term_collection",
".",
"find",
"(",
"query_dict",
")",
".",
"limit",
"(",
"limit",
")",
".",
"sort",
"(",
"'hpo_number'",
",",
"ASCENDING",
")",
"LOG",
".",
"info",
"(",
"\"Found {0} terms with search word {1}\"",
".",
"format",
"(",
"res",
".",
"count",
"(",
")",
",",
"search_term",
")",
")",
"return",
"res"
] |
90a551e2e1653a319e654c2405c2866f93d0ebb9
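A note on the $text branch in hpo_terms above: the first word is kept bare and every following word is wrapped in double quotes, which MongoDB's $text operator treats as exact phrase terms. Below is a minimal standalone sketch of that string building (build_text_search is a hypothetical name, not part of scout):

def build_text_search(text):
    # Mirror of the loop in hpo_terms: the first word stays bare, every
    # following word is wrapped in double quotes so the $text operator
    # matches it as an exact phrase term.
    new_string = ''
    for i, word in enumerate(text.split(' ')):
        if i == 0:
            new_string += word
        else:
            new_string += ' "{0}"'.format(word)
    return new_string

# 'abnormal gait' -> 'abnormal "gait"'
assert build_text_search('abnormal gait') == 'abnormal "gait"'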
|
test
|
HpoHandler.disease_term
|
Return a disease term
Checks if the identifier is a disease number or an id
Args:
disease_identifier(str)
Returns:
disease_obj(dict)
|
scout/adapter/mongo/hpo.py
|
def disease_term(self, disease_identifier):
"""Return a disease term
        Checks if the identifier is a disease number or an id
Args:
disease_identifier(str)
Returns:
disease_obj(dict)
"""
query = {}
try:
disease_identifier = int(disease_identifier)
query['disease_nr'] = disease_identifier
except ValueError:
query['_id'] = disease_identifier
return self.disease_term_collection.find_one(query)
|
def disease_term(self, disease_identifier):
"""Return a disease term
        Checks if the identifier is a disease number or an id
Args:
disease_identifier(str)
Returns:
disease_obj(dict)
"""
query = {}
try:
disease_identifier = int(disease_identifier)
query['disease_nr'] = disease_identifier
except ValueError:
query['_id'] = disease_identifier
return self.disease_term_collection.find_one(query)
|
[
"Return",
"a",
"disease",
"term"
] |
Clinical-Genomics/scout
|
python
|
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/hpo.py#L106-L124
|
[
"def",
"disease_term",
"(",
"self",
",",
"disease_identifier",
")",
":",
"query",
"=",
"{",
"}",
"try",
":",
"disease_identifier",
"=",
"int",
"(",
"disease_identifier",
")",
"query",
"[",
"'disease_nr'",
"]",
"=",
"disease_identifier",
"except",
"ValueError",
":",
"query",
"[",
"'_id'",
"]",
"=",
"disease_identifier",
"return",
"self",
".",
"disease_term_collection",
".",
"find_one",
"(",
"query",
")"
] |
90a551e2e1653a319e654c2405c2866f93d0ebb9
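The try/except in disease_term above is the whole dispatch: identifiers that parse as integers are routed to the 'disease_nr' field, everything else to '_id'. A minimal sketch of that routing as a pure function (build_disease_query is a hypothetical helper, not part of scout):

def build_disease_query(disease_identifier):
    # Numeric identifiers (e.g. '143100') query the 'disease_nr' field;
    # anything that fails int() (e.g. 'OMIM:143100') queries '_id'.
    query = {}
    try:
        query['disease_nr'] = int(disease_identifier)
    except ValueError:
        query['_id'] = disease_identifier
    return query

assert build_disease_query('143100') == {'disease_nr': 143100}
assert build_disease_query('OMIM:143100') == {'_id': 'OMIM:143100'}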
|
test
|
HpoHandler.disease_terms
|
Return all disease terms that overlap a gene
If no gene, return all disease terms
Args:
hgnc_id(int)
Returns:
iterable(dict): A list with all disease terms that match
|
scout/adapter/mongo/hpo.py
|
def disease_terms(self, hgnc_id=None):
"""Return all disease terms that overlaps a gene
If no gene, return all disease terms
Args:
hgnc_id(int)
Returns:
iterable(dict): A list with all disease terms that match
"""
query = {}
if hgnc_id:
LOG.debug("Fetching all diseases for gene %s", hgnc_id)
query['genes'] = hgnc_id
else:
LOG.info("Fetching all disease terms")
return list(self.disease_term_collection.find(query))
|
def disease_terms(self, hgnc_id=None):
"""Return all disease terms that overlaps a gene
If no gene, return all disease terms
Args:
hgnc_id(int)
Returns:
iterable(dict): A list with all disease terms that match
"""
query = {}
if hgnc_id:
LOG.debug("Fetching all diseases for gene %s", hgnc_id)
query['genes'] = hgnc_id
else:
LOG.info("Fetching all disease terms")
return list(self.disease_term_collection.find(query))
|
[
"Return",
"all",
"disease",
"terms",
"that",
"overlaps",
"a",
"gene"
] |
Clinical-Genomics/scout
|
python
|
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/hpo.py#L126-L144
|
[
"def",
"disease_terms",
"(",
"self",
",",
"hgnc_id",
"=",
"None",
")",
":",
"query",
"=",
"{",
"}",
"if",
"hgnc_id",
":",
"LOG",
".",
"debug",
"(",
"\"Fetching all diseases for gene %s\"",
",",
"hgnc_id",
")",
"query",
"[",
"'genes'",
"]",
"=",
"hgnc_id",
"else",
":",
"LOG",
".",
"info",
"(",
"\"Fetching all disease terms\"",
")",
"return",
"list",
"(",
"self",
".",
"disease_term_collection",
".",
"find",
"(",
"query",
")",
")"
] |
90a551e2e1653a319e654c2405c2866f93d0ebb9
|
test
|
HpoHandler.load_disease_term
|
Load a disease term into the database
Args:
disease_obj(dict)
|
scout/adapter/mongo/hpo.py
|
def load_disease_term(self, disease_obj):
"""Load a disease term into the database
Args:
disease_obj(dict)
"""
LOG.debug("Loading disease term %s into database", disease_obj['_id'])
try:
self.disease_term_collection.insert_one(disease_obj)
except DuplicateKeyError as err:
            raise IntegrityError("Disease term {0} already exists in database".format(disease_obj['_id']))
LOG.debug("Disease term saved")
|
def load_disease_term(self, disease_obj):
"""Load a disease term into the database
Args:
disease_obj(dict)
"""
LOG.debug("Loading disease term %s into database", disease_obj['_id'])
try:
self.disease_term_collection.insert_one(disease_obj)
except DuplicateKeyError as err:
            raise IntegrityError("Disease term {0} already exists in database".format(disease_obj['_id']))
LOG.debug("Disease term saved")
|
[
"Load",
"a",
"disease",
"term",
"into",
"the",
"database"
] |
Clinical-Genomics/scout
|
python
|
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/hpo.py#L146-L158
|
[
"def",
"load_disease_term",
"(",
"self",
",",
"disease_obj",
")",
":",
"LOG",
".",
"debug",
"(",
"\"Loading disease term %s into database\"",
",",
"disease_obj",
"[",
"'_id'",
"]",
")",
"try",
":",
"self",
".",
"disease_term_collection",
".",
"insert_one",
"(",
"disease_obj",
")",
"except",
"DuplicateKeyError",
"as",
"err",
":",
"raise",
"IntegrityError",
"(",
"\"Disease term %s already exists in database\"",
".",
"format",
"(",
"disease_obj",
"[",
"'_id'",
"]",
")",
")",
"LOG",
".",
"debug",
"(",
"\"Disease term saved\"",
")"
] |
90a551e2e1653a319e654c2405c2866f93d0ebb9
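load_disease_term translates pymongo's DuplicateKeyError into the adapter's own IntegrityError. Here is a sketch of the same pattern against a dict-backed stand-in for the collection (InMemoryCollection and this local IntegrityError are illustrations, not scout's classes):

class IntegrityError(Exception):
    pass

class InMemoryCollection(object):
    """Dict-backed stand-in for a Mongo collection keyed on _id."""
    def __init__(self):
        self._docs = {}

    def insert_one(self, doc):
        # pymongo raises DuplicateKeyError on a clashing _id; here we
        # raise the adapter-style IntegrityError directly.
        if doc['_id'] in self._docs:
            raise IntegrityError(
                "Disease term {0} already exists in database".format(doc['_id']))
        self._docs[doc['_id']] = doc

coll = InMemoryCollection()
coll.insert_one({'_id': 'OMIM:143100'})
try:
    coll.insert_one({'_id': 'OMIM:143100'})
except IntegrityError as err:
    print(err)  # Disease term OMIM:143100 already exists in database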
|
test
|
HpoHandler.generate_hpo_gene_list
|
Generate a sorted list with tuples of hpogenes
Each tuple of the list looks like (hgnc_id, count)
Args:
hpo_terms(iterable(str))
Returns:
    hpo_genes(list(tuple))
|
scout/adapter/mongo/hpo.py
|
def generate_hpo_gene_list(self, *hpo_terms):
"""Generate a sorted list with namedtuples of hpogenes
Each namedtuple of the list looks like (hgnc_id, count)
Args:
hpo_terms(iterable(str))
Returns:
            hpo_genes(list(tuple))
"""
genes = {}
for term in hpo_terms:
hpo_obj = self.hpo_term(term)
if hpo_obj:
for hgnc_id in hpo_obj['genes']:
if hgnc_id in genes:
genes[hgnc_id] += 1
else:
genes[hgnc_id] = 1
else:
LOG.warning("Term %s could not be found", term)
sorted_genes = sorted(genes.items(), key=operator.itemgetter(1), reverse=True)
return sorted_genes
|
def generate_hpo_gene_list(self, *hpo_terms):
"""Generate a sorted list with namedtuples of hpogenes
Each namedtuple of the list looks like (hgnc_id, count)
Args:
hpo_terms(iterable(str))
Returns:
            hpo_genes(list(tuple))
"""
genes = {}
for term in hpo_terms:
hpo_obj = self.hpo_term(term)
if hpo_obj:
for hgnc_id in hpo_obj['genes']:
if hgnc_id in genes:
genes[hgnc_id] += 1
else:
genes[hgnc_id] = 1
else:
LOG.warning("Term %s could not be found", term)
sorted_genes = sorted(genes.items(), key=operator.itemgetter(1), reverse=True)
return sorted_genes
|
[
"Generate",
"a",
"sorted",
"list",
"with",
"namedtuples",
"of",
"hpogenes"
] |
Clinical-Genomics/scout
|
python
|
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/hpo.py#L160-L184
|
[
"def",
"generate_hpo_gene_list",
"(",
"self",
",",
"*",
"hpo_terms",
")",
":",
"genes",
"=",
"{",
"}",
"for",
"term",
"in",
"hpo_terms",
":",
"hpo_obj",
"=",
"self",
".",
"hpo_term",
"(",
"term",
")",
"if",
"hpo_obj",
":",
"for",
"hgnc_id",
"in",
"hpo_obj",
"[",
"'genes'",
"]",
":",
"if",
"hgnc_id",
"in",
"genes",
":",
"genes",
"[",
"hgnc_id",
"]",
"+=",
"1",
"else",
":",
"genes",
"[",
"hgnc_id",
"]",
"=",
"1",
"else",
":",
"LOG",
".",
"warning",
"(",
"\"Term %s could not be found\"",
",",
"term",
")",
"sorted_genes",
"=",
"sorted",
"(",
"genes",
".",
"items",
"(",
")",
",",
"key",
"=",
"operator",
".",
"itemgetter",
"(",
"1",
")",
",",
"reverse",
"=",
"True",
")",
"return",
"sorted_genes"
] |
90a551e2e1653a319e654c2405c2866f93d0ebb9
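The counting loop in generate_hpo_gene_list is a hand-rolled frequency count over the genes of each matched term; collections.Counter yields the same (hgnc_id, count) list sorted by descending count. A sketch with a stub lookup table standing in for self.hpo_term (HPO ids and gene numbers are invented):

from collections import Counter

# Stub lookup table: hpo id -> term object with a 'genes' list
HPO_STUB = {
    'HP:0000001': {'genes': [2861, 3808]},
    'HP:0000002': {'genes': [3808]},
}

def hpo_gene_counts(*hpo_terms):
    counts = Counter()
    for term in hpo_terms:
        hpo_obj = HPO_STUB.get(term)  # stands in for self.hpo_term(term)
        if hpo_obj:
            counts.update(hpo_obj['genes'])
    # most_common() already sorts by count, descending
    return counts.most_common()

print(hpo_gene_counts('HP:0000001', 'HP:0000002'))
# [(3808, 2), (2861, 1)]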
|
test
|
cmd_tool
|
Command line tool for plotting and viewing info on filterbank files
|
blimpy/filterbank.py
|
def cmd_tool(args=None):
""" Command line tool for plotting and viewing info on filterbank files """
from argparse import ArgumentParser
parser = ArgumentParser(description="Command line utility for reading and plotting filterbank files.")
parser.add_argument('-p', action='store', default='ank', dest='what_to_plot', type=str,
help='Show: "w" waterfall (freq vs. time) plot; "s" integrated spectrum plot; \
"t" for time series; "mm" for spectrum including min max; "k" for kurtosis; \
"a" for all available plots and information; and "ank" for all but kurtosis.')
parser.add_argument('filename', type=str,
help='Name of file to read')
parser.add_argument('-b', action='store', default=None, dest='f_start', type=float,
help='Start frequency (begin), in MHz')
parser.add_argument('-e', action='store', default=None, dest='f_stop', type=float,
help='Stop frequency (end), in MHz')
parser.add_argument('-B', action='store', default=None, dest='t_start', type=int,
help='Start integration (begin) ID')
parser.add_argument('-E', action='store', default=None, dest='t_stop', type=int,
help='Stop integration (end) ID')
parser.add_argument('-i', action='store_true', default=False, dest='info_only',
help='Show info only')
parser.add_argument('-a', action='store_true', default=False, dest='average',
help='average along time axis (plot spectrum only)')
parser.add_argument('-s', action='store', default='', dest='plt_filename', type=str,
help='save plot graphic to file (give filename as argument)')
parser.add_argument('-S', action='store_true', default=False, dest='save_only',
help='Turn off plotting of data and only save to file.')
parser.add_argument('-D', action='store_false', default=True, dest='blank_dc',
help='Use to not blank DC bin.')
parser.add_argument('-c', action='store_true', default=False, dest='calibrate_band_pass',
help='Calibrate band pass.')
    args = parser.parse_args(args)
# Open blimpy data
filename = args.filename
load_data = not args.info_only
# only load one integration if looking at spectrum
wtp = args.what_to_plot
if not wtp or 's' in wtp:
if args.t_start == None:
t_start = 0
else:
t_start = args.t_start
t_stop = t_start + 1
if args.average:
t_start = None
t_stop = None
else:
t_start = args.t_start
t_stop = args.t_stop
if args.info_only:
args.blank_dc = False
args.calibrate_band_pass = False
fil = Filterbank(filename, f_start=args.f_start, f_stop=args.f_stop,
t_start=t_start, t_stop=t_stop,
load_data=load_data,blank_dc=args.blank_dc,
cal_band_pass=args.calibrate_band_pass)
fil.info()
# And if we want to plot data, then plot data.
if not args.info_only:
# check start & stop frequencies make sense
#try:
# if args.f_start:
# print "Start freq: %2.2f" % args.f_start
# assert args.f_start >= fil.freqs[0] or np.isclose(args.f_start, fil.freqs[0])
#
# if args.f_stop:
# print "Stop freq: %2.2f" % args.f_stop
# assert args.f_stop <= fil.freqs[-1] or np.isclose(args.f_stop, fil.freqs[-1])
#except AssertionError:
# print "Error: Start and stop frequencies must lie inside file's frequency range."
# print "i.e. between %2.2f-%2.2f MHz." % (fil.freqs[0], fil.freqs[-1])
# exit()
if args.what_to_plot == "w":
plt.figure("waterfall", figsize=(8, 6))
fil.plot_waterfall(f_start=args.f_start, f_stop=args.f_stop)
elif args.what_to_plot == "s":
plt.figure("Spectrum", figsize=(8, 6))
fil.plot_spectrum(logged=True, f_start=args.f_start, f_stop=args.f_stop, t='all')
elif args.what_to_plot == "mm":
plt.figure("min max", figsize=(8, 6))
fil.plot_spectrum_min_max(logged=True, f_start=args.f_start, f_stop=args.f_stop, t='all')
elif args.what_to_plot == "k":
plt.figure("kurtosis", figsize=(8, 6))
fil.plot_kurtosis(f_start=args.f_start, f_stop=args.f_stop)
elif args.what_to_plot == "t":
plt.figure("Time Series", figsize=(8, 6))
fil.plot_time_series(f_start=args.f_start, f_stop=args.f_stop,orientation='h')
elif args.what_to_plot == "a":
plt.figure("Multiple diagnostic plots", figsize=(12, 9),facecolor='white')
fil.plot_all(logged=True, f_start=args.f_start, f_stop=args.f_stop, t='all')
elif args.what_to_plot == "ank":
plt.figure("Multiple diagnostic plots", figsize=(12, 9),facecolor='white')
fil.plot_all(logged=True, f_start=args.f_start, f_stop=args.f_stop, t='all',kurtosis=False)
if args.plt_filename != '':
plt.savefig(args.plt_filename)
if not args.save_only:
if 'DISPLAY' in os.environ.keys():
plt.show()
else:
print("No $DISPLAY available.")
|
def cmd_tool(args=None):
""" Command line tool for plotting and viewing info on filterbank files """
from argparse import ArgumentParser
parser = ArgumentParser(description="Command line utility for reading and plotting filterbank files.")
parser.add_argument('-p', action='store', default='ank', dest='what_to_plot', type=str,
help='Show: "w" waterfall (freq vs. time) plot; "s" integrated spectrum plot; \
"t" for time series; "mm" for spectrum including min max; "k" for kurtosis; \
"a" for all available plots and information; and "ank" for all but kurtosis.')
parser.add_argument('filename', type=str,
help='Name of file to read')
parser.add_argument('-b', action='store', default=None, dest='f_start', type=float,
help='Start frequency (begin), in MHz')
parser.add_argument('-e', action='store', default=None, dest='f_stop', type=float,
help='Stop frequency (end), in MHz')
parser.add_argument('-B', action='store', default=None, dest='t_start', type=int,
help='Start integration (begin) ID')
parser.add_argument('-E', action='store', default=None, dest='t_stop', type=int,
help='Stop integration (end) ID')
parser.add_argument('-i', action='store_true', default=False, dest='info_only',
help='Show info only')
parser.add_argument('-a', action='store_true', default=False, dest='average',
help='average along time axis (plot spectrum only)')
parser.add_argument('-s', action='store', default='', dest='plt_filename', type=str,
help='save plot graphic to file (give filename as argument)')
parser.add_argument('-S', action='store_true', default=False, dest='save_only',
help='Turn off plotting of data and only save to file.')
parser.add_argument('-D', action='store_false', default=True, dest='blank_dc',
help='Use to not blank DC bin.')
parser.add_argument('-c', action='store_true', default=False, dest='calibrate_band_pass',
help='Calibrate band pass.')
    args = parser.parse_args(args)
# Open blimpy data
filename = args.filename
load_data = not args.info_only
# only load one integration if looking at spectrum
wtp = args.what_to_plot
if not wtp or 's' in wtp:
if args.t_start == None:
t_start = 0
else:
t_start = args.t_start
t_stop = t_start + 1
if args.average:
t_start = None
t_stop = None
else:
t_start = args.t_start
t_stop = args.t_stop
if args.info_only:
args.blank_dc = False
args.calibrate_band_pass = False
fil = Filterbank(filename, f_start=args.f_start, f_stop=args.f_stop,
t_start=t_start, t_stop=t_stop,
load_data=load_data,blank_dc=args.blank_dc,
cal_band_pass=args.calibrate_band_pass)
fil.info()
# And if we want to plot data, then plot data.
if not args.info_only:
# check start & stop frequencies make sense
#try:
# if args.f_start:
# print "Start freq: %2.2f" % args.f_start
# assert args.f_start >= fil.freqs[0] or np.isclose(args.f_start, fil.freqs[0])
#
# if args.f_stop:
# print "Stop freq: %2.2f" % args.f_stop
# assert args.f_stop <= fil.freqs[-1] or np.isclose(args.f_stop, fil.freqs[-1])
#except AssertionError:
# print "Error: Start and stop frequencies must lie inside file's frequency range."
# print "i.e. between %2.2f-%2.2f MHz." % (fil.freqs[0], fil.freqs[-1])
# exit()
if args.what_to_plot == "w":
plt.figure("waterfall", figsize=(8, 6))
fil.plot_waterfall(f_start=args.f_start, f_stop=args.f_stop)
elif args.what_to_plot == "s":
plt.figure("Spectrum", figsize=(8, 6))
fil.plot_spectrum(logged=True, f_start=args.f_start, f_stop=args.f_stop, t='all')
elif args.what_to_plot == "mm":
plt.figure("min max", figsize=(8, 6))
fil.plot_spectrum_min_max(logged=True, f_start=args.f_start, f_stop=args.f_stop, t='all')
elif args.what_to_plot == "k":
plt.figure("kurtosis", figsize=(8, 6))
fil.plot_kurtosis(f_start=args.f_start, f_stop=args.f_stop)
elif args.what_to_plot == "t":
plt.figure("Time Series", figsize=(8, 6))
fil.plot_time_series(f_start=args.f_start, f_stop=args.f_stop,orientation='h')
elif args.what_to_plot == "a":
plt.figure("Multiple diagnostic plots", figsize=(12, 9),facecolor='white')
fil.plot_all(logged=True, f_start=args.f_start, f_stop=args.f_stop, t='all')
elif args.what_to_plot == "ank":
plt.figure("Multiple diagnostic plots", figsize=(12, 9),facecolor='white')
fil.plot_all(logged=True, f_start=args.f_start, f_stop=args.f_stop, t='all',kurtosis=False)
if args.plt_filename != '':
plt.savefig(args.plt_filename)
if not args.save_only:
if 'DISPLAY' in os.environ.keys():
plt.show()
else:
print("No $DISPLAY available.")
|
[
"Command",
"line",
"tool",
"for",
"plotting",
"and",
"viewing",
"info",
"on",
"filterbank",
"files"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/filterbank.py#L977-L1087
|
[
"def",
"cmd_tool",
"(",
"args",
"=",
"None",
")",
":",
"from",
"argparse",
"import",
"ArgumentParser",
"parser",
"=",
"ArgumentParser",
"(",
"description",
"=",
"\"Command line utility for reading and plotting filterbank files.\"",
")",
"parser",
".",
"add_argument",
"(",
"'-p'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"'ank'",
",",
"dest",
"=",
"'what_to_plot'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'Show: \"w\" waterfall (freq vs. time) plot; \"s\" integrated spectrum plot; \\\n \"t\" for time series; \"mm\" for spectrum including min max; \"k\" for kurtosis; \\\n \"a\" for all available plots and information; and \"ank\" for all but kurtosis.'",
")",
"parser",
".",
"add_argument",
"(",
"'filename'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'Name of file to read'",
")",
"parser",
".",
"add_argument",
"(",
"'-b'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'f_start'",
",",
"type",
"=",
"float",
",",
"help",
"=",
"'Start frequency (begin), in MHz'",
")",
"parser",
".",
"add_argument",
"(",
"'-e'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'f_stop'",
",",
"type",
"=",
"float",
",",
"help",
"=",
"'Stop frequency (end), in MHz'",
")",
"parser",
".",
"add_argument",
"(",
"'-B'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'t_start'",
",",
"type",
"=",
"int",
",",
"help",
"=",
"'Start integration (begin) ID'",
")",
"parser",
".",
"add_argument",
"(",
"'-E'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'t_stop'",
",",
"type",
"=",
"int",
",",
"help",
"=",
"'Stop integration (end) ID'",
")",
"parser",
".",
"add_argument",
"(",
"'-i'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"dest",
"=",
"'info_only'",
",",
"help",
"=",
"'Show info only'",
")",
"parser",
".",
"add_argument",
"(",
"'-a'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"dest",
"=",
"'average'",
",",
"help",
"=",
"'average along time axis (plot spectrum only)'",
")",
"parser",
".",
"add_argument",
"(",
"'-s'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"''",
",",
"dest",
"=",
"'plt_filename'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'save plot graphic to file (give filename as argument)'",
")",
"parser",
".",
"add_argument",
"(",
"'-S'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"dest",
"=",
"'save_only'",
",",
"help",
"=",
"'Turn off plotting of data and only save to file.'",
")",
"parser",
".",
"add_argument",
"(",
"'-D'",
",",
"action",
"=",
"'store_false'",
",",
"default",
"=",
"True",
",",
"dest",
"=",
"'blank_dc'",
",",
"help",
"=",
"'Use to not blank DC bin.'",
")",
"parser",
".",
"add_argument",
"(",
"'-c'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"dest",
"=",
"'calibrate_band_pass'",
",",
"help",
"=",
"'Calibrate band pass.'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"# Open blimpy data",
"filename",
"=",
"args",
".",
"filename",
"load_data",
"=",
"not",
"args",
".",
"info_only",
"# only load one integration if looking at spectrum",
"wtp",
"=",
"args",
".",
"what_to_plot",
"if",
"not",
"wtp",
"or",
"'s'",
"in",
"wtp",
":",
"if",
"args",
".",
"t_start",
"==",
"None",
":",
"t_start",
"=",
"0",
"else",
":",
"t_start",
"=",
"args",
".",
"t_start",
"t_stop",
"=",
"t_start",
"+",
"1",
"if",
"args",
".",
"average",
":",
"t_start",
"=",
"None",
"t_stop",
"=",
"None",
"else",
":",
"t_start",
"=",
"args",
".",
"t_start",
"t_stop",
"=",
"args",
".",
"t_stop",
"if",
"args",
".",
"info_only",
":",
"args",
".",
"blank_dc",
"=",
"False",
"args",
".",
"calibrate_band_pass",
"=",
"False",
"fil",
"=",
"Filterbank",
"(",
"filename",
",",
"f_start",
"=",
"args",
".",
"f_start",
",",
"f_stop",
"=",
"args",
".",
"f_stop",
",",
"t_start",
"=",
"t_start",
",",
"t_stop",
"=",
"t_stop",
",",
"load_data",
"=",
"load_data",
",",
"blank_dc",
"=",
"args",
".",
"blank_dc",
",",
"cal_band_pass",
"=",
"args",
".",
"calibrate_band_pass",
")",
"fil",
".",
"info",
"(",
")",
"# And if we want to plot data, then plot data.",
"if",
"not",
"args",
".",
"info_only",
":",
"# check start & stop frequencies make sense",
"#try:",
"# if args.f_start:",
"# print \"Start freq: %2.2f\" % args.f_start",
"# assert args.f_start >= fil.freqs[0] or np.isclose(args.f_start, fil.freqs[0])",
"#",
"# if args.f_stop:",
"# print \"Stop freq: %2.2f\" % args.f_stop",
"# assert args.f_stop <= fil.freqs[-1] or np.isclose(args.f_stop, fil.freqs[-1])",
"#except AssertionError:",
"# print \"Error: Start and stop frequencies must lie inside file's frequency range.\"",
"# print \"i.e. between %2.2f-%2.2f MHz.\" % (fil.freqs[0], fil.freqs[-1])",
"# exit()",
"if",
"args",
".",
"what_to_plot",
"==",
"\"w\"",
":",
"plt",
".",
"figure",
"(",
"\"waterfall\"",
",",
"figsize",
"=",
"(",
"8",
",",
"6",
")",
")",
"fil",
".",
"plot_waterfall",
"(",
"f_start",
"=",
"args",
".",
"f_start",
",",
"f_stop",
"=",
"args",
".",
"f_stop",
")",
"elif",
"args",
".",
"what_to_plot",
"==",
"\"s\"",
":",
"plt",
".",
"figure",
"(",
"\"Spectrum\"",
",",
"figsize",
"=",
"(",
"8",
",",
"6",
")",
")",
"fil",
".",
"plot_spectrum",
"(",
"logged",
"=",
"True",
",",
"f_start",
"=",
"args",
".",
"f_start",
",",
"f_stop",
"=",
"args",
".",
"f_stop",
",",
"t",
"=",
"'all'",
")",
"elif",
"args",
".",
"what_to_plot",
"==",
"\"mm\"",
":",
"plt",
".",
"figure",
"(",
"\"min max\"",
",",
"figsize",
"=",
"(",
"8",
",",
"6",
")",
")",
"fil",
".",
"plot_spectrum_min_max",
"(",
"logged",
"=",
"True",
",",
"f_start",
"=",
"args",
".",
"f_start",
",",
"f_stop",
"=",
"args",
".",
"f_stop",
",",
"t",
"=",
"'all'",
")",
"elif",
"args",
".",
"what_to_plot",
"==",
"\"k\"",
":",
"plt",
".",
"figure",
"(",
"\"kurtosis\"",
",",
"figsize",
"=",
"(",
"8",
",",
"6",
")",
")",
"fil",
".",
"plot_kurtosis",
"(",
"f_start",
"=",
"args",
".",
"f_start",
",",
"f_stop",
"=",
"args",
".",
"f_stop",
")",
"elif",
"args",
".",
"what_to_plot",
"==",
"\"t\"",
":",
"plt",
".",
"figure",
"(",
"\"Time Series\"",
",",
"figsize",
"=",
"(",
"8",
",",
"6",
")",
")",
"fil",
".",
"plot_time_series",
"(",
"f_start",
"=",
"args",
".",
"f_start",
",",
"f_stop",
"=",
"args",
".",
"f_stop",
",",
"orientation",
"=",
"'h'",
")",
"elif",
"args",
".",
"what_to_plot",
"==",
"\"a\"",
":",
"plt",
".",
"figure",
"(",
"\"Multiple diagnostic plots\"",
",",
"figsize",
"=",
"(",
"12",
",",
"9",
")",
",",
"facecolor",
"=",
"'white'",
")",
"fil",
".",
"plot_all",
"(",
"logged",
"=",
"True",
",",
"f_start",
"=",
"args",
".",
"f_start",
",",
"f_stop",
"=",
"args",
".",
"f_stop",
",",
"t",
"=",
"'all'",
")",
"elif",
"args",
".",
"what_to_plot",
"==",
"\"ank\"",
":",
"plt",
".",
"figure",
"(",
"\"Multiple diagnostic plots\"",
",",
"figsize",
"=",
"(",
"12",
",",
"9",
")",
",",
"facecolor",
"=",
"'white'",
")",
"fil",
".",
"plot_all",
"(",
"logged",
"=",
"True",
",",
"f_start",
"=",
"args",
".",
"f_start",
",",
"f_stop",
"=",
"args",
".",
"f_stop",
",",
"t",
"=",
"'all'",
",",
"kurtosis",
"=",
"False",
")",
"if",
"args",
".",
"plt_filename",
"!=",
"''",
":",
"plt",
".",
"savefig",
"(",
"args",
".",
"plt_filename",
")",
"if",
"not",
"args",
".",
"save_only",
":",
"if",
"'DISPLAY'",
"in",
"os",
".",
"environ",
".",
"keys",
"(",
")",
":",
"plt",
".",
"show",
"(",
")",
"else",
":",
"print",
"(",
"\"No $DISPLAY available.\"",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
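The time-window logic near the top of cmd_tool is easy to miss: for spectrum-style plots only a single integration is loaded, starting at t_start, unless -a (average) is given. A standalone sketch of that branch (select_time_window is a hypothetical name):

def select_time_window(what_to_plot, t_start, t_stop, average):
    # Spectrum-style plots load one integration starting at t_start,
    # unless averaging over the whole time axis.
    if not what_to_plot or 's' in what_to_plot:
        start = 0 if t_start is None else t_start
        stop = start + 1
        if average:
            start, stop = None, None
        return start, stop
    return t_start, t_stop

assert select_time_window('s', None, None, False) == (0, 1)
assert select_time_window('s', 5, 99, False) == (5, 6)
assert select_time_window('s', 5, 99, True) == (None, None)
assert select_time_window('w', 5, 99, False) == (5, 99)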
|
test
|
Filterbank.read_hdf5
|
Populate Filterbank instance with data from HDF5 file
Note:
    This is to be deprecated in the future; please use Waterfall() to open files.
|
blimpy/filterbank.py
|
def read_hdf5(self, filename, f_start=None, f_stop=None,
t_start=None, t_stop=None, load_data=True):
""" Populate Filterbank instance with data from HDF5 file
Note:
        This is to be deprecated in the future; please use Waterfall() to open files.
"""
print("Warning: this function will be deprecated in the future. Please use Waterfall to open HDF5 files.")
# raise DeprecationWarning('Please use Waterfall to open HDF5 files.')
self.header = {}
self.filename = filename
        self.h5 = h5py.File(filename, 'r')
for key, val in self.h5[b'data'].attrs.items():
if six.PY3:
key = bytes(key, 'ascii')
if key == b'src_raj':
self.header[key] = Angle(val, unit='hr')
elif key == b'src_dej':
self.header[key] = Angle(val, unit='deg')
else:
self.header[key] = val
self.n_ints_in_file = self.h5[b"data"].shape[0]
i_start, i_stop, chan_start_idx, chan_stop_idx = self._setup_freqs(f_start=f_start, f_stop=f_stop)
ii_start, ii_stop, n_ints = self._setup_time_axis(t_start=t_start, t_stop=t_stop)
if load_data:
self.data = self.h5[b"data"][ii_start:ii_stop, :, chan_start_idx:chan_stop_idx]
self.file_size_bytes = os.path.getsize(self.filename)
# if self.header[b'foff'] < 0:
# self.data = self.data[..., ::-1] # Reverse data
else:
print("Skipping data load...")
self.data = np.array([0])
self.n_ints_in_file = 0
self.file_size_bytes = os.path.getsize(self.filename)
|
def read_hdf5(self, filename, f_start=None, f_stop=None,
t_start=None, t_stop=None, load_data=True):
""" Populate Filterbank instance with data from HDF5 file
Note:
        This is to be deprecated in the future; please use Waterfall() to open files.
"""
print("Warning: this function will be deprecated in the future. Please use Waterfall to open HDF5 files.")
# raise DeprecationWarning('Please use Waterfall to open HDF5 files.')
self.header = {}
self.filename = filename
        self.h5 = h5py.File(filename, 'r')
for key, val in self.h5[b'data'].attrs.items():
if six.PY3:
key = bytes(key, 'ascii')
if key == b'src_raj':
self.header[key] = Angle(val, unit='hr')
elif key == b'src_dej':
self.header[key] = Angle(val, unit='deg')
else:
self.header[key] = val
self.n_ints_in_file = self.h5[b"data"].shape[0]
i_start, i_stop, chan_start_idx, chan_stop_idx = self._setup_freqs(f_start=f_start, f_stop=f_stop)
ii_start, ii_stop, n_ints = self._setup_time_axis(t_start=t_start, t_stop=t_stop)
if load_data:
self.data = self.h5[b"data"][ii_start:ii_stop, :, chan_start_idx:chan_stop_idx]
self.file_size_bytes = os.path.getsize(self.filename)
# if self.header[b'foff'] < 0:
# self.data = self.data[..., ::-1] # Reverse data
else:
print("Skipping data load...")
self.data = np.array([0])
self.n_ints_in_file = 0
self.file_size_bytes = os.path.getsize(self.filename)
|
[
"Populate",
"Filterbank",
"instance",
"with",
"data",
"from",
"HDF5",
"file"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/filterbank.py#L144-L183
|
[
"def",
"read_hdf5",
"(",
"self",
",",
"filename",
",",
"f_start",
"=",
"None",
",",
"f_stop",
"=",
"None",
",",
"t_start",
"=",
"None",
",",
"t_stop",
"=",
"None",
",",
"load_data",
"=",
"True",
")",
":",
"print",
"(",
"\"Warning: this function will be deprecated in the future. Please use Waterfall to open HDF5 files.\"",
")",
"# raise DeprecationWarning('Please use Waterfall to open HDF5 files.')",
"self",
".",
"header",
"=",
"{",
"}",
"self",
".",
"filename",
"=",
"filename",
"self",
".",
"h5",
"=",
"h5py",
".",
"File",
"(",
"filename",
")",
"for",
"key",
",",
"val",
"in",
"self",
".",
"h5",
"[",
"b'data'",
"]",
".",
"attrs",
".",
"items",
"(",
")",
":",
"if",
"six",
".",
"PY3",
":",
"key",
"=",
"bytes",
"(",
"key",
",",
"'ascii'",
")",
"if",
"key",
"==",
"b'src_raj'",
":",
"self",
".",
"header",
"[",
"key",
"]",
"=",
"Angle",
"(",
"val",
",",
"unit",
"=",
"'hr'",
")",
"elif",
"key",
"==",
"b'src_dej'",
":",
"self",
".",
"header",
"[",
"key",
"]",
"=",
"Angle",
"(",
"val",
",",
"unit",
"=",
"'deg'",
")",
"else",
":",
"self",
".",
"header",
"[",
"key",
"]",
"=",
"val",
"self",
".",
"n_ints_in_file",
"=",
"self",
".",
"h5",
"[",
"b\"data\"",
"]",
".",
"shape",
"[",
"0",
"]",
"i_start",
",",
"i_stop",
",",
"chan_start_idx",
",",
"chan_stop_idx",
"=",
"self",
".",
"_setup_freqs",
"(",
"f_start",
"=",
"f_start",
",",
"f_stop",
"=",
"f_stop",
")",
"ii_start",
",",
"ii_stop",
",",
"n_ints",
"=",
"self",
".",
"_setup_time_axis",
"(",
"t_start",
"=",
"t_start",
",",
"t_stop",
"=",
"t_stop",
")",
"if",
"load_data",
":",
"self",
".",
"data",
"=",
"self",
".",
"h5",
"[",
"b\"data\"",
"]",
"[",
"ii_start",
":",
"ii_stop",
",",
":",
",",
"chan_start_idx",
":",
"chan_stop_idx",
"]",
"self",
".",
"file_size_bytes",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"self",
".",
"filename",
")",
"# if self.header[b'foff'] < 0:",
"# self.data = self.data[..., ::-1] # Reverse data",
"else",
":",
"print",
"(",
"\"Skipping data load...\"",
")",
"self",
".",
"data",
"=",
"np",
".",
"array",
"(",
"[",
"0",
"]",
")",
"self",
".",
"n_ints_in_file",
"=",
"0",
"self",
".",
"file_size_bytes",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"self",
".",
"filename",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
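read_hdf5 keeps header keys as bytes so the rest of the class can index with b'...' literals on both Python 2 and 3; under Python 3 the h5py attribute names arrive as str and are re-encoded. A minimal sketch of that normalization without h5py (normalize_header is a hypothetical name; the attrs dict is invented):

import sys

def normalize_header(attrs):
    # Re-encode str keys to bytes on Python 3 so later lookups like
    # header[b'tstart'] work on either interpreter version.
    header = {}
    for key, val in attrs.items():
        if sys.version_info[0] >= 3 and isinstance(key, str):
            key = bytes(key, 'ascii')
        header[key] = val
    return header

header = normalize_header({'tstart': 58000.0, 'nchans': 1024})
assert header[b'tstart'] == 58000.0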
|
test
|
Filterbank._setup_freqs
|
Setup frequency axis
|
blimpy/filterbank.py
|
def _setup_freqs(self, f_start=None, f_stop=None):
""" Setup frequency axis """
## Setup frequency axis
f0 = self.header[b'fch1']
f_delt = self.header[b'foff']
i_start, i_stop = 0, self.header[b'nchans']
if f_start:
i_start = int((f_start - f0) / f_delt)
if f_stop:
i_stop = int((f_stop - f0) / f_delt)
#calculate closest true index value
chan_start_idx = np.int(i_start)
chan_stop_idx = np.int(i_stop)
#create freq array
if i_start < i_stop:
i_vals = np.arange(chan_start_idx, chan_stop_idx)
else:
i_vals = np.arange(chan_stop_idx, chan_start_idx)
self.freqs = f_delt * i_vals + f0
# if f_delt < 0:
# self.freqs = self.freqs[::-1]
if chan_stop_idx < chan_start_idx:
chan_stop_idx, chan_start_idx = chan_start_idx,chan_stop_idx
return i_start, i_stop, chan_start_idx, chan_stop_idx
|
def _setup_freqs(self, f_start=None, f_stop=None):
""" Setup frequency axis """
## Setup frequency axis
f0 = self.header[b'fch1']
f_delt = self.header[b'foff']
i_start, i_stop = 0, self.header[b'nchans']
if f_start:
i_start = int((f_start - f0) / f_delt)
if f_stop:
i_stop = int((f_stop - f0) / f_delt)
#calculate closest true index value
chan_start_idx = np.int(i_start)
chan_stop_idx = np.int(i_stop)
#create freq array
if i_start < i_stop:
i_vals = np.arange(chan_start_idx, chan_stop_idx)
else:
i_vals = np.arange(chan_stop_idx, chan_start_idx)
self.freqs = f_delt * i_vals + f0
# if f_delt < 0:
# self.freqs = self.freqs[::-1]
if chan_stop_idx < chan_start_idx:
chan_stop_idx, chan_start_idx = chan_start_idx,chan_stop_idx
return i_start, i_stop, chan_start_idx, chan_stop_idx
|
[
"Setup",
"frequency",
"axis"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/filterbank.py#L186-L216
|
[
"def",
"_setup_freqs",
"(",
"self",
",",
"f_start",
"=",
"None",
",",
"f_stop",
"=",
"None",
")",
":",
"## Setup frequency axis",
"f0",
"=",
"self",
".",
"header",
"[",
"b'fch1'",
"]",
"f_delt",
"=",
"self",
".",
"header",
"[",
"b'foff'",
"]",
"i_start",
",",
"i_stop",
"=",
"0",
",",
"self",
".",
"header",
"[",
"b'nchans'",
"]",
"if",
"f_start",
":",
"i_start",
"=",
"int",
"(",
"(",
"f_start",
"-",
"f0",
")",
"/",
"f_delt",
")",
"if",
"f_stop",
":",
"i_stop",
"=",
"int",
"(",
"(",
"f_stop",
"-",
"f0",
")",
"/",
"f_delt",
")",
"#calculate closest true index value",
"chan_start_idx",
"=",
"np",
".",
"int",
"(",
"i_start",
")",
"chan_stop_idx",
"=",
"np",
".",
"int",
"(",
"i_stop",
")",
"#create freq array",
"if",
"i_start",
"<",
"i_stop",
":",
"i_vals",
"=",
"np",
".",
"arange",
"(",
"chan_start_idx",
",",
"chan_stop_idx",
")",
"else",
":",
"i_vals",
"=",
"np",
".",
"arange",
"(",
"chan_stop_idx",
",",
"chan_start_idx",
")",
"self",
".",
"freqs",
"=",
"f_delt",
"*",
"i_vals",
"+",
"f0",
"# if f_delt < 0:",
"# self.freqs = self.freqs[::-1]",
"if",
"chan_stop_idx",
"<",
"chan_start_idx",
":",
"chan_stop_idx",
",",
"chan_start_idx",
"=",
"chan_start_idx",
",",
"chan_stop_idx",
"return",
"i_start",
",",
"i_stop",
",",
"chan_start_idx",
",",
"chan_stop_idx"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
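The index arithmetic in _setup_freqs is easiest to see with numbers: with a negative foff the channel index grows as the frequency falls, so passing the lower band edge as f_start yields a start index larger than the stop index, which is why the method swaps the channel indices before returning. A sketch with an invented toy header:

import numpy as np

# Toy header: first channel at 1500 MHz, -0.5 MHz per channel,
# so the band descends from 1500 MHz.
f0, f_delt = 1500.0, -0.5

def freq_to_index(f):
    return int((f - f0) / f_delt)

i_start = freq_to_index(1497.0)  # 6
i_stop = freq_to_index(1499.0)   # 2
# i_start > i_stop here, hence the swap in _setup_freqs
lo, hi = min(i_start, i_stop), max(i_start, i_stop)
freqs = f_delt * np.arange(lo, hi) + f0
print(freqs)  # [1499.  1498.5 1498.  1497.5]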
|
test
|
Filterbank._setup_time_axis
|
Setup time axis.
|
blimpy/filterbank.py
|
def _setup_time_axis(self, t_start=None, t_stop=None):
""" Setup time axis. """
# now check to see how many integrations requested
ii_start, ii_stop = 0, self.n_ints_in_file
if t_start:
ii_start = t_start
if t_stop:
ii_stop = t_stop
n_ints = ii_stop - ii_start
## Setup time axis
t0 = self.header[b'tstart']
t_delt = self.header[b'tsamp']
self.timestamps = np.arange(0, n_ints) * t_delt / 24./60./60 + t0
return ii_start, ii_stop, n_ints
|
def _setup_time_axis(self, t_start=None, t_stop=None):
""" Setup time axis. """
# now check to see how many integrations requested
ii_start, ii_stop = 0, self.n_ints_in_file
if t_start:
ii_start = t_start
if t_stop:
ii_stop = t_stop
n_ints = ii_stop - ii_start
## Setup time axis
t0 = self.header[b'tstart']
t_delt = self.header[b'tsamp']
self.timestamps = np.arange(0, n_ints) * t_delt / 24./60./60 + t0
return ii_start, ii_stop, n_ints
|
[
"Setup",
"time",
"axis",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/filterbank.py#L218-L235
|
[
"def",
"_setup_time_axis",
"(",
"self",
",",
"t_start",
"=",
"None",
",",
"t_stop",
"=",
"None",
")",
":",
"# now check to see how many integrations requested",
"ii_start",
",",
"ii_stop",
"=",
"0",
",",
"self",
".",
"n_ints_in_file",
"if",
"t_start",
":",
"ii_start",
"=",
"t_start",
"if",
"t_stop",
":",
"ii_stop",
"=",
"t_stop",
"n_ints",
"=",
"ii_stop",
"-",
"ii_start",
"## Setup time axis",
"t0",
"=",
"self",
".",
"header",
"[",
"b'tstart'",
"]",
"t_delt",
"=",
"self",
".",
"header",
"[",
"b'tsamp'",
"]",
"self",
".",
"timestamps",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"n_ints",
")",
"*",
"t_delt",
"/",
"24.",
"/",
"60.",
"/",
"60",
"+",
"t0",
"return",
"ii_start",
",",
"ii_stop",
",",
"n_ints"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
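The timestamp expression in _setup_time_axis divides tsamp (seconds) by 24/60/60 to convert it to days, so the timestamps come out in MJD like tstart. A small worked sketch (header values invented):

import numpy as np

t0 = 58000.0   # tstart, in MJD
t_delt = 1.0   # tsamp, in seconds
n_ints = 4

# seconds -> days: divide by 24*60*60 = 86400
timestamps = np.arange(0, n_ints) * t_delt / 24. / 60. / 60 + t0
print(timestamps - t0)  # [0.0e+00 1.157e-05 2.315e-05 3.472e-05] days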
|
test
|
Filterbank.read_filterbank
|
Populate Filterbank instance with data from Filterbank file
Note:
    This is to be deprecated in the future; please use Waterfall() to open files.
|
blimpy/filterbank.py
|
def read_filterbank(self, filename=None, f_start=None, f_stop=None,
t_start=None, t_stop=None, load_data=True):
""" Populate Filterbank instance with data from Filterbank file
Note:
        This is to be deprecated in the future; please use Waterfall() to open files.
"""
if filename is None:
filename = self.filename
else:
self.filename = filename
self.header = read_header(filename)
#convert input frequencies into what their corresponding index would be
i_start, i_stop, chan_start_idx, chan_stop_idx = self._setup_freqs(f_start=f_start, f_stop=f_stop)
n_bits = self.header[b'nbits']
n_bytes = int(self.header[b'nbits'] / 8)
n_chans = self.header[b'nchans']
n_chans_selected = self.freqs.shape[0]
n_ifs = self.header[b'nifs']
# Load binary data
self.idx_data = len_header(filename)
f = open(filename, 'rb')
f.seek(self.idx_data)
filesize = os.path.getsize(self.filename)
n_bytes_data = filesize - self.idx_data
# Finally add some other info to the class as objects
self.n_ints_in_file = calc_n_ints_in_file(self.filename)
self.file_size_bytes = filesize
## Setup time axis
ii_start, ii_stop, n_ints = self._setup_time_axis(t_start=t_start, t_stop=t_stop)
# Seek to first integration
f.seek(int(ii_start * n_bits * n_ifs * n_chans / 8), 1)
# Set up indexes used in file read (taken out of loop for speed)
i0 = np.min((chan_start_idx, chan_stop_idx))
i1 = np.max((chan_start_idx, chan_stop_idx))
#Set up the data type (taken out of loop for speed)
if n_bits == 2:
dd_type = b'uint8'
n_chans_selected = int(n_chans_selected/4)
elif n_bytes == 4:
dd_type = b'float32'
elif n_bytes == 2:
dd_type = b'uint16'
elif n_bytes == 1:
dd_type = b'uint8'
if load_data:
if n_ints * n_ifs * n_chans_selected > MAX_DATA_ARRAY_SIZE:
print("[Filterbank] Error: data array is too large to load. Either select fewer points or manually increase MAX_DATA_ARRAY_SIZE. Large files are now handle with Waterfall .")
sys.exit()
if n_bits == 2:
self.data = np.zeros((n_ints, n_ifs, n_chans_selected*4), dtype=dd_type)
else:
self.data = np.zeros((n_ints, n_ifs, n_chans_selected), dtype=dd_type)
for ii in range(n_ints):
"""d = f.read(n_bytes * n_chans * n_ifs)
"""
for jj in range(n_ifs):
f.seek(n_bytes * i0, 1) # 1 = from current location
#d = f.read(n_bytes * n_chans_selected)
#bytes_to_read = n_bytes * n_chans_selected
dd = np.fromfile(f, count=n_chans_selected, dtype=dd_type)
# Reverse array if frequency axis is flipped
# if f_delt < 0:
# dd = dd[::-1]
if n_bits == 2:
dd = unpack_2to8(dd)
self.data[ii, jj] = dd
f.seek(n_bytes * (n_chans - i1), 1) # Seek to start of next block
else:
print("Skipping data load...")
self.data = np.array([0], dtype=dd_type)
|
def read_filterbank(self, filename=None, f_start=None, f_stop=None,
t_start=None, t_stop=None, load_data=True):
""" Populate Filterbank instance with data from Filterbank file
Note:
        This is to be deprecated in the future; please use Waterfall() to open files.
"""
if filename is None:
filename = self.filename
else:
self.filename = filename
self.header = read_header(filename)
#convert input frequencies into what their corresponding index would be
i_start, i_stop, chan_start_idx, chan_stop_idx = self._setup_freqs(f_start=f_start, f_stop=f_stop)
n_bits = self.header[b'nbits']
n_bytes = int(self.header[b'nbits'] / 8)
n_chans = self.header[b'nchans']
n_chans_selected = self.freqs.shape[0]
n_ifs = self.header[b'nifs']
# Load binary data
self.idx_data = len_header(filename)
f = open(filename, 'rb')
f.seek(self.idx_data)
filesize = os.path.getsize(self.filename)
n_bytes_data = filesize - self.idx_data
# Finally add some other info to the class as objects
self.n_ints_in_file = calc_n_ints_in_file(self.filename)
self.file_size_bytes = filesize
## Setup time axis
ii_start, ii_stop, n_ints = self._setup_time_axis(t_start=t_start, t_stop=t_stop)
# Seek to first integration
f.seek(int(ii_start * n_bits * n_ifs * n_chans / 8), 1)
# Set up indexes used in file read (taken out of loop for speed)
i0 = np.min((chan_start_idx, chan_stop_idx))
i1 = np.max((chan_start_idx, chan_stop_idx))
#Set up the data type (taken out of loop for speed)
if n_bits == 2:
dd_type = b'uint8'
n_chans_selected = int(n_chans_selected/4)
elif n_bytes == 4:
dd_type = b'float32'
elif n_bytes == 2:
dd_type = b'uint16'
elif n_bytes == 1:
dd_type = b'uint8'
if load_data:
if n_ints * n_ifs * n_chans_selected > MAX_DATA_ARRAY_SIZE:
print("[Filterbank] Error: data array is too large to load. Either select fewer points or manually increase MAX_DATA_ARRAY_SIZE. Large files are now handle with Waterfall .")
sys.exit()
if n_bits == 2:
self.data = np.zeros((n_ints, n_ifs, n_chans_selected*4), dtype=dd_type)
else:
self.data = np.zeros((n_ints, n_ifs, n_chans_selected), dtype=dd_type)
for ii in range(n_ints):
"""d = f.read(n_bytes * n_chans * n_ifs)
"""
for jj in range(n_ifs):
f.seek(n_bytes * i0, 1) # 1 = from current location
#d = f.read(n_bytes * n_chans_selected)
#bytes_to_read = n_bytes * n_chans_selected
dd = np.fromfile(f, count=n_chans_selected, dtype=dd_type)
# Reverse array if frequency axis is flipped
# if f_delt < 0:
# dd = dd[::-1]
if n_bits == 2:
dd = unpack_2to8(dd)
self.data[ii, jj] = dd
f.seek(n_bytes * (n_chans - i1), 1) # Seek to start of next block
else:
print("Skipping data load...")
self.data = np.array([0], dtype=dd_type)
|
[
"Populate",
"Filterbank",
"instance",
"with",
"data",
"from",
"Filterbank",
"file"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/filterbank.py#L237-L326
|
[
"def",
"read_filterbank",
"(",
"self",
",",
"filename",
"=",
"None",
",",
"f_start",
"=",
"None",
",",
"f_stop",
"=",
"None",
",",
"t_start",
"=",
"None",
",",
"t_stop",
"=",
"None",
",",
"load_data",
"=",
"True",
")",
":",
"if",
"filename",
"is",
"None",
":",
"filename",
"=",
"self",
".",
"filename",
"else",
":",
"self",
".",
"filename",
"=",
"filename",
"self",
".",
"header",
"=",
"read_header",
"(",
"filename",
")",
"#convert input frequencies into what their corresponding index would be",
"i_start",
",",
"i_stop",
",",
"chan_start_idx",
",",
"chan_stop_idx",
"=",
"self",
".",
"_setup_freqs",
"(",
"f_start",
"=",
"f_start",
",",
"f_stop",
"=",
"f_stop",
")",
"n_bits",
"=",
"self",
".",
"header",
"[",
"b'nbits'",
"]",
"n_bytes",
"=",
"int",
"(",
"self",
".",
"header",
"[",
"b'nbits'",
"]",
"/",
"8",
")",
"n_chans",
"=",
"self",
".",
"header",
"[",
"b'nchans'",
"]",
"n_chans_selected",
"=",
"self",
".",
"freqs",
".",
"shape",
"[",
"0",
"]",
"n_ifs",
"=",
"self",
".",
"header",
"[",
"b'nifs'",
"]",
"# Load binary data",
"self",
".",
"idx_data",
"=",
"len_header",
"(",
"filename",
")",
"f",
"=",
"open",
"(",
"filename",
",",
"'rb'",
")",
"f",
".",
"seek",
"(",
"self",
".",
"idx_data",
")",
"filesize",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"self",
".",
"filename",
")",
"n_bytes_data",
"=",
"filesize",
"-",
"self",
".",
"idx_data",
"# Finally add some other info to the class as objects",
"self",
".",
"n_ints_in_file",
"=",
"calc_n_ints_in_file",
"(",
"self",
".",
"filename",
")",
"self",
".",
"file_size_bytes",
"=",
"filesize",
"## Setup time axis",
"ii_start",
",",
"ii_stop",
",",
"n_ints",
"=",
"self",
".",
"_setup_time_axis",
"(",
"t_start",
"=",
"t_start",
",",
"t_stop",
"=",
"t_stop",
")",
"# Seek to first integration",
"f",
".",
"seek",
"(",
"int",
"(",
"ii_start",
"*",
"n_bits",
"*",
"n_ifs",
"*",
"n_chans",
"/",
"8",
")",
",",
"1",
")",
"# Set up indexes used in file read (taken out of loop for speed)",
"i0",
"=",
"np",
".",
"min",
"(",
"(",
"chan_start_idx",
",",
"chan_stop_idx",
")",
")",
"i1",
"=",
"np",
".",
"max",
"(",
"(",
"chan_start_idx",
",",
"chan_stop_idx",
")",
")",
"#Set up the data type (taken out of loop for speed)",
"if",
"n_bits",
"==",
"2",
":",
"dd_type",
"=",
"b'uint8'",
"n_chans_selected",
"=",
"int",
"(",
"n_chans_selected",
"/",
"4",
")",
"elif",
"n_bytes",
"==",
"4",
":",
"dd_type",
"=",
"b'float32'",
"elif",
"n_bytes",
"==",
"2",
":",
"dd_type",
"=",
"b'uint16'",
"elif",
"n_bytes",
"==",
"1",
":",
"dd_type",
"=",
"b'uint8'",
"if",
"load_data",
":",
"if",
"n_ints",
"*",
"n_ifs",
"*",
"n_chans_selected",
">",
"MAX_DATA_ARRAY_SIZE",
":",
"print",
"(",
"\"[Filterbank] Error: data array is too large to load. Either select fewer points or manually increase MAX_DATA_ARRAY_SIZE. Large files are now handle with Waterfall .\"",
")",
"sys",
".",
"exit",
"(",
")",
"if",
"n_bits",
"==",
"2",
":",
"self",
".",
"data",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_ints",
",",
"n_ifs",
",",
"n_chans_selected",
"*",
"4",
")",
",",
"dtype",
"=",
"dd_type",
")",
"else",
":",
"self",
".",
"data",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_ints",
",",
"n_ifs",
",",
"n_chans_selected",
")",
",",
"dtype",
"=",
"dd_type",
")",
"for",
"ii",
"in",
"range",
"(",
"n_ints",
")",
":",
"\"\"\"d = f.read(n_bytes * n_chans * n_ifs)\n \"\"\"",
"for",
"jj",
"in",
"range",
"(",
"n_ifs",
")",
":",
"f",
".",
"seek",
"(",
"n_bytes",
"*",
"i0",
",",
"1",
")",
"# 1 = from current location",
"#d = f.read(n_bytes * n_chans_selected)",
"#bytes_to_read = n_bytes * n_chans_selected",
"dd",
"=",
"np",
".",
"fromfile",
"(",
"f",
",",
"count",
"=",
"n_chans_selected",
",",
"dtype",
"=",
"dd_type",
")",
"# Reverse array if frequency axis is flipped",
"# if f_delt < 0:",
"# dd = dd[::-1]",
"if",
"n_bits",
"==",
"2",
":",
"dd",
"=",
"unpack_2to8",
"(",
"dd",
")",
"self",
".",
"data",
"[",
"ii",
",",
"jj",
"]",
"=",
"dd",
"f",
".",
"seek",
"(",
"n_bytes",
"*",
"(",
"n_chans",
"-",
"i1",
")",
",",
"1",
")",
"# Seek to start of next block",
"else",
":",
"print",
"(",
"\"Skipping data load...\"",
")",
"self",
".",
"data",
"=",
"np",
".",
"array",
"(",
"[",
"0",
"]",
",",
"dtype",
"=",
"dd_type",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
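The per-row read in read_filterbank is: skip i0 channels from the current position, read the selected channels, then skip past the rest of the row. A self-contained sketch of that seek/read pattern over an in-memory float32 file (layout constants invented for the demo; np.frombuffer replaces np.fromfile so it works on BytesIO):

import io
import numpy as np

n_ints, n_chans, n_bytes = 3, 8, 4       # three float32 rows of 8 channels
i0, i1 = 2, 6                            # selected channel slice [i0, i1)
raw = np.arange(n_ints * n_chans, dtype='float32')
f = io.BytesIO(raw.tobytes())

data = np.zeros((n_ints, i1 - i0), dtype='float32')
for ii in range(n_ints):
    f.seek(n_bytes * i0, 1)              # 1 = seek from current location
    data[ii] = np.frombuffer(f.read(n_bytes * (i1 - i0)), dtype='float32')
    f.seek(n_bytes * (n_chans - i1), 1)  # skip to the start of the next row
print(data)
# [[ 2.  3.  4.  5.]
#  [10. 11. 12. 13.]
#  [18. 19. 20. 21.]]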
|
test
|
Filterbank.compute_lst
|
Compute LST for observation
|
blimpy/filterbank.py
|
def compute_lst(self):
""" Compute LST for observation """
if self.header[b'telescope_id'] == 6:
self.coords = gbt_coords
elif self.header[b'telescope_id'] == 4:
self.coords = parkes_coords
else:
raise RuntimeError("Currently only Parkes and GBT supported")
if HAS_SLALIB:
# dut1 = (0.2 /3600.0) * np.pi/12.0
dut1 = 0.0
mjd = self.header[b'tstart']
tellong = np.deg2rad(self.coords[1])
last = s.sla_gmst(mjd) - tellong + s.sla_eqeqx(mjd) + dut1
# lmst = s.sla_gmst(mjd) - tellong
if last < 0.0 : last = last + 2.0*np.pi
return last
else:
raise RuntimeError("This method requires pySLALIB")
|
def compute_lst(self):
""" Compute LST for observation """
if self.header[b'telescope_id'] == 6:
self.coords = gbt_coords
elif self.header[b'telescope_id'] == 4:
self.coords = parkes_coords
else:
raise RuntimeError("Currently only Parkes and GBT supported")
if HAS_SLALIB:
# dut1 = (0.2 /3600.0) * np.pi/12.0
dut1 = 0.0
mjd = self.header[b'tstart']
tellong = np.deg2rad(self.coords[1])
last = s.sla_gmst(mjd) - tellong + s.sla_eqeqx(mjd) + dut1
# lmst = s.sla_gmst(mjd) - tellong
if last < 0.0 : last = last + 2.0*np.pi
return last
else:
raise RuntimeError("This method requires pySLALIB")
|
[
"Compute",
"LST",
"for",
"observation"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/filterbank.py#L328-L346
|
[
"def",
"compute_lst",
"(",
"self",
")",
":",
"if",
"self",
".",
"header",
"[",
"b'telescope_id'",
"]",
"==",
"6",
":",
"self",
".",
"coords",
"=",
"gbt_coords",
"elif",
"self",
".",
"header",
"[",
"b'telescope_id'",
"]",
"==",
"4",
":",
"self",
".",
"coords",
"=",
"parkes_coords",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Currently only Parkes and GBT supported\"",
")",
"if",
"HAS_SLALIB",
":",
"# dut1 = (0.2 /3600.0) * np.pi/12.0",
"dut1",
"=",
"0.0",
"mjd",
"=",
"self",
".",
"header",
"[",
"b'tstart'",
"]",
"tellong",
"=",
"np",
".",
"deg2rad",
"(",
"self",
".",
"coords",
"[",
"1",
"]",
")",
"last",
"=",
"s",
".",
"sla_gmst",
"(",
"mjd",
")",
"-",
"tellong",
"+",
"s",
".",
"sla_eqeqx",
"(",
"mjd",
")",
"+",
"dut1",
"# lmst = s.sla_gmst(mjd) - tellong",
"if",
"last",
"<",
"0.0",
":",
"last",
"=",
"last",
"+",
"2.0",
"*",
"np",
".",
"pi",
"return",
"last",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"This method requires pySLALIB\"",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
test
|
Filterbank.compute_lsrk
|
Computes the LSR in km/s
Uses the MJD, RA and DEC of the observation, along with
the telescope location. Requires pyslalib
|
blimpy/filterbank.py
|
def compute_lsrk(self):
""" Computes the LSR in km/s
        Uses the MJD, RA and DEC of the observation, along with
        the telescope location. Requires pyslalib
"""
ra = Angle(self.header[b'src_raj'], unit='hourangle')
dec = Angle(self.header[b'src_dej'], unit='degree')
mjdd = self.header[b'tstart']
rarad = ra.to('radian').value
dcrad = dec.to('radian').value
last = self.compute_lst()
tellat = np.deg2rad(self.coords[0])
tellong = np.deg2rad(self.coords[1])
# convert star position to vector
starvect = s.sla_dcs2c(rarad, dcrad)
# velocity component in ra,dec due to Earth rotation
Rgeo = s.sla_rverot( tellat, rarad, dcrad, last)
# get Barycentric and heliocentric velocity and position of the Earth.
evp = s.sla_evp(mjdd, 2000.0)
dvb = evp[0] # barycentric velocity vector, in AU/sec
dpb = evp[1] # barycentric position vector, in AU
dvh = evp[2] # heliocentric velocity vector, in AU/sec
dph = evp[3] # heliocentric position vector, in AU
# dot product of vector to object and heliocentric velocity
# convert AU/sec to km/sec
vcorhelio = -s.sla_dvdv( starvect, dvh) *149.597870e6
vcorbary = -s.sla_dvdv( starvect, dvb) *149.597870e6
# rvlsrd is velocity component in ra,dec direction due to the Sun's
# motion with respect to the "dynamical" local standard of rest
rvlsrd = s.sla_rvlsrd( rarad,dcrad)
        # rvlsrk is velocity component in ra,dec direction due to
        # the Sun's motion w.r.t the "kinematic" local standard of rest
        rvlsrk = s.sla_rvlsrk( rarad,dcrad)
        # rvgalc is velocity component in ra,dec direction due to
        # the rotation of the Galaxy.
rvgalc = s.sla_rvgalc( rarad,dcrad)
totalhelio = Rgeo + vcorhelio
totalbary = Rgeo + vcorbary
totallsrk = totalhelio + rvlsrk
totalgal = totalbary + rvlsrd + rvgalc
return totallsrk
|
def compute_lsrk(self):
""" Computes the LSR in km/s
        Uses the MJD, RA and DEC of the observation, along with
        the telescope location. Requires pyslalib
"""
ra = Angle(self.header[b'src_raj'], unit='hourangle')
dec = Angle(self.header[b'src_dej'], unit='degree')
mjdd = self.header[b'tstart']
rarad = ra.to('radian').value
dcrad = dec.to('radian').value
last = self.compute_lst()
tellat = np.deg2rad(self.coords[0])
tellong = np.deg2rad(self.coords[1])
# convert star position to vector
starvect = s.sla_dcs2c(rarad, dcrad)
# velocity component in ra,dec due to Earth rotation
Rgeo = s.sla_rverot( tellat, rarad, dcrad, last)
# get Barycentric and heliocentric velocity and position of the Earth.
evp = s.sla_evp(mjdd, 2000.0)
dvb = evp[0] # barycentric velocity vector, in AU/sec
dpb = evp[1] # barycentric position vector, in AU
dvh = evp[2] # heliocentric velocity vector, in AU/sec
dph = evp[3] # heliocentric position vector, in AU
# dot product of vector to object and heliocentric velocity
# convert AU/sec to km/sec
vcorhelio = -s.sla_dvdv( starvect, dvh) *149.597870e6
vcorbary = -s.sla_dvdv( starvect, dvb) *149.597870e6
# rvlsrd is velocity component in ra,dec direction due to the Sun's
# motion with respect to the "dynamical" local standard of rest
rvlsrd = s.sla_rvlsrd( rarad,dcrad)
        # rvlsrk is velocity component in ra,dec direction due to
        # the Sun's motion w.r.t the "kinematic" local standard of rest
        rvlsrk = s.sla_rvlsrk( rarad,dcrad)
        # rvgalc is velocity component in ra,dec direction due to
        # the rotation of the Galaxy.
rvgalc = s.sla_rvgalc( rarad,dcrad)
totalhelio = Rgeo + vcorhelio
totalbary = Rgeo + vcorbary
totallsrk = totalhelio + rvlsrk
totalgal = totalbary + rvlsrd + rvgalc
return totallsrk
|
[
"Computes",
"the",
"LSR",
"in",
"km",
"/",
"s"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/filterbank.py#L348-L397
|
[
"def",
"compute_lsrk",
"(",
"self",
")",
":",
"ra",
"=",
"Angle",
"(",
"self",
".",
"header",
"[",
"b'src_raj'",
"]",
",",
"unit",
"=",
"'hourangle'",
")",
"dec",
"=",
"Angle",
"(",
"self",
".",
"header",
"[",
"b'src_dej'",
"]",
",",
"unit",
"=",
"'degree'",
")",
"mjdd",
"=",
"self",
".",
"header",
"[",
"b'tstart'",
"]",
"rarad",
"=",
"ra",
".",
"to",
"(",
"'radian'",
")",
".",
"value",
"dcrad",
"=",
"dec",
".",
"to",
"(",
"'radian'",
")",
".",
"value",
"last",
"=",
"self",
".",
"compute_lst",
"(",
")",
"tellat",
"=",
"np",
".",
"deg2rad",
"(",
"self",
".",
"coords",
"[",
"0",
"]",
")",
"tellong",
"=",
"np",
".",
"deg2rad",
"(",
"self",
".",
"coords",
"[",
"1",
"]",
")",
"# convert star position to vector",
"starvect",
"=",
"s",
".",
"sla_dcs2c",
"(",
"rarad",
",",
"dcrad",
")",
"# velocity component in ra,dec due to Earth rotation",
"Rgeo",
"=",
"s",
".",
"sla_rverot",
"(",
"tellat",
",",
"rarad",
",",
"dcrad",
",",
"last",
")",
"# get Barycentric and heliocentric velocity and position of the Earth.",
"evp",
"=",
"s",
".",
"sla_evp",
"(",
"mjdd",
",",
"2000.0",
")",
"dvb",
"=",
"evp",
"[",
"0",
"]",
"# barycentric velocity vector, in AU/sec",
"dpb",
"=",
"evp",
"[",
"1",
"]",
"# barycentric position vector, in AU",
"dvh",
"=",
"evp",
"[",
"2",
"]",
"# heliocentric velocity vector, in AU/sec",
"dph",
"=",
"evp",
"[",
"3",
"]",
"# heliocentric position vector, in AU",
"# dot product of vector to object and heliocentric velocity",
"# convert AU/sec to km/sec",
"vcorhelio",
"=",
"-",
"s",
".",
"sla_dvdv",
"(",
"starvect",
",",
"dvh",
")",
"*",
"149.597870e6",
"vcorbary",
"=",
"-",
"s",
".",
"sla_dvdv",
"(",
"starvect",
",",
"dvb",
")",
"*",
"149.597870e6",
"# rvlsrd is velocity component in ra,dec direction due to the Sun's",
"# motion with respect to the \"dynamical\" local standard of rest",
"rvlsrd",
"=",
"s",
".",
"sla_rvlsrd",
"(",
"rarad",
",",
"dcrad",
")",
"# rvlsrk is velocity component in ra,dec direction due to i",
"# the Sun's motion w.r.t the \"kinematic\" local standard of rest",
"rvlsrk",
"=",
"s",
".",
"sla_rvlsrk",
"(",
"rarad",
",",
"dcrad",
")",
"# rvgalc is velocity component in ra,dec direction due to",
"#the rotation of the Galaxy.",
"rvgalc",
"=",
"s",
".",
"sla_rvgalc",
"(",
"rarad",
",",
"dcrad",
")",
"totalhelio",
"=",
"Rgeo",
"+",
"vcorhelio",
"totalbary",
"=",
"Rgeo",
"+",
"vcorbary",
"totallsrk",
"=",
"totalhelio",
"+",
"rvlsrk",
"totalgal",
"=",
"totalbary",
"+",
"rvlsrd",
"+",
"rvgalc",
"return",
"totallsrk"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
test
|
Filterbank.blank_dc
|
Blank DC bins in coarse channels.
Note: currently only works if entire file is read
|
blimpy/filterbank.py
|
def blank_dc(self, n_coarse_chan):
""" Blank DC bins in coarse channels.
Note: currently only works if entire file is read
"""
if n_coarse_chan < 1:
logger.warning('Coarse channel number < 1, unable to blank DC bin.')
return None
if not n_coarse_chan % int(n_coarse_chan) == 0:
            logger.warning('Selection does not contain an integer number of coarse channels, unable to blank DC bin.')
return None
n_coarse_chan = int(n_coarse_chan)
n_chan = self.data.shape[-1]
n_chan_per_coarse = int(n_chan / n_coarse_chan)
mid_chan = int(n_chan_per_coarse / 2)
for ii in range(n_coarse_chan):
ss = ii*n_chan_per_coarse
self.data[..., ss+mid_chan] = np.median(self.data[..., ss+mid_chan+5:ss+mid_chan+10])
|
def blank_dc(self, n_coarse_chan):
""" Blank DC bins in coarse channels.
Note: currently only works if entire file is read
"""
if n_coarse_chan < 1:
logger.warning('Coarse channel number < 1, unable to blank DC bin.')
return None
if not n_coarse_chan % int(n_coarse_chan) == 0:
            logger.warning('Selection does not contain an integer number of coarse channels, unable to blank DC bin.')
return None
n_coarse_chan = int(n_coarse_chan)
n_chan = self.data.shape[-1]
n_chan_per_coarse = int(n_chan / n_coarse_chan)
mid_chan = int(n_chan_per_coarse / 2)
for ii in range(n_coarse_chan):
ss = ii*n_chan_per_coarse
self.data[..., ss+mid_chan] = np.median(self.data[..., ss+mid_chan+5:ss+mid_chan+10])
|
[
"Blank",
"DC",
"bins",
"in",
"coarse",
"channels",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/filterbank.py#L399-L422
|
[
"def",
"blank_dc",
"(",
"self",
",",
"n_coarse_chan",
")",
":",
"if",
"n_coarse_chan",
"<",
"1",
":",
"logger",
".",
"warning",
"(",
"'Coarse channel number < 1, unable to blank DC bin.'",
")",
"return",
"None",
"if",
"not",
"n_coarse_chan",
"%",
"int",
"(",
"n_coarse_chan",
")",
"==",
"0",
":",
"logger",
".",
"warning",
"(",
"'Selection does not contain an interger number of coarse channels, unable to blank DC bin.'",
")",
"return",
"None",
"n_coarse_chan",
"=",
"int",
"(",
"n_coarse_chan",
")",
"n_chan",
"=",
"self",
".",
"data",
".",
"shape",
"[",
"-",
"1",
"]",
"n_chan_per_coarse",
"=",
"int",
"(",
"n_chan",
"/",
"n_coarse_chan",
")",
"mid_chan",
"=",
"int",
"(",
"n_chan_per_coarse",
"/",
"2",
")",
"for",
"ii",
"in",
"range",
"(",
"n_coarse_chan",
")",
":",
"ss",
"=",
"ii",
"*",
"n_chan_per_coarse",
"self",
".",
"data",
"[",
"...",
",",
"ss",
"+",
"mid_chan",
"]",
"=",
"np",
".",
"median",
"(",
"self",
".",
"data",
"[",
"...",
",",
"ss",
"+",
"mid_chan",
"+",
"5",
":",
"ss",
"+",
"mid_chan",
"+",
"10",
"]",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
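blank_dc above replaces the centre (DC) bin of every coarse channel with the median of a few neighbouring bins. A self-contained sketch of the same indexing on synthetic data follows; the array shapes are hypothetical, whereas the real method operates on self.data as read from the file.

import numpy as np

# Synthetic waterfall: 4 coarse channels of 16 fine channels each, with an
# artificial DC spike at the centre bin of every coarse channel.
n_coarse, n_fine = 4, 16
data = np.random.normal(10.0, 1.0, size=(8, n_coarse * n_fine))
mid = n_fine // 2
data[:, mid::n_fine] += 100.0  # inject the spikes

# Same replacement as blank_dc: overwrite each centre bin with the median
# of a few bins just above it.
for ii in range(n_coarse):
    ss = ii * n_fine
    data[..., ss + mid] = np.median(data[..., ss + mid + 5: ss + mid + 10])

print(data[:, mid::n_fine].max())  # spikes are gone, values near 10 again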
test
|
Filterbank.info
|
Print header information
|
blimpy/filterbank.py
|
def info(self):
""" Print header information """
for key, val in self.header.items():
if key == b'src_raj':
val = val.to_string(unit=u.hour, sep=':')
if key == b'src_dej':
val = val.to_string(unit=u.deg, sep=':')
if key == b'tsamp':
val *= u.second
if key in (b'foff', b'fch1'):
val *= u.MHz
if key == b'tstart':
print("%16s : %32s" % ("tstart (ISOT)", Time(val, format='mjd').isot))
key = "tstart (MJD)"
print("%16s : %32s" % (key, val))
print("\n%16s : %32s" % ("Num ints in file", self.n_ints_in_file))
print("%16s : %32s" % ("Data shape", self.data.shape))
print("%16s : %32s" % ("Start freq (MHz)", self.freqs[0]))
print("%16s : %32s" % ("Stop freq (MHz)", self.freqs[-1]))
|
def info(self):
""" Print header information """
for key, val in self.header.items():
if key == b'src_raj':
val = val.to_string(unit=u.hour, sep=':')
if key == b'src_dej':
val = val.to_string(unit=u.deg, sep=':')
if key == b'tsamp':
val *= u.second
if key in (b'foff', b'fch1'):
val *= u.MHz
if key == b'tstart':
print("%16s : %32s" % ("tstart (ISOT)", Time(val, format='mjd').isot))
key = "tstart (MJD)"
print("%16s : %32s" % (key, val))
print("\n%16s : %32s" % ("Num ints in file", self.n_ints_in_file))
print("%16s : %32s" % ("Data shape", self.data.shape))
print("%16s : %32s" % ("Start freq (MHz)", self.freqs[0]))
print("%16s : %32s" % ("Stop freq (MHz)", self.freqs[-1]))
|
[
"Print",
"header",
"information"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/filterbank.py#L424-L444
|
[
"def",
"info",
"(",
"self",
")",
":",
"for",
"key",
",",
"val",
"in",
"self",
".",
"header",
".",
"items",
"(",
")",
":",
"if",
"key",
"==",
"b'src_raj'",
":",
"val",
"=",
"val",
".",
"to_string",
"(",
"unit",
"=",
"u",
".",
"hour",
",",
"sep",
"=",
"':'",
")",
"if",
"key",
"==",
"b'src_dej'",
":",
"val",
"=",
"val",
".",
"to_string",
"(",
"unit",
"=",
"u",
".",
"deg",
",",
"sep",
"=",
"':'",
")",
"if",
"key",
"==",
"b'tsamp'",
":",
"val",
"*=",
"u",
".",
"second",
"if",
"key",
"in",
"(",
"'foff'",
",",
"'fch1'",
")",
":",
"val",
"*=",
"u",
".",
"MHz",
"if",
"key",
"==",
"b'tstart'",
":",
"print",
"(",
"\"%16s : %32s\"",
"%",
"(",
"\"tstart (ISOT)\"",
",",
"Time",
"(",
"val",
",",
"format",
"=",
"'mjd'",
")",
".",
"isot",
")",
")",
"key",
"=",
"\"tstart (MJD)\"",
"print",
"(",
"\"%16s : %32s\"",
"%",
"(",
"key",
",",
"val",
")",
")",
"print",
"(",
"\"\\n%16s : %32s\"",
"%",
"(",
"\"Num ints in file\"",
",",
"self",
".",
"n_ints_in_file",
")",
")",
"print",
"(",
"\"%16s : %32s\"",
"%",
"(",
"\"Data shape\"",
",",
"self",
".",
"data",
".",
"shape",
")",
")",
"print",
"(",
"\"%16s : %32s\"",
"%",
"(",
"\"Start freq (MHz)\"",
",",
"self",
".",
"freqs",
"[",
"0",
"]",
")",
")",
"print",
"(",
"\"%16s : %32s\"",
"%",
"(",
"\"Stop freq (MHz)\"",
",",
"self",
".",
"freqs",
"[",
"-",
"1",
"]",
")",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
test
|
Filterbank.generate_freqs
|
returns frequency array [f_start...f_stop]
|
blimpy/filterbank.py
|
def generate_freqs(self, f_start, f_stop):
"""
returns frequency array [f_start...f_stop]
"""
fch1 = self.header[b'fch1']
foff = self.header[b'foff']
#convert input frequencies into what their corresponding index would be
i_start = int((f_start - fch1) / foff)
i_stop = int((f_stop - fch1) / foff)
#calculate closest true index value
chan_start_idx = int(i_start)
chan_stop_idx = int(i_stop)
#create freq array
i_vals = np.arange(chan_stop_idx, chan_start_idx, 1)
freqs = foff * i_vals + fch1
return freqs
|
def generate_freqs(self, f_start, f_stop):
"""
returns frequency array [f_start...f_stop]
"""
fch1 = self.header[b'fch1']
foff = self.header[b'foff']
#convert input frequencies into what their corresponding index would be
i_start = int((f_start - fch1) / foff)
i_stop = int((f_stop - fch1) / foff)
#calculate closest true index value
chan_start_idx = int(i_start)
chan_stop_idx = int(i_stop)
#create freq array
i_vals = np.arange(chan_stop_idx, chan_start_idx, 1)
freqs = foff * i_vals + fch1
return freqs
|
[
"returns",
"frequency",
"array",
"[",
"f_start",
"...",
"f_stop",
"]"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/filterbank.py#L446-L467
|
[
"def",
"generate_freqs",
"(",
"self",
",",
"f_start",
",",
"f_stop",
")",
":",
"fch1",
"=",
"self",
".",
"header",
"[",
"b'fch1'",
"]",
"foff",
"=",
"self",
".",
"header",
"[",
"b'foff'",
"]",
"#convert input frequencies into what their corresponding index would be",
"i_start",
"=",
"int",
"(",
"(",
"f_start",
"-",
"fch1",
")",
"/",
"foff",
")",
"i_stop",
"=",
"int",
"(",
"(",
"f_stop",
"-",
"fch1",
")",
"/",
"foff",
")",
"#calculate closest true index value",
"chan_start_idx",
"=",
"np",
".",
"int",
"(",
"i_start",
")",
"chan_stop_idx",
"=",
"np",
".",
"int",
"(",
"i_stop",
")",
"#create freq array",
"i_vals",
"=",
"np",
".",
"arange",
"(",
"chan_stop_idx",
",",
"chan_start_idx",
",",
"1",
")",
"freqs",
"=",
"foff",
"*",
"i_vals",
"+",
"fch1",
"return",
"freqs"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
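generate_freqs maps the requested band edges to channel indices via fch1 and foff, then rebuilds the frequency axis from those indices. A standalone sketch with hypothetical header values follows; note that with a negative foff (a descending frequency axis, as in the example) the stop index is the smaller one, so the arange runs in ascending index order just like the method above.

import numpy as np

def generate_freqs(fch1, foff, f_start, f_stop):
    # Map band edges to channel indices, then rebuild the axis.
    i_start = int((f_start - fch1) / foff)
    i_stop = int((f_stop - fch1) / foff)
    i_vals = np.arange(i_stop, i_start, 1)
    return foff * i_vals + fch1

# Hypothetical header: first channel at 1500 MHz, 1 kHz channels descending.
freqs = generate_freqs(fch1=1500.0, foff=-0.001, f_start=1499.8, f_stop=1499.9)
print(len(freqs), freqs[0], freqs[-1])  # ~100 channels from ~1499.9 down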
test
|
Filterbank._calc_extent
|
Set up plotting edges.
|
blimpy/filterbank.py
|
def _calc_extent(self,plot_f=None,plot_t=None,MJD_time=False):
""" Setup ploting edges.
"""
plot_f_begin = plot_f[0]
plot_f_end = plot_f[-1] + (plot_f[1]-plot_f[0])
plot_t_begin = self.timestamps[0]
plot_t_end = self.timestamps[-1] + (self.timestamps[1] - self.timestamps[0])
if MJD_time:
extent=(plot_f_begin, plot_f_end, plot_t_begin, plot_t_end)
else:
extent=(plot_f_begin, plot_f_end, 0.0,(plot_t_end-plot_t_begin)*24.*60.*60)
return extent
|
def _calc_extent(self,plot_f=None,plot_t=None,MJD_time=False):
""" Setup ploting edges.
"""
plot_f_begin = plot_f[0]
plot_f_end = plot_f[-1] + (plot_f[1]-plot_f[0])
plot_t_begin = self.timestamps[0]
plot_t_end = self.timestamps[-1] + (self.timestamps[1] - self.timestamps[0])
if MJD_time:
extent=(plot_f_begin, plot_f_end, plot_t_begin, plot_t_end)
else:
extent=(plot_f_begin, plot_f_end, 0.0,(plot_t_end-plot_t_begin)*24.*60.*60)
return extent
|
[
"Setup",
"ploting",
"edges",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/filterbank.py#L498-L513
|
[
"def",
"_calc_extent",
"(",
"self",
",",
"plot_f",
"=",
"None",
",",
"plot_t",
"=",
"None",
",",
"MJD_time",
"=",
"False",
")",
":",
"plot_f_begin",
"=",
"plot_f",
"[",
"0",
"]",
"plot_f_end",
"=",
"plot_f",
"[",
"-",
"1",
"]",
"+",
"(",
"plot_f",
"[",
"1",
"]",
"-",
"plot_f",
"[",
"0",
"]",
")",
"plot_t_begin",
"=",
"self",
".",
"timestamps",
"[",
"0",
"]",
"plot_t_end",
"=",
"self",
".",
"timestamps",
"[",
"-",
"1",
"]",
"+",
"(",
"self",
".",
"timestamps",
"[",
"1",
"]",
"-",
"self",
".",
"timestamps",
"[",
"0",
"]",
")",
"if",
"MJD_time",
":",
"extent",
"=",
"(",
"plot_f_begin",
",",
"plot_f_begin_end",
",",
"plot_t_begin",
",",
"plot_t_end",
")",
"else",
":",
"extent",
"=",
"(",
"plot_f_begin",
",",
"plot_f_end",
",",
"0.0",
",",
"(",
"plot_t_end",
"-",
"plot_t_begin",
")",
"*",
"24.",
"*",
"60.",
"*",
"60",
")",
"return",
"extent"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
test
|
Filterbank.plot_spectrum
|
Plot frequency spectrum of a given file
Args:
t (int): integration number to plot (0 -> len(data))
logged (bool): Plot in linear (False) or dB units (True)
if_id (int): IF identification (if multiple IF signals in file)
c: color for line
kwargs: keyword args to be passed to matplotlib plot()
|
blimpy/filterbank.py
|
def plot_spectrum(self, t=0, f_start=None, f_stop=None, logged=False, if_id=0, c=None, **kwargs):
""" Plot frequency spectrum of a given file
Args:
t (int): integration number to plot (0 -> len(data))
logged (bool): Plot in linear (False) or dB units (True)
if_id (int): IF identification (if multiple IF signals in file)
c: color for line
kwargs: keyword args to be passed to matplotlib plot()
"""
if self.header[b'nbits'] <=2:
logged = False
t='all'
ax = plt.gca()
plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)
#Using ascending frequency for all plots.
if self.header[b'foff'] < 0:
plot_data = plot_data[..., ::-1] # Reverse data
plot_f = plot_f[::-1]
if isinstance(t, int):
print("extracting integration %i..." % t)
plot_data = plot_data[t]
elif t == 'all':
print("averaging along time axis...")
#Since the data has been squeezed, the axis for time goes away if only one bin, causing a bug with axis=1
if len(plot_data.shape) > 1:
plot_data = plot_data.mean(axis=0)
else:
plot_data = plot_data.mean()
else:
raise RuntimeError("Unknown integration %s" % t)
# Rebin to max number of points
dec_fac_x = 1
if plot_data.shape[0] > MAX_PLT_POINTS:
dec_fac_x = int(plot_data.shape[0] / MAX_PLT_POINTS)
plot_data = rebin(plot_data, dec_fac_x, 1)
plot_f = rebin(plot_f, dec_fac_x, 1)
if not c:
kwargs['c'] = '#333333'
if logged:
plt.plot(plot_f, db(plot_data),label='Stokes I', **kwargs)
plt.ylabel("Power [dB]")
else:
plt.plot(plot_f, plot_data,label='Stokes I', **kwargs)
plt.ylabel("Power [counts]")
plt.xlabel("Frequency [MHz]")
plt.legend()
try:
plt.title(self.header[b'source_name'])
except KeyError:
plt.title(self.filename)
plt.xlim(plot_f[0], plot_f[-1])
|
def plot_spectrum(self, t=0, f_start=None, f_stop=None, logged=False, if_id=0, c=None, **kwargs):
""" Plot frequency spectrum of a given file
Args:
t (int): integration number to plot (0 -> len(data))
logged (bool): Plot in linear (False) or dB units (True)
if_id (int): IF identification (if multiple IF signals in file)
c: color for line
kwargs: keyword args to be passed to matplotlib plot()
"""
if self.header[b'nbits'] <=2:
logged = False
t='all'
ax = plt.gca()
plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)
#Using ascending frequency for all plots.
if self.header[b'foff'] < 0:
plot_data = plot_data[..., ::-1] # Reverse data
plot_f = plot_f[::-1]
if isinstance(t, int):
print("extracting integration %i..." % t)
plot_data = plot_data[t]
elif t == 'all':
print("averaging along time axis...")
#Since the data has been squeezed, the axis for time goes away if only one bin, causing a bug with axis=1
if len(plot_data.shape) > 1:
plot_data = plot_data.mean(axis=0)
else:
plot_data = plot_data.mean()
else:
raise RuntimeError("Unknown integration %s" % t)
# Rebin to max number of points
dec_fac_x = 1
if plot_data.shape[0] > MAX_PLT_POINTS:
dec_fac_x = int(plot_data.shape[0] / MAX_PLT_POINTS)
plot_data = rebin(plot_data, dec_fac_x, 1)
plot_f = rebin(plot_f, dec_fac_x, 1)
if not c:
kwargs['c'] = '#333333'
if logged:
plt.plot(plot_f, db(plot_data),label='Stokes I', **kwargs)
plt.ylabel("Power [dB]")
else:
plt.plot(plot_f, plot_data,label='Stokes I', **kwargs)
plt.ylabel("Power [counts]")
plt.xlabel("Frequency [MHz]")
plt.legend()
try:
plt.title(self.header[b'source_name'])
except KeyError:
plt.title(self.filename)
plt.xlim(plot_f[0], plot_f[-1])
|
[
"Plot",
"frequency",
"spectrum",
"of",
"a",
"given",
"file"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/filterbank.py#L515-L576
|
[
"def",
"plot_spectrum",
"(",
"self",
",",
"t",
"=",
"0",
",",
"f_start",
"=",
"None",
",",
"f_stop",
"=",
"None",
",",
"logged",
"=",
"False",
",",
"if_id",
"=",
"0",
",",
"c",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"header",
"[",
"b'nbits'",
"]",
"<=",
"2",
":",
"logged",
"=",
"False",
"t",
"=",
"'all'",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"plot_f",
",",
"plot_data",
"=",
"self",
".",
"grab_data",
"(",
"f_start",
",",
"f_stop",
",",
"if_id",
")",
"#Using accending frequency for all plots.",
"if",
"self",
".",
"header",
"[",
"b'foff'",
"]",
"<",
"0",
":",
"plot_data",
"=",
"plot_data",
"[",
"...",
",",
":",
":",
"-",
"1",
"]",
"# Reverse data",
"plot_f",
"=",
"plot_f",
"[",
":",
":",
"-",
"1",
"]",
"if",
"isinstance",
"(",
"t",
",",
"int",
")",
":",
"print",
"(",
"\"extracting integration %i...\"",
"%",
"t",
")",
"plot_data",
"=",
"plot_data",
"[",
"t",
"]",
"elif",
"t",
"==",
"b'all'",
":",
"print",
"(",
"\"averaging along time axis...\"",
")",
"#Since the data has been squeezed, the axis for time goes away if only one bin, causing a bug with axis=1",
"if",
"len",
"(",
"plot_data",
".",
"shape",
")",
">",
"1",
":",
"plot_data",
"=",
"plot_data",
".",
"mean",
"(",
"axis",
"=",
"0",
")",
"else",
":",
"plot_data",
"=",
"plot_data",
".",
"mean",
"(",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Unknown integration %s\"",
"%",
"t",
")",
"# Rebin to max number of points",
"dec_fac_x",
"=",
"1",
"if",
"plot_data",
".",
"shape",
"[",
"0",
"]",
">",
"MAX_PLT_POINTS",
":",
"dec_fac_x",
"=",
"int",
"(",
"plot_data",
".",
"shape",
"[",
"0",
"]",
"/",
"MAX_PLT_POINTS",
")",
"plot_data",
"=",
"rebin",
"(",
"plot_data",
",",
"dec_fac_x",
",",
"1",
")",
"plot_f",
"=",
"rebin",
"(",
"plot_f",
",",
"dec_fac_x",
",",
"1",
")",
"if",
"not",
"c",
":",
"kwargs",
"[",
"'c'",
"]",
"=",
"'#333333'",
"if",
"logged",
":",
"plt",
".",
"plot",
"(",
"plot_f",
",",
"db",
"(",
"plot_data",
")",
",",
"label",
"=",
"'Stokes I'",
",",
"*",
"*",
"kwargs",
")",
"plt",
".",
"ylabel",
"(",
"\"Power [dB]\"",
")",
"else",
":",
"plt",
".",
"plot",
"(",
"plot_f",
",",
"plot_data",
",",
"label",
"=",
"'Stokes I'",
",",
"*",
"*",
"kwargs",
")",
"plt",
".",
"ylabel",
"(",
"\"Power [counts]\"",
")",
"plt",
".",
"xlabel",
"(",
"\"Frequency [MHz]\"",
")",
"plt",
".",
"legend",
"(",
")",
"try",
":",
"plt",
".",
"title",
"(",
"self",
".",
"header",
"[",
"b'source_name'",
"]",
")",
"except",
"KeyError",
":",
"plt",
".",
"title",
"(",
"self",
".",
"filename",
")",
"plt",
".",
"xlim",
"(",
"plot_f",
"[",
"0",
"]",
",",
"plot_f",
"[",
"-",
"1",
"]",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
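plot_spectrum caps the number of plotted points by block-averaging: whenever the spectrum is longer than MAX_PLT_POINTS, it is rebinned by an integer decimation factor. Below is a minimal 1-D stand-in for that step; the constant's value and the helper are assumptions (blimpy's actual rebin also handles 2-D data).

import numpy as np

MAX_PLT_POINTS = 2**15  # assumed value of the module-level cap used above

def rebin_1d(x, factor):
    # Block-average by an integer factor; a 1-D stand-in for blimpy's rebin.
    n = (len(x) // factor) * factor  # drop any ragged tail
    return x[:n].reshape(-1, factor).mean(axis=1)

spectrum = np.random.normal(size=10**6)
if spectrum.shape[0] > MAX_PLT_POINTS:
    dec_fac_x = int(spectrum.shape[0] / MAX_PLT_POINTS)
    spectrum = rebin_1d(spectrum, dec_fac_x)
print(spectrum.shape)  # (33333,): close to, not exactly, the cap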
test
|
Filterbank.plot_spectrum_min_max
|
Plot frequency spectrum of a given file
Args:
logged (bool): Plot in linear (False) or dB units (True)
if_id (int): IF identification (if multiple IF signals in file)
c: color for line
kwargs: keyword args to be passed to matplotlib plot()
|
blimpy/filterbank.py
|
def plot_spectrum_min_max(self, t=0, f_start=None, f_stop=None, logged=False, if_id=0, c=None, **kwargs):
""" Plot frequency spectrum of a given file
Args:
logged (bool): Plot in linear (False) or dB units (True)
if_id (int): IF identification (if multiple IF signals in file)
c: color for line
kwargs: keyword args to be passed to matplotlib plot()
"""
ax = plt.gca()
plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)
#Using ascending frequency for all plots.
if self.header[b'foff'] < 0:
plot_data = plot_data[..., ::-1] # Reverse data
plot_f = plot_f[::-1]
fig_max = plot_data[0].max()
fig_min = plot_data[0].min()
print("averaging along time axis...")
#Since the data has been squeezed, the axis for time goes away if only one bin, causing a bug with axis=1
if len(plot_data.shape) > 1:
plot_max = plot_data.max(axis=0)
plot_min = plot_data.min(axis=0)
plot_data = plot_data.mean(axis=0)
else:
plot_max = plot_data.max()
plot_min = plot_data.min()
plot_data = plot_data.mean()
# Rebin to max number of points
dec_fac_x = 1
MAX_PLT_POINTS = 8*64 # Low resolution to see the difference.
if plot_data.shape[0] > MAX_PLT_POINTS:
dec_fac_x = int(plot_data.shape[0] / MAX_PLT_POINTS)
plot_data = rebin(plot_data, dec_fac_x, 1)
plot_min = rebin(plot_min, dec_fac_x, 1)
plot_max = rebin(plot_max, dec_fac_x, 1)
plot_f = rebin(plot_f, dec_fac_x, 1)
if logged:
plt.plot(plot_f, db(plot_data), "#333333", label='mean', **kwargs)
plt.plot(plot_f, db(plot_max), "#e74c3c", label='max', **kwargs)
plt.plot(plot_f, db(plot_min), '#3b5b92', label='min', **kwargs)
plt.ylabel("Power [dB]")
else:
plt.plot(plot_f, plot_data, "#333333", label='mean', **kwargs)
plt.plot(plot_f, plot_max, "#e74c3c", label='max', **kwargs)
plt.plot(plot_f, plot_min, '#3b5b92', label='min', **kwargs)
plt.ylabel("Power [counts]")
plt.xlabel("Frequency [MHz]")
plt.legend()
try:
plt.title(self.header[b'source_name'])
except KeyError:
plt.title(self.filename)
plt.xlim(plot_f[0], plot_f[-1])
if logged:
plt.ylim(db(fig_min),db(fig_max))
|
def plot_spectrum_min_max(self, t=0, f_start=None, f_stop=None, logged=False, if_id=0, c=None, **kwargs):
""" Plot frequency spectrum of a given file
Args:
logged (bool): Plot in linear (False) or dB units (True)
if_id (int): IF identification (if multiple IF signals in file)
c: color for line
kwargs: keyword args to be passed to matplotlib plot()
"""
ax = plt.gca()
plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)
#Using ascending frequency for all plots.
if self.header[b'foff'] < 0:
plot_data = plot_data[..., ::-1] # Reverse data
plot_f = plot_f[::-1]
fig_max = plot_data[0].max()
fig_min = plot_data[0].min()
print("averaging along time axis...")
#Since the data has been squeezed, the axis for time goes away if only one bin, causing a bug with axis=1
if len(plot_data.shape) > 1:
plot_max = plot_data.max(axis=0)
plot_min = plot_data.min(axis=0)
plot_data = plot_data.mean(axis=0)
else:
plot_max = plot_data.max()
plot_min = plot_data.min()
plot_data = plot_data.mean()
# Rebin to max number of points
dec_fac_x = 1
MAX_PLT_POINTS = 8*64 # Low resolution to see the difference.
if plot_data.shape[0] > MAX_PLT_POINTS:
dec_fac_x = int(plot_data.shape[0] / MAX_PLT_POINTS)
plot_data = rebin(plot_data, dec_fac_x, 1)
plot_min = rebin(plot_min, dec_fac_x, 1)
plot_max = rebin(plot_max, dec_fac_x, 1)
plot_f = rebin(plot_f, dec_fac_x, 1)
if logged:
plt.plot(plot_f, db(plot_data), "#333333", label='mean', **kwargs)
plt.plot(plot_f, db(plot_max), "#e74c3c", label='max', **kwargs)
plt.plot(plot_f, db(plot_min), '#3b5b92', label='min', **kwargs)
plt.ylabel("Power [dB]")
else:
plt.plot(plot_f, plot_data, "#333333", label='mean', **kwargs)
plt.plot(plot_f, plot_max, "#e74c3c", label='max', **kwargs)
plt.plot(plot_f, plot_min, '#3b5b92', label='min', **kwargs)
plt.ylabel("Power [counts]")
plt.xlabel("Frequency [MHz]")
plt.legend()
try:
plt.title(self.header[b'source_name'])
except KeyError:
plt.title(self.filename)
plt.xlim(plot_f[0], plot_f[-1])
if logged:
plt.ylim(db(fig_min),db(fig_max))
|
[
"Plot",
"frequency",
"spectrum",
"of",
"a",
"given",
"file"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/filterbank.py#L578-L642
|
[
"def",
"plot_spectrum_min_max",
"(",
"self",
",",
"t",
"=",
"0",
",",
"f_start",
"=",
"None",
",",
"f_stop",
"=",
"None",
",",
"logged",
"=",
"False",
",",
"if_id",
"=",
"0",
",",
"c",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"plot_f",
",",
"plot_data",
"=",
"self",
".",
"grab_data",
"(",
"f_start",
",",
"f_stop",
",",
"if_id",
")",
"#Using accending frequency for all plots.",
"if",
"self",
".",
"header",
"[",
"b'foff'",
"]",
"<",
"0",
":",
"plot_data",
"=",
"plot_data",
"[",
"...",
",",
":",
":",
"-",
"1",
"]",
"# Reverse data",
"plot_f",
"=",
"plot_f",
"[",
":",
":",
"-",
"1",
"]",
"fig_max",
"=",
"plot_data",
"[",
"0",
"]",
".",
"max",
"(",
")",
"fig_min",
"=",
"plot_data",
"[",
"0",
"]",
".",
"min",
"(",
")",
"print",
"(",
"\"averaging along time axis...\"",
")",
"#Since the data has been squeezed, the axis for time goes away if only one bin, causing a bug with axis=1",
"if",
"len",
"(",
"plot_data",
".",
"shape",
")",
">",
"1",
":",
"plot_max",
"=",
"plot_data",
".",
"max",
"(",
"axis",
"=",
"0",
")",
"plot_min",
"=",
"plot_data",
".",
"min",
"(",
"axis",
"=",
"0",
")",
"plot_data",
"=",
"plot_data",
".",
"mean",
"(",
"axis",
"=",
"0",
")",
"else",
":",
"plot_max",
"=",
"plot_data",
".",
"max",
"(",
")",
"plot_min",
"=",
"plot_data",
".",
"min",
"(",
")",
"plot_data",
"=",
"plot_data",
".",
"mean",
"(",
")",
"# Rebin to max number of points",
"dec_fac_x",
"=",
"1",
"MAX_PLT_POINTS",
"=",
"8",
"*",
"64",
"# Low resoluition to see the difference.",
"if",
"plot_data",
".",
"shape",
"[",
"0",
"]",
">",
"MAX_PLT_POINTS",
":",
"dec_fac_x",
"=",
"int",
"(",
"plot_data",
".",
"shape",
"[",
"0",
"]",
"/",
"MAX_PLT_POINTS",
")",
"plot_data",
"=",
"rebin",
"(",
"plot_data",
",",
"dec_fac_x",
",",
"1",
")",
"plot_min",
"=",
"rebin",
"(",
"plot_min",
",",
"dec_fac_x",
",",
"1",
")",
"plot_max",
"=",
"rebin",
"(",
"plot_max",
",",
"dec_fac_x",
",",
"1",
")",
"plot_f",
"=",
"rebin",
"(",
"plot_f",
",",
"dec_fac_x",
",",
"1",
")",
"if",
"logged",
":",
"plt",
".",
"plot",
"(",
"plot_f",
",",
"db",
"(",
"plot_data",
")",
",",
"\"#333333\"",
",",
"label",
"=",
"'mean'",
",",
"*",
"*",
"kwargs",
")",
"plt",
".",
"plot",
"(",
"plot_f",
",",
"db",
"(",
"plot_max",
")",
",",
"\"#e74c3c\"",
",",
"label",
"=",
"'max'",
",",
"*",
"*",
"kwargs",
")",
"plt",
".",
"plot",
"(",
"plot_f",
",",
"db",
"(",
"plot_min",
")",
",",
"'#3b5b92'",
",",
"label",
"=",
"'min'",
",",
"*",
"*",
"kwargs",
")",
"plt",
".",
"ylabel",
"(",
"\"Power [dB]\"",
")",
"else",
":",
"plt",
".",
"plot",
"(",
"plot_f",
",",
"plot_data",
",",
"\"#333333\"",
",",
"label",
"=",
"'mean'",
",",
"*",
"*",
"kwargs",
")",
"plt",
".",
"plot",
"(",
"plot_f",
",",
"plot_max",
",",
"\"#e74c3c\"",
",",
"label",
"=",
"'max'",
",",
"*",
"*",
"kwargs",
")",
"plt",
".",
"plot",
"(",
"plot_f",
",",
"plot_min",
",",
"'#3b5b92'",
",",
"label",
"=",
"'min'",
",",
"*",
"*",
"kwargs",
")",
"plt",
".",
"ylabel",
"(",
"\"Power [counts]\"",
")",
"plt",
".",
"xlabel",
"(",
"\"Frequency [MHz]\"",
")",
"plt",
".",
"legend",
"(",
")",
"try",
":",
"plt",
".",
"title",
"(",
"self",
".",
"header",
"[",
"b'source_name'",
"]",
")",
"except",
"KeyError",
":",
"plt",
".",
"title",
"(",
"self",
".",
"filename",
")",
"plt",
".",
"xlim",
"(",
"plot_f",
"[",
"0",
"]",
",",
"plot_f",
"[",
"-",
"1",
"]",
")",
"if",
"logged",
":",
"plt",
".",
"ylim",
"(",
"db",
"(",
"fig_min",
")",
",",
"db",
"(",
"fig_max",
")",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
test
|
Filterbank.plot_waterfall
|
Plot waterfall of data
Args:
f_start (float): start frequency, in MHz
f_stop (float): stop frequency, in MHz
logged (bool): Plot in linear (False) or dB units (True)
cb (bool): for plotting the colorbar
kwargs: keyword args to be passed to matplotlib imshow()
|
blimpy/filterbank.py
|
def plot_waterfall(self, f_start=None, f_stop=None, if_id=0, logged=True, cb=True, MJD_time=False, **kwargs):
""" Plot waterfall of data
Args:
f_start (float): start frequency, in MHz
f_stop (float): stop frequency, in MHz
logged (bool): Plot in linear (False) or dB units (True)
cb (bool): for plotting the colorbar
kwargs: keyword args to be passed to matplotlib imshow()
"""
plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)
#Using ascending frequency for all plots.
if self.header[b'foff'] < 0:
plot_data = plot_data[..., ::-1] # Reverse data
plot_f = plot_f[::-1]
if logged:
plot_data = db(plot_data)
# Make sure waterfall plot is under 4k*4k
dec_fac_x, dec_fac_y = 1, 1
if plot_data.shape[0] > MAX_IMSHOW_POINTS[0]:
dec_fac_x = int(plot_data.shape[0] / MAX_IMSHOW_POINTS[0])
if plot_data.shape[1] > MAX_IMSHOW_POINTS[1]:
dec_fac_y = int(plot_data.shape[1] / MAX_IMSHOW_POINTS[1])
plot_data = rebin(plot_data, dec_fac_x, dec_fac_y)
try:
plt.title(self.header[b'source_name'])
except KeyError:
plt.title(self.filename)
extent = self._calc_extent(plot_f=plot_f,plot_t=self.timestamps,MJD_time=MJD_time)
plt.imshow(plot_data,
aspect='auto',
origin='lower',
rasterized=True,
interpolation='nearest',
extent=extent,
cmap='viridis',
**kwargs
)
if cb:
plt.colorbar()
plt.xlabel("Frequency [MHz]")
if MJD_time:
plt.ylabel("Time [MJD]")
else:
plt.ylabel("Time [s]")
|
def plot_waterfall(self, f_start=None, f_stop=None, if_id=0, logged=True, cb=True, MJD_time=False, **kwargs):
""" Plot waterfall of data
Args:
f_start (float): start frequency, in MHz
f_stop (float): stop frequency, in MHz
logged (bool): Plot in linear (False) or dB units (True)
cb (bool): for plotting the colorbar
kwargs: keyword args to be passed to matplotlib imshow()
"""
plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)
#Using ascending frequency for all plots.
if self.header[b'foff'] < 0:
plot_data = plot_data[..., ::-1] # Reverse data
plot_f = plot_f[::-1]
if logged:
plot_data = db(plot_data)
# Make sure waterfall plot is under 4k*4k
dec_fac_x, dec_fac_y = 1, 1
if plot_data.shape[0] > MAX_IMSHOW_POINTS[0]:
dec_fac_x = int(plot_data.shape[0] / MAX_IMSHOW_POINTS[0])
if plot_data.shape[1] > MAX_IMSHOW_POINTS[1]:
dec_fac_y = int(plot_data.shape[1] / MAX_IMSHOW_POINTS[1])
plot_data = rebin(plot_data, dec_fac_x, dec_fac_y)
try:
plt.title(self.header[b'source_name'])
except KeyError:
plt.title(self.filename)
extent = self._calc_extent(plot_f=plot_f,plot_t=self.timestamps,MJD_time=MJD_time)
plt.imshow(plot_data,
aspect='auto',
origin='lower',
rasterized=True,
interpolation='nearest',
extent=extent,
cmap='viridis',
**kwargs
)
if cb:
plt.colorbar()
plt.xlabel("Frequency [MHz]")
if MJD_time:
plt.ylabel("Time [MJD]")
else:
plt.ylabel("Time [s]")
|
[
"Plot",
"waterfall",
"of",
"data"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/filterbank.py#L644-L697
|
[
"def",
"plot_waterfall",
"(",
"self",
",",
"f_start",
"=",
"None",
",",
"f_stop",
"=",
"None",
",",
"if_id",
"=",
"0",
",",
"logged",
"=",
"True",
",",
"cb",
"=",
"True",
",",
"MJD_time",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"plot_f",
",",
"plot_data",
"=",
"self",
".",
"grab_data",
"(",
"f_start",
",",
"f_stop",
",",
"if_id",
")",
"#Using accending frequency for all plots.",
"if",
"self",
".",
"header",
"[",
"b'foff'",
"]",
"<",
"0",
":",
"plot_data",
"=",
"plot_data",
"[",
"...",
",",
":",
":",
"-",
"1",
"]",
"# Reverse data",
"plot_f",
"=",
"plot_f",
"[",
":",
":",
"-",
"1",
"]",
"if",
"logged",
":",
"plot_data",
"=",
"db",
"(",
"plot_data",
")",
"# Make sure waterfall plot is under 4k*4k",
"dec_fac_x",
",",
"dec_fac_y",
"=",
"1",
",",
"1",
"if",
"plot_data",
".",
"shape",
"[",
"0",
"]",
">",
"MAX_IMSHOW_POINTS",
"[",
"0",
"]",
":",
"dec_fac_x",
"=",
"int",
"(",
"plot_data",
".",
"shape",
"[",
"0",
"]",
"/",
"MAX_IMSHOW_POINTS",
"[",
"0",
"]",
")",
"if",
"plot_data",
".",
"shape",
"[",
"1",
"]",
">",
"MAX_IMSHOW_POINTS",
"[",
"1",
"]",
":",
"dec_fac_y",
"=",
"int",
"(",
"plot_data",
".",
"shape",
"[",
"1",
"]",
"/",
"MAX_IMSHOW_POINTS",
"[",
"1",
"]",
")",
"plot_data",
"=",
"rebin",
"(",
"plot_data",
",",
"dec_fac_x",
",",
"dec_fac_y",
")",
"try",
":",
"plt",
".",
"title",
"(",
"self",
".",
"header",
"[",
"b'source_name'",
"]",
")",
"except",
"KeyError",
":",
"plt",
".",
"title",
"(",
"self",
".",
"filename",
")",
"extent",
"=",
"self",
".",
"_calc_extent",
"(",
"plot_f",
"=",
"plot_f",
",",
"plot_t",
"=",
"self",
".",
"timestamps",
",",
"MJD_time",
"=",
"MJD_time",
")",
"plt",
".",
"imshow",
"(",
"plot_data",
",",
"aspect",
"=",
"'auto'",
",",
"origin",
"=",
"'lower'",
",",
"rasterized",
"=",
"True",
",",
"interpolation",
"=",
"'nearest'",
",",
"extent",
"=",
"extent",
",",
"cmap",
"=",
"'viridis'",
",",
"*",
"*",
"kwargs",
")",
"if",
"cb",
":",
"plt",
".",
"colorbar",
"(",
")",
"plt",
".",
"xlabel",
"(",
"\"Frequency [MHz]\"",
")",
"if",
"MJD_time",
":",
"plt",
".",
"ylabel",
"(",
"\"Time [MJD]\"",
")",
"else",
":",
"plt",
".",
"ylabel",
"(",
"\"Time [s]\"",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
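plot_waterfall applies the same rebinning idea in two dimensions, keeping the rendered image under MAX_IMSHOW_POINTS on both axes before handing it to imshow. A sketch of just the decimation-factor arithmetic follows; the 4096x4096 cap is an assumption based on the "4k*4k" comment above.

MAX_IMSHOW_POINTS = (4096, 4096)  # assumed cap, based on the "4k*4k" comment

def decimation_factors(shape):
    # One integer factor per axis; max(1, ...) matches the original's
    # behaviour of leaving axes that are already under the cap untouched.
    dec_fac_x = max(1, int(shape[0] / MAX_IMSHOW_POINTS[0]))
    dec_fac_y = max(1, int(shape[1] / MAX_IMSHOW_POINTS[1]))
    return dec_fac_x, dec_fac_y

print(decimation_factors((12000, 1048576)))  # -> (2, 256)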
test
|
Filterbank.plot_time_series
|
Plot the time series.
Args:
f_start (float): start frequency, in MHz
f_stop (float): stop frequency, in MHz
logged (bool): Plot in linear (False) or dB units (True)
kwargs: keyword args to be passed to matplotlib imshow()
|
blimpy/filterbank.py
|
def plot_time_series(self, f_start=None, f_stop=None, if_id=0, logged=True, orientation='h', MJD_time=False, **kwargs):
""" Plot the time series.
Args:
f_start (float): start frequency, in MHz
f_stop (float): stop frequency, in MHz
logged (bool): Plot in linear (False) or dB units (True)
kwargs: keyword args to be passed to matplotlib imshow()
"""
ax = plt.gca()
plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)
if logged and self.header[b'nbits'] >= 8:
plot_data = db(plot_data)
#Since the data has been squeezed, the axis for time goes away if only one bin, causing a bug with axis=1
if len(plot_data.shape) > 1:
plot_data = plot_data.mean(axis=1)
else:
plot_data = plot_data.mean()
#Make proper time axis for plotting (but only for plotting!). Note that this makes the values inclusive.
extent = self._calc_extent(plot_f=plot_f,plot_t=self.timestamps,MJD_time=MJD_time)
plot_t = np.linspace(extent[2],extent[3],len(self.timestamps))
if MJD_time:
tlabel = "Time [MJD]"
else:
tlabel = "Time [s]"
if logged:
plabel = "Power [dB]"
else:
plabel = "Power [counts]"
# Reverse order if vertical orientation.
if 'v' in orientation:
plt.plot(plot_data, plot_t, **kwargs)
plt.xlabel(plabel)
else:
plt.plot(plot_t, plot_data, **kwargs)
plt.xlabel(tlabel)
plt.ylabel(plabel)
ax.autoscale(axis='both',tight=True)
|
def plot_time_series(self, f_start=None, f_stop=None, if_id=0, logged=True, orientation='h', MJD_time=False, **kwargs):
""" Plot the time series.
Args:
f_start (float): start frequency, in MHz
f_stop (float): stop frequency, in MHz
logged (bool): Plot in linear (False) or dB units (True)
kwargs: keyword args to be passed to matplotlib imshow()
"""
ax = plt.gca()
plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)
if logged and self.header[b'nbits'] >= 8:
plot_data = db(plot_data)
#Since the data has been squeezed, the axis for time goes away if only one bin, causing a bug with axis=1
if len(plot_data.shape) > 1:
plot_data = plot_data.mean(axis=1)
else:
plot_data = plot_data.mean()
#Make proper time axis for plotting (but only for plotting!). Note that this makes the values inclusive.
extent = self._calc_extent(plot_f=plot_f,plot_t=self.timestamps,MJD_time=MJD_time)
plot_t = np.linspace(extent[2],extent[3],len(self.timestamps))
if MJD_time:
tlabel = "Time [MJD]"
else:
tlabel = "Time [s]"
if logged:
plabel = "Power [dB]"
else:
plabel = "Power [counts]"
# Reverse order if vertical orientation.
if 'v' in orientation:
plt.plot(plot_data, plot_t, **kwargs)
plt.xlabel(plabel)
else:
plt.plot(plot_t, plot_data, **kwargs)
plt.xlabel(tlabel)
plt.ylabel(plabel)
ax.autoscale(axis='both',tight=True)
|
[
"Plot",
"the",
"time",
"series",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/filterbank.py#L699-L745
|
[
"def",
"plot_time_series",
"(",
"self",
",",
"f_start",
"=",
"None",
",",
"f_stop",
"=",
"None",
",",
"if_id",
"=",
"0",
",",
"logged",
"=",
"True",
",",
"orientation",
"=",
"'h'",
",",
"MJD_time",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"plot_f",
",",
"plot_data",
"=",
"self",
".",
"grab_data",
"(",
"f_start",
",",
"f_stop",
",",
"if_id",
")",
"if",
"logged",
"and",
"self",
".",
"header",
"[",
"b'nbits'",
"]",
">=",
"8",
":",
"plot_data",
"=",
"db",
"(",
"plot_data",
")",
"#Since the data has been squeezed, the axis for time goes away if only one bin, causing a bug with axis=1",
"if",
"len",
"(",
"plot_data",
".",
"shape",
")",
">",
"1",
":",
"plot_data",
"=",
"plot_data",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
"else",
":",
"plot_data",
"=",
"plot_data",
".",
"mean",
"(",
")",
"#Make proper time axis for plotting (but only for plotting!). Note that this makes the values inclusive.",
"extent",
"=",
"self",
".",
"_calc_extent",
"(",
"plot_f",
"=",
"plot_f",
",",
"plot_t",
"=",
"self",
".",
"timestamps",
",",
"MJD_time",
"=",
"MJD_time",
")",
"plot_t",
"=",
"np",
".",
"linspace",
"(",
"extent",
"[",
"2",
"]",
",",
"extent",
"[",
"3",
"]",
",",
"len",
"(",
"self",
".",
"timestamps",
")",
")",
"if",
"MJD_time",
":",
"tlabel",
"=",
"\"Time [MJD]\"",
"else",
":",
"tlabel",
"=",
"\"Time [s]\"",
"if",
"logged",
":",
"plabel",
"=",
"\"Power [dB]\"",
"else",
":",
"plabel",
"=",
"\"Power [counts]\"",
"# Reverse oder if vertical orientation.",
"if",
"'v'",
"in",
"orientation",
":",
"plt",
".",
"plot",
"(",
"plot_data",
",",
"plot_t",
",",
"*",
"*",
"kwargs",
")",
"plt",
".",
"xlabel",
"(",
"plabel",
")",
"else",
":",
"plt",
".",
"plot",
"(",
"plot_t",
",",
"plot_data",
",",
"*",
"*",
"kwargs",
")",
"plt",
".",
"xlabel",
"(",
"tlabel",
")",
"plt",
".",
"ylabel",
"(",
"plabel",
")",
"ax",
".",
"autoscale",
"(",
"axis",
"=",
"'both'",
",",
"tight",
"=",
"True",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
test
|
Filterbank.plot_kurtosis
|
Plot kurtosis
Args:
f_start (float): start frequency, in MHz
f_stop (float): stop frequency, in MHz
kwargs: keyword args to be passed to matplotlib imshow()
|
blimpy/filterbank.py
|
def plot_kurtosis(self, f_start=None, f_stop=None, if_id=0, **kwargs):
""" Plot kurtosis
Args:
f_start (float): start frequency, in MHz
f_stop (float): stop frequency, in MHz
kwargs: keyword args to be passed to matplotlib imshow()
"""
ax = plt.gca()
plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)
#Using ascending frequency for all plots.
if self.header[b'foff'] < 0:
plot_data = plot_data[..., ::-1] # Reverse data
plot_f = plot_f[::-1]
try:
plot_kurtosis = scipy.stats.kurtosis(plot_data, axis=0, nan_policy='omit')
except Exception:
plot_kurtosis = plot_data*0.0
plt.plot(plot_f, plot_kurtosis, **kwargs)
plt.ylabel("Kurtosis")
plt.xlabel("Frequency [MHz]")
plt.xlim(plot_f[0], plot_f[-1])
|
def plot_kurtosis(self, f_start=None, f_stop=None, if_id=0, **kwargs):
""" Plot kurtosis
Args:
f_start (float): start frequency, in MHz
f_stop (float): stop frequency, in MHz
kwargs: keyword args to be passed to matplotlib imshow()
"""
ax = plt.gca()
plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)
#Using ascending frequency for all plots.
if self.header[b'foff'] < 0:
plot_data = plot_data[..., ::-1] # Reverse data
plot_f = plot_f[::-1]
try:
plot_kurtosis = scipy.stats.kurtosis(plot_data, axis=0, nan_policy='omit')
except Exception:
plot_kurtosis = plot_data*0.0
plt.plot(plot_f, plot_kurtosis, **kwargs)
plt.ylabel("Kurtosis")
plt.xlabel("Frequency [MHz]")
plt.xlim(plot_f[0], plot_f[-1])
|
[
"Plot",
"kurtosis"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/filterbank.py#L747-L773
|
[
"def",
"plot_kurtosis",
"(",
"self",
",",
"f_start",
"=",
"None",
",",
"f_stop",
"=",
"None",
",",
"if_id",
"=",
"0",
",",
"*",
"*",
"kwargs",
")",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"plot_f",
",",
"plot_data",
"=",
"self",
".",
"grab_data",
"(",
"f_start",
",",
"f_stop",
",",
"if_id",
")",
"#Using accending frequency for all plots.",
"if",
"self",
".",
"header",
"[",
"b'foff'",
"]",
"<",
"0",
":",
"plot_data",
"=",
"plot_data",
"[",
"...",
",",
":",
":",
"-",
"1",
"]",
"# Reverse data",
"plot_f",
"=",
"plot_f",
"[",
":",
":",
"-",
"1",
"]",
"try",
":",
"plot_kurtosis",
"=",
"scipy",
".",
"stats",
".",
"kurtosis",
"(",
"plot_data",
",",
"axis",
"=",
"0",
",",
"nan_policy",
"=",
"'omit'",
")",
"except",
":",
"plot_kurtosis",
"=",
"plot_data",
"*",
"0.0",
"plt",
".",
"plot",
"(",
"plot_f",
",",
"plot_kurtosis",
",",
"*",
"*",
"kwargs",
")",
"plt",
".",
"ylabel",
"(",
"\"Kurtosis\"",
")",
"plt",
".",
"xlabel",
"(",
"\"Frequency [MHz]\"",
")",
"plt",
".",
"xlim",
"(",
"plot_f",
"[",
"0",
"]",
",",
"plot_f",
"[",
"-",
"1",
"]",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
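plot_kurtosis reduces each frequency channel to its kurtosis over time, a cheap way to flag channels whose statistics are non-Gaussian (e.g. impulsive RFI). A small sketch of that reduction on synthetic data follows; the shapes and the contaminated channel are hypothetical.

import numpy as np
from scipy import stats

# (time, frequency) block of Gaussian noise with one contaminated channel;
# Gaussian channels have Fisher kurtosis near 0, impulsive ones do not.
data = np.random.normal(size=(256, 1024))
data[:, 100] += np.random.exponential(5.0, size=256)
k = stats.kurtosis(data, axis=0, nan_policy='omit')
print(int(np.argmax(k)))  # very likely 100, the contaminated channel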
test
|
Filterbank.plot_all
|
Plot waterfall of data as well as spectrum; also, placeholder to make even more complicated plots in the future.
Args:
f_start (float): start frequency, in MHz
f_stop (float): stop frequency, in MHz
t (int): integration number to plot (0 -> len(data))
logged (bool): Plot in linear (False) or dB units (True)
if_id (int): IF identification (if multiple IF signals in file)
kwargs: keyword args to be passed to matplotlib plot() and imshow()
|
blimpy/filterbank.py
|
def plot_all(self, t=0, f_start=None, f_stop=None, logged=False, if_id=0, kurtosis=True, **kwargs):
""" Plot waterfall of data as well as spectrum; also, placeholder to make even more complicated plots in the future.
Args:
f_start (float): start frequency, in MHz
f_stop (float): stop frequency, in MHz
t (int): integration number to plot (0 -> len(data))
logged (bool): Plot in linear (False) or dB units (True)
if_id (int): IF identification (if multiple IF signals in file)
kwargs: keyword args to be passed to matplotlib plot() and imshow()
"""
if self.header[b'nbits'] <=2:
logged = False
nullfmt = NullFormatter() # no labels
# definitions for the axes
left, width = 0.35, 0.5
bottom, height = 0.45, 0.5
width2, height2 = 0.1125, 0.15
bottom2, left2 = bottom - height2 - .025, left - width2 - .02
bottom3, left3 = bottom2 - height2 - .025, 0.075
rect_waterfall = [left, bottom, width, height]
rect_colorbar = [left + width, bottom, .025, height]
rect_spectrum = [left, bottom2, width, height2]
rect_min_max = [left, bottom3, width, height2]
rect_timeseries = [left + width, bottom, width2, height]
rect_kurtosis = [left3, bottom3, 0.25, height2]
rect_header = [left3 - .05, bottom, 0.2, height]
# --------
# axColorbar = plt.axes(rect_colorbar)
# print 'Ploting Colorbar'
# print plot_data.max()
# print plot_data.min()
#
# plot_colorbar = range(plot_data.min(),plot_data.max(),int((plot_data.max()-plot_data.min())/plot_data.shape[0]))
# plot_colorbar = np.array([[plot_colorbar],[plot_colorbar]])
#
# plt.imshow(plot_colorbar,aspect='auto', rasterized=True, interpolation='nearest',)
# axColorbar.xaxis.set_major_formatter(nullfmt)
# axColorbar.yaxis.set_major_formatter(nullfmt)
# heatmap = axColorbar.pcolor(plot_data, edgecolors = 'none', picker=True)
# plt.colorbar(heatmap, cax = axColorbar)
# --------
axMinMax = plt.axes(rect_min_max)
print('Plotting Min Max')
self.plot_spectrum_min_max(logged=logged, f_start=f_start, f_stop=f_stop, t=t)
plt.title('')
axMinMax.yaxis.tick_right()
axMinMax.yaxis.set_label_position("right")
# --------
axSpectrum = plt.axes(rect_spectrum,sharex=axMinMax)
print('Plotting Spectrum')
self.plot_spectrum(logged=logged, f_start=f_start, f_stop=f_stop, t=t)
plt.title('')
axSpectrum.yaxis.tick_right()
axSpectrum.yaxis.set_label_position("right")
plt.xlabel('')
# axSpectrum.xaxis.set_major_formatter(nullfmt)
plt.setp(axSpectrum.get_xticklabels(), visible=False)
# --------
axWaterfall = plt.axes(rect_waterfall,sharex=axMinMax)
print('Plotting Waterfall')
self.plot_waterfall(f_start=f_start, f_stop=f_stop, logged=logged, cb=False)
plt.xlabel('')
# no labels
# axWaterfall.xaxis.set_major_formatter(nullfmt)
plt.setp(axWaterfall.get_xticklabels(), visible=False)
# --------
axTimeseries = plt.axes(rect_timeseries)
print('Plotting Timeseries')
self.plot_time_series(f_start=f_start, f_stop=f_stop, orientation='v')
axTimeseries.yaxis.set_major_formatter(nullfmt)
# axTimeseries.xaxis.set_major_formatter(nullfmt)
# --------
# Could exclude since it takes much longer to run than the other plots.
if kurtosis:
axKurtosis = plt.axes(rect_kurtosis)
print('Plotting Kurtosis')
self.plot_kurtosis(f_start=f_start, f_stop=f_stop)
# --------
axHeader = plt.axes(rect_header)
print('Plotting Header')
# Generate nicer header
telescopes = {0: 'Fake data',
1: 'Arecibo',
2: 'Ooty',
3: 'Nancay',
4: 'Parkes',
5: 'Jodrell',
6: 'GBT',
8: 'Effelsberg',
10: 'SRT',
64: 'MeerKAT',
65: 'KAT7'
}
telescope = telescopes.get(self.header[b"telescope_id"], self.header[b"telescope_id"])
plot_header = "%14s: %s\n" % ("TELESCOPE_ID", telescope)
for key in (b'SRC_RAJ', b'SRC_DEJ', b'TSTART', b'NCHANS', b'NBEAMS', b'NIFS', b'NBITS'):
try:
plot_header += "%14s: %s\n" % (key, self.header[key.lower()])
except KeyError:
pass
fch1 = "%6.6f MHz" % self.header[b'fch1']
foff = (self.header[b'foff'] * 1e6 * u.Hz)
if np.abs(foff) > 1e6 * u.Hz:
foff = str(foff.to('MHz'))
elif np.abs(foff) > 1e3 * u.Hz:
foff = str(foff.to('kHz'))
else:
foff = str(foff.to('Hz'))
plot_header += "%14s: %s\n" % ("FCH1", fch1)
plot_header += "%14s: %s\n" % ("FOFF", foff)
plt.text(0.05, .95, plot_header, ha='left', va='top', wrap=True)
axHeader.set_facecolor('white')
axHeader.xaxis.set_major_formatter(nullfmt)
axHeader.yaxis.set_major_formatter(nullfmt)
|
def plot_all(self, t=0, f_start=None, f_stop=None, logged=False, if_id=0, kurtosis=True, **kwargs):
""" Plot waterfall of data as well as spectrum; also, placeholder to make even more complicated plots in the future.
Args:
f_start (float): start frequency, in MHz
f_stop (float): stop frequency, in MHz
t (int): integration number to plot (0 -> len(data))
logged (bool): Plot in linear (False) or dB units (True)
if_id (int): IF identification (if multiple IF signals in file)
kwargs: keyword args to be passed to matplotlib plot() and imshow()
"""
if self.header[b'nbits'] <=2:
logged = False
nullfmt = NullFormatter() # no labels
# definitions for the axes
left, width = 0.35, 0.5
bottom, height = 0.45, 0.5
width2, height2 = 0.1125, 0.15
bottom2, left2 = bottom - height2 - .025, left - width2 - .02
bottom3, left3 = bottom2 - height2 - .025, 0.075
rect_waterfall = [left, bottom, width, height]
rect_colorbar = [left + width, bottom, .025, height]
rect_spectrum = [left, bottom2, width, height2]
rect_min_max = [left, bottom3, width, height2]
rect_timeseries = [left + width, bottom, width2, height]
rect_kurtosis = [left3, bottom3, 0.25, height2]
rect_header = [left3 - .05, bottom, 0.2, height]
# --------
# axColorbar = plt.axes(rect_colorbar)
# print 'Ploting Colorbar'
# print plot_data.max()
# print plot_data.min()
#
# plot_colorbar = range(plot_data.min(),plot_data.max(),int((plot_data.max()-plot_data.min())/plot_data.shape[0]))
# plot_colorbar = np.array([[plot_colorbar],[plot_colorbar]])
#
# plt.imshow(plot_colorbar,aspect='auto', rasterized=True, interpolation='nearest',)
# axColorbar.xaxis.set_major_formatter(nullfmt)
# axColorbar.yaxis.set_major_formatter(nullfmt)
# heatmap = axColorbar.pcolor(plot_data, edgecolors = 'none', picker=True)
# plt.colorbar(heatmap, cax = axColorbar)
# --------
axMinMax = plt.axes(rect_min_max)
print('Plotting Min Max')
self.plot_spectrum_min_max(logged=logged, f_start=f_start, f_stop=f_stop, t=t)
plt.title('')
axMinMax.yaxis.tick_right()
axMinMax.yaxis.set_label_position("right")
# --------
axSpectrum = plt.axes(rect_spectrum,sharex=axMinMax)
print('Plotting Spectrum')
self.plot_spectrum(logged=logged, f_start=f_start, f_stop=f_stop, t=t)
plt.title('')
axSpectrum.yaxis.tick_right()
axSpectrum.yaxis.set_label_position("right")
plt.xlabel('')
# axSpectrum.xaxis.set_major_formatter(nullfmt)
plt.setp(axSpectrum.get_xticklabels(), visible=False)
# --------
axWaterfall = plt.axes(rect_waterfall,sharex=axMinMax)
print('Plotting Waterfall')
self.plot_waterfall(f_start=f_start, f_stop=f_stop, logged=logged, cb=False)
plt.xlabel('')
# no labels
# axWaterfall.xaxis.set_major_formatter(nullfmt)
plt.setp(axWaterfall.get_xticklabels(), visible=False)
# --------
axTimeseries = plt.axes(rect_timeseries)
print('Plotting Timeseries')
self.plot_time_series(f_start=f_start, f_stop=f_stop, orientation='v')
axTimeseries.yaxis.set_major_formatter(nullfmt)
# axTimeseries.xaxis.set_major_formatter(nullfmt)
# --------
# Could exclude since it takes much longer to run than the other plots.
if kurtosis:
axKurtosis = plt.axes(rect_kurtosis)
print('Plotting Kurtosis')
self.plot_kurtosis(f_start=f_start, f_stop=f_stop)
# --------
axHeader = plt.axes(rect_header)
print('Plotting Header')
# Generate nicer header
telescopes = {0: 'Fake data',
1: 'Arecibo',
2: 'Ooty',
3: 'Nancay',
4: 'Parkes',
5: 'Jodrell',
6: 'GBT',
8: 'Effelsberg',
10: 'SRT',
64: 'MeerKAT',
65: 'KAT7'
}
telescope = telescopes.get(self.header[b"telescope_id"], self.header[b"telescope_id"])
plot_header = "%14s: %s\n" % ("TELESCOPE_ID", telescope)
for key in (b'SRC_RAJ', b'SRC_DEJ', b'TSTART', b'NCHANS', b'NBEAMS', b'NIFS', b'NBITS'):
try:
plot_header += "%14s: %s\n" % (key, self.header[key.lower()])
except KeyError:
pass
fch1 = "%6.6f MHz" % self.header[b'fch1']
foff = (self.header[b'foff'] * 1e6 * u.Hz)
if np.abs(foff) > 1e6 * u.Hz:
foff = str(foff.to('MHz'))
elif np.abs(foff) > 1e3 * u.Hz:
foff = str(foff.to('kHz'))
else:
foff = str(foff.to('Hz'))
plot_header += "%14s: %s\n" % ("FCH1", fch1)
plot_header += "%14s: %s\n" % ("FOFF", foff)
plt.text(0.05, .95, plot_header, ha='left', va='top', wrap=True)
axHeader.set_facecolor('white')
axHeader.xaxis.set_major_formatter(nullfmt)
axHeader.yaxis.set_major_formatter(nullfmt)
|
[
"Plot",
"waterfall",
"of",
"data",
"as",
"well",
"as",
"spectrum",
";",
"also",
"placeholder",
"to",
"make",
"even",
"more",
"complicated",
"plots",
"in",
"the",
"future",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/filterbank.py#L775-L911
|
[
"def",
"plot_all",
"(",
"self",
",",
"t",
"=",
"0",
",",
"f_start",
"=",
"None",
",",
"f_stop",
"=",
"None",
",",
"logged",
"=",
"False",
",",
"if_id",
"=",
"0",
",",
"kurtosis",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"header",
"[",
"b'nbits'",
"]",
"<=",
"2",
":",
"logged",
"=",
"False",
"nullfmt",
"=",
"NullFormatter",
"(",
")",
"# no labels",
"# definitions for the axes",
"left",
",",
"width",
"=",
"0.35",
",",
"0.5",
"bottom",
",",
"height",
"=",
"0.45",
",",
"0.5",
"width2",
",",
"height2",
"=",
"0.1125",
",",
"0.15",
"bottom2",
",",
"left2",
"=",
"bottom",
"-",
"height2",
"-",
".025",
",",
"left",
"-",
"width2",
"-",
".02",
"bottom3",
",",
"left3",
"=",
"bottom2",
"-",
"height2",
"-",
".025",
",",
"0.075",
"rect_waterfall",
"=",
"[",
"left",
",",
"bottom",
",",
"width",
",",
"height",
"]",
"rect_colorbar",
"=",
"[",
"left",
"+",
"width",
",",
"bottom",
",",
".025",
",",
"height",
"]",
"rect_spectrum",
"=",
"[",
"left",
",",
"bottom2",
",",
"width",
",",
"height2",
"]",
"rect_min_max",
"=",
"[",
"left",
",",
"bottom3",
",",
"width",
",",
"height2",
"]",
"rect_timeseries",
"=",
"[",
"left",
"+",
"width",
",",
"bottom",
",",
"width2",
",",
"height",
"]",
"rect_kurtosis",
"=",
"[",
"left3",
",",
"bottom3",
",",
"0.25",
",",
"height2",
"]",
"rect_header",
"=",
"[",
"left3",
"-",
".05",
",",
"bottom",
",",
"0.2",
",",
"height",
"]",
"# --------",
"# axColorbar = plt.axes(rect_colorbar)",
"# print 'Ploting Colorbar'",
"# print plot_data.max()",
"# print plot_data.min()",
"#",
"# plot_colorbar = range(plot_data.min(),plot_data.max(),int((plot_data.max()-plot_data.min())/plot_data.shape[0]))",
"# plot_colorbar = np.array([[plot_colorbar],[plot_colorbar]])",
"#",
"# plt.imshow(plot_colorbar,aspect='auto', rasterized=True, interpolation='nearest',)",
"# axColorbar.xaxis.set_major_formatter(nullfmt)",
"# axColorbar.yaxis.set_major_formatter(nullfmt)",
"# heatmap = axColorbar.pcolor(plot_data, edgecolors = 'none', picker=True)",
"# plt.colorbar(heatmap, cax = axColorbar)",
"# --------",
"axMinMax",
"=",
"plt",
".",
"axes",
"(",
"rect_min_max",
")",
"print",
"(",
"'Plotting Min Max'",
")",
"self",
".",
"plot_spectrum_min_max",
"(",
"logged",
"=",
"logged",
",",
"f_start",
"=",
"f_start",
",",
"f_stop",
"=",
"f_stop",
",",
"t",
"=",
"t",
")",
"plt",
".",
"title",
"(",
"''",
")",
"axMinMax",
".",
"yaxis",
".",
"tick_right",
"(",
")",
"axMinMax",
".",
"yaxis",
".",
"set_label_position",
"(",
"\"right\"",
")",
"# --------",
"axSpectrum",
"=",
"plt",
".",
"axes",
"(",
"rect_spectrum",
",",
"sharex",
"=",
"axMinMax",
")",
"print",
"(",
"'Plotting Spectrum'",
")",
"self",
".",
"plot_spectrum",
"(",
"logged",
"=",
"logged",
",",
"f_start",
"=",
"f_start",
",",
"f_stop",
"=",
"f_stop",
",",
"t",
"=",
"t",
")",
"plt",
".",
"title",
"(",
"''",
")",
"axSpectrum",
".",
"yaxis",
".",
"tick_right",
"(",
")",
"axSpectrum",
".",
"yaxis",
".",
"set_label_position",
"(",
"\"right\"",
")",
"plt",
".",
"xlabel",
"(",
"''",
")",
"# axSpectrum.xaxis.set_major_formatter(nullfmt)",
"plt",
".",
"setp",
"(",
"axSpectrum",
".",
"get_xticklabels",
"(",
")",
",",
"visible",
"=",
"False",
")",
"# --------",
"axWaterfall",
"=",
"plt",
".",
"axes",
"(",
"rect_waterfall",
",",
"sharex",
"=",
"axMinMax",
")",
"print",
"(",
"'Plotting Waterfall'",
")",
"self",
".",
"plot_waterfall",
"(",
"f_start",
"=",
"f_start",
",",
"f_stop",
"=",
"f_stop",
",",
"logged",
"=",
"logged",
",",
"cb",
"=",
"False",
")",
"plt",
".",
"xlabel",
"(",
"''",
")",
"# no labels",
"# axWaterfall.xaxis.set_major_formatter(nullfmt)",
"plt",
".",
"setp",
"(",
"axWaterfall",
".",
"get_xticklabels",
"(",
")",
",",
"visible",
"=",
"False",
")",
"# --------",
"axTimeseries",
"=",
"plt",
".",
"axes",
"(",
"rect_timeseries",
")",
"print",
"(",
"'Plotting Timeseries'",
")",
"self",
".",
"plot_time_series",
"(",
"f_start",
"=",
"f_start",
",",
"f_stop",
"=",
"f_stop",
",",
"orientation",
"=",
"'v'",
")",
"axTimeseries",
".",
"yaxis",
".",
"set_major_formatter",
"(",
"nullfmt",
")",
"# axTimeseries.xaxis.set_major_formatter(nullfmt)",
"# --------",
"# Could exclude since it takes much longer to run than the other plots.",
"if",
"kurtosis",
":",
"axKurtosis",
"=",
"plt",
".",
"axes",
"(",
"rect_kurtosis",
")",
"print",
"(",
"'Plotting Kurtosis'",
")",
"self",
".",
"plot_kurtosis",
"(",
"f_start",
"=",
"f_start",
",",
"f_stop",
"=",
"f_stop",
")",
"# --------",
"axHeader",
"=",
"plt",
".",
"axes",
"(",
"rect_header",
")",
"print",
"(",
"'Plotting Header'",
")",
"# Generate nicer header",
"telescopes",
"=",
"{",
"0",
":",
"'Fake data'",
",",
"1",
":",
"'Arecibo'",
",",
"2",
":",
"'Ooty'",
",",
"3",
":",
"'Nancay'",
",",
"4",
":",
"'Parkes'",
",",
"5",
":",
"'Jodrell'",
",",
"6",
":",
"'GBT'",
",",
"8",
":",
"'Effelsberg'",
",",
"10",
":",
"'SRT'",
",",
"64",
":",
"'MeerKAT'",
",",
"65",
":",
"'KAT7'",
"}",
"telescope",
"=",
"telescopes",
".",
"get",
"(",
"self",
".",
"header",
"[",
"b\"telescope_id\"",
"]",
",",
"self",
".",
"header",
"[",
"b\"telescope_id\"",
"]",
")",
"plot_header",
"=",
"\"%14s: %s\\n\"",
"%",
"(",
"\"TELESCOPE_ID\"",
",",
"telescope",
")",
"for",
"key",
"in",
"(",
"b'SRC_RAJ'",
",",
"b'SRC_DEJ'",
",",
"b'TSTART'",
",",
"b'NCHANS'",
",",
"b'NBEAMS'",
",",
"b'NIFS'",
",",
"b'NBITS'",
")",
":",
"try",
":",
"plot_header",
"+=",
"\"%14s: %s\\n\"",
"%",
"(",
"key",
",",
"self",
".",
"header",
"[",
"key",
".",
"lower",
"(",
")",
"]",
")",
"except",
"KeyError",
":",
"pass",
"fch1",
"=",
"\"%6.6f MHz\"",
"%",
"self",
".",
"header",
"[",
"b'fch1'",
"]",
"foff",
"=",
"(",
"self",
".",
"header",
"[",
"b'foff'",
"]",
"*",
"1e6",
"*",
"u",
".",
"Hz",
")",
"if",
"np",
".",
"abs",
"(",
"foff",
")",
">",
"1e6",
"*",
"u",
".",
"Hz",
":",
"foff",
"=",
"str",
"(",
"foff",
".",
"to",
"(",
"'MHz'",
")",
")",
"elif",
"np",
".",
"abs",
"(",
"foff",
")",
">",
"1e3",
"*",
"u",
".",
"Hz",
":",
"foff",
"=",
"str",
"(",
"foff",
".",
"to",
"(",
"'kHz'",
")",
")",
"else",
":",
"foff",
"=",
"str",
"(",
"foff",
".",
"to",
"(",
"'Hz'",
")",
")",
"plot_header",
"+=",
"\"%14s: %s\\n\"",
"%",
"(",
"\"FCH1\"",
",",
"fch1",
")",
"plot_header",
"+=",
"\"%14s: %s\\n\"",
"%",
"(",
"\"FOFF\"",
",",
"foff",
")",
"plt",
".",
"text",
"(",
"0.05",
",",
".95",
",",
"plot_header",
",",
"ha",
"=",
"'left'",
",",
"va",
"=",
"'top'",
",",
"wrap",
"=",
"True",
")",
"axHeader",
".",
"set_facecolor",
"(",
"'white'",
")",
"axHeader",
".",
"xaxis",
".",
"set_major_formatter",
"(",
"nullfmt",
")",
"axHeader",
".",
"yaxis",
".",
"set_major_formatter",
"(",
"nullfmt",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
test
|
Filterbank.write_to_filterbank
|
Write data to blimpy file.
Args:
filename_out (str): Name of output file
|
blimpy/filterbank.py
|
def write_to_filterbank(self, filename_out):
""" Write data to blimpy file.
Args:
filename_out (str): Name of output file
"""
print("[Filterbank] Warning: Non-standard function to write in filterbank (.fil) format. Please use Waterfall.")
n_bytes = int(self.header[b'nbits'] / 8)
with open(filename_out, "wb") as fileh:
fileh.write(generate_sigproc_header(self))
j = self.data
if n_bytes == 4:
np.float32(j.ravel()).tofile(fileh)
elif n_bytes == 2:
np.int16(j.ravel()).tofile(fileh)
elif n_bytes == 1:
np.int8(j.ravel()).tofile(fileh)
|
def write_to_filterbank(self, filename_out):
""" Write data to blimpy file.
Args:
filename_out (str): Name of output file
"""
print("[Filterbank] Warning: Non-standard function to write in filterbank (.fil) format. Please use Waterfall.")
n_bytes = int(self.header[b'nbits'] / 8)
with open(filename_out, "wb") as fileh:
fileh.write(generate_sigproc_header(self))
j = self.data
if n_bytes == 4:
np.float32(j.ravel()).tofile(fileh)
elif n_bytes == 2:
np.int16(j.ravel()).tofile(fileh)
elif n_bytes == 1:
np.int8(j.ravel()).tofile(fileh)
|
[
"Write",
"data",
"to",
"blimpy",
"file",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/filterbank.py#L913-L931
|
[
"def",
"write_to_filterbank",
"(",
"self",
",",
"filename_out",
")",
":",
"print",
"(",
"\"[Filterbank] Warning: Non-standard function to write in filterbank (.fil) format. Please use Waterfall.\"",
")",
"n_bytes",
"=",
"int",
"(",
"self",
".",
"header",
"[",
"b'nbits'",
"]",
"/",
"8",
")",
"with",
"open",
"(",
"filename_out",
",",
"\"wb\"",
")",
"as",
"fileh",
":",
"fileh",
".",
"write",
"(",
"generate_sigproc_header",
"(",
"self",
")",
")",
"j",
"=",
"self",
".",
"data",
"if",
"n_bytes",
"==",
"4",
":",
"np",
".",
"float32",
"(",
"j",
".",
"ravel",
"(",
")",
")",
".",
"tofile",
"(",
"fileh",
")",
"elif",
"n_bytes",
"==",
"2",
":",
"np",
".",
"int16",
"(",
"j",
".",
"ravel",
"(",
")",
")",
".",
"tofile",
"(",
"fileh",
")",
"elif",
"n_bytes",
"==",
"1",
":",
"np",
".",
"int8",
"(",
"j",
".",
"ravel",
"(",
")",
")",
".",
"tofile",
"(",
"fileh",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
test
|
Filterbank.write_to_hdf5
|
Write data to HDF5 file.
Args:
filename_out (str): Name of output file
|
blimpy/filterbank.py
|
def write_to_hdf5(self, filename_out, *args, **kwargs):
""" Write data to HDF5 file.
Args:
filename_out (str): Name of output file
"""
print("[Filterbank] Warning: Non-standard function to write in HDF5 (.h5) format. Please use Waterfall.")
if not HAS_HDF5:
raise RuntimeError("h5py package required for HDF5 output.")
with h5py.File(filename_out, 'w') as h5:
dset = h5.create_dataset(b'data',
data=self.data,
compression='lzf')
dset_mask = h5.create_dataset(b'mask',
shape=self.data.shape,
compression='lzf',
dtype='uint8')
dset.dims[0].label = b"frequency"
dset.dims[1].label = b"feed_id"
dset.dims[2].label = b"time"
dset_mask.dims[0].label = b"frequency"
dset_mask.dims[1].label = b"feed_id"
dset_mask.dims[2].label = b"time"
# Copy over header information as attributes
for key, value in self.header.items():
dset.attrs[key] = value
|
def write_to_hdf5(self, filename_out, *args, **kwargs):
""" Write data to HDF5 file.
Args:
filename_out (str): Name of output file
"""
print("[Filterbank] Warning: Non-standard function to write in HDF5 (.h5) format. Please use Waterfall.")
if not HAS_HDF5:
raise RuntimeError("h5py package required for HDF5 output.")
with h5py.File(filename_out, 'w') as h5:
dset = h5.create_dataset(b'data',
data=self.data,
compression='lzf')
dset_mask = h5.create_dataset(b'mask',
shape=self.data.shape,
compression='lzf',
dtype='uint8')
dset.dims[0].label = b"frequency"
dset.dims[1].label = b"feed_id"
dset.dims[2].label = b"time"
dset_mask.dims[0].label = b"frequency"
dset_mask.dims[1].label = b"feed_id"
dset_mask.dims[2].label = b"time"
# Copy over header information as attributes
for key, value in self.header.items():
dset.attrs[key] = value
|
[
"Write",
"data",
"to",
"HDF5",
"file",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/filterbank.py#L933-L966
|
[
"def",
"write_to_hdf5",
"(",
"self",
",",
"filename_out",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"print",
"(",
"\"[Filterbank] Warning: Non-standard function to write in HDF5 (.h5) format. Please use Waterfall.\"",
")",
"if",
"not",
"HAS_HDF5",
":",
"raise",
"RuntimeError",
"(",
"\"h5py package required for HDF5 output.\"",
")",
"with",
"h5py",
".",
"File",
"(",
"filename_out",
",",
"'w'",
")",
"as",
"h5",
":",
"dset",
"=",
"h5",
".",
"create_dataset",
"(",
"b'data'",
",",
"data",
"=",
"self",
".",
"data",
",",
"compression",
"=",
"'lzf'",
")",
"dset_mask",
"=",
"h5",
".",
"create_dataset",
"(",
"b'mask'",
",",
"shape",
"=",
"self",
".",
"data",
".",
"shape",
",",
"compression",
"=",
"'lzf'",
",",
"dtype",
"=",
"'uint8'",
")",
"dset",
".",
"dims",
"[",
"0",
"]",
".",
"label",
"=",
"b\"frequency\"",
"dset",
".",
"dims",
"[",
"1",
"]",
".",
"label",
"=",
"b\"feed_id\"",
"dset",
".",
"dims",
"[",
"2",
"]",
".",
"label",
"=",
"b\"time\"",
"dset_mask",
".",
"dims",
"[",
"0",
"]",
".",
"label",
"=",
"b\"frequency\"",
"dset_mask",
".",
"dims",
"[",
"1",
"]",
".",
"label",
"=",
"b\"feed_id\"",
"dset_mask",
".",
"dims",
"[",
"2",
"]",
".",
"label",
"=",
"b\"time\"",
"# Copy over header information as attributes",
"for",
"key",
",",
"value",
"in",
"self",
".",
"header",
".",
"items",
"(",
")",
":",
"dset",
".",
"attrs",
"[",
"key",
"]",
"=",
"value"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
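A minimal round-trip sketch for write_to_hdf5() above, using only h5py; "observation.h5" is a hypothetical placeholder filename. It reads back the data and mask datasets and the header keys stored as attributes.

import h5py

with h5py.File("observation.h5", "r") as h5:
    data = h5["data"][:]              # the spectrogram array
    mask = h5["mask"][:]              # zero-initialized mask dataset
    header = dict(h5["data"].attrs)   # header keys copied as attributes
print(data.shape, sorted(header))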
|
test
|
Filterbank.calibrate_band_pass_N1
|
One way to calibrate the band pass is to take the median value
for every frequency fine channel, and divide by it.
|
blimpy/filterbank.py
|
def calibrate_band_pass_N1(self):
""" One way to calibrate the band pass is to take the median value
for every frequency fine channel, and divide by it.
"""
band_pass = np.median(self.data.squeeze(),axis=0)
self.data = self.data/band_pass
|
def calibrate_band_pass_N1(self):
""" One way to calibrate the band pass is to take the median value
for every frequency fine channel, and divide by it.
"""
band_pass = np.median(self.data.squeeze(),axis=0)
self.data = self.data/band_pass
|
[
"One",
"way",
"to",
"calibrate",
"the",
"band",
"pass",
"is",
"to",
"take",
"the",
"median",
"value",
"for",
"every",
"frequency",
"fine",
"channel",
"and",
"divide",
"by",
"it",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/filterbank.py#L968-L974
|
[
"def",
"calibrate_band_pass_N1",
"(",
"self",
")",
":",
"band_pass",
"=",
"np",
".",
"median",
"(",
"self",
".",
"data",
".",
"squeeze",
"(",
")",
",",
"axis",
"=",
"0",
")",
"self",
".",
"data",
"=",
"self",
".",
"data",
"/",
"band_pass"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
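A toy demonstration of the N1 scheme on synthetic data, assuming blimpy's (time, pol, channel) layout: each fine channel is divided by its median over time, which flattens the instrumental ripple.

import numpy as np

rng = np.random.default_rng(0)
ripple = 1.0 + 0.5 * np.sin(np.linspace(0, 3, 64))      # fake bandpass shape
data = rng.normal(10.0, 0.1, (100, 1, 64)) * ripple     # (time, 1, chan)
band_pass = np.median(data.squeeze(), axis=0)           # per-channel median over time
flattened = data / band_pass                            # ripple divided out
print(flattened.squeeze().mean(axis=0).round(2))        # ~1.0 in every channel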
|
test
|
get_stokes
|
Output stokes parameters (I,Q,U,V) for a rawspec
cross polarization filterbank file
|
blimpy/calib_utils/stokescal.py
|
def get_stokes(cross_dat, feedtype='l'):
'''Output stokes parameters (I,Q,U,V) for a rawspec
cross polarization filterbank file'''
#Compute Stokes Parameters
if feedtype=='l':
#I = XX+YY
I = cross_dat[:,0,:]+cross_dat[:,1,:]
#Q = XX-YY
Q = cross_dat[:,0,:]-cross_dat[:,1,:]
#U = 2*Re(XY)
U = 2*cross_dat[:,2,:]
#V = -2*Im(XY)
V = -2*cross_dat[:,3,:]
elif feedtype=='c':
#I = LL+RR
I = cross_dat[:,0,:]+cross_dat[:,1,:]
#Q = 2*Re(RL)
Q = 2*cross_dat[:,2,:]
#U = 2*Im(RL)
U = -2*cross_dat[:,3,:]
#V = RR-LL
V = cross_dat[:,1,:]-cross_dat[:,0,:]
else:
raise ValueError('feedtype must be \'l\' (linear) or \'c\' (circular)')
#Add middle dimension to match Filterbank format
I = np.expand_dims(I,axis=1)
Q = np.expand_dims(Q,axis=1)
U = np.expand_dims(U,axis=1)
V = np.expand_dims(V,axis=1)
#Compute linear polarization
#L=np.sqrt(np.square(Q)+np.square(U))
return I,Q,U,V
|
def get_stokes(cross_dat, feedtype='l'):
'''Output stokes parameters (I,Q,U,V) for a rawspec
cross polarization filterbank file'''
#Compute Stokes Parameters
if feedtype=='l':
#I = XX+YY
I = cross_dat[:,0,:]+cross_dat[:,1,:]
#Q = XX-YY
Q = cross_dat[:,0,:]-cross_dat[:,1,:]
#U = 2*Re(XY)
U = 2*cross_dat[:,2,:]
#V = -2*Im(XY)
V = -2*cross_dat[:,3,:]
elif feedtype=='c':
#I = LL+RR
I = cross_dat[:,0,:]+cross_dat[:,1,:]
#Q = 2*Re(RL)
Q = 2*cross_dat[:,2,:]
#U = 2*Im(RL)
U = -2*cross_dat[:,3,:]
#V = RR-LL
V = cross_dat[:,1,:]-cross_dat[:,0,:]
else:
raise ValueError('feedtype must be \'l\' (linear) or \'c\' (circular)')
#Add middle dimension to match Filterbank format
I = np.expand_dims(I,axis=1)
Q = np.expand_dims(Q,axis=1)
U = np.expand_dims(U,axis=1)
V = np.expand_dims(V,axis=1)
#Compute linear polarization
#L=np.sqrt(np.square(Q)+np.square(U))
return I,Q,U,V
|
[
"Output",
"stokes",
"parameters",
"(",
"I",
"Q",
"U",
"V",
")",
"for",
"a",
"rawspec",
"cross",
"polarization",
"filterbank",
"file"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/calib_utils/stokescal.py#L6-L42
|
[
"def",
"get_stokes",
"(",
"cross_dat",
",",
"feedtype",
"=",
"'l'",
")",
":",
"#Compute Stokes Parameters",
"if",
"feedtype",
"==",
"'l'",
":",
"#I = XX+YY",
"I",
"=",
"cross_dat",
"[",
":",
",",
"0",
",",
":",
"]",
"+",
"cross_dat",
"[",
":",
",",
"1",
",",
":",
"]",
"#Q = XX-YY",
"Q",
"=",
"cross_dat",
"[",
":",
",",
"0",
",",
":",
"]",
"-",
"cross_dat",
"[",
":",
",",
"1",
",",
":",
"]",
"#U = 2*Re(XY)",
"U",
"=",
"2",
"*",
"cross_dat",
"[",
":",
",",
"2",
",",
":",
"]",
"#V = -2*Im(XY)",
"V",
"=",
"-",
"2",
"*",
"cross_dat",
"[",
":",
",",
"3",
",",
":",
"]",
"elif",
"feedtype",
"==",
"'c'",
":",
"#I = LL+RR",
"I",
"=",
"cross_dat",
"[",
":",
",",
"0",
",",
":",
"]",
"+",
"cross_dat",
"[",
":",
",",
"1",
",",
":",
"]",
"#Q = 2*Re(RL)",
"Q",
"=",
"2",
"*",
"cross_dat",
"[",
":",
",",
"2",
",",
":",
"]",
"#U = 2*Im(RL)",
"U",
"=",
"-",
"2",
"*",
"cross_dat",
"[",
":",
",",
"3",
",",
":",
"]",
"#V = RR-LL",
"V",
"=",
"cross_dat",
"[",
":",
",",
"1",
",",
":",
"]",
"-",
"cross_dat",
"[",
":",
",",
"0",
",",
":",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'feedtype must be \\'l\\' (linear) or \\'c\\' (circular)'",
")",
"#Add middle dimension to match Filterbank format",
"I",
"=",
"np",
".",
"expand_dims",
"(",
"I",
",",
"axis",
"=",
"1",
")",
"Q",
"=",
"np",
".",
"expand_dims",
"(",
"Q",
",",
"axis",
"=",
"1",
")",
"U",
"=",
"np",
".",
"expand_dims",
"(",
"U",
",",
"axis",
"=",
"1",
")",
"V",
"=",
"np",
".",
"expand_dims",
"(",
"V",
",",
"axis",
"=",
"1",
")",
"#Compute linear polarization",
"#L=np.sqrt(np.square(Q)+np.square(U))",
"return",
"I",
",",
"Q",
",",
"U",
",",
"V"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
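A small check of the linear-feed arithmetic, assuming the rawspec layout (n_time, 4, n_chan) with XX, YY, Re(XY), Im(XY) along the middle axis; the values are synthetic.

import numpy as np
from blimpy.calib_utils.stokescal import get_stokes

cross_dat = np.arange(2 * 4 * 3, dtype=float).reshape(2, 4, 3)
I, Q, U, V = get_stokes(cross_dat, feedtype='l')
print(I.shape)     # (2, 1, 3): the middle axis is restored by expand_dims
assert np.allclose(I.squeeze(), cross_dat[:, 0, :] + cross_dat[:, 1, :])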
|
test
|
convert_to_coarse
|
Converts a data array with length n_chans to an array of length n_coarse_chans
by averaging over the coarse channels
|
blimpy/calib_utils/stokescal.py
|
def convert_to_coarse(data,chan_per_coarse):
'''
Converts a data array with length n_chans to an array of length n_coarse_chans
by averaging over the coarse channels
'''
#find number of coarse channels and reshape array
num_coarse = data.size//chan_per_coarse #integer division so the reshape below gets an int
data_shaped = np.array(np.reshape(data,(num_coarse,chan_per_coarse)))
#Return the average over each coarse channel
return np.mean(data_shaped[:,2:-1],axis=1)
|
def convert_to_coarse(data,chan_per_coarse):
'''
Converts a data array with length n_chans to an array of length n_coarse_chans
by averaging over the coarse channels
'''
#find number of coarse channels and reshape array
num_coarse = data.size//chan_per_coarse #integer division so the reshape below gets an int
data_shaped = np.array(np.reshape(data,(num_coarse,chan_per_coarse)))
#Return the average over each coarse channel
return np.mean(data_shaped[:,2:-1],axis=1)
|
[
"Converts",
"a",
"data",
"array",
"with",
"length",
"n_chans",
"to",
"an",
"array",
"of",
"length",
"n_coarse_chans",
"by",
"averaging",
"over",
"the",
"coarse",
"channels"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/calib_utils/stokescal.py#L44-L54
|
[
"def",
"convert_to_coarse",
"(",
"data",
",",
"chan_per_coarse",
")",
":",
"#find number of coarse channels and reshape array",
"num_coarse",
"=",
"data",
".",
"size",
"/",
"chan_per_coarse",
"data_shaped",
"=",
"np",
".",
"array",
"(",
"np",
".",
"reshape",
"(",
"data",
",",
"(",
"num_coarse",
",",
"chan_per_coarse",
")",
")",
")",
"#Return the average over each coarse channel",
"return",
"np",
".",
"mean",
"(",
"data_shaped",
"[",
":",
",",
"2",
":",
"-",
"1",
"]",
",",
"axis",
"=",
"1",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
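The same averaging written out standalone on synthetic data; the explicit // mirrors the integer-division fix above so reshape() receives an int under Python 3.

import numpy as np

data = np.arange(32, dtype=float)             # 32 fine channels
chan_per_coarse = 8
num_coarse = data.size // chan_per_coarse     # 4 coarse channels
shaped = data.reshape(num_coarse, chan_per_coarse)
coarse = shaped[:, 2:-1].mean(axis=1)         # drop edge channels, as in [:,2:-1]
print(coarse)                                 # one value per coarse channel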
|
test
|
phase_offsets
|
Calculates phase difference between X and Y feeds given U and V (U and Q for circular basis)
data from a noise diode measurement on the target
|
blimpy/calib_utils/stokescal.py
|
def phase_offsets(Idat,Qdat,Udat,Vdat,tsamp,chan_per_coarse,feedtype='l',**kwargs):
'''
Calculates phase difference between X and Y feeds given U and V (U and Q for circular basis)
data from a noise diode measurement on the target
'''
#Fold noise diode data and calculate ON OFF differences for U and V
if feedtype=='l':
U_OFF,U_ON = foldcal(Udat,tsamp,**kwargs)
V_OFF,V_ON = foldcal(Vdat,tsamp,**kwargs)
Udiff = U_ON-U_OFF
Vdiff = V_ON-V_OFF
poffset = np.arctan2(-1*Vdiff,Udiff)
if feedtype=='c':
U_OFF,U_ON = foldcal(Udat,tsamp,**kwargs)
Q_OFF,Q_ON = foldcal(Qdat,tsamp,**kwargs)
Udiff = U_ON-U_OFF
Qdiff = Q_ON-Q_OFF
poffset = np.arctan2(Udiff,Qdiff)
coarse_p = convert_to_coarse(poffset,chan_per_coarse)
#Correct for problems created by discontinuity in arctan
#Find whether phase offsets have increasing or decreasing slope
y = coarse_p[:6]
x = np.arange(y.size)
m = np.polyfit(x,y,1)[0]
for i in range(coarse_p.size-3):
if (m>0 and coarse_p[i+1]<coarse_p[i]) or (m<0 and coarse_p[i+1]>coarse_p[i]):
coarse_p[i+1] = 2*coarse_p[i+2]-coarse_p[i+3] #Move problem point near the next
return coarse_p
|
def phase_offsets(Idat,Qdat,Udat,Vdat,tsamp,chan_per_coarse,feedtype='l',**kwargs):
'''
Calculates phase difference between X and Y feeds given U and V (U and Q for circular basis)
data from a noise diode measurement on the target
'''
#Fold noise diode data and calculate ON OFF differences for U and V
if feedtype=='l':
U_OFF,U_ON = foldcal(Udat,tsamp,**kwargs)
V_OFF,V_ON = foldcal(Vdat,tsamp,**kwargs)
Udiff = U_ON-U_OFF
Vdiff = V_ON-V_OFF
poffset = np.arctan2(-1*Vdiff,Udiff)
if feedtype=='c':
U_OFF,U_ON = foldcal(Udat,tsamp,**kwargs)
Q_OFF,Q_ON = foldcal(Qdat,tsamp,**kwargs)
Udiff = U_ON-U_OFF
Qdiff = Q_ON-Q_OFF
poffset = np.arctan2(Udiff,Qdiff)
coarse_p = convert_to_coarse(poffset,chan_per_coarse)
#Correct for problems created by discontinuity in arctan
#Find whether phase offsets have increasing or decreasing slope
y = coarse_p[:6]
x = np.arange(y.size)
m = np.polyfit(x,y,1)[0]
for i in range(coarse_p.size-3):
if (m>0 and coarse_p[i+1]<coarse_p[i]) or (m<0 and coarse_p[i+1]>coarse_p[i]):
coarse_p[i+1] = 2*coarse_p[i+2]-coarse_p[i+3] #Move problem point near the next
return coarse_p
|
[
"Calculates",
"phase",
"difference",
"between",
"X",
"and",
"Y",
"feeds",
"given",
"U",
"and",
"V",
"(",
"U",
"and",
"Q",
"for",
"circular",
"basis",
")",
"data",
"from",
"a",
"noise",
"diode",
"measurement",
"on",
"the",
"target"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/calib_utils/stokescal.py#L56-L88
|
[
"def",
"phase_offsets",
"(",
"Idat",
",",
"Qdat",
",",
"Udat",
",",
"Vdat",
",",
"tsamp",
",",
"chan_per_coarse",
",",
"feedtype",
"=",
"'l'",
",",
"*",
"*",
"kwargs",
")",
":",
"#Fold noise diode data and calculate ON OFF diferences for U and V",
"if",
"feedtype",
"==",
"'l'",
":",
"U_OFF",
",",
"U_ON",
"=",
"foldcal",
"(",
"Udat",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"V_OFF",
",",
"V_ON",
"=",
"foldcal",
"(",
"Vdat",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"Udiff",
"=",
"U_ON",
"-",
"U_OFF",
"Vdiff",
"=",
"V_ON",
"-",
"V_OFF",
"poffset",
"=",
"np",
".",
"arctan2",
"(",
"-",
"1",
"*",
"Vdiff",
",",
"Udiff",
")",
"if",
"feedtype",
"==",
"'c'",
":",
"U_OFF",
",",
"U_ON",
"=",
"foldcal",
"(",
"Udat",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"Q_OFF",
",",
"Q_ON",
"=",
"foldcal",
"(",
"Qdat",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"Udiff",
"=",
"U_ON",
"-",
"U_OFF",
"Qdiff",
"=",
"Q_ON",
"-",
"Q_OFF",
"poffset",
"=",
"np",
".",
"arctan2",
"(",
"Udiff",
",",
"Qdiff",
")",
"coarse_p",
"=",
"convert_to_coarse",
"(",
"poffset",
",",
"chan_per_coarse",
")",
"#Correct for problems created by discontinuity in arctan",
"#Find whether phase offsets have increasing or decreasing slope",
"y",
"=",
"coarse_p",
"[",
":",
"6",
"]",
"x",
"=",
"np",
".",
"arange",
"(",
"y",
".",
"size",
")",
"m",
"=",
"np",
".",
"polyfit",
"(",
"x",
",",
"y",
",",
"1",
")",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"coarse_p",
".",
"size",
"-",
"3",
")",
":",
"if",
"(",
"m",
">",
"0",
"and",
"coarse_p",
"[",
"i",
"+",
"1",
"]",
"<",
"coarse_p",
"[",
"i",
"]",
")",
"or",
"(",
"m",
"<",
"0",
"and",
"coarse_p",
"[",
"i",
"+",
"1",
"]",
">",
"coarse_p",
"[",
"i",
"]",
")",
":",
"coarse_p",
"[",
"i",
"+",
"1",
"]",
"=",
"2",
"*",
"coarse_p",
"[",
"i",
"+",
"2",
"]",
"-",
"coarse_p",
"[",
"i",
"+",
"3",
"]",
"#Move problem point near the next",
"return",
"coarse_p"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
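A scalar sanity check of the arctan2 step, under the illustrative assumption that the diode gives U = A*cos(psi) and V = -A*sin(psi) per channel; arctan2(-V, U) then recovers psi exactly.

import numpy as np

psi_true = np.linspace(-1.2, 1.2, 5)     # radians, one per channel
A = 3.0
U = A * np.cos(psi_true)
V = -A * np.sin(psi_true)
psi_est = np.arctan2(-1 * V, U)
print(np.allclose(psi_est, psi_true))    # True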
|
test
|
gain_offsets
|
Determines relative gain error in the X and Y feeds for an
observation given I and Q (I and V for circular basis) noise diode data.
|
blimpy/calib_utils/stokescal.py
|
def gain_offsets(Idat,Qdat,Udat,Vdat,tsamp,chan_per_coarse,feedtype='l',**kwargs):
'''
Determines relative gain error in the X and Y feeds for an
observation given I and Q (I and V for circular basis) noise diode data.
'''
if feedtype=='l':
#Fold noise diode data and calculate ON OFF differences for I and Q
I_OFF,I_ON = foldcal(Idat,tsamp,**kwargs)
Q_OFF,Q_ON = foldcal(Qdat,tsamp,**kwargs)
#Calculate power in each feed for noise diode ON and OFF
XX_ON = (I_ON+Q_ON)/2
XX_OFF = (I_OFF+Q_OFF)/2
YY_ON = (I_ON-Q_ON)/2
YY_OFF = (I_OFF-Q_OFF)/2
#Calculate gain offset (divided by 2) as defined in Heiles (2001)
G = (XX_OFF-YY_OFF)/(XX_OFF+YY_OFF)
if feedtype=='c':
#Fold noise diode data and calculate ON OFF differences for I and V
I_OFF,I_ON = foldcal(Idat,tsamp,**kwargs)
V_OFF,V_ON = foldcal(Vdat,tsamp,**kwargs)
#Calculate power in each feed for noise diode ON and OFF
RR_ON = (I_ON+V_ON)/2
RR_OFF = (I_OFF+V_OFF)/2
LL_ON = (I_ON-V_ON)/2
LL_OFF = (I_OFF-V_OFF)/2
#Calculate gain offset (divided by 2) as defined in Heiles (2001)
G = (RR_OFF-LL_OFF)/(RR_OFF+LL_OFF)
return convert_to_coarse(G,chan_per_coarse)
|
def gain_offsets(Idat,Qdat,Udat,Vdat,tsamp,chan_per_coarse,feedtype='l',**kwargs):
'''
Determines relative gain error in the X and Y feeds for an
observation given I and Q (I and V for circular basis) noise diode data.
'''
if feedtype=='l':
#Fold noise diode data and calculate ON OFF differences for I and Q
I_OFF,I_ON = foldcal(Idat,tsamp,**kwargs)
Q_OFF,Q_ON = foldcal(Qdat,tsamp,**kwargs)
#Calculate power in each feed for noise diode ON and OFF
XX_ON = (I_ON+Q_ON)/2
XX_OFF = (I_OFF+Q_OFF)/2
YY_ON = (I_ON-Q_ON)/2
YY_OFF = (I_OFF-Q_OFF)/2
#Calculate gain offset (divided by 2) as defined in Heiles (2001)
G = (XX_OFF-YY_OFF)/(XX_OFF+YY_OFF)
if feedtype=='c':
#Fold noise diode data and calculate ON OFF differences for I and V
I_OFF,I_ON = foldcal(Idat,tsamp,**kwargs)
V_OFF,V_ON = foldcal(Vdat,tsamp,**kwargs)
#Calculate power in each feed for noise diode ON and OFF
RR_ON = (I_ON+V_ON)/2
RR_OFF = (I_OFF+V_OFF)/2
LL_ON = (I_ON-V_ON)/2
LL_OFF = (I_OFF-V_OFF)/2
#Calculate gain offset (divided by 2) as defined in Heiles (2001)
G = (RR_OFF-LL_OFF)/(RR_OFF+LL_OFF)
return convert_to_coarse(G,chan_per_coarse)
|
[
"Determines",
"relative",
"gain",
"error",
"in",
"the",
"X",
"and",
"Y",
"feeds",
"for",
"an",
"observation",
"given",
"I",
"and",
"Q",
"(",
"I",
"and",
"V",
"for",
"circular",
"basis",
")",
"noise",
"diode",
"data",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/calib_utils/stokescal.py#L90-L123
|
[
"def",
"gain_offsets",
"(",
"Idat",
",",
"Qdat",
",",
"Udat",
",",
"Vdat",
",",
"tsamp",
",",
"chan_per_coarse",
",",
"feedtype",
"=",
"'l'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"feedtype",
"==",
"'l'",
":",
"#Fold noise diode data and calculate ON OFF differences for I and Q",
"I_OFF",
",",
"I_ON",
"=",
"foldcal",
"(",
"Idat",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"Q_OFF",
",",
"Q_ON",
"=",
"foldcal",
"(",
"Qdat",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"#Calculate power in each feed for noise diode ON and OFF",
"XX_ON",
"=",
"(",
"I_ON",
"+",
"Q_ON",
")",
"/",
"2",
"XX_OFF",
"=",
"(",
"I_OFF",
"+",
"Q_OFF",
")",
"/",
"2",
"YY_ON",
"=",
"(",
"I_ON",
"-",
"Q_ON",
")",
"/",
"2",
"YY_OFF",
"=",
"(",
"I_OFF",
"-",
"Q_OFF",
")",
"/",
"2",
"#Calculate gain offset (divided by 2) as defined in Heiles (2001)",
"G",
"=",
"(",
"XX_OFF",
"-",
"YY_OFF",
")",
"/",
"(",
"XX_OFF",
"+",
"YY_OFF",
")",
"if",
"feedtype",
"==",
"'c'",
":",
"#Fold noise diode data and calculate ON OFF differences for I and Q",
"I_OFF",
",",
"I_ON",
"=",
"foldcal",
"(",
"Idat",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"V_OFF",
",",
"V_ON",
"=",
"foldcal",
"(",
"Vdat",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"#Calculate power in each feed for noise diode ON and OFF",
"RR_ON",
"=",
"(",
"I_ON",
"+",
"V_ON",
")",
"/",
"2",
"RR_OFF",
"=",
"(",
"I_OFF",
"+",
"V_OFF",
")",
"/",
"2",
"LL_ON",
"=",
"(",
"I_ON",
"-",
"V_ON",
")",
"/",
"2",
"LL_OFF",
"=",
"(",
"I_OFF",
"-",
"V_OFF",
")",
"/",
"2",
"#Calculate gain offset (divided by 2) as defined in Heiles (2001)",
"G",
"=",
"(",
"RR_OFF",
"-",
"LL_OFF",
")",
"/",
"(",
"RR_OFF",
"+",
"LL_OFF",
")",
"return",
"convert_to_coarse",
"(",
"G",
",",
"chan_per_coarse",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
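A toy check of the linear-feed estimate: if both feeds see the same OFF power scaled by gains gx and gy, then (XX_OFF - YY_OFF)/(XX_OFF + YY_OFF) reduces to (gx - gy)/(gx + gy), independent of the power level (synthetic values).

import numpy as np

gx, gy = 1.10, 0.95
P = np.full(16, 50.0)                     # true OFF power per channel
XX_OFF, YY_OFF = gx * P, gy * P
G = (XX_OFF - YY_OFF) / (XX_OFF + YY_OFF)
print(np.allclose(G, (gx - gy) / (gx + gy)))   # True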
|
test
|
apply_Mueller
|
Returns calibrated Stokes parameters for an observation given an array
of differential gains and phase differences.
|
blimpy/calib_utils/stokescal.py
|
def apply_Mueller(I,Q,U,V, gain_offsets, phase_offsets, chan_per_coarse, feedtype='l'):
'''
Returns calibrated Stokes parameters for an observation given an array
of differential gains and phase differences.
'''
#Find shape of data arrays and calculate number of coarse channels
shape = I.shape
ax0 = I.shape[0]
ax1 = I.shape[1]
nchans = I.shape[2]
ncoarse = nchans//chan_per_coarse #integer division so the reshapes below get ints
#Reshape data arrays to separate coarse channels
I = np.reshape(I,(ax0,ax1,ncoarse,chan_per_coarse))
Q = np.reshape(Q,(ax0,ax1,ncoarse,chan_per_coarse))
U = np.reshape(U,(ax0,ax1,ncoarse,chan_per_coarse))
V = np.reshape(V,(ax0,ax1,ncoarse,chan_per_coarse))
#Swap axes 2 and 3 in order for broadcasting to work correctly
I = np.swapaxes(I,2,3)
Q = np.swapaxes(Q,2,3)
U = np.swapaxes(U,2,3)
V = np.swapaxes(V,2,3)
#Apply top left corner of electronics chain inverse Mueller matrix
a = 1/(1-gain_offsets**2)
if feedtype=='l':
Icorr = a*(I-gain_offsets*Q)
Qcorr = a*(-1*gain_offsets*I+Q)
I = None
Q = None
if feedtype=='c':
Icorr = a*(I-gain_offsets*V)
Vcorr = a*(-1*gain_offsets*I+V)
I = None
V = None
#Apply bottom right corner of electronics chain inverse Mueller matrix
if feedtype=='l':
Ucorr = U*np.cos(phase_offsets)-V*np.sin(phase_offsets)
Vcorr = U*np.sin(phase_offsets)+V*np.cos(phase_offsets)
U = None
V = None
if feedtype=='c':
Qcorr = Q*np.cos(phase_offsets)+U*np.sin(phase_offsets)
Ucorr = -1*Q*np.sin(phase_offsets)+U*np.cos(phase_offsets)
Q = None
U = None
#Reshape arrays to original shape
Icorr = np.reshape(np.swapaxes(Icorr,2,3),shape)
Qcorr = np.reshape(np.swapaxes(Qcorr,2,3),shape)
Ucorr = np.reshape(np.swapaxes(Ucorr,2,3),shape)
Vcorr = np.reshape(np.swapaxes(Vcorr,2,3),shape)
#Return corrected data arrays
return Icorr,Qcorr,Ucorr,Vcorr
|
def apply_Mueller(I,Q,U,V, gain_offsets, phase_offsets, chan_per_coarse, feedtype='l'):
'''
Returns calibrated Stokes parameters for an observation given an array
of differential gains and phase differences.
'''
#Find shape of data arrays and calculate number of coarse channels
shape = I.shape
ax0 = I.shape[0]
ax1 = I.shape[1]
nchans = I.shape[2]
ncoarse = nchans//chan_per_coarse #integer division so the reshapes below get ints
#Reshape data arrays to separate coarse channels
I = np.reshape(I,(ax0,ax1,ncoarse,chan_per_coarse))
Q = np.reshape(Q,(ax0,ax1,ncoarse,chan_per_coarse))
U = np.reshape(U,(ax0,ax1,ncoarse,chan_per_coarse))
V = np.reshape(V,(ax0,ax1,ncoarse,chan_per_coarse))
#Swap axes 2 and 3 in order for broadcasting to work correctly
I = np.swapaxes(I,2,3)
Q = np.swapaxes(Q,2,3)
U = np.swapaxes(U,2,3)
V = np.swapaxes(V,2,3)
#Apply top left corner of electronics chain inverse Mueller matrix
a = 1/(1-gain_offsets**2)
if feedtype=='l':
Icorr = a*(I-gain_offsets*Q)
Qcorr = a*(-1*gain_offsets*I+Q)
I = None
Q = None
if feedtype=='c':
Icorr = a*(I-gain_offsets*V)
Vcorr = a*(-1*gain_offsets*I+V)
I = None
V = None
#Apply bottom right corner of electronics chain inverse Mueller matrix
if feedtype=='l':
Ucorr = U*np.cos(phase_offsets)-V*np.sin(phase_offsets)
Vcorr = U*np.sin(phase_offsets)+V*np.cos(phase_offsets)
U = None
V = None
if feedtype=='c':
Qcorr = Q*np.cos(phase_offsets)+U*np.sin(phase_offsets)
Ucorr = -1*Q*np.sin(phase_offsets)+U*np.cos(phase_offsets)
Q = None
U = None
#Reshape arrays to original shape
Icorr = np.reshape(np.swapaxes(Icorr,2,3),shape)
Qcorr = np.reshape(np.swapaxes(Qcorr,2,3),shape)
Ucorr = np.reshape(np.swapaxes(Ucorr,2,3),shape)
Vcorr = np.reshape(np.swapaxes(Vcorr,2,3),shape)
#Return corrected data arrays
return Icorr,Qcorr,Ucorr,Vcorr
|
[
"Returns",
"calibrated",
"Stokes",
"parameters",
"for",
"an",
"observation",
"given",
"an",
"array",
"of",
"differential",
"gains",
"and",
"phase",
"differences",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/calib_utils/stokescal.py#L124-L181
|
[
"def",
"apply_Mueller",
"(",
"I",
",",
"Q",
",",
"U",
",",
"V",
",",
"gain_offsets",
",",
"phase_offsets",
",",
"chan_per_coarse",
",",
"feedtype",
"=",
"'l'",
")",
":",
"#Find shape of data arrays and calculate number of coarse channels",
"shape",
"=",
"I",
".",
"shape",
"ax0",
"=",
"I",
".",
"shape",
"[",
"0",
"]",
"ax1",
"=",
"I",
".",
"shape",
"[",
"1",
"]",
"nchans",
"=",
"I",
".",
"shape",
"[",
"2",
"]",
"ncoarse",
"=",
"nchans",
"/",
"chan_per_coarse",
"#Reshape data arrays to separate coarse channels",
"I",
"=",
"np",
".",
"reshape",
"(",
"I",
",",
"(",
"ax0",
",",
"ax1",
",",
"ncoarse",
",",
"chan_per_coarse",
")",
")",
"Q",
"=",
"np",
".",
"reshape",
"(",
"Q",
",",
"(",
"ax0",
",",
"ax1",
",",
"ncoarse",
",",
"chan_per_coarse",
")",
")",
"U",
"=",
"np",
".",
"reshape",
"(",
"U",
",",
"(",
"ax0",
",",
"ax1",
",",
"ncoarse",
",",
"chan_per_coarse",
")",
")",
"V",
"=",
"np",
".",
"reshape",
"(",
"V",
",",
"(",
"ax0",
",",
"ax1",
",",
"ncoarse",
",",
"chan_per_coarse",
")",
")",
"#Swap axes 2 and 3 to in order for broadcasting to work correctly",
"I",
"=",
"np",
".",
"swapaxes",
"(",
"I",
",",
"2",
",",
"3",
")",
"Q",
"=",
"np",
".",
"swapaxes",
"(",
"Q",
",",
"2",
",",
"3",
")",
"U",
"=",
"np",
".",
"swapaxes",
"(",
"U",
",",
"2",
",",
"3",
")",
"V",
"=",
"np",
".",
"swapaxes",
"(",
"V",
",",
"2",
",",
"3",
")",
"#Apply top left corner of electronics chain inverse Mueller matrix",
"a",
"=",
"1",
"/",
"(",
"1",
"-",
"gain_offsets",
"**",
"2",
")",
"if",
"feedtype",
"==",
"'l'",
":",
"Icorr",
"=",
"a",
"*",
"(",
"I",
"-",
"gain_offsets",
"*",
"Q",
")",
"Qcorr",
"=",
"a",
"*",
"(",
"-",
"1",
"*",
"gain_offsets",
"*",
"I",
"+",
"Q",
")",
"I",
"=",
"None",
"Q",
"=",
"None",
"if",
"feedtype",
"==",
"'c'",
":",
"Icorr",
"=",
"a",
"*",
"(",
"I",
"-",
"gain_offsets",
"*",
"V",
")",
"Vcorr",
"=",
"a",
"*",
"(",
"-",
"1",
"*",
"gain_offsets",
"*",
"I",
"+",
"V",
")",
"I",
"=",
"None",
"V",
"=",
"None",
"#Apply bottom right corner of electronics chain inverse Mueller matrix",
"if",
"feedtype",
"==",
"'l'",
":",
"Ucorr",
"=",
"U",
"*",
"np",
".",
"cos",
"(",
"phase_offsets",
")",
"-",
"V",
"*",
"np",
".",
"sin",
"(",
"phase_offsets",
")",
"Vcorr",
"=",
"U",
"*",
"np",
".",
"sin",
"(",
"phase_offsets",
")",
"+",
"V",
"*",
"np",
".",
"cos",
"(",
"phase_offsets",
")",
"U",
"=",
"None",
"V",
"=",
"None",
"if",
"feedtype",
"==",
"'c'",
":",
"Qcorr",
"=",
"Q",
"*",
"np",
".",
"cos",
"(",
"phase_offsets",
")",
"+",
"U",
"*",
"np",
".",
"sin",
"(",
"phase_offsets",
")",
"Ucorr",
"=",
"-",
"1",
"*",
"Q",
"*",
"np",
".",
"sin",
"(",
"phase_offsets",
")",
"+",
"U",
"*",
"np",
".",
"cos",
"(",
"phase_offsets",
")",
"Q",
"=",
"None",
"U",
"=",
"None",
"#Reshape arrays to original shape",
"Icorr",
"=",
"np",
".",
"reshape",
"(",
"np",
".",
"swapaxes",
"(",
"Icorr",
",",
"2",
",",
"3",
")",
",",
"shape",
")",
"Qcorr",
"=",
"np",
".",
"reshape",
"(",
"np",
".",
"swapaxes",
"(",
"Qcorr",
",",
"2",
",",
"3",
")",
",",
"shape",
")",
"Ucorr",
"=",
"np",
".",
"reshape",
"(",
"np",
".",
"swapaxes",
"(",
"Ucorr",
",",
"2",
",",
"3",
")",
",",
"shape",
")",
"Vcorr",
"=",
"np",
".",
"reshape",
"(",
"np",
".",
"swapaxes",
"(",
"Vcorr",
",",
"2",
",",
"3",
")",
",",
"shape",
")",
"#Return corrected data arrays",
"return",
"Icorr",
",",
"Qcorr",
",",
"Ucorr",
",",
"Vcorr"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
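A scalar sanity check of the top-left inverse block for a linear feed, assuming the forward model I' = I + g*Q, Q' = g*I + Q with gain offset g; the inverse is a*[[1, -g], [-g, 1]] with a = 1/(1 - g**2), exactly as applied above.

import numpy as np

g = 0.08
I_true, Q_true = 100.0, 5.0
I_meas = I_true + g * Q_true
Q_meas = g * I_true + Q_true
a = 1 / (1 - g**2)
I_corr = a * (I_meas - g * Q_meas)
Q_corr = a * (-1 * g * I_meas + Q_meas)
print(np.isclose(I_corr, I_true), np.isclose(Q_corr, Q_true))   # True True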
|
test
|
calibrate_pols
|
Write Stokes-calibrated filterbank file for a given observation
with a calibrator noise diode measurement on the source
Parameters
----------
cross_pols : string
Path to cross polarization filterbank file (rawspec output) for observation to be calibrated
diode_cross : string
Path to cross polarization filterbank file of noise diode measurement ON the target
obsI : string
Path to Stokes I filterbank file of main observation (only needed if onefile=False)
onefile : boolean
True writes all calibrated Stokes parameters to a single filterbank file,
False writes four separate files
feedtype : 'l' or 'c'
Basis of antenna dipoles. 'c' for circular, 'l' for linear
|
blimpy/calib_utils/stokescal.py
|
def calibrate_pols(cross_pols,diode_cross,obsI=None,onefile=True,feedtype='l',**kwargs):
'''
Write Stokes-calibrated filterbank file for a given observation
with a calibrator noise diode measurement on the source
Parameters
----------
cross_pols : string
Path to cross polarization filterbank file (rawspec output) for observation to be calibrated
diode_cross : string
Path to cross polarization filterbank file of noise diode measurement ON the target
obsI : string
Path to Stokes I filterbank file of main observation (only needed if onefile=False)
onefile : boolean
True writes all calibrated Stokes parameters to a single filterbank file,
False writes four separate files
feedtype : 'l' or 'c'
Basis of antenna dipoles. 'c' for circular, 'l' for linear
'''
#Obtain time sample length, frequencies, and noise diode data
obs = Waterfall(diode_cross,max_load=150)
cross_dat = obs.data
tsamp = obs.header['tsamp']
#Calculate number of coarse channels in the noise diode measurement (usually 8)
dio_ncoarse = obs.calc_n_coarse_chan()
dio_nchans = obs.header['nchans']
dio_chan_per_coarse = dio_nchans//dio_ncoarse
obs = None
Idat,Qdat,Udat,Vdat = get_stokes(cross_dat,feedtype)
cross_dat = None
#Calculate differential gain and phase from noise diode measurements
print('Calculating Mueller Matrix variables')
gams = gain_offsets(Idat,Qdat,Udat,Vdat,tsamp,dio_chan_per_coarse,feedtype,**kwargs)
psis = phase_offsets(Idat,Qdat,Udat,Vdat,tsamp,dio_chan_per_coarse,feedtype,**kwargs)
#Clear data arrays to save memory
Idat = None
Qdat = None
Udat = None
Vdat = None
#Get corrected Stokes parameters
print('Opening '+cross_pols)
cross_obs = Waterfall(cross_pols,max_load=150)
obs_ncoarse = cross_obs.calc_n_coarse_chan()
obs_nchans = cross_obs.header['nchans']
obs_chan_per_coarse = obs_nchans//obs_ncoarse
print('Grabbing Stokes parameters')
I,Q,U,V = get_stokes(cross_obs.data,feedtype)
print('Applying Mueller Matrix')
I,Q,U,V = apply_Mueller(I,Q,U,V,gams,psis,obs_chan_per_coarse,feedtype)
#Use onefile (default) to produce one filterbank file containing all Stokes information
if onefile==True:
cross_obs.data[:,0,:] = np.squeeze(I)
cross_obs.data[:,1,:] = np.squeeze(Q)
cross_obs.data[:,2,:] = np.squeeze(U)
cross_obs.data[:,3,:] = np.squeeze(V)
cross_obs.write_to_fil(cross_pols[:-15]+'.SIQUV.polcal.fil')
print('Calibrated Stokes parameters written to '+cross_pols[:-15]+'.SIQUV.polcal.fil')
return
#Write corrected Stokes parameters to four filterbank files if onefile==False
obs = Waterfall(obsI,max_load=150)
obs.data = I
obs.write_to_fil(cross_pols[:-15]+'.SI.polcal.fil') #assuming file is named *.cross_pols.fil
print('Calibrated Stokes I written to '+cross_pols[:-15]+'.SI.polcal.fil')
obs.data = Q
obs.write_to_fil(cross_pols[:-15]+'.Q.polcal.fil') #assuming file is named *.cross_pols.fil
print('Calibrated Stokes Q written to '+cross_pols[:-15]+'.Q.polcal.fil')
obs.data = U
obs.write_to_fil(cross_pols[:-15]+'.U.polcal.fil') #assuming file is named *.cross_pols.fil
print('Calibrated Stokes U written to '+cross_pols[:-15]+'.U.polcal.fil')
obs.data = V
obs.write_to_fil(cross_pols[:-15]+'.V.polcal.fil') #assuming file is named *.cross_pols.fil
print('Calibrated Stokes V written to '+cross_pols[:-15]+'.V.polcal.fil')
|
def calibrate_pols(cross_pols,diode_cross,obsI=None,onefile=True,feedtype='l',**kwargs):
'''
Write Stokes-calibrated filterbank file for a given observation
with a calibrator noise diode measurement on the source
Parameters
----------
cross_pols : string
Path to cross polarization filterbank file (rawspec output) for observation to be calibrated
diode_cross : string
Path to cross polarization filterbank file of noise diode measurement ON the target
obsI : string
Path to Stokes I filterbank file of main observation (only needed if onefile=False)
onefile : boolean
True writes all calibrated Stokes parameters to a single filterbank file,
False writes four separate files
feedtype : 'l' or 'c'
Basis of antenna dipoles. 'c' for circular, 'l' for linear
'''
#Obtain time sample length, frequencies, and noise diode data
obs = Waterfall(diode_cross,max_load=150)
cross_dat = obs.data
tsamp = obs.header['tsamp']
#Calculate number of coarse channels in the noise diode measurement (usually 8)
dio_ncoarse = obs.calc_n_coarse_chan()
dio_nchans = obs.header['nchans']
dio_chan_per_coarse = dio_nchans//dio_ncoarse
obs = None
Idat,Qdat,Udat,Vdat = get_stokes(cross_dat,feedtype)
cross_dat = None
#Calculate differential gain and phase from noise diode measurements
print('Calculating Mueller Matrix variables')
gams = gain_offsets(Idat,Qdat,Udat,Vdat,tsamp,dio_chan_per_coarse,feedtype,**kwargs)
psis = phase_offsets(Idat,Qdat,Udat,Vdat,tsamp,dio_chan_per_coarse,feedtype,**kwargs)
#Clear data arrays to save memory
Idat = None
Qdat = None
Udat = None
Vdat = None
#Get corrected Stokes parameters
print('Opening '+cross_pols)
cross_obs = Waterfall(cross_pols,max_load=150)
obs_ncoarse = cross_obs.calc_n_coarse_chan()
obs_nchans = cross_obs.header['nchans']
obs_chan_per_coarse = obs_nchans//obs_ncoarse
print('Grabbing Stokes parameters')
I,Q,U,V = get_stokes(cross_obs.data,feedtype)
print('Applying Mueller Matrix')
I,Q,U,V = apply_Mueller(I,Q,U,V,gams,psis,obs_chan_per_coarse,feedtype)
#Use onefile (default) to produce one filterbank file containing all Stokes information
if onefile==True:
cross_obs.data[:,0,:] = np.squeeze(I)
cross_obs.data[:,1,:] = np.squeeze(Q)
cross_obs.data[:,2,:] = np.squeeze(U)
cross_obs.data[:,3,:] = np.squeeze(V)
cross_obs.write_to_fil(cross_pols[:-15]+'.SIQUV.polcal.fil')
print('Calibrated Stokes parameters written to '+cross_pols[:-15]+'.SIQUV.polcal.fil')
return
#Write corrected Stokes parameters to four filterbank files if onefile==False
obs = Waterfall(obsI,max_load=150)
obs.data = I
obs.write_to_fil(cross_pols[:-15]+'.SI.polcal.fil') #assuming file is named *.cross_pols.fil
print('Calibrated Stokes I written to '+cross_pols[:-15]+'.SI.polcal.fil')
obs.data = Q
obs.write_to_fil(cross_pols[:-15]+'.Q.polcal.fil') #assuming file is named *.cross_pols.fil
print('Calibrated Stokes Q written to '+cross_pols[:-15]+'.Q.polcal.fil')
obs.data = U
obs.write_to_fil(cross_pols[:-15]+'.U.polcal.fil') #assuming file is named *.cross_pols.fil
print('Calibrated Stokes U written to '+cross_pols[:-15]+'.U.polcal.fil')
obs.data = V
obs.write_to_fil(cross_pols[:-15]+'.V.polcal.fil') #assuming file is named *.cross_pols.fil
print('Calibrated Stokes V written to '+cross_pols[:-15]+'.V.polcal.fil')
|
[
"Write",
"Stokes",
"-",
"calibrated",
"filterbank",
"file",
"for",
"a",
"given",
"observation",
"with",
"a",
"calibrator",
"noise",
"diode",
"measurement",
"on",
"the",
"source"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/calib_utils/stokescal.py#L183-L264
|
[
"def",
"calibrate_pols",
"(",
"cross_pols",
",",
"diode_cross",
",",
"obsI",
"=",
"None",
",",
"onefile",
"=",
"True",
",",
"feedtype",
"=",
"'l'",
",",
"*",
"*",
"kwargs",
")",
":",
"#Obtain time sample length, frequencies, and noise diode data",
"obs",
"=",
"Waterfall",
"(",
"diode_cross",
",",
"max_load",
"=",
"150",
")",
"cross_dat",
"=",
"obs",
".",
"data",
"tsamp",
"=",
"obs",
".",
"header",
"[",
"'tsamp'",
"]",
"#Calculate number of coarse channels in the noise diode measurement (usually 8)",
"dio_ncoarse",
"=",
"obs",
".",
"calc_n_coarse_chan",
"(",
")",
"dio_nchans",
"=",
"obs",
".",
"header",
"[",
"'nchans'",
"]",
"dio_chan_per_coarse",
"=",
"dio_nchans",
"/",
"dio_ncoarse",
"obs",
"=",
"None",
"Idat",
",",
"Qdat",
",",
"Udat",
",",
"Vdat",
"=",
"get_stokes",
"(",
"cross_dat",
",",
"feedtype",
")",
"cross_dat",
"=",
"None",
"#Calculate differential gain and phase from noise diode measurements",
"print",
"(",
"'Calculating Mueller Matrix variables'",
")",
"gams",
"=",
"gain_offsets",
"(",
"Idat",
",",
"Qdat",
",",
"Udat",
",",
"Vdat",
",",
"tsamp",
",",
"dio_chan_per_coarse",
",",
"feedtype",
",",
"*",
"*",
"kwargs",
")",
"psis",
"=",
"phase_offsets",
"(",
"Idat",
",",
"Qdat",
",",
"Udat",
",",
"Vdat",
",",
"tsamp",
",",
"dio_chan_per_coarse",
",",
"feedtype",
",",
"*",
"*",
"kwargs",
")",
"#Clear data arrays to save memory",
"Idat",
"=",
"None",
"Qdat",
"=",
"None",
"Udat",
"=",
"None",
"Vdat",
"=",
"None",
"#Get corrected Stokes parameters",
"print",
"(",
"'Opening '",
"+",
"cross_pols",
")",
"cross_obs",
"=",
"Waterfall",
"(",
"cross_pols",
",",
"max_load",
"=",
"150",
")",
"obs_ncoarse",
"=",
"cross_obs",
".",
"calc_n_coarse_chan",
"(",
")",
"obs_nchans",
"=",
"cross_obs",
".",
"header",
"[",
"'nchans'",
"]",
"obs_chan_per_coarse",
"=",
"obs_nchans",
"/",
"obs_ncoarse",
"print",
"(",
"'Grabbing Stokes parameters'",
")",
"I",
",",
"Q",
",",
"U",
",",
"V",
"=",
"get_stokes",
"(",
"cross_obs",
".",
"data",
",",
"feedtype",
")",
"print",
"(",
"'Applying Mueller Matrix'",
")",
"I",
",",
"Q",
",",
"U",
",",
"V",
"=",
"apply_Mueller",
"(",
"I",
",",
"Q",
",",
"U",
",",
"V",
",",
"gams",
",",
"psis",
",",
"obs_chan_per_coarse",
",",
"feedtype",
")",
"#Use onefile (default) to produce one filterbank file containing all Stokes information",
"if",
"onefile",
"==",
"True",
":",
"cross_obs",
".",
"data",
"[",
":",
",",
"0",
",",
":",
"]",
"=",
"np",
".",
"squeeze",
"(",
"I",
")",
"cross_obs",
".",
"data",
"[",
":",
",",
"1",
",",
":",
"]",
"=",
"np",
".",
"squeeze",
"(",
"Q",
")",
"cross_obs",
".",
"data",
"[",
":",
",",
"2",
",",
":",
"]",
"=",
"np",
".",
"squeeze",
"(",
"U",
")",
"cross_obs",
".",
"data",
"[",
":",
",",
"3",
",",
":",
"]",
"=",
"np",
".",
"squeeze",
"(",
"V",
")",
"cross_obs",
".",
"write_to_fil",
"(",
"cross_pols",
"[",
":",
"-",
"15",
"]",
"+",
"'.SIQUV.polcal.fil'",
")",
"print",
"(",
"'Calibrated Stokes parameters written to '",
"+",
"cross_pols",
"[",
":",
"-",
"15",
"]",
"+",
"'.SIQUV.polcal.fil'",
")",
"return",
"#Write corrected Stokes parameters to four filterbank files if onefile==False",
"obs",
"=",
"Waterfall",
"(",
"obs_I",
",",
"max_load",
"=",
"150",
")",
"obs",
".",
"data",
"=",
"I",
"obs",
".",
"write_to_fil",
"(",
"cross_pols",
"[",
":",
"-",
"15",
"]",
"+",
"'.SI.polcal.fil'",
")",
"#assuming file is named *.cross_pols.fil",
"print",
"(",
"'Calibrated Stokes I written to '",
"+",
"cross_pols",
"[",
":",
"-",
"15",
"]",
"+",
"'.SI.polcal.fil'",
")",
"obs",
".",
"data",
"=",
"Q",
"obs",
".",
"write_to_fil",
"(",
"cross_pols",
"[",
":",
"-",
"15",
"]",
"+",
"'.Q.polcal.fil'",
")",
"#assuming file is named *.cross_pols.fil",
"print",
"(",
"'Calibrated Stokes Q written to '",
"+",
"cross_pols",
"[",
":",
"-",
"15",
"]",
"+",
"'.Q.polcal.fil'",
")",
"obs",
".",
"data",
"=",
"U",
"obs",
".",
"write_to_fil",
"(",
"cross_pols",
"[",
":",
"-",
"15",
"]",
"+",
"'.U.polcal.fil'",
")",
"#assuming file is named *.cross_pols.fil",
"print",
"(",
"'Calibrated Stokes U written to '",
"+",
"cross_pols",
"[",
":",
"-",
"15",
"]",
"+",
"'.U.polcal.fil'",
")",
"obs",
".",
"data",
"=",
"V",
"obs",
".",
"write_to_fil",
"(",
"cross_pols",
"[",
":",
"-",
"15",
"]",
"+",
"'.V.polcal.fil'",
")",
"#assuming file is named *.cross_pols.fil",
"print",
"(",
"'Calibrated Stokes V written to '",
"+",
"cross_pols",
"[",
":",
"-",
"15",
"]",
"+",
"'.V.polcal.fil'",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
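A hypothetical end-to-end call (the file names are placeholders): the [:-15] slicing above assumes inputs named *.cross_pols.fil, since len('.cross_pols.fil') == 15.

from blimpy.calib_utils.stokescal import calibrate_pols

calibrate_pols('target.0001.cross_pols.fil',
               'diode.0001.cross_pols.fil',
               onefile=True, feedtype='l')
# -> writes target.0001.SIQUV.polcal.fil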
|
test
|
fracpols
|
Output fractional linear and circular polarizations for a
rawspec cross polarization .fil file. NOT STANDARD USE
|
blimpy/calib_utils/stokescal.py
|
def fracpols(str, **kwargs):
'''Output fractional linear and circular polarizations for a
rawspec cross polarization .fil file. NOT STANDARD USE'''
#get_stokes() returns four arrays; derive total linear polarization L from Q and U
I,Q,U,V = get_stokes(str, **kwargs)
L = np.sqrt(np.square(Q)+np.square(U))
return L/I,V/I
|
def fracpols(str, **kwargs):
'''Output fractional linear and circular polarizations for a
rawspec cross polarization .fil file. NOT STANDARD USE'''
#get_stokes() returns four arrays; derive total linear polarization L from Q and U
I,Q,U,V = get_stokes(str, **kwargs)
L = np.sqrt(np.square(Q)+np.square(U))
return L/I,V/I
|
[
"Output",
"fractional",
"linear",
"and",
"circular",
"polarizations",
"for",
"a",
"rawspec",
"cross",
"polarization",
".",
"fil",
"file",
".",
"NOT",
"STANDARD",
"USE"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/calib_utils/stokescal.py#L267-L272
|
[
"def",
"fracpols",
"(",
"str",
",",
"*",
"*",
"kwargs",
")",
":",
"I",
",",
"Q",
",",
"U",
",",
"V",
",",
"L",
"=",
"get_stokes",
"(",
"str",
",",
"*",
"*",
"kwargs",
")",
"return",
"L",
"/",
"I",
",",
"V",
"/",
"I"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
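The fractions on synthetic numbers, assuming a fully polarized toy signal so the results are easy to read; this mirrors the L = sqrt(Q**2 + U**2) and V/I arithmetic in the corrected function above.

import numpy as np

I = np.array([10.0, 10.0])
Q = np.array([6.0, 0.0])
U = np.array([8.0, 0.0])
V = np.array([0.0, 10.0])
L = np.sqrt(np.square(Q) + np.square(U))
print(L / I, V / I)    # [1. 0.] [0. 1.]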
|
test
|
write_stokefils
|
Writes up to 5 new filterbank files corresponding to each Stokes
parameter (and total linear polarization L) for a given cross polarization .fil file
|
blimpy/calib_utils/stokescal.py
|
def write_stokefils(str, str_I, Ifil=False, Qfil=False, Ufil=False, Vfil=False, Lfil=False, **kwargs):
'''Writes up to 5 new filterbank files corresponding to each Stokes
parameter (and total linear polarization L) for a given cross polarization .fil file'''
#get_stokes() returns four arrays; derive total linear polarization L from Q and U
I,Q,U,V = get_stokes(str, **kwargs)
L = np.sqrt(np.square(Q)+np.square(U))
obs = Waterfall(str_I, max_load=150) #Load filterbank file to write stokes data to
if Ifil:
obs.data = I
obs.write_to_fil(str[:-15]+'.I.fil') #assuming file is named *.cross_pols.fil
if Qfil:
obs.data = Q
obs.write_to_fil(str[:-15]+'.Q.fil') #assuming file is named *.cross_pols.fil
if Ufil:
obs.data = U
obs.write_to_fil(str[:-15]+'.U.fil') #assuming file is named *.cross_pols.fil
if Vfil:
obs.data = V
obs.write_to_fil(str[:-15]+'.V.fil') #assuming file is named *.cross_pols.fil
if Lfil:
obs.data = L
obs.write_to_fil(str[:-15]+'.L.fil')
|
def write_stokefils(str, str_I, Ifil=False, Qfil=False, Ufil=False, Vfil=False, Lfil=False, **kwargs):
'''Writes up to 5 new filterbank files corresponding to each Stokes
parameter (and total linear polarization L) for a given cross polarization .fil file'''
#get_stokes() returns four arrays; derive total linear polarization L from Q and U
I,Q,U,V = get_stokes(str, **kwargs)
L = np.sqrt(np.square(Q)+np.square(U))
obs = Waterfall(str_I, max_load=150) #Load filterbank file to write stokes data to
if Ifil:
obs.data = I
obs.write_to_fil(str[:-15]+'.I.fil') #assuming file is named *.cross_pols.fil
if Qfil:
obs.data = Q
obs.write_to_fil(str[:-15]+'.Q.fil') #assuming file is named *.cross_pols.fil
if Ufil:
obs.data = U
obs.write_to_fil(str[:-15]+'.U.fil') #assuming file is named *.cross_pols.fil
if Vfil:
obs.data = V
obs.write_to_fil(str[:-15]+'.V.fil') #assuming file is named *.cross_pols.fil
if Lfil:
obs.data = L
obs.write_to_fil(str[:-15]+'.L.fil')
|
[
"Writes",
"up",
"to",
"5",
"new",
"filterbank",
"files",
"corresponding",
"to",
"each",
"Stokes",
"parameter",
"(",
"and",
"total",
"linear",
"polarization",
"L",
")",
"for",
"a",
"given",
"cross",
"polarization",
".",
"fil",
"file"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/calib_utils/stokescal.py#L274-L298
|
[
"def",
"write_stokefils",
"(",
"str",
",",
"str_I",
",",
"Ifil",
"=",
"False",
",",
"Qfil",
"=",
"False",
",",
"Ufil",
"=",
"False",
",",
"Vfil",
"=",
"False",
",",
"Lfil",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"I",
",",
"Q",
",",
"U",
",",
"V",
",",
"L",
"=",
"get_stokes",
"(",
"str",
",",
"*",
"*",
"kwargs",
")",
"obs",
"=",
"Waterfall",
"(",
"str_I",
",",
"max_load",
"=",
"150",
")",
"#Load filterbank file to write stokes data to",
"if",
"Ifil",
":",
"obs",
".",
"data",
"=",
"I",
"obs",
".",
"write_to_fil",
"(",
"str",
"[",
":",
"-",
"15",
"]",
"+",
"'.I.fil'",
")",
"#assuming file is named *.cross_pols.fil",
"if",
"Qfil",
":",
"obs",
".",
"data",
"=",
"Q",
"obs",
".",
"write_to_fil",
"(",
"str",
"[",
":",
"-",
"15",
"]",
"+",
"'.Q.fil'",
")",
"#assuming file is named *.cross_pols.fil",
"if",
"Ufil",
":",
"obs",
".",
"data",
"=",
"U",
"obs",
".",
"write_to_fil",
"(",
"str",
"[",
":",
"-",
"15",
"]",
"+",
"'.U.fil'",
")",
"#assuming file is named *.cross_pols.fil",
"if",
"Vfil",
":",
"obs",
".",
"data",
"=",
"V",
"obs",
".",
"write_to_fil",
"(",
"str",
"[",
":",
"-",
"15",
"]",
"+",
"'.V.fil'",
")",
"#assuming file is named *.cross_pols.fil",
"if",
"Lfil",
":",
"obs",
".",
"data",
"=",
"L",
"obs",
".",
"write_to_fil",
"(",
"str",
"[",
":",
"-",
"15",
"]",
"+",
"'.L.fil'",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
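A hypothetical call (placeholder file names, following the *.cross_pols.fil convention the [:-15] slicing assumes) that writes only the Stokes I and V products:

from blimpy.calib_utils.stokescal import write_stokefils

write_stokefils('target.0001.cross_pols.fil', 'target.0001.fil',
                Ifil=True, Vfil=True)
# -> target.0001.I.fil and target.0001.V.fil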
|
test
|
write_polfils
|
Writes two new filterbank files containing fractional linear and
circular polarization data
|
blimpy/calib_utils/stokescal.py
|
def write_polfils(str, str_I, **kwargs):
'''Writes two new filterbank files containing fractional linear and
circular polarization data'''
lin,circ=fracpols(str, **kwargs)
obs = Waterfall(str_I, max_load=150)
obs.data = lin
obs.write_to_fil(str[:-15]+'.linpol.fil') #assuming file is named *.cross_pols.fil
obs.data = circ
obs.write_to_fil(str[:-15]+'.circpol.fil')
|
def write_polfils(str, str_I, **kwargs):
'''Writes two new filterbank files containing fractional linear and
circular polarization data'''
lin,circ=fracpols(str, **kwargs)
obs = Waterfall(str_I, max_load=150)
obs.data = lin
obs.write_to_fil(str[:-15]+'.linpol.fil') #assuming file is named *.cross_pols.fil
obs.data = circ
obs.write_to_fil(str[:-15]+'.circpol.fil')
|
[
"Writes",
"two",
"new",
"filterbank",
"files",
"containing",
"fractional",
"linear",
"and",
"circular",
"polarization",
"data"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/calib_utils/stokescal.py#L301-L312
|
[
"def",
"write_polfils",
"(",
"str",
",",
"str_I",
",",
"*",
"*",
"kwargs",
")",
":",
"lin",
",",
"circ",
"=",
"fracpols",
"(",
"str",
",",
"*",
"*",
"kwargs",
")",
"obs",
"=",
"Waterfall",
"(",
"str_I",
",",
"max_load",
"=",
"150",
")",
"obs",
".",
"data",
"=",
"lin",
"obs",
".",
"write_to_fil",
"(",
"str",
"[",
":",
"-",
"15",
"]",
"+",
"'.linpol.fil'",
")",
"#assuming file is named *.cross_pols.fil",
"obs",
".",
"data",
"=",
"circ",
"obs",
".",
"write_to_fil",
"(",
"str",
"[",
":",
"-",
"15",
"]",
"+",
"'.circpol.fil'",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
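The matching hypothetical call for the fractional products (placeholder names again):

from blimpy.calib_utils.stokescal import write_polfils

write_polfils('target.0001.cross_pols.fil', 'target.0001.fil')
# -> target.0001.linpol.fil and target.0001.circpol.fil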
|
test
|
closest
|
Return the index of the closest in xarr to value val
|
blimpy/utils.py
|
def closest(xarr, val):
""" Return the index of the closest in xarr to value val """
idx_closest = np.argmin(np.abs(np.array(xarr) - val))
return idx_closest
|
def closest(xarr, val):
""" Return the index of the closest in xarr to value val """
idx_closest = np.argmin(np.abs(np.array(xarr) - val))
return idx_closest
|
[
"Return",
"the",
"index",
"of",
"the",
"closest",
"in",
"xarr",
"to",
"value",
"val"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/utils.py#L18-L21
|
[
"def",
"closest",
"(",
"xarr",
",",
"val",
")",
":",
"idx_closest",
"=",
"np",
".",
"argmin",
"(",
"np",
".",
"abs",
"(",
"np",
".",
"array",
"(",
"xarr",
")",
"-",
"val",
")",
")",
"return",
"idx_closest"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
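A quick demonstration of the nearest-index idiom, with made-up frequency values:

import numpy as np

xarr = [1400.0, 1420.4, 1440.8]    # e.g. channel frequencies in MHz
val = 1419.0
idx = np.argmin(np.abs(np.array(xarr) - val))
print(idx, xarr[idx])              # 1 1420.4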
|
test
|
rebin
|
Rebin data by averaging bins together
Args:
d (np.array): data
n_x (int): number of bins in x dir to rebin into one
n_y (int): number of bins in y dir to rebin into one
Returns:
d: rebinned data with shape (d.shape[0] // n_x, d.shape[1] // n_y)
|
blimpy/utils.py
|
def rebin(d, n_x, n_y=None):
""" Rebin data by averaging bins together
Args:
d (np.array): data
n_x (int): number of bins in x dir to rebin into one
n_y (int): number of bins in y dir to rebin into one
Returns:
d: rebinned data with shape (d.shape[0] // n_x, d.shape[1] // n_y)
"""
if d.ndim == 2:
if n_y is None:
n_y = 1
if n_x is None:
n_x = 1
d = d[:int(d.shape[0] // n_x) * n_x, :int(d.shape[1] // n_y) * n_y]
d = d.reshape((d.shape[0] // n_x, n_x, d.shape[1] // n_y, n_y))
d = d.mean(axis=3)
d = d.mean(axis=1)
elif d.ndim == 1:
d = d[:int(d.shape[0] // n_x) * n_x]
d = d.reshape((d.shape[0] // n_x, n_x))
d = d.mean(axis=1)
else:
raise RuntimeError("Only NDIM <= 2 supported")
return d
|
def rebin(d, n_x, n_y=None):
""" Rebin data by averaging bins together
Args:
d (np.array): data
n_x (int): number of bins in x dir to rebin into one
n_y (int): number of bins in y dir to rebin into one
Returns:
d: rebinned data with shape (d.shape[0] // n_x, d.shape[1] // n_y)
"""
if d.ndim == 2:
if n_y is None:
n_y = 1
if n_x is None:
n_x = 1
d = d[:int(d.shape[0] // n_x) * n_x, :int(d.shape[1] // n_y) * n_y]
d = d.reshape((d.shape[0] // n_x, n_x, d.shape[1] // n_y, n_y))
d = d.mean(axis=3)
d = d.mean(axis=1)
elif d.ndim == 1:
d = d[:int(d.shape[0] // n_x) * n_x]
d = d.reshape((d.shape[0] // n_x, n_x))
d = d.mean(axis=1)
else:
raise RuntimeError("Only NDIM <= 2 supported")
return d
|
[
"Rebin",
"data",
"by",
"averaging",
"bins",
"together"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/utils.py#L24-L51
|
[
"def",
"rebin",
"(",
"d",
",",
"n_x",
",",
"n_y",
"=",
"None",
")",
":",
"if",
"d",
".",
"ndim",
"==",
"2",
":",
"if",
"n_y",
"is",
"None",
":",
"n_y",
"=",
"1",
"if",
"n_x",
"is",
"None",
":",
"n_x",
"=",
"1",
"d",
"=",
"d",
"[",
":",
"int",
"(",
"d",
".",
"shape",
"[",
"0",
"]",
"//",
"n_x",
")",
"*",
"n_x",
",",
":",
"int",
"(",
"d",
".",
"shape",
"[",
"1",
"]",
"//",
"n_y",
")",
"*",
"n_y",
"]",
"d",
"=",
"d",
".",
"reshape",
"(",
"(",
"d",
".",
"shape",
"[",
"0",
"]",
"//",
"n_x",
",",
"n_x",
",",
"d",
".",
"shape",
"[",
"1",
"]",
"//",
"n_y",
",",
"n_y",
")",
")",
"d",
"=",
"d",
".",
"mean",
"(",
"axis",
"=",
"3",
")",
"d",
"=",
"d",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
"elif",
"d",
".",
"ndim",
"==",
"1",
":",
"d",
"=",
"d",
"[",
":",
"int",
"(",
"d",
".",
"shape",
"[",
"0",
"]",
"//",
"n_x",
")",
"*",
"n_x",
"]",
"d",
"=",
"d",
".",
"reshape",
"(",
"(",
"d",
".",
"shape",
"[",
"0",
"]",
"//",
"n_x",
",",
"n_x",
")",
")",
"d",
"=",
"d",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Only NDIM <= 2 supported\"",
")",
"return",
"d"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
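A shape check on synthetic data: averaging every 2 rows and every 4 columns of an (8, 12) array yields (4, 3), i.e. the input shape divided by the bin factors (see the corrected Returns note above).

import numpy as np
from blimpy.utils import rebin

d = np.arange(8 * 12, dtype=float).reshape(8, 12)
out = rebin(d, n_x=2, n_y=4)
print(out.shape)    # (4, 3)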
|
test
|
unpack
|
upgrade data from nbits to 8bits
Notes: Pretty sure this function is a little broken!
|
blimpy/utils.py
|
def unpack(data, nbit):
"""upgrade data from nbits to 8bits
Notes: Pretty sure this function is a little broken!
"""
if nbit > 8:
raise ValueError("unpack: nbit must be <= 8")
if 8 % nbit != 0:
raise ValueError("unpack: nbit must divide into 8")
if data.dtype not in (np.uint8, np.int8):
raise TypeError("unpack: dtype must be 8-bit")
if nbit == 8:
return data
elif nbit == 4:
data = unpack_4to8(data)
return data
elif nbit == 2:
data = unpack_2to8(data)
return data
elif nbit == 1:
data = unpack_1to8(data)
return data
|
def unpack(data, nbit):
"""upgrade data from nbits to 8bits
Notes: Pretty sure this function is a little broken!
"""
if nbit > 8:
raise ValueError("unpack: nbit must be <= 8")
if 8 % nbit != 0:
raise ValueError("unpack: nbit must divide into 8")
if data.dtype not in (np.uint8, np.int8):
raise TypeError("unpack: dtype must be 8-bit")
if nbit == 8:
return data
elif nbit == 4:
data = unpack_4to8(data)
return data
elif nbit == 2:
data = unpack_2to8(data)
return data
elif nbit == 1:
data = unpack_1to8(data)
return data
|
[
"upgrade",
"data",
"from",
"nbits",
"to",
"8bits"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/utils.py#L54-L75
|
[
"def",
"unpack",
"(",
"data",
",",
"nbit",
")",
":",
"if",
"nbit",
">",
"8",
":",
"raise",
"ValueError",
"(",
"\"unpack: nbit must be <= 8\"",
")",
"if",
"8",
"%",
"nbit",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"unpack: nbit must divide into 8\"",
")",
"if",
"data",
".",
"dtype",
"not",
"in",
"(",
"np",
".",
"uint8",
",",
"np",
".",
"int8",
")",
":",
"raise",
"TypeError",
"(",
"\"unpack: dtype must be 8-bit\"",
")",
"if",
"nbit",
"==",
"8",
":",
"return",
"data",
"elif",
"nbit",
"==",
"4",
":",
"data",
"=",
"unpack_4to8",
"(",
"data",
")",
"return",
"data",
"elif",
"nbit",
"==",
"2",
":",
"data",
"=",
"unpack_2to8",
"(",
"data",
")",
"return",
"data",
"elif",
"nbit",
"==",
"1",
":",
"data",
"=",
"unpack_1to8",
"(",
"data",
")",
"return",
"data"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
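The dispatcher's behaviour at the boundaries: 8-bit input is returned untouched, and a width that does not divide 8 is rejected.

import numpy as np
from blimpy.utils import unpack

raw = np.arange(4, dtype=np.uint8)
print(unpack(raw, 8) is raw)       # True: 8-bit data passes through
try:
    unpack(raw, 3)
except ValueError as err:
    print(err)                     # unpack: nbit must divide into 8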
|
test
|
unpack_2to8
|
Promote 2-bit unsigned data into 8-bit unsigned data.
Args:
data: Numpy array with dtype == uint8
Notes:
DATA MUST BE LOADED as np.array() with dtype='uint8'.
This works with some clever shifting and AND / OR operations.
Data is LOADED as 8-bit, then promoted to 32-bits:
/ABCD EFGH/ (8 bits of data)
/0000 0000/0000 0000/0000 0000/ABCD EFGH/ (8 bits of data as a 32-bit word)
Once promoted, we can do some shifting, AND and OR operations:
/0000 0000/0000 ABCD/EFGH 0000/0000 0000/ (shifted << 12)
/0000 0000/0000 ABCD/EFGH 0000/ABCD EFGH/ (bitwise OR of previous two lines)
/0000 0000/0000 ABCD/0000 0000/0000 EFGH/ (bitwise AND with mask 0xF000F)
/0000 00AB/CD00 0000/0000 00EF/GH00 0000/ (prev. line shifted << 6)
/0000 00AB/CD00 ABCD/0000 00EF/GH00 EFGH/ (bitwise OR of previous two lines)
/0000 00AB/0000 00CD/0000 00EF/0000 00GH/ (bitwise AND with 0x3030303)
Then we change the view of the data to interpret it as 4x8 bit:
[000000AB, 000000CD, 000000EF, 000000GH] (change view from 32-bit to 4x8-bit)
The converted bits are then mapped to values in the range [-40, 40] according to a lookup chart.
The mapping is based on specifications in the breakthrough docs:
https://github.com/UCBerkeleySETI/breakthrough/blob/master/doc/RAW-File-Format.md
|
blimpy/utils.py
|
def unpack_2to8(data):
""" Promote 2-bit unisgned data into 8-bit unsigned data.
Args:
data: Numpy array with dtype == uint8
Notes:
DATA MUST BE LOADED as np.array() with dtype='uint8'.
This works with some clever shifting and AND / OR operations.
Data is LOADED as 8-bit, then promoted to 32-bits:
/ABCD EFGH/ (8 bits of data)
/0000 0000/0000 0000/0000 0000/ABCD EFGH/ (8 bits of data as a 32-bit word)
Once promoted, we can do some shifting, AND and OR operations:
/0000 0000/0000 ABCD/EFGH 0000/0000 0000/ (shifted << 12)
/0000 0000/0000 ABCD/EFGH 0000/ABCD EFGH/ (bitwise OR of previous two lines)
/0000 0000/0000 ABCD/0000 0000/0000 EFGH/ (bitwise AND with mask 0xF000F)
/0000 00AB/CD00 0000/0000 00EF/GH00 0000/ (prev. line shifted << 6)
/0000 00AB/CD00 ABCD/0000 00EF/GH00 EFGH/ (bitwise OR of previous two lines)
/0000 00AB/0000 00CD/0000 00EF/0000 00GH/ (bitwise AND with 0x3030303)
Then we change the view of the data to interpret it as 4x8 bit:
[000000AB, 000000CD, 000000EF, 000000GH] (change view from 32-bit to 4x8-bit)
The converted bits are then mapped to values in the range [-40, 40] according to a lookup chart.
The mapping is based on specifications in the breakthrough docs:
https://github.com/UCBerkeleySETI/breakthrough/blob/master/doc/RAW-File-Format.md
"""
two_eight_lookup = {0: 40,
1: 12,
2: -12,
3: -40}
tmp = data.astype(np.uint32)
tmp = (tmp | (tmp << 12)) & 0xF000F
tmp = (tmp | (tmp << 6)) & 0x3030303
tmp = tmp.byteswap()
tmp = tmp.view('uint8')
mapped = np.array(tmp, dtype=np.int8)
for k, v in two_eight_lookup.items():
mapped[tmp == k] = v
return mapped
|
def unpack_2to8(data):
""" Promote 2-bit unisgned data into 8-bit unsigned data.
Args:
data: Numpy array with dtype == uint8
Notes:
DATA MUST BE LOADED as np.array() with dtype='uint8'.
This works with some clever shifting and AND / OR operations.
Data is LOADED as 8-bit, then promoted to 32-bits:
/ABCD EFGH/ (8 bits of data)
/0000 0000/0000 0000/0000 0000/ABCD EFGH/ (8 bits of data as a 32-bit word)
Once promoted, we can do some shifting, AND and OR operations:
/0000 0000/0000 ABCD/EFGH 0000/0000 0000/ (shifted << 12)
/0000 0000/0000 ABCD/EFGH 0000/ABCD EFGH/ (bitwise OR of previous two lines)
/0000 0000/0000 ABCD/0000 0000/0000 EFGH/ (bitwise AND with mask 0xF000F)
/0000 00AB/CD00 0000/0000 00EF/GH00 0000/ (prev. line shifted << 6)
/0000 00AB/CD00 ABCD/0000 00EF/GH00 EFGH/ (bitwise OR of previous two lines)
/0000 00AB/0000 00CD/0000 00EF/0000 00GH/ (bitwise AND with 0x3030303)
Then we change the view of the data to interpret it as 4x8 bit:
[000000AB, 000000CD, 000000EF, 000000GH] (change view from 32-bit to 4x8-bit)
The converted bits are then mapped to values in the range [-40, 40] according to a lookup chart.
The mapping is based on specifications in the breakthrough docs:
https://github.com/UCBerkeleySETI/breakthrough/blob/master/doc/RAW-File-Format.md
"""
two_eight_lookup = {0: 40,
1: 12,
2: -12,
3: -40}
tmp = data.astype(np.uint32)
tmp = (tmp | (tmp << 12)) & 0xF000F
tmp = (tmp | (tmp << 6)) & 0x3030303
tmp = tmp.byteswap()
tmp = tmp.view('uint8')
mapped = np.array(tmp, dtype=np.int8)
for k, v in two_eight_lookup.items():
mapped[tmp == k] = v
return mapped
|
[
"Promote",
"2",
"-",
"bit",
"unisgned",
"data",
"into",
"8",
"-",
"bit",
"unsigned",
"data",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/utils.py#L87-L130
|
[
"def",
"unpack_2to8",
"(",
"data",
")",
":",
"two_eight_lookup",
"=",
"{",
"0",
":",
"40",
",",
"1",
":",
"12",
",",
"2",
":",
"-",
"12",
",",
"3",
":",
"-",
"40",
"}",
"tmp",
"=",
"data",
".",
"astype",
"(",
"np",
".",
"uint32",
")",
"tmp",
"=",
"(",
"tmp",
"|",
"(",
"tmp",
"<<",
"12",
")",
")",
"&",
"0xF000F",
"tmp",
"=",
"(",
"tmp",
"|",
"(",
"tmp",
"<<",
"6",
")",
")",
"&",
"0x3030303",
"tmp",
"=",
"tmp",
".",
"byteswap",
"(",
")",
"tmp",
"=",
"tmp",
".",
"view",
"(",
"'uint8'",
")",
"mapped",
"=",
"np",
".",
"array",
"(",
"tmp",
",",
"dtype",
"=",
"np",
".",
"int8",
")",
"for",
"k",
",",
"v",
"in",
"two_eight_lookup",
".",
"items",
"(",
")",
":",
"mapped",
"[",
"tmp",
"==",
"k",
"]",
"=",
"v",
"return",
"mapped"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
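A worked example on one byte, 0b00011011: its four 2-bit fields (00, 01, 10, 11) map to the levels 40, 12, -12, -40 from the lookup table above. A little-endian host is assumed, since the routine relies on the byteswap()/view('uint8') pair.

import numpy as np
from blimpy.utils import unpack_2to8

raw = np.array([0b00011011], dtype=np.uint8)
print(unpack_2to8(raw))    # [ 40  12 -12 -40]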
|
test
|
unpack_4to8
|
Promote 4-bit unsigned data into 8-bit unsigned data.
Args:
data: Numpy array with dtype == uint8
Notes:
# The process is this:
# ABCDEFGH [Bits of one 4+4-bit value]
# 00000000ABCDEFGH [astype(uint16)]
# 0000ABCDEFGH0000 [<< 4]
# 0000ABCDXXXXEFGH [bitwise 'or' of previous two lines]
# 0000111100001111 [0x0F0F]
# 0000ABCD0000EFGH [bitwise 'and' of previous two lines]
# ABCD0000EFGH0000 [<< 4]
# which effectively pads the two 4-bit values with zeros on the right
# Note: This technique assumes LSB-first ordering
|
blimpy/utils.py
|
def unpack_4to8(data):
""" Promote 2-bit unisgned data into 8-bit unsigned data.
Args:
data: Numpy array with dtype == uint8
Notes:
# The process is this:
# ABCDEFGH [Bits of one 4+4-bit value]
# 00000000ABCDEFGH [astype(uint16)]
# 0000ABCDEFGH0000 [<< 4]
# 0000ABCDXXXXEFGH [bitwise 'or' of previous two lines]
# 0000111100001111 [0x0F0F]
# 0000ABCD0000EFGH [bitwise 'and' of previous two lines]
# ABCD0000EFGH0000 [<< 4]
# which effectively pads the two 4-bit values with zeros on the right
# Note: This technique assumes LSB-first ordering
"""
tmpdata = data.astype(np.int16) # np.empty(upshape, dtype=np.int16)
tmpdata = (tmpdata | (tmpdata << 4)) & 0x0F0F
# tmpdata = tmpdata << 4 # Shift into high bits to avoid needing to sign extend
updata = tmpdata.byteswap()
return updata.view(data.dtype)
|
def unpack_4to8(data):
""" Promote 2-bit unisgned data into 8-bit unsigned data.
Args:
data: Numpy array with dtype == uint8
Notes:
# The process is this:
# ABCDEFGH [Bits of one 4+4-bit value]
# 00000000ABCDEFGH [astype(uint16)]
# 0000ABCDEFGH0000 [<< 4]
# 0000ABCDXXXXEFGH [bitwise 'or' of previous two lines]
# 0000111100001111 [0x0F0F]
# 0000ABCD0000EFGH [bitwise 'and' of previous two lines]
# ABCD0000EFGH0000 [<< 4]
# which effectively pads the two 4-bit values with zeros on the right
# Note: This technique assumes LSB-first ordering
"""
tmpdata = data.astype(np.int16) # np.empty(upshape, dtype=np.int16)
tmpdata = (tmpdata | (tmpdata << 4)) & 0x0F0F
# tmpdata = tmpdata << 4 # Shift into high bits to avoid needing to sign extend
updata = tmpdata.byteswap()
return updata.view(data.dtype)
|
[
"Promote",
"2",
"-",
"bit",
"unisgned",
"data",
"into",
"8",
"-",
"bit",
"unsigned",
"data",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/utils.py#L133-L156
|
[
"def",
"unpack_4to8",
"(",
"data",
")",
":",
"tmpdata",
"=",
"data",
".",
"astype",
"(",
"np",
".",
"int16",
")",
"# np.empty(upshape, dtype=np.int16)",
"tmpdata",
"=",
"(",
"tmpdata",
"|",
"(",
"tmpdata",
"<<",
"4",
")",
")",
"&",
"0x0F0F",
"# tmpdata = tmpdata << 4 # Shift into high bits to avoid needing to sign extend",
"updata",
"=",
"tmpdata",
".",
"byteswap",
"(",
")",
"return",
"updata",
".",
"view",
"(",
"data",
".",
"dtype",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
test
|
get_diff
|
Returns ON-OFF for all Stokes parameters given a cross_pols noise diode measurement
|
blimpy/calib_utils/calib_plots.py
|
def get_diff(dio_cross,feedtype,**kwargs):
'''
Returns ON-OFF for all Stokes parameters given a cross_pols noise diode measurement
'''
#Get Stokes parameters, frequencies, and time sample length
obs = Waterfall(dio_cross,max_load=150)
freqs = obs.populate_freqs()
tsamp = obs.header['tsamp']
data = obs.data
obs = None
I,Q,U,V = get_stokes(data,feedtype)
#Fold noise diode data
I_OFF,I_ON = foldcal(I,tsamp,**kwargs)
Q_OFF,Q_ON = foldcal(Q,tsamp,**kwargs)
U_OFF,U_ON = foldcal(U,tsamp,**kwargs)
V_OFF,V_ON = foldcal(V,tsamp,**kwargs)
#Do ON-OFF subtraction
Idiff = I_ON-I_OFF
Qdiff = Q_ON-Q_OFF
Udiff = U_ON-U_OFF
Vdiff = V_ON-V_OFF
return Idiff,Qdiff,Udiff,Vdiff,freqs
|
def get_diff(dio_cross,feedtype,**kwargs):
'''
Returns ON-OFF for all Stokes parameters given a cross_pols noise diode measurement
'''
#Get Stokes parameters, frequencies, and time sample length
obs = Waterfall(dio_cross,max_load=150)
freqs = obs.populate_freqs()
tsamp = obs.header['tsamp']
data = obs.data
obs = None
I,Q,U,V = get_stokes(data,feedtype)
#Fold noise diode data
I_OFF,I_ON = foldcal(I,tsamp,**kwargs)
Q_OFF,Q_ON = foldcal(Q,tsamp,**kwargs)
U_OFF,U_ON = foldcal(U,tsamp,**kwargs)
V_OFF,V_ON = foldcal(V,tsamp,**kwargs)
#Do ON-OFF subtraction
Idiff = I_ON-I_OFF
Qdiff = Q_ON-Q_OFF
Udiff = U_ON-U_OFF
Vdiff = V_ON-V_OFF
return Idiff,Qdiff,Udiff,Vdiff,freqs
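A usage sketch; 'diode_cross.fil' is a placeholder for a real cross_pols noise diode recording, and the import path simply mirrors the file path shown in this record:

from blimpy.calib_utils.calib_plots import get_diff

Idiff, Qdiff, Udiff, Vdiff, freqs = get_diff('diode_cross.fil', feedtype='l')
print(Idiff.shape, freqs.shape)  # one ON-OFF spectrum per Stokes parameter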
|
[
"Returns",
"ON",
"-",
"OFF",
"for",
"all",
"Stokes",
"parameters",
"given",
"a",
"cross_pols",
"noise",
"diode",
"measurement"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/calib_utils/calib_plots.py#L7-L32
|
[
"def",
"get_diff",
"(",
"dio_cross",
",",
"feedtype",
",",
"*",
"*",
"kwargs",
")",
":",
"#Get Stokes parameters, frequencies, and time sample length",
"obs",
"=",
"Waterfall",
"(",
"dio_cross",
",",
"max_load",
"=",
"150",
")",
"freqs",
"=",
"obs",
".",
"populate_freqs",
"(",
")",
"tsamp",
"=",
"obs",
".",
"header",
"[",
"'tsamp'",
"]",
"data",
"=",
"obs",
".",
"data",
"obs",
"=",
"None",
"I",
",",
"Q",
",",
"U",
",",
"V",
"=",
"get_stokes",
"(",
"data",
",",
"feedtype",
")",
"#Fold noise diode data",
"I_OFF",
",",
"I_ON",
"=",
"foldcal",
"(",
"I",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"Q_OFF",
",",
"Q_ON",
"=",
"foldcal",
"(",
"Q",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"U_OFF",
",",
"U_ON",
"=",
"foldcal",
"(",
"U",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"V_OFF",
",",
"V_ON",
"=",
"foldcal",
"(",
"V",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"#Do ON-OFF subtraction",
"Idiff",
"=",
"I_ON",
"-",
"I_OFF",
"Qdiff",
"=",
"Q_ON",
"-",
"Q_OFF",
"Udiff",
"=",
"U_ON",
"-",
"U_OFF",
"Vdiff",
"=",
"V_ON",
"-",
"V_OFF",
"return",
"Idiff",
",",
"Qdiff",
",",
"Udiff",
",",
"Vdiff",
",",
"freqs"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
test
|
plot_Stokes_diode
|
Plots the uncalibrated full stokes spectrum of the noise diode.
Use diff=False to plot both ON and OFF, or diff=True for ON-OFF
|
blimpy/calib_utils/calib_plots.py
|
def plot_Stokes_diode(dio_cross,diff=True,feedtype='l',**kwargs):
'''
Plots the uncalibrated full stokes spectrum of the noise diode.
Use diff=False to plot both ON and OFF, or diff=True for ON-OFF
'''
#If diff=True, get ON-OFF. If not get ON and OFF separately
if diff==True:
Idiff,Qdiff,Udiff,Vdiff,freqs = get_diff(dio_cross,feedtype,**kwargs)
else:
obs = Waterfall(dio_cross,max_load=150)
freqs = obs.populate_freqs()
tsamp = obs.header['tsamp']
data = obs.data
I,Q,U,V = get_stokes(data,feedtype)
I_OFF,I_ON = foldcal(I,tsamp,**kwargs)
Q_OFF,Q_ON = foldcal(Q,tsamp,**kwargs)
U_OFF,U_ON = foldcal(U,tsamp,**kwargs)
V_OFF,V_ON = foldcal(V,tsamp,**kwargs)
#Plot spectra
if diff==True:
plt.plot(freqs,Idiff,'k-',label='I')
plt.plot(freqs,Qdiff,'r-',label='Q')
plt.plot(freqs,Udiff,'g-',label='U')
plt.plot(freqs,Vdiff,'m-',label='V')
else:
plt.plot(freqs,I_ON,'k-',label='I ON')
plt.plot(freqs,I_OFF,'k--',label='I OFF')
plt.plot(freqs,Q_ON,'r-',label='Q ON')
plt.plot(freqs,Q_OFF,'r--',label='Q OFF')
plt.plot(freqs,U_ON,'g-',label='U ON')
plt.plot(freqs,U_OFF,'g--',label='U OFF')
plt.plot(freqs,V_ON,'m-',label='V ON')
plt.plot(freqs,V_OFF,'m--',label='V OFF')
plt.legend()
plt.xlabel('Frequency (MHz)')
plt.title('Uncalibrated Full Stokes Noise Diode Spectrum')
plt.ylabel('Power (Counts)')
|
def plot_Stokes_diode(dio_cross,diff=True,feedtype='l',**kwargs):
'''
Plots the uncalibrated full stokes spectrum of the noise diode.
Use diff=False to plot both ON and OFF, or diff=True for ON-OFF
'''
#If diff=True, get ON-OFF. If not get ON and OFF separately
if diff==True:
Idiff,Qdiff,Udiff,Vdiff,freqs = get_diff(dio_cross,feedtype,**kwargs)
else:
obs = Waterfall(dio_cross,max_load=150)
freqs = obs.populate_freqs()
tsamp = obs.header['tsamp']
data = obs.data
I,Q,U,V = get_stokes(data,feedtype)
I_OFF,I_ON = foldcal(I,tsamp,**kwargs)
Q_OFF,Q_ON = foldcal(Q,tsamp,**kwargs)
U_OFF,U_ON = foldcal(U,tsamp,**kwargs)
V_OFF,V_ON = foldcal(V,tsamp,**kwargs)
#Plot spectra
if diff==True:
plt.plot(freqs,Idiff,'k-',label='I')
plt.plot(freqs,Qdiff,'r-',label='Q')
plt.plot(freqs,Udiff,'g-',label='U')
plt.plot(freqs,Vdiff,'m-',label='V')
else:
plt.plot(freqs,I_ON,'k-',label='I ON')
plt.plot(freqs,I_OFF,'k--',label='I OFF')
plt.plot(freqs,Q_ON,'r-',label='Q ON')
plt.plot(freqs,Q_OFF,'r--',label='Q OFF')
plt.plot(freqs,U_ON,'g-',label='U ON')
plt.plot(freqs,U_OFF,'g--',label='U OFF')
plt.plot(freqs,V_ON,'m-',label='V ON')
plt.plot(freqs,V_OFF,'m--',label='V OFF')
plt.legend()
plt.xlabel('Frequency (MHz)')
plt.title('Uncalibrated Full Stokes Noise Diode Spectrum')
plt.ylabel('Power (Counts)')
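A sketch of calling the plotter with a hypothetical file, showing both modes of the diff flag:

import matplotlib.pyplot as plt
from blimpy.calib_utils.calib_plots import plot_Stokes_diode

plot_Stokes_diode('diode_cross.fil', diff=True, feedtype='l')   # ON-OFF spectra
plt.figure()
plot_Stokes_diode('diode_cross.fil', diff=False, feedtype='l')  # ON and OFF separately
plt.show()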
|
[
"Plots",
"the",
"uncalibrated",
"full",
"stokes",
"spectrum",
"of",
"the",
"noise",
"diode",
".",
"Use",
"diff",
"=",
"False",
"to",
"plot",
"both",
"ON",
"and",
"OFF",
"or",
"diff",
"=",
"True",
"for",
"ON",
"-",
"OFF"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/calib_utils/calib_plots.py#L34-L74
|
[
"def",
"plot_Stokes_diode",
"(",
"dio_cross",
",",
"diff",
"=",
"True",
",",
"feedtype",
"=",
"'l'",
",",
"*",
"*",
"kwargs",
")",
":",
"#If diff=True, get ON-OFF. If not get ON and OFF separately",
"if",
"diff",
"==",
"True",
":",
"Idiff",
",",
"Qdiff",
",",
"Udiff",
",",
"Vdiff",
",",
"freqs",
"=",
"get_diff",
"(",
"dio_cross",
",",
"feedtype",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"obs",
"=",
"Waterfall",
"(",
"dio_cross",
",",
"max_load",
"=",
"150",
")",
"freqs",
"=",
"obs",
".",
"populate_freqs",
"(",
")",
"tsamp",
"=",
"obs",
".",
"header",
"[",
"'tsamp'",
"]",
"data",
"=",
"obs",
".",
"data",
"I",
",",
"Q",
",",
"U",
",",
"V",
"=",
"get_stokes",
"(",
"data",
",",
"feedtype",
")",
"I_OFF",
",",
"I_ON",
"=",
"foldcal",
"(",
"I",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"Q_OFF",
",",
"Q_ON",
"=",
"foldcal",
"(",
"Q",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"U_OFF",
",",
"U_ON",
"=",
"foldcal",
"(",
"U",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"V_OFF",
",",
"V_ON",
"=",
"foldcal",
"(",
"V",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"#Plot spectra",
"if",
"diff",
"==",
"True",
":",
"plt",
".",
"plot",
"(",
"freqs",
",",
"Idiff",
",",
"'k-'",
",",
"label",
"=",
"'I'",
")",
"plt",
".",
"plot",
"(",
"freqs",
",",
"Qdiff",
",",
"'r-'",
",",
"label",
"=",
"'Q'",
")",
"plt",
".",
"plot",
"(",
"freqs",
",",
"Udiff",
",",
"'g-'",
",",
"label",
"=",
"'U'",
")",
"plt",
".",
"plot",
"(",
"freqs",
",",
"Vdiff",
",",
"'m-'",
",",
"label",
"=",
"'V'",
")",
"else",
":",
"plt",
".",
"plot",
"(",
"freqs",
",",
"I_ON",
",",
"'k-'",
",",
"label",
"=",
"'I ON'",
")",
"plt",
".",
"plot",
"(",
"freqs",
",",
"I_OFF",
",",
"'k--'",
",",
"label",
"=",
"'I OFF'",
")",
"plt",
".",
"plot",
"(",
"freqs",
",",
"Q_ON",
",",
"'r-'",
",",
"label",
"=",
"'Q ON'",
")",
"plt",
".",
"plot",
"(",
"freqs",
",",
"Q_OFF",
",",
"'r--'",
",",
"label",
"=",
"'Q OFF'",
")",
"plt",
".",
"plot",
"(",
"freqs",
",",
"U_ON",
",",
"'g-'",
",",
"label",
"=",
"'U ON'",
")",
"plt",
".",
"plot",
"(",
"freqs",
",",
"U_OFF",
",",
"'g--'",
",",
"label",
"=",
"'U OFF'",
")",
"plt",
".",
"plot",
"(",
"freqs",
",",
"V_ON",
",",
"'m-'",
",",
"label",
"=",
"'V ON'",
")",
"plt",
".",
"plot",
"(",
"freqs",
",",
"V_OFF",
",",
"'m--'",
",",
"label",
"=",
"'V OFF'",
")",
"plt",
".",
"legend",
"(",
")",
"plt",
".",
"xlabel",
"(",
"'Frequency (MHz)'",
")",
"plt",
".",
"title",
"(",
"'Uncalibrated Full Stokes Noise Diode Spectrum'",
")",
"plt",
".",
"ylabel",
"(",
"'Power (Counts)'",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
test
|
plot_calibrated_diode
|
Plots the corrected noise diode spectrum for a given noise diode measurement
after application of the inverse Mueller matrix for the electronics chain.
|
blimpy/calib_utils/calib_plots.py
|
def plot_calibrated_diode(dio_cross,chan_per_coarse=8,feedtype='l',**kwargs):
'''
Plots the corrected noise diode spectrum for a given noise diode measurement
after application of the inverse Mueller matrix for the electronics chain.
'''
#Get full stokes data for the ND observation
obs = Waterfall(dio_cross,max_load=150)
freqs = obs.populate_freqs()
tsamp = obs.header['tsamp']
data = obs.data
obs = None
I,Q,U,V = get_stokes(data,feedtype)
data = None
#Calculate Mueller Matrix variables for each coarse channel
psis = phase_offsets(I,Q,U,V,tsamp,chan_per_coarse,feedtype,**kwargs)
G = gain_offsets(I,Q,U,V,tsamp,chan_per_coarse,feedtype,**kwargs)
#Apply the Mueller matrix to original noise diode data and refold
I,Q,U,V = apply_Mueller(I,Q,U,V,G,psis,chan_per_coarse,feedtype)
I_OFF,I_ON = foldcal(I,tsamp,**kwargs)
Q_OFF,Q_ON = foldcal(Q,tsamp,**kwargs)
U_OFF,U_ON = foldcal(U,tsamp,**kwargs)
V_OFF,V_ON = foldcal(V,tsamp,**kwargs)
#Delete data arrays for space
I = None
Q = None
U = None
V = None
#Plot new ON-OFF spectra
plt.plot(freqs,I_ON-I_OFF,'k-',label='I')
plt.plot(freqs,Q_ON-Q_OFF,'r-',label='Q')
plt.plot(freqs,U_ON-U_OFF,'g-',label='U')
plt.plot(freqs,V_ON-V_OFF,'m-',label='V')
plt.legend()
plt.xlabel('Frequency (MHz)')
plt.title('Calibrated Full Stokes Noise Diode Spectrum')
plt.ylabel('Power (Counts)')
|
def plot_calibrated_diode(dio_cross,chan_per_coarse=8,feedtype='l',**kwargs):
'''
Plots the corrected noise diode spectrum for a given noise diode measurement
after application of the inverse Mueller matrix for the electronics chain.
'''
#Get full stokes data for the ND observation
obs = Waterfall(dio_cross,max_load=150)
freqs = obs.populate_freqs()
tsamp = obs.header['tsamp']
data = obs.data
obs = None
I,Q,U,V = get_stokes(data,feedtype)
data = None
#Calculate Mueller Matrix variables for each coarse channel
psis = phase_offsets(I,Q,U,V,tsamp,chan_per_coarse,feedtype,**kwargs)
G = gain_offsets(I,Q,U,V,tsamp,chan_per_coarse,feedtype,**kwargs)
#Apply the Mueller matrix to original noise diode data and refold
I,Q,U,V = apply_Mueller(I,Q,U,V,G,psis,chan_per_coarse,feedtype)
I_OFF,I_ON = foldcal(I,tsamp,**kwargs)
Q_OFF,Q_ON = foldcal(Q,tsamp,**kwargs)
U_OFF,U_ON = foldcal(U,tsamp,**kwargs)
V_OFF,V_ON = foldcal(V,tsamp,**kwargs)
#Delete data arrays for space
I = None
Q = None
U = None
V = None
#Plot new ON-OFF spectra
plt.plot(freqs,I_ON-I_OFF,'k-',label='I')
plt.plot(freqs,Q_ON-Q_OFF,'r-',label='Q')
plt.plot(freqs,U_ON-U_OFF,'g-',label='U')
plt.plot(freqs,V_ON-V_OFF,'m-',label='V')
plt.legend()
plt.xlabel('Frequency (MHz)')
plt.title('Calibrated Full Stokes Noise Diode Spectrum')
plt.ylabel('Power (Counts)')
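A usage sketch with a placeholder filename; note that chan_per_coarse should match the file's number of fine channels per coarse channel, since the gain and phase offsets are solved per coarse channel:

import matplotlib.pyplot as plt
from blimpy.calib_utils.calib_plots import plot_calibrated_diode

plot_calibrated_diode('diode_cross.fil', chan_per_coarse=8, feedtype='l')
plt.show()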
|
[
"Plots",
"the",
"corrected",
"noise",
"diode",
"spectrum",
"for",
"a",
"given",
"noise",
"diode",
"measurement",
"after",
"application",
"of",
"the",
"inverse",
"Mueller",
"matrix",
"for",
"the",
"electronics",
"chain",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/calib_utils/calib_plots.py#L77-L117
|
[
"def",
"plot_calibrated_diode",
"(",
"dio_cross",
",",
"chan_per_coarse",
"=",
"8",
",",
"feedtype",
"=",
"'l'",
",",
"*",
"*",
"kwargs",
")",
":",
"#Get full stokes data for the ND observation",
"obs",
"=",
"Waterfall",
"(",
"dio_cross",
",",
"max_load",
"=",
"150",
")",
"freqs",
"=",
"obs",
".",
"populate_freqs",
"(",
")",
"tsamp",
"=",
"obs",
".",
"header",
"[",
"'tsamp'",
"]",
"data",
"=",
"obs",
".",
"data",
"obs",
"=",
"None",
"I",
",",
"Q",
",",
"U",
",",
"V",
"=",
"get_stokes",
"(",
"data",
",",
"feedtype",
")",
"data",
"=",
"None",
"#Calculate Mueller Matrix variables for each coarse channel",
"psis",
"=",
"phase_offsets",
"(",
"I",
",",
"Q",
",",
"U",
",",
"V",
",",
"tsamp",
",",
"chan_per_coarse",
",",
"feedtype",
",",
"*",
"*",
"kwargs",
")",
"G",
"=",
"gain_offsets",
"(",
"I",
",",
"Q",
",",
"U",
",",
"V",
",",
"tsamp",
",",
"chan_per_coarse",
",",
"feedtype",
",",
"*",
"*",
"kwargs",
")",
"#Apply the Mueller matrix to original noise diode data and refold",
"I",
",",
"Q",
",",
"U",
",",
"V",
"=",
"apply_Mueller",
"(",
"I",
",",
"Q",
",",
"U",
",",
"V",
",",
"G",
",",
"psis",
",",
"chan_per_coarse",
",",
"feedtype",
")",
"I_OFF",
",",
"I_ON",
"=",
"foldcal",
"(",
"I",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"Q_OFF",
",",
"Q_ON",
"=",
"foldcal",
"(",
"Q",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"U_OFF",
",",
"U_ON",
"=",
"foldcal",
"(",
"U",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"V_OFF",
",",
"V_ON",
"=",
"foldcal",
"(",
"V",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"#Delete data arrays for space",
"I",
"=",
"None",
"Q",
"=",
"None",
"U",
"=",
"None",
"V",
"=",
"None",
"#Plot new ON-OFF spectra",
"plt",
".",
"plot",
"(",
"freqs",
",",
"I_ON",
"-",
"I_OFF",
",",
"'k-'",
",",
"label",
"=",
"'I'",
")",
"plt",
".",
"plot",
"(",
"freqs",
",",
"Q_ON",
"-",
"Q_OFF",
",",
"'r-'",
",",
"label",
"=",
"'Q'",
")",
"plt",
".",
"plot",
"(",
"freqs",
",",
"U_ON",
"-",
"U_OFF",
",",
"'g-'",
",",
"label",
"=",
"'U'",
")",
"plt",
".",
"plot",
"(",
"freqs",
",",
"V_ON",
"-",
"V_OFF",
",",
"'m-'",
",",
"label",
"=",
"'V'",
")",
"plt",
".",
"legend",
"(",
")",
"plt",
".",
"xlabel",
"(",
"'Frequency (MHz)'",
")",
"plt",
".",
"title",
"(",
"'Calibrated Full Stokes Noise Diode Spectrum'",
")",
"plt",
".",
"ylabel",
"(",
"'Power (Counts)'",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
test
|
plot_phase_offsets
|
Plots the calculated phase offsets of each coarse channel along with
the UV (or QU) noise diode spectrum for comparison
|
blimpy/calib_utils/calib_plots.py
|
def plot_phase_offsets(dio_cross,chan_per_coarse=8,feedtype='l',ax1=None,ax2=None,legend=True,**kwargs):
'''
Plots the calculated phase offsets of each coarse channel along with
the UV (or QU) noise diode spectrum for comparison
'''
#Get ON-OFF ND spectra
Idiff,Qdiff,Udiff,Vdiff,freqs = get_diff(dio_cross,feedtype,**kwargs)
obs = Waterfall(dio_cross,max_load=150)
tsamp = obs.header['tsamp']
data = obs.data
obs = None
I,Q,U,V = get_stokes(data,feedtype)
#Get phase offsets and convert to degrees
coarse_psis = phase_offsets(I,Q,U,V,tsamp,chan_per_coarse,feedtype,**kwargs)
coarse_freqs = convert_to_coarse(freqs,chan_per_coarse)
coarse_degs = np.degrees(coarse_psis)
#Plot phase offsets
if ax2==None:
plt.subplot(211)
else:
axPsi = plt.axes(ax2)
plt.setp(axPsi.get_xticklabels(),visible=False)
plt.plot(coarse_freqs,coarse_degs,'ko',markersize=2,label='Coarse Channel $\psi$')
plt.ylabel('Degrees')
plt.grid(True)
plt.title('Phase Offsets')
if legend==True:
plt.legend()
#Plot U and V spectra
if ax1==None:
plt.subplot(212)
else:
axUV = plt.axes(ax1)
plt.plot(freqs,Udiff,'g-',label='U')
if feedtype=='l':
plt.plot(freqs,Vdiff,'m-',label='V')
if feedtype=='c':
plt.plot(freqs,Qdiff,'r-',label='Q')
plt.xlabel('Frequency (MHz)')
plt.ylabel('Power (Counts)')
plt.grid(True)
if legend==True:
plt.legend()
|
def plot_phase_offsets(dio_cross,chan_per_coarse=8,feedtype='l',ax1=None,ax2=None,legend=True,**kwargs):
'''
Plots the calculated phase offsets of each coarse channel along with
the UV (or QU) noise diode spectrum for comparison
'''
#Get ON-OFF ND spectra
Idiff,Qdiff,Udiff,Vdiff,freqs = get_diff(dio_cross,feedtype,**kwargs)
obs = Waterfall(dio_cross,max_load=150)
tsamp = obs.header['tsamp']
data = obs.data
obs = None
I,Q,U,V = get_stokes(data,feedtype)
#Get phase offsets and convert to degrees
coarse_psis = phase_offsets(I,Q,U,V,tsamp,chan_per_coarse,feedtype,**kwargs)
coarse_freqs = convert_to_coarse(freqs,chan_per_coarse)
coarse_degs = np.degrees(coarse_psis)
#Plot phase offsets
if ax2==None:
plt.subplot(211)
else:
axPsi = plt.axes(ax2)
plt.setp(axPsi.get_xticklabels(),visible=False)
plt.plot(coarse_freqs,coarse_degs,'ko',markersize=2,label='Coarse Channel $\psi$')
plt.ylabel('Degrees')
plt.grid(True)
plt.title('Phase Offsets')
if legend==True:
plt.legend()
#Plot U and V spectra
if ax1==None:
plt.subplot(212)
else:
axUV = plt.axes(ax1)
plt.plot(freqs,Udiff,'g-',label='U')
if feedtype=='l':
plt.plot(freqs,Vdiff,'m-',label='V')
if feedtype=='c':
plt.plot(freqs,Qdiff,'r-',label='Q')
plt.xlabel('Frequency (MHz)')
plt.ylabel('Power (Counts)')
plt.grid(True)
if legend==True:
plt.legend()
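Note that ax1 and ax2 here are axes rectangles ([left, bottom, width, height] in figure coordinates) handed to plt.axes(), not Axes objects: ax2 positions the upper psi panel and ax1 the lower spectra panel. A sketch with a hypothetical file:

import matplotlib.pyplot as plt
from blimpy.calib_utils.calib_plots import plot_phase_offsets

rect_spec = [0.1, 0.10, 0.8, 0.35]  # lower panel: U and V (or Q) spectra
rect_psi = [0.1, 0.55, 0.8, 0.35]   # upper panel: per-coarse-channel phase offsets
plot_phase_offsets('diode_cross.fil', ax1=rect_spec, ax2=rect_psi)
plt.show()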
|
[
"Plots",
"the",
"calculated",
"phase",
"offsets",
"of",
"each",
"coarse",
"channel",
"along",
"with",
"the",
"UV",
"(",
"or",
"QU",
")",
"noise",
"diode",
"spectrum",
"for",
"comparison"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/calib_utils/calib_plots.py#L120-L165
|
[
"def",
"plot_phase_offsets",
"(",
"dio_cross",
",",
"chan_per_coarse",
"=",
"8",
",",
"feedtype",
"=",
"'l'",
",",
"ax1",
"=",
"None",
",",
"ax2",
"=",
"None",
",",
"legend",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"#Get ON-OFF ND spectra",
"Idiff",
",",
"Qdiff",
",",
"Udiff",
",",
"Vdiff",
",",
"freqs",
"=",
"get_diff",
"(",
"dio_cross",
",",
"feedtype",
",",
"*",
"*",
"kwargs",
")",
"obs",
"=",
"Waterfall",
"(",
"dio_cross",
",",
"max_load",
"=",
"150",
")",
"tsamp",
"=",
"obs",
".",
"header",
"[",
"'tsamp'",
"]",
"data",
"=",
"obs",
".",
"data",
"obs",
"=",
"None",
"I",
",",
"Q",
",",
"U",
",",
"V",
"=",
"get_stokes",
"(",
"data",
",",
"feedtype",
")",
"#Get phase offsets and convert to degrees",
"coarse_psis",
"=",
"phase_offsets",
"(",
"I",
",",
"Q",
",",
"U",
",",
"V",
",",
"tsamp",
",",
"chan_per_coarse",
",",
"feedtype",
",",
"*",
"*",
"kwargs",
")",
"coarse_freqs",
"=",
"convert_to_coarse",
"(",
"freqs",
",",
"chan_per_coarse",
")",
"coarse_degs",
"=",
"np",
".",
"degrees",
"(",
"coarse_psis",
")",
"#Plot phase offsets",
"if",
"ax2",
"==",
"None",
":",
"plt",
".",
"subplot",
"(",
"211",
")",
"else",
":",
"axPsi",
"=",
"plt",
".",
"axes",
"(",
"ax2",
")",
"plt",
".",
"setp",
"(",
"axPsi",
".",
"get_xticklabels",
"(",
")",
",",
"visible",
"=",
"False",
")",
"plt",
".",
"plot",
"(",
"coarse_freqs",
",",
"coarse_degs",
",",
"'ko'",
",",
"markersize",
"=",
"2",
",",
"label",
"=",
"'Coarse Channel $\\psi$'",
")",
"plt",
".",
"ylabel",
"(",
"'Degrees'",
")",
"plt",
".",
"grid",
"(",
"True",
")",
"plt",
".",
"title",
"(",
"'Phase Offsets'",
")",
"if",
"legend",
"==",
"True",
":",
"plt",
".",
"legend",
"(",
")",
"#Plot U and V spectra",
"if",
"ax1",
"==",
"None",
":",
"plt",
".",
"subplot",
"(",
"212",
")",
"else",
":",
"axUV",
"=",
"plt",
".",
"axes",
"(",
"ax1",
")",
"plt",
".",
"plot",
"(",
"freqs",
",",
"Udiff",
",",
"'g-'",
",",
"label",
"=",
"'U'",
")",
"if",
"feedtype",
"==",
"'l'",
":",
"plt",
".",
"plot",
"(",
"freqs",
",",
"Vdiff",
",",
"'m-'",
",",
"label",
"=",
"'V'",
")",
"if",
"feedtype",
"==",
"'c'",
":",
"plt",
".",
"plot",
"(",
"freqs",
",",
"Qdiff",
",",
"'r-'",
",",
"label",
"=",
"'Q'",
")",
"plt",
".",
"xlabel",
"(",
"'Frequency (MHz)'",
")",
"plt",
".",
"ylabel",
"(",
"'Power (Counts)'",
")",
"plt",
".",
"grid",
"(",
"True",
")",
"if",
"legend",
"==",
"True",
":",
"plt",
".",
"legend",
"(",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
test
|
plot_gain_offsets
|
Plots the calculated gain offsets of each coarse channel along with
the time averaged power spectra of the X and Y feeds
|
blimpy/calib_utils/calib_plots.py
|
def plot_gain_offsets(dio_cross,dio_chan_per_coarse=8,feedtype='l',ax1=None,ax2=None,legend=True,**kwargs):
'''
Plots the calculated gain offsets of each coarse channel along with
the time averaged power spectra of the X and Y feeds
'''
#Get ON-OFF ND spectra
Idiff,Qdiff,Udiff,Vdiff,freqs = get_diff(dio_cross,feedtype,**kwargs)
obs = Waterfall(dio_cross,max_load=150)
tsamp = obs.header['tsamp']
data = obs.data
obs = None
I,Q,U,V = get_stokes(data,feedtype)
#Get gain offsets for each coarse channel
coarse_G = gain_offsets(I,Q,U,V,tsamp,dio_chan_per_coarse,feedtype,**kwargs)
coarse_freqs = convert_to_coarse(freqs,dio_chan_per_coarse)
#Get X and Y spectra for the noise diode ON and OFF
#If using circular feeds these correspond to LL and RR
XX_OFF,XX_ON = foldcal(np.expand_dims(data[:,0,:],axis=1),tsamp,**kwargs)
YY_OFF,YY_ON = foldcal(np.expand_dims(data[:,1,:],axis=1),tsamp,**kwargs)
if ax1==None:
plt.subplot(211)
else:
axG = plt.axes(ax1)
plt.setp(axG.get_xticklabels(),visible=False)
plt.plot(coarse_freqs,coarse_G,'ko',markersize=2)
plt.ylabel(r'$\frac{\Delta G}{2}$',rotation=90)
if feedtype=='l':
plt.title('XY Gain Difference')
if feedtype=='c':
plt.title('LR Gain Difference')
plt.grid(True)
if ax2==None:
plt.subplot(212)
else:
axXY = plt.axes(ax2,sharex=axG)
if feedtype=='l':
plt.plot(freqs,XX_OFF,'b-',label='XX')
plt.plot(freqs,YY_OFF,'r-',label='YY')
if feedtype=='c':
plt.plot(freqs,XX_OFF,'b-',label='LL')
plt.plot(freqs,YY_OFF,'r-',label='RR')
plt.xlabel('Frequency (MHz)')
plt.ylabel('Power (Counts)')
if legend==True:
plt.legend()
|
def plot_gain_offsets(dio_cross,dio_chan_per_coarse=8,feedtype='l',ax1=None,ax2=None,legend=True,**kwargs):
'''
Plots the calculated gain offsets of each coarse channel along with
the time averaged power spectra of the X and Y feeds
'''
#Get ON-OFF ND spectra
Idiff,Qdiff,Udiff,Vdiff,freqs = get_diff(dio_cross,feedtype,**kwargs)
obs = Waterfall(dio_cross,max_load=150)
tsamp = obs.header['tsamp']
data = obs.data
obs = None
I,Q,U,V = get_stokes(data,feedtype)
#Get gain offsets for each coarse channel
coarse_G = gain_offsets(I,Q,U,V,tsamp,dio_chan_per_coarse,feedtype,**kwargs)
coarse_freqs = convert_to_coarse(freqs,dio_chan_per_coarse)
#Get X and Y spectra for the noise diode ON and OFF
#If using circular feeds these correspond to LL and RR
XX_OFF,XX_ON = foldcal(np.expand_dims(data[:,0,:],axis=1),tsamp,**kwargs)
YY_OFF,YY_ON = foldcal(np.expand_dims(data[:,1,:],axis=1),tsamp,**kwargs)
if ax1==None:
plt.subplot(211)
else:
axG = plt.axes(ax1)
plt.setp(axG.get_xticklabels(),visible=False)
plt.plot(coarse_freqs,coarse_G,'ko',markersize=2)
plt.ylabel(r'$\frac{\Delta G}{2}$',rotation=90)
if feedtype=='l':
plt.title('XY Gain Difference')
if feedtype=='c':
plt.title('LR Gain Difference')
plt.grid(True)
if ax2==None:
plt.subplot(212)
else:
axXY = plt.axes(ax2,sharex=axG)
if feedtype=='l':
plt.plot(freqs,XX_OFF,'b-',label='XX')
plt.plot(freqs,YY_OFF,'r-',label='YY')
if feedtype=='c':
plt.plot(freqs,XX_OFF,'b-',label='LL')
plt.plot(freqs,YY_OFF,'r-',label='RR')
plt.xlabel('Frequency (MHz)')
plt.ylabel('Power (Counts)')
if legend==True:
plt.legend()
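The panel mapping is reversed relative to plot_phase_offsets: here ax1 positions the upper gain panel and ax2 the lower spectra panel. A sketch with a hypothetical file:

import matplotlib.pyplot as plt
from blimpy.calib_utils.calib_plots import plot_gain_offsets

rect_gain = [0.1, 0.55, 0.8, 0.35]  # upper panel: Delta G / 2 per coarse channel
rect_spec = [0.1, 0.10, 0.8, 0.35]  # lower panel: XX/YY (or LL/RR) spectra
plot_gain_offsets('diode_cross.fil', ax1=rect_gain, ax2=rect_spec)
plt.show()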
|
[
"Plots",
"the",
"calculated",
"gain",
"offsets",
"of",
"each",
"coarse",
"channel",
"along",
"with",
"the",
"time",
"averaged",
"power",
"spectra",
"of",
"the",
"X",
"and",
"Y",
"feeds"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/calib_utils/calib_plots.py#L167-L215
|
[
"def",
"plot_gain_offsets",
"(",
"dio_cross",
",",
"dio_chan_per_coarse",
"=",
"8",
",",
"feedtype",
"=",
"'l'",
",",
"ax1",
"=",
"None",
",",
"ax2",
"=",
"None",
",",
"legend",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"#Get ON-OFF ND spectra",
"Idiff",
",",
"Qdiff",
",",
"Udiff",
",",
"Vdiff",
",",
"freqs",
"=",
"get_diff",
"(",
"dio_cross",
",",
"feedtype",
",",
"*",
"*",
"kwargs",
")",
"obs",
"=",
"Waterfall",
"(",
"dio_cross",
",",
"max_load",
"=",
"150",
")",
"tsamp",
"=",
"obs",
".",
"header",
"[",
"'tsamp'",
"]",
"data",
"=",
"obs",
".",
"data",
"obs",
"=",
"None",
"I",
",",
"Q",
",",
"U",
",",
"V",
"=",
"get_stokes",
"(",
"data",
",",
"feedtype",
")",
"#Get phase offsets and convert to degrees",
"coarse_G",
"=",
"gain_offsets",
"(",
"I",
",",
"Q",
",",
"U",
",",
"V",
",",
"tsamp",
",",
"dio_chan_per_coarse",
",",
"feedtype",
",",
"*",
"*",
"kwargs",
")",
"coarse_freqs",
"=",
"convert_to_coarse",
"(",
"freqs",
",",
"dio_chan_per_coarse",
")",
"#Get X and Y spectra for the noise diode ON and OFF",
"#If using circular feeds these correspond to LL and RR",
"XX_OFF",
",",
"XX_ON",
"=",
"foldcal",
"(",
"np",
".",
"expand_dims",
"(",
"data",
"[",
":",
",",
"0",
",",
":",
"]",
",",
"axis",
"=",
"1",
")",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"YY_OFF",
",",
"YY_ON",
"=",
"foldcal",
"(",
"np",
".",
"expand_dims",
"(",
"data",
"[",
":",
",",
"1",
",",
":",
"]",
",",
"axis",
"=",
"1",
")",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"if",
"ax1",
"==",
"None",
":",
"plt",
".",
"subplot",
"(",
"211",
")",
"else",
":",
"axG",
"=",
"plt",
".",
"axes",
"(",
"ax1",
")",
"plt",
".",
"setp",
"(",
"axG",
".",
"get_xticklabels",
"(",
")",
",",
"visible",
"=",
"False",
")",
"plt",
".",
"plot",
"(",
"coarse_freqs",
",",
"coarse_G",
",",
"'ko'",
",",
"markersize",
"=",
"2",
")",
"plt",
".",
"ylabel",
"(",
"r'$\\frac{\\Delta G}{2}$'",
",",
"rotation",
"=",
"90",
")",
"if",
"feedtype",
"==",
"'l'",
":",
"plt",
".",
"title",
"(",
"'XY Gain Difference'",
")",
"if",
"feedtype",
"==",
"'c'",
":",
"plt",
".",
"title",
"(",
"'LR Gain Difference'",
")",
"plt",
".",
"grid",
"(",
"True",
")",
"if",
"ax2",
"==",
"None",
":",
"plt",
".",
"subplot",
"(",
"212",
")",
"else",
":",
"axXY",
"=",
"plt",
".",
"axes",
"(",
"ax2",
",",
"sharex",
"=",
"axG",
")",
"if",
"feedtype",
"==",
"'l'",
":",
"plt",
".",
"plot",
"(",
"freqs",
",",
"XX_OFF",
",",
"'b-'",
",",
"label",
"=",
"'XX'",
")",
"plt",
".",
"plot",
"(",
"freqs",
",",
"YY_OFF",
",",
"'r-'",
",",
"label",
"=",
"'YY'",
")",
"if",
"feedtype",
"==",
"'c'",
":",
"plt",
".",
"plot",
"(",
"freqs",
",",
"XX_OFF",
",",
"'b-'",
",",
"label",
"=",
"'LL'",
")",
"plt",
".",
"plot",
"(",
"freqs",
",",
"YY_OFF",
",",
"'r-'",
",",
"label",
"=",
"'RR'",
")",
"plt",
".",
"xlabel",
"(",
"'Frequency (MHz)'",
")",
"plt",
".",
"ylabel",
"(",
"'Power (Counts)'",
")",
"if",
"legend",
"==",
"True",
":",
"plt",
".",
"legend",
"(",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
test
|
plot_diode_fold
|
Plots the calculated average power and time sampling of ON (red) and
OFF (blue) for a noise diode measurement over the observation time series
|
blimpy/calib_utils/calib_plots.py
|
def plot_diode_fold(dio_cross,bothfeeds=True,feedtype='l',min_samp=-500,max_samp=7000,legend=True,**kwargs):
'''
Plots the calculated average power and time sampling of ON (red) and
OFF (blue) for a noise diode measurement over the observation time series
'''
#Get full stokes data of ND measurement
obs = Waterfall(dio_cross,max_load=150)
tsamp = obs.header['tsamp']
data = obs.data
obs = None
I,Q,U,V = get_stokes(data,feedtype)
#Calculate time series, OFF and ON averages, and time samples for each
tseriesI = np.squeeze(np.mean(I,axis=2))
I_OFF,I_ON,OFFints,ONints = foldcal(I,tsamp,inds=True,**kwargs)
if bothfeeds==True:
if feedtype=='l':
tseriesQ = np.squeeze(np.mean(Q,axis=2))
tseriesX = (tseriesI+tseriesQ)/2
tseriesY = (tseriesI-tseriesQ)/2
if feedtype=='c':
tseriesV = np.squeeze(np.mean(V,axis=2))
tseriesR = (tseriesI+tseriesV)/2
tseriesL = (tseriesI-tseriesV)/2
stop = ONints[-1,1]
#Plot time series and calculated average for ON and OFF
if bothfeeds==False:
plt.plot(tseriesI[0:stop],'k-',label='Total Power')
for i in ONints:
plt.plot(np.arange(i[0],i[1]),np.full((i[1]-i[0]),np.mean(I_ON)),'r-')
for i in OFFints:
plt.plot(np.arange(i[0],i[1]),np.full((i[1]-i[0]),np.mean(I_OFF)),'b-')
else:
if feedtype=='l':
diff = np.mean(tseriesX)-np.mean(tseriesY)
plt.plot(tseriesX[0:stop],'b-',label='XX')
plt.plot(tseriesY[0:stop]+diff,'r-',label='YY (shifted)')
if feedtype=='c':
diff = np.mean(tseriesL)-np.mean(tseriesR)
plt.plot(tseriesL[0:stop],'b-',label='LL')
plt.plot(tseriesR[0:stop]+diff,'r-',label='RR (shifted)')
#Calculate plotting limits
if bothfeeds==False:
lowlim = np.mean(I_OFF)-(np.mean(I_ON)-np.mean(I_OFF))/2
hilim = np.mean(I_ON)+(np.mean(I_ON)-np.mean(I_OFF))/2
plt.ylim((lowlim,hilim))
plt.xlim((min_samp,max_samp))
plt.xlabel('Time Sample Number')
plt.ylabel('Power (Counts)')
plt.title('Noise Diode Fold')
if legend==True:
plt.legend()
|
def plot_diode_fold(dio_cross,bothfeeds=True,feedtype='l',min_samp=-500,max_samp=7000,legend=True,**kwargs):
'''
Plots the calculated average power and time sampling of ON (red) and
OFF (blue) for a noise diode measurement over the observation time series
'''
#Get full stokes data of ND measurement
obs = Waterfall(dio_cross,max_load=150)
tsamp = obs.header['tsamp']
data = obs.data
obs = None
I,Q,U,V = get_stokes(data,feedtype)
#Calculate time series, OFF and ON averages, and time samples for each
tseriesI = np.squeeze(np.mean(I,axis=2))
I_OFF,I_ON,OFFints,ONints = foldcal(I,tsamp,inds=True,**kwargs)
if bothfeeds==True:
if feedtype=='l':
tseriesQ = np.squeeze(np.mean(Q,axis=2))
tseriesX = (tseriesI+tseriesQ)/2
tseriesY = (tseriesI-tseriesQ)/2
if feedtype=='c':
tseriesV = np.squeeze(np.mean(V,axis=2))
tseriesR = (tseriesI+tseriesV)/2
tseriesL = (tseriesI-tseriesV)/2
stop = ONints[-1,1]
#Plot time series and calculated average for ON and OFF
if bothfeeds==False:
plt.plot(tseriesI[0:stop],'k-',label='Total Power')
for i in ONints:
plt.plot(np.arange(i[0],i[1]),np.full((i[1]-i[0]),np.mean(I_ON)),'r-')
for i in OFFints:
plt.plot(np.arange(i[0],i[1]),np.full((i[1]-i[0]),np.mean(I_OFF)),'b-')
else:
if feedtype=='l':
diff = np.mean(tseriesX)-np.mean(tseriesY)
plt.plot(tseriesX[0:stop],'b-',label='XX')
plt.plot(tseriesY[0:stop]+diff,'r-',label='YY (shifted)')
if feedtype=='c':
diff = np.mean(tseriesL)-np.mean(tseriesR)
plt.plot(tseriesL[0:stop],'b-',label='LL')
plt.plot(tseriesR[0:stop]+diff,'r-',label='RR (shifted)')
#Calculate plotting limits
if bothfeeds==False:
lowlim = np.mean(I_OFF)-(np.mean(I_ON)-np.mean(I_OFF))/2
hilim = np.mean(I_ON)+(np.mean(I_ON)-np.mean(I_OFF))/2
plt.ylim((lowlim,hilim))
plt.xlim((min_samp,max_samp))
plt.xlabel('Time Sample Number')
plt.ylabel('Power (Counts)')
plt.title('Noise Diode Fold')
if legend==True:
plt.legend()
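The per-feed time series above follow from the Stokes definitions for linear feeds, I = XX + YY and Q = XX - YY, so XX = (I+Q)/2 and YY = (I-Q)/2 (and analogously RR = (I+V)/2, LL = (I-V)/2 for circular feeds). A usage sketch with a placeholder file:

import matplotlib.pyplot as plt
from blimpy.calib_utils.calib_plots import plot_diode_fold

plot_diode_fold('diode_cross.fil', bothfeeds=True, feedtype='l', min_samp=0, max_samp=4000)
plt.show()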
|
[
"Plots",
"the",
"calculated",
"average",
"power",
"and",
"time",
"sampling",
"of",
"ON",
"(",
"red",
")",
"and",
"OFF",
"(",
"blue",
")",
"for",
"a",
"noise",
"diode",
"measurement",
"over",
"the",
"observation",
"time",
"series"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/calib_utils/calib_plots.py#L217-L272
|
[
"def",
"plot_diode_fold",
"(",
"dio_cross",
",",
"bothfeeds",
"=",
"True",
",",
"feedtype",
"=",
"'l'",
",",
"min_samp",
"=",
"-",
"500",
",",
"max_samp",
"=",
"7000",
",",
"legend",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"#Get full stokes data of ND measurement",
"obs",
"=",
"Waterfall",
"(",
"dio_cross",
",",
"max_load",
"=",
"150",
")",
"tsamp",
"=",
"obs",
".",
"header",
"[",
"'tsamp'",
"]",
"data",
"=",
"obs",
".",
"data",
"obs",
"=",
"None",
"I",
",",
"Q",
",",
"U",
",",
"V",
"=",
"get_stokes",
"(",
"data",
",",
"feedtype",
")",
"#Calculate time series, OFF and ON averages, and time samples for each",
"tseriesI",
"=",
"np",
".",
"squeeze",
"(",
"np",
".",
"mean",
"(",
"I",
",",
"axis",
"=",
"2",
")",
")",
"I_OFF",
",",
"I_ON",
",",
"OFFints",
",",
"ONints",
"=",
"foldcal",
"(",
"I",
",",
"tsamp",
",",
"inds",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"if",
"bothfeeds",
"==",
"True",
":",
"if",
"feedtype",
"==",
"'l'",
":",
"tseriesQ",
"=",
"np",
".",
"squeeze",
"(",
"np",
".",
"mean",
"(",
"Q",
",",
"axis",
"=",
"2",
")",
")",
"tseriesX",
"=",
"(",
"tseriesI",
"+",
"tseriesQ",
")",
"/",
"2",
"tseriesY",
"=",
"(",
"tseriesI",
"-",
"tseriesQ",
")",
"/",
"2",
"if",
"feedtype",
"==",
"'c'",
":",
"tseriesV",
"=",
"np",
".",
"squeeze",
"(",
"np",
".",
"mean",
"(",
"V",
",",
"axis",
"=",
"2",
")",
")",
"tseriesR",
"=",
"(",
"tseriesI",
"+",
"tseriesV",
")",
"/",
"2",
"tseriesL",
"=",
"(",
"tseriesI",
"-",
"tseriesV",
")",
"/",
"2",
"stop",
"=",
"ONints",
"[",
"-",
"1",
",",
"1",
"]",
"#Plot time series and calculated average for ON and OFF",
"if",
"bothfeeds",
"==",
"False",
":",
"plt",
".",
"plot",
"(",
"tseriesI",
"[",
"0",
":",
"stop",
"]",
",",
"'k-'",
",",
"label",
"=",
"'Total Power'",
")",
"for",
"i",
"in",
"ONints",
":",
"plt",
".",
"plot",
"(",
"np",
".",
"arange",
"(",
"i",
"[",
"0",
"]",
",",
"i",
"[",
"1",
"]",
")",
",",
"np",
".",
"full",
"(",
"(",
"i",
"[",
"1",
"]",
"-",
"i",
"[",
"0",
"]",
")",
",",
"np",
".",
"mean",
"(",
"I_ON",
")",
")",
",",
"'r-'",
")",
"for",
"i",
"in",
"OFFints",
":",
"plt",
".",
"plot",
"(",
"np",
".",
"arange",
"(",
"i",
"[",
"0",
"]",
",",
"i",
"[",
"1",
"]",
")",
",",
"np",
".",
"full",
"(",
"(",
"i",
"[",
"1",
"]",
"-",
"i",
"[",
"0",
"]",
")",
",",
"np",
".",
"mean",
"(",
"I_OFF",
")",
")",
",",
"'b-'",
")",
"else",
":",
"if",
"feedtype",
"==",
"'l'",
":",
"diff",
"=",
"np",
".",
"mean",
"(",
"tseriesX",
")",
"-",
"np",
".",
"mean",
"(",
"tseriesY",
")",
"plt",
".",
"plot",
"(",
"tseriesX",
"[",
"0",
":",
"stop",
"]",
",",
"'b-'",
",",
"label",
"=",
"'XX'",
")",
"plt",
".",
"plot",
"(",
"tseriesY",
"[",
"0",
":",
"stop",
"]",
"+",
"diff",
",",
"'r-'",
",",
"label",
"=",
"'YY (shifted)'",
")",
"if",
"feedtype",
"==",
"'c'",
":",
"diff",
"=",
"np",
".",
"mean",
"(",
"tseriesL",
")",
"-",
"np",
".",
"mean",
"(",
"tseriesR",
")",
"plt",
".",
"plot",
"(",
"tseriesL",
"[",
"0",
":",
"stop",
"]",
",",
"'b-'",
",",
"label",
"=",
"'LL'",
")",
"plt",
".",
"plot",
"(",
"tseriesR",
"[",
"0",
":",
"stop",
"]",
"+",
"diff",
",",
"'r-'",
",",
"label",
"=",
"'RR (shifted)'",
")",
"#Calculate plotting limits",
"if",
"bothfeeds",
"==",
"False",
":",
"lowlim",
"=",
"np",
".",
"mean",
"(",
"I_OFF",
")",
"-",
"(",
"np",
".",
"mean",
"(",
"I_ON",
")",
"-",
"np",
".",
"mean",
"(",
"I_OFF",
")",
")",
"/",
"2",
"hilim",
"=",
"np",
".",
"mean",
"(",
"I_ON",
")",
"+",
"(",
"np",
".",
"mean",
"(",
"I_ON",
")",
"-",
"np",
".",
"mean",
"(",
"I_OFF",
")",
")",
"/",
"2",
"plt",
".",
"ylim",
"(",
"(",
"lowlim",
",",
"hilim",
")",
")",
"plt",
".",
"xlim",
"(",
"(",
"min_samp",
",",
"max_samp",
")",
")",
"plt",
".",
"xlabel",
"(",
"'Time Sample Number'",
")",
"plt",
".",
"ylabel",
"(",
"'Power (Counts)'",
")",
"plt",
".",
"title",
"(",
"'Noise Diode Fold'",
")",
"if",
"legend",
"==",
"True",
":",
"plt",
".",
"legend",
"(",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
test
|
plot_fullcalib
|
Generates and shows five plots: Uncalibrated diode, calibrated diode, fold information,
phase offsets, and gain offsets for a noise diode measurement. Most useful diagnostic plot to
make sure calibration proceeds correctly.
|
blimpy/calib_utils/calib_plots.py
|
def plot_fullcalib(dio_cross,feedtype='l',**kwargs):
'''
Generates and shows five plots: Uncalibrated diode, calibrated diode, fold information,
phase offsets, and gain offsets for a noise diode measurement. Most useful diagnostic plot to
make sure calibration proceeds correctly.
'''
plt.figure("Multiple Calibration Plots", figsize=(12,9))
left, width = 0.075,0.435
bottom, height = 0.45,0.5
width2 = 0.232
bottom2, height2 = 0.115,0.0975
rect_uncal = [left,bottom,width,height]
rect_cal = [left+width+0.025,bottom,width,height]
rect_fold = [left,bottom2,width2,0.22]
rect_gain1 = [left+width2+0.1,bottom2,width2,height2]
rect_phase1 = [left+width2*2+0.1*2,bottom2,width2,height2]
rect_gain2 = [left+width2+0.1,bottom2+height2+0.025,width2,height2]
rect_phase2 = [left+width2*2+0.1*2,bottom2+height2+0.025,width2,height2]
#--------
axFold = plt.axes(rect_fold)
print('Plotting Diode Fold')
plot_diode_fold(dio_cross,bothfeeds=False,feedtype=feedtype,min_samp=2000,max_samp=5500,legend=False,**kwargs)
#--------
print('Plotting Gain Offsets')
plot_gain_offsets(dio_cross,feedtype=feedtype,ax1=rect_gain2,ax2=rect_gain1,legend=False,**kwargs)
#--------
print('Plotting Phase Offsets')
plot_phase_offsets(dio_cross,feedtype=feedtype,ax1=rect_phase1,ax2=rect_phase2,legend=False,**kwargs)
plt.ylabel('')
#--------
ax_uncal = plt.axes(rect_uncal)
print('Plotting Uncalibrated Diode')
plot_Stokes_diode(dio_cross,feedtype=feedtype,**kwargs)
#--------
ax_cal = plt.axes(rect_cal,sharey=ax_uncal)
print('Plotting Calibrated Diode')
plot_calibrated_diode(dio_cross,feedtype=feedtype,**kwargs)
plt.ylabel('')
plt.setp(ax_cal.get_yticklabels(),visible=False)
plt.savefig(dio_cross[:-4]+'.stokescalib.png',dpi=2000)
plt.show()
|
def plot_fullcalib(dio_cross,feedtype='l',**kwargs):
'''
Generates and shows five plots: Uncalibrated diode, calibrated diode, fold information,
phase offsets, and gain offsets for a noise diode measurement. Most useful diagnostic plot to
make sure calibration proceeds correctly.
'''
plt.figure("Multiple Calibration Plots", figsize=(12,9))
left, width = 0.075,0.435
bottom, height = 0.45,0.5
width2 = 0.232
bottom2, height2 = 0.115,0.0975
rect_uncal = [left,bottom,width,height]
rect_cal = [left+width+0.025,bottom,width,height]
rect_fold = [left,bottom2,width2,0.22]
rect_gain1 = [left+width2+0.1,bottom2,width2,height2]
rect_phase1 = [left+width2*2+0.1*2,bottom2,width2,height2]
rect_gain2 = [left+width2+0.1,bottom2+height2+0.025,width2,height2]
rect_phase2 = [left+width2*2+0.1*2,bottom2+height2+0.025,width2,height2]
#--------
axFold = plt.axes(rect_fold)
print('Plotting Diode Fold')
plot_diode_fold(dio_cross,bothfeeds=False,feedtype=feedtype,min_samp=2000,max_samp=5500,legend=False,**kwargs)
#--------
print('Plotting Gain Offsets')
plot_gain_offsets(dio_cross,feedtype=feedtype,ax1=rect_gain2,ax2=rect_gain1,legend=False,**kwargs)
#--------
print('Plotting Phase Offsets')
plot_phase_offsets(dio_cross,feedtype=feedtype,ax1=rect_phase1,ax2=rect_phase2,legend=False,**kwargs)
plt.ylabel('')
#--------
ax_uncal = plt.axes(rect_uncal)
print('Plotting Uncalibrated Diode')
plot_Stokes_diode(dio_cross,feedtype=feedtype,**kwargs)
#--------
ax_cal = plt.axes(rect_cal,sharey=ax_uncal)
print('Plotting Calibrated Diode')
plot_calibrated_diode(dio_cross,feedtype=feedtype,**kwargs)
plt.ylabel('')
plt.setp(ax_cal.get_yticklabels(),visible=False)
plt.savefig(dio_cross[:-4]+'.stokescalib.png',dpi=2000)
plt.show()
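A usage sketch; the filename is a placeholder. Because of the dio_cross[:-4] slice above, the saved PNG name assumes a four-character extension such as '.fil':

from blimpy.calib_utils.calib_plots import plot_fullcalib

plot_fullcalib('diode_cross.fil', feedtype='l')  # writes diode_cross.stokescalib.png and shows the figure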
|
[
"Generates",
"and",
"shows",
"five",
"plots",
":",
"Uncalibrated",
"diode",
"calibrated",
"diode",
"fold",
"information",
"phase",
"offsets",
"and",
"gain",
"offsets",
"for",
"a",
"noise",
"diode",
"measurement",
".",
"Most",
"useful",
"diagnostic",
"plot",
"to",
"make",
"sure",
"calibration",
"proceeds",
"correctly",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/calib_utils/calib_plots.py#L275-L323
|
[
"def",
"plot_fullcalib",
"(",
"dio_cross",
",",
"feedtype",
"=",
"'l'",
",",
"*",
"*",
"kwargs",
")",
":",
"plt",
".",
"figure",
"(",
"\"Multiple Calibration Plots\"",
",",
"figsize",
"=",
"(",
"12",
",",
"9",
")",
")",
"left",
",",
"width",
"=",
"0.075",
",",
"0.435",
"bottom",
",",
"height",
"=",
"0.45",
",",
"0.5",
"width2",
"=",
"0.232",
"bottom2",
",",
"height2",
"=",
"0.115",
",",
"0.0975",
"rect_uncal",
"=",
"[",
"left",
",",
"bottom",
",",
"width",
",",
"height",
"]",
"rect_cal",
"=",
"[",
"left",
"+",
"width",
"+",
"0.025",
",",
"bottom",
",",
"width",
",",
"height",
"]",
"rect_fold",
"=",
"[",
"left",
",",
"bottom2",
",",
"width2",
",",
"0.22",
"]",
"rect_gain1",
"=",
"[",
"left",
"+",
"width2",
"+",
"0.1",
",",
"bottom2",
",",
"width2",
",",
"height2",
"]",
"rect_phase1",
"=",
"[",
"left",
"+",
"width2",
"*",
"2",
"+",
"0.1",
"*",
"2",
",",
"bottom2",
",",
"width2",
",",
"height2",
"]",
"rect_gain2",
"=",
"[",
"left",
"+",
"width2",
"+",
"0.1",
",",
"bottom2",
"+",
"height2",
"+",
"0.025",
",",
"width2",
",",
"height2",
"]",
"rect_phase2",
"=",
"[",
"left",
"+",
"width2",
"*",
"2",
"+",
"0.1",
"*",
"2",
",",
"bottom2",
"+",
"height2",
"+",
"0.025",
",",
"width2",
",",
"height2",
"]",
"#--------",
"axFold",
"=",
"plt",
".",
"axes",
"(",
"rect_fold",
")",
"print",
"(",
"'Plotting Diode Fold'",
")",
"plot_diode_fold",
"(",
"dio_cross",
",",
"bothfeeds",
"=",
"False",
",",
"feedtype",
"=",
"feedtype",
",",
"min_samp",
"=",
"2000",
",",
"max_samp",
"=",
"5500",
",",
"legend",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
"#--------",
"print",
"(",
"'Plotting Gain Offsets'",
")",
"plot_gain_offsets",
"(",
"dio_cross",
",",
"feedtype",
"=",
"feedtype",
",",
"ax1",
"=",
"rect_gain2",
",",
"ax2",
"=",
"rect_gain1",
",",
"legend",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
"#--------",
"print",
"(",
"'Plotting Phase Offsets'",
")",
"plot_phase_offsets",
"(",
"dio_cross",
",",
"feedtype",
"=",
"feedtype",
",",
"ax1",
"=",
"rect_phase1",
",",
"ax2",
"=",
"rect_phase2",
",",
"legend",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
"plt",
".",
"ylabel",
"(",
"''",
")",
"#--------",
"ax_uncal",
"=",
"plt",
".",
"axes",
"(",
"rect_uncal",
")",
"print",
"(",
"'Plotting Uncalibrated Diode'",
")",
"plot_Stokes_diode",
"(",
"dio_cross",
",",
"feedtype",
"=",
"feedtype",
",",
"*",
"*",
"kwargs",
")",
"#--------",
"ax_cal",
"=",
"plt",
".",
"axes",
"(",
"rect_cal",
",",
"sharey",
"=",
"ax_uncal",
")",
"print",
"(",
"'Plotting Calibrated Diode'",
")",
"plot_calibrated_diode",
"(",
"dio_cross",
",",
"feedtype",
"=",
"feedtype",
",",
"*",
"*",
"kwargs",
")",
"plt",
".",
"ylabel",
"(",
"''",
")",
"plt",
".",
"setp",
"(",
"ax_cal",
".",
"get_yticklabels",
"(",
")",
",",
"visible",
"=",
"False",
")",
"plt",
".",
"savefig",
"(",
"dio_cross",
"[",
":",
"-",
"4",
"]",
"+",
"'.stokescalib.png'",
",",
"dpi",
"=",
"2000",
")",
"plt",
".",
"show",
"(",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
test
|
plot_diodespec
|
Plots the full-band Stokes I spectrum of the noise diode (ON-OFF)
|
blimpy/calib_utils/calib_plots.py
|
def plot_diodespec(ON_obs,OFF_obs,calflux,calfreq,spec_in,units='mJy',**kwargs):
'''
Plots the full-band Stokes I spectrum of the noise diode (ON-OFF)
'''
dspec = diode_spec(ON_obs,OFF_obs,calflux,calfreq,spec_in,**kwargs)
obs = Waterfall(ON_obs,max_load=150)
freqs = obs.populate_freqs()
chan_per_coarse = obs.header['nchans']/obs.calc_n_coarse_chan()
coarse_freqs = convert_to_coarse(freqs,chan_per_coarse)
plt.ion()
plt.figure()
plt.plot(coarse_freqs,dspec)
plt.xlabel('Frequency (MHz)')
plt.ylabel('Flux Density ('+units+')')
plt.title('Noise Diode Spectrum')
|
def plot_diodespec(ON_obs,OFF_obs,calflux,calfreq,spec_in,units='mJy',**kwargs):
'''
Plots the full-band Stokes I spectrum of the noise diode (ON-OFF)
'''
dspec = diode_spec(ON_obs,OFF_obs,calflux,calfreq,spec_in,**kwargs)
obs = Waterfall(ON_obs,max_load=150)
freqs = obs.populate_freqs()
chan_per_coarse = obs.header['nchans']/obs.calc_n_coarse_chan()
coarse_freqs = convert_to_coarse(freqs,chan_per_coarse)
plt.ion()
plt.figure()
plt.plot(coarse_freqs,dspec)
plt.xlabel('Frequency (MHz)')
plt.ylabel('Flux Density ('+units+')')
plt.title('Noise Diode Spectrum')
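A usage sketch with placeholder arguments; calflux and calfreq describe the flux calibrator (a flux density in the chosen units at a reference frequency in MHz), and spec_in, presumably the calibrator's spectral index, is passed straight through to diode_spec:

from blimpy.calib_utils.calib_plots import plot_diodespec

plot_diodespec('cal_ON.fil', 'cal_OFF.fil', calflux=1830.0, calfreq=1400.0, spec_in=-0.5, units='mJy')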
|
[
"Plots",
"the",
"full",
"-",
"band",
"Stokes",
"I",
"spectrum",
"of",
"the",
"noise",
"diode",
"(",
"ON",
"-",
"OFF",
")"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/calib_utils/calib_plots.py#L325-L340
|
[
"def",
"plot_diodespec",
"(",
"ON_obs",
",",
"OFF_obs",
",",
"calflux",
",",
"calfreq",
",",
"spec_in",
",",
"units",
"=",
"'mJy'",
",",
"*",
"*",
"kwargs",
")",
":",
"dspec",
"=",
"diode_spec",
"(",
"ON_obs",
",",
"OFF_obs",
",",
"calflux",
",",
"calfreq",
",",
"spec_in",
",",
"*",
"*",
"kwargs",
")",
"obs",
"=",
"Waterfall",
"(",
"ON_obs",
",",
"max_load",
"=",
"150",
")",
"freqs",
"=",
"obs",
".",
"populate_freqs",
"(",
")",
"chan_per_coarse",
"=",
"obs",
".",
"header",
"[",
"'nchans'",
"]",
"/",
"obs",
".",
"calc_n_coarse_chan",
"(",
")",
"coarse_freqs",
"=",
"convert_to_coarse",
"(",
"freqs",
",",
"chan_per_coarse",
")",
"plt",
".",
"ion",
"(",
")",
"plt",
".",
"figure",
"(",
")",
"plt",
".",
"plot",
"(",
"coarse_freqs",
",",
"dspec",
")",
"plt",
".",
"xlabel",
"(",
"'Frequency (MHz)'",
")",
"plt",
".",
"ylabel",
"(",
"'Flux Density ('",
"+",
"units",
"+",
"')'",
")",
"plt",
".",
"title",
"(",
"'Noise Diode Spectrum'",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
test
|
cmd_tool
|
Read the input file name, start and stop frequencies, and output file name
|
blimpy/dice.py
|
def cmd_tool():
'''Read the input file name, start and stop frequencies, and output file name
'''
parser = argparse.ArgumentParser(description='Dices hdf5 or fil files and writes to hdf5 or fil.')
parser.add_argument('-f', '--input_filename', action='store', default=None, dest='in_fname', type=str, help='Name of file to write from (HDF5 or FIL)')
parser.add_argument('-b', action='store', default=None, dest='f_start', type=float, help='Start frequency in MHz')
parser.add_argument('-e', action='store', default=None, dest='f_stop', type=float, help='Stop frequency in MHz')
parser.add_argument('-x', '--output_file', action='store', default=None, dest='out_format', type=str, help='Output file format [h5 or fil].')
parser.add_argument('-o', '--output_filename', action='store', default=None, dest='out_fname', type=str, help='Output file name to write (to HDF5 or FIL).')
parser.add_argument('-l', action='store', default=None, dest='max_load', type=float,help='Maximum data limit to load. Default:1GB')
args = parser.parse_args()
if len(sys.argv) == 1:
logger.error('Indicate file name and start and stop frequencies')
sys.exit()
if args.in_fname == None:
logger.error('Need to indicate input file name')
sys.exit()
if args.out_fname == None:
if (args.out_format == None) or (args.out_format == 'h5'):
if args.in_fname[len(args.in_fname)-4:] == '.fil':
args.out_fname = args.in_fname
args.out_fname = args.out_fname.replace('.fil','_diced.h5')
elif args.in_fname[len(args.in_fname)-3:] == '.h5':
args.out_fname = args.in_fname
args.out_fname = args.out_fname.replace('.h5','_diced.h5')
else:
logger.error('Input file not recognized')
sys.exit()
elif args.out_format == 'fil':
if args.in_fname[len(args.in_fname)-4:] == '.fil':
args.out_fname = args.in_fname
args.out_fname = args.out_fname.replace('.fil','_diced.fil')
elif args.in_fname[len(args.in_fname)-3:] == '.h5':
args.out_fname = args.in_fname
args.out_fname = args.out_fname.replace('.h5','_diced.fil')
else:
logger.error('input file not recognized.')
sys.exit()
else:
logger.error('Must indicate either output file name or valid output file extension.')
sys.exit()
elif (args.out_fname[len(args.out_fname)-4:] == '.fil') and (args.out_format == 'h5'):
logger.error('Output file extension does not match output file name')
sys.exit()
elif (args.out_fname[len(args.out_fname)-3:] == '.h5') and (args.out_format == 'fil'):
logger.error('Output file extension does not match output file name.')
sys.exit()
if (args.out_fname[len(args.out_fname)-3:] != '.h5') and (args.out_fname[len(args.out_fname)-4:] != '.fil'):
logger.error('Indicate output file name with extension, or simply output file extension.')
sys.exit()
if args.f_start == None and args.f_stop == None:
logger.error('Please give either start and/or end frequencies. Otherwise use fil2h5 or h52fil functions.')
sys.exit()
#Read start frequency and bandwidth from data set
file_big = Waterfall(args.in_fname, max_load = args.max_load)
f_min_file = file_big.header['fch1']
f_max_file = file_big.header['fch1'] + file_big.header['nchans']*file_big.header['foff']
if f_max_file < f_min_file:
f_max_file,f_min_file = f_min_file,f_max_file
#Fill in any missing bound now that the file limits are known
if args.f_start == None:
args.f_start = f_min_file
logger.warning('Lower frequency not given, setting to ' + str(f_min_file) + ' MHz to match file.')
if args.f_stop == None:
args.f_stop = f_max_file
logger.warning('Higher frequency not given, setting to ' + str(f_max_file) + ' MHz to match file.')
FreqBWFile = f_max_file-f_min_file
stdDF = FreqBWFile / float(file_big.calc_n_coarse_chan()) #stdDF = 2.9296875
if args.f_stop < args.f_start:
args.f_stop,args.f_start = args.f_start,args.f_stop
if args.f_start < f_max_file and args.f_start > f_min_file and args.f_stop > f_max_file:
args.f_stop = f_max_file
logger.warning('Higher frequency set to ' + str(f_max_file) + ' MHz to match file.')
if args.f_stop < f_max_file and args.f_stop > f_min_file and args.f_start < f_min_file:
args.f_start = f_min_file
logger.warning('Lower frequency set to ' + str(f_min_file) + ' MHz to match file.')
if args.f_start < f_min_file and args.f_stop > f_max_file:
args.f_start = f_min_file
args.f_stop = f_max_file
logger.warning('Lower frequency set to ' + str(f_min_file) + ' MHz and higher frequency set to ' + str(f_max_file) + ' MHz to match file.')
# print '\nindicated frequencies include file frequency span - no need to dice\n'
# sys.exit()
if min(args.f_start,args.f_stop) < f_min_file or max(args.f_start,args.f_stop) > f_max_file:
logger.error('Bandwidth to extract must be within ' + str(f_min_file) + ' MHz and ' + str(f_max_file) + ' MHz.')
sys.exit()
# calculate real coarse channel begin and end freqs
f_start_real = math.floor((min(args.f_start,args.f_stop) - f_min_file)/stdDF)*stdDF + f_min_file
f_stop_real = f_max_file - math.floor((f_max_file - max(args.f_start,args.f_stop))/stdDF)*stdDF
# print
# print "true start frequency is " + str(f_start_real)
# print "true stop frequency is " + str(f_stop_real)
logger.info('Writing to ' + args.out_fname)
logger.info('Extracting from ' + str(f_start_real) + ' MHz to ' + str(f_stop_real) + ' MHz.')
# create waterfall object
file_small = Waterfall(args.in_fname, f_start = f_start_real, f_stop = f_stop_real, max_load = args.max_load)
# write waterfall object
if args.out_fname[len(args.out_fname)-4:] == '.fil':
file_small.write_to_fil(args.out_fname)
elif args.out_fname[len(args.out_fname)-3:] == '.h5':
file_small.write_to_hdf5(args.out_fname)
else:
logger.error('Error in output file creation : verify output file name and extension.')
sys.exit()
|
def cmd_tool():
'''Read the input file name, start and stop frequencies, and output file name
'''
parser = argparse.ArgumentParser(description='Dices hdf5 or fil files and writes to hdf5 or fil.')
parser.add_argument('-f', '--input_filename', action='store', default=None, dest='in_fname', type=str, help='Name of file to write from (HDF5 or FIL)')
parser.add_argument('-b', action='store', default=None, dest='f_start', type=float, help='Start frequency in MHz')
parser.add_argument('-e', action='store', default=None, dest='f_stop', type=float, help='Stop frequency in MHz')
parser.add_argument('-x', '--output_file', action='store', default=None, dest='out_format', type=str, help='Output file format [h5 or fil].')
parser.add_argument('-o', '--output_filename', action='store', default=None, dest='out_fname', type=str, help='Output file name to write (to HDF5 or FIL).')
parser.add_argument('-l', action='store', default=None, dest='max_load', type=float,help='Maximum data limit to load. Default:1GB')
args = parser.parse_args()
if len(sys.argv) == 1:
logger.error('Indicate file name and start and stop frequencies')
sys.exit()
if args.in_fname == None:
logger.error('Need to indicate input file name')
sys.exit()
if args.out_fname == None:
if (args.out_format == None) or (args.out_format == 'h5'):
if args.in_fname[len(args.in_fname)-4:] == '.fil':
args.out_fname = args.in_fname
args.out_fname = args.out_fname.replace('.fil','_diced.h5')
elif args.in_fname[len(args.in_fname)-3:] == '.h5':
args.out_fname = args.in_fname
args.out_fname = args.out_fname.replace('.h5','_diced.h5')
else:
logger.error('Input file not recognized')
sys.exit()
elif args.out_format == 'fil':
if args.in_fname[len(args.in_fname)-4:] == '.fil':
args.out_fname = args.in_fname
args.out_fname = args.out_fname.replace('.fil','_diced.fil')
elif args.in_fname[len(args.in_fname)-3:] == '.h5':
args.out_fname = args.in_fname
args.out_fname = args.out_fname.replace('.h5','_diced.fil')
else:
logger.error('input file not recognized.')
sys.exit()
else:
logger.error('Must indicate either output file name or valid output file extension.')
sys.exit()
elif (args.out_fname[len(args.out_fname)-4:] == '.fil') and (args.out_format == 'h5'):
logger.error('Output file extension does not match output file name')
sys.exit()
elif (args.out_fname[len(args.out_fname)-3:] == '.h5') and (args.out_format == 'fil'):
logger.error('Output file extension does not match output file name.')
sys.exit()
if (args.out_fname[len(args.out_fname)-3:] != '.h5') and (args.out_fname[len(args.out_fname)-4:] != '.fil'):
logger.error('Indicate output file name with extension, or simply output file extension.')
sys.exit()
if args.f_start == None and args.f_stop == None:
logger.error('Please give either start and/or end frequencies. Otherwise use fil2h5 or h52fil functions.')
sys.exit()
#Read start frequency and bandwidth from data set
file_big = Waterfall(args.in_fname, max_load = args.max_load)
f_min_file = file_big.header['fch1']
f_max_file = file_big.header['fch1'] + file_big.header['nchans']*file_big.header['foff']
if f_max_file < f_min_file:
f_max_file,f_min_file = f_min_file,f_max_file
#Fill in any missing bound now that the file limits are known
if args.f_start == None:
args.f_start = f_min_file
logger.warning('Lower frequency not given, setting to ' + str(f_min_file) + ' MHz to match file.')
if args.f_stop == None:
args.f_stop = f_max_file
logger.warning('Higher frequency not given, setting to ' + str(f_max_file) + ' MHz to match file.')
FreqBWFile = f_max_file-f_min_file
stdDF = FreqBWFile / float(file_big.calc_n_coarse_chan()) #stdDF = 2.9296875
if args.f_stop < args.f_start:
args.f_stop,args.f_start = args.f_start,args.f_stop
if args.f_start < f_max_file and args.f_start > f_min_file and args.f_stop > f_max_file:
args.f_stop = f_max_file
logger.warning('Higher frequency set to ' + str(f_max_file) + ' MHz to match file.')
if args.f_stop < f_max_file and args.f_stop > f_min_file and args.f_start < f_min_file:
args.f_start = f_min_file
logger.warning('Lower frequency set to ' + str(f_min_file) + ' MHz to match file.')
if args.f_start < f_min_file and args.f_stop > f_max_file:
args.f_start = f_min_file
args.f_stop = f_max_file
logger.warning('Lower frequency set to ' + str(f_min_file) + ' MHz and higher frequency set to ' + str(f_max_file) + ' MHz to match file.')
# print '\nindicated frequencies include file frequency span - no need to dice\n'
# sys.exit()
if min(args.f_start,args.f_stop) < f_min_file or max(args.f_start,args.f_stop) > f_max_file:
logger.error('Bandwidth to extract must be within ' + str(f_min_file) + ' MHz and ' + str(f_max_file) + ' MHz.')
sys.exit()
# calculate real coarse channel begin and end freqs
f_start_real = math.floor((min(args.f_start,args.f_stop) - f_min_file)/stdDF)*stdDF + f_min_file
f_stop_real = f_max_file - math.floor((f_max_file - max(args.f_start,args.f_stop))/stdDF)*stdDF
# print
# print "true start frequency is " + str(f_start_real)
# print "true stop frequency is " + str(f_stop_real)
logger.info('Writing to ' + args.out_fname)
logger.info('Extracting from ' + str(f_start_real) + ' MHz to ' + str(f_stop_real) + ' MHz.')
# create waterfall object
file_small = Waterfall(args.in_fname, f_start = f_start_real, f_stop = f_stop_real, max_load = args.max_load)
# write waterfall object
if args.out_fname[len(args.out_fname)-4:] == '.fil':
file_small.write_to_fil(args.out_fname)
elif args.out_fname[len(args.out_fname)-3:] == '.h5':
file_small.write_to_hdf5(args.out_fname)
else:
logger.error('Error in output file creation : verify output file name and extension.')
sys.exit()
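A sketch of driving cmd_tool programmatically, since it parses sys.argv itself; filenames and frequencies are placeholders (console-script names vary with how blimpy was installed):

import sys
from blimpy.dice import cmd_tool

# dice obs.fil down to 2100-2105 MHz; with out_fname unset and out_format 'h5',
# the output name defaults to obs_diced.h5
sys.argv = ['dice', '-f', 'obs.fil', '-b', '2100.0', '-e', '2105.0', '-x', 'h5']
cmd_tool()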
|
[
"Read",
"input",
"and",
"output",
"frequency",
"and",
"output",
"file",
"name"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/dice.py#L33-L157
|
[
"def",
"cmd_tool",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Dices hdf5 or fil files and writes to hdf5 or fil.'",
")",
"parser",
".",
"add_argument",
"(",
"'-f'",
",",
"'--input_filename'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'in_fname'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'Name of file to write from (HDF5 or FIL)'",
")",
"parser",
".",
"add_argument",
"(",
"'-b'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'f_start'",
",",
"type",
"=",
"float",
",",
"help",
"=",
"'Start frequency in MHz'",
")",
"parser",
".",
"add_argument",
"(",
"'-e'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'f_stop'",
",",
"type",
"=",
"float",
",",
"help",
"=",
"'Stop frequency in MHz'",
")",
"parser",
".",
"add_argument",
"(",
"'-x'",
",",
"'--output_file'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'out_format'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'Output file format [.h5 or .fil].'",
")",
"parser",
".",
"add_argument",
"(",
"'-o'",
",",
"'--output_filename'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'out_fname'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'Ouput file name to write (to HDF5 or FIL).'",
")",
"parser",
".",
"add_argument",
"(",
"'-l'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'max_load'",
",",
"type",
"=",
"float",
",",
"help",
"=",
"'Maximum data limit to load. Default:1GB'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"len",
"(",
"sys",
".",
"argv",
")",
"==",
"1",
":",
"logger",
".",
"error",
"(",
"'Indicate file name and start and stop frequencies'",
")",
"sys",
".",
"exit",
"(",
")",
"if",
"args",
".",
"in_fname",
"==",
"None",
":",
"logger",
".",
"error",
"(",
"'Need to indicate input file name'",
")",
"sys",
".",
"exit",
"(",
")",
"if",
"args",
".",
"out_fname",
"==",
"None",
":",
"if",
"(",
"args",
".",
"out_format",
"==",
"None",
")",
"or",
"(",
"args",
".",
"out_format",
"==",
"'h5'",
")",
":",
"if",
"args",
".",
"in_fname",
"[",
"len",
"(",
"args",
".",
"in_fname",
")",
"-",
"4",
":",
"]",
"==",
"'.fil'",
":",
"args",
".",
"out_fname",
"=",
"args",
".",
"in_fname",
"args",
".",
"out_fname",
"=",
"args",
".",
"out_fname",
".",
"replace",
"(",
"'.fil'",
",",
"'_diced.h5'",
")",
"elif",
"args",
".",
"in_fname",
"[",
"len",
"(",
"args",
".",
"in_fname",
")",
"-",
"3",
":",
"]",
"==",
"'.h5'",
":",
"args",
".",
"out_fname",
"=",
"args",
".",
"in_fname",
"args",
".",
"out_fname",
"=",
"args",
".",
"out_fname",
".",
"replace",
"(",
"'.h5'",
",",
"'_diced.h5'",
")",
"else",
":",
"logger",
".",
"error",
"(",
"'Input file not recognized'",
")",
"sys",
".",
"exit",
"(",
")",
"elif",
"args",
".",
"out_format",
"==",
"'fil'",
":",
"if",
"args",
".",
"in_fname",
"[",
"len",
"(",
"args",
".",
"in_fname",
")",
"-",
"4",
":",
"]",
"==",
"'.fil'",
":",
"args",
".",
"out_fname",
"=",
"args",
".",
"in_fname",
"args",
".",
"out_fname",
"=",
"args",
".",
"out_fname",
".",
"replace",
"(",
"'.fil'",
",",
"'_diced.fil'",
")",
"elif",
"args",
".",
"in_fname",
"[",
"len",
"(",
"args",
".",
"in_fname",
")",
"-",
"3",
":",
"]",
"==",
"'.h5'",
":",
"args",
".",
"out_fname",
"=",
"args",
".",
"in_fname",
"args",
".",
"out_fname",
"=",
"args",
".",
"out_fname",
".",
"replace",
"(",
"'.h5'",
",",
"'_diced.fil'",
")",
"else",
":",
"logger",
".",
"error",
"(",
"'input file not recognized.'",
")",
"sys",
".",
"exit",
"(",
")",
"else",
":",
"logger",
".",
"error",
"(",
"'Must indicate either output file name or valid output file extension.'",
")",
"sys",
".",
"exit",
"(",
")",
"elif",
"(",
"args",
".",
"out_fname",
"[",
"len",
"(",
"args",
".",
"out_fname",
")",
"-",
"4",
":",
"]",
"==",
"'.fil'",
")",
"and",
"(",
"args",
".",
"out_format",
"==",
"'h5'",
")",
":",
"logger",
".",
"error",
"(",
"'Output file extension does not match output file name'",
")",
"sys",
".",
"exit",
"(",
")",
"elif",
"(",
"args",
".",
"out_fname",
"[",
"len",
"(",
"args",
".",
"out_fname",
")",
"-",
"3",
":",
"]",
"==",
"'.h5'",
")",
"and",
"(",
"args",
".",
"out_format",
"==",
"'fil'",
")",
":",
"logger",
".",
"error",
"(",
"'Output file extension does not match output file name.'",
")",
"sys",
".",
"exit",
"(",
")",
"if",
"(",
"args",
".",
"out_fname",
"[",
"len",
"(",
"args",
".",
"out_fname",
")",
"-",
"3",
":",
"]",
"!=",
"'.h5'",
")",
"and",
"(",
"args",
".",
"out_fname",
"[",
"len",
"(",
"args",
".",
"out_fname",
")",
"-",
"4",
":",
"]",
"!=",
"'.fil'",
")",
":",
"logger",
".",
"error",
"(",
"'Indicate output file name with extension, or simply output file extension.'",
")",
"sys",
".",
"exit",
"(",
")",
"if",
"args",
".",
"f_start",
"==",
"None",
"and",
"args",
".",
"f_stop",
"==",
"None",
":",
"logger",
".",
"error",
"(",
"'Please give either start and/or end frequencies. Otherwise use fil2h5 or h52fil functions.'",
")",
"sys",
".",
"exit",
"(",
")",
"if",
"args",
".",
"f_start",
"==",
"None",
":",
"logger",
".",
"warning",
"(",
"'Lower frequency not given, setting to '",
"+",
"str",
"(",
"f_min_file",
")",
"+",
"' MHz to match file.'",
")",
"if",
"args",
".",
"f_stop",
"==",
"None",
":",
"logger",
".",
"warning",
"(",
"'Higher frequency not given, setting to '",
"+",
"str",
"(",
"f_max_file",
")",
"+",
"' MHz to match file.'",
")",
"#Read start frequency and bandwidth from data set\r",
"file_big",
"=",
"Waterfall",
"(",
"args",
".",
"in_fname",
",",
"max_load",
"=",
"args",
".",
"max_load",
")",
"f_min_file",
"=",
"file_big",
".",
"header",
"[",
"'fch1'",
"]",
"f_max_file",
"=",
"file_big",
".",
"header",
"[",
"'fch1'",
"]",
"+",
"file_big",
".",
"header",
"[",
"'nchans'",
"]",
"*",
"file_big",
".",
"header",
"[",
"'foff'",
"]",
"if",
"f_max_file",
"<",
"f_min_file",
":",
"f_max_file",
",",
"f_min_file",
"=",
"f_min_file",
",",
"f_max_file",
"FreqBWFile",
"=",
"f_max_file",
"-",
"f_min_file",
"stdDF",
"=",
"FreqBWFile",
"/",
"float",
"(",
"file_big",
".",
"calc_n_coarse_chan",
"(",
")",
")",
"#stdDF = 2.9296875\r",
"if",
"args",
".",
"f_stop",
"<",
"args",
".",
"f_start",
":",
"args",
".",
"f_stop",
",",
"args",
".",
"f_start",
"=",
"args",
".",
"f_start",
",",
"args",
".",
"f_stop",
"if",
"args",
".",
"f_start",
"<",
"f_max_file",
"and",
"args",
".",
"f_start",
">",
"f_min_file",
"and",
"args",
".",
"f_stop",
">",
"f_max_file",
":",
"args",
".",
"f_stop",
"=",
"f_max_file",
"logger",
".",
"warning",
"(",
"'Higher frequency set to '",
"+",
"str",
"(",
"f_max_file",
")",
"+",
"' MHz to match file.'",
")",
"if",
"args",
".",
"f_stop",
"<",
"f_max_file",
"and",
"args",
".",
"f_stop",
">",
"f_min_file",
"and",
"args",
".",
"f_start",
"<",
"f_min_file",
":",
"args",
".",
"f_start",
"=",
"f_min_file",
"logger",
".",
"warning",
"(",
"'Lower frequency set to '",
"+",
"str",
"(",
"f_min_file",
")",
"+",
"' MHz to match file.'",
")",
"if",
"args",
".",
"f_start",
"<",
"f_min_file",
"and",
"args",
".",
"f_stop",
">",
"f_max_file",
":",
"args",
".",
"f_start",
"=",
"f_min_file",
"args",
".",
"f_stop",
"=",
"f_max_file",
"logger",
".",
"warning",
"(",
"'Lower frequency set to '",
"+",
"str",
"(",
"f_min_file",
")",
"+",
"' MHz and higher frequency set to '",
"+",
"str",
"(",
"f_max_file",
")",
"+",
"' MHz to match file.'",
")",
"# print '\\nindicated frequencies include file frequency span - no need to dice\\n'\r",
"# sys.exit()\r",
"if",
"min",
"(",
"args",
".",
"f_start",
",",
"args",
".",
"f_stop",
")",
"<",
"f_min_file",
"or",
"max",
"(",
"args",
".",
"f_start",
",",
"args",
".",
"f_stop",
")",
">",
"f_max_file",
":",
"logger",
".",
"error",
"(",
"'Bandwidth to extract must be within '",
"+",
"str",
"(",
"f_min_file",
")",
"+",
"' MHz and '",
"+",
"str",
"(",
"f_max_file",
")",
"+",
"' MHz.'",
")",
"sys",
".",
"exit",
"(",
")",
"# calculate real coarse channel begin and end freqs\r",
"f_start_real",
"=",
"math",
".",
"floor",
"(",
"(",
"min",
"(",
"args",
".",
"f_start",
",",
"args",
".",
"f_stop",
")",
"-",
"f_min_file",
")",
"/",
"stdDF",
")",
"*",
"stdDF",
"+",
"f_min_file",
"f_stop_real",
"=",
"f_max_file",
"-",
"math",
".",
"floor",
"(",
"(",
"f_max_file",
"-",
"max",
"(",
"args",
".",
"f_start",
",",
"args",
".",
"f_stop",
")",
")",
"/",
"stdDF",
")",
"*",
"stdDF",
"# print\r",
"# print \"true start frequency is \" + str(f_start_real)\r",
"# print \"true stop frequency is \" + str(f_stop_real)\r",
"logger",
".",
"info",
"(",
"'Writing to '",
"+",
"args",
".",
"out_fname",
")",
"logger",
".",
"info",
"(",
"'Extacting from '",
"+",
"str",
"(",
"f_start_real",
")",
"+",
"' MHz to '",
"+",
"str",
"(",
"f_stop_real",
")",
"+",
"' MHz.'",
")",
"# create waterfall object\r",
"file_small",
"=",
"Waterfall",
"(",
"args",
".",
"in_fname",
",",
"f_start",
"=",
"f_start_real",
",",
"f_stop",
"=",
"f_stop_real",
",",
"max_load",
"=",
"args",
".",
"max_load",
")",
"# write waterfall object\r",
"if",
"args",
".",
"out_fname",
"[",
"len",
"(",
"args",
".",
"out_fname",
")",
"-",
"4",
":",
"]",
"==",
"'.fil'",
":",
"file_small",
".",
"write_to_fil",
"(",
"args",
".",
"out_fname",
")",
"elif",
"args",
".",
"out_fname",
"[",
"len",
"(",
"args",
".",
"out_fname",
")",
"-",
"3",
":",
"]",
"==",
"'.h5'",
":",
"file_small",
".",
"write_to_hdf5",
"(",
"args",
".",
"out_fname",
")",
"else",
":",
"logger",
".",
"error",
"(",
"'Error in output file creation : verify output file name and extension.'",
")",
"sys",
".",
"exit",
"(",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
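A worked example of the coarse-channel alignment in cmd_tool() above. This is a minimal sketch, not part of the source file: the helper name align_to_coarse_chans and the 1000-1500 MHz, 512-coarse-channel file are assumptions.

import math

def align_to_coarse_chans(f_start, f_stop, f_min_file, f_max_file, std_df):
    # Snap the requested band outward to coarse-channel edges, mirroring
    # the f_start_real / f_stop_real arithmetic in cmd_tool() above.
    lo, hi = min(f_start, f_stop), max(f_start, f_stop)
    f_start_real = math.floor((lo - f_min_file) / std_df) * std_df + f_min_file
    f_stop_real = f_max_file - math.floor((f_max_file - hi) / std_df) * std_df
    return f_start_real, f_stop_real

std_df = 500.0 / 512  # ~0.9765625 MHz per coarse channel
print(align_to_coarse_chans(1234.5, 1236.1, 1000.0, 1500.0, std_df))
# -> (1234.375, 1236.328125): the diced band always covers the request.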
test
|
cmd_tool
|
Command line utility for creating HDF5 blimpy files.
|
blimpy/deprecated/fil2hdf.py
|
def cmd_tool(args=None):
""" Command line utility for creating HDF5 blimpy files. """
from argparse import ArgumentParser
parser = ArgumentParser(description="Command line utility for creating HDF5 Filterbank files.")
parser.add_argument('dirname', type=str, help='Name of directory to read')
args = parser.parse_args()
if not HAS_BITSHUFFLE:
print("Error: the bitshuffle library is required to run this script.")
exit()
filelist = glob.glob(os.path.join(args.dirname, '*.fil'))
for filename in filelist:
if not os.path.exists(filename + '.h5'):
t0 = time.time()
print("\nReading %s header..." % filename)
fb = Filterbank(filename, load_data=False)
data_shape = (fb.n_ints_in_file, fb.header['nifs'], fb.header['nchans'])
data_dtype = fb.data.dtype
print(data_dtype)
print("Creating new dataset, %s" % str(data_shape))
block_size = 0
h5 = h5py.File(filename + '.h5', 'w')
h5.attrs['CLASS'] = 'FILTERBANK'
dset = h5.create_dataset('data',
shape=data_shape,
compression=bitshuffle.h5.H5FILTER,
compression_opts=(block_size, bitshuffle.h5.H5_COMPRESS_LZ4),
dtype=data_dtype)
dset_mask = h5.create_dataset('mask',
shape=data_shape,
compression=bitshuffle.h5.H5FILTER,
compression_opts=(block_size, bitshuffle.h5.H5_COMPRESS_LZ4),
dtype='uint8')
dset.dims[0].label = "frequency"
dset.dims[1].label = "feed_id"
dset.dims[2].label = "time"
dset_mask.dims[0].label = "frequency"
dset_mask.dims[1].label = "feed_id"
dset_mask.dims[2].label = "time"
# Copy over header information as attributes
for key, value in fb.header.items():
dset.attrs[key] = value
filesize = os.path.getsize(filename)
if filesize >= MAX_SIZE:
n_int_per_read = int(filesize / MAX_SIZE / 2)
print("Filling in with data over %i reads..." % n_int_per_read)
for ii in range(0, n_int_per_read):
print("Reading %i of %i" % (ii + 1, n_int_per_read))
#print ii*n_int_per_read, (ii+1)*n_int_per_read
fb = Filterbank(filename, t_start=ii*n_int_per_read, t_stop=(ii+1) * n_int_per_read)
dset[ii*n_int_per_read:(ii+1)*n_int_per_read] = fb.data[:]
else:
fb = Filterbank(filename)
print(dset.shape, " -> ", fb.data.shape)
dset[:] = fb.data[:]
h5.close()
t1 = time.time()
print("Conversion time: %2.2fs" % (t1- t0))
|
def cmd_tool(args=None):
""" Command line utility for creating HDF5 blimpy files. """
from argparse import ArgumentParser
parser = ArgumentParser(description="Command line utility for creating HDF5 Filterbank files.")
parser.add_argument('dirname', type=str, help='Name of directory to read')
args = parser.parse_args()
if not HAS_BITSHUFFLE:
print("Error: the bitshuffle library is required to run this script.")
exit()
filelist = glob.glob(os.path.join(args.dirname, '*.fil'))
for filename in filelist:
if not os.path.exists(filename + '.h5'):
t0 = time.time()
print("\nReading %s header..." % filename)
fb = Filterbank(filename, load_data=False)
data_shape = (fb.n_ints_in_file, fb.header['nifs'], fb.header['nchans'])
data_dtype = fb.data.dtype
print(data_dtype)
print("Creating new dataset, %s" % str(data_shape))
block_size = 0
h5 = h5py.File(filename + '.h5', 'w')
h5.attrs['CLASS'] = 'FILTERBANK'
dset = h5.create_dataset('data',
shape=data_shape,
compression=bitshuffle.h5.H5FILTER,
compression_opts=(block_size, bitshuffle.h5.H5_COMPRESS_LZ4),
dtype=data_dtype)
dset_mask = h5.create_dataset('mask',
shape=data_shape,
compression=bitshuffle.h5.H5FILTER,
compression_opts=(block_size, bitshuffle.h5.H5_COMPRESS_LZ4),
dtype='uint8')
dset.dims[0].label = "frequency"
dset.dims[1].label = "feed_id"
dset.dims[2].label = "time"
dset_mask.dims[0].label = "frequency"
dset_mask.dims[1].label = "feed_id"
dset_mask.dims[2].label = "time"
# Copy over header information as attributes
for key, value in fb.header.items():
dset.attrs[key] = value
filesize = os.path.getsize(filename)
if filesize >= MAX_SIZE:
n_int_per_read = int(filesize / MAX_SIZE / 2)
print("Filling in with data over %i reads..." % n_int_per_read)
for ii in range(0, n_int_per_read):
print("Reading %i of %i" % (ii + 1, n_int_per_read))
#print ii*n_int_per_read, (ii+1)*n_int_per_read
fb = Filterbank(filename, t_start=ii*n_int_per_read, t_stop=(ii+1) * n_int_per_read)
dset[ii*n_int_per_read:(ii+1)*n_int_per_read] = fb.data[:]
else:
fb = Filterbank(filename)
print(dset.shape, " -> ", fb.data.shape)
dset[:] = fb.data[:]
h5.close()
t1 = time.time()
print("Conversion time: %2.2fs" % (t1- t0))
|
[
"Command",
"line",
"utility",
"for",
"creating",
"HDF5",
"blimpy",
"files",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/deprecated/fil2hdf.py#L19-L91
|
[
"def",
"cmd_tool",
"(",
"args",
"=",
"None",
")",
":",
"from",
"argparse",
"import",
"ArgumentParser",
"parser",
"=",
"ArgumentParser",
"(",
"description",
"=",
"\"Command line utility for creating HDF5 Filterbank files.\"",
")",
"parser",
".",
"add_argument",
"(",
"'dirname'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'Name of directory to read'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"not",
"HAS_BITSHUFFLE",
":",
"print",
"(",
"\"Error: the bitshuffle library is required to run this script.\"",
")",
"exit",
"(",
")",
"filelist",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"dirname",
",",
"'*.fil'",
")",
")",
"for",
"filename",
"in",
"filelist",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
"+",
"'.h5'",
")",
":",
"t0",
"=",
"time",
".",
"time",
"(",
")",
"print",
"(",
"\"\\nReading %s header...\"",
"%",
"filename",
")",
"fb",
"=",
"Filterbank",
"(",
"filename",
",",
"load_data",
"=",
"False",
")",
"data_shape",
"=",
"(",
"fb",
".",
"n_ints_in_file",
",",
"fb",
".",
"header",
"[",
"'nifs'",
"]",
",",
"fb",
".",
"header",
"[",
"'nchans'",
"]",
")",
"data_dtype",
"=",
"fb",
".",
"data",
".",
"dtype",
"print",
"(",
"data_dtype",
")",
"print",
"(",
"\"Creating new dataset, %s\"",
"%",
"str",
"(",
"data_shape",
")",
")",
"block_size",
"=",
"0",
"h5",
"=",
"h5py",
".",
"File",
"(",
"filename",
"+",
"'.h5'",
",",
"'w'",
")",
"h5",
".",
"attrs",
"[",
"'CLASS'",
"]",
"=",
"'FILTERBANK'",
"dset",
"=",
"h5",
".",
"create_dataset",
"(",
"'data'",
",",
"shape",
"=",
"data_shape",
",",
"compression",
"=",
"bitshuffle",
".",
"h5",
".",
"H5FILTER",
",",
"compression_opts",
"=",
"(",
"block_size",
",",
"bitshuffle",
".",
"h5",
".",
"H5_COMPRESS_LZ4",
")",
",",
"dtype",
"=",
"data_dtype",
")",
"dset_mask",
"=",
"h5",
".",
"create_dataset",
"(",
"'mask'",
",",
"shape",
"=",
"data_shape",
",",
"compression",
"=",
"bitshuffle",
".",
"h5",
".",
"H5FILTER",
",",
"compression_opts",
"=",
"(",
"block_size",
",",
"bitshuffle",
".",
"h5",
".",
"H5_COMPRESS_LZ4",
")",
",",
"dtype",
"=",
"'uint8'",
")",
"dset",
".",
"dims",
"[",
"0",
"]",
".",
"label",
"=",
"\"frequency\"",
"dset",
".",
"dims",
"[",
"1",
"]",
".",
"label",
"=",
"\"feed_id\"",
"dset",
".",
"dims",
"[",
"2",
"]",
".",
"label",
"=",
"\"time\"",
"dset_mask",
".",
"dims",
"[",
"0",
"]",
".",
"label",
"=",
"\"frequency\"",
"dset_mask",
".",
"dims",
"[",
"1",
"]",
".",
"label",
"=",
"\"feed_id\"",
"dset_mask",
".",
"dims",
"[",
"2",
"]",
".",
"label",
"=",
"\"time\"",
"# Copy over header information as attributes",
"for",
"key",
",",
"value",
"in",
"fb",
".",
"header",
".",
"items",
"(",
")",
":",
"dset",
".",
"attrs",
"[",
"key",
"]",
"=",
"value",
"filesize",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"filename",
")",
"if",
"filesize",
">=",
"MAX_SIZE",
":",
"n_int_per_read",
"=",
"int",
"(",
"filesize",
"/",
"MAX_SIZE",
"/",
"2",
")",
"print",
"(",
"\"Filling in with data over %i reads...\"",
"%",
"n_int_per_read",
")",
"for",
"ii",
"in",
"range",
"(",
"0",
",",
"n_int_per_read",
")",
":",
"print",
"(",
"\"Reading %i of %i\"",
"%",
"(",
"ii",
"+",
"1",
",",
"n_int_per_read",
")",
")",
"#print ii*n_int_per_read, (ii+1)*n_int_per_read",
"fb",
"=",
"Filterbank",
"(",
"filename",
",",
"t_start",
"=",
"ii",
"*",
"n_int_per_read",
",",
"t_stop",
"=",
"(",
"ii",
"+",
"1",
")",
"*",
"n_int_per_read",
")",
"dset",
"[",
"ii",
"*",
"n_int_per_read",
":",
"(",
"ii",
"+",
"1",
")",
"*",
"n_int_per_read",
"]",
"=",
"fb",
".",
"data",
"[",
":",
"]",
"else",
":",
"fb",
"=",
"Filterbank",
"(",
"filename",
")",
"print",
"(",
"dset",
".",
"shape",
",",
"\" -> \"",
",",
"fb",
".",
"data",
".",
"shape",
")",
"dset",
"[",
":",
"]",
"=",
"fb",
".",
"data",
"[",
":",
"]",
"h5",
".",
"close",
"(",
")",
"t1",
"=",
"time",
".",
"time",
"(",
")",
"print",
"(",
"\"Conversion time: %2.2fs\"",
"%",
"(",
"t1",
"-",
"t0",
")",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
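The bitshuffle-compressed dataset creation used by this deprecated converter reduces to the pattern below. A minimal sketch, assuming the bitshuffle HDF5 plugin is installed; the shape and output name are placeholders. (The dims labels above appear transposed relative to data_shape, whose first axis is time; the sketch simply writes the array.)

import h5py
import numpy as np
import bitshuffle.h5  # importing registers the bitshuffle filter with HDF5

data = np.zeros((16, 1, 1024), dtype='float32')  # (time, feed_id, frequency)

with h5py.File('example.h5', 'w') as h5:
    h5.attrs['CLASS'] = 'FILTERBANK'
    dset = h5.create_dataset(
        'data',
        shape=data.shape,
        dtype=data.dtype,
        compression=bitshuffle.h5.H5FILTER,
        # block_size=0 lets bitshuffle choose a block size automatically
        compression_opts=(0, bitshuffle.h5.H5_COMPRESS_LZ4),
    )
    dset[:] = data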
test
|
open_file
|
Open a HDF5 or filterbank file
Returns an instance of a Reader to read data from file.
================== ==================================================
Filename extension File type
================== ==================================================
h5, hdf5 HDF5 format
fil fil format
*other* Will raise NotImplementedError
================== ==================================================
|
blimpy/file_wrapper.py
|
def open_file(filename, f_start=None, f_stop=None,t_start=None, t_stop=None,load_data=True,max_load=1.):
"""Open a HDF5 or filterbank file
    Returns an instance of a Reader to read data from file.
================== ==================================================
Filename extension File type
================== ==================================================
h5, hdf5 HDF5 format
fil fil format
*other* Will raise NotImplementedError
================== ==================================================
"""
if not os.path.isfile(filename):
type(filename)
print(filename)
raise IOError("No such file or directory: " + filename)
filename = os.path.expandvars(os.path.expanduser(filename))
# Get file extension to determine type
ext = filename.split(".")[-1].strip().lower()
if six.PY3:
ext = bytes(ext, 'ascii')
if h5py.is_hdf5(filename):
# Open HDF5 file
return H5Reader(filename, f_start=f_start, f_stop=f_stop, t_start=t_start, t_stop=t_stop,
load_data=load_data, max_load=max_load)
elif sigproc.is_filterbank(filename):
# Open FIL file
return FilReader(filename, f_start=f_start, f_stop=f_stop, t_start=t_start, t_stop=t_stop, load_data=load_data, max_load=max_load)
else:
raise NotImplementedError('Cannot open this type of file with Waterfall')
|
def open_file(filename, f_start=None, f_stop=None,t_start=None, t_stop=None,load_data=True,max_load=1.):
"""Open a HDF5 or filterbank file
    Returns an instance of a Reader to read data from file.
================== ==================================================
Filename extension File type
================== ==================================================
h5, hdf5 HDF5 format
fil fil format
*other* Will raise NotImplementedError
================== ==================================================
"""
if not os.path.isfile(filename):
type(filename)
print(filename)
raise IOError("No such file or directory: " + filename)
filename = os.path.expandvars(os.path.expanduser(filename))
# Get file extension to determine type
ext = filename.split(".")[-1].strip().lower()
if six.PY3:
ext = bytes(ext, 'ascii')
if h5py.is_hdf5(filename):
# Open HDF5 file
return H5Reader(filename, f_start=f_start, f_stop=f_stop, t_start=t_start, t_stop=t_stop,
load_data=load_data, max_load=max_load)
elif sigproc.is_filterbank(filename):
# Open FIL file
return FilReader(filename, f_start=f_start, f_stop=f_stop, t_start=t_start, t_stop=t_stop, load_data=load_data, max_load=max_load)
else:
raise NotImplementedError('Cannot open this type of file with Waterfall')
|
[
"Open",
"a",
"HDF5",
"or",
"filterbank",
"file"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L736-L770
|
[
"def",
"open_file",
"(",
"filename",
",",
"f_start",
"=",
"None",
",",
"f_stop",
"=",
"None",
",",
"t_start",
"=",
"None",
",",
"t_stop",
"=",
"None",
",",
"load_data",
"=",
"True",
",",
"max_load",
"=",
"1.",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"type",
"(",
"filename",
")",
"print",
"(",
"filename",
")",
"raise",
"IOError",
"(",
"\"No such file or directory: \"",
"+",
"filename",
")",
"filename",
"=",
"os",
".",
"path",
".",
"expandvars",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"filename",
")",
")",
"# Get file extension to determine type",
"ext",
"=",
"filename",
".",
"split",
"(",
"\".\"",
")",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"if",
"six",
".",
"PY3",
":",
"ext",
"=",
"bytes",
"(",
"ext",
",",
"'ascii'",
")",
"if",
"h5py",
".",
"is_hdf5",
"(",
"filename",
")",
":",
"# Open HDF5 file",
"return",
"H5Reader",
"(",
"filename",
",",
"f_start",
"=",
"f_start",
",",
"f_stop",
"=",
"f_stop",
",",
"t_start",
"=",
"t_start",
",",
"t_stop",
"=",
"t_stop",
",",
"load_data",
"=",
"load_data",
",",
"max_load",
"=",
"max_load",
")",
"elif",
"sigproc",
".",
"is_filterbank",
"(",
"filename",
")",
":",
"# Open FIL file",
"return",
"FilReader",
"(",
"filename",
",",
"f_start",
"=",
"f_start",
",",
"f_stop",
"=",
"f_stop",
",",
"t_start",
"=",
"t_start",
",",
"t_stop",
"=",
"t_stop",
",",
"load_data",
"=",
"load_data",
",",
"max_load",
"=",
"max_load",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Cannot open this type of file with Waterfall'",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
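open_file() computes an extension but ultimately dispatches on file content (h5py.is_hdf5, then sigproc.is_filterbank). A minimal usage sketch; the filename and frequency range are placeholders:

from blimpy.file_wrapper import open_file

# Returns an H5Reader or FilReader depending on what the file actually is.
reader = open_file('voyager.h5', f_start=8419.0, f_stop=8420.0, max_load=1.0)
print(type(reader).__name__)     # e.g. 'H5Reader'
print(reader.header[b'nchans'])  # header keys are bytes objects in this snapshot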
test
|
Reader._setup_selection_range
|
Making sure the selection of time and frequency is within the file limits.
Args:
init (bool): If called during __init__
|
blimpy/file_wrapper.py
|
def _setup_selection_range(self, f_start=None, f_stop=None, t_start=None, t_stop=None, init=False):
"""Making sure the selection if time and frequency are within the file limits.
Args:
        init (bool): If called during __init__
"""
# This avoids resetting values
if init is True:
if t_start is None:
t_start = self.t_begin
if t_stop is None:
t_stop = self.t_end
if f_start is None:
f_start = self.f_begin
if f_stop is None:
f_stop = self.f_end
else:
if f_start is None:
f_start = self.f_start
if f_stop is None:
f_stop = self.f_stop
if t_start is None:
t_start = self.t_start
if t_stop is None:
t_stop = self.t_stop
# By now, all values start/stop are populated.
if t_stop >= 0 and t_start >= 0 and t_stop < t_start:
t_stop, t_start = t_start,t_stop
logger.warning('Given t_stop < t_start, assuming reversed values.')
if f_stop and f_start and f_stop < f_start:
f_stop, f_start = f_start,f_stop
logger.warning('Given f_stop < f_start, assuming reversed values.')
if t_start >= self.t_begin and t_start < self.t_end:
self.t_start = int(t_start)
else:
if init is False or t_start != None:
logger.warning('Setting t_start = %f, since t_start not given or not valid.'%self.t_begin)
self.t_start = self.t_begin
if t_stop <= self.t_end and t_stop > self.t_begin:
self.t_stop = int(t_stop)
else:
if init is False or t_stop:
logger.warning('Setting t_stop = %f, since t_stop not given or not valid.'%self.t_end)
self.t_stop = self.t_end
if f_start >= self.f_begin and f_start < self.f_end:
self.f_start = f_start
else:
if init is False or f_start:
logger.warning('Setting f_start = %f, since f_start not given or not valid.'%self.f_begin)
self.f_start = self.f_begin
if f_stop <= self.f_end and f_stop > self.f_begin:
self.f_stop = f_stop
else:
if init is False or f_stop:
logger.warning('Setting f_stop = %f, since f_stop not given or not valid.'%self.f_end)
self.f_stop = self.f_end
# Now we have setup bounds, we can calculate shape of selection
self.selection_shape = self._calc_selection_shape()
|
def _setup_selection_range(self, f_start=None, f_stop=None, t_start=None, t_stop=None, init=False):
"""Making sure the selection if time and frequency are within the file limits.
Args:
        init (bool): If called during __init__
"""
# This avoids resetting values
if init is True:
if t_start is None:
t_start = self.t_begin
if t_stop is None:
t_stop = self.t_end
if f_start is None:
f_start = self.f_begin
if f_stop is None:
f_stop = self.f_end
else:
if f_start is None:
f_start = self.f_start
if f_stop is None:
f_stop = self.f_stop
if t_start is None:
t_start = self.t_start
if t_stop is None:
t_stop = self.t_stop
# By now, all values start/stop are populated.
if t_stop >= 0 and t_start >= 0 and t_stop < t_start:
t_stop, t_start = t_start,t_stop
logger.warning('Given t_stop < t_start, assuming reversed values.')
if f_stop and f_start and f_stop < f_start:
f_stop, f_start = f_start,f_stop
logger.warning('Given f_stop < f_start, assuming reversed values.')
if t_start >= self.t_begin and t_start < self.t_end:
self.t_start = int(t_start)
else:
if init is False or t_start != None:
logger.warning('Setting t_start = %f, since t_start not given or not valid.'%self.t_begin)
self.t_start = self.t_begin
if t_stop <= self.t_end and t_stop > self.t_begin:
self.t_stop = int(t_stop)
else:
if init is False or t_stop:
logger.warning('Setting t_stop = %f, since t_stop not given or not valid.'%self.t_end)
self.t_stop = self.t_end
if f_start >= self.f_begin and f_start < self.f_end:
self.f_start = f_start
else:
if init is False or f_start:
logger.warning('Setting f_start = %f, since f_start not given or not valid.'%self.f_begin)
self.f_start = self.f_begin
if f_stop <= self.f_end and f_stop > self.f_begin:
self.f_stop = f_stop
else:
if init is False or f_stop:
logger.warning('Setting f_stop = %f, since f_stop not given or not valid.'%self.f_end)
self.f_stop = self.f_end
# Now we have setup bounds, we can calculate shape of selection
self.selection_shape = self._calc_selection_shape()
|
[
"Making",
"sure",
"the",
"selection",
"if",
"time",
"and",
"frequency",
"are",
"within",
"the",
"file",
"limits",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L45-L110
|
[
"def",
"_setup_selection_range",
"(",
"self",
",",
"f_start",
"=",
"None",
",",
"f_stop",
"=",
"None",
",",
"t_start",
"=",
"None",
",",
"t_stop",
"=",
"None",
",",
"init",
"=",
"False",
")",
":",
"# This avoids resetting values",
"if",
"init",
"is",
"True",
":",
"if",
"t_start",
"is",
"None",
":",
"t_start",
"=",
"self",
".",
"t_begin",
"if",
"t_stop",
"is",
"None",
":",
"t_stop",
"=",
"self",
".",
"t_end",
"if",
"f_start",
"is",
"None",
":",
"f_start",
"=",
"self",
".",
"f_begin",
"if",
"f_stop",
"is",
"None",
":",
"f_stop",
"=",
"self",
".",
"f_end",
"else",
":",
"if",
"f_start",
"is",
"None",
":",
"f_start",
"=",
"self",
".",
"f_start",
"if",
"f_stop",
"is",
"None",
":",
"f_stop",
"=",
"self",
".",
"f_stop",
"if",
"t_start",
"is",
"None",
":",
"t_start",
"=",
"self",
".",
"t_start",
"if",
"t_stop",
"is",
"None",
":",
"t_stop",
"=",
"self",
".",
"t_stop",
"# By now, all values start/stop are populated.",
"if",
"t_stop",
">=",
"0",
"and",
"t_start",
">=",
"0",
"and",
"t_stop",
"<",
"t_start",
":",
"t_stop",
",",
"t_start",
"=",
"t_start",
",",
"t_stop",
"logger",
".",
"warning",
"(",
"'Given t_stop < t_start, assuming reversed values.'",
")",
"if",
"f_stop",
"and",
"f_start",
"and",
"f_stop",
"<",
"f_start",
":",
"f_stop",
",",
"f_start",
"=",
"f_start",
",",
"f_stop",
"logger",
".",
"warning",
"(",
"'Given f_stop < f_start, assuming reversed values.'",
")",
"if",
"t_start",
">=",
"self",
".",
"t_begin",
"and",
"t_start",
"<",
"self",
".",
"t_end",
":",
"self",
".",
"t_start",
"=",
"int",
"(",
"t_start",
")",
"else",
":",
"if",
"init",
"is",
"False",
"or",
"t_start",
"!=",
"None",
":",
"logger",
".",
"warning",
"(",
"'Setting t_start = %f, since t_start not given or not valid.'",
"%",
"self",
".",
"t_begin",
")",
"self",
".",
"t_start",
"=",
"self",
".",
"t_begin",
"if",
"t_stop",
"<=",
"self",
".",
"t_end",
"and",
"t_stop",
">",
"self",
".",
"t_begin",
":",
"self",
".",
"t_stop",
"=",
"int",
"(",
"t_stop",
")",
"else",
":",
"if",
"init",
"is",
"False",
"or",
"t_stop",
":",
"logger",
".",
"warning",
"(",
"'Setting t_stop = %f, since t_stop not given or not valid.'",
"%",
"self",
".",
"t_end",
")",
"self",
".",
"t_stop",
"=",
"self",
".",
"t_end",
"if",
"f_start",
">=",
"self",
".",
"f_begin",
"and",
"f_start",
"<",
"self",
".",
"f_end",
":",
"self",
".",
"f_start",
"=",
"f_start",
"else",
":",
"if",
"init",
"is",
"False",
"or",
"f_start",
":",
"logger",
".",
"warning",
"(",
"'Setting f_start = %f, since f_start not given or not valid.'",
"%",
"self",
".",
"f_begin",
")",
"self",
".",
"f_start",
"=",
"self",
".",
"f_begin",
"if",
"f_stop",
"<=",
"self",
".",
"f_end",
"and",
"f_stop",
">",
"self",
".",
"f_begin",
":",
"self",
".",
"f_stop",
"=",
"f_stop",
"else",
":",
"if",
"init",
"is",
"False",
"or",
"f_stop",
":",
"logger",
".",
"warning",
"(",
"'Setting f_stop = %f, since f_stop not given or not valid.'",
"%",
"self",
".",
"f_end",
")",
"self",
".",
"f_stop",
"=",
"self",
".",
"f_end",
"# Now we have setup bounds, we can calculate shape of selection",
"self",
".",
"selection_shape",
"=",
"self",
".",
"_calc_selection_shape",
"(",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
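The per-axis clamping rule in _setup_selection_range() can be summarised outside the class. A minimal sketch under simplified assumptions (the clamp_range helper is hypothetical and drops the init bookkeeping):

import logging

logger = logging.getLogger(__name__)

def clamp_range(start, stop, begin, end):
    # Swap reversed bounds, then fall back to the file limits whenever a
    # value is missing or falls outside [begin, end].
    if start is not None and stop is not None and stop < start:
        start, stop = stop, start
        logger.warning('Given stop < start, assuming reversed values.')
    if start is None or not (begin <= start < end):
        start = begin
    if stop is None or not (begin < stop <= end):
        stop = end
    return start, stop

print(clamp_range(120.0, 80.0, 0.0, 100.0))  # -> (80.0, 100.0)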
test
|
Reader._setup_dtype
|
Calculating dtype
|
blimpy/file_wrapper.py
|
def _setup_dtype(self):
"""Calculating dtype
"""
#Set up the data type
if self._n_bytes == 4:
return b'float32'
elif self._n_bytes == 2:
return b'uint16'
elif self._n_bytes == 1:
return b'uint8'
else:
logger.warning('Having trouble setting dtype, assuming float32.')
return b'float32'
|
def _setup_dtype(self):
"""Calculating dtype
"""
#Set up the data type
if self._n_bytes == 4:
return b'float32'
elif self._n_bytes == 2:
return b'uint16'
elif self._n_bytes == 1:
return b'uint8'
else:
logger.warning('Having trouble setting dtype, assuming float32.')
return b'float32'
|
[
"Calculating",
"dtype"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L118-L131
|
[
"def",
"_setup_dtype",
"(",
"self",
")",
":",
"#Set up the data type",
"if",
"self",
".",
"_n_bytes",
"==",
"4",
":",
"return",
"b'float32'",
"elif",
"self",
".",
"_n_bytes",
"==",
"2",
":",
"return",
"b'uint16'",
"elif",
"self",
".",
"_n_bytes",
"==",
"1",
":",
"return",
"b'uint8'",
"else",
":",
"logger",
".",
"warning",
"(",
"'Having trouble setting dtype, assuming float32.'",
")",
"return",
"b'float32'"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
test
|
Reader._calc_selection_size
|
Calculate size of data of interest.
|
blimpy/file_wrapper.py
|
def _calc_selection_size(self):
"""Calculate size of data of interest.
"""
#Check to see how many integrations requested
n_ints = self.t_stop - self.t_start
#Check to see how many frequency channels requested
n_chan = (self.f_stop - self.f_start) / abs(self.header[b'foff'])
n_bytes = self._n_bytes
selection_size = int(n_ints*n_chan*n_bytes)
return selection_size
|
def _calc_selection_size(self):
"""Calculate size of data of interest.
"""
#Check to see how many integrations requested
n_ints = self.t_stop - self.t_start
#Check to see how many frequency channels requested
n_chan = (self.f_stop - self.f_start) / abs(self.header[b'foff'])
n_bytes = self._n_bytes
selection_size = int(n_ints*n_chan*n_bytes)
return selection_size
|
[
"Calculate",
"size",
"of",
"data",
"of",
"interest",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L133-L145
|
[
"def",
"_calc_selection_size",
"(",
"self",
")",
":",
"#Check to see how many integrations requested",
"n_ints",
"=",
"self",
".",
"t_stop",
"-",
"self",
".",
"t_start",
"#Check to see how many frequency channels requested",
"n_chan",
"=",
"(",
"self",
".",
"f_stop",
"-",
"self",
".",
"f_start",
")",
"/",
"abs",
"(",
"self",
".",
"header",
"[",
"b'foff'",
"]",
")",
"n_bytes",
"=",
"self",
".",
"_n_bytes",
"selection_size",
"=",
"int",
"(",
"n_ints",
"*",
"n_chan",
"*",
"n_bytes",
")",
"return",
"selection_size"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
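The selection size is plain arithmetic: integrations times channels times bytes per sample. A worked example with assumed header values:

# 16 integrations over a 1 MHz band with ~2.79 Hz channels, float32 samples:
n_ints = 16
n_chan = 1.0 / 2.7939677238464355e-06  # ~357913.94 channels
n_bytes = 4
print(int(n_ints * n_chan * n_bytes))  # -> 22906492 bytes, about 21.8 MiB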
test
|
Reader._calc_selection_shape
|
Calculate shape of data of interest.
|
blimpy/file_wrapper.py
|
def _calc_selection_shape(self):
"""Calculate shape of data of interest.
"""
#Check how many integrations requested
n_ints = int(self.t_stop - self.t_start)
#Check how many frequency channels requested
n_chan = int(np.round((self.f_stop - self.f_start) / abs(self.header[b'foff'])))
selection_shape = (n_ints,int(self.header[b'nifs']),n_chan)
return selection_shape
|
def _calc_selection_shape(self):
"""Calculate shape of data of interest.
"""
#Check how many integrations requested
n_ints = int(self.t_stop - self.t_start)
#Check how many frequency channels requested
n_chan = int(np.round((self.f_stop - self.f_start) / abs(self.header[b'foff'])))
selection_shape = (n_ints,int(self.header[b'nifs']),n_chan)
return selection_shape
|
[
"Calculate",
"shape",
"of",
"data",
"of",
"interest",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L147-L158
|
[
"def",
"_calc_selection_shape",
"(",
"self",
")",
":",
"#Check how many integrations requested",
"n_ints",
"=",
"int",
"(",
"self",
".",
"t_stop",
"-",
"self",
".",
"t_start",
")",
"#Check how many frequency channels requested",
"n_chan",
"=",
"int",
"(",
"np",
".",
"round",
"(",
"(",
"self",
".",
"f_stop",
"-",
"self",
".",
"f_start",
")",
"/",
"abs",
"(",
"self",
".",
"header",
"[",
"b'foff'",
"]",
")",
")",
")",
"selection_shape",
"=",
"(",
"n_ints",
",",
"int",
"(",
"self",
".",
"header",
"[",
"b'nifs'",
"]",
")",
",",
"n_chan",
")",
"return",
"selection_shape"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
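The shape counterpart rounds the channel count instead of truncating the product. Continuing the assumed numbers from the size example above:

import numpy as np

n_ints = int(16)
n_chan = int(np.round(1.0 / 2.7939677238464355e-06))  # -> 357914
nifs = 1
print((n_ints, nifs, n_chan))  # -> (16, 1, 357914)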
test
|
Reader._setup_chans
|
Setup channel borders
|
blimpy/file_wrapper.py
|
def _setup_chans(self):
"""Setup channel borders
"""
if self.header[b'foff'] < 0:
f0 = self.f_end
else:
f0 = self.f_begin
i_start, i_stop = 0, self.n_channels_in_file
if self.f_start:
i_start = np.round((self.f_start - f0) / self.header[b'foff'])
if self.f_stop:
i_stop = np.round((self.f_stop - f0) / self.header[b'foff'])
#calculate closest true index value
chan_start_idx = np.int(i_start)
chan_stop_idx = np.int(i_stop)
if chan_stop_idx < chan_start_idx:
chan_stop_idx, chan_start_idx = chan_start_idx,chan_stop_idx
self.chan_start_idx = chan_start_idx
self.chan_stop_idx = chan_stop_idx
|
def _setup_chans(self):
"""Setup channel borders
"""
if self.header[b'foff'] < 0:
f0 = self.f_end
else:
f0 = self.f_begin
i_start, i_stop = 0, self.n_channels_in_file
if self.f_start:
i_start = np.round((self.f_start - f0) / self.header[b'foff'])
if self.f_stop:
i_stop = np.round((self.f_stop - f0) / self.header[b'foff'])
#calculate closest true index value
chan_start_idx = np.int(i_start)
chan_stop_idx = np.int(i_stop)
if chan_stop_idx < chan_start_idx:
chan_stop_idx, chan_start_idx = chan_start_idx,chan_stop_idx
self.chan_start_idx = chan_start_idx
self.chan_stop_idx = chan_stop_idx
|
[
"Setup",
"channel",
"borders"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L160-L183
|
[
"def",
"_setup_chans",
"(",
"self",
")",
":",
"if",
"self",
".",
"header",
"[",
"b'foff'",
"]",
"<",
"0",
":",
"f0",
"=",
"self",
".",
"f_end",
"else",
":",
"f0",
"=",
"self",
".",
"f_begin",
"i_start",
",",
"i_stop",
"=",
"0",
",",
"self",
".",
"n_channels_in_file",
"if",
"self",
".",
"f_start",
":",
"i_start",
"=",
"np",
".",
"round",
"(",
"(",
"self",
".",
"f_start",
"-",
"f0",
")",
"/",
"self",
".",
"header",
"[",
"b'foff'",
"]",
")",
"if",
"self",
".",
"f_stop",
":",
"i_stop",
"=",
"np",
".",
"round",
"(",
"(",
"self",
".",
"f_stop",
"-",
"f0",
")",
"/",
"self",
".",
"header",
"[",
"b'foff'",
"]",
")",
"#calculate closest true index value",
"chan_start_idx",
"=",
"np",
".",
"int",
"(",
"i_start",
")",
"chan_stop_idx",
"=",
"np",
".",
"int",
"(",
"i_stop",
")",
"if",
"chan_stop_idx",
"<",
"chan_start_idx",
":",
"chan_stop_idx",
",",
"chan_start_idx",
"=",
"chan_start_idx",
",",
"chan_stop_idx",
"self",
".",
"chan_start_idx",
"=",
"chan_start_idx",
"self",
".",
"chan_stop_idx",
"=",
"chan_stop_idx"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
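Channel indices come from rounding each requested frequency against f0, the file edge on the fch1 side. A worked sketch with assumed values; note that np.int, used above, was removed in NumPy 1.24, so the equivalent below uses the builtin int:

import numpy as np

f_end = 1500.0
foff = -2.9296875e-3  # negative foff: fch1 is the highest frequency
f0 = f_end            # the foff < 0 branch above

f_start, f_stop = 1100.0, 1101.0
i_start = int(np.round((f_start - f0) / foff))  # -> 136533
i_stop = int(np.round((f_stop - f0) / foff))    # -> 136192
chan_start_idx, chan_stop_idx = sorted((i_start, i_stop))
print(chan_start_idx, chan_stop_idx)            # -> 136192 136533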
test
|
Reader._setup_freqs
|
Updating frequency borders from channel values
|
blimpy/file_wrapper.py
|
def _setup_freqs(self):
"""Updating frequency borders from channel values
"""
if self.header[b'foff'] > 0:
self.f_start = self.f_begin + self.chan_start_idx*abs(self.header[b'foff'])
self.f_stop = self.f_begin + self.chan_stop_idx*abs(self.header[b'foff'])
else:
self.f_start = self.f_end - self.chan_stop_idx*abs(self.header[b'foff'])
self.f_stop = self.f_end - self.chan_start_idx*abs(self.header[b'foff'])
|
def _setup_freqs(self):
"""Updating frequency borders from channel values
"""
if self.header[b'foff'] > 0:
self.f_start = self.f_begin + self.chan_start_idx*abs(self.header[b'foff'])
self.f_stop = self.f_begin + self.chan_stop_idx*abs(self.header[b'foff'])
else:
self.f_start = self.f_end - self.chan_stop_idx*abs(self.header[b'foff'])
self.f_stop = self.f_end - self.chan_start_idx*abs(self.header[b'foff'])
|
[
"Updating",
"frequency",
"borders",
"from",
"channel",
"values"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L185-L194
|
[
"def",
"_setup_freqs",
"(",
"self",
")",
":",
"if",
"self",
".",
"header",
"[",
"b'foff'",
"]",
">",
"0",
":",
"self",
".",
"f_start",
"=",
"self",
".",
"f_begin",
"+",
"self",
".",
"chan_start_idx",
"*",
"abs",
"(",
"self",
".",
"header",
"[",
"b'foff'",
"]",
")",
"self",
".",
"f_stop",
"=",
"self",
".",
"f_begin",
"+",
"self",
".",
"chan_stop_idx",
"*",
"abs",
"(",
"self",
".",
"header",
"[",
"b'foff'",
"]",
")",
"else",
":",
"self",
".",
"f_start",
"=",
"self",
".",
"f_end",
"-",
"self",
".",
"chan_stop_idx",
"*",
"abs",
"(",
"self",
".",
"header",
"[",
"b'foff'",
"]",
")",
"self",
".",
"f_stop",
"=",
"self",
".",
"f_end",
"-",
"self",
".",
"chan_start_idx",
"*",
"abs",
"(",
"self",
".",
"header",
"[",
"b'foff'",
"]",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
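The frequency borders are then recomputed from those channel indices, so f_start and f_stop land exactly on channel edges. Continuing the same assumed numbers from the _setup_chans sketch:

f_end = 1500.0
foff = -2.9296875e-3
chan_start_idx, chan_stop_idx = 136192, 136533

# foff < 0 branch: borders are measured down from f_end.
f_start = f_end - chan_stop_idx * abs(foff)
f_stop = f_end - chan_start_idx * abs(foff)
print(f_start, f_stop)  # -> 1100.0009765625 1101.0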
test
|
Reader.populate_timestamps
|
Populate time axis.
If update_header, then only return tstart
|
blimpy/file_wrapper.py
|
def populate_timestamps(self,update_header=False):
""" Populate time axis.
    If update_header, then only return tstart
"""
#Check to see how many integrations requested
ii_start, ii_stop = 0, self.n_ints_in_file
if self.t_start:
ii_start = self.t_start
if self.t_stop:
ii_stop = self.t_stop
## Setup time axis
t0 = self.header[b'tstart']
t_delt = self.header[b'tsamp']
if update_header:
timestamps = ii_start * t_delt / 24./60./60. + t0
else:
timestamps = np.arange(ii_start, ii_stop) * t_delt / 24./60./60. + t0
return timestamps
|
def populate_timestamps(self,update_header=False):
""" Populate time axis.
    If update_header, then only return tstart
"""
#Check to see how many integrations requested
ii_start, ii_stop = 0, self.n_ints_in_file
if self.t_start:
ii_start = self.t_start
if self.t_stop:
ii_stop = self.t_stop
## Setup time axis
t0 = self.header[b'tstart']
t_delt = self.header[b'tsamp']
if update_header:
timestamps = ii_start * t_delt / 24./60./60. + t0
else:
timestamps = np.arange(ii_start, ii_stop) * t_delt / 24./60./60. + t0
return timestamps
|
[
"Populate",
"time",
"axis",
".",
"IF",
"update_header",
"then",
"only",
"return",
"tstart"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L196-L217
|
[
"def",
"populate_timestamps",
"(",
"self",
",",
"update_header",
"=",
"False",
")",
":",
"#Check to see how many integrations requested",
"ii_start",
",",
"ii_stop",
"=",
"0",
",",
"self",
".",
"n_ints_in_file",
"if",
"self",
".",
"t_start",
":",
"ii_start",
"=",
"self",
".",
"t_start",
"if",
"self",
".",
"t_stop",
":",
"ii_stop",
"=",
"self",
".",
"t_stop",
"## Setup time axis",
"t0",
"=",
"self",
".",
"header",
"[",
"b'tstart'",
"]",
"t_delt",
"=",
"self",
".",
"header",
"[",
"b'tsamp'",
"]",
"if",
"update_header",
":",
"timestamps",
"=",
"ii_start",
"*",
"t_delt",
"/",
"24.",
"/",
"60.",
"/",
"60.",
"+",
"t0",
"else",
":",
"timestamps",
"=",
"np",
".",
"arange",
"(",
"ii_start",
",",
"ii_stop",
")",
"*",
"t_delt",
"/",
"24.",
"/",
"60.",
"/",
"60.",
"+",
"t0",
"return",
"timestamps"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
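Timestamps are Modified Julian Dates: tsamp (seconds) is converted to days before being added to the header tstart. A worked sketch with assumed header values:

import numpy as np

tstart = 57650.78209490741  # MJD of the first integration (assumed)
tsamp = 18.253611008        # seconds per integration (assumed)

timestamps = np.arange(0, 4) * tsamp / 24. / 60. / 60. + tstart
print(timestamps)  # -> approximately [57650.78209491 57650.78230618 ...]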
test
|
Reader.populate_freqs
|
Populate frequency axis
|
blimpy/file_wrapper.py
|
def populate_freqs(self):
"""
Populate frequency axis
"""
if self.header[b'foff'] < 0:
f0 = self.f_end
else:
f0 = self.f_begin
self._setup_chans()
#create freq array
i_vals = np.arange(self.chan_start_idx, self.chan_stop_idx)
freqs = self.header[b'foff'] * i_vals + f0
return freqs
|
def populate_freqs(self):
"""
Populate frequency axis
"""
if self.header[b'foff'] < 0:
f0 = self.f_end
else:
f0 = self.f_begin
self._setup_chans()
#create freq array
i_vals = np.arange(self.chan_start_idx, self.chan_stop_idx)
freqs = self.header[b'foff'] * i_vals + f0
return freqs
|
[
"Populate",
"frequency",
"axis"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L219-L235
|
[
"def",
"populate_freqs",
"(",
"self",
")",
":",
"if",
"self",
".",
"header",
"[",
"b'foff'",
"]",
"<",
"0",
":",
"f0",
"=",
"self",
".",
"f_end",
"else",
":",
"f0",
"=",
"self",
".",
"f_begin",
"self",
".",
"_setup_chans",
"(",
")",
"#create freq array",
"i_vals",
"=",
"np",
".",
"arange",
"(",
"self",
".",
"chan_start_idx",
",",
"self",
".",
"chan_stop_idx",
")",
"freqs",
"=",
"self",
".",
"header",
"[",
"b'foff'",
"]",
"*",
"i_vals",
"+",
"f0",
"return",
"freqs"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
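The frequency axis is then just foff times the channel indices, anchored at f0. Under the same assumed values as the _setup_chans sketch:

import numpy as np

foff, f0 = -2.9296875e-3, 1500.0
i_vals = np.arange(136192, 136196)  # first few selected channels
print(foff * i_vals + f0)
# -> approximately [1101. 1100.99707031 1100.99414062 1100.99121094]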
test
|
Reader.calc_n_coarse_chan
|
This makes an attempt to calculate the number of coarse channels in a given file.
Note:
This is unlikely to work on non-Breakthrough Listen data, as a-priori knowledge of
the digitizer system is required.
|
blimpy/file_wrapper.py
|
def calc_n_coarse_chan(self, chan_bw=None):
""" This makes an attempt to calculate the number of coarse channels in a given file.
Note:
This is unlikely to work on non-Breakthrough Listen data, as a-priori knowledge of
the digitizer system is required.
"""
nchans = int(self.header[b'nchans'])
# Do we have a file with enough channels that it has coarse channelization?
if chan_bw is not None:
bandwidth = abs(self.f_stop - self.f_start)
n_coarse_chan = int(bandwidth / chan_bw)
return n_coarse_chan
elif nchans >= 2**20:
# Does the common FFT length of 2^20 divide through without a remainder?
# This should work for most GBT and all Parkes hires data
if nchans % 2**20 == 0:
n_coarse_chan = nchans // 2**20
return n_coarse_chan
# Early GBT data has non-2^N FFT length, check if it is GBT data
elif self.header[b'telescope_id'] == 6:
coarse_chan_bw = 2.9296875
bandwidth = abs(self.f_stop - self.f_start)
n_coarse_chan = int(bandwidth / coarse_chan_bw)
return n_coarse_chan
else:
logger.warning("Couldn't figure out n_coarse_chan")
elif self.header[b'telescope_id'] == 6 and nchans < 2**20:
#For GBT non-hires data
coarse_chan_bw = 2.9296875
bandwidth = abs(self.f_stop - self.f_start)
n_coarse_chan = int(bandwidth / coarse_chan_bw)
return n_coarse_chan
else:
logger.warning("This function currently only works for hires BL Parkes or GBT data.")
|
def calc_n_coarse_chan(self, chan_bw=None):
""" This makes an attempt to calculate the number of coarse channels in a given file.
Note:
This is unlikely to work on non-Breakthrough Listen data, as a-priori knowledge of
the digitizer system is required.
"""
nchans = int(self.header[b'nchans'])
# Do we have a file with enough channels that it has coarse channelization?
if chan_bw is not None:
bandwidth = abs(self.f_stop - self.f_start)
n_coarse_chan = int(bandwidth / chan_bw)
return n_coarse_chan
elif nchans >= 2**20:
# Does the common FFT length of 2^20 divide through without a remainder?
# This should work for most GBT and all Parkes hires data
if nchans % 2**20 == 0:
n_coarse_chan = nchans // 2**20
return n_coarse_chan
# Early GBT data has non-2^N FFT length, check if it is GBT data
elif self.header[b'telescope_id'] == 6:
coarse_chan_bw = 2.9296875
bandwidth = abs(self.f_stop - self.f_start)
n_coarse_chan = int(bandwidth / coarse_chan_bw)
return n_coarse_chan
else:
logger.warning("Couldn't figure out n_coarse_chan")
elif self.header[b'telescope_id'] == 6 and nchans < 2**20:
#For GBT non-hires data
coarse_chan_bw = 2.9296875
bandwidth = abs(self.f_stop - self.f_start)
n_coarse_chan = int(bandwidth / coarse_chan_bw)
return n_coarse_chan
else:
logger.warning("This function currently only works for hires BL Parkes or GBT data.")
|
[
"This",
"makes",
"an",
"attempt",
"to",
"calculate",
"the",
"number",
"of",
"coarse",
"channels",
"in",
"a",
"given",
"file",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L237-L272
|
[
"def",
"calc_n_coarse_chan",
"(",
"self",
",",
"chan_bw",
"=",
"None",
")",
":",
"nchans",
"=",
"int",
"(",
"self",
".",
"header",
"[",
"b'nchans'",
"]",
")",
"# Do we have a file with enough channels that it has coarse channelization?",
"if",
"chan_bw",
"is",
"not",
"None",
":",
"bandwidth",
"=",
"abs",
"(",
"self",
".",
"f_stop",
"-",
"self",
".",
"f_start",
")",
"n_coarse_chan",
"=",
"int",
"(",
"bandwidth",
"/",
"chan_bw",
")",
"return",
"n_coarse_chan",
"elif",
"nchans",
">=",
"2",
"**",
"20",
":",
"# Does the common FFT length of 2^20 divide through without a remainder?",
"# This should work for most GBT and all Parkes hires data",
"if",
"nchans",
"%",
"2",
"**",
"20",
"==",
"0",
":",
"n_coarse_chan",
"=",
"nchans",
"//",
"2",
"**",
"20",
"return",
"n_coarse_chan",
"# Early GBT data has non-2^N FFT length, check if it is GBT data",
"elif",
"self",
".",
"header",
"[",
"b'telescope_id'",
"]",
"==",
"6",
":",
"coarse_chan_bw",
"=",
"2.9296875",
"bandwidth",
"=",
"abs",
"(",
"self",
".",
"f_stop",
"-",
"self",
".",
"f_start",
")",
"n_coarse_chan",
"=",
"int",
"(",
"bandwidth",
"/",
"coarse_chan_bw",
")",
"return",
"n_coarse_chan",
"else",
":",
"logger",
".",
"warning",
"(",
"\"Couldn't figure out n_coarse_chan\"",
")",
"elif",
"self",
".",
"header",
"[",
"b'telescope_id'",
"]",
"==",
"6",
"and",
"nchans",
"<",
"2",
"**",
"20",
":",
"#For GBT non-hires data",
"coarse_chan_bw",
"=",
"2.9296875",
"bandwidth",
"=",
"abs",
"(",
"self",
".",
"f_stop",
"-",
"self",
".",
"f_start",
")",
"n_coarse_chan",
"=",
"int",
"(",
"bandwidth",
"/",
"coarse_chan_bw",
")",
"return",
"n_coarse_chan",
"else",
":",
"logger",
".",
"warning",
"(",
"\"This function currently only works for hires BL Parkes or GBT data.\"",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
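The branches above reduce to two rules: hires files carry a multiple of the 2**20-point FFT length, and GBT files (telescope_id 6) fall back to a fixed 2.9296875 MHz coarse-channel bandwidth. Two worked examples with assumed inputs:

nchans = 64 * 2**20                # an assumed hires product
print(nchans // 2**20)             # -> 64 coarse channels

bandwidth = 187.5                  # MHz spanned by an assumed GBT file
print(int(bandwidth / 2.9296875))  # -> 64 coarse channels again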
test
|
Reader.calc_n_blobs
|
Given the blob dimensions, calculate how many fit in the data selection.
|
blimpy/file_wrapper.py
|
def calc_n_blobs(self, blob_dim):
""" Given the blob dimensions, calculate how many fit in the data selection.
"""
n_blobs = int(np.ceil(1.0 * np.prod(self.selection_shape) / np.prod(blob_dim)))
return n_blobs
|
def calc_n_blobs(self, blob_dim):
""" Given the blob dimensions, calculate how many fit in the data selection.
"""
n_blobs = int(np.ceil(1.0 * np.prod(self.selection_shape) / np.prod(blob_dim)))
return n_blobs
|
[
"Given",
"the",
"blob",
"dimensions",
"calculate",
"how",
"many",
"fit",
"in",
"the",
"data",
"selection",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L274-L280
|
[
"def",
"calc_n_blobs",
"(",
"self",
",",
"blob_dim",
")",
":",
"n_blobs",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"1.0",
"*",
"np",
".",
"prod",
"(",
"self",
".",
"selection_shape",
")",
"/",
"np",
".",
"prod",
"(",
"blob_dim",
")",
")",
")",
"return",
"n_blobs"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
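A blob count is the ceiling of the selection volume over the blob volume. For example, with assumed shapes:

import numpy as np

selection_shape = (16, 1, 1048576)  # (time, nifs, frequency)
blob_dim = (16, 1, 65536)

n_blobs = int(np.ceil(1.0 * np.prod(selection_shape) / np.prod(blob_dim)))
print(n_blobs)  # -> 16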
test
|
Reader.isheavy
|
Check if the current selection is too large.
|
blimpy/file_wrapper.py
|
def isheavy(self):
""" Check if the current selection is too large.
"""
selection_size_bytes = self._calc_selection_size()
if selection_size_bytes > self.MAX_DATA_ARRAY_SIZE:
return True
else:
return False
|
def isheavy(self):
""" Check if the current selection is too large.
"""
selection_size_bytes = self._calc_selection_size()
if selection_size_bytes > self.MAX_DATA_ARRAY_SIZE:
return True
else:
return False
|
[
"Check",
"if",
"the",
"current",
"selection",
"is",
"too",
"large",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L282-L291
|
[
"def",
"isheavy",
"(",
"self",
")",
":",
"selection_size_bytes",
"=",
"self",
".",
"_calc_selection_size",
"(",
")",
"if",
"selection_size_bytes",
">",
"self",
".",
"MAX_DATA_ARRAY_SIZE",
":",
"return",
"True",
"else",
":",
"return",
"False"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
test
|
H5Reader.read_header
|
Read header and return a Python dictionary of key:value pairs
|
blimpy/file_wrapper.py
|
def read_header(self):
""" Read header and return a Python dictionary of key:value pairs
"""
self.header = {}
for key, val in self.h5['data'].attrs.items():
if six.PY3:
key = bytes(key, 'ascii')
if key == b'src_raj':
self.header[key] = Angle(val, unit='hr')
elif key == b'src_dej':
self.header[key] = Angle(val, unit='deg')
else:
self.header[key] = val
return self.header
|
def read_header(self):
""" Read header and return a Python dictionary of key:value pairs
"""
self.header = {}
for key, val in self.h5['data'].attrs.items():
if six.PY3:
key = bytes(key, 'ascii')
if key == b'src_raj':
self.header[key] = Angle(val, unit='hr')
elif key == b'src_dej':
self.header[key] = Angle(val, unit='deg')
else:
self.header[key] = val
return self.header
|
[
"Read",
"header",
"and",
"return",
"a",
"Python",
"dictionary",
"of",
"key",
":",
"value",
"pairs"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L383-L399
|
[
"def",
"read_header",
"(",
"self",
")",
":",
"self",
".",
"header",
"=",
"{",
"}",
"for",
"key",
",",
"val",
"in",
"self",
".",
"h5",
"[",
"'data'",
"]",
".",
"attrs",
".",
"items",
"(",
")",
":",
"if",
"six",
".",
"PY3",
":",
"key",
"=",
"bytes",
"(",
"key",
",",
"'ascii'",
")",
"if",
"key",
"==",
"b'src_raj'",
":",
"self",
".",
"header",
"[",
"key",
"]",
"=",
"Angle",
"(",
"val",
",",
"unit",
"=",
"'hr'",
")",
"elif",
"key",
"==",
"b'src_dej'",
":",
"self",
".",
"header",
"[",
"key",
"]",
"=",
"Angle",
"(",
"val",
",",
"unit",
"=",
"'deg'",
")",
"else",
":",
"self",
".",
"header",
"[",
"key",
"]",
"=",
"val",
"return",
"self",
".",
"header"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
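Header attributes are copied verbatim except the two coordinate keys, which become astropy Angle objects. A minimal sketch of that conversion with placeholder values:

from astropy.coordinates import Angle

attrs = {b'src_raj': 17.1612, b'src_dej': 12.1585, b'nchans': 1048576}
header = {}
for key, val in attrs.items():
    if key == b'src_raj':
        header[key] = Angle(val, unit='hr')   # right ascension in hours
    elif key == b'src_dej':
        header[key] = Angle(val, unit='deg')  # declination in degrees
    else:
        header[key] = val
print(header[b'src_raj'], header[b'nchans'])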
test
|
H5Reader._find_blob_start
|
Find first blob from selection.
|
blimpy/file_wrapper.py
|
def _find_blob_start(self, blob_dim, n_blob):
"""Find first blob from selection.
"""
#Convert input frequencies into what their corresponding channel number would be.
self._setup_chans()
#Check which is the blob time offset
blob_time_start = self.t_start + blob_dim[self.time_axis]*n_blob
#Check which is the blob frequency offset (in channels)
blob_freq_start = self.chan_start_idx + (blob_dim[self.freq_axis]*n_blob)%self.selection_shape[self.freq_axis]
blob_start = np.array([blob_time_start, 0, blob_freq_start])
return blob_start
|
def _find_blob_start(self, blob_dim, n_blob):
"""Find first blob from selection.
"""
#Convert input frequencies into what their corresponding channel number would be.
self._setup_chans()
#Check which is the blob time offset
blob_time_start = self.t_start + blob_dim[self.time_axis]*n_blob
#Check which is the blob frequency offset (in channels)
blob_freq_start = self.chan_start_idx + (blob_dim[self.freq_axis]*n_blob)%self.selection_shape[self.freq_axis]
blob_start = np.array([blob_time_start, 0, blob_freq_start])
return blob_start
|
[
"Find",
"first",
"blob",
"from",
"selection",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L401-L416
|
[
"def",
"_find_blob_start",
"(",
"self",
",",
"blob_dim",
",",
"n_blob",
")",
":",
"#Convert input frequencies into what their corresponding channel number would be.",
"self",
".",
"_setup_chans",
"(",
")",
"#Check which is the blob time offset",
"blob_time_start",
"=",
"self",
".",
"t_start",
"+",
"blob_dim",
"[",
"self",
".",
"time_axis",
"]",
"*",
"n_blob",
"#Check which is the blob frequency offset (in channels)",
"blob_freq_start",
"=",
"self",
".",
"chan_start_idx",
"+",
"(",
"blob_dim",
"[",
"self",
".",
"freq_axis",
"]",
"*",
"n_blob",
")",
"%",
"self",
".",
"selection_shape",
"[",
"self",
".",
"freq_axis",
"]",
"blob_start",
"=",
"np",
".",
"array",
"(",
"[",
"blob_time_start",
",",
"0",
",",
"blob_freq_start",
"]",
")",
"return",
"blob_start"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
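Blob n's origin advances in time by whole blobs while the frequency offset wraps modulo the selection width. A sketch of the same arithmetic with assumed values (time_axis=0, freq_axis=2, as the readers use):

import numpy as np

t_start, chan_start_idx = 0, 136192
selection_shape = (16, 1, 1024)
blob_dim = (4, 1, 1024)  # each blob spans the full frequency selection
time_axis, freq_axis = 0, 2

for n_blob in range(2):
    blob_time_start = t_start + blob_dim[time_axis] * n_blob
    blob_freq_start = chan_start_idx + (blob_dim[freq_axis] * n_blob) % selection_shape[freq_axis]
    print([blob_time_start, 0, blob_freq_start])
# -> [0, 0, 136192] then [4, 0, 136192]: time advances, frequency stays put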
test
|
H5Reader.read_data
|
Read data
|
blimpy/file_wrapper.py
|
def read_data(self, f_start=None, f_stop=None,t_start=None, t_stop=None):
""" Read data
"""
self._setup_selection_range(f_start=f_start, f_stop=f_stop, t_start=t_start, t_stop=t_stop)
#check if selection is small enough.
if self.isheavy():
logger.warning("Selection size of %.2f GB, exceeding our size limit %.2f GB. Instance created, header loaded, but data not loaded, please try another (t,v) selection." % (self._calc_selection_size() / (1024. ** 3), self.MAX_DATA_ARRAY_SIZE / (1024. ** 3)))
self.data = np.array([0],dtype=self._d_type)
return None
#Convert input frequencies into what their corresponding channel number would be.
self._setup_chans()
#Update frequencies ranges from channel number.
self._setup_freqs()
self.data = self.h5["data"][self.t_start:self.t_stop,:,self.chan_start_idx:self.chan_stop_idx]
|
def read_data(self, f_start=None, f_stop=None,t_start=None, t_stop=None):
""" Read data
"""
self._setup_selection_range(f_start=f_start, f_stop=f_stop, t_start=t_start, t_stop=t_stop)
#check if selection is small enough.
if self.isheavy():
logger.warning("Selection size of %.2f GB, exceeding our size limit %.2f GB. Instance created, header loaded, but data not loaded, please try another (t,v) selection." % (self._calc_selection_size() / (1024. ** 3), self.MAX_DATA_ARRAY_SIZE / (1024. ** 3)))
self.data = np.array([0],dtype=self._d_type)
return None
#Convert input frequencies into what their corresponding channel number would be.
self._setup_chans()
#Update frequencies ranges from channel number.
self._setup_freqs()
self.data = self.h5["data"][self.t_start:self.t_stop,:,self.chan_start_idx:self.chan_stop_idx]
|
[
"Read",
"data"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L418-L435
|
[
"def",
"read_data",
"(",
"self",
",",
"f_start",
"=",
"None",
",",
"f_stop",
"=",
"None",
",",
"t_start",
"=",
"None",
",",
"t_stop",
"=",
"None",
")",
":",
"self",
".",
"_setup_selection_range",
"(",
"f_start",
"=",
"f_start",
",",
"f_stop",
"=",
"f_stop",
",",
"t_start",
"=",
"t_start",
",",
"t_stop",
"=",
"t_stop",
")",
"#check if selection is small enough.",
"if",
"self",
".",
"isheavy",
"(",
")",
":",
"logger",
".",
"warning",
"(",
"\"Selection size of %.2f GB, exceeding our size limit %.2f GB. Instance created, header loaded, but data not loaded, please try another (t,v) selection.\"",
"%",
"(",
"self",
".",
"_calc_selection_size",
"(",
")",
"/",
"(",
"1024.",
"**",
"3",
")",
",",
"self",
".",
"MAX_DATA_ARRAY_SIZE",
"/",
"(",
"1024.",
"**",
"3",
")",
")",
")",
"self",
".",
"data",
"=",
"np",
".",
"array",
"(",
"[",
"0",
"]",
",",
"dtype",
"=",
"self",
".",
"_d_type",
")",
"return",
"None",
"#Convert input frequencies into what their corresponding channel number would be.",
"self",
".",
"_setup_chans",
"(",
")",
"#Update frequencies ranges from channel number.",
"self",
".",
"_setup_freqs",
"(",
")",
"self",
".",
"data",
"=",
"self",
".",
"h5",
"[",
"\"data\"",
"]",
"[",
"self",
".",
"t_start",
":",
"self",
".",
"t_stop",
",",
":",
",",
"self",
".",
"chan_start_idx",
":",
"self",
".",
"chan_stop_idx",
"]"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
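read_data() is usually driven indirectly, but it can be called with a fresh selection. A usage sketch under the same placeholder filename as above; note that an over-heavy selection only logs a warning and leaves a one-element placeholder in self.data:

from blimpy.file_wrapper import open_file

reader = open_file('voyager.h5', max_load=1.0)  # placeholder filename
reader.read_data(f_start=8419.29, f_stop=8419.32)
if reader.data.size > 1:
    print(reader.data.shape)  # (n_ints, nifs, n_chan) for the selection
else:
    print('selection too heavy: narrow f_start/f_stop or raise max_load')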
test
|
H5Reader.read_blob
|
Read blob from a selection.
|
blimpy/file_wrapper.py
|
def read_blob(self,blob_dim,n_blob=0):
"""Read blob from a selection.
"""
n_blobs = self.calc_n_blobs(blob_dim)
if n_blob > n_blobs or n_blob < 0:
            raise ValueError('Please provide a correct n_blob value. Given %i, but max value is %i'%(n_blob,n_blobs))
#This prevents issues when the last blob is smaller than the others in time
if blob_dim[self.time_axis]*(n_blob+1) > self.selection_shape[self.time_axis]:
updated_blob_dim = (self.selection_shape[self.time_axis] - blob_dim[self.time_axis]*n_blob, 1, blob_dim[self.freq_axis])
else:
updated_blob_dim = [int(i) for i in blob_dim]
blob_start = self._find_blob_start(blob_dim, n_blob)
blob_end = blob_start + np.array(updated_blob_dim)
blob = self.h5["data"][int(blob_start[self.time_axis]):int(blob_end[self.time_axis]),
:,
int(blob_start[self.freq_axis]):int(blob_end[self.freq_axis])
]
# if self.header[b'foff'] < 0:
# blob = blob[:,:,::-1]
return blob
|
def read_blob(self,blob_dim,n_blob=0):
"""Read blob from a selection.
"""
n_blobs = self.calc_n_blobs(blob_dim)
if n_blob > n_blobs or n_blob < 0:
            raise ValueError('Please provide a correct n_blob value. Given %i, but max value is %i'%(n_blob,n_blobs))
#This prevents issues when the last blob is smaller than the others in time
if blob_dim[self.time_axis]*(n_blob+1) > self.selection_shape[self.time_axis]:
updated_blob_dim = (self.selection_shape[self.time_axis] - blob_dim[self.time_axis]*n_blob, 1, blob_dim[self.freq_axis])
else:
updated_blob_dim = [int(i) for i in blob_dim]
blob_start = self._find_blob_start(blob_dim, n_blob)
blob_end = blob_start + np.array(updated_blob_dim)
blob = self.h5["data"][int(blob_start[self.time_axis]):int(blob_end[self.time_axis]),
:,
int(blob_start[self.freq_axis]):int(blob_end[self.freq_axis])
]
# if self.header[b'foff'] < 0:
# blob = blob[:,:,::-1]
return blob
|
[
"Read",
"blob",
"from",
"a",
"selection",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L437-L462
|
[
"def",
"read_blob",
"(",
"self",
",",
"blob_dim",
",",
"n_blob",
"=",
"0",
")",
":",
"n_blobs",
"=",
"self",
".",
"calc_n_blobs",
"(",
"blob_dim",
")",
"if",
"n_blob",
">",
"n_blobs",
"or",
"n_blob",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'Please provide correct n_blob value. Given %i, but max values is %i'",
"%",
"(",
"n_blob",
",",
"n_blobs",
")",
")",
"#This prevents issues when the last blob is smaller than the others in time",
"if",
"blob_dim",
"[",
"self",
".",
"time_axis",
"]",
"*",
"(",
"n_blob",
"+",
"1",
")",
">",
"self",
".",
"selection_shape",
"[",
"self",
".",
"time_axis",
"]",
":",
"updated_blob_dim",
"=",
"(",
"self",
".",
"selection_shape",
"[",
"self",
".",
"time_axis",
"]",
"-",
"blob_dim",
"[",
"self",
".",
"time_axis",
"]",
"*",
"n_blob",
",",
"1",
",",
"blob_dim",
"[",
"self",
".",
"freq_axis",
"]",
")",
"else",
":",
"updated_blob_dim",
"=",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"blob_dim",
"]",
"blob_start",
"=",
"self",
".",
"_find_blob_start",
"(",
"blob_dim",
",",
"n_blob",
")",
"blob_end",
"=",
"blob_start",
"+",
"np",
".",
"array",
"(",
"updated_blob_dim",
")",
"blob",
"=",
"self",
".",
"h5",
"[",
"\"data\"",
"]",
"[",
"int",
"(",
"blob_start",
"[",
"self",
".",
"time_axis",
"]",
")",
":",
"int",
"(",
"blob_end",
"[",
"self",
".",
"time_axis",
"]",
")",
",",
":",
",",
"int",
"(",
"blob_start",
"[",
"self",
".",
"freq_axis",
"]",
")",
":",
"int",
"(",
"blob_end",
"[",
"self",
".",
"freq_axis",
"]",
")",
"]",
"# if self.header[b'foff'] < 0:",
"# blob = blob[:,:,::-1]",
"return",
"blob"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
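The subtle part of read_blob is clipping the final blob along the time axis when the selection is not an exact multiple of the blob length; the frequency extent stays fixed. A small sketch of that bounds arithmetic, assuming the (time, beam, frequency) layout used above (blob_bounds is a hypothetical helper):

import numpy as np

def blob_bounds(n_blob, blob_dim, selection_shape, time_axis=0):
    # Clip the last blob along time, exactly as read_blob() does above.
    t_len, _, f_len = blob_dim
    if t_len * (n_blob + 1) > selection_shape[time_axis]:
        t_len = selection_shape[time_axis] - blob_dim[time_axis] * n_blob
    start = np.array([blob_dim[time_axis] * n_blob, 0, 0])
    return start, start + np.array([t_len, 1, f_len])

start, end = blob_bounds(n_blob=3, blob_dim=(4, 1, 8), selection_shape=(14, 1, 8))
print(start, end)  # [12  0  0] [14  1  8] -- the last blob keeps only 2 time steps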
test
|
FilReader.read_header
|
Read blimpy header and return a Python dictionary of key:value pairs
Args:
filename (str): name of file to open
Optional args:
return_idxs (bool): Default False. If true, returns the file offset indexes
for values
Returns:
Python dict of key:value pairs, OR returns file offset indexes for values.
|
blimpy/file_wrapper.py
|
def read_header(self, return_idxs=False):
""" Read blimpy header and return a Python dictionary of key:value pairs
Args:
filename (str): name of file to open
Optional args:
return_idxs (bool): Default False. If true, returns the file offset indexes
for values
Returns:
Python dict of key:value pairs, OR returns file offset indexes for values.
"""
self.header = sigproc.read_header(self.filename, return_idxs=return_idxs)
return self.header
|
def read_header(self, return_idxs=False):
""" Read blimpy header and return a Python dictionary of key:value pairs
Args:
filename (str): name of file to open
Optional args:
return_idxs (bool): Default False. If true, returns the file offset indexes
for values
Returns:
Python dict of key:value pairs, OR returns file offset indexes for values.
"""
self.header = sigproc.read_header(self.filename, return_idxs=return_idxs)
return self.header
|
[
"Read",
"blimpy",
"header",
"and",
"return",
"a",
"Python",
"dictionary",
"of",
"key",
":",
"value",
"pairs"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L559-L574
|
[
"def",
"read_header",
"(",
"self",
",",
"return_idxs",
"=",
"False",
")",
":",
"self",
".",
"header",
"=",
"sigproc",
".",
"read_header",
"(",
"self",
".",
"filename",
",",
"return_idxs",
"=",
"return_idxs",
")",
"return",
"self",
".",
"header"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
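FilReader.read_header is a thin delegation to sigproc.read_header. For orientation, SIGPROC-style headers store each keyword as an int32-length-prefixed byte string between HEADER_START and HEADER_END markers, with binary values in between; the sketch below encodes and decodes one keyword with the standard struct module (put_kw is an illustrative helper, not part of blimpy):

import struct

def put_kw(name: bytes) -> bytes:
    return struct.pack('<i', len(name)) + name

raw = (put_kw(b'HEADER_START')
       + put_kw(b'nchans') + struct.pack('<i', 1024)
       + put_kw(b'HEADER_END'))

(n,) = struct.unpack('<i', raw[:4])
print(raw[4:4 + n])  # b'HEADER_START'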
test
|
FilReader.read_data
|
Read data.
|
blimpy/file_wrapper.py
|
def read_data(self, f_start=None, f_stop=None,t_start=None, t_stop=None):
""" Read data.
"""
self._setup_selection_range(f_start=f_start, f_stop=f_stop, t_start=t_start, t_stop=t_stop)
#check if selection is small enough.
if self.isheavy():
logger.warning("Selection size of %.2f GB, exceeding our size limit %.2f GB. Instance created, "
"header loaded, but data not loaded, please try another (t,v) selection." % (self._calc_selection_size() / (1024. ** 3), self.MAX_DATA_ARRAY_SIZE / (1024. ** 3)))
self.data = np.array([0],dtype=self._d_type)
return None
#Convert input frequencies into what their corresponding channel number would be.
self._setup_chans()
#Update frequencies ranges from channel number.
self._setup_freqs()
n_chans = self.header[b'nchans']
n_chans_selected = self.selection_shape[self.freq_axis]
n_ifs = self.header[b'nifs']
# Load binary data
f = open(self.filename, 'rb')
f.seek(int(self.idx_data))
# now check to see how many integrations requested
n_ints = self.t_stop - self.t_start
# Seek to first integration
f.seek(int(self.t_start * self._n_bytes * n_ifs * n_chans), 1)
#Loading data
self.data = np.zeros((n_ints, n_ifs, n_chans_selected), dtype=self._d_type)
for ii in range(n_ints):
for jj in range(n_ifs):
f.seek(int(self._n_bytes * self.chan_start_idx), 1) # 1 = from current location
dd = np.fromfile(f, count=n_chans_selected, dtype=self._d_type)
# Reverse array if frequency axis is flipped
# if self.header[b'foff'] < 0:
# dd = dd[::-1]
self.data[ii, jj] = dd
f.seek(int(self._n_bytes * (n_chans - self.chan_stop_idx)), 1)
|
def read_data(self, f_start=None, f_stop=None,t_start=None, t_stop=None):
""" Read data.
"""
self._setup_selection_range(f_start=f_start, f_stop=f_stop, t_start=t_start, t_stop=t_stop)
#check if selection is small enough.
if self.isheavy():
logger.warning("Selection size of %.2f GB, exceeding our size limit %.2f GB. Instance created, "
"header loaded, but data not loaded, please try another (t,v) selection." % (self._calc_selection_size() / (1024. ** 3), self.MAX_DATA_ARRAY_SIZE / (1024. ** 3)))
self.data = np.array([0],dtype=self._d_type)
return None
#Convert input frequencies into what their corresponding channel number would be.
self._setup_chans()
#Update frequencies ranges from channel number.
self._setup_freqs()
n_chans = self.header[b'nchans']
n_chans_selected = self.selection_shape[self.freq_axis]
n_ifs = self.header[b'nifs']
# Load binary data
f = open(self.filename, 'rb')
f.seek(int(self.idx_data))
# now check to see how many integrations requested
n_ints = self.t_stop - self.t_start
# Seek to first integration
f.seek(int(self.t_start * self._n_bytes * n_ifs * n_chans), 1)
#Loading data
self.data = np.zeros((n_ints, n_ifs, n_chans_selected), dtype=self._d_type)
for ii in range(n_ints):
for jj in range(n_ifs):
f.seek(int(self._n_bytes * self.chan_start_idx), 1) # 1 = from current location
dd = np.fromfile(f, count=n_chans_selected, dtype=self._d_type)
# Reverse array if frequency axis is flipped
# if self.header[b'foff'] < 0:
# dd = dd[::-1]
self.data[ii, jj] = dd
f.seek(int(self._n_bytes * (n_chans - self.chan_stop_idx)), 1)
|
[
"Read",
"data",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L576-L622
|
[
"def",
"read_data",
"(",
"self",
",",
"f_start",
"=",
"None",
",",
"f_stop",
"=",
"None",
",",
"t_start",
"=",
"None",
",",
"t_stop",
"=",
"None",
")",
":",
"self",
".",
"_setup_selection_range",
"(",
"f_start",
"=",
"f_start",
",",
"f_stop",
"=",
"f_stop",
",",
"t_start",
"=",
"t_start",
",",
"t_stop",
"=",
"t_stop",
")",
"#check if selection is small enough.",
"if",
"self",
".",
"isheavy",
"(",
")",
":",
"logger",
".",
"warning",
"(",
"\"Selection size of %.2f GB, exceeding our size limit %.2f GB. Instance created, \"",
"\"header loaded, but data not loaded, please try another (t,v) selection.\"",
"%",
"(",
"self",
".",
"_calc_selection_size",
"(",
")",
"/",
"(",
"1024.",
"**",
"3",
")",
",",
"self",
".",
"MAX_DATA_ARRAY_SIZE",
"/",
"(",
"1024.",
"**",
"3",
")",
")",
")",
"self",
".",
"data",
"=",
"np",
".",
"array",
"(",
"[",
"0",
"]",
",",
"dtype",
"=",
"self",
".",
"_d_type",
")",
"return",
"None",
"#Convert input frequencies into what their corresponding channel number would be.",
"self",
".",
"_setup_chans",
"(",
")",
"#Update frequencies ranges from channel number.",
"self",
".",
"_setup_freqs",
"(",
")",
"n_chans",
"=",
"self",
".",
"header",
"[",
"b'nchans'",
"]",
"n_chans_selected",
"=",
"self",
".",
"selection_shape",
"[",
"self",
".",
"freq_axis",
"]",
"n_ifs",
"=",
"self",
".",
"header",
"[",
"b'nifs'",
"]",
"# Load binary data",
"f",
"=",
"open",
"(",
"self",
".",
"filename",
",",
"'rb'",
")",
"f",
".",
"seek",
"(",
"int",
"(",
"self",
".",
"idx_data",
")",
")",
"# now check to see how many integrations requested",
"n_ints",
"=",
"self",
".",
"t_stop",
"-",
"self",
".",
"t_start",
"# Seek to first integration",
"f",
".",
"seek",
"(",
"int",
"(",
"self",
".",
"t_start",
"*",
"self",
".",
"_n_bytes",
"*",
"n_ifs",
"*",
"n_chans",
")",
",",
"1",
")",
"#Loading data",
"self",
".",
"data",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_ints",
",",
"n_ifs",
",",
"n_chans_selected",
")",
",",
"dtype",
"=",
"self",
".",
"_d_type",
")",
"for",
"ii",
"in",
"range",
"(",
"n_ints",
")",
":",
"for",
"jj",
"in",
"range",
"(",
"n_ifs",
")",
":",
"f",
".",
"seek",
"(",
"int",
"(",
"self",
".",
"_n_bytes",
"*",
"self",
".",
"chan_start_idx",
")",
",",
"1",
")",
"# 1 = from current location",
"dd",
"=",
"np",
".",
"fromfile",
"(",
"f",
",",
"count",
"=",
"n_chans_selected",
",",
"dtype",
"=",
"self",
".",
"_d_type",
")",
"# Reverse array if frequency axis is flipped",
"# if self.header[b'foff'] < 0:",
"# dd = dd[::-1]",
"self",
".",
"data",
"[",
"ii",
",",
"jj",
"]",
"=",
"dd",
"f",
".",
"seek",
"(",
"int",
"(",
"self",
".",
"_n_bytes",
"*",
"(",
"n_chans",
"-",
"self",
".",
"chan_stop_idx",
")",
")",
",",
"1",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
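The filterbank read_data above interleaves f.seek with np.fromfile so only the selected channel range of each integration is pulled from disk: skip chan_start_idx channels, read the selection, then skip the remainder of the row. A self-contained demo of the same stride pattern (the file name and sizes are made up):

import os
import tempfile

import numpy as np

n_ints, n_chans = 4, 8
data = np.arange(n_ints * n_chans, dtype='float32').reshape(n_ints, n_chans)
path = os.path.join(tempfile.mkdtemp(), 'demo.bin')
data.tofile(path)

c0, c1 = 2, 5                                  # selected channel range
itemsize = data.dtype.itemsize
out = np.zeros((n_ints, c1 - c0), dtype='float32')
with open(path, 'rb') as f:
    for ii in range(n_ints):
        f.seek(itemsize * c0, 1)               # skip to the first selected channel
        out[ii] = np.fromfile(f, count=c1 - c0, dtype='float32')
        f.seek(itemsize * (n_chans - c1), 1)   # skip the rest of the row

print(np.array_equal(out, data[:, c0:c1]))     # True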
test
|
FilReader._find_blob_start
|
Find first blob from selection.
|
blimpy/file_wrapper.py
|
def _find_blob_start(self):
"""Find first blob from selection.
"""
# Convert input frequencies into what their corresponding channel number would be.
self._setup_chans()
# Check which is the blob time offset
blob_time_start = self.t_start
# Check which is the blob frequency offset (in channels)
blob_freq_start = self.chan_start_idx
blob_start = blob_time_start * self.n_channels_in_file + blob_freq_start
return blob_start
|
def _find_blob_start(self):
"""Find first blob from selection.
"""
# Convert input frequencies into what their corresponding channel number would be.
self._setup_chans()
# Check which is the blob time offset
blob_time_start = self.t_start
# Check which is the blob frequency offset (in channels)
blob_freq_start = self.chan_start_idx
blob_start = blob_time_start * self.n_channels_in_file + blob_freq_start
return blob_start
|
[
"Find",
"first",
"blob",
"from",
"selection",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L624-L639
|
[
"def",
"_find_blob_start",
"(",
"self",
")",
":",
"# Convert input frequencies into what their corresponding channel number would be.",
"self",
".",
"_setup_chans",
"(",
")",
"# Check which is the blob time offset",
"blob_time_start",
"=",
"self",
".",
"t_start",
"# Check which is the blob frequency offset (in channels)",
"blob_freq_start",
"=",
"self",
".",
"chan_start_idx",
"blob_start",
"=",
"blob_time_start",
"*",
"self",
".",
"n_channels_in_file",
"+",
"blob_freq_start",
"return",
"blob_start"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
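The offset above is plain row-major indexing: blob_start = t_start * n_channels_in_file + chan_start_idx gives the flat sample index of the selection's first value within the data section. A worked example with made-up numbers:

t_start, n_channels_in_file, chan_start_idx = 10, 4096, 512
blob_start = t_start * n_channels_in_file + chan_start_idx
print(blob_start)  # 10 * 4096 + 512 = 41472 samples into the data section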
test
|
FilReader.read_blob
|
Read blob from a selection.
|
blimpy/file_wrapper.py
|
def read_blob(self,blob_dim,n_blob=0):
"""Read blob from a selection.
"""
n_blobs = self.calc_n_blobs(blob_dim)
if n_blob > n_blobs or n_blob < 0:
            raise ValueError('Please provide a correct n_blob value. Given %i, but max value is %i'%(n_blob,n_blobs))
# This prevents issues when the last blob is smaller than the others in time.
if blob_dim[self.time_axis]*(n_blob+1) > self.selection_shape[self.time_axis]:
updated_blob_dim = (int(self.selection_shape[self.time_axis] - blob_dim[self.time_axis]*n_blob), 1, int(blob_dim[self.freq_axis]))
else:
updated_blob_dim = [int(i) for i in blob_dim]
blob_start = self._find_blob_start()
blob = np.zeros(updated_blob_dim, dtype=self._d_type)
# EE: For now; also assuming one polarization and one beam.
# Assuming the blob will loop over the whole frequency range.
if self.f_start == self.f_begin and self.f_stop == self.f_end:
blob_flat_size = np.prod(blob_dim)
updated_blob_flat_size = np.prod(updated_blob_dim)
# Load binary data
with open(self.filename, 'rb') as f:
f.seek(int(self.idx_data + self._n_bytes * (blob_start + n_blob*blob_flat_size)))
dd = np.fromfile(f, count=updated_blob_flat_size, dtype=self._d_type)
if dd.shape[0] == updated_blob_flat_size:
blob = dd.reshape(updated_blob_dim)
else:
logger.info('DD shape != blob shape.')
blob = dd.reshape((int(dd.shape[0]/blob_dim[self.freq_axis]),blob_dim[self.beam_axis],blob_dim[self.freq_axis]))
else:
for blobt in range(updated_blob_dim[self.time_axis]):
#Load binary data
with open(self.filename, 'rb') as f:
f.seek(int(self.idx_data + self._n_bytes * (blob_start + n_blob*blob_dim[self.time_axis]*self.n_channels_in_file + blobt*self.n_channels_in_file)))
dd = np.fromfile(f, count=blob_dim[self.freq_axis], dtype=self._d_type)
blob[blobt] = dd
# if self.header[b'foff'] < 0:
# blob = blob[:,:,::-1]
return blob
|
def read_blob(self,blob_dim,n_blob=0):
"""Read blob from a selection.
"""
n_blobs = self.calc_n_blobs(blob_dim)
if n_blob > n_blobs or n_blob < 0:
            raise ValueError('Please provide a correct n_blob value. Given %i, but max value is %i'%(n_blob,n_blobs))
# This prevents issues when the last blob is smaller than the others in time.
if blob_dim[self.time_axis]*(n_blob+1) > self.selection_shape[self.time_axis]:
updated_blob_dim = (int(self.selection_shape[self.time_axis] - blob_dim[self.time_axis]*n_blob), 1, int(blob_dim[self.freq_axis]))
else:
updated_blob_dim = [int(i) for i in blob_dim]
blob_start = self._find_blob_start()
blob = np.zeros(updated_blob_dim, dtype=self._d_type)
# EE: For now; also assuming one polarization and one beam.
# Assuming the blob will loop over the whole frequency range.
if self.f_start == self.f_begin and self.f_stop == self.f_end:
blob_flat_size = np.prod(blob_dim)
updated_blob_flat_size = np.prod(updated_blob_dim)
# Load binary data
with open(self.filename, 'rb') as f:
f.seek(int(self.idx_data + self._n_bytes * (blob_start + n_blob*blob_flat_size)))
dd = np.fromfile(f, count=updated_blob_flat_size, dtype=self._d_type)
if dd.shape[0] == updated_blob_flat_size:
blob = dd.reshape(updated_blob_dim)
else:
logger.info('DD shape != blob shape.')
blob = dd.reshape((int(dd.shape[0]/blob_dim[self.freq_axis]),blob_dim[self.beam_axis],blob_dim[self.freq_axis]))
else:
for blobt in range(updated_blob_dim[self.time_axis]):
#Load binary data
with open(self.filename, 'rb') as f:
f.seek(int(self.idx_data + self._n_bytes * (blob_start + n_blob*blob_dim[self.time_axis]*self.n_channels_in_file + blobt*self.n_channels_in_file)))
dd = np.fromfile(f, count=blob_dim[self.freq_axis], dtype=self._d_type)
blob[blobt] = dd
# if self.header[b'foff'] < 0:
# blob = blob[:,:,::-1]
return blob
|
[
"Read",
"blob",
"from",
"a",
"selection",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L641-L690
|
[
"def",
"read_blob",
"(",
"self",
",",
"blob_dim",
",",
"n_blob",
"=",
"0",
")",
":",
"n_blobs",
"=",
"self",
".",
"calc_n_blobs",
"(",
"blob_dim",
")",
"if",
"n_blob",
">",
"n_blobs",
"or",
"n_blob",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'Please provide correct n_blob value. Given %i, but max values is %i'",
"%",
"(",
"n_blob",
",",
"n_blobs",
")",
")",
"# This prevents issues when the last blob is smaller than the others in time.",
"if",
"blob_dim",
"[",
"self",
".",
"time_axis",
"]",
"*",
"(",
"n_blob",
"+",
"1",
")",
">",
"self",
".",
"selection_shape",
"[",
"self",
".",
"time_axis",
"]",
":",
"updated_blob_dim",
"=",
"(",
"int",
"(",
"self",
".",
"selection_shape",
"[",
"self",
".",
"time_axis",
"]",
"-",
"blob_dim",
"[",
"self",
".",
"time_axis",
"]",
"*",
"n_blob",
")",
",",
"1",
",",
"int",
"(",
"blob_dim",
"[",
"self",
".",
"freq_axis",
"]",
")",
")",
"else",
":",
"updated_blob_dim",
"=",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"blob_dim",
"]",
"blob_start",
"=",
"self",
".",
"_find_blob_start",
"(",
")",
"blob",
"=",
"np",
".",
"zeros",
"(",
"updated_blob_dim",
",",
"dtype",
"=",
"self",
".",
"_d_type",
")",
"# EE: For now; also assuming one polarization and one beam.",
"# Assuming the blob will loop over the whole frequency range.",
"if",
"self",
".",
"f_start",
"==",
"self",
".",
"f_begin",
"and",
"self",
".",
"f_stop",
"==",
"self",
".",
"f_end",
":",
"blob_flat_size",
"=",
"np",
".",
"prod",
"(",
"blob_dim",
")",
"updated_blob_flat_size",
"=",
"np",
".",
"prod",
"(",
"updated_blob_dim",
")",
"# Load binary data",
"with",
"open",
"(",
"self",
".",
"filename",
",",
"'rb'",
")",
"as",
"f",
":",
"f",
".",
"seek",
"(",
"int",
"(",
"self",
".",
"idx_data",
"+",
"self",
".",
"_n_bytes",
"*",
"(",
"blob_start",
"+",
"n_blob",
"*",
"blob_flat_size",
")",
")",
")",
"dd",
"=",
"np",
".",
"fromfile",
"(",
"f",
",",
"count",
"=",
"updated_blob_flat_size",
",",
"dtype",
"=",
"self",
".",
"_d_type",
")",
"if",
"dd",
".",
"shape",
"[",
"0",
"]",
"==",
"updated_blob_flat_size",
":",
"blob",
"=",
"dd",
".",
"reshape",
"(",
"updated_blob_dim",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'DD shape != blob shape.'",
")",
"blob",
"=",
"dd",
".",
"reshape",
"(",
"(",
"int",
"(",
"dd",
".",
"shape",
"[",
"0",
"]",
"/",
"blob_dim",
"[",
"self",
".",
"freq_axis",
"]",
")",
",",
"blob_dim",
"[",
"self",
".",
"beam_axis",
"]",
",",
"blob_dim",
"[",
"self",
".",
"freq_axis",
"]",
")",
")",
"else",
":",
"for",
"blobt",
"in",
"range",
"(",
"updated_blob_dim",
"[",
"self",
".",
"time_axis",
"]",
")",
":",
"#Load binary data",
"with",
"open",
"(",
"self",
".",
"filename",
",",
"'rb'",
")",
"as",
"f",
":",
"f",
".",
"seek",
"(",
"int",
"(",
"self",
".",
"idx_data",
"+",
"self",
".",
"_n_bytes",
"*",
"(",
"blob_start",
"+",
"n_blob",
"*",
"blob_dim",
"[",
"self",
".",
"time_axis",
"]",
"*",
"self",
".",
"n_channels_in_file",
"+",
"blobt",
"*",
"self",
".",
"n_channels_in_file",
")",
")",
")",
"dd",
"=",
"np",
".",
"fromfile",
"(",
"f",
",",
"count",
"=",
"blob_dim",
"[",
"self",
".",
"freq_axis",
"]",
",",
"dtype",
"=",
"self",
".",
"_d_type",
")",
"blob",
"[",
"blobt",
"]",
"=",
"dd",
"# if self.header[b'foff'] < 0:",
"# blob = blob[:,:,::-1]",
"return",
"blob"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
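FilReader.read_blob takes one of two paths: when the whole band is selected it does a single contiguous read and reshapes, falling back to a shorter reshape when the final read returns fewer samples than a full blob; otherwise it reads one time step at a time. The reshape arithmetic is sketched below with toy numbers:

import numpy as np

blob_dim = (4, 1, 8)
flat = np.arange(np.prod(blob_dim), dtype='float32')
blob = flat.reshape(blob_dim)                  # full-band, contiguous path

short = flat[:16]                              # a truncated final read
rows = short.shape[0] // blob_dim[2]
blob_short = short.reshape((rows, 1, blob_dim[2]))
print(blob.shape, blob_short.shape)            # (4, 1, 8) (2, 1, 8)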
test
|
FilReader.read_all
|
read all the data.
If reverse=True the x axis is flipped.
|
blimpy/file_wrapper.py
|
def read_all(self,reverse=True):
""" read all the data.
If reverse=True the x axis is flipped.
"""
raise NotImplementedError('To be implemented')
# go to start of the data
self.filfile.seek(int(self.datastart))
# read data into 2-D numpy array
# data=np.fromfile(self.filfile,dtype=self.dtype).reshape(self.channels,self.blocksize,order='F')
data=np.fromfile(self.filfile,dtype=self.dtype).reshape(self.blocksize, self.channels)
if reverse:
data = data[:,::-1]
return data
|
def read_all(self,reverse=True):
""" read all the data.
If reverse=True the x axis is flipped.
"""
raise NotImplementedError('To be implemented')
# go to start of the data
self.filfile.seek(int(self.datastart))
# read data into 2-D numpy array
# data=np.fromfile(self.filfile,dtype=self.dtype).reshape(self.channels,self.blocksize,order='F')
data=np.fromfile(self.filfile,dtype=self.dtype).reshape(self.blocksize, self.channels)
if reverse:
data = data[:,::-1]
return data
|
[
"read",
"all",
"the",
"data",
".",
"If",
"reverse",
"=",
"True",
"the",
"x",
"axis",
"is",
"flipped",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L692-L705
|
[
"def",
"read_all",
"(",
"self",
",",
"reverse",
"=",
"True",
")",
":",
"raise",
"NotImplementedError",
"(",
"'To be implemented'",
")",
"# go to start of the data",
"self",
".",
"filfile",
".",
"seek",
"(",
"int",
"(",
"self",
".",
"datastart",
")",
")",
"# read data into 2-D numpy array",
"# data=np.fromfile(self.filfile,dtype=self.dtype).reshape(self.channels,self.blocksize,order='F')",
"data",
"=",
"np",
".",
"fromfile",
"(",
"self",
".",
"filfile",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
".",
"reshape",
"(",
"self",
".",
"blocksize",
",",
"self",
".",
"channels",
")",
"if",
"reverse",
":",
"data",
"=",
"data",
"[",
":",
",",
":",
":",
"-",
"1",
"]",
"return",
"data"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
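Because read_all raises NotImplementedError on its first line, everything after the raise is dead code kept as a reference implementation. The reverse branch it sketches is an ordinary negative-stride slice along the channel axis:

import numpy as np

block = np.arange(12, dtype='float32').reshape(3, 4)
flipped = block[:, ::-1]                       # the reverse=True branch above
print(flipped[0])                              # [3. 2. 1. 0.]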
test
|
FilReader.read_row
|
Read a block of data. The number of samples per row is set in self.channels
If reverse=True the x axis is flipped.
|
blimpy/file_wrapper.py
|
def read_row(self,rownumber,reverse=True):
""" Read a block of data. The number of samples per row is set in self.channels
If reverse=True the x axis is flipped.
"""
raise NotImplementedError('To be implemented')
# go to start of the row
self.filfile.seek(int(self.datastart+self.channels*rownumber*(int(self.nbits/8))))
# read data into 2-D numpy array
data=np.fromfile(self.filfile,count=self.channels,dtype=self.dtype).reshape(1, self.channels)
if reverse:
data = data[:,::-1]
return data
|
def read_row(self,rownumber,reverse=True):
""" Read a block of data. The number of samples per row is set in self.channels
If reverse=True the x axis is flipped.
"""
raise NotImplementedError('To be implemented')
# go to start of the row
self.filfile.seek(int(self.datastart+self.channels*rownumber*(int(self.nbits/8))))
# read data into 2-D numpy array
data=np.fromfile(self.filfile,count=self.channels,dtype=self.dtype).reshape(1, self.channels)
if reverse:
data = data[:,::-1]
return data
|
[
"Read",
"a",
"block",
"of",
"data",
".",
"The",
"number",
"of",
"samples",
"per",
"row",
"is",
"set",
"in",
"self",
".",
"channels",
"If",
"reverse",
"=",
"True",
"the",
"x",
"axis",
"is",
"flipped",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L707-L719
|
[
"def",
"read_row",
"(",
"self",
",",
"rownumber",
",",
"reverse",
"=",
"True",
")",
":",
"raise",
"NotImplementedError",
"(",
"'To be implemented'",
")",
"# go to start of the row",
"self",
".",
"filfile",
".",
"seek",
"(",
"int",
"(",
"self",
".",
"datastart",
"+",
"self",
".",
"channels",
"*",
"rownumber",
"*",
"(",
"int",
"(",
"self",
".",
"nbits",
"/",
"8",
")",
")",
")",
")",
"# read data into 2-D numpy array",
"data",
"=",
"np",
".",
"fromfile",
"(",
"self",
".",
"filfile",
",",
"count",
"=",
"self",
".",
"channels",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
".",
"reshape",
"(",
"1",
",",
"self",
".",
"channels",
")",
"if",
"reverse",
":",
"data",
"=",
"data",
"[",
":",
",",
":",
":",
"-",
"1",
"]",
"return",
"data"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
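The row seek in read_row is datastart + channels * rownumber * (nbits / 8): the header size plus rownumber whole rows of channels samples at nbits / 8 bytes each. With made-up numbers:

datastart, channels, rownumber, nbits = 360, 1024, 7, 32
offset = datastart + channels * rownumber * (nbits // 8)
print(offset)  # 360 + 1024 * 7 * 4 = 29032 bytes from the start of the file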
test
|
cmd_tool
|
Command line tool for plotting and viewing info on blimpy files
|
blimpy/waterfall.py
|
def cmd_tool(args=None):
""" Command line tool for plotting and viewing info on blimpy files """
from argparse import ArgumentParser
parser = ArgumentParser(description="Command line utility for reading and plotting blimpy files.")
parser.add_argument('filename', type=str,
help='Name of file to read')
parser.add_argument('-p', action='store', default='a', dest='what_to_plot', type=str,
help='Show: "w" waterfall (freq vs. time) plot; "s" integrated spectrum plot; \
"t" for time series; "mm" for spectrum including min max; "k" for kurtosis; \
"a" for all available plots and information; and "ank" for all but kurtosis.')
parser.add_argument('-b', action='store', default=None, dest='f_start', type=float,
help='Start frequency (begin), in MHz')
parser.add_argument('-e', action='store', default=None, dest='f_stop', type=float,
help='Stop frequency (end), in MHz')
parser.add_argument('-B', action='store', default=None, dest='t_start', type=int,
help='Start integration (begin, inclusive) ID ')
parser.add_argument('-E', action='store', default=None, dest='t_stop', type=int,
help='Stop integration (end, exclusive) ID')
parser.add_argument('-i', action='store_true', default=False, dest='info_only',
help='Show info only')
parser.add_argument('-a', action='store_true', default=False, dest='average',
help='average along time axis (plot spectrum only)')
parser.add_argument('-s', action='store', default='', dest='plt_filename', type=str,
help='save plot graphic to file (give filename as argument)')
parser.add_argument('-S', action='store_true', default=False, dest='save_only',
help='Turn off plotting of data and only save to file.')
parser.add_argument('-D', action='store_false', default=True, dest='blank_dc',
help='Use to not blank DC bin.')
parser.add_argument('-H', action='store_true', default=False, dest='to_hdf5',
help='Write file to hdf5 format.')
parser.add_argument('-F', action='store_true', default=False, dest='to_fil',
help='Write file to .fil format.')
parser.add_argument('-o', action='store', default=None, dest='filename_out', type=str,
                        help='Filename output (if not provided, the name will be the same but with appropriate extension).')
parser.add_argument('-l', action='store', default=None, dest='max_load', type=float,
help='Maximum data limit to load. Default:1GB')
if args is None:
args = sys.argv[1:]
parse_args = parser.parse_args(args)
# Open blimpy data
filename = parse_args.filename
load_data = not parse_args.info_only
info_only = parse_args.info_only
filename_out = parse_args.filename_out
fil = Waterfall(filename, f_start=parse_args.f_start, f_stop=parse_args.f_stop, t_start=parse_args.t_start, t_stop=parse_args.t_stop, load_data=load_data, max_load=parse_args.max_load)
fil.info()
#Check the size of selection.
if fil.container.isheavy() or parse_args.to_hdf5 or parse_args.to_fil:
info_only = True
# And if we want to plot data, then plot data.
if not info_only:
print('')
if parse_args.blank_dc:
logger.info("Blanking DC bin")
n_coarse_chan = fil.calc_n_coarse_chan()
fil.blank_dc(n_coarse_chan)
if parse_args.what_to_plot == "w":
plt.figure("waterfall", figsize=(8, 6))
fil.plot_waterfall(f_start=parse_args.f_start, f_stop=parse_args.f_stop)
elif parse_args.what_to_plot == "s":
plt.figure("Spectrum", figsize=(8, 6))
fil.plot_spectrum(logged=True, f_start=parse_args.f_start, f_stop=parse_args.f_stop, t='all')
elif parse_args.what_to_plot == "mm":
plt.figure("min max", figsize=(8, 6))
fil.plot_spectrum_min_max(logged=True, f_start=parse_args.f_start, f_stop=parse_args.f_stop, t='all')
elif parse_args.what_to_plot == "k":
plt.figure("kurtosis", figsize=(8, 6))
fil.plot_kurtosis(f_start=parse_args.f_start, f_stop=parse_args.f_stop)
elif parse_args.what_to_plot == "t":
plt.figure("Time Series", figsize=(8, 6))
fil.plot_time_series(f_start=parse_args.f_start, f_stop=parse_args.f_stop,orientation='h')
elif parse_args.what_to_plot == "a":
plt.figure("Multiple diagnostic plots", figsize=(12, 9),facecolor='white')
fil.plot_all(logged=True, f_start=parse_args.f_start, f_stop=parse_args.f_stop, t='all')
elif parse_args.what_to_plot == "ank":
plt.figure("Multiple diagnostic plots", figsize=(12, 9),facecolor='white')
fil.plot_all(logged=True, f_start=parse_args.f_start, f_stop=parse_args.f_stop, t='all',kutosis=False)
if parse_args.plt_filename != '':
plt.savefig(parse_args.plt_filename)
if not parse_args.save_only:
if 'DISPLAY' in os.environ.keys():
plt.show()
else:
logger.warning("No $DISPLAY available.")
else:
if parse_args.to_hdf5 and parse_args.to_fil:
raise Warning('Either provide to_hdf5 or to_fil, but not both.')
if parse_args.to_hdf5:
if not filename_out:
filename_out = filename.replace('.fil','.h5')
elif '.h5' not in filename_out:
filename_out = filename_out.replace('.fil','')+'.h5'
logger.info('Writing file : %s'%(filename_out))
fil.write_to_hdf5(filename_out)
logger.info('File written.')
if parse_args.to_fil:
if not filename_out:
filename_out = filename.replace('.h5','.fil')
elif '.fil' not in filename_out:
filename_out = filename_out.replace('.h5','')+'.fil'
logger.info('Writing file : %s'%(filename_out))
fil.write_to_fil(filename_out)
logger.info('File written.')
|
def cmd_tool(args=None):
""" Command line tool for plotting and viewing info on blimpy files """
from argparse import ArgumentParser
parser = ArgumentParser(description="Command line utility for reading and plotting blimpy files.")
parser.add_argument('filename', type=str,
help='Name of file to read')
parser.add_argument('-p', action='store', default='a', dest='what_to_plot', type=str,
help='Show: "w" waterfall (freq vs. time) plot; "s" integrated spectrum plot; \
"t" for time series; "mm" for spectrum including min max; "k" for kurtosis; \
"a" for all available plots and information; and "ank" for all but kurtosis.')
parser.add_argument('-b', action='store', default=None, dest='f_start', type=float,
help='Start frequency (begin), in MHz')
parser.add_argument('-e', action='store', default=None, dest='f_stop', type=float,
help='Stop frequency (end), in MHz')
parser.add_argument('-B', action='store', default=None, dest='t_start', type=int,
help='Start integration (begin, inclusive) ID ')
parser.add_argument('-E', action='store', default=None, dest='t_stop', type=int,
help='Stop integration (end, exclusive) ID')
parser.add_argument('-i', action='store_true', default=False, dest='info_only',
help='Show info only')
parser.add_argument('-a', action='store_true', default=False, dest='average',
help='average along time axis (plot spectrum only)')
parser.add_argument('-s', action='store', default='', dest='plt_filename', type=str,
help='save plot graphic to file (give filename as argument)')
parser.add_argument('-S', action='store_true', default=False, dest='save_only',
help='Turn off plotting of data and only save to file.')
parser.add_argument('-D', action='store_false', default=True, dest='blank_dc',
help='Use to not blank DC bin.')
parser.add_argument('-H', action='store_true', default=False, dest='to_hdf5',
help='Write file to hdf5 format.')
parser.add_argument('-F', action='store_true', default=False, dest='to_fil',
help='Write file to .fil format.')
parser.add_argument('-o', action='store', default=None, dest='filename_out', type=str,
                        help='Filename output (if not provided, the name will be the same but with appropriate extension).')
parser.add_argument('-l', action='store', default=None, dest='max_load', type=float,
help='Maximum data limit to load. Default:1GB')
if args is None:
args = sys.argv[1:]
parse_args = parser.parse_args(args)
# Open blimpy data
filename = parse_args.filename
load_data = not parse_args.info_only
info_only = parse_args.info_only
filename_out = parse_args.filename_out
fil = Waterfall(filename, f_start=parse_args.f_start, f_stop=parse_args.f_stop, t_start=parse_args.t_start, t_stop=parse_args.t_stop, load_data=load_data, max_load=parse_args.max_load)
fil.info()
#Check the size of selection.
if fil.container.isheavy() or parse_args.to_hdf5 or parse_args.to_fil:
info_only = True
# And if we want to plot data, then plot data.
if not info_only:
print('')
if parse_args.blank_dc:
logger.info("Blanking DC bin")
n_coarse_chan = fil.calc_n_coarse_chan()
fil.blank_dc(n_coarse_chan)
if parse_args.what_to_plot == "w":
plt.figure("waterfall", figsize=(8, 6))
fil.plot_waterfall(f_start=parse_args.f_start, f_stop=parse_args.f_stop)
elif parse_args.what_to_plot == "s":
plt.figure("Spectrum", figsize=(8, 6))
fil.plot_spectrum(logged=True, f_start=parse_args.f_start, f_stop=parse_args.f_stop, t='all')
elif parse_args.what_to_plot == "mm":
plt.figure("min max", figsize=(8, 6))
fil.plot_spectrum_min_max(logged=True, f_start=parse_args.f_start, f_stop=parse_args.f_stop, t='all')
elif parse_args.what_to_plot == "k":
plt.figure("kurtosis", figsize=(8, 6))
fil.plot_kurtosis(f_start=parse_args.f_start, f_stop=parse_args.f_stop)
elif parse_args.what_to_plot == "t":
plt.figure("Time Series", figsize=(8, 6))
fil.plot_time_series(f_start=parse_args.f_start, f_stop=parse_args.f_stop,orientation='h')
elif parse_args.what_to_plot == "a":
plt.figure("Multiple diagnostic plots", figsize=(12, 9),facecolor='white')
fil.plot_all(logged=True, f_start=parse_args.f_start, f_stop=parse_args.f_stop, t='all')
elif parse_args.what_to_plot == "ank":
plt.figure("Multiple diagnostic plots", figsize=(12, 9),facecolor='white')
fil.plot_all(logged=True, f_start=parse_args.f_start, f_stop=parse_args.f_stop, t='all',kutosis=False)
if parse_args.plt_filename != '':
plt.savefig(parse_args.plt_filename)
if not parse_args.save_only:
if 'DISPLAY' in os.environ.keys():
plt.show()
else:
logger.warning("No $DISPLAY available.")
else:
if parse_args.to_hdf5 and parse_args.to_fil:
raise Warning('Either provide to_hdf5 or to_fil, but not both.')
if parse_args.to_hdf5:
if not filename_out:
filename_out = filename.replace('.fil','.h5')
elif '.h5' not in filename_out:
filename_out = filename_out.replace('.fil','')+'.h5'
logger.info('Writing file : %s'%(filename_out))
fil.write_to_hdf5(filename_out)
logger.info('File written.')
if parse_args.to_fil:
if not filename_out:
filename_out = filename.replace('.h5','.fil')
elif '.fil' not in filename_out:
filename_out = filename_out.replace('.h5','')+'.fil'
logger.info('Writing file : %s'%(filename_out))
fil.write_to_fil(filename_out)
logger.info('File written.')
|
[
"Command",
"line",
"tool",
"for",
"plotting",
"and",
"viewing",
"info",
"on",
"blimpy",
"files"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/waterfall.py#L541-L663
|
[
"def",
"cmd_tool",
"(",
"args",
"=",
"None",
")",
":",
"from",
"argparse",
"import",
"ArgumentParser",
"parser",
"=",
"ArgumentParser",
"(",
"description",
"=",
"\"Command line utility for reading and plotting blimpy files.\"",
")",
"parser",
".",
"add_argument",
"(",
"'filename'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'Name of file to read'",
")",
"parser",
".",
"add_argument",
"(",
"'-p'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"'a'",
",",
"dest",
"=",
"'what_to_plot'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'Show: \"w\" waterfall (freq vs. time) plot; \"s\" integrated spectrum plot; \\\n \"t\" for time series; \"mm\" for spectrum including min max; \"k\" for kurtosis; \\\n \"a\" for all available plots and information; and \"ank\" for all but kurtosis.'",
")",
"parser",
".",
"add_argument",
"(",
"'-b'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'f_start'",
",",
"type",
"=",
"float",
",",
"help",
"=",
"'Start frequency (begin), in MHz'",
")",
"parser",
".",
"add_argument",
"(",
"'-e'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'f_stop'",
",",
"type",
"=",
"float",
",",
"help",
"=",
"'Stop frequency (end), in MHz'",
")",
"parser",
".",
"add_argument",
"(",
"'-B'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'t_start'",
",",
"type",
"=",
"int",
",",
"help",
"=",
"'Start integration (begin, inclusive) ID '",
")",
"parser",
".",
"add_argument",
"(",
"'-E'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'t_stop'",
",",
"type",
"=",
"int",
",",
"help",
"=",
"'Stop integration (end, exclusive) ID'",
")",
"parser",
".",
"add_argument",
"(",
"'-i'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"dest",
"=",
"'info_only'",
",",
"help",
"=",
"'Show info only'",
")",
"parser",
".",
"add_argument",
"(",
"'-a'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"dest",
"=",
"'average'",
",",
"help",
"=",
"'average along time axis (plot spectrum only)'",
")",
"parser",
".",
"add_argument",
"(",
"'-s'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"''",
",",
"dest",
"=",
"'plt_filename'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'save plot graphic to file (give filename as argument)'",
")",
"parser",
".",
"add_argument",
"(",
"'-S'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"dest",
"=",
"'save_only'",
",",
"help",
"=",
"'Turn off plotting of data and only save to file.'",
")",
"parser",
".",
"add_argument",
"(",
"'-D'",
",",
"action",
"=",
"'store_false'",
",",
"default",
"=",
"True",
",",
"dest",
"=",
"'blank_dc'",
",",
"help",
"=",
"'Use to not blank DC bin.'",
")",
"parser",
".",
"add_argument",
"(",
"'-H'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"dest",
"=",
"'to_hdf5'",
",",
"help",
"=",
"'Write file to hdf5 format.'",
")",
"parser",
".",
"add_argument",
"(",
"'-F'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"dest",
"=",
"'to_fil'",
",",
"help",
"=",
"'Write file to .fil format.'",
")",
"parser",
".",
"add_argument",
"(",
"'-o'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'filename_out'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'Filename output (if not probided, the name will be the same but with apropiate extension).'",
")",
"parser",
".",
"add_argument",
"(",
"'-l'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'max_load'",
",",
"type",
"=",
"float",
",",
"help",
"=",
"'Maximum data limit to load. Default:1GB'",
")",
"if",
"args",
"is",
"None",
":",
"args",
"=",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
"parse_args",
"=",
"parser",
".",
"parse_args",
"(",
"args",
")",
"# Open blimpy data",
"filename",
"=",
"parse_args",
".",
"filename",
"load_data",
"=",
"not",
"parse_args",
".",
"info_only",
"info_only",
"=",
"parse_args",
".",
"info_only",
"filename_out",
"=",
"parse_args",
".",
"filename_out",
"fil",
"=",
"Waterfall",
"(",
"filename",
",",
"f_start",
"=",
"parse_args",
".",
"f_start",
",",
"f_stop",
"=",
"parse_args",
".",
"f_stop",
",",
"t_start",
"=",
"parse_args",
".",
"t_start",
",",
"t_stop",
"=",
"parse_args",
".",
"t_stop",
",",
"load_data",
"=",
"load_data",
",",
"max_load",
"=",
"parse_args",
".",
"max_load",
")",
"fil",
".",
"info",
"(",
")",
"#Check the size of selection.",
"if",
"fil",
".",
"container",
".",
"isheavy",
"(",
")",
"or",
"parse_args",
".",
"to_hdf5",
"or",
"parse_args",
".",
"to_fil",
":",
"info_only",
"=",
"True",
"# And if we want to plot data, then plot data.",
"if",
"not",
"info_only",
":",
"print",
"(",
"''",
")",
"if",
"parse_args",
".",
"blank_dc",
":",
"logger",
".",
"info",
"(",
"\"Blanking DC bin\"",
")",
"n_coarse_chan",
"=",
"fil",
".",
"calc_n_coarse_chan",
"(",
")",
"fil",
".",
"blank_dc",
"(",
"n_coarse_chan",
")",
"if",
"parse_args",
".",
"what_to_plot",
"==",
"\"w\"",
":",
"plt",
".",
"figure",
"(",
"\"waterfall\"",
",",
"figsize",
"=",
"(",
"8",
",",
"6",
")",
")",
"fil",
".",
"plot_waterfall",
"(",
"f_start",
"=",
"parse_args",
".",
"f_start",
",",
"f_stop",
"=",
"parse_args",
".",
"f_stop",
")",
"elif",
"parse_args",
".",
"what_to_plot",
"==",
"\"s\"",
":",
"plt",
".",
"figure",
"(",
"\"Spectrum\"",
",",
"figsize",
"=",
"(",
"8",
",",
"6",
")",
")",
"fil",
".",
"plot_spectrum",
"(",
"logged",
"=",
"True",
",",
"f_start",
"=",
"parse_args",
".",
"f_start",
",",
"f_stop",
"=",
"parse_args",
".",
"f_stop",
",",
"t",
"=",
"'all'",
")",
"elif",
"parse_args",
".",
"what_to_plot",
"==",
"\"mm\"",
":",
"plt",
".",
"figure",
"(",
"\"min max\"",
",",
"figsize",
"=",
"(",
"8",
",",
"6",
")",
")",
"fil",
".",
"plot_spectrum_min_max",
"(",
"logged",
"=",
"True",
",",
"f_start",
"=",
"parse_args",
".",
"f_start",
",",
"f_stop",
"=",
"parse_args",
".",
"f_stop",
",",
"t",
"=",
"'all'",
")",
"elif",
"parse_args",
".",
"what_to_plot",
"==",
"\"k\"",
":",
"plt",
".",
"figure",
"(",
"\"kurtosis\"",
",",
"figsize",
"=",
"(",
"8",
",",
"6",
")",
")",
"fil",
".",
"plot_kurtosis",
"(",
"f_start",
"=",
"parse_args",
".",
"f_start",
",",
"f_stop",
"=",
"parse_args",
".",
"f_stop",
")",
"elif",
"parse_args",
".",
"what_to_plot",
"==",
"\"t\"",
":",
"plt",
".",
"figure",
"(",
"\"Time Series\"",
",",
"figsize",
"=",
"(",
"8",
",",
"6",
")",
")",
"fil",
".",
"plot_time_series",
"(",
"f_start",
"=",
"parse_args",
".",
"f_start",
",",
"f_stop",
"=",
"parse_args",
".",
"f_stop",
",",
"orientation",
"=",
"'h'",
")",
"elif",
"parse_args",
".",
"what_to_plot",
"==",
"\"a\"",
":",
"plt",
".",
"figure",
"(",
"\"Multiple diagnostic plots\"",
",",
"figsize",
"=",
"(",
"12",
",",
"9",
")",
",",
"facecolor",
"=",
"'white'",
")",
"fil",
".",
"plot_all",
"(",
"logged",
"=",
"True",
",",
"f_start",
"=",
"parse_args",
".",
"f_start",
",",
"f_stop",
"=",
"parse_args",
".",
"f_stop",
",",
"t",
"=",
"'all'",
")",
"elif",
"parse_args",
".",
"what_to_plot",
"==",
"\"ank\"",
":",
"plt",
".",
"figure",
"(",
"\"Multiple diagnostic plots\"",
",",
"figsize",
"=",
"(",
"12",
",",
"9",
")",
",",
"facecolor",
"=",
"'white'",
")",
"fil",
".",
"plot_all",
"(",
"logged",
"=",
"True",
",",
"f_start",
"=",
"parse_args",
".",
"f_start",
",",
"f_stop",
"=",
"parse_args",
".",
"f_stop",
",",
"t",
"=",
"'all'",
",",
"kutosis",
"=",
"False",
")",
"if",
"parse_args",
".",
"plt_filename",
"!=",
"''",
":",
"plt",
".",
"savefig",
"(",
"parse_args",
".",
"plt_filename",
")",
"if",
"not",
"parse_args",
".",
"save_only",
":",
"if",
"'DISPLAY'",
"in",
"os",
".",
"environ",
".",
"keys",
"(",
")",
":",
"plt",
".",
"show",
"(",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"\"No $DISPLAY available.\"",
")",
"else",
":",
"if",
"parse_args",
".",
"to_hdf5",
"and",
"parse_args",
".",
"to_fil",
":",
"raise",
"Warning",
"(",
"'Either provide to_hdf5 or to_fil, but not both.'",
")",
"if",
"parse_args",
".",
"to_hdf5",
":",
"if",
"not",
"filename_out",
":",
"filename_out",
"=",
"filename",
".",
"replace",
"(",
"'.fil'",
",",
"'.h5'",
")",
"elif",
"'.h5'",
"not",
"in",
"filename_out",
":",
"filename_out",
"=",
"filename_out",
".",
"replace",
"(",
"'.fil'",
",",
"''",
")",
"+",
"'.h5'",
"logger",
".",
"info",
"(",
"'Writing file : %s'",
"%",
"(",
"filename_out",
")",
")",
"fil",
".",
"write_to_hdf5",
"(",
"filename_out",
")",
"logger",
".",
"info",
"(",
"'File written.'",
")",
"if",
"parse_args",
".",
"to_fil",
":",
"if",
"not",
"filename_out",
":",
"filename_out",
"=",
"filename",
".",
"replace",
"(",
"'.h5'",
",",
"'.fil'",
")",
"elif",
"'.fil'",
"not",
"in",
"filename_out",
":",
"filename_out",
"=",
"filename_out",
".",
"replace",
"(",
"'.h5'",
",",
"''",
")",
"+",
"'.fil'",
"logger",
".",
"info",
"(",
"'Writing file : %s'",
"%",
"(",
"filename_out",
")",
")",
"fil",
".",
"write_to_fil",
"(",
"filename_out",
")",
"logger",
".",
"info",
"(",
"'File written.'",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
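Two details of the argument wiring above are easy to miss: -D uses action='store_false', so the flag turns off DC blanking that is otherwise on by default, and the 'ank' branch passes kutosis=False, matching the spelling of the keyword accepted by this version of plot_all. A minimal sketch of the store_true/store_false pattern:

from argparse import ArgumentParser

parser = ArgumentParser(description='Sketch of the flag pattern used above.')
parser.add_argument('-i', action='store_true', dest='info_only', help='Show info only')
parser.add_argument('-D', action='store_false', dest='blank_dc', help='Do not blank DC bin')

args = parser.parse_args(['-i'])
print(args.info_only, args.blank_dc)  # True True (store_false defaults to True)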
test
|
Waterfall.read_data
|
Reads data selection if small enough.
|
blimpy/waterfall.py
|
def read_data(self, f_start=None, f_stop=None,t_start=None, t_stop=None):
""" Reads data selection if small enough.
"""
self.container.read_data(f_start=f_start, f_stop=f_stop,t_start=t_start, t_stop=t_stop)
self.__load_data()
|
def read_data(self, f_start=None, f_stop=None,t_start=None, t_stop=None):
""" Reads data selection if small enough.
"""
self.container.read_data(f_start=f_start, f_stop=f_stop,t_start=t_start, t_stop=t_stop)
self.__load_data()
|
[
"Reads",
"data",
"selection",
"if",
"small",
"enough",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/waterfall.py#L159-L165
|
[
"def",
"read_data",
"(",
"self",
",",
"f_start",
"=",
"None",
",",
"f_stop",
"=",
"None",
",",
"t_start",
"=",
"None",
",",
"t_stop",
"=",
"None",
")",
":",
"self",
".",
"container",
".",
"read_data",
"(",
"f_start",
"=",
"f_start",
",",
"f_stop",
"=",
"f_stop",
",",
"t_start",
"=",
"t_start",
",",
"t_stop",
"=",
"t_stop",
")",
"self",
".",
"__load_data",
"(",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
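Waterfall.read_data is a thin facade: it forwards the selection to the container, then calls self.__load_data(), a double-underscore method that Python name-mangles to _Waterfall__load_data. The same mangling applies to the other __-prefixed helpers in these rows; a short illustration with demo-only names:

class Demo:
    def read(self):
        self.__load()          # looked up as _Demo__load via name mangling
    def __load(self):
        print('loading')

Demo().read()                  # prints: loading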
test
|
Waterfall.__update_header
|
Updates the header information from the original file to the selection.
|
blimpy/waterfall.py
|
def __update_header(self):
""" Updates the header information from the original file to the selection.
"""
#Updating frequency of first channel from selection
if self.header[b'foff'] < 0:
self.header[b'fch1'] = self.container.f_stop
else:
self.header[b'fch1'] = self.container.f_start
        #Updating number of channels.
self.header[b'nchans'] = self.container.selection_shape[self.freq_axis]
#Updating time stamp for first time bin from selection
self.header[b'tstart'] = self.container.populate_timestamps(update_header=True)
|
def __update_header(self):
""" Updates the header information from the original file to the selection.
"""
#Updating frequency of first channel from selection
if self.header[b'foff'] < 0:
self.header[b'fch1'] = self.container.f_stop
else:
self.header[b'fch1'] = self.container.f_start
        #Updating number of channels.
self.header[b'nchans'] = self.container.selection_shape[self.freq_axis]
#Updating time stamp for first time bin from selection
self.header[b'tstart'] = self.container.populate_timestamps(update_header=True)
|
[
"Updates",
"the",
"header",
"information",
"from",
"the",
"original",
"file",
"to",
"the",
"selection",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/waterfall.py#L167-L181
|
[
"def",
"__update_header",
"(",
"self",
")",
":",
"#Updating frequency of first channel from selection",
"if",
"self",
".",
"header",
"[",
"b'foff'",
"]",
"<",
"0",
":",
"self",
".",
"header",
"[",
"b'fch1'",
"]",
"=",
"self",
".",
"container",
".",
"f_stop",
"else",
":",
"self",
".",
"header",
"[",
"b'fch1'",
"]",
"=",
"self",
".",
"container",
".",
"f_start",
"#Updating number of coarse channels.",
"self",
".",
"header",
"[",
"b'nchans'",
"]",
"=",
"self",
".",
"container",
".",
"selection_shape",
"[",
"self",
".",
"freq_axis",
"]",
"#Updating time stamp for first time bin from selection",
"self",
".",
"header",
"[",
"b'tstart'",
"]",
"=",
"self",
".",
"container",
".",
"populate_timestamps",
"(",
"update_header",
"=",
"True",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
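The fch1 update above encodes the SIGPROC convention that foff is the signed channel step: when foff is negative the first channel sits at the top of the band, so fch1 must be the selection's highest frequency. In miniature:

f_start, f_stop, foff = 1000.0, 1500.0, -0.5
fch1 = f_stop if foff < 0 else f_start  # first channel at the high edge when foff < 0
print(fch1)  # 1500.0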
test
|
Waterfall.info
|
Print header information and other derived information.
|
blimpy/waterfall.py
|
def info(self):
""" Print header information and other derived information. """
print("\n--- File Info ---")
for key, val in self.file_header.items():
if key == 'src_raj':
val = val.to_string(unit=u.hour, sep=':')
if key == 'src_dej':
val = val.to_string(unit=u.deg, sep=':')
print("%16s : %32s" % (key, val))
print("\n%16s : %32s" % ("Num ints in file", self.n_ints_in_file))
print("%16s : %32s" % ("File shape", self.file_shape))
print("--- Selection Info ---")
print("%16s : %32s" % ("Data selection shape", self.selection_shape))
print("%16s : %32s" % ("Minimum freq (MHz)", self.container.f_start))
print("%16s : %32s" % ("Maximum freq (MHz)", self.container.f_stop))
|
def info(self):
""" Print header information and other derived information. """
print("\n--- File Info ---")
for key, val in self.file_header.items():
if key == 'src_raj':
val = val.to_string(unit=u.hour, sep=':')
if key == 'src_dej':
val = val.to_string(unit=u.deg, sep=':')
print("%16s : %32s" % (key, val))
print("\n%16s : %32s" % ("Num ints in file", self.n_ints_in_file))
print("%16s : %32s" % ("File shape", self.file_shape))
print("--- Selection Info ---")
print("%16s : %32s" % ("Data selection shape", self.selection_shape))
print("%16s : %32s" % ("Minimum freq (MHz)", self.container.f_start))
print("%16s : %32s" % ("Maximum freq (MHz)", self.container.f_stop))
|
[
"Print",
"header",
"information",
"and",
"other",
"derived",
"information",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/waterfall.py#L195-L212
|
[
"def",
"info",
"(",
"self",
")",
":",
"print",
"(",
"\"\\n--- File Info ---\"",
")",
"for",
"key",
",",
"val",
"in",
"self",
".",
"file_header",
".",
"items",
"(",
")",
":",
"if",
"key",
"==",
"'src_raj'",
":",
"val",
"=",
"val",
".",
"to_string",
"(",
"unit",
"=",
"u",
".",
"hour",
",",
"sep",
"=",
"':'",
")",
"if",
"key",
"==",
"'src_dej'",
":",
"val",
"=",
"val",
".",
"to_string",
"(",
"unit",
"=",
"u",
".",
"deg",
",",
"sep",
"=",
"':'",
")",
"print",
"(",
"\"%16s : %32s\"",
"%",
"(",
"key",
",",
"val",
")",
")",
"print",
"(",
"\"\\n%16s : %32s\"",
"%",
"(",
"\"Num ints in file\"",
",",
"self",
".",
"n_ints_in_file",
")",
")",
"print",
"(",
"\"%16s : %32s\"",
"%",
"(",
"\"File shape\"",
",",
"self",
".",
"file_shape",
")",
")",
"print",
"(",
"\"--- Selection Info ---\"",
")",
"print",
"(",
"\"%16s : %32s\"",
"%",
"(",
"\"Data selection shape\"",
",",
"self",
".",
"selection_shape",
")",
")",
"print",
"(",
"\"%16s : %32s\"",
"%",
"(",
"\"Minimum freq (MHz)\"",
",",
"self",
".",
"container",
".",
"f_start",
")",
")",
"print",
"(",
"\"%16s : %32s\"",
"%",
"(",
"\"Maximum freq (MHz)\"",
",",
"self",
".",
"container",
".",
"f_stop",
")",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
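info() leans on printf-style field widths to line the report up: %16s right-aligns the key into 16 columns and %32s right-aligns the value into 32, so every ':' lands in the same place. For example:

print("%16s : %32s" % ("nchans", 1024))
print("%16s : %32s" % ("File shape", (16, 1, 1024)))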
test
|
Waterfall.write_to_fil
|
Write data to .fil file.
It checks the file size then decides how to write the file.
Args:
filename_out (str): Name of output file
|
blimpy/waterfall.py
|
def write_to_fil(self, filename_out, *args, **kwargs):
""" Write data to .fil file.
    It checks the file size then decides how to write the file.
Args:
filename_out (str): Name of output file
"""
#For timing how long it takes to write a file.
t0 = time.time()
#Update header
self.__update_header()
if self.container.isheavy():
self.__write_to_fil_heavy(filename_out)
else:
self.__write_to_fil_light(filename_out)
t1 = time.time()
logger.info('Conversion time: %2.2fsec' % (t1- t0))
|
def write_to_fil(self, filename_out, *args, **kwargs):
""" Write data to .fil file.
    It checks the file size then decides how to write the file.
Args:
filename_out (str): Name of output file
"""
#For timing how long it takes to write a file.
t0 = time.time()
#Update header
self.__update_header()
if self.container.isheavy():
self.__write_to_fil_heavy(filename_out)
else:
self.__write_to_fil_light(filename_out)
t1 = time.time()
logger.info('Conversion time: %2.2fsec' % (t1- t0))
|
[
"Write",
"data",
"to",
".",
"fil",
"file",
".",
"It",
"check",
"the",
"file",
"size",
"then",
"decides",
"how",
"to",
"write",
"the",
"file",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/waterfall.py#L214-L234
|
[
"def",
"write_to_fil",
"(",
"self",
",",
"filename_out",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"#For timing how long it takes to write a file.",
"t0",
"=",
"time",
".",
"time",
"(",
")",
"#Update header",
"self",
".",
"__update_header",
"(",
")",
"if",
"self",
".",
"container",
".",
"isheavy",
"(",
")",
":",
"self",
".",
"__write_to_fil_heavy",
"(",
"filename_out",
")",
"else",
":",
"self",
".",
"__write_to_fil_light",
"(",
"filename_out",
")",
"t1",
"=",
"time",
".",
"time",
"(",
")",
"logger",
".",
"info",
"(",
"'Conversion time: %2.2fsec'",
"%",
"(",
"t1",
"-",
"t0",
")",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
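write_to_fil wraps the heavy/light dispatch in a wall-clock timer. The same timing idiom, extracted into a reusable sketch (timed_write and the lambda are hypothetical):

import time

def timed_write(write_fn, *args):
    t0 = time.time()
    write_fn(*args)
    print('Conversion time: %2.2fsec' % (time.time() - t0))

timed_write(lambda path: open(path, 'wb').close(), 'empty.fil')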
test
|
Waterfall.__write_to_fil_heavy
|
Write data to .fil file.
Args:
filename_out (str): Name of output file
|
blimpy/waterfall.py
|
def __write_to_fil_heavy(self, filename_out, *args, **kwargs):
""" Write data to .fil file.
Args:
filename_out (str): Name of output file
"""
#Note that a chunk is not a blob!!
chunk_dim = self.__get_chunk_dimensions()
blob_dim = self.__get_blob_dimensions(chunk_dim)
n_blobs = self.container.calc_n_blobs(blob_dim)
#Write header of .fil file
n_bytes = self.header[b'nbits'] / 8
with open(filename_out, "wb") as fileh:
fileh.write(generate_sigproc_header(self)) #generate_sigproc_header comes from sigproc.py
logger.info('Using %i n_blobs to write the data.'% n_blobs)
for ii in range(0, n_blobs):
logger.info('Reading %i of %i' % (ii + 1, n_blobs))
bob = self.container.read_blob(blob_dim,n_blob=ii)
#Write data of .fil file.
with open(filename_out, "a") as fileh:
j = bob
if n_bytes == 4:
np.float32(j.ravel()).tofile(fileh)
elif n_bytes == 2:
np.int16(j.ravel()).tofile(fileh)
elif n_bytes == 1:
np.int8(j.ravel()).tofile(fileh)
|
def __write_to_fil_heavy(self, filename_out, *args, **kwargs):
""" Write data to .fil file.
Args:
filename_out (str): Name of output file
"""
#Note that a chunk is not a blob!!
chunk_dim = self.__get_chunk_dimensions()
blob_dim = self.__get_blob_dimensions(chunk_dim)
n_blobs = self.container.calc_n_blobs(blob_dim)
#Write header of .fil file
n_bytes = self.header[b'nbits'] / 8
with open(filename_out, "wb") as fileh:
fileh.write(generate_sigproc_header(self)) #generate_sigproc_header comes from sigproc.py
logger.info('Using %i n_blobs to write the data.'% n_blobs)
for ii in range(0, n_blobs):
logger.info('Reading %i of %i' % (ii + 1, n_blobs))
bob = self.container.read_blob(blob_dim,n_blob=ii)
#Write data of .fil file.
with open(filename_out, "a") as fileh:
j = bob
if n_bytes == 4:
np.float32(j.ravel()).tofile(fileh)
elif n_bytes == 2:
np.int16(j.ravel()).tofile(fileh)
elif n_bytes == 1:
np.int8(j.ravel()).tofile(fileh)
|
[
"Write",
"data",
"to",
".",
"fil",
"file",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/waterfall.py#L236-L267
|
[
"def",
"__write_to_fil_heavy",
"(",
"self",
",",
"filename_out",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"#Note that a chunk is not a blob!!",
"chunk_dim",
"=",
"self",
".",
"__get_chunk_dimensions",
"(",
")",
"blob_dim",
"=",
"self",
".",
"__get_blob_dimensions",
"(",
"chunk_dim",
")",
"n_blobs",
"=",
"self",
".",
"container",
".",
"calc_n_blobs",
"(",
"blob_dim",
")",
"#Write header of .fil file",
"n_bytes",
"=",
"self",
".",
"header",
"[",
"b'nbits'",
"]",
"/",
"8",
"with",
"open",
"(",
"filename_out",
",",
"\"wb\"",
")",
"as",
"fileh",
":",
"fileh",
".",
"write",
"(",
"generate_sigproc_header",
"(",
"self",
")",
")",
"#generate_sigproc_header comes from sigproc.py",
"logger",
".",
"info",
"(",
"'Using %i n_blobs to write the data.'",
"%",
"n_blobs",
")",
"for",
"ii",
"in",
"range",
"(",
"0",
",",
"n_blobs",
")",
":",
"logger",
".",
"info",
"(",
"'Reading %i of %i'",
"%",
"(",
"ii",
"+",
"1",
",",
"n_blobs",
")",
")",
"bob",
"=",
"self",
".",
"container",
".",
"read_blob",
"(",
"blob_dim",
",",
"n_blob",
"=",
"ii",
")",
"#Write data of .fil file.",
"with",
"open",
"(",
"filename_out",
",",
"\"a\"",
")",
"as",
"fileh",
":",
"j",
"=",
"bob",
"if",
"n_bytes",
"==",
"4",
":",
"np",
".",
"float32",
"(",
"j",
".",
"ravel",
"(",
")",
")",
".",
"tofile",
"(",
"fileh",
")",
"elif",
"n_bytes",
"==",
"2",
":",
"np",
".",
"int16",
"(",
"j",
".",
"ravel",
"(",
")",
")",
".",
"tofile",
"(",
"fileh",
")",
"elif",
"n_bytes",
"==",
"1",
":",
"np",
".",
"int8",
"(",
"j",
".",
"ravel",
"(",
")",
")",
".",
"tofile",
"(",
"fileh",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
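The heavy writer streams one blob at a time: the header goes out once in 'wb' mode, then each blob is appended with tofile. One caveat: numpy's tofile requires a binary file handle, so under Python 3 the open(filename_out, "a") above would likely need to be "ab"; the sketch below uses 'ab' for that reason.

import numpy as np

with open('demo.fil', 'wb') as fileh:
    fileh.write(b'FAKE-HEADER')              # stand-in for generate_sigproc_header(self)

for ii in range(2):                          # stream blobs without holding them all in RAM
    blob = np.zeros((2, 1, 4), dtype=np.float32)
    with open('demo.fil', 'ab') as fileh:    # 'ab', not 'a': tofile needs binary mode
        np.float32(blob.ravel()).tofile(fileh)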
test
|
Waterfall.__write_to_fil_light
|
Write data to .fil file.
Args:
filename_out (str): Name of output file
|
blimpy/waterfall.py
|
def __write_to_fil_light(self, filename_out, *args, **kwargs):
""" Write data to .fil file.
Args:
filename_out (str): Name of output file
"""
n_bytes = self.header[b'nbits'] / 8
with open(filename_out, "wb") as fileh:
fileh.write(generate_sigproc_header(self)) #generate_sigproc_header comes from sigproc.py
j = self.data
if n_bytes == 4:
np.float32(j.ravel()).tofile(fileh)
elif n_bytes == 2:
np.int16(j.ravel()).tofile(fileh)
elif n_bytes == 1:
np.int8(j.ravel()).tofile(fileh)
|
def __write_to_fil_light(self, filename_out, *args, **kwargs):
""" Write data to .fil file.
Args:
filename_out (str): Name of output file
"""
n_bytes = self.header[b'nbits'] / 8
with open(filename_out, "wb") as fileh:
fileh.write(generate_sigproc_header(self)) #generate_sigproc_header comes from sigproc.py
j = self.data
if n_bytes == 4:
np.float32(j.ravel()).tofile(fileh)
elif n_bytes == 2:
np.int16(j.ravel()).tofile(fileh)
elif n_bytes == 1:
np.int8(j.ravel()).tofile(fileh)
|
[
"Write",
"data",
"to",
".",
"fil",
"file",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/waterfall.py#L269-L285
|
[
"def",
"__write_to_fil_light",
"(",
"self",
",",
"filename_out",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"n_bytes",
"=",
"self",
".",
"header",
"[",
"b'nbits'",
"]",
"/",
"8",
"with",
"open",
"(",
"filename_out",
",",
"\"wb\"",
")",
"as",
"fileh",
":",
"fileh",
".",
"write",
"(",
"generate_sigproc_header",
"(",
"self",
")",
")",
"#generate_sigproc_header comes from sigproc.py",
"j",
"=",
"self",
".",
"data",
"if",
"n_bytes",
"==",
"4",
":",
"np",
".",
"float32",
"(",
"j",
".",
"ravel",
"(",
")",
")",
".",
"tofile",
"(",
"fileh",
")",
"elif",
"n_bytes",
"==",
"2",
":",
"np",
".",
"int16",
"(",
"j",
".",
"ravel",
"(",
")",
")",
".",
"tofile",
"(",
"fileh",
")",
"elif",
"n_bytes",
"==",
"1",
":",
"np",
".",
"int8",
"(",
"j",
".",
"ravel",
"(",
")",
")",
".",
"tofile",
"(",
"fileh",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
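The light variant keeps the whole data cube in memory and writes header plus data in a single pass, avoiding the reopen-per-blob loop of the heavy path. Waterfall presumably exposes a public write_to_fil wrapper (not shown in this excerpt) that chooses between the two paths; if so, typical usage would look like this, with hypothetical file names:

from blimpy import Waterfall

wf = Waterfall('observation.h5')    # hypothetical HDF5 input
wf.write_to_fil('observation.fil')  # dispatches to the heavy or light writer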
test
|
Waterfall.write_to_hdf5
|
Write data to HDF5 file.
It checks the file size then decides how to write the file.
Args:
filename_out (str): Name of output file
|
blimpy/waterfall.py
|
def write_to_hdf5(self, filename_out, *args, **kwargs):
""" Write data to HDF5 file.
        It checks the file size then decides how to write the file.
Args:
filename_out (str): Name of output file
"""
#For timing how long it takes to write a file.
t0 = time.time()
#Update header
self.__update_header()
if self.container.isheavy():
self.__write_to_hdf5_heavy(filename_out)
else:
self.__write_to_hdf5_light(filename_out)
t1 = time.time()
logger.info('Conversion time: %2.2fsec' % (t1- t0))
|
def write_to_hdf5(self, filename_out, *args, **kwargs):
""" Write data to HDF5 file.
        It checks the file size then decides how to write the file.
Args:
filename_out (str): Name of output file
"""
#For timing how long it takes to write a file.
t0 = time.time()
#Update header
self.__update_header()
if self.container.isheavy():
self.__write_to_hdf5_heavy(filename_out)
else:
self.__write_to_hdf5_light(filename_out)
t1 = time.time()
logger.info('Conversion time: %2.2fsec' % (t1- t0))
|
[
"Write",
"data",
"to",
"HDF5",
"file",
".",
"It",
"check",
"the",
"file",
"size",
"then",
"decides",
"how",
"to",
"write",
"the",
"file",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/waterfall.py#L287-L307
|
[
"def",
"write_to_hdf5",
"(",
"self",
",",
"filename_out",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"#For timing how long it takes to write a file.",
"t0",
"=",
"time",
".",
"time",
"(",
")",
"#Update header",
"self",
".",
"__update_header",
"(",
")",
"if",
"self",
".",
"container",
".",
"isheavy",
"(",
")",
":",
"self",
".",
"__write_to_hdf5_heavy",
"(",
"filename_out",
")",
"else",
":",
"self",
".",
"__write_to_hdf5_light",
"(",
"filename_out",
")",
"t1",
"=",
"time",
".",
"time",
"(",
")",
"logger",
".",
"info",
"(",
"'Conversion time: %2.2fsec'",
"%",
"(",
"t1",
"-",
"t0",
")",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
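Typical use of the public dispatcher above; the file names are hypothetical:

from blimpy import Waterfall

wf = Waterfall('observation.fil')   # hypothetical filterbank input
wf.write_to_hdf5('observation.h5')  # heavy or light path chosen via container.isheavy()

The logged 'Conversion time' makes it easy to compare the blob-streamed heavy path against the in-memory light path on the same file.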
test
|
Waterfall.__write_to_hdf5_heavy
|
Write data to HDF5 file.
Args:
filename_out (str): Name of output file
|
blimpy/waterfall.py
|
def __write_to_hdf5_heavy(self, filename_out, *args, **kwargs):
""" Write data to HDF5 file.
Args:
filename_out (str): Name of output file
"""
block_size = 0
#Note that a chunk is not a blob!!
chunk_dim = self.__get_chunk_dimensions()
blob_dim = self.__get_blob_dimensions(chunk_dim)
n_blobs = self.container.calc_n_blobs(blob_dim)
with h5py.File(filename_out, 'w') as h5:
h5.attrs[b'CLASS'] = b'FILTERBANK'
h5.attrs[b'VERSION'] = b'1.0'
if HAS_BITSHUFFLE:
bs_compression = bitshuffle.h5.H5FILTER
bs_compression_opts = (block_size, bitshuffle.h5.H5_COMPRESS_LZ4)
else:
bs_compression = None
bs_compression_opts = None
logger.warning("Warning: bitshuffle not found. No compression applied.")
dset = h5.create_dataset('data',
shape=self.selection_shape,
chunks=chunk_dim,
compression=bs_compression,
compression_opts=bs_compression_opts,
dtype=self.data.dtype)
dset_mask = h5.create_dataset('mask',
shape=self.selection_shape,
chunks=chunk_dim,
compression=bs_compression,
compression_opts=bs_compression_opts,
dtype='uint8')
dset.dims[0].label = b"frequency"
dset.dims[1].label = b"feed_id"
dset.dims[2].label = b"time"
dset_mask.dims[0].label = b"frequency"
dset_mask.dims[1].label = b"feed_id"
dset_mask.dims[2].label = b"time"
# Copy over header information as attributes
for key, value in self.header.items():
dset.attrs[key] = value
if blob_dim[self.freq_axis] < self.selection_shape[self.freq_axis]:
logger.info('Using %i n_blobs to write the data.'% n_blobs)
for ii in range(0, n_blobs):
logger.info('Reading %i of %i' % (ii + 1, n_blobs))
bob = self.container.read_blob(blob_dim,n_blob=ii)
#-----
#Using channels instead of frequency.
c_start = self.container.chan_start_idx + ii*blob_dim[self.freq_axis]
t_start = self.container.t_start + (c_start/self.selection_shape[self.freq_axis])*blob_dim[self.time_axis]
t_stop = t_start + blob_dim[self.time_axis]
# Reverse array if frequency axis is flipped
# if self.header['foff'] < 0:
# c_stop = self.selection_shape[self.freq_axis] - (c_start)%self.selection_shape[self.freq_axis]
# c_start = c_stop - blob_dim[self.freq_axis]
# else:
c_start = (c_start)%self.selection_shape[self.freq_axis]
c_stop = c_start + blob_dim[self.freq_axis]
#-----
logger.debug(t_start,t_stop,c_start,c_stop)
dset[t_start:t_stop,0,c_start:c_stop] = bob[:]
else:
logger.info('Using %i n_blobs to write the data.'% n_blobs)
for ii in range(0, n_blobs):
logger.info('Reading %i of %i' % (ii + 1, n_blobs))
bob = self.container.read_blob(blob_dim,n_blob=ii)
t_start = self.container.t_start + ii*blob_dim[self.time_axis]
#This prevents issues when the last blob is smaller than the others in time
if (ii+1)*blob_dim[self.time_axis] > self.n_ints_in_file:
t_stop = self.n_ints_in_file
else:
t_stop = (ii+1)*blob_dim[self.time_axis]
dset[t_start:t_stop] = bob[:]
|
def __write_to_hdf5_heavy(self, filename_out, *args, **kwargs):
""" Write data to HDF5 file.
Args:
filename_out (str): Name of output file
"""
block_size = 0
#Note that a chunk is not a blob!!
chunk_dim = self.__get_chunk_dimensions()
blob_dim = self.__get_blob_dimensions(chunk_dim)
n_blobs = self.container.calc_n_blobs(blob_dim)
with h5py.File(filename_out, 'w') as h5:
h5.attrs[b'CLASS'] = b'FILTERBANK'
h5.attrs[b'VERSION'] = b'1.0'
if HAS_BITSHUFFLE:
bs_compression = bitshuffle.h5.H5FILTER
bs_compression_opts = (block_size, bitshuffle.h5.H5_COMPRESS_LZ4)
else:
bs_compression = None
bs_compression_opts = None
logger.warning("Warning: bitshuffle not found. No compression applied.")
dset = h5.create_dataset('data',
shape=self.selection_shape,
chunks=chunk_dim,
compression=bs_compression,
compression_opts=bs_compression_opts,
dtype=self.data.dtype)
dset_mask = h5.create_dataset('mask',
shape=self.selection_shape,
chunks=chunk_dim,
compression=bs_compression,
compression_opts=bs_compression_opts,
dtype='uint8')
dset.dims[0].label = b"frequency"
dset.dims[1].label = b"feed_id"
dset.dims[2].label = b"time"
dset_mask.dims[0].label = b"frequency"
dset_mask.dims[1].label = b"feed_id"
dset_mask.dims[2].label = b"time"
# Copy over header information as attributes
for key, value in self.header.items():
dset.attrs[key] = value
if blob_dim[self.freq_axis] < self.selection_shape[self.freq_axis]:
logger.info('Using %i n_blobs to write the data.'% n_blobs)
for ii in range(0, n_blobs):
logger.info('Reading %i of %i' % (ii + 1, n_blobs))
bob = self.container.read_blob(blob_dim,n_blob=ii)
#-----
#Using channels instead of frequency.
c_start = self.container.chan_start_idx + ii*blob_dim[self.freq_axis]
t_start = self.container.t_start + (c_start/self.selection_shape[self.freq_axis])*blob_dim[self.time_axis]
t_stop = t_start + blob_dim[self.time_axis]
# Reverse array if frequency axis is flipped
# if self.header['foff'] < 0:
# c_stop = self.selection_shape[self.freq_axis] - (c_start)%self.selection_shape[self.freq_axis]
# c_start = c_stop - blob_dim[self.freq_axis]
# else:
c_start = (c_start)%self.selection_shape[self.freq_axis]
c_stop = c_start + blob_dim[self.freq_axis]
#-----
logger.debug(t_start,t_stop,c_start,c_stop)
dset[t_start:t_stop,0,c_start:c_stop] = bob[:]
else:
logger.info('Using %i n_blobs to write the data.'% n_blobs)
for ii in range(0, n_blobs):
logger.info('Reading %i of %i' % (ii + 1, n_blobs))
bob = self.container.read_blob(blob_dim,n_blob=ii)
t_start = self.container.t_start + ii*blob_dim[self.time_axis]
#This prevents issues when the last blob is smaller than the others in time
if (ii+1)*blob_dim[self.time_axis] > self.n_ints_in_file:
t_stop = self.n_ints_in_file
else:
t_stop = (ii+1)*blob_dim[self.time_axis]
dset[t_start:t_stop] = bob[:]
|
[
"Write",
"data",
"to",
"HDF5",
"file",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/waterfall.py#L309-L404
|
[
"def",
"__write_to_hdf5_heavy",
"(",
"self",
",",
"filename_out",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"block_size",
"=",
"0",
"#Note that a chunk is not a blob!!",
"chunk_dim",
"=",
"self",
".",
"__get_chunk_dimensions",
"(",
")",
"blob_dim",
"=",
"self",
".",
"__get_blob_dimensions",
"(",
"chunk_dim",
")",
"n_blobs",
"=",
"self",
".",
"container",
".",
"calc_n_blobs",
"(",
"blob_dim",
")",
"with",
"h5py",
".",
"File",
"(",
"filename_out",
",",
"'w'",
")",
"as",
"h5",
":",
"h5",
".",
"attrs",
"[",
"b'CLASS'",
"]",
"=",
"b'FILTERBANK'",
"h5",
".",
"attrs",
"[",
"b'VERSION'",
"]",
"=",
"b'1.0'",
"if",
"HAS_BITSHUFFLE",
":",
"bs_compression",
"=",
"bitshuffle",
".",
"h5",
".",
"H5FILTER",
"bs_compression_opts",
"=",
"(",
"block_size",
",",
"bitshuffle",
".",
"h5",
".",
"H5_COMPRESS_LZ4",
")",
"else",
":",
"bs_compression",
"=",
"None",
"bs_compression_opts",
"=",
"None",
"logger",
".",
"warning",
"(",
"\"Warning: bitshuffle not found. No compression applied.\"",
")",
"dset",
"=",
"h5",
".",
"create_dataset",
"(",
"'data'",
",",
"shape",
"=",
"self",
".",
"selection_shape",
",",
"chunks",
"=",
"chunk_dim",
",",
"compression",
"=",
"bs_compression",
",",
"compression_opts",
"=",
"bs_compression_opts",
",",
"dtype",
"=",
"self",
".",
"data",
".",
"dtype",
")",
"dset_mask",
"=",
"h5",
".",
"create_dataset",
"(",
"'mask'",
",",
"shape",
"=",
"self",
".",
"selection_shape",
",",
"chunks",
"=",
"chunk_dim",
",",
"compression",
"=",
"bs_compression",
",",
"compression_opts",
"=",
"bs_compression_opts",
",",
"dtype",
"=",
"'uint8'",
")",
"dset",
".",
"dims",
"[",
"0",
"]",
".",
"label",
"=",
"b\"frequency\"",
"dset",
".",
"dims",
"[",
"1",
"]",
".",
"label",
"=",
"b\"feed_id\"",
"dset",
".",
"dims",
"[",
"2",
"]",
".",
"label",
"=",
"b\"time\"",
"dset_mask",
".",
"dims",
"[",
"0",
"]",
".",
"label",
"=",
"b\"frequency\"",
"dset_mask",
".",
"dims",
"[",
"1",
"]",
".",
"label",
"=",
"b\"feed_id\"",
"dset_mask",
".",
"dims",
"[",
"2",
"]",
".",
"label",
"=",
"b\"time\"",
"# Copy over header information as attributes",
"for",
"key",
",",
"value",
"in",
"self",
".",
"header",
".",
"items",
"(",
")",
":",
"dset",
".",
"attrs",
"[",
"key",
"]",
"=",
"value",
"if",
"blob_dim",
"[",
"self",
".",
"freq_axis",
"]",
"<",
"self",
".",
"selection_shape",
"[",
"self",
".",
"freq_axis",
"]",
":",
"logger",
".",
"info",
"(",
"'Using %i n_blobs to write the data.'",
"%",
"n_blobs",
")",
"for",
"ii",
"in",
"range",
"(",
"0",
",",
"n_blobs",
")",
":",
"logger",
".",
"info",
"(",
"'Reading %i of %i'",
"%",
"(",
"ii",
"+",
"1",
",",
"n_blobs",
")",
")",
"bob",
"=",
"self",
".",
"container",
".",
"read_blob",
"(",
"blob_dim",
",",
"n_blob",
"=",
"ii",
")",
"#-----",
"#Using channels instead of frequency.",
"c_start",
"=",
"self",
".",
"container",
".",
"chan_start_idx",
"+",
"ii",
"*",
"blob_dim",
"[",
"self",
".",
"freq_axis",
"]",
"t_start",
"=",
"self",
".",
"container",
".",
"t_start",
"+",
"(",
"c_start",
"/",
"self",
".",
"selection_shape",
"[",
"self",
".",
"freq_axis",
"]",
")",
"*",
"blob_dim",
"[",
"self",
".",
"time_axis",
"]",
"t_stop",
"=",
"t_start",
"+",
"blob_dim",
"[",
"self",
".",
"time_axis",
"]",
"# Reverse array if frequency axis is flipped",
"# if self.header['foff'] < 0:",
"# c_stop = self.selection_shape[self.freq_axis] - (c_start)%self.selection_shape[self.freq_axis]",
"# c_start = c_stop - blob_dim[self.freq_axis]",
"# else:",
"c_start",
"=",
"(",
"c_start",
")",
"%",
"self",
".",
"selection_shape",
"[",
"self",
".",
"freq_axis",
"]",
"c_stop",
"=",
"c_start",
"+",
"blob_dim",
"[",
"self",
".",
"freq_axis",
"]",
"#-----",
"logger",
".",
"debug",
"(",
"t_start",
",",
"t_stop",
",",
"c_start",
",",
"c_stop",
")",
"dset",
"[",
"t_start",
":",
"t_stop",
",",
"0",
",",
"c_start",
":",
"c_stop",
"]",
"=",
"bob",
"[",
":",
"]",
"else",
":",
"logger",
".",
"info",
"(",
"'Using %i n_blobs to write the data.'",
"%",
"n_blobs",
")",
"for",
"ii",
"in",
"range",
"(",
"0",
",",
"n_blobs",
")",
":",
"logger",
".",
"info",
"(",
"'Reading %i of %i'",
"%",
"(",
"ii",
"+",
"1",
",",
"n_blobs",
")",
")",
"bob",
"=",
"self",
".",
"container",
".",
"read_blob",
"(",
"blob_dim",
",",
"n_blob",
"=",
"ii",
")",
"t_start",
"=",
"self",
".",
"container",
".",
"t_start",
"+",
"ii",
"*",
"blob_dim",
"[",
"self",
".",
"time_axis",
"]",
"#This prevents issues when the last blob is smaller than the others in time",
"if",
"(",
"ii",
"+",
"1",
")",
"*",
"blob_dim",
"[",
"self",
".",
"time_axis",
"]",
">",
"self",
".",
"n_ints_in_file",
":",
"t_stop",
"=",
"self",
".",
"n_ints_in_file",
"else",
":",
"t_stop",
"=",
"(",
"ii",
"+",
"1",
")",
"*",
"blob_dim",
"[",
"self",
".",
"time_axis",
"]",
"dset",
"[",
"t_start",
":",
"t_stop",
"]",
"=",
"bob",
"[",
":",
"]"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
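In the time-major branch above, the last blob can be shorter than the others, which is why t_stop is clamped to n_ints_in_file. A small sketch of that bound computation, ignoring the container's t_start offset for brevity (helper name hypothetical):

def blob_time_bounds(ii, blob_len, n_ints_in_file):
    # Integration range covered by blob ii; the final blob is clamped
    # so the slice cannot run past the end of the file.
    t_start = ii * blob_len
    t_stop = min((ii + 1) * blob_len, n_ints_in_file)
    return t_start, t_stop

# Three blobs of 2048 integrations over a 5000-integration file:
print([blob_time_bounds(ii, 2048, 5000) for ii in range(3)])
# -> [(0, 2048), (2048, 4096), (4096, 5000)]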
test
|
Waterfall.__write_to_hdf5_light
|
Write data to HDF5 file in one go.
Args:
filename_out (str): Name of output file
|
blimpy/waterfall.py
|
def __write_to_hdf5_light(self, filename_out, *args, **kwargs):
""" Write data to HDF5 file in one go.
Args:
filename_out (str): Name of output file
"""
block_size = 0
with h5py.File(filename_out, 'w') as h5:
h5.attrs[b'CLASS'] = b'FILTERBANK'
h5.attrs[b'VERSION'] = b'1.0'
if HAS_BITSHUFFLE:
bs_compression = bitshuffle.h5.H5FILTER
bs_compression_opts = (block_size, bitshuffle.h5.H5_COMPRESS_LZ4)
else:
bs_compression = None
bs_compression_opts = None
logger.warning("Warning: bitshuffle not found. No compression applied.")
dset = h5.create_dataset('data',
data=self.data,
# compression='lzf')
compression=bs_compression,
compression_opts=bs_compression_opts)
dset_mask = h5.create_dataset('mask',
shape=self.file_shape,
# compression='lzf',
compression=bs_compression,
compression_opts=bs_compression_opts,
dtype='uint8')
dset.dims[0].label = b"frequency"
dset.dims[1].label = b"feed_id"
dset.dims[2].label = b"time"
dset_mask.dims[0].label = b"frequency"
dset_mask.dims[1].label = b"feed_id"
dset_mask.dims[2].label = b"time"
# Copy over header information as attributes
for key, value in self.header.items():
dset.attrs[key] = value
|
def __write_to_hdf5_light(self, filename_out, *args, **kwargs):
""" Write data to HDF5 file in one go.
Args:
filename_out (str): Name of output file
"""
block_size = 0
with h5py.File(filename_out, 'w') as h5:
h5.attrs[b'CLASS'] = b'FILTERBANK'
h5.attrs[b'VERSION'] = b'1.0'
if HAS_BITSHUFFLE:
bs_compression = bitshuffle.h5.H5FILTER
bs_compression_opts = (block_size, bitshuffle.h5.H5_COMPRESS_LZ4)
else:
bs_compression = None
bs_compression_opts = None
logger.warning("Warning: bitshuffle not found. No compression applied.")
dset = h5.create_dataset('data',
data=self.data,
# compression='lzf')
compression=bs_compression,
compression_opts=bs_compression_opts)
dset_mask = h5.create_dataset('mask',
shape=self.file_shape,
# compression='lzf',
compression=bs_compression,
compression_opts=bs_compression_opts,
dtype='uint8')
dset.dims[0].label = b"frequency"
dset.dims[1].label = b"feed_id"
dset.dims[2].label = b"time"
dset_mask.dims[0].label = b"frequency"
dset_mask.dims[1].label = b"feed_id"
dset_mask.dims[2].label = b"time"
# Copy over header information as attributes
for key, value in self.header.items():
dset.attrs[key] = value
|
[
"Write",
"data",
"to",
"HDF5",
"file",
"in",
"one",
"go",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/waterfall.py#L406-L452
|
[
"def",
"__write_to_hdf5_light",
"(",
"self",
",",
"filename_out",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"block_size",
"=",
"0",
"with",
"h5py",
".",
"File",
"(",
"filename_out",
",",
"'w'",
")",
"as",
"h5",
":",
"h5",
".",
"attrs",
"[",
"b'CLASS'",
"]",
"=",
"b'FILTERBANK'",
"h5",
".",
"attrs",
"[",
"b'VERSION'",
"]",
"=",
"b'1.0'",
"if",
"HAS_BITSHUFFLE",
":",
"bs_compression",
"=",
"bitshuffle",
".",
"h5",
".",
"H5FILTER",
"bs_compression_opts",
"=",
"(",
"block_size",
",",
"bitshuffle",
".",
"h5",
".",
"H5_COMPRESS_LZ4",
")",
"else",
":",
"bs_compression",
"=",
"None",
"bs_compression_opts",
"=",
"None",
"logger",
".",
"warning",
"(",
"\"Warning: bitshuffle not found. No compression applied.\"",
")",
"dset",
"=",
"h5",
".",
"create_dataset",
"(",
"'data'",
",",
"data",
"=",
"self",
".",
"data",
",",
"# compression='lzf')",
"compression",
"=",
"bs_compression",
",",
"compression_opts",
"=",
"bs_compression_opts",
")",
"dset_mask",
"=",
"h5",
".",
"create_dataset",
"(",
"'mask'",
",",
"shape",
"=",
"self",
".",
"file_shape",
",",
"# compression='lzf',",
"compression",
"=",
"bs_compression",
",",
"compression_opts",
"=",
"bs_compression_opts",
",",
"dtype",
"=",
"'uint8'",
")",
"dset",
".",
"dims",
"[",
"0",
"]",
".",
"label",
"=",
"b\"frequency\"",
"dset",
".",
"dims",
"[",
"1",
"]",
".",
"label",
"=",
"b\"feed_id\"",
"dset",
".",
"dims",
"[",
"2",
"]",
".",
"label",
"=",
"b\"time\"",
"dset_mask",
".",
"dims",
"[",
"0",
"]",
".",
"label",
"=",
"b\"frequency\"",
"dset_mask",
".",
"dims",
"[",
"1",
"]",
".",
"label",
"=",
"b\"feed_id\"",
"dset_mask",
".",
"dims",
"[",
"2",
"]",
".",
"label",
"=",
"b\"time\"",
"# Copy over header information as attributes",
"for",
"key",
",",
"value",
"in",
"self",
".",
"header",
".",
"items",
"(",
")",
":",
"dset",
".",
"attrs",
"[",
"key",
"]",
"=",
"value"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
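Both HDF5 writers share the bitshuffle/LZ4 setup that degrades gracefully when the filter is unavailable. A self-contained sketch of the same pattern with placeholder data; the file name and array shape are hypothetical:

import h5py
import numpy as np

try:
    import bitshuffle.h5
    compression = bitshuffle.h5.H5FILTER
    compression_opts = (0, bitshuffle.h5.H5_COMPRESS_LZ4)  # block size 0 lets the filter choose
except ImportError:
    compression, compression_opts = None, None  # write uncompressed instead

data = np.zeros((16, 1, 512), dtype=np.float32)  # placeholder (time, feed_id, frequency) cube
with h5py.File('light_example.h5', 'w') as h5:
    h5.attrs['CLASS'] = 'FILTERBANK'
    h5.attrs['VERSION'] = '1.0'
    h5.create_dataset('data', data=data,
                      compression=compression,
                      compression_opts=compression_opts)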
test
|
Waterfall.__get_blob_dimensions
|
Sets the blob dimensions, trying to read around 1024 MiB at a time.
This is assuming a chunk is about 1 MiB.
|
blimpy/waterfall.py
|
def __get_blob_dimensions(self, chunk_dim):
""" Sets the blob dimmentions, trying to read around 1024 MiB at a time.
This is assuming a chunk is about 1 MiB.
"""
#Taking the size into consideration, but avoiding having multiple blobs within a single time bin.
if self.selection_shape[self.freq_axis] > chunk_dim[self.freq_axis]*MAX_BLOB_MB:
freq_axis_size = self.selection_shape[self.freq_axis]
# while freq_axis_size > chunk_dim[self.freq_axis]*MAX_BLOB_MB:
# freq_axis_size /= 2
time_axis_size = 1
else:
freq_axis_size = self.selection_shape[self.freq_axis]
time_axis_size = np.min([chunk_dim[self.time_axis] * MAX_BLOB_MB * chunk_dim[self.freq_axis] / freq_axis_size, self.selection_shape[self.time_axis]])
blob_dim = (int(time_axis_size), 1, freq_axis_size)
return blob_dim
|
def __get_blob_dimensions(self, chunk_dim):
""" Sets the blob dimmentions, trying to read around 1024 MiB at a time.
This is assuming a chunk is about 1 MiB.
"""
#Taking the size into consideration, but avoiding having multiple blobs within a single time bin.
if self.selection_shape[self.freq_axis] > chunk_dim[self.freq_axis]*MAX_BLOB_MB:
freq_axis_size = self.selection_shape[self.freq_axis]
# while freq_axis_size > chunk_dim[self.freq_axis]*MAX_BLOB_MB:
# freq_axis_size /= 2
time_axis_size = 1
else:
freq_axis_size = self.selection_shape[self.freq_axis]
time_axis_size = np.min([chunk_dim[self.time_axis] * MAX_BLOB_MB * chunk_dim[self.freq_axis] / freq_axis_size, self.selection_shape[self.time_axis]])
blob_dim = (int(time_axis_size), 1, freq_axis_size)
return blob_dim
|
[
"Sets",
"the",
"blob",
"dimmentions",
"trying",
"to",
"read",
"around",
"1024",
"MiB",
"at",
"a",
"time",
".",
"This",
"is",
"assuming",
"a",
"chunk",
"is",
"about",
"1",
"MiB",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/waterfall.py#L454-L471
|
[
"def",
"__get_blob_dimensions",
"(",
"self",
",",
"chunk_dim",
")",
":",
"#Taking the size into consideration, but avoiding having multiple blobs within a single time bin.",
"if",
"self",
".",
"selection_shape",
"[",
"self",
".",
"freq_axis",
"]",
">",
"chunk_dim",
"[",
"self",
".",
"freq_axis",
"]",
"*",
"MAX_BLOB_MB",
":",
"freq_axis_size",
"=",
"self",
".",
"selection_shape",
"[",
"self",
".",
"freq_axis",
"]",
"# while freq_axis_size > chunk_dim[self.freq_axis]*MAX_BLOB_MB:",
"# freq_axis_size /= 2",
"time_axis_size",
"=",
"1",
"else",
":",
"freq_axis_size",
"=",
"self",
".",
"selection_shape",
"[",
"self",
".",
"freq_axis",
"]",
"time_axis_size",
"=",
"np",
".",
"min",
"(",
"[",
"chunk_dim",
"[",
"self",
".",
"time_axis",
"]",
"*",
"MAX_BLOB_MB",
"*",
"chunk_dim",
"[",
"self",
".",
"freq_axis",
"]",
"/",
"freq_axis_size",
",",
"self",
".",
"selection_shape",
"[",
"self",
".",
"time_axis",
"]",
"]",
")",
"blob_dim",
"=",
"(",
"int",
"(",
"time_axis_size",
")",
",",
"1",
",",
"freq_axis_size",
")",
"return",
"blob_dim"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
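A standalone rendering of the sizing rule above, assuming MAX_BLOB_MB is 1024 as the docstring suggests; the function name and axis defaults are hypothetical:

MAX_BLOB_MB = 1024  # target of roughly 1024 chunks (~1 MiB each) per blob

def blob_dimensions(chunk_dim, selection_shape, time_axis=0, freq_axis=2):
    # Keep the full frequency span; scale the time span so about
    # MAX_BLOB_MB worth of chunks fit in a single blob.
    freq_size = selection_shape[freq_axis]
    if freq_size > chunk_dim[freq_axis] * MAX_BLOB_MB:
        time_size = 1  # one time bin per blob rather than splitting the band
    else:
        time_size = min(chunk_dim[time_axis] * MAX_BLOB_MB * chunk_dim[freq_axis] // freq_size,
                        selection_shape[time_axis])
    return (int(time_size), 1, freq_size)

print(blob_dimensions((2048, 1, 512), (10000, 1, 512)))  # -> (10000, 1, 512)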
test
|
Waterfall.__get_chunk_dimensions
|
Sets the chunking dimensions depending on the file type.
|
blimpy/waterfall.py
|
def __get_chunk_dimensions(self):
""" Sets the chunking dimmentions depending on the file type.
"""
#Usually '.0000.' is in self.filename
if np.abs(self.header[b'foff']) < 1e-5:
logger.info('Detecting high frequency resolution data.')
chunk_dim = (1,1,1048576) #1048576 is the number of channels in a coarse channel.
return chunk_dim
#Usually '.0001.' is in self.filename
elif np.abs(self.header[b'tsamp']) < 1e-3:
logger.info('Detecting high time resolution data.')
chunk_dim = (2048,1,512) #512 is the total number of channels per single band (ie. blc00)
return chunk_dim
#Usually '.0002.' is in self.filename
elif np.abs(self.header[b'foff']) < 1e-2 and np.abs(self.header[b'foff']) >= 1e-5:
logger.info('Detecting intermediate frequency and time resolution data.')
chunk_dim = (10,1,65536) #65536 is the total number of channels per single band (ie. blc00)
# chunk_dim = (1,1,65536/4)
return chunk_dim
else:
logger.warning('File format not known. Will use minimum chunking. NOT OPTIMAL.')
chunk_dim = (1,1,512)
return chunk_dim
|
def __get_chunk_dimensions(self):
""" Sets the chunking dimmentions depending on the file type.
"""
#Usually '.0000.' is in self.filename
if np.abs(self.header[b'foff']) < 1e-5:
logger.info('Detecting high frequency resolution data.')
chunk_dim = (1,1,1048576) #1048576 is the number of channels in a coarse channel.
return chunk_dim
#Usually '.0001.' is in self.filename
elif np.abs(self.header[b'tsamp']) < 1e-3:
logger.info('Detecting high time resolution data.')
chunk_dim = (2048,1,512) #512 is the total number of channels per single band (ie. blc00)
return chunk_dim
#Usually '.0002.' is in self.filename
elif np.abs(self.header[b'foff']) < 1e-2 and np.abs(self.header[b'foff']) >= 1e-5:
logger.info('Detecting intermediate frequency and time resolution data.')
chunk_dim = (10,1,65536) #65536 is the total number of channels per single band (ie. blc00)
# chunk_dim = (1,1,65536/4)
return chunk_dim
else:
logger.warning('File format not known. Will use minimum chunking. NOT OPTIMAL.')
chunk_dim = (1,1,512)
return chunk_dim
|
[
"Sets",
"the",
"chunking",
"dimmentions",
"depending",
"on",
"the",
"file",
"type",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/waterfall.py#L473-L496
|
[
"def",
"__get_chunk_dimensions",
"(",
"self",
")",
":",
"#Usually '.0000.' is in self.filename",
"if",
"np",
".",
"abs",
"(",
"self",
".",
"header",
"[",
"b'foff'",
"]",
")",
"<",
"1e-5",
":",
"logger",
".",
"info",
"(",
"'Detecting high frequency resolution data.'",
")",
"chunk_dim",
"=",
"(",
"1",
",",
"1",
",",
"1048576",
")",
"#1048576 is the number of channels in a coarse channel.",
"return",
"chunk_dim",
"#Usually '.0001.' is in self.filename",
"elif",
"np",
".",
"abs",
"(",
"self",
".",
"header",
"[",
"b'tsamp'",
"]",
")",
"<",
"1e-3",
":",
"logger",
".",
"info",
"(",
"'Detecting high time resolution data.'",
")",
"chunk_dim",
"=",
"(",
"2048",
",",
"1",
",",
"512",
")",
"#512 is the total number of channels per single band (ie. blc00)",
"return",
"chunk_dim",
"#Usually '.0002.' is in self.filename",
"elif",
"np",
".",
"abs",
"(",
"self",
".",
"header",
"[",
"b'foff'",
"]",
")",
"<",
"1e-2",
"and",
"np",
".",
"abs",
"(",
"self",
".",
"header",
"[",
"b'foff'",
"]",
")",
">=",
"1e-5",
":",
"logger",
".",
"info",
"(",
"'Detecting intermediate frequency and time resolution data.'",
")",
"chunk_dim",
"=",
"(",
"10",
",",
"1",
",",
"65536",
")",
"#65536 is the total number of channels per single band (ie. blc00)",
"# chunk_dim = (1,1,65536/4)",
"return",
"chunk_dim",
"else",
":",
"logger",
".",
"warning",
"(",
"'File format not known. Will use minimum chunking. NOT OPTIMAL.'",
")",
"chunk_dim",
"=",
"(",
"1",
",",
"1",
",",
"512",
")",
"return",
"chunk_dim"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
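The same branching, reduced to a pure function of the two header fields it inspects (function name hypothetical):

def chunk_dimensions(foff, tsamp):
    # (time, feed_id, frequency) HDF5 chunk shape chosen from channel width
    # foff and sample time tsamp, mirroring the thresholds above.
    if abs(foff) < 1e-5:             # high frequency resolution ('.0000.' files)
        return (1, 1, 1048576)
    if abs(tsamp) < 1e-3:            # high time resolution ('.0001.' files)
        return (2048, 1, 512)
    if 1e-5 <= abs(foff) < 1e-2:     # intermediate resolution ('.0002.' files)
        return (10, 1, 65536)
    return (1, 1, 512)               # unknown format: minimal chunking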
test
|
Waterfall.grab_data
|
Extract a portion of data by frequency range.
Args:
f_start (float): start frequency in MHz
f_stop (float): stop frequency in MHz
if_id (int): IF input identification (req. when multiple IFs in file)
Returns:
(freqs, data) (np.arrays): frequency axis in MHz and data subset
|
blimpy/waterfall.py
|
def grab_data(self, f_start=None, f_stop=None,t_start=None, t_stop=None, if_id=0):
""" Extract a portion of data by frequency range.
Args:
f_start (float): start frequency in MHz
f_stop (float): stop frequency in MHz
if_id (int): IF input identification (req. when multiple IFs in file)
Returns:
(freqs, data) (np.arrays): frequency axis in MHz and data subset
"""
self.freqs = self.populate_freqs()
self.timestamps = self.populate_timestamps()
if f_start is None:
f_start = self.freqs[0]
if f_stop is None:
f_stop = self.freqs[-1]
i0 = np.argmin(np.abs(self.freqs - f_start))
i1 = np.argmin(np.abs(self.freqs - f_stop))
if i0 < i1:
plot_f = self.freqs[i0:i1 + 1]
plot_data = np.squeeze(self.data[t_start:t_stop, ..., i0:i1 + 1])
else:
plot_f = self.freqs[i1:i0 + 1]
plot_data = np.squeeze(self.data[t_start:t_stop, ..., i1:i0 + 1])
return plot_f, plot_data
|
def grab_data(self, f_start=None, f_stop=None,t_start=None, t_stop=None, if_id=0):
""" Extract a portion of data by frequency range.
Args:
f_start (float): start frequency in MHz
f_stop (float): stop frequency in MHz
if_id (int): IF input identification (req. when multiple IFs in file)
Returns:
(freqs, data) (np.arrays): frequency axis in MHz and data subset
"""
self.freqs = self.populate_freqs()
self.timestamps = self.populate_timestamps()
if f_start is None:
f_start = self.freqs[0]
if f_stop is None:
f_stop = self.freqs[-1]
i0 = np.argmin(np.abs(self.freqs - f_start))
i1 = np.argmin(np.abs(self.freqs - f_stop))
if i0 < i1:
plot_f = self.freqs[i0:i1 + 1]
plot_data = np.squeeze(self.data[t_start:t_stop, ..., i0:i1 + 1])
else:
plot_f = self.freqs[i1:i0 + 1]
plot_data = np.squeeze(self.data[t_start:t_stop, ..., i1:i0 + 1])
return plot_f, plot_data
|
[
"Extract",
"a",
"portion",
"of",
"data",
"by",
"frequency",
"range",
"."
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/waterfall.py#L509-L539
|
[
"def",
"grab_data",
"(",
"self",
",",
"f_start",
"=",
"None",
",",
"f_stop",
"=",
"None",
",",
"t_start",
"=",
"None",
",",
"t_stop",
"=",
"None",
",",
"if_id",
"=",
"0",
")",
":",
"self",
".",
"freqs",
"=",
"self",
".",
"populate_freqs",
"(",
")",
"self",
".",
"timestamps",
"=",
"self",
".",
"populate_timestamps",
"(",
")",
"if",
"f_start",
"is",
"None",
":",
"f_start",
"=",
"self",
".",
"freqs",
"[",
"0",
"]",
"if",
"f_stop",
"is",
"None",
":",
"f_stop",
"=",
"self",
".",
"freqs",
"[",
"-",
"1",
"]",
"i0",
"=",
"np",
".",
"argmin",
"(",
"np",
".",
"abs",
"(",
"self",
".",
"freqs",
"-",
"f_start",
")",
")",
"i1",
"=",
"np",
".",
"argmin",
"(",
"np",
".",
"abs",
"(",
"self",
".",
"freqs",
"-",
"f_stop",
")",
")",
"if",
"i0",
"<",
"i1",
":",
"plot_f",
"=",
"self",
".",
"freqs",
"[",
"i0",
":",
"i1",
"+",
"1",
"]",
"plot_data",
"=",
"np",
".",
"squeeze",
"(",
"self",
".",
"data",
"[",
"t_start",
":",
"t_stop",
",",
"...",
",",
"i0",
":",
"i1",
"+",
"1",
"]",
")",
"else",
":",
"plot_f",
"=",
"self",
".",
"freqs",
"[",
"i1",
":",
"i0",
"+",
"1",
"]",
"plot_data",
"=",
"np",
".",
"squeeze",
"(",
"self",
".",
"data",
"[",
"t_start",
":",
"t_stop",
",",
"...",
",",
"i1",
":",
"i0",
"+",
"1",
"]",
")",
"return",
"plot_f",
",",
"plot_data"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
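Typical use of grab_data; the file name and frequency window are hypothetical:

from blimpy import Waterfall

wf = Waterfall('observation.fil')                          # hypothetical input
freqs, data = wf.grab_data(f_start=1420.0, f_stop=1420.5)  # window in MHz
print(freqs.shape, data.shape)

Because i0 and i1 come from argmin over the frequency axis, the call works whether foff is positive or negative; the i0 < i1 test handles the flipped axis.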
test
|
cmd_tool
|
Command line tool for plotting and viewing info on guppi raw files
|
blimpy/guppi.py
|
def cmd_tool(args=None):
""" Command line tool for plotting and viewing info on guppi raw files """
from argparse import ArgumentParser
parser = ArgumentParser(description="Command line utility for creating spectra from GuppiRaw files.")
parser.add_argument('filename', type=str, help='Name of file to read')
parser.add_argument('-o', dest='outdir', type=str, default='./', help='output directory for PNG files')
args = parser.parse_args()
r = GuppiRaw(args.filename)
r.print_stats()
bname = os.path.splitext(os.path.basename(args.filename))[0]
bname = os.path.join(args.outdir, bname)
r.plot_histogram(filename="%s_hist.png" % bname)
r.plot_spectrum(filename="%s_spec.png" % bname)
|
def cmd_tool(args=None):
""" Command line tool for plotting and viewing info on guppi raw files """
from argparse import ArgumentParser
parser = ArgumentParser(description="Command line utility for creating spectra from GuppiRaw files.")
parser.add_argument('filename', type=str, help='Name of file to read')
parser.add_argument('-o', dest='outdir', type=str, default='./', help='output directory for PNG files')
args = parser.parse_args()
r = GuppiRaw(args.filename)
r.print_stats()
bname = os.path.splitext(os.path.basename(args.filename))[0]
bname = os.path.join(args.outdir, bname)
r.plot_histogram(filename="%s_hist.png" % bname)
r.plot_spectrum(filename="%s_spec.png" % bname)
|
[
"Command",
"line",
"tool",
"for",
"plotting",
"and",
"viewing",
"info",
"on",
"guppi",
"raw",
"files"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/guppi.py#L442-L459
|
[
"def",
"cmd_tool",
"(",
"args",
"=",
"None",
")",
":",
"from",
"argparse",
"import",
"ArgumentParser",
"parser",
"=",
"ArgumentParser",
"(",
"description",
"=",
"\"Command line utility for creating spectra from GuppiRaw files.\"",
")",
"parser",
".",
"add_argument",
"(",
"'filename'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'Name of file to read'",
")",
"parser",
".",
"add_argument",
"(",
"'-o'",
",",
"dest",
"=",
"'outdir'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'./'",
",",
"help",
"=",
"'output directory for PNG files'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"r",
"=",
"GuppiRaw",
"(",
"args",
".",
"filename",
")",
"r",
".",
"print_stats",
"(",
")",
"bname",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"args",
".",
"filename",
")",
")",
"[",
"0",
"]",
"bname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"outdir",
",",
"bname",
")",
"r",
".",
"plot_histogram",
"(",
"filename",
"=",
"\"%s_hist.png\"",
"%",
"bname",
")",
"r",
".",
"plot_spectrum",
"(",
"filename",
"=",
"\"%s_spec.png\"",
"%",
"bname",
")"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
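cmd_tool can also be driven programmatically, though note that its args parameter is currently ignored: parse_args() always reads sys.argv. A sketch with hypothetical file and directory names:

import sys
from blimpy.guppi import cmd_tool

sys.argv = ['guppi_tool', 'observation.0000.raw', '-o', './plots']
cmd_tool()  # prints stats, then writes *_hist.png and *_spec.png into ./plots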
test
|
GuppiRaw.read_header
|
Read next header (multiple headers in file)
Returns:
(header, data_idx) - a dictionary of keyword:value header data and
also the byte index of where the corresponding data block resides.
|
blimpy/guppi.py
|
def read_header(self):
""" Read next header (multiple headers in file)
Returns:
(header, data_idx) - a dictionary of keyword:value header data and
also the byte index of where the corresponding data block resides.
"""
start_idx = self.file_obj.tell()
key, val = '', ''
header_dict = {}
keep_reading = True
first_line = self.file_obj
try:
while keep_reading:
if start_idx + 80 > self.filesize:
keep_reading = False
raise EndOfFileError("End Of Data File")
line = self.file_obj.read(80)
if PYTHON3:
line = line.decode("utf-8")
# print line
if line.startswith('END'):
keep_reading = False
break
else:
key, val = line.split('=')
key, val = key.strip(), val.strip()
if "'" in val:
# Items in quotes are strings
val = str(val.strip("'").strip())
elif "." in val:
# Items with periods are floats (if not a string)
val = float(val)
else:
# Otherwise it's an integer
val = int(val)
header_dict[key] = val
except ValueError:
print("CURRENT LINE: ", line)
print("BLOCK START IDX: ", start_idx)
print("FILE SIZE: ", self.filesize)
print("NEXT 512 BYTES: \n")
print(self.file_obj.read(512))
raise
data_idx = self.file_obj.tell()
# Seek past padding if DIRECTIO is being used
if "DIRECTIO" in header_dict.keys():
if int(header_dict["DIRECTIO"]) == 1:
if data_idx % 512:
data_idx += (512 - data_idx % 512)
self.file_obj.seek(start_idx)
return header_dict, data_idx
|
def read_header(self):
""" Read next header (multiple headers in file)
Returns:
(header, data_idx) - a dictionary of keyword:value header data and
also the byte index of where the corresponding data block resides.
"""
start_idx = self.file_obj.tell()
key, val = '', ''
header_dict = {}
keep_reading = True
first_line = self.file_obj
try:
while keep_reading:
if start_idx + 80 > self.filesize:
keep_reading = False
raise EndOfFileError("End Of Data File")
line = self.file_obj.read(80)
if PYTHON3:
line = line.decode("utf-8")
# print line
if line.startswith('END'):
keep_reading = False
break
else:
key, val = line.split('=')
key, val = key.strip(), val.strip()
if "'" in val:
# Items in quotes are strings
val = str(val.strip("'").strip())
elif "." in val:
# Items with periods are floats (if not a string)
val = float(val)
else:
# Otherwise it's an integer
val = int(val)
header_dict[key] = val
except ValueError:
print("CURRENT LINE: ", line)
print("BLOCK START IDX: ", start_idx)
print("FILE SIZE: ", self.filesize)
print("NEXT 512 BYTES: \n")
print(self.file_obj.read(512))
raise
data_idx = self.file_obj.tell()
# Seek past padding if DIRECTIO is being used
if "DIRECTIO" in header_dict.keys():
if int(header_dict["DIRECTIO"]) == 1:
if data_idx % 512:
data_idx += (512 - data_idx % 512)
self.file_obj.seek(start_idx)
return header_dict, data_idx
|
[
"Read",
"next",
"header",
"(",
"multiple",
"headers",
"in",
"file",
")"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/guppi.py#L105-L164
|
[
"def",
"read_header",
"(",
"self",
")",
":",
"start_idx",
"=",
"self",
".",
"file_obj",
".",
"tell",
"(",
")",
"key",
",",
"val",
"=",
"''",
",",
"''",
"header_dict",
"=",
"{",
"}",
"keep_reading",
"=",
"True",
"first_line",
"=",
"self",
".",
"file_obj",
"try",
":",
"while",
"keep_reading",
":",
"if",
"start_idx",
"+",
"80",
">",
"self",
".",
"filesize",
":",
"keep_reading",
"=",
"False",
"raise",
"EndOfFileError",
"(",
"\"End Of Data File\"",
")",
"line",
"=",
"self",
".",
"file_obj",
".",
"read",
"(",
"80",
")",
"if",
"PYTHON3",
":",
"line",
"=",
"line",
".",
"decode",
"(",
"\"utf-8\"",
")",
"# print line",
"if",
"line",
".",
"startswith",
"(",
"'END'",
")",
":",
"keep_reading",
"=",
"False",
"break",
"else",
":",
"key",
",",
"val",
"=",
"line",
".",
"split",
"(",
"'='",
")",
"key",
",",
"val",
"=",
"key",
".",
"strip",
"(",
")",
",",
"val",
".",
"strip",
"(",
")",
"if",
"\"'\"",
"in",
"val",
":",
"# Items in quotes are strings",
"val",
"=",
"str",
"(",
"val",
".",
"strip",
"(",
"\"'\"",
")",
".",
"strip",
"(",
")",
")",
"elif",
"\".\"",
"in",
"val",
":",
"# Items with periods are floats (if not a string)",
"val",
"=",
"float",
"(",
"val",
")",
"else",
":",
"# Otherwise it's an integer",
"val",
"=",
"int",
"(",
"val",
")",
"header_dict",
"[",
"key",
"]",
"=",
"val",
"except",
"ValueError",
":",
"print",
"(",
"\"CURRENT LINE: \"",
",",
"line",
")",
"print",
"(",
"\"BLOCK START IDX: \"",
",",
"start_idx",
")",
"print",
"(",
"\"FILE SIZE: \"",
",",
"self",
".",
"filesize",
")",
"print",
"(",
"\"NEXT 512 BYTES: \\n\"",
")",
"print",
"(",
"self",
".",
"file_obj",
".",
"read",
"(",
"512",
")",
")",
"raise",
"data_idx",
"=",
"self",
".",
"file_obj",
".",
"tell",
"(",
")",
"# Seek past padding if DIRECTIO is being used",
"if",
"\"DIRECTIO\"",
"in",
"header_dict",
".",
"keys",
"(",
")",
":",
"if",
"int",
"(",
"header_dict",
"[",
"\"DIRECTIO\"",
"]",
")",
"==",
"1",
":",
"if",
"data_idx",
"%",
"512",
":",
"data_idx",
"+=",
"(",
"512",
"-",
"data_idx",
"%",
"512",
")",
"self",
".",
"file_obj",
".",
"seek",
"(",
"start_idx",
")",
"return",
"header_dict",
",",
"data_idx"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
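Two pieces of read_header isolated for clarity: the 80-character card parser and the DIRECTIO padding rule. Both helper names are hypothetical:

def parse_card(line):
    # One GUPPI header card -> (key, value), using the same typing rules:
    # quoted values are strings, values containing '.' are floats, the rest ints.
    key, val = line.split('=')
    key, val = key.strip(), val.strip()
    if "'" in val:
        return key, val.strip("'").strip()
    if '.' in val:
        return key, float(val)
    return key, int(val)

def pad_to_directio(data_idx, block=512):
    # With DIRECTIO=1 the data block starts on the next 512-byte boundary.
    return data_idx + (-data_idx % block)

print(parse_card("OBSFREQ =            1400.0   "))  # ('OBSFREQ', 1400.0)
print(pad_to_directio(6480))                         # 6656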
test
|
GuppiRaw.read_first_header
|
Read first header in file
Returns:
header (dict): keyword:value pairs of header metadata
|
blimpy/guppi.py
|
def read_first_header(self):
""" Read first header in file
Returns:
header (dict): keyword:value pairs of header metadata
"""
self.file_obj.seek(0)
header_dict, pos = self.read_header()
self.file_obj.seek(0)
return header_dict
|
def read_first_header(self):
""" Read first header in file
Returns:
header (dict): keyword:value pairs of header metadata
"""
self.file_obj.seek(0)
header_dict, pos = self.read_header()
self.file_obj.seek(0)
return header_dict
|
[
"Read",
"first",
"header",
"in",
"file"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/guppi.py#L166-L175
|
[
"def",
"read_first_header",
"(",
"self",
")",
":",
"self",
".",
"file_obj",
".",
"seek",
"(",
"0",
")",
"header_dict",
",",
"pos",
"=",
"self",
".",
"read_header",
"(",
")",
"self",
".",
"file_obj",
".",
"seek",
"(",
"0",
")",
"return",
"header_dict"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
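Usage sketch; the raw file name and the header keys printed are hypothetical:

from blimpy.guppi import GuppiRaw

gr = GuppiRaw('observation.0000.raw')  # hypothetical GUPPI raw file
header = gr.read_first_header()        # file position is restored to 0 afterwards
print(header.get('NBITS'), header.get('OBSFREQ'))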
test
|
GuppiRaw.get_data
|
returns a generator object that reads data a block at a time;
the generator prints "File depleted" and returns nothing when all data in the file has been read.
:return:
|
blimpy/guppi.py
|
def get_data(self):
"""
returns a generator object that reads data a block at a time;
the generator prints "File depleted" and returns nothing when all data in the file has been read.
:return:
"""
with self as gr:
while True:
try:
yield gr.read_next_data_block_int8()
except Exception as e:
print("File depleted")
yield None, None, None
|
def get_data(self):
"""
returns a generator object that reads data a block at a time;
the generator prints "File depleted" and returns nothing when all data in the file has been read.
:return:
"""
with self as gr:
while True:
try:
yield gr.read_next_data_block_int8()
except Exception as e:
print("File depleted")
yield None, None, None
|
[
"returns",
"a",
"generator",
"object",
"that",
"reads",
"data",
"a",
"block",
"at",
"a",
"time",
";",
"the",
"generator",
"prints",
"File",
"depleted",
"and",
"returns",
"nothing",
"when",
"all",
"data",
"in",
"the",
"file",
"has",
"been",
"read",
".",
":",
"return",
":"
] |
UCBerkeleySETI/blimpy
|
python
|
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/guppi.py#L194-L206
|
[
"def",
"get_data",
"(",
"self",
")",
":",
"with",
"self",
"as",
"gr",
":",
"while",
"True",
":",
"try",
":",
"yield",
"gr",
".",
"read_next_data_block_int8",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"\"File depleted\"",
")",
"yield",
"None",
",",
"None",
",",
"None"
] |
b8822d3e3e911944370d84371a91fa0c29e9772e
|
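Once the file is exhausted, the generator prints "File depleted" and yields the (None, None, None) sentinel on every subsequent step, so callers must break on it. A usage sketch, with a hypothetical file name and assuming each block unpacks to a header plus two arrays, as the three-element sentinel implies:

from blimpy.guppi import GuppiRaw

gr = GuppiRaw('observation.0000.raw')
for header, x, y in gr.get_data():
    if header is None:  # sentinel: the file has been depleted
        break
    # ... process one int8 data block here ...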