sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def markdown_safe(value, arg=None):
""" Render markdown over a given value, optionally using varios extensions.
Default extensions could be defined which MARKDOWN_EXTENSIONS option.
Enables safe mode, which strips raw HTML and only returns HTML generated
by markdown.
:returns: A rendered markdown.
"""
extensions = (arg and arg.split(',')) or settings.MARKDOWN_EXTENSIONS
return _markdown(value, extensions=extensions, safe=True) | Render markdown over a given value, optionally using varios extensions.
Default extensions could be defined which MARKDOWN_EXTENSIONS option.
Enables safe mode, which strips raw HTML and only returns HTML generated
by markdown.
:returns: A rendered markdown. | entailment |
def markdown_editor(selector):
""" Enable markdown editor for given textarea.
:returns: Editor template context.
"""
return dict(
selector=selector,
extra_settings=mark_safe(simplejson.dumps(
dict(previewParserPath=reverse('django_markdown_preview'))))) | Enable markdown editor for given textarea.
:returns: Editor template context. | entailment |
def markdown_media_css():
""" Add css requirements to HTML.
:returns: Editor template context.
"""
return dict(
CSS_SET=posixpath.join(
settings.MARKDOWN_SET_PATH, settings.MARKDOWN_SET_NAME, 'style.css'
),
CSS_SKIN=posixpath.join(
'django_markdown', 'skins', settings.MARKDOWN_EDITOR_SKIN,
'style.css'
)
) | Add css requirements to HTML.
:returns: Editor template context. | entailment |
def convert(source, to, format=None, extra_args=(), encoding='utf-8'):
"""Convert given `source` from `format` `to` another.
`source` may be either a file path or a string to be converted.
It's possible to pass `extra_args` if needed. In case `format` is not
provided, it will try to invert the format based on given `source`.
Raises OSError if pandoc is not found! Make sure it has been installed and
is available at path.
"""
return _convert(
_read_file, _process_file,
source, to,
format, extra_args,
encoding=encoding) | Convert given `source` from `format` `to` another.
`source` may be either a file path or a string to be converted.
It's possible to pass `extra_args` if needed. In case `format` is not
provided, it will try to invert the format based on given `source`.
Raises OSError if pandoc is not found! Make sure it has been installed and
is available at path. | entailment |
def get_pandoc_formats():
""" Dynamic preprocessor for Pandoc formats.
Return 2 lists. "from_formats" and "to_formats".
"""
try:
p = subprocess.Popen(
['pandoc', '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
except OSError:
raise OSError("You probably do not have pandoc installed.")
help_text = p.communicate()[0].decode().splitlines(False)
txt = ' '.join(help_text[1:help_text.index('Options:')])
aux = txt.split('Output formats: ')
in_ = aux[0].split('Input formats: ')[1].split(',')
out = aux[1].split(',')
return [f.strip() for f in in_], [f.strip() for f in out] | Dynamic preprocessor for Pandoc formats.
Return 2 lists. "from_formats" and "to_formats". | entailment |
def render(self, name, value, attrs=None, renderer=None):
""" Render widget.
:returns: A rendered HTML
"""
html = super(MarkdownWidget, self).render(name, value, attrs, renderer)
attrs = self.build_attrs(attrs)
html += editor_js_initialization("#%s" % attrs['id'])
return mark_safe(html) | Render widget.
:returns: A rendered HTML | entailment |
def extendMarkdown(self, md, md_globals):
""" Add InlineGraphvizPreprocessor to the Markdown instance. """
md.registerExtension(self)
md.preprocessors.add('graphviz_block',
InlineGraphvizPreprocessor(md),
"_begin") | Add InlineGraphvizPreprocessor to the Markdown instance. | entailment |
def run(self, lines):
""" Match and generate dot code blocks."""
text = "\n".join(lines)
while 1:
m = BLOCK_RE.search(text)
if m:
command = m.group('command')
# Whitelist command, prevent command injection.
if command not in SUPPORTED_COMMAMDS:
raise Exception('Command not supported: %s' % command)
filename = m.group('filename')
content = m.group('content')
filetype = filename[filename.rfind('.')+1:]
args = [command, '-T'+filetype]
try:
proc = subprocess.Popen(
args,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
proc.stdin.write(content.encode('utf-8'))
output, err = proc.communicate()
if filetype == 'svg':
data_url_filetype = 'svg+xml'
encoding = 'utf-8'
img = output.decode(encoding)
if filetype == 'png':
data_url_filetype = 'png'
encoding = 'base64'
output = base64.b64encode(output)
data_path = "data:image/%s;%s,%s" % (
data_url_filetype,
encoding,
output)
img = ""
text = '%s\n%s\n%s' % (
text[:m.start()], img, text[m.end():])
except Exception as e:
err = str(e) + ' : ' + str(args)
return (
'<pre>Error : ' + err + '</pre>'
'<pre>' + content + '</pre>').split('\n')
else:
break
return text.split("\n") | Match and generate dot code blocks. | entailment |
def post(self, command, data=None):
"""Post data to API."""
now = calendar.timegm(datetime.datetime.now().timetuple())
if now > self.expiration:
auth = self.__open("/oauth/token", data=self.oauth)
self.__sethead(auth['access_token'])
return self.__open("%s%s" % (self.api, command),
headers=self.head, data=data) | Post data to API. | entailment |
def __sethead(self, access_token):
"""Set HTTP header."""
self.access_token = access_token
now = calendar.timegm(datetime.datetime.now().timetuple())
self.expiration = now + 1800
self.head = {"Authorization": "Bearer %s" % access_token,
"User-Agent": self.user_agent
} | Set HTTP header. | entailment |
def __open(self, url, headers=None, data=None, baseurl=""):
"""Use raw urlopen command."""
headers = headers or {}
if not baseurl:
baseurl = self.baseurl
req = Request("%s%s" % (baseurl, url), headers=headers)
_LOGGER.debug(url)
try:
req.data = urlencode(data).encode('utf-8')
except TypeError:
pass
opener = build_opener()
try:
resp = opener.open(req)
charset = resp.info().get('charset', 'utf-8')
data = json.loads(resp.read().decode(charset))
opener.close()
_LOGGER.debug(json.dumps(data))
return data
except HTTPError as exception_:
if exception_.code == 408:
_LOGGER.debug("%s", exception_)
return False
raise TeslaException(exception_.code) | Use raw urlopen command. | entailment |
def update(self):
"""Update the parking brake sensor."""
self._controller.update(self._id, wake_if_asleep=False)
data = self._controller.get_drive_params(self._id)
if data:
if not data['shift_state'] or data['shift_state'] == 'P':
self.__state = True
else:
self.__state = False | Update the parking brake sensor. | entailment |
def update(self):
"""Update the HVAC state."""
self._controller.update(self._id, wake_if_asleep=False)
data = self._controller.get_climate_params(self._id)
if data:
if time.time() - self.__manual_update_time > 60:
self.__is_auto_conditioning_on = (data
['is_auto_conditioning_on'])
self.__is_climate_on = data['is_climate_on']
self.__driver_temp_setting = (data['driver_temp_setting']
if data['driver_temp_setting']
else self.__driver_temp_setting)
self.__passenger_temp_setting = (data['passenger_temp_setting']
if
data['passenger_temp_setting']
else
self.__passenger_temp_setting)
self.__inside_temp = (data['inside_temp'] if data['inside_temp']
else self.__inside_temp)
self.__outside_temp = (data['outside_temp'] if data['outside_temp']
else self.__outside_temp)
self.__fan_status = data['fan_status'] | Update the HVAC state. | entailment |
def set_temperature(self, temp):
"""Set both the driver and passenger temperature to temp."""
temp = round(temp, 1)
self.__manual_update_time = time.time()
data = self._controller.command(self._id, 'set_temps',
{"driver_temp": temp,
"passenger_temp": temp},
wake_if_asleep=True)
if data['response']['result']:
self.__driver_temp_setting = temp
self.__passenger_temp_setting = temp | Set both the driver and passenger temperature to temp. | entailment |
def set_status(self, enabled):
"""Enable or disable the HVAC."""
self.__manual_update_time = time.time()
if enabled:
data = self._controller.command(self._id,
'auto_conditioning_start',
wake_if_asleep=True)
if data['response']['result']:
self.__is_auto_conditioning_on = True
self.__is_climate_on = True
else:
data = self._controller.command(self._id,
'auto_conditioning_stop',
wake_if_asleep=True)
if data['response']['result']:
self.__is_auto_conditioning_on = False
self.__is_climate_on = False
self.update() | Enable or disable the HVAC. | entailment |
def update(self):
"""Update the temperature."""
self._controller.update(self._id, wake_if_asleep=False)
data = self._controller.get_climate_params(self._id)
if data:
self.__inside_temp = (data['inside_temp'] if data['inside_temp']
else self.__inside_temp)
self.__outside_temp = (data['outside_temp'] if data['outside_temp']
else self.__outside_temp) | Update the temperature. | entailment |
def update(self):
"""Update the charging state of the Tesla Vehicle."""
self._controller.update(self._id, wake_if_asleep=False)
data = self._controller.get_charging_params(self._id)
if data and (time.time() - self.__manual_update_time > 60):
if data['charging_state'] != "Charging":
self.__charger_state = False
else:
self.__charger_state = True | Update the charging state of the Tesla Vehicle. | entailment |
def start_charge(self):
"""Start charging the Tesla Vehicle."""
if not self.__charger_state:
data = self._controller.command(self._id, 'charge_start',
wake_if_asleep=True)
if data and data['response']['result']:
self.__charger_state = True
self.__manual_update_time = time.time() | Start charging the Tesla Vehicle. | entailment |
def stop_charge(self):
"""Stop charging the Tesla Vehicle."""
if self.__charger_state:
data = self._controller.command(self._id, 'charge_stop',
wake_if_asleep=True)
if data and data['response']['result']:
self.__charger_state = False
self.__manual_update_time = time.time() | Stop charging the Tesla Vehicle. | entailment |
def update(self):
"""Update the status of the range setting."""
self._controller.update(self._id, wake_if_asleep=False)
data = self._controller.get_charging_params(self._id)
if data and (time.time() - self.__manual_update_time > 60):
self.__maxrange_state = data['charge_to_max_range'] | Update the status of the range setting. | entailment |
def set_max(self):
"""Set the charger to max range for trips."""
if not self.__maxrange_state:
data = self._controller.command(self._id, 'charge_max_range',
wake_if_asleep=True)
if data['response']['result']:
self.__maxrange_state = True
self.__manual_update_time = time.time() | Set the charger to max range for trips. | entailment |
def set_standard(self):
"""Set the charger to standard range for daily commute."""
if self.__maxrange_state:
data = self._controller.command(self._id, 'charge_standard',
wake_if_asleep=True)
if data and data['response']['result']:
self.__maxrange_state = False
self.__manual_update_time = time.time() | Set the charger to standard range for daily commute. | entailment |
def unlock(self):
"""Unlock the doors and extend handles where applicable."""
if self.__lock_state:
data = self._controller.command(self._id, 'door_unlock',
wake_if_asleep=True)
if data['response']['result']:
self.__lock_state = False
self.__manual_update_time = time.time() | Unlock the doors and extend handles where applicable. | entailment |
def lock(self):
"""Close the charger door."""
if not self.__lock_state:
data = self._controller.command(self._id, 'charge_port_door_close',
wake_if_asleep=True)
if data['response']['result']:
self.__lock_state = True
self.__manual_update_time = time.time() | Close the charger door. | entailment |
def wake_up(func):
# pylint: disable=no-self-argument
# issue is use of wraps on classmethods which should be replaced:
# https://hynek.me/articles/decorators/
"""Wrap a API f so it will attempt to wake the vehicle if asleep.
The command f is run once if the vehicle_id was last reported
online. Assuming f returns None and wake_if_asleep is True, 5 attempts
will be made to wake the vehicle to reissue the command. In addition,
if there is a `could_not_wake_buses` error, it will retry the command
Args:
inst (Controller): The instance of a controller
vehicle_id (string): The vehicle to attempt to wake.
TODO: This currently requires a vehicle_id, but update() does not; This
should also be updated to allow that case
wake_if_asleep (bool): Keyword arg to force a vehicle awake. Must be
set in the wrapped function f
Throws:
RetryLimitError
"""
@wraps(func)
def wrapped(*args, **kwargs):
# pylint: disable=too-many-branches,protected-access, not-callable
def valid_result(result):
"""Check if TeslaAPI result succesful.
Parameters
----------
result : tesla API result
This is the result of a Tesla Rest API call.
Returns
-------
bool
Tesla API failure can be checked in a dict with a bool in
['response']['result'], a bool, or None or
['response']['reason'] == 'could_not_wake_buses'
Returns true when a failure state not detected.
"""
try:
return (result is not None and result is not False and
(result is True or
(isinstance(result, dict) and
isinstance(result['response'], dict) and
('result' in result['response'] and
result['response']['result'] is True) or
('reason' in result['response'] and
result['response']['reason'] !=
'could_not_wake_buses') or
('result' not in result['response']))))
except TypeError as exception:
_LOGGER.error("Result: %s, %s", result, exception)
retries = 0
sleep_delay = 2
inst = args[0]
vehicle_id = args[1]
result = None
if (vehicle_id is not None and vehicle_id in inst.car_online and
inst.car_online[vehicle_id]):
try:
result = func(*args, **kwargs)
except TeslaException:
pass
if valid_result(result):
return result
_LOGGER.debug("wake_up needed for %s -> %s \n"
"Info: args:%s, kwargs:%s, "
"vehicle_id:%s, car_online:%s",
func.__name__, # pylint: disable=no-member
result, args, kwargs, vehicle_id,
inst.car_online)
inst.car_online[vehicle_id] = False
while ('wake_if_asleep' in kwargs and kwargs['wake_if_asleep']
and
# Check online state
(vehicle_id is None or
(vehicle_id is not None and
vehicle_id in inst.car_online and
not inst.car_online[vehicle_id]))):
result = inst._wake_up(vehicle_id)
_LOGGER.debug("%s(%s): Wake Attempt(%s): %s",
func.__name__, # pylint: disable=no-member,
vehicle_id,
retries, result)
if not result:
if retries < 5:
time.sleep(sleep_delay**(retries+2))
retries += 1
continue
else:
inst.car_online[vehicle_id] = False
raise RetryLimitError
else:
break
# try function five more times
retries = 0
while True:
try:
result = func(*args, **kwargs)
_LOGGER.debug("%s(%s): Retry Attempt(%s): %s",
func.__name__, # pylint: disable=no-member,
vehicle_id,
retries, result)
except TeslaException:
pass
finally:
retries += 1
time.sleep(sleep_delay**(retries+1))
if valid_result(result):
return result
if retries >= 5:
raise RetryLimitError
return wrapped | Wrap a API f so it will attempt to wake the vehicle if asleep.
The command f is run once if the vehicle_id was last reported
online. Assuming f returns None and wake_if_asleep is True, 5 attempts
will be made to wake the vehicle to reissue the command. In addition,
if there is a `could_not_wake_buses` error, it will retry the command
Args:
inst (Controller): The instance of a controller
vehicle_id (string): The vehicle to attempt to wake.
TODO: This currently requires a vehicle_id, but update() does not; This
should also be updated to allow that case
wake_if_asleep (bool): Keyword arg to force a vehicle awake. Must be
set in the wrapped function f
Throws:
RetryLimitError | entailment |
def post(self, vehicle_id, command, data=None, wake_if_asleep=True):
# pylint: disable=unused-argument
"""Send post command to the vehicle_id.
This is a wrapped function by wake_up.
Parameters
----------
vehicle_id : string
Identifier for the car on the owner-api endpoint. Confusingly it
is not the vehicle_id field for identifying the car across
different endpoints.
https://tesla-api.timdorr.com/api-basics/vehicles#vehicle_id-vs-id
command : string
Tesla API command. https://tesla-api.timdorr.com/vehicle/commands
data : dict
Optional parameters.
wake_if_asleep : bool
Function for wake_up decorator indicating whether a failed response
should wake up the vehicle or retry.
Returns
-------
dict
Tesla json object.
"""
data = data or {}
return self.__connection.post('vehicles/%i/%s' %
(vehicle_id, command), data) | Send post command to the vehicle_id.
This is a wrapped function by wake_up.
Parameters
----------
vehicle_id : string
Identifier for the car on the owner-api endpoint. Confusingly it
is not the vehicle_id field for identifying the car across
different endpoints.
https://tesla-api.timdorr.com/api-basics/vehicles#vehicle_id-vs-id
command : string
Tesla API command. https://tesla-api.timdorr.com/vehicle/commands
data : dict
Optional parameters.
wake_if_asleep : bool
Function for wake_up decorator indicating whether a failed response
should wake up the vehicle or retry.
Returns
-------
dict
Tesla json object. | entailment |
def get(self, vehicle_id, command, wake_if_asleep=False):
# pylint: disable=unused-argument
"""Send get command to the vehicle_id.
This is a wrapped function by wake_up.
Parameters
----------
vehicle_id : string
Identifier for the car on the owner-api endpoint. Confusingly it
is not the vehicle_id field for identifying the car across
different endpoints.
https://tesla-api.timdorr.com/api-basics/vehicles#vehicle_id-vs-id
command : string
Tesla API command. https://tesla-api.timdorr.com/vehicle/commands
wake_if_asleep : bool
Function for wake_up decorator indicating whether a failed response
should wake up the vehicle or retry.
Returns
-------
dict
Tesla json object.
"""
return self.__connection.get('vehicles/%i/%s' % (vehicle_id, command)) | Send get command to the vehicle_id.
This is a wrapped function by wake_up.
Parameters
----------
vehicle_id : string
Identifier for the car on the owner-api endpoint. Confusingly it
is not the vehicle_id field for identifying the car across
different endpoints.
https://tesla-api.timdorr.com/api-basics/vehicles#vehicle_id-vs-id
command : string
Tesla API command. https://tesla-api.timdorr.com/vehicle/commands
wake_if_asleep : bool
Function for wake_up decorator indicating whether a failed response
should wake up the vehicle or retry.
Returns
-------
dict
Tesla json object. | entailment |
def data_request(self, vehicle_id, name, wake_if_asleep=False):
"""Get requested data from vehicle_id.
Parameters
----------
vehicle_id : string
Identifier for the car on the owner-api endpoint. Confusingly it
is not the vehicle_id field for identifying the car across
different endpoints.
https://tesla-api.timdorr.com/api-basics/vehicles#vehicle_id-vs-id
name: string
Name of data to be requested from the data_request endpoint which
rolls ups all data plus vehicle configuration.
https://tesla-api.timdorr.com/vehicle/state/data
wake_if_asleep : bool
Function for underlying api call for whether a failed response
should wake up the vehicle or retry.
Returns
-------
dict
Tesla json object.
"""
return self.get(vehicle_id, 'vehicle_data/%s' % name,
wake_if_asleep=wake_if_asleep)['response'] | Get requested data from vehicle_id.
Parameters
----------
vehicle_id : string
Identifier for the car on the owner-api endpoint. Confusingly it
is not the vehicle_id field for identifying the car across
different endpoints.
https://tesla-api.timdorr.com/api-basics/vehicles#vehicle_id-vs-id
name: string
Name of data to be requested from the data_request endpoint which
rolls ups all data plus vehicle configuration.
https://tesla-api.timdorr.com/vehicle/state/data
wake_if_asleep : bool
Function for underlying api call for whether a failed response
should wake up the vehicle or retry.
Returns
-------
dict
Tesla json object. | entailment |
def command(self, vehicle_id, name, data=None, wake_if_asleep=True):
"""Post name command to the vehicle_id.
Parameters
----------
vehicle_id : string
Identifier for the car on the owner-api endpoint. Confusingly it
is not the vehicle_id field for identifying the car across
different endpoints.
https://tesla-api.timdorr.com/api-basics/vehicles#vehicle_id-vs-id
name : string
Tesla API command. https://tesla-api.timdorr.com/vehicle/commands
data : dict
Optional parameters.
wake_if_asleep : bool
Function for underlying api call for whether a failed response
should wake up the vehicle or retry.
Returns
-------
dict
Tesla json object.
"""
data = data or {}
return self.post(vehicle_id, 'command/%s' % name, data,
wake_if_asleep=wake_if_asleep) | Post name command to the vehicle_id.
Parameters
----------
vehicle_id : string
Identifier for the car on the owner-api endpoint. Confusingly it
is not the vehicle_id field for identifying the car across
different endpoints.
https://tesla-api.timdorr.com/api-basics/vehicles#vehicle_id-vs-id
name : string
Tesla API command. https://tesla-api.timdorr.com/vehicle/commands
data : dict
Optional parameters.
wake_if_asleep : bool
Function for underlying api call for whether a failed response
should wake up the vehicle or retry.
Returns
-------
dict
Tesla json object. | entailment |
def update(self, car_id=None, wake_if_asleep=False, force=False):
"""Update all vehicle attributes in the cache.
This command will connect to the Tesla API and first update the list of
online vehicles assuming no attempt for at least the [update_interval].
It will then update all the cached values for cars that are awake
assuming no update has occurred for at least the [update_interval].
Args:
inst (Controller): The instance of a controller
car_id (string): The vehicle to update. If None, all cars are updated.
wake_if_asleep (bool): Keyword arg to force a vehicle awake. This is
processed by the wake_up decorator.
force (bool): Keyword arg to force a vehicle update regardless of the
update_interval
Returns:
True if any update succeeded for any vehicle else false
Throws:
RetryLimitError
"""
cur_time = time.time()
with self.__lock:
# Update the online cars using get_vehicles()
last_update = self._last_attempted_update_time
if (force or cur_time - last_update > self.update_interval):
cars = self.get_vehicles()
for car in cars:
self.car_online[car['id']] = (car['state'] == 'online')
self._last_attempted_update_time = cur_time
# Only update online vehicles that haven't been updated recently
# The throttling is per car's last succesful update
# Note: This separate check is because there may be individual cars
# to update.
update_succeeded = False
for id_, value in self.car_online.items():
# If specific car_id provided, only update match
if (car_id is not None and car_id != id_):
continue
if (value and # pylint: disable=too-many-boolean-expressions
(id_ in self.__update and self.__update[id_]) and
(force or id_ not in self._last_update_time or
((cur_time - self._last_update_time[id_]) >
self.update_interval))):
# Only update cars with update flag on
try:
data = self.get(id_, 'data', wake_if_asleep)
except TeslaException:
data = None
if data and data['response']:
response = data['response']
self.__climate[car_id] = response['climate_state']
self.__charging[car_id] = response['charge_state']
self.__state[car_id] = response['vehicle_state']
self.__driving[car_id] = response['drive_state']
self.__gui[car_id] = response['gui_settings']
self.car_online[car_id] = (response['state']
== 'online')
self._last_update_time[car_id] = time.time()
update_succeeded = True
return update_succeeded | Update all vehicle attributes in the cache.
This command will connect to the Tesla API and first update the list of
online vehicles assuming no attempt for at least the [update_interval].
It will then update all the cached values for cars that are awake
assuming no update has occurred for at least the [update_interval].
Args:
inst (Controller): The instance of a controller
car_id (string): The vehicle to update. If None, all cars are updated.
wake_if_asleep (bool): Keyword arg to force a vehicle awake. This is
processed by the wake_up decorator.
force (bool): Keyword arg to force a vehicle update regardless of the
update_interval
Returns:
True if any update succeeded for any vehicle else false
Throws:
RetryLimitError | entailment |
def update(self):
"""Update the battery state."""
self._controller.update(self._id, wake_if_asleep=False)
data = self._controller.get_charging_params(self._id)
if data:
self.__battery_level = data['battery_level']
self.__charging_state = data['charging_state'] | Update the battery state. | entailment |
def update(self):
"""Update the battery range state."""
self._controller.update(self._id, wake_if_asleep=False)
data = self._controller.get_charging_params(self._id)
if data:
self.__battery_range = data['battery_range']
self.__est_battery_range = data['est_battery_range']
self.__ideal_battery_range = data['ideal_battery_range']
data = self._controller.get_gui_params(self._id)
if data:
if data['gui_distance_units'] == "mi/hr":
self.measurement = 'LENGTH_MILES'
else:
self.measurement = 'LENGTH_KILOMETERS'
self.__rated = (data['gui_range_display'] == "Rated") | Update the battery range state. | entailment |
def assumed_state(self):
# pylint: disable=protected-access
"""Return whether the data is from an online vehicle."""
return (not self._controller.car_online[self.id()] and
(self._controller._last_update_time[self.id()] -
self._controller._last_wake_up_time[self.id()] >
self._controller.update_interval)) | Return whether the data is from an online vehicle. | entailment |
def update(self):
"""Update the current GPS location."""
self._controller.update(self._id, wake_if_asleep=False)
data = self._controller.get_drive_params(self._id)
if data:
self.__longitude = data['longitude']
self.__latitude = data['latitude']
self.__heading = data['heading']
if self.__longitude and self.__latitude and self.__heading:
self.__location = {'longitude': self.__longitude,
'latitude': self.__latitude,
'heading': self.__heading} | Update the current GPS location. | entailment |
def update(self):
"""Update the odometer and the unit of measurement based on GUI."""
self._controller.update(self._id, wake_if_asleep=False)
data = self._controller.get_state_params(self._id)
if data:
self.__odometer = data['odometer']
data = self._controller.get_gui_params(self._id)
if data:
if data['gui_distance_units'] == "mi/hr":
self.measurement = 'LENGTH_MILES'
else:
self.measurement = 'LENGTH_KILOMETERS'
self.__rated = (data['gui_range_display'] == "Rated") | Update the odometer and the unit of measurement based on GUI. | entailment |
def lc(**kwargs):
"""
Create parameters for a new light curve dataset.
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.add_dataset`
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s
"""
obs_params = []
syn_params, constraints = lc_syn(syn=False, **kwargs)
obs_params += syn_params.to_list()
#obs_params += lc_dep(**kwargs).to_list()
#~ obs_params += [FloatArrayParameter(qualifier='flag', value=kwargs.get('flag', []), default_unit=None, description='Signal flag')]
#~ obs_params += [FloatArrayParameter(qualifier='weight', value=kwargs.get('weight', []), default_unit=None, description='Signal weight')]
#~ obs_params += [FloatParameter(qualifier='timeoffset', value=kwargs.get('timeoffset', 0.0), default_unit=u.d, description='Zeropoint date offset for observations')]
#~ obs_params += [FloatParameter(qualifier='statweight', value=kwargs.get('statweight', 0.0), default_unit=None, description='Statistical weight in overall fitting')]
return ParameterSet(obs_params), constraints | Create parameters for a new light curve dataset.
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.add_dataset`
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s | entailment |
def rv(**kwargs):
"""
Create parameters for a new radial velocity dataset.
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.add_dataset`
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s
"""
obs_params = []
#obs_params += [FloatParameter(qualifier='statweight', value = kwargs.get('statweight', 1.0), default_unit=u.dimensionless_unscaled, description='Statistical weight in overall fitting')]
syn_params, constraints = rv_syn(syn=False, **kwargs)
obs_params += syn_params.to_list()
#obs_params += rv_dep(**kwargs).to_list()
return ParameterSet(obs_params), constraints | Create parameters for a new radial velocity dataset.
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.add_dataset`
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s | entailment |
def lp(**kwargs):
"""
Create parameters for a new line profile dataset.
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.add_dataset`
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s
"""
obs_params = []
#obs_params += [FloatParameter(qualifier='statweight', value = kwargs.get('statweight', 1.0), default_unit=u.dimensionless_unscaled, description='Statistical weight in overall fitting')]
syn_params, constraints = lp_syn(syn=False, **kwargs)
obs_params += syn_params.to_list()
#obs_params += rv_dep(**kwargs).to_list()
return ParameterSet(obs_params), constraints | Create parameters for a new line profile dataset.
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.add_dataset`
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s | entailment |
def etv(**kwargs):
"""
Create parameters for a new eclipse timing variations dataset.
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.add_dataset`
:parameter **kwargs: default for the values of any of the ParameterSet
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s
"""
if not conf.devel:
raise NotImplementedError("'etv' dataset not officially supported for this release. Enable developer mode to test.")
obs_params = []
syn_params, constraints = etv_syn(syn=False, **kwargs)
obs_params += syn_params.to_list()
#obs_params += etv_dep(**kwargs).to_list()
return ParameterSet(obs_params), constraints | Create parameters for a new eclipse timing variations dataset.
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.add_dataset`
:parameter **kwargs: default for the values of any of the ParameterSet
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s | entailment |
def orb(**kwargs):
"""
Create parameters for a new orbit dataset.
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.add_dataset`
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s
"""
obs_params = []
#~ obs_params += [FloatArrayParameter(qualifier='exptime', value=kwargs.get('exptime', []), default_unit=u.s, description='Signal exposure time')]
#~ obs_params += [FloatArrayParameter(qualifier='flag', value=kwargs.get('flag', []), default_unit=None, description='Signal flag')]
#~ obs_params += [FloatArrayParameter(qualifier='weight', value=kwargs.get('weight', []), default_unit=None, description='Signal weight')]
#~ obs_params += [FloatParameter(qualifier='timeoffset', value=kwargs.get('timeoffset', 0.0), default_unit=u.d, description='Zeropoint date offset for observations')]
#~ obs_params += [FloatParameter(qualifier='statweight', value=kwargs.get('statweight', 0.0), default_unit=None, description='Statistical weight in overall fitting')]
syn_params, constraints = orb_syn(syn=False, **kwargs)
obs_params += syn_params.to_list()
#obs_params += orb_dep(**kwargs).to_list()
return ParameterSet(obs_params), [] | Create parameters for a new orbit dataset.
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.add_dataset`
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s | entailment |
def mesh(**kwargs):
"""
Create parameters for a new mesh dataset.
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.add_dataset`
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s
"""
obs_params = []
syn_params, constraints = mesh_syn(syn=False, **kwargs)
obs_params += syn_params.to_list()
obs_params += [SelectParameter(qualifier='include_times', value=kwargs.get('include_times', []), description='append to times from the following datasets/time standards', choices=['t0@system'])]
obs_params += [SelectParameter(qualifier='columns', value=kwargs.get('columns', []), description='columns to expose within the mesh', choices=_mesh_columns)]
#obs_params += mesh_dep(**kwargs).to_list()
return ParameterSet(obs_params), constraints | Create parameters for a new mesh dataset.
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.add_dataset`
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s | entailment |
def phoebe(**kwargs):
"""
Compute options for using the PHOEBE 2.0 backend.
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.add_compute`
Please see :func:`phoebe.backend.backends.phoebe` for a list of sources to
cite when using this backend.
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s
"""
params = []
params += [BoolParameter(qualifier='enabled', copy_for={'context': 'dataset', 'dataset': '*'}, dataset='_default', value=kwargs.get('enabled', True), description='Whether to create synthetics in compute/fitting run')]
# DYNAMICS
params += [ChoiceParameter(qualifier='dynamics_method', value=kwargs.get('dynamics_method', 'keplerian'), choices=['keplerian', 'nbody', 'rebound', 'bs'] if conf.devel else ['keplerian'], description='Which method to use to determine the dynamics of components')]
params += [BoolParameter(qualifier='ltte', value=kwargs.get('ltte', False), description='Correct for light travel time effects')]
if conf.devel:
params += [BoolParameter(visible_if='dynamics_method:nbody', qualifier='gr', value=kwargs.get('gr', False), description='Whether to account for general relativity effects')]
params += [FloatParameter(visible_if='dynamics_method:nbody', qualifier='stepsize', value=kwargs.get('stepsize', 0.01), default_unit=None, description='stepsize for the N-body integrator')] # TODO: improve description (and units??)
params += [ChoiceParameter(visible_if='dynamics_method:nbody', qualifier='integrator', value=kwargs.get('integrator', 'ias15'), choices=['ias15', 'whfast', 'sei', 'leapfrog', 'hermes'], description='Which integrator to use within rebound')]
# params += [FloatParameter(visible_if='dynamics_method:bs', qualifier='stepsize', value=kwargs.get('stepsize', 0.01), default_unit=None, description='stepsize for the N-body integrator')] # TODO: improve description (and units??)
# params += [FloatParameter(visible_if='dynamics_method:bs', qualifier='orbiterror', value=kwargs.get('orbiterror', 1e-20), default_unit=None, description='orbiterror for the N-body integrator')] # TODO: improve description (and units??)
# PHYSICS
# TODO: should either of these be per-dataset... if so: copy_for={'kind': ['rv_dep', 'lc_dep'], 'dataset': '*'}, dataset='_default' and then edit universe.py to pull for the correct dataset (will need to become dataset-dependent dictionary a la ld_func)
params += [ChoiceParameter(qualifier='irrad_method', value=kwargs.get('irrad_method', 'wilson'), choices=['none', 'wilson', 'horvat'], description='Which method to use to handle all irradiation effects (reflection, redistribution)')]
params += [ChoiceParameter(qualifier='boosting_method', value=kwargs.get('boosting_method', 'none'), choices=['none', 'linear'], description='Type of boosting method')]
# TODO: include scattering here? (used to be in lcdep)
#params += [ChoiceParameter(qualifier='irradiation_alg', value=kwargs.get('irradiation_alg', 'point_source'), choices=['full', 'point_source'], description='Type of irradiation algorithm')]
# MESH
# -- these parameters all need to exist per-component --
# copy_for = {'kind': ['star', 'disk', 'custombody'], 'component': '*'}
# means that this should exist for each component (since that has a wildcard) which
# has a kind in [star, disk, custombody]
# params += [BoolParameter(qualifier='horizon', value=kwargs.get('horizon', False), description='Store horizon for all meshes (except protomeshes)')]
params += [ChoiceParameter(copy_for={'kind': ['star', 'envelope'], 'component': '*'}, component='_default', qualifier='mesh_method', value=kwargs.get('mesh_method', 'marching'), choices=['marching', 'wd'] if conf.devel else ['marching'], description='Which method to use for discretizing the surface')]
params += [IntParameter(visible_if='mesh_method:marching', copy_for={'kind': ['star', 'envelope'], 'component': '*'}, component='_default', qualifier='ntriangles', value=kwargs.get('ntriangles', 1500), limits=(100,None), default_unit=u.dimensionless_unscaled, description='Requested number of triangles (won\'t be exact).')]
params += [ChoiceParameter(visible_if='mesh_method:marching', copy_for={'kind': ['star'], 'component': '*'}, component='_default', qualifier='distortion_method', value=kwargs.get('distortion_method', 'roche'), choices=['roche', 'rotstar', 'sphere'], description='Method to use for distorting stars')]
if conf.devel:
# TODO: can we have this computed from ntriangles? - and then do the same for the legacy compute options?
# NOTE: if removing from developer mode - also need to remove if conf.devel in io.py line ~800
params += [IntParameter(visible_if='mesh_method:wd', copy_for={'kind': ['star', 'envelope'], 'component': '*'}, component='_default', qualifier='gridsize', value=kwargs.get('gridsize', 60), limits=(10,None), default_unit=u.dimensionless_unscaled, description='Number of meshpoints for WD method')]
# ------------------------------------------------------
#params += [ChoiceParameter(qualifier='subdiv_alg', value=kwargs.get('subdiv_alg', 'edge'), choices=['edge'], description='Subdivision algorithm')]
# params += [IntParameter(qualifier='subdiv_num', value=kwargs.get('subdiv_num', 3), limits=(0,None), description='Number of subdivisions')]
if conf.devel:
params += [BoolParameter(qualifier='mesh_offset', value=kwargs.get('mesh_offset', True), description='Whether to adjust the mesh to have the correct surface area (TESTING)')]
params += [FloatParameter(visible_if='mesh_method:marching', qualifier='mesh_init_phi', value=kwargs.get('mesh_init_phi', 0.0), default_unit=u.rad, limits=(0,2*np.pi), description='Initial rotation offset for mesh (TESTING)')]
# DISTORTION
# ECLIPSE DETECTION
params += [ChoiceParameter(qualifier='eclipse_method', value=kwargs.get('eclipse_method', 'native'), choices=['only_horizon', 'graham', 'none', 'visible_partial', 'native', 'wd_horizon'] if conf.devel else ['native'], description='Type of eclipse algorithm')]
params += [ChoiceParameter(visible_if='eclipse_method:native', qualifier='horizon_method', value=kwargs.get('horizon_method', 'boolean'), choices=['boolean', 'linear'] if conf.devel else ['boolean'], description='Type of horizon method')]
# PER-COMPONENT
params += [ChoiceParameter(copy_for = {'kind': ['star'], 'component': '*'}, component='_default', qualifier='atm', value=kwargs.get('atm', 'ck2004'), choices=_atm_choices, description='Atmosphere table')]
# PER-DATASET
# -- these parameters all need to exist per-rvobs or lcobs --
# copy_for = {'kind': ['rv_dep'], 'component': '*', 'dataset': '*'}
# means that this should exist for each component/dataset pair with the
# rv_dep kind
params += [ChoiceParameter(qualifier='lc_method', copy_for = {'kind': ['lc'], 'dataset': '*'}, dataset='_default', value=kwargs.get('lc_method', 'numerical'), choices=['numerical', 'analytical'] if conf.devel else ['numerical'], description='Method to use for computing LC fluxes')]
params += [ChoiceParameter(qualifier='fti_method', copy_for = {'kind': ['lc'], 'dataset': '*'}, dataset='_default', value=kwargs.get('fti_method', 'none'), choices=['none', 'oversample'], description='How to handle finite-time integration (when non-zero exptime)')]
params += [IntParameter(visible_if='fti_method:oversample', qualifier='fti_oversample', copy_for={'kind': ['lc'], 'dataset': '*'}, dataset='_default', value=kwargs.get('fti_oversample', 5), default_unit=u.dimensionless_unscaled, description='Number of times to sample per-datapoint for finite-time integration')]
params += [ChoiceParameter(qualifier='rv_method', copy_for = {'kind': ['rv'], 'component': '*', 'dataset': '*'}, component='_default', dataset='_default', value=kwargs.get('rv_method', 'flux-weighted'), choices=['flux-weighted', 'dynamical'], description='Method to use for computing RVs (must be flux-weighted for Rossiter-McLaughlin effects)')]
params += [BoolParameter(visible_if='rv_method:flux-weighted', qualifier='rv_grav', copy_for = {'kind': ['rv'], 'component': '*', 'dataset': '*'}, component='_default', dataset='_default', value=kwargs.get('rv_grav', False), description='Whether gravitational redshift effects are enabled for RVs')]
if conf.devel:
params += [ChoiceParameter(qualifier='etv_method', copy_for = {'kind': ['etv'], 'component': '*', 'dataset': '*'}, component='_default', dataset='_default', value=kwargs.get('etv_method', 'crossing'), choices=['crossing'], description='Method to use for computing ETVs')]
params += [FloatParameter(visible_if='etv_method:crossing', qualifier='etv_tol', copy_for = {'kind': ['etv'], 'component': '*', 'dataset': '*'}, component='_default', dataset='_default', value=kwargs.get('etv_tol', 1e-4), default_unit=u.d, description='Precision with which to determine eclipse timings')]
# -----------------------------------------------------------
return ParameterSet(params) | Compute options for using the PHOEBE 2.0 backend.
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.add_compute`
Please see :func:`phoebe.backend.backends.phoebe` for a list of sources to
cite when using this backend.
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s | entailment |
def legacy(**kwargs):
"""
Compute options for using the PHOEBE 1.0 legacy backend (must be
installed).
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.add_compute`
Please see :func:`phoebe.backend.backends.legacy` for a list of sources to
cite when using this backend.
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s
"""
params = []
params += [BoolParameter(qualifier='enabled', copy_for={'context': 'dataset', 'kind': ['lc', 'rv', 'mesh'], 'dataset': '*'}, dataset='_default', value=kwargs.get('enabled', True), description='Whether to create synthetics in compute/fitting run')]
# TODO: the kwargs need to match the qualifier names!
# TODO: include MORE meshing options
params += [ChoiceParameter(copy_for = {'kind': ['star'], 'component': '*'}, component='_default', qualifier='atm', value=kwargs.get('atm', 'extern_atmx'), choices=['extern_atmx', 'extern_planckint'], description='Atmosphere table')]
# params += [ChoiceParameter(copy_for = {'kind': ['star'], 'component': '*'}, component='_default', qualifier='atm', value=kwargs.get('atm', 'kurucz'), choices=['kurucz', 'blackbody'], description='Atmosphere table')]
# params += [ChoiceParameter(qualifier='morphology', value=kwargs.get('morphology','Detached binary'), choices=['Unconstrained binary system', 'Detached binary', 'Overcontact binary of the W UMa type', 'Overcontact binary not in thermal contact'], description='System type constraint')]
# params += [BoolParameter(qualifier='cindex', value=kwargs.get('cindex', False), description='Color index constraint')]
# params += [IntParameter(visible_if='cindex_switch:True', qualifier='cindex', value=kwargs.get('cindex', np.array([1.0])), description='Number of reflections')]
# params += [BoolParameter(qualifier='heating', value=kwargs.get('heating', True), description='Allow irradiators to heat other components')]
params += [IntParameter(copy_for={'kind': ['star'], 'component': '*'}, component='_default', qualifier='gridsize', value=kwargs.get('gridsize', 60), limits=(10,None), description='Number of meshpoints for WD')]
params += [ChoiceParameter(qualifier='irrad_method', value=kwargs.get('irrad_method', 'wilson'), choices=['none', 'wilson'], description='Which method to use to handle irradiation/reflection effects')]
params += [IntParameter(visible_if='irrad_method:wilson', qualifier='refl_num', value=kwargs.get('refl_num', 1), limits=(0,None), description='Number of reflections')]
# params += [BoolParameter(qualifier='msc1', value=kwargs.get('msc1', False), description='Mainsequence Constraint for star 1')]
# params += [BoolParameter(qualifier='msc2', value=kwargs.get('msc2', False), description='Mainsequence Constraint for star 2')]
# TODO: can we come up with a better qualifier for reddening (and be consistent when we enable in phoebe2)
params += [BoolParameter(qualifier='ie', value=kwargs.get('ie', False), description='Should data be de-reddened')]
# TODO: can we change this to rv_method = ['flux_weighted', 'dynamical'] to be consistent with phoebe2?
# TODO: can proximity_rv (rv_method) be copied for each dataset (see how this is done for phoebe2)? This would probably mean that the wrapper would need to loop and make separate calls since PHOEBE1 can't handle different settings per-RV dataset
params += [ChoiceParameter(qualifier='rv_method', copy_for = {'kind': ['rv'], 'component': '*', 'dataset': '*'}, component='_default', dataset='_default',
value=kwargs.get('rv_method', 'flux-weighted'), choices=['flux-weighted', 'dynamical'], description='Method to use for computing RVs (must be flux-weighted for Rossiter-McLaughlin)')]
return ParameterSet(params) | Compute options for using the PHOEBE 1.0 legacy backend (must be
installed).
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.add_compute`
Please see :func:`phoebe.backend.backends.legacy` for a list of sources to
cite when using this backend.
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s | entailment |
def photodynam(**kwargs):
"""
Compute options for using Josh Carter's 'photodynam' code as a
backend (must be installed).
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.add_compute`
Please see :func:`phoebe.backend.backends.photodynam` for a list of sources to
cite when using this backend.
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s
"""
if not conf.devel:
raise NotImplementedError("'photodynam' backend not officially supported for this release. Enable developer mode to test.")
params = []
params += [BoolParameter(qualifier='enabled', copy_for={'context': 'dataset', 'kind': ['lc', 'rv', 'orb'], 'dataset': '*'}, dataset='_default', value=kwargs.get('enabled', True), description='Whether to create synthetics in compute/fitting run')]
params += [FloatParameter(qualifier='stepsize', value=kwargs.get('stepsize', 0.01), default_unit=None, description='blah')]
params += [FloatParameter(qualifier='orbiterror', value=kwargs.get('orbiterror', 1e-20), default_unit=None, description='blah')]
# TODO: remove this option and instead use time0@system
#params += [FloatParameter(qualifier='time0', value=kwargs.get('time0', 0.0), default_unit=u.d, description='Time to start the integration')]
return ParameterSet(params) | Compute options for using Josh Carter's 'photodynam' code as a
backend (must be installed).
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.add_compute`
Please see :func:`phoebe.backend.backends.photodynam` for a list of sources to
cite when using this backend.
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s | entailment |
def jktebop(**kwargs):
"""
Compute options for using John Southworth's 'jktebop' code as a
backend (must be installed).
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.add_compute`
Please see :func:`phoebe.backend.backends.jktebop` for a list of sources to
cite when using this backend.
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s
"""
if not conf.devel:
raise NotImplementedError("'jktebop' backend not officially supported for this release. Enable developer mode to test.")
params = []
params += [BoolParameter(qualifier='enabled', copy_for={'context': 'dataset', 'kind': ['lc'], 'dataset': '*'}, dataset='_default', value=kwargs.get('enabled', True), description='Whether to create synthetics in compute/fitting run')]
params += [FloatParameter(qualifier='ringsize', value=kwargs.get('ringsize', 5), default_unit=u.deg, description='Integ Ring Size')]
return ParameterSet(params) | Compute options for using John Southworth's 'jktebop' code as a
backend (must be installed).
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.add_compute`
Please see :func:`phoebe.backend.backends.jktebop` for a list of sources to
cite when using this backend.
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s | entailment |
def team_required(func=None):
"""
Decorator for views that require a team be supplied wither via a slug in the
url pattern or already set on the request object from the TeamMiddleware
"""
def decorator(view_func):
@functools.wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
slug = kwargs.pop("slug", None)
if not getattr(request, "team", None):
request.team = get_object_or_404(Team, slug=slug)
return view_func(request, *args, **kwargs)
return _wrapped_view
if func:
return decorator(func)
return decorator | Decorator for views that require a team be supplied wither via a slug in the
url pattern or already set on the request object from the TeamMiddleware | entailment |
def manager_required(func=None):
"""
Decorator for views that require not only a team but also that a user be
logged in and be the manager or owner of that team.
"""
def decorator(view_func):
@team_required
@login_required
@functools.wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
role = request.team.role_for(request.user)
if role not in [Membership.ROLE_MANAGER, Membership.ROLE_OWNER]:
raise Http404()
return view_func(request, *args, **kwargs)
return _wrapped_view
if func:
return decorator(func)
return decorator | Decorator for views that require not only a team but also that a user be
logged in and be the manager or owner of that team. | entailment |
def complete(self, text, state):
"""Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
"""
if self.use_main_ns:
self.namespace = __main__.__dict__
if state == 0:
self.matches = self.attr_matches(text)
try:
return self.matches[state]
except IndexError:
return None | Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'. | entailment |
def attr_matches(self, text):
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluable in self.namespace, it will be evaluated and its attributes
(as revealed by dir()) are used as possible completions. (For class
instances, class members are also considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
def _method_or_attr(thisobject, item):
# decide whether to append a '(' to the end of the attr based
# on whether its callable
if hasattr(getattr(thisobject, item), '__call__'):
return item + '('
else:
return item
tb_compl_commands = {
'.': {},
'[': {},
'.get(': {},
'.set(': {},
'.filter(': {},
'.filter_or_get(': {},
'.get_parameter(': {},
'.remove_parameter(': {},
'.remove_parameters_all(': {},
'.get_value(': {},
'.set_value(': {},
'.set_value_all(': {},
# TODO: default_unit, adjust, prior, posterior, enabled?
'.get_history(': {'context': 'history'},
'.remove_history(': {'context': 'history'},
'.get_component(': {'context': 'system'},
'.remove_component(': {'context': 'system'},
'.get_mesh(': {'context': 'mesh'},
'.remove_mesh(': {'context': 'mesh'},
'.get_constraint(': {'context': 'constraint'},
'.remove_constraint(': {'context': 'constraint'},
'.flip_constraint(': {'context': 'constraint'},
'.run_constraint(': {'context': 'constraint'},
'.get_compute(': {'context': 'compute'},
'.remove_compute(': {'context': 'compute'},
'.run_compute(': {'context': 'compute'},
'.get_prior(': {'context': 'prior'}, # TODO: remove_prior, run_prior, enable_prior, disable_prior
'.get_fitting(': {'context': 'fitting'},
'.remove_fitting(': {'context': 'fitting'},
'.run_fitting(': {'context': 'fitting'},
'.get_posterior(': {'context': 'posterior'}, # TODO: remove_posterior, draw_from_posterior
'.get_feedback(': {'context': 'feedback'},
'.remove_feedback(': {'context': 'feedback'},
# TODO: plots, plugins
}
expr = None
for cmd,filter_kwargs in tb_compl_commands.items():
if cmd in text:
expr, attr = text.rsplit(cmd, 1)
#~ if len(attr)==0:
#~ return []
if attr[0] not in ["'",'"'] and cmd != '.':
return []
else:
if cmd == '.':
# then we're just looking for attributes and don't
# need to offset for the ' or "
stringchar = ''
attr = attr
else:
# then we're the first argument of some method
# and need to account for the starting ' or "
stringchar = attr[0]
attr = attr[1:]
break
if expr is None:
# then we haven't found a match
return []
try:
thisobject = eval(expr, self.namespace)
except Exception:
return []
if cmd == '.':
# then we're looking for attributes of thisobject (PS or bundle) that start with attr
words = [_method_or_attr(thisobject, item) for item in dir(thisobject) if item[:len(attr)] == attr]
else:
# then we're looking to autocomplete the twig attr for thisobject (PS or bundle)
words = thisobject.filter_or_get(attr, autocomplete=True, **filter_kwargs)
matches = []
n = len(attr)
for word in words:
matches.append('{}{}{}{}'.format(expr,cmd,stringchar,word))
return matches | Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluable in self.namespace, it will be evaluated and its attributes
(as revealed by dir()) are used as possible completions. (For class
instances, class members are also considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated. | entailment |
def _extract_from_bundle(b, compute, times=None, allow_oversample=False,
by_time=True, **kwargs):
"""
Extract a list of sorted times and the datasets that need to be
computed at each of those times. Any backend can then loop through
these times and see what quantities are needed for that time step.
Empty copies of synthetics for each applicable dataset are then
created and returned so that they can be filled by the given backend.
Setting of other meta-data should be handled by the bundle once
the backend returns the filled synthetics.
:parameter b: the :class:`phoebe.frontend.bundle.Bundle`
:return: times (list of floats), infos (list of lists of dictionaries),
new_syns (ParameterSet containing all new parameters)
:raises NotImplementedError: if for some reason there is a problem getting
a unique match to a dataset (shouldn't ever happen unless
the user overrides a label)
"""
provided_times = times
times = []
infolists = []
needed_syns = []
# The general format of the datastructures used within PHOEBE are as follows:
# if by_time:
# - times (list within _extract_from_bundle_by_time but then casted to np.array)
# - infolists (list of <infolist>, same shape and order as times)
# - infolist (list of <info>)
# - info (dict containing information for a given dataset-component computation at the given time)
#
# else:
# - infolist (same as by_time, one per-dataset, called infolists within this function only)
# - info (same as by_time)
#
# The workers then return a similar format:
# - packetlist (list of <packet>)
# - packet (dict ready to be sent to new_syns.set_value(**packet))
# where packet has a similar structure to info (but with value and possibly time keys)
# but packetlist may be longer than infolist (since mesh passband-columns allow
# now have their own entries.)
for dataset in b.filter(qualifier='enabled', compute=compute, value=True).datasets:
dataset_ps = b.filter(context='dataset', dataset=dataset)
dataset_compute_ps = b.filter(context='compute', dataset=dataset, compute=compute, check_visible=False)
dataset_kind = dataset_ps.exclude(kind='*_dep').kind
time_qualifier = _timequalifier_by_kind(dataset_kind)
if dataset_kind in ['lc']:
# then the Parameters in the model only exist at the system-level
# and are not tagged by component
dataset_components = [None]
elif dataset_kind in ['lp']:
# TODO: eventually spectra and RVs as well (maybe even LCs and ORBs)
dataset_components = b.hierarchy.get_stars() + b.hierarchy.get_orbits()
else:
dataset_components = b.hierarchy.get_stars()
for component in dataset_components:
if provided_times:
this_times = provided_times
elif dataset_kind == 'mesh':
this_times = _expand_mesh_times(b, dataset_ps, component)
elif dataset_kind in ['lp']:
# then we have Parameters tagged by times, this will probably
# also apply to spectra.
this_times = [float(t) for t in dataset_ps.times]
else:
timequalifier = _timequalifier_by_kind(dataset_kind)
timecomponent = component if dataset_kind not in ['mesh', 'lc'] else None
# print "*****", dataset_kind, dataset_ps.kinds, timequalifier, timecomponent
this_times = dataset_ps.get_value(qualifier=timequalifier, component=timecomponent, unit=u.d)
# we may also need to compute at other times if requested by a
# mesh with this dataset in datasets@mesh
# for mesh_datasets_parameter in mesh_datasets_parameters:
# if dataset in mesh_datasets_parameter.get_value():
# mesh_obs_ps = b.filter(context='dataset', dataset=mesh_datasets_parameter.dataset, component=None).exclude(kind='*_dep')
# TODO: not sure about the component=None on the next line... what will this do for rvs with different times per-component?
# mesh_times = _expand_mesh_times(b, mesh_obs_ps, component=None)
# this_times = np.unique(np.append(this_times, mesh_times))
if allow_oversample and \
dataset_kind in ['lc'] and \
b.get_value(qualifier='exptime', dataset=dataset, check_visible=False) > 0 and \
dataset_compute_ps.get_value(qualifier='fti_method', **kwargs)=='oversample':
# Then we need to override the times retrieved from the dataset
# with the oversampled times. Later we'll do an average over
# the exposure.
# NOTE: here we assume that the dataset times are at mid-exposure,
# if we want to allow more flexibility, we'll need a parameter
# that gives this option and different logic for each case.
exptime = dataset_ps.get_value(qualifier='exptime', unit=u.d)
fti_oversample = dataset_compute_ps.get_value(qualifier='fti_oversample', check_visible=False, **kwargs)
# NOTE: if changing this, also change in bundle.run_compute
this_times = np.array([np.linspace(t-exptime/2., t+exptime/2., fti_oversample) for t in this_times]).flatten()
if dataset_kind in ['lp']:
# for line profiles and spectra, we only need to compute synthetic
# model if there are defined wavelengths
this_wavelengths = dataset_ps.get_value(qualifier='wavelengths', component=component)
else:
this_wavelengths = None
if len(this_times) and (this_wavelengths is None or len(this_wavelengths)):
info = {'dataset': dataset,
'component': component,
'kind': dataset_kind,
'needs_mesh': _needs_mesh(b, dataset, dataset_kind, component, compute),
}
if dataset_kind == 'mesh':
# then we may be requesting passband-dependent columns be
# copied to the mesh from other datasets based on the values
# of columns@mesh. Let's store the needed information here,
# where mesh_datasets and mesh_kinds correspond to each
# other (but mesh_columns does not).
info['mesh_columns'] = dataset_ps.get_value('columns', expand=True)
info['mesh_datasets'] = list(set([c.split('@')[1] for c in info['mesh_columns'] if len(c.split('@'))>1]))
info['mesh_kinds'] = [b.filter(dataset=ds, context='dataset').exclude(kind='*_dep').kind for ds in info['mesh_datasets']]
if by_time:
for time_ in this_times:
# TODO: handle some deltatime allowance here?
if time_ in times:
ind = times.index(time_)
infolists[ind].append(info)
else:
times.append(time_)
infolists.append([info])
else:
# TODO: this doesn't appear to be different than needed_syns,
# unless we change the structure to be per-dataset.
info['times'] = this_times
infolists.append(info)
# we need the times for _create_syns but not within the infolists,
# so we'll do this last and make a copy so times aren't passed
# to everybody...
needed_syn_info = info.copy()
needed_syn_info['times'] = this_times
needed_syns.append(needed_syn_info)
if by_time and len(times):
ti = zip(times, infolists)
ti.sort()
times, infolists = zip(*ti)
if by_time:
# print "*** _extract_from_bundle return(times, infolists, syns)", times, infolists, needed_syns
return np.asarray(times), infolists, _create_syns(b, needed_syns)
else:
# print "*** _extract_from_bundle return(infolists, syns)", infolists, needed_syns
return infolists, _create_syns(b, needed_syns) | Extract a list of sorted times and the datasets that need to be
computed at each of those times. Any backend can then loop through
these times and see what quantities are needed for that time step.
Empty copies of synthetics for each applicable dataset are then
created and returned so that they can be filled by the given backend.
Setting of other meta-data should be handled by the bundle once
the backend returns the filled synthetics.
:parameter b: the :class:`phoebe.frontend.bundle.Bundle`
:return: times (list of floats), infos (list of lists of dictionaries),
new_syns (ParameterSet containing all new parameters)
:raises NotImplementedError: if for some reason there is a problem getting
a unique match to a dataset (shouldn't ever happen unless
the user overrides a label) | entailment |
def _create_syns(b, needed_syns):
"""
Create empty synthetics
:parameter b: the :class:`phoebe.frontend.bundle.Bundle`
:parameter list needed_syns: list of dictionaries containing kwargs to access
the dataset (dataset, component, kind)
:return: :class:`phoebe.parameters.parameters.ParameterSet` of all new parameters
"""
# needs_mesh = {info['dataset']: info['kind'] for info in needed_syns if info['needs_mesh']}
params = []
for needed_syn in needed_syns:
# print "*** _create_syns needed_syn", needed_syn
# used to be {}_syn
syn_kind = '{}'.format(needed_syn['kind'])
# if needed_syn['kind']=='mesh':
# parameters.dataset.mesh will handle creating the necessary columns
# needed_syn['dataset_fields'] = needs_mesh
# needed_syn['columns'] = b.get_value(qualifier='columns', dataset=needed_syn['dataset'], context='dataset')
# datasets = b.get_value(qualifier='datasets', dataset=needed_syn['dataset'], context='dataset')
# needed_syn['datasets'] = {ds: b.filter(datset=ds, context='dataset').exclude(kind='*_dep').kind for ds in datasets}
# phoebe will compute everything sorted - even if the input times array
# is out of order, so let's make sure the exposed times array is in
# the correct (sorted) order
if 'times' in needed_syn.keys():
needed_syn['times'].sort()
needed_syn['empty_arrays_len'] = len(needed_syn['times'])
these_params, these_constraints = getattr(_dataset, "{}_syn".format(syn_kind.lower()))(**needed_syn)
# TODO: do we need to handle constraints?
these_params = these_params.to_list()
for param in these_params:
if param._dataset is None:
# dataset may be set for mesh columns
param._dataset = needed_syn['dataset']
param._kind = syn_kind
param._component = needed_syn['component']
# reset copy_for... model Parameters should never copy
param._copy_for = {}
# context, model, etc will be handle by the bundle once these are returned
params += these_params
return ParameterSet(params) | Create empty synthetics
:parameter b: the :class:`phoebe.frontend.bundle.Bundle`
:parameter list needed_syns: list of dictionaries containing kwargs to access
the dataset (dataset, component, kind)
:return: :class:`phoebe.parameters.parameters.ParameterSet` of all new parameters | entailment |
def _make_packet(qualifier, value, time, info, **kwargs):
"""
where kwargs overrides info
"""
packet = {'dataset': kwargs.get('dataset', info['dataset']),
'component': kwargs.get('component', info['component']),
'kind': kwargs.get('kind', info['kind']),
'qualifier': qualifier,
'value': value,
'time': time
}
return packet | where kwargs overrides info | entailment |
def run_checks(self, b, compute, times=[], **kwargs):
"""
run any sanity checks to make sure the parameters and options are legal
for this backend. If they are not, raise an error here to avoid errors
within the workers.
Any physics-checks that are backend-independent should be in
Bundle.run_checks, and don't need to be repeated here.
This should be subclassed by all backends, otherwise will throw a
NotImplementedError
"""
raise NotImplementedError("run_checks is not implemented by the {} backend".format(self.__class__.__name__)) | run any sanity checks to make sure the parameters and options are legal
for this backend. If they are not, raise an error here to avoid errors
within the workers.
Any physics-checks that are backend-independent should be in
Bundle.run_checks, and don't need to be repeated here.
This should be subclassed by all backends, otherwise will throw a
NotImplementedError | entailment |
def get_packet_and_syns(self, b, compute, times=[], **kwargs):
"""
get_packet is called by the master and must get all information necessary
to send to all workers. The returned packet will be passed on as
_run_chunk(**packet) with the following exceptions:
* b: the bundle will be included in the packet serialized
* compute: the label of the compute options will be included in the packet
* backend: the class name will be passed on in the packet so the worker can call the correct backend
* all kwargs will be passed on verbatim
"""
packet, new_syns = self._get_packet_and_syns(b, compute, times, **kwargs)
for k,v in kwargs.items():
packet[k] = v
packet['b'] = b.to_json() if mpi.enabled else b
packet['compute'] = compute
packet['backend'] = self.__class__.__name__
return packet, new_syns | get_packet is called by the master and must get all information necessary
to send to all workers. The returned packet will be passed on as
_run_chunk(**packet) with the following exceptions:
* b: the bundle will be included in the packet serialized
* compute: the label of the compute options will be included in the packet
* backend: the class name will be passed on in the packet so the worker can call the correct backend
* all kwargs will be passed on verbatim | entailment |
def _fill_syns(self, new_syns, rpacketlists_per_worker):
"""
rpacket_per_worker is a list of packetlists as returned by _run_chunk
"""
# TODO: move to BaseBackendByDataset or BaseBackend?
logger.debug("rank:{}/{} {}._fill_syns".format(mpi.myrank, mpi.nprocs, self.__class__.__name__))
for packetlists in rpacketlists_per_worker:
# single worker
for packetlist in packetlists:
# single time/dataset
for packet in packetlist:
# single parameter
new_syns.set_value(**packet)
return new_syns | rpacket_per_worker is a list of packetlists as returned by _run_chunk | entailment |
def run(self, b, compute, times=[], **kwargs):
"""
if within mpirun, workers should call _run_worker instead of run
"""
self.run_checks(b, compute, times, **kwargs)
logger.debug("rank:{}/{} calling get_packet_and_syns".format(mpi.myrank, mpi.nprocs))
packet, new_syns = self.get_packet_and_syns(b, compute, times, **kwargs)
if mpi.enabled:
# broadcast the packet to ALL workers
mpi.comm.bcast(packet, root=0)
# now even the master can become a worker and take on a chunk
packet['b'] = b
rpacketlists = self._run_chunk(**packet)
# now receive all packetlists
rpacketlists_per_worker = mpi.comm.gather(rpacketlists, root=0)
else:
rpacketlists_per_worker = [self._run_chunk(**packet)]
return self._fill_syns(new_syns, rpacketlists_per_worker) | if within mpirun, workers should call _run_worker instead of run | entailment |
def compute_volume(sizes, centers, normals):
"""
Compute the numerical volume of a convex mesh
:parameter array sizes: array of sizes of triangles
:parameter array centers: array of centers of triangles (x,y,z)
:parameter array normals: array of normals of triangles (will normalize if not already)
:return: the volume (float)
"""
# the volume of a slanted triangular cone is A_triangle * (r_vec dot norm_vec) / 3.
# TODO: implement normalizing normals into meshing routines (or at least have them supply normal_mags to the mesh)
# TODO: remove this function - should now be returned by the meshing algorithm itself
# although wd method may currently use this
normal_mags = np.linalg.norm(normals, axis=1) #np.sqrt((normals**2).sum(axis=1))
return np.sum(sizes*((centers*normals).sum(axis=1)/normal_mags)/3) | Compute the numerical volume of a convex mesh
:parameter array sizes: array of sizes of triangles
:parameter array centers: array of centers of triangles (x,y,z)
:parameter array normals: array of normals of triangles (will normalize if not already)
:return: the volume (float) | entailment |
def euler_trans_matrix(etheta, elongan, eincl):
"""
Get the transformation matrix R to translate/rotate a mesh according to
euler angles.
The matrix is
R(long,incl,theta) =
Rz(pi).Rz(long).Rx(incl).Rz(theta)
Rz(long).Rx(-incl).Rz(theta).Rz(pi)
where
Rx(u) = 1, 0, 0
0, cos(u), -sin(u)
0, sin(u), cos(u)
Ry(u) = cos(u), 0, sin(u)
0, 1, 0
-sin(u), 0, cos(u)
Rz(u) = cos(u), -sin(u), 0
sin(u), cos(u), 0
0, 0, 1
Rz(pi) = reflection across z-axis
Note:
R(0,0,0) = -1, 0, 0
0, -1, 0
0, 0, 1
:parameter float etheta: euler theta angle
:parameter float elongan: euler long of asc node angle
:parameter float eincl: euler inclination angle
:return: matrix with size 3x3
"""
s1 = sin(eincl);
c1 = cos(eincl);
s2 = sin(elongan);
c2 = cos(elongan);
s3 = sin(etheta);
c3 = cos(etheta);
c1s3 = c1*s3;
c1c3 = c1*c3;
return np.array([
[-c2*c3+s2*c1s3, c2*s3+s2*c1c3, -s2*s1],
[-s2*c3-c2*c1s3, s2*s3-c2*c1c3, c2*s1],
[s1*s3, s1*c3, c1]
]) | Get the transformation matrix R to translate/rotate a mesh according to
euler angles.
The matrix is
R(long,incl,theta) =
Rz(pi).Rz(long).Rx(incl).Rz(theta)
Rz(long).Rx(-incl).Rz(theta).Rz(pi)
where
Rx(u) = 1, 0, 0
0, cos(u), -sin(u)
0, sin(u), cos(u)
Ry(u) = cos(u), 0, sin(u)
0, 1, 0
-sin(u), 0, cos(u)
Rz(u) = cos(u), -sin(u), 0
sin(u), cos(u), 0
0, 0, 1
Rz(pi) = reflection across z-axis
Note:
R(0,0,0) = -1, 0, 0
0, -1, 0
0, 0, 1
:parameter float etheta: euler theta angle
:parameter float elongan: euler long of asc node angle
:parameter float eincl: euler inclination angle
:return: matrix with size 3x3 | entailment |
def spin_in_system(incl, long_an):
"""
Spin in the plane of sky of a star given its inclination and "long_an"
incl - inclination of the star in the plane of sky
long_an - longitude of ascending node (equator) of the star in the plane of sky
Return:
spin - in plane of sky
"""
#print "*** spin_in_system", incl, long_an, np.dot(Rz(long_an), np.dot(Rx(-incl), np.array([0,0,1])))
# Rz(long_an) Rx(incl) [0, 0, 1]
return np.dot(Rz(long_an), np.dot(Rx(-incl), np.array([0.,0.,1.]))) | Spin in the plane of sky of a star given its inclination and "long_an"
incl - inclination of the star in the plane of sky
long_an - longitude of ascending node (equator) of the star in the plane of sky
Return:
spin - in plane of sky | entailment |
def spin_in_roche(s, etheta, elongan, eincl):
"""
Transform the spin s of a star on Kerpler orbit with
etheta - true anomaly
elongan - longitude of ascending node
eincl - inclination
from in the plane of sky reference frame into
the Roche reference frame.
"""
# m = Rz(long).Rx(-incl).Rz(theta).Rz(pi)
m = euler_trans_matrix(etheta, elongan, eincl)
return np.dot(m.T, s) | Transform the spin s of a star on Kerpler orbit with
etheta - true anomaly
elongan - longitude of ascending node
eincl - inclination
from in the plane of sky reference frame into
the Roche reference frame. | entailment |
def general_rotation_matrix(theta, phi, alpha):
"""
Rotation around vector
u = (sin(theta) cos(phi), sin(theta) sin(phi), cos(theta))
by an angle
alpha
Ref:
http://ksuweb.kennesaw.edu/~plaval//math4490/rotgen.pdf
:parameter float theta:
:parameter float phi:
:parameter float alpha: rotation angle
:return: 3x3 matrix of floats
"""
C = cos(alpha)
S = sin(alpha)
t = 1 - C
ux = sin(theta)*cos(phi)
uy = sin(theta)*sin(phi)
uz = cos(theta)
return np.array([
[t*ux**2 + C, t*ux*uy - S*uz, t*ux*uz + S*uy],
[t*ux*uy + S*uz, t*uy**2 + C, t*uy*uz - S*ux],
[t*ux*uz - S*uy, t*uy*uz + S*ux, t*uz**2 + C]
]) | Rotation around vector
u = (sin(theta) cos(phi), sin(theta) sin(phi), cos(theta))
by an angle
alpha
Ref:
http://ksuweb.kennesaw.edu/~plaval//math4490/rotgen.pdf
:parameter float theta:
:parameter float phi:
:parameter float alpha: rotation angle
:return: 3x3 matrix of floats | entailment |
def transform_position_array(array, pos, euler, is_normal, reverse=False):
"""
Transform any Nx3 position array by translating to a center-of-mass 'pos'
and applying an euler transformation
:parameter array array: numpy array of Nx3 positions in the original (star)
coordinate frame
:parameter array pos: numpy array with length 3 giving cartesian
coordinates to offset all positions
:parameter array euler: euler angles (etheta, elongan, eincl) in radians
:parameter bool is_normal: whether each entry is a normal vector rather
than position vector. If true, the quantities won't be offset by
'pos'
:return: new positions array with same shape as 'array'.
"""
trans_matrix = euler_trans_matrix(*euler)
if not reverse:
trans_matrix = trans_matrix.T
if isinstance(array, ComputedColumn):
array = array.for_computations
if is_normal:
# then we don't do an offset by the position
return np.dot(np.asarray(array), trans_matrix)
else:
return np.dot(np.asarray(array), trans_matrix) + np.asarray(pos) | Transform any Nx3 position array by translating to a center-of-mass 'pos'
and applying an euler transformation
:parameter array array: numpy array of Nx3 positions in the original (star)
coordinate frame
:parameter array pos: numpy array with length 3 giving cartesian
coordinates to offset all positions
:parameter array euler: euler angles (etheta, elongan, eincl) in radians
:parameter bool is_normal: whether each entry is a normal vector rather
than position vector. If true, the quantities won't be offset by
'pos'
:return: new positions array with same shape as 'array'. | entailment |
def transform_velocity_array(array, pos_array, vel, euler, rotation_vel=(0,0,0)):
"""
Transform any Nx3 velocity vector array by adding the center-of-mass 'vel',
accounting for solid-body rotation, and applying an euler transformation.
:parameter array array: numpy array of Nx3 velocity vectors in the original
(star) coordinate frame
:parameter array pos_array: positions of the elements with respect to the
original (star) coordinate frame. Must be the same shape as 'array'.
:parameter array vel: numpy array with length 3 giving cartesian velocity
offsets in the new (system) coordinate frame
:parameter array euler: euler angles (etheta, elongan, eincl) in radians
:parameter array rotation_vel: vector of the rotation velocity of the star
in the original (star) coordinate frame
:return: new velocity array with same shape as 'array'
"""
trans_matrix = euler_trans_matrix(*euler)
# v_{rot,i} = omega x r_i with omega = rotation_vel
rotation_component = np.cross(rotation_vel, pos_array, axisb=1)
orbital_component = np.asarray(vel)
if isinstance(array, ComputedColumn):
array = array.for_computations
new_vel = np.dot(np.asarray(array)+rotation_component, trans_matrix.T) + orbital_component
return new_vel | Transform any Nx3 velocity vector array by adding the center-of-mass 'vel',
accounting for solid-body rotation, and applying an euler transformation.
:parameter array array: numpy array of Nx3 velocity vectors in the original
(star) coordinate frame
:parameter array pos_array: positions of the elements with respect to the
original (star) coordinate frame. Must be the same shape as 'array'.
:parameter array vel: numpy array with length 3 giving cartesian velocity
offsets in the new (system) coordinate frame
:parameter array euler: euler angles (etheta, elongan, eincl) in radians
:parameter array rotation_vel: vector of the rotation velocity of the star
in the original (star) coordinate frame
:return: new velocity array with same shape as 'array' | entailment |
def wd_grid_to_mesh_dict(the_grid, q, F, d):
"""
Transform a wd-style mesh to the format used by PHOEBE. Namely this handles
translating vertices from Nx9 to Nx3x3 and creating the array of indices
for each triangle.
:parameter record-array the_grid: output from discretize_wd_style
:parameter float q: mass-ratio (M_this/M_sibling)
:parameter float F: syncpar
:parameter float d: instantaneous unitless separation
:return: the dictionary in PHOEBE's format to be passed to a Mesh class
"""
# WD returns a list of triangles with 9 coordinates (v1x, v1y, v1z, v2x, ...)
triangles_9N = the_grid[:,4:13]
new_mesh = {}
# force the mesh to be computed at centers rather than the PHOEBE default
# of computing at vertices and averaging for the centers. This will
# propogate to all ComputedColumns, which means we'll fill those quanities
# (ie normgrads, velocities) per-triangle.
new_mesh['compute_at_vertices'] = False
# PHOEBE's mesh structure stores vertices in an Nx3 array
new_mesh['vertices'] = triangles_9N.reshape(-1,3)
# and triangles as indices pointing to each of the 3 vertices (Nx3)
new_mesh['triangles'] = np.arange(len(triangles_9N)*3).reshape(-1,3)
new_mesh['centers'] = the_grid[:,0:3]
new_mesh['tnormals'] = the_grid[:,13:16]
norms = np.linalg.norm(new_mesh['tnormals'], axis=1)
new_mesh['normgrads'] = norms
# TODO: do this the right way by dividing along axis=1 (or using np.newaxis as done for multiplying in ComputedColumns)
new_mesh['tnormals'] = np.array([tn/n for tn,n in zip(new_mesh['tnormals'], norms)])
# NOTE: there are no vnormals in wd-style mesh
new_mesh['areas'] = the_grid[:,3]
new_mesh['tareas'] = the_grid[:,18]
# TESTING ONLY - remove this eventually ??? (currently being used
# to test WD-style eclipse detection by using theta and phi (lat and long)
# to determine which triangles are in the same "strip")
new_mesh['thetas'] = the_grid[:,16]
new_mesh['phis'] = the_grid[:,17]
# TODO: get rid of this list comprehension
# grads = np.array([libphoebe.roche_gradOmega_only(q, F, d, c) for c in new_mesh['centers']])
# new_mesh['normgrads'] = np.sqrt(grads[:,0]**2+grads[:,1]**2+grads[:,2]**2)
# new_mesh['normgrads'] = norms #np.linalg.norm(grads, axis=1)
# TODO: actually compute the numerical volume (find old code)
new_mesh['volume'] = compute_volume(new_mesh['areas'], new_mesh['centers'], new_mesh['tnormals'])
# new_mesh['area'] # TODO: compute surface area??? (not sure if needed)
new_mesh['velocities'] = np.zeros(new_mesh['centers'].shape)
return new_mesh | Transform a wd-style mesh to the format used by PHOEBE. Namely this handles
translating vertices from Nx9 to Nx3x3 and creating the array of indices
for each triangle.
:parameter record-array the_grid: output from discretize_wd_style
:parameter float q: mass-ratio (M_this/M_sibling)
:parameter float F: syncpar
:parameter float d: instantaneous unitless separation
:return: the dictionary in PHOEBE's format to be passed to a Mesh class | entailment |
def averages(self):
"""
Access to the average of the values at the vertices for each triangle.
If the quantities are defined at centers instead of vertices, this
will return None. Also see :method:`centers`.
:return: numpy array or None
"""
if not self.mesh._compute_at_vertices:
return None
return np.mean(self.vertices_per_triangle, axis=1) | Access to the average of the values at the vertices for each triangle.
If the quantities are defined at centers instead of vertices, this
will return None. Also see :method:`centers`.
:return: numpy array or None | entailment |
def weighted_averages(self):
"""
Access to the weighted averages of the values at the vertices for each
triangle based on the weights provided by mesh.weights. This is most
useful for partially visible triangles when using libphoebe's
eclipse detection that returns weights for each vertex.
Note that weights by default are set to 1/3 for each vertex, meaning
that this will provide the same values as :meth:`averages` unless
the weights are overridden within the mesh.
If the quantities are defined at centers instead of vertices, this will
return None.
:return: numpy array or None
"""
if not self.mesh._compute_at_vertices:
return None
vertices_per_triangle = self.vertices_per_triangle
if vertices_per_triangle.ndim==2:
# return np.dot(self.vertices_per_triangle, self.mesh.weights)
return np.sum(vertices_per_triangle*self.mesh.weights, axis=1)
elif vertices_per_triangle.ndim==3:
return np.sum(vertices_per_triangle*self.mesh.weights[:,np.newaxis], axis=1)
else:
raise NotImplementedError | Access to the weighted averages of the values at the vertices for each
triangle based on the weights provided by mesh.weights. This is most
useful for partially visible triangles when using libphoebe's
eclipse detection that returns weights for each vertex.
Note that weights by default are set to 1/3 for each vertex, meaning
that this will provide the same values as :meth:`averages` unless
the weights are overridden within the mesh.
If the quantities are defined at centers instead of vertices, this will
return None.
:return: numpy array or None | entailment |
def set_for_computations(self, value):
"""
Set the quantities, either at the vertices or centers depending on the
settings of the mesh (mesh._compute_at_vertices)
"""
if self.mesh._compute_at_vertices:
self._vertices = value
else:
self._centers = value | Set the quantities, either at the vertices or centers depending on the
settings of the mesh (mesh._compute_at_vertices) | entailment |
def update_columns_dict(self, kwargs):
"""
Update the value of a column or multiple columns by passing as a dict.
For observable columns, provide the label of the observable itself and
it will be found (so long as it does not conflict with an existing
non-observable column).
"""
# make sure to do the geometric things that are needed for some of the
# ComputedColumns first
for key in ('triangles', 'vertices', 'centers', 'vnormals', 'tnormals'):
if key in kwargs.keys():
self.__setitem__(key, kwargs.pop(key))
for k, v in kwargs.items():
if isinstance(v, float) and k not in self._scalar_fields:
# Then let's make an array with the correct length full of this
# scalar
# NOTE: this won't work for Nx3's, but that
# really shouldn't ever happen since they should be set
# within the init.
# v = np.ones(self.Ntriangles)*v
if self._compute_at_vertices:
v = np.full(self.Nvertices, v)
else:
v = np.full(self.Ntriangles, v)
self.__setitem__(k, v)
if isinstance(v, ComputedColumn):
# then let's update the mesh instance to correctly handle
# inheritance
self.__getitem__(k)._mesh = self | Update the value of a column or multiple columns by passing as a dict.
For observable columns, provide the label of the observable itself and
it will be found (so long as it does not conflict with an existing
non-observable column). | entailment |
def coords_for_observations(self):
"""
Return the coordinates from the center of the star for each element
(either centers or vertices depending on the setting in the mesh)
after perturbations (either by features or by offsetting to get
the correct volume). NOTE: this is NOT necessarily where the physical
parameters were computed, but IS where eclipse detection, etc, is
handled.
"""
if self._compute_at_vertices:
return self.vertices - self._pos_center
else:
return self.centers - self._pos_center | Return the coordinates from the center of the star for each element
(either centers or vertices depending on the setting in the mesh)
after perturbations (either by features or by offsetting to get
the correct volume). NOTE: this is NOT necessarily where the physical
parameters were computed, but IS where eclipse detection, etc, is
handled. | entailment |
def coords_for_computations(self):
"""
Return the coordinates from the center of the star for each element
(either centers or vertices depending on the setting in the mesh).
"""
# TODO: need to subtract the position offset if a Mesh (in orbit)
if self._compute_at_vertices:
if self.pvertices is not None:
return self.pvertices - self._pos_center
else:
return self.vertices - self._pos_center
else:
return self.centers - self._pos_center | Return the coordinates from the center of the star for each element
(either centers or vertices depending on the setting in the mesh). | entailment |
def rs(self):
"""
Return the radius of each element (either vertices or centers
depending on the setting in the mesh) with respect to the center of
the star.
NOTE: unscaled
(ComputedColumn)
"""
rs = np.linalg.norm(self.coords_for_computations, axis=1)
return ComputedColumn(self, rs) | Return the radius of each element (either vertices or centers
depending on the setting in the mesh) with respect to the center of
the star.
NOTE: unscaled
(ComputedColumn) | entailment |
def rprojs(self):
"""
Return the projected (in xy/uv plane) radius of each element (either
vertices or centers depending on the setting in the mesh) with respect
to the center of the star.
NOTE: unscaled
(ComputedColumn)
"""
# TODO: should this be moved to Mesh? Even though its surely possible
# to compute without being placed in orbit, projecting in x,y doesn't
# make much sense without LOS orientation.
rprojs = np.linalg.norm(self.coords_for_computations[:,:2], axis=1)
return ComputedColumn(self, rprojs) | Return the projected (in xy/uv plane) radius of each element (either
vertices or centers depending on the setting in the mesh) with respect
to the center of the star.
NOTE: unscaled
(ComputedColumn) | entailment |
def cosbetas(self):
"""
TODO: add documentation
(ComputedColumn)
"""
coords = self.coords_for_computations
norms = self.normals_for_computations
# TODO: ditch the list comprehension... I know I figured out how to do
# this (ie along an axis) with np.dot somewhere else
# cosbetas = np.array([np.dot(c,n) / (np.linalg.norm(c)*np.linalg.norm(n)) for c,n in zip(coords, norms)])
cosbetas = libphoebe.scalproj_cosangle(
np.ascontiguousarray(coords),
np.ascontiguousarray(norms)
)
return ComputedColumn(self, cosbetas) | TODO: add documentation
(ComputedColumn) | entailment |
def areas_si(self):
"""
TODO: add documentation
"""
if self._areas is not None:
return (self.areas*u.solRad**2).to(u.m**2).value
else:
return None | TODO: add documentation | entailment |
def from_proto(cls, proto_mesh, scale):
"""
TODO: add documentation
"""
mesh = cls(**proto_mesh.items())
mesh._copy_roche_values()
mesh._scale_mesh(scale=scale)
return mesh | TODO: add documentation | entailment |
def _scale_mesh(self, scale):
"""
TODO: add documentation
"""
pos_ks = ['vertices', 'centers']
# TODO: scale velocities???
# handle scale
self.update_columns_dict({k: self[k]*scale for k in pos_ks})
self.update_columns(areas=self.areas*(scale**2))
self._volume *= scale**3
if self._area is not None:
# self._area is None for wd meshes
self._area += scale**2 | TODO: add documentation | entailment |
def from_proto(cls, proto_mesh, scale,
pos, vel, euler, euler_vel,
rotation_vel=(0,0,0),
component_com_x=None):
"""
Turn a ProtoMesh into a Mesh scaled and placed in orbit.
Update all geometry fields from the proto reference frame, to the
current system reference frame, given the current position, velocitiy,
euler angles, and rotational velocity of THIS mesh.
:parameter list pos: current position (x, y, z)
:parameter list vel: current velocity (vx, vy, vz)
:parameter list euler: current euler angles (etheta, elongan, eincl)
:parameter list rotation_vel: rotation velocity vector (polar_dir*freq_rot)
"""
mesh = cls(**proto_mesh.items())
mesh._copy_roche_values()
mesh._scale_mesh(scale=scale)
mesh._place_in_orbit(pos, vel, euler, euler_vel, rotation_vel, component_com_x)
return mesh | Turn a ProtoMesh into a Mesh scaled and placed in orbit.
Update all geometry fields from the proto reference frame, to the
current system reference frame, given the current position, velocitiy,
euler angles, and rotational velocity of THIS mesh.
:parameter list pos: current position (x, y, z)
:parameter list vel: current velocity (vx, vy, vz)
:parameter list euler: current euler angles (etheta, elongan, eincl)
:parameter list rotation_vel: rotation velocity vector (polar_dir*freq_rot) | entailment |
def from_scaledproto(cls, scaledproto_mesh,
pos, vel, euler, euler_vel,
rotation_vel=(0,0,0),
component_com_x=None):
"""
TODO: add documentation
"""
mesh = cls(**scaledproto_mesh.items())
# roche coordinates have already been copied
# so do NOT call mesh._copy_roche_values() here
mesh._place_in_orbit(pos, vel, euler, euler_vel, rotation_vel, component_com_x)
return mesh | TODO: add documentation | entailment |
def _place_in_orbit(self, pos, vel, euler, euler_vel, rotation_vel=(0,0,0), component_com_x=None):
"""
TODO: add documentation
"""
# TODO: store pos, vel, euler so that INCREMENTAL changes are allowed
# if passing new values (and then make this a public method). See note
# below!
pos_ks = ['vertices', 'pvertices', 'centers']
norm_ks = ['vnormals', 'tnormals'] #, 'cnormals']
vel_ks = ['velocities']
# NOTE: we do velocities first since they require the positions WRT
# the star (not WRT the system). Will need to keep this in mind if we
# eventually support incremental transformations.
# pos_array = self.vertices if self._compute_at_vertices else self.centers
pos_array = self.roche_vertices if self._compute_at_vertices else self.roche_centers
if component_com_x is not None and component_com_x != 0.0:
# then we're the secondary component and need to do 1-x and then flip the rotation component vxs
pos_array = np.array([component_com_x, 0.0, 0.0]) - pos_array
self.update_columns_dict({k: transform_velocity_array(self[k], pos_array, vel, euler_vel, rotation_vel) for k in vel_ks if self[k] is not None})
# TODO: handle velocity from mesh reprojection during volume conservation
# handle rotation/displacement
# NOTE: mus will automatically be updated on-the-fly
self.update_columns_dict({k: transform_position_array(self[k], pos, euler, False) for k in pos_ks if self[k] is not None})
self.update_columns_dict({k: transform_position_array(self[k], pos, euler, True) for k in norm_ks if self[k] is not None})
# let's store the position. This is both useful for "undoing" the
# orbit-offset, and also eventually to allow incremental changes.
self._pos = pos
if component_com_x is not None and component_com_x != 0.0:
self._pos_center = transform_position_array(np.array([component_com_x, 0.0, 0.0]), pos, euler, False)
else:
self._pos_center = pos
self._euler = euler | TODO: add documentation | entailment |
def visibilities(self):
"""
Return the array of visibilities, where each item is a scalar/float
between 0 (completely hidden/invisible) and 1 (completely visible).
(Nx1)
"""
if self._visibilities is not None:
return self._visibilities
else:
return np.ones(self.Ntriangles) | Return the array of visibilities, where each item is a scalar/float
between 0 (completely hidden/invisible) and 1 (completely visible).
(Nx1) | entailment |
def weights(self):
"""
TODO: add documentation
(Nx3)
"""
if self._weights is not None and len(self._weights):
return self._weights
else:
return np.full((self.Ntriangles, 3), 1./3) | TODO: add documentation
(Nx3) | entailment |
def update_columns_dict(self, kwargs):
"""
TODO: add documentation
"""
super(Mesh, self).update_columns_dict(kwargs)
# if kwargs.get('vnormals', None) is not None or kwargs.get('tnormals', None) is not None:
# self._compute_mus()
if kwargs.get('triangles', None) is not None:
# reset visibilities and velocities so that they are reset
# when next queried
self.update_columns(visibilities=None, velocities=None) | TODO: add documentation | entailment |
def update_columns(self, field, value_dict, inds=None, computed_type=None):
"""
update the columns of all meshes
:parameter str field: name of the mesh columnname
:parameter value_dict: dictionary with component as keys and new
data as values. If value_dict is not a dictionary,
it will be applied to all components
:type value_dict: dict or value (array or float)
"""
if not isinstance(value_dict, dict):
value_dict = {comp_no: value_dict for comp_no in self._dict.keys()}
for comp, value in value_dict.items():
if computed_type is not None:
# then create the ComputedColumn now to override the default value of compute_at_vertices
self._dict[comp]._observables[field] = ComputedColumn(self._dict[comp], compute_at_vertices=computed_type=='vertices')
#print "***", comp, field, inds, value
if inds:
raise NotImplementedError('setting column with indices not yet ported to new meshing')
# self._dict[comp][field][inds] = value
else:
if comp in self._dict.keys():
self._dict[comp][field] = value
else:
meshes = self._dict[self._parent_envelope_of[comp]]
meshes[comp][field] = value | update the columns of all meshes
:parameter str field: name of the mesh columnname
:parameter value_dict: dictionary with component as keys and new
data as values. If value_dict is not a dictionary,
it will be applied to all components
:type value_dict: dict or value (array or float) | entailment |
def get_column(self, field, components=None, computed_type='for_observations'):
"""
TODO: add documentation
return a dictionary for a single column, with component as keys and the
column array as values
:parameter str field: name of the mesh columnname
:parameter components:
"""
def get_field(c, field, computed_type):
if c not in self._dict.keys() and self._parent_envelope_of[c] in self._dict.keys():
mesh = self._dict[self._parent_envelope_of[c]]
return mesh.get_column_flat(field, components, computed_type)
mesh = self._dict[c]
if isinstance(mesh, Meshes):
# then do this recursively for all components in the Meshes object
# but don't allow nesting in the dictionary, instead combine
# all subcomponents into one entry with the current component
return mesh.get_column_flat(field, mesh._components, computed_type)
f = mesh[field]
if isinstance(f, ComputedColumn):
col = getattr(f, computed_type)
else:
col = f
return col
if components:
if isinstance(components, str):
components = [components]
else:
components = self.keys()
return {c: get_field(c, field, computed_type) for c in components} | TODO: add documentation
return a dictionary for a single column, with component as keys and the
column array as values
:parameter str field: name of the mesh columnname
:parameter components: | entailment |
def get_column_flat(self, field, components=None, computed_type='for_observations'):
"""
TODO: add documentation
return a single merged value (hstacked) from all meshes
:parameter str field: name of the mesh columnname
:parameter components:
"""
return self.pack_column_flat(self.get_column(field, components, computed_type),
components,
offset=field=='triangles') | TODO: add documentation
return a single merged value (hstacked) from all meshes
:parameter str field: name of the mesh columnname
:parameter components: | entailment |
def pack_column_flat(self, value, components=None, offset=False):
"""
TODO: add documentation
"""
if components:
if isinstance(components, str):
components = [components]
elif isinstance(components, list):
components = components
else:
raise TypeError("components should be list or string, not {}".format(type(components)))
elif isinstance(value, dict):
components = value.keys()
elif isinstance(value, list):
components = self._dict.keys()
value = {c: v for c,v in zip(components, value)}
if offset:
values = []
offsetN = 0
for c in components:
values.append(value[c]+offsetN)
offsetN += len(self[c]['vertices'])
else:
values = [value[c] for c in components]
if len(value[components[0]].shape) > 1:
return np.vstack(values)
else:
return np.hstack(values) | TODO: add documentation | entailment |
def unpack_column_flat(self, value, components=None, offset=False, computed_type=None):
"""
TODO: add documentation
TODO: needs testing
"""
if components:
if isinstance(components, str):
components = [components]
else:
components = self._dict.keys()
# TODO: add this
# we need to split the flat array by the lengths of each mesh
N_lower = 0
N_upper = 0
offsetN = 0.0
value_dict = {}
for comp in components:
if isinstance(self[comp], Meshes):
# then we need to recursively extract to the underlying meshes
# pass
meshes = self[comp]._dict
else:
meshes = {comp: self[comp]}
for c, mesh in meshes.items():
if computed_type=='vertices' or (computed_type is None and mesh._compute_at_vertices):
N = mesh.Nvertices
else:
N = mesh.Ntriangles
N_upper += N
value_dict[c] = value[N_lower:N_upper] - offsetN
if offset:
offsetN += N
N_lower += N
return value_dict | TODO: add documentation
TODO: needs testing | entailment |
def set_column_flat(self, field, value, components=None, computed_type=None):
"""
TODO: add documentation
TODO: needs testing
"""
value_dict = self.unpack_column_flat(value, components, computed_type=computed_type)
self.update_columns(field, value_dict, computed_type=computed_type) | TODO: add documentation
TODO: needs testing | entailment |
def replace_elements(self, inds, new_submesh, component):
"""
TODO: add documentation
TODO: remove this method???
"""
self._dict[component] = np.hstack([self._dict[component][~inds], new_submesh]) | TODO: add documentation
TODO: remove this method??? | entailment |
def dynamics_from_bundle(b, times, compute=None, return_euler=False, **kwargs):
"""
Parse parameters in the bundle and call :func:`dynamics`.
See :func:`dynamics` for more detailed information.
NOTE: you must either provide compute (the label) OR all relevant options
as kwargs (ltte)
Args:
b: (Bundle) the bundle with a set hierarchy
times: (list or array) times at which to run the dynamics
return_euler: (bool, default=False) whether to include euler angles
in the return
Returns:
t, xs, ys, zs, vxs, vys, vzs [, theta, longan, incl].
t is a numpy array of all times,
the remaining are a list of numpy arrays (a numpy array per
star - in order given by b.hierarchy.get_stars()) for the cartesian
positions and velocities of each star at those same times.
Euler angles (theta, longan, incl) are only returned if return_euler is
set to True.
"""
b.run_delayed_constraints()
computeps = b.get_compute(compute, check_visible=False, force_ps=True)
ltte = computeps.get_value('ltte', check_visible=False, **kwargs)
# make sure times is an array and not a list
times = np.array(times)
vgamma = b.get_value('vgamma', context='system', unit=u.solRad/u.d)
t0 = b.get_value('t0', context='system', unit=u.d)
hier = b.hierarchy
starrefs = hier.get_stars()
orbitrefs = hier.get_orbits()
s = b.filter(context='component')
periods, eccs, smas, t0_perpasses, per0s, long_ans, incls, dpdts, \
deccdts, dperdts, components = [],[],[],[],[],[],[],[],[],[],[]
for component in starrefs:
# we need to build a list of all orbitlabels underwhich this component
# belongs. For a simple binary this is just the parent, but for hierarchical
# systems we need to get the labels of the outer-orbits as well
ancestororbits = []
comp = component
while hier.get_parent_of(comp) in orbitrefs:
comp = hier.get_parent_of(comp)
ancestororbits.append(comp)
#print "***", component, ancestororbits
periods.append([s.get_value('period', u.d, component=orbit) for orbit in ancestororbits])
eccs.append([s.get_value('ecc', component=orbit) for orbit in ancestororbits])
t0_perpasses.append([s.get_value('t0_perpass', u.d, component=orbit) for orbit in ancestororbits])
per0s.append([s.get_value('per0', u.rad, component=orbit) for orbit in ancestororbits])
long_ans.append([s.get_value('long_an', u.rad, component=orbit) for orbit in ancestororbits])
incls.append([s.get_value('incl', u.rad, component=orbit) for orbit in ancestororbits])
dpdts.append([s.get_value('dpdt', u.d/u.d, component=orbit) for orbit in ancestororbits])
if conf.devel:
deccdts.append([s.get_value('deccdt', u.dimensionless_unscaled/u.d, component=orbit) for orbit in ancestororbits])
else:
deccdts.append([0.0 for orbit in ancestororbits])
dperdts.append([s.get_value('dperdt', u.rad/u.d, component=orbit) for orbit in ancestororbits])
# sma needs to be the COMPONENT sma. This is stored in the bundle for stars, but is NOT
# for orbits in orbits, so we'll need to recompute those from the mass-ratio and sma of
# the parent orbit.
smas_this = []
for comp in [component]+ancestororbits[:-1]:
if comp in starrefs:
smas_this.append(s.get_value('sma', u.solRad, component=comp))
else:
q = s.get_value('q', component=hier.get_parent_of(comp))
comp_comp = hier.get_primary_or_secondary(comp)
# NOTE: similar logic is also in constraints.comp_sma
# If changing any of the logic here, it should be changed there as well.
if comp_comp == 'primary':
qthing = (1. + 1./q)
else:
qthing = (1. + q)
smas_this.append(s.get_value('sma', u.solRad, component=hier.get_parent_of(comp)) / qthing)
smas.append(smas_this)
# components is whether an entry is the primary or secondary in its parent orbit, so here we want
# to start with component and end one level short of the top-level orbit
components.append([hier.get_primary_or_secondary(component=comp) for comp in [component]+ancestororbits[:-1]])
return dynamics(times, periods, eccs, smas, t0_perpasses, per0s, \
long_ans, incls, dpdts, deccdts, dperdts, \
components, t0, vgamma, \
mass_conservation=True, ltte=ltte, return_euler=return_euler) | Parse parameters in the bundle and call :func:`dynamics`.
See :func:`dynamics` for more detailed information.
NOTE: you must either provide compute (the label) OR all relevant options
as kwargs (ltte)
Args:
b: (Bundle) the bundle with a set hierarchy
times: (list or array) times at which to run the dynamics
return_euler: (bool, default=False) whether to include euler angles
in the return
Returns:
t, xs, ys, zs, vxs, vys, vzs [, theta, longan, incl].
t is a numpy array of all times,
the remaining are a list of numpy arrays (a numpy array per
star - in order given by b.hierarchy.get_stars()) for the cartesian
positions and velocities of each star at those same times.
Euler angles (theta, longan, incl) are only returned if return_euler is
set to True. | entailment |
def dynamics(times, periods, eccs, smas, t0_perpasses, per0s, long_ans, incls,
dpdts, deccdts, dperdts, components, t0=0.0, vgamma=0.0,
mass_conservation=True, ltte=False, return_euler=False):
"""
Compute the positions and velocites of each star in their nested
Keplerian orbits at a given list of times.
See :func:`dynamics_from_bundle` for a wrapper around this function
which automatically handles passing everything in the correct order
and in the correct units.
Args:
times: (iterable) times at which to compute positions and
velocities for each star
periods: (iterable) period of the parent orbit for each star
[days]
eccs: (iterable) eccentricity of the parent orbit for each star
smas: (iterable) semi-major axis of the parent orbit for each
star [solRad]
t0_perpasses: (iterable) t0_perpass of the parent orbit for each
star [days]
per0s: (iterable) longitudes of periastron of the parent orbit
for each star [rad]
long_ans: (iterable) longitudes of the ascending node of the
parent orbit for each star [rad]
incls: (iterable) inclination of the parent orbit for each
star [rad]
dpdts: (iterable) change in period with respect to time of the
parent orbit for each star [days/day]
deccdts: (iterable) change in eccentricity with respect to time
of the parent orbit for each star [1/day]
dperdts: (iterable) change in periastron with respect to time
of the parent orbit for each star [rad/d]
components: (iterable) component ('primary' or 'secondary') of
each star within its parent orbit [string]
t0: (float, default=0) time at which all initial values (ie period, per0)
are given [days]
mass_conservation: (bool, optional) whether to require mass
conservation if any of the derivatives (dpdt, dperdt, etc)
are non-zero [default: True]
return_euler: (bool, default=False) whether to include euler angles
in the return
Returns:
t, xs, ys, zs, vxs, vys, vzs [, theta, longan, incl].
t is a numpy array of all times,
the remaining are a list of numpy arrays (a numpy array per
star - in order given by b.hierarchy.get_stars()) for the cartesian
positions and velocities of each star at those same times.
Euler angles (theta, longan, incl) are only returned if return_euler is
set to True.
"""
# TODO: NOTE: smas must be per-component, not per-orbit
# TODO: steal some documentation from 2.0a:keplerorbit.py:get_orbit
# TODO: deal with component number more smartly
def binary_dynamics(times, period, ecc, sma, t0_perpass, per0, long_an,
incl, dpdt, deccdt, dperdt, component='primary',
t0=0.0, vgamma=0.0, mass_conservation=True,
com_pos=(0.,0.,0.), com_vel=(0.,0.,0.), com_euler=(0.,0.,0.)):
"""
"""
# TODO: steal some documentation from 2.0a:keplerorbit.py:get_orbit
#-- if dpdt is non-zero, the period is actually an array, and the semi-
# major axis changes to match Kepler's third law (unless
# `mass_conservation` is set to False)
if dpdt!=0:
period_ = period
period = dpdt*(times-t0) + period_
if mass_conservation and not np.isscalar(period):
sma = sma/period[0]**2*period**2
elif mass_conservation:
sma = sma/period_**2*period**2
#-- if dperdt is non-zero, the argument of periastron is actually an
# array
if dperdt!=0.:
per0 = dperdt*(times-t0) + per0
#-- if deccdt is non-zero, the eccentricity is actually an array
if deccdt!=0:
ecc = deccdt*(times-t0) + ecc
#-- compute orbit
n = 2*pi/period
ma = n*(times-t0_perpass)
E,theta = _true_anomaly(ma,ecc)
r = sma*(1-ecc*cos(E))
PR = r*sin(theta)
#-- compute rdot and thetadot
l = r*(1+ecc*cos(theta))#-omega))
L = 2*pi*sma**2/period*sqrt(1-ecc**2)
rdot = L/l*ecc*sin(theta)#-omega)
thetadot = L/r**2
#-- the secondary is half an orbit further than the primary
if 'sec' in component.lower():
theta += pi
#-- take care of Euler angles
theta_ = theta+per0
#-- convert to the right coordinate frame
#-- create some shortcuts
sin_theta_ = sin(theta_)
cos_theta_ = cos(theta_)
sin_longan = sin(long_an)
cos_longan = cos(long_an)
#-- spherical coordinates to cartesian coordinates. Note that we actually
# set incl=-incl (but of course it doesn't show up in the cosine). We
# do this to match the convention that superior conjunction (primary
# eclipsed) happens at periastron passage when per0=90 deg.
x = r*(cos_longan*cos_theta_ - sin_longan*sin_theta_*cos(incl))
y = r*(sin_longan*cos_theta_ + cos_longan*sin_theta_*cos(incl))
z = r*(sin_theta_*sin(-incl))
#-- spherical vectors to cartesian vectors, and then rotated for
# the Euler angles Omega and i.
vx_ = cos_theta_*rdot - sin_theta_*r*thetadot
vy_ = sin_theta_*rdot + cos_theta_*r*thetadot
vx = cos_longan*vx_ - sin_longan*vy_*cos(incl)
vy = sin_longan*vx_ + cos_longan*vy_*cos(incl)
vz = sin(-incl)*vy_
#-- that's it!
# correct by vgamma (only z-direction)
# NOTE: vgamma is in the direction of positive RV or negative vz
vz -= vgamma
z -= vgamma * (times-t0)
return (x+com_pos[0],y+com_pos[1],z+com_pos[2]),\
(vx+com_vel[0],vy+com_vel[1],vz+com_vel[2]),\
(theta_,long_an,incl)
# (theta_+com_euler[0],long_an+com_euler[1],incl+com_euler[2])
def binary_dynamics_nested(times, periods, eccs, smas, \
t0_perpasses, per0s, long_ans, incls, dpdts, deccdts, \
dperdts, components, t0, vgamma, \
mass_conservation):
"""
compute the (possibly nested) positions of a single component (ltte should be
handle externally)
"""
if not hasattr(periods, '__iter__'):
# then we don't have to worry about nesting, and each item should
# be a single value ready to pass to binary_dynamics
pos, vel, euler = binary_dynamics(times, periods, eccs, smas, t0_perpasses, \
per0s, long_ans, incls, dpdts, deccdts, dperdts, components, \
t0, vgamma, mass_conservation)
else:
# Handle nesting - if this star is not in the top-level orbit, then
# all values should actually be lists. We want to sort by period to handle
# the outer orbits first and then apply those offsets to the inner-orbit(s)
# let's convert to arrays so we can use argsort easily
periods = np.array(periods)
eccs = np.array(eccs)
smas = np.array(smas)
t0_perpasses = np.array(t0_perpasses)
per0s = np.array(per0s)
long_ans = np.array(long_ans)
incls = np.array(incls)
dpdts = np.array(dpdts)
deccdts = np.array(deccdts)
dperdts = np.array(dperdts)
components = np.array(components)
si = periods.argsort()[::-1]
#print "***", periods, si
pos = (0.0, 0.0, 0.0)
vel = (0.0, 0.0, 0.0)
euler = (0.0, 0.0, 0.0)
for period, ecc, sma, t0_perpass, per0, long_an, incl, dpdt, \
deccdt, dperdt, component in zip(periods[si], eccs[si], \
smas[si], t0_perpasses[si], per0s[si], long_ans[si], \
incls[si], dpdts[si], deccdts[si], dperdts[si], components[si]):
pos, vel, euler = binary_dynamics(times, period, ecc, sma, t0_perpass, \
per0, long_an, incl, dpdt, deccdt, dperdt, component, \
t0, vgamma, mass_conservation,
com_pos=pos, com_vel=vel, com_euler=euler)
return pos, vel, euler
xs, ys, zs = [], [], []
vxs, vys, vzs = [], [], []
if return_euler:
ethetas, elongans, eincls = [], [], []
for period, ecc, sma, t0_perpass, per0, long_an, incl, dpdt, deccdt, \
dperdt, component in zip(periods, eccs, smas, t0_perpasses, per0s, long_ans, \
incls, dpdts, deccdts, dperdts, components):
# We now have the orbital parameters for a single star/component.
if ltte:
#scale_factor = 1.0/c.c.value * c.R_sun.value/(24*3600.)
scale_factor = (c.R_sun/c.c).to(u.d).value
def propertime_barytime_residual(t):
pos, vel, euler = binary_dynamics_nested(time, period, ecc, sma, \
t0_perpass, per0, long_an, incl, dpdt, deccdt, \
dperdt, components=component, t0=t0, vgamma=vgamma, \
mass_conservation=mass_conservation)
z = pos[2]
return t - z*scale_factor - time
# Finding that right time is easy with a Newton optimizer:
propertimes = [newton(propertime_barytime_residual, time) for \
time in times]
propertimes = np.array(propertimes).ravel()
pos, vel, euler = binary_dynamics_nested(propertimes, period, ecc, sma, \
t0_perpass, per0, long_an, incl, dpdt, deccdt, \
dperdt, components=component, t0=t0, vgamma=vgamma, \
mass_conservation=mass_conservation)
else:
pos, vel, euler = binary_dynamics_nested(times, period, ecc, sma, \
t0_perpass, per0, long_an, incl, dpdt, deccdt, \
dperdt, components=component, t0=t0, vgamma=vgamma, \
mass_conservation=mass_conservation)
xs.append(pos[0])
ys.append(pos[1])
zs.append(pos[2])
vxs.append(vel[0])
vys.append(vel[1])
vzs.append(vel[2])
if return_euler:
ethetas.append(euler[0])
elongans.append([euler[1]]*len(euler[0]))
eincls.append([euler[2]]*len(euler[0]))
# if return_euler:
# return times, \
# xs*u.solRad, ys*u.solRad, zs*u.solRad, \
# vxs*u.solRad/u.d, vys*u.solRad/u.d, vzs*u.solRad/u.d, \
# ethetas*u.rad, elongans*u.rad, eincls*u.rad
# else:
# return times, \
# xs*u.solRad, ys*u.solRad, zs*u.solRad, \
# vxs*u.solRad/u.d, vys*u.solRad/u.d, vzs*u.solRad/u.d if return_euler:
# d, solRad, solRad/d, rad
if return_euler:
return times, \
xs, ys, zs, \
vxs, vys, vzs, \
ethetas, elongans, eincls
else:
return times, \
xs, ys, zs, \
vxs, vys, vzs | Compute the positions and velocites of each star in their nested
Keplerian orbits at a given list of times.
See :func:`dynamics_from_bundle` for a wrapper around this function
which automatically handles passing everything in the correct order
and in the correct units.
Args:
times: (iterable) times at which to compute positions and
velocities for each star
periods: (iterable) period of the parent orbit for each star
[days]
eccs: (iterable) eccentricity of the parent orbit for each star
smas: (iterable) semi-major axis of the parent orbit for each
star [solRad]
t0_perpasses: (iterable) t0_perpass of the parent orbit for each
star [days]
per0s: (iterable) longitudes of periastron of the parent orbit
for each star [rad]
long_ans: (iterable) longitudes of the ascending node of the
parent orbit for each star [rad]
incls: (iterable) inclination of the parent orbit for each
star [rad]
dpdts: (iterable) change in period with respect to time of the
parent orbit for each star [days/day]
deccdts: (iterable) change in eccentricity with respect to time
of the parent orbit for each star [1/day]
dperdts: (iterable) change in periastron with respect to time
of the parent orbit for each star [rad/d]
components: (iterable) component ('primary' or 'secondary') of
each star within its parent orbit [string]
t0: (float, default=0) time at which all initial values (ie period, per0)
are given [days]
mass_conservation: (bool, optional) whether to require mass
conservation if any of the derivatives (dpdt, dperdt, etc)
are non-zero [default: True]
return_euler: (bool, default=False) whether to include euler angles
in the return
Returns:
t, xs, ys, zs, vxs, vys, vzs [, theta, longan, incl].
t is a numpy array of all times,
the remaining are a list of numpy arrays (a numpy array per
star - in order given by b.hierarchy.get_stars()) for the cartesian
positions and velocities of each star at those same times.
Euler angles (theta, longan, incl) are only returned if return_euler is
set to True. | entailment |
def _true_anomaly(M,ecc,itermax=8):
r"""
Calculation of true and eccentric anomaly in Kepler orbits.
``M`` is the phase of the star, ``ecc`` is the eccentricity
See p.39 of Hilditch, 'An Introduction To Close Binary Stars':
Kepler's equation:
.. math::
E - e\sin E = \frac{2\pi}{P}(t-T)
with :math:`E` the eccentric anomaly. The right hand size denotes the
observed phase :math:`M`. This function returns the true anomaly, which is
the position angle of the star in the orbit (:math:`\theta` in Hilditch'
book). The relationship between the eccentric and true anomaly is as
follows:
.. math::
\tan(\theta/2) = \sqrt{\frac{1+e}{1-e}} \tan(E/2)
@parameter M: phase
@type M: float
@parameter ecc: eccentricity
@type ecc: float
@keyword itermax: maximum number of iterations
@type itermax: integer
@return: eccentric anomaly (E), true anomaly (theta)
@rtype: float,float
"""
# Initial value
Fn = M + ecc*sin(M) + ecc**2/2.*sin(2*M)
# Iterative solving of the transcendent Kepler's equation
for i in range(itermax):
F = Fn
Mn = F-ecc*sin(F)
Fn = F+(M-Mn)/(1.-ecc*cos(F))
keep = F!=0 # take care of zerodivision
if hasattr(F,'__iter__'):
if np.all(abs((Fn-F)[keep]/F[keep])<0.00001):
break
elif (abs((Fn-F)/F)<0.00001):
break
# relationship between true anomaly (theta) and eccentric anomaly (Fn)
true_an = 2.*arctan(sqrt((1.+ecc)/(1.-ecc))*tan(Fn/2.))
return Fn,true_an | r"""
Calculation of true and eccentric anomaly in Kepler orbits.
``M`` is the phase of the star, ``ecc`` is the eccentricity
See p.39 of Hilditch, 'An Introduction To Close Binary Stars':
Kepler's equation:
.. math::
E - e\sin E = \frac{2\pi}{P}(t-T)
with :math:`E` the eccentric anomaly. The right hand size denotes the
observed phase :math:`M`. This function returns the true anomaly, which is
the position angle of the star in the orbit (:math:`\theta` in Hilditch'
book). The relationship between the eccentric and true anomaly is as
follows:
.. math::
\tan(\theta/2) = \sqrt{\frac{1+e}{1-e}} \tan(E/2)
@parameter M: phase
@type M: float
@parameter ecc: eccentricity
@type ecc: float
@keyword itermax: maximum number of iterations
@type itermax: integer
@return: eccentric anomaly (E), true anomaly (theta)
@rtype: float,float | entailment |
def spot(feature, **kwargs):
"""
Create parameters for a spot
Generally, this will be used as input to the method argument in
:meth:`phoebe.frontend.bundle.Bundle.add_feature`
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet`
"""
params = []
params += [FloatParameter(qualifier="colat", value=kwargs.get('colat', 0.0), default_unit=u.deg, description='Colatitude of the center of the spot wrt spin axes')]
params += [FloatParameter(qualifier="long", value=kwargs.get('long', 0.0), default_unit=u.deg, description='Longitude of the center of the spot wrt spin axis')]
params += [FloatParameter(qualifier='radius', value=kwargs.get('radius', 1.0), default_unit=u.deg, description='Angular radius of the spot')]
# params += [FloatParameter(qualifier='area', value=kwargs.get('area', 1.0), default_unit=u.solRad, description='Surface area of the spot')]
params += [FloatParameter(qualifier='relteff', value=kwargs.get('relteff', 1.0), limits=(0.,None), default_unit=u.dimensionless_unscaled, description='Temperature of the spot relative to the intrinsic temperature')]
# params += [FloatParameter(qualifier='teff', value=kwargs.get('teff', 10000), default_unit=u.K, description='Temperature of the spot')]
constraints = []
return ParameterSet(params), constraints | Create parameters for a spot
Generally, this will be used as input to the method argument in
:meth:`phoebe.frontend.bundle.Bundle.add_feature`
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` | entailment |
def pulsation(feature, **kwargs):
"""
Create parameters for a pulsation feature
Generally, this will be used as input to the method argument in
:meth:`phoebe.frontend.bundle.Bundle.add_feature`
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet`
"""
if not conf.devel:
raise NotImplementedError("'pulsation' feature not officially supported for this release. Enable developer mode to test.")
params = []
params += [FloatParameter(qualifier='radamp', value=kwargs.get('radamp', 0.1), default_unit=u.dimensionless_unscaled, description='Relative radial amplitude of the pulsations')]
params += [FloatParameter(qualifier='freq', value=kwargs.get('freq', 1.0), default_unit=u.d**-1, description='Frequency of the pulsations')]
params += [IntParameter(qualifier='l', value=kwargs.get('l', 0), default_unit=u.dimensionless_unscaled, description='Non-radial degree l')]
params += [IntParameter(qualifier='m', value=kwargs.get('m', 0), default_unit=u.dimensionless_unscaled, description='Azimuthal order m')]
params += [BoolParameter(qualifier='teffext', value=kwargs.get('teffext', False), description='Switch to denote whether Teffs are provided by the external code')]
constraints = []
return ParameterSet(params), constraints | Create parameters for a pulsation feature
Generally, this will be used as input to the method argument in
:meth:`phoebe.frontend.bundle.Bundle.add_feature`
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` | entailment |
def anim_to_html(anim):
"""
adapted from: http://jakevdp.github.io/blog/2013/05/12/embedding-matplotlib-animations/
This function converts and animation object from matplotlib into HTML which can then
be embedded in an IPython notebook.
This requires ffmpeg to be installed in order to build the intermediate mp4 file
To get these to display automatically, you need to set animation.Animation._repr_html_ = plotlib.anim_to_html
(this is done on your behalf by PHOEBE)
"""
if not hasattr(anim, '_encoded_video'):
with NamedTemporaryFile(suffix='.mp4') as f:
anim.save(f.name, fps=20, extra_args=['-vcodec', 'libx264'])
video = open(f.name, "rb").read()
anim._encoded_video = video.encode("base64")
return VIDEO_TAG.format(anim._encoded_video) | adapted from: http://jakevdp.github.io/blog/2013/05/12/embedding-matplotlib-animations/
This function converts and animation object from matplotlib into HTML which can then
be embedded in an IPython notebook.
This requires ffmpeg to be installed in order to build the intermediate mp4 file
To get these to display automatically, you need to set animation.Animation._repr_html_ = plotlib.anim_to_html
(this is done on your behalf by PHOEBE) | entailment |
def load_lc_data(filename, indep, dep, indweight=None, mzero=None, dir='./'):
"""
load dictionary with lc data
"""
if '/' in filename:
path, filename = os.path.split(filename)
else:
# TODO: this needs to change to be directory of the .phoebe file
path = dir
load_file = os.path.join(path, filename)
lcdata = np.loadtxt(load_file)
ncol = len(lcdata[0])
if dep == 'Magnitude':
mag = lcdata[:,1]
flux = 10**(-0.4*(mag-mzero))
lcdata[:,1] = flux
d = {}
d['phoebe_lc_time'] = lcdata[:,0]
d['phoebe_lc_flux'] = lcdata[:,1]
if indweight=="Standard deviation":
if ncol >= 3:
d['phoebe_lc_sigmalc'] = lcdata[:,2]
else:
logger.warning('A sigma column was mentioned in the .phoebe file but is not present in the lc data file')
elif indweight =="Standard weight":
if ncol >= 3:
sigma = np.sqrt(1/lcdata[:,2])
d['phoebe_lc_sigmalc'] = sigma
logger.warning('Standard weight has been converted to Standard deviation.')
else:
logger.warning('A sigma column was mentioned in the .phoebe file but is not present in the lc data file')
else:
logger.warning('Phoebe 2 currently only supports standard deviaton')
# dataset.set_value(check_visible=False, **d)
return d | load dictionary with lc data | entailment |
def load_rv_data(filename, indep, dep, indweight=None, dir='./'):
"""
load dictionary with rv data.
"""
if '/' in filename:
path, filename = os.path.split(filename)
else:
path = dir
load_file = os.path.join(path, filename)
rvdata = np.loadtxt(load_file)
d ={}
d['phoebe_rv_time'] = rvdata[:,0]
d['phoebe_rv_vel'] = rvdata[:,1]
ncol = len(rvdata[0])
if indweight=="Standard deviation":
if ncol >= 3:
d['phoebe_rv_sigmarv'] = rvdata[:,2]
else:
logger.warning('A sigma column is mentioned in the .phoebe file but is not present in the rv data file')
elif indweight =="Standard weight":
if ncol >= 3:
sigma = np.sqrt(1/rvdata[:,2])
d['phoebe_rv_sigmarv'] = sigma
logger.warning('Standard weight has been converted to Standard deviation.')
else:
logger.warning('Phoebe 2 currently only supports standard deviaton')
return d | load dictionary with rv data. | entailment |
def det_dataset(eb, passband, dataid, comp, time):
"""
Since RV datasets can have values related to each component in phoebe2, but are component specific in phoebe1
, it is important to determine which dataset to add parameters to. This function will do that.
eb - bundle
rvpt - relevant phoebe 1 parameters
"""
rvs = eb.get_dataset(kind='rv').datasets
#first check to see if there are currently in RV datasets
if dataid == 'Undefined':
dataid = None
# if len(rvs) == 0:
#if there isn't we add one the easy part
try:
eb._check_label(dataid)
rv_dataset = eb.add_dataset('rv', dataset=dataid, times=[])
except ValueError:
logger.warning("The name picked for the radial velocity curve is forbidden. Applying default name instead")
rv_dataset = eb.add_dataset('rv', times=[])
# else:
# #now we have to determine if we add to an existing dataset or make a new one
# rvs = eb.get_dataset(kind='rv').datasets
# found = False
# #set the component of the companion
#
# if comp == 'primary':
# comp_o = 'primary'
# else:
# comp_o = 'secondary'
# for x in rvs:
# test_dataset = eb.get_dataset(x, check_visible=False)
#
#
# if len(test_dataset.get_value(qualifier='rvs', component=comp_o, check_visible=False)) == 0: #so at least it has an empty spot now check against filter and length
# # removing reference to time_o. If there are no rvs there should be no times
# # time_o = test_dataset.get_value('times', component=comp_o)
# passband_o = test_dataset.get_value('passband')
#
# # if np.all(time_o == time) and (passband == passband_o):
# if (passband == passband_o):
# rv_dataset = test_dataset
# found = True
#
# if not found:
# try:
# eb._check_label(dataid)
#
# rv_dataset = eb.add_dataset('rv', dataset=dataid, times=[])
#
# except ValueError:
#
# logger.warning("The name picked for the lightcurve is forbidden. Applying default name instead")
# rv_dataset = eb.add_dataset('rv', times=[])
return rv_dataset | Since RV datasets can have values related to each component in phoebe2, but are component specific in phoebe1
, it is important to determine which dataset to add parameters to. This function will do that.
eb - bundle
rvpt - relevant phoebe 1 parameters | entailment |
def N_to_Ntriangles(N):
"""
@N: WD style gridsize
Converts WD style grid size @N to the number of triangles on the
surface.
Returns: number of triangles.
"""
theta = np.array([np.pi/2*(k-0.5)/N for k in range(1, N+1)])
phi = np.array([[np.pi*(l-0.5)/Mk for l in range(1, Mk+1)] for Mk in np.array(1 + 1.3*N*np.sin(theta), dtype=int)])
Ntri = 2*np.array([len(p) for p in phi]).sum()
return Ntri | @N: WD style gridsize
Converts WD style grid size @N to the number of triangles on the
surface.
Returns: number of triangles. | entailment |
def pot_for_component(pot, q, component=1, reverse=False):
"""
q for secondaries should already be flipped (via q_for_component)
"""
# currently only used by legacy wrapper: consider moving/removing
if component==1:
return pot
elif component==2:
if reverse:
return pot/q + 0.5*(q-1)/q
else:
return q*pot - 0.5 * (q-1)
else:
raise NotImplementedError | q for secondaries should already be flipped (via q_for_component) | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.