sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def _get_binary_from_ipv4(self, ip_addr):
"""Converts IPv4 address to binary form."""
return struct.unpack("!L", socket.inet_pton(socket.AF_INET,
ip_addr))[0] | Converts IPv4 address to binary form. | entailment |
def _get_binary_from_ipv6(self, ip_addr):
"""Converts IPv6 address to binary form."""
hi, lo = struct.unpack("!QQ", socket.inet_pton(socket.AF_INET6,
ip_addr))
return (hi << 64) | lo | Converts IPv6 address to binary form. | entailment |
def _get_ipv4_from_binary(self, bin_addr):
"""Converts binary address to Ipv4 format."""
return socket.inet_ntop(socket.AF_INET, struct.pack("!L", bin_addr)) | Converts binary address to Ipv4 format. | entailment |
def _get_ipv6_from_binary(self, bin_addr):
"""Converts binary address to Ipv6 format."""
hi = bin_addr >> 64
lo = bin_addr & 0xFFFFFFFF
return socket.inet_ntop(socket.AF_INET6, struct.pack("!QQ", hi, lo)) | Converts binary address to Ipv6 format. | entailment |
def _get_router_ip(self, cidr, ip_count, ip_ver):
    """For a given IP subnet and IP version type, generate IP for router.

    This method takes the network address (cidr) and selects an
    IP address that should be assigned to virtual router running
    on multiple switches. It uses upper addresses in a subnet address
    as IP for the router. Each instance of the router, on each switch,
    requires a unique IP address. For example in IPv4 case, on a /24
    subnet, it will pick X.X.X.254 as the first address, X.X.X.253 for
    the next, and so on.
    """
    start_ip = MLAG_SWITCHES + ip_count
    network_addr, prefix = cidr.split('/')
    if ip_ver == 4:
        bits = IPV4_BITS
        ip = self._get_binary_from_ipv4(network_addr)
    elif ip_ver == 6:
        bits = IPV6_BITS
        ip = self._get_binary_from_ipv6(network_addr)
    host_bits = bits - int(prefix)
    # Zero the host portion, then take the start_ip-th address from the
    # top of the host range.
    net_mask = ((1 << bits) - 1) << host_bits
    base_addr = ip & net_mask
    router_ip = base_addr | ((1 << host_bits) - start_ip)
    if ip_ver == 4:
        return self._get_ipv4_from_binary(router_ip) + '/' + prefix
    return self._get_ipv6_from_binary(router_ip) + '/' + prefix
This method takes the network address (cidr) and selects an
IP address that should be assigned to virtual router running
on multiple switches. It uses upper addresses in a subnet address
as IP for the router. Each instace of the router, on each switch,
requires uniqe IP address. For example in IPv4 case, on a 255
subnet, it will pick X.X.X.254 as first addess, X.X.X.253 for next,
and so on. | entailment |
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL or an Engine.
    Calls to context.execute() here emit the given string to the
    script output.
    """
    set_mysql_engine()
    connection = neutron_config.database.connection
    opts = {
        'include_object': include_object,
        'version_table': ARISTA_VERSION_TABLE,
    }
    # Prefer a full connection URL; fall back to the dialect name only.
    if connection:
        opts['url'] = connection
    else:
        opts['dialect_name'] = neutron_config.database.engine
    context.configure(**opts)
    with context.begin_transaction():
        context.run_migrations()
This configures the context with just a URL or an Engine.
Calls to context.execute() here emit the given string to the
script output. | entailment |
def create_tenant(self, tenant_id):
    """Queue a provisioning request to create the tenant."""
    resource = MechResource(tenant_id, a_const.TENANT_RESOURCE,
                            a_const.CREATE)
    self.provision_queue.put(resource)
def delete_tenant_if_removed(self, tenant_id):
    """Queue a tenant delete, but only if it is gone from the db."""
    if db_lib.tenant_provisioned(tenant_id):
        return
    resource = MechResource(tenant_id, a_const.TENANT_RESOURCE,
                            a_const.DELETE)
    self.provision_queue.put(resource)
def create_network(self, network):
    """Queue a provisioning request to create the network."""
    resource = MechResource(network['id'], a_const.NETWORK_RESOURCE,
                            a_const.CREATE)
    self.provision_queue.put(resource)
def delete_network(self, network):
    """Queue a provisioning request to delete the network."""
    resource = MechResource(network['id'], a_const.NETWORK_RESOURCE,
                            a_const.DELETE)
    self.provision_queue.put(resource)
def create_segments(self, segments):
    """Queue a provisioning request for each segment create."""
    for segment in segments:
        self.provision_queue.put(
            MechResource(segment['id'], a_const.SEGMENT_RESOURCE,
                         a_const.CREATE))
def delete_segments(self, segments):
    """Queue a provisioning request for each segment delete."""
    for segment in segments:
        self.provision_queue.put(
            MechResource(segment['id'], a_const.SEGMENT_RESOURCE,
                         a_const.DELETE))
def get_instance_type(self, port):
    """Determine the port type based on device owner and vnic type.

    Returns one of the a_const *_RESOURCE constants, or None when the
    port's owner is not handled by this driver.
    """
    # Baremetal is keyed off the vnic type, independent of device owner.
    if port[portbindings.VNIC_TYPE] == portbindings.VNIC_BAREMETAL:
        return a_const.BAREMETAL_RESOURCE
    owner_to_type = {
        n_const.DEVICE_OWNER_DHCP: a_const.DHCP_RESOURCE,
        n_const.DEVICE_OWNER_DVR_INTERFACE: a_const.ROUTER_RESOURCE,
        trunk_consts.TRUNK_SUBPORT_OWNER: a_const.VM_RESOURCE}
    # Single dict lookup replaces the `in d.keys()` test + second lookup.
    device_owner = port['device_owner']
    instance_type = owner_to_type.get(device_owner)
    if instance_type is not None:
        return instance_type
    # Any compute-owned port (e.g. 'compute:nova') maps to a VM.
    if device_owner.startswith(n_const.DEVICE_OWNER_COMPUTE_PREFIX):
        return a_const.VM_RESOURCE
    return None
def create_instance(self, port):
    """Queue an instance create for the port's device, if supported."""
    instance_type = self.get_instance_type(port)
    if instance_type:
        self.provision_queue.put(
            MechResource(port['device_id'], instance_type, a_const.CREATE))
def delete_instance_if_removed(self, port):
    """Queue an instance delete if the device is gone from the db."""
    instance_type = self.get_instance_type(port)
    if not instance_type:
        return
    if db_lib.instance_provisioned(port['device_id']):
        return
    self.provision_queue.put(
        MechResource(port['device_id'], instance_type, a_const.DELETE))
def create_port(self, port):
    """Queue a port create for supported instance types."""
    instance_type = self.get_instance_type(port)
    if instance_type:
        resource = MechResource(port['id'],
                                instance_type + a_const.PORT_SUFFIX,
                                a_const.CREATE)
        self.provision_queue.put(resource)
def delete_port_if_removed(self, port):
    """Queue a port delete if the port is gone from the db."""
    instance_type = self.get_instance_type(port)
    if not instance_type:
        return
    if db_lib.port_provisioned(port['id']):
        return
    self.provision_queue.put(
        MechResource(port['id'], instance_type + a_const.PORT_SUFFIX,
                     a_const.DELETE))
def _get_binding_keys(self, port, host):
    """Return (port_id, binding) keys derived from the port binding.

    Baremetal ports yield one key per switch link in the binding
    profile's local_link_information; all other ports yield a single
    (port_id, host) key.
    """
    link_info = port[portbindings.PROFILE].get('local_link_information')
    if link_info:
        return [(port['id'], (link.get('switch_id'), link.get('port_id')))
                for link in link_info]
    return [(port['id'], host)]
def create_port_binding(self, port, host):
    """Queue a create for every binding key of a supported port."""
    if not self.get_instance_type(port):
        return
    for key in self._get_binding_keys(port, host):
        self.provision_queue.put(
            MechResource(key, a_const.PORT_BINDING_RESOURCE,
                         a_const.CREATE))
def delete_port_binding(self, port, host):
    """Queue a delete for every binding key of a supported port."""
    if not self.get_instance_type(port):
        return
    for key in self._get_binding_keys(port, host):
        self.provision_queue.put(
            MechResource(key, a_const.PORT_BINDING_RESOURCE,
                         a_const.DELETE))
def create_network_postcommit(self, context):
    """Provision the network on CVX"""
    network = context.current
    log_context("create_network_postcommit: network", network)
    segments = context.network_segments
    # Provision top-down: tenant first, then network, then its segments.
    self.create_tenant(network['project_id'])
    self.create_network(network)
    self.create_segments(segments)
def update_network_postcommit(self, context):
    """Send network updates to CVX:
    - Update the network name
    - Add new segments

    NOTE(review): the original docstring also claimed stale segments are
    deleted here, but this method issues no deletes -- confirm whether
    stale-segment cleanup happens elsewhere.
    """
    network = context.current
    orig_network = context.original
    log_context("update_network_postcommit: network", network)
    log_context("update_network_postcommit: orig", orig_network)
    segments = context.network_segments
    self.create_network(network)
    # New segments may have been added
    self.create_segments(segments)
- Update the network name
- Add new segments
- Delete stale segments | entailment |
def delete_network_postcommit(self, context):
    """Delete the network from CVX"""
    network = context.current
    log_context("delete_network_postcommit: network", network)
    segments = context.network_segments
    # Tear down in reverse order of creation: segments, network, tenant.
    self.delete_segments(segments)
    self.delete_network(network)
    self.delete_tenant_if_removed(network['project_id'])
def update_port_postcommit(self, context):
    """Send port updates to CVX

    This method is also responsible for the initial creation of ports
    as we wait until after a port is bound to send the port data to CVX.

    :param context: ML2 PortContext for the updated port
    """
    port = context.current
    orig_port = context.original
    network = context.network.current
    log_context("update_port_postcommit: port", port)
    log_context("update_port_postcommit: orig", orig_port)
    tenant_id = port['project_id']
    # Device id can change without a port going DOWN, but the new device
    # id may not be supported
    if orig_port and port['device_id'] != orig_port['device_id']:
        self._delete_port_resources(orig_port, context.original_host)
    if context.status == n_const.PORT_STATUS_DOWN:
        # Port just went DOWN: if it had a host and the status changed,
        # clean up the old host's resources and release any dynamic
        # segment allocated for the migration source.
        if (context.original_host and
            context.status != context.original_status):
            self._delete_port_resources(orig_port, context.original_host)
            self._try_to_release_dynamic_segment(context, migration=True)
    else:
        # Port is up: (re)create the full resource chain top-down --
        # tenant, network, bound segments, instance, port, binding.
        self.create_tenant(tenant_id)
        self.create_network(network)
        if context.binding_levels:
            segments = [
                level['bound_segment'] for level in context.binding_levels]
            self.create_segments(segments)
        self.create_instance(port)
        self.create_port(port)
        self.create_port_binding(port, context.host)
This method is also responsible for the initial creation of ports
as we wait until after a port is bound to send the port data to CVX | entailment |
def delete_port_postcommit(self, context):
    """Delete the port from CVX"""
    log_context("delete_port_postcommit: port", context.current)
    # Remove the port's resources, then free any dynamic segment that
    # this driver allocated for it.
    self._delete_port_resources(context.current, context.host)
    self._try_to_release_dynamic_segment(context)
def _bind_baremetal_port(self, context, segment):
    """Bind the baremetal port to the segment"""
    port = context.current
    vlan = str(segment[driver_api.SEGMENTATION_ID])
    details = {portbindings.VIF_DETAILS_VLAN: vlan}
    context.set_binding(segment[driver_api.ID],
                        portbindings.VIF_TYPE_OTHER,
                        details,
                        n_const.ACTIVE)
    LOG.debug("AristaDriver: bound port info- port ID %(id)s "
              "on network %(network)s",
              {'id': port['id'],
               'network': context.network.current['id']})
    # Trunk parent ports also need their subports bound.
    if port.get('trunk_details'):
        self.trunk_driver.bind_port(port)
    return True
def _get_physnet(self, context):
    """Find the appropriate physnet for the host.

    - Baremetal ports' physnet is determined by looking at the
      local_link_information contained in the binding profile
    - Other ports' physnet is determined by looking for the host in the
      topology
    """
    port = context.current
    if port.get(portbindings.VNIC_TYPE) == portbindings.VNIC_BAREMETAL:
        physnet = self.eapi.get_baremetal_physnet(context)
    else:
        physnet = self.eapi.get_host_physnet(context)
    # If the switch is part of an mlag pair, the physnet is called
    # peer1_peer2
    return self.mlag_pairs.get(physnet, physnet)
- Baremetal ports' physnet is determined by looking at the
local_link_information contained in the binding profile
- Other ports' physnet is determined by looking for the host in the
topology | entailment |
def _bind_fabric(self, context, segment):
    """Allocate dynamic segments for the port.

    Segment physnets are based on the switch to which the host is
    connected.
    """
    port_id = context.current['id']
    physnet = self._get_physnet(context)
    if not physnet:
        LOG.debug("bind_port for port %(port)s: no physical_network "
                  "found", {'port': port_id})
        return False
    segment_spec = {'network_id': context.network.current['id'],
                    'network_type': n_const.TYPE_VLAN,
                    'physical_network': physnet}
    next_segment = context.allocate_dynamic_segment(segment_spec)
    LOG.debug("bind_port for port %(port)s: "
              "current_segment=%(current_seg)s, "
              "next_segment=%(next_seg)s",
              {'port': port_id, 'current_seg': segment,
               'next_seg': next_segment})
    # Hand the newly allocated segment to the next driver level.
    context.continue_binding(segment['id'], [next_segment])
    return True
Segment physnets are based on the switch to which the host is
connected. | entailment |
def bind_port(self, context):
    """Bind port to a network segment.

    Provisioning request to Arista Hardware to plug a host
    into appropriate network is done when the port is created
    this simply tells the ML2 Plugin that we are binding the port

    :param context: ML2 PortContext for the port being bound
    """
    port = context.current
    log_context("bind_port: port", port)
    for segment in context.segments_to_bind:
        physnet = segment.get(driver_api.PHYSICAL_NETWORK)
        segment_type = segment[driver_api.NETWORK_TYPE]
        if not physnet:
            # Physnet-less (e.g. VXLAN) segments are bound by allocating
            # a dynamic fabric segment, when fabric management is on.
            if (segment_type == n_const.TYPE_VXLAN and self.manage_fabric):
                if self._bind_fabric(context, segment):
                    continue
        elif (port.get(portbindings.VNIC_TYPE)
              == portbindings.VNIC_BAREMETAL):
            # Segments with a physnet bind directly, but only baremetal
            # ports on a managed physnet (an empty managed_physnets list
            # means every physnet is managed).
            if (not self.managed_physnets or
                    physnet in self.managed_physnets):
                if self._bind_baremetal_port(context, segment):
                    continue
        # Fall through: this driver could not bind the segment.
        LOG.debug("Arista mech driver unable to bind port %(port)s to "
                  "%(seg_type)s segment on physical_network %(physnet)s",
                  {'port': port.get('id'), 'seg_type': segment_type,
                   'physnet': physnet})
Provisioning request to Arista Hardware to plug a host
into appropriate network is done when the port is created
this simply tells the ML2 Plugin that we are binding the port | entailment |
def _try_to_release_dynamic_segment(self, context, migration=False):
    """Release dynamic segment if necessary

    If this port was the last port using a segment and the segment was
    allocated by this driver, it should be released.

    :param context: ML2 PortContext
    :param migration: use the original (pre-migration) binding levels
        instead of the current ones
    """
    if migration:
        binding_levels = context.original_binding_levels
    else:
        binding_levels = context.binding_levels
    LOG.debug("_try_release_dynamic_segment: "
              "binding_levels=%(bl)s", {'bl': binding_levels})
    if not binding_levels:
        return
    # Walk consecutive level pairs: the driver bound at level i is the
    # one that allocated the segment bound at level i + 1, so
    # enumerating over binding_levels[1:] makes prior_level index the
    # parent level of each binding.
    for prior_level, binding in enumerate(binding_levels[1:]):
        allocating_driver = binding_levels[prior_level].get(
            driver_api.BOUND_DRIVER)
        if allocating_driver != a_const.MECHANISM_DRV_NAME:
            continue
        bound_segment = binding.get(driver_api.BOUND_SEGMENT, {})
        segment_id = bound_segment.get('id')
        # Only dynamic segments that no other port is still bound to
        # may be released.
        if not db_lib.segment_is_dynamic(segment_id):
            continue
        if not db_lib.segment_bound(segment_id):
            context.release_dynamic_segment(segment_id)
            LOG.debug("Released dynamic segment %(seg)s allocated "
                      "by %(drv)s", {'seg': segment_id,
                                     'drv': allocating_driver})
If this port was the last port using a segment and the segment was
allocated by this driver, it should be released | entailment |
def bdom_roll_date(sd, ed, bdom, months, holidays=None):
    """
    Convenience function for getting business day data associated with
    contracts. Useful for generating business day derived 'contract_dates'
    which can be used as input to roller(). Returns dates for a business day
    of the month for months in months.keys() between the start date and end
    date.

    Parameters
    ----------
    sd: str
        String representing start date, %Y%m%d
    ed: str
        String representing end date, %Y%m%d
    bdom: int
        Integer indicating business day of month
    months: dict
        Dictionary where key is integer representation of month [1-12] and
        value is the month code [FGHJKMNQUVXZ]
    holidays: list, optional
        List of holidays to exclude from business days

    Return
    ------
    A DataFrame with columns ['date', 'year', 'month', 'bdom', 'month_code']

    Examples
    --------
    >>> import pandas as pd
    >>> from mapping.mappings import bdom_roll_date
    >>> bdom_roll_date("20160101", "20180501", 7, {1:"G", 3:"J", 8:"U"})
    >>> bdom_roll_date("20160101", "20180501", 7, {1:"G", 3:"J", 8:"U"},
    ...                holidays=[pd.Timestamp("20160101")])
    """
    if not isinstance(bdom, int):
        raise ValueError("'bdom' must be integer")
    # Avoid a mutable default argument; None is the conventional sentinel.
    if holidays is None:
        holidays = []
    sd = pd.Timestamp(sd)
    ed = pd.Timestamp(ed)
    # Expand to whole months so the business-day count always starts at 1.
    t1 = sd
    if not t1.is_month_start:
        t1 = t1 - pd.offsets.MonthBegin(1)
    t2 = ed
    if not t2.is_month_end:
        t2 = t2 + pd.offsets.MonthEnd(1)
    dates = pd.date_range(t1, t2, freq="B")
    dates = dates.difference(holidays)
    date_data = pd.DataFrame({"date": dates, "year": dates.year,
                              "month": dates.month, "bdom": 1})
    # Cumulative sum within each (year, month) numbers business days 1..n.
    date_data.loc[:, "bdom"] = (
        date_data.groupby(by=["year", "month"])["bdom"].cumsum()
    )
    date_data = date_data.loc[date_data.bdom == bdom, :]
    # .copy() so the month_code assignment below does not warn about
    # writing to a view of the filtered frame.
    date_data = date_data.loc[date_data.month.isin(months), :].copy()
    date_data.loc[:, "month_code"] = date_data.month.apply(lambda x: months[x])
    # Trim back to the caller's [sd, ed] window.
    idx = (date_data.date >= sd) & (date_data.date <= ed)
    order = ['date', 'year', 'month', 'bdom', 'month_code']
    date_data = (date_data.loc[idx, order]
                 .reset_index(drop=True))
    return date_data
contracts. Usefully for generating business day derived 'contract_dates'
which can be used as input to roller(). Returns dates for a business day of
the month for months in months.keys() between the start date and end date.
Parameters
----------
sd: str
String representing start date, %Y%m%d
ed: str
String representing end date, %Y%m%d
bdom: int
Integer indicating business day of month
months: dict
Dictionnary where key is integer representation of month [1-12] and
value is the month code [FGHJKMNQUVXZ]
holidays: list
List of holidays to exclude from business days
Return
------
A DataFrame with columns ['date', 'year', 'month', 'bdom', 'month_code']
Examples
--------
>>> import pandas as pd
>>> from mapping.mappings import bdom_roll_date
>>> bdom_roll_date("20160101", "20180501", 7, {1:"G", 3:"J", 8:"U"})
>>> bdom_roll_date("20160101", "20180501", 7, {1:"G", 3:"J", 8:"U"},
... holidays=[pd.Timestamp("20160101")]) | entailment |
def roller(timestamps, contract_dates, get_weights, **kwargs):
    """
    Calculate weight allocations to tradeable instruments for generic
    futures at a set of timestamps for a given root generic.

    Parameters
    ----------
    timestamps: iterable
        Sorted iterable of pandas.Timestamps to calculate weights for
    contract_dates: pandas.Series
        Series with index of tradeable contract names and
        pandas.Timestamps representing the last date of the roll as
        values, sorted by values. Index must be unique and values must
        be strictly monotonic.
    get_weights: function
        A function which takes in a timestamp, contract_dates,
        validate_inputs and **kwargs. Returns a list of tuples consisting
        of the generic instrument name, the tradeable contract as a
        string, the weight on this contract as a float and the date as a
        pandas.Timestamp.
    kwargs: keyword arguments
        Arguments to pass to get_weights

    Return
    ------
    A pandas.DataFrame with columns representing generics and a MultiIndex
    of date and contract. Values represent weights on tradeables for each
    generic.

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.mappings as mappings
    >>> cols = pd.MultiIndex.from_product([["CL1", "CL2"],
    ...                                    ['front', 'back']])
    >>> idx = [-2, -1, 0]
    >>> trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
    ...                       [0.0, 1.0, 0.0, 1.0]], index=idx, columns=cols)
    >>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'),
    ...                             pd.Timestamp('2016-11-21'),
    ...                             pd.Timestamp('2016-12-20')],
    ...                            index=['CLX16', 'CLZ16', 'CLF17'])
    >>> ts = pd.DatetimeIndex([pd.Timestamp('2016-10-18'),
    ...                        pd.Timestamp('2016-10-19'),
    ...                        pd.Timestamp('2016-10-19')])
    >>> wts = mappings.roller(ts, contract_dates,
    ...                       mappings.static_transition, transition=trans)
    """
    ordered_ts = sorted(timestamps)
    sorted_dates = contract_dates.sort_values()
    _check_contract_dates(sorted_dates)
    # Validate inputs only on the first call to get_weights() -- the
    # inputs do not change between timestamps, so re-validating on every
    # iteration would just slow the loop down.
    collected = list(get_weights(ordered_ts[0], sorted_dates,
                                 validate_inputs=True, **kwargs))
    for stamp in ordered_ts[1:]:
        collected.extend(get_weights(stamp, sorted_dates,
                                     validate_inputs=False, **kwargs))
    return aggregate_weights(collected)
at a set of timestamps for a given root generic.
Paramters
---------
timestamps: iterable
Sorted iterable of of pandas.Timestamps to calculate weights for
contract_dates: pandas.Series
Series with index of tradeable contract names and pandas.Timestamps
representing the last date of the roll as values, sorted by values.
Index must be unique and values must be strictly monotonic.
get_weights: function
A function which takes in a timestamp, contract_dates, validate_inputs
and **kwargs. Returns a list of tuples consisting of the generic
instrument name, the tradeable contract as a string, the weight on this
contract as a float and the date as a pandas.Timestamp.
kwargs: keyword arguments
Arguements to pass to get_weights
Return
------
A pandas.DataFrame with columns representing generics and a MultiIndex of
date and contract. Values represent weights on tradeables for each generic.
Examples
--------
>>> import pandas as pd
>>> import mapping.mappings as mappings
>>> cols = pd.MultiIndex.from_product([["CL1", "CL2"], ['front', 'back']])
>>> idx = [-2, -1, 0]
>>> trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
... [0.0, 1.0, 0.0, 1.0]], index=idx, columns=cols)
>>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'),
... pd.Timestamp('2016-11-21'),
... pd.Timestamp('2016-12-20')],
... index=['CLX16', 'CLZ16', 'CLF17'])
>>> ts = pd.DatetimeIndex([pd.Timestamp('2016-10-18'),
... pd.Timestamp('2016-10-19'),
... pd.Timestamp('2016-10-19')])
>>> wts = mappings.roller(ts, contract_dates, mappings.static_transition,
... transition=trans) | entailment |
def aggregate_weights(weights, drop_date=False):
    """
    Transforms list of tuples of weights into pandas.DataFrame of weights.

    Parameters
    ----------
    weights: list
        A list of tuples consisting of the generic instrument name,
        the tradeable contract as a string, the weight on this contract as
        a float and the date as a pandas.Timestamp.
    drop_date: boolean
        Whether to drop the date from the MultiIndex

    Returns
    -------
    A pandas.DataFrame of loadings of generic contracts on tradeable
    instruments for a given date. The columns are generic instrument names
    and the index is strings representing instrument names.
    """
    dwts = pd.DataFrame(weights,
                        columns=["generic", "contract", "weight", "date"])
    dwts = dwts.pivot_table(index=['date', 'contract'],
                            columns=['generic'], values='weight',
                            fill_value=0)
    dwts = dwts.astype(float)
    dwts = dwts.sort_index()
    if drop_date:
        # droplevel preserves row labels and order; the previous
        # `index.levels[-1]` returned the sorted *unique* level values,
        # which misaligns (or fails) whenever dates repeat.
        dwts.index = dwts.index.droplevel('date')
    return dwts
Parameters:
-----------
weights: list
A list of tuples consisting of the generic instrument name,
the tradeable contract as a string, the weight on this contract as a
float and the date as a pandas.Timestamp.
drop_date: boolean
Whether to drop the date from the multiIndex
Returns
-------
A pandas.DataFrame of loadings of generic contracts on tradeable
instruments for a given date. The columns are generic instrument names and
the index is strings representing instrument names. | entailment |
def static_transition(timestamp, contract_dates, transition, holidays=None,
                      validate_inputs=True):
    """
    An implementation of *get_weights* parameter in roller().
    Return weights to tradeable instruments for a given date based on a
    transition DataFrame which indicates how to roll through the roll
    period.

    Parameters
    ----------
    timestamp: pandas.Timestamp
        The timestamp to return instrument weights for
    contract_dates: pandas.Series
        Series with index of tradeable contract names and
        pandas.Timestamps representing the last date of the roll as
        values, sorted by values. Index must be unique and values must be
        strictly monotonic.
    transition: pandas.DataFrame
        A DataFrame with a index of integers representing business day
        offsets from the last roll date and a column which is a MultiIndex
        where the top level is generic instruments and the second level is
        ['front', 'back'] which refer to the front month contract and the
        back month contract of the roll. Note that for different generics,
        e.g. CL1, CL2, the front and back month contract during a roll
        would refer to different underlying instruments. The values
        represent the fraction of the roll on each day during the roll
        period. The first row of the transition period should be
        completely allocated to the front contract and the last row should
        be completely allocated to the back contract.
    holidays: array_like of datetime64[D]
        Holidays to exclude when calculating business day offsets from the
        last roll date. See numpy.busday_count.
    validate_inputs: Boolean
        Whether or not to validate ordering of contract_dates and
        transition. **Caution** this is provided for speed however if this
        is set to False and inputs are not defined properly algorithm may
        return incorrect data.

    Returns
    -------
    A list of tuples consisting of the generic instrument name, the
    tradeable contract as a string, the weight on this contract as a float
    and the date as a pandas.Timestamp.

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.mappings as mappings
    >>> cols = pd.MultiIndex.from_product([["CL1", "CL2"],
    ...                                    ['front', 'back']])
    >>> idx = [-2, -1, 0]
    >>> transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0],
    ...                            [0.5, 0.5, 0.5, 0.5],
    ...                            [0.0, 1.0, 0.0, 1.0]],
    ...                           index=idx, columns=cols)
    >>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'),
    ...                             pd.Timestamp('2016-11-21'),
    ...                             pd.Timestamp('2016-12-20')],
    ...                            index=['CLX16', 'CLZ16', 'CLF17'])
    >>> ts = pd.Timestamp('2016-10-19')
    >>> wts = mappings.static_transition(ts, contract_dates, transition)
    """
    if validate_inputs:
        # required for MultiIndex slicing
        _check_static(transition.sort_index(axis=1))
        # the algorithm below will return invalid results if contract_dates
        # is not as expected so better to fail explicitly
        _check_contract_dates(contract_dates)
    if not holidays:
        holidays = []
    # further speedup can be obtained using contract_dates.loc[timestamp:]
    # but this requires swapping contract_dates index and values
    after_contract_dates = contract_dates.loc[contract_dates >= timestamp]
    contracts = after_contract_dates.index
    front_expiry_dt = after_contract_dates.iloc[0]
    # negative before the front expiry, matching transition's index of
    # non-positive business-day offsets
    days_to_expiry = np.busday_count(front_expiry_dt.date(), timestamp.date(),
                                     holidays=holidays)
    name2num = dict(zip(transition.columns.levels[0],
                        range(len(transition.columns.levels[0]))))
    if days_to_expiry in transition.index:
        # Series.items() replaces iteritems(), which was removed in
        # pandas 2.0
        weights_iter = transition.loc[days_to_expiry].items()
    # roll hasn't started yet
    elif days_to_expiry < transition.index.min():
        # provides significant speedup over transition.iloc[0].items()
        vals = transition.values[0]
        weights_iter = zip(transition.columns.tolist(), vals)
    # roll is finished
    else:
        vals = transition.values[-1]
        weights_iter = zip(transition.columns.tolist(), vals)
    cwts = []
    for idx_tuple, weighting in weights_iter:
        gen_name, position = idx_tuple
        if weighting != 0:
            # 'front' maps to the generic's contract, 'back' to the next
            # one along in the expiry-ordered contract list
            if position == "front":
                cntrct_idx = name2num[gen_name]
            elif position == "back":
                cntrct_idx = name2num[gen_name] + 1
            try:
                cntrct_name = contracts[cntrct_idx]
            except IndexError as e:
                raise type(e)(("index {0} is out of bounds in\n{1}\nas of {2} "
                               "resulting from {3} mapping")
                              .format(cntrct_idx, after_contract_dates,
                                      timestamp, idx_tuple)
                              ).with_traceback(sys.exc_info()[2])
            cwts.append((gen_name, cntrct_name, weighting, timestamp))
    return cwts
Return weights to tradeable instruments for a given date based on a
transition DataFrame which indicates how to roll through the roll period.
Parameters
----------
timestamp: pandas.Timestamp
The timestamp to return instrument weights for
contract_dates: pandas.Series
Series with index of tradeable contract names and pandas.Timestamps
representing the last date of the roll as values, sorted by values.
Index must be unique and values must be strictly monotonic.
transition: pandas.DataFrame
A DataFrame with a index of integers representing business day offsets
from the last roll date and a column which is a MultiIndex where the
top level is generic instruments and the second level is
['front', 'back'] which refer to the front month contract and the back
month contract of the roll. Note that for different generics, e.g. CL1,
CL2, the front and back month contract during a roll would refer to
different underlying instruments. The values represent the fraction of
the roll on each day during the roll period. The first row of the
transition period should be completely allocated to the front contract
and the last row should be completely allocated to the back contract.
holidays: array_like of datetime64[D]
Holidays to exclude when calculating business day offsets from the last
roll date. See numpy.busday_count.
validate_inputs: Boolean
Whether or not to validate ordering of contract_dates and transition.
**Caution** this is provided for speed however if this is set to False
and inputs are not defined properly algorithm may return incorrect
data.
Returns
-------
A list of tuples consisting of the generic instrument name, the tradeable
contract as a string, the weight on this contract as a float and the date
as a pandas.Timestamp.
Examples
--------
>>> import pandas as pd
>>> import mapping.mappings as mappings
>>> cols = pd.MultiIndex.from_product([["CL1", "CL2"], ['front', 'back']])
>>> idx = [-2, -1, 0]
>>> transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
... [0.0, 1.0, 0.0, 1.0]],
... index=idx, columns=cols)
>>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'),
... pd.Timestamp('2016-11-21'),
... pd.Timestamp('2016-12-20')],
... index=['CLX16', 'CLZ16', 'CLF17'])
>>> ts = pd.Timestamp('2016-10-19')
>>> wts = mappings.static_transition(ts, contract_dates, transition) | entailment |
def to_generics(instruments, weights):
    """Map tradeable instrument holdings to generic contract holdings.

    Solves the equation Ax = b where A is the weights matrix and b is
    the vector of instrument holdings. When Ax = b has no exact
    solution, the least squares solution x' is returned subject to the
    additional constraint sum(x') = sum(instruments).

    NOTE: integer solutions are not guaranteed. This is intended for
    use with contract numbers but can also be used with notional
    amounts of contracts.

    Parameters
    ----------
    instruments: pandas.Series
        Tradeable instrument holdings, where the index is the
        instrument name and the value is the number held.
    weights: pandas.DataFrame or dict
        Loadings of generic contracts (columns) on tradeable
        instruments (index) for a given date. If a dict is given, keys
        should be root generics, e.g. 'CL', and values should be
        pandas.DataFrames of loadings. The union of all indexes must be
        a superset of instruments.index.

    Returns
    -------
    A pandas.Series where the index is the generic and the value is the
    number of contracts, sorted by index.

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.mappings as mappings
    >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
    ...                    index=["CLX16", "CLZ16", "CLF17"],
    ...                    columns=["CL1", "CL2"])
    >>> instrs = pd.Series([10, 20, 10], index=["CLX16", "CLZ16", "CLF17"])
    >>> generics = mappings.to_generics(instrs, wts)
    """
    if not isinstance(weights, dict):
        weights = {"": weights}
    unmapped = instruments.index
    pieces = []
    for root in weights:
        wts = weights[root]
        # We may not hold positions in every instrument covered by this
        # set of weights, so keep only the rows with actual holdings.
        holdings = instruments.reindex(wts.index).dropna()
        wts = wts.loc[holdings.index]
        # Discard generics whose weights are all zero for the held
        # instruments; this avoids numerical rounding issues where the
        # solution puts epsilon weight on such a generic.
        wts = wts.loc[:, ~(wts == 0).all(axis=0)]
        unmapped = unmapped.difference(holdings.index)
        A = wts.values
        b = holdings.values
        x = cvxpy.Variable(A.shape[1])
        # Constrain the total allocation to equal the total holdings.
        constrs = [CVX_SUM(x) == np.sum(b)]
        obj = cvxpy.Minimize(cvxpy.sum_squares(A * x - b))
        prob = cvxpy.Problem(obj, constrs)
        prob.solve()
        vals = np.array(x.value).squeeze()
        pieces.append(pd.Series(vals, index=wts.columns.tolist()))
    if len(unmapped) > 0:
        raise KeyError("Unmapped instruments %s. weights must be a superset"
                       " of instruments" % unmapped.tolist())
    return pd.concat(pieces, axis=0).sort_index()
def synchronize_switch(self, switch_ip, expected_acls, expected_bindings):
    """Bring one switch's ACL configuration in line with expectations.

    Steps:
    1. Query the switch for its current ACLs and bindings
    2. Rewrite the expected bindings to account for switch LAGs
    3. Compute the commands that synchronize the ACLs
    4. Compute the commands that synchronize the ACL bindings
    5. Apply the combined command list to the switch
    """
    # Current ACL rules and interface mappings as reported by the switch
    actual_acls, actual_bindings = self._get_dynamic_acl_info(switch_ip)
    # LAG membership changes which interfaces the bindings apply to
    expected_bindings = self.adjust_bindings_for_lag(switch_ip,
                                                     expected_bindings)
    # Build the full synchronization command list: ACLs, then bindings
    commands = []
    commands += self.get_sync_acl_cmds(actual_acls, expected_acls)
    commands += self.get_sync_binding_cmds(actual_bindings,
                                           expected_bindings)
    # Push the resulting configuration to the switch
    self.run_openstack_sg_cmds(commands, self._switches.get(switch_ip))
def synchronize(self):
    """Sync security groups and their bindings from ML2 down to EOS."""
    # Desired ACL rule set derived from neutron security groups
    expected_acls = self.get_expected_acls()
    # Desired per-switch interface-to-ACL mappings
    all_expected_bindings = self.get_expected_bindings()
    # Sync each registered switch independently; a failure on one
    # switch must not prevent the others from being synced
    for switch_ip in self._switches:
        bindings = all_expected_bindings.get(switch_ip, [])
        try:
            self.synchronize_switch(switch_ip, expected_acls, bindings)
        except Exception:
            LOG.exception("Failed to sync SGs for %(switch)s",
                          {'switch': switch_ip})
def check_vlan_type_driver_commands(self):
    """Probe EOS for the CLI commands used by Arista's VLAN type driver.

    Runs the command that only the arista_vlan type driver needs and
    remembers it when EOS accepts it; otherwise records that the
    command is unavailable.
    """
    cmd = ['show openstack resource-pool vlan region %s uuid'
           % self.region]
    try:
        self._run_eos_cmds(cmd)
    except arista_exc.AristaRpcError:
        # Older EOS releases do not implement this command
        self.cli_commands['resource-pool'] = []
        LOG.warning(
            _LW("'resource-pool' command '%s' is not available on EOS"),
            cmd)
    else:
        self.cli_commands['resource-pool'] = cmd
def get_vlan_assignment_uuid(self):
    """Return the region's VLAN assignment UUID from CVX.

    :returns: string containing the region's vlan assignment UUID, or
        None when the resource-pool command is unsupported on this CVX
    """
    cmd = self.cli_commands['resource-pool']
    if not cmd:
        return None
    return self._run_eos_cmds(commands=cmd)[0]
def get_vlan_allocation(self):
    """Returns the status of the region's VLAN pool in CVX

    :returns: dictionary containing the assigned, allocated and
              available VLANs for the region (empty strings when the
              information is unavailable)
    """
    if not self.cli_commands['resource-pool']:
        # Fixed: the two literal fragments were previously concatenated
        # without a separating space ("...does not supportarista...")
        LOG.warning(_('The version of CVX you are using does not '
                      'support arista VLAN type driver.'))
    else:
        cmd = ['show openstack resource-pools region %s' % self.region]
        command_output = self._run_eos_cmds(cmd)
        if command_output:
            regions = command_output[0]['physicalNetwork']
            if self.region in regions.keys():
                return regions[self.region]['vlanPool']['default']
    # Fall through: unsupported CVX, no command output, or region not
    # present in the reported pools
    return {'assignedVlans': '',
            'availableVlans': '',
            'allocatedVlans': ''}
def _run_eos_cmds(self, commands, commands_to_log=None):
    """Send a list of Command API (CAPI) commands to EOS.

    :param commands: list of commands to execute on EOS.
    :param commands_to_log: commands to write to the log instead of
        ``commands`` (e.g. to hide sensitive data); when None, the
        commands themselves are logged.
    :returns: list of per-command results from EOS.
    :raises arista_exc.AristaRpcError: if no CVX master can be
        identified or communication with it fails.
    """
    # Re-establish which CVX node is master before every request,
    # starting from the last known value.
    try:
        if self._get_eos_master() is None:
            self.set_cvx_unavailable()
            raise arista_exc.AristaRpcError(
                msg="Failed to identify CVX master")
    except Exception:
        # Covers both lookup failures and the explicit raise above
        self.set_cvx_unavailable()
        raise
    self.set_cvx_available()
    log_cmds = commands_to_log if commands_to_log else commands
    LOG.info(_LI('Executing command on Arista EOS: %s'), log_cmds)
    # _send_eapi_req returns one result per command, or None when the
    # request could not be delivered
    response = self._send_eapi_req(cmds=commands, commands_to_log=log_cmds)
    if response is None:
        # Forget the cached server address so the next call performs
        # master discovery again
        self._server_ip = None
        self.set_cvx_unavailable()
        raise arista_exc.AristaRpcError(
            msg="Failed to communicate with CVX master")
    return response
def _build_command(self, cmds, sync=False):
    """Wrap openstack CLI commands with the mode-entering prefix.

    :param cmds: the openstack CLI commands to run in openstack config
        mode.
    :param sync: True when the region is being synced, which selects
        the region-sync variant of the region command.
    """
    if sync:
        region_cmd = self.cli_commands[const.CMD_REGION_SYNC]
    else:
        region_cmd = 'region %s' % self.region
    # Commands needed to reach the openstack configuration mode
    prefix = [
        'enable',
        'configure',
        'cvx',
        'service openstack',
        region_cmd,
    ]
    return prefix + list(cmds)
def _run_openstack_cmds(self, commands, commands_to_log=None, sync=False):
    """Run openstack-mode CAPI commands on EOS.

    The commands are wrapped with the prefix needed to enter the
    openstack configuration mode before being sent.

    :param commands: list of commands to execute on EOS.
    :param commands_to_log: commands to log instead of ``commands``;
        when None the commands themselves are logged.
    :param sync: True when the region is being synced.
    """
    full_command = self._build_command(commands, sync=sync)
    full_log_command = None
    if commands_to_log:
        full_log_command = self._build_command(commands_to_log, sync=sync)
    return self._run_eos_cmds(full_command, full_log_command)
def get_baremetal_physnet(self, context):
    """Return the physnet for a baremetal port, or None.

    Looks up the switch_id from the port's binding profile
    ('local_link_information') in the physical topology reported by
    EOS and returns the matching host's hostname as the physnet.

    Fixed: the previous docstring claimed a dictionary (mac to
    hostname mapping) was returned; the code returns a string or None.

    :param context: port context carrying ``current`` (the port dict)
        and ``host``.
    :returns: physnet name as a string, or None when no match is found
        or the EOS command fails.
    """
    port = context.current
    host_id = context.host
    cmd = ['show network physical-topology hosts']
    try:
        response = self._run_eos_cmds(cmd)
        binding_profile = port.get(portbindings.PROFILE, {})
        link_info = binding_profile.get('local_link_information', [])
        for link in link_info:
            switch_id = link.get('switch_id')
            # Match the switch_id against every host EOS knows about
            for host in response[0]['hosts'].values():
                if switch_id == host['name']:
                    physnet = host['hostname']
                    LOG.debug("get_physical_network: Physical Network for "
                              "%(host)s is %(physnet)s",
                              {'host': host_id, 'physnet': physnet})
                    return physnet
        LOG.debug("Physical network not found for %(host)s",
                  {'host': host_id})
    except Exception as exc:
        LOG.error(_LE('command %(cmd)s failed with '
                      '%(exc)s'), {'cmd': cmd, 'exc': exc})
    return None
def get_host_physnet(self, context):
    """Return the physnet for a given host, or None.

    Looks the host up in the physical topology neighbor table reported
    by EOS and returns the hostname of the first connected port.

    Fixed: the previous docstring claimed a dictionary was returned;
    the code returns a string or None.

    :param context: port context carrying ``host``.
    :returns: physnet name as a string, or None when the host is not
        found or the EOS command fails.
    """
    host_id = utils.hostname(context.host)
    cmd = ['show network physical-topology neighbors']
    try:
        response = self._run_eos_cmds(cmd)
        # Scan the neighbor table for an entry mentioning this host
        neighbors = response[0]['neighbors']
        for neighbor in neighbors:
            if host_id in neighbor:
                physnet = neighbors[neighbor]['toPort'][0]['hostname']
                LOG.debug("get_physical_network: Physical Network for "
                          "%(host)s is %(physnet)s", {'host': host_id,
                                                      'physnet': physnet})
                return physnet
        LOG.debug("Physical network not found for %(host)s",
                  {'host': host_id})
    except Exception as exc:
        LOG.error(_LE('command %(cmd)s failed with '
                      '%(exc)s'), {'cmd': cmd, 'exc': exc})
    return None
def filter_unnecessary_segments(query):
    """Restrict a segment query to segments that CVX cares about."""
    # Only segments of supported network types belonging to real
    # (non-empty project) networks are relevant on CVX
    return (query
            .join_if_necessary(models_v2.Network)
            .join_if_necessary(segment_models.NetworkSegment)
            .filter(models_v2.Network.project_id != '')
            .filter_network_type())
def filter_network_type(query):
    """Drop segments whose network type CVX does not support."""
    supported = utils.SUPPORTED_NETWORK_TYPES
    return query.filter(
        segment_models.NetworkSegment.network_type.in_(supported))
def filter_unbound_ports(query):
    """Drop ports that are not bound to both a host and a network."""
    port = models_v2.Port
    binding_level = ml2_models.PortBindingLevel
    # 'no_value' stands in for None so the SQLAlchemy comparisons below
    # do not trip pep8 E711 (comparison to None)
    no_value = None
    return (query
            .join_if_necessary(port)
            .join_if_necessary(binding_level)
            .filter(
                binding_level.host != '',
                port.device_id != no_value,
                port.network_id != no_value))
def filter_by_device_owner(query, device_owners=None):
    """Restrict a port query by device_owner prefix.

    Matches the given device_owner prefixes (or, by default, all
    prefixes supported by the arista ML2 plugin) while always excluding
    the prefixes the plugin explicitly does not support.
    """
    port = models_v2.Port
    owners = device_owners or utils.SUPPORTED_DEVICE_OWNERS
    wanted = [port.device_owner.ilike('%s%%' % owner)
              for owner in owners]
    excluded = [port.device_owner.notilike('%s%%' % owner)
                for owner in utils.UNSUPPORTED_DEVICE_OWNERS]
    # The port must not match any unsupported prefix and must match at
    # least one supported prefix
    return query.filter(and_(*excluded), or_(*wanted))
def filter_by_device_id(query):
    """Drop ports attached to devices CVX ignores.

    Currently used to filter out DHCP_RESERVED ports.
    """
    port = models_v2.Port
    excluded = [port.device_id.notilike('%s%%' % device_id)
                for device_id in utils.UNSUPPORTED_DEVICE_IDS]
    return query.filter(and_(*excluded))
def filter_by_vnic_type(query, vnic_type):
    """Restrict a port query to bindings of the given vnic_type.

    Currently only used to select baremetal ports.
    """
    port = models_v2.Port
    binding = ml2_models.PortBinding
    dist_binding = ml2_models.DistributedPortBinding
    # A port may be bound through a regular or a distributed binding,
    # so outer-join both tables and accept a match on either
    return (query
            .outerjoin_if_necessary(
                binding, port.id == binding.port_id)
            .outerjoin_if_necessary(
                dist_binding, port.id == dist_binding.port_id)
            .filter(
                (binding.vnic_type == vnic_type) |
                (dist_binding.vnic_type == vnic_type)))
def filter_unmanaged_physnets(query):
    """Drop ports bound to physnets this plugin does not manage."""
    managed_physnets = cfg.CONF.ml2_arista['managed_physnets']
    # An empty managed_physnets list means every physnet is managed,
    # so no filtering is needed
    if not managed_physnets:
        return query
    segment = segment_models.NetworkSegment
    return (query
            .join_if_necessary(segment)
            .filter(segment.physical_network.in_(managed_physnets)))
def filter_inactive_ports(query):
    """Keep only ports whose status is ACTIVE."""
    return query.filter(
        models_v2.Port.status == n_const.PORT_STATUS_ACTIVE)
def filter_unnecessary_ports(query, device_owners=None, vnic_type=None,
                             active=True):
    """Apply every port filter needed to keep only CVX-relevant ports."""
    # Unconditional filters: bound, relevant owner and device, managed
    # physnet
    filtered = query.filter_unbound_ports()
    filtered = filtered.filter_by_device_owner(device_owners)
    filtered = filtered.filter_by_device_id()
    filtered = filtered.filter_unmanaged_physnets()
    # Optional filters
    if active:
        filtered = filtered.filter_inactive_ports()
    if vnic_type:
        filtered = filtered.filter_by_vnic_type(vnic_type)
    return filtered
def get_tenants(tenant_id=None):
    """Return all project/tenant ids that may be relevant on CVX."""
    if tenant_id == '':
        # An explicitly empty tenant id can never match anything
        return []
    session = db.get_reader_session()
    with session.begin():
        found = set()
        # Projects are relevant if they own any network or any port
        for model in (models_v2.Network, models_v2.Port):
            query = session.query(model.project_id).filter(
                model.project_id != '')
            if tenant_id:
                query = query.filter(model.project_id == tenant_id)
            found.update(row[0] for row in query.distinct())
    return [{'project_id': project_id} for project_id in found]
def get_networks(network_id=None):
    """Return all networks that may be relevant on CVX."""
    session = db.get_reader_session()
    with session.begin():
        network = models_v2.Network
        # Networks without a project id are not relevant on CVX
        query = session.query(network).filter(network.project_id != '')
        if network_id:
            query = query.filter(network.id == network_id)
        return query.all()
def get_segments(segment_id=None):
    """Return all network segments that may be relevant on CVX."""
    session = db.get_reader_session()
    with session.begin():
        segment = segment_models.NetworkSegment
        query = session.query(segment).filter_unnecessary_segments()
        if segment_id:
            query = query.filter(segment.id == segment_id)
        return query.all()
def get_instances(device_owners=None, vnic_type=None, instance_id=None):
    """Return a filtered list of all instances in the neutron db."""
    session = db.get_reader_session()
    with session.begin():
        port = models_v2.Port
        binding = ml2_models.PortBinding
        # One row per device_id, together with its (possibly absent)
        # port binding
        query = (session
                 .query(port, binding)
                 .outerjoin(binding, port.id == binding.port_id)
                 .distinct(port.device_id)
                 .group_by(port.device_id)
                 .filter_unnecessary_ports(device_owners, vnic_type))
        if instance_id:
            query = query.filter(port.device_id == instance_id)
        return query.all()
def get_vm_instances(instance_id=None):
    """Return the VM instances that may be relevant on CVX."""
    return get_instances(
        device_owners=[n_const.DEVICE_OWNER_COMPUTE_PREFIX],
        vnic_type=portbindings.VNIC_NORMAL,
        instance_id=instance_id)
def get_ports(device_owners=None, vnic_type=None, port_id=None, active=True):
    """Return a filtered list of all ports in the neutron db."""
    session = db.get_reader_session()
    with session.begin():
        port = models_v2.Port
        query = (session
                 .query(port)
                 .filter_unnecessary_ports(device_owners, vnic_type,
                                           active))
        if port_id:
            query = query.filter(port.id == port_id)
        return query.all()
def get_vm_ports(port_id=None):
    """Return the VM (and trunk subport) ports relevant on CVX."""
    owners = [n_const.DEVICE_OWNER_COMPUTE_PREFIX,
              t_const.TRUNK_SUBPORT_OWNER]
    return get_ports(device_owners=owners,
                     vnic_type=portbindings.VNIC_NORMAL, port_id=port_id)
def get_port_bindings(binding_key=None):
    """Returns filtered list of port bindings that may be relevant on CVX

    This query is a little complex as we need all binding levels for any
    binding that has a single managed physnet, but we need to filter bindings
    that have no managed physnets. In order to achieve this, we join to the
    binding_level_model once to filter bindings with no managed levels,
    then a second time to get all levels for the remaining bindings.

    The loop at the end is a convenience to associate levels with bindings
    as a list. This would ideally be done through the use of an orm.relation,
    but due to some sqlalchemy limitations imposed to make OVO work, we can't
    add relations to existing models.

    :param binding_key: optional (port_id, host) or
        (port_id, (switch_id, switch_port)) pair used to narrow the
        result to a single binding.
    :returns: list of binding objects, each with a ``levels`` attribute
        attached holding its list of PortBindingLevel rows.
    """
    session = db.get_reader_session()
    with session.begin():
        binding_level_model = ml2_models.PortBindingLevel
        # Aliased so the second join (level retrieval) is independent of
        # the first join (level-based filtering)
        aliased_blm = aliased(ml2_models.PortBindingLevel)
        port_binding_model = ml2_models.PortBinding
        dist_binding_model = ml2_models.DistributedPortBinding
        # Regular bindings: join levels once to filter, then again
        # (aliased) to retrieve every level of the surviving bindings
        bindings = (session.query(port_binding_model, aliased_blm)
                    .join(binding_level_model,
                          and_(
                              port_binding_model.port_id ==
                              binding_level_model.port_id,
                              port_binding_model.host ==
                              binding_level_model.host))
                    .filter_unnecessary_ports()
                    .join(aliased_blm,
                          and_(port_binding_model.port_id ==
                               aliased_blm.port_id,
                               port_binding_model.host ==
                               aliased_blm.host)))
        # Distributed bindings: same double join, restricted to ACTIVE
        dist_bindings = (session.query(dist_binding_model, aliased_blm)
                         .join(
                             binding_level_model,
                             and_(dist_binding_model.port_id ==
                                  binding_level_model.port_id,
                                  dist_binding_model.host ==
                                  binding_level_model.host))
                         .filter_unnecessary_ports()
                         .filter(dist_binding_model.status ==
                                 n_const.PORT_STATUS_ACTIVE)
                         .join(aliased_blm,
                               and_(dist_binding_model.port_id ==
                                    aliased_blm.port_id,
                                    dist_binding_model.host ==
                                    aliased_blm.host)))
        if binding_key:
            port_id = binding_key[0]
            # A tuple second element identifies a (switch_id, port) pair
            # matched against the binding profile; otherwise it is a
            # host name
            if type(binding_key[1]) == tuple:
                switch_id = binding_key[1][0]
                switch_port = binding_key[1][1]
                bindings = bindings.filter(and_(
                    port_binding_model.port_id == port_id,
                    port_binding_model.profile.ilike('%%%s%%' % switch_id),
                    port_binding_model.profile.ilike('%%%s%%' % switch_port)))
                dist_bindings = dist_bindings.filter(and_(
                    dist_binding_model.port_id == port_id,
                    dist_binding_model.profile.ilike('%%%s%%' % switch_id),
                    dist_binding_model.profile.ilike('%%%s%%' % switch_port)))
            else:
                host_id = binding_key[1]
                bindings = bindings.filter(and_(
                    port_binding_model.port_id == port_id,
                    port_binding_model.host == host_id))
                dist_bindings = dist_bindings.filter(and_(
                    dist_binding_model.port_id == port_id,
                    dist_binding_model.host == host_id))
        # Group the levels by binding, then attach each group to its
        # binding as a list
        binding_levels = collections.defaultdict(list)
        for binding, level in bindings.all() + dist_bindings.all():
            binding_levels[binding].append(level)
        bindings_with_levels = list()
        for binding, levels in binding_levels.items():
            binding.levels = levels
            bindings_with_levels.append(binding)
        return bindings_with_levels | Returns filtered list of port bindings that may be relevant on CVX
This query is a little complex as we need all binding levels for any
binding that has a single managed physnet, but we need to filter bindings
that have no managed physnets. In order to achieve this, we join to the
binding_level_model once to filter bindings with no managed levels,
then a second time to get all levels for the remaining bindings.
The loop at the end is a convenience to associate levels with bindings
as a list. This would ideally be done through the use of an orm.relation,
but due to some sqlalchemy limitations imposed to make OVO work, we can't
add relations to existing models. | entailment |
def tenant_provisioned(tenant_id):
    """Return True if any networks or ports exist for a tenant."""
    session = db.get_reader_session()
    with session.begin():
        # The tenant counts as provisioned if either table has rows
        return any(
            session.query(model).filter(
                model.tenant_id == tenant_id).count()
            for model in (models_v2.Network, models_v2.Port))
def instance_provisioned(device_id):
    """Return True if any ports exist for the given instance."""
    session = db.get_reader_session()
    with session.begin():
        port = models_v2.Port
        count = session.query(port).filter(
            port.device_id == device_id).count()
    return bool(count)
def port_provisioned(port_id):
    """Return True if the port still exists."""
    session = db.get_reader_session()
    with session.begin():
        port = models_v2.Port
        count = session.query(port).filter(port.id == port_id).count()
    return bool(count)
def get_parent(port_id):
    """Return a trunk subport's parent port (empty dict if none)."""
    session = db.get_reader_session()
    result = dict()
    with session.begin():
        subport_model = trunk_models.SubPort
        trunk_model = trunk_models.Trunk
        subport = (session.query(subport_model)
                   .filter(subport_model.port_id == port_id).first())
        if subport is not None:
            trunk = (session.query(trunk_model)
                     .filter(trunk_model.id == subport.trunk_id).first())
            if trunk is not None:
                # The trunk's own port is the subport's parent
                result = get_ports(port_id=trunk.port.id,
                                   active=False)[0]
    return result
def get_port_binding_level(filters):
    """Return PortBindingLevel rows matching *filters*, ordered by level."""
    session = db.get_reader_session()
    with session.begin():
        level_model = ml2_models.PortBindingLevel
        return (session.query(level_model)
                .filter_by(**filters)
                .order_by(level_model.level)
                .all())
def read(file, frames=-1, start=0, stop=None, dtype='float64', always_2d=False,
         fill_value=None, out=None, samplerate=None, channels=None,
         format=None, subtype=None, endian=None, closefd=True):
    """Read data from a sound file and return it as a NumPy array.

    By default the whole file is read from the beginning. The region to
    read can be chosen either with `start` and `frames` or with `start`
    and `stop`. If fewer frames remain in the file than requested, the
    remainder is filled with `fill_value`; if no `fill_value` is given,
    a smaller array is returned instead.

    Parameters
    ----------
    file : str or int or file-like object
        The file to read from. See :class:`SoundFile` for details.
    frames : int, optional
        Number of frames to read; a negative value means "to the end of
        the file". Not allowed together with `stop`.
    start : int, optional
        First frame to read; a negative value counts from the end.
    stop : int, optional
        One past the last frame to read; a negative value counts from
        the end. Not allowed together with `frames`.
    dtype : {'float64', 'float32', 'int32', 'int16'}, optional
        Data type of the returned array (default ``'float64'``).
        Floating point audio data is typically in the range from
        ``-1.0`` to ``1.0``; integer data spans the full range of the
        integer type.

        .. note:: Reading int values from a float file will *not*
                  rescale the data to [-1.0, 1.0).

    Returns
    -------
    audiodata : numpy.ndarray or type(out)
        A (frames x channels) array, or a one-dimensional array for
        mono files unless ``always_2d=True``. If `out` was given it is
        returned, possibly as a view limited to the valid frames.
    samplerate : int
        The sample rate of the audio file.

    Other Parameters
    ----------------
    always_2d : bool, optional
        Always return a two-dimensional array, even for mono files.
    fill_value : float, optional
        Value used to pad the output when more frames are requested
        than the file contains.
    out : numpy.ndarray or subclass, optional
        Pre-allocated array to write into; `dtype` and `always_2d` are
        then silently ignored, and `frames` defaults to the length of
        `out`.
    samplerate, channels, format, subtype, endian, closefd
        See :class:`SoundFile`.

    Examples
    --------
    >>> import soundfile as sf
    >>> data, samplerate = sf.read('stereo_file.wav')
    >>> samplerate
    44100
    """
    with SoundFile(file, 'r', samplerate, channels,
                   subtype, endian, format, closefd) as sound_file:
        frames = sound_file._prepare_read(start, stop, frames)
        audiodata = sound_file.read(frames, dtype, always_2d, fill_value,
                                    out)
        return audiodata, sound_file.samplerate
def write(file, data, samplerate, subtype=None, endian=None, format=None,
          closefd=True):
    """Write data to a sound file.

    .. note:: If `file` exists, it will be truncated and overwritten!

    Parameters
    ----------
    file : str or int or file-like object
        The file to write to.  See :class:`SoundFile` for details.
    data : array_like
        The data to write.  Usually two-dimensional (frames x
        channels), but one-dimensional `data` can be used for mono
        files.  Only the data types ``'float64'``, ``'float32'``,
        ``'int32'`` and ``'int16'`` are supported.

        .. note:: The data type of `data` does **not** select the data
                  type of the written file.  Audio data will be
                  converted to the given `subtype`.  Writing int values
                  to a float file will *not* scale the values to
                  [-1.0, 1.0).  If you write the value ``np.array([42],
                  dtype='int32')``, to a ``subtype='FLOAT'`` file, the
                  file will then contain ``np.array([42.],
                  dtype='float32')``.
    samplerate : int
        The sample rate of the audio data.
    subtype : str, optional
        See :func:`default_subtype` for the default value and
        :func:`available_subtypes` for all possible values.

    Other Parameters
    ----------------
    format, endian, closefd
        See :class:`SoundFile`.

    Examples
    --------
    Write 10 frames of random data to a new file:

    >>> import numpy as np
    >>> import soundfile as sf
    >>> sf.write('stereo_file.wav', np.random.randn(10, 2), 44100, 'PCM_24')

    """
    import numpy as np
    data = np.asarray(data)
    # One-dimensional data is treated as a single (mono) channel:
    channels = 1 if data.ndim == 1 else data.shape[1]
    with SoundFile(file, 'w', samplerate, channels,
                   subtype, endian, format, closefd) as soundfile:
        soundfile.write(data)
.. note:: If `file` exists, it will be truncated and overwritten!
Parameters
----------
file : str or int or file-like object
The file to write to. See :class:`SoundFile` for details.
data : array_like
The data to write. Usually two-dimensional (frames x channels),
but one-dimensional `data` can be used for mono files.
Only the data types ``'float64'``, ``'float32'``, ``'int32'``
and ``'int16'`` are supported.
.. note:: The data type of `data` does **not** select the data
type of the written file. Audio data will be
converted to the given `subtype`. Writing int values
to a float file will *not* scale the values to
[-1.0, 1.0). If you write the value ``np.array([42],
dtype='int32')``, to a ``subtype='FLOAT'`` file, the
file will then contain ``np.array([42.],
dtype='float32')``.
samplerate : int
The sample rate of the audio data.
subtype : str, optional
See :func:`default_subtype` for the default value and
:func:`available_subtypes` for all possible values.
Other Parameters
----------------
format, endian, closefd
See :class:`SoundFile`.
Examples
--------
Write 10 frames of random data to a new file:
>>> import numpy as np
>>> import soundfile as sf
>>> sf.write('stereo_file.wav', np.random.randn(10, 2), 44100, 'PCM_24') | entailment |
def blocks(file, blocksize=None, overlap=0, frames=-1, start=0, stop=None,
           dtype='float64', always_2d=False, fill_value=None, out=None,
           samplerate=None, channels=None,
           format=None, subtype=None, endian=None, closefd=True):
    """Return a generator for block-wise reading.

    By default, iteration starts at the beginning and stops at the end
    of the file.  Use `start` to start at a later position and `frames`
    or `stop` to stop earlier.

    If you stop iterating over the generator before it's exhausted,
    the sound file is not closed.  This is normally not a problem
    because the file is opened in read-only mode.  To close the file
    properly, the generator's ``close()`` method can be called.

    Parameters
    ----------
    file : str or int or file-like object
        The file to read from.  See :class:`SoundFile` for details.
    blocksize : int
        The number of frames to read per block.
        Either this or `out` must be given.
    overlap : int, optional
        The number of frames to rewind between each block.

    Yields
    ------
    numpy.ndarray or type(out)
        Blocks of audio data.
        If `out` was given, and the requested frames are not an integer
        multiple of the length of `out`, and no `fill_value` was given,
        the last block will be a smaller view into `out`.

    Other Parameters
    ----------------
    frames, start, stop
        See :func:`read`.
    dtype : {'float64', 'float32', 'int32', 'int16'}, optional
        See :func:`read`.
    always_2d, fill_value, out
        See :func:`read`.
    samplerate, channels, format, subtype, endian, closefd
        See :class:`SoundFile`.

    Examples
    --------
    >>> import soundfile as sf
    >>> for block in sf.blocks('stereo_file.wav', blocksize=1024):
    >>>     pass  # do something with 'block'

    """
    with SoundFile(file, 'r', samplerate, channels,
                   subtype, endian, format, closefd) as f:
        frames = f._prepare_read(start, stop, frames)
        # Yielding inside the "with" block keeps the file open between
        # blocks; closing (or exhausting) the generator leaves the
        # "with" block and thereby closes the file.
        for block in f.blocks(blocksize, overlap, frames,
                              dtype, always_2d, fill_value, out):
            yield block
By default, iteration starts at the beginning and stops at the end
of the file. Use `start` to start at a later position and `frames`
or `stop` to stop earlier.
If you stop iterating over the generator before it's exhausted,
the sound file is not closed. This is normally not a problem
because the file is opened in read-only mode. To close the file
properly, the generator's ``close()`` method can be called.
Parameters
----------
file : str or int or file-like object
The file to read from. See :class:`SoundFile` for details.
blocksize : int
The number of frames to read per block.
Either this or `out` must be given.
overlap : int, optional
The number of frames to rewind between each block.
Yields
------
numpy.ndarray or type(out)
Blocks of audio data.
If `out` was given, and the requested frames are not an integer
multiple of the length of `out`, and no `fill_value` was given,
the last block will be a smaller view into `out`.
Other Parameters
----------------
frames, start, stop
See :func:`read`.
dtype : {'float64', 'float32', 'int32', 'int16'}, optional
See :func:`read`.
always_2d, fill_value, out
See :func:`read`.
samplerate, channels, format, subtype, endian, closefd
See :class:`SoundFile`.
Examples
--------
>>> import soundfile as sf
>>> for block in sf.blocks('stereo_file.wav', blocksize=1024):
>>> pass # do something with 'block' | entailment |
def available_subtypes(format=None):
    """Return a dictionary of available subtypes.

    Parameters
    ----------
    format : str
        If given, only compatible subtypes are returned.

    Examples
    --------
    >>> import soundfile as sf
    >>> sf.available_subtypes('FLAC')
    {'PCM_24': 'Signed 24 bit PCM',
     'PCM_16': 'Signed 16 bit PCM',
     'PCM_S8': 'Signed 8 bit PCM'}

    """
    all_subtypes = _available_formats_helper(_snd.SFC_GET_FORMAT_SUBTYPE_COUNT,
                                             _snd.SFC_GET_FORMAT_SUBTYPE)
    # Without a major format, every subtype known to libsndfile is kept:
    return {subtype: description for subtype, description in all_subtypes
            if format is None or check_format(format, subtype)}
Parameters
----------
format : str
If given, only compatible subtypes are returned.
Examples
--------
>>> import soundfile as sf
>>> sf.available_subtypes('FLAC')
{'PCM_24': 'Signed 24 bit PCM',
'PCM_16': 'Signed 16 bit PCM',
'PCM_S8': 'Signed 8 bit PCM'} | entailment |
def check_format(format, subtype=None, endian=None):
    """Check if the combination of format/subtype/endian is valid.

    Examples
    --------
    >>> import soundfile as sf
    >>> sf.check_format('WAV', 'PCM_24')
    True
    >>> sf.check_format('FLAC', 'VORBIS')
    False

    """
    # _format_int() raises on any invalid combination; a successful
    # call yields a non-zero libsndfile format ID.
    try:
        format_id = _format_int(format, subtype, endian)
    except (ValueError, TypeError):
        return False
    return bool(format_id)
Examples
--------
>>> import soundfile as sf
>>> sf.check_format('WAV', 'PCM_24')
True
>>> sf.check_format('FLAC', 'VORBIS')
False | entailment |
def _error_check(err, prefix=""):
    """Pretty-print a numerical error code if there is an error."""
    if err == 0:
        return
    # Ask libsndfile for the human-readable error message:
    message = _ffi.string(_snd.sf_error_number(err)).decode('utf-8', 'replace')
    raise RuntimeError(prefix + message)
def _format_int(format, subtype, endian):
    """Return numeric ID for given format|subtype|endian combo.

    Raises TypeError for wrongly-typed or missing arguments and
    ValueError for unknown names or combinations rejected by
    libsndfile's sf_format_check().
    """
    # Validate the mandatory major format first:
    result = _check_format(format)
    if subtype is None:
        subtype = default_subtype(format)
        if subtype is None:
            raise TypeError(
                "No default subtype for major format {0!r}".format(format))
    elif not isinstance(subtype, (_unicode, str)):
        raise TypeError("Invalid subtype: {0!r}".format(subtype))
    try:
        # The numeric ID is a bitwise OR of major format and subtype:
        result |= _subtypes[subtype.upper()]
    except KeyError:
        raise ValueError("Unknown subtype: {0!r}".format(subtype))
    if endian is None:
        endian = 'FILE'
    elif not isinstance(endian, (_unicode, str)):
        raise TypeError("Invalid endian-ness: {0!r}".format(endian))
    try:
        result |= _endians[endian.upper()]
    except KeyError:
        raise ValueError("Unknown endian-ness: {0!r}".format(endian))
    # Finally, let libsndfile itself verify the combination:
    info = _ffi.new("SF_INFO*")
    info.format = result
    info.channels = 1
    if _snd.sf_format_check(info) == _snd.SF_FALSE:
        raise ValueError(
            "Invalid combination of format, subtype and endian")
    return result
def _check_mode(mode):
    """Check if mode is valid and return its integer representation."""
    if not isinstance(mode, (_unicode, str)):
        raise TypeError("Invalid mode: {0!r}".format(mode))
    characters = set(mode)
    # Reject unknown mode characters as well as duplicates:
    if characters.difference('xrwb+') or len(mode) > len(characters):
        raise ValueError("Invalid mode: {0!r}".format(mode))
    if len(characters.intersection('xrw')) != 1:
        raise ValueError("mode must contain exactly one of 'xrw'")
    # '+' always means read/write, regardless of the base mode:
    if '+' in characters:
        return _snd.SFM_RDWR
    if 'r' in characters:
        return _snd.SFM_READ
    return _snd.SFM_WRITE
def _create_info_struct(file, mode, samplerate, channels,
                        format, subtype, endian):
    """Check arguments and create SF_INFO struct."""
    # Remember whether the caller passed a format explicitly; the
    # "existing file" check below must not be fooled by the format we
    # derive from the file name ourselves.
    original_format = format
    if format is None:
        format = _get_format_from_filename(file, mode)
        assert isinstance(format, (_unicode, str))
    else:
        _check_format(format)
    info = _ffi.new("SF_INFO*")
    # New files and headerless RAW files need explicit parameters:
    if 'r' not in mode or format.upper() == 'RAW':
        if samplerate is None:
            raise TypeError("samplerate must be specified")
        info.samplerate = samplerate
        if channels is None:
            raise TypeError("channels must be specified")
        info.channels = channels
        info.format = _format_int(format, subtype, endian)
    else:
        # Existing files provide these values via their header, so
        # passing any of them is an error:
        if any(arg is not None for arg in (
                samplerate, channels, original_format, subtype, endian)):
            raise TypeError("Not allowed for existing files (except 'RAW'): "
                            "samplerate, channels, format, subtype, endian")
    return info
def _get_format_from_filename(file, mode):
    """Return a format string obtained from file (or file.name).

    If file already exists (= read mode), an empty string is returned
    on error.  If not, an exception is raised.
    The return type will always be str or unicode (even if
    file/file.name is a bytes object).

    """
    format = ''
    # File-like objects are identified by their .name attribute:
    file = getattr(file, 'name', file)
    try:
        # Fails if file is not a (Unicode/byte) string:
        format = _os.path.splitext(file)[-1][1:]
        # bytes -> unicode; a Python 3 str raises AttributeError here:
        format = format.decode('utf-8', 'replace')
    except Exception:
        pass
    if 'r' not in mode and format.upper() not in _formats:
        raise TypeError("No format specified and unable to get format from "
                        "file extension: {0!r}".format(file))
    return format
If file already exists (= read mode), an empty string is returned on
error. If not, an exception is raised.
The return type will always be str or unicode (even if
file/file.name is a bytes object). | entailment |
def _format_str(format_int):
    """Return the string representation of a given numeric format."""
    # Search majors, subtypes and endian-nesses for a matching ID:
    for mapping in (_formats, _subtypes, _endians):
        for name, number in mapping.items():
            if number == format_int:
                return name
    return 'n/a'
def _format_info(format_int, format_flag=_snd.SFC_GET_FORMAT_INFO):
    """Return the ID and short description of a given format."""
    info = _ffi.new("SF_FORMAT_INFO*")
    info.format = format_int
    # libsndfile fills in the struct's format and name fields:
    _snd.sf_command(_ffi.NULL, format_flag, info,
                    _ffi.sizeof("SF_FORMAT_INFO"))
    if info.name:
        description = _ffi.string(info.name).decode('utf-8', 'replace')
    else:
        description = ""
    return _format_str(info.format), description
def _available_formats_helper(count_flag, format_flag):
    """Helper for available_formats() and available_subtypes()."""
    # First ask libsndfile how many entries there are ...
    count_ptr = _ffi.new("int*")
    _snd.sf_command(_ffi.NULL, count_flag, count_ptr, _ffi.sizeof("int"))
    # ... then look each one up by index:
    for index in range(count_ptr[0]):
        yield _format_info(index, format_flag)
def _check_format(format_str):
    """Check if `format_str` is valid and return format ID."""
    if not isinstance(format_str, (_unicode, str)):
        raise TypeError("Invalid format: {0!r}".format(format_str))
    # Major format names are stored upper-case:
    key = format_str.upper()
    if key not in _formats:
        raise ValueError("Unknown format: {0!r}".format(format_str))
    return _formats[key]
def _has_virtual_io_attrs(file, mode_int):
    """Check if file has all the necessary attributes for virtual IO."""
    readonly = mode_int == _snd.SFM_READ
    writeonly = mode_int == _snd.SFM_WRITE
    # Seeking is always required:
    if not hasattr(file, 'seek') or not hasattr(file, 'tell'):
        return False
    # Writing is only required unless the file is opened read-only:
    if not readonly and not hasattr(file, 'write'):
        return False
    # Reading is only required unless the file is opened write-only:
    return writeonly or hasattr(file, 'read') or hasattr(file, 'readinto')
def extra_info(self):
    """Retrieve the log string generated when opening the file."""
    # 16 kB is ample for libsndfile's log output:
    log_buffer = _ffi.new("char[]", 2**14)
    _snd.sf_command(self._file, _snd.SFC_GET_LOG_INFO,
                    log_buffer, _ffi.sizeof(log_buffer))
    return _ffi.string(log_buffer).decode('utf-8', 'replace')
def seek(self, frames, whence=SEEK_SET):
    """Set the read/write position.

    Parameters
    ----------
    frames : int
        The frame index or offset to seek.
    whence : {SEEK_SET, SEEK_CUR, SEEK_END}, optional
        By default (``whence=SEEK_SET``), `frames` are counted from
        the beginning of the file.
        ``whence=SEEK_CUR`` seeks from the current position
        (positive and negative values are allowed for `frames`).
        ``whence=SEEK_END`` seeks from the end (use negative value
        for `frames`).

    Returns
    -------
    int
        The new absolute read/write position in frames.

    Examples
    --------
    >>> from soundfile import SoundFile, SEEK_END
    >>> myfile = SoundFile('stereo_file.wav')

    Seek to the beginning of the file:

    >>> myfile.seek(0)
    0

    Seek to the end of the file:

    >>> myfile.seek(0, SEEK_END)
    44100  # this is the file length

    """
    self._check_if_closed()
    position = _snd.sf_seek(self._file, frames, whence)
    # sf_seek() signals failure via libsndfile's error state, which
    # _error_check() converts to a RuntimeError:
    _error_check(self._errorcode)
    return position
Parameters
----------
frames : int
The frame index or offset to seek.
whence : {SEEK_SET, SEEK_CUR, SEEK_END}, optional
By default (``whence=SEEK_SET``), `frames` are counted from
the beginning of the file.
``whence=SEEK_CUR`` seeks from the current position
(positive and negative values are allowed for `frames`).
``whence=SEEK_END`` seeks from the end (use negative value
for `frames`).
Returns
-------
int
The new absolute read/write position in frames.
Examples
--------
>>> from soundfile import SoundFile, SEEK_END
>>> myfile = SoundFile('stereo_file.wav')
Seek to the beginning of the file:
>>> myfile.seek(0)
0
Seek to the end of the file:
>>> myfile.seek(0, SEEK_END)
44100 # this is the file length | entailment |
def read(self, frames=-1, dtype='float64', always_2d=False,
         fill_value=None, out=None):
    """Read from the file and return data as NumPy array.

    Reads the given number of frames in the given data format
    starting at the current read/write position.  This advances the
    read/write position by the same number of frames.
    By default, all frames from the current read/write position to
    the end of the file are returned.
    Use :meth:`.seek` to move the current read/write position.

    Parameters
    ----------
    frames : int, optional
        The number of frames to read.  If ``frames < 0``, the whole
        rest of the file is read.
    dtype : {'float64', 'float32', 'int32', 'int16'}, optional
        Data type of the returned array, by default ``'float64'``.
        Floating point audio data is typically in the range from
        ``-1.0`` to ``1.0``.  Integer data is in the range from
        ``-2**15`` to ``2**15-1`` for ``'int16'`` and from
        ``-2**31`` to ``2**31-1`` for ``'int32'``.

        .. note:: Reading int values from a float file will *not*
                  scale the data to [-1.0, 1.0). If the file contains
                  ``np.array([42.6], dtype='float32')``, you will read
                  ``np.array([43], dtype='int32')`` for
                  ``dtype='int32'``.

    Returns
    -------
    audiodata : numpy.ndarray or type(out)
        A two-dimensional NumPy (frames x channels) array is
        returned.  If the sound file has only one channel, a
        one-dimensional array is returned.  Use ``always_2d=True``
        to return a two-dimensional array anyway.

        If `out` was specified, it is returned.  If `out` has more
        frames than available in the file (or if `frames` is
        smaller than the length of `out`) and no `fill_value` is
        given, then only a part of `out` is overwritten and a view
        containing all valid frames is returned.

    Other Parameters
    ----------------
    always_2d : bool, optional
        By default, reading a mono sound file will return a
        one-dimensional array.  With ``always_2d=True``, audio data
        is always returned as a two-dimensional array, even if the
        audio file has only one channel.
    fill_value : float, optional
        If more frames are requested than available in the file,
        the rest of the output is filled with `fill_value`.  If
        `fill_value` is not specified, a smaller array is
        returned.
    out : numpy.ndarray or subclass, optional
        If `out` is specified, the data is written into the given
        array instead of creating a new array.  In this case, the
        arguments `dtype` and `always_2d` are silently ignored!  If
        `frames` is not given, it is obtained from the length of
        `out`.

    Examples
    --------
    >>> from soundfile import SoundFile
    >>> myfile = SoundFile('stereo_file.wav')

    Reading 3 frames from a stereo file:

    >>> myfile.read(3)
    array([[ 0.71329652,  0.06294799],
           [-0.26450912, -0.38874483],
           [ 0.67398441, -0.11516333]])
    >>> myfile.close()

    See Also
    --------
    buffer_read, .write

    """
    if out is None:
        # Allocate a fresh array of exactly the requested size:
        frames = self._check_frames(frames, fill_value)
        out = self._create_empty_array(frames, always_2d, dtype)
    else:
        # Clamp the frame count to the capacity of the given buffer:
        if frames < 0 or frames > len(out):
            frames = len(out)
    frames = self._array_io('read', out, frames)
    # Fewer frames than requested were available:
    if len(out) > frames:
        if fill_value is None:
            # Return only the valid part as a view:
            out = out[:frames]
        else:
            out[frames:] = fill_value
    return out
Reads the given number of frames in the given data format
starting at the current read/write position. This advances the
read/write position by the same number of frames.
By default, all frames from the current read/write position to
the end of the file are returned.
Use :meth:`.seek` to move the current read/write position.
Parameters
----------
frames : int, optional
The number of frames to read. If ``frames < 0``, the whole
rest of the file is read.
dtype : {'float64', 'float32', 'int32', 'int16'}, optional
Data type of the returned array, by default ``'float64'``.
Floating point audio data is typically in the range from
``-1.0`` to ``1.0``. Integer data is in the range from
``-2**15`` to ``2**15-1`` for ``'int16'`` and from
``-2**31`` to ``2**31-1`` for ``'int32'``.
.. note:: Reading int values from a float file will *not*
scale the data to [-1.0, 1.0). If the file contains
``np.array([42.6], dtype='float32')``, you will read
``np.array([43], dtype='int32')`` for
``dtype='int32'``.
Returns
-------
audiodata : numpy.ndarray or type(out)
A two-dimensional NumPy (frames x channels) array is
returned. If the sound file has only one channel, a
one-dimensional array is returned. Use ``always_2d=True``
to return a two-dimensional array anyway.
If `out` was specified, it is returned. If `out` has more
frames than available in the file (or if `frames` is
smaller than the length of `out`) and no `fill_value` is
given, then only a part of `out` is overwritten and a view
containing all valid frames is returned. numpy.ndarray or
type(out)
Other Parameters
----------------
always_2d : bool, optional
By default, reading a mono sound file will return a
one-dimensional array. With ``always_2d=True``, audio data
is always returned as a two-dimensional array, even if the
audio file has only one channel.
fill_value : float, optional
If more frames are requested than available in the file,
the rest of the output is be filled with `fill_value`. If
`fill_value` is not specified, a smaller array is
returned.
out : numpy.ndarray or subclass, optional
If `out` is specified, the data is written into the given
array instead of creating a new array. In this case, the
arguments `dtype` and `always_2d` are silently ignored! If
`frames` is not given, it is obtained from the length of
`out`.
Examples
--------
>>> from soundfile import SoundFile
>>> myfile = SoundFile('stereo_file.wav')
Reading 3 frames from a stereo file:
>>> myfile.read(3)
array([[ 0.71329652, 0.06294799],
[-0.26450912, -0.38874483],
[ 0.67398441, -0.11516333]])
>>> myfile.close()
See Also
--------
buffer_read, .write | entailment |
def buffer_read(self, frames=-1, dtype=None):
    """Read from the file and return data as buffer object.

    Reads the given number of `frames` in the given data format
    starting at the current read/write position.  This advances the
    read/write position by the same number of frames.
    By default, all frames from the current read/write position to
    the end of the file are returned.
    Use :meth:`.seek` to move the current read/write position.

    Parameters
    ----------
    frames : int, optional
        The number of frames to read.  If `frames < 0`, the whole
        rest of the file is read.
    dtype : {'float64', 'float32', 'int32', 'int16'}
        Audio data will be converted to the given data type.

    Returns
    -------
    buffer
        A buffer containing the read data.

    See Also
    --------
    buffer_read_into, .read, buffer_write

    """
    frames = self._check_frames(frames, fill_value=None)
    ctype = self._check_dtype(dtype)
    # Allocate one C value per sample (frames times channels):
    raw_data = _ffi.new(ctype + '[]', frames * self.channels)
    frames_read = self._cdata_io('read', raw_data, ctype, frames)
    assert frames_read == frames
    return _ffi.buffer(raw_data)
Reads the given number of `frames` in the given data format
starting at the current read/write position. This advances the
read/write position by the same number of frames.
By default, all frames from the current read/write position to
the end of the file are returned.
Use :meth:`.seek` to move the current read/write position.
Parameters
----------
frames : int, optional
The number of frames to read. If `frames < 0`, the whole
rest of the file is read.
dtype : {'float64', 'float32', 'int32', 'int16'}
Audio data will be converted to the given data type.
Returns
-------
buffer
A buffer containing the read data.
See Also
--------
buffer_read_into, .read, buffer_write | entailment |
def buffer_read_into(self, buffer, dtype):
    """Read from the file into a given buffer object.

    Fills the given `buffer` with frames in the given data format
    starting at the current read/write position (which can be
    changed with :meth:`.seek`) until the buffer is full or the end
    of the file is reached.  This advances the read/write position
    by the number of frames that were read.

    Parameters
    ----------
    buffer : writable buffer
        Audio frames from the file are written to this buffer.
    dtype : {'float64', 'float32', 'int32', 'int16'}
        The data type of `buffer`.

    Returns
    -------
    int
        The number of frames that were read from the file.
        This can be less than the size of `buffer`.
        The rest of the buffer is not filled with meaningful data.

    See Also
    --------
    buffer_read, .read

    """
    ctype = self._check_dtype(dtype)
    raw_data, capacity = self._check_buffer(buffer, ctype)
    # May read fewer frames than fit into the buffer (end of file):
    return self._cdata_io('read', raw_data, ctype, capacity)
Fills the given `buffer` with frames in the given data format
starting at the current read/write position (which can be
changed with :meth:`.seek`) until the buffer is full or the end
of the file is reached. This advances the read/write position
by the number of frames that were read.
Parameters
----------
buffer : writable buffer
Audio frames from the file are written to this buffer.
dtype : {'float64', 'float32', 'int32', 'int16'}
The data type of `buffer`.
Returns
-------
int
The number of frames that were read from the file.
This can be less than the size of `buffer`.
The rest of the buffer is not filled with meaningful data.
See Also
--------
buffer_read, .read | entailment |
def write(self, data):
    """Write audio data from a NumPy array to the file.

    Writes a number of frames at the read/write position to the
    file.  This also advances the read/write position by the same
    number of frames and enlarges the file if necessary.

    Parameters
    ----------
    data : array_like
        The data to write.  Usually two-dimensional (frames x
        channels), but one-dimensional `data` can be used for mono
        files.  Only the data types ``'float64'``, ``'float32'``,
        ``'int32'`` and ``'int16'`` are supported.

        .. note:: The data type of `data` does **not** select the
                  data type of the written file. Audio data will be
                  converted to the given `subtype`. Writing int values
                  to a float file will *not* scale the values to
                  [-1.0, 1.0). If you write the value ``np.array([42],
                  dtype='int32')``, to a ``subtype='FLOAT'`` file, the
                  file will then contain ``np.array([42.],
                  dtype='float32')``.

    Examples
    --------
    >>> import numpy as np
    >>> from soundfile import SoundFile
    >>> myfile = SoundFile('stereo_file.wav')

    Write 10 frames of random data to a new file:

    >>> with SoundFile('stereo_file.wav', 'w', 44100, 2, 'PCM_24') as f:
    >>>     f.write(np.random.randn(10, 2))

    See Also
    --------
    buffer_write, .read

    """
    import numpy as np
    # ascontiguousarray() only copies when the layout requires it:
    contiguous = np.ascontiguousarray(data)
    written = self._array_io('write', contiguous, len(contiguous))
    assert written == len(contiguous)
    self._update_frames(written)
Writes a number of frames at the read/write position to the
file. This also advances the read/write position by the same
number of frames and enlarges the file if necessary.
Note that writing int values to a float file will *not* scale
the values to [-1.0, 1.0). If you write the value
``np.array([42], dtype='int32')``, to a ``subtype='FLOAT'``
file, the file will then contain ``np.array([42.],
dtype='float32')``.
Parameters
----------
data : array_like
The data to write. Usually two-dimensional (frames x
channels), but one-dimensional `data` can be used for mono
files. Only the data types ``'float64'``, ``'float32'``,
``'int32'`` and ``'int16'`` are supported.
.. note:: The data type of `data` does **not** select the
data type of the written file. Audio data will be
converted to the given `subtype`. Writing int values
to a float file will *not* scale the values to
[-1.0, 1.0). If you write the value ``np.array([42],
dtype='int32')``, to a ``subtype='FLOAT'`` file, the
file will then contain ``np.array([42.],
dtype='float32')``.
Examples
--------
>>> import numpy as np
>>> from soundfile import SoundFile
>>> myfile = SoundFile('stereo_file.wav')
Write 10 frames of random data to a new file:
>>> with SoundFile('stereo_file.wav', 'w', 44100, 2, 'PCM_24') as f:
>>> f.write(np.random.randn(10, 2))
See Also
--------
buffer_write, .read | entailment |
def buffer_write(self, data, dtype):
    """Write audio data from a buffer/bytes object to the file.

    Writes the contents of `data` to the file at the current
    read/write position.
    This also advances the read/write position by the number of
    frames that were written and enlarges the file if necessary.

    Parameters
    ----------
    data : buffer or bytes
        A buffer or bytes object containing the audio data to be
        written.
    dtype : {'float64', 'float32', 'int32', 'int16'}
        The data type of the audio data stored in `data`.

    See Also
    --------
    .write, buffer_read

    """
    ctype = self._check_dtype(dtype)
    raw_data, frames = self._check_buffer(data, ctype)
    written = self._cdata_io('write', raw_data, ctype, frames)
    assert written == frames
    self._update_frames(written)
Writes the contents of `data` to the file at the current
read/write position.
This also advances the read/write position by the number of
frames that were written and enlarges the file if necessary.
Parameters
----------
data : buffer or bytes
A buffer or bytes object containing the audio data to be
written.
dtype : {'float64', 'float32', 'int32', 'int16'}
The data type of the audio data stored in `data`.
See Also
--------
.write, buffer_read | entailment |
def blocks(self, blocksize=None, overlap=0, frames=-1, dtype='float64',
           always_2d=False, fill_value=None, out=None):
    """Return a generator for block-wise reading.

    By default, the generator yields blocks of the given
    `blocksize` (using a given `overlap`) until the end of the file
    is reached; `frames` can be used to stop earlier.

    Parameters
    ----------
    blocksize : int
        The number of frames to read per block. Either this or `out`
        must be given.
    overlap : int, optional
        The number of frames to rewind between each block.
    frames : int, optional
        The number of frames to read.
        If ``frames < 0``, the file is read until the end.
    dtype : {'float64', 'float32', 'int32', 'int16'}, optional
        See :meth:`.read`.

    Yields
    ------
    numpy.ndarray or type(out)
        Blocks of audio data.
        If `out` was given, and the requested frames are not an
        integer multiple of the length of `out`, and no
        `fill_value` was given, the last block will be a smaller
        view into `out`.

    Other Parameters
    ----------------
    always_2d, fill_value
        See :meth:`.read`.
    out : numpy.ndarray or subclass, optional
        If `out` is specified, the data is written into the given
        array instead of creating a new array.  In this case, the
        arguments `dtype` and `always_2d` are silently ignored!

    Examples
    --------
    >>> from soundfile import SoundFile
    >>> with SoundFile('stereo_file.wav') as f:
    >>>     for block in f.blocks(blocksize=1024):
    >>>         pass  # do something with 'block'

    """
    import numpy as np
    if 'r' not in self.mode and '+' not in self.mode:
        raise RuntimeError("blocks() is not allowed in write-only mode")
    if out is None:
        if blocksize is None:
            raise TypeError("One of {blocksize, out} must be specified")
        out = self._create_empty_array(blocksize, always_2d, dtype)
        # A caller-supplied buffer is yielded as-is; our own buffer is
        # copied before yielding (it is re-used for the next block):
        copy_out = True
    else:
        if blocksize is not None:
            raise TypeError(
                "Only one of {blocksize, out} may be specified")
        blocksize = len(out)
        copy_out = False
    # Holds the last `overlap` frames of the previous block, which are
    # replayed at the start of the next block:
    overlap_memory = None
    frames = self._check_frames(frames, fill_value)
    while frames > 0:
        if overlap_memory is None:
            output_offset = 0
        else:
            output_offset = len(overlap_memory)
            out[:output_offset] = overlap_memory
        # Only read what is still missing after the replayed overlap:
        toread = min(blocksize - output_offset, frames)
        self.read(toread, dtype, always_2d, fill_value, out[output_offset:])
        if overlap:
            if overlap_memory is None:
                overlap_memory = np.copy(out[-overlap:])
            else:
                overlap_memory[:] = out[-overlap:]
        # Without a fill_value, the final (partial) block is a view:
        if blocksize > frames + overlap and fill_value is None:
            block = out[:frames + overlap]
        else:
            block = out
        yield np.copy(block) if copy_out else block
        frames -= toread
By default, the generator yields blocks of the given
`blocksize` (using a given `overlap`) until the end of the file
is reached; `frames` can be used to stop earlier.
Parameters
----------
blocksize : int
The number of frames to read per block. Either this or `out`
must be given.
overlap : int, optional
The number of frames to rewind between each block.
frames : int, optional
The number of frames to read.
If ``frames < 0``, the file is read until the end.
dtype : {'float64', 'float32', 'int32', 'int16'}, optional
See :meth:`.read`.
Yields
------
numpy.ndarray or type(out)
Blocks of audio data.
If `out` was given, and the requested frames are not an
integer multiple of the length of `out`, and no
`fill_value` was given, the last block will be a smaller
view into `out`.
Other Parameters
----------------
always_2d, fill_value, out
See :meth:`.read`.
fill_value : float, optional
See :meth:`.read`.
out : numpy.ndarray or subclass, optional
If `out` is specified, the data is written into the given
array instead of creating a new array. In this case, the
arguments `dtype` and `always_2d` are silently ignored!
Examples
--------
>>> from soundfile import SoundFile
>>> with SoundFile('stereo_file.wav') as f:
>>> for block in f.blocks(blocksize=1024):
>>> pass # do something with 'block' | entailment |
def truncate(self, frames=None):
    """Truncate the file to a given number of frames.

    After this command, the read/write position will be at the new
    end of the file.

    Parameters
    ----------
    frames : int, optional
        Only the data before `frames` is kept, the rest is deleted.
        If not specified, the current read/write position is used.

    Raises
    ------
    RuntimeError
        If libsndfile reports an error while truncating.

    """
    if frames is None:
        # Default to the current read/write position.
        frames = self.tell()
    err = _snd.sf_command(self._file, _snd.SFC_FILE_TRUNCATE,
                          _ffi.new("sf_count_t*", frames),
                          _ffi.sizeof("sf_count_t"))
    if err:
        raise RuntimeError("Error truncating the file")
    # Keep the cached frame count consistent with the new file length.
    self._info.frames = frames
After this command, the read/write position will be at the new
end of the file.
Parameters
----------
frames : int, optional
Only the data before `frames` is kept, the rest is deleted.
If not specified, the current read/write position is used. | entailment |
def close(self):
    """Close the file.  Can be called multiple times."""
    if not self.closed:
        # be sure to flush data to disk before closing the file
        self.flush()
        err = _snd.sf_close(self._file)
        # Invalidate the handle first so `closed` is True even if
        # the error check below raises.
        self._file = None
        _error_check(err)
def _open(self, file, mode_int, closefd):
    """Call the appropriate sf_open*() function from libsndfile.

    `file` may be a file name (str/bytes), an OS-level file
    descriptor (int), or a file-like object with the virtual-I/O
    attributes required by `_has_virtual_io_attrs`.  Returns the
    opened ``SNDFILE*`` pointer; raises ``OSError``/``TypeError``
    on invalid input and whatever `_error_check` raises on
    libsndfile errors.
    """
    if isinstance(file, (_unicode, bytes)):
        if _os.path.isfile(file):
            if 'x' in self.mode:
                # Exclusive-creation mode must not overwrite.
                raise OSError("File exists: {0!r}".format(self.name))
            elif set(self.mode).issuperset('w+'):
                # truncate the file, because SFM_RDWR doesn't:
                _os.close(_os.open(file, _os.O_WRONLY | _os.O_TRUNC))
        openfunction = _snd.sf_open
        if isinstance(file, _unicode):
            if _sys.platform == 'win32':
                # Windows needs the wide-character open to handle
                # non-ASCII file names.
                openfunction = _snd.sf_wchar_open
            else:
                file = file.encode(_sys.getfilesystemencoding())
        file_ptr = openfunction(file, mode_int, self._info)
    elif isinstance(file, int):
        file_ptr = _snd.sf_open_fd(file, mode_int, self._info, closefd)
    elif _has_virtual_io_attrs(file, mode_int):
        file_ptr = _snd.sf_open_virtual(self._init_virtual_io(file),
                                        mode_int, self._info, _ffi.NULL)
    else:
        raise TypeError("Invalid file: {0!r}".format(self.name))
    _error_check(_snd.sf_error(file_ptr),
                 "Error opening {0!r}: ".format(self.name))
    if mode_int == _snd.SFM_WRITE:
        # Due to a bug in libsndfile version <= 1.0.25, frames != 0
        # when opening a named pipe in SFM_WRITE mode.
        # See http://github.com/erikd/libsndfile/issues/77.
        self._info.frames = 0
        # This is not necessary for "normal" files (because
        # frames == 0 in this case), but it doesn't hurt, either.
    return file_ptr
def _init_virtual_io(self, file):
    """Initialize callback functions for sf_open_virtual().

    Builds an ``SF_VIRTUAL_IO`` struct whose callbacks delegate to
    the given file-like object's ``seek``/``tell``/``read``/``write``
    methods.  The callbacks are stored on ``self._virtual_io`` so
    they stay alive as long as this object does.
    """

    @_ffi.callback("sf_vio_get_filelen")
    def vio_get_filelen(user_data):
        # Measure the size by seeking to the end, then restore the
        # original position.
        curr = file.tell()
        file.seek(0, SEEK_END)
        size = file.tell()
        file.seek(curr, SEEK_SET)
        return size

    @_ffi.callback("sf_vio_seek")
    def vio_seek(offset, whence, user_data):
        file.seek(offset, whence)
        return file.tell()

    @_ffi.callback("sf_vio_read")
    def vio_read(ptr, count, user_data):
        # first try readinto(), if not available fall back to read()
        try:
            buf = _ffi.buffer(ptr, count)
            data_read = file.readinto(buf)
        except AttributeError:
            data = file.read(count)
            data_read = len(data)
            buf = _ffi.buffer(ptr, data_read)
            buf[0:data_read] = data
        return data_read

    @_ffi.callback("sf_vio_write")
    def vio_write(ptr, count, user_data):
        buf = _ffi.buffer(ptr, count)
        data = buf[:]
        written = file.write(data)
        # write() returns None for file objects in Python <= 2.7:
        if written is None:
            written = count
        return written

    @_ffi.callback("sf_vio_tell")
    def vio_tell(user_data):
        return file.tell()

    # Note: the callback functions must be kept alive!
    self._virtual_io = {'get_filelen': vio_get_filelen,
                        'seek': vio_seek,
                        'read': vio_read,
                        'write': vio_write,
                        'tell': vio_tell}
    return _ffi.new("SF_VIRTUAL_IO*", self._virtual_io)
def _check_frames(self, frames, fill_value):
"""Reduce frames to no more than are available in the file."""
if self.seekable():
remaining_frames = self.frames - self.tell()
if frames < 0 or (frames > remaining_frames and
fill_value is None):
frames = remaining_frames
elif frames < 0:
raise ValueError("frames must be specified for non-seekable files")
return frames | Reduce frames to no more than are available in the file. | entailment |
def _check_buffer(self, data, ctype):
    """Convert buffer to cdata and check for valid size.

    Returns ``(data, frames)`` where `data` is a bytes object or
    cffi buffer and `frames` is the number of complete audio frames
    it holds.  Raises ``ValueError`` if the buffer length is not a
    whole multiple of the frame size (channels * sample size).
    """
    assert ctype in _ffi_types.values()
    if not isinstance(data, bytes):
        # Zero-copy view into any object supporting the buffer
        # protocol.
        data = _ffi.from_buffer(data)
    frames, remainder = divmod(len(data),
                               self.channels * _ffi.sizeof(ctype))
    if remainder:
        raise ValueError("Data size must be a multiple of frame size")
    return data, frames
def _create_empty_array(self, frames, always_2d, dtype):
"""Create an empty array with appropriate shape."""
import numpy as np
if always_2d or self.channels > 1:
shape = frames, self.channels
else:
shape = frames,
return np.empty(shape, dtype, order='C') | Create an empty array with appropriate shape. | entailment |
def _check_dtype(self, dtype):
    """Check if dtype string is valid and return ctype string.

    Raises ``ValueError`` (listing the valid choices) if `dtype`
    is not one of the keys of the module-level `_ffi_types` map.
    """
    try:
        return _ffi_types[dtype]
    except KeyError:
        raise ValueError("dtype must be one of {0!r} and not {1!r}".format(
            sorted(_ffi_types.keys()), dtype))
def _array_io(self, action, array, frames):
    """Check array and call low-level IO function.

    `array` must be a C-contiguous 1-D array (mono files only) or a
    2-D array whose second dimension equals the channel count;
    otherwise ``ValueError`` is raised.  Delegates the actual
    read/write to `_cdata_io` and returns its frame count.
    """
    if (array.ndim not in (1, 2) or
            array.ndim == 1 and self.channels != 1 or
            array.ndim == 2 and array.shape[1] != self.channels):
        raise ValueError("Invalid shape: {0!r}".format(array.shape))
    if not array.flags.c_contiguous:
        raise ValueError("Data must be C-contiguous")
    ctype = self._check_dtype(array.dtype.name)
    assert array.dtype.itemsize == _ffi.sizeof(ctype)
    # Hand libsndfile a raw pointer to the array's data buffer
    # (no copy).
    cdata = _ffi.cast(ctype + '*', array.__array_interface__['data'][0])
    return self._cdata_io(action, cdata, ctype, frames)
def _cdata_io(self, action, data, ctype, frames):
    """Call one of libsndfile's read/write functions.

    `action` is ``'read'`` or ``'write'`` and, combined with
    `ctype`, selects the libsndfile entry point (for example
    ``sf_readf_double``).  Returns the number of frames actually
    transferred.
    """
    assert ctype in _ffi_types.values()
    self._check_if_closed()
    if self.seekable():
        curr = self.tell()
    func = getattr(_snd, 'sf_' + action + 'f_' + ctype)
    frames = func(self._file, data, frames)
    _error_check(self._errorcode)
    if self.seekable():
        self.seek(curr + frames, SEEK_SET)  # Update read & write position
    return frames
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.