code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
port_id = context.current['id']
physnet = self._get_physnet(context)
if not physnet:
LOG.debug("bind_port for port %(port)s: no physical_network "
"found", {'port': port_id})
return False
next_segment = context.allocate_dynamic_segment(
{'network_id': context.network.current['id'],
'network_type': n_const.TYPE_VLAN,
'physical_network': physnet})
LOG.debug("bind_port for port %(port)s: "
"current_segment=%(current_seg)s, "
"next_segment=%(next_seg)s",
{'port': port_id, 'current_seg': segment,
'next_seg': next_segment})
context.continue_binding(segment['id'], [next_segment])
return True | def _bind_fabric(self, context, segment) | Allocate dynamic segments for the port
Segment physnets are based on the switch to which the host is
connected. | 2.539083 | 2.45622 | 1.033736 |
port = context.current
log_context("bind_port: port", port)
for segment in context.segments_to_bind:
physnet = segment.get(driver_api.PHYSICAL_NETWORK)
segment_type = segment[driver_api.NETWORK_TYPE]
if not physnet:
if (segment_type == n_const.TYPE_VXLAN and self.manage_fabric):
if self._bind_fabric(context, segment):
continue
elif (port.get(portbindings.VNIC_TYPE)
== portbindings.VNIC_BAREMETAL):
if (not self.managed_physnets or
physnet in self.managed_physnets):
if self._bind_baremetal_port(context, segment):
continue
LOG.debug("Arista mech driver unable to bind port %(port)s to "
"%(seg_type)s segment on physical_network %(physnet)s",
{'port': port.get('id'), 'seg_type': segment_type,
'physnet': physnet}) | def bind_port(self, context) | Bind port to a network segment.
Provisioning request to Arista Hardware to plug a host
into appropriate network is done when the port is created
this simply tells the ML2 Plugin that we are binding the port | 3.295946 | 3.130853 | 1.052731 |
if migration:
binding_levels = context.original_binding_levels
else:
binding_levels = context.binding_levels
LOG.debug("_try_release_dynamic_segment: "
"binding_levels=%(bl)s", {'bl': binding_levels})
if not binding_levels:
return
for prior_level, binding in enumerate(binding_levels[1:]):
allocating_driver = binding_levels[prior_level].get(
driver_api.BOUND_DRIVER)
if allocating_driver != a_const.MECHANISM_DRV_NAME:
continue
bound_segment = binding.get(driver_api.BOUND_SEGMENT, {})
segment_id = bound_segment.get('id')
if not db_lib.segment_is_dynamic(segment_id):
continue
if not db_lib.segment_bound(segment_id):
context.release_dynamic_segment(segment_id)
LOG.debug("Released dynamic segment %(seg)s allocated "
"by %(drv)s", {'seg': segment_id,
'drv': allocating_driver}) | def _try_to_release_dynamic_segment(self, context, migration=False) | Release dynamic segment if necessary
If this port was the last port using a segment and the segment was
allocated by this driver, it should be released | 3.337963 | 3.056946 | 1.091927 |
if not isinstance(bdom, int):
raise ValueError("'bdom' must be integer")
sd = pd.Timestamp(sd)
ed = pd.Timestamp(ed)
t1 = sd
if not t1.is_month_start:
t1 = t1 - pd.offsets.MonthBegin(1)
t2 = ed
if not t2.is_month_end:
t2 = t2 + pd.offsets.MonthEnd(1)
dates = pd.date_range(t1, t2, freq="b")
dates = dates.difference(holidays)
date_data = pd.DataFrame({"date": dates, "year": dates.year,
"month": dates.month, "bdom": 1})
date_data.loc[:, "bdom"] = (
date_data.groupby(by=["year", "month"])["bdom"].cumsum()
)
date_data = date_data.loc[date_data.bdom == bdom, :]
date_data = date_data.loc[date_data.month.isin(months), :]
date_data.loc[:, "month_code"] = date_data.month.apply(lambda x: months[x])
idx = (date_data.date >= sd) & (date_data.date <= ed)
order = ['date', 'year', 'month', 'bdom', 'month_code']
date_data = (date_data.loc[idx, order]
.reset_index(drop=True))
return date_data | def bdom_roll_date(sd, ed, bdom, months, holidays=[]) | Convenience function for getting business day data associated with
contracts. Usefully for generating business day derived 'contract_dates'
which can be used as input to roller(). Returns dates for a business day of
the month for months in months.keys() between the start date and end date.
Parameters
----------
sd: str
String representing start date, %Y%m%d
ed: str
String representing end date, %Y%m%d
bdom: int
Integer indicating business day of month
months: dict
Dictionnary where key is integer representation of month [1-12] and
value is the month code [FGHJKMNQUVXZ]
holidays: list
List of holidays to exclude from business days
Return
------
A DataFrame with columns ['date', 'year', 'month', 'bdom', 'month_code']
Examples
--------
>>> import pandas as pd
>>> from mapping.mappings import bdom_roll_date
>>> bdom_roll_date("20160101", "20180501", 7, {1:"G", 3:"J", 8:"U"})
>>> bdom_roll_date("20160101", "20180501", 7, {1:"G", 3:"J", 8:"U"},
... holidays=[pd.Timestamp("20160101")]) | 2.156803 | 2.193061 | 0.983467 |
timestamps = sorted(timestamps)
contract_dates = contract_dates.sort_values()
_check_contract_dates(contract_dates)
weights = []
# for loop speedup only validate inputs the first function call to
# get_weights()
validate_inputs = True
ts = timestamps[0]
weights.extend(get_weights(ts, contract_dates,
validate_inputs=validate_inputs, **kwargs))
validate_inputs = False
for ts in timestamps[1:]:
weights.extend(get_weights(ts, contract_dates,
validate_inputs=validate_inputs, **kwargs))
weights = aggregate_weights(weights)
return weights | def roller(timestamps, contract_dates, get_weights, **kwargs) | Calculate weight allocations to tradeable instruments for generic futures
at a set of timestamps for a given root generic.
Paramters
---------
timestamps: iterable
Sorted iterable of of pandas.Timestamps to calculate weights for
contract_dates: pandas.Series
Series with index of tradeable contract names and pandas.Timestamps
representing the last date of the roll as values, sorted by values.
Index must be unique and values must be strictly monotonic.
get_weights: function
A function which takes in a timestamp, contract_dates, validate_inputs
and **kwargs. Returns a list of tuples consisting of the generic
instrument name, the tradeable contract as a string, the weight on this
contract as a float and the date as a pandas.Timestamp.
kwargs: keyword arguments
Arguements to pass to get_weights
Return
------
A pandas.DataFrame with columns representing generics and a MultiIndex of
date and contract. Values represent weights on tradeables for each generic.
Examples
--------
>>> import pandas as pd
>>> import mapping.mappings as mappings
>>> cols = pd.MultiIndex.from_product([["CL1", "CL2"], ['front', 'back']])
>>> idx = [-2, -1, 0]
>>> trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
... [0.0, 1.0, 0.0, 1.0]], index=idx, columns=cols)
>>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'),
... pd.Timestamp('2016-11-21'),
... pd.Timestamp('2016-12-20')],
... index=['CLX16', 'CLZ16', 'CLF17'])
>>> ts = pd.DatetimeIndex([pd.Timestamp('2016-10-18'),
... pd.Timestamp('2016-10-19'),
... pd.Timestamp('2016-10-19')])
>>> wts = mappings.roller(ts, contract_dates, mappings.static_transition,
... transition=trans) | 3.619426 | 4.426391 | 0.817692 |
dwts = pd.DataFrame(weights,
columns=["generic", "contract", "weight", "date"])
dwts = dwts.pivot_table(index=['date', 'contract'],
columns=['generic'], values='weight', fill_value=0)
dwts = dwts.astype(float)
dwts = dwts.sort_index()
if drop_date:
dwts.index = dwts.index.levels[-1]
return dwts | def aggregate_weights(weights, drop_date=False) | Transforms list of tuples of weights into pandas.DataFrame of weights.
Parameters:
-----------
weights: list
A list of tuples consisting of the generic instrument name,
the tradeable contract as a string, the weight on this contract as a
float and the date as a pandas.Timestamp.
drop_date: boolean
Whether to drop the date from the multiIndex
Returns
-------
A pandas.DataFrame of loadings of generic contracts on tradeable
instruments for a given date. The columns are generic instrument names and
the index is strings representing instrument names. | 3.394357 | 2.931473 | 1.157902 |
# Get ACL rules and interface mappings from the switch
switch_acls, switch_bindings = self._get_dynamic_acl_info(switch_ip)
# Adjust expected bindings for switch LAG config
expected_bindings = self.adjust_bindings_for_lag(switch_ip,
expected_bindings)
# Get synchronization commands
switch_cmds = list()
switch_cmds.extend(
self.get_sync_acl_cmds(switch_acls, expected_acls))
switch_cmds.extend(
self.get_sync_binding_cmds(switch_bindings, expected_bindings))
# Update switch config
self.run_openstack_sg_cmds(switch_cmds, self._switches.get(switch_ip)) | def synchronize_switch(self, switch_ip, expected_acls, expected_bindings) | Update ACL config on a switch to match expected config
This is done as follows:
1. Get switch ACL config using show commands
2. Update expected bindings based on switch LAGs
3. Get commands to synchronize switch ACLs
4. Get commands to synchronize switch ACL bindings
5. Run sync commands on switch | 4.189408 | 3.671586 | 1.141035 |
# Get expected ACLs and rules
expected_acls = self.get_expected_acls()
# Get expected interface to ACL mappings
all_expected_bindings = self.get_expected_bindings()
# Check that config is correct on every registered switch
for switch_ip in self._switches.keys():
expected_bindings = all_expected_bindings.get(switch_ip, [])
try:
self.synchronize_switch(switch_ip, expected_acls,
expected_bindings)
except Exception:
LOG.exception("Failed to sync SGs for %(switch)s",
{'switch': switch_ip}) | def synchronize(self) | Perform sync of the security groups between ML2 and EOS. | 4.933884 | 4.334545 | 1.13827 |
cmd = ['show openstack resource-pool vlan region %s uuid'
% self.region]
try:
self._run_eos_cmds(cmd)
self.cli_commands['resource-pool'] = cmd
except arista_exc.AristaRpcError:
self.cli_commands['resource-pool'] = []
LOG.warning(
_LW("'resource-pool' command '%s' is not available on EOS"),
cmd) | def check_vlan_type_driver_commands(self) | Checks the validity of CLI commands for Arista's VLAN type driver.
This method tries to execute the commands used exclusively by the
arista_vlan type driver and stores the commands if they succeed. | 6.552573 | 6.197335 | 1.057321 |
vlan_uuid_cmd = self.cli_commands['resource-pool']
if vlan_uuid_cmd:
return self._run_eos_cmds(commands=vlan_uuid_cmd)[0]
return None | def get_vlan_assignment_uuid(self) | Returns the UUID for the region's vlan assignment on CVX
:returns: string containing the region's vlan assignment UUID | 8.711918 | 9.444298 | 0.922453 |
if not self.cli_commands['resource-pool']:
LOG.warning(_('The version of CVX you are using does not support'
'arista VLAN type driver.'))
else:
cmd = ['show openstack resource-pools region %s' % self.region]
command_output = self._run_eos_cmds(cmd)
if command_output:
regions = command_output[0]['physicalNetwork']
if self.region in regions.keys():
return regions[self.region]['vlanPool']['default']
return {'assignedVlans': '',
'availableVlans': '',
'allocatedVlans': ''} | def get_vlan_allocation(self) | Returns the status of the region's VLAN pool in CVX
:returns: dictionary containg the assigned, allocated and available
VLANs for the region | 8.185888 | 7.032502 | 1.164008 |
# Always figure out who is master (starting with the last known val)
try:
if self._get_eos_master() is None:
msg = "Failed to identify CVX master"
self.set_cvx_unavailable()
raise arista_exc.AristaRpcError(msg=msg)
except Exception:
self.set_cvx_unavailable()
raise
self.set_cvx_available()
log_cmds = commands
if commands_to_log:
log_cmds = commands_to_log
LOG.info(_LI('Executing command on Arista EOS: %s'), log_cmds)
# this returns array of return values for every command in
# full_command list
try:
response = self._send_eapi_req(cmds=commands,
commands_to_log=log_cmds)
if response is None:
# Reset the server as we failed communicating with it
self._server_ip = None
self.set_cvx_unavailable()
msg = "Failed to communicate with CVX master"
raise arista_exc.AristaRpcError(msg=msg)
return response
except arista_exc.AristaRpcError:
raise | def _run_eos_cmds(self, commands, commands_to_log=None) | Execute/sends a CAPI (Command API) command to EOS.
In this method, list of commands is appended with prefix and
postfix commands - to make is understandble by EOS.
:param commands : List of command to be executed on EOS.
:param commands_to_log : This should be set to the command that is
logged. If it is None, then the commands
param is logged. | 4.346214 | 4.499809 | 0.965866 |
region_cmd = 'region %s' % self.region
if sync:
region_cmd = self.cli_commands[const.CMD_REGION_SYNC]
full_command = [
'enable',
'configure',
'cvx',
'service openstack',
region_cmd,
]
full_command.extend(cmds)
return full_command | def _build_command(self, cmds, sync=False) | Build full EOS's openstack CLI command.
Helper method to add commands to enter and exit from openstack
CLI modes.
:param cmds: The openstack CLI commands that need to be executed
in the openstack config mode.
:param sync: This flags indicates that the region is being synced. | 5.954032 | 5.381942 | 1.106298 |
full_command = self._build_command(commands, sync=sync)
if commands_to_log:
full_log_command = self._build_command(commands_to_log, sync=sync)
else:
full_log_command = None
return self._run_eos_cmds(full_command, full_log_command) | def _run_openstack_cmds(self, commands, commands_to_log=None, sync=False) | Execute/sends a CAPI (Command API) command to EOS.
In this method, list of commands is appended with prefix and
postfix commands - to make is understandble by EOS.
:param commands : List of command to be executed on EOS.
:param commands_to_logs : This should be set to the command that is
logged. If it is None, then the commands
param is logged.
:param sync: This flags indicates that the region is being synced. | 2.481604 | 2.493807 | 0.995107 |
port = context.current
host_id = context.host
cmd = ['show network physical-topology hosts']
try:
response = self._run_eos_cmds(cmd)
binding_profile = port.get(portbindings.PROFILE, {})
link_info = binding_profile.get('local_link_information', [])
for link in link_info:
switch_id = link.get('switch_id')
for host in response[0]['hosts'].values():
if switch_id == host['name']:
physnet = host['hostname']
LOG.debug("get_physical_network: Physical Network for "
"%(host)s is %(physnet)s",
{'host': host_id, 'physnet': physnet})
return physnet
LOG.debug("Physical network not found for %(host)s",
{'host': host_id})
except Exception as exc:
LOG.error(_LE('command %(cmd)s failed with '
'%(exc)s'), {'cmd': cmd, 'exc': exc})
return None | def get_baremetal_physnet(self, context) | Returns dictionary which contains mac to hostname mapping | 3.20461 | 3.250242 | 0.985961 |
host_id = utils.hostname(context.host)
cmd = ['show network physical-topology neighbors']
try:
response = self._run_eos_cmds(cmd)
# Get response for 'show network physical-topology neighbors'
# command
neighbors = response[0]['neighbors']
for neighbor in neighbors:
if host_id in neighbor:
physnet = neighbors[neighbor]['toPort'][0]['hostname']
LOG.debug("get_physical_network: Physical Network for "
"%(host)s is %(physnet)s", {'host': host_id,
'physnet': physnet})
return physnet
LOG.debug("Physical network not found for %(host)s",
{'host': host_id})
except Exception as exc:
LOG.error(_LE('command %(cmd)s failed with '
'%(exc)s'), {'cmd': cmd, 'exc': exc})
return None | def get_host_physnet(self, context) | Returns dictionary which contains physical topology information
for a given host_id | 3.702111 | 3.825765 | 0.967679 |
segment_model = segment_models.NetworkSegment
network_model = models_v2.Network
query = (query
.join_if_necessary(network_model)
.join_if_necessary(segment_model)
.filter(network_model.project_id != '')
.filter_network_type())
return query | def filter_unnecessary_segments(query) | Filter segments are not needed on CVX | 5.819688 | 6.345699 | 0.917107 |
segment_model = segment_models.NetworkSegment
query = (query
.filter(
segment_model.network_type.in_(
utils.SUPPORTED_NETWORK_TYPES)))
return query | def filter_network_type(query) | Filter unsupported segment types | 5.482402 | 4.576019 | 1.198073 |
# hack for pep8 E711: comparison to None should be
# 'if cond is not None'
none = None
port_model = models_v2.Port
binding_level_model = ml2_models.PortBindingLevel
query = (query
.join_if_necessary(port_model)
.join_if_necessary(binding_level_model)
.filter(
binding_level_model.host != '',
port_model.device_id != none,
port_model.network_id != none))
return query | def filter_unbound_ports(query) | Filter ports not bound to a host or network | 6.139649 | 5.959535 | 1.030223 |
port_model = models_v2.Port
if not device_owners:
device_owners = utils.SUPPORTED_DEVICE_OWNERS
supported_device_owner_filter = [
port_model.device_owner.ilike('%s%%' % owner)
for owner in device_owners]
unsupported_device_owner_filter = [
port_model.device_owner.notilike('%s%%' % owner)
for owner in utils.UNSUPPORTED_DEVICE_OWNERS]
query = (query
.filter(
and_(*unsupported_device_owner_filter),
or_(*supported_device_owner_filter)))
return query | def filter_by_device_owner(query, device_owners=None) | Filter ports by device_owner
Either filter using specified device_owner or using the list of all
device_owners supported and unsupported by the arista ML2 plugin | 2.523261 | 2.306849 | 1.093813 |
port_model = models_v2.Port
unsupported_device_id_filter = [
port_model.device_id.notilike('%s%%' % id)
for id in utils.UNSUPPORTED_DEVICE_IDS]
query = (query
.filter(and_(*unsupported_device_id_filter)))
return query | def filter_by_device_id(query) | Filter ports attached to devices we don't care about
Currently used to filter DHCP_RESERVED ports | 4.792508 | 4.179356 | 1.14671 |
port_model = models_v2.Port
binding_model = ml2_models.PortBinding
dst_binding_model = ml2_models.DistributedPortBinding
query = (query
.outerjoin_if_necessary(
binding_model,
port_model.id == binding_model.port_id)
.outerjoin_if_necessary(
dst_binding_model,
port_model.id == dst_binding_model.port_id)
.filter(
(binding_model.vnic_type == vnic_type) |
(dst_binding_model.vnic_type == vnic_type)))
return query | def filter_by_vnic_type(query, vnic_type) | Filter ports by vnic_type (currently only used for baremetals) | 2.547617 | 2.471697 | 1.030716 |
config = cfg.CONF.ml2_arista
managed_physnets = config['managed_physnets']
# Filter out ports bound to segments on physnets that we're not
# managing
segment_model = segment_models.NetworkSegment
if managed_physnets:
query = (query
.join_if_necessary(segment_model)
.filter(segment_model.physical_network.in_(
managed_physnets)))
return query | def filter_unmanaged_physnets(query) | Filter ports managed by other ML2 plugins | 5.589601 | 5.178995 | 1.079283 |
port_model = models_v2.Port
query = (query
.filter(port_model.status == n_const.PORT_STATUS_ACTIVE))
return query | def filter_inactive_ports(query) | Filter ports that aren't in active status | 3.6473 | 3.280703 | 1.111744 |
query = (query
.filter_unbound_ports()
.filter_by_device_owner(device_owners)
.filter_by_device_id()
.filter_unmanaged_physnets())
if active:
query = query.filter_inactive_ports()
if vnic_type:
query = query.filter_by_vnic_type(vnic_type)
return query | def filter_unnecessary_ports(query, device_owners=None, vnic_type=None,
active=True) | Filter out all ports are not needed on CVX | 3.126708 | 3.082318 | 1.014401 |
if tenant_id == '':
return []
session = db.get_reader_session()
project_ids = set()
with session.begin():
for m in [models_v2.Network, models_v2.Port]:
q = session.query(m.project_id).filter(m.project_id != '')
if tenant_id:
q = q.filter(m.project_id == tenant_id)
project_ids.update(pid[0] for pid in q.distinct())
return [{'project_id': project_id} for project_id in project_ids] | def get_tenants(tenant_id=None) | Returns list of all project/tenant ids that may be relevant on CVX | 2.816854 | 2.832018 | 0.994646 |
session = db.get_reader_session()
with session.begin():
model = models_v2.Network
networks = session.query(model).filter(model.project_id != '')
if network_id:
networks = networks.filter(model.id == network_id)
return networks.all() | def get_networks(network_id=None) | Returns list of all networks that may be relevant on CVX | 3.271406 | 3.519868 | 0.929411 |
session = db.get_reader_session()
with session.begin():
model = segment_models.NetworkSegment
segments = session.query(model).filter_unnecessary_segments()
if segment_id:
segments = segments.filter(model.id == segment_id)
return segments.all() | def get_segments(segment_id=None) | Returns list of all network segments that may be relevant on CVX | 4.507053 | 4.063505 | 1.109154 |
session = db.get_reader_session()
with session.begin():
port_model = models_v2.Port
binding_model = ml2_models.PortBinding
instances = (session
.query(port_model,
binding_model)
.outerjoin(
binding_model,
port_model.id == binding_model.port_id)
.distinct(port_model.device_id)
.group_by(port_model.device_id)
.filter_unnecessary_ports(device_owners, vnic_type))
if instance_id:
instances = instances.filter(port_model.device_id == instance_id)
return instances.all() | def get_instances(device_owners=None, vnic_type=None, instance_id=None) | Returns filtered list of all instances in the neutron db | 3.014322 | 2.823606 | 1.067543 |
return get_instances(device_owners=[n_const.DEVICE_OWNER_COMPUTE_PREFIX],
vnic_type=portbindings.VNIC_NORMAL,
instance_id=instance_id) | def get_vm_instances(instance_id=None) | Returns filtered list of vms that may be relevant on CVX | 5.657238 | 6.075058 | 0.931224 |
session = db.get_reader_session()
with session.begin():
port_model = models_v2.Port
ports = (session
.query(port_model)
.filter_unnecessary_ports(device_owners, vnic_type, active))
if port_id:
ports = ports.filter(port_model.id == port_id)
return ports.all() | def get_ports(device_owners=None, vnic_type=None, port_id=None, active=True) | Returns list of all ports in neutron the db | 3.490182 | 3.397707 | 1.027217 |
return get_ports(device_owners=[n_const.DEVICE_OWNER_COMPUTE_PREFIX,
t_const.TRUNK_SUBPORT_OWNER],
vnic_type=portbindings.VNIC_NORMAL, port_id=port_id) | def get_vm_ports(port_id=None) | Returns filtered list of vms that may be relevant on CVX | 5.590743 | 6.018932 | 0.92886 |
session = db.get_reader_session()
with session.begin():
binding_level_model = ml2_models.PortBindingLevel
aliased_blm = aliased(ml2_models.PortBindingLevel)
port_binding_model = ml2_models.PortBinding
dist_binding_model = ml2_models.DistributedPortBinding
bindings = (session.query(port_binding_model, aliased_blm)
.join(binding_level_model,
and_(
port_binding_model.port_id ==
binding_level_model.port_id,
port_binding_model.host ==
binding_level_model.host))
.filter_unnecessary_ports()
.join(aliased_blm,
and_(port_binding_model.port_id ==
aliased_blm.port_id,
port_binding_model.host ==
aliased_blm.host)))
dist_bindings = (session.query(dist_binding_model, aliased_blm)
.join(
binding_level_model,
and_(dist_binding_model.port_id ==
binding_level_model.port_id,
dist_binding_model.host ==
binding_level_model.host))
.filter_unnecessary_ports()
.filter(dist_binding_model.status ==
n_const.PORT_STATUS_ACTIVE)
.join(aliased_blm,
and_(dist_binding_model.port_id ==
aliased_blm.port_id,
dist_binding_model.host ==
aliased_blm.host)))
if binding_key:
port_id = binding_key[0]
if type(binding_key[1]) == tuple:
switch_id = binding_key[1][0]
switch_port = binding_key[1][1]
bindings = bindings.filter(and_(
port_binding_model.port_id == port_id,
port_binding_model.profile.ilike('%%%s%%' % switch_id),
port_binding_model.profile.ilike('%%%s%%' % switch_port)))
dist_bindings = dist_bindings.filter(and_(
dist_binding_model.port_id == port_id,
dist_binding_model.profile.ilike('%%%s%%' % switch_id),
dist_binding_model.profile.ilike('%%%s%%' % switch_port)))
else:
host_id = binding_key[1]
bindings = bindings.filter(and_(
port_binding_model.port_id == port_id,
port_binding_model.host == host_id))
dist_bindings = dist_bindings.filter(and_(
dist_binding_model.port_id == port_id,
dist_binding_model.host == host_id))
binding_levels = collections.defaultdict(list)
for binding, level in bindings.all() + dist_bindings.all():
binding_levels[binding].append(level)
bindings_with_levels = list()
for binding, levels in binding_levels.items():
binding.levels = levels
bindings_with_levels.append(binding)
return bindings_with_levels | def get_port_bindings(binding_key=None) | Returns filtered list of port bindings that may be relevant on CVX
This query is a little complex as we need all binding levels for any
binding that has a single managed physnet, but we need to filter bindings
that have no managed physnets. In order to achieve this, we join to the
binding_level_model once to filter bindings with no managed levels,
then a second time to get all levels for the remaining bindings.
The loop at the end is a convenience to associate levels with bindings
as a list. This would ideally be done through the use of an orm.relation,
but due to some sqlalchemy limitations imposed to make OVO work, we can't
add relations to existing models. | 1.767564 | 1.704137 | 1.03722 |
session = db.get_reader_session()
with session.begin():
res = any(
session.query(m).filter(m.tenant_id == tenant_id).count()
for m in [models_v2.Network, models_v2.Port]
)
return res | def tenant_provisioned(tenant_id) | Returns true if any networks or ports exist for a tenant. | 3.988607 | 3.595361 | 1.109376 |
session = db.get_reader_session()
with session.begin():
port_model = models_v2.Port
res = bool(session.query(port_model)
.filter(port_model.device_id == device_id).count())
return res | def instance_provisioned(device_id) | Returns true if any ports exist for an instance. | 4.621578 | 3.794148 | 1.21808 |
session = db.get_reader_session()
with session.begin():
port_model = models_v2.Port
res = bool(session.query(port_model)
.filter(port_model.id == port_id).count())
return res | def port_provisioned(port_id) | Returns true if port still exists. | 4.073602 | 3.859954 | 1.05535 |
session = db.get_reader_session()
res = dict()
with session.begin():
subport_model = trunk_models.SubPort
trunk_model = trunk_models.Trunk
subport = (session.query(subport_model).
filter(subport_model.port_id == port_id).first())
if subport:
trunk = (session.query(trunk_model).
filter(trunk_model.id == subport.trunk_id).first())
if trunk:
trunk_port_id = trunk.port.id
res = get_ports(port_id=trunk_port_id, active=False)[0]
return res | def get_parent(port_id) | Get trunk subport's parent port | 2.607025 | 2.321161 | 1.123156 |
session = db.get_reader_session()
with session.begin():
return (session.query(ml2_models.PortBindingLevel).
filter_by(**filters).
order_by(ml2_models.PortBindingLevel.level).
all()) | def get_port_binding_level(filters) | Returns entries from PortBindingLevel based on the specified filters. | 3.063998 | 2.962103 | 1.034399 |
import numpy as np
data = np.asarray(data)
if data.ndim == 1:
channels = 1
else:
channels = data.shape[1]
with SoundFile(file, 'w', samplerate, channels,
subtype, endian, format, closefd) as f:
f.write(data) | def write(file, data, samplerate, subtype=None, endian=None, format=None,
closefd=True) | Write data to a sound file.
.. note:: If `file` exists, it will be truncated and overwritten!
Parameters
----------
file : str or int or file-like object
The file to write to. See :class:`SoundFile` for details.
data : array_like
The data to write. Usually two-dimensional (frames x channels),
but one-dimensional `data` can be used for mono files.
Only the data types ``'float64'``, ``'float32'``, ``'int32'``
and ``'int16'`` are supported.
.. note:: The data type of `data` does **not** select the data
type of the written file. Audio data will be
converted to the given `subtype`. Writing int values
to a float file will *not* scale the values to
[-1.0, 1.0). If you write the value ``np.array([42],
dtype='int32')``, to a ``subtype='FLOAT'`` file, the
file will then contain ``np.array([42.],
dtype='float32')``.
samplerate : int
The sample rate of the audio data.
subtype : str, optional
See :func:`default_subtype` for the default value and
:func:`available_subtypes` for all possible values.
Other Parameters
----------------
format, endian, closefd
See :class:`SoundFile`.
Examples
--------
Write 10 frames of random data to a new file:
>>> import numpy as np
>>> import soundfile as sf
>>> sf.write('stereo_file.wav', np.random.randn(10, 2), 44100, 'PCM_24') | 2.222116 | 3.510969 | 0.632907 |
with SoundFile(file, 'r', samplerate, channels,
subtype, endian, format, closefd) as f:
frames = f._prepare_read(start, stop, frames)
for block in f.blocks(blocksize, overlap, frames,
dtype, always_2d, fill_value, out):
yield block | def blocks(file, blocksize=None, overlap=0, frames=-1, start=0, stop=None,
dtype='float64', always_2d=False, fill_value=None, out=None,
samplerate=None, channels=None,
format=None, subtype=None, endian=None, closefd=True) | Return a generator for block-wise reading.
By default, iteration starts at the beginning and stops at the end
of the file. Use `start` to start at a later position and `frames`
or `stop` to stop earlier.
If you stop iterating over the generator before it's exhausted,
the sound file is not closed. This is normally not a problem
because the file is opened in read-only mode. To close the file
properly, the generator's ``close()`` method can be called.
Parameters
----------
file : str or int or file-like object
The file to read from. See :class:`SoundFile` for details.
blocksize : int
The number of frames to read per block.
Either this or `out` must be given.
overlap : int, optional
The number of frames to rewind between each block.
Yields
------
numpy.ndarray or type(out)
Blocks of audio data.
If `out` was given, and the requested frames are not an integer
multiple of the length of `out`, and no `fill_value` was given,
the last block will be a smaller view into `out`.
Other Parameters
----------------
frames, start, stop
See :func:`read`.
dtype : {'float64', 'float32', 'int32', 'int16'}, optional
See :func:`read`.
always_2d, fill_value, out
See :func:`read`.
samplerate, channels, format, subtype, endian, closefd
See :class:`SoundFile`.
Examples
--------
>>> import soundfile as sf
>>> for block in sf.blocks('stereo_file.wav', blocksize=1024):
>>> pass # do something with 'block' | 2.878097 | 4.230581 | 0.680308 |
subtypes = _available_formats_helper(_snd.SFC_GET_FORMAT_SUBTYPE_COUNT,
_snd.SFC_GET_FORMAT_SUBTYPE)
return dict((subtype, name) for subtype, name in subtypes
if format is None or check_format(format, subtype)) | def available_subtypes(format=None) | Return a dictionary of available subtypes.
Parameters
----------
format : str
If given, only compatible subtypes are returned.
Examples
--------
>>> import soundfile as sf
>>> sf.available_subtypes('FLAC')
{'PCM_24': 'Signed 24 bit PCM',
'PCM_16': 'Signed 16 bit PCM',
'PCM_S8': 'Signed 8 bit PCM'} | 6.093387 | 7.773878 | 0.783829 |
try:
return bool(_format_int(format, subtype, endian))
except (ValueError, TypeError):
return False | def check_format(format, subtype=None, endian=None) | Check if the combination of format/subtype/endian is valid.
Examples
--------
>>> import soundfile as sf
>>> sf.check_format('WAV', 'PCM_24')
True
>>> sf.check_format('FLAC', 'VORBIS')
False | 5.44627 | 10.628329 | 0.51243 |
if err != 0:
err_str = _snd.sf_error_number(err)
raise RuntimeError(prefix + _ffi.string(err_str).decode('utf-8', 'replace')) | def _error_check(err, prefix="") | Pretty-print a numerical error code if there is an error. | 5.994586 | 5.125235 | 1.169622 |
result = _check_format(format)
if subtype is None:
subtype = default_subtype(format)
if subtype is None:
raise TypeError(
"No default subtype for major format {0!r}".format(format))
elif not isinstance(subtype, (_unicode, str)):
raise TypeError("Invalid subtype: {0!r}".format(subtype))
try:
result |= _subtypes[subtype.upper()]
except KeyError:
raise ValueError("Unknown subtype: {0!r}".format(subtype))
if endian is None:
endian = 'FILE'
elif not isinstance(endian, (_unicode, str)):
raise TypeError("Invalid endian-ness: {0!r}".format(endian))
try:
result |= _endians[endian.upper()]
except KeyError:
raise ValueError("Unknown endian-ness: {0!r}".format(endian))
info = _ffi.new("SF_INFO*")
info.format = result
info.channels = 1
if _snd.sf_format_check(info) == _snd.SF_FALSE:
raise ValueError(
"Invalid combination of format, subtype and endian")
return result | def _format_int(format, subtype, endian) | Return numeric ID for given format|subtype|endian combo. | 3.009696 | 2.906263 | 1.03559 |
if not isinstance(mode, (_unicode, str)):
raise TypeError("Invalid mode: {0!r}".format(mode))
mode_set = set(mode)
if mode_set.difference('xrwb+') or len(mode) > len(mode_set):
raise ValueError("Invalid mode: {0!r}".format(mode))
if len(mode_set.intersection('xrw')) != 1:
raise ValueError("mode must contain exactly one of 'xrw'")
if '+' in mode_set:
mode_int = _snd.SFM_RDWR
elif 'r' in mode_set:
mode_int = _snd.SFM_READ
else:
mode_int = _snd.SFM_WRITE
return mode_int | def _check_mode(mode) | Check if mode is valid and return its integer representation. | 3.158126 | 3.042743 | 1.037921 |
original_format = format
if format is None:
format = _get_format_from_filename(file, mode)
assert isinstance(format, (_unicode, str))
else:
_check_format(format)
info = _ffi.new("SF_INFO*")
if 'r' not in mode or format.upper() == 'RAW':
if samplerate is None:
raise TypeError("samplerate must be specified")
info.samplerate = samplerate
if channels is None:
raise TypeError("channels must be specified")
info.channels = channels
info.format = _format_int(format, subtype, endian)
else:
if any(arg is not None for arg in (
samplerate, channels, original_format, subtype, endian)):
raise TypeError("Not allowed for existing files (except 'RAW'): "
"samplerate, channels, format, subtype, endian")
return info | def _create_info_struct(file, mode, samplerate, channels,
format, subtype, endian) | Check arguments and create SF_INFO struct. | 3.787601 | 3.547195 | 1.067773 |
format = ''
file = getattr(file, 'name', file)
try:
# This raises an exception if file is not a (Unicode/byte) string:
format = _os.path.splitext(file)[-1][1:]
# Convert bytes to unicode (raises AttributeError on Python 3 str):
format = format.decode('utf-8', 'replace')
except Exception:
pass
if format.upper() not in _formats and 'r' not in mode:
raise TypeError("No format specified and unable to get format from "
"file extension: {0!r}".format(file))
return format | def _get_format_from_filename(file, mode) | Return a format string obtained from file (or file.name).
If file already exists (= read mode), an empty string is returned on
error. If not, an exception is raised.
The return type will always be str or unicode (even if
file/file.name is a bytes object). | 4.971966 | 4.690541 | 1.059998 |
for dictionary in _formats, _subtypes, _endians:
for k, v in dictionary.items():
if v == format_int:
return k
else:
return 'n/a' | def _format_str(format_int) | Return the string representation of a given numeric format. | 7.005756 | 5.75017 | 1.218356 |
format_info = _ffi.new("SF_FORMAT_INFO*")
format_info.format = format_int
_snd.sf_command(_ffi.NULL, format_flag, format_info,
_ffi.sizeof("SF_FORMAT_INFO"))
name = format_info.name
return (_format_str(format_info.format),
_ffi.string(name).decode('utf-8', 'replace') if name else "") | def _format_info(format_int, format_flag=_snd.SFC_GET_FORMAT_INFO) | Return the ID and short description of a given format. | 3.400329 | 3.1791 | 1.069588 |
count = _ffi.new("int*")
_snd.sf_command(_ffi.NULL, count_flag, count, _ffi.sizeof("int"))
for format_int in range(count[0]):
yield _format_info(format_int, format_flag) | def _available_formats_helper(count_flag, format_flag) | Helper for available_formats() and available_subtypes(). | 6.472291 | 6.000689 | 1.078591 |
if not isinstance(format_str, (_unicode, str)):
raise TypeError("Invalid format: {0!r}".format(format_str))
try:
format_int = _formats[format_str.upper()]
except KeyError:
raise ValueError("Unknown format: {0!r}".format(format_str))
return format_int | def _check_format(format_str) | Check if `format_str` is valid and return format ID. | 2.741553 | 2.356261 | 1.163518 |
readonly = mode_int == _snd.SFM_READ
writeonly = mode_int == _snd.SFM_WRITE
return all([
hasattr(file, 'seek'),
hasattr(file, 'tell'),
hasattr(file, 'write') or readonly,
hasattr(file, 'read') or hasattr(file, 'readinto') or writeonly,
]) | def _has_virtual_io_attrs(file, mode_int) | Check if file has all the necessary attributes for virtual IO. | 4.050405 | 3.929197 | 1.030848 |
info = _ffi.new("char[]", 2**14)
_snd.sf_command(self._file, _snd.SFC_GET_LOG_INFO,
info, _ffi.sizeof(info))
return _ffi.string(info).decode('utf-8', 'replace') | def extra_info(self) | Retrieve the log string generated when opening the file. | 6.459422 | 4.690736 | 1.377059 |
self._check_if_closed()
position = _snd.sf_seek(self._file, frames, whence)
_error_check(self._errorcode)
return position | def seek(self, frames, whence=SEEK_SET) | Set the read/write position.
Parameters
----------
frames : int
The frame index or offset to seek.
whence : {SEEK_SET, SEEK_CUR, SEEK_END}, optional
By default (``whence=SEEK_SET``), `frames` are counted from
the beginning of the file.
``whence=SEEK_CUR`` seeks from the current position
(positive and negative values are allowed for `frames`).
``whence=SEEK_END`` seeks from the end (use negative value
for `frames`).
Returns
-------
int
The new absolute read/write position in frames.
Examples
--------
>>> from soundfile import SoundFile, SEEK_END
>>> myfile = SoundFile('stereo_file.wav')
Seek to the beginning of the file:
>>> myfile.seek(0)
0
Seek to the end of the file:
>>> myfile.seek(0, SEEK_END)
44100 # this is the file length | 7.768131 | 24.303785 | 0.319626 |
if out is None:
frames = self._check_frames(frames, fill_value)
out = self._create_empty_array(frames, always_2d, dtype)
else:
if frames < 0 or frames > len(out):
frames = len(out)
frames = self._array_io('read', out, frames)
if len(out) > frames:
if fill_value is None:
out = out[:frames]
else:
out[frames:] = fill_value
return out | def read(self, frames=-1, dtype='float64', always_2d=False,
fill_value=None, out=None) | Read from the file and return data as NumPy array.
Reads the given number of frames in the given data format
starting at the current read/write position. This advances the
read/write position by the same number of frames.
By default, all frames from the current read/write position to
the end of the file are returned.
Use :meth:`.seek` to move the current read/write position.
Parameters
----------
frames : int, optional
The number of frames to read. If ``frames < 0``, the whole
rest of the file is read.
dtype : {'float64', 'float32', 'int32', 'int16'}, optional
Data type of the returned array, by default ``'float64'``.
Floating point audio data is typically in the range from
``-1.0`` to ``1.0``. Integer data is in the range from
``-2**15`` to ``2**15-1`` for ``'int16'`` and from
``-2**31`` to ``2**31-1`` for ``'int32'``.
.. note:: Reading int values from a float file will *not*
scale the data to [-1.0, 1.0). If the file contains
``np.array([42.6], dtype='float32')``, you will read
``np.array([43], dtype='int32')`` for
``dtype='int32'``.
Returns
-------
audiodata : numpy.ndarray or type(out)
A two-dimensional NumPy (frames x channels) array is
returned. If the sound file has only one channel, a
one-dimensional array is returned. Use ``always_2d=True``
to return a two-dimensional array anyway.
If `out` was specified, it is returned. If `out` has more
frames than available in the file (or if `frames` is
smaller than the length of `out`) and no `fill_value` is
given, then only a part of `out` is overwritten and a view
containing all valid frames is returned. numpy.ndarray or
type(out)
Other Parameters
----------------
always_2d : bool, optional
By default, reading a mono sound file will return a
one-dimensional array. With ``always_2d=True``, audio data
is always returned as a two-dimensional array, even if the
audio file has only one channel.
fill_value : float, optional
If more frames are requested than available in the file,
the rest of the output is be filled with `fill_value`. If
`fill_value` is not specified, a smaller array is
returned.
out : numpy.ndarray or subclass, optional
If `out` is specified, the data is written into the given
array instead of creating a new array. In this case, the
arguments `dtype` and `always_2d` are silently ignored! If
`frames` is not given, it is obtained from the length of
`out`.
Examples
--------
>>> from soundfile import SoundFile
>>> myfile = SoundFile('stereo_file.wav')
Reading 3 frames from a stereo file:
>>> myfile.read(3)
array([[ 0.71329652, 0.06294799],
[-0.26450912, -0.38874483],
[ 0.67398441, -0.11516333]])
>>> myfile.close()
See Also
--------
buffer_read, .write | 2.954822 | 3.702841 | 0.797988 |
frames = self._check_frames(frames, fill_value=None)
ctype = self._check_dtype(dtype)
cdata = _ffi.new(ctype + '[]', frames * self.channels)
read_frames = self._cdata_io('read', cdata, ctype, frames)
assert read_frames == frames
return _ffi.buffer(cdata) | def buffer_read(self, frames=-1, dtype=None) | Read from the file and return data as buffer object.
Reads the given number of `frames` in the given data format
starting at the current read/write position. This advances the
read/write position by the same number of frames.
By default, all frames from the current read/write position to
the end of the file are returned.
Use :meth:`.seek` to move the current read/write position.
Parameters
----------
frames : int, optional
The number of frames to read. If `frames < 0`, the whole
rest of the file is read.
dtype : {'float64', 'float32', 'int32', 'int16'}
Audio data will be converted to the given data type.
Returns
-------
buffer
A buffer containing the read data.
See Also
--------
buffer_read_into, .read, buffer_write | 4.585772 | 5.643711 | 0.812545 |
ctype = self._check_dtype(dtype)
cdata, frames = self._check_buffer(buffer, ctype)
frames = self._cdata_io('read', cdata, ctype, frames)
return frames | def buffer_read_into(self, buffer, dtype) | Read from the file into a given buffer object.
Fills the given `buffer` with frames in the given data format
starting at the current read/write position (which can be
changed with :meth:`.seek`) until the buffer is full or the end
of the file is reached. This advances the read/write position
by the number of frames that were read.
Parameters
----------
buffer : writable buffer
Audio frames from the file are written to this buffer.
dtype : {'float64', 'float32', 'int32', 'int16'}
The data type of `buffer`.
Returns
-------
int
The number of frames that were read from the file.
This can be less than the size of `buffer`.
The rest of the buffer is not filled with meaningful data.
See Also
--------
buffer_read, .read | 7.347104 | 9.622027 | 0.763571 |
import numpy as np
# no copy is made if data has already the correct memory layout:
data = np.ascontiguousarray(data)
written = self._array_io('write', data, len(data))
assert written == len(data)
self._update_frames(written) | def write(self, data) | Write audio data from a NumPy array to the file.
Writes a number of frames at the read/write position to the
file. This also advances the read/write position by the same
number of frames and enlarges the file if necessary.
Note that writing int values to a float file will *not* scale
the values to [-1.0, 1.0). If you write the value
``np.array([42], dtype='int32')``, to a ``subtype='FLOAT'``
file, the file will then contain ``np.array([42.],
dtype='float32')``.
Parameters
----------
data : array_like
The data to write. Usually two-dimensional (frames x
channels), but one-dimensional `data` can be used for mono
files. Only the data types ``'float64'``, ``'float32'``,
``'int32'`` and ``'int16'`` are supported.
.. note:: The data type of `data` does **not** select the
data type of the written file. Audio data will be
converted to the given `subtype`. Writing int values
to a float file will *not* scale the values to
[-1.0, 1.0). If you write the value ``np.array([42],
dtype='int32')``, to a ``subtype='FLOAT'`` file, the
file will then contain ``np.array([42.],
dtype='float32')``.
Examples
--------
>>> import numpy as np
>>> from soundfile import SoundFile
>>> myfile = SoundFile('stereo_file.wav')
Write 10 frames of random data to a new file:
>>> with SoundFile('stereo_file.wav', 'w', 44100, 2, 'PCM_24') as f:
>>> f.write(np.random.randn(10, 2))
See Also
--------
buffer_write, .read | 8.116978 | 10.419735 | 0.779 |
ctype = self._check_dtype(dtype)
cdata, frames = self._check_buffer(data, ctype)
written = self._cdata_io('write', cdata, ctype, frames)
assert written == frames
self._update_frames(written) | def buffer_write(self, data, dtype) | Write audio data from a buffer/bytes object to the file.
Writes the contents of `data` to the file at the current
read/write position.
This also advances the read/write position by the number of
frames that were written and enlarges the file if necessary.
Parameters
----------
data : buffer or bytes
A buffer or bytes object containing the audio data to be
written.
dtype : {'float64', 'float32', 'int32', 'int16'}
The data type of the audio data stored in `data`.
See Also
--------
.write, buffer_read | 6.850924 | 8.238564 | 0.831568 |
import numpy as np
if 'r' not in self.mode and '+' not in self.mode:
raise RuntimeError("blocks() is not allowed in write-only mode")
if out is None:
if blocksize is None:
raise TypeError("One of {blocksize, out} must be specified")
out = self._create_empty_array(blocksize, always_2d, dtype)
copy_out = True
else:
if blocksize is not None:
raise TypeError(
"Only one of {blocksize, out} may be specified")
blocksize = len(out)
copy_out = False
overlap_memory = None
frames = self._check_frames(frames, fill_value)
while frames > 0:
if overlap_memory is None:
output_offset = 0
else:
output_offset = len(overlap_memory)
out[:output_offset] = overlap_memory
toread = min(blocksize - output_offset, frames)
self.read(toread, dtype, always_2d, fill_value, out[output_offset:])
if overlap:
if overlap_memory is None:
overlap_memory = np.copy(out[-overlap:])
else:
overlap_memory[:] = out[-overlap:]
if blocksize > frames + overlap and fill_value is None:
block = out[:frames + overlap]
else:
block = out
yield np.copy(block) if copy_out else block
frames -= toread | def blocks(self, blocksize=None, overlap=0, frames=-1, dtype='float64',
always_2d=False, fill_value=None, out=None) | Return a generator for block-wise reading.
By default, the generator yields blocks of the given
`blocksize` (using a given `overlap`) until the end of the file
is reached; `frames` can be used to stop earlier.
Parameters
----------
blocksize : int
The number of frames to read per block. Either this or `out`
must be given.
overlap : int, optional
The number of frames to rewind between each block.
frames : int, optional
The number of frames to read.
If ``frames < 0``, the file is read until the end.
dtype : {'float64', 'float32', 'int32', 'int16'}, optional
See :meth:`.read`.
Yields
------
numpy.ndarray or type(out)
Blocks of audio data.
If `out` was given, and the requested frames are not an
integer multiple of the length of `out`, and no
`fill_value` was given, the last block will be a smaller
view into `out`.
Other Parameters
----------------
always_2d, fill_value, out
See :meth:`.read`.
fill_value : float, optional
See :meth:`.read`.
out : numpy.ndarray or subclass, optional
If `out` is specified, the data is written into the given
array instead of creating a new array. In this case, the
arguments `dtype` and `always_2d` are silently ignored!
Examples
--------
>>> from soundfile import SoundFile
>>> with SoundFile('stereo_file.wav') as f:
>>> for block in f.blocks(blocksize=1024):
>>> pass # do something with 'block' | 3.131395 | 3.20712 | 0.976388 |
if frames is None:
frames = self.tell()
err = _snd.sf_command(self._file, _snd.SFC_FILE_TRUNCATE,
_ffi.new("sf_count_t*", frames),
_ffi.sizeof("sf_count_t"))
if err:
raise RuntimeError("Error truncating the file")
self._info.frames = frames | def truncate(self, frames=None) | Truncate the file to a given number of frames.
After this command, the read/write position will be at the new
end of the file.
Parameters
----------
frames : int, optional
Only the data before `frames` is kept, the rest is deleted.
If not specified, the current read/write position is used. | 5.807604 | 6.472603 | 0.897259 |
if not self.closed:
# be sure to flush data to disk before closing the file
self.flush()
err = _snd.sf_close(self._file)
self._file = None
_error_check(err) | def close(self) | Close the file. Can be called multiple times. | 7.484194 | 6.338232 | 1.180802 |
if isinstance(file, (_unicode, bytes)):
if _os.path.isfile(file):
if 'x' in self.mode:
raise OSError("File exists: {0!r}".format(self.name))
elif set(self.mode).issuperset('w+'):
# truncate the file, because SFM_RDWR doesn't:
_os.close(_os.open(file, _os.O_WRONLY | _os.O_TRUNC))
openfunction = _snd.sf_open
if isinstance(file, _unicode):
if _sys.platform == 'win32':
openfunction = _snd.sf_wchar_open
else:
file = file.encode(_sys.getfilesystemencoding())
file_ptr = openfunction(file, mode_int, self._info)
elif isinstance(file, int):
file_ptr = _snd.sf_open_fd(file, mode_int, self._info, closefd)
elif _has_virtual_io_attrs(file, mode_int):
file_ptr = _snd.sf_open_virtual(self._init_virtual_io(file),
mode_int, self._info, _ffi.NULL)
else:
raise TypeError("Invalid file: {0!r}".format(self.name))
_error_check(_snd.sf_error(file_ptr),
"Error opening {0!r}: ".format(self.name))
if mode_int == _snd.SFM_WRITE:
# Due to a bug in libsndfile version <= 1.0.25, frames != 0
# when opening a named pipe in SFM_WRITE mode.
# See http://github.com/erikd/libsndfile/issues/77.
self._info.frames = 0
# This is not necessary for "normal" files (because
# frames == 0 in this case), but it doesn't hurt, either.
return file_ptr | def _open(self, file, mode_int, closefd) | Call the appropriate sf_open*() function from libsndfile. | 4.110092 | 3.931567 | 1.045408 |
@_ffi.callback("sf_vio_get_filelen")
def vio_get_filelen(user_data):
curr = file.tell()
file.seek(0, SEEK_END)
size = file.tell()
file.seek(curr, SEEK_SET)
return size
@_ffi.callback("sf_vio_seek")
def vio_seek(offset, whence, user_data):
file.seek(offset, whence)
return file.tell()
@_ffi.callback("sf_vio_read")
def vio_read(ptr, count, user_data):
# first try readinto(), if not available fall back to read()
try:
buf = _ffi.buffer(ptr, count)
data_read = file.readinto(buf)
except AttributeError:
data = file.read(count)
data_read = len(data)
buf = _ffi.buffer(ptr, data_read)
buf[0:data_read] = data
return data_read
@_ffi.callback("sf_vio_write")
def vio_write(ptr, count, user_data):
buf = _ffi.buffer(ptr, count)
data = buf[:]
written = file.write(data)
# write() returns None for file objects in Python <= 2.7:
if written is None:
written = count
return written
@_ffi.callback("sf_vio_tell")
def vio_tell(user_data):
return file.tell()
# Note: the callback functions must be kept alive!
self._virtual_io = {'get_filelen': vio_get_filelen,
'seek': vio_seek,
'read': vio_read,
'write': vio_write,
'tell': vio_tell}
return _ffi.new("SF_VIRTUAL_IO*", self._virtual_io) | def _init_virtual_io(self, file) | Initialize callback functions for sf_open_virtual(). | 2.237654 | 2.139113 | 1.046066 |
if self.seekable():
remaining_frames = self.frames - self.tell()
if frames < 0 or (frames > remaining_frames and
fill_value is None):
frames = remaining_frames
elif frames < 0:
raise ValueError("frames must be specified for non-seekable files")
return frames | def _check_frames(self, frames, fill_value) | Reduce frames to no more than are available in the file. | 4.095436 | 3.653066 | 1.121096 |
assert ctype in _ffi_types.values()
if not isinstance(data, bytes):
data = _ffi.from_buffer(data)
frames, remainder = divmod(len(data),
self.channels * _ffi.sizeof(ctype))
if remainder:
raise ValueError("Data size must be a multiple of frame size")
return data, frames | def _check_buffer(self, data, ctype) | Convert buffer to cdata and check for valid size. | 4.813916 | 4.39845 | 1.094457 |
import numpy as np
if always_2d or self.channels > 1:
shape = frames, self.channels
else:
shape = frames,
return np.empty(shape, dtype, order='C') | def _create_empty_array(self, frames, always_2d, dtype) | Create an empty array with appropriate shape. | 3.554287 | 3.36294 | 1.056899 |
try:
return _ffi_types[dtype]
except KeyError:
raise ValueError("dtype must be one of {0!r} and not {1!r}".format(
sorted(_ffi_types.keys()), dtype)) | def _check_dtype(self, dtype) | Check if dtype string is valid and return ctype string. | 4.196804 | 3.199615 | 1.311659 |
if (array.ndim not in (1, 2) or
array.ndim == 1 and self.channels != 1 or
array.ndim == 2 and array.shape[1] != self.channels):
raise ValueError("Invalid shape: {0!r}".format(array.shape))
if not array.flags.c_contiguous:
raise ValueError("Data must be C-contiguous")
ctype = self._check_dtype(array.dtype.name)
assert array.dtype.itemsize == _ffi.sizeof(ctype)
cdata = _ffi.cast(ctype + '*', array.__array_interface__['data'][0])
return self._cdata_io(action, cdata, ctype, frames) | def _array_io(self, action, array, frames) | Check array and call low-level IO function. | 3.165316 | 3.040682 | 1.040989 |
assert ctype in _ffi_types.values()
self._check_if_closed()
if self.seekable():
curr = self.tell()
func = getattr(_snd, 'sf_' + action + 'f_' + ctype)
frames = func(self._file, data, frames)
_error_check(self._errorcode)
if self.seekable():
self.seek(curr + frames, SEEK_SET) # Update read & write position
return frames | def _cdata_io(self, action, data, ctype, frames) | Call one of libsndfile's read/write functions. | 6.315725 | 5.506156 | 1.14703 |
if self.seekable():
curr = self.tell()
self._info.frames = self.seek(0, SEEK_END)
self.seek(curr, SEEK_SET)
else:
self._info.frames += written | def _update_frames(self, written) | Update self.frames after writing. | 4.3608 | 3.931398 | 1.109224 |
if start != 0 and not self.seekable():
raise ValueError("start is only allowed for seekable files")
if frames >= 0 and stop is not None:
raise TypeError("Only one of {frames, stop} may be used")
start, stop, _ = slice(start, stop).indices(self.frames)
if stop < start:
stop = start
if frames < 0:
frames = stop - start
if self.seekable():
self.seek(start, SEEK_SET)
return frames | def _prepare_read(self, start, stop, frames) | Seek to start frame and calculate length. | 3.829556 | 3.632505 | 1.054246 |
try:
# Tries to create the directory
os.makedirs(dirname)
except OSError:
# Check that the directory exists
if os.path.isdir(dirname): pass
else: raise | def ensure_dir(dirname) | Creates the directory dirname if it does not already exist,
taking into account concurrent 'creation' on the grid.
An exception is thrown if a file (rather than a directory) already
exists. | 3.956095 | 4.052777 | 0.976144 |
import numpy as np
C_probesUsed = np.ndarray((len(probe_files_full),), 'bool')
C_probesUsed.fill(False)
c=0
for k in sorted(probe_files_full.keys()):
if probe_files_model.has_key(k): C_probesUsed[c] = True
c+=1
return C_probesUsed | def probes_used_generate_vector(probe_files_full, probe_files_model) | Generates boolean matrices indicating which are the probes for each model | 3.152229 | 3.053153 | 1.03245 |
if full_scores.shape[1] != same_probes.shape[0]: raise "Size mismatch"
import numpy as np
model_scores = np.ndarray((full_scores.shape[0],np.sum(same_probes)), 'float64')
c=0
for i in range(0,full_scores.shape[1]):
if same_probes[i]:
for j in range(0,full_scores.shape[0]):
model_scores[j,c] = full_scores[j,i]
c+=1
return model_scores | def probes_used_extract_scores(full_scores, same_probes) | Extracts a matrix of scores for a model, given a probes_used row vector of boolean | 2.384299 | 2.116869 | 1.126333 |
# Depricated: use load() function from bob.bio.spear.database.AudioBioFile
#TODO: update xbob.sox first. This will enable the use of formats like NIST sphere and other
#import xbob.sox
#audio = xbob.sox.reader(filename)
#(rate, data) = audio.load()
# We consider there is only 1 channel in the audio file => data[0]
#data= numpy.cast['float'](data[0]*pow(2,15)) # pow(2,15) is used to get the same native format as for scipy.io.wavfile.read
import scipy.io.wavfile
rate, audio = scipy.io.wavfile.read(filename)
# We consider there is only 1 channel in the audio file => data[0]
data= numpy.cast['float'](audio)
return rate, data | def read(filename) | Read audio file | 8.602514 | 8.394979 | 1.024721 |
# Initializes variables
length = 1
n_samples = len(vector)
mean = numpy.ndarray((length,), 'float64')
std = numpy.ndarray((length,), 'float64')
mean.fill(0)
std.fill(0)
# Computes mean and variance
for array in vector:
x = array.astype('float64')
mean += x
std += (x ** 2)
mean /= n_samples
std /= n_samples
std -= (mean ** 2)
std = std ** 0.5
arrayset = numpy.ndarray(shape=(n_samples,mean.shape[0]), dtype=numpy.float64)
for i in range (0, n_samples):
arrayset[i,:] = (vector[i]-mean) / std
return arrayset | def normalize_std_array(vector) | Applies a unit mean and variance normalization to an arrayset | 2.941994 | 2.799014 | 1.051082 |
if numpy.sum(labels)< smoothing_window:
return labels
segments = []
for k in range(1,len(labels)-1):
if labels[k]==0 and labels[k-1]==1 and labels[k+1]==1 :
labels[k]=1
for k in range(1,len(labels)-1):
if labels[k]==1 and labels[k-1]==0 and labels[k+1]==0 :
labels[k]=0
seg = numpy.array([0,0,labels[0]])
for k in range(1,len(labels)):
if labels[k] != labels[k-1]:
seg[1]=k-1
segments.append(seg)
seg = numpy.array([k,k,labels[k]])
seg[1]=len(labels)-1
segments.append(seg)
if len(segments) < 2:
return labels
curr = segments[0]
next = segments[1]
# Look at the first segment. If it's short enough, just change its labels
if (curr[1]-curr[0]+1) < smoothing_window and (next[1]-next[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
for k in range(1,len(segments)-1):
prev = segments[k-1]
curr = segments[k]
next = segments[k+1]
if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window and (next[1]-next[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
prev = segments[-2]
curr = segments[-1]
if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #if curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
return labels | def smoothing(labels, smoothing_window) | Applies a smoothing on VAD | 1.600114 | 1.602272 | 0.998653 |
e = bob.ap.Energy(rate_wavsample[0], self.win_length_ms, self.win_shift_ms)
energy_array = e(rate_wavsample[1])
labels = self.use_existing_vad(energy_array, vad_file)
return labels | def _conversion(self, input_signal, vad_file) | Converts an external VAD to follow the Spear convention.
Energy is used in order to avoind out-of-bound array indexes. | 12.093795 | 10.735527 | 1.126521 |
# Set parameters
wl = self.win_length_ms
ws = self.win_shift_ms
nf = self.n_filters
f_min = self.f_min
f_max = self.f_max
pre = self.pre_emphasis_coef
c = bob.ap.Spectrogram(rate_wavsample[0], wl, ws, nf, f_min, f_max, pre)
c.energy_filter=True
c.log_filter=False
c.energy_bands=True
sig = rate_wavsample[1]
energy_bands = c(sig)
filtering_res = self.pass_band_filtering(energy_bands, rate_wavsample[0])
mod_4hz = self.modulation_4hz(filtering_res, rate_wavsample)
mod_4hz = self.averaging(mod_4hz)
e = bob.ap.Energy(rate_wavsample[0], wl, ws)
energy_array = e(rate_wavsample[1])
labels = self._voice_activity_detection(energy_array, mod_4hz)
labels = utils.smoothing(labels,self.smoothing_window) # discard isolated speech less than 100ms
logger.info("After Mod-4Hz based VAD there are %d frames remaining over %d", numpy.sum(labels), len(labels))
return labels, energy_array, mod_4hz | def mod_4hz(self, rate_wavsample) | Computes and returns the 4Hz modulation energy features for the given input wave file | 5.152548 | 5.078991 | 1.014483 |
import bob.io.matlab
# return the numpy array read from the data_file
data_path = biofile.make_path(directory, extension)
return bob.io.base.load(data_path) | def read_matlab_files(self, biofile, directory, extension) | Read pre-computed CQCC Matlab features here | 7.255395 | 7.241084 | 1.001976 |
f = bob.io.base.HDF5File(data_file, 'w')
f.set("rate", data[0], compression=compression)
f.set("data", data[1], compression=compression)
f.set("labels", data[2], compression=compression) | def write_data(self, data, data_file, compression=0) | Writes the given *preprocessed* data to a file with the given name. | 3.293894 | 3.1135 | 1.057939 |
# create parser
parser = argparse.ArgumentParser(description='Execute baseline algorithms with default parameters', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# add parameters
# - the algorithm to execute
parser.add_argument('-a', '--algorithms', choices = all_algorithms, default = ('gmm-voxforge',), nargs = '+', help = 'Select one (or more) algorithms that you want to execute.')
parser.add_argument('--all', action = 'store_true', help = 'Select all algorithms.')
# - the database to choose
parser.add_argument('-d', '--database', choices = available_databases, default = 'voxforge', help = 'The database on which the baseline algorithm is executed.')
# - the database to choose
parser.add_argument('-b', '--baseline-directory', default = 'baselines', help = 'The sub-directory, where the baseline results are stored.')
# - the directory to write
parser.add_argument('-f', '--directory', help = 'The directory to write the data of the experiment into. If not specified, the default directories of the verify.py script are used (see verify.py --help).')
# - use the Idiap grid -- option is only useful if you are at Idiap
parser.add_argument('-g', '--grid', action = 'store_true', help = 'Execute the algorithm in the SGE grid.')
# - run in parallel on the local machine
parser.add_argument('-l', '--parallel', type=int, help = 'Run the algorithms in parallel on the local machine, using the given number of parallel threads')
# - perform ZT-normalization
parser.add_argument('-z', '--zt-norm', action = 'store_false', help = 'Compute the ZT norm for the files (might not be availabe for all databases).')
# - just print?
parser.add_argument('-q', '--dry-run', action = 'store_true', help = 'Just print the commands, but do not execute them.')
# - evaluate the algorithm (after it has finished)
parser.add_argument('-e', '--evaluate', nargs='+', choices = ('EER', 'HTER', 'ROC', 'DET', 'CMC', 'RR'), help = 'Evaluate the results of the algorithms (instead of running them) using the given evaluation techniques.')
# TODO: add MIN-DCT measure
# - other parameters that are passed to the underlying script
parser.add_argument('parameters', nargs = argparse.REMAINDER, help = 'Parameters directly passed to the verify.py script.')
bob.core.log.add_command_line_option(parser)
args = parser.parse_args(command_line_parameters)
if args.all:
args.algorithms = all_algorithms
bob.core.log.set_verbosity_level(logger, args.verbose)
return args | def command_line_arguments(command_line_parameters) | Defines the command line parameters that are accepted. | 4.684913 | 4.676081 | 1.001889 |
e = bob.ap.Energy(rate_wavsample[0], self.win_length_ms, self.win_shift_ms)
energy_array = e(rate_wavsample[1])
labels = self._voice_activity_detection(energy_array)
# discard isolated speech a number of frames defined in smoothing_window
labels = utils.smoothing(labels,self.smoothing_window)
logger.info("After 2 Gaussian Energy-based VAD there are %d frames remaining over %d", numpy.sum(labels), len(labels))
return labels | def _compute_energy(self, rate_wavsample) | retreive the speech / non speech labels for the speech sample given by the tuple (rate, wave signal) | 10.598662 | 9.466896 | 1.11955 |
if c1 != []:
return (numpy.mean(c0, 0) + numpy.mean(c1, 0)) / 2.
else:
return numpy.mean(c0, 0) | def calc_mean(c0, c1=[]) | Calculates the mean of the data. | 2.254611 | 2.104144 | 1.07151 |
if c1 == []:
return numpy.std(c0, 0)
prop = float(len(c0)) / float(len(c1))
if prop < 1:
p0 = int(math.ceil(1 / prop))
p1 = 1
else:
p0 = 1
p1 = int(math.ceil(prop))
return numpy.std(numpy.vstack(p0 * [c0] + p1 * [c1]), 0) | def calc_std(c0, c1=[]) | Calculates the variance of the data. | 2.61402 | 2.557526 | 1.022089 |
mi = calc_mean(c0, c1)
std = calc_std(c0, c1)
if (nonStdZero):
std[std == 0] = 1
return mi, std | def calc_mean_std(c0, c1=[], nonStdZero=False) | Calculates both the mean of the data. | 2.624633 | 2.774793 | 0.945884 |
if not features.size:
raise ValueError("vad_filter_features(): data sample is empty, no features extraction is possible")
vad_labels = numpy.asarray(vad_labels, dtype=numpy.int8)
features = numpy.asarray(features, dtype=numpy.float64)
features = numpy.reshape(features, (vad_labels.shape[0], -1))
# logger.info("RatioVectorExtractor, vad_labels shape: %s", str(vad_labels.shape))
# print ("RatioVectorExtractor, features max: %f and min: %f" %(numpy.max(features), numpy.min(features)))
# first, take the whole thing, in case there are problems later
filtered_features = features
# if VAD detection worked on this sample
if vad_labels is not None and filter_frames != "no_filter":
# make sure the size of VAD labels and sectrogram lenght match
if len(vad_labels) == len(features):
# take only speech frames, as in VAD speech frames are 1 and silence are 0
speech, = numpy.nonzero(vad_labels)
silences = None
if filter_frames == "silence_only":
# take only silent frames - those for which VAD gave zeros
silences, = numpy.nonzero(vad_labels == 0)
if len(speech):
nzstart = speech[0] # index of the first non-zero
nzend = speech[-1] # index of the last non-zero
if filter_frames == "silence_only": # extract only silent frames
# take only silent frames in-between the speech
silences = silences[silences > nzstart]
silences = silences[silences < nzend]
filtered_features = features[silences, :]
elif filter_frames == "speech_only":
filtered_features = features[speech, :]
else: # when we take all
filtered_features = features[nzstart:nzend + 1, :] # numpy slicing is a non-closed interval [)
else:
logger.error("vad_filter_features(): VAD labels should be the same length as energy bands")
logger.info("vad_filter_features(): filtered_features shape: %s", str(filtered_features.shape))
return filtered_features | def vad_filter_features(vad_labels, features, filter_frames="trim_silence") | Trim the spectrogram to remove silent head/tails from the speech sample.
Keep all remaining frames or either speech or non-speech only
@param: filter_frames: the value is either 'silence_only' (keep the speech, remove everything else),
'speech_only' (only keep the silent parts), 'trim_silence' (trim silent heads and tails),
or 'no_filter' (no filter is applied) | 4.081829 | 3.948601 | 1.033741 |
n_effs = []
mode_types = []
fractions_te = []
fractions_tm = []
for s in tqdm.tqdm(structures, ncols=70):
self.solve(s)
n_effs.append(np.real(self.n_effs))
mode_types.append(self._get_mode_types())
fractions_te.append(self.fraction_te)
fractions_tm.append(self.fraction_tm)
if filename:
self._write_n_effs_to_file(
n_effs, self._modes_directory + filename, sweep_param_list
)
with open(self._modes_directory + "mode_types.dat", "w") as fs:
header = ",".join(
"Mode%i" % i for i, _ in enumerate(mode_types[0])
)
fs.write("# " + header + "\n")
for mt in mode_types:
txt = ",".join("%s %.2f" % pair for pair in mt)
fs.write(txt + "\n")
with open(self._modes_directory + "fraction_te.dat", "w") as fs:
header = "fraction te"
fs.write("# param sweep," + header + "\n")
for param, fte in zip(sweep_param_list, fractions_te):
txt = "%.6f," % param
txt += ",".join("%.2f" % f for f in fte)
fs.write(txt + "\n")
with open(self._modes_directory + "fraction_tm.dat", "w") as fs:
header = "fraction tm"
fs.write("# param sweep," + header + "\n")
for param, ftm in zip(sweep_param_list, fractions_tm):
txt = "%.6f," % param
txt += ",".join("%.2f" % f for f in ftm)
fs.write(txt + "\n")
if plot:
if MPL:
title = "$n_{eff}$ vs %s" % x_label
y_label = "$n_{eff}$"
else:
title = "n_{effs} vs %s" % x_label
y_label = "n_{eff}"
self._plot_n_effs(
self._modes_directory + filename, self._modes_directory + "fraction_te.dat", x_label, y_label, title
)
title = "TE Fraction vs %s" % x_label
self._plot_fraction(
self._modes_directory + "fraction_te.dat",
x_label,
"TE Fraction [%]",
title,
fraction_mode_list,
)
title = "TM Fraction vs %s" % x_label
self._plot_fraction(
self._modes_directory + "fraction_tm.dat",
x_label,
"TM Fraction [%]",
title,
fraction_mode_list,
)
return n_effs | def solve_sweep_structure(
self,
structures,
sweep_param_list,
filename="structure_n_effs.dat",
plot=True,
x_label="Structure number",
fraction_mode_list=[],
) | Find the modes of many structures.
Args:
structures (list): A list of `Structures` to find the modes
of.
sweep_param_list (list): A list of the parameter-sweep sweep
that was used. This is for plotting purposes only.
filename (str): The nominal filename to use when saving the
effective indices. Defaults to 'structure_n_effs.dat'.
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
x_label (str): x-axis text to display in the plot.
fraction_mode_list (list): A list of mode indices of the modes
that should be included in the TE/TM mode fraction plot.
If the list is empty, all modes will be included. The list
is empty by default.
Returns:
list: A list of the effective indices found for each structure. | 1.958156 | 1.907124 | 1.026759 |
n_effs = []
for w in tqdm.tqdm(wavelengths, ncols=70):
structure.change_wavelength(w)
self.solve(structure)
n_effs.append(np.real(self.n_effs))
if filename:
self._write_n_effs_to_file(
n_effs, self._modes_directory + filename, wavelengths
)
if plot:
if MPL:
title = "$n_{eff}$ vs Wavelength"
y_label = "$n_{eff}$"
else:
title = "n_{effs} vs Wavelength" % x_label
y_label = "n_{eff}"
self._plot_n_effs(
self._modes_directory + filename,
self._modes_directory + "fraction_te.dat",
"Wavelength",
"n_{eff}",
title,
)
return n_effs | def solve_sweep_wavelength(
self,
structure,
wavelengths,
filename="wavelength_n_effs.dat",
plot=True,
) | Solve for the effective indices of a fixed structure at
different wavelengths.
Args:
structure (Slabs): The target structure to solve
for modes.
wavelengths (list): A list of wavelengths to sweep
over.
filename (str): The nominal filename to use when saving the
effective indices. Defaults to 'wavelength_n_effs.dat'.
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
Returns:
list: A list of the effective indices found for each wavelength. | 3.36066 | 3.473199 | 0.967598 |
r
wl_nom = structure._wl
self.solve(structure)
n_ctrs = self.n_effs
structure.change_wavelength(wl_nom - wavelength_step)
self.solve(structure)
n_bcks = self.n_effs
structure.change_wavelength(wl_nom + wavelength_step)
self.solve(structure)
n_frws = self.n_effs
n_gs = []
for n_ctr, n_bck, n_frw in zip(n_ctrs, n_bcks, n_frws):
n_gs.append(
n_ctr - wl_nom * (n_frw - n_bck) / (2 * wavelength_step)
)
if filename:
with open(self._modes_directory + filename, "w") as fs:
fs.write("# Mode idx, Group index\n")
for idx, n_g in enumerate(n_gs):
fs.write("%i,%.3f\n" % (idx, np.round(n_g.real, 3)))
return n_gs | def solve_ng(self, structure, wavelength_step=0.01, filename="ng.dat") | r"""
Solve for the group index, :math:`n_g`, of a structure at a particular
wavelength.
Args:
structure (Structure): The target structure to solve
for modes.
wavelength_step (float): The step to take below and
above the nominal wavelength. This is used for
approximating the gradient of :math:`n_\mathrm{eff}`
at the nominal wavelength. Default is 0.01.
filename (str): The nominal filename to use when saving the
effective indices. Defaults to 'ng.dat'.
Returns:
list: A list of the group indices found for each mode. | 3.526048 | 3.122133 | 1.129372 |
modes_directory = "./modes_semi_vec/"
if not os.path.isdir(modes_directory):
os.mkdir(modes_directory)
filename = modes_directory + filename
for i, mode in enumerate(self._ms.modes):
filename_mode = self._get_mode_filename(
self._semi_vectorial_method, i, filename
)
self._write_mode_to_file(np.real(mode), filename_mode)
if plot:
if i == 0 and analyse:
A, centre, sigma_2 = anal.fit_gaussian(
self._structure.xc, self._structure.yc, np.abs(mode)
)
subtitle = (
"E_{max} = %.3f, (x_{max}, y_{max}) = (%.3f, %.3f), MFD_{x} = %.3f, "
"MFD_{y} = %.3f"
) % (A, centre[0], centre[1], sigma_2[0], sigma_2[1])
self._plot_mode(
self._semi_vectorial_method,
i,
filename_mode,
self.n_effs[i],
subtitle,
sigma_2[0],
sigma_2[1],
centre[0],
centre[1],
wavelength=self._structure._wl,
)
else:
self._plot_mode(
self._semi_vectorial_method,
i,
filename_mode,
self.n_effs[i],
wavelength=self._structure._wl,
)
return self.modes | def write_modes_to_file(self, filename="mode.dat", plot=True, analyse=True) | Writes the mode fields to a file and optionally plots them.
Args:
filename (str): The nominal filename to use for the saved
data. The suffix will be automatically be changed to
identifiy each mode number. Default is 'mode.dat'
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
analyse (bool): `True` if an analysis on the fundamental
mode should be performed. The analysis adds to the
plot of the fundamental mode the power mode-field
diameter (MFD) and marks it on the output, and it
marks with a cross the maximum E-field value.
Default is `True`.
Returns:
dict: A dictionary containing the effective indices
and mode field profiles (if solved for). | 3.211817 | 3.20604 | 1.001802 |
modes_directory = self._modes_directory
# Mode info file.
with open(modes_directory + "mode_info", "w") as fs:
fs.write("# Mode idx, Mode type, % in major direction, n_eff\n")
for i, (n_eff, (mode_type, percentage)) in enumerate(
zip(self.n_effs, self.mode_types)
):
mode_idx = str(i)
line = "%s,%s,%.2f,%.3f" % (
mode_idx,
mode_type,
percentage,
n_eff.real,
)
fs.write(line + "\n")
# Mode field plots.
for i, (mode, areas) in enumerate(zip(self._ms.modes, self.overlaps)):
mode_directory = "%smode_%i/" % (modes_directory, i)
if not os.path.isdir(mode_directory):
os.mkdir(mode_directory)
filename_full = mode_directory + filename
for (field_name, field_profile), area in zip(
mode.fields.items(), areas
):
if field_name in fields_to_write:
filename_mode = self._get_mode_filename(
field_name, i, filename_full
)
self._write_mode_to_file(
np.real(field_profile), filename_mode
)
if plot:
self._plot_mode(
field_name,
i,
filename_mode,
self.n_effs[i],
area=area,
wavelength=self._structure._wl,
)
return self.modes | def write_modes_to_file(
self,
filename="mode.dat",
plot=True,
fields_to_write=("Ex", "Ey", "Ez", "Hx", "Hy", "Hz"),
) | Writes the mode fields to a file and optionally plots them.
Args:
filename (str): The nominal filename to use for the saved
data. The suffix will be automatically be changed to
identifiy each field and mode number. Default is
'mode.dat'
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
fields_to_write (tuple): A tuple of strings where the
strings can be 'Ex', 'Ey', 'Ez', 'Hx', 'Hy' and 'Hz'
defining what part of the mode should be saved and
plotted. By default, all six components are written
and plotted.
Returns:
dict: A dictionary containing the effective indices
and mode field profiles (if solved for). | 3.391217 | 3.441931 | 0.985266 |
if render_kw is None:
render_kw = {}
if 'required' in render_kw and not force:
return render_kw
if field.flags.required:
render_kw['required'] = True
return render_kw | def set_required(field, render_kw=None, force=False) | Returns *render_kw* with *required* set if the field is required.
Sets the *required* key if the `required` flag is set for the field (this
is mostly the case if it is set by validators). The `required` attribute
is used by browsers to indicate a required field.
..note::
This won't change keys already present unless *force* is used. | 2.006963 | 2.408945 | 0.833129 |
if render_kw is None:
render_kw = {}
if field.errors:
classes = render_kw.get('class') or render_kw.pop('class_', '')
if classes:
render_kw['class'] = 'invalid {}'.format(classes)
else:
render_kw['class'] = 'invalid'
return render_kw | def set_invalid(field, render_kw=None) | Returns *render_kw* with `invalid` added to *class* on validation errors.
Set (or appends) 'invalid' to the fields CSS class(es), if the *field* got
any errors. 'invalid' is also set by browsers if they detect errors on a
field. | 2.314269 | 2.402562 | 0.963251 |
if render_kw is None:
render_kw = {}
for validator in field.validators:
if isinstance(validator, MINMAX_VALIDATORS):
if 'min' not in render_kw or force:
v_min = getattr(validator, 'min', -1)
if v_min not in (-1, None):
render_kw['min'] = v_min
if 'max' not in render_kw or force:
v_max = getattr(validator, 'max', -1)
if v_max not in (-1, None):
render_kw['max'] = v_max
return render_kw | def set_minmax(field, render_kw=None, force=False) | Returns *render_kw* with *min* and *max* set if validators use them.
Sets *min* and / or *max* keys if a `Length` or `NumberRange` validator is
using them.
..note::
This won't change keys already present unless *force* is used. | 1.869794 | 1.928797 | 0.96941 |
if render_kw is None:
render_kw = {}
if 'title' not in render_kw and getattr(field, 'description'):
render_kw['title'] = '{}'.format(field.description)
return render_kw | def set_title(field, render_kw=None) | Returns *render_kw* with *min* and *max* set if required.
If the field got a *description* but no *title* key is set, the *title* is
set to *description*. | 2.475147 | 2.56316 | 0.965662 |
if isinstance(field, UnboundField):
msg = 'This function needs a bound field not: {}'
raise ValueError(msg.format(field))
kwargs = render_kw.copy() if render_kw else {}
kwargs = set_required(field, kwargs, force) # is field required?
kwargs = set_invalid(field, kwargs) # is field invalid?
kwargs = set_minmax(field, kwargs, force) # check validators for min/max
kwargs = set_title(field, kwargs) # missing tile?
return kwargs | def get_html5_kwargs(field, render_kw=None, force=False) | Returns a copy of *render_kw* with keys added for a bound *field*.
If some *render_kw* are given, the new keys are added to a copy of them,
which is then returned. If none are given, a dictionary containing only
the automatically generated keys is returned.
.. important::
This might add new keys but won't changes any values if a key is
already in *render_kw*, unless *force* is used.
Raises:
ValueError: if *field* is an :cls:`UnboundField`.
The following keys are set automatically:
:required:
Sets the *required* key if the `required` flag is set for the
field (this is mostly the case if it is set by validators). The
`required` attribute is used by browsers to indicate a required field.
:invalid:
Set (or appends) 'invalid' to the fields CSS class(es), if the *field*
got any errors. 'invalid' is also set by browsers if they detect
errors on a field.
:min / max:
Sets *min* and / or *max* keys if a `Length` or `NumberRange`
validator is using them.
:title:
If the field got a *description* but no *title* key is set, the
*title* is set to *description*. | 4.894458 | 4.286582 | 1.141809 |
field_kw = getattr(field, 'render_kw', None)
if field_kw is not None:
render_kw = dict(field_kw, **render_kw)
render_kw = get_html5_kwargs(field, render_kw)
return field.widget(field, **render_kw) | def render_field(self, field, render_kw) | Returns the rendered field after adding auto–attributes.
Calls the field`s widget with the following kwargs:
1. the *render_kw* set on the field are used as based
2. and are updated with the *render_kw* arguments from the render call
3. this is used as an argument for a call to `get_html5_kwargs`
4. the return value of the call is used as final *render_kw* | 2.740145 | 2.120149 | 1.292431 |
'''
np.array: The grid points in x.
'''
if None not in (self.x_min, self.x_max, self.x_step) and \
self.x_min != self.x_max:
x = np.arange(self.x_min, self.x_max+self.x_step-self.y_step*0.1, self.x_step)
else:
x = np.array([])
return x | def x(self) | np.array: The grid points in x. | 3.21769 | 2.565017 | 1.254452 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.