code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def _get_project_name(args):
    """Get project name.

    Uses the first positional argument when given; otherwise prompts
    repeatedly until the user supplies a non-empty name.
    """
    project_name = args.get(0)
    puts("")
    while not project_name:
        project_name = raw_input("What is the project's short directory name? (e.g. my_project) ")
    return project_name
9.068439
8.488169
1.068362
suffix_len = len(suffix) if len(string) < suffix_len: # the string param was shorter than the suffix raise ValueError("A suffix can not be bigger than string argument.") if string.endswith(suffix): # return from the beginning up to # but not including the first letter # in the suffix return string[0:-suffix_len] else: # leave unharmed return string
def _clean_suffix(string, suffix)
If string endswith the suffix, remove it. Else leave it alone.
6.356915
6.069581
1.04734
def _get_path(name, settings, mkdir=True):
    """Generate a project path.

    Offers ``projects_path`` from settings as the default parent when
    configured; otherwise prompts until a path is entered. The result is
    user-expanded (``~`` resolved).
    """
    default_projects_path = settings.config.get("projects_path")
    if default_projects_path:
        path = raw_input("\nWhere would you like to create this project? [{0}/{1}] ".format(default_projects_path, name))
        if not path:
            # Accept the offered default location
            path = os.path.join(default_projects_path, name)
    else:
        path = None
        while not path:
            path = raw_input("\nWhere would you like to create this project? (e.g. ~/tarbell/) ")
    return os.path.expanduser(path)
3.251962
3.043102
1.068634
def _mkdir(path):
    """Make a directory or bail.

    Exits the process with an error message when the directory already
    exists or any other OSError occurs.
    """
    try:
        os.mkdir(path)
    except OSError as e:
        if e.errno == 17:  # 17 == EEXIST: directory already present
            show_error("ABORTING: Directory {0} already exists.".format(path))
        else:
            show_error("ABORTING: OSError {0}".format(e))
        sys.exit()
2.962467
2.717871
1.089996
def _get_template(settings):
    """Prompt user to pick template from a list.

    Loops until a valid numeric choice is entered; an empty answer
    defaults to the first template.
    """
    puts("\nPick a template\n")
    while True:
        _list_templates(settings)
        index = raw_input("\nWhich template would you like to use? [1] ")
        if not index:
            index = "1"
        try:
            index = int(index) - 1
            return settings.config["project_templates"][index]
        except (ValueError, IndexError):
            # Narrowed from a bare ``except:``: only non-numeric input or an
            # out-of-range choice should re-prompt; anything else propagates.
            puts("\"{0}\" isn't a valid option!".format(colored.red("{0}".format(index))))
4.312243
3.843551
1.121942
def _list_templates(settings):
    """List templates from settings, numbered starting at 1."""
    for idx, option in enumerate(settings.config.get("project_templates"), start=1):
        label = colored.yellow("[{0}]".format(idx))
        title = colored.cyan(option.get("name"))
        puts("    {0!s:5} {1!s:36}".format(label, title))
        url = option.get("url")
        if url:
            puts("        {0}\n".format(url))
4.323617
4.235364
1.020837
def _create_spreadsheet(name, title, path, settings):
    """Create a Google spreadsheet for the project.

    Prompts for confirmation and sharing addresses, uploads the blueprint
    xlsx to Google Drive as a spreadsheet, and shares it with the given
    accounts.

    :returns: the new spreadsheet's file id, or None when skipped or failed
    """
    if not settings.client_secrets:
        # No Google API credentials configured; silently skip.
        return None
    create = raw_input("Would you like to create a Google spreadsheet? [Y/n] ")
    if create and not create.lower() == "y":
        return puts("Not creating spreadsheet.")
    # Fixed duplicated word ("access to this this spreadsheet") in the prompt.
    email_message = (
        "What Google account(s) should have access to this "
        "spreadsheet? (Use a full email address, such as "
        "your.name@gmail.com. Separate multiple addresses with commas.)")
    if settings.config.get("google_account"):
        emails = raw_input("\n{0}(Default: {1}) ".format(
            email_message, settings.config.get("google_account")))
        if not emails:
            emails = settings.config.get("google_account")
    else:
        emails = None
        while not emails:
            emails = raw_input(email_message)
    try:
        media_body = _MediaFileUpload(
            os.path.join(path, '_blueprint/_spreadsheet.xlsx'),
            mimetype='application/vnd.ms-excel')
    except IOError:
        show_error("_blueprint/_spreadsheet.xlsx doesn't exist!")
        return None
    service = get_drive_api()
    body = {
        'title': '{0} (Tarbell)'.format(title),
        'description': '{0} ({1})'.format(title, name),
        'mimeType': 'application/vnd.ms-excel',
    }
    try:
        # convert=True turns the uploaded xlsx into a native Google sheet
        newfile = service.files()\
            .insert(body=body, media_body=media_body, convert=True).execute()
        for email in emails.split(","):
            _add_user_to_file(newfile['id'], service, user_email=email.strip())
        puts("\n{0!s}! View the spreadsheet at {1!s}".format(
            colored.green("Success"),
            colored.yellow("https://docs.google.com/spreadsheet/ccc?key={0}"
                           .format(newfile['id']))
        ))
        return newfile['id']
    except errors.HttpError as error:
        show_error('An error occurred creating spreadsheet: {0}'.format(error))
        return None
3.684833
3.626543
1.016073
def _add_user_to_file(file_id, service, user_email,
                      perm_type='user', role='writer'):
    """Grant the given set of permissions on a Drive file.

    :param file_id: id of the file to share
    :param service: an already-credentialed Google Drive service instance
    :param user_email: account the permission is granted to
    :param perm_type: Drive permission type (default 'user')
    :param role: Drive role (default 'writer')
    """
    new_permission = {
        'value': user_email,
        'type': perm_type,
        'role': role
    }
    try:
        service.permissions()\
            .insert(fileId=file_id, body=new_permission)\
            .execute()
    except errors.HttpError as error:
        # Fixed garbled message (was "An error adding users to spreadsheet")
        show_error('An error occurred adding users to spreadsheet: '
                   '{0}'.format(error))
3.204411
3.208552
0.998709
def _copy_config_template(name, title, template, path, key, settings):
    """Get and render tarbell_config.py.template from Tarbell default.

    Builds the template context from settings, optionally seeds
    DEFAULT_CONTEXT from the blueprint spreadsheet when no Google
    spreadsheet key is present, then renders and writes tarbell_config.py.
    """
    puts("\nCopying configuration file")
    context = settings.config
    context.update({
        "default_context": {
            "name": name,
            "title": title,
        },
        "name": name,
        "title": title,
        "template_repo_url": template.get('url'),
        "key": key,
    })
    # @TODO refactor this a bit
    if not key:
        # No Google spreadsheet: fall back to the bundled xlsx for context.
        spreadsheet_path = os.path.join(path, '_blueprint/', '_spreadsheet.xlsx')
        try:
            with open(spreadsheet_path, "rb") as f:
                puts("Copying _blueprint/_spreadsheet.xlsx to tarbell_config.py's DEFAULT_CONTEXT")
                data = process_xlsx(f.read())
                if 'values' in data:
                    data = copy_global_values(data)
                context["default_context"].update(data)
        except IOError:
            # Missing blueprint spreadsheet is fine; just skip seeding.
            pass
    s3_buckets = settings.config.get("s3_buckets")
    if s3_buckets:
        puts("")
        for bucket, bucket_conf in s3_buckets.items():
            puts("Configuring {0!s} bucket at {1!s}\n".format(
                colored.green(bucket),
                colored.yellow("{0}/{1}".format(bucket_conf['uri'], name))
            ))
    puts("\n- Creating {0!s} project configuration file".format(
        colored.cyan("tarbell_config.py")
    ))
    template_dir = os.path.dirname(pkg_resources.resource_filename("tarbell", "templates/tarbell_config.py.template"))
    loader = jinja2.FileSystemLoader(template_dir)
    env = jinja2.Environment(loader=loader)
    env.filters["pprint_lines"] = pprint_lines  # For dumping context
    content = env.get_template('tarbell_config.py.template').render(context)
    # Use a context manager so the handle is flushed and closed (the
    # original left the codecs file object dangling).
    with codecs.open(os.path.join(path, "tarbell_config.py"), "w", encoding="utf-8") as f:
        f.write(content)
    puts("\n- Done copying configuration file")
3.8272
3.627208
1.055137
try: shutil.rmtree(dir) # delete directory except OSError as exc: if exc.errno != 2: # code 2 - no such file or directory raise # re-raise exception except UnboundLocalError: pass
def _delete_dir(dir)
Delete a directory.
2.715965
2.705243
1.003963
def def_cmd(name=None, short=None, fn=None, usage=None, help=None):
    """Define a command and register it with the Command registry."""
    Command.register(
        Command(name=name, short=short, fn=fn, usage=usage, help=help))
2.931225
2.760828
1.061719
def save(self):
    """Save settings.

    Drops project templates that lack a url, then dumps the config to
    ``self.path`` as YAML.
    """
    with open(self.path, "w") as f:
        templates = self.config["project_templates"]
        self.config["project_templates"] = [
            t for t in templates if t.get("url")]
        yaml.dump(self.config, f, default_flow_style=False)
3.513136
3.510932
1.000628
def read_file(path, absolute=False, encoding='utf-8'):
    """Read the file at ``path``.

    If ``absolute`` is True, use the path as given; otherwise it is
    relative to the Tarbell template root dir. For example:

    .. code-block:: html+jinja

        <div class="chapter">
            {{ read_file('_chapters/one.txt')|linebreaks }}
        </div>

    :returns: file contents, or None when the file cannot be read
    """
    site = g.current_site
    if not absolute:
        path = os.path.join(site.path, path)
    try:
        # Close the handle deterministically (the original leaked it).
        with codecs.open(path, 'r', encoding) as f:
            return f.read()
    except IOError:
        return None
3.290505
4.177949
0.787589
def render_file(context, path, absolute=False):
    """Like :py:func:`read_file`, except the file is rendered as a Jinja
    template using the current context.

    If ``absolute`` is True, use the path as given; otherwise it is
    relative to the Tarbell template root dir.
    """
    site = g.current_site
    if absolute:
        full_path = path
    else:
        full_path = os.path.join(site.path, path)
    return render_template(full_path, **context)
3.767873
5.704065
0.660559
def format_date(value, format='%b %d, %Y', convert_tz=None):
    """Format an Excel serial date or a date string.

    Numeric values are treated as Excel serial dates (day 25569 is
    1970-01-01); strings are parsed with dateutil. Pass ``format=None``
    to get the :py:class:`datetime.datetime` object instead of a string.

    >>> format_date(42419.82163)
    'Feb 19, 2016'
    """
    if isinstance(value, (float, int)):
        # Convert Excel serial days to a Unix timestamp.
        seconds = (value - 25569) * 86400.0
        parsed = datetime.datetime.utcfromtimestamp(seconds)
    else:
        parsed = dateutil.parser.parse(value)
    if convert_tz:
        parsed = parsed.astimezone(tz=dateutil.tz.gettz(convert_tz))
    if format:
        return parsed.strftime(format)
    return parsed
2.108549
2.473446
0.852474
def pprint_lines(value):
    """Pretty print lines, indenting everything between the opening and
    closing delimiters, and return it as template-safe markup."""
    pformatted = pformat(value, width=1, indent=4)
    head = pformatted[0]
    middle = pformatted[1:-1]
    tail = pformatted[-1]
    return Markup("{0}\n {1}\n{2}".format(head, middle, tail))
3.929035
3.702639
1.061144
def plot_composition(df, intervals, axes=None):
    """Plot time series of generics and label the underlying instruments
    each series is composed of.

    :param df: DataFrame of time series; each column is a generic
    :param intervals: DataFrame with columns
        ['contract', 'generic', 'start_date', 'end_date'] giving when
        each contract is used in a generic series
    :param axes: optional list of matplotlib Axes, one per column of df
    :returns: the list of Axes drawn on
    """
    generics = df.columns
    if (axes is not None) and (len(axes) != len(generics)):
        raise ValueError("If 'axes' is not None then it must be the same "
                         "length as 'df.columns'")
    if axes is None:
        _, axes = plt.subplots(nrows=len(generics), ncols=1)
        if len(generics) == 1:
            axes = [axes]
    for ax, generic in zip(axes, generics):
        ax.plot(df.loc[:, generic], label=generic)
        # no legend line to avoid clutter
        ax.legend(loc='center right', handlelength=0)
        dates = intervals.loc[intervals.loc[:, "generic"] == generic,
                              ["start_date", "end_date", "contract"]]
        date_ticks = set(
            dates.loc[:, "start_date"].tolist()
            + dates.loc[:, "end_date"].tolist()
        )
        ax.set_xticks([ts.toordinal() for ts in date_ticks])
        ax.set_xticklabels([ts.strftime("%Y-%m-%d") for ts in date_ticks])
        y_top = ax.get_ylim()[1]
        # label and colour each underlying, alternating shading colour
        for n, (_, dt1, dt2, instr) in enumerate(dates.itertuples()):
            fc = "b" if n % 2 else "r"
            ax.axvspan(dt1, dt2, facecolor=fc, alpha=0.2)
            x_mid = dt1 + (dt2 - dt1) / 2
            ax.text(x_mid, y_top, instr, rotation=45)
    return axes
2.759608
2.619056
1.053665
def intervals(weights):
    """Extract intervals where generics are composed of different
    tradeable instruments.

    :param weights: DataFrame (or dict of DataFrames keyed by root
        generic) with generic columns and a (date, contract) MultiIndex;
        values are weights on tradeables
    :returns: DataFrame with columns
        ['contract', 'generic', 'start_date', 'end_date']
    """
    if isinstance(weights, dict):
        pieces = [_intervals(weights[root]) for root in weights]
        intrvls = pd.concat(pieces, axis=0)
    else:
        intrvls = _intervals(weights)
    return intrvls.reset_index(drop=True)
2.529867
2.491196
1.015523
def synchronize(self):
    """Synchronize Router DB from Neutron DB with EOS.

    Walks the Neutron DB and ensures every router created there exists
    on EOS, then adds their interfaces. Relies on the idempotency of EOS
    configuration, so the same commands may safely be repeated.
    """
    LOG.info(_LI('Syncing Neutron Router DB <-> EOS'))
    routers, router_interfaces = self.get_routers_and_interfaces()
    expected_vrfs = set()
    if self._use_vrf:
        for r in routers:
            expected_vrfs.add(
                self.driver._arista_router_name(r['id'], r['name']))
    expected_vlans = {r['seg_id'] for r in router_interfaces}
    if self._enable_cleanup:
        self.do_cleanup(expected_vrfs, expected_vlans)
    self.create_routers(routers)
    self.create_router_interfaces(router_interfaces)
4.717916
4.070899
1.158937
def create_router(self, context, router):
    """Create a new router entry in DB, and create it in Arista HW.

    Rolls the DB entry back (and re-raises) if hardware programming
    fails.
    """
    # Add router to the DB
    new_router = super(AristaL3ServicePlugin, self).create_router(
        context, router)
    # create router on the Arista Hw
    try:
        self.driver.create_router(context, new_router)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error creating router on Arista HW router=%s "),
                      new_router)
            # Undo the DB insert so Neutron and EOS stay consistent
            super(AristaL3ServicePlugin, self).delete_router(
                context, new_router['id'])
    else:
        return new_router
2.863437
2.420021
1.183228
def update_router(self, context, router_id, router):
    """Update an existing router in DB, and update it in Arista HW.

    NOTE: on hardware failure the error is only logged and None is
    returned; there is no rollback or re-raise here.
    """
    # Read existing router record from DB
    original_router = self.get_router(context, router_id)
    # Update router DB
    new_router = super(AristaL3ServicePlugin, self).update_router(
        context, router_id, router)
    # Modify router on the Arista Hw
    try:
        self.driver.update_router(context, router_id,
                                  original_router, new_router)
    except Exception:
        LOG.error(_LE("Error updating router on Arista HW router=%s "),
                  new_router)
    else:
        return new_router
3.65604
3.089316
1.183446
def delete_router(self, context, router_id):
    """Delete an existing router from Arista HW as well as from the DB.

    Hardware errors are logged but never block the DB deletion.
    """
    db_router = self.get_router(context, router_id)
    # Delete router on the Arista Hw
    try:
        self.driver.delete_router(context, router_id, db_router)
    except Exception as e:
        LOG.error(_LE("Error deleting router on Arista HW "
                      "router %(r)s exception=%(e)s"),
                  {'r': db_router, 'e': e})
    super(AristaL3ServicePlugin, self).delete_router(context, router_id)
2.999559
2.653892
1.130249
def add_router_interface(self, context, router_id, interface_info):
    """Add a subnet of a network to an existing router.

    Programs the SVI on Arista HW; on failure the Neutron interface is
    removed again and the exception re-raised.
    """
    new_router = super(AristaL3ServicePlugin, self).add_router_interface(
        context, router_id, interface_info)
    core = directory.get_plugin()

    # Get network info for the subnet that is being added to the router.
    # Check if the interface information is by port-id or subnet-id
    add_by_port, add_by_sub = self._validate_interface_info(interface_info)
    if add_by_sub:
        subnet = core.get_subnet(context, interface_info['subnet_id'])
    elif add_by_port:
        port = core.get_port(context, interface_info['port_id'])
        subnet = core.get_subnet(context,
                                 port['fixed_ips'][0]['subnet_id'])

    # To create SVI's in Arista HW, the segmentation Id is required
    # for this network.
    ml2_db = NetworkContext(self, context, {'id': subnet['network_id']})
    seg_id = ml2_db.network_segments[0]['segmentation_id']

    # Package all the info needed for Hw programming
    router = self.get_router(context, router_id)
    router_info = copy.deepcopy(new_router)
    router_info.update({
        'seg_id': seg_id,
        'name': router['name'],
        'cidr': subnet['cidr'],
        'gip': subnet['gateway_ip'],
        'ip_version': subnet['ip_version'],
    })
    try:
        self.driver.add_router_interface(context, router_info)
        return new_router
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error Adding subnet %(subnet)s to "
                          "router %(router_id)s on Arista HW"),
                      {'subnet': subnet, 'router_id': router_id})
            # Roll back the Neutron-side interface addition
            super(AristaL3ServicePlugin, self).remove_router_interface(
                context, router_id, interface_info)
2.727738
2.707018
1.007654
def remove_router_interface(self, context, router_id, interface_info):
    """Remove a subnet of a network from an existing router.

    Removes the interface in Neutron first, then unprograms the SVI on
    Arista HW; hardware errors are logged, not re-raised.
    """
    router_to_del = (
        super(AristaL3ServicePlugin, self).remove_router_interface(
            context, router_id, interface_info)
    )
    # Get network information of the subnet that is being removed
    core = directory.get_plugin()
    subnet = core.get_subnet(context, router_to_del['subnet_id'])
    network_id = subnet['network_id']

    # For SVI removal from Arista HW, segmentation ID is needed
    ml2_db = NetworkContext(self, context, {'id': network_id})
    seg_id = ml2_db.network_segments[0]['segmentation_id']

    router = self.get_router(context, router_id)
    router_info = copy.deepcopy(router_to_del)
    router_info['seg_id'] = seg_id
    router_info['name'] = router['name']

    try:
        self.driver.remove_router_interface(context, router_info)
        return router_to_del
    except Exception as exc:
        # BUG FIX: the original format string read "Exception =(exc)s",
        # so the exception was never interpolated; use %(exc)s (and a
        # separating space before "Exception").
        LOG.error(_LE("Error removing interface %(interface)s from "
                      "router %(router_id)s on Arista HW "
                      "Exception=%(exc)s"),
                  {'interface': interface_info, 'router_id': router_id,
                   'exc': exc})
3.219028
3.247621
0.991196
def initialize_switch_endpoints(self):
    """Initialize EAPI clients for every configured switch."""
    self._switches = {}
    self._port_group_info = {}
    self._validate_config()
    for entry in cfg.CONF.ml2_arista.switch_info:
        ip, user, password = entry.split(":")
        # A literal "''" in the config means an empty password
        if password == "''":
            password = ''
        self._switches[ip] = api.EAPIClient(
            ip, user, password, verify=False,
            timeout=cfg.CONF.ml2_arista.conn_timeout)
    self._check_dynamic_acl_support()
3.785207
3.780148
1.001338
cmds = ['ip access-list openstack-test dynamic', 'no ip access-list openstack-test'] for switch_ip, switch_client in self._switches.items(): try: self.run_openstack_sg_cmds(cmds) except Exception: LOG.error("Switch %s does not support dynamic ACLs. SG " "support will not be enabled on this switch.", switch_ip)
def _check_dynamic_acl_support(self)
Log an error if any switches don't support dynamic ACLs
6.812077
5.427348
1.255139
def _validate_config(self):
    """Ensure at least one switch is configured, else raise."""
    switch_info = cfg.CONF.ml2_arista.get('switch_info')
    if len(switch_info) < 1:
        msg = _('Required option - when "sec_group_support" is enabled, '
                'at least one switch must be specified ')
        LOG.exception(msg)
        raise arista_exc.AristaConfigError(msg=msg)
6.872017
5.346809
1.285256
def run_openstack_sg_cmds(self, commands, switch):
    """Send a CAPI (Command API) command list to EOS.

    The command list is wrapped with the enable/configure prefix and
    exit postfix EOS expects.

    :param commands: list of commands to be executed on EOS
    :param switch: endpoint on the Arista switch to be configured
    :returns: EOS results, or [] for a missing switch / empty command list
    """
    if not switch:
        LOG.exception("No client found for switch")
        return []
    if len(commands) == 0:
        return []
    full_command = ['enable', 'configure'] + commands + ['exit']
    return self._run_eos_cmds(full_command, switch)
4.358607
5.077155
0.858474
def _run_eos_cmds(self, commands, switch):
    """Execute commands on EOS with no prefix/postfix wrapping.

    Useful for show commands. Errors are logged and swallowed, in which
    case None is returned.

    :param commands: list of commands to be executed on EOS
    :param switch: endpoint on the Arista switch to be configured
    """
    LOG.info(_LI('Executing command on Arista EOS: %s'), commands)
    try:
        # EOS returns an array of return values, one per command
        results = switch.execute(commands)
    except Exception:
        msg = (_('Error occurred while trying to execute '
                 'commands %(cmd)s on EOS %(host)s') %
               {'cmd': commands, 'host': switch})
        LOG.exception(msg)
    else:
        LOG.info(_LI('Results of execution on Arista EOS: %s'), results)
        return results
3.662002
3.78558
0.967356
switchports = [] if profile.get('local_link_information'): for link in profile['local_link_information']: if 'switch_info' in link and 'port_id' in link: switch = link['switch_info'] interface = link['port_id'] switchports.append((switch, interface)) else: LOG.warning("Incomplete link information: %s", link) return switchports
def _get_switchports(profile)
Return list of (switch_ip, interface) tuples from local_link_info
2.390297
1.878582
1.272394
if switches is None: switches = self._switches.keys() for switch_ip in switches: client = self._switches.get(switch_ip) ret = self._run_eos_cmds(['show interfaces'], client) if not ret or len(ret) == 0: LOG.warning("Unable to retrieve interface info for %s", switch_ip) continue intf_info = ret[0] self._port_group_info[switch_ip] = intf_info.get('interfaces', {})
def _update_port_group_info(self, switches=None)
Refresh data on switch interfaces' port group membership
3.056086
2.905736
1.051742
all_intf_info = self._port_group_info.get(switch, {}) intf_info = all_intf_info.get(port_id, {}) member_info = intf_info.get('interfaceMembership', '') port_group_info = re.search('Member of (?P<port_group>\S+)', member_info) if port_group_info: port_id = port_group_info.group('port_group') return port_id
def _get_port_for_acl(self, port_id, switch)
Gets interface name for ACLs Finds the Port-Channel name if port_id is in a Port-Channel, otherwise ACLs are applied to Ethernet interface. :param port_id: Name of port from ironic db :param server: Server endpoint on the Arista switch to be configured
3.254777
3.66531
0.887995
def _supported_rule(protocol, ethertype):
    """Check that the rule is an IPv4 rule of a supported protocol."""
    return bool(protocol) \
        and protocol in utils.SUPPORTED_SG_PROTOCOLS \
        and ethertype == n_const.IPv4
5.054893
4.643167
1.088674
def _format_rule(self, protocol, cidr, min_port, max_port, direction):
    """Build an EOS-formatted ACL rule string for one SG rule."""
    if cidr is None:
        cidr = 'any'
    # Ingress rules match on source, egress rules on destination
    if direction == n_const.INGRESS_DIRECTION:
        src_ip, dst_ip = cidr, 'any'
    elif direction == n_const.EGRESS_DIRECTION:
        src_ip, dst_ip = 'any', cidr
    if protocol == n_const.PROTO_NAME_ICMP:
        # For ICMP the ports encode message type and code
        rule = "permit icmp %s %s" % (src_ip, dst_ip)
        if min_port:
            rule += " %s" % (min_port)
        if max_port:
            rule += " %s" % (max_port)
    else:
        rule = "permit %s %s %s" % (protocol, src_ip, dst_ip)
        if min_port and max_port:
            rule += " range %s %s" % (min_port, max_port)
        elif min_port:
            rule += " eq %s" % min_port
    return rule
1.829356
1.776307
1.029865
def _format_rules_for_eos(self, rules):
    """Format a list of SG rules for EOS, sorted into
    (ingress, egress) lists. Unsupported rules are skipped."""
    in_rules = []
    eg_rules = []
    for rule in rules:
        if not self._supported_rule(rule.get('protocol'),
                                    rule.get('ethertype')):
            continue
        formatted = self._format_rule(rule.get('protocol'),
                                      rule.get('remote_ip_prefix', 'any'),
                                      rule.get('port_range_min'),
                                      rule.get('port_range_max'),
                                      rule.get('direction'))
        if rule['direction'] == n_const.INGRESS_DIRECTION:
            in_rules.append(formatted)
        elif rule['direction'] == n_const.EGRESS_DIRECTION:
            eg_rules.append(formatted)
    return in_rules, eg_rules
2.210408
1.980312
1.116192
def run_cmds_on_all_switches(self, cmds):
    """Run ``cmds`` on every configured switch.

    Used for ACL and rule creation/deletion, since ACLs and rules must
    exist on all switches.
    """
    for client in self._switches.values():
        self.run_openstack_sg_cmds(cmds, client)
8.115989
9.762865
0.831312
def run_per_switch_cmds(self, switch_cmds):
    """Apply per-switch command lists to their own switches.

    :param switch_cmds: mapping of switch IP -> list of commands; each
        list runs only on its switch (used for interface ACL binding,
        which varies from switch to switch)
    """
    for switch_ip in switch_cmds:
        self.run_openstack_sg_cmds(switch_cmds[switch_ip],
                                   self._switches.get(switch_ip))
5.200306
5.322683
0.977009
switchports = self._get_switchports(profile) switches = set([switchport[0] for switchport in switchports]) return switches
def _get_switches(self, profile)
Get set of switches referenced in a port binding profile
4.058698
3.502457
1.158815
def get_create_security_group_commands(self, sg_id, sg_rules):
    """Commands for creating the ingress and egress ACLs of a group."""
    in_rules, eg_rules = self._format_rules_for_eos(sg_rules)
    cmds = ["ip access-list %s dynamic"
            % self._acl_name(sg_id, n_const.INGRESS_DIRECTION)]
    cmds.extend(in_rules)
    cmds.append("exit")
    cmds.append("ip access-list %s dynamic"
                % self._acl_name(sg_id, n_const.EGRESS_DIRECTION))
    cmds.extend(eg_rules)
    cmds.append("exit")
    return cmds
2.359293
2.303481
1.024229
def get_delete_security_group_commands(self, sg_id):
    """Commands for deleting both direction ACLs of a group."""
    return [
        "no ip access-list %s" % self._acl_name(
            sg_id, n_const.INGRESS_DIRECTION),
        "no ip access-list %s" % self._acl_name(
            sg_id, n_const.EGRESS_DIRECTION),
    ]
2.522011
2.360291
1.068517
def _get_rule_cmds(self, sg_id, sg_rule, delete=False):
    """Helper producing the commands to add (or, with ``delete=True``,
    remove) a single security group rule from its ACLs."""
    prefix = "no " if delete else ""
    in_rules, eg_rules = self._format_rules_for_eos([sg_rule])
    cmds = []
    if in_rules:
        cmds.append("ip access-list %s dynamic"
                    % self._acl_name(sg_id, n_const.INGRESS_DIRECTION))
        cmds.extend(prefix + rule for rule in in_rules)
        cmds.append("exit")
    if eg_rules:
        cmds.append("ip access-list %s dynamic"
                    % self._acl_name(sg_id, n_const.EGRESS_DIRECTION))
        cmds.extend(prefix + rule for rule in eg_rules)
        cmds.append("exit")
    return cmds
2.305445
2.250569
1.024383
def get_delete_security_group_rule_commands(self, sg_id, sg_rule):
    """Commands for removing one rule from a group's ACLs."""
    delete_cmds = self._get_rule_cmds(sg_id, sg_rule, delete=True)
    return delete_cmds
4.806719
4.316552
1.113555
def _get_interface_commands(self, sg_id, profile, delete=False):
    """Helper producing per-switch commands to apply (or, with
    ``delete=True``, remove) a group's ACLs on every interface in a
    binding profile."""
    prefix = "no " if delete else ""
    switch_cmds = {}
    for switch_ip, intf in self._get_switchports(profile):
        # LAG members get the ACL on the Port-Channel, not the member
        intf_id = self._get_port_for_acl(intf, switch_ip)
        cmds = ["interface %s" % intf_id]
        in_name = self._acl_name(sg_id, n_const.INGRESS_DIRECTION)
        cmds.append(prefix + "ip access-group %s %s"
                    % (in_name, a_const.INGRESS_DIRECTION))
        eg_name = self._acl_name(sg_id, n_const.EGRESS_DIRECTION)
        cmds.append(prefix + "ip access-group %s %s"
                    % (eg_name, a_const.EGRESS_DIRECTION))
        cmds.append("exit")
        switch_cmds.setdefault(switch_ip, []).extend(cmds)
    return switch_cmds
2.507464
2.460177
1.019221
def get_remove_security_group_commands(self, sg_id, profile):
    """Commands for removing a group's ACLs from bound interfaces."""
    removal_cmds = self._get_interface_commands(sg_id, profile,
                                                delete=True)
    return removal_cmds
6.365817
5.158347
1.234081
parsed_acls = dict() for acl in acl_config['aclList']: parsed_acls[acl['name']] = set() for rule in acl['sequence']: parsed_acls[acl['name']].add(rule['text']) return parsed_acls
def _parse_acl_config(self, acl_config)
Parse configured ACLs and rules ACLs are returned as a dict of rule sets: {<eos_acl1_name>: set([<eos_acl1_rules>]), <eos_acl2_name>: set([<eos_acl2_rules>]), ..., }
3.231345
2.837538
1.138785
def _parse_binding_config(self, binding_config):
    """Parse configured interface -> ACL bindings into a set of
    (intf, acl_name, direction) tuples."""
    bindings = set()
    for acl in binding_config['aclList']:
        for intf in acl['configuredIngressIntfs']:
            bindings.add(
                (intf['name'], acl['name'], a_const.INGRESS_DIRECTION))
        for intf in acl['configuredEgressIntfs']:
            bindings.add(
                (intf['name'], acl['name'], a_const.EGRESS_DIRECTION))
    return bindings
3.482315
2.430287
1.432882
cmds = ["enable", "show ip access-lists dynamic", "show ip access-lists summary dynamic"] switch = self._switches.get(switch_ip) _, acls, bindings = self._run_eos_cmds(cmds, switch) parsed_acls = self._parse_acl_config(acls) parsed_bindings = self._parse_binding_config(bindings) return parsed_acls, parsed_bindings
def _get_dynamic_acl_info(self, switch_ip)
Retrieve ACLs, ACLs rules and interface bindings from switch
3.9515
3.491319
1.131807
def get_expected_acls(self):
    """Query the Neutron DB for security groups and rules and build the
    expected ACL map: {acl_name: set(rules), ...}."""
    expected_acls = collections.defaultdict(set)
    for sg in db_lib.get_security_groups():
        in_rules, out_rules = self._format_rules_for_eos(sg['rules'])
        ingress_name = self._acl_name(sg['id'],
                                      n_const.INGRESS_DIRECTION)
        egress_name = self._acl_name(sg['id'],
                                     n_const.EGRESS_DIRECTION)
        expected_acls[ingress_name].update(in_rules)
        expected_acls[egress_name].update(out_rules)
    return expected_acls
2.562253
2.137678
1.198615
def get_expected_bindings(self):
    """Query the Neutron DB for SG -> switch interface bindings.

    :returns: dict mapping switch -> set of (intf, acl_name, direction)
    """
    all_expected_bindings = collections.defaultdict(set)
    for sg_binding, port_binding in db_lib.get_baremetal_sg_bindings():
        sg_id = sg_binding['security_group_id']
        try:
            binding_profile = json.loads(port_binding.profile)
        except ValueError:
            # Malformed/empty profile: treat as having no link info
            binding_profile = {}
        ingress_name = self._acl_name(sg_id, n_const.INGRESS_DIRECTION)
        egress_name = self._acl_name(sg_id, n_const.EGRESS_DIRECTION)
        for switch, intf in self._get_switchports(binding_profile):
            all_expected_bindings[switch].add(
                (intf, ingress_name, a_const.INGRESS_DIRECTION))
            all_expected_bindings[switch].add(
                (intf, egress_name, a_const.EGRESS_DIRECTION))
    return all_expected_bindings
2.976229
2.524698
1.178846
def adjust_bindings_for_lag(self, switch_ip, bindings):
    """Rewrite expected bindings so interfaces that are LAG members use
    their Port-Channel name instead."""
    # Get latest LAG info for switch
    self._update_port_group_info([switch_ip])
    # Swap each binding's interface for its port-channel, if any
    return set(
        (self._get_port_for_acl(binding[0], switch_ip),) + binding[1:]
        for binding in bindings)
5.722802
5.492425
1.041945
def get_sync_acl_cmds(self, switch_acls, expected_acls):
    """Commands to synchronize a switch's dynamic ACLs with expectation.

    1. Delete ACLs that are not expected at all.
    2. For each expected ACL: create it if missing, delete stale rules,
       add missing rules — emitting nothing for ACLs already in sync.
    """
    switch_cmds = []
    # Delete any stale ACLs
    for acl in set(switch_acls) - set(expected_acls):
        switch_cmds.append('no ip access-list %s' % acl)
    # Update or create ACLs and rules
    for acl, expected_rules in expected_acls.items():
        switch_rules = switch_acls.get(acl, set())
        rules_to_delete = switch_rules - expected_rules
        rules_to_add = expected_rules - switch_rules
        # Skip ACLs that already exist and need no rule changes
        if acl in switch_acls and not (rules_to_add | rules_to_delete):
            continue
        switch_cmds.append('ip access-list %s dynamic' % acl)
        for rule in rules_to_delete:
            switch_cmds.append('no ' + rule)
        for rule in rules_to_add:
            switch_cmds.append(rule)
        switch_cmds.append('exit')
    return switch_cmds
2.431501
2.401596
1.012452
def get_sync_binding_cmds(self, switch_bindings, expected_bindings):
    """Commands to synchronize interface ACL bindings on one switch.

    Unbinds anything unexpected, then binds anything missing.
    """
    switch_cmds = []
    for intf, acl, direction in switch_bindings - expected_bindings:
        switch_cmds += ['interface %s' % intf,
                        'no ip access-group %s %s' % (acl, direction),
                        'exit']
    for intf, acl, direction in expected_bindings - switch_bindings:
        switch_cmds += ['interface %s' % intf,
                        'ip access-group %s %s' % (acl, direction),
                        'exit']
    return switch_cmds
2.458098
2.417186
1.016926
def read_price_data(files, name_func=None):
    """Read pricing data from csv files into a single DataFrame.

    :param files: list of csv file paths; the first column must be dates
    :param name_func: function mapping a file string to an instrument
        name for the second index level; defaults to the bare file name
        without path or extension (/path/to/name.csv -> name)
    :returns: DataFrame with a (date, contract) MultiIndex, sorted,
        with columns taken from the csv files
    """
    if name_func is None:
        def name_func(fname):
            return os.path.split(fname)[1].split(".")[0]
    frames = []
    for fname in files:
        frame = pd.read_csv(fname, index_col=0, parse_dates=True)
        frame.sort_index(inplace=True)
        frame.index = pd.MultiIndex.from_product(
            [frame.index, [name_func(fname)]],
            names=["date", "contract"])
        frames.append(frame)
    return pd.concat(frames, axis=0, sort=False).sort_index()
2.033201
2.161639
0.940583
def flatten(weights):  # NOQA
    """Flatten weights into a long DataFrame.

    Parameters
    ----------
    weights: pandas.DataFrame or dict
        Instrument weights with a (Timestamp, instrument) MultiIndex
        and generic names as columns. If a dict is given, it maps root
        generics (e.g. 'CL') to such DataFrames.

    Returns
    -------
    A long DataFrame with columns "date", "contract", "generic" and
    "weight". If a dict is passed, an additional "key" column holds the
    dict key and rows are sorted by it.
    """
    if isinstance(weights, pd.DataFrame):
        flat = weights.stack().reset_index()
        flat.columns = ["date", "contract", "generic", "weight"]
        return flat
    if isinstance(weights, dict):
        parts = []
        # Sort keys so the output ordering is deterministic
        for key in sorted(weights.keys()):
            part = weights[key].stack().reset_index()
            part.columns = ["date", "contract", "generic", "weight"]
            part.loc[:, "key"] = key
            parts.append(part)
        return pd.concat(parts, axis=0).reset_index(drop=True)
    raise ValueError("weights must be pd.DataFrame or dict")
2.277444
1.968511
1.156937
def unflatten(flat_weights):  # NOQA
    """Pivot weights from a long DataFrame into weighting matrices.

    Parameters
    ----------
    flat_weights: pandas.DataFrame
        Long DataFrame with columns "date", "contract", "generic",
        "weight" and optionally "key". If "key" is present, a dict of
        unflattened DataFrames keyed by the "key" values is returned.

    Returns
    -------
    A DataFrame (or dict of DataFrames) of instrument weights with a
    (Timestamp, instrument) MultiIndex and generic names as columns.

    See also: flatten()
    """
    def _pivot(df):
        # Pivot (date, contract) rows into a generic-per-column matrix
        wts = df.pivot_table(index=["date", "contract"],
                             columns=["generic"], values=["weight"])
        wts.columns = wts.columns.droplevel(0)
        return wts

    # Bug fix: Index.contains() was deprecated and removed from pandas;
    # membership must be tested with the `in` operator.
    if "key" in flat_weights.columns:
        weights = {}
        for key in flat_weights.loc[:, "key"].unique():
            sub = flat_weights.loc[flat_weights.loc[:, "key"] == key, :]
            weights[key] = _pivot(sub.drop(labels="key", axis=1))
        return weights
    return _pivot(flat_weights)
2.442447
2.028812
1.20388
def calc_rets(returns, weights):  # NOQA
    """Calculate continuous return series for futures instruments.

    These consist of weighted underlying instrument returns, whose
    weights can vary over time.

    Parameters
    ----------
    returns: pandas.Series or dict
        Instrument returns with a (Timestamp, instrument) MultiIndex;
        values are one-period instrument returns. Returns should be
        available for all Timestamps and instruments in weights. If a
        dict, keys must be a subset of the weights keys.
    weights: pandas.DataFrame or dict
        Instrument weights with a (Timestamp, instrument) MultiIndex and
        generic names as columns. If a dict, keys are root generics,
        e.g. 'CL'.

    Returns
    -------
    pandas.DataFrame of continuous returns for generics, indexed by
    Timestamp with columns corresponding to the weights columns.
    """
    # Normalize both arguments to dicts so a single code path handles
    # the single-root and multi-root cases.
    if not isinstance(returns, dict):
        returns = {"": returns}
    if not isinstance(weights, dict):
        weights = {"": weights}
    # Generic names must be globally unique across roots because they
    # become the output columns.
    generic_superset = []
    for root in weights:
        generic_superset.extend(weights[root].columns.tolist())
    if len(set(generic_superset)) != len(generic_superset):
        raise ValueError("Columns for weights must all be unique")
    _check_indices(returns, weights)
    grets = []
    cols = []
    for root in returns:
        root_wts = weights[root]
        root_rets = returns[root]
        for generic in root_wts.columns:
            gnrc_wts = root_wts.loc[:, generic]
            # drop generics where weight is 0, this avoids potential KeyError
            # in later indexing of rets even when ret has weight of 0
            gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]
            rets = root_rets.loc[gnrc_wts.index]
            # groupby time: aggregate weighted instrument returns into a
            # single generic return per date
            group_rets = (rets * gnrc_wts).groupby(level=0)
            # skipna=False so a missing instrument return propagates NaN
            # rather than being silently treated as zero
            grets.append(group_rets.apply(pd.DataFrame.sum, skipna=False))
        cols.extend(root_wts.columns.tolist())
    rets = pd.concat(grets, axis=1, keys=cols).sort_index(axis=1)
    return rets
3.619401
3.351928
1.079797
def reindex(prices, index, limit):
    """Reindex a price Series for compatibility with calc_rets().

    Reindexes `prices` by an augmented version of `index` which also
    includes the preceding date for the first appearance of each
    instrument, then forward fills missing values up to `limit` periods.

    Parameters
    ----------
    prices: pandas.Series
        Prices with a (Timestamp, instrument) MultiIndex.
    index: pandas.MultiIndex
        Unique (Timestamp, instrument) MultiIndex to reindex to.
    limit: int
        Number of periods to fill prices forward; 0 disables filling.

    Returns
    -------
    pandas.Series of reindexed prices with a (Timestamp, instrument)
    MultiIndex.

    See also: calc_rets()
    """
    if not index.is_unique:
        raise ValueError("'index' must be unique")
    index = index.sort_values()
    index.names = ["date", "instrument"]
    price_dts = prices.sort_index().index.unique(level=0)
    index_dts = index.unique(level=0)
    # A price date before the first index date must exist so every
    # instrument has a previous price for return calculation.
    mask = price_dts < index_dts[0]
    leading_price_dts = price_dts[mask]
    if len(leading_price_dts) == 0:
        raise ValueError("'prices' must have a date preceding first date in "
                         "'index'")
    prev_dts = index_dts.tolist()
    prev_dts.insert(0, leading_price_dts[-1])
    # avoid just lagging to preserve the calendar
    previous_date = dict(zip(index_dts, prev_dts))
    # First row per instrument: locates the date preceding each
    # instrument's first appearance.
    first_instr = index.to_frame(index=False)
    first_instr = (
        first_instr.drop_duplicates(subset=["instrument"], keep="first")
    )
    first_instr.loc[:, "prev_date"] = (
        first_instr.loc[:, "date"].apply(lambda x: previous_date[x])
    )
    additional_indices = pd.MultiIndex.from_tuples(
        first_instr.loc[:, ["prev_date", "instrument"]].values.tolist()
    )
    augmented_index = index.union(additional_indices).sort_values()
    prices = prices.reindex(augmented_index)
    if limit != 0:
        # Forward fill per instrument, at most `limit` periods
        prices = prices.groupby(level=1).fillna(method="ffill", limit=limit)
    return prices
3.417095
3.380149
1.01093
def to_notional(instruments, prices, multipliers, desired_ccy=None,
                instr_fx=None, fx_rates=None):
    """Convert numbers of contracts to notional values.

    Parameters
    ----------
    instruments: pandas.Series
        Contract counts indexed by instrument name.
    prices: pandas.Series
        Instrument prices indexed by instrument name; should be a
        superset of instruments.index, otherwise NaN is returned for
        instruments without prices.
    multipliers: pandas.Series
        Contract multipliers indexed by instrument name; should be a
        superset of instruments.index.
    desired_ccy: str
        Three letter currency code to convert notionals to, e.g. 'USD'.
        If None, currency conversion is skipped.
    instr_fx: pandas.Series
        Instrument currency denominations (three letter codes), indexed
        to match prices.index.
    fx_rates: pandas.Series
        FX rates indexed by pair strings, e.g. 'AUDUSD' or 'USDCAD'.

    Returns
    -------
    pandas.Series of notional amounts indexed by instrument name.
    """
    # Delegate to the shared converter with to_notional=True
    return _instr_conv(instruments, prices, multipliers, True,
                       desired_ccy, instr_fx, fx_rates)
3.642796
10.493551
0.347146
def to_contracts(instruments, prices, multipliers, desired_ccy=None,
                 instr_fx=None, fx_rates=None, rounder=None):
    """Convert notional amounts to integer numbers of contracts.

    Parameters
    ----------
    instruments: pandas.Series
        Notional amounts indexed by instrument name.
    prices: pandas.Series
        Instrument prices indexed by instrument name; should be a
        superset of instruments.index.
    multipliers: pandas.Series
        Contract multipliers indexed by instrument name; should be a
        superset of instruments.index.
    desired_ccy: str
        Three letter currency code to convert notionals to, e.g. 'USD'.
        If None, currency conversion is skipped.
    instr_fx: pandas.Series
        Instrument currency denominations (three letter codes), indexed
        to match prices.index.
    fx_rates: pandas.Series
        FX rates indexed by pair strings, e.g. 'AUDUSD' or 'USDCAD'.
    rounder: function
        Function rounding a pd.Series to integers; defaults to
        pd.Series.round.

    Returns
    -------
    pandas.Series of integer contract counts indexed by instrument name.
    """
    # Delegate to the shared converter with to_notional=False
    contracts = _instr_conv(instruments, prices, multipliers, False,
                            desired_ccy, instr_fx, fx_rates)
    round_func = pd.Series.round if rounder is None else rounder
    return round_func(contracts).astype(int)
3.485168
3.726831
0.935156
def get_multiplier(weights, root_generic_multiplier):
    """Determine tradeable instrument multipliers from generic ones.

    Parameters
    ----------
    weights: pandas.DataFrame or dict
        Loadings of generic contracts on tradeable instruments for a
        given date; index is instrument names, columns are generic
        numbers. If a dict, keys are root generic names, e.g. 'CL'.
    root_generic_multiplier: pandas.Series
        Multipliers for generic instruments, lexicographically sorted.
        If a dict of weights is given, its index should correspond to
        the weights keys.

    Returns
    -------
    pandas.Series of multipliers indexed by tradeable instrument name.
    """
    if len(root_generic_multiplier) > 1 and not isinstance(weights, dict):
        raise ValueError("For multiple generic instruments weights must be a "
                         "dictionary")
    mults = []
    intrs = []
    # Bug fix: Series.iteritems() was removed in pandas 2.0; items() is
    # the supported equivalent.
    for ast, multiplier in root_generic_multiplier.items():
        if isinstance(weights, dict):
            weights_ast = weights[ast].index
        else:
            weights_ast = weights.index
        # Every instrument under this root shares the root's multiplier
        mults.extend(np.repeat(multiplier, len(weights_ast)))
        intrs.extend(weights_ast)
    return pd.Series(mults, intrs).sort_index()
3.440046
3.021802
1.138409
def weighted_expiration(weights, contract_dates):  # NOQA
    """Calculate days to expiration for generic futures, weighted by
    the composition of the underlying tradeable instruments.

    Parameters
    ----------
    weights: pandas.DataFrame
        Instrument weights with a (Timestamp, instrument) MultiIndex
        and generic names as columns.
    contract_dates: pandas.Series
        Index of tradeable contract names; values are pandas.Timestamps
        for the last date of the roll.

    Returns
    -------
    pandas.DataFrame with columns of generic futures and index of
    dates; values are the weighted average days to expiration of the
    underlying contracts.
    """
    cols = weights.columns
    # Move the instrument level out of the index so each row can be
    # mapped to its contract expiry date.
    weights = weights.reset_index(level=-1)
    expiries = contract_dates.to_dict()
    # First column after reset_index holds the instrument names
    weights.loc[:, "expiry"] = weights.iloc[:, 0].apply(lambda x: expiries[x])
    # Days from each observation date (the remaining index) to expiry
    diffs = (pd.DatetimeIndex(weights.expiry) - pd.Series(weights.index, weights.index)).apply(lambda x: x.days)
    weights = weights.loc[:, cols]
    # Weighted sum of days-to-expiry per date and generic
    wexp = weights.mul(diffs, axis=0).groupby(level=0).sum()
    return wexp
3.601071
4.001555
0.899918
def _valid_baremetal_port(port):
    """Check if port is a baremetal port with exactly one security group."""
    if port.get(portbindings.VNIC_TYPE) != portbindings.VNIC_BAREMETAL:
        return False
    sgs = port.get('security_groups', [])
    if not sgs:
        # Nothing to do
        return False
    # Fix: reuse the already-fetched list instead of a second
    # port.get('security_groups') lookup.
    if len(sgs) > 1:
        LOG.warning('SG provisioning failed for %(port)s. Only one '
                    'SG may be applied per port.',
                    {'port': port['id']})
        return False
    return True
3.302517
2.783193
1.186593
def synchronize_resources(self):
    """Synchronize worker with CVX.

    All database queries must occur while the sync lock is held; this
    couples reads with writes so an older read cannot produce the last
    write. Resources to sync are deleted in reverse sync order and
    created in sync order, so dependent resources are deleted before
    the resources they depend on and created after them. A failure to
    write results in a full resync.
    """
    # Grab the sync lock; bail out (after a short sleep so the caller's
    # retry loop backs off) if another worker holds it.
    if not self._rpc.sync_start():
        LOG.info("%(pid)s Failed to grab the sync lock",
                 {'pid': os.getpid()})
        greenthread.sleep(1)
        return
    # Refresh pending neutron resources while holding the lock
    for resource in self._resources_to_update:
        self.update_neutron_resource(resource)
    self._resources_to_update = list()
    # Sync any necessary resources.
    # We delete in reverse order and create in order to ensure that
    # dependent resources are deleted before the resources they depend
    # on and created after them
    for resource_type in reversed(self.sync_order):
        resource_type.delete_cvx_resources()
    for resource_type in self.sync_order:
        resource_type.create_cvx_resources()
    # Release the sync lock
    self._rpc.sync_end()
    # Update local uuid if this was a full sync
    if self._synchronizing_uuid:
        LOG.info("%(pid)s Full sync for cvx uuid %(uuid)s complete",
                 {'uuid': self._synchronizing_uuid,
                  'pid': os.getpid()})
        self._cvx_uuid = self._synchronizing_uuid
        self._synchronizing_uuid = None
4.124583
4.179807
0.986788
def register(self, resource, event, trigger, **kwargs):
    """Called in trunk plugin's AFTER_INIT."""
    super(AristaTrunkDriver, self).register(
        resource, event, trigger, kwargs)
    # Subscribe all trunk/subport lifecycle handlers
    subscriptions = (
        (self.subport_create, resources.SUBPORTS, events.AFTER_CREATE),
        (self.subport_delete, resources.SUBPORTS, events.AFTER_DELETE),
        (self.trunk_create, resources.TRUNK, events.AFTER_CREATE),
        (self.trunk_update, resources.TRUNK, events.AFTER_UPDATE),
        (self.trunk_delete, resources.TRUNK, events.AFTER_DELETE),
    )
    for handler, res, evt in subscriptions:
        registry.subscribe(handler, res, evt)
    self.core_plugin = directory.get_plugin()
    LOG.debug("Arista trunk driver initialized.")
2.057463
1.981613
1.038277
def create_router_on_eos(self, router_name, rdm, server):
    """Creates a router on Arista HW Device.

    :param router_name: globally unique identifier for router/VRF
    :param rdm: A value generated by hashing router name
    :param server: Server endpoint on the Arista switch to be configured
    """
    rd = '%s:%s' % (rdm, rdm)
    cmds = [c.format(router_name, rd) for c in self.routerDict['create']]
    if self._mlag_configured:
        # MLAG pairs share a virtual router MAC
        cmds.extend(c.format(VIRTUAL_ROUTER_MAC)
                    for c in self._additionalRouterCmdsDict['create'])
    self._run_config_cmds(cmds, server)
6.181454
6.737177
0.917514
def delete_router_from_eos(self, router_name, server):
    """Deletes a router from Arista HW Device.

    :param router_name: globally unique identifier for router/VRF
    :param server: Server endpoint on the Arista switch to be configured
    """
    cmds = [c.format(router_name) for c in self.routerDict['delete']]
    if self._mlag_configured:
        # Extra teardown commands required in MLAG configurations
        cmds.extend(self._additionalRouterCmdsDict['delete'])
    self._run_config_cmds(cmds, server)
6.671066
7.281219
0.916202
def add_interface_to_router(self, segment_id, router_name, gip,
                            router_ip, mask, server):
    """Adds an interface to existing HW router on Arista HW device.

    :param segment_id: VLAN Id associated with interface that is added
    :param router_name: globally unique identifier for router/VRF
    :param gip: Gateway IP associated with the subnet
    :param router_ip: IP address of the router
    :param mask: subnet mask to be used
    :param server: Server endpoint on the Arista switch to be configured
    """
    segment_id = segment_id or DEFAULT_VLAN
    # In VARP (MLAG) config the router IP is used; otherwise the
    # gateway IP with its subnet mask. This choice is loop-invariant so
    # it is computed once.
    if self._mlag_configured:
        ip = router_ip
    else:
        ip = gip + '/' + mask
    cmds = [c.format(segment_id, router_name, ip)
            for c in self._interfaceDict['add']]
    if self._mlag_configured:
        cmds.extend(c.format(gip)
                    for c in self._additionalInterfaceCmdsDict['add'])
    self._run_config_cmds(cmds, server)
5.989765
6.2061
0.965142
def delete_interface_from_router(self, segment_id, router_name, server):
    """Deletes an interface from existing HW router on Arista HW device.

    :param segment_id: VLAN Id associated with interface to remove
    :param router_name: globally unique identifier for router/VRF
    :param server: Server endpoint on the Arista switch to be configured
    """
    segment_id = segment_id or DEFAULT_VLAN
    cmds = [c.format(segment_id) for c in self._interfaceDict['remove']]
    self._run_config_cmds(cmds, server)
6.09773
6.707041
0.909153
def create_router(self, context, router):
    """Creates a router on Arista Switch.

    Deals with multiple configurations - such as Router per VRF, a
    router in default VRF, Virtual Router in MLAG configurations.
    """
    if router:
        router_name = self._arista_router_name(router['id'], router['name'])
        # Route distinguisher derived deterministically from the router
        # name so every switch computes the same value.
        hashed = hashlib.sha256(router_name.encode('utf-8'))
        rdm = str(int(hashed.hexdigest(), 16) % 65536)
        mlag_peer_failed = False
        for s in self._servers:
            try:
                self.create_router_on_eos(router_name, rdm, s)
                mlag_peer_failed = False
            except Exception:
                if self._mlag_configured and not mlag_peer_failed:
                    # In paired switch, it is OK to fail on one switch
                    mlag_peer_failed = True
                else:
                    msg = (_('Failed to create router %s on EOS') %
                           router_name)
                    LOG.exception(msg)
                    raise arista_exc.AristaServicePluginRpcError(msg=msg)
4.549885
4.327682
1.051344
def delete_router(self, context, router_id, router):
    """Deletes a router from Arista Switch."""
    if router:
        router_name = self._arista_router_name(router_id, router['name'])
        mlag_peer_failed = False
        for s in self._servers:
            try:
                self.delete_router_from_eos(router_name, s)
                mlag_peer_failed = False
            except Exception:
                if self._mlag_configured and not mlag_peer_failed:
                    # In paired switch, it is OK to fail on one switch
                    mlag_peer_failed = True
                else:
                    # Bug fix: the message previously said 'create'
                    # although this is the delete path.
                    msg = (_('Failed to delete router %s on EOS') %
                           router_name)
                    LOG.exception(msg)
                    raise arista_exc.AristaServicePluginRpcError(msg=msg)
4.977653
4.539377
1.09655
def add_router_interface(self, context, router_info):
    """Adds an interface to a router created on Arista HW router.

    This deals with both IPv6 and IPv4 configurations.
    """
    if router_info:
        # Select the IPv4 or IPv6 command dictionaries
        self._select_dicts(router_info['ip_version'])
        cidr = router_info['cidr']
        subnet_mask = cidr.split('/')[1]
        router_name = self._arista_router_name(router_info['id'],
                                               router_info['name'])
        if self._mlag_configured:
            # For MLAG, we send a specific IP address as opposed to cidr
            # For now, we are using x.x.x.253 and x.x.x.254 as virtual IP
            mlag_peer_failed = False
            for i, server in enumerate(self._servers):
                # Get appropriate virtual IP address for this router
                router_ip = self._get_router_ip(cidr, i,
                                                router_info['ip_version'])
                try:
                    self.add_interface_to_router(router_info['seg_id'],
                                                 router_name,
                                                 router_info['gip'],
                                                 router_ip, subnet_mask,
                                                 server)
                    mlag_peer_failed = False
                except Exception:
                    # One MLAG peer may fail as long as the other succeeds
                    if not mlag_peer_failed:
                        mlag_peer_failed = True
                    else:
                        msg = (_('Failed to add interface to router '
                                 '%s on EOS') % router_name)
                        LOG.exception(msg)
                        raise arista_exc.AristaServicePluginRpcError(
                            msg=msg)
        else:
            # Non-MLAG: configure the gateway IP on every server
            for s in self._servers:
                self.add_interface_to_router(router_info['seg_id'],
                                             router_name,
                                             router_info['gip'], None,
                                             subnet_mask, s)
3.64486
3.528289
1.033039
def remove_router_interface(self, context, router_info):
    """Removes previously configured interface from router on Arista HW.

    This deals with both IPv6 and IPv4 configurations.
    """
    if router_info:
        router_name = self._arista_router_name(router_info['id'],
                                               router_info['name'])
        mlag_peer_failed = False
        for s in self._servers:
            try:
                self.delete_interface_from_router(router_info['seg_id'],
                                                  router_name, s)
                if self._mlag_configured:
                    mlag_peer_failed = False
            except Exception:
                if self._mlag_configured and not mlag_peer_failed:
                    # In paired switch, it is OK to fail on one switch
                    mlag_peer_failed = True
                else:
                    # Bug fix: the message previously said 'add
                    # interface' although this is the remove path.
                    msg = (_('Failed to remove interface from router '
                             '%s on EOS') % router_name)
                    LOG.exception(msg)
                    raise arista_exc.AristaServicePluginRpcError(msg=msg)
3.70721
3.553214
1.04334
def _run_config_cmds(self, commands, server):
    """Execute/sends a CAPI (Command API) command to EOS.

    Wraps the command list with the prefix/postfix commands EOS needs
    to enter and leave configuration mode.

    :param commands: List of commands to be executed on EOS.
    :param server: Server endpoint on the Arista switch to be configured
    """
    wrapped = ['enable', 'configure'] + commands + ['exit']
    self._run_eos_cmds(wrapped, server)
4.86898
4.936997
0.986223
return struct.unpack("!L", socket.inet_pton(socket.AF_INET, ip_addr))[0]
def _get_binary_from_ipv4(self, ip_addr)
Converts IPv4 address to binary form.
4.107574
3.861327
1.063773
hi, lo = struct.unpack("!QQ", socket.inet_pton(socket.AF_INET6, ip_addr)) return (hi << 64) | lo
def _get_binary_from_ipv6(self, ip_addr)
Converts IPv6 address to binary form.
3.253001
2.97144
1.094756
return socket.inet_ntop(socket.AF_INET, struct.pack("!L", bin_addr))
def _get_ipv4_from_binary(self, bin_addr)
Converts binary address to Ipv4 format.
3.781416
3.209074
1.178351
hi = bin_addr >> 64 lo = bin_addr & 0xFFFFFFFF return socket.inet_ntop(socket.AF_INET6, struct.pack("!QQ", hi, lo))
def _get_ipv6_from_binary(self, bin_addr)
Converts binary address to Ipv6 format.
2.830478
2.517756
1.124207
def _get_router_ip(self, cidr, ip_count, ip_ver):
    """For a given IP subnet and IP version type, generate IP for router.

    Takes the network address (cidr) and selects an address near the
    top of the subnet to assign to the virtual router instance running
    on each switch. Each instance of the router, on each switch,
    requires a unique IP; e.g. for an IPv4 /24 subnet the first
    instance gets x.x.x.254, the next x.x.x.253, and so on.

    :param cidr: subnet in CIDR notation, e.g. '10.0.0.0/24'
    :param ip_count: 0-based index of the switch requesting an address
    :param ip_ver: IP version, 4 or 6
    """
    # Offset from the top of the subnet; MLAG_SWITCHES addresses are
    # reserved, one per switch instance.
    start_ip = MLAG_SWITCHES + ip_count
    network_addr, prefix = cidr.split('/')
    if ip_ver == 4:
        bits = IPV4_BITS
        ip = self._get_binary_from_ipv4(network_addr)
    elif ip_ver == 6:
        bits = IPV6_BITS
        ip = self._get_binary_from_ipv6(network_addr)
    # Network mask for the prefix length, e.g. /24 -> 0xFFFFFF00
    mask = (pow(2, bits) - 1) << (bits - int(prefix))
    network_addr = ip & mask
    # Subnet size minus start_ip gives an address near the top end
    router_ip = pow(2, bits - int(prefix)) - start_ip
    router_ip = network_addr | router_ip
    if ip_ver == 4:
        return self._get_ipv4_from_binary(router_ip) + '/' + prefix
    else:
        return self._get_ipv6_from_binary(router_ip) + '/' + prefix
2.89566
2.932072
0.987582
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL or an Engine. Calls to
    context.execute() here emit the given string to the script output.
    """
    set_mysql_engine()
    # Prefer an explicit connection URL; otherwise fall back to the
    # configured database dialect.
    if neutron_config.database.connection:
        kwargs = {'url': neutron_config.database.connection}
    else:
        kwargs = {'dialect_name': neutron_config.database.engine}
    kwargs['include_object'] = include_object
    kwargs['version_table'] = ARISTA_VERSION_TABLE
    context.configure(**kwargs)
    with context.begin_transaction():
        context.run_migrations()
4.163393
4.198468
0.991646
def create_tenant(self, tenant_id):
    """Enqueue tenant create."""
    self.provision_queue.put(
        MechResource(tenant_id, a_const.TENANT_RESOURCE, a_const.CREATE))
11.506655
9.6777
1.188987
def delete_tenant_if_removed(self, tenant_id):
    """Enqueue tenant delete if it's no longer in the db."""
    if db_lib.tenant_provisioned(tenant_id):
        return
    self.provision_queue.put(
        MechResource(tenant_id, a_const.TENANT_RESOURCE, a_const.DELETE))
7.926743
6.756555
1.173193
def create_network(self, network):
    """Enqueue network create."""
    self.provision_queue.put(
        MechResource(network['id'], a_const.NETWORK_RESOURCE,
                     a_const.CREATE))
14.739406
12.714884
1.159225
def delete_network(self, network):
    """Enqueue network delete."""
    self.provision_queue.put(
        MechResource(network['id'], a_const.NETWORK_RESOURCE,
                     a_const.DELETE))
12.242583
11.141278
1.098849
def create_segments(self, segments):
    """Enqueue segment creates."""
    for segment in segments:
        self.provision_queue.put(
            MechResource(segment['id'], a_const.SEGMENT_RESOURCE,
                         a_const.CREATE))
11.394364
10.422262
1.093272
def delete_segments(self, segments):
    """Enqueue segment deletes."""
    for segment in segments:
        self.provision_queue.put(
            MechResource(segment['id'], a_const.SEGMENT_RESOURCE,
                         a_const.DELETE))
10.546902
9.47026
1.113687
def get_instance_type(self, port):
    """Determine the port type based on device owner and vnic type.

    Returns one of the a_const resource types, or None if the port is
    of no interest to this driver.
    """
    if port[portbindings.VNIC_TYPE] == portbindings.VNIC_BAREMETAL:
        return a_const.BAREMETAL_RESOURCE
    owner_to_type = {
        n_const.DEVICE_OWNER_DHCP: a_const.DHCP_RESOURCE,
        n_const.DEVICE_OWNER_DVR_INTERFACE: a_const.ROUTER_RESOURCE,
        trunk_consts.TRUNK_SUBPORT_OWNER: a_const.VM_RESOURCE}
    # Idiom fix: look up device_owner once and test membership on the
    # dict directly instead of `.keys()` plus a second subscript.
    device_owner = port['device_owner']
    if device_owner in owner_to_type:
        return owner_to_type[device_owner]
    if device_owner.startswith(n_const.DEVICE_OWNER_COMPUTE_PREFIX):
        return a_const.VM_RESOURCE
    return None
2.464142
2.300974
1.070913
def create_instance(self, port):
    """Enqueue instance create."""
    instance_type = self.get_instance_type(port)
    # Ports of no interest to the driver have no instance type
    if not instance_type:
        return
    self.provision_queue.put(
        MechResource(port['device_id'], instance_type, a_const.CREATE))
6.495655
5.984776
1.085363
def delete_instance_if_removed(self, port):
    """Enqueue instance delete if it's no longer in the db."""
    instance_type = self.get_instance_type(port)
    if not instance_type:
        return
    # Only delete instances that are actually gone from the db
    if db_lib.instance_provisioned(port['device_id']):
        return
    self.provision_queue.put(
        MechResource(port['device_id'], instance_type, a_const.DELETE))
6.02434
5.315416
1.133371
def create_port(self, port):
    """Enqueue port create."""
    instance_type = self.get_instance_type(port)
    if not instance_type:
        return
    self.provision_queue.put(
        MechResource(port['id'], instance_type + a_const.PORT_SUFFIX,
                     a_const.CREATE))
6.205631
6.016546
1.031427
def delete_port_if_removed(self, port):
    """Queue a port DELETE once the port is no longer in the db.

    No-op for unsupported port types or while the port is still
    recorded as provisioned in the database.
    """
    instance_type = self.get_instance_type(port)
    if not instance_type:
        return
    if db_lib.port_provisioned(port['id']):
        return
    self.provision_queue.put(
        MechResource(port['id'], instance_type + a_const.PORT_SUFFIX,
                     a_const.DELETE))
5.851494
5.614254
1.042257
def _get_binding_keys(self, port, host):
    """Compute the binding keys for a port binding.

    Baremetal-style ports carry ``local_link_information`` in their
    binding profile; each entry yields a key of
    ``(port_id, (switch_id, switch_port))``. All other ports bind to
    the host, yielding a single ``(port_id, host)`` key.

    :param port: neutron port dict with a binding profile
    :param host: host the port is bound to
    :returns: list of binding-key tuples
    """
    binding_keys = []
    # `.get(key)` already defaults to None; the explicit default was
    # redundant.
    switch_binding = port[portbindings.PROFILE].get(
        'local_link_information')
    if switch_binding:
        for binding in switch_binding:
            binding_keys.append(
                (port['id'],
                 (binding.get('switch_id'), binding.get('port_id'))))
    else:
        binding_keys.append((port['id'], host))
    return binding_keys
2.448565
2.382957
1.027532
def create_port_binding(self, port, host):
    """Queue a CREATE for every binding key of a supported port."""
    if not self.get_instance_type(port):
        return
    for key in self._get_binding_keys(port, host):
        self.provision_queue.put(
            MechResource(key, a_const.PORT_BINDING_RESOURCE,
                         a_const.CREATE))
6.862621
6.427618
1.067677
def delete_port_binding(self, port, host):
    """Queue a DELETE for every binding key of a supported port."""
    if not self.get_instance_type(port):
        return
    for key in self._get_binding_keys(port, host):
        self.provision_queue.put(
            MechResource(key, a_const.PORT_BINDING_RESOURCE,
                         a_const.DELETE))
6.777627
6.446364
1.051388
def create_network_postcommit(self, context):
    """Provision the network, its tenant and its segments on CVX."""
    network = context.current
    log_context("create_network_postcommit: network", network)
    segments = context.network_segments
    self.create_tenant(network['project_id'])
    self.create_network(network)
    self.create_segments(segments)
4.044117
3.937597
1.027052
def update_network_postcommit(self, context):
    """Push network updates to CVX.

    Re-sends the network (covering name changes) and its segments,
    which picks up any segments added since creation.
    """
    network = context.current
    orig_network = context.original
    log_context("update_network_postcommit: network", network)
    log_context("update_network_postcommit: orig", orig_network)
    segments = context.network_segments
    self.create_network(network)
    # New segments may have been added
    self.create_segments(segments)
4.467625
3.930557
1.136639
def delete_network_postcommit(self, context):
    """Remove the network from CVX: segments, network, then tenant.

    The tenant is only deleted if nothing else references it.
    """
    network = context.current
    log_context("delete_network_postcommit: network", network)
    segments = context.network_segments
    self.delete_segments(segments)
    self.delete_network(network)
    self.delete_tenant_if_removed(network['project_id'])
4.50683
4.328378
1.041228
def update_port_postcommit(self, context):
    """Send port updates to CVX.

    This is also where ports are first created on CVX: we wait until a
    port is bound before sending its data. A port transitioning DOWN on
    its original host has its resources torn down (migration); a port
    that is up gets its tenant, network, segments, instance, port and
    binding (re)created.
    """
    port = context.current
    orig_port = context.original
    network = context.network.current
    log_context("update_port_postcommit: port", port)
    log_context("update_port_postcommit: orig", orig_port)
    tenant_id = port['project_id']
    # Device id can change without a port going DOWN, but the new
    # device id may not be supported
    if orig_port and port['device_id'] != orig_port['device_id']:
        self._delete_port_resources(orig_port, context.original_host)
    if context.status == n_const.PORT_STATUS_DOWN:
        if (context.original_host and
                context.status != context.original_status):
            self._delete_port_resources(orig_port,
                                        context.original_host)
            self._try_to_release_dynamic_segment(context, migration=True)
    else:
        self.create_tenant(tenant_id)
        self.create_network(network)
        if context.binding_levels:
            segments = [
                level['bound_segment']
                for level in context.binding_levels]
            self.create_segments(segments)
        self.create_instance(port)
        self.create_port(port)
        self.create_port_binding(port, context.host)
3.744747
3.81424
0.981781
def delete_port_postcommit(self, context):
    """Remove the port from CVX and release its dynamic segment."""
    port = context.current
    log_context("delete_port_postcommit: port", port)
    self._delete_port_resources(port, context.host)
    self._try_to_release_dynamic_segment(context)
6.324225
6.259296
1.010373
def _bind_baremetal_port(self, context, segment):
    """Bind a baremetal port to the given segment.

    Sets an 'other' VIF binding carrying the segment's VLAN id, hands
    trunk ports to the trunk driver, and reports success.
    """
    port = context.current
    vif_details = {
        portbindings.VIF_DETAILS_VLAN: str(
            segment[driver_api.SEGMENTATION_ID])
    }
    context.set_binding(segment[driver_api.ID],
                        portbindings.VIF_TYPE_OTHER,
                        vif_details,
                        n_const.ACTIVE)
    LOG.debug("AristaDriver: bound port info- port ID %(id)s "
              "on network %(network)s",
              {'id': port['id'],
               'network': context.network.current['id']})
    if port.get('trunk_details'):
        self.trunk_driver.bind_port(port)
    return True
3.801274
3.672984
1.034928
def _get_physnet(self, context):
    """Find the appropriate physnet for the port's host.

    Baremetal ports resolve via the local_link_information in their
    binding profile; other ports resolve via the host's position in
    the topology.
    """
    port = context.current
    if port.get(portbindings.VNIC_TYPE) == portbindings.VNIC_BAREMETAL:
        physnet = self.eapi.get_baremetal_physnet(context)
    else:
        physnet = self.eapi.get_host_physnet(context)
    # If the switch is part of an mlag pair, the physnet is called
    # peer1_peer2
    return self.mlag_pairs.get(physnet, physnet)
3.909547
3.750403
1.042434