| signature | body | docstring | id |
|---|---|---|---|
def walk(self, into_past=<NUM_LIT:0>, into_future=<NUM_LIT:0>):
|
walked_range = []<EOL>for shift in range(-into_past, into_future):<EOL><INDENT>kwargs = dict(drip_model=self.drip_model,<EOL>name=self.name,<EOL>now_shift_kwargs={'<STR_LIT>': shift})<EOL>walked_range.append(self.__class__(**kwargs))<EOL><DEDENT>return walked_range<EOL>
|
Walk over a date range and create new instances of self with new ranges.
|
f11053:c1:m3
|
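The `walk` body above shifts a notional "now" by each step in `range(-into_past, into_future)`, so past offsets are negative and the future bound is exclusive. A minimal sketch of that stepping with plain datetimes (the masked `now_shift_kwargs` key and the shift unit are assumptions here):

```python
from datetime import datetime, timedelta

def walk_shifts(base_now, into_past=0, into_future=0, unit=timedelta(days=1)):
    """Yield one shifted 'now' per step; range(-into_past, into_future) mirrors walk()."""
    return [base_now + shift * unit for shift in range(-into_past, into_future)]

print(walk_shifts(datetime(2024, 1, 10), into_past=2, into_future=3))
# five datetimes: Jan 8, 9, 10, 11, 12
```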
def apply_queryset_rules(self, qs):
|
clauses = {<EOL>'<STR_LIT>': [],<EOL>'<STR_LIT>': []}<EOL>for rule in self.drip_model.queryset_rules.all():<EOL><INDENT>clause = clauses.get(rule.method_type, clauses['<STR_LIT>'])<EOL>kwargs = rule.filter_kwargs(qs, now=self.now)<EOL>clause.append(Q(**kwargs))<EOL>qs = rule.apply_any_annotation(qs)<EOL><DEDENT>if clauses['<STR_LIT>']:<EOL><INDENT>qs = qs.exclude(functools.reduce(operator.or_, clauses['<STR_LIT>']))<EOL><DEDENT>qs = qs.filter(*clauses['<STR_LIT>'])<EOL>return qs<EOL>
|
First collect all filter/exclude kwargs and apply any annotations.
Then apply all filters at once, and all excludes at once.
|
f11053:c1:m4
|
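In the body above, the collected exclude clauses are folded into a single expression with `functools.reduce(operator.or_, ...)` before being handed to `qs.exclude()`. A dependency-free sketch of that combining step, substituting sets of user ids for Django `Q` objects (an illustration only, not the drip code itself):

```python
import functools
import operator

# Each "clause" stands in for the users matched by one queryset rule.
exclude_clauses = [{1, 2}, {2, 5}, {9}]

# Folding with operator.or_ builds one OR-ed expression, as apply_queryset_rules
# does with Q objects before calling qs.exclude(...).
combined_exclude = functools.reduce(operator.or_, exclude_clauses)
print(combined_exclude)  # {1, 2, 5, 9}
```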
def run(self):
|
if not self.drip_model.enabled:<EOL><INDENT>return None<EOL><DEDENT>self.prune()<EOL>count = self.send()<EOL>return count<EOL>
|
Get the queryset, prune sent people, and send it.
|
f11053:c1:m6
|
def prune(self):
|
target_user_ids = self.get_queryset().values_list('<STR_LIT:id>', flat=True)<EOL>exclude_user_ids = SentDrip.objects.filter(date__lt=conditional_now(),<EOL>drip=self.drip_model,<EOL>user__id__in=target_user_ids).values_list('<STR_LIT>', flat=True)<EOL>self._queryset = self.get_queryset().exclude(id__in=exclude_user_ids)<EOL>
|
Do an exclude for all Users who have a SentDrip already.
|
f11053:c1:m7
|
def send(self):
|
if not self.from_email:<EOL><INDENT>self.from_email = getattr(settings, '<STR_LIT>', settings.DEFAULT_FROM_EMAIL)<EOL><DEDENT>MessageClass = message_class_for(self.drip_model.message_class)<EOL>count = <NUM_LIT:0><EOL>for user in self.get_queryset():<EOL><INDENT>message_instance = MessageClass(self, user)<EOL>try:<EOL><INDENT>result = message_instance.message.send()<EOL>if result:<EOL><INDENT>SentDrip.objects.create(<EOL>drip=self.drip_model,<EOL>user=user,<EOL>from_email=self.from_email,<EOL>from_email_name=self.from_email_name,<EOL>subject=message_instance.subject,<EOL>body=message_instance.body<EOL>)<EOL>count += <NUM_LIT:1><EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>logging.error("<STR_LIT>" % (self.drip_model.id, user, e))<EOL><DEDENT><DEDENT>return count<EOL>
|
Send the message to each user on the queryset.
Create SentDrip for each user that gets a message.
Returns count of created SentDrips.
|
f11053:c1:m8
|
def queryset(self):
|
User = get_user_model()<EOL>return User.objects<EOL>
|
Returns a queryset of auth.User who meet the
criteria of the drip.
Alternatively, you could create Drips on the fly
using a queryset builder from the admin interface...
|
f11053:c1:m9
|
def get_fields(Model, <EOL>parent_field="<STR_LIT>",<EOL>model_stack=None,<EOL>stack_limit=<NUM_LIT:2>,<EOL>excludes=['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']):
|
out_fields = []<EOL>if model_stack is None:<EOL><INDENT>model_stack = []<EOL><DEDENT>if isinstance(Model, basestring):<EOL><INDENT>app_label, model_name = Model.split('<STR_LIT:.>')<EOL>Model = models.get_model(app_label, model_name)<EOL><DEDENT>fields = Model._meta.fields + Model._meta.many_to_many + Model._meta.get_all_related_objects()<EOL>model_stack.append(Model)<EOL>stop_recursion = False<EOL>if len(model_stack) > stack_limit:<EOL><INDENT>if model_stack[-<NUM_LIT:3>] == model_stack[-<NUM_LIT:1>]:<EOL><INDENT>stop_recursion = True<EOL><DEDENT>if len(model_stack) > <NUM_LIT:5>:<EOL><INDENT>stop_recursion = True<EOL><DEDENT>if len(set(model_stack)) != len(model_stack):<EOL><INDENT>stop_recursion = True<EOL><DEDENT><DEDENT>if stop_recursion:<EOL><INDENT>return [] <EOL><DEDENT>for field in fields:<EOL><INDENT>field_name = field.name<EOL>if isinstance(field, RelatedObject):<EOL><INDENT>field_name = field.field.related_query_name()<EOL><DEDENT>if parent_field:<EOL><INDENT>full_field = "<STR_LIT>".join([parent_field, field_name])<EOL><DEDENT>else:<EOL><INDENT>full_field = field_name<EOL><DEDENT>if len([True for exclude in excludes if (exclude in full_field)]):<EOL><INDENT>continue<EOL><DEDENT>out_fields.append([full_field, field_name, Model, field.__class__])<EOL>if not stop_recursion and(isinstance(field, ForeignKey) or isinstance(field, OneToOneField) orisinstance(field, RelatedObject) or isinstance(field, ManyToManyField)):<EOL><INDENT>if isinstance(field, RelatedObject):<EOL><INDENT>RelModel = field.model<EOL><DEDENT>else:<EOL><INDENT>RelModel = field.related.parent_model<EOL><DEDENT>out_fields.extend(get_fields(RelModel, full_field, list(model_stack)))<EOL><DEDENT><DEDENT>return out_fields<EOL>
|
Given a Model, return a list of lists of strings with important stuff:
...
['test_user__user__customuser', 'customuser', 'User', 'RelatedObject']
['test_user__unique_id', 'unique_id', 'TestUser', 'CharField']
['test_user__confirmed', 'confirmed', 'TestUser', 'BooleanField']
...
|
f11059:m0
|
def give_model_field(full_field, Model):
|
field_data = get_fields(Model, '<STR_LIT>', [])<EOL>for full_key, name, _Model, _ModelField in field_data:<EOL><INDENT>if full_key == full_field:<EOL><INDENT>return full_key, name, _Model, _ModelField<EOL><DEDENT><DEDENT>raise Exception('<STR_LIT>'.format(full_field, Model.__name__))<EOL>
|
Given a field_name and Model:
"test_user__unique_id", <AchievedGoal>
Returns "test_user__unique_id", "id", <Model>, <ModelField>
|
f11059:m1
|
def setUp(self):
|
self.User = get_user_model()<EOL>start = timezone.now() - timedelta(hours=<NUM_LIT:2>)<EOL>num_string = ['<STR_LIT>','<STR_LIT>','<STR_LIT>','<STR_LIT>','<STR_LIT>','<STR_LIT>','<STR_LIT>','<STR_LIT>','<STR_LIT>','<STR_LIT>']<EOL>for i, name in enumerate(num_string):<EOL><INDENT>user = self.User.objects.create(username='<STR_LIT>' % name, email='<STR_LIT>' % name)<EOL>self.User.objects.filter(id=user.id).update(date_joined=start - timedelta(days=i))<EOL>profile = Profile.objects.get(user=user)<EOL>profile.credits = i * <NUM_LIT><EOL>profile.save()<EOL><DEDENT>for i, name in enumerate(num_string):<EOL><INDENT>user = self.User.objects.create(username='<STR_LIT>' % name, email='<STR_LIT>' % name)<EOL>self.User.objects.filter(id=user.id).update(date_joined=start - timedelta(days=i))<EOL><DEDENT>
|
Creates 20 users: half of them buy 25 credits a day,
and the other half buy none.
|
f11061:c1:m0
|
def get_version(package):
|
init_py = open(os.path.join(package, '<STR_LIT>')).read()<EOL>return re.search("<STR_LIT>", init_py, re.MULTILINE).group(<NUM_LIT:1>)<EOL>
|
Return package version as listed in `__version__` in `__init__.py`.
|
f11063:m0
|
def get_packages(package):
|
return [dirpath<EOL>for dirpath, dirnames, filenames in os.walk(package)<EOL>if os.path.exists(os.path.join(dirpath, '<STR_LIT>'))]<EOL>
|
Return root package and all sub-packages.
|
f11063:m1
|
def get_package_data(package):
|
walk = [(dirpath.replace(package + os.sep, '<STR_LIT>', <NUM_LIT:1>), filenames)<EOL>for dirpath, dirnames, filenames in os.walk(package)<EOL>if not os.path.exists(os.path.join(dirpath, '<STR_LIT>'))]<EOL>filepaths = []<EOL>for base, filenames in walk:<EOL><INDENT>filepaths.extend([os.path.join(base, filename)<EOL>for filename in filenames])<EOL><DEDENT>return {package: filepaths}<EOL>
|
Return all files under the root package that are not in a
package themselves.
|
f11063:m2
|
def run_capture(out = []):
|
return lambda command, *args, **kwargs: out.append(command.strip())<EOL>
|
Helper for retrieving commands issued via env.run.
|
f11070:m0
|
def empty_copy():
|
source_path = os.path.join(env.current_release, "<STR_LIT:src>")<EOL>env.run("<STR_LIT>" % source_path)<EOL>env.run("<STR_LIT>" % source_path)<EOL>
|
A stub copy method that does nothing more than create a .txt file.
|
f11070:m2
|
@task<EOL>def backup_db(release=None, limit=<NUM_LIT:5>):
|
assert "<STR_LIT>" in env, "<STR_LIT>"<EOL>assert "<STR_LIT>" in env, "<STR_LIT>"<EOL>assert "<STR_LIT>" in env, "<STR_LIT>"<EOL>if not release:<EOL><INDENT>release = paths.get_current_release_name()<EOL><DEDENT>max_versions = limit+<NUM_LIT:1><EOL>if not release:<EOL><INDENT>logger.info("<STR_LIT>")<EOL>return<EOL><DEDENT>remote_file = "<STR_LIT>" % release<EOL>remote_path = paths.get_backup_path(remote_file)<EOL>env.run("<STR_LIT>" % paths.get_backup_path("<STR_LIT>"))<EOL>with context_managers.shell_env(PGPASSWORD=env.psql_password):<EOL><INDENT>env.run("<STR_LIT>" % (<EOL>remote_path, env.psql_user, env.psql_db<EOL>))<EOL><DEDENT>env.run("<STR_LIT>" % (<EOL>paths.get_backup_path("<STR_LIT>"),<EOL>max_versions)<EOL>)<EOL>
|
Back up the database and associate it with the current release.
|
f11074:m1
|
@task<EOL>def restore_db(release=None):
|
if not release:<EOL><INDENT>release = paths.get_current_release_name()<EOL><DEDENT>if not release:<EOL><INDENT>raise Exception("<STR_LIT>" % release)<EOL><DEDENT>backup_file = "<STR_LIT>" % release<EOL>backup_path = paths.get_backup_path(backup_file)<EOL>if not env.exists(backup_path):<EOL><INDENT>raise Exception("<STR_LIT>" % backup_path)<EOL><DEDENT>with context_managers.shell_env(PGPASSWORD=env.psql_password):<EOL><INDENT>env.run("<STR_LIT>" % (<EOL>env.psql_db,<EOL>env.psql_user,<EOL>backup_path)<EOL>)<EOL><DEDENT>
|
Restores the backup for a given release; uses the current release by default.
|
f11074:m2
|
@task<EOL>def sync_local_to_remote(force="<STR_LIT>"):
|
_check_requirements()<EOL>if force != "<STR_LIT:yes>":<EOL><INDENT>message = "<STR_LIT>""<STR_LIT>" % (env.psql_db, env.local_psql_db)<EOL>answer = prompt(message, "<STR_LIT:y>")<EOL>if answer != "<STR_LIT:y>":<EOL><INDENT>logger.info("<STR_LIT>")<EOL>return<EOL><DEDENT><DEDENT>init_tasks() <EOL>local_file = "<STR_LIT>" % int(time.time()*<NUM_LIT:1000>)<EOL>local_path = "<STR_LIT>" % local_file<EOL>with context_managers.shell_env(PGPASSWORD=env.local_psql_password):<EOL><INDENT>elocal("<STR_LIT>" % (<EOL>local_path, env.local_psql_user, env.local_psql_db<EOL>))<EOL><DEDENT>remote_path = "<STR_LIT>" % local_file<EOL>put(remote_path, local_path)<EOL>with context_managers.shell_env(PGPASSWORD=env.psql_password):<EOL><INDENT>env.run("<STR_LIT>" % (<EOL>env.psql_db,<EOL>env.psql_user,<EOL>remote_path)<EOL>)<EOL><DEDENT>env.run("<STR_LIT>" % remote_path)<EOL>elocal("<STR_LIT>" % local_path)<EOL>run_hook("<STR_LIT>")<EOL>logger.info("<STR_LIT>")<EOL>
|
Sync your local Postgres database to the remote one.
Example:
fabrik prod sync_local_to_remote:force=yes
|
f11074:m3
|
@task<EOL>def sync_remote_to_local(force="<STR_LIT>"):
|
_check_requirements()<EOL>if force != "<STR_LIT:yes>":<EOL><INDENT>message = "<STR_LIT>""<STR_LIT>" % (env.local_psql_db, env.psql_db)<EOL>answer = prompt(message, "<STR_LIT:y>")<EOL>if answer != "<STR_LIT:y>":<EOL><INDENT>logger.info("<STR_LIT>")<EOL>return<EOL><DEDENT><DEDENT>init_tasks() <EOL>remote_file = "<STR_LIT>" % int(time.time()*<NUM_LIT:1000>)<EOL>remote_path = paths.get_backup_path(remote_file)<EOL>env.run("<STR_LIT>" % paths.get_backup_path("<STR_LIT>"))<EOL>with context_managers.shell_env(PGPASSWORD=env.psql_password):<EOL><INDENT>env.run("<STR_LIT>" % (<EOL>remote_path, env.psql_user, env.psql_db<EOL>))<EOL><DEDENT>local_path = "<STR_LIT>" % remote_file<EOL>get(remote_path, local_path)<EOL>with context_managers.shell_env(PGPASSWORD=env.local_psql_password):<EOL><INDENT>elocal("<STR_LIT>" % (<EOL>env.local_psql_db,<EOL>env.local_psql_user,<EOL>local_path)<EOL>)<EOL><DEDENT>env.run("<STR_LIT>" % remote_path)<EOL>elocal("<STR_LIT>" % local_path)<EOL>run_hook("<STR_LIT>")<EOL>logger.info("<STR_LIT>")<EOL>
|
Sync the remote Postgres database to your local one.
Example:
fabrik prod sync_remote_to_local
|
f11074:m4
|
@task<EOL>def sync_remote_to_local(force="<STR_LIT>"):
|
assert "<STR_LIT>" in env, "<STR_LIT>"<EOL>if force != "<STR_LIT:yes>":<EOL><INDENT>message = "<STR_LIT>""<STR_LIT>"<EOL>answer = prompt(message, "<STR_LIT:y>")<EOL>if answer != "<STR_LIT:y>":<EOL><INDENT>logger.info("<STR_LIT>")<EOL>return<EOL><DEDENT><DEDENT>init_tasks() <EOL>remote_file = "<STR_LIT>" % int(time.time()*<NUM_LIT:1000>)<EOL>remote_path = "<STR_LIT>" % remote_file<EOL>with env.cd(paths.get_current_path()):<EOL><INDENT>env.run("<STR_LIT>" % remote_path)<EOL><DEDENT>local_wp_dir = env.local_wp_dir<EOL>local_path = "<STR_LIT>" % remote_file<EOL>get(remote_path, local_path)<EOL>with lcd(local_wp_dir):<EOL><INDENT>elocal("<STR_LIT>" % local_path)<EOL><DEDENT>env.run("<STR_LIT>" % remote_path)<EOL>elocal("<STR_LIT>" % local_path)<EOL>
|
Replace your local db with your remote one.
Example:
sync_remote_to_local:force=yes
|
f11080:m0
|
@task<EOL>def backup_db(release=None, limit=<NUM_LIT:5>):
|
assert "<STR_LIT>" in env, "<STR_LIT>"<EOL>assert "<STR_LIT>" in env, "<STR_LIT>"<EOL>assert "<STR_LIT>" in env, "<STR_LIT>"<EOL>assert "<STR_LIT>" in env, "<STR_LIT>"<EOL>if not release:<EOL><INDENT>release = paths.get_current_release_name()<EOL><DEDENT>max_versions = limit+<NUM_LIT:1><EOL>if not release:<EOL><INDENT>return<EOL><DEDENT>env.run("<STR_LIT>" % paths.get_backup_path("<STR_LIT>"))<EOL>backup_file = "<STR_LIT>" % release<EOL>backup_path = paths.get_backup_path(backup_file)<EOL>env.run("<STR_LIT>" %<EOL>(env.mysql_user, env.mysql_password, env.mysql_host, env.mysql_db,<EOL>backup_path))<EOL>env.run("<STR_LIT>" % (<EOL>paths.get_backup_path("<STR_LIT>"),<EOL>max_versions)<EOL>)<EOL>
|
Back up the database and associate it with the current release.
|
f11082:m0
|
@task<EOL>def restore_db(release=None):
|
assert "<STR_LIT>" in env, "<STR_LIT>"<EOL>assert "<STR_LIT>" in env, "<STR_LIT>"<EOL>assert "<STR_LIT>" in env, "<STR_LIT>"<EOL>assert "<STR_LIT>" in env, "<STR_LIT>"<EOL>if not release:<EOL><INDENT>release = paths.get_current_release_name()<EOL><DEDENT>if not release:<EOL><INDENT>raise Exception("<STR_LIT>" % release)<EOL><DEDENT>backup_file = "<STR_LIT>" % release<EOL>backup_path = paths.get_backup_path(backup_file)<EOL>if not env.exists(backup_path):<EOL><INDENT>raise Exception("<STR_LIT>" % backup_path)<EOL><DEDENT>env.run("<STR_LIT>" %<EOL>(backup_path, env.mysql_user, env.mysql_password, env.mysql_host,<EOL>env.mysql_db))<EOL>
|
Restores the backup for a given release; uses the current release by default.
|
f11082:m1
|
def hook(name=None, priority=-<NUM_LIT:1>):
|
def _hook(hook_func):<EOL><INDENT>return register_hook(name, hook_func=hook_func, priority=priority)<EOL><DEDENT>return _hook<EOL>
|
Decorator for registering a function as a named hook.
|
f11084:m0
|
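The `hook` decorator above is a decorator-with-arguments that forwards to `register_hook` and returns the registered function unchanged. A self-contained sketch of the same pattern, with a hypothetical in-memory registry standing in for fabrik's real one:

```python
_hooks = {}  # hypothetical registry: hook name -> list of (priority, callable)

def register_hook(name, hook_func, priority=-1):
    _hooks.setdefault(name, []).append((priority, hook_func))
    return hook_func

def hook(name=None, priority=-1):
    """Decorator: register the wrapped function under `name`."""
    def _hook(hook_func):
        return register_hook(name, hook_func=hook_func, priority=priority)
    return _hook

@hook("before_deploy")
def warm_caches():
    print("warming caches")

for _, func in sorted(_hooks["before_deploy"], key=lambda pair: pair[0]):
    func()
```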
def run_task(task):
|
if has_task(task):<EOL><INDENT>execute(task)<EOL><DEDENT>
|
Runs a Fabric task only if it exists, silently skipping missing tasks.
|
f11088:m0
|
def has_task(task):
|
return crawl(task, state.commands) is not None<EOL>
|
Checks whether a Fabric task exists.
|
f11088:m1
|
def apply_settings():
|
prompts = {}<EOL>if "<STR_LIT>" in env:<EOL><INDENT>prompts["<STR_LIT>"] = env.git_passphrase<EOL><DEDENT>return settings(prompts=prompts)<EOL>
|
Applies additional settings before the clone takes place.
|
f11104:m1
|
@runs_once<EOL>def init_tasks():
|
<EOL>if "<STR_LIT>" not in env:<EOL><INDENT>env.exists = exists<EOL><DEDENT>if "<STR_LIT>" not in env:<EOL><INDENT>env.run = run<EOL><DEDENT>if "<STR_LIT>" not in env:<EOL><INDENT>env.cd = cd<EOL><DEDENT>if "<STR_LIT>" not in env:<EOL><INDENT>env.max_releases = <NUM_LIT:5><EOL><DEDENT>if "<STR_LIT>" in env:<EOL><INDENT>public_path = env.public_path.rstrip("<STR_LIT:/>")<EOL>env.public_path = public_path<EOL><DEDENT>run_hook("<STR_LIT>")<EOL>
|
Performs basic setup before any of the tasks are run. All tasks need to
run this before continuing. It only fires once.
|
f11105:m1
|
@task<EOL>def setup():
|
init_tasks()<EOL>run_hook("<STR_LIT>")<EOL>env.run("<STR_LIT>" % (paths.get_shared_path()))<EOL>env.run("<STR_LIT>" % (paths.get_shared_path()))<EOL>env.run("<STR_LIT>" % (paths.get_backup_path()))<EOL>env.run("<STR_LIT>" % (paths.get_backup_path()))<EOL>env.run("<STR_LIT>" % (paths.get_upload_path()))<EOL>env.run("<STR_LIT>" % (paths.get_upload_path()))<EOL>run_hook("<STR_LIT>")<EOL>run_hook("<STR_LIT>")<EOL>
|
Creates the shared, backup and upload directories, then fires setup hooks for the recipes.
|
f11105:m2
|
@task<EOL>def deploy():
|
init_tasks()<EOL>if not has_hook("<STR_LIT>"):<EOL><INDENT>return report("<STR_LIT>")<EOL><DEDENT>if not env.exists(paths.get_shared_path()):<EOL><INDENT>return report("<STR_LIT>")<EOL><DEDENT>run_hook("<STR_LIT>")<EOL>release_name = int(time.time()*<NUM_LIT:1000>)<EOL>release_path = paths.get_releases_path(release_name)<EOL>env.current_release = release_path<EOL>try:<EOL><INDENT>run_hook("<STR_LIT>")<EOL><DEDENT>except Exception as e:<EOL><INDENT>return report("<STR_LIT>", err=e)<EOL><DEDENT>if not env.exists(paths.get_source_path(release_name)):<EOL><INDENT>return report("<STR_LIT>" %<EOL>paths.get_source_path(release_name))<EOL><DEDENT>try:<EOL><INDENT>run_hook("<STR_LIT>")<EOL><DEDENT>except Exception as e:<EOL><INDENT>message = "<STR_LIT>"<EOL>logger.error(message)<EOL>logger.error(e)<EOL>run_task("<STR_LIT>")<EOL>return report("<STR_LIT>")<EOL><DEDENT>paths.symlink(paths.get_source_path(release_name),<EOL>paths.get_current_path())<EOL>if "<STR_LIT>" in env:<EOL><INDENT>cleanup_releases(int(env.max_releases))<EOL><DEDENT>run_hook("<STR_LIT>")<EOL>if "<STR_LIT>" in env:<EOL><INDENT>paths.symlink(paths.get_source_path(release_name), env.public_path)<EOL><DEDENT>logger.info("<STR_LIT>")<EOL>
|
Performs a deploy by invoking copy, then generating the next release name and
invoking the necessary hooks.
|
f11105:m3
|
@task<EOL>def rollback():
|
init_tasks()<EOL>run_hook("<STR_LIT>")<EOL>current_release = paths.get_current_release_path()<EOL>if current_release:<EOL><INDENT>env.run("<STR_LIT>" % current_release)<EOL><DEDENT>old_release = paths.get_current_release_name()<EOL>if old_release:<EOL><INDENT>paths.symlink(paths.get_source_path(old_release),<EOL>paths.get_current_path())<EOL><DEDENT>run_hook("<STR_LIT>")<EOL>run_hook("<STR_LIT>")<EOL>logger.info("<STR_LIT>")<EOL>
|
Rolls back to the previous release.
|
f11105:m4
|
@task<EOL>def cleanup_releases(limit=<NUM_LIT:5>):
|
init_tasks()<EOL>max_versions = limit + <NUM_LIT:1><EOL>env.run("<STR_LIT>" % (<EOL>paths.get_releases_path(),<EOL>max_versions)<EOL>)<EOL>
|
Removes older releases.
|
f11105:m5
|
@task<EOL>def debug():
|
from fabric.network import ssh<EOL>init_tasks()<EOL>ssh.util.log_to_file("<STR_LIT>", <NUM_LIT:10>)<EOL>
|
Outputs debug information; needs to run before the task.
Example:
fab prod debug deploy
|
f11105:m6
|
def validate(yaml, raise_exc=True):
|
data = read_yaml(yaml)<EOL>validator = get_validator()<EOL>errors = list(validator.iter_errors(data))<EOL>if errors and raise_exc:<EOL><INDENT>raise ValidationErrors(errors)<EOL><DEDENT>return errors<EOL>
|
Validate the given YAML document and return a list of errors.
:param yaml: YAML data (either a string, a stream, or pre-parsed Python dict/list)
:type yaml: list|dict|str|file
:param raise_exc: Whether to raise a meta-exception containing all discovered errors after validation.
:type raise_exc: bool
:return: A list of errors encountered.
:rtype: list[jsonschema.exceptions.ValidationError]
|
f11135:m2
|
@classmethod<EOL><INDENT>def parse(cls, data):<DEDENT>
|
parsers = {<EOL>'<STR_LIT>': ([], Step.parse),<EOL>'<STR_LIT>': ([], Endpoint.parse),<EOL>}<EOL>for datum in data:<EOL><INDENT>assert isinstance(datum, dict)<EOL>for type, (items, parse) in parsers.items():<EOL><INDENT>if type in datum:<EOL><INDENT>items.append(parse(datum[type]))<EOL>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(datum))<EOL><DEDENT><DEDENT>inst = cls(<EOL>steps=parsers['<STR_LIT>'][<NUM_LIT:0>],<EOL>endpoints=parsers['<STR_LIT>'][<NUM_LIT:0>],<EOL>)<EOL>inst._original_data = data<EOL>return inst<EOL>
|
Parse a Config structure out of a Python dict (that's likely deserialized from YAML).
:param data: Config-y dict
:type data: dict
:return: Config object
:rtype: valohai_yaml.objs.Config
|
f11136:c0:m1
|
def get_step_by(self, **kwargs):
|
if not kwargs:<EOL><INDENT>return None<EOL><DEDENT>for index, step in enumerate(self.steps.values()):<EOL><INDENT>extended_step = dict(step.serialize(), index=index)<EOL>if all(item in extended_step.items() for item in kwargs.items()):<EOL><INDENT>return step<EOL><DEDENT><DEDENT>return None<EOL>
|
Get the first step that matches all the passed named arguments.
Supports a special `index` argument that is not present in the real step.
Usage:
config.get_step_by(name='not found')
config.get_step_by(index=0)
config.get_step_by(name="greeting", command='echo HELLO MORDOR')
:param kwargs:
:return: Step object or None
:rtype: valohai_yaml.objs.Step|None
|
f11136:c0:m4
|
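The matching in `get_step_by` treats the keyword arguments as a subset that must appear in the step's serialized dict, extended with a synthetic `index` key. A small stand-alone sketch of that subset test over plain dicts (the step data below is invented):

```python
def get_step_by(steps, **kwargs):
    """Return the first step dict whose items are a superset of kwargs, else None."""
    if not kwargs:
        return None
    for index, step in enumerate(steps):
        extended_step = dict(step, index=index)  # add the synthetic `index` key
        if all(item in extended_step.items() for item in kwargs.items()):
            return step
    return None

steps = [{"name": "greeting", "command": "echo HELLO MORDOR"},
         {"name": "train", "command": "python train.py"}]
print(get_step_by(steps, name="greeting", command="echo HELLO MORDOR"))
print(get_step_by(steps, index=1))
```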
def build_parameters(self):
|
param_bits = []<EOL>for name in self.parameters:<EOL><INDENT>param_bits.extend(self.build_parameter_by_name(name) or [])<EOL><DEDENT>return param_bits<EOL>
|
Build the CLI command line from the parameter values.
:return: list of CLI strings -- not escaped!
:rtype: list[str]
|
f11138:c0:m1
|
def get_data(self):
|
data = vars(self).copy()<EOL>data.pop('<STR_LIT>', None)<EOL>return data<EOL>
|
Get data for serialization.
|
f11140:c0:m0
|
def get_parameter_defaults(self, include_flags=True):
|
return {<EOL>name: parameter.default<EOL>for (name, parameter)<EOL>in self.parameters.items()<EOL>if parameter.default is not None and (include_flags or parameter.type != '<STR_LIT>')<EOL>}<EOL>
|
Get a dict mapping parameter names to their defaults (if set).
:rtype: dict[str, object]
|
f11144:c0:m3
|
def build_command(self, parameter_values, command=None):
|
command = (command or self.command)<EOL>values = dict(self.get_parameter_defaults(include_flags=False), **parameter_values)<EOL>parameter_map = ParameterMap(parameters=self.parameters, values=values)<EOL>return build_command(command, parameter_map)<EOL>
|
Build the command for this step using the given parameter values.
Even if the original configuration only declared a single `command`,
this function will return a list of shell commands. It is the caller's
responsibility to concatenate them, likely using the semicolon or
double ampersands.
It is also possible to override the `command`.
:param parameter_values: Parameter values to augment any parameter defaults.
:type parameter_values: dict[str, object]
:param command: Overriding command; leave falsy to not override.
:type command: str|list[str]|None
:return: list of commands
:rtype: list[str]
|
f11144:c0:m5
|
def validate(self, value):
|
errors = []<EOL>value = self._validate_type(value, errors)<EOL>self._validate_value(value, errors)<EOL>if errors:<EOL><INDENT>raise ValidationErrors(errors)<EOL><DEDENT>return value<EOL>
|
Validate (and possibly typecast) the given parameter value.
:param value: Parameter value
:return: Typecast parameter value
:raises ValidationErrors: if there were validation errors
|
f11147:c0:m4
|
def format_cli(self, value):
|
if value is None or (self.type == '<STR_LIT>' and not value):<EOL><INDENT>return None<EOL><DEDENT>pass_as_bits = text_type(self.pass_as or self.default_pass_as).split()<EOL>env = dict(name=self.name, value=value, v=value)<EOL>return [bit.format(**env) for bit in pass_as_bits]<EOL>
|
Build a single parameter argument.
:return: list of CLI strings -- not escaped. If the parameter should not be expressed, returns None.
:rtype: list[str]|None
|
f11147:c0:m6
|
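`format_cli` splits a `pass_as` template into whitespace-separated bits and formats each bit with `name`, `value`, and the shorthand `v`. A sketch under an assumed default template (the real default is masked above, so `--{name}={value}` is a guess for illustration):

```python
def format_cli(name, value, pass_as="--{name}={value}"):
    """Render one parameter as a list of CLI bits; None suppresses the parameter."""
    if value is None:
        return None
    env = dict(name=name, value=value, v=value)
    return [bit.format(**env) for bit in pass_as.split()]

print(format_cli("learning_rate", 0.01))           # ['--learning_rate=0.01']
print(format_cli("epochs", 10, pass_as="-e {v}"))  # ['-e', '10']
```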
def style(text, fg=None, bg=None, bold=None, dim=None, underline=None, <EOL>blink=None, reverse=None, reset=True):
|
bits = []<EOL>if fg:<EOL><INDENT>try:<EOL><INDENT>bits.append('<STR_LIT>' % (_ansi_colors.index(fg) + <NUM_LIT:30>))<EOL><DEDENT>except ValueError:<EOL><INDENT>raise TypeError('<STR_LIT>' % fg)<EOL><DEDENT><DEDENT>if bg:<EOL><INDENT>try:<EOL><INDENT>bits.append('<STR_LIT>' % (_ansi_colors.index(bg) + <NUM_LIT>))<EOL><DEDENT>except ValueError:<EOL><INDENT>raise TypeError('<STR_LIT>' % bg)<EOL><DEDENT><DEDENT>if bold is not None:<EOL><INDENT>bits.append('<STR_LIT>' % (<NUM_LIT:1> if bold else <NUM_LIT>))<EOL><DEDENT>if dim is not None:<EOL><INDENT>bits.append('<STR_LIT>' % (<NUM_LIT:2> if dim else <NUM_LIT>))<EOL><DEDENT>if underline is not None:<EOL><INDENT>bits.append('<STR_LIT>' % (<NUM_LIT:4> if underline else <NUM_LIT>))<EOL><DEDENT>if blink is not None:<EOL><INDENT>bits.append('<STR_LIT>' % (<NUM_LIT:5> if blink else <NUM_LIT>))<EOL><DEDENT>if reverse is not None:<EOL><INDENT>bits.append('<STR_LIT>' % (<NUM_LIT:7> if reverse else <NUM_LIT>))<EOL><DEDENT>bits.append(text)<EOL>if reset:<EOL><INDENT>bits.append(_ansi_reset_all)<EOL><DEDENT>return '<STR_LIT>'.join(bits)<EOL>
|
Styles text with ANSI styles and returns the new string.
|
f11150:m0
|
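The masked numeric literals in `style` are, in the standard ANSI SGR scheme, 30 plus the colour index for foregrounds, 40 plus the index for backgrounds, and small codes such as 1 for bold, all wrapped in `\x1b[...m`. A minimal sketch under that assumption (foreground and bold only):

```python
_ansi_colors = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
_ansi_reset_all = '\x1b[0m'

def style(text, fg=None, bold=False):
    """Wrap text in ANSI escape sequences and reset styling at the end."""
    bits = []
    if fg:
        bits.append('\x1b[%dm' % (_ansi_colors.index(fg) + 30))  # SGR 30-37: foreground
    if bold:
        bits.append('\x1b[1m')  # SGR 1: bold
    return ''.join(bits) + text + _ansi_reset_all

print(style('deploy finished', fg='green', bold=True))
```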
def listify(value):
|
if value is None:<EOL><INDENT>return []<EOL><DEDENT>if isinstance(value, (list, tuple)):<EOL><INDENT>return list(value)<EOL><DEDENT>return [value]<EOL>
|
Wrap the given value into a list, with the below provisions:
* If the value is a list or a tuple, it's coerced into a new list.
* If the value is None, an empty list is returned.
* Otherwise, a single-element list is returned, containing the value.
:param value: A value.
:return: a list!
:rtype: list
|
f11152:m1
|
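A quick usage check covering the three `listify` branches described above (None, list/tuple, and scalar):

```python
def listify(value):
    if value is None:
        return []
    if isinstance(value, (list, tuple)):
        return list(value)
    return [value]

assert listify(None) == []
assert listify(("a", "b")) == ["a", "b"]
assert listify("echo hi") == ["echo hi"]
```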
def lint_file(file_path):
|
with open(file_path, '<STR_LIT:r>') as yaml:<EOL><INDENT>try:<EOL><INDENT>return lint(yaml)<EOL><DEDENT>except Exception as e:<EOL><INDENT>lr = LintResult()<EOL>lr.add_error('<STR_LIT>' % e, exception=e)<EOL>return lr<EOL><DEDENT><DEDENT>
|
Validate & lint `file_path` and return a LintResult.
:param file_path: YAML filename
:type file_path: str
:return: LintResult object
|
f11153:m0
|
def build_command(command, parameter_map):
|
if isinstance(parameter_map, list): <EOL><INDENT>parameter_map = LegacyParameterMap(parameter_map)<EOL><DEDENT>out_commands = []<EOL>for command in listify(command):<EOL><INDENT>if interpolable_re.search(command):<EOL><INDENT>try:<EOL><INDENT>command = interpolable_re.sub(<EOL>lambda match: _replace_interpolation(parameter_map, match),<EOL>command,<EOL>)<EOL><DEDENT>except ValueError as exc: <EOL><INDENT>warnings.warn(<EOL>'<STR_LIT>' % (command, exc),<EOL>CommandInterpolationWarning<EOL>)<EOL><DEDENT><DEDENT>out_commands.append(command.strip())<EOL><DEDENT>return out_commands<EOL>
|
Build command line(s) using the given parameter map.
Even if passed a single `command`, this function will return a list
of shell commands. It is the caller's responsibility to concatenate them,
likely using the semicolon or double ampersands.
:param command: The command to interpolate params into.
:type command: str|list[str]
:param parameter_map: A ParameterMap object containing parameter knowledge.
:type parameter_map: valohai_yaml.objs.parameter_map.ParameterMap
:return: list of commands
:rtype: list[str]
|
f11154:m2
|
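The interpolation above is a `re.sub` with a callable replacement over each command, which warns rather than fails when a placeholder cannot be resolved. The placeholder syntax itself is masked, so the `{param:name}` form below is an assumed stand-in for illustration:

```python
import re
import warnings

interpolable_re = re.compile(r"\{param:([A-Za-z_][A-Za-z0-9_]*)\}")  # assumed syntax

def build_command(command, parameter_map):
    """Interpolate parameters into one or more commands; always returns a list."""
    commands = command if isinstance(command, list) else [command]
    out_commands = []
    for cmd in commands:
        def _replace(match):
            name = match.group(1)
            if name not in parameter_map:
                raise ValueError("unknown parameter %r" % name)
            return str(parameter_map[name])
        try:
            cmd = interpolable_re.sub(_replace, cmd)
        except ValueError as exc:
            warnings.warn("could not interpolate %r: %s" % (cmd, exc))
        out_commands.append(cmd.strip())
    return out_commands

print(build_command("python train.py --lr {param:lr}", {"lr": 0.01}))
# ['python train.py --lr 0.01']
```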
def parse(yaml, validate=True):
|
data = read_yaml(yaml)<EOL>if validate: <EOL><INDENT>from .validation import validate<EOL>validate(data, raise_exc=True)<EOL><DEDENT>return Config.parse(data)<EOL>
|
Parse the given YAML data into a `Config` object, optionally validating it first.
:param yaml: YAML data (either a string, a stream, or pre-parsed Python dict/list)
:type yaml: list|dict|str|file
:param validate: Whether to validate the data before attempting to parse it.
:type validate: bool
:return: Config object
:rtype: valohai_yaml.objs.Config
|
f11156:m0
|
def read_config(config, prefix):
|
<EOL>suffixes = ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>config_server, config_user, config_password, config_token, config_secret, config_consumer, config_cert = [<EOL>config.get('<STR_LIT>'.format(prefix, suffix)) for suffix in suffixes<EOL>]<EOL>result = dict(options=dict(server=config_server))<EOL>basic = (config_user, config_password)<EOL>oauth = dict(<EOL>access_token=config_token,<EOL>access_token_secret=config_secret,<EOL>consumer_key=config_consumer,<EOL>key_cert=config_cert,<EOL>)<EOL>if any(oauth.values()):<EOL><INDENT>result['<STR_LIT>'] = oauth<EOL><DEDENT>elif all(basic):<EOL><INDENT>result['<STR_LIT>'] = basic<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return result<EOL>
|
Return a jira.client.JIRA.__init__() compatible dictionary from data in the Flask config.
Generate a dictionary compatible with jira.client.JIRA.__init__() keyword arguments from data in the Flask
application's configuration values relevant to JIRA. If both basic and OAuth settings are specified, OAuth
authentication takes precedence.
Usage:
config = read_config(app.config, prefix)
jira = JIRA(**config)
Positional arguments:
config -- Flask application config dictionary.
prefix -- Prefix used in config key names in the Flask app's configuration.
Returns:
Dictionary with parsed data, compatible with jira.client.JIRA.__init__() keyword arguments.
|
f11157:m0
|
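`read_config` reads a fixed set of suffixed keys from the Flask config and prefers OAuth over basic auth whenever any OAuth value is present. A sketch with assumed suffixes and return keys (the real literals are masked above, so `SERVER`, `USER`, `basic_auth`, and the rest are illustrative guesses):

```python
def read_config(config, prefix):
    """Build JIRA client kwargs from prefixed Flask config keys (assumed suffixes)."""
    get = lambda suffix: config.get('{0}_{1}'.format(prefix, suffix))
    result = dict(options=dict(server=get('SERVER')))
    basic = (get('USER'), get('PASSWORD'))
    oauth = dict(access_token=get('TOKEN'), access_token_secret=get('SECRET'),
                 consumer_key=get('CONSUMER'), key_cert=get('CERT'))
    if any(oauth.values()):
        result['oauth'] = oauth        # OAuth wins when any OAuth value is set
    elif all(basic):
        result['basic_auth'] = basic
    else:
        raise ValueError('No complete JIRA credentials found in config.')
    return result

cfg = {'JIRA_SERVER': 'http://jira.mycompany.com',
       'JIRA_USER': 'system_account', 'JIRA_PASSWORD': 'secret'}
print(read_config(cfg, 'JIRA'))
```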
def __init__(self, app=None, config_prefix=None):
|
self.original_kill_session = self.kill_session<EOL>self.kill_session = self._fake_kill_session<EOL>if app is not None:<EOL><INDENT>self.init_app(app, config_prefix)<EOL><DEDENT>
|
If the app argument is provided, initialize JIRA using the application's config values.
If no app argument is provided, initialization should be done later with the init_app method.
Keyword arguments:
app -- Flask application instance.
config_prefix -- Prefix used in config key names in the Flask app's configuration. More info in
self.init_app()'s docstring.
|
f11157:c1:m0
|
def _fake_kill_session(self):
|
return self<EOL>
|
Does nothing. Used to temporarily override self.kill_session() in self.__init__().
JIRA calls self.kill_session() even when no session was created.
|
f11157:c1:m1
|
def init_app(self, app, config_prefix=None):
|
<EOL>self.kill_session = self.original_kill_session<EOL>config_prefix = (config_prefix or '<STR_LIT>').rstrip('<STR_LIT:_>').upper()<EOL>if not hasattr(app, '<STR_LIT>'):<EOL><INDENT>app.extensions = dict()<EOL><DEDENT>if config_prefix.lower() in app.extensions:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(config_prefix))<EOL><DEDENT>app.extensions[config_prefix.lower()] = _JIRAState(self, app)<EOL>args = read_config(app.config, config_prefix)<EOL>try:<EOL><INDENT>super(JIRA, self).__init__(**args)<EOL><DEDENT>except ConnectionError:<EOL><INDENT>if not app.config.get('<STR_LIT>'.format(config_prefix)):<EOL><INDENT>raise<EOL><DEDENT>LOG.exception('<STR_LIT>')<EOL><DEDENT>
|
Actual method to read JIRA settings from app configuration and initialize the JIRA instance.
Positional arguments:
app -- Flask application instance.
Keyword arguments:
config_prefix -- Prefix used in config key names in the Flask app's configuration. Useful for applications which
maintain two authenticated sessions with a JIRA server. Default is 'JIRA'. Will be converted to upper case.
Examples:
JIRA_SYSTEM_SERVER = 'http://jira.mycompany.com'
JIRA_SYSTEM_USER = 'system_account'
JIRA_SERVER = 'http://jira.mycompany.com'
JIRA_TOKEN = '<token for oauthing users>'
|
f11157:c1:m2
|
def get_metadata(main_file):
|
with open(os.path.join(HERE, '<STR_LIT>'), encoding='<STR_LIT:utf-8>') as f:<EOL><INDENT>long_description = f.read()<EOL><DEDENT>with open(os.path.join(HERE, main_file), encoding='<STR_LIT:utf-8>') as f:<EOL><INDENT>lines = [l.strip() for l in f if l.startswith('<STR_LIT>')]<EOL><DEDENT>metadata = ast.literal_eval("<STR_LIT>" + "<STR_LIT>".join([l.replace('<STR_LIT>', "<STR_LIT>") for l in lines]) + '<STR_LIT:}>')<EOL>__author__, __license__, __version__ = [metadata[k] for k in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')]<EOL>everything = dict(version=__version__, long_description=long_description, author=__author__, license=__license__)<EOL>if not all(everything.values()):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return everything<EOL>
|
Get metadata about the package/module.
Positional arguments:
main_file -- python file path within `HERE` which has __author__ and the others defined as global variables.
Returns:
Dictionary to be passed into setuptools.setup().
|
f11161:m0
|
def __call__(self, *args, **kwargs):
|
return self.trigger(*args, **kwargs)<EOL>
|
Execute all event handlers using obj.trigger() or just obj().
|
f11163:c0:m1
|
@property<EOL><INDENT>def handlers(self):<DEDENT>
|
if not hasattr(self, '<STR_LIT>'): <EOL><INDENT>self._handlers = set()<EOL><DEDENT>return self._handlers<EOL>
|
Return all event handlers.
|
f11163:c0:m2
|
def on(self, handler):
|
if not hasattr(handler, '<STR_LIT>'):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>self.handlers.add(handler)<EOL>
|
Attach a handler (any Python callable) for the event.
|
f11163:c0:m3
|
def off(self, handler):
|
self.handlers.remove(handler)<EOL>
|
Detach a handler from the event.
|
f11163:c0:m4
|
def trigger(self, *args, **kwargs):
|
for h in self.handlers:<EOL><INDENT>h(*args, **kwargs)<EOL><DEDENT>
|
Execute the handlers with a message, if any.
|
f11163:c0:m5
|
@property<EOL><INDENT>def events(self):<DEDENT>
|
if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>self._events = {}<EOL><DEDENT>return self._events<EOL>
|
Return all events of the observable.
|
f11163:c1:m0
|
def on(self, event, handler=None):
|
if isinstance(event, str) and '<STR_LIT:U+0020>' in event: <EOL><INDENT>self.on(event.split('<STR_LIT:U+0020>'), handler)<EOL><DEDENT>elif isinstance(event, list): <EOL><INDENT>for each in event:<EOL><INDENT>self.on(each, handler)<EOL><DEDENT><DEDENT>elif isinstance(event, dict): <EOL><INDENT>for key, value in event.items():<EOL><INDENT>self.on(key, value)<EOL><DEDENT><DEDENT>elif isinstance(handler, list): <EOL><INDENT>for each in handler:<EOL><INDENT>self.on(event, each)<EOL><DEDENT><DEDENT>elif isinstance(handler, Event): <EOL><INDENT>self.events[event] = handler <EOL>setattr(self, event, self.events[event]) <EOL><DEDENT>elif event in self.events: <EOL><INDENT>self.events[event].on(handler)<EOL><DEDENT>else: <EOL><INDENT>self.on(event, Event(handler))<EOL><DEDENT>
|
Create, add, or update an event with one or more handlers attached.
|
f11163:c1:m1
|
def off(self, event, handler=None):
|
if handler:<EOL><INDENT>self.events[event].off(handler)<EOL><DEDENT>else:<EOL><INDENT>del self.events[event]<EOL>delattr(self, event)<EOL><DEDENT>
|
Remove an event or a handler from it.
|
f11163:c1:m2
|
def trigger(self, *args, **kargs):
|
event = args[<NUM_LIT:0>]<EOL>if isinstance(event, str) and '<STR_LIT:U+0020>' in event:<EOL><INDENT>event = event.split('<STR_LIT:U+0020>') <EOL><DEDENT>if isinstance(event, list): <EOL><INDENT>for each in event:<EOL><INDENT>self.events[each].trigger(*args[<NUM_LIT:1>:], **kargs)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.events[event].trigger(*args[<NUM_LIT:1>:], **kargs)<EOL><DEDENT>
|
Execute all event handlers with optional arguments for the observable.
|
f11163:c1:m3
|
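Taken together, the Event methods above form a tiny publish/subscribe primitive: handlers live in a set, `on` adds, `off` removes, and `trigger` (or calling the event object) fans arguments out to every handler. A self-contained sketch of that pattern and its use:

```python
class Event(object):
    """Minimal observable event: a set of handlers that trigger() fans out to."""

    def __init__(self, handler=None):
        self._handlers = set()
        if handler is not None:
            self.on(handler)

    def __call__(self, *args, **kwargs):
        return self.trigger(*args, **kwargs)

    def on(self, handler):
        if not callable(handler):
            raise TypeError('handler must be callable')
        self._handlers.add(handler)

    def off(self, handler):
        self._handlers.remove(handler)

    def trigger(self, *args, **kwargs):
        for handler in self._handlers:
            handler(*args, **kwargs)

saved = Event()
saved.on(lambda name: print('saved:', name))
saved('report.txt')  # equivalent to saved.trigger('report.txt')
```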
def get_handler(progname, fmt=None, datefmt=None, project_id=None,<EOL>credentials=None, debug_thread_worker=False, **_):
|
builder = CloudLoggingHandlerBuilder(<EOL>progname, fmt=fmt, datefmt=datefmt, project_id=project_id,<EOL>credentials=credentials, debug_thread_worker=debug_thread_worker)<EOL>return builder.get_handler()<EOL>
|
Helper function to create a Stackdriver handler.
See `ulogger.stackdriver.CloudLoggingHandlerBuilder` for arguments
and supported keyword arguments.
Returns:
(obj): Instance of `google.cloud.logging.handlers.
CloudLoggingHandler`
|
f11172:m0
|
def _get_metadata(self, data_type, key, timeout=<NUM_LIT:5>):
|
endpoint_url = self.METADATA_ENDPOINT.format(<EOL>data_type=data_type, key=key)<EOL>try:<EOL><INDENT>rsp = requests.get(<EOL>endpoint_url,<EOL>headers={'<STR_LIT>': '<STR_LIT>'},<EOL>timeout=timeout)<EOL>rsp.raise_for_status()<EOL><DEDENT>except requests.exceptions.RequestException as e:<EOL><INDENT>raise exceptions.GoogleCloudError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(<EOL>key=key, type=data_type, url=endpoint_url, e=e))<EOL><DEDENT>metadata_value = rsp.text<EOL>if metadata_value.strip() == '<STR_LIT>':<EOL><INDENT>raise exceptions.GoogleCloudError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(url=endpoint_url))<EOL><DEDENT>return metadata_value<EOL>
|
Get host instance metadata (only works on GCP hosts).
More details about instance metadata:
https://cloud.google.com/compute/docs/storing-retrieving-metadata
Args:
data_type (str): Type of metadata to fetch. Eg. project,
instance
key (str): Key of metadata to fetch
timeout (int, optional): HTTP request timeout in seconds.
Default is 5 seconds.
Returns:
(str): Plain text value of metadata entry
Raises:
GoogleCloudError: when request to metadata endpoint fails
|
f11172:c0:m1
|
def _create_gcl_resource(self):
|
return gcl_resource.Resource('<STR_LIT>', {<EOL>'<STR_LIT>': self.project_id,<EOL>'<STR_LIT>': self.instance_id,<EOL>'<STR_LIT>': self.zone<EOL>})<EOL>
|
Create a configured Resource object.
The logging.resource.Resource object enables GCL to filter and
bucket incoming logs according to which resource (host) they're
coming from.
Returns:
(obj): Instance of `google.cloud.logging.resource.Resource`
|
f11172:c0:m2
|
def get_formatter(self):
|
if not self.fmt:<EOL><INDENT>self.fmt = ('<STR_LIT>'<EOL>'<STR_LIT>').format(<EOL>host=self.hostname, progname=self.progname)<EOL><DEDENT>if not self.datefmt:<EOL><INDENT>self.datefmt = '<STR_LIT>'<EOL><DEDENT>return logging.Formatter(fmt=self.fmt, datefmt=self.datefmt)<EOL>
|
Create a fully configured `logging.Formatter`
Example of formatted log message:
2017-08-27T20:19:24.424 cpm-example-gew1 progname (23123): hello
Returns:
(obj): Instance of `logging.Formatter`
|
f11172:c0:m3
|
def _set_worker_thread_level(self):
|
bthread_logger = logging.getLogger(<EOL>'<STR_LIT>')<EOL>if self.debug_thread_worker:<EOL><INDENT>bthread_logger.setLevel(logging.DEBUG)<EOL><DEDENT>else:<EOL><INDENT>bthread_logger.setLevel(logging.INFO)<EOL><DEDENT>
|
Sets logging level of the background logging thread to DEBUG or INFO
|
f11172:c0:m4
|
def get_handler(self):
|
gcl_client = gcl_logging.Client(<EOL>project=self.project_id, credentials=self.credentials)<EOL>handler = gcl_handlers.CloudLoggingHandler(<EOL>gcl_client,<EOL>resource=self.resource,<EOL>labels={<EOL>'<STR_LIT>': self.instance_id,<EOL>'<STR_LIT>': self.project_id,<EOL>'<STR_LIT>': self.zone,<EOL>'<STR_LIT>': self.hostname<EOL>})<EOL>handler.setFormatter(self.get_formatter())<EOL>self._set_worker_thread_level()<EOL>return handler<EOL>
|
Create a fully configured CloudLoggingHandler.
Returns:
(obj): Instance of `google.cloud.logging.handlers.
CloudLoggingHandler`
|
f11172:c0:m5
|
def get_handler(progname, address=None, proto=None, facility=None,<EOL>fmt=None, datefmt=None, **_):
|
builder = SyslogHandlerBuilder(<EOL>progname, address=address, proto=proto, facility=facility,<EOL>fmt=fmt, datefmt=datefmt)<EOL>return builder.get_handler()<EOL>
|
Helper function to create a Syslog handler.
See `ulogger.syslog.SyslogHandlerBuilder` for arguments and
supported keyword arguments.
Returns:
(obj): Instance of `logging.SysLogHandler`
|
f11173:m0
|
def _setup_default_handler(progname, fmt=None, datefmt=None, **_):
|
handler = logging.StreamHandler()<EOL>if not fmt:<EOL><INDENT>fmt_prefix = '<STR_LIT>'<EOL>fmt_suffix = '<STR_LIT>' + '<STR_LIT>'<EOL>fmt = fmt_prefix + progname + fmt_suffix<EOL><DEDENT>if not datefmt:<EOL><INDENT>datefmt = '<STR_LIT>'<EOL><DEDENT>formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)<EOL>handler.setFormatter(formatter)<EOL>return handler<EOL>
|
Create a Stream handler (default handler).
Args:
progname (str): Name of program.
fmt (:obj:`str`, optional): Desired log format if different than
the default; uses the same formatting string options
supported in the stdlib's `logging` module.
datefmt (:obj:`str`, optional): Desired date format if different
than the default; uses the same formatting string options
supported in the stdlib's `logging` module.
Returns:
(obj): Instance of `logging.StreamHandler`
|
f11175:m0
|
def setup_logging(progname, level, handlers, **kwargs):
|
for h in handlers:<EOL><INDENT>if h == '<STR_LIT>':<EOL><INDENT>handler = _setup_default_handler(progname, **kwargs)<EOL><DEDENT>else:<EOL><INDENT>handler_module_path = '<STR_LIT>'.format(h)<EOL>try:<EOL><INDENT>handler_module = import_module(<EOL>handler_module_path, package='<STR_LIT>')<EOL><DEDENT>except ImportError:<EOL><INDENT>msg = '<STR_LIT>'.format(h)<EOL>raise exceptions.ULoggerError(msg)<EOL><DEDENT>try:<EOL><INDENT>get_handler = getattr(handler_module, '<STR_LIT>')<EOL><DEDENT>except AttributeError:<EOL><INDENT>msg = '<STR_LIT>'<EOL>raise exceptions.ULoggerError(msg.format(h))<EOL><DEDENT>handler = get_handler(progname, **kwargs)<EOL><DEDENT>logging.getLogger('<STR_LIT>').addHandler(handler)<EOL><DEDENT>level = logging.getLevelName(level)<EOL>logging.getLogger('<STR_LIT>').setLevel(level)<EOL>
|
Set up logging to stdout (stream), syslog, or stackdriver.
Attaches handler(s) and sets log level to the root logger.
Example usage:
import logging
from ulogger import setup_logging
setup_logging('my_awesome_program', 'INFO', ['stream'])
logging.info('ohai')
Args:
progname (str): Name of program.
level (str): Threshold for when to log.
handlers (list): Desired handlers, default 'stream',
supported: 'syslog', 'stackdriver', 'stream'.
**kwargs (optional): Keyword arguments to pass to handlers. See
handler documentation for more information on available
kwargs.
|
f11175:m1
|
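For the default 'stream' handler, the setup path boils down to building a `logging.StreamHandler`, giving it a formatter, attaching it to the root logger, and setting the level. A stdlib-only sketch of that path (the format strings are assumptions, since the originals are masked):

```python
import logging

def setup_stream_logging(progname, level='INFO', fmt=None, datefmt=None):
    """Attach a configured StreamHandler to the root logger and set its level."""
    handler = logging.StreamHandler()
    fmt = fmt or '%(asctime)s ' + progname + ' (%(process)d): %(message)s'
    datefmt = datefmt or '%Y-%m-%dT%H:%M:%S'
    handler.setFormatter(logging.Formatter(fmt=fmt, datefmt=datefmt))
    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(logging.getLevelName(level))

setup_stream_logging('my_awesome_program', 'INFO')
logging.info('ohai')
```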
def read(*filenames, **kwargs):
|
encoding = kwargs.get('<STR_LIT>', '<STR_LIT:utf-8>')<EOL>sep = kwargs.get('<STR_LIT>', '<STR_LIT:\n>')<EOL>buf = []<EOL>for fl in filenames:<EOL><INDENT>with codecs.open(os.path.join(HERE, fl), '<STR_LIT:rb>', encoding) as f:<EOL><INDENT>buf.append(f.read())<EOL><DEDENT><DEDENT>return sep.join(buf)<EOL>
|
Build an absolute path from ``*filenames``, and return contents of
resulting file. Defaults to UTF-8 encoding.
|
f11177:m0
|
def find_meta(meta):
|
re_str = r"<STR_LIT>".format(meta=meta)<EOL>meta_match = re.search(re_str, META_FILE, re.M)<EOL>if meta_match:<EOL><INDENT>return meta_match.group(<NUM_LIT:1>)<EOL><DEDENT>raise RuntimeError('<STR_LIT>'.format(meta=meta))<EOL>
|
Extract __*meta*__ from META_FILE.
|
f11177:m1
|
def reformat_pattern(pattern, compile=False):
|
<EOL>rex_pattern = re.sub(r'<STR_LIT>', '<STR_LIT>', pattern)<EOL>rex_pattern = re.sub(r'<STR_LIT>', '<STR_LIT>', rex_pattern)<EOL>rex_pattern = re.sub(r'<STR_LIT>', '<STR_LIT>', rex_pattern)<EOL>rex_pattern = re.sub(r'<STR_LIT>', '<STR_LIT>', rex_pattern)<EOL>rex_pattern = re.sub(r'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>rex_pattern)<EOL>rex_pattern = re.sub(r'<STR_LIT>',<EOL>'<STR_LIT>', rex_pattern)<EOL>rex_pattern = re.sub(r'<STR_LIT>',<EOL>'<STR_LIT>', rex_pattern)<EOL>rex_pattern = re.sub(r'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>rex_pattern)<EOL>rex_pattern = re.sub(<EOL>r'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>rex_pattern)<EOL>rex_pattern = re.sub(<EOL>r'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>rex_pattern)<EOL>rex_pattern = re.sub(<EOL>r'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>rex_pattern)<EOL>rex_pattern = re.sub(<EOL>r'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>rex_pattern)<EOL>rex_pattern = re.sub(<EOL>r'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>rex_pattern)<EOL>rex_pattern = re.sub(<EOL>r'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>rex_pattern)<EOL>rex_pattern = re.sub(<EOL>r'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>rex_pattern)<EOL>rex_pattern = re.sub(<EOL>r'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>rex_pattern)<EOL>rex_pattern = re.sub(<EOL>r'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>rex_pattern)<EOL>rex_pattern = re.sub(<EOL>r'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>rex_pattern)<EOL>rex_pattern = re.sub(<EOL>r'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>rex_pattern)<EOL>rex_pattern = re.sub(r'<STR_LIT>', '<STR_LIT>', rex_pattern)<EOL>if compile:<EOL><INDENT>return re.compile(rex_pattern)<EOL><DEDENT>return rex_pattern<EOL>
|
Apply the filters on the user pattern to generate a new regular expression
pattern.
A user-provided variable should start with a letter and may contain
alphanumeric characters and underscores.
|
f11180:m0
|
def match_string(pattern, search_string):
|
rexobj = REX(pattern, None)<EOL>rexpatstr = reformat_pattern(pattern)<EOL>rexpat = re.compile(rexpatstr)<EOL>rexobj.rex_patternstr = rexpatstr<EOL>rexobj.rex_pattern = rexpat<EOL>line_count = <NUM_LIT:1><EOL>for line in search_string.splitlines():<EOL><INDENT>line = line.strip()<EOL>mobj = rexpat.match(line)<EOL>if mobj:<EOL><INDENT>populate_resobj(rexobj, mobj, line_count)<EOL><DEDENT>line_count += <NUM_LIT:1><EOL><DEDENT>return rexobj<EOL>
|
Match a pattern in a string
|
f11180:m1
|
def populate_resobj(rexobj, mobj, loc):
|
resobj = REXResult(mobj, loc)<EOL>rexobj.matches.append(resobj)<EOL>rexobj.res_count += <NUM_LIT:1><EOL>
|
Populate the result object and append it to the
rexobj results.
|
f11180:m2
|
def match_file(pattern, filename):
|
<EOL>if pattern is None:<EOL><INDENT>return None<EOL><DEDENT>if os.stat(filename).st_size == <NUM_LIT:0>:<EOL><INDENT>return None<EOL><DEDENT>rexobj = REX(pattern, filename)<EOL>rexpatstr = reformat_pattern(pattern)<EOL>rexpat = re.compile(rexpatstr)<EOL>rexobj.rex_patternstr = rexpatstr<EOL>rexobj.rex_pattern = rexpat<EOL>sfile = open(filename, '<STR_LIT:r>')<EOL>data = sfile.read()<EOL>sfile.close()<EOL>line_count = <NUM_LIT:1><EOL>for line in data.splitlines():<EOL><INDENT>mobj = rexpat.match(line)<EOL>if mobj:<EOL><INDENT>populate_resobj(rexobj, mobj, line_count)<EOL><DEDENT>line_count += <NUM_LIT:1><EOL><DEDENT>return rexobj<EOL>
|
The function will match a pattern in a file and return
a rex object, which will have all the matches found in the file.
|
f11180:m3
|
def parse_lrvalue_string(search_string,<EOL>delimiter="<STR_LIT::>"):
|
mac_search_pattern = r"<STR_LIT>" % delimiter<EOL>search_pattern = r"<STR_LIT>" % delimiter<EOL>rexdict = {}<EOL>for line in search_string.splitlines():<EOL><INDENT>line = line.strip()<EOL>mobj = re.match(mac_search_pattern, line)<EOL>if mobj:<EOL><INDENT>key = mobj.group(<NUM_LIT:1>).lower()<EOL>key = "<STR_LIT:_>".join(key.split()[<NUM_LIT:0>:<NUM_LIT:3>])<EOL>key = key.strip()<EOL>rexdict[key] = mobj.group(<NUM_LIT:2>)<EOL>continue<EOL><DEDENT>mobj = re.match(search_pattern, line)<EOL>if mobj:<EOL><INDENT>key = mobj.group(<NUM_LIT:1>).lower()<EOL>key = "<STR_LIT:_>".join(key.split()[<NUM_LIT:0>:<NUM_LIT:3>])<EOL>key = key.strip()<EOL>rexdict[key] = mobj.group(<NUM_LIT:2>)<EOL><DEDENT><DEDENT>return rexdict<EOL>
|
The function takes a multi-line output/string in the format
"name/descr : value" and converts it to a dictionary of
key-value pairs, where the key is built from the name/descr
part and the value is taken as-is.
eg: "Serial Number: FCH1724V1GT" will be translated to
dict['serial_number'] = "FCH1724V1GT"
|
f11180:m4
|
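A runnable sketch of the "name : value" parsing described above, using one generic regex in place of the masked patterns; as in the body, keys are lower-cased, limited to the first three words, and joined with underscores:

```python
import re

def parse_lrvalue_string(search_string, delimiter=":"):
    """Turn 'Name Descr : value' lines into {'name_descr': 'value'} pairs."""
    # assumed pattern: everything before the delimiter is the key, the rest is the value
    pattern = r"^\s*([^%s]+?)\s*%s\s*(.+)$" % (delimiter, delimiter)
    result = {}
    for line in search_string.splitlines():
        match = re.match(pattern, line.strip())
        if match:
            key = "_".join(match.group(1).lower().split()[0:3])
            result[key] = match.group(2)
    return result

print(parse_lrvalue_string("Serial Number: FCH1724V1GT\nProduct ID: UCSC-C240-M3S"))
# {'serial_number': 'FCH1724V1GT', 'product_id': 'UCSC-C240-M3S'}
```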
def parse_multi_lrvalue_string(search_string, split_string,<EOL>delimiter="<STR_LIT::>"):
|
dictlist = []<EOL>for out in search_string.split(split_string):<EOL><INDENT>tdict = parse_lrvalue_string(split_string + out,<EOL>delimiter=delimiter)<EOL>dictlist.append(tdict)<EOL><DEDENT>return dictlist<EOL>
|
The function is an extension of the parse_lrvalue_string() API.
The function takes a multi-line output/string of the format
"Category: xyz
name: foo
id: bar
Category: abc
name: foox
id: barx
:
"
It splits the output based on the splitstring passed as
argument (eg "Category"), and converts the individual
lines of the form "name: value" to a dictionary object with
key value pairs. The key is built from the name (LV) part.
eg "First Name: Behzad" --> dict[first_name] = "Behzad"
|
f11180:m5
|
def parse_tabular_string(search_string,<EOL>header_keys,<EOL>delimiter=None,<EOL>merge_list=None):
|
first_line = True<EOL>parsed_results = []<EOL>for line in search_string.splitlines():<EOL><INDENT>if first_line:<EOL><INDENT>first_line = False<EOL><DEDENT>else:<EOL><INDENT>result = {}<EOL>row = line.split()<EOL>if merge_list:<EOL><INDENT>for mergeset in merge_list:<EOL><INDENT>fidx = mergeset[<NUM_LIT:0>]<EOL>lidx = mergeset[<NUM_LIT:1>]<EOL>try:<EOL><INDENT>row[fidx] = "<STR_LIT:_>".join(row[fidx:(lidx+<NUM_LIT:1>)])<EOL>row.remove(row[lidx])<EOL><DEDENT>except IndexError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>if len(row) != len(header_keys):<EOL><INDENT>print("<STR_LIT>")<EOL>continue<EOL><DEDENT>key_count = <NUM_LIT:0><EOL>for column in row:<EOL><INDENT>result[header_keys[key_count]] = column<EOL>key_count += <NUM_LIT:1><EOL><DEDENT>parsed_results.append(result)<EOL><DEDENT><DEDENT>return parsed_results<EOL>
|
Given a string in a tabular format, parse it and return a
list of dictionaries (one per row).
@args:
search_string: This is a string in tabular format (e.g.: output of df
command)
header_keys: This is a list of strings for the headers.
delimiter(optional): Default is None, which translates to spaces
merge_list(optional): In some cases 2 fields need to be merged as they
are one value.
|
f11180:m6
|
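A compact sketch of the header-keyed tabular parsing described above, including the optional `merge_list` step that glues a multi-word column back into one field (for example "Mounted on" in `df` output); this is an illustration, not the original implementation:

```python
def parse_tabular_string(search_string, header_keys, merge_list=None):
    """Parse whitespace-separated rows (after the header line) into dicts."""
    results = []
    for line in search_string.splitlines()[1:]:        # skip the header row
        row = line.split()
        for first, last in (merge_list or []):
            try:
                row[first] = "_".join(row[first:last + 1])
                del row[first + 1:last + 1]
            except IndexError:
                pass
        if len(row) != len(header_keys):
            continue                                    # malformed row, skip it
        results.append(dict(zip(header_keys, row)))
    return results

output = "Filesystem Size Used Avail\n/dev/sda1 50G 20G 30G\ntmpfs 2G 0 2G"
print(parse_tabular_string(output, ["fs", "size", "used", "avail"]))
```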
def dump_rexobj_results(rexobj, options=None):
|
print(("<STR_LIT:->" * <NUM_LIT>))<EOL>print(("<STR_LIT>", rexobj.res_count))<EOL>matches = rexobj.matches<EOL>for match in matches:<EOL><INDENT>print(("<STR_LIT>", match.loc, "<STR_LIT>"))<EOL>for key in list(match.named_groups.keys()):<EOL><INDENT>print(("<STR_LIT>" %<EOL>(key, match.named_groups[key])))<EOL><DEDENT>print("<STR_LIT>")<EOL><DEDENT>
|
Print all the results.
|
f11180:m7
|
def get_match_value(rexobj, key, index=<NUM_LIT:0>):
|
if rexobj is None:<EOL><INDENT>return None<EOL><DEDENT>if rexobj.res_count == <NUM_LIT:0>:<EOL><INDENT>return None<EOL><DEDENT>try:<EOL><INDENT>return rexobj.matches[index].named_groups[key]<EOL><DEDENT>except IndexError:<EOL><INDENT>return None<EOL><DEDENT>except KeyError:<EOL><INDENT>return None<EOL><DEDENT>
|
Return a matched value for the key for a specific match from the
results.
|
f11180:m8
|
def __init__(self, pattern, filename=None):
|
self.user_pattern = pattern<EOL>self.search_file = filename<EOL>self.rex_patternstr = None<EOL>self.rex_pattern = None<EOL>self.matches = []<EOL>self.res_count = <NUM_LIT:0><EOL>
|
Initialization.
|
f11180:c0:m0
|
def __init__(self, reobj, loc):
|
self.reobj = reobj<EOL>self.loc = loc<EOL>self.named_groups = reobj.groupdict()<EOL>
|
Initialization.
|
f11180:c1:m0
|
def diff(before, after, check_modified=False):
|
<EOL>if len(before) == <NUM_LIT:0>:<EOL><INDENT>return [<EOL>{'<STR_LIT:state>': '<STR_LIT>', '<STR_LIT:value>': v}<EOL>for v in after<EOL>]<EOL><DEDENT>elif len(after) == <NUM_LIT:0>:<EOL><INDENT>return [<EOL>{'<STR_LIT:state>': '<STR_LIT>', '<STR_LIT:value>': v}<EOL>for v in before<EOL>]<EOL><DEDENT>grid = create_grid(before, after)<EOL>nrows = len(grid[<NUM_LIT:0>])<EOL>ncols = len(grid)<EOL>dps = diff_points(grid)<EOL>result = []<EOL>for kind, col, row in dps:<EOL><INDENT>if kind == '<STR_LIT>':<EOL><INDENT>value = before[col]<EOL>result.append({<EOL>'<STR_LIT:state>': kind,<EOL>'<STR_LIT:value>': value,<EOL>})<EOL><DEDENT>elif kind == '<STR_LIT>':<EOL><INDENT>assert col < ncols<EOL>value = before[col]<EOL>result.append({<EOL>'<STR_LIT:state>': kind,<EOL>'<STR_LIT:value>': value,<EOL>})<EOL><DEDENT>elif kind == '<STR_LIT>':<EOL><INDENT>assert row < nrows<EOL>value = after[row]<EOL>result.append({<EOL>'<STR_LIT:state>': kind,<EOL>'<STR_LIT:value>': value,<EOL>})<EOL><DEDENT>elif check_modified and kind == '<STR_LIT>':<EOL><INDENT>result.append({<EOL>'<STR_LIT:state>': kind,<EOL>'<STR_LIT>': before[col],<EOL>'<STR_LIT>': after[row],<EOL>})<EOL><DEDENT>elif (not check_modified) and kind == '<STR_LIT>':<EOL><INDENT>result.append({<EOL>'<STR_LIT:state>': '<STR_LIT>',<EOL>'<STR_LIT:value>': before[col],<EOL>})<EOL>result.append({<EOL>'<STR_LIT:state>': '<STR_LIT>',<EOL>'<STR_LIT:value>': after[row],<EOL>})<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>return result<EOL>
|
Diff two sequences of comparable objects.
The result of this function is a list of dictionaries containing
values in ``before`` or ``after`` with a ``state`` of either
'unchanged', 'added', 'deleted', or 'modified'.
>>> import pprint
>>> result = diff(['a', 'b', 'c'], ['b', 'c', 'd'])
>>> pprint.pprint(result)
[{'state': 'deleted', 'value': 'a'},
{'state': 'unchanged', 'value': 'b'},
{'state': 'unchanged', 'value': 'c'},
{'state': 'added', 'value': 'd'}]
Parameters
----------
before : iterable
An iterable containing values to be used as the baseline version.
after : iterable
An iterable containing values to be compared against the baseline.
check_modified : bool
Whether or not to check for modifications.
Returns
-------
diff_items : A list of dictionaries containing diff information.
|
f11193:m0
|
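The docstring's example output can be reproduced with the standard library as a sanity check: `difflib.SequenceMatcher` yields the same added/deleted/unchanged states, although without the grid-based 'modified' detection used above:

```python
import difflib

def simple_diff(before, after):
    """Approximate diff() output using difflib (no 'modified' detection)."""
    result = []
    matcher = difflib.SequenceMatcher(a=before, b=after)
    for op, i1, i2, j1, j2 in matcher.get_opcodes():
        if op in ('delete', 'replace'):
            result.extend({'state': 'deleted', 'value': v} for v in before[i1:i2])
        if op in ('insert', 'replace'):
            result.extend({'state': 'added', 'value': v} for v in after[j1:j2])
        if op == 'equal':
            result.extend({'state': 'unchanged', 'value': v} for v in before[i1:i2])
    return result

print(simple_diff(['a', 'b', 'c'], ['b', 'c', 'd']))
# [{'state': 'deleted', 'value': 'a'}, {'state': 'unchanged', 'value': 'b'},
#  {'state': 'unchanged', 'value': 'c'}, {'state': 'added', 'value': 'd'}]
```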
def notebook_diff(nb1, nb2, check_modified=True):
|
nb1_cells = nb1['<STR_LIT>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>nb2_cells = nb2['<STR_LIT>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>diffed_nb = cells_diff(nb1_cells, nb2_cells, check_modified=check_modified)<EOL>line_diffs = diff_modified_items(diffed_nb)<EOL>cell_list = list()<EOL>for i, item in enumerate(diffed_nb):<EOL><INDENT>cell = diff_result_to_cell(item)<EOL>if i in line_diffs:<EOL><INDENT>cell['<STR_LIT>']['<STR_LIT>'] = line_diffs[i]<EOL><DEDENT>cell_list.append(cell)<EOL><DEDENT>nb1['<STR_LIT>'][<NUM_LIT:0>]['<STR_LIT>'] = cell_list<EOL>nb1['<STR_LIT>']['<STR_LIT>'] = '<STR_LIT>'<EOL>return nb1<EOL>
|
Unify two notebooks into a single notebook with diff metadata.
The result of this function is a valid notebook that can be loaded
by the IPython Notebook front-end. This function adds additional
cell metadata that the front-end Javascript uses to render the diffs.
Parameters
----------
nb1 : dict
An IPython Notebook to use as the baseline version.
nb2 : dict
An IPython Notebook to compare against the baseline.
check_modified : bool
Whether or not to detect cell modification.
Returns
-------
nb : A valid notebook containing diff metadata.
|
f11194:m0
|
def diff_result_to_cell(item):
|
state = item['<STR_LIT:state>']<EOL>if state == '<STR_LIT>':<EOL><INDENT>new_cell = item['<STR_LIT>'].data<EOL>old_cell = item['<STR_LIT>'].data<EOL>new_cell['<STR_LIT>']['<STR_LIT:state>'] = state<EOL>new_cell['<STR_LIT>']['<STR_LIT>'] = old_cell<EOL>cell = new_cell<EOL><DEDENT>else:<EOL><INDENT>cell = item['<STR_LIT:value>'].data<EOL>cell['<STR_LIT>']['<STR_LIT:state>'] = state<EOL><DEDENT>return cell<EOL>
|
diff.diff returns a dictionary with all the information we need,
but we want to extract the cell and change its metadata.
|
f11194:m2
|
def cells_diff(before_cells, after_cells, check_modified=False):
|
before_comps = [<EOL>CellComparator(cell, check_modified=check_modified)<EOL>for cell in before_cells<EOL>]<EOL>after_comps = [<EOL>CellComparator(cell, check_modified=check_modified)<EOL>for cell in after_cells<EOL>]<EOL>diff_result = diff(<EOL>before_comps,<EOL>after_comps,<EOL>check_modified=check_modified<EOL>)<EOL>return diff_result<EOL>
|
Diff two arrays of cells.
|
f11194:m3
|
def words_diff(before_words, after_words):
|
before_comps = before_words.split()<EOL>after_comps = after_words.split()<EOL>diff_result = diff(<EOL>before_comps,<EOL>after_comps<EOL>)<EOL>return diff_result<EOL>
|
Diff the words in two strings.
This is intended for use in diffing prose and other forms of text
where line breaks have little semantic value.
Parameters
----------
before_words : str
A string to be used as the baseline version.
after_words : str
A string to be compared against the baseline.
Returns
-------
diff_result : A list of dictionaries containing diff information.
|
f11194:m4
|
def lines_diff(before_lines, after_lines, check_modified=False):
|
before_comps = [<EOL>LineComparator(line, check_modified=check_modified)<EOL>for line in before_lines<EOL>]<EOL>after_comps = [<EOL>LineComparator(line, check_modified=check_modified)<EOL>for line in after_lines<EOL>]<EOL>diff_result = diff(<EOL>before_comps,<EOL>after_comps,<EOL>check_modified=check_modified<EOL>)<EOL>return diff_result<EOL>
|
Diff the lines in two strings.
Parameters
----------
before_lines : iterable
Iterable containing lines used as the baseline version.
after_lines : iterable
Iterable containing lines to be compared against the baseline.
Returns
-------
diff_result : A list of dictionaries containing diff information.
|
f11194:m5
|
def merge(local, base, remote, check_modified=False):
|
base_local = diff.diff(base, local, check_modified=check_modified)<EOL>base_remote = diff.diff(base, remote, check_modified=check_modified)<EOL>merge = diff.diff(base_local, base_remote)<EOL>return merge<EOL>
|
Generate unmerged series of changes (including conflicts).
By diffing the two diffs, we find *changes* that are
on the local branch, the remote branch, or both.
We arbitrarily choose the "local" branch to be the "before"
and the "remote" branch to be the "after" in the diff algorithm.
Therefore:
If a change is "deleted", that means that it occurs only on
the local branch. If a change is "added" that means it occurs only on
the remote branch. If a change is "unchanged", that means it occurs
in both branches. Either the same addition or same deletion occurred in
both branches, or the cell was not changed in either branch.
Parameters
----------
local : list
A sequence representing the items on the local branch.
base : list
A sequence representing the items on the base branch.
remote : list
A sequence representing the items on the remote branch.
Returns
-------
result : A diff result comparing the changes on the local and remote
branches.
|
f11195:m0
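The diff-of-diffs idea described above can be tried out on plain lists: diff base against local, diff base against remote, then diff the two change sequences against each other so that local-only changes surface as "deleted", remote-only changes as "added", and shared changes (or untouched items) as "unchanged". The helper below is a naive stand-in for the project's diff function, not its real implementation:

```python
import difflib

def simple_diff(before, after):
    # Stand-in diff: yields hashable (state, value) pairs so its output can
    # itself be diffed again.
    out = []
    for op, i1, i2, j1, j2 in difflib.SequenceMatcher(None, before, after).get_opcodes():
        if op == 'equal':
            out.extend(('unchanged', v) for v in before[i1:i2])
        else:
            out.extend(('deleted', v) for v in before[i1:i2])
            out.extend(('added', v) for v in after[j1:j2])
    return out

def merge(local, base, remote):
    base_local = simple_diff(base, local)    # what the local branch changed
    base_remote = simple_diff(base, remote)  # what the remote branch changed
    # Diffing the two change sequences classifies each change by branch.
    return simple_diff(base_local, base_remote)

base, local, remote = ['a', 'b', 'c'], ['a', 'B', 'c'], ['a', 'b', 'c', 'd']
# merge(local, base, remote): the edit of 'b' appears only in the local
# changes ('deleted'), while the new 'd' appears only in the remote
# changes ('added'); everything else is 'unchanged'.
```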
|
def notebook_merge(local, base, remote, check_modified=False):
|
local_cells = get_cells(local)<EOL>base_cells = get_cells(base)<EOL>remote_cells = get_cells(remote)<EOL>rows = []<EOL>current_row = []<EOL>empty_cell = lambda: {<EOL>'<STR_LIT>': '<STR_LIT:code>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': [],<EOL>'<STR_LIT>': <NUM_LIT:1>,<EOL>'<STR_LIT:text>': ['<STR_LIT>'],<EOL>'<STR_LIT>': {'<STR_LIT:state>': '<STR_LIT>'}<EOL>}<EOL>diff_of_diffs = merge(local_cells, base_cells, remote_cells)<EOL>for item in diff_of_diffs:<EOL><INDENT>state = item['<STR_LIT:state>']<EOL>cell = copy.deepcopy(diff_result_to_cell(item['<STR_LIT:value>']))<EOL>if state == '<STR_LIT>':<EOL><INDENT>if cell['<STR_LIT>']['<STR_LIT:state>'] == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>cell['<STR_LIT>']['<STR_LIT>'] = '<STR_LIT>'<EOL>remote_cell = empty_cell()<EOL>remote_cell['<STR_LIT>']['<STR_LIT>'] = '<STR_LIT>'<EOL>if cell['<STR_LIT>']['<STR_LIT:state>'] == '<STR_LIT>'or cell['<STR_LIT>']['<STR_LIT:state>'] == '<STR_LIT>':<EOL><INDENT>base_cell = copy.deepcopy(cell)<EOL><DEDENT>else:<EOL><INDENT>base_cell = empty_cell()<EOL><DEDENT>base_cell['<STR_LIT>']['<STR_LIT>'] = '<STR_LIT>'<EOL>current_row = [<EOL>cell,<EOL>base_cell,<EOL>remote_cell,<EOL>]<EOL><DEDENT>elif state == '<STR_LIT>':<EOL><INDENT>cell['<STR_LIT>']['<STR_LIT>'] = '<STR_LIT>'<EOL>if cell['<STR_LIT>']['<STR_LIT:state>'] == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>if cell['<STR_LIT>']['<STR_LIT:state>'] == '<STR_LIT>':<EOL><INDENT>base_cell = copy.deepcopy(cell)<EOL>base_cell['<STR_LIT>']['<STR_LIT:state>'] = '<STR_LIT>'<EOL>local_cell = copy.deepcopy(cell)<EOL>local_cell['<STR_LIT>']['<STR_LIT:state>'] = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>base_cell = empty_cell()<EOL>local_cell = empty_cell()<EOL><DEDENT>base_cell['<STR_LIT>']['<STR_LIT>'] = '<STR_LIT>'<EOL>local_cell['<STR_LIT>']['<STR_LIT>'] = '<STR_LIT>'<EOL>current_row = [<EOL>local_cell,<EOL>base_cell,<EOL>cell,<EOL>]<EOL><DEDENT>elif state == '<STR_LIT>':<EOL><INDENT>cell1 = copy.deepcopy(cell)<EOL>cell3 = copy.deepcopy(cell)<EOL>if cell['<STR_LIT>']['<STR_LIT:state>'] == '<STR_LIT>'or cell['<STR_LIT>']['<STR_LIT:state>'] == '<STR_LIT>':<EOL><INDENT>cell2 = copy.deepcopy(cell)<EOL>cell2['<STR_LIT>']['<STR_LIT:state>'] = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>cell2 = empty_cell()<EOL><DEDENT>cell1['<STR_LIT>']['<STR_LIT>'] = '<STR_LIT>'<EOL>cell2['<STR_LIT>']['<STR_LIT>'] = '<STR_LIT>'<EOL>cell3['<STR_LIT>']['<STR_LIT>'] = '<STR_LIT>'<EOL>current_row = [<EOL>cell1,<EOL>cell2,<EOL>cell3,<EOL>]<EOL><DEDENT>rows.append(current_row)<EOL><DEDENT>result_notebook = local<EOL>if len(result_notebook['<STR_LIT>']) == <NUM_LIT:0>:<EOL><INDENT>result_notebook['<STR_LIT>'] = [nbformat.new_worksheet()]<EOL><DEDENT>new_cell_array = list(it.chain.from_iterable(rows))<EOL>result_notebook['<STR_LIT>'][<NUM_LIT:0>]['<STR_LIT>'] = new_cell_array<EOL>result_notebook['<STR_LIT>']['<STR_LIT>'] = '<STR_LIT>'<EOL>return result_notebook<EOL>
|
Unify three notebooks into a single notebook with merge metadata.
The result of this function is a valid notebook that can be loaded
by the IPython Notebook front-end. This function adds additional
cell metadata that the front-end Javascript uses to render the merge.
Parameters
----------
local : dict
The local branch's version of the notebook.
base : dict
The last common ancestor of local and remote.
remote : dict
The remote branch's version of the notebook.
Returns
-------
nb : A valid notebook containing merge metadata.
|
f11195:m1
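A typical call site for the merge routine above, sketched under the assumption that the three notebook versions are plain .ipynb JSON files on disk; the file names and the json-based loader are illustrative, not part of the original code:

```python
import json

def load_notebook(path):
    # .ipynb files are JSON documents, so a plain json.load yields the
    # dict structure notebook_merge expects.
    with open(path) as fh:
        return json.load(fh)

local = load_notebook('analysis_local.ipynb')
base = load_notebook('analysis_base.ipynb')
remote = load_notebook('analysis_remote.ipynb')

merged = notebook_merge(local, base, remote)
# 'merged' is still a loadable notebook: every original cell now appears as a
# local/base/remote triple whose metadata tells the front-end how to render it.
```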
|
def parse(self, json_data):
|
data = current.read(json_data, '<STR_LIT>')<EOL>json_data.close()<EOL>return data<EOL>
|
Parse a notebook .ipynb file.
Parameters
----------
json_data : file
A file handle for an .ipynb file.
Returns
-------
nb : An IPython Notebook data structure.
|
f11218:c0:m0
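The parser above relies on the old IPython "current" nbformat shim. With the standalone nbformat package, an equivalent read looks roughly like this; the as_version=4 target is an assumption about the desired format:

```python
import nbformat

def parse_notebook(path):
    # Read an .ipynb file and normalise it to nbformat version 4.
    with open(path) as fh:
        return nbformat.read(fh, as_version=4)
```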
|
def __nonzero__(self):
|
return self.truth<EOL>
|
For evaluating the object as a boolean.
|
f11219:c0:m1
|
def equal(self, line1, line2):
|
eqLine = line1 == line2<EOL>if eqLine:<EOL><INDENT>return BooleanPlus(True, False)<EOL><DEDENT>else:<EOL><INDENT>unchanged_count = self.count_similar_words(line1, line2)<EOL>similarity_percent = (<EOL>(<NUM_LIT> * unchanged_count) /<EOL>(len(line1.split()) + len(line2.split()))<EOL>)<EOL>if similarity_percent >= <NUM_LIT>:<EOL><INDENT>return BooleanPlus(True, True)<EOL><DEDENT>return BooleanPlus(False, False)<EOL><DEDENT>
|
Return True if exactly equal or if equal but modified;
otherwise return False.
Return type: BooleanPlus.
|
f11219:c1:m2
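The line comparator above treats two different lines as "the same line, modified" when enough of their combined words match; the multiplier and threshold are elided in the record, so the 50% cut-off below is an assumption. The word-matching helper is also a naive stand-in for the class's own count_similar_words:

```python
def count_similar_words(line1, line2):
    # Naive stand-in: count words of line1 that also appear in line2,
    # consuming matches so repeated words are not double-counted.
    remaining = line2.split()
    count = 0
    for word in line1.split():
        if word in remaining:
            remaining.remove(word)
            count += 1
    return count

def lines_equalish(line1, line2, threshold=0.5):
    if line1 == line2:
        return True  # exactly equal
    unchanged = count_similar_words(line1, line2)
    total = len(line1.split()) + len(line2.split())
    # Equal-but-modified when at least `threshold` of the combined words match.
    return total > 0 and (2.0 * unchanged) / total >= threshold
```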
|
def compare_cells(self, cell1, cell2):
|
eqlanguage = cell1["<STR_LIT>"] == cell2["<STR_LIT>"]<EOL>eqinput = cell1["<STR_LIT:input>"] == cell2["<STR_LIT:input>"]<EOL>eqoutputs = self.equaloutputs(cell1["<STR_LIT>"], cell2["<STR_LIT>"])<EOL>if eqlanguage and eqinput and eqoutputs:<EOL><INDENT>return BooleanPlus(True, False)<EOL><DEDENT>elif not self.check_modified:<EOL><INDENT>return BooleanPlus(False, False)<EOL><DEDENT>input1 = u"<STR_LIT>".join(cell1['<STR_LIT:input>'])<EOL>input2 = u"<STR_LIT>".join(cell2['<STR_LIT:input>'])<EOL>similarity_percent = Levenshtein.ratio(input1, input2)<EOL>if similarity_percent >= <NUM_LIT>:<EOL><INDENT>return BooleanPlus(True, True)<EOL><DEDENT>return BooleanPlus(False, False)<EOL>
|
Return True if exactly equal or if equal but modified;
otherwise return False.
Return type: BooleanPlus.
|
f11219:c2:m5
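When two code cells are not identical, the record above falls back to a Levenshtein ratio over their joined input source to decide whether one is a modified version of the other. Levenshtein.ratio comes from the python-Levenshtein package; the join separator and the 0.65 cut-off below are assumptions, since both literals are elided in the record:

```python
import Levenshtein  # python-Levenshtein package

def cells_look_modified(cell1, cell2, threshold=0.65):
    # Join each cell's input lines and compare the resulting strings;
    # a high similarity ratio marks the cell as modified rather than
    # deleted-and-re-added.
    src1 = u''.join(cell1.get('input', []))
    src2 = u''.join(cell2.get('input', []))
    return Levenshtein.ratio(src1, src2) >= threshold
```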
|
def load_keys():
|
consumer_key = os.environ.get('<STR_LIT>')<EOL>consumer_secret = os.environ.get('<STR_LIT>')<EOL>access_token = os.environ.get('<STR_LIT>')<EOL>access_token_secret = os.environ.get('<STR_LIT>')<EOL>return consumer_key, consumer_secret, access_token, access_token_secret<EOL>
|
Loads Twitter API keys from environment variables.
Returns:
tuple: consumer_key, consumer_secret, access_token, access_token_secret
|
f11229:m0
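Reading the credentials from the environment keeps them out of the source tree. The variable names below are placeholders, since the real names are elided in the record:

```python
import os

def load_keys():
    """Load Twitter credentials from environment variables (names assumed)."""
    return (
        os.environ.get('TWITTER_CONSUMER_KEY'),
        os.environ.get('TWITTER_CONSUMER_SECRET'),
        os.environ.get('TWITTER_ACCESS_TOKEN'),
        os.environ.get('TWITTER_ACCESS_TOKEN_SECRET'),
    )
```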
|
def search(self, q):
|
results = self._api.search(q=q)<EOL>return results<EOL>
|
Search tweets by keyword.
Args:
q: keyword
Returns:
list: tweet list
|
f11229:c0:m1
|
def search_by_user(self, screen_name, count=<NUM_LIT:100>):
|
results = self._api.user_timeline(screen_name=screen_name, count=count)<EOL>return results<EOL>
|
Search tweets by user.
Args:
screen_name: screen name
count: the number of tweets
Returns:
list: tweet list
|
f11229:c0:m2
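Both search records above are thin wrappers around a tweepy API handle. A sketch of how such a client is usually wired up with the credentials from load_keys; the calls follow tweepy's 3.x API (newer releases rename search to search_tweets), and the class itself is illustrative:

```python
import tweepy

class TwitterClient(object):
    def __init__(self, consumer_key, consumer_secret, access_token, access_token_secret):
        auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        self._api = tweepy.API(auth)

    def search(self, q):
        # Keyword search over recent tweets.
        return self._api.search(q=q)

    def search_by_user(self, screen_name, count=100):
        # Most recent `count` tweets from a single account.
        return self._api.user_timeline(screen_name=screen_name, count=count)
```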
|
def __init__(self, app):
|
prefix = "<STR_LIT>".format(app.config.get('<STR_LIT>'))<EOL>super(self.__class__, self).__init__(<EOL>current_cache, prefix=prefix, timeout=None,<EOL>ignore_memcache_errors=True<EOL>)<EOL>
|
Initialize `BytecodeCache`.
|
f11241:c0:m0
|
def _callback_factory(callback_imp):
|
if callback_imp is None:<EOL><INDENT>try:<EOL><INDENT>pkg_resources.get_distribution('<STR_LIT>')<EOL>from flask_login import current_user<EOL>return lambda: current_user.is_authenticated<EOL><DEDENT>except pkg_resources.DistributionNotFound:<EOL><INDENT>return lambda: False<EOL><DEDENT><DEDENT>elif isinstance(callback_imp, string_types):<EOL><INDENT>return import_string(callback_imp)<EOL><DEDENT>else:<EOL><INDENT>return callback_imp<EOL><DEDENT>
|
Factory for creating an "is authenticated" callback.
|
f11242:m0
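The factory above resolves the authentication callback in three ways: default to Flask-Login's current_user when that package is installed, import a dotted path when given a string, or accept a ready-made callable. A condensed sketch of the same dispatch; pkg_resources and werkzeug's import_string are real APIs, while the surrounding function name is illustrative:

```python
import pkg_resources
from werkzeug.utils import import_string

def make_is_authenticated(callback=None):
    if callback is None:
        try:
            pkg_resources.get_distribution('Flask-Login')
            from flask_login import current_user
            return lambda: current_user.is_authenticated
        except pkg_resources.DistributionNotFound:
            return lambda: False  # no login system installed
    if isinstance(callback, str):
        return import_string(callback)  # dotted import path from config
    return callback  # already a callable
```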
|
def __init__(self, app=None):
|
if app:<EOL><INDENT>self.init_app(app)<EOL><DEDENT>
|
Extension initialization.
|
f11242:c0:m0
|
def init_app(self, app):
|
self.init_config(app)<EOL>self.cache = Cache(app)<EOL>self.is_authenticated_callback = _callback_factory(<EOL>app.config['<STR_LIT>'])<EOL>app.extensions['<STR_LIT>'] = self<EOL>
|
Flask application initialization.
|
f11242:c0:m1
|