column        type           range
commit        stringlengths  40 .. 40
old_file      stringlengths  4 .. 150
new_file      stringlengths  4 .. 150
old_contents  stringlengths  0 .. 3.26k
new_contents  stringlengths  1 .. 4.43k
subject       stringlengths  15 .. 501
message       stringlengths  15 .. 4.06k
lang          stringclasses  4 values
license       stringclasses  13 values
repos         stringlengths  5 .. 91.5k
diff          stringlengths  0 .. 4.35k
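
Each record below carries one commit: the hash, the file path before and after the change, the full file contents before and after, the commit subject and message, the language, the license, the owning repositories, and the unified diff. A minimal sketch of loading such a dataset with the Hugging Face datasets library follows; the hub identifier "user/commit-diffs" is a hypothetical placeholder, since the dataset's real name is not shown in this preview.

# Minimal sketch: load a commit-diff dataset and inspect one record.
# "user/commit-diffs" is a hypothetical placeholder identifier.
from datasets import load_dataset

ds = load_dataset("user/commit-diffs", split="train")

row = ds[0]
print(row["commit"])      # 40-character commit hash
print(row["old_file"])    # path of the file before the change
print(row["subject"])     # first line of the commit message
print(row["diff"][:200])  # start of the unified diff
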
891e9e05f8c3fde75bb217d8d8132cdf6003e827
locust/shape.py
locust/shape.py
old_contents:
from __future__ import annotations
import time
from typing import Optional, Tuple, List, Type

from . import User
from .runners import Runner


class LoadTestShape:
    """
    A simple load test shape class used to control the shape of load generated
    during a load test.
    """

    runner: Optional[Runner] = None
    """Reference to the :class:`Runner <locust.runners.Runner>` instance"""

    def __init__(self):
        self.start_time = time.perf_counter()

    def reset_time(self):
        """
        Resets start time back to 0
        """
        self.start_time = time.perf_counter()

    def get_run_time(self):
        """
        Calculates run time in seconds of the load test
        """
        return time.perf_counter() - self.start_time

    def get_current_user_count(self):
        """
        Returns current actual number of users from the runner
        """
        return self.runner.user_count

    def tick(self) -> Tuple[int, float] | Tuple[int, float, Optional[List[Type[User]]]] | None:
        """
        Returns a tuple with 2 elements to control the running load test:

            user_count -- Total user count
            spawn_rate -- Number of users to start/stop per second when changing number of users
            user_classes -- None or a List of userclasses to be spawned in it tick

        If `None` is returned then the running load test will be stopped.
        """

        return None
new_contents:
from __future__ import annotations
import time
from typing import Optional, Tuple, List, Type
from abc import ABC, abstractmethod

from . import User
from .runners import Runner


class LoadTestShape(ABC):
    """
    Base class for custom load shapes.
    """

    runner: Optional[Runner] = None
    """Reference to the :class:`Runner <locust.runners.Runner>` instance"""

    def __init__(self):
        self.start_time = time.perf_counter()

    def reset_time(self):
        """
        Resets start time back to 0
        """
        self.start_time = time.perf_counter()

    def get_run_time(self):
        """
        Calculates run time in seconds of the load test
        """
        return time.perf_counter() - self.start_time

    def get_current_user_count(self):
        """
        Returns current actual number of users from the runner
        """
        return self.runner.user_count

    @abstractmethod
    def tick(self) -> Tuple[int, float] | Tuple[int, float, Optional[List[Type[User]]]] | None:
        """
        Returns a tuple with 2 elements to control the running load test:

            user_count -- Total user count
            spawn_rate -- Number of users to start/stop per second when changing number of users
            user_classes -- None or a List of userclasses to be spawned in it tick

        If `None` is returned then the running load test will be stopped.
        """
        ...
Make LoadTestShape a proper abstract class.
Make LoadTestShape a proper abstract class.
Python
mit
locustio/locust,locustio/locust,locustio/locust,locustio/locust
diff:
---
+++
@@ -1,15 +1,15 @@
 from __future__ import annotations
 import time
 from typing import Optional, Tuple, List, Type
+from abc import ABC, abstractmethod
 
 from . import User
 from .runners import Runner
 
 
-class LoadTestShape:
+class LoadTestShape(ABC):
     """
-    A simple load test shape class used to control the shape of load generated
-    during a load test.
+    Base class for custom load shapes.
     """
 
     runner: Optional[Runner] = None
@@ -36,6 +36,7 @@
         """
         return self.runner.user_count
 
+    @abstractmethod
     def tick(self) -> Tuple[int, float] | Tuple[int, float, Optional[List[Type[User]]]] | None:
         """
         Returns a tuple with 2 elements to control the running load test:
@@ -47,5 +48,4 @@
 
         If `None` is returned then the running load test will be stopped.
         """
-
-        return None
+        ...
4bdaf4d2e29da71a1bf00e1bfc5caad6d3647372
search/views.py
search/views.py
old_contents:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.core.management import call_command
from django.shortcuts import render


@login_required(login_url='/accounts/login/')
def search_index(request):
    call_command('search-index', 'all')
    return HttpResponse("Index process done.")
new_contents:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.core.management import call_command
from django.shortcuts import render


@login_required(login_url='/accounts/login/')
def search_index(request):
    call_command('search-index', '_all')
    return HttpResponse("Index process done.")
Fix error in search index not updating on view call.
Fix error in search index not updating on view call.
Python
apache-2.0
toladata/TolaActivity,toladata/TolaActivity,toladata/TolaActivity,toladata/TolaActivity
diff:
---
+++
@@ -8,5 +8,5 @@
 
 @login_required(login_url='/accounts/login/')
 def search_index(request):
-    call_command('search-index', 'all')
+    call_command('search-index', '_all')
     return HttpResponse("Index process done.")
1f0e5b7e65914ec5c3fb0a6617f72ea2f466bbdc
server/admin.py
server/admin.py
old_contents:
from django.contrib import admin
from server.models import *


class MachineGroupAdmin(admin.ModelAdmin):
    readonly_fields = ('key',)


class MachineAdmin(admin.ModelAdmin):
    list_display = ('hostname', 'serial')


admin.site.register(UserProfile)
admin.site.register(BusinessUnit)
admin.site.register(MachineGroup, MachineGroupAdmin)
admin.site.register(Machine, MachineAdmin)
admin.site.register(Fact)
admin.site.register(PluginScriptSubmission)
admin.site.register(PluginScriptRow)
admin.site.register(HistoricalFact)
admin.site.register(Condition)
admin.site.register(PendingUpdate)
admin.site.register(InstalledUpdate)
admin.site.register(PendingAppleUpdate)
admin.site.register(ApiKey)
admin.site.register(Plugin)
admin.site.register(Report)
# admin.site.register(OSQueryResult)
# admin.site.register(OSQueryColumn)
admin.site.register(SalSetting)
admin.site.register(UpdateHistory)
admin.site.register(UpdateHistoryItem)
admin.site.register(MachineDetailPlugin)
new_contents:
from django.contrib import admin

from server.models import *


class ApiKeyAdmin(admin.ModelAdmin):
    list_display = ('name', 'public_key', 'private_key')


class MachineAdmin(admin.ModelAdmin):
    list_display = ('hostname', 'serial')


class MachineGroupAdmin(admin.ModelAdmin):
    readonly_fields = ('key',)


admin.site.register(ApiKey, ApiKeyAdmin)
admin.site.register(BusinessUnit)
admin.site.register(Condition)
admin.site.register(Fact)
admin.site.register(HistoricalFact)
admin.site.register(InstalledUpdate)
admin.site.register(Machine, MachineAdmin)
admin.site.register(MachineDetailPlugin)
admin.site.register(MachineGroup, MachineGroupAdmin)
# admin.site.register(OSQueryColumn)
# admin.site.register(OSQueryResult)
admin.site.register(PendingAppleUpdate)
admin.site.register(PendingUpdate)
admin.site.register(Plugin)
admin.site.register(PluginScriptRow)
admin.site.register(PluginScriptSubmission)
admin.site.register(Report)
admin.site.register(SalSetting)
admin.site.register(UpdateHistory)
admin.site.register(UpdateHistoryItem)
admin.site.register(UserProfile)
Sort registrations. Separate classes of imports. Add API key display.
Sort registrations. Separate classes of imports. Add API key display.
Python
apache-2.0
salopensource/sal,salopensource/sal,sheagcraig/sal,salopensource/sal,sheagcraig/sal,sheagcraig/sal,salopensource/sal,sheagcraig/sal
diff:
---
+++
@@ -1,33 +1,38 @@
 from django.contrib import admin
+
 from server.models import *
+
+
+class ApiKeyAdmin(admin.ModelAdmin):
+    list_display = ('name', 'public_key', 'private_key')
+
+
+class MachineAdmin(admin.ModelAdmin):
+    list_display = ('hostname', 'serial')
 
 
 class MachineGroupAdmin(admin.ModelAdmin):
     readonly_fields = ('key',)
 
 
-class MachineAdmin(admin.ModelAdmin):
-    list_display = ('hostname', 'serial')
-
-
-admin.site.register(UserProfile)
+admin.site.register(ApiKey, ApiKeyAdmin)
 admin.site.register(BusinessUnit)
+admin.site.register(Condition)
+admin.site.register(Fact)
+admin.site.register(HistoricalFact)
+admin.site.register(InstalledUpdate)
+admin.site.register(Machine, MachineAdmin)
+admin.site.register(MachineDetailPlugin)
 admin.site.register(MachineGroup, MachineGroupAdmin)
-admin.site.register(Machine, MachineAdmin)
-admin.site.register(Fact)
+# admin.site.register(OSQueryColumn)
+# admin.site.register(OSQueryResult)
+admin.site.register(PendingAppleUpdate)
+admin.site.register(PendingUpdate)
+admin.site.register(Plugin)
+admin.site.register(PluginScriptRow)
 admin.site.register(PluginScriptSubmission)
-admin.site.register(PluginScriptRow)
-admin.site.register(HistoricalFact)
-admin.site.register(Condition)
-admin.site.register(PendingUpdate)
-admin.site.register(InstalledUpdate)
-admin.site.register(PendingAppleUpdate)
-admin.site.register(ApiKey)
-admin.site.register(Plugin)
 admin.site.register(Report)
-# admin.site.register(OSQueryResult)
-# admin.site.register(OSQueryColumn)
 admin.site.register(SalSetting)
 admin.site.register(UpdateHistory)
 admin.site.register(UpdateHistoryItem)
-admin.site.register(MachineDetailPlugin)
+admin.site.register(UserProfile)
51076b9d21679b1198931e2517afbf7c6d2e573a
src/competition/forms/team_forms.py
src/competition/forms/team_forms.py
old_contents:
from django import forms
from django.template.defaultfilters import slugify

from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, Submit
from crispy_forms.bootstrap import FormActions

from competition.models.team_model import Team


class TeamForm(forms.ModelForm):
    class Meta:
        model = Team
        fields = ('name', )

    def __init__(self, *args, **kwargs):
        self.helper = FormHelper()
        self.helper.form_class = 'form-horizontal'
        self.helper.layout = Layout(
            Fieldset(
                'Create a new team',
                'name',
            ),
            FormActions(
                Submit('submit', 'Submit')
            )
        )
        super(TeamForm, self).__init__(*args, **kwargs)

    def clean_name(self):
        c = self.instance.competition
        n = self.cleaned_data['name']
        s = slugify(n)
        if Team.objects.filter(competition=c, slug=s).exists():
            msg = "This name is already taken for %s" % c.name
            raise forms.ValidationError(msg)
        return n

    def validate_unique(self):
        exclude = self._get_validation_exclusions()
        exclude.remove('competition')
        try:
            self.instance.validate_unique(exclude=exclude)
        except ValidationError, e:
            self._update_errors(e.message_dict)
new_contents:
from django import forms
from django.template.defaultfilters import slugify

from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, Submit
from crispy_forms.bootstrap import FormActions

from competition.models.team_model import Team


class TeamForm(forms.ModelForm):
    class Meta:
        model = Team
        fields = ('name', )

    def __init__(self, *args, **kwargs):
        self.helper = FormHelper()
        self.helper.form_class = 'form-horizontal'
        self.helper.label_class = 'col-lg-2'
        self.helper.field_class = 'col-lg-8'
        self.helper.layout = Layout(
            Fieldset(
                'Create a new team',
                'name',
            ),
            FormActions(
                Submit('submit', 'Submit')
            )
        )
        super(TeamForm, self).__init__(*args, **kwargs)

    def clean_name(self):
        c = self.instance.competition
        n = self.cleaned_data['name']
        s = slugify(n)
        if Team.objects.filter(competition=c, slug=s).exists():
            msg = "This name is already taken for %s" % c.name
            raise forms.ValidationError(msg)
        return n

    def validate_unique(self):
        exclude = self._get_validation_exclusions()
        exclude.remove('competition')
        try:
            self.instance.validate_unique(exclude=exclude)
        except ValidationError, e:
            self._update_errors(e.message_dict)
Update forms to bootstrap 3
Update forms to bootstrap 3 form-horizontal needs additional helper classes in BS3
Python
bsd-3-clause
michaelwisely/django-competition,michaelwisely/django-competition,michaelwisely/django-competition
diff:
---
+++
@@ -16,6 +16,8 @@
     def __init__(self, *args, **kwargs):
         self.helper = FormHelper()
         self.helper.form_class = 'form-horizontal'
+        self.helper.label_class = 'col-lg-2'
+        self.helper.field_class = 'col-lg-8'
         self.helper.layout = Layout(
             Fieldset(
                 'Create a new team',
07e780a27253c4108c96e232ffbb975e88d23f8d
src/pygrapes/serializer/__init__.py
src/pygrapes/serializer/__init__.py
old_contents:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from abstract import Abstract


__all__ = ['Abstract']
new_contents:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from abstract import Abstract
from json import Json

__all__ = ['Abstract', 'Json']
Load pygrapes.serializer.json.Json right inside pygrapes.serializer
Load pygrapes.serializer.json.Json right inside pygrapes.serializer
Python
bsd-3-clause
michalbachowski/pygrapes,michalbachowski/pygrapes,michalbachowski/pygrapes
diff:
---
+++
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 from abstract import Abstract
+from json import Json
 
-
-__all__ = ['Abstract']
+__all__ = ['Abstract', 'Json']
384c2f34fcdefd26a928254e70a9ed6d15ffd069
dimod/reference/samplers/random_sampler.py
dimod/reference/samplers/random_sampler.py
""" RandomSampler ------------- A random sampler that can be used for unit testing and debugging. """ import numpy as np from dimod.core.sampler import Sampler from dimod.response import Response, SampleView __all__ = ['RandomSampler'] class RandomSampler(Sampler): """Gives random samples. Note that this sampler is intended for testing. """ def __init__(self): Sampler.__init__(self) self.sample_kwargs = {'num_reads': []} def sample(self, bqm, num_reads=10): """Gives random samples. Args: todo Returns: :obj:`.Response`: The vartype will match the given binary quadratic model. Notes: For each variable in each sample, the value is chosen by a coin flip. """ values = np.asarray(list(bqm.vartype.value), dtype='int8') samples = np.random.choice(values, (num_reads, len(bqm))) variable_labels = list(bqm.linear) label_to_idx = {v: idx for idx, v in enumerate(variable_labels)} energies = [bqm.energy(SampleView(idx, samples, label_to_idx)) for idx in range(num_reads)] return Response.from_matrix(samples, {'energy': energies}, vartype=bqm.vartype, variable_labels=variable_labels)
""" RandomSampler ------------- A random sampler that can be used for unit testing and debugging. """ import numpy as np from dimod.core.sampler import Sampler from dimod.response import Response, SampleView __all__ = ['RandomSampler'] class RandomSampler(Sampler): """Gives random samples. Note that this sampler is intended for testing. """ properties = None parameters = None def __init__(self): self.parameters = {'num_reads': []} self.properties = {} def sample(self, bqm, num_reads=10): """Gives random samples. Args: todo Returns: :obj:`.Response`: The vartype will match the given binary quadratic model. Notes: For each variable in each sample, the value is chosen by a coin flip. """ values = np.asarray(list(bqm.vartype.value), dtype='int8') samples = np.random.choice(values, (num_reads, len(bqm))) variable_labels = list(bqm.linear) label_to_idx = {v: idx for idx, v in enumerate(variable_labels)} energies = [bqm.energy(SampleView(idx, samples, label_to_idx)) for idx in range(num_reads)] return Response.from_matrix(samples, {'energy': energies}, vartype=bqm.vartype, variable_labels=variable_labels)
Update RandomSampler to use the new Sampler abc
Update RandomSampler to use the new Sampler abc
Python
apache-2.0
dwavesystems/dimod,dwavesystems/dimod
diff:
---
+++
@@ -18,9 +18,12 @@
     Note that this sampler is intended for testing.
 
     """
 
+    properties = None
+    parameters = None
+
     def __init__(self):
-        Sampler.__init__(self)
-        self.sample_kwargs = {'num_reads': []}
+        self.parameters = {'num_reads': []}
+        self.properties = {}
 
     def sample(self, bqm, num_reads=10):
         """Gives random samples.
0aa5741ce05dcd4926be9c74af18f6fe46f4aded
etl_framework/utilities/DatetimeConverter.py
etl_framework/utilities/DatetimeConverter.py
"""class to convert datetime values""" import datetime class DatetimeConverter(object): """stuff""" _EPOCH_0 = datetime.datetime(1970, 1, 1) def __init__(self): """stuff""" pass @staticmethod def get_tomorrow(): """stuff""" return datetime.datetime.today() + datetime.timedelta(days=1) @classmethod def get_timestamp(cls, datetime_obj): """helper method to return timestamp fo datetime object""" return (datetime_obj - cls._EPOCH_0).total_seconds() @classmethod def get_tomorrow_timestamp(cls): """stuff""" return cls.get_timestamp(cls.get_tomorrow())
"""class to convert datetime values""" import datetime class DatetimeConverter(object): """stuff""" _EPOCH_0 = datetime.datetime(1970, 1, 1) def __init__(self): """stuff""" pass @staticmethod def get_tomorrow(): """stuff""" return datetime.datetime.today() + datetime.timedelta(days=1) @staticmethod def get_yesterday(): return datetime.datetime.today() - datetime.timedelta(days=1) @classmethod def get_timestamp(cls, datetime_obj): """helper method to return timestamp fo datetime object""" return (datetime_obj - cls._EPOCH_0).total_seconds() @classmethod def get_tomorrow_timestamp(cls): """stuff""" return cls.get_timestamp(cls.get_tomorrow()) @classmethod def get_yesterday_timestamp(cls): return cls.get_timestamp(cls.get_yesterday())
Add utility methods for yesterday's date
Add utility methods for yesterday's date
Python
mit
pantheon-systems/etl-framework
diff:
---
+++
@@ -18,6 +18,11 @@
 
         return datetime.datetime.today() + datetime.timedelta(days=1)
 
+    @staticmethod
+    def get_yesterday():
+
+        return datetime.datetime.today() - datetime.timedelta(days=1)
+
     @classmethod
     def get_timestamp(cls, datetime_obj):
         """helper method to return timestamp fo datetime object"""
@@ -29,3 +34,8 @@
         """stuff"""
 
         return cls.get_timestamp(cls.get_tomorrow())
+
+    @classmethod
+    def get_yesterday_timestamp(cls):
+
+        return cls.get_timestamp(cls.get_yesterday())
26bb374b00d667de00a080c4b32e102ac69a0e23
asn1crypto/version.py
asn1crypto/version.py
old_contents:
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function


__version__ = '0.24.0'
__version_info__ = (0, 24, 0)
new_contents:
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function


__version__ = '0.25.0-alpha'
__version_info__ = (0, 25, 0, 'alpha')
Mark master as working towards 0.25.0
Mark master as working towards 0.25.0
Python
mit
wbond/asn1crypto
diff:
---
+++
@@ -2,5 +2,5 @@
 from __future__ import unicode_literals, division, absolute_import, print_function
 
 
-__version__ = '0.24.0'
-__version_info__ = (0, 24, 0)
+__version__ = '0.25.0-alpha'
+__version_info__ = (0, 25, 0, 'alpha')
c52a959896c345b57fdd28e2ae8cbd75ab2e3c71
fuzzinator/call/file_reader_decorator.py
fuzzinator/call/file_reader_decorator.py
old_contents:
# Copyright (c) 2017-2018 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.

import os

from . import CallableDecorator


class FileReaderDecorator(CallableDecorator):
    """
    Decorator for SUTs that take input as a file path: saves the content of
    the failing test case.

    Moreover, the issue (if any) is also extended with the new ``'filename'``
    property containing the name of the test case (as received in the ``test``
    argument).

    **Example configuration snippet:**

    .. code-block:: ini

        [sut.foo]
        call=fuzzinator.call.SubprocessCall
        call.decorate(0)=fuzzionator.call.FileReaderDecorator

        [sut.foo.call]
        # assuming that foo takes one file as input specified on command line
        command=/home/alice/foo/bin/foo {test}
    """

    def decorator(self, **kwargs):
        def wrapper(fn):
            def reader(*args, **kwargs):
                issue = fn(*args, **kwargs)
                if issue is not None:
                    with open(kwargs['test'], 'rb') as f:
                        issue['filename'] = os.path.basename(kwargs['test'])
                        issue['test'] = f.read()
                return issue
            return reader
        return wrapper
new_contents:
# Copyright (c) 2017-2018 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.

import os

from . import CallableDecorator


class FileReaderDecorator(CallableDecorator):
    """
    Decorator for SUTs that take input as a file path: saves the content of
    the failing test case.

    Moreover, the issue (if any) is also extended with the new ``'filename'``
    property containing the name of the test case (as received in the ``test``
    argument).

    **Example configuration snippet:**

    .. code-block:: ini

        [sut.foo]
        call=fuzzinator.call.SubprocessCall
        call.decorate(0)=fuzzinator.call.FileReaderDecorator

        [sut.foo.call]
        # assuming that foo takes one file as input specified on command line
        command=/home/alice/foo/bin/foo {test}
    """

    def decorator(self, **kwargs):
        def wrapper(fn):
            def reader(*args, **kwargs):
                issue = fn(*args, **kwargs)
                if issue is not None:
                    with open(kwargs['test'], 'rb') as f:
                        issue['filename'] = os.path.basename(kwargs['test'])
                        issue['test'] = f.read()
                return issue
            return reader
        return wrapper
Fix a typo in the documentation of FileReaderDecorator.
Fix a typo in the documentation of FileReaderDecorator.
Python
bsd-3-clause
renatahodovan/fuzzinator,akosthekiss/fuzzinator,renatahodovan/fuzzinator,akosthekiss/fuzzinator,akosthekiss/fuzzinator,renatahodovan/fuzzinator,renatahodovan/fuzzinator,akosthekiss/fuzzinator
diff:
---
+++
@@ -25,7 +25,7 @@
 
         [sut.foo]
         call=fuzzinator.call.SubprocessCall
-        call.decorate(0)=fuzzionator.call.FileReaderDecorator
+        call.decorate(0)=fuzzinator.call.FileReaderDecorator
 
         [sut.foo.call]
         # assuming that foo takes one file as input specified on command line
cbd913af9013926ca7f08ab56023d7242e783698
ad-hoc-scripts/latex-adjust.py
ad-hoc-scripts/latex-adjust.py
old_contents:
#! /usr/bin/env python3

import sys
import json

for arg in sys.argv[1:]:
    with open(arg) as f:
        equajson = json.load(f)

    try:
        latex = equajson["markup-languages"]["LaTeX"][0]["markup"]
    except KeyError:
        continue

    if 'documentclass' not in latex:
        with_boilerplate = "\\documentclass{article}\n\\begin{document}\n\\[\n%s\n\\]\n\\end{document}" % latex
        equajson["markup-languages"]["LaTeX"][0]["markup"] = with_boilerplate

    with open(arg, 'w') as f:
        json.dump(equajson, f, indent=4, separators=(',', ': '), ensure_ascii=False, sort_keys=True)
new_contents:
#! /usr/bin/env python3

import sys
import json

for arg in sys.argv[1:]:
    with open(arg) as f:
        equajson = json.load(f)

    try:
        latex = equajson["markup-languages"]["LaTeX"][0]["markup"]
    except KeyError:
        continue

    if 'documentclass' not in latex:
        with_boilerplate = "\\documentclass{article}\n\\begin{document}\n\\[\n%s\n\\]\n\\end{document}" % latex
        equajson["markup-languages"]["LaTeX"][0]["markup"] = with_boilerplate

    with open(arg, 'w') as f:
        json.dump(equajson, f, indent=4, separators=(',', ': '), ensure_ascii=False, sort_keys=True)
        f.write('\n') # add trailing newline
Add trailing newline to make round-tripping without diffs possible.
Add trailing newline to make round-tripping without diffs possible.
Python
mit
nbeaver/equajson
diff:
---
+++
@@ -18,3 +18,4 @@
 
     with open(arg, 'w') as f:
         json.dump(equajson, f, indent=4, separators=(',', ': '), ensure_ascii=False, sort_keys=True)
+        f.write('\n') # add trailing newline
ace54e86e9462b25acd1636e0e9905ba6decfe9b
admin_tools/dashboard/views.py
admin_tools/dashboard/views.py
old_contents:
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.contrib import messages

try:
    from django.views.decorators.csrf import csrf_exempt
except ImportError:
    from django.contrib.csrf.middleware import csrf_exempt

from .forms import DashboardPreferencesForm
from .models import DashboardPreferences


@login_required
@csrf_exempt
def set_preferences(request, dashboard_id):
    """
    This view serves and validates a preferences form.
    """
    try:
        preferences = DashboardPreferences.objects.get(
            user=request.user,
            dashboard_id=dashboard_id
        )
    except DashboardPreferences.DoesNotExist:
        preferences = None
    if request.method == "POST":
        form = DashboardPreferencesForm(
            user=request.user,
            dashboard_id=dashboard_id,
            data=request.POST,
            instance=preferences
        )
        if form.is_valid():
            preferences = form.save()
            if request.is_ajax():
                return HttpResponse('true')
            messages.success(request, 'Preferences saved')
        elif request.is_ajax():
            return HttpResponse('false')
    else:
        form = DashboardPreferencesForm(
            user=request.user,
            dashboard_id=dashboard_id,
            instance=preferences
        )
    return render_to_response('admin_tools/dashboard/preferences_form.html',
                              RequestContext(request, {'form': form}))
new_contents:
from django.contrib.admin.views.decorators import staff_member_required
from django.http import HttpResponse
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.contrib import messages

try:
    from django.views.decorators.csrf import csrf_exempt
except ImportError:
    from django.contrib.csrf.middleware import csrf_exempt

from .forms import DashboardPreferencesForm
from .models import DashboardPreferences


@staff_member_required
@csrf_exempt
def set_preferences(request, dashboard_id):
    """
    This view serves and validates a preferences form.
    """
    try:
        preferences = DashboardPreferences.objects.get(
            user=request.user,
            dashboard_id=dashboard_id
        )
    except DashboardPreferences.DoesNotExist:
        preferences = None
    if request.method == "POST":
        form = DashboardPreferencesForm(
            user=request.user,
            dashboard_id=dashboard_id,
            data=request.POST,
            instance=preferences
        )
        if form.is_valid():
            preferences = form.save()
            if request.is_ajax():
                return HttpResponse('true')
            messages.success(request, 'Preferences saved')
        elif request.is_ajax():
            return HttpResponse('false')
    else:
        form = DashboardPreferencesForm(
            user=request.user,
            dashboard_id=dashboard_id,
            instance=preferences
        )
    return render_to_response('admin_tools/dashboard/preferences_form.html',
                              RequestContext(request, {'form': form}))
Use @staff_member_required decorator for the dashboard view as well
Use @staff_member_required decorator for the dashboard view as well
Python
mit
django-admin-tools/django-admin-tools,django-admin-tools/django-admin-tools,django-admin-tools/django-admin-tools
diff:
---
+++
@@ -1,4 +1,4 @@
-from django.contrib.auth.decorators import login_required
+from django.contrib.admin.views.decorators import staff_member_required
 from django.http import HttpResponse
 from django.template import RequestContext
 from django.shortcuts import render_to_response
@@ -13,7 +13,7 @@
 from .models import DashboardPreferences
 
 
-@login_required
+@staff_member_required
 @csrf_exempt
 def set_preferences(request, dashboard_id):
     """
3219a925ecddbacb39e4adc484d94eaed6bddd0b
yolk/__init__.py
yolk/__init__.py
"""yolk. Author: Rob Cakebread <cakebread at gmail> License : BSD """ __version__ = '0.8.6'
"""yolk. Author: Rob Cakebread <cakebread at gmail> License : BSD """ __version__ = '0.8.7'
Increment patch version to 0.8.7
Increment patch version to 0.8.7
Python
bsd-3-clause
myint/yolk,myint/yolk
diff:
---
+++
@@ -6,4 +6,4 @@
 
 """
 
-__version__ = '0.8.6'
+__version__ = '0.8.7'
badcdcc03517aaf705975676a5d37488b38c9738
foomodules/link_harvester/common_handlers.py
foomodules/link_harvester/common_handlers.py
old_contents:
import logging
import re
import socket
import urllib
from bs4 import BeautifulSoup

logger = logging.getLogger(__name__)

WURSTBALL_RE = re.compile(r"^https?://(www\.)?wurstball\.de/[0-9]+/")


def default_handler(metadata):
    return {key: getattr(metadata, key) for key in
            ["original_url", "url", "title", "description",
             "human_readable_type"]}


def wurstball_handler(metadata):
    if not WURSTBALL_RE.match(metadata.url):
        return None

    ret = default_handler(metadata)

    soup = BeautifulSoup(metadata.buf)
    img_url = soup.find(id="content-main").img["src"]

    try:
        response = urllib.request.urlopen(img_url, timeout=5)
        img_data = response.read()
    except (socket.timeout, urllib.error.URLError,
            urllib.error.HTTPError) as err:
        logger.warn("Could not download Wurstball image: {}".format(err))
        return ret

    mime_type = response.getheader("Content-Type")

    ret.update({"image_mime_type": mime_type,
                "image_buffer": img_data,
                "image_url": img_url,
                "title": None,
                "description": None})

    return ret
new_contents:
import logging
import re
import socket
import urllib
import http.client
from bs4 import BeautifulSoup

logger = logging.getLogger(__name__)

WURSTBALL_RE = re.compile(r"^https?://(www\.)?wurstball\.de/[0-9]+/")


def default_handler(metadata):
    return {key: getattr(metadata, key) for key in
            ["original_url", "url", "title", "description",
             "human_readable_type"]}


def wurstball_handler(metadata):
    if not WURSTBALL_RE.match(metadata.url):
        return None

    ret = default_handler(metadata)

    soup = BeautifulSoup(metadata.buf)
    img_url = soup.find(id="content-main").img["src"]

    try:
        response = urllib.request.urlopen(img_url, timeout=5)
        img_data = response.read()
    except (socket.timeout, urllib.error.URLError,
            urllib.error.HTTPError) as err:
        logger.warn("Could not download Wurstball image: {}".format(err))
        return ret

    mime_type = response.getheader("Content-Type")

    ret.update({"image_mime_type": mime_type,
                "image_buffer": img_data,
                "image_url": img_url,
                "title": None,
                "description": None})

    return ret


def image_handler(metadata):
    if not metadata.mime_type.startswith("image/"):
        return None

    ret = default_handler(metadata)

    try:
        img_data = metadata.buf + metadata.response.read()
    except http.client.IncompleteRead as err:
        logger.warn("Could not download image: {}".format(err))
        return ret

    ret.update({"image_mime_type": metadata.mime_type,
                "image_buffer": img_data,
                "image_url": metadata.url})

    return ret
Add image_handler for link harvester
Add image_handler for link harvester
Python
mit
horazont/xmpp-crowd
diff:
---
+++
@@ -2,6 +2,7 @@
 import re
 import socket
 import urllib
+import http.client
 from bs4 import BeautifulSoup
 
 logger = logging.getLogger(__name__)
@@ -41,3 +42,22 @@
                 "description": None})
 
     return ret
+
+
+def image_handler(metadata):
+    if not metadata.mime_type.startswith("image/"):
+        return None
+
+    ret = default_handler(metadata)
+
+    try:
+        img_data = metadata.buf + metadata.response.read()
+    except http.client.IncompleteRead as err:
+        logger.warn("Could not download image: {}".format(err))
+        return ret
+
+    ret.update({"image_mime_type": metadata.mime_type,
+                "image_buffer": img_data,
+                "image_url": metadata.url})
+
+    return ret
ee31e6c0302c6840d522666b1f724d0ec429d562
monasca_setup/detection/plugins/neutron.py
monasca_setup/detection/plugins/neutron.py
old_contents:
import monasca_setup.detection


class Neutron(monasca_setup.detection.ServicePlugin):

    """Detect Neutron daemons and setup configuration to monitor them.

    """

    def __init__(self, template_dir, overwrite=True, args=None):
        service_params = {
            'args': args,
            'template_dir': template_dir,
            'overwrite': overwrite,
            'service_name': 'networking',
            'process_names': ['neutron-server', 'neutron-openvswitch-agent',
                              'neutron-rootwrap', 'neutron-dhcp-agent',
                              'neutron-vpn-agent', 'neutron-metadata-agent',
                              'neutron-metering-agent', 'neutron-l3-agent',
                              'neutron-ns-metadata-proxy'],
            'service_api_url': 'http://localhost:9696',
            'search_pattern': '.*v2.0.*'
        }

        super(Neutron, self).__init__(service_params)

    def build_config(self):
        """Build the config as a Plugins object and return."""
        # Skip the http check if neutron-server is not on this box
        if 'neutron-server' not in self.found_processes:
            self.service_api_url = None
            self.search_pattern = None

        return monasca_setup.detection.ServicePlugin.build_config(self)
new_contents:
import monasca_setup.detection


class Neutron(monasca_setup.detection.ServicePlugin):

    """Detect Neutron daemons and setup configuration to monitor them.

    """

    def __init__(self, template_dir, overwrite=True, args=None):
        service_params = {
            'args': args,
            'template_dir': template_dir,
            'overwrite': overwrite,
            'service_name': 'networking',
            'process_names': ['neutron-server', 'neutron-openvswitch-agent',
                              'neutron-rootwrap', 'neutron-dhcp-agent',
                              'neutron-vpn-agent', 'neutron-metadata-agent',
                              'neutron-metering-agent', 'neutron-l3-agent',
                              'neutron-ns-metadata-proxy',
                              '/opt/stack/service/neutron/venv/bin/neutron-lbaas-agent',
                              '/opt/stack/service/neutron/venv/bin/neutron-lbaasv2-agent'],
            'service_api_url': 'http://localhost:9696',
            'search_pattern': '.*v2.0.*'
        }

        super(Neutron, self).__init__(service_params)

    def build_config(self):
        """Build the config as a Plugins object and return."""
        # Skip the http check if neutron-server is not on this box
        if 'neutron-server' not in self.found_processes:
            self.service_api_url = None
            self.search_pattern = None

        return monasca_setup.detection.ServicePlugin.build_config(self)
Add process monitoring for LBaaS agents
Add process monitoring for LBaaS agents Add neutron-lbaas-agent (LBaaS V1) and neutron-lbaasv2-agent (LBaaS V2) to the neutron detection plugin. Because the string "neutron-lbaas-agent" can be both a process name and log file name, the process monitor is susceptible to false positive matching on that string. Use a longer part of the python path to disambiguate this Change-Id: I3081639a6f36a276bab2f9eb1b9b39a5bef452f1
Python
bsd-3-clause
sapcc/monasca-agent,sapcc/monasca-agent,sapcc/monasca-agent
diff:
---
+++
@@ -17,7 +17,9 @@
                               'neutron-rootwrap', 'neutron-dhcp-agent',
                               'neutron-vpn-agent', 'neutron-metadata-agent',
                               'neutron-metering-agent', 'neutron-l3-agent',
-                              'neutron-ns-metadata-proxy'],
+                              'neutron-ns-metadata-proxy',
+                              '/opt/stack/service/neutron/venv/bin/neutron-lbaas-agent',
+                              '/opt/stack/service/neutron/venv/bin/neutron-lbaasv2-agent'],
             'service_api_url': 'http://localhost:9696',
             'search_pattern': '.*v2.0.*'
         }
8769224d8dbe73e177d19012d54c9bb7e114a3fa
recipes/webrtc.py
recipes/webrtc.py
old_contents:
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import sys

import recipe_util  # pylint: disable=F0401


# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class WebRTC(recipe_util.Recipe):
  """Basic Recipe class for WebRTC."""

  @staticmethod
  def fetch_spec(props):
    url = 'https://chromium.googlesource.com/external/webrtc.git'
    spec = {
        'solutions': [
            {
                'name': 'src',
                'url': url,
                'deps_file': 'DEPS',
                'managed': False,
                'custom_deps': {},
                'safesync_url': '',
            },
        ],
        'auto': True,  # Runs git auto-svn as a part of the fetch.
        'with_branch_heads': True,
    }

    if props.get('target_os'):
      spec['target_os'] = props['target_os'].split(',')

    return {
        'type': 'gclient_git_svn',
        'gclient_git_svn_spec': spec,
    }

  @staticmethod
  def expected_root(_props):
    return 'src'


def main(argv=None):
  return WebRTC().handle_args(argv)


if __name__ == '__main__':
  sys.exit(main(sys.argv))
new_contents:
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import sys

import recipe_util  # pylint: disable=F0401


# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class WebRTC(recipe_util.Recipe):
  """Basic Recipe class for WebRTC."""

  @staticmethod
  def fetch_spec(props):
    url = 'https://chromium.googlesource.com/external/webrtc.git'
    spec = {
        'solutions': [
            {
                'name': 'src',
                'url': url,
                'deps_file': 'DEPS',
                'managed': False,
                'custom_deps': {},
                'safesync_url': '',
            },
        ],
        'with_branch_heads': True,
    }

    if props.get('target_os'):
      spec['target_os'] = props['target_os'].split(',')

    return {
        'type': 'gclient_git',
        'gclient_git_spec': spec,
    }

  @staticmethod
  def expected_root(_props):
    return 'src'


def main(argv=None):
  return WebRTC().handle_args(argv)


if __name__ == '__main__':
  sys.exit(main(sys.argv))
Switch WebRTC recipe to Git.
Switch WebRTC recipe to Git. BUG=412012 Review URL: https://codereview.chromium.org/765373002 git-svn-id: fd409f4bdeea2bb50a5d34bb4d4bfc2046a5a3dd@294546 0039d316-1c4b-4281-b951-d872f2087c98
Python
bsd-3-clause
sarvex/depot-tools,fracting/depot_tools,sarvex/depot-tools,azunite/chrome_build,disigma/depot_tools,duongbaoduy/gtools,fracting/depot_tools,hsharsha/depot_tools,Midrya/chromium,hsharsha/depot_tools,ajohnson23/depot_tools,gcodetogit/depot_tools,npe9/depot_tools,mlufei/depot_tools,primiano/depot_tools,chinmaygarde/depot_tools,primiano/depot_tools,azunite/chrome_build,SuYiling/chrome_depot_tools,duanwujie/depot_tools,withtone/depot_tools,chinmaygarde/depot_tools,duongbaoduy/gtools,SuYiling/chrome_depot_tools,azureplus/chromium_depot_tools,gcodetogit/depot_tools,kaiix/depot_tools,gcodetogit/depot_tools,sarvex/depot-tools,SuYiling/chrome_depot_tools,aleonliao/depot_tools,liaorubei/depot_tools,mlufei/depot_tools,Midrya/chromium,primiano/depot_tools,npe9/depot_tools,liaorubei/depot_tools,sarvex/depot-tools,withtone/depot_tools,disigma/depot_tools,azureplus/chromium_depot_tools,disigma/depot_tools,hsharsha/depot_tools,cpanelli/-git-clone-https-chromium.googlesource.com-chromium-tools-depot_tools,kaiix/depot_tools,cpanelli/-git-clone-https-chromium.googlesource.com-chromium-tools-depot_tools,liaorubei/depot_tools,CoherentLabs/depot_tools,cpanelli/-git-clone-https-chromium.googlesource.com-chromium-tools-depot_tools,ajohnson23/depot_tools,withtone/depot_tools,azureplus/chromium_depot_tools,duanwujie/depot_tools,liaorubei/depot_tools,aleonliao/depot_tools,fracting/depot_tools,kaiix/depot_tools,npe9/depot_tools,CoherentLabs/depot_tools,mlufei/depot_tools,npe9/depot_tools,aleonliao/depot_tools,ajohnson23/depot_tools,duongbaoduy/gtools,duanwujie/depot_tools,azunite/chrome_build,chinmaygarde/depot_tools,Midrya/chromium
diff:
---
+++
@@ -26,7 +26,6 @@
                 'safesync_url': '',
             },
         ],
-        'auto': True,  # Runs git auto-svn as a part of the fetch.
         'with_branch_heads': True,
     }
 
@@ -34,8 +33,8 @@
       spec['target_os'] = props['target_os'].split(',')
 
     return {
-        'type': 'gclient_git_svn',
-        'gclient_git_svn_spec': spec,
+        'type': 'gclient_git',
+        'gclient_git_spec': spec,
     }
 
   @staticmethod
2393b066fbb0fc88d9e9a1918485cf57c40aecc2
opps/articles/templatetags/article_tags.py
opps/articles/templatetags/article_tags.py
old_contents:
# -*- coding: utf-8 -*-
from django import template
from django.conf import settings

from opps.articles.models import ArticleBox

register = template.Library()


@register.simple_tag
def get_articlebox(slug, channel_slug=None, template_name=None):
    if channel_slug:
        slug = slug + '-' + channel_slug

    try:
        box = ArticleBox.objects.get(site=settings.SITE_ID, slug=slug)
    except ArticleBox.DoesNotExist:
        box = None

    t = template.loader.get_template('articles/articlebox_detail.html')
    if template_name:
        t = template.loader.get_template(template_name)

    return t.render(template.Context({'articlebox': box, 'slug': slug}))


@register.simple_tag
def get_all_articlebox(channel_slug, template_name=None):
    boxes = ArticleBox.objects.filter(site=settings.SITE_ID,
                                      channel__slug=channel_slug)

    t = template.loader.get_template('articles/articlebox_list.html')
    if template_name:
        t = template.loader.get_template(template_name)

    return t.render(template.Context({'articleboxes': boxes}))
new_contents:
# -*- coding: utf-8 -*-
from django import template
from django.conf import settings
from django.utils import timezone

from opps.articles.models import ArticleBox

register = template.Library()


@register.simple_tag
def get_articlebox(slug, channel_slug=None, template_name=None):
    if channel_slug:
        slug = slug + '-' + channel_slug

    try:
        box = ArticleBox.objects.get(site=settings.SITE_ID, slug=slug,
                                     date_available__lte=timezone.now(),
                                     published=True)
    except ArticleBox.DoesNotExist:
        box = None

    t = template.loader.get_template('articles/articlebox_detail.html')
    if template_name:
        t = template.loader.get_template(template_name)

    return t.render(template.Context({'articlebox': box, 'slug': slug}))


@register.simple_tag
def get_all_articlebox(channel_slug, template_name=None):
    boxes = ArticleBox.objects.filter(site=settings.SITE_ID,
                                      channel__slug=channel_slug)

    t = template.loader.get_template('articles/articlebox_list.html')
    if template_name:
        t = template.loader.get_template(template_name)

    return t.render(template.Context({'articleboxes': boxes}))
Add validate published on templatetag get articlebox
Add validate published on templatetag get articlebox
Python
mit
jeanmask/opps,williamroot/opps,opps/opps,jeanmask/opps,jeanmask/opps,YACOWS/opps,williamroot/opps,YACOWS/opps,jeanmask/opps,opps/opps,opps/opps,YACOWS/opps,opps/opps,williamroot/opps,williamroot/opps,YACOWS/opps
diff:
---
+++
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 from django import template
 from django.conf import settings
+from django.utils import timezone
 
 from opps.articles.models import ArticleBox
 
@@ -13,7 +14,9 @@
         slug = slug + '-' + channel_slug
 
     try:
-        box = ArticleBox.objects.get(site=settings.SITE_ID, slug=slug)
+        box = ArticleBox.objects.get(site=settings.SITE_ID, slug=slug,
+                                     date_available__lte=timezone.now(),
+                                     published=True)
     except ArticleBox.DoesNotExist:
         box = None
8f9dc9a241515f9cab633f33b9d2243f76df55bd
emencia_paste_djangocms_3/django_buildout/project/utils/templatetags/utils_addons.py
emencia_paste_djangocms_3/django_buildout/project/utils/templatetags/utils_addons.py
old_contents:
# -*- coding: utf-8 -*-
"""
Various usefull tags
"""
from django import template
register = template.Library()


@register.filter(name='split', is_safe=True)
def split_string(value, arg=None):
    """
    A simple string splitter

    So you can do that : ::

        {% if LANGUAGE_CODE in "fr,en-ca,en-gb,zh-hk,it,en,de"|split:',' %}
    """
    return value.split(arg)
new_contents:
# -*- coding: utf-8 -*-
"""
Various usefull tags
"""
from django import template
register = template.Library()


@register.filter(name='split', is_safe=False)
def split_string(value, arg=None):
    """
    A simple string splitter

    So you can do that : ::

        {% if LANGUAGE_CODE in "fr,en-ca,en-gb,zh-hk,it,en,de"|split:',' %}
    """
    return value.split(arg)
Fix split filter in emencia_utils templatetags that was returning a string instead of a list
Fix split filter in emencia_utils templatetags that was returning a string instead of a list
Python
mit
emencia/emencia_paste_djangocms_3,emencia/emencia_paste_djangocms_3,emencia/emencia_paste_djangocms_3,emencia/emencia_paste_djangocms_3
diff:
---
+++
@@ -6,7 +6,7 @@
 register = template.Library()
 
 
-@register.filter(name='split', is_safe=True)
+@register.filter(name='split', is_safe=False)
 def split_string(value, arg=None):
     """
     A simple string splitter
d7878a798d8208bcd9221babcd3ac1a5c12aa9f7
drivnal/object.py
drivnal/object.py
old_contents:
from constants import *
import os
import urllib
import mimetypes
import logging

class Object:
    def __init__(self, path):
        self.name = os.path.basename(path)
        self.path = path

        if os.path.isdir(self.path):
            self.type = DIR_MIME_TYPE
            self.size = None
            self.time = None
        else:
            try:
                stat = os.stat(self.path)
                self.size = stat.st_size
                self.time = stat.st_mtime
            except OSError:
                self.size = None
                self.time = None

            mime = mimetypes.MimeTypes()
            self.type = mime.guess_type(urllib.pathname2url(self.path))[0]

    @staticmethod
    def get_objects(path):
        objects = []

        if path:
            for name in os.listdir(path):
                object_path = os.path.join(path, name)
                objects.append(Object(object_path))

        return objects
new_contents:
from constants import *
import os
import urllib
import subprocess
import logging

logger = logging.getLogger(APP_NAME)

class Object:
    def __init__(self, path):
        self.name = os.path.basename(path)
        self.path = path

        if os.path.isdir(self.path):
            self.type = DIR_MIME_TYPE
            self.size = None
            self.time = None
        else:
            try:
                stat = os.stat(self.path)
                self.size = stat.st_size
                self.time = stat.st_mtime
            except OSError:
                self.size = None
                self.time = None
            self.type = None

    @staticmethod
    def get_objects(path):
        objects = []
        object_paths = []

        if path:
            for name in os.listdir(path):
                object_path = os.path.join(path, name)
                object = Object(object_path)
                objects.append(object)
                if not object.type:
                    object_paths.append(object_path)

            try:
                # TODO Follow symlinks
                mime_types = subprocess.check_output(['file',
                    '--mime-type', '--brief'] + object_paths).splitlines()
            except subprocess.CalledProcessError, error:
                logger.warning('File mime-type call failed. %r' % {
                    'return_code': error.returncode,
                    'output': error.output,
                })

            try:
                for object in objects:
                    if not object.type:
                        object.type = mime_types.pop(0)
            except IndexError:
                logger.error('File mime-type call index error.')

        return objects
Improve file mime type detection
Improve file mime type detection
Python
agpl-3.0
drivnal/drivnal,drivnal/drivnal,drivnal/drivnal
diff:
---
+++
@@ -1,8 +1,10 @@
 from constants import *
 import os
 import urllib
-import mimetypes
+import subprocess
 import logging
+
+logger = logging.getLogger(APP_NAME)
 
 class Object:
     def __init__(self, path):
@@ -21,17 +23,36 @@
             except OSError:
                 self.size = None
                 self.time = None
-
-            mime = mimetypes.MimeTypes()
-            self.type = mime.guess_type(urllib.pathname2url(self.path))[0]
+            self.type = None
 
     @staticmethod
     def get_objects(path):
         objects = []
+        object_paths = []
 
         if path:
             for name in os.listdir(path):
                 object_path = os.path.join(path, name)
-                objects.append(Object(object_path))
+                object = Object(object_path)
+                objects.append(object)
+                if not object.type:
+                    object_paths.append(object_path)
+
+            try:
+                # TODO Follow symlinks
+                mime_types = subprocess.check_output(['file',
+                    '--mime-type', '--brief'] + object_paths).splitlines()
+            except subprocess.CalledProcessError, error:
+                logger.warning('File mime-type call failed. %r' % {
+                    'return_code': error.returncode,
+                    'output': error.output,
+                })
+
+            try:
+                for object in objects:
+                    if not object.type:
+                        object.type = mime_types.pop(0)
+            except IndexError:
+                logger.error('File mime-type call index error.')
 
         return objects
3ddeeccabb09f11fdfb60d9ddbddce406a054e50
settings.py
settings.py
old_contents:
from settings_common import *

PACKAGE_VERSION = 0.5

DEBUG = TEMPLATE_DEBUG = True

DATABASE_ENGINE = 'postgresql_psycopg2'
DATABASE_NAME = 'daisyproducer_dev'
DATABASE_USER = 'eglic'
DATABASE_PASSWORD = ''

DAISY_PIPELINE_PATH = os.path.join(PROJECT_DIR, '..', '..', 'tmp', 'pipeline-20100301')
DTBOOK2SBSFORM_PATH = os.path.join(PROJECT_DIR, '..', '..', 'workspace', 'LiblouisSaxonExtension')

# debug toolbar
#INSTALLED_APPS += ('debug_toolbar',)
#MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_CONFIG = {'INTERCEPT_REDIRECTS' : False}

SERVE_STATIC_FILES = True
new_contents:
from settings_common import *

PACKAGE_VERSION = 0.5

DEBUG = TEMPLATE_DEBUG = True

DATABASE_ENGINE = 'postgresql_psycopg2'
DATABASE_NAME = 'daisyproducer_dev'
DATABASE_USER = 'eglic'
DATABASE_PASSWORD = ''

DAISY_PIPELINE_PATH = os.path.join(PROJECT_DIR, '..', '..', 'tmp', 'pipeline-20110106')
DTBOOK2SBSFORM_PATH = os.path.join(PROJECT_DIR, '..', '..', 'workspace', 'LiblouisSaxonExtension')

# debug toolbar
#INSTALLED_APPS += ('debug_toolbar',)
#MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_CONFIG = {'INTERCEPT_REDIRECTS' : False}

SERVE_STATIC_FILES = True
Upgrade to a newer pipeline release
Upgrade to a newer pipeline release
Python
agpl-3.0
sbsdev/daisyproducer,sbsdev/daisyproducer,sbsdev/daisyproducer,sbsdev/daisyproducer
diff:
---
+++
@@ -9,7 +9,7 @@
 DATABASE_USER = 'eglic'
 DATABASE_PASSWORD = ''
 
-DAISY_PIPELINE_PATH = os.path.join(PROJECT_DIR, '..', '..', 'tmp', 'pipeline-20100301')
+DAISY_PIPELINE_PATH = os.path.join(PROJECT_DIR, '..', '..', 'tmp', 'pipeline-20110106')
 DTBOOK2SBSFORM_PATH = os.path.join(PROJECT_DIR, '..', '..', 'workspace', 'LiblouisSaxonExtension')
 
 # debug toolbar
109fc84cb307083f6a01317bb5b5bea0578088d3
bloop/__init__.py
bloop/__init__.py
old_contents:
from bloop.engine import Engine, ObjectsNotFound, ConstraintViolation
from bloop.column import Column, GlobalSecondaryIndex, LocalSecondaryIndex
from bloop.types import (
    String, Float, Integer, Binary, StringSet, FloatSet,
    IntegerSet, BinarySet, Null, Boolean, Map, List
)

__all__ = [
    "Engine", "ObjectsNotFound", "ConstraintViolation",
    "Column", "GlobalSecondaryIndex", "LocalSecondaryIndex",
    "String", "Float", "Integer", "Binary", "StringSet", "FloatSet",
    "IntegerSet", "BinarySet", "Null", "Boolean", "Map", "List"
]
new_contents:
from bloop.engine import Engine, ObjectsNotFound, ConstraintViolation
from bloop.column import Column, GlobalSecondaryIndex, LocalSecondaryIndex
from bloop.types import (
    String, UUID, Float, Integer, Binary, StringSet, FloatSet,
    IntegerSet, BinarySet, Null, Boolean, Map, List
)

__all__ = [
    "Engine", "ObjectsNotFound", "ConstraintViolation",
    "Column", "GlobalSecondaryIndex", "LocalSecondaryIndex",
    "String", "UUID", "Float", "Integer", "Binary", "StringSet", "FloatSet",
    "IntegerSet", "BinarySet", "Null", "Boolean", "Map", "List"
]
Add UUID to bloop __all__
Add UUID to bloop __all__
Python
mit
numberoverzero/bloop,numberoverzero/bloop
diff:
---
+++
@@ -1,13 +1,13 @@
 from bloop.engine import Engine, ObjectsNotFound, ConstraintViolation
 from bloop.column import Column, GlobalSecondaryIndex, LocalSecondaryIndex
 from bloop.types import (
-    String, Float, Integer, Binary, StringSet, FloatSet,
+    String, UUID, Float, Integer, Binary, StringSet, FloatSet,
     IntegerSet, BinarySet, Null, Boolean, Map, List
 )
 
 __all__ = [
     "Engine", "ObjectsNotFound", "ConstraintViolation",
     "Column", "GlobalSecondaryIndex", "LocalSecondaryIndex",
-    "String", "Float", "Integer", "Binary", "StringSet", "FloatSet",
+    "String", "UUID", "Float", "Integer", "Binary", "StringSet", "FloatSet",
     "IntegerSet", "BinarySet", "Null", "Boolean", "Map", "List"
 ]
5c11731b445df04e1b4ec92df4ff6b7e6681915b
testMail.py
testMail.py
old_contents:
#!/usr/local/bin/python
import smtplib, time, threading, sys
from email.mime.text import MIMEText

fromaddr = sys.argv[0]
toaddr = sys.argv[1]


def createMessage(fromaddr, toaddr, subject, msgtxt):
    msg = MIMEText(msgtxt)
    msg['Subject'] = subject
    msg['From'] = fromaddr
    msg['To'] = toaddr
    return msg


def sendMails(threadId):
    server = smtplib.SMTP('localhost', 8001)
    for i in xrange(25):
        server.sendmail(fromaddr, [toaddr], createMessage(fromaddr, toaddr, "This is from thread %s" % threadId, "Some header" ).as_string())
    server.quit()

threads = [threading.Thread(target=sendMails, args=(i,)) for i in range(10)]
for t in threads:
    t.start()
for t in threads:
    t.join()
new_contents:
#!/usr/local/bin/python
import smtplib, time, threading, sys
from email.mime.text import MIMEText

fromaddr = sys.argv[1]
toaddr = sys.argv[2]


def createMessage(fromaddr, toaddr, subject, msgtxt):
    msg = MIMEText(msgtxt)
    msg['Subject'] = subject
    msg['From'] = fromaddr
    msg['To'] = toaddr
    return msg


def sendMails(threadId):
    server = smtplib.SMTP('localhost', 8001)
    for i in xrange(25):
        server.sendmail(fromaddr, [toaddr], createMessage(fromaddr, toaddr, "This is from thread %s" % threadId, "Some header" ).as_string())
    server.quit()

threads = [threading.Thread(target=sendMails, args=(i,)) for i in range(10)]
for t in threads:
    t.start()
for t in threads:
    t.join()
Change the arg values so not to use the script name as the fromaddr
Change the arg values so not to use the script name as the fromaddr
Python
bsd-3-clause
bobbynewmark/mailthrottler,bobbynewmark/mailthrottler
diff:
---
+++
@@ -2,8 +2,8 @@
 import smtplib, time, threading, sys
 from email.mime.text import MIMEText
 
-fromaddr = sys.argv[0]
-toaddr = sys.argv[1]
+fromaddr = sys.argv[1]
+toaddr = sys.argv[2]
 
 
 def createMessage(fromaddr, toaddr, subject, msgtxt):
6c32e39e2e51a80ebc9e31e88e22cc4aa39f7466
chainer/functions/copy.py
chainer/functions/copy.py
old_contents:
from chainer import cuda
from chainer import function


class Copy(function.Function):
    """Copy an input GPUArray onto another device."""

    def __init__(self, out_device):
        self.out_device = out_device

    def forward_cpu(self, x):
        return x[0].copy(),

    def forward_gpu(self, x):
        return cuda.copy(x[0], out_device=self.out_device),

    def backward_cpu(self, x, gy):
        return gy[0].copy(),

    def backward_gpu(self, x, gy):
        return cuda.copy(gy[0], out_device=cuda.get_device(x[0])),


def copy(x, dst):
    """Copies the input variable onto the specified device.

    This function copies the array of input variable onto the device specified
    by ``dst`` if the original array is on GPU, and otherwise just copies the
    array within host memory.

    Args:
        x (~chainer.Variable): Variable to be copied.
        dst: Target device specifier.

    Returns:
        ~chainer.Variable: Output variable.

    """
    return Copy(dst)(x)
new_contents:
import numpy

from chainer import cuda
from chainer import function
from chainer.utils import type_check


class Copy(function.Function):
    """Copy an input GPUArray onto another device."""

    def __init__(self, out_device):
        self.out_device = out_device

    def check_type_forward(self, in_types):
        type_check.expect(
            in_types.size() == 1,
            in_types[0].dtype == numpy.float32
        )

    def check_type_backward(self, in_types, out_types):
        type_check.expect(
            out_types.size() == 1,
            in_types[0].dtype == out_types[0].dtype,
            in_types[0].ndim == out_types[0].ndim,
            in_types[0].shape == out_types[0].shape
        )

    def forward_cpu(self, x):
        return x[0].copy(),

    def forward_gpu(self, x):
        return cuda.copy(x[0], out_device=self.out_device),

    def backward_cpu(self, x, gy):
        return gy[0].copy(),

    def backward_gpu(self, x, gy):
        return cuda.copy(gy[0], out_device=cuda.get_device(x[0])),


def copy(x, dst):
    """Copies the input variable onto the specified device.

    This function copies the array of input variable onto the device specified
    by ``dst`` if the original array is on GPU, and otherwise just copies the
    array within host memory.

    Args:
        x (~chainer.Variable): Variable to be copied.
        dst: Target device specifier.

    Returns:
        ~chainer.Variable: Output variable.

    """
    return Copy(dst)(x)
Add unittest(cpu-only) and typecheck for Copy
Add unittest(cpu-only) and typecheck for Copy
Python
mit
chainer/chainer,sinhrks/chainer,ronekko/chainer,ktnyt/chainer,chainer/chainer,jnishi/chainer,niboshi/chainer,tkerola/chainer,elviswf/chainer,tscohen/chainer,muupan/chainer,keisuke-umezawa/chainer,Kaisuke5/chainer,woodshop/chainer,jnishi/chainer,keisuke-umezawa/chainer,tigerneil/chainer,cupy/cupy,niboshi/chainer,chainer/chainer,hvy/chainer,aonotas/chainer,t-abe/chainer,wkentaro/chainer,hvy/chainer,keisuke-umezawa/chainer,umitanuki/chainer,t-abe/chainer,okuta/chainer,kiyukuta/chainer,ktnyt/chainer,wkentaro/chainer,ktnyt/chainer,cupy/cupy,okuta/chainer,hvy/chainer,cemoody/chainer,woodshop/complex-chainer,hidenori-t/chainer,ysekky/chainer,hvy/chainer,ikasumi/chainer,benob/chainer,kashif/chainer,sinhrks/chainer,kikusu/chainer,sou81821/chainer,okuta/chainer,pfnet/chainer,1986ks/chainer,cupy/cupy,kikusu/chainer,wkentaro/chainer,muupan/chainer,kuwa32/chainer,wavelets/chainer,keisuke-umezawa/chainer,AlpacaDB/chainer,ktnyt/chainer,jnishi/chainer,ytoyama/yans_chainer_hackathon,bayerj/chainer,delta2323/chainer,niboshi/chainer,niboshi/chainer,anaruse/chainer,yanweifu/chainer,rezoo/chainer,wkentaro/chainer,truongdq/chainer,okuta/chainer,jnishi/chainer,masia02/chainer,jfsantos/chainer,truongdq/chainer,minhpqn/chainer,benob/chainer,laysakura/chainer,cupy/cupy,chainer/chainer,AlpacaDB/chainer
diff:
---
+++
@@ -1,5 +1,8 @@
+import numpy
+
 from chainer import cuda
 from chainer import function
+from chainer.utils import type_check
 
 
 class Copy(function.Function):
@@ -8,6 +11,20 @@
 
     def __init__(self, out_device):
         self.out_device = out_device
+
+    def check_type_forward(self, in_types):
+        type_check.expect(
+            in_types.size() == 1,
+            in_types[0].dtype == numpy.float32
+        )
+
+    def check_type_backward(self, in_types, out_types):
+        type_check.expect(
+            out_types.size() == 1,
+            in_types[0].dtype == out_types[0].dtype,
+            in_types[0].ndim == out_types[0].ndim,
+            in_types[0].shape == out_types[0].shape
+        )
 
     def forward_cpu(self, x):
         return x[0].copy(),
6fbd752b1343c2e5085c3d060dbc7cc11a839728
sympy/utilities/tests/test_code_quality.py
sympy/utilities/tests/test_code_quality.py
old_contents:
from os import walk, sep, chdir, pardir
from os.path import split, join, abspath
from glob import glob

# System path separator (usually slash or backslash)
sepd = {"sep": sep}

# Files having at least one of these in their path will be excluded
EXCLUDE = set([
    "%(sep)sthirdparty%(sep)s" % sepd,
    "%(sep)sprinting%(sep)spretty%(sep)s" % sepd,
])

def test_no_trailing_whitespace():
    message = "File contains trailing whitespace: %s, line %s."
    base_path = split(__file__)[0]
    base_path += sep + pardir + sep + pardir # go to sympy/
    base_path = abspath(base_path)
    for root, dirs, files in walk(base_path):
        for fname in glob(join(root, "*.py")):
            if filter(lambda ex: ex in fname, EXCLUDE):
                continue
            file = open(fname, "r")
            try:
                for idx, line in enumerate(file):
                    if line.endswith(" \n"):
                        assert False, message % (fname, idx+1)
            finally:
                file.close()
new_contents:
from os import walk, sep, chdir, pardir
from os.path import split, join, abspath
from glob import glob

# System path separator (usually slash or backslash)
sepd = {"sep": sep}

# Files having at least one of these in their path will be excluded
EXCLUDE = set([
    "%(sep)sthirdparty%(sep)s" % sepd,
# "%(sep)sprinting%(sep)spretty%(sep)s" % sepd,
])

def test_no_trailing_whitespace():
    message = "File contains trailing whitespace: %s, line %s."
    base_path = split(__file__)[0]
    base_path += sep + pardir + sep + pardir # go to sympy/
    base_path = abspath(base_path)
    for root, dirs, files in walk(base_path):
        for fname in glob(join(root, "*.py")):
            if filter(lambda ex: ex in fname, EXCLUDE):
                continue
            file = open(fname, "r")
            try:
                for idx, line in enumerate(file):
                    if line.endswith(" \n"):
                        assert False, message % (fname, idx+1)
            finally:
                file.close()
Test whitespace in pretty printing tests.
Test whitespace in pretty printing tests.
Python
bsd-3-clause
toolforger/sympy,atsao72/sympy,lindsayad/sympy,fperez/sympy,abhiii5459/sympy,chaffra/sympy,abhiii5459/sympy,farhaanbukhsh/sympy,wanglongqi/sympy,bukzor/sympy,ga7g08/sympy,sahmed95/sympy,sunny94/temp,Shaswat27/sympy,Titan-C/sympy,lidavidm/sympy,hargup/sympy,ryanGT/sympy,Curious72/sympy,mcdaniel67/sympy,emon10005/sympy,rahuldan/sympy,kumarkrishna/sympy,maniteja123/sympy,kevalds51/sympy,farhaanbukhsh/sympy,MridulS/sympy,VaibhavAgarwalVA/sympy,kaichogami/sympy,kaichogami/sympy,MridulS/sympy,sunny94/temp,toolforger/sympy,toolforger/sympy,sahmed95/sympy,hrashk/sympy,Davidjohnwilson/sympy,Davidjohnwilson/sympy,ahhda/sympy,wyom/sympy,wyom/sympy,ChristinaZografou/sympy,ChristinaZografou/sympy,sympy/sympy,sympy/sympy,sympy/sympy,shikil/sympy,garvitr/sympy,garvitr/sympy,hrashk/sympy,postvakje/sympy,postvakje/sympy,wanglongqi/sympy,Shaswat27/sympy,emon10005/sympy,Titan-C/sympy,lidavidm/sympy,hargup/sympy,Curious72/sympy,mcdaniel67/sympy,rahuldan/sympy,kumarkrishna/sympy,maniteja123/sympy,kevalds51/sympy,VaibhavAgarwalVA/sympy,sahilshekhawat/sympy,sahilshekhawat/sympy,minrk/sympy,cccfran/sympy,flacjacket/sympy,debugger22/sympy,debugger22/sympy,atsao72/sympy,atsao72/sympy,ga7g08/sympy,sunny94/temp,Shaswat27/sympy,shikil/sympy,shikil/sympy,vipulroxx/sympy,vipulroxx/sympy,vipulroxx/sympy,moble/sympy,moble/sympy,moble/sympy,beni55/sympy,beni55/sympy,beni55/sympy,srjoglekar246/sympy,jamesblunt/sympy,jamesblunt/sympy,jamesblunt/sympy,Mitchkoens/sympy,Mitchkoens/sympy,Mitchkoens/sympy,skidzo/sympy,skidzo/sympy,skidzo/sympy,jerli/sympy,jerli/sympy,jerli/sympy,ahhda/sympy,ahhda/sympy,AkademieOlympia/sympy,AkademieOlympia/sympy,AkademieOlympia/sympy,souravsingh/sympy,souravsingh/sympy,souravsingh/sympy,iamutkarshtiwari/sympy,iamutkarshtiwari/sympy,jaimahajan1997/sympy,jaimahajan1997/sympy,saurabhjn76/sympy,saurabhjn76/sympy,saurabhjn76/sympy,sampadsaha5/sympy,sampadsaha5/sympy,Vishluck/sympy,Vishluck/sympy,Vishluck/sympy,aktech/sympy,aktech/sympy,Sumith1896/sympy,Sumith1896/sympy,kaushik94/sympy,kaushik94/sympy,kaushik94/sympy,yukoba/sympy,yukoba/sympy,hrashk/sympy,Arafatk/sympy,Arafatk/sympy,Arafatk/sympy,Gadal/sympy,Gadal/sympy,Gadal/sympy,kmacinnis/sympy,kmacinnis/sympy,kmacinnis/sympy,atreyv/sympy,atreyv/sympy,atreyv/sympy,Designist/sympy,Designist/sympy,Designist/sympy,mafiya69/sympy,mafiya69/sympy,mafiya69/sympy,pandeyadarsh/sympy,pandeyadarsh/sympy,pandeyadarsh/sympy,dqnykamp/sympy,dqnykamp/sympy,abloomston/sympy,abloomston/sympy,abloomston/sympy,asm666/sympy,asm666/sympy,asm666/sympy,grevutiu-gabriel/sympy,grevutiu-gabriel/sympy,grevutiu-gabriel/sympy,jbbskinny/sympy,jbbskinny/sympy,wyom/sympy,lindsayad/sympy,lindsayad/sympy,MechCoder/sympy,MechCoder/sympy,oliverlee/sympy,oliverlee/sympy,kevalds51/sympy,drufat/sympy,drufat/sympy,kaichogami/sympy,madan96/sympy,madan96/sympy,yashsharan/sympy,yashsharan/sympy,meghana1995/sympy,meghana1995/sympy,cswiercz/sympy,cswiercz/sympy,AunShiLord/sympy,AunShiLord/sympy,wanglongqi/sympy,liangjiaxing/sympy,liangjiaxing/sympy,yukoba/sympy,hazelnusse/sympy-old,skirpichev/omg,diofant/diofant,pernici/sympy,pbrady/sympy,pbrady/sympy,pbrady/sympy,mattpap/sympy-polys,ryanGT/sympy,fperez/sympy,tovrstra/sympy,jbaayen/sympy,minrk/sympy,KevinGoodsell/sympy,hazelnusse/sympy-old,cemoody/sympy,1986ks/sympy,tscohen/sympy,kuwa32/sympy,wavelets/sympy,masia02/sympy,sou81821/sympy,Kaisuke5/sympy,woodshop/complex-chainer,kikusu/chainer,kikusu/chainer,tigerneil/chainer,woodshop/chainer,delta2323/chainer,muupan/chainer,muupan/chainer,hidenori-t/chainer,jfsantos/chainer,kashif/chainer,ronekko/chainer,benob/chainer,niboshi/chainer,anaruse/chainer,sinhrks/chainer,ktnyt/chainer,ikasumi/chainer,umitanuki/chainer,ysekky/chainer,t-abe/chainer,wkentaro/chainer,bayerj/chainer,okuta/chainer,niboshi/chainer,kiyukuta/chainer,wkentaro/chainer,cupy/cupy,jnishi/chainer,chainer/chainer,t-abe/chainer,笑elviswf/chainer,Kaisuke5/chainer,pfnet/chainer,yanweifu/chainer,hvy/chainer,benob/chainer,keisuke-umezawa/chainer,chainer/chainer,aonotas/chainer,keisuke-umezawa/chainer,kuwa32/chainer,hvy/chainer,jnishi/chainer,cemoody/chainer,okuta/chainer,laysakura/chainer,rezoo/chainer,ytoyama/yans_chainer_hackathon,hvy/chainer,tkerola/chainer,ktnyt/chainer,jnishi/chainer,kaichogami/sympy,sympy/sympy,jaimahajan1997/sympy,pernici/sympy,dqnykamp/sympy,postvakje/sympy,cswiercz/sympy,farhaanbukhsh/sympy,bukzor/sympy,lindsayad/sympy,sampadsaha5/sympy,minrk/sympy,iamutkarshtiwari/sympy,meghana1995/sympy,lidavidm/sympy,drufat/sympy,Sumith1896/sympy,AunShiLord/sympy,asm666/sympy,madan96/sympy,cccfran/sympy,yukoba/sympy,MechCoder/sympy,yashsharan/sympy,liangjiaxing/sympy,MridulS/sympy,oliverlee/sympy,aktech/sympy
--- +++ @@ -8,7 +8,7 @@
 # Files having at least one of these in their path will be excluded
 EXCLUDE = set([
     "%(sep)sthirdparty%(sep)s" % sepd,
-    "%(sep)sprinting%(sep)spretty%(sep)s" % sepd,
+#    "%(sep)sprinting%(sep)spretty%(sep)s" % sepd,
 ])
 
 def test_no_trailing_whitespace():
@@ -27,4 +27,3 @@
                 assert False, message % (fname, idx+1)
         finally:
             file.close()
-
f01222f021f277805492e3f539609f6b64be0b7e
blanc_basic_news/news/views.py
blanc_basic_news/news/views.py
from django.views.generic import ListView, DateDetailView
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django.conf import settings
from .models import Category, Post


class PostListView(ListView):
    paginate_by = getattr(settings, 'NEWS_PER_PAGE', 10)

    def get_queryset(self):
        return Post.objects.filter(published=True, date__lte=timezone.now())


class PostListCategoryView(ListView):
    paginate_by = getattr(settings, 'NEWS_PER_PAGE', 10)
    template_name_suffix = '_list_category'

    def get_queryset(self):
        self.category = get_object_or_404(Category, slug=self.kwargs['slug'])
        return Post.objects.filter(published=True, date__lte=timezone.now(), category=self.category)

    def get_context_data(self, **kwargs):
        context = super(PostListCategoryView, self).get_context_data(**kwargs)
        context['category'] = self.category
        return context


class PostDetailView(DateDetailView):
    queryset = Post.objects.filter(published=True)
    month_format = '%m'
    date_field = 'date'
from django.views.generic import ListView, DateDetailView
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django.conf import settings
from .models import Category, Post


class PostListView(ListView):
    paginate_by = getattr(settings, 'NEWS_PER_PAGE', 10)

    def get_queryset(self):
        return Post.objects.select_related().filter(
            published=True, date__lte=timezone.now())


class PostListCategoryView(ListView):
    paginate_by = getattr(settings, 'NEWS_PER_PAGE', 10)
    template_name_suffix = '_list_category'

    def get_queryset(self):
        self.category = get_object_or_404(Category, slug=self.kwargs['slug'])
        return Post.objects.select_related().filter(
            published=True,
            date__lte=timezone.now(),
            category=self.category)

    def get_context_data(self, **kwargs):
        context = super(PostListCategoryView, self).get_context_data(**kwargs)
        context['category'] = self.category
        return context


class PostDetailView(DateDetailView):
    queryset = Post.objects.filter(published=True)
    month_format = '%m'
    date_field = 'date'
Use select_related to help with category foreign keys
Use select_related to help with category foreign keys
Python
bsd-3-clause
blancltd/blanc-basic-news
--- +++ @@ -9,7 +9,8 @@
     paginate_by = getattr(settings, 'NEWS_PER_PAGE', 10)
 
     def get_queryset(self):
-        return Post.objects.filter(published=True, date__lte=timezone.now())
+        return Post.objects.select_related().filter(
+            published=True, date__lte=timezone.now())
 
 
 class PostListCategoryView(ListView):
@@ -18,7 +19,10 @@
 
     def get_queryset(self):
         self.category = get_object_or_404(Category, slug=self.kwargs['slug'])
-        return Post.objects.filter(published=True, date__lte=timezone.now(), category=self.category)
+        return Post.objects.select_related().filter(
+            published=True,
+            date__lte=timezone.now(),
+            category=self.category)
 
     def get_context_data(self, **kwargs):
         context = super(PostListCategoryView, self).get_context_data(**kwargs)
19cd84480a739f9550258dc959637fe85f43af50
fedora/release.py
fedora/release.py
'''
Information about this python-fedora release
'''

from fedora import _

NAME = 'python-fedora'
VERSION = '0.3.6'
DESCRIPTION = _('Python modules for interacting with Fedora services')
LONG_DESCRIPTION = _('''
The Fedora Project runs many different services. These services help us to
package software, develop new programs, and generally put together the distro.
This package contains software that helps us do that.
''')
AUTHOR = 'Toshio Kuratomi, Luke Macken'
EMAIL = 'tkuratom@redhat.com'
COPYRIGHT = '2007-2008 Red Hat, Inc.'
URL = 'https://fedorahosted.org/python-fedora'
DOWNLOAD_URL = 'https://fedorahosted.org/releases/p/y/python-fedora/'
LICENSE = 'GPLv2'
'''
Information about this python-fedora release
'''

from fedora import _

NAME = 'python-fedora'
VERSION = '0.3.6'
DESCRIPTION = _('Python modules for interacting with Fedora Services')
LONG_DESCRIPTION = _('''
The Fedora Project runs many different services. These services help us to
package software, develop new programs, and generally put together the distro.
This package contains software that helps us do that.
''')
AUTHOR = 'Toshio Kuratomi, Luke Macken'
EMAIL = 'tkuratom@redhat.com'
COPYRIGHT = '2007-2008 Red Hat, Inc.'
URL = 'https://fedorahosted.org/python-fedora'
DOWNLOAD_URL = 'https://fedorahosted.org/releases/p/y/python-fedora/'
LICENSE = 'GPLv2'
Correct minor typo in a string.
Correct minor typo in a string.
Python
lgpl-2.1
fedora-infra/python-fedora
--- +++ @@ -6,7 +6,7 @@
 
 NAME = 'python-fedora'
 VERSION = '0.3.6'
-DESCRIPTION = _('Python modules for interacting with Fedora services')
+DESCRIPTION = _('Python modules for interacting with Fedora Services')
 LONG_DESCRIPTION = _('''
 The Fedora Project runs many different services. These services help us to
 package software, develop new programs, and generally put together the distro.
db37b195ea47cd18969ad482e1dae301903da092
pyOutlook/__init__.py
pyOutlook/__init__.py
from .core import *

__all__ = ['OutlookAccount', 'Message', 'Contact', 'Folder']
__version__ = '1.0.0'
__release__ = '1.0.0'
from .core import *

__all__ = ['OutlookAccount', 'Message', 'Contact', 'Folder']
__version__ = '1.0.0dev'
__release__ = '1.0.0dev'
Package development version of upcoming v1 release for testing.
Package development version of upcoming v1 release for testing.
Python
mit
JensAstrup/pyOutlook
--- +++ @@ -1,5 +1,5 @@
 from .core import *
 
 __all__ = ['OutlookAccount', 'Message', 'Contact', 'Folder']
-__version__ = '1.0.0'
-__release__ = '1.0.0'
+__version__ = '1.0.0dev'
+__release__ = '1.0.0dev'
c5f10b2e5ea10dd17c8c19f87dcdfd2584f8e431
comics/accounts/models.py
comics/accounts/models.py
import uuid

from django.contrib.auth.models import User
from django.db import models
from django.dispatch import receiver


@receiver(models.signals.post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    UserProfile.objects.get_or_create(user=instance)


class UserProfile(models.Model):
    user = models.OneToOneField(User)
    secret_key = models.CharField(max_length=32, blank=False,
        help_text='Secret key for feed and API access')

    class Meta:
        db_table = 'comics_user_profile'

    def __init__(self, *args, **kwargs):
        super(UserProfile, self).__init__(*args, **kwargs)
        self.generate_new_secret_key()

    def __unicode__(self):
        return u'User profile for %s' % self.user

    def generate_new_secret_key(self):
        self.secret_key = uuid.uuid4().hex
import uuid

from django.contrib.auth.models import User
from django.db import models
from django.dispatch import receiver


@receiver(models.signals.post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    if created:
        UserProfile.objects.create(user=instance)

class UserProfile(models.Model):
    user = models.OneToOneField(User)
    secret_key = models.CharField(max_length=32, blank=False,
        help_text='Secret key for feed and API access')

    class Meta:
        db_table = 'comics_user_profile'

    def __init__(self, *args, **kwargs):
        super(UserProfile, self).__init__(*args, **kwargs)
        self.generate_new_secret_key()

    def __unicode__(self):
        return u'User profile for %s' % self.user

    def generate_new_secret_key(self):
        self.secret_key = uuid.uuid4().hex
Remove conditional sql-select on new user creation
Remove conditional sql-select on new user creation Only create a user profile if a new user is actually created.
Python
agpl-3.0
datagutten/comics,jodal/comics,datagutten/comics,jodal/comics,datagutten/comics,datagutten/comics,jodal/comics,jodal/comics
--- +++ @@ -7,8 +7,8 @@
 
 @receiver(models.signals.post_save, sender=User)
 def create_user_profile(sender, instance, created, **kwargs):
-    UserProfile.objects.get_or_create(user=instance)
-
+    if created:
+        UserProfile.objects.create(user=instance)
 
 class UserProfile(models.Model):
     user = models.OneToOneField(User)
fd6c7386cfdaa5fb97a428b323fc1f9b17f9f02c
tests/test_helpers.py
tests/test_helpers.py
import pandas

from sharepa.helpers import pretty_print
from sharepa.helpers import source_counts


def test_pretty_print():
    some_stuff = '{"Dusty": "Rhodes"}'
    pretty_print(some_stuff)


def test_source_counts():
    all_counts = source_counts()
    assert isinstance(all_counts, pandas.core.frame.DataFrame)
import vcr
import pandas
import pytest

from sharepa.search import ShareSearch
from sharepa.helpers import pretty_print
from sharepa.helpers import source_counts


@vcr.use_cassette('tests/vcr/simple_execute.yaml')
def test_pretty_print():
    my_search = ShareSearch()
    result = my_search.execute()
    the_dict = result.to_dict()
    try:
        pretty_print(the_dict)
    except:
        pytest.fail("Unexpected exception!!")


def test_source_counts():
    all_counts = source_counts()
    assert isinstance(all_counts, pandas.core.frame.DataFrame)
Add pytest fail check on raising pretty print exception
Add pytest fail check on raising pretty print exception
Python
mit
CenterForOpenScience/sharepa,fabianvf/sharepa,samanehsan/sharepa,erinspace/sharepa
--- +++ @@ -1,12 +1,21 @@
+import vcr
 import pandas
+import pytest
 
+from sharepa.search import ShareSearch
 from sharepa.helpers import pretty_print
 from sharepa.helpers import source_counts
 
 
+@vcr.use_cassette('tests/vcr/simple_execute.yaml')
 def test_pretty_print():
-    some_stuff = '{"Dusty": "Rhodes"}'
-    pretty_print(some_stuff)
+    my_search = ShareSearch()
+    result = my_search.execute()
+    the_dict = result.to_dict()
+    try:
+        pretty_print(the_dict)
+    except:
+        pytest.fail("Unexpected exception!!")
 
 
 def test_source_counts():
0a9e3fb387c61f2c7cb32502f5c50eaa5b950169
tests/test_process.py
tests/test_process.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import print_function, unicode_literals
import pytest
from wamopacker.process import run_command, ProcessException
import os
import uuid


def test_run_command():
    cwd = os.getcwd()
    output_cmd = run_command('ls -1A', working_dir = cwd)
    output_py = os.listdir(cwd)
    assert sorted(output_cmd) == sorted(output_py)


def test_run_command_error():
    data = uuid.uuid4().hex
    with pytest.raises(ProcessException) as e:
        run_command('cat {}'.format(data))

    assert e.value.log_stdout == ''
    assert e.value.log_stderr == 'cat: {}: No such file or directory\n'.format(data)
    assert e.value.exit_code != 0
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import print_function, unicode_literals
import pytest
from wamopacker.process import run_command, ProcessException
import os
import uuid


def test_run_command():
    cwd = os.getcwd()
    output_cmd = run_command('ls -1A', working_dir = cwd)
    output_py = os.listdir(cwd)
    assert sorted(output_cmd) == sorted(output_py)


def test_run_command_error():
    data = uuid.uuid4().hex
    with pytest.raises(ProcessException) as e:
        run_command('cat {}'.format(data))

    assert e.value.log_stdout == ''
    assert e.value.log_stderr.startswith('cat: {}'.format(data))
    assert e.value.exit_code != 0
Fix intermittent travis build error.
Fix intermittent travis build error.
Python
mit
wamonite/packermate
--- +++ @@ -21,5 +21,5 @@
         run_command('cat {}'.format(data))
 
     assert e.value.log_stdout == ''
-    assert e.value.log_stderr == 'cat: {}: No such file or directory\n'.format(data)
+    assert e.value.log_stderr.startswith('cat: {}'.format(data))
     assert e.value.exit_code != 0
e79c90db5dcda56ff9b2b154659984db9c6f7663
src/main.py
src/main.py
# -*- encoding: utf-8 -*-

import pygame
from scenes import director
from scenes import intro_scene

pygame.init()


def main():
    game_director = director.Director()
    scene = intro_scene.IntroScene(game_director)
    game_director.change_scene(scene)
    game_director.loop()


if __name__ == '__main__':
    pygame.init()
    main()
# -*- encoding: utf-8 -*-

import pygame
from scenes import director
from scenes import intro_scene
from game_logic import settings

pygame.init()


def main():
    initial_settings = settings.Settings(
        trials=1000, player='O', oponent='Computer')
    game_director = director.Director()
    scene = intro_scene.IntroScene(game_director)
    game_director.change_scene(scene, initial_settings)
    game_director.loop()


if __name__ == '__main__':
    pygame.init()
    main()
Create initial config when starting game
Create initial config when starting game
Python
mit
juangallostra/TicTacToe
--- +++ @@ -3,14 +3,17 @@
 import pygame
 from scenes import director
 from scenes import intro_scene
+from game_logic import settings
 
 pygame.init()
 
 
 def main():
+    initial_settings = settings.Settings(
+        trials=1000, player='O', oponent='Computer')
     game_director = director.Director()
     scene = intro_scene.IntroScene(game_director)
-    game_director.change_scene(scene)
+    game_director.change_scene(scene, initial_settings)
     game_director.loop()
 
 
7dd228d7eaad6b1f37ff3c4d954aebe0ffa99170
tests/test_targets/test_targets.py
tests/test_targets/test_targets.py
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

from unittest import TestCase

from project_generator_definitions.definitions import ProGenTargets


class TestAllTargets(TestCase):
    """test all targets"""

    def setUp(self):
        self.progen_target = ProGenTargets()
        self.targets_list = self.progen_target.get_targets()

    def test_targets_validity(self):
        for target in self.targets_list:
            record = self.progen_target.get_target_record(target)
            assert record['target']['name'][0]
            assert record['target']['mcu'][0]

    def test_targets_mcu_validity(self):
        for target in self.targets_list:
            mcu = self.progen_target.get_mcu_record(target)
            assert mcu['mcu']
            assert mcu['mcu']['name']
            assert mcu['mcu']['core']
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from unittest import TestCase

from project_generator_definitions.definitions import ProGenTargets


class TestAllTargets(TestCase):
    """test all targets"""

    def setUp(self):
        self.progen_target = ProGenTargets()
        self.targets_list = self.progen_target.get_targets()

    def test_targets_validity(self):
        # Cehck for required info for targets
        for target in self.targets_list:
            record = self.progen_target.get_target_record(target)
            assert record['target']['name'][0]
            assert record['target']['mcu'][0]

    def test_targets_mcu_validity(self):
        # Check for required info in mcu
        for target in self.targets_list:
            mcu = self.progen_target.get_mcu_record(target)
            assert mcu['mcu'][0]
            assert mcu['mcu']['name'][0]
            assert mcu['mcu']['core'][0]
Test - targets test fix mcu validity indexes
Test - targets test fix mcu validity indexes
Python
apache-2.0
project-generator/project_generator_definitions,0xc0170/project_generator_definitions,ohagendorf/project_generator_definitions
--- +++ @@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import os
 
 from unittest import TestCase
 
@@ -26,14 +25,16 @@
         self.targets_list = self.progen_target.get_targets()
 
     def test_targets_validity(self):
+        # Cehck for required info for targets
         for target in self.targets_list:
             record = self.progen_target.get_target_record(target)
             assert record['target']['name'][0]
             assert record['target']['mcu'][0]
 
     def test_targets_mcu_validity(self):
+        # Check for required info in mcu
         for target in self.targets_list:
             mcu = self.progen_target.get_mcu_record(target)
-            assert mcu['mcu']
-            assert mcu['mcu']['name']
-            assert mcu['mcu']['core']
+            assert mcu['mcu'][0]
+            assert mcu['mcu']['name'][0]
+            assert mcu['mcu']['core'][0]
bd2c0efa6b0205ff0d24cf335f65f755f18566f2
modernrpc/__init__.py
modernrpc/__init__.py
# coding: utf-8

# default_app_config was deprecated in Django 3.2. Maybe set it only when detected django version is older?
default_app_config = "modernrpc.apps.ModernRpcConfig"

# Package version is now stored in pyproject.toml only. To retrieve it from code, use:
# import pkg_resources; version = pkg_resources.get_distribution('django-modern-rpc').version
# coding: utf-8
from packaging.version import Version
import django

# Set default_app_config only with Django up to 3.1. This prevents a Warning on newer releases
# See https://docs.djangoproject.com/fr/3.2/releases/3.2/#automatic-appconfig-discovery
if Version(django.get_version()) < Version("3.2"):
    default_app_config = "modernrpc.apps.ModernRpcConfig"

# Package version is now stored in pyproject.toml only. To retrieve it from code, use:
# import pkg_resources; version = pkg_resources.get_distribution('django-modern-rpc').version
Stop defining default_app_config on Django 3.2+
Stop defining default_app_config on Django 3.2+
Python
mit
alorence/django-modern-rpc,alorence/django-modern-rpc
--- +++ @@ -1,7 +1,11 @@
 # coding: utf-8
+from packaging.version import Version
+import django
 
-# default_app_config was deprecated in Django 3.2. Maybe set it only when detected django version is older?
-default_app_config = "modernrpc.apps.ModernRpcConfig"
+# Set default_app_config only with Django up to 3.1. This prevents a Warning on newer releases
+# See https://docs.djangoproject.com/fr/3.2/releases/3.2/#automatic-appconfig-discovery
+if Version(django.get_version()) < Version("3.2"):
+    default_app_config = "modernrpc.apps.ModernRpcConfig"
 
 # Package version is now stored in pyproject.toml only. To retrieve it from code, use:
 # import pkg_resources; version = pkg_resources.get_distribution('django-modern-rpc').version
77a1ee839da665fc1f97dabed1bf5639c980a17a
src/api/controller/ServerListController.py
src/api/controller/ServerListController.py
from BaseController import BaseController
from api.util import settings


class ServerListController(BaseController):
    def get(self):
        servers = {"servers": self.read_server_config()}
        self.write(servers)

    def read_server_config(self):
        """Returns a list of servers with the 'id' field added.
        """
        # TODO: Move this into the settings module so everything benefits.
        server_list = []
        redis_servers = settings.get_redis_servers()

        for server in redis_servers:
            server_id = "%(server)s:%(port)s" % server
            s = dict(server=server['server'], port=server['port'], password=server['password'], id=server_id)
            server_list.append(s)

        return server_list
from BaseController import BaseController
from api.util import settings


class ServerListController(BaseController):
    def get(self):
        servers = {"servers": self.read_server_config()}
        self.write(servers)

    def read_server_config(self):
        """Returns a list of servers with the 'id' field added.
        """
        # TODO: Move this into the settings module so everything benefits.
        server_list = []
        redis_servers = settings.get_redis_servers()

        for server in redis_servers:
            if 'password' not in server:
                server['password'] = None

            server_id = "%(server)s:%(port)s" % server
            s = dict(server=server['server'], port=server['port'], password=server['password'], id=server_id)
            server_list.append(s)

        return server_list
Allow servers command to work without a password.
Allow servers command to work without a password.
Python
mit
YongMan/RedisLive,merlian/RedisLive,heamon7/RedisLive,fengshao0907/RedisLive,heamon7/RedisLive,udomsak/RedisLive,merlian/RedisLive,jacklee0810/RedisLive,YongMan/RedisLive,jacklee0810/RedisLive,udomsak/RedisLive,jiejieling/RdsMonitor,udomsak/RedisLive,fengshao0907/RedisLive,nkrode/RedisLive,jacklee0810/RedisLive,jiejieling/RdsMonitor,merlian/RedisLive,nkrode/RedisLive,fengshao0907/RedisLive,YongMan/RedisLive,nkrode/RedisLive,jiejieling/RdsMonitor,heamon7/RedisLive
--- +++ @@ -15,6 +15,9 @@
         redis_servers = settings.get_redis_servers()
 
         for server in redis_servers:
+            if 'password' not in server:
+                server['password'] = None
+
             server_id = "%(server)s:%(port)s" % server
             s = dict(server=server['server'], port=server['port'], password=server['password'], id=server_id)
             server_list.append(s)
271bb9de8f0f3674b1f6f47bc3519f1297c87abf
examples/linechannel.py
examples/linechannel.py
# -*- coding: utf-8 -*-

from linepy import *

client = LineClient()
#client = LineClient(authToken='AUTHTOKEN')
client.log("Auth Token : " + str(client.authToken))

# Initialize LineChannel with LineClient
# This channel id is Timeline channel
channel = LineChannel(client, channel_id="1341209950")
client.log("Channel Access Token : " + str(channel.channelAccessToken))
# -*- coding: utf-8 -*-

from linepy import *

client = LineClient()
#client = LineClient(authToken='AUTHTOKEN')
client.log("Auth Token : " + str(client.authToken))

# Initialize LineChannel with LineClient
# This channel id is Timeline channel
channel = LineChannel(client, channelId="1341209950")
client.log("Channel Access Token : " + str(channel.channelAccessToken))
Change channel_id to new channelId param
Change channel_id to new channelId param
Python
bsd-3-clause
fadhiilrachman/line-py
--- +++ @@ -8,5 +8,5 @@
 
 # Initialize LineChannel with LineClient
 # This channel id is Timeline channel
-channel = LineChannel(client, channel_id="1341209950")
+channel = LineChannel(client, channelId="1341209950")
 client.log("Channel Access Token : " + str(channel.channelAccessToken))
bc5d678937e69fe00e206b6a80c9a2f6dfb1a3a2
examples/worker_rush.py
examples/worker_rush.py
import sc2
from sc2 import run_game, maps, Race, Difficulty
from sc2.player import Bot, Computer

class WorkerRushBot(sc2.BotAI):
    async def on_step(self, state, iteration):
        if iteration == 0:
            for probe in self.workers:
                await self.do(probe.attack(self.enemy_start_locations[0]))

def main():
    run_game(maps.get("Abyssal Reef LE"), [
        Bot(Race.Protoss, WorkerRushBot()),
        Computer(Race.Protoss, Difficulty.Medium)
    ], realtime=True)

if __name__ == '__main__':
    main()
import sc2
from sc2 import run_game, maps, Race, Difficulty
from sc2.player import Bot, Computer

class WorkerRushBot(sc2.BotAI):
    async def on_step(self, state, iteration):
        if iteration == 0:
            for worker in self.workers:
                await self.do(worker.attack(self.enemy_start_locations[0]))

def main():
    run_game(maps.get("Abyssal Reef LE"), [
        Bot(Race.Zerg, WorkerRushBot()),
        Computer(Race.Protoss, Difficulty.Medium)
    ], realtime=True)

if __name__ == '__main__':
    main()
Use generic names in the worker rush example
Use generic names in the worker rush example
Python
mit
Dentosal/python-sc2
--- +++ @@ -5,12 +5,12 @@
 class WorkerRushBot(sc2.BotAI):
     async def on_step(self, state, iteration):
         if iteration == 0:
-            for probe in self.workers:
-                await self.do(probe.attack(self.enemy_start_locations[0]))
+            for worker in self.workers:
+                await self.do(worker.attack(self.enemy_start_locations[0]))
 
 def main():
     run_game(maps.get("Abyssal Reef LE"), [
-        Bot(Race.Protoss, WorkerRushBot()),
+        Bot(Race.Zerg, WorkerRushBot()),
         Computer(Race.Protoss, Difficulty.Medium)
     ], realtime=True)
a6a2ee870840730f99ad475e02956c49fe2e7ed3
common/authapp.py
common/authapp.py
import ConfigParser
from common.application import Application
from keystonemiddleware.auth_token import filter_factory as auth_filter_factory


class KeystoneApplication(Application):
    """
    An Application which uses Keystone for authorisation using RBAC
    """

    def __init__(self, configuration):
        super(KeystoneApplication, self).__init__(configuration)
        self.required_role = self.config.get('authorisation', 'required_role')
        if self.required_role is None:
            raise ValueError("No required role supplied")

    def _check_auth(self, req):
        if 'HTTP_X_ROLES' in req.environ:
            user_roles = req.environ['HTTP_X_ROLES'].split(',')
            return self.required_role in user_roles
        return False

def keystone_auth_filter_factory(global_config, **local_config):
    global_config.update(local_config)
    config_file_name = global_config.get('config_file', 'apiv1app.ini')
    config_file = ConfigParser.SafeConfigParser()
    config_file.read(config_file_name)
    global_config.update(config_file.items('keystone_authtoken'))
    return auth_filter_factory(global_config)
import ConfigParser
from common.application import Application
from keystonemiddleware.auth_token import filter_factory as auth_filter_factory


class KeystoneApplication(Application):
    """
    An Application which uses Keystone for authorisation using RBAC
    """

    INI_SECTION = 'keystone_authtoken'

    def __init__(self, configuration):
        super(KeystoneApplication, self).__init__(configuration)
        self.required_role = self.config.get('authorisation', 'required_role')
        if self.required_role is None:
            raise ValueError("No required role supplied")

    def _check_auth(self, req):
        if 'HTTP_X_ROLES' in req.environ:
            user_roles = req.environ['HTTP_X_ROLES'].split(',')
            return self.required_role in user_roles
        return False

def keystone_auth_filter_factory(global_config, **local_config):
    global_config.update(local_config)
    config_file_name = global_config.get('config_file')
    if not config_file_name:
        raise ValueError('No config_file directive')
    config_file = ConfigParser.SafeConfigParser()
    if not config_file.read(config_file_name):
        raise ValueError("Cannot read config file '%s'" % config_file_name)
    global_config.update(config_file.items(KeystoneApplication.INI_SECTION))
    return auth_filter_factory(global_config)
Remove hardcoded default filename. Raise an error if no app config file was specified, or it is unreadable, or it doesn't contain the section we need.
Remove hardcoded default filename. Raise an error if no app config file was specified, or it is unreadable, or it doesn't contain the section we need.
Python
apache-2.0
NCI-Cloud/reporting-api,NeCTAR-RC/reporting-api,NCI-Cloud/reporting-api,NeCTAR-RC/reporting-api
--- +++ @@ -7,6 +7,8 @@
     """
     An Application which uses Keystone for authorisation using RBAC
     """
+
+    INI_SECTION = 'keystone_authtoken'
 
     def __init__(self, configuration):
         super(KeystoneApplication, self).__init__(configuration)
@@ -22,8 +24,11 @@
 
 def keystone_auth_filter_factory(global_config, **local_config):
     global_config.update(local_config)
-    config_file_name = global_config.get('config_file', 'apiv1app.ini')
+    config_file_name = global_config.get('config_file')
+    if not config_file_name:
+        raise ValueError('No config_file directive')
     config_file = ConfigParser.SafeConfigParser()
-    config_file.read(config_file_name)
-    global_config.update(config_file.items('keystone_authtoken'))
+    if not config_file.read(config_file_name):
+        raise ValueError("Cannot read config file '%s'" % config_file_name)
+    global_config.update(config_file.items(KeystoneApplication.INI_SECTION))
     return auth_filter_factory(global_config)
66462c231011f6418fc246789ce4feed10a74a66
web/whim/core/time.py
web/whim/core/time.py
from datetime import datetime, timezone, time


def zero_time_with_timezone(date, tz=timezone.utc):
    return datetime.combine(date, time(tzinfo=tz))
from datetime import datetime, timezone, time

import dateparser


def zero_time_with_timezone(date, tz=timezone.utc):
    return datetime.combine(date, time(tzinfo=tz))


def attempt_parse_date(val):
    parsed_date = dateparser.parse(val, languages=['en'])
    if parsed_date is None:
        # try other strategies?
        pass
    return parsed_date
Use dateparser for parsing scraped dates
Use dateparser for parsing scraped dates
Python
mit
andrewgleave/whim,andrewgleave/whim,andrewgleave/whim
--- +++ @@ -1,5 +1,15 @@
 from datetime import datetime, timezone, time
+
+import dateparser
 
 
 def zero_time_with_timezone(date, tz=timezone.utc):
     return datetime.combine(date, time(tzinfo=tz))
+
+
+def attempt_parse_date(val):
+    parsed_date = dateparser.parse(val, languages=['en'])
+    if parsed_date is None:
+        # try other strategies?
+        pass
+    return parsed_date
57a37c4a87e9757a109dfb5f3169fb8264d0795e
neutron/server/rpc_eventlet.py
neutron/server/rpc_eventlet.py
#!/usr/bin/env python

# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

# If ../neutron/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...

from oslo_log import log

from neutron._i18n import _LI
from neutron import manager
from neutron import service

LOG = log.getLogger(__name__)


def eventlet_rpc_server():
    LOG.info(_LI("Eventlet based AMQP RPC server starting..."))

    try:
        manager.init()
        workers = service._get_rpc_workers() + service._get_plugins_workers()
        rpc_workers_launcher = service._start_workers(workers)
    except NotImplementedError:
        LOG.info(_LI("RPC was already started in parent process by "
                     "plugin."))
    else:
        rpc_workers_launcher.wait()
#!/usr/bin/env python

# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

# If ../neutron/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...

from oslo_log import log

from neutron._i18n import _LI
from neutron import manager
from neutron import service

LOG = log.getLogger(__name__)


def eventlet_rpc_server():
    LOG.info(_LI("Eventlet based AMQP RPC server starting..."))

    try:
        manager.init()
        rpc_workers_launcher = service.start_all_workers()
    except NotImplementedError:
        LOG.info(_LI("RPC was already started in parent process by "
                     "plugin."))
    else:
        rpc_workers_launcher.wait()
Switch to start_all_workers in RPC server
Switch to start_all_workers in RPC server This does the same as the logic present but it emits the registry callback event for resources.PROCESS AFTER_SPAWN that some plugins may be expecting. Change-Id: I6f9aeca753a5d3c0052f553a2ac46786ca113e1e Related-Bug: #1687896
Python
apache-2.0
mahak/neutron,noironetworks/neutron,openstack/neutron,openstack/neutron,openstack/neutron,eayunstack/neutron,mahak/neutron,huntxu/neutron,eayunstack/neutron,noironetworks/neutron,huntxu/neutron,mahak/neutron
--- +++ @@ -32,8 +32,7 @@
 
     try:
         manager.init()
-        workers = service._get_rpc_workers() + service._get_plugins_workers()
-        rpc_workers_launcher = service._start_workers(workers)
+        rpc_workers_launcher = service.start_all_workers()
     except NotImplementedError:
         LOG.info(_LI("RPC was already started in parent process by "
                      "plugin."))
bc961992afeae978e95209606e0e7b1a9b73719f
jesusmtnez/python/kata/game.py
jesusmtnez/python/kata/game.py
class Game():
    def __init__(self):
        self._score = 0

    def roll(self, pins):
        pass

    def score(self):
        return 0
class Game():
    def __init__(self):
        self._score = 0

    def roll(self, pins):
        self._score += pins

    def score(self):
        return self._score
Update score in Game class methods
[Python] Update score in Game class methods
Python
mit
JesusMtnez/devexperto-challenge,JesusMtnez/devexperto-challenge
--- +++ @@ -3,7 +3,7 @@
         self._score = 0
 
     def roll(self, pins):
-        pass
+        self._score += pins
 
     def score(self):
-        return 0
+        return self._score
69c72d47ebf57932b6e20e2c22a5f1c84d07d3eb
pyqode/core/api/__init__.py
pyqode/core/api/__init__.py
""" This package contains the bases classes of pyqode and some utility functions. """ from .code_edit import CodeEdit from .decoration import TextDecoration from .encodings import ENCODINGS_MAP, convert_to_codec_key from .manager import Manager from .mode import Mode from .panel import Panel from .syntax_highlighter import SyntaxHighlighter from .syntax_highlighter import ColorScheme from .syntax_highlighter import TextBlockUserData from .utils import TextHelper, TextBlockHelper from .utils import get_block_symbol_data from .utils import DelayJobRunner from .folding import FoldDetector from .folding import IndentFoldDetector from .folding import CharBasedFoldDetector from .folding import FoldScope __all__ = [ 'convert_to_codec_key', 'get_block_symbol_data', 'CharBasedFoldDetector', 'CodeEdit', 'ColorScheme', 'DelayJobRunner', 'ENCODINGS_MAP', 'FoldDetector', 'IndentFoldDetector', 'FoldScope', 'Manager', 'Mode', 'Panel', 'SyntaxHighlighter', 'TextBlockUserData', 'TextDecoration', 'TextHelper', 'TextBlockHelper' ]
""" This package contains the bases classes of pyqode and some utility functions. """ from .code_edit import CodeEdit from .decoration import TextDecoration from .encodings import ENCODINGS_MAP, convert_to_codec_key from .manager import Manager from .mode import Mode from .panel import Panel from .syntax_highlighter import ColorScheme from .syntax_highlighter import PYGMENTS_STYLES from .syntax_highlighter import SyntaxHighlighter from .syntax_highlighter import TextBlockUserData from .utils import TextHelper, TextBlockHelper from .utils import get_block_symbol_data from .utils import DelayJobRunner from .folding import FoldDetector from .folding import IndentFoldDetector from .folding import CharBasedFoldDetector from .folding import FoldScope __all__ = [ 'convert_to_codec_key', 'get_block_symbol_data', 'CharBasedFoldDetector', 'CodeEdit', 'ColorScheme', 'DelayJobRunner', 'ENCODINGS_MAP', 'FoldDetector', 'IndentFoldDetector', 'FoldScope', 'Manager', 'Mode', 'Panel', 'PYGMENTS_STYLES', 'SyntaxHighlighter', 'TextBlockUserData', 'TextDecoration', 'TextHelper', 'TextBlockHelper' ]
Add missing PYGMENTS_STYLES list to pyqode.core.api
Add missing PYGMENTS_STYLES list to pyqode.core.api
Python
mit
zwadar/pyqode.core,pyQode/pyqode.core,pyQode/pyqode.core
--- +++ @@ -9,8 +9,9 @@
 from .manager import Manager
 from .mode import Mode
 from .panel import Panel
+from .syntax_highlighter import ColorScheme
+from .syntax_highlighter import PYGMENTS_STYLES
 from .syntax_highlighter import SyntaxHighlighter
-from .syntax_highlighter import ColorScheme
 from .syntax_highlighter import TextBlockUserData
 from .utils import TextHelper, TextBlockHelper
 from .utils import get_block_symbol_data
@@ -35,6 +36,7 @@
     'Manager',
     'Mode',
     'Panel',
+    'PYGMENTS_STYLES',
     'SyntaxHighlighter',
     'TextBlockUserData',
     'TextDecoration',
23d8942ffeeee72e21330bd8ecc5bfb5e91bbc3b
certidude/push.py
certidude/push.py
import click
import json
import logging
import requests

from datetime import datetime
from certidude import config


def publish(event_type, event_data):
    """
    Publish event on push server
    """
    if not isinstance(event_data, basestring):
        from certidude.decorators import MyEncoder
        event_data = json.dumps(event_data, cls=MyEncoder)

    url = config.PUSH_PUBLISH % config.PUSH_TOKEN
    click.echo("Publishing %s event '%s' on %s" % (event_type, event_data, url))

    try:
        notification = requests.post(
            url,
            data=event_data,
            headers={"X-EventSource-Event": event_type, "User-Agent": "Certidude API"})
        if notification.status_code == requests.codes.created:
            pass # Sent to client
        elif notification.status_code == requests.codes.accepted:
            pass # Buffered in nchan
        else:
            click.echo("Failed to submit event to push server, server responded %d" % (
                notification.status_code))
    except requests.exceptions.ConnectionError:
        click.echo("Failed to submit event to push server, connection error")


class PushLogHandler(logging.Handler):
    """
    To be used with Python log handling framework for publishing log entries
    """
    def emit(self, record):
        from certidude.push import publish
        publish("log-entry", dict(
            created = datetime.utcfromtimestamp(record.created),
            message = record.msg % record.args,
            severity = record.levelname.lower()))
import click
import json
import logging
import requests

from datetime import datetime
from certidude import config


def publish(event_type, event_data):
    """
    Publish event on push server
    """
    if not config.PUSH_PUBLISH:
        # Push server disabled
        return

    if not isinstance(event_data, basestring):
        from certidude.decorators import MyEncoder
        event_data = json.dumps(event_data, cls=MyEncoder)

    url = config.PUSH_PUBLISH % config.PUSH_TOKEN
    click.echo("Publishing %s event '%s' on %s" % (event_type, event_data, url))

    try:
        notification = requests.post(
            url,
            data=event_data,
            headers={"X-EventSource-Event": event_type, "User-Agent": "Certidude API"})
        if notification.status_code == requests.codes.created:
            pass # Sent to client
        elif notification.status_code == requests.codes.accepted:
            pass # Buffered in nchan
        else:
            click.echo("Failed to submit event to push server, server responded %d" % (
                notification.status_code))
    except requests.exceptions.ConnectionError:
        click.echo("Failed to submit event to push server, connection error")


class PushLogHandler(logging.Handler):
    """
    To be used with Python log handling framework for publishing log entries
    """
    def emit(self, record):
        from certidude.push import publish
        publish("log-entry", dict(
            created = datetime.utcfromtimestamp(record.created),
            message = record.msg % record.args,
            severity = record.levelname.lower()))
Add fallbacks for e-mail handling if outbox is not defined
Add fallbacks for e-mail handling if outbox is not defined
Python
mit
laurivosandi/certidude,laurivosandi/certidude,plaes/certidude,laurivosandi/certidude,plaes/certidude,plaes/certidude,laurivosandi/certidude,plaes/certidude
--- +++ @@ -11,6 +11,10 @@
     """
     Publish event on push server
     """
+    if not config.PUSH_PUBLISH:
+        # Push server disabled
+        return
+
     if not isinstance(event_data, basestring):
         from certidude.decorators import MyEncoder
         event_data = json.dumps(event_data, cls=MyEncoder)
78515c7bbb81263fa339a67c2aabfa1a4f3c9af9
thefuck/rules/ifconfig_device_not_found.py
thefuck/rules/ifconfig_device_not_found.py
import subprocess
from thefuck.utils import for_app, replace_command, eager


@for_app('ifconfig')
def match(command):
    return 'error fetching interface information: Device not found' \
           in command.stderr


@eager
def _get_possible_interfaces():
    proc = subprocess.Popen(['ifconfig', '-a'], stdout=subprocess.PIPE)
    for line in proc.stdout.readlines():
        line = line.decode()
        if line and line != '\n' and not line.startswith(' '):
            yield line.split(' ')[0]


def get_new_command(command):
    interface = command.stderr.split(' ')[0][:-1]
    possible_interfaces = _get_possible_interfaces()
    return replace_command(command, interface, possible_interfaces)


import subprocess
from thefuck.utils import for_app, replace_command, eager


@for_app('ifconfig')
def match(command):
    return 'error fetching interface information: Device not found' \
           in command.stderr


@eager
def _get_possible_interfaces():
    proc = subprocess.Popen(['ifconfig', '-a'], stdout=subprocess.PIPE)
    for line in proc.stdout.readlines():
        line = line.decode()
        if line and line != '\n' and not line.startswith(' '):
            yield line.split(' ')[0]


def get_new_command(command):
    interface = command.stderr.split(' ')[0][:-1]
    possible_interfaces = _get_possible_interfaces()
    return replace_command(command, interface, possible_interfaces)
Fix flake8 errors: W391 blank line at end of file
Fix flake8 errors: W391 blank line at end of file
Python
mit
nvbn/thefuck,scorphus/thefuck,Clpsplug/thefuck,mlk/thefuck,nvbn/thefuck,SimenB/thefuck,mlk/thefuck,SimenB/thefuck,Clpsplug/thefuck,scorphus/thefuck
--- +++ @@ -21,5 +21,3 @@
     interface = command.stderr.split(' ')[0][:-1]
     possible_interfaces = _get_possible_interfaces()
     return replace_command(command, interface, possible_interfaces)
-
-
e7e8972124d3336834f1c177f655e12528a49624
cosmo/monitors/osm_data_models.py
cosmo/monitors/osm_data_models.py
import pandas as pd

from monitorframe.monitor import BaseDataModel

from cosmo.filesystem import FileDataFinder
from cosmo import FILES_SOURCE
from cosmo.monitor_helpers import explode_df


class OSMDataModel(BaseDataModel):

    def get_data(self):
        header_keys = (
            'ROOTNAME', 'EXPSTART', 'DETECTOR', 'LIFE_ADJ', 'OPT_ELEM', 'CENWAVE', 'FPPOS', 'PROPOSID', 'OBSET_ID'
        )
        header_extensions = (0, 1, 0, 0, 0, 0, 0, 0, 0)

        data_keys = ('TIME', 'SHIFT_DISP', 'SHIFT_XDISP', 'SEGMENT')
        data_extensions = (1, 1, 1, 1)

        finder = FileDataFinder(
            FILES_SOURCE,
            '*lampflash*',
            header_keys,
            header_extensions,
            data_keys=data_keys,
            data_extensions=data_extensions
        )

        df = pd.DataFrame(finder.data_from_files())

        return explode_df(df, list(data_keys))
import pandas as pd

from monitorframe.monitor import BaseDataModel

from cosmo.filesystem import FileDataFinder
from cosmo import FILES_SOURCE
from cosmo.monitor_helpers import explode_df


class OSMDataModel(BaseDataModel):
    """Data model for all OSM Shift monitors."""

    def get_data(self):
        header_keys = (
            'ROOTNAME', 'EXPSTART', 'DETECTOR', 'LIFE_ADJ', 'OPT_ELEM', 'CENWAVE', 'FPPOS', 'PROPOSID', 'OBSET_ID'
        )
        header_extensions = (0, 1, 0, 0, 0, 0, 0, 0, 0)

        data_keys = ('TIME', 'SHIFT_DISP', 'SHIFT_XDISP', 'SEGMENT')
        data_extensions = (1, 1, 1, 1)

        # Find data from lampflash files
        finder = FileDataFinder(
            FILES_SOURCE,
            '*lampflash*',
            header_keys,
            header_extensions,
            data_keys=data_keys,
            data_extensions=data_extensions
        )

        df = pd.DataFrame(finder.data_from_files())

        return explode_df(df, list(data_keys))
Add comments and docstring to OSMDataModel
Add comments and docstring to OSMDataModel
Python
bsd-3-clause
justincely/cos_monitoring
--- +++ @@ -8,6 +8,7 @@
 
 
 class OSMDataModel(BaseDataModel):
+    """Data model for all OSM Shift monitors."""
 
     def get_data(self):
         header_keys = (
@@ -18,6 +19,7 @@
         data_keys = ('TIME', 'SHIFT_DISP', 'SHIFT_XDISP', 'SEGMENT')
         data_extensions = (1, 1, 1, 1)
 
+        # Find data from lampflash files
         finder = FileDataFinder(
             FILES_SOURCE,
             '*lampflash*',
3e67993eb17aca7571381d59b7fd65eab53dac98
day19/part2.py
day19/part2.py
inp = 3004953

elves = list(range(1, inp + 1))
i = 0
while len(elves) > 1:
    index = (i + int(len(elves) / 2)) % len(elves)
    elves.pop(index)
    if index < i:
        i -= 1
    i = (i + 1) % len(elves)

print(elves[0])
input()
inp = 3004953

class Elf:
    def __init__(self, num):
        self.num = num
        self.prev = None
        self.next = None

    def remove(self):
        self.prev.next = self.next
        self.next.prev = self.prev

elves = list(map(Elf, range(1, inp + 1)))
for i in range(inp):
    elves[i].prev = elves[(i - 1) % inp]
    elves[i].next = elves[(i + 1) % inp]

count, current, across = inp, elves[0], elves[inp // 2]
while current != across:
    across.remove()
    across = across.next
    if count % 2 == 1:
        across = across.next
    count -= 1
    current = current.next

print(current.num)
input()
Replace list with a linked list for much better performance
Replace list with a linked list for much better performance
Python
unlicense
ultramega/adventofcode2016
--- +++ @@ -1,13 +1,28 @@
 inp = 3004953
 
-elves = list(range(1, inp + 1))
-i = 0
-while len(elves) > 1:
-    index = (i + int(len(elves) / 2)) % len(elves)
-    elves.pop(index)
-    if index < i:
-        i -= 1
-    i = (i + 1) % len(elves)
+class Elf:
+    def __init__(self, num):
+        self.num = num
+        self.prev = None
+        self.next = None
 
-print(elves[0])
+    def remove(self):
+        self.prev.next = self.next
+        self.next.prev = self.prev
+
+elves = list(map(Elf, range(1, inp + 1)))
+for i in range(inp):
+    elves[i].prev = elves[(i - 1) % inp]
+    elves[i].next = elves[(i + 1) % inp]
+
+count, current, across = inp, elves[0], elves[inp // 2]
+while current != across:
+    across.remove()
+    across = across.next
+    if count % 2 == 1:
+        across = across.next
+    count -= 1
+    current = current.next
+
+print(current.num)
 input()
561d98e59ea46b56d50341e06578b5c9fe95c73a
perfbucket/watcher.py
perfbucket/watcher.py
import os
import sys
import pyinotify
import analyzer

wm = pyinotify.WatchManager()

class ProcessProfilerEvent(pyinotify.ProcessEvent):
    def process_IN_CLOSE_WRITE(self, event):
        if event.name.endswith(".json"):
            base = os.path.splitext(os.path.join(event.path, event.name))[0]
            analyzer.analyze_profiling_result(base)

def monitor(directory):
    notifier = pyinotify.Notifier(wm, ProcessProfilerEvent())
    mask = pyinotify.IN_CLOSE_WRITE # Watched events
    wdd = wm.add_watch(directory, mask)

    while True:
        try:
            # process the queue of events as explained above
            notifier.process_events()
            if notifier.check_events():
                # read notified events and enqeue them
                notifier.read_events()
            # you can do some tasks here...
        except KeyboardInterrupt:
            # destroy the inotify's instance on this interrupt (stop monitoring)
            notifier.stop()
            break

if __name__ == '__main__':
    monitor(sys.argv[1])
import os
import sys
import pyinotify
import analyzer

class ProcessProfilerEvent(pyinotify.ProcessEvent):
    def process_IN_CLOSE_WRITE(self, event):
        if event.name.endswith(".json"):
            base = os.path.splitext(os.path.join(event.path, event.name))[0]
            analyzer.analyze_profiling_result(base)

def monitor(directory):
    wm = pyinotify.WatchManager()
    notifier = pyinotify.Notifier(wm, ProcessProfilerEvent())
    mask = pyinotify.IN_CLOSE_WRITE # Watched events
    wdd = wm.add_watch(directory, mask)

    while True:
        try:
            # process the queue of events as explained above
            notifier.process_events()
            if notifier.check_events():
                # read notified events and enqeue them
                notifier.read_events()
            # you can do some tasks here...
        except KeyboardInterrupt:
            # destroy the inotify's instance on this interrupt (stop monitoring)
            notifier.stop()
            break

if __name__ == '__main__':
    monitor(sys.argv[1])
Change scope of watch manager.
Change scope of watch manager.
Python
agpl-3.0
davidstrauss/perfbucket,davidstrauss/perfbucket,davidstrauss/perfbucket
--- +++ @@ -2,8 +2,6 @@
 import sys
 import pyinotify
 import analyzer
-
-wm = pyinotify.WatchManager()
 
 class ProcessProfilerEvent(pyinotify.ProcessEvent):
     def process_IN_CLOSE_WRITE(self, event):
@@ -12,6 +10,7 @@
             analyzer.analyze_profiling_result(base)
 
 def monitor(directory):
+    wm = pyinotify.WatchManager()
     notifier = pyinotify.Notifier(wm, ProcessProfilerEvent())
     mask = pyinotify.IN_CLOSE_WRITE # Watched events
     wdd = wm.add_watch(directory, mask)
2497f494f0e3e7fb57aa8cb1deed0c05fd6b74b1
handler/FilesService.py
handler/FilesService.py
import tornado
import time
from bson.json_util import dumps
from tornado.options import options


class FilesServiceHandler(tornado.web.RequestHandler):
    def initialize(self, logger, mongodb):
        self.logger = logger
        self.mongodb = mongodb

    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self):
        self.logger.info('Request to file upload')

        for item in self.request.files.values():
            file_info = item[0]

            self.logger.info('File uploaded: %s with mime type %s' % (file_info['filename'], file_info['content_type']))
            name = '%s-%s' % (time.time(), file_info['filename'])

            with open('%s/%s' % (options.files_dir, name), 'w') as f:
                f.write(file_info['body'])

            self.logger.info('File saved at %s' % name)

        self.write('done')
import tornado
import time
from bson.json_util import dumps
from tornado.options import options


class FilesServiceHandler(tornado.web.RequestHandler):
    def initialize(self, logger, mongodb):
        self.logger = logger
        self.mongodb = mongodb[options.db_name]['Files']

    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self):
        self.logger.info('Request to file upload')

        result = []

        for item in self.request.files.values():
            for file_info in item:
                timestamp = time.time()

                data = {
                    'name': '%s-%s' % (timestamp, file_info['filename']),
                    'location': 'TBD',
                    'context': 'context',
                    'realName': file_info['filename'],
                    'mimeType': file_info['content_type'],
                    'deleted': False,
                    'timestamp': timestamp,
                    'restrictions': {
                        'quota': False,
                        'session': False
                    }
                }

                self.logger.info('File uploaded: %s with mime type %s' % (data['realName'], data['mimeType']))

                with open('%s/%s' % (options.files_dir, data['name']), 'w') as f:
                    f.write(file_info['body'])

                self.logger.info('File saved at %s' % data['name'])

                yield self.mongodb.save(data)
                result.append(data)

        self.write(dumps(result))
Save file info in DB
Save file info in DB
Python
apache-2.0
jiss-software/jiss-file-service,jiss-software/jiss-file-service,jiss-software/jiss-file-service
--- +++ @@ -7,22 +7,41 @@
 class FilesServiceHandler(tornado.web.RequestHandler):
     def initialize(self, logger, mongodb):
         self.logger = logger
-        self.mongodb = mongodb
+        self.mongodb = mongodb[options.db_name]['Files']
 
     @tornado.web.asynchronous
     @tornado.gen.coroutine
     def post(self):
         self.logger.info('Request to file upload')
 
+        result = []
+
         for item in self.request.files.values():
-            file_info = item[0]
+            for file_info in item:
+                timestamp = time.time()
 
-            self.logger.info('File uploaded: %s with mime type %s' % (file_info['filename'], file_info['content_type']))
-            name = '%s-%s' % (time.time(), file_info['filename'])
+                data = {
+                    'name': '%s-%s' % (timestamp, file_info['filename']),
+                    'location': 'TBD',
+                    'context': 'context',
+                    'realName': file_info['filename'],
+                    'mimeType': file_info['content_type'],
+                    'deleted': False,
+                    'timestamp': timestamp,
+                    'restrictions': {
+                        'quota': False,
+                        'session': False
+                    }
+                }
 
-            with open('%s/%s' % (options.files_dir, name), 'w') as f:
-                f.write(file_info['body'])
+                self.logger.info('File uploaded: %s with mime type %s' % (data['realName'], data['mimeType']))
 
-            self.logger.info('File saved at %s' % name)
+                with open('%s/%s' % (options.files_dir, data['name']), 'w') as f:
+                    f.write(file_info['body'])
 
-        self.write('done')
+                self.logger.info('File saved at %s' % data['name'])
+
+                yield self.mongodb.save(data)
+                result.append(data)
+
+        self.write(dumps(result))
996713fc6aefe20b28c729c46532ae566d5160a1
paratemp/sim_setup/__init__.py
paratemp/sim_setup/__init__.py
"""This module has functions and classes useful for setting up simulations""" ######################################################################## # # # This test was written by Thomas Heavey in 2019. # # theavey@bu.edu thomasjheavey@gmail.com # # # # Copyright 2017-19 Thomas J. Heavey IV # # # # Licensed under the Apache License, Version 2.0 (the "License"); # # you may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software # # distributed under the License is distributed on an "AS IS" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # # implied. # # See the License for the specific language governing permissions and # # limitations under the License. # # # ######################################################################## from __future__ import absolute_import from .para_temp_setup import * from .sim_setup import * from .simulation import Simulation, SimpleSimulation from .pt_simulation import PTSimulation from .molecule import Molecule from .system import System
"""This module has functions and classes useful for setting up simulations""" ######################################################################## # # # This test was written by Thomas Heavey in 2019. # # theavey@bu.edu thomasjheavey@gmail.com # # # # Copyright 2017-19 Thomas J. Heavey IV # # # # Licensed under the Apache License, Version 2.0 (the "License"); # # you may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software # # distributed under the License is distributed on an "AS IS" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # # implied. # # See the License for the specific language governing permissions and # # limitations under the License. # # # ######################################################################## from __future__ import absolute_import from .para_temp_setup import * from .sim_setup import * from .molecule import Molecule from .system import System from .simulation import Simulation, SimpleSimulation from .pt_simulation import PTSimulation
Fix import order (some somewhat cyclic dependencies; should fix)
Fix import order (some somewhat cyclic dependencies; should fix)
Python
apache-2.0
theavey/ParaTemp,theavey/ParaTemp
--- +++ @@ -26,7 +26,7 @@
 
 from .para_temp_setup import *
 from .sim_setup import *
+from .molecule import Molecule
+from .system import System
 from .simulation import Simulation, SimpleSimulation
 from .pt_simulation import PTSimulation
-from .molecule import Molecule
-from .system import System
d787039a58d63cd85068da996a12fc36c1d63804
ixxy_admin_utils/admin_actions.py
ixxy_admin_utils/admin_actions.py
def xlsx_export_action(modeladmin, request, queryset):
    from django.http import HttpResponse
    from import_export.formats import base_formats
    
    formats = modeladmin.get_export_formats()
    file_format = base_formats.XLSX()
    export_data = modeladmin.get_export_data(file_format, queryset, request=request)
    content_type = file_format.get_content_type()
    # Django 1.7 uses the content_type kwarg instead of mimetype
    try:
        response = HttpResponse(export_data, content_type=content_type)
    except TypeError:
        response = HttpResponse(export_data, mimetype=content_type)
    response['Content-Disposition'] = 'attachment; filename=%s' % (
        modeladmin.get_export_filename(file_format),
    )
    return response

xlsx_export_action.short_description = "Export selected rows to Excel"
from django.http import HttpResponse
from import_export.formats import base_formats


def xlsx_export_action(modeladmin, request, queryset):
    
    formats = modeladmin.get_export_formats()
    file_format = base_formats.XLSX()
    export_data = modeladmin.get_export_data(file_format, queryset, request=request)
    content_type = file_format.get_content_type()
    # Django 1.7 uses the content_type kwarg instead of mimetype
    try:
        response = HttpResponse(export_data, content_type=content_type)
    except TypeError:
        response = HttpResponse(export_data, mimetype=content_type)
    response['Content-Disposition'] = 'attachment; filename=%s' % (
        modeladmin.get_export_filename(file_format),
    )
    return response

xlsx_export_action.short_description = "Export selected rows to Excel"
Undo previous commit. It didn't work.
Undo previous commit. It didn't work.
Python
mit
DjangoAdminHackers/ixxy-admin-utils,DjangoAdminHackers/ixxy-admin-utils
--- +++ @@ -1,7 +1,9 @@
+from django.http import HttpResponse
+from import_export.formats import base_formats
+
+
 def xlsx_export_action(modeladmin, request, queryset):
-    from django.http import HttpResponse
-    from import_export.formats import base_formats
     
 
     formats = modeladmin.get_export_formats()
     file_format = base_formats.XLSX()
7dfe4381ecd252530cb7dc274b2dc6aaa39f81cc
deps/pyextensibletype/extensibletype/test/test_interning.py
deps/pyextensibletype/extensibletype/test/test_interning.py
from .. import intern

def test_global_interning():
    try:
        intern.global_intern("hello")
    except AssertionError as e:
        pass
    else:
        raise Exception("Expects complaint about uninitialized table")
    intern.global_intern_initialize()

    id1 = intern.global_intern("hello")
    id2 = intern.global_intern("hello")
    id3 = intern.global_intern("hallo")

    assert id1 == id2
    assert id1 != id3

def test_interning():
    table = intern.InternTable()
    id1 = intern.global_intern("hello")
    id2 = intern.global_intern("hello")
    id3 = intern.global_intern("hallo")

    assert id1 == id2
    assert id1 != id3

def test_intern_many():
    table = intern.InternTable()
    for i in range(1000000):
        table.intern("my randrom string %d" % i)
        table.intern("my randrom string %d" % (i // 2))
        table.intern("my randrom string %d" % (i // 4))
from .. import intern

def test_global_interning():
    # Can't really test for this with nose...
    # try:
    #     intern.global_intern("hello")
    # except AssertionError as e:
    #     pass
    # else:
    #     raise Exception("Expects complaint about uninitialized table")
    intern.global_intern_initialize()

    id1 = intern.global_intern("hello")
    id2 = intern.global_intern("hello")
    id3 = intern.global_intern("hallo")

    assert id1 == id2
    assert id1 != id3

def test_interning():
    table = intern.InternTable()
    id1 = intern.global_intern("hello")
    id2 = intern.global_intern("hello")
    id3 = intern.global_intern("hallo")

    assert id1 == id2
    assert id1 != id3

def test_intern_many():
    table = intern.InternTable()
    for i in range(1000000):
        table.intern("my randrom string %d" % i)
        table.intern("my randrom string %d" % (i // 2))
        table.intern("my randrom string %d" % (i // 4))
Disable global intern exception test
Disable global intern exception test
Python
bsd-2-clause
stuartarchibald/numba,pitrou/numba,pitrou/numba,shiquanwang/numba,seibert/numba,jriehl/numba,shiquanwang/numba,stefanseefeld/numba,stuartarchibald/numba,cpcloud/numba,cpcloud/numba,gdementen/numba,seibert/numba,numba/numba,sklam/numba,ssarangi/numba,gmarkall/numba,cpcloud/numba,sklam/numba,pitrou/numba,IntelLabs/numba,pombredanne/numba,cpcloud/numba,gdementen/numba,gdementen/numba,seibert/numba,IntelLabs/numba,pombredanne/numba,seibert/numba,GaZ3ll3/numba,gmarkall/numba,jriehl/numba,stuartarchibald/numba,pitrou/numba,ssarangi/numba,ssarangi/numba,jriehl/numba,numba/numba,gdementen/numba,pombredanne/numba,stefanseefeld/numba,sklam/numba,gmarkall/numba,stonebig/numba,seibert/numba,pitrou/numba,stefanseefeld/numba,jriehl/numba,numba/numba,shiquanwang/numba,ssarangi/numba,stonebig/numba,jriehl/numba,GaZ3ll3/numba,GaZ3ll3/numba,sklam/numba,pombredanne/numba,GaZ3ll3/numba,numba/numba,gmarkall/numba,sklam/numba,stefanseefeld/numba,stonebig/numba,stefanseefeld/numba,gdementen/numba,cpcloud/numba,numba/numba,IntelLabs/numba,stuartarchibald/numba,ssarangi/numba,stuartarchibald/numba,gmarkall/numba,stonebig/numba,stonebig/numba,pombredanne/numba,IntelLabs/numba,GaZ3ll3/numba,IntelLabs/numba
--- +++ @@ -1,12 +1,13 @@ from .. import intern def test_global_interning(): - try: - intern.global_intern("hello") - except AssertionError as e: - pass - else: - raise Exception("Expects complaint about uninitialized table") + # Can't really test for this with nose... + # try: + # intern.global_intern("hello") + # except AssertionError as e: + # pass + # else: + # raise Exception("Expects complaint about uninitialized table") intern.global_intern_initialize() id1 = intern.global_intern("hello")
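The pattern the commented-out block above was trying to express — asserting that a call raises — is what test frameworks provide context managers for. A minimal sketch with a stand-in function (the real global_intern is not importable here), assuming pytest is available:

import pytest

def global_intern(s):
    # stand-in for the real function, which asserts its table is initialized
    raise AssertionError('intern table not initialized')

def test_uninitialized_table_complains():
    # fails the test automatically if no AssertionError is raised,
    # replacing the manual try/except/else construction
    with pytest.raises(AssertionError):
        global_intern('hello')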
5bece700c7ebbb2c9ea3ce2781863baf189e2fc0
cybox/test/objects/__init__.py
cybox/test/objects/__init__.py
# Copyright (c) 2013, The MITRE Corporation. All rights reserved. # See LICENSE.txt for complete terms. import cybox.utils class ObjectTestCase(object): """A base class for testing all subclasses of ObjectProperties. Each subclass of ObjectTestCase should subclass both unittest.TestCase and ObjectTestCase, and defined two class-level fields: - klass: the ObjectProperties subclass being tested - object_type: The name prefix used in the XML Schema bindings for the object. """ def test_type_exists(self): # Verify that the correct class has been added to the OBJECT_TYPES_DICT # dictionary in cybox.utils.nsparser # Skip this base class if type(self) == type(ObjectTestCase): return t = self.__class__.object_type expected_class = cybox.utils.get_class_for_object_type(t) actual_class = self.__class__.klass self.assertEqual(expected_class, actual_class) expected_namespace = expected_class._XSI_NS actual_namespace = cybox.utils.nsparser.OBJECT_TYPES_DICT.get(t).get('namespace_prefix') self.assertEqual(expected_namespace, actual_namespace) self.assertEqual(expected_class._XSI_TYPE, t)
# Copyright (c) 2013, The MITRE Corporation. All rights reserved. # See LICENSE.txt for complete terms. import cybox.test import cybox.utils class ObjectTestCase(object): """A base class for testing all subclasses of ObjectProperties. Each subclass of ObjectTestCase should subclass both unittest.TestCase and ObjectTestCase, and defined two class-level fields: - klass: the ObjectProperties subclass being tested - object_type: The name prefix used in the XML Schema bindings for the object. """ def test_type_exists(self): # Verify that the correct class has been added to the OBJECT_TYPES_DICT # dictionary in cybox.utils.nsparser # Skip this base class if type(self) == type(ObjectTestCase): return t = self.__class__.object_type expected_class = cybox.utils.get_class_for_object_type(t) actual_class = self.__class__.klass self.assertEqual(expected_class, actual_class) expected_namespace = expected_class._XSI_NS actual_namespace = cybox.utils.nsparser.OBJECT_TYPES_DICT.get(t).get('namespace_prefix') self.assertEqual(expected_namespace, actual_namespace) self.assertEqual(expected_class._XSI_TYPE, t) def test_object_reference(self): klass = self.__class__.klass ref_dict = {'object_reference': "some:object-reference-1", 'xsi:type': klass._XSI_TYPE} ref_dict2 = cybox.test.round_trip_dict(klass, ref_dict) self.assertEqual(ref_dict, ref_dict2)
Add (failing) test of object_reference on all ObjectProperties subclasses
Add (failing) test of object_reference on all ObjectProperties subclasses
Python
bsd-3-clause
CybOXProject/python-cybox
--- +++ @@ -1,6 +1,7 @@ # Copyright (c) 2013, The MITRE Corporation. All rights reserved. # See LICENSE.txt for complete terms. +import cybox.test import cybox.utils @@ -34,3 +35,12 @@ self.assertEqual(expected_namespace, actual_namespace) self.assertEqual(expected_class._XSI_TYPE, t) + + def test_object_reference(self): + klass = self.__class__.klass + + ref_dict = {'object_reference': "some:object-reference-1", + 'xsi:type': klass._XSI_TYPE} + + ref_dict2 = cybox.test.round_trip_dict(klass, ref_dict) + self.assertEqual(ref_dict, ref_dict2)
a354a4f52bce3c3063678b046ba76a694c076652
web/celSearch/api/scripts/query_wikipedia.py
web/celSearch/api/scripts/query_wikipedia.py
''' Script used to query Wikipedia for summary of object ''' import sys import wikipedia def main(): # Check that we have the right number of arguments if (len(sys.argv) != 2): print 'Incorrect number of arguments; please pass in only one string that contains the subject' return 'Banana' print wikipedia.summary(sys.argv[1]) #return wikipedia.summary(sys.argv[1]) if __name__ == '__main__': main()
''' Script used to query Wikipedia for summary of object ''' import sys import wikipedia import nltk def main(): # Check that we have the right number of arguments if (len(sys.argv) != 2): print 'Incorrect number of arguments; please pass in only one string that contains the query' return 'Banana' # Get the noun from the query (uses the first noun it finds for now) print sys.argv[0] tokens = nltk.word_tokenize(sys.argv[1]) tagged = nltk.pos_tag(tokens) # Find first noun in query and provide Wikipedia summary for it for tag in tagged: if tag[1][0] == 'N': print wikipedia.summary(tag[0]) return if __name__ == '__main__': main()
Add nltk part to script
Add nltk part to script
Python
apache-2.0
christopher18/Celsearch,christopher18/Celsearch,christopher18/Celsearch
--- +++ @@ -4,15 +4,24 @@ import sys import wikipedia +import nltk def main(): # Check that we have the right number of arguments if (len(sys.argv) != 2): - print 'Incorrect number of arguments; please pass in only one string that contains the subject' + print 'Incorrect number of arguments; please pass in only one string that contains the query' return 'Banana' - print wikipedia.summary(sys.argv[1]) - #return wikipedia.summary(sys.argv[1]) + # Get the noun from the query (uses the first noun it finds for now) + print sys.argv[0] + tokens = nltk.word_tokenize(sys.argv[1]) + tagged = nltk.pos_tag(tokens) + + # Find first noun in query and provide Wikipedia summary for it + for tag in tagged: + if tag[1][0] == 'N': + print wikipedia.summary(tag[0]) + return if __name__ == '__main__':
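The tagging step added in this commit can be exercised on its own; note that nltk.word_tokenize and nltk.pos_tag need data packages downloaded first (the resource names below match older NLTK releases — newer ones use punkt_tab and averaged_perceptron_tagger_eng):

import nltk

nltk.download('punkt', quiet=True)  # one-time, cached afterwards
nltk.download('averaged_perceptron_tagger', quiet=True)

tokens = nltk.word_tokenize('show me a picture of the moon')
tagged = nltk.pos_tag(tokens)  # [('show', 'VB'), ..., ('moon', 'NN')]

# same heuristic as the script: the first tag starting with 'N' is the noun
noun = next(word for word, tag in tagged if tag.startswith('N'))
print(noun)  # 'picture'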
2a7ce1ac70f8767e9d2b2a9f1d335cfcc63a92b6
rplugin/python3/LanguageClient/logger.py
rplugin/python3/LanguageClient/logger.py
import logging import tempfile logger = logging.getLogger("LanguageClient") with tempfile.NamedTemporaryFile( prefix="LanguageClient-", suffix=".log", delete=False) as tmp: tmpname = tmp.name fileHandler = logging.FileHandler(filename=tmpname) fileHandler.setFormatter( logging.Formatter( "%(asctime)s %(levelname)-8s %(message)s", "%H:%M:%S")) logger.addHandler(fileHandler) logger.setLevel(logging.WARN)
import logging logger = logging.getLogger("LanguageClient") fileHandler = logging.FileHandler(filename="/tmp/LanguageClient.log") fileHandler.setFormatter( logging.Formatter( "%(asctime)s %(levelname)-8s %(message)s", "%H:%M:%S")) logger.addHandler(fileHandler) logger.setLevel(logging.WARN)
Revert "Use tempfile lib for log file"
Revert "Use tempfile lib for log file" This reverts commit 6e8f35b83fc563c8349cb3be040c61a0588ca745. The commit caused severer issue than it fixed. In case one need to check the content of log file, there is no way to tell where the log file location/name is.
Python
mit
autozimu/LanguageClient-neovim,autozimu/LanguageClient-neovim,autozimu/LanguageClient-neovim,autozimu/LanguageClient-neovim,autozimu/LanguageClient-neovim,autozimu/LanguageClient-neovim,autozimu/LanguageClient-neovim,autozimu/LanguageClient-neovim,autozimu/LanguageClient-neovim,autozimu/LanguageClient-neovim,autozimu/LanguageClient-neovim,autozimu/LanguageClient-neovim
--- +++ @@ -1,12 +1,7 @@ import logging -import tempfile logger = logging.getLogger("LanguageClient") -with tempfile.NamedTemporaryFile( - prefix="LanguageClient-", - suffix=".log", delete=False) as tmp: - tmpname = tmp.name -fileHandler = logging.FileHandler(filename=tmpname) +fileHandler = logging.FileHandler(filename="/tmp/LanguageClient.log") fileHandler.setFormatter( logging.Formatter( "%(asctime)s %(levelname)-8s %(message)s",
21a392df73324f111fa80e2fd8ce88b0e32c954c
python/algorithms/fibonacci.py
python/algorithms/fibonacci.py
def fib1(amount): """ Fibonacci generator example. The second variable is used to store the result. :param amount: Amount of numbers to produce. :return: Generator. >>> list(fib1(0)) [] >>> list(fib1(1)) [0] >>> list(fib1(3)) [0, 1, 1] >>> list(fib1(9)) [0, 1, 1, 2, 3, 5, 8, 13, 21] """ first, second = 0, 1 for _ in range(amount): yield first first, second = second + first, first def fib2(amount): """ Fibonacci generator example. The first variable is used to store the result. :param amount: Amount of numbers to produce. :return: Generator. >>> list(fib2(0)) [] >>> list(fib2(1)) [0] >>> list(fib2(3)) [0, 1, 1] >>> list(fib2(9)) [0, 1, 1, 2, 3, 5, 8, 13, 21] """ first, second = 1, 0 for _ in range(amount): first, second = second, first + second yield first if __name__ == '__main__': import doctest doctest.testmod()
"""Implementations calculation of Fibonacci numbers.""" def fib1(amount): """ Calculate Fibonacci numbers. The second variable is used to store the result. :param amount: Amount of numbers to produce. :return: Generator. >>> list(fib1(0)) [] >>> list(fib1(1)) [0] >>> list(fib1(3)) [0, 1, 1] >>> list(fib1(9)) [0, 1, 1, 2, 3, 5, 8, 13, 21] """ first, second = 0, 1 for _ in range(amount): yield first first, second = second + first, first def fib2(amount): """ Calculate Fibonacci numbers. The first variable is used to store the result. :param amount: Amount of numbers to produce. :return: Generator. >>> list(fib2(0)) [] >>> list(fib2(1)) [0] >>> list(fib2(3)) [0, 1, 1] >>> list(fib2(9)) [0, 1, 1, 2, 3, 5, 8, 13, 21] """ first, second = 1, 0 for _ in range(amount): first, second = second, first + second yield first if __name__ == '__main__': import doctest doctest.testmod()
Adjust doc strings in Fibonacci numbers implementation
Adjust doc strings in Fibonacci numbers implementation
Python
mit
pesh1983/exercises,pesh1983/exercises
--- +++ @@ -1,7 +1,11 @@ +"""Implementations calculation of Fibonacci numbers.""" + + def fib1(amount): """ - Fibonacci generator example. The second variable is used to store - the result. + Calculate Fibonacci numbers. + + The second variable is used to store the result. :param amount: Amount of numbers to produce. :return: Generator. @@ -22,8 +26,9 @@ def fib2(amount): """ - Fibonacci generator example. The first variable is used to store - the result. + Calculate Fibonacci numbers. + + The first variable is used to store the result. :param amount: Amount of numbers to produce. :return: Generator.
9c1190133a680717850a4d0f46a96591b7be4e33
autoencoder/api.py
autoencoder/api.py
from .io import preprocess from .train import train from .encode import encode def autoencode(count_matrix, kfold=None, reduced=False, censor_matrix=None, type='normal', learning_rate=1e-2, hidden_size=10, epochs=10): x = preprocess(count_matrix, kfold=kfold, censor=censor_matrix) model = train(x, hidden_size=hidden_size, learning_rate=learning_rate, aetype=type, epochs=epochs) encoded = encode(count_matrix, model, reduced=reduced) return encoded
from .io import preprocess from .train import train from .encode import encode def autoencode(count_matrix, kfold=None, reduced=False, mask=None, type='normal', learning_rate=1e-2, hidden_size=10, epochs=10): x = preprocess(count_matrix, kfold=kfold, mask=mask) model = train(x, hidden_size=hidden_size, learning_rate=learning_rate, aetype=type, epochs=epochs) encoded = encode(count_matrix, model, reduced=reduced) return encoded
Change mask parameter in API.
Change mask parameter in API.
Python
apache-2.0
theislab/dca,theislab/dca,theislab/dca
--- +++ @@ -4,12 +4,12 @@ def autoencode(count_matrix, kfold=None, reduced=False, - censor_matrix=None, type='normal', + mask=None, type='normal', learning_rate=1e-2, hidden_size=10, epochs=10): - x = preprocess(count_matrix, kfold=kfold, censor=censor_matrix) + x = preprocess(count_matrix, kfold=kfold, mask=mask) model = train(x, hidden_size=hidden_size, learning_rate=learning_rate, aetype=type, epochs=epochs) encoded = encode(count_matrix, model, reduced=reduced)
8d7e4cf37e73c1ff9827e94a06327921f553e2f4
learntools/computer_vision/ex4.py
learntools/computer_vision/ex4.py
from learntools.core import * import tensorflow as tf class Q1A(ThoughtExperiment): _solution = "" class Q1B(ThoughtExperiment): _solution = "" Q1 = MultipartProblem(Q1A, Q1B) class Q2A(ThoughtExperiment): _hint = r"Stacking the second layer expanded the receptive field by one neuron on each side, giving $3+1+1=5$ for each dimension. If you expanded by one neuron again, what would you get?" _solution = r"The third layer would have a $7 \times 7$ receptive field." class Q2B(ThoughtExperiment): _hint = r"This pooling layer collapses a $2 \times 2$ patch into a single pixel, effectively *doubling* the number of connections along each dimension. " _solution = r"Doubling a $7 \times 7$ field produces a $14 \times 14$ field for the final outputs." Q2 = MultipartProblem(Q2A, Q2B) class Q3(CodingProblem): _hint = "You just need a list of numbers, maybe three to five." _solution = CS(""" kernel = tf.constant([0.1, 0.2, 0.3, 0.4]) """) def check(self): pass qvars = bind_exercises(globals(), [ Q1, Q2, Q3, ], var_format='q_{n}', ) __all__ = list(qvars)
from learntools.core import * import tensorflow as tf # Free class Q1(CodingProblem): _solution = "" def check(self): pass class Q2A(ThoughtExperiment): _hint = r"Stacking the second layer expanded the receptive field by one neuron on each side, giving $3+1+1=5$ for each dimension. If you expanded by one neuron again, what would you get?" _solution = r"The third layer would have a $7 \times 7$ receptive field." class Q2B(ThoughtExperiment): _hint = r"This pooling layer collapses a $2 \times 2$ patch into a single pixel, effectively *doubling* the number of connections along each dimension. " _solution = r"Doubling a $7 \times 7$ field produces a $14 \times 14$ field for the final outputs." Q2 = MultipartProblem(Q2A, Q2B) class Q3(CodingProblem): _hint = "You just need a list of numbers, maybe three to five." _solution = CS(""" kernel = tf.constant([0.1, 0.2, 0.3, 0.4]) """) def check(self): pass qvars = bind_exercises(globals(), [ Q1, Q2, Q3, ], var_format='q_{n}', ) __all__ = list(qvars)
Change exercise 4 question 1
Change exercise 4 question 1
Python
apache-2.0
Kaggle/learntools,Kaggle/learntools
--- +++ @@ -2,14 +2,11 @@ import tensorflow as tf -class Q1A(ThoughtExperiment): +# Free +class Q1(CodingProblem): _solution = "" - - -class Q1B(ThoughtExperiment): - _solution = "" - -Q1 = MultipartProblem(Q1A, Q1B) + def check(self): + pass class Q2A(ThoughtExperiment): _hint = r"Stacking the second layer expanded the receptive field by one neuron on each side, giving $3+1+1=5$ for each dimension. If you expanded by one neuron again, what would you get?"
2ee895c61f546f83f4b7fa0c6a2ba72578c378be
problem_2/solution.py
problem_2/solution.py
f1, f2, s, n = 0, 1, 0, 4000000 while f2 < n: f2, f1 = f1, f1 + f2 if f2 % 2 == 0: s += f2 print s
def sum_even_fibonacci_numbers_1(): f1, f2, s, = 0, 1, 0, while f2 < 4000000: f2, f1 = f1, f1 + f2 if f2 % 2 == 0: s += f2 return s def sum_even_fibonacci_numbers_2(): s, a, b = 0, 1, 1 c = a + b while c < 4000000: s += c a = b + c b = a + c c = a + b return s
Add a second Python implementation of problem 2
Add a second Python implementation of problem 2
Python
mit
mdsrosa/project_euler,mdsrosa/project_euler,mdsrosa/project_euler,mdsrosa/project_euler,mdsrosa/project_euler,mdsrosa/project_euler,mdsrosa/project_euler,mdsrosa/project_euler
--- +++ @@ -1,6 +1,16 @@ -f1, f2, s, n = 0, 1, 0, 4000000 -while f2 < n: - f2, f1 = f1, f1 + f2 - if f2 % 2 == 0: - s += f2 -print s +def sum_even_fibonacci_numbers_1(): + f1, f2, s, = 0, 1, 0, + while f2 < 4000000: + f2, f1 = f1, f1 + f2 + if f2 % 2 == 0: + s += f2 + return s +def sum_even_fibonacci_numbers_2(): + s, a, b = 0, 1, 1 + c = a + b + while c < 4000000: + s += c + a = b + c + b = a + c + c = a + b + return s
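Both implementations above touch every Fibonacci number. Since every third Fibonacci number is even, the even terms alone satisfy E(n) = 4·E(n-1) + E(n-2) (e.g. 34 = 4·8 + 2), which suggests a third variant that skips the odd terms entirely — a sketch, not part of the repository:

def sum_even_fibonacci_numbers_3(limit=4000000):
    # even Fibonacci numbers 2, 8, 34, 144, ... via E(n) = 4*E(n-1) + E(n-2)
    s, a, b = 0, 2, 8
    while a < limit:
        s += a
        a, b = b, 4 * b + a
    return s

print(sum_even_fibonacci_numbers_3())  # 4613732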
5523ae2278bb0ca055ef7a6e218ac40ed4172bf3
webapp/byceps/blueprints/ticket/service.py
webapp/byceps/blueprints/ticket/service.py
# -*- coding: utf-8 -*- """ byceps.blueprints.ticket.service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :Copyright: 2006-2015 Jochen Kupperschmidt """ from ..party.models import Party from ..seating.models import Category from .models import Ticket def find_ticket_for_user(user, party): """Return the ticket used by the user for the party, or `None` if not found. """ if user.is_anonymous: return None return Ticket.query \ .filter(Ticket.used_by == user) \ .for_party(party) \ .first() def get_attended_parties(user): """Return the parties the user has attended.""" return Party.query \ .join(Category).join(Ticket).filter(Ticket.used_by == user) \ .all()
# -*- coding: utf-8 -*- """ byceps.blueprints.ticket.service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :Copyright: 2006-2015 Jochen Kupperschmidt """ from ...database import db from ..party.models import Party from ..seating.models import Category from .models import Ticket def find_ticket_for_user(user, party): """Return the ticket used by the user for the party, or `None` if not found. """ if user.is_anonymous: return None return Ticket.query \ .filter(Ticket.used_by == user) \ .options( db.joinedload_all('occupied_seat.area'), ) \ .for_party(party) \ .first() def get_attended_parties(user): """Return the parties the user has attended.""" return Party.query \ .join(Category).join(Ticket).filter(Ticket.used_by == user) \ .all()
Save a few SQL queries.
Save a few SQL queries.
Python
bsd-3-clause
m-ober/byceps,homeworkprod/byceps,homeworkprod/byceps,homeworkprod/byceps,m-ober/byceps,m-ober/byceps
--- +++ @@ -6,6 +6,8 @@ :Copyright: 2006-2015 Jochen Kupperschmidt """ + +from ...database import db from ..party.models import Party from ..seating.models import Category @@ -22,6 +24,9 @@ return Ticket.query \ .filter(Ticket.used_by == user) \ + .options( + db.joinedload_all('occupied_seat.area'), + ) \ .for_party(party) \ .first()
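The db.joinedload_all('occupied_seat.area') option above is old-style SQLAlchemy API; the idea — loading a relationship chain in the same SELECT so later attribute access fires no extra queries — can be sketched self-contained with invented models, assuming SQLAlchemy 1.4+ where loader options chain on attributes:

from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base, joinedload, relationship

Base = declarative_base()

class Area(Base):
    __tablename__ = 'area'
    id = Column(Integer, primary_key=True)
    name = Column(String)

class Seat(Base):
    __tablename__ = 'seat'
    id = Column(Integer, primary_key=True)
    area_id = Column(Integer, ForeignKey('area.id'))
    area = relationship(Area)

class Ticket(Base):
    __tablename__ = 'ticket'
    id = Column(Integer, primary_key=True)
    seat_id = Column(Integer, ForeignKey('seat.id'))
    occupied_seat = relationship(Seat)

engine = create_engine('sqlite://', echo=True)
Base.metadata.create_all(engine)

with Session(engine) as session:
    seat = Seat(area=Area(name='Hall 1'))
    session.add(Ticket(occupied_seat=seat))
    session.commit()

    ticket = (
        session.query(Ticket)
        .options(joinedload(Ticket.occupied_seat).joinedload(Seat.area))
        .first()
    )
    print(ticket.occupied_seat.area.name)  # Hall 1

With echo=True the log shows a single SELECT with two LEFT OUTER JOINs; without the options() call, ticket.occupied_seat and seat.area would each lazy-load with their own query.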
7dc08364cbe513ce4b81483d9330789f5893fcee
Challenges/chall_03.py
Challenges/chall_03.py
#!/usr/local/bin/python3 # Python challenge - 3 # http://www.pythonchallenge.com/pc/def/equality.html import re ''' Hint: One small letter surrounded by EXACTLY three big bodyguards on each of its sides. ''' def main(): with open('bodyguard.txt', 'r') as bodyguard: pattern = re.compile(r'[^A-Z][A-Z]{3}([a-z])[A-Z]{3}[^A-Z]') littles = '' text = bodyguard.read() littles = re.findall(pattern, text) print(''.join(littles)) return 0 # Keyword: linkedlist if __name__ == '__main__': main()
#!/usr/local/bin/python3 # Python challenge - 3 # http://www.pythonchallenge.com/pc/def/equality.html # Keyword: linkedlist import re def main(): ''' Hint: One small letter surrounded by EXACTLY three big bodyguards on each of its sides. Page source text saved in bodyguard.txt ''' with open('bodyguard.txt', 'r') as bodyguard: pattern = re.compile(r'[^A-Z][A-Z]{3}([a-z])[A-Z]{3}[^A-Z]') littles = '' text = bodyguard.read() littles = re.findall(pattern, text) print(''.join(littles)) return 0 if __name__ == '__main__': main()
Refactor code, add hints from page
Refactor code, add hints from page
Python
mit
HKuz/PythonChallenge
--- +++ @@ -1,18 +1,18 @@ #!/usr/local/bin/python3 # Python challenge - 3 # http://www.pythonchallenge.com/pc/def/equality.html +# Keyword: linkedlist import re -''' -Hint: -One small letter surrounded by EXACTLY three big bodyguards on each of -its sides. -''' - - def main(): + ''' + Hint: + One small letter surrounded by EXACTLY three big bodyguards on each of + its sides. + Page source text saved in bodyguard.txt + ''' with open('bodyguard.txt', 'r') as bodyguard: pattern = re.compile(r'[^A-Z][A-Z]{3}([a-z])[A-Z]{3}[^A-Z]') littles = '' @@ -22,8 +22,6 @@ print(''.join(littles)) return 0 -# Keyword: linkedlist - if __name__ == '__main__': main()
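The capture pattern above is easier to see on a tiny made-up input than on the challenge's page source:

import re

pattern = re.compile(r'[^A-Z][A-Z]{3}([a-z])[A-Z]{3}[^A-Z]')

# 'e' and 'q' are each guarded by exactly three capitals on both sides;
# 'x' is rejected because four capitals precede it
text = 'kKKKeKKKu...AAAqBBBz...mNNNNxNNNo'
print(re.findall(pattern, text))  # ['e', 'q']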
da85d9660166f67133b10953104ccd81b89d0b92
micawber/cache.py
micawber/cache.py
from __future__ import with_statement import os import pickle from contextlib import closing try: from redis import Redis except ImportError: Redis = None class Cache(object): def __init__(self): self._cache = {} def get(self, k): return self._cache.get(k) def set(self, k, v): self._cache[k] = v class PickleCache(Cache): def __init__(self, filename='cache.db'): self.filename = filename self._cache = self.load() def load(self): if os.path.exists(self.filename): with closing(open(self.filename, 'rb')) as fh: return pickle.load(fh) return {} def save(self): with closing(open(self.filename, 'wb')) as fh: pickle.dump(self._cache, fh) if Redis: class RedisCache(Cache): def __init__(self, namespace='micawber', **conn): self.namespace = namespace self.key_fn = lambda self, k: '%s.%s' % (self.namespace, k) self.conn = Redis(**conn) def get(self, k): cached = self.conn.get(self.key_fn(k)) if cached: return pickle.loads(cached) def set(self, k, v): self.conn.set(self.key_fn(k), pickle.dumps(v))
from __future__ import with_statement import os import pickle try: from redis import Redis except ImportError: Redis = None class Cache(object): def __init__(self): self._cache = {} def get(self, k): return self._cache.get(k) def set(self, k, v): self._cache[k] = v class PickleCache(Cache): def __init__(self, filename='cache.db'): self.filename = filename self._cache = self.load() def load(self): if os.path.exists(self.filename): with open(self.filename, 'rb') as fh: return pickle.load(fh) return {} def save(self): with open(self.filename, 'wb') as fh: pickle.dump(self._cache, fh) if Redis: class RedisCache(Cache): def __init__(self, namespace='micawber', **conn): self.namespace = namespace self.key_fn = lambda self, k: '%s.%s' % (self.namespace, k) self.conn = Redis(**conn) def get(self, k): cached = self.conn.get(self.key_fn(k)) if cached: return pickle.loads(cached) def set(self, k, v): self.conn.set(self.key_fn(k), pickle.dumps(v))
Remove a redundant use of contextlib.closing() decorator
Remove a redundant use of contextlib.closing() decorator Remove the unnecessary contextlib.closing() decorators from open() calls. The file objects returned by open() provide context manager API themselves and closing() is only necessary for external file-like objects that do not support it. This should work even in Python 2.6, see: https://docs.python.org/2.6/library/stdtypes.html#file.close
Python
mit
coleifer/micawber,coleifer/micawber
--- +++ @@ -1,7 +1,6 @@ from __future__ import with_statement import os import pickle -from contextlib import closing try: from redis import Redis except ImportError: @@ -26,12 +25,12 @@ def load(self): if os.path.exists(self.filename): - with closing(open(self.filename, 'rb')) as fh: + with open(self.filename, 'rb') as fh: return pickle.load(fh) return {} def save(self): - with closing(open(self.filename, 'wb')) as fh: + with open(self.filename, 'wb') as fh: pickle.dump(self._cache, fh)
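The removal above is safe because file objects returned by open() are context managers themselves; closing() remains the right tool for objects that only expose close(). Both cases side by side (LegacyConnection is invented for illustration):

from contextlib import closing

class LegacyConnection:
    # has close() but no __enter__/__exit__, so it needs closing()
    def close(self):
        print('closed')

with open('demo.txt', 'w') as fh:  # open() needs no wrapper
    fh.write('hello')

with closing(LegacyConnection()) as conn:
    pass  # prints 'closed' on exit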
2c7dc769874766b230bc11c7ec6f67d3c1157005
duplicatefiledir/__init__.py
duplicatefiledir/__init__.py
from fman import DirectoryPaneCommand, show_alert import distutils from distutils import dir_util, file_util import os.path class DuplicateFileDir(DirectoryPaneCommand): def __call__(self): selected_files = self.pane.get_selected_files() if len(selected_files) >= 1 or (len(selected_files) == 0 and self.get_chosen_files()): if len(selected_files) == 0 and self.get_chosen_files(): selected_files.append(self.get_chosen_files()[0]) # # Loop through each file/directory selected. # for filedir in selected_files: if os.path.isdir(filedir): # # It is a directory. Process as a directory. # newDir = filedir + "-copy" distutils.dir_util.copy_tree(filedir,newDir) else: # # It is a file. Process as a file. # dirPath, ofilenmc = os.path.split(filedir) ofilenm, ext = os.path.splitext(ofilenmc) nfilenm = os.path.join(dirPath,ofilenm + "-copy" + ext) distutils.file_util.copy_file(filedir,nfilenm)
from fman import DirectoryPaneCommand, show_alert from urllib.parse import urlparse import os.path from shutil import copytree, copyfile class DuplicateFileDir(DirectoryPaneCommand): def __call__(self): selected_files = self.pane.get_selected_files() if len(selected_files) >= 1 or (len(selected_files) == 0 and self.get_chosen_files()): if len(selected_files) == 0 and self.get_chosen_files(): selected_files.append(self.get_chosen_files()[0]) # # Loop through each file/directory selected. # for filedir in selected_files: p = urlparse(filedir) filepath = os.path.abspath(os.path.join(p.netloc, p.path)) if os.path.isdir(filepath): # # It is a directory. Process as a directory. # newDir = filepath + "-copy" copytree(filepath, newDir) else: if os.path.isfile(filepath): # # It is a file. Process as a file. # dirPath, ofilenmc = os.path.split(filepath) ofilenm, ext = os.path.splitext(ofilenmc) nfilenm = os.path.join(dirPath,ofilenm + "-copy" + ext) copyfile(filepath, nfilenm) else: show_alert('Bad file path : {0}'.format(filepath))
Make it work with the latest fman version (0.7) on Linux
Make it work with the latest fman version (0.7) on Linux
Python
mit
raguay/DuplicateFileDir
--- +++ @@ -1,7 +1,7 @@ from fman import DirectoryPaneCommand, show_alert -import distutils -from distutils import dir_util, file_util +from urllib.parse import urlparse import os.path +from shutil import copytree, copyfile class DuplicateFileDir(DirectoryPaneCommand): def __call__(self): @@ -13,17 +13,22 @@ # Loop through each file/directory selected. # for filedir in selected_files: - if os.path.isdir(filedir): + p = urlparse(filedir) + filepath = os.path.abspath(os.path.join(p.netloc, p.path)) + if os.path.isdir(filepath): # # It is a directory. Process as a directory. # - newDir = filedir + "-copy" - distutils.dir_util.copy_tree(filedir,newDir) + newDir = filepath + "-copy" + copytree(filepath, newDir) else: - # - # It is a file. Process as a file. - # - dirPath, ofilenmc = os.path.split(filedir) - ofilenm, ext = os.path.splitext(ofilenmc) - nfilenm = os.path.join(dirPath,ofilenm + "-copy" + ext) - distutils.file_util.copy_file(filedir,nfilenm) + if os.path.isfile(filepath): + # + # It is a file. Process as a file. + # + dirPath, ofilenmc = os.path.split(filepath) + ofilenm, ext = os.path.splitext(ofilenmc) + nfilenm = os.path.join(dirPath,ofilenm + "-copy" + ext) + copyfile(filepath, nfilenm) + else: + show_alert('Bad file path : {0}'.format(filepath))
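The core of the fix is converting fman's URL-style paths into filesystem paths; that step in isolation, with an invented sample URL:

import os.path
from urllib.parse import urlparse

p = urlparse('file:///home/user/projects/demo.txt')
# scheme='file', netloc='', path='/home/user/projects/demo.txt'

filepath = os.path.abspath(os.path.join(p.netloc, p.path))
print(filepath)  # /home/user/projects/demo.txt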
2f80f786be8e0d235dcb98c4fa562bfe2b9e783f
jobs/spiders/visir.py
jobs/spiders/visir.py
import dateutil.parser import scrapy from jobs.items import JobsItem class VisirSpider(scrapy.Spider): name = "visir" start_urls = ['https://job.visir.is/search-results-jobs/'] def parse(self, response): for job in response.css('.thebox'): info = job.css('a')[1] item = JobsItem() item['spider'] = self.name item['url'] = url = info.css('a::attr(href)').extract_first() item['posted'] = dateutil.parser.parse(job.css('td::text').re(r'[\d.]+')[0]).isoformat() request = scrapy.Request(url, callback=self.parse_specific_job) request.meta['item'] = item yield request next_page = response.urljoin(response.css('.nextBtn a::attr(href)').extract_first()) if next_page != response.url: yield scrapy.Request(next_page, callback=self.parse) def parse_specific_job(self, response): item = response.meta['item'] item['company'] = response.css('.company-name::text').extract_first() item['title'] = response.css('h2::text').extract_first() yield item
import dateutil.parser import scrapy from jobs.items import JobsItem class VisirSpider(scrapy.Spider): name = "visir" start_urls = ['https://job.visir.is/search-results-jobs/'] def parse(self, response): for job in response.css('.thebox'): info = job.css('a')[1] item = JobsItem() item['spider'] = self.name item['url'] = url = info.css('a::attr(href)').extract_first() item['posted'] = dateutil.parser.parse(job.css('td::text').re(r'[\d.]+')[0], dayfirst=False).isoformat() request = scrapy.Request(url, callback=self.parse_specific_job) request.meta['item'] = item yield request next_page = response.urljoin(response.css('.nextBtn a::attr(href)').extract_first()) if next_page != response.url: yield scrapy.Request(next_page, callback=self.parse) def parse_specific_job(self, response): item = response.meta['item'] item['company'] = response.css('.company-name::text').extract_first() item['title'] = response.css('h2::text').extract_first() yield item
Fix parsing of dates for Visir.
Fix parsing of dates for Visir. Some dates are being wrongly parsed, so we need to specify some information about the order of things.
Python
apache-2.0
multiplechoice/workplace
--- +++ @@ -15,7 +15,7 @@ item = JobsItem() item['spider'] = self.name item['url'] = url = info.css('a::attr(href)').extract_first() - item['posted'] = dateutil.parser.parse(job.css('td::text').re(r'[\d.]+')[0]).isoformat() + item['posted'] = dateutil.parser.parse(job.css('td::text').re(r'[\d.]+')[0], dayfirst=False).isoformat() request = scrapy.Request(url, callback=self.parse_specific_job) request.meta['item'] = item
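The one-argument change is easy to demonstrate: dateutil guesses the field order of dotted dates, and dayfirst pins it down (the date below is arbitrary):

import dateutil.parser

s = '10.03.2017'
print(dateutil.parser.parse(s, dayfirst=True))   # 2017-03-10 00:00:00
print(dateutil.parser.parse(s, dayfirst=False))  # 2017-10-03 00:00:00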
a24d6a25cb7ee5101e8131a9719744f79b23c11b
examples/quotes/quotes.py
examples/quotes/quotes.py
import sys print(sys.version_info) import random import time import networkzero as nw0 quotes = [ "Humpty Dumpty sat on a wall", "Hickory Dickory Dock", "Baa Baa Black Sheep", "Old King Cole was a merry old sould", ] def main(address_pattern=None): my_name = input("Name: ") my_address = nw0.advertise(my_name, address_pattern) print("Advertising %s on %s" % (my_name, my_address)) while True: services = [(name, address) for (name, address) in nw0.discover_all() if name != my_name] for name, address in services: topic, message = nw0.wait_for_notification(address, "quote", wait_for_s=0) if topic: print("%s says: %s" % (name, message)) quote = random.choice(quotes) nw0.send_notification(address, "quote", quote) time.sleep(0.5) if __name__ == '__main__': main(*sys.argv[1:])
import sys print(sys.version_info) import random import time import networkzero as nw0 quotes = [ "Humpty Dumpty sat on a wall", "Hickory Dickory Dock", "Baa Baa Black Sheep", "Old King Cole was a merry old sould", ] def main(address_pattern=None): my_name = input("Name: ") my_address = nw0.advertise(my_name, address_pattern) print("Advertising %s on %s" % (my_name, my_address)) while True: services = [(name, address) for (name, address) in nw0.discover_all() if name != my_name] for name, address in services: topic, message = nw0.wait_for_notification(address, "quote", wait_for_s=0) if topic: print("%s says: %s" % (name, message)) quote = random.choice(quotes) nw0.send_notification(my_address, "quote", quote) time.sleep(1) if __name__ == '__main__': main(*sys.argv[1:])
Send notification to the correct address
Send notification to the correct address
Python
mit
tjguk/networkzero,tjguk/networkzero,tjguk/networkzero
--- +++ @@ -19,15 +19,16 @@ while True: services = [(name, address) for (name, address) in nw0.discover_all() if name != my_name] - + for name, address in services: topic, message = nw0.wait_for_notification(address, "quote", wait_for_s=0) if topic: print("%s says: %s" % (name, message)) - quote = random.choice(quotes) - nw0.send_notification(address, "quote", quote) - time.sleep(0.5) + quote = random.choice(quotes) + nw0.send_notification(my_address, "quote", quote) + + time.sleep(1) if __name__ == '__main__': main(*sys.argv[1:])
0f95070880f40456fbb6d7b7ccd6e999cc6fb95a
dropbox_conflict_resolver.py
dropbox_conflict_resolver.py
import os import re ''' This is used to revert back a Dropbox conflict. So in this case I want to keep all the files that where converted to conflict copies. So I just strip out the conflict string ie (some computer names's conflict copy some date) .ext and remove that conflict part of the string, and overate the original file by that name. ''' for root, dirs, files, in os.walk(r"path to your drop box file with conflicts"): for file in files: file_matcher = re.search(r"(.+) (\(.+'s conflicted copy [0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]*\))(.+)?", file) if file_matcher: full_path = os.path.join(root, file) conflict_file_name = file_matcher.group(0) clean_file_name = file_matcher.group(1) conflict_string = file_matcher.group(2) file_ext = file_matcher.group(3) new_name_file_name = clean_file_name if file_ext: new_name_file_name += file_ext new_path = os.path.join(root, new_name_file_name) print("from: " + full_path + " to: " + new_path) os.replace(full_path, new_path)
import os import re ''' This is used to revert back a Dropbox conflict. So in this case I want to keep all the files that were converted to conflict copies. So I just strip out the conflict string ie (some computer names's conflict copy some date) .ext and remove that conflict part of the string, and override the original file by that name. ''' for root, dirs, files, in os.walk(r"path to your drop box file with conflicts"): for file in files: file_matcher = re.search(r"(.+) (\(.+'s conflicted copy [0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]*\))(.+)?", file) if file_matcher: full_path = os.path.join(root, file) conflict_file_name = file_matcher.group(0) clean_file_name = file_matcher.group(1) conflict_string = file_matcher.group(2) file_ext = file_matcher.group(3) new_name_file_name = clean_file_name if file_ext: new_name_file_name += file_ext new_path = os.path.join(root, new_name_file_name) print("from: " + full_path + " to: " + new_path) os.replace(full_path, new_path)
Fix a couple of typos in the program description
Fix a couple of typos in the program description
Python
apache-2.0
alexwhb/Dropbox-bulk-conflict-resolver
--- +++ @@ -2,9 +2,9 @@ import re ''' - This is used to revert back a Dropbox conflict. So in this case I want to keep all the files that where + This is used to revert back a Dropbox conflict. So in this case I want to keep all the files that were converted to conflict copies. So I just strip out the conflict string ie (some computer names's conflict copy some date) .ext - and remove that conflict part of the string, and overate the original file by that name. + and remove that conflict part of the string, and override the original file by that name. ''' for root, dirs, files, in os.walk(r"path to your drop box file with conflicts"): for file in files:
be07a935d041a6c2d1c641f9beebe1bb49891682
cooler/cli/__init__.py
cooler/cli/__init__.py
# -*- coding: utf-8 -*- from __future__ import division, print_function import logging import sys import click from .. import __version__, get_logger logging.basicConfig(stream=sys.stderr) logger = get_logger() logger.setLevel(logging.INFO) # Monkey patch click.core._verify_python3_env = lambda: None CONTEXT_SETTINGS = { 'help_option_names': ['-h', '--help'], } @click.version_option(version=__version__) @click.group(context_settings=CONTEXT_SETTINGS) @click.option( '--debug/--no-debug', help="Verbose logging", default=False) def cli(debug): if debug: logger.setLevel(logging.DEBUG) from . import ( makebins, digest, csort, cload, load, merge, copy, list_, info, dump, balance, aggregate, show, )
# -*- coding: utf-8 -*- from __future__ import division, print_function import logging import sys import click from .. import __version__, get_logger logging.basicConfig(stream=sys.stderr) logger = get_logger() logger.setLevel(logging.INFO) # Monkey patch click.core._verify_python3_env = lambda: None CONTEXT_SETTINGS = { 'help_option_names': ['-h', '--help'], } @click.version_option(version=__version__) @click.group(context_settings=CONTEXT_SETTINGS) @click.option( '--debug/--no-debug', help="Verbose logging", default=False) @click.option( '-pm', '--post-mortem', help="Post mortem debugging", is_flag=True, default=False) def cli(debug, post_mortem): if debug: logger.setLevel(logging.DEBUG) if post_mortem: import traceback try: import ipdb as pdb except ImportError: import pdb def _excepthook(exc_type, value, tb): traceback.print_exception(exc_type, value, tb) print() pdb.pm() sys.excepthook = _excepthook from . import ( makebins, digest, csort, cload, load, merge, copy, list_, info, dump, balance, aggregate, show, )
Add postmortem debugging option to CLI
Add postmortem debugging option to CLI
Python
bsd-3-clause
mirnylab/cooler
--- +++ @@ -26,9 +26,26 @@ '--debug/--no-debug', help="Verbose logging", default=False) -def cli(debug): +@click.option( + '-pm', '--post-mortem', + help="Post mortem debugging", + is_flag=True, + default=False) +def cli(debug, post_mortem): if debug: logger.setLevel(logging.DEBUG) + + if post_mortem: + import traceback + try: + import ipdb as pdb + except ImportError: + import pdb + def _excepthook(exc_type, value, tb): + traceback.print_exception(exc_type, value, tb) + print() + pdb.pm() + sys.excepthook = _excepthook from . import (
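The hook added above can be reproduced outside of click; run as a script, this drops into the debugger at the crash site (pdb.post_mortem(tb) is used here because it takes the traceback explicitly, where the pm() variant relies on sys.last_traceback):

import sys
import traceback

try:
    import ipdb as pdb  # nicer prompt if installed
except ImportError:
    import pdb

def _excepthook(exc_type, value, tb):
    traceback.print_exception(exc_type, value, tb)
    pdb.post_mortem(tb)

sys.excepthook = _excepthook

def broken():
    return 1 / 0

broken()  # uncaught ZeroDivisionError -> debugger prompt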
723ae54f260284aad442f076772189cb5820d62e
devtools/ci/push-docs-to-s3.py
devtools/ci/push-docs-to-s3.py
import os import pip import tempfile import subprocess import opentis.version BUCKET_NAME = 'openpathsampling.org' if not opentis.version.release: PREFIX = 'latest' else: PREFIX = opentis.version.short_version PREFIX = '' if not any(d.project_name == 's3cmd' for d in pip.get_installed_distributions()): raise ImportError('The s3cmd pacakge is required. try $ pip install s3cmd') # The secret key is available as a secure environment variable # on travis-ci to push the build documentation to Amazon S3. with tempfile.NamedTemporaryFile('w') as f: f.write('''[default] access_key = {AWS_ACCESS_KEY_ID} secret_key = {AWS_SECRET_ACCESS_KEY} '''.format(**os.environ)) f.flush() template = ('s3cmd --config {config} ' 'sync docs/_build/ s3://{bucket}/{prefix}/') cmd = template.format( config=f.name, bucket=BUCKET_NAME ) return_val = subprocess.call(cmd.split()) # Sync index file. template = ('s3cmd --config {config} ' 'sync devtools/ci/index.html s3://{bucket}/') cmd = template.format( config=f.name, bucket=BUCKET_NAME ) return_val = subprocess.call(cmd.split())
import os import pip import tempfile import subprocess import opentis.version BUCKET_NAME = 'openpathsampling.org' if not opentis.version.release: PREFIX = 'latest' else: PREFIX = opentis.version.short_version PREFIX = '' if not any(d.project_name == 's3cmd' for d in pip.get_installed_distributions()): raise ImportError('The s3cmd pacakge is required. try $ pip install s3cmd') # The secret key is available as a secure environment variable # on travis-ci to push the build documentation to Amazon S3. with tempfile.NamedTemporaryFile('w') as f: f.write('''[default] access_key = {AWS_ACCESS_KEY_ID} secret_key = {AWS_SECRET_ACCESS_KEY} '''.format(**os.environ)) f.flush() template = ('s3cmd --config {config} ' 'sync docs/_build/ s3://{bucket}/{prefix}/') cmd = template.format( config=f.name, bucket=BUCKET_NAME, prefix=PREFIX) return_val = subprocess.call(cmd.split()) # Sync index file. template = ('s3cmd --config {config} ' 'sync devtools/ci/index.html s3://{bucket}/') cmd = template.format( config=f.name, bucket=BUCKET_NAME) return_val = subprocess.call(cmd.split())
Fix for PREFIX omission in S3 push
Fix for PREFIX omission in S3 push
Python
mit
dwhswenson/openpathsampling,jhprinz/openpathsampling,choderalab/openpathsampling,openpathsampling/openpathsampling,choderalab/openpathsampling,dwhswenson/openpathsampling,jhprinz/openpathsampling,openpathsampling/openpathsampling,openpathsampling/openpathsampling,dwhswenson/openpathsampling,openpathsampling/openpathsampling,dwhswenson/openpathsampling,choderalab/openpathsampling,jhprinz/openpathsampling
--- +++ @@ -28,8 +28,8 @@ 'sync docs/_build/ s3://{bucket}/{prefix}/') cmd = template.format( config=f.name, - bucket=BUCKET_NAME - ) + bucket=BUCKET_NAME, + prefix=PREFIX) return_val = subprocess.call(cmd.split()) # Sync index file. @@ -37,7 +37,6 @@ 'sync devtools/ci/index.html s3://{bucket}/') cmd = template.format( config=f.name, - bucket=BUCKET_NAME - ) + bucket=BUCKET_NAME) return_val = subprocess.call(cmd.split())
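What this commit repairs is a plain str.format error: a template naming a field that is never supplied raises at format time, so the old docs-sync call crashed rather than producing a URL with the prefix in it:

template = 's3cmd --config {config} sync docs/_build/ s3://{bucket}/{prefix}/'

try:
    template.format(config='cfg', bucket='openpathsampling.org')
except KeyError as e:
    print('missing field:', e)  # missing field: 'prefix'

print(template.format(config='cfg', bucket='openpathsampling.org', prefix='latest'))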
eb391dde8a157252a98fc9bb9b617bc821f7285a
email_from_template/utils.py
email_from_template/utils.py
from django.utils.functional import memoize from . import app_settings def get_render_method(): return from_dotted_path(app_settings.EMAIL_RENDER_METHOD) get_render_method = memoize(get_render_method, {}, 0) def get_context_processors(): return [from_dotted_path(x) for x in app_settings.EMAIL_CONTEXT_PROCESSORS] get_context_processors = memoize(get_context_processors, {}, 0) def from_dotted_path(fullpath): """ Returns the specified attribute of a module, specified by a string. ``from_dotted_path('a.b.c.d')`` is roughly equivalent to:: from a.b.c import d except that ``d`` is returned and not entered into the current namespace. """ module, attr = fullpath.rsplit('.', 1) return getattr(__import__(module, {}, {}, (attr,)), attr)
from django.utils.lru_cache import lru_cache from . import app_settings @lru_cache def get_render_method(): return from_dotted_path(app_settings.EMAIL_RENDER_METHOD) @lru_cache def get_context_processors(): return [from_dotted_path(x) for x in app_settings.EMAIL_CONTEXT_PROCESSORS] def from_dotted_path(fullpath): """ Returns the specified attribute of a module, specified by a string. ``from_dotted_path('a.b.c.d')`` is roughly equivalent to:: from a.b.c import d except that ``d`` is returned and not entered into the current namespace. """ module, attr = fullpath.rsplit('.', 1) return getattr(__import__(module, {}, {}, (attr,)), attr)
Use @lru_cache now that memoize is gone.
Use @lru_cache now that memoize is gone.
Python
bsd-3-clause
lamby/django-email-from-template
--- +++ @@ -1,14 +1,14 @@ -from django.utils.functional import memoize +from django.utils.lru_cache import lru_cache from . import app_settings +@lru_cache def get_render_method(): return from_dotted_path(app_settings.EMAIL_RENDER_METHOD) -get_render_method = memoize(get_render_method, {}, 0) +@lru_cache def get_context_processors(): return [from_dotted_path(x) for x in app_settings.EMAIL_CONTEXT_PROCESSORS] -get_context_processors = memoize(get_context_processors, {}, 0) def from_dotted_path(fullpath): """
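The standalone equivalent uses functools directly rather than the (since removed) django.utils shim; maxsize=None mirrors memoize's unbounded cache, and the parentheses keep the decorator working on Pythons older than 3.8:

from functools import lru_cache

@lru_cache(maxsize=None)
def get_render_method():
    print('computed once')
    return object()

a = get_render_method()  # prints 'computed once'
b = get_render_method()  # cache hit, nothing printed
assert a is b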
75af7171d0245b528018c8e0d0d581916a9dc67d
examples/profilealignment.py
examples/profilealignment.py
# Create sequences to be aligned. from alignment.sequence import Sequence a = Sequence("what a beautiful day".split()) b = Sequence("what a disappointingly bad day".split()) print "Sequence A:", a print "Sequence B:", b print # Create a vocabulary and encode the sequences. from alignment.vocabulary import Vocabulary v = Vocabulary() aEncoded = v.encodeSequence(a) bEncoded = v.encodeSequence(b) print "Encoded A:", aEncoded print "Encoded B:", bEncoded print # Create a scoring and align the sequences using global aligner. from alignment.sequencealigner import SimpleScoring, GlobalSequenceAligner scoring = SimpleScoring(2, -1) aligner = GlobalSequenceAligner(scoring, -2) score, alignments = aligner.align(aEncoded, bEncoded, backtrace=True) # Create sequence profiles out of alignments. from alignment.profile import Profile profiles = [Profile.fromSequenceAlignment(a) for a in alignments] for encoded in profiles: profile = v.decodeProfile(encoded) print profile print # Create a soft scoring and align the first profile against sequence A. from alignment.profilealigner import SoftScoring, GlobalProfileAligner scoring = SoftScoring(scoring) aligner = GlobalProfileAligner(scoring, -2) score, alignments = aligner.align(profiles[0], Profile.fromSequence(aEncoded), backtrace=True) for encoded in alignments: alignment = v.decodeProfileAlignment(encoded) print alignment
from alignment.sequence import Sequence from alignment.vocabulary import Vocabulary from alignment.sequencealigner import SimpleScoring, GlobalSequenceAligner from alignment.profile import Profile from alignment.profilealigner import SoftScoring, GlobalProfileAligner # Create sequences to be aligned. a = Sequence('what a beautiful day'.split()) b = Sequence('what a disappointingly bad day'.split()) print 'Sequence A:', a print 'Sequence B:', b print # Create a vocabulary and encode the sequences. v = Vocabulary() aEncoded = v.encodeSequence(a) bEncoded = v.encodeSequence(b) print 'Encoded A:', aEncoded print 'Encoded B:', bEncoded print # Create a scoring and align the sequences using global aligner. scoring = SimpleScoring(2, -1) aligner = GlobalSequenceAligner(scoring, -2) score, alignments = aligner.align(aEncoded, bEncoded, backtrace=True) # Create sequence profiles out of alignments. profiles = [Profile.fromSequenceAlignment(a) for a in alignments] for encoded in profiles: profile = v.decodeProfile(encoded) print profile print # Create a soft scoring and align the first profile against sequence A. scoring = SoftScoring(scoring) aligner = GlobalProfileAligner(scoring, -2) score, alignments = aligner.align(profiles[0], Profile.fromSequence(aEncoded), backtrace=True) for encoded in alignments: alignment = v.decodeProfileAlignment(encoded) print alignment
Update the profile alignment example.
Update the profile alignment example.
Python
bsd-3-clause
eseraygun/python-entities,eseraygun/python-alignment
--- +++ @@ -1,40 +1,42 @@ +from alignment.sequence import Sequence +from alignment.vocabulary import Vocabulary +from alignment.sequencealigner import SimpleScoring, GlobalSequenceAligner +from alignment.profile import Profile +from alignment.profilealigner import SoftScoring, GlobalProfileAligner + + # Create sequences to be aligned. -from alignment.sequence import Sequence -a = Sequence("what a beautiful day".split()) -b = Sequence("what a disappointingly bad day".split()) -print "Sequence A:", a -print "Sequence B:", b +a = Sequence('what a beautiful day'.split()) +b = Sequence('what a disappointingly bad day'.split()) +print 'Sequence A:', a +print 'Sequence B:', b print # Create a vocabulary and encode the sequences. -from alignment.vocabulary import Vocabulary v = Vocabulary() aEncoded = v.encodeSequence(a) bEncoded = v.encodeSequence(b) -print "Encoded A:", aEncoded -print "Encoded B:", bEncoded +print 'Encoded A:', aEncoded +print 'Encoded B:', bEncoded print # Create a scoring and align the sequences using global aligner. -from alignment.sequencealigner import SimpleScoring, GlobalSequenceAligner scoring = SimpleScoring(2, -1) aligner = GlobalSequenceAligner(scoring, -2) score, alignments = aligner.align(aEncoded, bEncoded, backtrace=True) # Create sequence profiles out of alignments. -from alignment.profile import Profile profiles = [Profile.fromSequenceAlignment(a) for a in alignments] for encoded in profiles: - profile = v.decodeProfile(encoded) - print profile + profile = v.decodeProfile(encoded) + print profile print # Create a soft scoring and align the first profile against sequence A. -from alignment.profilealigner import SoftScoring, GlobalProfileAligner scoring = SoftScoring(scoring) aligner = GlobalProfileAligner(scoring, -2) -score, alignments = aligner.align(profiles[0], Profile.fromSequence(aEncoded), backtrace=True) +score, alignments = aligner.align(profiles[0], Profile.fromSequence(aEncoded), + backtrace=True) for encoded in alignments: - alignment = v.decodeProfileAlignment(encoded) - print alignment - + alignment = v.decodeProfileAlignment(encoded) + print alignment
4bef46ef98591d47d653eeb4f74bf00a8a1d5d69
correios/utils.py
correios/utils.py
from itertools import chain from typing import Sized, Iterable, Container, Set class RangeSet(Sized, Iterable, Container): def __init__(self, *ranges): self.ranges = [] for r in ranges: if isinstance(r, range): r = [r] elif isinstance(r, RangeSet): r = list(r.ranges) elif isinstance(r, Iterable) and not isinstance(r, Set): r = [range(*r)] else: msg = "RangeSet argument must be a range, RangeSet or an Iterable, not {}" raise ValueError(msg.format(type(r))) self.ranges.extend(r) def __iter__(self): return chain.from_iterable(r for r in self.ranges) def __contains__(self, elem): return any(elem in r for r in self.ranges) def __len__(self): return sum(len(r) for r in self.ranges)
from itertools import chain from typing import Container, Iterable, Sized class RangeSet(Sized, Iterable, Container): def __init__(self, *ranges): self.ranges = [] for r in ranges: if isinstance(r, range): self.ranges.append(r) continue try: element = list(r.ranges) except AttributeError: element = None try: element = element or [range(*r)] except: msg = "RangeSet argument must be a range, RangeSet or an Iterable, not {}" raise ValueError(msg.format(type(r))) self.ranges.extend(element) def __iter__(self): return chain.from_iterable(r for r in self.ranges) def __contains__(self, elem): return any(elem in r for r in self.ranges) def __len__(self): return sum(len(r) for r in self.ranges)
Use duck typing when creating a RangeSet
Use duck typing when creating a RangeSet
Python
apache-2.0
osantana/correios,solidarium/correios,olist/correios
--- +++ @@ -1,5 +1,5 @@ from itertools import chain -from typing import Sized, Iterable, Container, Set +from typing import Container, Iterable, Sized class RangeSet(Sized, Iterable, Container): @@ -8,16 +8,21 @@ for r in ranges: if isinstance(r, range): - r = [r] - elif isinstance(r, RangeSet): - r = list(r.ranges) - elif isinstance(r, Iterable) and not isinstance(r, Set): - r = [range(*r)] - else: + self.ranges.append(r) + continue + + try: + element = list(r.ranges) + except AttributeError: + element = None + + try: + element = element or [range(*r)] + except: msg = "RangeSet argument must be a range, RangeSet or an Iterable, not {}" raise ValueError(msg.format(type(r))) - self.ranges.extend(r) + self.ranges.extend(element) def __iter__(self): return chain.from_iterable(r for r in self.ranges)
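The isinstance ladder was replaced with "try the RangeSet shape, then the iterable shape"; the same EAFP pattern in isolation, with invented names:

class Quacker:
    def quack(self):
        return 'quack'

def make_noise(thing):
    # EAFP: attempt the richer interface first and fall back on failure,
    # instead of isinstance() checks against concrete types
    try:
        return thing.quack()
    except AttributeError:
        pass
    try:
        return ' '.join(thing)
    except TypeError:
        raise ValueError('thing must quack or be an iterable of strings')

print(make_noise(Quacker()))         # quack
print(make_noise(['honk', 'honk']))  # honk honk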
55a4680bb07896f0bab06d836ade056d115f004f
dsub/_dsub_version.py
dsub/_dsub_version.py
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Single source of truth for dsub's version. This must remain small and dependency-free so that any dsub module may import it without creating circular dependencies. Note that this module is parsed as a text file by setup.py and changes to the format of this file could break setup.py. The version should follow formatting requirements specified in PEP-440. - https://www.python.org/dev/peps/pep-0440 A typical release sequence will be versioned as: 0.1.3.dev0 -> 0.1.3 -> 0.1.4.dev0 -> ... """ DSUB_VERSION = '0.1.9'
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Single source of truth for dsub's version. This must remain small and dependency-free so that any dsub module may import it without creating circular dependencies. Note that this module is parsed as a text file by setup.py and changes to the format of this file could break setup.py. The version should follow formatting requirements specified in PEP-440. - https://www.python.org/dev/peps/pep-0440 A typical release sequence will be versioned as: 0.1.3.dev0 -> 0.1.3 -> 0.1.4.dev0 -> ... """ DSUB_VERSION = '0.1.10.dev0'
Update version number to 0.1.10.dev0.
Update version number to 0.1.10.dev0. PiperOrigin-RevId: 202663603
Python
apache-2.0
DataBiosphere/dsub,DataBiosphere/dsub
--- +++ @@ -26,4 +26,4 @@ 0.1.3.dev0 -> 0.1.3 -> 0.1.4.dev0 -> ... """ -DSUB_VERSION = '0.1.9' +DSUB_VERSION = '0.1.10.dev0'
3fbbdec51cfd93217705adcae37b1bf22d5661fa
backend/playlist/serializers.py
backend/playlist/serializers.py
from rest_framework import serializers from .models import Cd, Cdtrack, Show, Playlist, PlaylistEntry class TrackSerializer(serializers.ModelSerializer): album = serializers.StringRelatedField( read_only=True ) class Meta: model = Cdtrack fields = ('trackid', 'url', 'tracknum', 'trackartist', 'tracktitle', 'tracklength', 'album') class ReleaseSerializer(serializers.HyperlinkedModelSerializer): tracks = serializers.PrimaryKeyRelatedField( many=True, read_only=True ) class Meta: model = Cd fields = ('id', 'url', 'arrivaldate', 'artist', 'title', 'year', 'local', 'compilation', 'female', 'tracks') class ShowSerializer(serializers.ModelSerializer): class Meta: model = Show fields = ('id', 'name', 'startTime', 'endTime', 'defaultHost') class PlaylistEntrySerializer(serializers.ModelSerializer): playlist = serializers.PrimaryKeyRelatedField( queryset = Playlist.objects.all() ) class Meta: model = PlaylistEntry fields = ('id', 'artist','album','title','duration','local','australian','female','newRelease','playlist' ) pass class PlaylistSerializer(serializers.ModelSerializer): entries =PlaylistEntrySerializer( many=True, read_only=True ) class Meta: model = Playlist fields = ('id', 'show', 'host', 'date', 'notes', 'entries')
from rest_framework import serializers from .models import Cd, Cdtrack, Show, Playlist, PlaylistEntry class TrackSerializer(serializers.ModelSerializer): album = serializers.StringRelatedField( read_only=True ) class Meta: model = Cdtrack fields = ('trackid', 'url', 'tracknum', 'trackartist', 'tracktitle', 'tracklength', 'album') class ReleaseSerializer(serializers.HyperlinkedModelSerializer): tracks = serializers.PrimaryKeyRelatedField( many=True, read_only=True ) class Meta: model = Cd fields = ('id', 'url', 'arrivaldate', 'artist', 'title', 'year', 'local', 'compilation', 'female', 'tracks') class ShowSerializer(serializers.ModelSerializer): class Meta: model = Show fields = ('id', 'name', 'startTime', 'endTime', 'defaultHost') class PlaylistEntrySerializer(serializers.ModelSerializer): playlist = serializers.PrimaryKeyRelatedField( queryset = Playlist.objects.all() ) class Meta: model = PlaylistEntry fields = ('id', 'artist','album','title','duration','local','australian','female','newRelease','playlist' ) pass class PlaylistSerializer(serializers.ModelSerializer): entries =PlaylistEntrySerializer( many=True, read_only=True ) class Meta: model = Playlist fields = ('id', 'show','showname', 'host', 'date', 'notes', 'entries')
Add showname to playlist API view.
Add showname to playlist API view. * Even though it's obsolete now, we need it for old shows.
Python
mit
ThreeDRadio/playlists,ThreeDRadio/playlists,ThreeDRadio/playlists
--- +++ @@ -45,4 +45,4 @@ class Meta: model = Playlist - fields = ('id', 'show', 'host', 'date', 'notes', 'entries') + fields = ('id', 'show','showname', 'host', 'date', 'notes', 'entries')
56aa0448fb3cd1df1a0fd43abc9a0e37e8ddf55b
trans_sync/management/commands/save_trans.py
trans_sync/management/commands/save_trans.py
# coding: utf-8 from __future__ import unicode_literals from optparse import make_option from django.core.management.base import NoArgsCommand class Command(NoArgsCommand): option_list = NoArgsCommand.option_list + ( make_option( '--dry-run', action='store_true', dest='dry_run', default=False, help='Do not actually send signals (and all connected stuff).' ), ) def handle(self, *args, **options): if not options['dry_run']: pass
# coding: utf-8 from __future__ import unicode_literals import os from os.path import join, isdir from optparse import make_option from django.core.management.base import NoArgsCommand from django.conf import settings from modeltranslation.translator import translator from babel.messages.catalog import Catalog from babel.messages.pofile import write_po class Command(NoArgsCommand): option_list = NoArgsCommand.option_list + ( make_option( '--dry-run', action='store_true', dest='dry_run', default=False, help='Do not actually save files.' ), ) def handle(self, *args, **options): if not options['dry_run']: pass locale_path = settings.LOCALE_MODEL_TRANS if not isdir(locale_path): os.mkdir(locale_path) for lang in [l[0] for l in list(settings.LANGUAGES)]: catalog = Catalog(locale=lang) for model in translator.get_registered_models(): opts = translator.get_options_for_model(model) for field in opts.get_field_names(): tr_field = "%s_%s" % (field, lang) for item in model.objects.all(): msgid = "%s.%s.%s" % (item._meta, item.pk, field) msgstr = "%s" % getattr(item, tr_field) catalog.add(id=msgid, string=msgstr) # write catalog to file lang_path = os.path.join(locale_path, lang) if not isdir(lang_path): os.mkdir(lang_path) f = open(join(lang_path, "LC_MESSAGES", "modeltranslation.po"), "w") write_po(f, catalog) f.close()
Save trans to .po files
Save trans to .po files
Python
mit
djentlemen/django-modeltranslation-sync
--- +++ @@ -1,8 +1,14 @@ # coding: utf-8 from __future__ import unicode_literals - +import os +from os.path import join, isdir from optparse import make_option from django.core.management.base import NoArgsCommand +from django.conf import settings +from modeltranslation.translator import translator + +from babel.messages.catalog import Catalog +from babel.messages.pofile import write_po class Command(NoArgsCommand): @@ -13,10 +19,36 @@ action='store_true', dest='dry_run', default=False, - help='Do not actually send signals (and all connected stuff).' + help='Do not actually save files.' ), ) def handle(self, *args, **options): if not options['dry_run']: pass + + locale_path = settings.LOCALE_MODEL_TRANS + if not isdir(locale_path): + os.mkdir(locale_path) + + for lang in [l[0] for l in list(settings.LANGUAGES)]: + + catalog = Catalog(locale=lang) + + for model in translator.get_registered_models(): + opts = translator.get_options_for_model(model) + + for field in opts.get_field_names(): + tr_field = "%s_%s" % (field, lang) + for item in model.objects.all(): + msgid = "%s.%s.%s" % (item._meta, item.pk, field) + msgstr = "%s" % getattr(item, tr_field) + catalog.add(id=msgid, string=msgstr) + + # write catalog to file + lang_path = os.path.join(locale_path, lang) + if not isdir(lang_path): + os.mkdir(lang_path) + f = open(join(lang_path, "LC_MESSAGES", "modeltranslation.po"), "w") + write_po(f, catalog) + f.close()
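A minimal standalone sketch of the Babel catalog-to-PO flow the new command uses. The locale and message id here are made up, and the file is opened in binary mode because `write_po` emits bytes:

from babel.messages.catalog import Catalog
from babel.messages.pofile import write_po

catalog = Catalog(locale='de')                      # one catalog per language
catalog.add(id='app.item.1.title', string='Titel')  # msgid -> msgstr
with open('modeltranslation.po', 'wb') as f:
    write_po(f, catalog)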
e2495040277fafdac4c0e060517cf667baa27c02
chinup/__init__.py
chinup/__init__.py
try: from .allauth import * except ImportError: from .chinup import * from .exceptions import * __version__ = '0.1'
from __future__ import absolute_import, unicode_literals try: from .allauth import * except ImportError: from .chinup import * from .exceptions import * __version__ = '0.1' # Configure logging to avoid warning. # https://docs.python.org/2/howto/logging.html#configuring-logging-for-a-library import logging if hasattr(logging, 'NullHandler'): logging.getLogger('chinup').addHandler(logging.NullHandler())
Configure package-level logging to avoid warning.
Configure package-level logging to avoid warning.
Python
mit
pagepart/chinup
--- +++ @@ -1,3 +1,5 @@ +from __future__ import absolute_import, unicode_literals + try: from .allauth import * except ImportError: @@ -7,3 +9,10 @@ __version__ = '0.1' + + +# Configure logging to avoid warning. +# https://docs.python.org/2/howto/logging.html#configuring-logging-for-a-library +import logging +if hasattr(logging, 'NullHandler'): + logging.getLogger('chinup').addHandler(logging.NullHandler())
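The pattern behind this change, as a self-contained sketch: a library attaches a `NullHandler` to its own logger so that importing applications without logging configured never see the "No handlers could be found" warning, while the application still controls real output:

import logging

# Library side: swallow records unless the application configures handlers.
logging.getLogger('chinup').addHandler(logging.NullHandler())

# Application side: opt in to actual output.
logging.basicConfig(level=logging.INFO)
logging.getLogger('chinup').info('visible only once basicConfig has run')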
fc36b9bc2970c611a4fb5063463f27cfd96df21d
moksha/hub/messaging.py
moksha/hub/messaging.py
# This file is part of Moksha. # Copyright (C) 2008-2010 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: Luke Macken <lmacken@redhat.com> class MessagingHub(object): """ A generic messaging hub. This class represents the base functionality of the protocol-level hubs. """ def send_message(self, topic, message): raise NotImplementedError def subscribe(self, topic): raise NotImplementedError
# This file is part of Moksha. # Copyright (C) 2008-2010 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: Luke Macken <lmacken@redhat.com> class MessagingHub(object): """ A generic messaging hub. This class represents the base functionality of the protocol-level hubs. """ def send_message(self, topic, message): raise NotImplementedError def subscribe(self, topic, callback): raise NotImplementedError
Update our MessagingHub.subscribe method arguments
Update our MessagingHub.subscribe method arguments
Python
apache-2.0
ralphbean/moksha,pombredanne/moksha,mokshaproject/moksha,lmacken/moksha,mokshaproject/moksha,lmacken/moksha,ralphbean/moksha,pombredanne/moksha,pombredanne/moksha,mokshaproject/moksha,mokshaproject/moksha,ralphbean/moksha,pombredanne/moksha,lmacken/moksha
--- +++ @@ -25,5 +25,5 @@ def send_message(self, topic, message): raise NotImplementedError - def subscribe(self, topic): + def subscribe(self, topic, callback): raise NotImplementedError
a7fcc89755e01bf3dbe7090e2bf7f1211ce9af84
test/test_property.py
test/test_property.py
import unittest from odml import Property, Section, Document class TestProperty(unittest.TestCase): def setUp(self): pass def test_value(self): p = Property("property", 100) assert(p.value[0] == 100) def test_name(self): pass def test_parent(self): pass def test_dtype(self): pass def test_path(self): pass if __name__ == "__main__": print("TestProperty") tp = TestProperty() tp.test_value()
import unittest from odml import Property, Section, Document from odml.doc import BaseDocument from odml.section import BaseSection class TestProperty(unittest.TestCase): def setUp(self): pass def test_value(self): p = Property("property", 100) self.assertEqual(p.value[0], 100) def test_name(self): pass def test_parent(self): p = Property("property_section", parent=Section("S")) self.assertIsInstance(p.parent, BaseSection) self.assertEqual(len(p.parent._props), 1) with self.assertRaises(ValueError): Property("property_prop", parent=Property("P")) with self.assertRaises(ValueError): Property("property_doc", parent=Document()) def test_dtype(self): pass def test_path(self): pass class TestSection(unittest.TestCase): def setUp(self): pass def test_value(self): pass def test_name(self): pass def test_parent(self): s = Section("Section") self.assertIsNone(s.parent) s = Section("section_doc", parent=Document()) self.assertIsInstance(s.parent, BaseDocument) self.assertEqual(len(s.parent._sections), 1) s = Section("section_sec", parent=Section("S")) self.assertIsInstance(s.parent, BaseSection) self.assertEqual(len(s.parent._sections), 1) with self.assertRaises(ValueError): Section("section_property", parent=Property("P")) def test_dtype(self): pass def test_path(self): pass if __name__ == "__main__": print("TestProperty") tp = TestProperty() tp.test_value() tp.test_parent() print("TestSection") ts = TestSection() ts.test_parent()
Add tests to cover update parent functionality.
Add tests to cover update parent functionality.
Python
bsd-3-clause
lzehl/python-odml
--- +++ @@ -1,5 +1,7 @@ import unittest from odml import Property, Section, Document +from odml.doc import BaseDocument +from odml.section import BaseSection class TestProperty(unittest.TestCase): @@ -9,13 +11,51 @@ def test_value(self): p = Property("property", 100) - assert(p.value[0] == 100) + self.assertEqual(p.value[0], 100) def test_name(self): pass def test_parent(self): + p = Property("property_section", parent=Section("S")) + self.assertIsInstance(p.parent, BaseSection) + self.assertEqual(len(p.parent._props), 1) + with self.assertRaises(ValueError): + Property("property_prop", parent=Property("P")) + with self.assertRaises(ValueError): + Property("property_doc", parent=Document()) + + def test_dtype(self): pass + + def test_path(self): + pass + + +class TestSection(unittest.TestCase): + def setUp(self): + pass + + def test_value(self): + pass + + def test_name(self): + pass + + def test_parent(self): + s = Section("Section") + self.assertIsNone(s.parent) + + s = Section("section_doc", parent=Document()) + self.assertIsInstance(s.parent, BaseDocument) + self.assertEqual(len(s.parent._sections), 1) + + s = Section("section_sec", parent=Section("S")) + self.assertIsInstance(s.parent, BaseSection) + self.assertEqual(len(s.parent._sections), 1) + + with self.assertRaises(ValueError): + Section("section_property", parent=Property("P")) def test_dtype(self): pass @@ -27,3 +67,10 @@ print("TestProperty") tp = TestProperty() tp.test_value() + tp.test_parent() + + print("TestSection") + ts = TestSection() + ts.test_parent() + +
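The new parent-validation tests lean on `assertRaises` as a context manager; a minimal self-contained version of that idiom:

import unittest

class ContextManagerExample(unittest.TestCase):
    def test_raises(self):
        # The block must raise the named exception for the test to pass.
        with self.assertRaises(ValueError):
            int('not a number')

if __name__ == '__main__':
    unittest.main()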
bee93012144e033b02c05a1e586620dfa7f4c883
words/models.py
words/models.py
from django.db import models class Word(models.Model): word = models.CharField(max_length=255) date_retired = models.DateTimeField(null=True, blank=True) date_active = models.DateTimeField(null=True, blank=True) views = models.IntegerField(default=0) @property def is_active(self): if self.date_retired: return False return bool(self.date_active)
from django.db import models class Word(models.Model): word = models.CharField(max_length=255) date_retired = models.DateTimeField(null=True, blank=True) date_active = models.DateTimeField(null=True, blank=True) views = models.IntegerField(default=0) @property def is_active(self): if self.date_retired: return False return bool(self.date_active) def __unicode__(self): return self.word
Make the word display nice
Make the word display nice
Python
bsd-2-clause
kylegibson/how_to_teach_your_baby_tracker
--- +++ @@ -12,3 +12,6 @@ if self.date_retired: return False return bool(self.date_active) + + def __unicode__(self): + return self.word
6765cefc1a5a928b3cff16c0f1014096f82c3d3b
test/test_services.py
test/test_services.py
import pytest @pytest.mark.parametrize("name, enabled, running", [ ("cron", "enabled", "running"), ("docker", "enabled", "running"), ("firewalld", "enabled", "running"), ("haveged", "enabled", "running"), ("ssh", "enabled", "running"), ]) def test_services(Service, name, enabled, running): is_enabled = Service(name).is_enabled print(is_enabled) if enabled == "enabled": assert is_enabled else: assert not is_enabled is_running = Service(name).is_running print(is_running) if running == "running": assert is_running else: assert not is_running
import pytest @pytest.mark.parametrize("name, enabled, running", [ ("cron", "enabled", "running"), ("docker", "enabled", "running"), ("firewalld", "enabled", "running"), ("haveged", "enabled", "running"), ("ssh", "enabled", "running"), ]) def test_services(host, name, enabled, running): svc = host.service(name) is_enabled = svc.is_enabled print(is_enabled) if enabled == "enabled": assert is_enabled else: assert not is_enabled is_running = svc.is_running print(is_running) if running == "running": assert is_running else: assert not is_running
Change test function as existing method is deprecated
Change test function as existing method is deprecated
Change test function as existing method is deprecated
Python
mit
wicksy/laptop-build,wicksy/laptop-build,wicksy/laptop-build,wicksy/laptop-build
--- +++ @@ -8,18 +8,20 @@ ("ssh", "enabled", "running"), ]) -def test_services(Service, name, enabled, running): - is_enabled = Service(name).is_enabled +def test_services(host, name, enabled, running): + + svc = host.service(name) + + is_enabled = svc.is_enabled print(is_enabled) if enabled == "enabled": assert is_enabled else: assert not is_enabled - is_running = Service(name).is_running + is_running = svc.is_running print(is_running) if running == "running": assert is_running else: assert not is_running -
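For reference, the newer testinfra style in isolation: the injected `host` fixture replaces deprecated per-module fixtures such as `Service`, and a single `host.service(...)` handle exposes both checks (requires the testinfra pytest plugin):

def test_ssh(host):
    svc = host.service('ssh')
    assert svc.is_enabled
    assert svc.is_running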
eea647cf05d7143d800f834dd77aeafc32522100
groundstation/settings.py
groundstation/settings.py
PORT=1248 BEACON_TIMEOUT=5 DEFAULT_BUFSIZE=8192
PORT=1248 BEACON_TIMEOUT=5 DEFAULT_BUFSIZE=8192 DEFAULT_CACHE_LIFETIME=900
Add config key for default cache lifetime
Add config key for default cache lifetime
Python
mit
richo/groundstation,richo/groundstation,richo/groundstation,richo/groundstation,richo/groundstation
--- +++ @@ -1,3 +1,4 @@ PORT=1248 BEACON_TIMEOUT=5 DEFAULT_BUFSIZE=8192 +DEFAULT_CACHE_LIFETIME=900
81f3e4f10243cb31b600666a19112acee7c13f55
signac/db/__init__.py
signac/db/__init__.py
import warnings try: import pymongo # noqa except ImportError: warnings.warn("Failed to import pymongo. " "get_database will not be available.", ImportWarning) def get_database(*args, **kwargs): """Get a database handle. This function is only available if pymongo is installed.""" raise ImportError( "You need to install pymongo to use `get_database()`.") else: from .database import get_database __all__ = ['get_database']
import logging import warnings try: import pymongo # noqa except ImportError: warnings.warn("Failed to import pymongo. " "get_database will not be available.", ImportWarning) def get_database(*args, **kwargs): """Get a database handle. This function is only available if pymongo is installed.""" raise ImportError( "You need to install pymongo to use `get_database()`.") else: if pymongo.version_tuple[0] < 3: logging.getLogger(__name__).warn( "Your pymongo installation (version {}) is no longer " "supported by signac. Consider updating.".format(pymongo.version)) from .database import get_database __all__ = ['get_database']
Add warning about outdated pymongo versions.
Add warning about outdated pymongo versions. signac currently only supports pymongo versions 3.x.
Python
bsd-3-clause
csadorf/signac,csadorf/signac
--- +++ @@ -1,3 +1,4 @@ +import logging import warnings try: import pymongo # noqa @@ -12,6 +13,10 @@ raise ImportError( "You need to install pymongo to use `get_database()`.") else: + if pymongo.version_tuple[0] < 3: + logging.getLogger(__name__).warn( + "Your pymongo installation (version {}) is no longer " + "supported by signac. Consider updating.".format(pymongo.version)) from .database import get_database
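The version gate in isolation: `pymongo.version_tuple` allows a numeric comparison and `pymongo.version` is the printable form. `logger.warning` is used in this sketch because `warn` is a deprecated alias:

import logging
import pymongo

if pymongo.version_tuple[0] < 3:
    logging.getLogger(__name__).warning(
        'pymongo %s is no longer supported, consider updating.'
        % pymongo.version)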
ee4f8264d942d7af5f5b71ff6cd162f3ae1fe515
django_hash_filter/templatetags/hash_filter.py
django_hash_filter/templatetags/hash_filter.py
from django import template from django.template.defaultfilters import stringfilter from django.template.base import TemplateSyntaxError import hashlib from django_hash_filter.templatetags import get_available_hashes register = template.Library() @register.filter @stringfilter def hash(value, arg): """ Returns a hex-digest of the passed in value for the hash algorithm given. """ arg = str(arg).lower() if not arg in get_available_hashes(): raise TemplateSyntaxError("The %s hash algorithm does not exist." % arg) try: f = getattr(hashlib, arg) hashed = f(value).hexdigest() except Exception: raise ValueError("The %s hash algorithm cannot produce a hex digest. Ensure that OpenSSL is properly installed." % arg) return hashed
import hashlib import sys from django import template from django.template.defaultfilters import stringfilter from django.template.base import TemplateSyntaxError from django_hash_filter.templatetags import get_available_hashes register = template.Library() @register.filter @stringfilter def hash(value, arg): """ Returns a hex-digest of the passed in value for the hash algorithm given. """ arg = str(arg).lower() if sys.version_info >= (3,0): value = value.encode("utf-8") if not arg in get_available_hashes(): raise TemplateSyntaxError("The %s hash algorithm does not exist." % arg) try: f = getattr(hashlib, arg) hashed = f(value).hexdigest() except Exception: raise ValueError("The %s hash algorithm cannot produce a hex digest. Ensure that OpenSSL is properly installed." % arg) return hashed
Convert unicode string to byte array on Python 3
Convert unicode string to byte array on Python 3
Python
mit
andrewjsledge/django-hash-filter
--- +++ @@ -1,7 +1,9 @@ +import hashlib +import sys + from django import template from django.template.defaultfilters import stringfilter from django.template.base import TemplateSyntaxError -import hashlib from django_hash_filter.templatetags import get_available_hashes register = template.Library() @@ -13,6 +15,8 @@ Returns a hex-digest of the passed in value for the hash algorithm given. """ arg = str(arg).lower() + if sys.version_info >= (3,0): + value = value.encode("utf-8") if not arg in get_available_hashes(): raise TemplateSyntaxError("The %s hash algorithm does not exist." % arg) try:
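The underlying Python 3 constraint, reduced to a few lines: `hashlib` only accepts bytes, so unicode text must be encoded before digesting (the value below is arbitrary):

import hashlib

value = 'p\u00e4ssword'  # any unicode text
digest = hashlib.md5(value.encode('utf-8')).hexdigest()
print(digest)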
df216bdc25ef29da821f577a517ccdca61448cf4
django_lightweight_queue/middleware/logging.py
django_lightweight_queue/middleware/logging.py
from __future__ import absolute_import import logging import traceback log = logging.getLogger(__name__) class LoggingMiddleware(object): def process_job(self, job): log.info("Running job %s", job) def process_result(self, job, result, duration): log.info("Finished job %s => %r (Time taken: %.2fs)", job, result, duration, ) def process_exception(self, job, duration, *exc_info): log.error("Exception when processing %r (duration: %.2fs): %s", job, duration, ''.join(traceback.format_exception(*exc_info)), )
from __future__ import absolute_import import logging import traceback log = logging.getLogger(__name__) class LoggingMiddleware(object): def process_job(self, job): log.info("Running job %s", job) def process_result(self, job, result, duration): log.info("Finished job => %r (Time taken: %.2fs)", result, duration, ) def process_exception(self, job, duration, *exc_info): log.error("Exception when processing job (duration: %.2fs): %s", duration, ''.join(traceback.format_exception(*exc_info)), )
Save over 50% of logfile 'bloat' by not repeating all args on success/failure
Save over 50% of logfile 'bloat' by not repeating all args on success/failure The data will be right above it just before we run the job.
Python
bsd-3-clause
prophile/django-lightweight-queue,prophile/django-lightweight-queue,thread/django-lightweight-queue,lamby/django-lightweight-queue,thread/django-lightweight-queue
--- +++ @@ -10,15 +10,13 @@ log.info("Running job %s", job) def process_result(self, job, result, duration): - log.info("Finished job %s => %r (Time taken: %.2fs)", - job, + log.info("Finished job => %r (Time taken: %.2fs)", result, duration, ) def process_exception(self, job, duration, *exc_info): - log.error("Exception when processing %r (duration: %.2fs): %s", - job, + log.error("Exception when processing job (duration: %.2fs): %s", duration, ''.join(traceback.format_exception(*exc_info)), )
802b9c2df754b3acf78e9e1facc1802a901e97a2
furry/furry.py
furry/furry.py
import discord from discord.ext import commands class Furry: """A cog that adds weird furry commands or something""" def __init__(self, bot): self.bot = bot @commands.command() async def owo(self): """OwO what's this?""" await self.bot.say("*Notices " + user.mention + "'s bulge* OwO what's this?") def setup(bot): bot.add_cog(Furry(bot))
import discord from discord.ext import commands class Furry: """A cog that adds weird furry commands or something""" def __init__(self, bot): self.bot = bot @commands.command() async def owo(self, user : discord.Member): """OwO what's this?""" await self.bot.say("*Notices " + user.mention + "'s bulge* OwO what's this?") def setup(bot): bot.add_cog(Furry(bot))
Fix the command and make it actually work
Fix the command and make it actually work Pass discord.Member as user
Python
apache-2.0
KazroFox/Kaz-Cogs
--- +++ @@ -8,7 +8,7 @@ self.bot = bot @commands.command() - async def owo(self): + async def owo(self, user : discord.Member): """OwO what's this?""" await self.bot.say("*Notices " + user.mention + "'s bulge* OwO what's this?")
6a508d01fa3fa0d4084406fcb2b5e41d1b614b7c
datalogger/__main__.py
datalogger/__main__.py
import sys from PyQt5.QtWidgets import QApplication from datalogger.api.workspace import Workspace from datalogger.analysis_window import AnalysisWindow from datalogger import __version__ def run_datalogger_full(): print("CUED DataLogger {}".format(__version__)) app = 0 app = QApplication(sys.argv) # Create the window w = AnalysisWindow() w.CurrentWorkspace = Workspace() #w.CurrentWorkspace.path = "//cued-fs/users/general/tab53/ts-home/Documents/urop/Logger 2017/cued_datalogger/" # Load the workspace #CurrentWorkspace.load("//cued-fs/users/general/tab53/ts-home/Documents/urop/Logger 2017/cued_datalogger/tests/test_workspace.wsp") w.addon_widget.discover_addons(w.CurrentWorkspace.path + "addons/") # Run the program w.show() sys.exit(app.exec_()) if __name__ == '__main__': run_datalogger_full()
import sys from PyQt5.QtWidgets import QApplication from datalogger.api.workspace import Workspace from datalogger.analysis_window import AnalysisWindow from datalogger import __version__ def run_datalogger_full(): print("CUED DataLogger {}".format(__version__)) app = 0 app = QApplication(sys.argv) CurrentWorkspace = Workspace() # Create the window w = AnalysisWindow() w.CurrentWorkspace = CurrentWorkspace w.addon_widget.discover_addons(w.CurrentWorkspace.path + "addons/") # Run the program w.show() sys.exit(app.exec_()) if __name__ == '__main__': run_datalogger_full()
Move workspace before window creation so config is set for the window
Move workspace before window creation so config is set for the window
Python
bsd-3-clause
torebutlin/cued_datalogger
--- +++ @@ -11,14 +11,13 @@ app = 0 app = QApplication(sys.argv) + CurrentWorkspace = Workspace() + # Create the window w = AnalysisWindow() - w.CurrentWorkspace = Workspace() - #w.CurrentWorkspace.path = "//cued-fs/users/general/tab53/ts-home/Documents/urop/Logger 2017/cued_datalogger/" - # Load the workspace - #CurrentWorkspace.load("//cued-fs/users/general/tab53/ts-home/Documents/urop/Logger 2017/cued_datalogger/tests/test_workspace.wsp") - + w.CurrentWorkspace = CurrentWorkspace + w.addon_widget.discover_addons(w.CurrentWorkspace.path + "addons/") # Run the program
15f0a2e67fe942760707694370cc652f17e1c6b3
demo/tests/conftest.py
demo/tests/conftest.py
"""Unit tests configuration file.""" def pytest_configure(config): """Disable verbose output when running tests.""" terminal = config.pluginmanager.getplugin('terminal') base = terminal.TerminalReporter class QuietReporter(base): """A py.test reporting that only shows dots when running tests.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.verbosity = 0 self.showlongtestinfo = False self.showfspath = False terminal.TerminalReporter = QuietReporter
"""Unit tests configuration file.""" import logging def pytest_configure(config): """Disable verbose output when running tests.""" logging.basicConfig(level=logging.DEBUG) terminal = config.pluginmanager.getplugin('terminal') base = terminal.TerminalReporter class QuietReporter(base): """A py.test reporting that only shows dots when running tests.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.verbosity = 0 self.showlongtestinfo = False self.showfspath = False terminal.TerminalReporter = QuietReporter
Deploy Travis CI build 834 to GitHub
Deploy Travis CI build 834 to GitHub
Python
mit
jacebrowning/template-python-demo
--- +++ @@ -1,8 +1,12 @@ """Unit tests configuration file.""" + +import logging def pytest_configure(config): """Disable verbose output when running tests.""" + logging.basicConfig(level=logging.DEBUG) + terminal = config.pluginmanager.getplugin('terminal') base = terminal.TerminalReporter
70808a2243ebf04aa86d5b4539950b22cd96cc7d
maras/utils/__init__.py
maras/utils/__init__.py
''' Misc utilities ''' # Import python libs import os import binascii def rand_hex_str(size): ''' Return a random string of the passed size using hex encoding ''' return binascii.hexlify(os.urandom(size/2)) def rand_raw_str(size): ''' Return a raw byte string of the given size ''' return os.urandom(size)
''' Misc utilities ''' # Import python libs import os import time import struct import binascii import datetime # create a standard epoch so all platforms will count revs from # a standard epoch of jan 1 2014 STD_EPOCH = time.mktime(datetime.datetime(2014, 1, 1).timetuple()) def rand_hex_str(size): ''' Return a random string of the passed size using hex encoding ''' return binascii.hexlify(os.urandom(size/2)) def rand_raw_str(size): ''' Return a raw byte string of the given size ''' return os.urandom(size) def gen_rev(): ''' Return a revision based on timestamp ''' r_time = time.time() - STD_EPOCH return struct.pack('>Q', r_time * 1000000)
Add rev generation via normalized timestamps
Add rev generation via normalized timestamps
Python
apache-2.0
thatch45/maras
--- +++ @@ -4,7 +4,14 @@ # Import python libs import os +import time +import struct import binascii +import datetime + +# create a standard epoch so all platforms will count revs from +# a standard epoch of jan 1 2014 +STD_EPOCH = time.mktime(datetime.datetime(2014, 1, 1).timetuple()) def rand_hex_str(size): @@ -19,3 +26,11 @@ Return a raw byte string of the given size ''' return os.urandom(size) + + +def gen_rev(): + ''' + Return a revision based on timestamp + ''' + r_time = time.time() - STD_EPOCH + return struct.pack('>Q', r_time * 1000000)
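`gen_rev` in standalone form: microseconds since the custom epoch are packed big-endian into 8 bytes so revisions sort correctly as raw byte strings. Note that on Python 3, `struct.pack('>Q', ...)` requires an int, hence the explicit cast in this sketch:

import datetime
import struct
import time

STD_EPOCH = time.mktime(datetime.datetime(2014, 1, 1).timetuple())

rev = struct.pack('>Q', int((time.time() - STD_EPOCH) * 1000000))
print(len(rev))  # always 8 bytes, lexicographically ordered by time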
ae2d52e323ea8959caf474d23de857d59b5b6ca8
spacy/tests/regression/test_issue3625.py
spacy/tests/regression/test_issue3625.py
from __future__ import unicode_literals from spacy.lang.hi import Hindi def test_issue3625(): """Test that default punctuation rules applies to hindi unicode characters""" nlp = Hindi() doc = nlp(u"hi. how हुए. होटल, होटल") assert [token.text for token in doc] == ['hi', '.', 'how', 'हुए', '.', 'होटल', ',', 'होटल']
# coding: utf8 from __future__ import unicode_literals from spacy.lang.hi import Hindi def test_issue3625(): """Test that default punctuation rules applies to hindi unicode characters""" nlp = Hindi() doc = nlp(u"hi. how हुए. होटल, होटल") assert [token.text for token in doc] == ['hi', '.', 'how', 'हुए', '.', 'होटल', ',', 'होटल']
Add default encoding utf-8 for test file
Add default encoding utf-8 for test file
Python
mit
honnibal/spaCy,spacy-io/spaCy,explosion/spaCy,honnibal/spaCy,explosion/spaCy,explosion/spaCy,honnibal/spaCy,spacy-io/spaCy,spacy-io/spaCy,explosion/spaCy,explosion/spaCy,honnibal/spaCy,spacy-io/spaCy,spacy-io/spaCy,spacy-io/spaCy,explosion/spaCy
--- +++ @@ -1,3 +1,4 @@ +# coding: utf8 from __future__ import unicode_literals from spacy.lang.hi import Hindi
ce873b24318fd6493f570f370db1d2c2d244bdcc
joby/spiders/data_science_jobs.py
joby/spiders/data_science_jobs.py
# -*- coding: utf-8 -*- from logging import getLogger from scrapy.spiders import Rule, CrawlSpider from scrapy.linkextractors import LinkExtractor class DataScienceJobsSpider(CrawlSpider): log = getLogger(__name__) name = 'data-science-jobs' allowed_domains = ['www.data-science-jobs.com', 'fonts.googleapis.com', 'jobs.lever.com'] start_urls = ['http://www.data-science-jobs.com/'] test = Rule(LinkExtractor(allow='family'), callback='parse') test2 = Rule(LinkExtractor(allow='comtravo'), callback='parse') job_links = Rule(LinkExtractor(allow='detail\/'), callback='parse') pagination_links = Rule(LinkExtractor(allow='\?page=\d+'), callback='parse') rules = [job_links, pagination_links, test, test2] response = None def parse(self, response): self.log.info('Parsing %s', response.url)
# -*- coding: utf-8 -*- from logging import getLogger from scrapy.spiders import Rule, CrawlSpider from scrapy.linkextractors import LinkExtractor class DataScienceJobsSpider(CrawlSpider): log = getLogger(__name__) name = 'data-science-jobs' allowed_domains = ['www.data-science-jobs.com'] start_urls = ['http://www.data-science-jobs.com'] job_links = Rule(LinkExtractor(allow='detail\/'), callback='parse_job') pagination_links = Rule(LinkExtractor(allow='page=\d+')) rules = [job_links, pagination_links] def parse_job(self, response): self.log.info('Parsing %s', response.url)
Rename the parser function to parse_job.
Rename the parser function to parse_job.
Python
mit
cyberbikepunk/job-spiders
--- +++ @@ -8,14 +8,13 @@ class DataScienceJobsSpider(CrawlSpider): log = getLogger(__name__) name = 'data-science-jobs' - allowed_domains = ['www.data-science-jobs.com', 'fonts.googleapis.com', 'jobs.lever.com'] - start_urls = ['http://www.data-science-jobs.com/'] - test = Rule(LinkExtractor(allow='family'), callback='parse') - test2 = Rule(LinkExtractor(allow='comtravo'), callback='parse') - job_links = Rule(LinkExtractor(allow='detail\/'), callback='parse') - pagination_links = Rule(LinkExtractor(allow='\?page=\d+'), callback='parse') - rules = [job_links, pagination_links, test, test2] - response = None + allowed_domains = ['www.data-science-jobs.com'] + start_urls = ['http://www.data-science-jobs.com'] + job_links = Rule(LinkExtractor(allow='detail\/'), callback='parse_job') + pagination_links = Rule(LinkExtractor(allow='page=\d+')) + rules = [job_links, pagination_links] - def parse(self, response): + def parse_job(self, response): self.log.info('Parsing %s', response.url) + +
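The reason behind the rename, shown as a minimal spider: `CrawlSpider` drives its `rules` through its own `parse()` method, so overriding `parse` (as the old code did) silently breaks rule dispatch; callbacks need a different name:

from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

class JobsSpider(CrawlSpider):
    name = 'jobs'
    start_urls = ['http://www.data-science-jobs.com']
    rules = [
        Rule(LinkExtractor(allow=r'detail/'), callback='parse_job'),
        Rule(LinkExtractor(allow=r'page=\d+')),  # follow pagination, no callback
    ]

    def parse_job(self, response):  # anything but parse()
        self.logger.info('Parsing %s', response.url)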
b77e8f9a081517701cccf9f177c81eaca877e8c7
pombola/images/admin.py
pombola/images/admin.py
from django.contrib import admin from django.contrib.contenttypes.generic import GenericTabularInline from sorl.thumbnail import get_thumbnail from sorl.thumbnail.admin import AdminImageMixin from pombola.images import models class ImageAdmin(AdminImageMixin, admin.ModelAdmin): list_display = [ 'thumbnail', 'content_object', 'is_primary', 'source', ] def thumbnail(self, obj): im = get_thumbnail(obj.image, '100x100') return '<img src="%s" />' % ( im.url ) thumbnail.allow_tags = True class ImageAdminInline(AdminImageMixin, GenericTabularInline): model = models.Image extra = 0 can_delete = True admin.site.register( models.Image, ImageAdmin )
from django.contrib import admin from django.contrib.contenttypes.generic import GenericTabularInline from sorl.thumbnail import get_thumbnail from sorl.thumbnail.admin import AdminImageMixin from pombola.images import models class ImageAdmin(AdminImageMixin, admin.ModelAdmin): list_display = [ 'thumbnail', 'content_object', 'is_primary', 'source', ] def thumbnail(self, obj): if obj.image: im = get_thumbnail(obj.image, '100x100') return '<img src="%s" />' % ( im.url ) else: return "NO IMAGE FOUND" thumbnail.allow_tags = True class ImageAdminInline(AdminImageMixin, GenericTabularInline): model = models.Image extra = 0 can_delete = True admin.site.register( models.Image, ImageAdmin )
Handle entries that have no image associated with them
Handle entries that have no image associated with them
Python
agpl-3.0
ken-muturi/pombola,mysociety/pombola,geoffkilpin/pombola,hzj123/56th,ken-muturi/pombola,patricmutwiri/pombola,geoffkilpin/pombola,ken-muturi/pombola,mysociety/pombola,ken-muturi/pombola,mysociety/pombola,hzj123/56th,mysociety/pombola,patricmutwiri/pombola,patricmutwiri/pombola,geoffkilpin/pombola,hzj123/56th,ken-muturi/pombola,ken-muturi/pombola,patricmutwiri/pombola,geoffkilpin/pombola,hzj123/56th,patricmutwiri/pombola,patricmutwiri/pombola,hzj123/56th,geoffkilpin/pombola,mysociety/pombola,geoffkilpin/pombola,mysociety/pombola,hzj123/56th
--- +++ @@ -15,8 +15,11 @@ list_display = [ 'thumbnail', 'content_object', 'is_primary', 'source', ] def thumbnail(self, obj): - im = get_thumbnail(obj.image, '100x100') - return '<img src="%s" />' % ( im.url ) + if obj.image: + im = get_thumbnail(obj.image, '100x100') + return '<img src="%s" />' % ( im.url ) + else: + return "NO IMAGE FOUND" thumbnail.allow_tags = True
a03b166f8297783819a43eeb78e5af4d52d11bcc
carbonate/list.py
carbonate/list.py
import os import re # Use the built-in version of scandir/walk if possible, otherwise # use the scandir module version try: from os import scandir, walk except ImportError: from scandir import scandir, walk def listMetrics(storage_dir, follow_sym_links=False, metric_suffix='wsp'): metric_regex = re.compile(".*\.%s$" % metric_suffix) storage_dir = storage_dir.rstrip(os.sep) for root, dirnames, filenames in walk(storage_dir, followlinks=follow_sym_links): for filename in filenames: if metric_regex.match(filename): root_path = root[len(storage_dir) + 1:] m_path = os.path.join(root_path, filename) m_name, m_ext = os.path.splitext(m_path) m_name = m_name.replace('/', '.') yield m_name
import os import re # Use the built-in version of scandir/walk if possible, otherwise # use the scandir module version try: from os import scandir, walk # noqa # pylint: disable=unused-import except ImportError: from scandir import scandir, walk # noqa # pylint: disable=unused-import def listMetrics(storage_dir, follow_sym_links=False, metric_suffix='wsp'): metric_regex = re.compile(".*\.%s$" % metric_suffix) storage_dir = storage_dir.rstrip(os.sep) for root, _, filenames in walk(storage_dir, followlinks=follow_sym_links): for filename in filenames: if metric_regex.match(filename): root_path = root[len(storage_dir) + 1:] m_path = os.path.join(root_path, filename) m_name, m_ext = os.path.splitext(m_path) m_name = m_name.replace('/', '.') yield m_name
Make pylint happy as per graphite-web example
Make pylint happy as per graphite-web example
Python
mit
criteo-forks/carbonate,jssjr/carbonate,deniszh/carbonate,graphite-project/carbonate,jssjr/carbonate,graphite-project/carbonate,criteo-forks/carbonate,jssjr/carbonate,deniszh/carbonate,deniszh/carbonate,criteo-forks/carbonate,graphite-project/carbonate
--- +++ @@ -4,9 +4,9 @@ # Use the built-in version of scandir/walk if possible, otherwise # use the scandir module version try: - from os import scandir, walk + from os import scandir, walk # noqa # pylint: disable=unused-import except ImportError: - from scandir import scandir, walk + from scandir import scandir, walk # noqa # pylint: disable=unused-import def listMetrics(storage_dir, follow_sym_links=False, metric_suffix='wsp'): @@ -14,8 +14,7 @@ storage_dir = storage_dir.rstrip(os.sep) - for root, dirnames, filenames in walk(storage_dir, - followlinks=follow_sym_links): + for root, _, filenames in walk(storage_dir, followlinks=follow_sym_links): for filename in filenames: if metric_regex.match(filename): root_path = root[len(storage_dir) + 1:]
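The compatibility import on its own, plus the tuple-unpacking change: the unused `dirnames` slot becomes `_`, which is what quiets pylint here:

try:
    from os import scandir, walk  # noqa  # stdlib since Python 3.5
except ImportError:
    from scandir import scandir, walk  # noqa  # backport package

for root, _, filenames in walk('.'):  # dirnames intentionally discarded
    pass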
119e95dedaf6633e1ca6367bfd13fa08192033bd
pywinauto/unittests/testall.py
pywinauto/unittests/testall.py
import unittest import os.path import os import sys sys.path.append(".") #from pywinauto.timings import Timings #Timings.Fast() excludes = ['test_sendkeys'] def run_tests(): testfolder = os.path.abspath(os.path.split(__file__)[0]) sys.path.append(testfolder) for root, dirs, files in os.walk(testfolder): test_modules = [ file.replace('.py', '') for file in files if file.startswith('test_') and file.endswith('.py')] test_modules = [mod for mod in test_modules if mod.lower() not in excludes] for mod in test_modules: #globals().update(__import__(mod, globals(), locals()).__dict__) # import it imported_mod = __import__(mod, globals(), locals()) #print imported_mod.__dict__ globals().update(imported_mod.__dict__) #runner = unittest.TextTestRunner(verbosity = 2) unittest.main()#testRunner = runner) if __name__ == '__main__': run_tests()
import os import sys import unittest import coverage # needs to be called before importing the modules cov = coverage.coverage(branch = True) cov.start() testfolder = os.path.abspath(os.path.dirname(__file__)) package_root = os.path.abspath(os.path.join(testfolder, r"..\..")) sys.path.append(package_root) import pywinauto modules_to_test = [pywinauto] def run_tests(): excludes = ['test_sendkeys'] suite = unittest.TestSuite() sys.path.append(testfolder) for root, dirs, files in os.walk(testfolder): test_modules = [ file.replace('.py', '') for file in files if file.startswith('test_') and file.endswith('.py')] test_modules = [mod for mod in test_modules if mod.lower() not in excludes] for mod in test_modules: #globals().update(__import__(mod, globals(), locals()).__dict__) # import it imported_mod = __import__(mod, globals(), locals()) suite.addTests( unittest.defaultTestLoader.loadTestsFromModule(imported_mod)) #unittest.main()#testRunner = runner) #runner = unittest.TextTestRunner(verbosity = 2) unittest.TextTestRunner(verbosity=1).run(suite) cov.stop() #print cov.analysis() print cov.report() cov.html_report( directory = os.path.join(package_root, "Coverage_report")) if __name__ == '__main__': run_tests()
Synchronize testing module with BetterBatch one - and integrate Coverage reporting
Synchronize testing module with BetterBatch one - and integrate Coverage reporting
Python
bsd-3-clause
cessor/pywinauto,bombilee/pywinauto,ohio813/pywinauto,nameoffnv/pywinauto,yongxin1029/pywinauto,clonly/pywinauto,vsajip/pywinauto,cessor/pywinauto,LogicalKnight/pywinauto,ohio813/pywinauto,nameoffnv/pywinauto,ldhwin/pywinauto,airelil/pywinauto,drinkertea/pywinauto,prasen-ftech/pywinauto,vsajip/pywinauto,wilsoc5/pywinauto,pjquirk/pjquirk-dotnetnames,LogicalKnight/pywinauto,ohio813/pywinauto,mjakop/pywinauto,ldhwin/pywinauto,pjquirk/pjquirk-dotnetnames,wilsoc5/pywinauto,nameoffnv/pywinauto,MagazinnikIvan/pywinauto,cessor/pywinauto,vane/pywinauto,moden-py/pywinauto,ldhwin/pywinauto,vane/pywinauto,bombilee/pywinauto,vsajip/pywinauto,prasen-ftech/pywinauto,manojklm/pywinauto-x64,LogicalKnight/pywinauto,vasily-v-ryabov/pywinauto,pjquirk/pjquirk-dotnetnames,LogicalKnight/pywinauto,cetygamer/pywinauto,clonly/pywinauto,prasen-ftech/pywinauto,manojklm/pywinauto-x64,pywinauto/pywinauto,vane/pywinauto,bombilee/pywinauto,mindw/pywinauto,yongxin1029/pywinauto,mjakop/pywinauto,moden-py/pywinauto,manojklm/pywinauto-x64,mindw/pywinauto,pjquirk/pjquirk-dotnetnames
--- +++ @@ -1,20 +1,28 @@ +import os +import sys import unittest -import os.path -import os -import sys -sys.path.append(".") +import coverage -#from pywinauto.timings import Timings -#Timings.Fast() +# needs to be called before importing the modules +cov = coverage.coverage(branch = True) +cov.start() -excludes = ['test_sendkeys'] +testfolder = os.path.abspath(os.path.dirname(__file__)) +package_root = os.path.abspath(os.path.join(testfolder, r"..\..")) +sys.path.append(package_root) + +import pywinauto + +modules_to_test = [pywinauto] + def run_tests(): - testfolder = os.path.abspath(os.path.split(__file__)[0]) + excludes = ['test_sendkeys'] + + suite = unittest.TestSuite() sys.path.append(testfolder) - for root, dirs, files in os.walk(testfolder): test_modules = [ @@ -28,12 +36,20 @@ #globals().update(__import__(mod, globals(), locals()).__dict__) # import it imported_mod = __import__(mod, globals(), locals()) - #print imported_mod.__dict__ - globals().update(imported_mod.__dict__) + suite.addTests( + unittest.defaultTestLoader.loadTestsFromModule(imported_mod)) + + #unittest.main()#testRunner = runner) #runner = unittest.TextTestRunner(verbosity = 2) - unittest.main()#testRunner = runner) + unittest.TextTestRunner(verbosity=1).run(suite) + cov.stop() + #print cov.analysis() + print cov.report() + cov.html_report( + directory = os.path.join(package_root, "Coverage_report")) + if __name__ == '__main__': run_tests()
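The coverage flow in miniature. Measurement must start before the code under test runs or module-level lines are missed; `coverage.Coverage` is the current spelling of the lowercase `coverage.coverage` factory the script uses, and `measured` below is just a stand-in for the code being exercised:

import coverage

cov = coverage.Coverage(branch=True)
cov.start()

def measured(x):          # stands in for the code under test
    return x * 2 if x else 0

measured(21)

cov.stop()
cov.report()                                  # text summary to stdout
cov.html_report(directory='Coverage_report')  # browsable report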
29032ee9dc69b1f3226358c3a6b74a7e42d71f07
generationkwh/amortizations.py
generationkwh/amortizations.py
# -*- coding:utf8 -*- from plantmeter.isodates import isodate from dateutil.relativedelta import relativedelta waitYears = 1 expirationYears = 25 def previousAmortizationDate(purchase_date, current_date): years = relativedelta( isodate(current_date), isodate(purchase_date), ).years if years <= waitYears: return None firstAmortization = ( isodate(purchase_date) + relativedelta(years = min(years,expirationYears) ) return str(amortizationDate) def pendingAmortization(purchase_date, current_date, investment_amount, amortized_amount): years = relativedelta( isodate(current_date), isodate(purchase_date), ).years yearly_amortitzation = investment_amount / expirationYears if years <= waitYears: return 0 if years >= expirationYears: return investment_amount - amortized_amount toAmortize = (years-1)*yearly_amortitzation - amortized_amount return max(0, toAmortize) # vim: et ts=4 sw=4
# -*- coding:utf8 -*- from plantmeter.isodates import isodate from dateutil.relativedelta import relativedelta waitYears = 1 expirationYears = 25 def previousAmortizationDate(purchase_date, current_date): years = relativedelta( isodate(current_date), isodate(purchase_date), ).years if years <= waitYears: return None firstAmortization = ( isodate(purchase_date) + relativedelta(years = min(years,expirationYears) )) return str(firstAmortization) def pendingAmortization(purchase_date, current_date, investment_amount, amortized_amount): years = relativedelta( isodate(current_date), isodate(purchase_date), ).years yearly_amortitzation = investment_amount / expirationYears if years <= waitYears: return 0 if years >= expirationYears: return investment_amount - amortized_amount toAmortize = (years-1)*yearly_amortitzation - amortized_amount return max(0, toAmortize) # vim: et ts=4 sw=4
Fix return variable name and missing parenthesis
Fix return variable name and missing parenthesis
Python
agpl-3.0
Som-Energia/somenergia-generationkwh,Som-Energia/somenergia-generationkwh
--- +++ @@ -19,9 +19,9 @@ firstAmortization = ( isodate(purchase_date) + relativedelta(years = min(years,expirationYears) - ) + )) - return str(amortizationDate) + return str(firstAmortization) def pendingAmortization(purchase_date, current_date, investment_amount, amortized_amount):
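A quick standalone check of the date arithmetic this module relies on: `relativedelta` between two dates yields whole calendar years, which is what the amortization schedule counts:

from datetime import date
from dateutil.relativedelta import relativedelta

delta = relativedelta(date(2020, 6, 1), date(2017, 1, 1))
print(delta.years)  # 3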
9a879fb583f7f4190a4601a9a488ba61414395e0
kivymd/card.py
kivymd/card.py
# -*- coding: utf-8 -*- from kivy.lang import Builder from kivy.properties import BoundedNumericProperty, ReferenceListProperty from kivy.uix.boxlayout import BoxLayout from kivymd.elevationbehavior import ElevationBehavior from kivymd.theming import ThemableBehavior from kivy.metrics import dp Builder.load_string(''' <MDCard> canvas: Color: rgba: self.background_color RoundedRectangle: size: self.size pos: self.pos radius: [self.border_radius] background_color: self.theme_cls.bg_light ''') class MDCard(ThemableBehavior, ElevationBehavior, BoxLayout): r = BoundedNumericProperty(1., min=0., max=1.) g = BoundedNumericProperty(1., min=0., max=1.) b = BoundedNumericProperty(1., min=0., max=1.) a = BoundedNumericProperty(0., min=0., max=1.) border_radius = BoundedNumericProperty(dp(3),min=0) background_color = ReferenceListProperty(r, g, b, a)
# -*- coding: utf-8 -*- from kivy.lang import Builder from kivy.properties import BoundedNumericProperty, ReferenceListProperty, ListProperty,BooleanProperty from kivy.uix.boxlayout import BoxLayout from kivymd.elevationbehavior import ElevationBehavior from kivymd.theming import ThemableBehavior from kivy.metrics import dp Builder.load_string(''' <MDCard> canvas: Color: rgba: self.background_color RoundedRectangle: size: self.size pos: self.pos radius: [self.border_radius] Color: rgba: self.theme_cls.divider_color a: self.border_color_a Line: rounded_rectangle: (self.pos[0],self.pos[1],self.size[0],self.size[1],self.border_radius) background_color: self.theme_cls.bg_light ''') class MDCard(ThemableBehavior, ElevationBehavior, BoxLayout): r = BoundedNumericProperty(1., min=0., max=1.) g = BoundedNumericProperty(1., min=0., max=1.) b = BoundedNumericProperty(1., min=0., max=1.) a = BoundedNumericProperty(0., min=0., max=1.) border_radius = BoundedNumericProperty(dp(3),min=0) border_color_a = BoundedNumericProperty(0, min=0., max=1.) background_color = ReferenceListProperty(r, g, b, a)
Add border as option (set via alpha)
Add border as option (set via alpha)
Python
mit
cruor99/KivyMD
--- +++ @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- from kivy.lang import Builder -from kivy.properties import BoundedNumericProperty, ReferenceListProperty +from kivy.properties import BoundedNumericProperty, ReferenceListProperty, ListProperty,BooleanProperty from kivy.uix.boxlayout import BoxLayout from kivymd.elevationbehavior import ElevationBehavior from kivymd.theming import ThemableBehavior @@ -15,6 +15,11 @@ size: self.size pos: self.pos radius: [self.border_radius] + Color: + rgba: self.theme_cls.divider_color + a: self.border_color_a + Line: + rounded_rectangle: (self.pos[0],self.pos[1],self.size[0],self.size[1],self.border_radius) background_color: self.theme_cls.bg_light ''') @@ -26,4 +31,5 @@ a = BoundedNumericProperty(0., min=0., max=1.) border_radius = BoundedNumericProperty(dp(3),min=0) + border_color_a = BoundedNumericProperty(0, min=0., max=1.) background_color = ReferenceListProperty(r, g, b, a)
7ea053bfc1b557ce4a4df4905af4a5491517490b
default_config.py
default_config.py
# Default Config # Override these values in the instance/local_config.py file, not this one. DEBUG = None DOMAIN = 'localhost' HOST = 'localhost' PORT = 5000
"""\ Default Config Override these values in the instance/local_config.py file, not this one. """ DEBUG = None DOMAIN = 'localhost' HOST = 'localhost' PORT = 5000
Use doc-string, not comments in default config.
Use doc-string, not comments in default config.
Python
mit
joeyespo/tabhouse.org,joeyespo/tabhouse,joeyespo/tabhouse,joeyespo/tabhouse.org
--- +++ @@ -1,5 +1,9 @@ -# Default Config -# Override these values in the instance/local_config.py file, not this one. +"""\ +Default Config + +Override these values in the instance/local_config.py file, not this one. +""" + DEBUG = None
1736d7b7aed3ce3049186ce97e24941de0187caf
oidc_provider/lib/utils/common.py
oidc_provider/lib/utils/common.py
from django.conf import settings as django_settings from django.core.urlresolvers import reverse from oidc_provider import settings def get_issuer(): """ Construct the issuer full url. Basically is the site url with some path appended. """ site_url = settings.get('SITE_URL') path = reverse('oidc_provider:provider_info') \ .split('/.well-known/openid-configuration/')[0] issuer = site_url + path return issuer def get_rsa_key(): """ Load the rsa key previously created with `creatersakey` command. """ file_path = settings.get('OIDC_RSA_KEY_FOLDER') + '/OIDC_RSA_KEY.pem' with open(file_path, 'r') as f: key = f.read() return key
from django.conf import settings as django_settings from django.core.urlresolvers import reverse from oidc_provider import settings def get_issuer(): """ Construct the issuer full url. Basically is the site url with some path appended. """ site_url = settings.get('SITE_URL') path = reverse('oidc_provider:provider_info') \ .split('/.well-known/openid-configuration/')[0] issuer = site_url + path return issuer def get_rsa_key(): """ Load the rsa key previously created with `creatersakey` command. """ file_path = settings.get('OIDC_RSA_KEY_FOLDER') + '/OIDC_RSA_KEY.pem' try: with open(file_path, 'r') as f: key = f.read() except IOError: raise IOError('We could not find your key file on: ' + file_path) return key
Add IOError custom message when rsa key file is missing.
Add IOError custom message when rsa key file is missing.
Python
mit
ByteInternet/django-oidc-provider,torreco/django-oidc-provider,juanifioren/django-oidc-provider,bunnyinc/django-oidc-provider,wayward710/django-oidc-provider,bunnyinc/django-oidc-provider,wayward710/django-oidc-provider,wojtek-fliposports/django-oidc-provider,nmohoric/django-oidc-provider,nmohoric/django-oidc-provider,ByteInternet/django-oidc-provider,torreco/django-oidc-provider,wojtek-fliposports/django-oidc-provider,juanifioren/django-oidc-provider
--- +++ @@ -22,7 +22,10 @@ Load the rsa key previously created with `creatersakey` command. """ file_path = settings.get('OIDC_RSA_KEY_FOLDER') + '/OIDC_RSA_KEY.pem' - with open(file_path, 'r') as f: - key = f.read() + try: + with open(file_path, 'r') as f: + key = f.read() + except IOError: + raise IOError('We could not find your key file on: ' + file_path) return key
dc0129224dc01f4e9cdaa57ee2aff307a4f5d7d3
project/utils/logger.py
project/utils/logger.py
# -*- coding: utf-8 -*- import datetime import logging import os def set_up_logging(): """ Main logger for usual bot needs """ logs_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'logs') if not os.path.exists(logs_directory): os.mkdir(logs_directory) logger = logging.getLogger('tenhou') logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) file_name = datetime.datetime.now().strftime('%Y-%m-%d %H_%M_%S') + '.log' fh = logging.FileHandler(os.path.join(logs_directory, file_name)) fh.setLevel(logging.DEBUG) formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S') ch.setFormatter(formatter) fh.setFormatter(formatter) logger.addHandler(ch) logger.addHandler(fh) logger = logging.getLogger('ai') logger.setLevel(logging.DEBUG) logger.addHandler(ch) logger.addHandler(fh)
# -*- coding: utf-8 -*- import datetime import logging import os import hashlib from utils.settings_handler import settings def set_up_logging(): """ Logger for tenhou communication and AI output """ logs_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'logs') if not os.path.exists(logs_directory): os.mkdir(logs_directory) # we shouldn't be afraid about collision # also, we need it to distinguish different bots logs (if they were run in the same time) name_hash = hashlib.sha1(settings.USER_ID.encode('utf-8')).hexdigest()[:5] logger = logging.getLogger('tenhou') logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) file_name = '{}_{}.log'.format(name_hash, datetime.datetime.now().strftime('%Y-%m-%d %H_%M_%S')) fh = logging.FileHandler(os.path.join(logs_directory, file_name)) fh.setLevel(logging.DEBUG) formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S') ch.setFormatter(formatter) fh.setFormatter(formatter) logger.addHandler(ch) logger.addHandler(fh) logger = logging.getLogger('ai') logger.setLevel(logging.DEBUG) logger.addHandler(ch) logger.addHandler(fh)
Add hash from the bot name to the log name
Add hash from the bot name to the log name
Python
mit
huangenyan/Lattish,MahjongRepository/tenhou-python-bot,MahjongRepository/tenhou-python-bot,huangenyan/Lattish
--- +++ @@ -2,17 +2,23 @@ import datetime import logging +import os +import hashlib -import os +from utils.settings_handler import settings def set_up_logging(): """ - Main logger for usual bot needs + Logger for tenhou communication and AI output """ logs_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'logs') if not os.path.exists(logs_directory): os.mkdir(logs_directory) + + # we shouldn't be afraid about collision + # also, we need it to distinguish different bots logs (if they were run in the same time) + name_hash = hashlib.sha1(settings.USER_ID.encode('utf-8')).hexdigest()[:5] logger = logging.getLogger('tenhou') logger.setLevel(logging.DEBUG) @@ -20,7 +26,7 @@ ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) - file_name = datetime.datetime.now().strftime('%Y-%m-%d %H_%M_%S') + '.log' + file_name = '{}_{}.log'.format(name_hash, datetime.datetime.now().strftime('%Y-%m-%d %H_%M_%S')) fh = logging.FileHandler(os.path.join(logs_directory, file_name)) fh.setLevel(logging.DEBUG)
a174fbd637bf9ccc7b8a97a251c016495f92f6a9
eliot/__init__.py
eliot/__init__.py
""" Eliot: Logging as Storytelling Suppose we turn from outside estimates of a man, to wonder, with keener interest, what is the report of his own consciousness about his doings or capacity: with what hindrances he is carrying on his daily labors; what fading of hopes, or what deeper fixity of self-delusion the years are marking off within him; and with what spirit he wrestles against universal pressure, which will one day be too heavy for him, and bring his heart to its final pause. -- George Eliot, "Middlemarch" """ from ._version import __version__ # Expose the public API: from ._message import Message from ._action import startAction, startTask, Action from ._output import ILogger, Logger, MemoryLogger from ._validation import Field, MessageType, ActionType from ._traceback import writeTraceback, writeFailure addDestination = Logger._destinations.add removeDestination = Logger._destinations.remove __all__ = ["Message", "writeTraceback", "writeFailure", "startAction", "startTask", "Action", "Field", "MessageType", "ActionType", "ILogger", "Logger", "MemoryLogger", "addDestination", "removeDestination", "__version__", ]
""" Eliot: Logging as Storytelling Suppose we turn from outside estimates of a man, to wonder, with keener interest, what is the report of his own consciousness about his doings or capacity: with what hindrances he is carrying on his daily labors; what fading of hopes, or what deeper fixity of self-delusion the years are marking off within him; and with what spirit he wrestles against universal pressure, which will one day be too heavy for him, and bring his heart to its final pause. -- George Eliot, "Middlemarch" """ from ._version import __version__ # Expose the public API: from ._message import Message from ._action import startAction, startTask, Action from ._output import ILogger, Logger, MemoryLogger from ._validation import Field, fields, MessageType, ActionType from ._traceback import writeTraceback, writeFailure addDestination = Logger._destinations.add removeDestination = Logger._destinations.remove __all__ = ["Message", "writeTraceback", "writeFailure", "startAction", "startTask", "Action", "Field", "fields", "MessageType", "ActionType", "ILogger", "Logger", "MemoryLogger", "addDestination", "removeDestination", "__version__", ]
Add fields to the public API.
Add fields to the public API.
Python
apache-2.0
ClusterHQ/eliot,ScatterHQ/eliot,iffy/eliot,ScatterHQ/eliot,ScatterHQ/eliot
--- +++ @@ -18,7 +18,7 @@ from ._message import Message from ._action import startAction, startTask, Action from ._output import ILogger, Logger, MemoryLogger -from ._validation import Field, MessageType, ActionType +from ._validation import Field, fields, MessageType, ActionType from ._traceback import writeTraceback, writeFailure addDestination = Logger._destinations.add removeDestination = Logger._destinations.remove @@ -26,7 +26,7 @@ __all__ = ["Message", "writeTraceback", "writeFailure", "startAction", "startTask", "Action", - "Field", "MessageType", "ActionType", + "Field", "fields", "MessageType", "ActionType", "ILogger", "Logger", "MemoryLogger", "addDestination", "removeDestination",
fb1f6f30fc7ba2d3dcce357168a05669c934c234
build/oggm/run_test.py
build/oggm/run_test.py
#!/usr/bin/env python import os os.environ["MPLBACKEND"] = 'agg' import matplotlib matplotlib.use('agg') import pytest import oggm import sys import ssl ssl._create_default_https_context = ssl._create_unverified_context initial_dir = os.getcwd() oggm_file = os.path.abspath(oggm.__file__) oggm_dir = os.path.dirname(oggm_file) sys.exit(pytest.main([oggm_dir, '--mpl']))
#!/usr/bin/env python import os os.environ["MPLBACKEND"] = 'agg' import matplotlib matplotlib.use('agg') import pytest import oggm import sys import ssl ssl._create_default_https_context = ssl._create_unverified_context if os.name == 'nt': sys.exit(0) initial_dir = os.getcwd() oggm_file = os.path.abspath(oggm.__file__) oggm_dir = os.path.dirname(oggm_file) sys.exit(pytest.main([oggm_dir, '--mpl']))
Disable testing on Windows for now, it just takes too long for any CI service
Disable testing on Windows for now, it just takes too long for any CI service
Python
mit
OGGM/OGGM-Anaconda
--- +++ @@ -13,6 +13,9 @@ import ssl ssl._create_default_https_context = ssl._create_unverified_context +if os.name == 'nt': + sys.exit(0) + initial_dir = os.getcwd() oggm_file = os.path.abspath(oggm.__file__) oggm_dir = os.path.dirname(oggm_file)
b1cc99458d22b8ed54326de6b4eafececb3a8093
jobs/telemetry_aggregator.py
jobs/telemetry_aggregator.py
#!/home/hadoop/anaconda2/bin/ipython import logging from os import environ from mozaggregator.aggregator import aggregate_metrics from mozaggregator.db import submit_aggregates logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) logger.addHandler(logging.StreamHandler()) date = environ['date'] logger.info("Running job for {}".format(date)) aggregates = aggregate_metrics(sc, ("nightly", "aurora", "beta", "release"), date) logger.info("Number of build-id aggregates: {}".format(aggregates[0].count())) logger.info("Number of submission date aggregates: {}".format(aggregates[1].count())) submit_aggregates(aggregates)
#!/home/hadoop/anaconda2/bin/ipython import logging from os import environ from mozaggregator.aggregator import aggregate_metrics from mozaggregator.db import submit_aggregates date = environ['date'] print "Running job for {}".format(date) aggregates = aggregate_metrics(sc, ("nightly", "aurora", "beta", "release"), date) print "Number of build-id aggregates: {}".format(aggregates[0].count()) print "Number of submission date aggregates: {}".format(aggregates[1].count()) submit_aggregates(aggregates)
Use simple prints for logging.
Use simple prints for logging.
Python
mpl-2.0
opentrials/opentrials-airflow,opentrials/opentrials-airflow
--- +++ @@ -5,14 +5,9 @@ from mozaggregator.aggregator import aggregate_metrics from mozaggregator.db import submit_aggregates -logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) -logger.addHandler(logging.StreamHandler()) - date = environ['date'] - -logger.info("Running job for {}".format(date)) +print "Running job for {}".format(date) aggregates = aggregate_metrics(sc, ("nightly", "aurora", "beta", "release"), date) -logger.info("Number of build-id aggregates: {}".format(aggregates[0].count())) -logger.info("Number of submission date aggregates: {}".format(aggregates[1].count())) +print "Number of build-id aggregates: {}".format(aggregates[0].count()) +print "Number of submission date aggregates: {}".format(aggregates[1].count()) submit_aggregates(aggregates)
1983885acfccfe4ffa010401fd9ef0971bb6c12c
etcd3/__init__.py
etcd3/__init__.py
from __future__ import absolute_import

from etcd3.client import Etcd3Client
from etcd3.client import client
from etcd3.client import Transactions

__author__ = 'Louis Taylor'
__email__ = 'louis@kragniz.eu'
__version__ = '0.1.0'

__all__ = ['Etcd3Client', 'client', 'etcdrpc', 'utils', 'Transactions']
from __future__ import absolute_import

from etcd3.client import Etcd3Client
from etcd3.client import client
from etcd3.client import Transactions
from etcd3.members import Member

__author__ = 'Louis Taylor'
__email__ = 'louis@kragniz.eu'
__version__ = '0.1.0'

__all__ = ['Etcd3Client', 'client', 'etcdrpc', 'utils', 'Transactions',
           'Member']
Make Member part of the public api
Make Member part of the public api
Python
apache-2.0
kragniz/python-etcd3
--- 
+++ 
@@ -3,9 +3,11 @@
 from etcd3.client import Etcd3Client
 from etcd3.client import client
 from etcd3.client import Transactions
+from etcd3.members import Member
 
 __author__ = 'Louis Taylor'
 __email__ = 'louis@kragniz.eu'
 __version__ = '0.1.0'
 
-__all__ = ['Etcd3Client', 'client', 'etcdrpc', 'utils', 'Transactions']
+__all__ = ['Etcd3Client', 'client', 'etcdrpc', 'utils', 'Transactions',
+           'Member']
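Listing `'Member'` in `__all__` makes it part of the package's star-import surface as well as a documented re-export. A small sketch of the effect, assuming the package layout in the record above:

# With 'Member' in __all__, a star-import binds the name:
from etcd3 import *
m = Member  # resolves to etcd3.members.Member

# Attribute access works regardless, since etcd3/__init__.py
# imports the name at module load time:
import etcd3
m = etcd3.Member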
fb1db28198b54b6288a9e7d499b43f6f1a51284c
partner_deduplicate_by_website/__manifest__.py
partner_deduplicate_by_website/__manifest__.py
# Copyright 2016 Tecnativa - Pedro M. Baeza
# Copyright 2017 Tecnativa - Vicent Cubells
# Copyright 2018 Tecnativa - Cristina Martin
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).

{
    "name": "Deduplicate Contacts by Website",
    "version": "13.0.1.0.0",
    "category": "Tools",
    "website": "https://github.com/OCA/crm",
    "author": "Tecnativa, " "Odoo Community Association (OCA)",
    "license": "AGPL-3",
    "installable": True,
    "depends": ["contacts"],
    "data": ["wizards/partner_merge_view.xml"],
}
# Copyright 2016 Tecnativa - Pedro M. Baeza
# Copyright 2017 Tecnativa - Vicent Cubells
# Copyright 2018 Tecnativa - Cristina Martin
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).

{
    "name": "Deduplicate Contacts by Website",
    "version": "13.0.1.0.0",
    "category": "Tools",
    "website": "https://github.com/OCA/partner-contact",
    "author": "Tecnativa, " "Odoo Community Association (OCA)",
    "license": "AGPL-3",
    "installable": True,
    "depends": ["contacts"],
    "data": ["wizards/partner_merge_view.xml"],
}
Fix website attribute in manifest
Fix website attribute in manifest
Python
agpl-3.0
OCA/partner-contact,OCA/partner-contact
--- 
+++ 
@@ -7,7 +7,7 @@
     "name": "Deduplicate Contacts by Website",
     "version": "13.0.1.0.0",
     "category": "Tools",
-    "website": "https://github.com/OCA/crm",
+    "website": "https://github.com/OCA/partner-contact",
     "author": "Tecnativa, " "Odoo Community Association (OCA)",
     "license": "AGPL-3",
     "installable": True,
db84de91e665a131ad82be3ed49eb291afd5342d
oratioignoreparser.py
oratioignoreparser.py
import os
import re


class OratioIgnoreParser():
    def __init__(self):
        self.ignored_paths = ["oratiomodule.tar.gz"]

    def load(self, oratio_ignore_path):
        with open(oratio_ignore_path, "r") as f:
            self.ignored_paths.extend([line.strip() for line in f])

    def should_be_ignored(self, filepath):
        for ig in self.ignored_paths:
            compiled_regex = re.compile('^' + re.escape(ig).replace('\\*', '.*') + '$')
            if compiled_regex.search(filepath) or compiled_regex.search(filepath.split('/')[-1]):
                return True
        return False

    def list_files(self, directory):
        filepaths = []
        ignored_files = []
        for root, dirs, files in os.walk("."):
            for name in files:
                relative_path = os.path.join(root, name)
                if relative_path.startswith("./"):
                    relative_path = relative_path[2:]
                if not self.should_be_ignored(relative_path):
                    filepaths.append(relative_path)
                else:
                    ignored_files.append(relative_path)
        return filepaths, ignored_files
import os
import re


class OratioIgnoreParser():
    def __init__(self):
        self.ignored_paths = ["oratiomodule.tar.gz"]

    def load(self, oratio_ignore_path):
        with open(oratio_ignore_path, "r") as f:
            self.ignored_paths.extend([line.strip() for line in f])

    def should_be_ignored(self, filepath):
        for ig in self.ignored_paths:
            compiled_regex = re.compile(
                '^' + re.escape(ig).replace('\\*', '.*') + '$'
            )
            if compiled_regex.search(filepath) or \
                    compiled_regex.search(filepath.split('/')[-1]):
                return True
        return False

    def list_files(self, directory):
        filepaths = []
        ignored_files = []
        for root, dirs, files in os.walk("."):
            for name in files:
                relative_path = os.path.join(root, name)
                if relative_path.startswith("./"):
                    relative_path = relative_path[2:]
                if not self.should_be_ignored(relative_path):
                    filepaths.append(relative_path)
                else:
                    ignored_files.append(relative_path)
        return filepaths, ignored_files
Make all lines shorter than 80 characters
Make all lines shorter than 80 characters
Python
mit
oratio-io/oratio-cli,oratio-io/oratio-cli
--- 
+++ 
@@ -12,8 +12,11 @@
 
     def should_be_ignored(self, filepath):
         for ig in self.ignored_paths:
-            compiled_regex = re.compile('^' + re.escape(ig).replace('\\*', '.*') + '$')
-            if compiled_regex.search(filepath) or compiled_regex.search(filepath.split('/')[-1]):
+            compiled_regex = re.compile(
+                '^' + re.escape(ig).replace('\\*', '.*') + '$'
+            )
+            if compiled_regex.search(filepath) or \
+                    compiled_regex.search(filepath.split('/')[-1]):
                 return True
         return False
 
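`should_be_ignored` turns each glob-style ignore pattern into an anchored regex: `re.escape` neutralizes every metacharacter, then the escaped wildcard `\*` is rewritten to `.*`. A standalone sketch of that translation; the helper name is hypothetical:

import re

def glob_to_regex(pattern):
    # Escape regex metacharacters, then re-enable only the '*'
    # wildcard as the regex "match anything" token.
    return re.compile('^' + re.escape(pattern).replace('\\*', '.*') + '$')

assert glob_to_regex('*.pyc').search('module.pyc')
assert not glob_to_regex('*.pyc').search('module.py')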